author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /sound/soc/sof
parent     Initial commit. (diff)
download   linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
           linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip

Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'sound/soc/sof')
-rw-r--r--  sound/soc/sof/Kconfig  295
-rw-r--r--  sound/soc/sof/Makefile  60
-rw-r--r--  sound/soc/sof/amd/Kconfig  63
-rw-r--r--  sound/soc/sof/amd/Makefile  16
-rw-r--r--  sound/soc/sof/amd/acp-common.c  209
-rw-r--r--  sound/soc/sof/amd/acp-dsp-offset.h  99
-rw-r--r--  sound/soc/sof/amd/acp-ipc.c  301
-rw-r--r--  sound/soc/sof/amd/acp-loader.c  257
-rw-r--r--  sound/soc/sof/amd/acp-pcm.c  120
-rw-r--r--  sound/soc/sof/amd/acp-probes.c  147
-rw-r--r--  sound/soc/sof/amd/acp-stream.c  187
-rw-r--r--  sound/soc/sof/amd/acp-trace.c  64
-rw-r--r--  sound/soc/sof/amd/acp.c  596
-rw-r--r--  sound/soc/sof/amd/acp.h  305
-rw-r--r--  sound/soc/sof/amd/pci-rmb.c  103
-rw-r--r--  sound/soc/sof/amd/pci-rn.c  107
-rw-r--r--  sound/soc/sof/amd/pci-vangogh.c  105
-rw-r--r--  sound/soc/sof/amd/rembrandt.c  146
-rw-r--r--  sound/soc/sof/amd/renoir.c  121
-rw-r--r--  sound/soc/sof/amd/vangogh.c  162
-rw-r--r--  sound/soc/sof/compress.c  391
-rw-r--r--  sound/soc/sof/control.c  221
-rw-r--r--  sound/soc/sof/core.c  523
-rw-r--r--  sound/soc/sof/debug.c  450
-rw-r--r--  sound/soc/sof/imx/Kconfig  53
-rw-r--r--  sound/soc/sof/imx/Makefile  11
-rw-r--r--  sound/soc/sof/imx/imx-common.c  101
-rw-r--r--  sound/soc/sof/imx/imx-common.h  27
-rw-r--r--  sound/soc/sof/imx/imx8.c  663
-rw-r--r--  sound/soc/sof/imx/imx8m.c  508
-rw-r--r--  sound/soc/sof/imx/imx8ulp.c  515
-rw-r--r--  sound/soc/sof/intel/Kconfig  364
-rw-r--r--  sound/soc/sof/intel/Makefile  43
-rw-r--r--  sound/soc/sof/intel/apl.c  124
-rw-r--r--  sound/soc/sof/intel/atom.c  420
-rw-r--r--  sound/soc/sof/intel/atom.h  74
-rw-r--r--  sound/soc/sof/intel/bdw.c  699
-rw-r--r--  sound/soc/sof/intel/byt.c  483
-rw-r--r--  sound/soc/sof/intel/cnl.c  512
-rw-r--r--  sound/soc/sof/intel/ext_manifest.h  35
-rw-r--r--  sound/soc/sof/intel/hda-bus.c  105
-rw-r--r--  sound/soc/sof/intel/hda-codec.c  449
-rw-r--r--  sound/soc/sof/intel/hda-common-ops.c  101
-rw-r--r--  sound/soc/sof/intel/hda-ctrl.c  316
-rw-r--r--  sound/soc/sof/intel/hda-dai-ops.c  661
-rw-r--r--  sound/soc/sof/intel/hda-dai.c  796
-rw-r--r--  sound/soc/sof/intel/hda-dsp.c  1110
-rw-r--r--  sound/soc/sof/intel/hda-ipc.c  441
-rw-r--r--  sound/soc/sof/intel/hda-ipc.h  56
-rw-r--r--  sound/soc/sof/intel/hda-loader-skl.c  578
-rw-r--r--  sound/soc/sof/intel/hda-loader.c  654
-rw-r--r--  sound/soc/sof/intel/hda-mlink.c  974
-rw-r--r--  sound/soc/sof/intel/hda-pcm.c  284
-rw-r--r--  sound/soc/sof/intel/hda-probes.c  148
-rw-r--r--  sound/soc/sof/intel/hda-stream.c  1056
-rw-r--r--  sound/soc/sof/intel/hda-trace.c  95
-rw-r--r--  sound/soc/sof/intel/hda.c  1768
-rw-r--r--  sound/soc/sof/intel/hda.h  990
-rw-r--r--  sound/soc/sof/intel/icl.c  198
-rw-r--r--  sound/soc/sof/intel/lnl.c  188
-rw-r--r--  sound/soc/sof/intel/mtl.c  745
-rw-r--r--  sound/soc/sof/intel/mtl.h  108
-rw-r--r--  sound/soc/sof/intel/pci-apl.c  109
-rw-r--r--  sound/soc/sof/intel/pci-cnl.c  147
-rw-r--r--  sound/soc/sof/intel/pci-icl.c  112
-rw-r--r--  sound/soc/sof/intel/pci-lnl.c  71
-rw-r--r--  sound/soc/sof/intel/pci-mtl.c  75
-rw-r--r--  sound/soc/sof/intel/pci-skl.c  93
-rw-r--r--  sound/soc/sof/intel/pci-tgl.c  321
-rw-r--r--  sound/soc/sof/intel/pci-tng.c  250
-rw-r--r--  sound/soc/sof/intel/shim.h  214
-rw-r--r--  sound/soc/sof/intel/skl.c  117
-rw-r--r--  sound/soc/sof/intel/tgl.c  244
-rw-r--r--  sound/soc/sof/iomem-utils.c  127
-rw-r--r--  sound/soc/sof/ipc.c  235
-rw-r--r--  sound/soc/sof/ipc3-control.c  731
-rw-r--r--  sound/soc/sof/ipc3-dtrace.c  665
-rw-r--r--  sound/soc/sof/ipc3-loader.c  414
-rw-r--r--  sound/soc/sof/ipc3-pcm.c  401
-rw-r--r--  sound/soc/sof/ipc3-priv.h  67
-rw-r--r--  sound/soc/sof/ipc3-topology.c  2582
-rw-r--r--  sound/soc/sof/ipc3.c  1161
-rw-r--r--  sound/soc/sof/ipc4-control.c  508
-rw-r--r--  sound/soc/sof/ipc4-fw-reg.h  155
-rw-r--r--  sound/soc/sof/ipc4-loader.c  491
-rw-r--r--  sound/soc/sof/ipc4-mtrace.c  681
-rw-r--r--  sound/soc/sof/ipc4-pcm.c  845
-rw-r--r--  sound/soc/sof/ipc4-priv.h  123
-rw-r--r--  sound/soc/sof/ipc4-topology.c  3044
-rw-r--r--  sound/soc/sof/ipc4-topology.h  461
-rw-r--r--  sound/soc/sof/ipc4.c  749
-rw-r--r--  sound/soc/sof/loader.c  190
-rw-r--r--  sound/soc/sof/mediatek/Kconfig  45
-rw-r--r--  sound/soc/sof/mediatek/Makefile  4
-rw-r--r--  sound/soc/sof/mediatek/adsp_helper.h  54
-rw-r--r--  sound/soc/sof/mediatek/mt8186/Makefile  4
-rw-r--r--  sound/soc/sof/mediatek/mt8186/mt8186-clk.c  100
-rw-r--r--  sound/soc/sof/mediatek/mt8186/mt8186-clk.h  24
-rw-r--r--  sound/soc/sof/mediatek/mt8186/mt8186-loader.c  58
-rw-r--r--  sound/soc/sof/mediatek/mt8186/mt8186.c  722
-rw-r--r--  sound/soc/sof/mediatek/mt8186/mt8186.h  93
-rw-r--r--  sound/soc/sof/mediatek/mt8195/Makefile  3
-rw-r--r--  sound/soc/sof/mediatek/mt8195/mt8195-clk.c  164
-rw-r--r--  sound/soc/sof/mediatek/mt8195/mt8195-clk.h  28
-rw-r--r--  sound/soc/sof/mediatek/mt8195/mt8195-loader.c  61
-rw-r--r--  sound/soc/sof/mediatek/mt8195/mt8195.c  675
-rw-r--r--  sound/soc/sof/mediatek/mt8195/mt8195.h  161
-rw-r--r--  sound/soc/sof/mediatek/mtk-adsp-common.c  84
-rw-r--r--  sound/soc/sof/mediatek/mtk-adsp-common.h  10
-rw-r--r--  sound/soc/sof/nocodec.c  116
-rw-r--r--  sound/soc/sof/ops.c  187
-rw-r--r--  sound/soc/sof/ops.h  628
-rw-r--r--  sound/soc/sof/pcm.c  736
-rw-r--r--  sound/soc/sof/pm.c  389
-rw-r--r--  sound/soc/sof/sof-acpi-dev.c  114
-rw-r--r--  sound/soc/sof/sof-acpi-dev.h  16
-rw-r--r--  sound/soc/sof/sof-audio.c  1127
-rw-r--r--  sound/soc/sof/sof-audio.h  646
-rw-r--r--  sound/soc/sof/sof-client-ipc-flood-test.c  394
-rw-r--r--  sound/soc/sof/sof-client-ipc-kernel-injector.c  162
-rw-r--r--  sound/soc/sof/sof-client-ipc-msg-injector.c  340
-rw-r--r--  sound/soc/sof/sof-client-probes-ipc3.c  232
-rw-r--r--  sound/soc/sof/sof-client-probes-ipc4.c  290
-rw-r--r--  sound/soc/sof/sof-client-probes.c  547
-rw-r--r--  sound/soc/sof/sof-client-probes.h  65
-rw-r--r--  sound/soc/sof/sof-client.c  612
-rw-r--r--  sound/soc/sof/sof-client.h  80
-rw-r--r--  sound/soc/sof/sof-of-dev.c  104
-rw-r--r--  sound/soc/sof/sof-of-dev.h  25
-rw-r--r--  sound/soc/sof/sof-pci-dev.c  372
-rw-r--r--  sound/soc/sof/sof-pci-dev.h  17
-rw-r--r--  sound/soc/sof/sof-priv.h  872
-rw-r--r--  sound/soc/sof/sof-utils.c  75
-rw-r--r--  sound/soc/sof/sof-utils.h  19
-rw-r--r--  sound/soc/sof/stream-ipc.c  129
-rw-r--r--  sound/soc/sof/topology.c  2463
-rw-r--r--  sound/soc/sof/trace.c  53
-rw-r--r--  sound/soc/sof/xtensa/Kconfig  3
-rw-r--r--  sound/soc/sof/xtensa/Makefile  5
-rw-r--r--  sound/soc/sof/xtensa/core.c  144
140 files changed, 50136 insertions, 0 deletions
diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig
new file mode 100644
index 000000000..80361139a
--- /dev/null
+++ b/sound/soc/sof/Kconfig
@@ -0,0 +1,295 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig SND_SOC_SOF_TOPLEVEL
+ bool "Sound Open Firmware Support"
+ help
+ This adds support for Sound Open Firmware (SOF). SOF is free and
+ generic open source audio DSP firmware for multiple devices.
+ Say Y if you have such a device that is supported by SOF.
+ If unsure select "N".
+
+if SND_SOC_SOF_TOPLEVEL
+
+config SND_SOC_SOF_PCI_DEV
+ tristate
+
+config SND_SOC_SOF_PCI
+ tristate "SOF PCI enumeration support"
+ depends on PCI
+ help
+ This adds support for PCI enumeration. This option is
+ required to enable Intel Skylake+ devices.
+ For backwards-compatibility with previous configurations, the selection
+ will be used as the default for platform-specific drivers.
+ Say Y if you need this option.
+ If unsure select "N".
+
+config SND_SOC_SOF_ACPI
+ tristate "SOF ACPI enumeration support"
+ depends on ACPI || COMPILE_TEST
+ help
+ This adds support for ACPI enumeration. This option is required
+ to enable Intel Broadwell/Baytrail/Cherrytrail devices.
+ For backwards-compatibility with previous configurations, the selection
+ will be used as the default for platform-specific drivers.
+ Say Y if you need this option.
+ If unsure select "N".
+
+config SND_SOC_SOF_ACPI_DEV
+ tristate
+
+config SND_SOC_SOF_OF
+ tristate "SOF OF enumeration support"
+ depends on OF
+ help
+ This adds support for Device Tree enumeration. This option is
+ required to enable i.MX8 or Mediatek devices.
+ Say Y if you need this option. If unsure select "N".
+
+config SND_SOC_SOF_OF_DEV
+ tristate
+
+config SND_SOC_SOF_COMPRESS
+ bool
+ select SND_SOC_COMPRESS
+
+config SND_SOC_SOF_DEBUG_PROBES
+ tristate
+ select SND_SOC_SOF_CLIENT
+ select SND_SOC_COMPRESS
+ help
+ This option enables the data probing feature that can be used to
+ gather data directly from specific points of the audio pipeline.
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_CLIENT
+ tristate
+ select AUXILIARY_BUS
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_DEVELOPER_SUPPORT
+ bool "SOF developer options support"
+ depends on EXPERT && SND_SOC_SOF
+ help
+ This option unlocks SOF developer options for debug/performance/
+ code hardening.
+ Distributions should not select this option, only SOF development
+ teams should select it.
+ Say Y if you are involved in SOF development and need this option.
+ If not, select N.
+
+if SND_SOC_SOF_DEVELOPER_SUPPORT
+
+config SND_SOC_SOF_FORCE_PROBE_WORKQUEUE
+ bool "SOF force probe workqueue"
+ select SND_SOC_SOF_PROBE_WORK_QUEUE
+ help
+ This option forces the use of a probe workqueue, which is only used
+ when HDaudio is enabled due to module dependencies. Forcing this
+ option is intended for debug only, but this should not add any
+ functional issues in nominal cases.
+ Say Y if you are involved in SOF development and need this option.
+ If not, select N.
+
+config SND_SOC_SOF_NOCODEC
+ tristate
+
+config SND_SOC_SOF_NOCODEC_SUPPORT
+ bool "SOF nocodec static mode support"
+ help
+ This adds support for a dummy/nocodec machine driver fallback
+ option if no known codec is detected. This is typically only
+ enabled for developers or devices where the sound card is
+ controlled externally.
+ This option is mutually exclusive at build time with the Intel HDAudio support.
+ Selecting it may have negative impacts and prevent e.g. microphone
+ functionality from being enabled on Intel CoffeeLake and later
+ platforms.
+ Distributions should not select this option!
+ Say Y if you need this nocodec fallback option.
+ If unsure select "N".
+
+config SND_SOC_SOF_STRICT_ABI_CHECKS
+ bool "SOF strict ABI checks"
+ help
+ This option enables strict ABI checks for firmware and topology
+ files.
+ When these files are more recent than the kernel, the kernel
+ will handle the functionality it supports and may report errors
+ during topology creation or run-time usage if new functionality
+ is invoked.
+ This option will stop topology creation and firmware load upfront.
+ It is intended for SOF CI/releases and not for users or distros.
+ Say Y if you want strict ABI checks for an SOF release.
+ If you are not involved in SOF releases and CI development,
+ select "N".
+
+config SND_SOC_SOF_DEBUG
+ bool "SOF debugging features"
+ help
+ This option can be used to enable or disable individual SOF firmware
+ and driver debugging options.
+ Say Y if you are debugging SOF FW or drivers.
+ If unsure select "N".
+
+if SND_SOC_SOF_DEBUG
+
+config SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT
+ bool "SOF nocodec debug mode support"
+ depends on !SND_SOC_SOF_NOCODEC_SUPPORT
+ help
+ This adds support for a dummy/nocodec machine driver fallback
+ option.
+ Unlike SND_SOC_SOF_NOCODEC_SUPPORT, this option is NOT mutually
+ exclusive at build time with the Intel HDAudio support. The selection
+ is made depending on command-line or modprobe.d settings.
+ Distributions should not select this option!
+ Say Y if you need this nocodec debug fallback option.
+ If unsure select "N".
+
+config SND_SOC_SOF_FORCE_NOCODEC_MODE
+ bool "SOF force nocodec Mode"
+ depends on SND_SOC_SOF_NOCODEC_SUPPORT
+ help
+ This forces SOF to use dummy/nocodec as machine driver, even
+ though there is a codec detected on the real platform. This is
+ typically only enabled for developers for debug purposes, before
+ codec/machine driver is ready, or to exclude the impact of those
+ drivers.
+ Say Y if you need this force nocodec mode option.
+ If unsure select "N".
+
+config SND_SOC_SOF_DEBUG_XRUN_STOP
+ bool "SOF stop on XRUN"
+ help
+ This option forces PCMs to stop on any XRUN event. This is useful to
+ preserve any trace data and pipeline status prior to the XRUN.
+ Say Y if you are debugging SOF FW pipeline XRUNs.
+ If unsure select "N".
+
+config SND_SOC_SOF_DEBUG_VERBOSE_IPC
+ bool "SOF verbose IPC logs"
+ help
+ This option enables more verbose IPC logs, with command types in
+ human-readable form instead of just 32-bit hex dumps. This is useful
+ if you are trying to debug IPC with the DSP firmware.
+ If unsure select "N".
+
+config SND_SOC_SOF_DEBUG_FORCE_IPC_POSITION
+ bool "SOF force to use IPC for position update on SKL+"
+ help
+ This option forces the driver to handle stream position update IPCs
+ and run PCM period elapse to inform ALSA, even on platforms (e.g.
+ Intel SKL+) that have another mechanism (e.g. HDAC DPIB/posbuf) to
+ elapse PCM.
+ On platforms (e.g. Intel SKL-) where the position update IPC is the
+ only choice, this setting won't impact anything.
+ If you are trying to debug pointer update with position IPCs or where
+ DPIB/posbuf is not ready, select "Y".
+ If unsure select "N".
+
+config SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE
+ bool "SOF enable debugfs caching"
+ help
+ This option enables caching of the debugfs memory -> DSP resource
+ mappings (memory, registers, etc.) before the audio DSP is suspended.
+ This will increase the suspend
+ latency and therefore should be used for debug purposes only.
+ Say Y if you want to enable caching the memory windows.
+ If unsure, select "N".
+
+config SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE
+ bool "SOF enable firmware trace"
+ help
+ The firmware trace can be enabled either at build-time with
+ this option, or dynamically by setting flags in the SOF core
+ module parameter (similar to dynamic debug).
+ If unsure, select "N".
+
+config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST
+ tristate "SOF enable IPC flood test"
+ depends on SND_SOC_SOF
+ select SND_SOC_SOF_CLIENT
+ help
+ This option enables a separate client device for IPC flood test
+ which can be used to flood the DSP with test IPCs and gather stats
+ about response times.
+ Say Y if you want to enable IPC flood test.
+ If unsure, select "N".
+
+config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST_NUM
+ int "Number of IPC flood test clients"
+ range 1 32
+ default 2
+ depends on SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST
+ help
+ Select the number of IPC flood test clients to be created.
+
+config SND_SOC_SOF_DEBUG_IPC_MSG_INJECTOR
+ tristate "SOF enable IPC message injector"
+ depends on SND_SOC_SOF
+ select SND_SOC_SOF_CLIENT
+ help
+ This option enables the IPC message injector which can be used to send
+ crafted IPC messages to the DSP to test its robustness.
+ Say Y if you want to enable the IPC message injector.
+ If unsure, select "N".
+
+config SND_SOC_SOF_DEBUG_IPC_KERNEL_INJECTOR
+ tristate "SOF enable IPC kernel injector"
+ depends on SND_SOC_SOF
+ select SND_SOC_SOF_CLIENT
+ help
+ This option enables the IPC kernel injector which can be used to send
+ crafted IPC messages to the kernel to test its robustness against
+ DSP messages.
+ Say Y if you want to enable the IPC kernel injector.
+ If unsure, select "N".
+
+config SND_SOC_SOF_DEBUG_RETAIN_DSP_CONTEXT
+ bool "SOF retain DSP context on any FW exceptions"
+ help
+ This option keeps the DSP in D0 state so that firmware debug
+ information can be retained and dumped to userspace.
+ Say Y if you want to retain DSP context for FW exceptions.
+ If unsure, select "N".
+
+endif ## SND_SOC_SOF_DEBUG
+
+endif ## SND_SOC_SOF_DEVELOPER_SUPPORT
+
+config SND_SOC_SOF
+ tristate
+ select SND_SOC_TOPOLOGY
+ select SND_SOC_SOF_NOCODEC if SND_SOC_SOF_NOCODEC_SUPPORT
+ select SND_SOC_SOF_NOCODEC if SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+ The selection is made at the top level and does not exactly follow
+ module dependencies, but since the module or built-in type is decided
+ at the top level, it doesn't matter.
+
+config SND_SOC_SOF_PROBE_WORK_QUEUE
+ bool
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+ When selected, the probe is handled in two steps, for example to
+ avoid lockdeps if request_module is used in the probe.
+
+# Supported IPC versions
+config SND_SOC_SOF_IPC3
+ bool
+
+config SND_SOC_SOF_INTEL_IPC4
+ bool
+
+source "sound/soc/sof/amd/Kconfig"
+source "sound/soc/sof/imx/Kconfig"
+source "sound/soc/sof/intel/Kconfig"
+source "sound/soc/sof/mediatek/Kconfig"
+source "sound/soc/sof/xtensa/Kconfig"
+
+endif
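
For orientation, the SND_SOC_SOF_IPC3 and SND_SOC_SOF_INTEL_IPC4 symbols above are consumed from C through IS_ENABLED(); the sketch below (not part of this patch) shows that idiom with a hypothetical sof_pick_ipc_ops() helper and two ops-table names used only for illustration:

#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/types.h>

struct sof_ipc_ops;				/* opaque here; illustrative only */
extern const struct sof_ipc_ops example_ipc3_ops;
extern const struct sof_ipc_ops example_ipc4_ops;

static inline const struct sof_ipc_ops *sof_pick_ipc_ops(bool use_ipc4)
{
	/* branches compile away when the corresponding option is disabled */
	if (use_ipc4 && IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_IPC4))
		return &example_ipc4_ops;
	if (!use_ipc4 && IS_ENABLED(CONFIG_SND_SOC_SOF_IPC3))
		return &example_ipc3_ops;
	return NULL;	/* requested IPC version not built in */
}
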
diff --git a/sound/soc/sof/Makefile b/sound/soc/sof/Makefile
new file mode 100644
index 000000000..744d40bd8
--- /dev/null
+++ b/sound/soc/sof/Makefile
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+snd-sof-objs := core.o ops.o loader.o ipc.o pcm.o pm.o debug.o topology.o\
+ control.o trace.o iomem-utils.o sof-audio.o stream-ipc.o
+
+# IPC implementations
+ifneq ($(CONFIG_SND_SOC_SOF_IPC3),)
+snd-sof-objs += ipc3.o ipc3-loader.o ipc3-topology.o ipc3-control.o ipc3-pcm.o\
+ ipc3-dtrace.o
+endif
+ifneq ($(CONFIG_SND_SOC_SOF_INTEL_IPC4),)
+snd-sof-objs += ipc4.o ipc4-loader.o ipc4-topology.o ipc4-control.o ipc4-pcm.o\
+ ipc4-mtrace.o
+endif
+
+# SOF client support
+ifneq ($(CONFIG_SND_SOC_SOF_CLIENT),)
+snd-sof-objs += sof-client.o
+endif
+
+snd-sof-$(CONFIG_SND_SOC_SOF_COMPRESS) += compress.o
+
+snd-sof-pci-objs := sof-pci-dev.o
+snd-sof-acpi-objs := sof-acpi-dev.o
+snd-sof-of-objs := sof-of-dev.o
+
+snd-sof-ipc-flood-test-objs := sof-client-ipc-flood-test.o
+snd-sof-ipc-msg-injector-objs := sof-client-ipc-msg-injector.o
+snd-sof-ipc-kernel-injector-objs := sof-client-ipc-kernel-injector.o
+snd-sof-probes-objs := sof-client-probes.o
+ifneq ($(CONFIG_SND_SOC_SOF_IPC3),)
+snd-sof-probes-objs += sof-client-probes-ipc3.o
+endif
+ifneq ($(CONFIG_SND_SOC_SOF_INTEL_IPC4),)
+snd-sof-probes-objs += sof-client-probes-ipc4.o
+endif
+
+snd-sof-nocodec-objs := nocodec.o
+
+snd-sof-utils-objs := sof-utils.o
+
+obj-$(CONFIG_SND_SOC_SOF) += snd-sof.o
+obj-$(CONFIG_SND_SOC_SOF_NOCODEC) += snd-sof-nocodec.o
+
+obj-$(CONFIG_SND_SOC_SOF) += snd-sof-utils.o
+
+obj-$(CONFIG_SND_SOC_SOF_ACPI_DEV) += snd-sof-acpi.o
+obj-$(CONFIG_SND_SOC_SOF_OF_DEV) += snd-sof-of.o
+obj-$(CONFIG_SND_SOC_SOF_PCI_DEV) += snd-sof-pci.o
+
+obj-$(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST) += snd-sof-ipc-flood-test.o
+obj-$(CONFIG_SND_SOC_SOF_DEBUG_IPC_MSG_INJECTOR) += snd-sof-ipc-msg-injector.o
+obj-$(CONFIG_SND_SOC_SOF_DEBUG_IPC_KERNEL_INJECTOR) += snd-sof-ipc-kernel-injector.o
+obj-$(CONFIG_SND_SOC_SOF_DEBUG_PROBES) += snd-sof-probes.o
+
+obj-$(CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL) += intel/
+obj-$(CONFIG_SND_SOC_SOF_IMX_TOPLEVEL) += imx/
+obj-$(CONFIG_SND_SOC_SOF_AMD_TOPLEVEL) += amd/
+obj-$(CONFIG_SND_SOC_SOF_XTENSA) += xtensa/
+obj-$(CONFIG_SND_SOC_SOF_MTK_TOPLEVEL) += mediatek/
diff --git a/sound/soc/sof/amd/Kconfig b/sound/soc/sof/amd/Kconfig
new file mode 100644
index 000000000..f2faa08f0
--- /dev/null
+++ b/sound/soc/sof/amd/Kconfig
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+# This file is provided under a dual BSD/GPLv2 license. When using or
+# redistributing this file, you may do so under either license.
+#
+# Copyright(c) 2021, 2023 Advanced Micro Devices, Inc. All rights reserved.
+
+config SND_SOC_SOF_AMD_TOPLEVEL
+ tristate "SOF support for AMD audio DSPs"
+ depends on X86 || COMPILE_TEST
+ help
+ This adds support for Sound Open Firmware for AMD platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+if SND_SOC_SOF_AMD_TOPLEVEL
+
+config SND_SOC_SOF_AMD_COMMON
+ tristate
+ select SND_SOC_SOF
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_PCI_DEV
+ select SND_AMD_ACP_CONFIG
+ select SND_SOC_SOF_XTENSA
+ select SND_SOC_SOF_ACP_PROBES
+ select SND_SOC_ACPI if ACPI
+ help
+ This option is not user-selectable but automatically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_AMD_RENOIR
+ tristate "SOF support for RENOIR"
+ depends on SND_SOC_SOF_PCI
+ select SND_SOC_SOF_AMD_COMMON
+ help
+ Select this option for SOF support on the AMD Renoir platform.
+
+config SND_SOC_SOF_AMD_VANGOGH
+ tristate "SOF support for VANGOGH"
+ depends on SND_SOC_SOF_PCI
+ select SND_SOC_SOF_AMD_COMMON
+ help
+ Select this option for SOF support on the AMD Vangogh platform.
+ Say Y if you want to enable SOF on Vangogh.
+ If unsure select "N".
+
+config SND_SOC_SOF_AMD_REMBRANDT
+ tristate "SOF support for REMBRANDT"
+ depends on SND_SOC_SOF_PCI
+ select SND_SOC_SOF_AMD_COMMON
+ help
+ Select this option for SOF support on the AMD Rembrandt platform.
+ Say Y if you want to enable SOF on Rembrandt.
+ If unsure select "N".
+
+config SND_SOC_SOF_ACP_PROBES
+ tristate
+ select SND_SOC_SOF_DEBUG_PROBES
+ help
+ This option is not user-selectable but automatically handled by
+ 'select' statements at a higher level.
+
+endif
diff --git a/sound/soc/sof/amd/Makefile b/sound/soc/sof/amd/Makefile
new file mode 100644
index 000000000..f3b375e67
--- /dev/null
+++ b/sound/soc/sof/amd/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+# This file is provided under a dual BSD/GPLv2 license. When using or
+# redistributing this file, you may do so under either license.
+#
+# Copyright(c) 2021, 2023 Advanced Micro Devices, Inc. All rights reserved.
+
+snd-sof-amd-acp-objs := acp.o acp-loader.o acp-ipc.o acp-pcm.o acp-stream.o acp-trace.o acp-common.o
+snd-sof-amd-acp-$(CONFIG_SND_SOC_SOF_ACP_PROBES) = acp-probes.o
+snd-sof-amd-renoir-objs := pci-rn.o renoir.o
+snd-sof-amd-rembrandt-objs := pci-rmb.o rembrandt.o
+snd-sof-amd-vangogh-objs := pci-vangogh.o vangogh.o
+
+obj-$(CONFIG_SND_SOC_SOF_AMD_COMMON) += snd-sof-amd-acp.o
+obj-$(CONFIG_SND_SOC_SOF_AMD_RENOIR) += snd-sof-amd-renoir.o
+obj-$(CONFIG_SND_SOC_SOF_AMD_REMBRANDT) += snd-sof-amd-rembrandt.o
+obj-$(CONFIG_SND_SOC_SOF_AMD_VANGOGH) += snd-sof-amd-vangogh.o
diff --git a/sound/soc/sof/amd/acp-common.c b/sound/soc/sof/amd/acp-common.c
new file mode 100644
index 000000000..3a0c7688d
--- /dev/null
+++ b/sound/soc/sof/amd/acp-common.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Advanced Micro Devices, Inc.
+//
+// Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+// V sujith kumar Reddy <Vsujithkumar.Reddy@amd.com>
+
+/* ACP-specific Common code */
+
+#include "../sof-priv.h"
+#include "../sof-audio.h"
+#include "../ops.h"
+#include "../sof-audio.h"
+#include "acp.h"
+#include "acp-dsp-offset.h"
+#include <sound/sof/xtensa.h>
+
+/**
+ * amd_sof_ipc_dump() - This function is called when IPC tx times out.
+ * @sdev: SOF device.
+ */
+void amd_sof_ipc_dump(struct snd_sof_dev *sdev)
+{
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ u32 base = desc->dsp_intr_base;
+ u32 dsp_msg_write = sdev->debug_box.offset +
+ offsetof(struct scratch_ipc_conf, sof_dsp_msg_write);
+ u32 dsp_ack_write = sdev->debug_box.offset +
+ offsetof(struct scratch_ipc_conf, sof_dsp_ack_write);
+ u32 host_msg_write = sdev->debug_box.offset +
+ offsetof(struct scratch_ipc_conf, sof_host_msg_write);
+ u32 host_ack_write = sdev->debug_box.offset +
+ offsetof(struct scratch_ipc_conf, sof_host_ack_write);
+ u32 dsp_msg, dsp_ack, host_msg, host_ack, irq_stat;
+
+ dsp_msg = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_msg_write);
+ dsp_ack = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_ack_write);
+ host_msg = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + host_msg_write);
+ host_ack = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + host_ack_write);
+ irq_stat = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
+
+ dev_err(sdev->dev,
+ "dsp_msg = %#x dsp_ack = %#x host_msg = %#x host_ack = %#x irq_stat = %#x\n",
+ dsp_msg, dsp_ack, host_msg, host_ack, irq_stat);
+}
+
+/**
+ * amd_get_registers() - This function is called in case of DSP oops
+ * in order to gather information about the registers, filename and
+ * linenumber and stack.
+ * @sdev: SOF device.
+ * @xoops: Stores information about registers.
+ * @panic_info: Stores information about filename and line number.
+ * @stack: Stores the stack dump.
+ * @stack_words: Size of the stack dump.
+ */
+static void amd_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
+ /* first read registers */
+ acp_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+
+ offset += xoops->arch_hdr.totalsize;
+ acp_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ acp_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
+}
+
+/**
+ * amd_sof_dump() - This function is called when a panic message is
+ * received from the firmware.
+ * @sdev: SOF device.
+ * @flags: parameter not used but required by ops prototype
+ */
+void amd_sof_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[AMD_STACK_DUMP_SIZE];
+ u32 status;
+
+ /* Get information about the panic status from the debug box area.
+ * Compute the trace point based on the status.
+ */
+ if (sdev->dsp_oops_offset > sdev->debug_box.offset) {
+ acp_mailbox_read(sdev, sdev->debug_box.offset, &status, sizeof(u32));
+ } else {
+ /* Read DSP Panic status from dsp_box.
+ * As window information for exception box offset and size is not available
+ * before FW_READY
+ */
+ acp_mailbox_read(sdev, sdev->dsp_box.offset, &status, sizeof(u32));
+ sdev->dsp_oops_offset = sdev->dsp_box.offset + sizeof(status);
+ }
+
+ /* Get information about the registers, the filename and line
+ * number and the stack.
+ */
+ amd_get_registers(sdev, &xoops, &panic_info, stack, AMD_STACK_DUMP_SIZE);
+
+ /* Print the information to the console */
+ sof_print_oops_and_stack(sdev, KERN_ERR, status, status, &xoops,
+ &panic_info, stack, AMD_STACK_DUMP_SIZE);
+}
+
+struct snd_soc_acpi_mach *amd_sof_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_soc_acpi_mach *mach;
+
+ mach = snd_soc_acpi_find_machine(desc->machines);
+ if (!mach) {
+ dev_warn(sdev->dev, "No matching ASoC machine driver found\n");
+ return NULL;
+ }
+
+ sof_pdata->tplg_filename = mach->sof_tplg_filename;
+ sof_pdata->fw_filename = mach->fw_filename;
+
+ return mach;
+}
+
+/* AMD Common DSP ops */
+struct snd_sof_dsp_ops sof_acp_common_ops = {
+ /* probe and remove */
+ .probe = amd_sof_acp_probe,
+ .remove = amd_sof_acp_remove,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+
+ /* Block IO */
+ .block_read = acp_dsp_block_read,
+ .block_write = acp_dsp_block_write,
+
+ /* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+ .pre_fw_run = acp_dsp_pre_fw_run,
+ .get_bar_index = acp_get_bar_index,
+
+ /* DSP core boot */
+ .run = acp_sof_dsp_run,
+
+ /* IPC */
+ .send_msg = acp_sof_ipc_send_msg,
+ .ipc_msg_data = acp_sof_ipc_msg_data,
+ .set_stream_data_offset = acp_set_stream_data_offset,
+ .get_mailbox_offset = acp_sof_ipc_get_mailbox_offset,
+ .get_window_offset = acp_sof_ipc_get_window_offset,
+ .irq_thread = acp_sof_ipc_irq_thread,
+
+ /* stream callbacks */
+ .pcm_open = acp_pcm_open,
+ .pcm_close = acp_pcm_close,
+ .pcm_hw_params = acp_pcm_hw_params,
+ .pcm_pointer = acp_pcm_pointer,
+
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+
+ /* Machine driver callbacks */
+ .machine_select = amd_sof_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+
+ /* Trace Logger */
+ .trace_init = acp_sof_trace_init,
+ .trace_release = acp_sof_trace_release,
+
+ /* PM */
+ .suspend = amd_sof_acp_suspend,
+ .resume = amd_sof_acp_resume,
+
+ .ipc_dump = amd_sof_ipc_dump,
+ .dbg_dump = amd_sof_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+
+ /* probe client device registration */
+ .register_ipc_clients = acp_probes_register,
+ .unregister_ipc_clients = acp_probes_unregister,
+};
+EXPORT_SYMBOL_NS(sof_acp_common_ops, SND_SOC_SOF_AMD_COMMON);
+
+MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_DESCRIPTION("ACP SOF COMMON Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/acp-dsp-offset.h b/sound/soc/sof/amd/acp-dsp-offset.h
new file mode 100644
index 000000000..a913f1cc4
--- /dev/null
+++ b/sound/soc/sof/amd/acp-dsp-offset.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2021, 2023 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Author: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+ */
+
+#ifndef _ACP_DSP_IP_OFFSET_H
+#define _ACP_DSP_IP_OFFSET_H
+
+/* Registers from ACP_DMA_0 block */
+#define ACP_DMA_CNTL_0 0x00
+#define ACP_DMA_DSCR_STRT_IDX_0 0x20
+#define ACP_DMA_DSCR_CNT_0 0x40
+#define ACP_DMA_PRIO_0 0x60
+#define ACP_DMA_CUR_DSCR_0 0x80
+#define ACP_DMA_ERR_STS_0 0xC0
+#define ACP_DMA_DESC_BASE_ADDR 0xE0
+#define ACP_DMA_DESC_MAX_NUM_DSCR 0xE4
+#define ACP_DMA_CH_STS 0xE8
+#define ACP_DMA_CH_GROUP 0xEC
+#define ACP_DMA_CH_RST_STS 0xF0
+
+/* Registers from ACP_DSP_0 block */
+#define ACP_DSP0_RUNSTALL 0x414
+
+/* Registers from ACP_AXI2AXIATU block */
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1 0xC00
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_1 0xC04
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_2 0xC08
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_2 0xC0C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_3 0xC10
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_3 0xC14
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_4 0xC18
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_4 0xC1C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_5 0xC20
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_5 0xC24
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_6 0xC28
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_6 0xC2C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_7 0xC30
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_7 0xC34
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_8 0xC38
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_8 0xC3C
+#define ACPAXI2AXI_ATU_CTRL 0xC40
+#define ACP_SOFT_RESET 0x1000
+#define ACP_CONTROL 0x1004
+
+#define ACP3X_I2S_PIN_CONFIG 0x1400
+#define ACP5X_I2S_PIN_CONFIG 0x1400
+#define ACP6X_I2S_PIN_CONFIG 0x1440
+
+/* Registers offsets from ACP_PGFSM block */
+#define ACP3X_PGFSM_BASE 0x141C
+#define ACP5X_PGFSM_BASE 0x1424
+#define ACP6X_PGFSM_BASE 0x1024
+#define PGFSM_CONTROL_OFFSET 0x0
+#define PGFSM_STATUS_OFFSET 0x4
+#define ACP3X_CLKMUX_SEL 0x1424
+#define ACP5X_CLKMUX_SEL 0x142C
+#define ACP6X_CLKMUX_SEL 0x102C
+
+/* Registers from ACP_INTR block */
+#define ACP3X_EXT_INTR_STAT 0x1808
+#define ACP5X_EXT_INTR_STAT 0x1808
+#define ACP6X_EXT_INTR_STAT 0x1A0C
+
+#define ACP3X_DSP_SW_INTR_BASE 0x1814
+#define ACP5X_DSP_SW_INTR_BASE 0x1814
+#define ACP6X_DSP_SW_INTR_BASE 0x1808
+#define DSP_SW_INTR_CNTL_OFFSET 0x0
+#define DSP_SW_INTR_STAT_OFFSET 0x4
+#define DSP_SW_INTR_TRIG_OFFSET 0x8
+#define ACP_ERROR_STATUS 0x18C4
+#define ACP3X_AXI2DAGB_SEM_0 0x1880
+#define ACP5X_AXI2DAGB_SEM_0 0x1884
+#define ACP6X_AXI2DAGB_SEM_0 0x1874
+
+/* Registers from ACP_SHA block */
+#define ACP_SHA_DSP_FW_QUALIFIER 0x1C70
+#define ACP_SHA_DMA_CMD 0x1CB0
+#define ACP_SHA_MSG_LENGTH 0x1CB4
+#define ACP_SHA_DMA_STRT_ADDR 0x1CB8
+#define ACP_SHA_DMA_DESTINATION_ADDR 0x1CBC
+#define ACP_SHA_DMA_CMD_STS 0x1CC0
+#define ACP_SHA_DMA_ERR_STATUS 0x1CC4
+#define ACP_SHA_TRANSFER_BYTE_CNT 0x1CC8
+#define ACP_SHA_DMA_INCLUDE_HDR 0x1CCC
+#define ACP_SHA_PSP_ACK 0x1C74
+
+#define ACP_SCRATCH_REG_0 0x10000
+#define ACP6X_DSP_FUSION_RUNSTALL 0x0644
+
+/* Cache window registers */
+#define ACP_DSP0_CACHE_OFFSET0 0x0420
+#define ACP_DSP0_CACHE_SIZE0 0x0424
+#endif
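
The per-generation bases above (e.g. ACP3X_/ACP5X_/ACP6X_DSP_SW_INTR_BASE) are combined with the shared DSP_SW_INTR_*_OFFSET values at run time through the chip descriptor; a short sketch of that addressing scheme (not part of this patch, example_read_sw_intr_stat() is a purely illustrative helper):

#include "../ops.h"
#include "acp.h"
#include "acp-dsp-offset.h"

/*
 * Read the DSP software-interrupt status register. 'desc' is the chip
 * descriptor whose dsp_intr_base holds the generation-specific base,
 * e.g. ACP6X_DSP_SW_INTR_BASE (0x1808) + 0x4 = 0x180C on ACP 6.x.
 */
static u32 example_read_sw_intr_stat(struct snd_sof_dev *sdev,
				     const struct sof_amd_acp_desc *desc)
{
	return snd_sof_dsp_read(sdev, ACP_DSP_BAR,
				desc->dsp_intr_base + DSP_SW_INTR_STAT_OFFSET);
}
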
diff --git a/sound/soc/sof/amd/acp-ipc.c b/sound/soc/sof/amd/acp-ipc.c
new file mode 100644
index 000000000..fcb54f545
--- /dev/null
+++ b/sound/soc/sof/amd/acp-ipc.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2021 Advanced Micro Devices, Inc.
+//
+// Authors: Balakishore Pati <Balakishore.pati@amd.com>
+// Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+
+/* ACP-specific SOF IPC code */
+
+#include <linux/module.h>
+#include "../ops.h"
+#include "acp.h"
+#include "acp-dsp-offset.h"
+
+void acp_mailbox_write(struct snd_sof_dev *sdev, u32 offset, void *message, size_t bytes)
+{
+ memcpy_to_scratch(sdev, offset, message, bytes);
+}
+EXPORT_SYMBOL_NS(acp_mailbox_write, SND_SOC_SOF_AMD_COMMON);
+
+void acp_mailbox_read(struct snd_sof_dev *sdev, u32 offset, void *message, size_t bytes)
+{
+ memcpy_from_scratch(sdev, offset, message, bytes);
+}
+EXPORT_SYMBOL_NS(acp_mailbox_read, SND_SOC_SOF_AMD_COMMON);
+
+static void acpbus_trigger_host_to_dsp_swintr(struct acp_dev_data *adata)
+{
+ struct snd_sof_dev *sdev = adata->dev;
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ u32 swintr_trigger;
+
+ swintr_trigger = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->dsp_intr_base +
+ DSP_SW_INTR_TRIG_OFFSET);
+ swintr_trigger |= 0x01;
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->dsp_intr_base + DSP_SW_INTR_TRIG_OFFSET,
+ swintr_trigger);
+}
+
+static void acp_ipc_host_msg_set(struct snd_sof_dev *sdev)
+{
+ unsigned int host_msg = sdev->debug_box.offset +
+ offsetof(struct scratch_ipc_conf, sof_host_msg_write);
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + host_msg, 1);
+}
+
+static void acp_dsp_ipc_host_done(struct snd_sof_dev *sdev)
+{
+ unsigned int dsp_msg = sdev->debug_box.offset +
+ offsetof(struct scratch_ipc_conf, sof_dsp_msg_write);
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_msg, 0);
+}
+
+static void acp_dsp_ipc_dsp_done(struct snd_sof_dev *sdev)
+{
+ unsigned int dsp_ack = sdev->debug_box.offset +
+ offsetof(struct scratch_ipc_conf, sof_dsp_ack_write);
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_ack, 0);
+}
+
+int acp_sof_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct acp_dev_data *adata = sdev->pdata->hw_pdata;
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ unsigned int offset = sdev->host_box.offset;
+ unsigned int count = ACP_HW_SEM_RETRY_COUNT;
+
+ while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset)) {
+ /* Wait until the HW semaphore lock is acquired, or time out */
+ count--;
+ if (!count) {
+ dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ acp_mailbox_write(sdev, offset, msg->msg_data, msg->msg_size);
+ acp_ipc_host_msg_set(sdev);
+
+ /* Trigger host to dsp interrupt for the msg */
+ acpbus_trigger_host_to_dsp_swintr(adata);
+
+ /* Unlock or Release HW Semaphore */
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(acp_sof_ipc_send_msg, SND_SOC_SOF_AMD_COMMON);
+
+static void acp_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply reply;
+ struct sof_ipc_cmd_hdr *hdr;
+ unsigned int offset = sdev->host_box.offset;
+ int ret = 0;
+
+ /*
+ * Sometimes an unexpected reply IPC arrives that does not belong to
+ * any of the IPCs sent by the driver. In this case, the driver must
+ * ignore the IPC.
+ */
+ if (!msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
+ return;
+ }
+ hdr = msg->msg_data;
+ if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE) ||
+ hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
+ /*
+ * memory windows are powered off before sending IPC reply,
+ * so we can't read the mailbox for CTX_SAVE and PM_GATE
+ * replies.
+ */
+ reply.error = 0;
+ reply.hdr.cmd = SOF_IPC_GLB_REPLY;
+ reply.hdr.size = sizeof(reply);
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ goto out;
+ }
+ /* get IPC reply from DSP in the mailbox */
+ acp_mailbox_read(sdev, offset, &reply, sizeof(reply));
+ if (reply.error < 0) {
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ ret = reply.error;
+ } else {
+ /*
+ * To support an IPC tx_message with a
+ * reply_size set to zero.
+ */
+ if (!msg->reply_size)
+ goto out;
+
+ /* reply correct size ? */
+ if (reply.hdr.size != msg->reply_size &&
+ !(reply.hdr.cmd & SOF_IPC_GLB_PROBE)) {
+ dev_err(sdev->dev, "reply expected %zu got %u bytes\n",
+ msg->reply_size, reply.hdr.size);
+ ret = -EINVAL;
+ }
+ /* read the message */
+ if (msg->reply_size > 0)
+ acp_mailbox_read(sdev, offset, msg->reply_data, msg->reply_size);
+ }
+out:
+ msg->reply_error = ret;
+}
+
+irqreturn_t acp_sof_ipc_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ struct acp_dev_data *adata = sdev->pdata->hw_pdata;
+ unsigned int dsp_msg_write = sdev->debug_box.offset +
+ offsetof(struct scratch_ipc_conf, sof_dsp_msg_write);
+ unsigned int dsp_ack_write = sdev->debug_box.offset +
+ offsetof(struct scratch_ipc_conf, sof_dsp_ack_write);
+ bool ipc_irq = false;
+ int dsp_msg, dsp_ack;
+ unsigned int status;
+
+ if (sdev->first_boot && sdev->fw_state != SOF_FW_BOOT_COMPLETE) {
+ acp_mailbox_read(sdev, sdev->dsp_box.offset, &status, sizeof(status));
+ if ((status & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(sdev, sdev->dsp_box.offset + sizeof(status),
+ true);
+ status = 0;
+ acp_mailbox_write(sdev, sdev->dsp_box.offset, &status, sizeof(status));
+ return IRQ_HANDLED;
+ }
+ snd_sof_ipc_msgs_rx(sdev);
+ acp_dsp_ipc_host_done(sdev);
+ return IRQ_HANDLED;
+ }
+
+ dsp_msg = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_msg_write);
+ if (dsp_msg) {
+ snd_sof_ipc_msgs_rx(sdev);
+ acp_dsp_ipc_host_done(sdev);
+ ipc_irq = true;
+ }
+
+ dsp_ack = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_ack_write);
+ if (dsp_ack) {
+ spin_lock_irq(&sdev->ipc_lock);
+ /* handle immediate reply from DSP core */
+ acp_dsp_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, 0);
+ /* set the done bit */
+ acp_dsp_ipc_dsp_done(sdev);
+ spin_unlock_irq(&sdev->ipc_lock);
+ ipc_irq = true;
+ }
+
+ acp_mailbox_read(sdev, sdev->debug_box.offset, &status, sizeof(u32));
+ if ((status & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(sdev, sdev->dsp_oops_offset, true);
+ status = 0;
+ acp_mailbox_write(sdev, sdev->debug_box.offset, &status, sizeof(status));
+ return IRQ_HANDLED;
+ }
+
+ if (desc->probe_reg_offset) {
+ u32 val;
+ u32 posn;
+
+ /* The probe register consists of two parts:
+ * bits 0-30 hold the cumulative position value,
+ * bit 31 is a synchronization flag between DSP and CPU
+ * for the position update
+ */
+ val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->probe_reg_offset);
+ if (val & PROBE_STATUS_BIT) {
+ posn = val & ~PROBE_STATUS_BIT;
+ if (adata->probe_stream) {
+ /* The probe posn value is 31 bits wide (limited to 2 GB);
+ * once it wraps, the DSP won't send a posn interrupt.
+ */
+ adata->probe_stream->cstream_posn = posn;
+ snd_compr_fragment_elapsed(adata->probe_stream->cstream);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->probe_reg_offset, posn);
+ ipc_irq = true;
+ }
+ }
+ }
+
+ if (!ipc_irq)
+ dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_NS(acp_sof_ipc_irq_thread, SND_SOC_SOF_AMD_COMMON);
+
+int acp_sof_ipc_msg_data(struct snd_sof_dev *sdev, struct snd_sof_pcm_stream *sps,
+ void *p, size_t sz)
+{
+ unsigned int offset = sdev->dsp_box.offset;
+
+ if (!sps || !sdev->stream_box.size) {
+ acp_mailbox_read(sdev, offset, p, sz);
+ } else {
+ struct snd_pcm_substream *substream = sps->substream;
+ struct acp_dsp_stream *stream;
+
+ if (!substream || !substream->runtime)
+ return -ESTRPIPE;
+
+ stream = substream->runtime->private_data;
+
+ if (!stream)
+ return -ESTRPIPE;
+
+ acp_mailbox_read(sdev, stream->posn_offset, p, sz);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(acp_sof_ipc_msg_data, SND_SOC_SOF_AMD_COMMON);
+
+int acp_set_stream_data_offset(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ size_t posn_offset)
+{
+ struct snd_pcm_substream *substream = sps->substream;
+ struct acp_dsp_stream *stream = substream->runtime->private_data;
+
+ /* check for unaligned offset or overflow */
+ if (posn_offset > sdev->stream_box.size ||
+ posn_offset % sizeof(struct sof_ipc_stream_posn) != 0)
+ return -EINVAL;
+
+ stream->posn_offset = sdev->stream_box.offset + posn_offset;
+
+ dev_dbg(sdev->dev, "pcm: stream dir %d, posn mailbox offset is %zu",
+ substream->stream, stream->posn_offset);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(acp_set_stream_data_offset, SND_SOC_SOF_AMD_COMMON);
+
+int acp_sof_ipc_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+
+ return desc->sram_pte_offset;
+}
+EXPORT_SYMBOL_NS(acp_sof_ipc_get_mailbox_offset, SND_SOC_SOF_AMD_COMMON);
+
+int acp_sof_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return 0;
+}
+EXPORT_SYMBOL_NS(acp_sof_ipc_get_window_offset, SND_SOC_SOF_AMD_COMMON);
+
+MODULE_DESCRIPTION("AMD ACP sof-ipc driver");
diff --git a/sound/soc/sof/amd/acp-loader.c b/sound/soc/sof/amd/acp-loader.c
new file mode 100644
index 000000000..a427673cf
--- /dev/null
+++ b/sound/soc/sof/amd/acp-loader.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2021, 2023 Advanced Micro Devices, Inc.
+//
+// Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+
+/*
+ * Hardware interface for ACP DSP Firmware binaries loader
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "../ops.h"
+#include "acp-dsp-offset.h"
+#include "acp.h"
+
+#define FW_BIN 0
+#define FW_DATA_BIN 1
+
+#define FW_BIN_PTE_OFFSET 0x00
+#define FW_DATA_BIN_PTE_OFFSET 0x08
+
+#define ACP_DSP_RUN 0x00
+
+int acp_dsp_block_read(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
+ u32 offset, void *dest, size_t size)
+{
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ switch (blk_type) {
+ case SOF_FW_BLK_TYPE_SRAM:
+ offset = offset - desc->sram_pte_offset;
+ memcpy_from_scratch(sdev, offset, dest, size);
+ break;
+ default:
+ dev_err(sdev->dev, "bad blk type 0x%x\n", blk_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(acp_dsp_block_read, SND_SOC_SOF_AMD_COMMON);
+
+int acp_dsp_block_write(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
+ u32 offset, void *src, size_t size)
+{
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ struct acp_dev_data *adata;
+ void *dest;
+ u32 dma_size, page_count;
+ unsigned int size_fw;
+
+ adata = sdev->pdata->hw_pdata;
+
+ switch (blk_type) {
+ case SOF_FW_BLK_TYPE_IRAM:
+ if (!adata->bin_buf) {
+ size_fw = sdev->basefw.fw->size;
+ page_count = PAGE_ALIGN(size_fw) >> PAGE_SHIFT;
+ dma_size = page_count * ACP_PAGE_SIZE;
+ adata->bin_buf = dma_alloc_coherent(&pci->dev, dma_size,
+ &adata->sha_dma_addr,
+ GFP_ATOMIC);
+ if (!adata->bin_buf)
+ return -ENOMEM;
+ }
+ adata->fw_bin_size = size + offset;
+ dest = adata->bin_buf + offset;
+ break;
+ case SOF_FW_BLK_TYPE_DRAM:
+ if (!adata->data_buf) {
+ adata->data_buf = dma_alloc_coherent(&pci->dev,
+ ACP_DEFAULT_DRAM_LENGTH,
+ &adata->dma_addr,
+ GFP_ATOMIC);
+ if (!adata->data_buf)
+ return -ENOMEM;
+ }
+ dest = adata->data_buf + offset;
+ adata->fw_data_bin_size = size + offset;
+ break;
+ case SOF_FW_BLK_TYPE_SRAM:
+ offset = offset - desc->sram_pte_offset;
+ memcpy_to_scratch(sdev, offset, src, size);
+ return 0;
+ default:
+ dev_err(sdev->dev, "bad blk type 0x%x\n", blk_type);
+ return -EINVAL;
+ }
+
+ memcpy(dest, src, size);
+ return 0;
+}
+EXPORT_SYMBOL_NS(acp_dsp_block_write, SND_SOC_SOF_AMD_COMMON);
+
+int acp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
+{
+ return type;
+}
+EXPORT_SYMBOL_NS(acp_get_bar_index, SND_SOC_SOF_AMD_COMMON);
+
+static void configure_pte_for_fw_loading(int type, int num_pages, struct acp_dev_data *adata)
+{
+ struct snd_sof_dev *sdev = adata->dev;
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ unsigned int low, high;
+ dma_addr_t addr;
+ u16 page_idx;
+ u32 offset;
+
+ switch (type) {
+ case FW_BIN:
+ offset = FW_BIN_PTE_OFFSET;
+ addr = adata->sha_dma_addr;
+ break;
+ case FW_DATA_BIN:
+ offset = adata->fw_bin_page_count * 8;
+ addr = adata->dma_addr;
+ break;
+ default:
+ dev_err(sdev->dev, "Invalid data type %x\n", type);
+ return;
+ }
+
+ /* Group Enable */
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_BASE_ADDR_GRP_1,
+ desc->sram_pte_offset | BIT(31));
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1,
+ PAGE_SIZE_4K_ENABLE);
+
+ for (page_idx = 0; page_idx < num_pages; page_idx++) {
+ low = lower_32_bits(addr);
+ high = upper_32_bits(addr);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + offset, low);
+ high |= BIT(31);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + offset + 4, high);
+ offset += 8;
+ addr += PAGE_SIZE;
+ }
+
+ /* Flush ATU Cache after PTE Update */
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_CTRL, ACP_ATU_CACHE_INVALID);
+}
+
+/* pre fw run operations */
+int acp_dsp_pre_fw_run(struct snd_sof_dev *sdev)
+{
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ struct acp_dev_data *adata;
+ unsigned int src_addr, size_fw;
+ u32 page_count, dma_size;
+ int ret;
+
+ adata = sdev->pdata->hw_pdata;
+
+ if (adata->signed_fw_image)
+ size_fw = adata->fw_bin_size - ACP_FIRMWARE_SIGNATURE;
+ else
+ size_fw = adata->fw_bin_size;
+
+ page_count = PAGE_ALIGN(size_fw) >> PAGE_SHIFT;
+ adata->fw_bin_page_count = page_count;
+
+ configure_pte_for_fw_loading(FW_BIN, page_count, adata);
+ ret = configure_and_run_sha_dma(adata, adata->bin_buf, ACP_SYSTEM_MEMORY_WINDOW,
+ ACP_IRAM_BASE_ADDRESS, size_fw);
+ if (ret < 0) {
+ dev_err(sdev->dev, "SHA DMA transfer failed status: %d\n", ret);
+ return ret;
+ }
+ configure_pte_for_fw_loading(FW_DATA_BIN, ACP_DRAM_PAGE_COUNT, adata);
+
+ src_addr = ACP_SYSTEM_MEMORY_WINDOW + page_count * ACP_PAGE_SIZE;
+ ret = configure_and_run_dma(adata, src_addr, ACP_DATA_RAM_BASE_ADDRESS,
+ adata->fw_data_bin_size);
+ if (ret < 0) {
+ dev_err(sdev->dev, "acp dma configuration failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = acp_dma_status(adata, 0);
+ if (ret < 0)
+ dev_err(sdev->dev, "acp dma transfer status: %d\n", ret);
+
+ if (desc->rev > 3) {
+ /* Cache Window enable */
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DSP0_CACHE_OFFSET0, desc->sram_pte_offset);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DSP0_CACHE_SIZE0, SRAM1_SIZE | BIT(31));
+ }
+
+ /* Free memory once DMA is complete */
+ dma_size = (PAGE_ALIGN(sdev->basefw.fw->size) >> PAGE_SHIFT) * ACP_PAGE_SIZE;
+ dma_free_coherent(&pci->dev, dma_size, adata->bin_buf, adata->sha_dma_addr);
+ dma_free_coherent(&pci->dev, ACP_DEFAULT_DRAM_LENGTH, adata->data_buf, adata->dma_addr);
+ adata->bin_buf = NULL;
+ adata->data_buf = NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL_NS(acp_dsp_pre_fw_run, SND_SOC_SOF_AMD_COMMON);
+
+int acp_sof_dsp_run(struct snd_sof_dev *sdev)
+{
+ struct acp_dev_data *adata = sdev->pdata->hw_pdata;
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ int val;
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DSP0_RUNSTALL, ACP_DSP_RUN);
+ val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DSP0_RUNSTALL);
+ dev_dbg(sdev->dev, "ACP_DSP0_RUNSTALL : 0x%0x\n", val);
+
+ /* Some platforms don't have a fusion DSP; they keep the offset zero to indicate no support */
+ if (desc->fusion_dsp_offset && adata->enable_fw_debug) {
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->fusion_dsp_offset, ACP_DSP_RUN);
+ val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->fusion_dsp_offset);
+ dev_dbg(sdev->dev, "ACP_DSP0_FUSION_RUNSTALL : 0x%0x\n", val);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_NS(acp_sof_dsp_run, SND_SOC_SOF_AMD_COMMON);
+
+int acp_sof_load_signed_firmware(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ struct acp_dev_data *adata = plat_data->hw_pdata;
+ int ret;
+
+ ret = request_firmware(&sdev->basefw.fw, adata->fw_code_bin, sdev->dev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "sof signed firmware code bin is missing\n");
+ return ret;
+ } else {
+ dev_dbg(sdev->dev, "request_firmware %s successful\n", adata->fw_code_bin);
+ }
+ ret = snd_sof_dsp_block_write(sdev, SOF_FW_BLK_TYPE_IRAM, 0,
+ (void *)sdev->basefw.fw->data, sdev->basefw.fw->size);
+
+ ret = request_firmware(&adata->fw_dbin, adata->fw_data_bin, sdev->dev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "sof signed firmware data bin is missing\n");
+ return ret;
+
+ } else {
+ dev_dbg(sdev->dev, "request_firmware %s successful\n", adata->fw_data_bin);
+ }
+
+ ret = snd_sof_dsp_block_write(sdev, SOF_FW_BLK_TYPE_DRAM, 0,
+ (void *)adata->fw_dbin->data, adata->fw_dbin->size);
+ return ret;
+}
+EXPORT_SYMBOL_NS(acp_sof_load_signed_firmware, SND_SOC_SOF_AMD_COMMON);
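
configure_pte_for_fw_loading() above writes one 8-byte page-table entry per page into the scratch area; the sketch below (not part of this patch, example_write_pte() is an illustrative name) spells out that entry layout:

#include <linux/bits.h>
#include <linux/kernel.h>
#include "../ops.h"
#include "acp.h"
#include "acp-dsp-offset.h"

/*
 * Each ATU page-table entry occupies 8 bytes of scratch memory:
 *   word 0: low 32 bits of the page's DMA address
 *   word 1: high 32 bits of the DMA address, with BIT(31) set,
 *           mirroring the loop in configure_pte_for_fw_loading()
 */
static void example_write_pte(struct snd_sof_dev *sdev, u32 pte_offset,
			      dma_addr_t page_addr)
{
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + pte_offset,
			  lower_32_bits(page_addr));
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + pte_offset + 4,
			  upper_32_bits(page_addr) | BIT(31));
}
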
diff --git a/sound/soc/sof/amd/acp-pcm.c b/sound/soc/sof/amd/acp-pcm.c
new file mode 100644
index 000000000..0828245bb
--- /dev/null
+++ b/sound/soc/sof/amd/acp-pcm.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2021 Advanced Micro Devices, Inc.
+//
+// Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+
+/*
+ * PCM interface for generic AMD audio ACP DSP block
+ */
+#include <sound/pcm_params.h>
+
+#include "../ops.h"
+#include "acp.h"
+#include "acp-dsp-offset.h"
+
+int acp_pcm_hw_params(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct acp_dsp_stream *stream = runtime->private_data;
+ unsigned int buf_offset, index;
+ u32 size;
+ int ret;
+
+ size = runtime->dma_bytes;
+ stream->num_pages = PFN_UP(runtime->dma_bytes);
+ stream->dmab = substream->runtime->dma_buffer_p;
+
+ ret = acp_dsp_stream_config(sdev, stream);
+ if (ret < 0) {
+ dev_err(sdev->dev, "stream configuration failed\n");
+ return ret;
+ }
+
+ platform_params->use_phy_address = true;
+ platform_params->phy_addr = stream->reg_offset;
+ platform_params->stream_tag = stream->stream_tag;
+ platform_params->cont_update_posn = 1;
+
+ /* write buffer size of stream in scratch memory */
+
+ buf_offset = sdev->debug_box.offset +
+ offsetof(struct scratch_reg_conf, buf_size);
+ index = stream->stream_tag - 1;
+ buf_offset = buf_offset + index * 4;
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + buf_offset, size);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(acp_pcm_hw_params, SND_SOC_SOF_AMD_COMMON);
+
+int acp_pcm_open(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream)
+{
+ struct acp_dsp_stream *stream;
+
+ stream = acp_dsp_stream_get(sdev, 0);
+ if (!stream)
+ return -ENODEV;
+
+ substream->runtime->private_data = stream;
+ stream->substream = substream;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(acp_pcm_open, SND_SOC_SOF_AMD_COMMON);
+
+int acp_pcm_close(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream)
+{
+ struct acp_dsp_stream *stream;
+
+ stream = substream->runtime->private_data;
+ if (!stream) {
+ dev_err(sdev->dev, "No open stream\n");
+ return -EINVAL;
+ }
+
+ stream->substream = NULL;
+ substream->runtime->private_data = NULL;
+
+ return acp_dsp_stream_put(sdev, stream);
+}
+EXPORT_SYMBOL_NS(acp_pcm_close, SND_SOC_SOF_AMD_COMMON);
+
+snd_pcm_uframes_t acp_pcm_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_component *scomp = sdev->component;
+ struct snd_sof_pcm_stream *stream;
+ struct sof_ipc_stream_posn posn;
+ struct snd_sof_pcm *spcm;
+ snd_pcm_uframes_t pos;
+ int ret;
+
+ spcm = snd_sof_find_spcm_dai(scomp, rtd);
+ if (!spcm) {
+ dev_warn_ratelimited(sdev->dev, "warn: can't find PCM with DAI ID %d\n",
+ rtd->dai_link->id);
+ return 0;
+ }
+
+ stream = &spcm->stream[substream->stream];
+ ret = snd_sof_ipc_msg_data(sdev, stream, &posn, sizeof(posn));
+ if (ret < 0) {
+ dev_warn(sdev->dev, "failed to read stream position: %d\n", ret);
+ return 0;
+ }
+
+ memcpy(&stream->posn, &posn, sizeof(posn));
+ pos = spcm->stream[substream->stream].posn.host_posn;
+ pos = bytes_to_frames(substream->runtime, pos);
+
+ return pos;
+}
+EXPORT_SYMBOL_NS(acp_pcm_pointer, SND_SOC_SOF_AMD_COMMON);
diff --git a/sound/soc/sof/amd/acp-probes.c b/sound/soc/sof/amd/acp-probes.c
new file mode 100644
index 000000000..778cf1a8b
--- /dev/null
+++ b/sound/soc/sof/amd/acp-probes.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2023 Advanced Micro Devices, Inc.
+//
+// Authors: V Sujith Kumar Reddy <Vsujithkumar.Reddy@amd.com>
+
+/*
+ * Probe interface for generic AMD audio ACP DSP block
+ */
+
+#include <linux/module.h>
+#include <sound/soc.h>
+#include "../sof-priv.h"
+#include "../sof-client-probes.h"
+#include "../sof-client.h"
+#include "../ops.h"
+#include "acp.h"
+#include "acp-dsp-offset.h"
+
+static int acp_probes_compr_startup(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai, u32 *stream_id)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+ struct acp_dsp_stream *stream;
+ struct acp_dev_data *adata;
+
+ adata = sdev->pdata->hw_pdata;
+ stream = acp_dsp_stream_get(sdev, 0);
+ if (!stream)
+ return -ENODEV;
+
+ stream->cstream = cstream;
+ cstream->runtime->private_data = stream;
+
+ adata->probe_stream = stream;
+ *stream_id = stream->stream_tag;
+
+ return 0;
+}
+
+static int acp_probes_compr_shutdown(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+ struct acp_dsp_stream *stream = cstream->runtime->private_data;
+ struct acp_dev_data *adata;
+ int ret;
+
+ ret = acp_dsp_stream_put(sdev, stream);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to release probe compress stream\n");
+ return ret;
+ }
+
+ adata = sdev->pdata->hw_pdata;
+ stream->cstream = NULL;
+ cstream->runtime->private_data = NULL;
+ adata->probe_stream = NULL;
+
+ return 0;
+}
+
+static int acp_probes_compr_set_params(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+ struct acp_dsp_stream *stream = cstream->runtime->private_data;
+ unsigned int buf_offset, index;
+ u32 size;
+ int ret;
+
+ stream->dmab = cstream->runtime->dma_buffer_p;
+ stream->num_pages = PFN_UP(cstream->runtime->dma_bytes);
+ size = cstream->runtime->buffer_size;
+
+ ret = acp_dsp_stream_config(sdev, stream);
+ if (ret < 0) {
+ acp_dsp_stream_put(sdev, stream);
+ return ret;
+ }
+
+ /* write buffer size of stream in scratch memory */
+
+ buf_offset = sdev->debug_box.offset +
+ offsetof(struct scratch_reg_conf, buf_size);
+ index = stream->stream_tag - 1;
+ buf_offset = buf_offset + index * 4;
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + buf_offset, size);
+
+ return 0;
+}
+
+static int acp_probes_compr_trigger(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ int cmd, struct snd_soc_dai *dai)
+{
+	/* Nothing to do here; the trigger callback is mandatory, so define it as a no-op */
+ return 0;
+}
+
+static int acp_probes_compr_pointer(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp,
+ struct snd_soc_dai *dai)
+{
+ struct acp_dsp_stream *stream = cstream->runtime->private_data;
+ struct snd_soc_pcm_stream *pstream;
+
+ pstream = &dai->driver->capture;
+ tstamp->copied_total = stream->cstream_posn;
+ tstamp->sampling_rate = snd_pcm_rate_bit_to_rate(pstream->rates);
+
+ return 0;
+}
+
+/* SOF client implementation */
+static const struct sof_probes_host_ops acp_probes_ops = {
+ .startup = acp_probes_compr_startup,
+ .shutdown = acp_probes_compr_shutdown,
+ .set_params = acp_probes_compr_set_params,
+ .trigger = acp_probes_compr_trigger,
+ .pointer = acp_probes_compr_pointer,
+};
+
+int acp_probes_register(struct snd_sof_dev *sdev)
+{
+ return sof_client_dev_register(sdev, "acp-probes", 0, &acp_probes_ops,
+ sizeof(acp_probes_ops));
+}
+EXPORT_SYMBOL_NS(acp_probes_register, SND_SOC_SOF_AMD_COMMON);
+
+void acp_probes_unregister(struct snd_sof_dev *sdev)
+{
+ sof_client_dev_unregister(sdev, "acp-probes", 0);
+}
+EXPORT_SYMBOL_NS(acp_probes_unregister, SND_SOC_SOF_AMD_COMMON);
+
+MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/amd/acp-stream.c b/sound/soc/sof/amd/acp-stream.c
new file mode 100644
index 000000000..6f40ef7ba
--- /dev/null
+++ b/sound/soc/sof/amd/acp-stream.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2021 Advanced Micro Devices, Inc.
+//
+// Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+
+/*
+ * Hardware interface for generic AMD audio DSP ACP IP
+ */
+
+#include "../ops.h"
+#include "acp-dsp-offset.h"
+#include "acp.h"
+
+#define PTE_GRP1_OFFSET 0x00000000
+#define PTE_GRP2_OFFSET 0x00800000
+#define PTE_GRP3_OFFSET 0x01000000
+#define PTE_GRP4_OFFSET 0x01800000
+#define PTE_GRP5_OFFSET 0x02000000
+#define PTE_GRP6_OFFSET 0x02800000
+#define PTE_GRP7_OFFSET 0x03000000
+#define PTE_GRP8_OFFSET 0x03800000
+
+int acp_dsp_stream_config(struct snd_sof_dev *sdev, struct acp_dsp_stream *stream)
+{
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ unsigned int pte_reg, pte_size, phy_addr_offset, index;
+ int stream_tag = stream->stream_tag;
+ u32 low, high, offset, reg_val;
+ dma_addr_t addr;
+ int page_idx;
+
+ switch (stream_tag) {
+ case 1:
+ pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_1;
+ pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1;
+ offset = offsetof(struct scratch_reg_conf, grp1_pte);
+ stream->reg_offset = PTE_GRP1_OFFSET;
+ break;
+ case 2:
+ pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_2;
+ pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_2;
+ offset = offsetof(struct scratch_reg_conf, grp2_pte);
+ stream->reg_offset = PTE_GRP2_OFFSET;
+ break;
+ case 3:
+ pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_3;
+ pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_3;
+ offset = offsetof(struct scratch_reg_conf, grp3_pte);
+ stream->reg_offset = PTE_GRP3_OFFSET;
+ break;
+ case 4:
+ pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_4;
+ pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_4;
+ offset = offsetof(struct scratch_reg_conf, grp4_pte);
+ stream->reg_offset = PTE_GRP4_OFFSET;
+ break;
+ case 5:
+ pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_5;
+ pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_5;
+ offset = offsetof(struct scratch_reg_conf, grp5_pte);
+ stream->reg_offset = PTE_GRP5_OFFSET;
+ break;
+ case 6:
+ pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_6;
+ pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_6;
+ offset = offsetof(struct scratch_reg_conf, grp6_pte);
+ stream->reg_offset = PTE_GRP6_OFFSET;
+ break;
+ case 7:
+ pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_7;
+ pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_7;
+ offset = offsetof(struct scratch_reg_conf, grp7_pte);
+ stream->reg_offset = PTE_GRP7_OFFSET;
+ break;
+ case 8:
+ pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_8;
+ pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_8;
+ offset = offsetof(struct scratch_reg_conf, grp8_pte);
+ stream->reg_offset = PTE_GRP8_OFFSET;
+ break;
+ default:
+ dev_err(sdev->dev, "Invalid stream tag %d\n", stream_tag);
+ return -EINVAL;
+ }
+
+ /* write phy_addr in scratch memory */
+
+ phy_addr_offset = sdev->debug_box.offset +
+ offsetof(struct scratch_reg_conf, reg_offset);
+ index = stream_tag - 1;
+ phy_addr_offset = phy_addr_offset + index * 4;
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 +
+ phy_addr_offset, stream->reg_offset);
+
+ /* Group Enable */
+ offset = offset + sdev->debug_box.offset;
+ reg_val = desc->sram_pte_offset + offset;
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, pte_reg, reg_val | BIT(31));
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, pte_size, PAGE_SIZE_4K_ENABLE);
+
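+	/* Write one 64-bit PTE (low and high dwords) per buffer page into the scratch area */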
+ for (page_idx = 0; page_idx < stream->num_pages; page_idx++) {
+ addr = snd_sgbuf_get_addr(stream->dmab, page_idx * PAGE_SIZE);
+
+		/* Load the low address of the page into ACP SRAM through SRBM */
+ low = lower_32_bits(addr);
+ high = upper_32_bits(addr);
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + offset, low);
+
+ high |= BIT(31);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + offset + 4, high);
+ /* Move to next physically contiguous page */
+ offset += 8;
+ }
+
+ /* Flush ATU Cache after PTE Update */
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_CTRL, ACP_ATU_CACHE_INVALID);
+
+ return 0;
+}
+
+struct acp_dsp_stream *acp_dsp_stream_get(struct snd_sof_dev *sdev, int tag)
+{
+ struct acp_dev_data *adata = sdev->pdata->hw_pdata;
+ struct acp_dsp_stream *stream = adata->stream_buf;
+ int i;
+
+ for (i = 0; i < ACP_MAX_STREAM; i++, stream++) {
+ if (stream->active)
+ continue;
+
+		/* return the first inactive stream if no tag is specified */
+ if (!tag) {
+ stream->active = 1;
+ return stream;
+ }
+
+ /* check if this is the requested stream tag */
+ if (stream->stream_tag == tag) {
+ stream->active = 1;
+ return stream;
+ }
+ }
+
+	dev_err(sdev->dev, "stream %d is active or no inactive stream available\n", tag);
+ return NULL;
+}
+EXPORT_SYMBOL_NS(acp_dsp_stream_get, SND_SOC_SOF_AMD_COMMON);
+
+int acp_dsp_stream_put(struct snd_sof_dev *sdev,
+ struct acp_dsp_stream *acp_stream)
+{
+ struct acp_dev_data *adata = sdev->pdata->hw_pdata;
+ struct acp_dsp_stream *stream = adata->stream_buf;
+ int i;
+
+ /* Free an active stream */
+ for (i = 0; i < ACP_MAX_STREAM; i++, stream++) {
+ if (stream == acp_stream) {
+ stream->active = 0;
+ return 0;
+ }
+ }
+
+ dev_err(sdev->dev, "Cannot find active stream tag %d\n", acp_stream->stream_tag);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_NS(acp_dsp_stream_put, SND_SOC_SOF_AMD_COMMON);
+
+int acp_dsp_stream_init(struct snd_sof_dev *sdev)
+{
+ struct acp_dev_data *adata = sdev->pdata->hw_pdata;
+ int i;
+
+ for (i = 0; i < ACP_MAX_STREAM; i++) {
+ adata->stream_buf[i].sdev = sdev;
+ adata->stream_buf[i].active = 0;
+ adata->stream_buf[i].stream_tag = i + 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_NS(acp_dsp_stream_init, SND_SOC_SOF_AMD_COMMON);
diff --git a/sound/soc/sof/amd/acp-trace.c b/sound/soc/sof/amd/acp-trace.c
new file mode 100644
index 000000000..c9482b27c
--- /dev/null
+++ b/sound/soc/sof/amd/acp-trace.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2021 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Authors: Vishnuvardhanrao Ravuapati <vishnuvardhanrao.ravulapati@amd.com>
+// V Sujith Kumar Reddy <Vsujithkumar.Reddy@amd.com>
+
+/* This file supports the host trace logger driver callbacks for SOF firmware */
+
+#include "acp.h"
+
+#define ACP_LOGGER_STREAM 8
+#define NUM_PAGES 16
+
+int acp_sof_trace_release(struct snd_sof_dev *sdev)
+{
+ struct acp_dsp_stream *stream;
+ struct acp_dev_data *adata;
+ int ret;
+
+ adata = sdev->pdata->hw_pdata;
+ stream = adata->dtrace_stream;
+ ret = acp_dsp_stream_put(sdev, stream);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to release trace stream\n");
+ return ret;
+ }
+
+ adata->dtrace_stream = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_NS(acp_sof_trace_release, SND_SOC_SOF_AMD_COMMON);
+
+int acp_sof_trace_init(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
+ struct sof_ipc_dma_trace_params_ext *dtrace_params)
+{
+ struct acp_dsp_stream *stream;
+ struct acp_dev_data *adata;
+ int ret;
+
+ adata = sdev->pdata->hw_pdata;
+ stream = acp_dsp_stream_get(sdev, ACP_LOGGER_STREAM);
+ if (!stream)
+ return -ENODEV;
+
+ stream->dmab = dmab;
+ stream->num_pages = NUM_PAGES;
+
+ ret = acp_dsp_stream_config(sdev, stream);
+ if (ret < 0) {
+ acp_dsp_stream_put(sdev, stream);
+ return ret;
+ }
+
+ adata->dtrace_stream = stream;
+ dtrace_params->stream_tag = stream->stream_tag;
+ dtrace_params->buffer.phy_addr = stream->reg_offset;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(acp_sof_trace_init, SND_SOC_SOF_AMD_COMMON);
diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
new file mode 100644
index 000000000..19a801908
--- /dev/null
+++ b/sound/soc/sof/amd/acp.c
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2021, 2023 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Authors: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
+// Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+
+/*
+ * Hardware interface for generic AMD ACP processor
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "../ops.h"
+#include "acp.h"
+#include "acp-dsp-offset.h"
+
+#define SECURED_FIRMWARE 1
+
+static bool enable_fw_debug;
+module_param(enable_fw_debug, bool, 0444);
+MODULE_PARM_DESC(enable_fw_debug, "Enable Firmware debug");
+
+const struct dmi_system_id acp_sof_quirk_table[] = {
+ {
+ /* Valve Jupiter device */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Galileo"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Sephiroth"),
+ },
+ .driver_data = (void *)SECURED_FIRMWARE,
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(acp_sof_quirk_table);
+
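+/*
+ * SMN registers are accessed indirectly through the host bridge PCI config
+ * space: the SMN address is written to the register at offset 0x60 and the
+ * data is then read or written through the register at offset 0x64.
+ */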
+static int smn_write(struct pci_dev *dev, u32 smn_addr, u32 data)
+{
+ pci_write_config_dword(dev, 0x60, smn_addr);
+ pci_write_config_dword(dev, 0x64, data);
+
+ return 0;
+}
+
+static int smn_read(struct pci_dev *dev, u32 smn_addr)
+{
+ u32 data = 0;
+
+ pci_write_config_dword(dev, 0x60, smn_addr);
+ pci_read_config_dword(dev, 0x64, &data);
+
+ return data;
+}
+
+static void init_dma_descriptor(struct acp_dev_data *adata)
+{
+ struct snd_sof_dev *sdev = adata->dev;
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ unsigned int addr;
+
+ addr = desc->sram_pte_offset + sdev->debug_box.offset +
+ offsetof(struct scratch_reg_conf, dma_desc);
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_BASE_ADDR, addr);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_MAX_NUM_DSCR, ACP_MAX_DESC_CNT);
+}
+
+static void configure_dma_descriptor(struct acp_dev_data *adata, unsigned short idx,
+ struct dma_descriptor *dscr_info)
+{
+ struct snd_sof_dev *sdev = adata->dev;
+ unsigned int offset;
+
+ offset = ACP_SCRATCH_REG_0 + sdev->debug_box.offset +
+ offsetof(struct scratch_reg_conf, dma_desc) +
+ idx * sizeof(struct dma_descriptor);
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset, dscr_info->src_addr);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x4, dscr_info->dest_addr);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x8, dscr_info->tx_cnt.u32_all);
+}
+
+static int config_dma_channel(struct acp_dev_data *adata, unsigned int ch,
+ unsigned int idx, unsigned int dscr_count)
+{
+ struct snd_sof_dev *sdev = adata->dev;
+ unsigned int val, status;
+ int ret;
+
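+	/* Request a graceful reset of the DMA channel and wait for its reset-status bit */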
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32),
+ ACP_DMA_CH_RST | ACP_DMA_CH_GRACEFUL_RST_EN);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_RST_STS, val,
+ val & (1 << ch), ACP_REG_POLL_INTERVAL,
+ ACP_REG_POLL_TIMEOUT_US);
+ if (ret < 0) {
+ status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_ERROR_STATUS);
+ val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_ERR_STS_0 + ch * sizeof(u32));
+
+ dev_err(sdev->dev, "ACP_DMA_ERR_STS :0x%x ACP_ERROR_STATUS :0x%x\n", val, status);
+ return ret;
+ }
+
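+	/* Program descriptor count, start index and priority, then start the channel */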
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, (ACP_DMA_CNTL_0 + ch * sizeof(u32)), 0);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_CNT_0 + ch * sizeof(u32), dscr_count);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_STRT_IDX_0 + ch * sizeof(u32), idx);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_PRIO_0 + ch * sizeof(u32), 0);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32), ACP_DMA_CH_RUN);
+
+ return ret;
+}
+
+static int acpbus_dma_start(struct acp_dev_data *adata, unsigned int ch,
+ unsigned int dscr_count, struct dma_descriptor *dscr_info)
+{
+ struct snd_sof_dev *sdev = adata->dev;
+ int ret;
+ u16 dscr;
+
+ if (!dscr_info || !dscr_count)
+ return -EINVAL;
+
+ for (dscr = 0; dscr < dscr_count; dscr++)
+ configure_dma_descriptor(adata, dscr, dscr_info++);
+
+ ret = config_dma_channel(adata, ch, 0, dscr_count);
+ if (ret < 0)
+ dev_err(sdev->dev, "config dma ch failed:%d\n", ret);
+
+ return ret;
+}
+
+int configure_and_run_dma(struct acp_dev_data *adata, unsigned int src_addr,
+ unsigned int dest_addr, int dsp_data_size)
+{
+ struct snd_sof_dev *sdev = adata->dev;
+ unsigned int desc_count, index;
+ int ret;
+
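+	/* Split the transfer into page-sized descriptors; the last one carries the remainder */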
+ for (desc_count = 0; desc_count < ACP_MAX_DESC && dsp_data_size >= 0;
+ desc_count++, dsp_data_size -= ACP_PAGE_SIZE) {
+ adata->dscr_info[desc_count].src_addr = src_addr + desc_count * ACP_PAGE_SIZE;
+ adata->dscr_info[desc_count].dest_addr = dest_addr + desc_count * ACP_PAGE_SIZE;
+ adata->dscr_info[desc_count].tx_cnt.bits.count = ACP_PAGE_SIZE;
+ if (dsp_data_size < ACP_PAGE_SIZE)
+ adata->dscr_info[desc_count].tx_cnt.bits.count = dsp_data_size;
+ }
+
+ ret = acpbus_dma_start(adata, 0, desc_count, adata->dscr_info);
+ if (ret)
+ dev_err(sdev->dev, "acpbus_dma_start failed\n");
+
+ /* Clear descriptor array */
+ for (index = 0; index < desc_count; index++)
+ memset(&adata->dscr_info[index], 0x00, sizeof(struct dma_descriptor));
+
+ return ret;
+}
+
+/*
+ * psp_mbox_ready - poll the ready bit of the PSP mailbox
+ * @adata: acp device data
+ * @ack: true when polling for the PSP command ack, false for mailbox readiness
+ */
+
+static int psp_mbox_ready(struct acp_dev_data *adata, bool ack)
+{
+ struct snd_sof_dev *sdev = adata->dev;
+ int ret;
+ u32 data;
+
+ ret = read_poll_timeout(smn_read, data, data & MBOX_READY_MASK, MBOX_DELAY_US,
+ ACP_PSP_TIMEOUT_US, false, adata->smn_dev, MP0_C2PMSG_114_REG);
+ if (!ret)
+ return 0;
+
+ dev_err(sdev->dev, "PSP error status %x\n", data & MBOX_STATUS_MASK);
+
+ if (ack)
+ return -ETIMEDOUT;
+
+ return -EBUSY;
+}
+
+/*
+ * psp_send_cmd - send a PSP command over the mailbox
+ * @adata: acp device data
+ * @cmd: non-zero value identifying the command type
+ */
+
+static int psp_send_cmd(struct acp_dev_data *adata, int cmd)
+{
+ struct snd_sof_dev *sdev = adata->dev;
+ int ret;
+ u32 data;
+
+ if (!cmd)
+ return -EINVAL;
+
+ /* Get a non-zero Doorbell value from PSP */
+ ret = read_poll_timeout(smn_read, data, data, MBOX_DELAY_US, ACP_PSP_TIMEOUT_US, false,
+ adata->smn_dev, MP0_C2PMSG_73_REG);
+
+ if (ret) {
+ dev_err(sdev->dev, "Failed to get Doorbell from MBOX %x\n", MP0_C2PMSG_73_REG);
+ return ret;
+ }
+
+ /* Check if PSP is ready for new command */
+ ret = psp_mbox_ready(adata, 0);
+ if (ret)
+ return ret;
+
+ smn_write(adata->smn_dev, MP0_C2PMSG_114_REG, cmd);
+
+ /* Ring the Doorbell for PSP */
+ smn_write(adata->smn_dev, MP0_C2PMSG_73_REG, data);
+
+ /* Check MBOX ready as PSP ack */
+ ret = psp_mbox_ready(adata, 1);
+
+ return ret;
+}
+
+int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
+ unsigned int start_addr, unsigned int dest_addr,
+ unsigned int image_length)
+{
+ struct snd_sof_dev *sdev = adata->dev;
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ unsigned int tx_count, fw_qualifier, val;
+ int ret;
+
+ if (!image_addr) {
+ dev_err(sdev->dev, "SHA DMA image address is NULL\n");
+ return -EINVAL;
+ }
+
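+	/* If a previous SHA DMA transfer is still running, reset the engine first */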
+ val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD);
+ if (val & ACP_SHA_RUN) {
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RESET);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD_STS,
+ val, val & ACP_SHA_RESET,
+ ACP_REG_POLL_INTERVAL,
+ ACP_REG_POLL_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "SHA DMA Failed to Reset\n");
+ return ret;
+ }
+ }
+
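+	/* For signed firmware images, ask the SHA DMA engine to include the image header */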
+ if (adata->signed_fw_image)
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_INCLUDE_HDR, ACP_SHA_HEADER);
+
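+	/* Program source, destination and length, then start the SHA DMA transfer */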
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_DESTINATION_ADDR, dest_addr);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_MSG_LENGTH, image_length);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RUN);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_TRANSFER_BYTE_CNT,
+ tx_count, tx_count == image_length,
+ ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "SHA DMA Failed to Transfer Length %x\n", tx_count);
+ return ret;
+ }
+
+ /* psp_send_cmd only required for renoir platform (rev - 3) */
+ if (desc->rev == 3) {
+ ret = psp_send_cmd(adata, MBOX_ACP_SHA_DMA_COMMAND);
+ if (ret)
+ return ret;
+ }
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DSP_FW_QUALIFIER,
+ fw_qualifier, fw_qualifier & DSP_FW_RUN_ENABLE,
+ ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "PSP validation failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int acp_dma_status(struct acp_dev_data *adata, unsigned char ch)
+{
+ struct snd_sof_dev *sdev = adata->dev;
+ unsigned int val;
+ int ret = 0;
+
+ val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32));
+ if (val & ACP_DMA_CH_RUN) {
+ ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_STS, val, !val,
+ ACP_REG_POLL_INTERVAL,
+ ACP_DMA_COMPLETE_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(sdev->dev, "DMA_CHANNEL %d status timeout\n", ch);
+ }
+
+ return ret;
+}
+
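+/* Copy data between host memory and the ACP scratch registers, one 32-bit word at a time */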
+void memcpy_from_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *dst, size_t bytes)
+{
+ unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
+ int i, j;
+
+ for (i = 0, j = 0; i < bytes; i = i + 4, j++)
+ dst[j] = snd_sof_dsp_read(sdev, ACP_DSP_BAR, reg_offset + i);
+}
+
+void memcpy_to_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *src, size_t bytes)
+{
+ unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
+ int i, j;
+
+ for (i = 0, j = 0; i < bytes; i = i + 4, j++)
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, reg_offset + i, src[j]);
+}
+
+static int acp_memory_init(struct snd_sof_dev *sdev)
+{
+ struct acp_dev_data *adata = sdev->pdata->hw_pdata;
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+
+ snd_sof_dsp_update_bits(sdev, ACP_DSP_BAR, desc->dsp_intr_base + DSP_SW_INTR_CNTL_OFFSET,
+ ACP_DSP_INTR_EN_MASK, ACP_DSP_INTR_EN_MASK);
+ init_dma_descriptor(adata);
+
+ return 0;
+}
+
+static irqreturn_t acp_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ unsigned int count = ACP_HW_SEM_RETRY_COUNT;
+
+ while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset)) {
+ /* Wait until acquired HW Semaphore lock or timeout */
+ count--;
+ if (!count) {
+ dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
+ return IRQ_NONE;
+ }
+ }
+
+ sof_ops(sdev)->irq_thread(irq, sdev);
+ /* Unlock or Release HW Semaphore */
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t acp_irq_handler(int irq, void *dev_id)
+{
+ struct snd_sof_dev *sdev = dev_id;
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ unsigned int base = desc->dsp_intr_base;
+ unsigned int val;
+
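+	/* Acknowledge the DSP-to-host interrupt and defer handling to the IRQ thread */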
+ val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
+ if (val & ACP_DSP_TO_HOST_IRQ) {
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET,
+ ACP_DSP_TO_HOST_IRQ);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static int acp_power_on(struct snd_sof_dev *sdev)
+{
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ unsigned int base = desc->pgfsm_base;
+ unsigned int val;
+ int ret;
+
+ val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET);
+
+ if (val == ACP_POWERED_ON)
+ return 0;
+
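+	/* Request power-on via the PGFSM control register and wait for the status to clear */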
+ if (val & ACP_PGFSM_STATUS_MASK)
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET,
+ ACP_PGFSM_CNTL_POWER_ON_MASK);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val,
+ !val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(sdev->dev, "timeout in ACP_PGFSM_STATUS read\n");
+
+ return ret;
+}
+
+static int acp_reset(struct snd_sof_dev *sdev)
+{
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ unsigned int val;
+ int ret;
+
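+	/* Assert the ACP soft reset, wait for completion, then release the reset */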
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_ASSERT_RESET);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val,
+ val & ACP_SOFT_RESET_DONE_MASK,
+ ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "timeout asserting reset\n");
+ return ret;
+ }
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_RELEASE_RESET);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val, !val,
+ ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(sdev->dev, "timeout in releasing reset\n");
+
+ if (desc->acp_clkmux_sel)
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_clkmux_sel, ACP_CLOCK_ACLK);
+
+ if (desc->ext_intr_enb)
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_enb, 0x01);
+
+ return ret;
+}
+
+static int acp_init(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ /* power on */
+ ret = acp_power_on(sdev);
+ if (ret) {
+ dev_err(sdev->dev, "ACP power on failed\n");
+ return ret;
+ }
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x01);
+ /* Reset */
+ return acp_reset(sdev);
+}
+
+int amd_sof_acp_suspend(struct snd_sof_dev *sdev, u32 target_state)
+{
+ int ret;
+
+ ret = acp_reset(sdev);
+ if (ret) {
+ dev_err(sdev->dev, "ACP Reset failed\n");
+ return ret;
+ }
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x00);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(amd_sof_acp_suspend, SND_SOC_SOF_AMD_COMMON);
+
+int amd_sof_acp_resume(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ ret = acp_init(sdev);
+ if (ret) {
+ dev_err(sdev->dev, "ACP Init failed\n");
+ return ret;
+ }
+ return acp_memory_init(sdev);
+}
+EXPORT_SYMBOL_NS(amd_sof_acp_resume, SND_SOC_SOF_AMD_COMMON);
+
+int amd_sof_acp_probe(struct snd_sof_dev *sdev)
+{
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ struct acp_dev_data *adata;
+ const struct sof_amd_acp_desc *chip;
+ const struct dmi_system_id *dmi_id;
+ unsigned int addr;
+ int ret;
+
+ chip = get_chip_info(sdev->pdata);
+ if (!chip) {
+ dev_err(sdev->dev, "no such device supported, chip id:%x\n", pci->device);
+ return -EIO;
+ }
+ adata = devm_kzalloc(sdev->dev, sizeof(struct acp_dev_data),
+ GFP_KERNEL);
+ if (!adata)
+ return -ENOMEM;
+
+ adata->dev = sdev;
+ adata->dmic_dev = platform_device_register_data(sdev->dev, "dmic-codec",
+ PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(adata->dmic_dev)) {
+ dev_err(sdev->dev, "failed to register platform for dmic codec\n");
+ return PTR_ERR(adata->dmic_dev);
+ }
+ addr = pci_resource_start(pci, ACP_DSP_BAR);
+ sdev->bar[ACP_DSP_BAR] = devm_ioremap(sdev->dev, addr, pci_resource_len(pci, ACP_DSP_BAR));
+ if (!sdev->bar[ACP_DSP_BAR]) {
+ dev_err(sdev->dev, "ioremap error\n");
+ ret = -ENXIO;
+ goto unregister_dev;
+ }
+
+ pci_set_master(pci);
+
+ sdev->pdata->hw_pdata = adata;
+ adata->smn_dev = pci_get_device(PCI_VENDOR_ID_AMD, chip->host_bridge_id, NULL);
+ if (!adata->smn_dev) {
+ dev_err(sdev->dev, "Failed to get host bridge device\n");
+ ret = -ENODEV;
+ goto unregister_dev;
+ }
+
+ sdev->ipc_irq = pci->irq;
+ ret = request_threaded_irq(sdev->ipc_irq, acp_irq_handler, acp_irq_thread,
+ IRQF_SHARED, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ goto free_smn_dev;
+ }
+
+ ret = acp_init(sdev);
+ if (ret < 0)
+ goto free_ipc_irq;
+
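+	/* Lay out the IPC windows back to back: DSP box, host box, then debug box */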
+ sdev->dsp_box.offset = 0;
+ sdev->dsp_box.size = BOX_SIZE_512;
+
+ sdev->host_box.offset = sdev->dsp_box.offset + sdev->dsp_box.size;
+ sdev->host_box.size = BOX_SIZE_512;
+
+ sdev->debug_box.offset = sdev->host_box.offset + sdev->host_box.size;
+ sdev->debug_box.size = BOX_SIZE_1024;
+
+ adata->signed_fw_image = false;
+ dmi_id = dmi_first_match(acp_sof_quirk_table);
+ if (dmi_id && dmi_id->driver_data) {
+ adata->fw_code_bin = kasprintf(GFP_KERNEL, "%s/sof-%s-code.bin",
+ plat_data->fw_filename_prefix,
+ chip->name);
+ adata->fw_data_bin = kasprintf(GFP_KERNEL, "%s/sof-%s-data.bin",
+ plat_data->fw_filename_prefix,
+ chip->name);
+ adata->signed_fw_image = dmi_id->driver_data;
+
+ dev_dbg(sdev->dev, "fw_code_bin:%s, fw_data_bin:%s\n", adata->fw_code_bin,
+ adata->fw_data_bin);
+ }
+ adata->enable_fw_debug = enable_fw_debug;
+ acp_memory_init(sdev);
+
+ acp_dsp_stream_init(sdev);
+
+ return 0;
+
+free_ipc_irq:
+ free_irq(sdev->ipc_irq, sdev);
+free_smn_dev:
+ pci_dev_put(adata->smn_dev);
+unregister_dev:
+ platform_device_unregister(adata->dmic_dev);
+ return ret;
+}
+EXPORT_SYMBOL_NS(amd_sof_acp_probe, SND_SOC_SOF_AMD_COMMON);
+
+int amd_sof_acp_remove(struct snd_sof_dev *sdev)
+{
+ struct acp_dev_data *adata = sdev->pdata->hw_pdata;
+
+ if (adata->smn_dev)
+ pci_dev_put(adata->smn_dev);
+
+ if (sdev->ipc_irq)
+ free_irq(sdev->ipc_irq, sdev);
+
+ if (adata->dmic_dev)
+ platform_device_unregister(adata->dmic_dev);
+
+ return acp_reset(sdev);
+}
+EXPORT_SYMBOL_NS(amd_sof_acp_remove, SND_SOC_SOF_AMD_COMMON);
+
+MODULE_DESCRIPTION("AMD ACP sof driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h
new file mode 100644
index 000000000..4dcceb764
--- /dev/null
+++ b/sound/soc/sof/amd/acp.h
@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2021, 2023 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Author: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+ */
+
+#ifndef __SOF_AMD_ACP_H
+#define __SOF_AMD_ACP_H
+
+#include <linux/dmi.h>
+
+#include "../sof-priv.h"
+#include "../sof-audio.h"
+
+#define ACP_MAX_STREAM 8
+
+#define ACP_DSP_BAR 0
+
+#define ACP_HW_SEM_RETRY_COUNT 10000
+#define ACP_REG_POLL_INTERVAL 500
+#define ACP_REG_POLL_TIMEOUT_US 2000
+#define ACP_DMA_COMPLETE_TIMEOUT_US 5000
+
+#define ACP_PGFSM_CNTL_POWER_ON_MASK 0x01
+#define ACP_PGFSM_STATUS_MASK 0x03
+#define ACP_POWERED_ON 0x00
+#define ACP_ASSERT_RESET 0x01
+#define ACP_RELEASE_RESET 0x00
+#define ACP_SOFT_RESET_DONE_MASK 0x00010001
+
+#define ACP_DSP_INTR_EN_MASK 0x00000001
+#define ACP3X_SRAM_PTE_OFFSET 0x02050000
+#define ACP5X_SRAM_PTE_OFFSET 0x02050000
+#define ACP6X_SRAM_PTE_OFFSET 0x03800000
+#define PAGE_SIZE_4K_ENABLE 0x2
+#define ACP_PAGE_SIZE 0x1000
+#define ACP_DMA_CH_RUN 0x02
+#define ACP_MAX_DESC_CNT 0x02
+#define DSP_FW_RUN_ENABLE 0x01
+#define ACP_SHA_RUN 0x01
+#define ACP_SHA_RESET 0x02
+#define ACP_SHA_HEADER 0x01
+#define ACP_DMA_CH_RST 0x01
+#define ACP_DMA_CH_GRACEFUL_RST_EN 0x10
+#define ACP_ATU_CACHE_INVALID 0x01
+#define ACP_MAX_DESC 128
+#define ACPBUS_REG_BASE_OFFSET ACP_DMA_CNTL_0
+
+#define ACP_DEFAULT_DRAM_LENGTH 0x00080000
+#define ACP3X_SCRATCH_MEMORY_ADDRESS 0x02050000
+#define ACP_SYSTEM_MEMORY_WINDOW 0x4000000
+#define ACP_IRAM_BASE_ADDRESS 0x000000
+#define ACP_DATA_RAM_BASE_ADDRESS 0x01000000
+#define ACP_DRAM_PAGE_COUNT 128
+
+#define ACP_DSP_TO_HOST_IRQ 0x04
+
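+/* Supported ACP variants, identified by PCI revision ID */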
+#define ACP_RN_PCI_ID 0x01
+#define ACP_VANGOGH_PCI_ID 0x50
+#define ACP_RMB_PCI_ID 0x6F
+
+#define HOST_BRIDGE_CZN 0x1630
+#define HOST_BRIDGE_VGH 0x1645
+#define HOST_BRIDGE_RMB 0x14B5
+#define ACP_SHA_STAT 0x8000
+#define ACP_PSP_TIMEOUT_US 1000000
+#define ACP_EXT_INTR_ERROR_STAT 0x20000000
+#define MP0_C2PMSG_114_REG 0x3810AC8
+#define MP0_C2PMSG_73_REG 0x3810A24
+#define MBOX_ACP_SHA_DMA_COMMAND 0x70000
+#define MBOX_DELAY_US 1000
+#define MBOX_READY_MASK 0x80000000
+#define MBOX_STATUS_MASK 0xFFFF
+
+#define BOX_SIZE_512 0x200
+#define BOX_SIZE_1024 0x400
+
+#define EXCEPT_MAX_HDR_SIZE 0x400
+#define AMD_STACK_DUMP_SIZE 32
+
+#define SRAM1_SIZE 0x13A000
+#define PROBE_STATUS_BIT BIT(31)
+
+#define ACP_FIRMWARE_SIGNATURE 0x100
+
+enum clock_source {
+ ACP_CLOCK_96M = 0,
+ ACP_CLOCK_48M,
+ ACP_CLOCK_24M,
+ ACP_CLOCK_ACLK,
+ ACP_CLOCK_MCLK,
+};
+
+struct acp_atu_grp_pte {
+ u32 low;
+ u32 high;
+};
+
+union dma_tx_cnt {
+ struct {
+ unsigned int count : 19;
+ unsigned int reserved : 12;
+		unsigned int ioc : 1;
+ } bitfields, bits;
+ unsigned int u32_all;
+ signed int i32_all;
+};
+
+struct dma_descriptor {
+ unsigned int src_addr;
+ unsigned int dest_addr;
+ union dma_tx_cnt tx_cnt;
+ unsigned int reserved;
+};
+
+/* Scratch memory structure for communication between the host and the DSP */
+struct scratch_ipc_conf {
+ /* Debug memory */
+ u8 sof_debug_box[1024];
+	/* Exception memory */
+ u8 sof_except_box[1024];
+ /* Stream buffer */
+ u8 sof_stream_box[1024];
+ /* Trace buffer */
+ u8 sof_trace_box[1024];
+ /* Host msg flag */
+ u32 sof_host_msg_write;
+	/* Host ack flag */
+ u32 sof_host_ack_write;
+ /* DSP msg flag */
+ u32 sof_dsp_msg_write;
+	/* DSP ack flag */
+ u32 sof_dsp_ack_write;
+};
+
+struct scratch_reg_conf {
+ struct scratch_ipc_conf info;
+ struct acp_atu_grp_pte grp1_pte[16];
+ struct acp_atu_grp_pte grp2_pte[16];
+ struct acp_atu_grp_pte grp3_pte[16];
+ struct acp_atu_grp_pte grp4_pte[16];
+ struct acp_atu_grp_pte grp5_pte[16];
+ struct acp_atu_grp_pte grp6_pte[16];
+ struct acp_atu_grp_pte grp7_pte[16];
+ struct acp_atu_grp_pte grp8_pte[16];
+ struct dma_descriptor dma_desc[64];
+ unsigned int reg_offset[8];
+ unsigned int buf_size[8];
+ u8 acp_tx_fifo_buf[256];
+ u8 acp_rx_fifo_buf[256];
+ unsigned int reserve[];
+};
+
+struct acp_dsp_stream {
+ struct list_head list;
+ struct snd_sof_dev *sdev;
+ struct snd_pcm_substream *substream;
+ struct snd_dma_buffer *dmab;
+ int num_pages;
+ int stream_tag;
+ int active;
+ unsigned int reg_offset;
+ size_t posn_offset;
+ struct snd_compr_stream *cstream;
+ u64 cstream_posn;
+};
+
+struct sof_amd_acp_desc {
+ unsigned int rev;
+ const char *name;
+ unsigned int host_bridge_id;
+ u32 pgfsm_base;
+ u32 ext_intr_enb;
+ u32 ext_intr_stat;
+ u32 dsp_intr_base;
+ u32 sram_pte_offset;
+ u32 hw_semaphore_offset;
+ u32 acp_clkmux_sel;
+ u32 fusion_dsp_offset;
+ u32 probe_reg_offset;
+};
+
+/* Common device data struct for ACP devices */
+struct acp_dev_data {
+ struct snd_sof_dev *dev;
+ const struct firmware *fw_dbin;
+ /* DMIC device */
+ struct platform_device *dmic_dev;
+ unsigned int fw_bin_size;
+ unsigned int fw_data_bin_size;
+ const char *fw_code_bin;
+ const char *fw_data_bin;
+ u32 fw_bin_page_count;
+ dma_addr_t sha_dma_addr;
+ u8 *bin_buf;
+ dma_addr_t dma_addr;
+ u8 *data_buf;
+ bool signed_fw_image;
+ struct dma_descriptor dscr_info[ACP_MAX_DESC];
+ struct acp_dsp_stream stream_buf[ACP_MAX_STREAM];
+ struct acp_dsp_stream *dtrace_stream;
+ struct pci_dev *smn_dev;
+ struct acp_dsp_stream *probe_stream;
+ bool enable_fw_debug;
+};
+
+void memcpy_to_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *src, size_t bytes);
+void memcpy_from_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *dst, size_t bytes);
+
+int acp_dma_status(struct acp_dev_data *adata, unsigned char ch);
+int configure_and_run_dma(struct acp_dev_data *adata, unsigned int src_addr,
+ unsigned int dest_addr, int dsp_data_size);
+int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
+ unsigned int start_addr, unsigned int dest_addr,
+ unsigned int image_length);
+
+/* ACP device probe/remove */
+int amd_sof_acp_probe(struct snd_sof_dev *sdev);
+int amd_sof_acp_remove(struct snd_sof_dev *sdev);
+
+/* DSP Loader callbacks */
+int acp_sof_dsp_run(struct snd_sof_dev *sdev);
+int acp_dsp_pre_fw_run(struct snd_sof_dev *sdev);
+int acp_sof_load_signed_firmware(struct snd_sof_dev *sdev);
+int acp_get_bar_index(struct snd_sof_dev *sdev, u32 type);
+
+/* Block IO callbacks */
+int acp_dsp_block_write(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
+ u32 offset, void *src, size_t size);
+int acp_dsp_block_read(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
+ u32 offset, void *dest, size_t size);
+
+/* IPC callbacks */
+irqreturn_t acp_sof_ipc_irq_thread(int irq, void *context);
+int acp_sof_ipc_msg_data(struct snd_sof_dev *sdev, struct snd_sof_pcm_stream *sps,
+ void *p, size_t sz);
+int acp_set_stream_data_offset(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ size_t posn_offset);
+int acp_sof_ipc_send_msg(struct snd_sof_dev *sdev,
+ struct snd_sof_ipc_msg *msg);
+int acp_sof_ipc_get_mailbox_offset(struct snd_sof_dev *sdev);
+int acp_sof_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id);
+void acp_mailbox_write(struct snd_sof_dev *sdev, u32 offset, void *message, size_t bytes);
+void acp_mailbox_read(struct snd_sof_dev *sdev, u32 offset, void *message, size_t bytes);
+
+/* ACP - DSP stream callbacks */
+int acp_dsp_stream_config(struct snd_sof_dev *sdev, struct acp_dsp_stream *stream);
+int acp_dsp_stream_init(struct snd_sof_dev *sdev);
+struct acp_dsp_stream *acp_dsp_stream_get(struct snd_sof_dev *sdev, int tag);
+int acp_dsp_stream_put(struct snd_sof_dev *sdev, struct acp_dsp_stream *acp_stream);
+
+/*
+ * DSP PCM Operations.
+ */
+int acp_pcm_open(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream);
+int acp_pcm_close(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream);
+int acp_pcm_hw_params(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params);
+snd_pcm_uframes_t acp_pcm_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+
+extern struct snd_sof_dsp_ops sof_acp_common_ops;
+
+extern struct snd_sof_dsp_ops sof_renoir_ops;
+int sof_renoir_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_vangogh_ops;
+int sof_vangogh_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_rembrandt_ops;
+int sof_rembrandt_ops_init(struct snd_sof_dev *sdev);
+
+struct snd_soc_acpi_mach *amd_sof_machine_select(struct snd_sof_dev *sdev);
+/* Machine configuration */
+int snd_amd_acp_find_config(struct pci_dev *pci);
+
+/* Trace */
+int acp_sof_trace_init(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
+ struct sof_ipc_dma_trace_params_ext *dtrace_params);
+int acp_sof_trace_release(struct snd_sof_dev *sdev);
+
+/* PM Callbacks */
+int amd_sof_acp_suspend(struct snd_sof_dev *sdev, u32 target_state);
+int amd_sof_acp_resume(struct snd_sof_dev *sdev);
+
+void amd_sof_ipc_dump(struct snd_sof_dev *sdev);
+void amd_sof_dump(struct snd_sof_dev *sdev, u32 flags);
+
+static inline const struct sof_amd_acp_desc *get_chip_info(struct snd_sof_pdata *pdata)
+{
+ const struct sof_dev_desc *desc = pdata->desc;
+
+ return desc->chip_info;
+}
+
+int acp_probes_register(struct snd_sof_dev *sdev);
+void acp_probes_unregister(struct snd_sof_dev *sdev);
+
+extern struct snd_soc_acpi_mach snd_soc_acpi_amd_vangogh_sof_machines[];
+extern const struct dmi_system_id acp_sof_quirk_table[];
+#endif
diff --git a/sound/soc/sof/amd/pci-rmb.c b/sound/soc/sof/amd/pci-rmb.c
new file mode 100644
index 000000000..a7ae76efc
--- /dev/null
+++ b/sound/soc/sof/amd/pci-rmb.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+
+/*
+ * PCI interface for Rembrandt ACP device
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <sound/sof.h>
+#include <sound/soc-acpi.h>
+
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+#include "../../amd/mach-config.h"
+#include "acp.h"
+#include "acp-dsp-offset.h"
+
+#define ACP6x_REG_START 0x1240000
+#define ACP6x_REG_END 0x125C000
+#define ACP6X_FUTURE_REG_ACLK_0 0x1854
+
+static const struct sof_amd_acp_desc rembrandt_chip_info = {
+ .rev = 6,
+ .host_bridge_id = HOST_BRIDGE_RMB,
+ .pgfsm_base = ACP6X_PGFSM_BASE,
+ .ext_intr_stat = ACP6X_EXT_INTR_STAT,
+ .dsp_intr_base = ACP6X_DSP_SW_INTR_BASE,
+ .sram_pte_offset = ACP6X_SRAM_PTE_OFFSET,
+ .hw_semaphore_offset = ACP6X_AXI2DAGB_SEM_0,
+ .fusion_dsp_offset = ACP6X_DSP_FUSION_RUNSTALL,
+ .probe_reg_offset = ACP6X_FUTURE_REG_ACLK_0,
+};
+
+static const struct sof_dev_desc rembrandt_desc = {
+ .machines = snd_soc_acpi_amd_rmb_sof_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &rembrandt_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "amd/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "amd/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-rmb.ri",
+ },
+ .nocodec_tplg_filename = "sof-acp.tplg",
+ .ops = &sof_rembrandt_ops,
+ .ops_init = sof_rembrandt_ops_init,
+};
+
+static int acp_pci_rmb_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+ unsigned int flag;
+
+ if (pci->revision != ACP_RMB_PCI_ID)
+ return -ENODEV;
+
+ flag = snd_amd_acp_find_config(pci);
+ if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC)
+ return -ENODEV;
+
+ return sof_pci_probe(pci, pci_id);
+}
+
+static void acp_pci_rmb_remove(struct pci_dev *pci)
+{
+ sof_pci_remove(pci);
+}
+
+/* PCI IDs */
+static const struct pci_device_id rmb_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, ACP_PCI_DEV_ID),
+ .driver_data = (unsigned long)&rembrandt_desc},
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, rmb_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_amd_rmb_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rmb_pci_ids,
+ .probe = acp_pci_rmb_probe,
+ .remove = acp_pci_rmb_remove,
+};
+module_pci_driver(snd_sof_pci_amd_rmb_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/amd/pci-rn.c b/sound/soc/sof/amd/pci-rn.c
new file mode 100644
index 000000000..c72d5d8af
--- /dev/null
+++ b/sound/soc/sof/amd/pci-rn.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2021 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+
+/*
+ * PCI interface for Renoir ACP device
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <sound/sof.h>
+#include <sound/soc-acpi.h>
+
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+#include "../../amd/mach-config.h"
+#include "acp.h"
+#include "acp-dsp-offset.h"
+
+#define ACP3x_REG_START 0x1240000
+#define ACP3x_REG_END 0x125C000
+#define ACP3X_FUTURE_REG_ACLK_0 0x1860
+
+static const struct sof_amd_acp_desc renoir_chip_info = {
+ .rev = 3,
+ .host_bridge_id = HOST_BRIDGE_CZN,
+ .pgfsm_base = ACP3X_PGFSM_BASE,
+ .ext_intr_stat = ACP3X_EXT_INTR_STAT,
+ .dsp_intr_base = ACP3X_DSP_SW_INTR_BASE,
+ .sram_pte_offset = ACP3X_SRAM_PTE_OFFSET,
+ .hw_semaphore_offset = ACP3X_AXI2DAGB_SEM_0,
+ .acp_clkmux_sel = ACP3X_CLKMUX_SEL,
+ .probe_reg_offset = ACP3X_FUTURE_REG_ACLK_0,
+};
+
+static const struct sof_dev_desc renoir_desc = {
+ .machines = snd_soc_acpi_amd_sof_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &renoir_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "amd/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "amd/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-rn.ri",
+ },
+ .nocodec_tplg_filename = "sof-acp.tplg",
+ .ops = &sof_renoir_ops,
+ .ops_init = sof_renoir_ops_init,
+};
+
+static int acp_pci_rn_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+ unsigned int flag;
+
+ if (pci->revision != ACP_RN_PCI_ID)
+ return -ENODEV;
+
+ flag = snd_amd_acp_find_config(pci);
+ if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC)
+ return -ENODEV;
+
+ return sof_pci_probe(pci, pci_id);
+}
+
+static void acp_pci_rn_remove(struct pci_dev *pci)
+{
+	sof_pci_remove(pci);
+}
+
+/* PCI IDs */
+static const struct pci_device_id rn_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, ACP_PCI_DEV_ID),
+ .driver_data = (unsigned long)&renoir_desc},
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, rn_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_amd_rn_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rn_pci_ids,
+ .probe = acp_pci_rn_probe,
+ .remove = acp_pci_rn_remove,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_amd_rn_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/amd/pci-vangogh.c b/sound/soc/sof/amd/pci-vangogh.c
new file mode 100644
index 000000000..d8be42fbc
--- /dev/null
+++ b/sound/soc/sof/amd/pci-vangogh.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2023 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Authors: Venkata Prasad Potturu <venkataprasad.potturu@amd.com>
+
+/*
+ * PCI interface for Vangogh ACP device
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <sound/sof.h>
+#include <sound/soc-acpi.h>
+
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+#include "../../amd/mach-config.h"
+#include "acp.h"
+#include "acp-dsp-offset.h"
+
+#define ACP5X_FUTURE_REG_ACLK_0 0x1864
+
+static const struct sof_amd_acp_desc vangogh_chip_info = {
+ .rev = 5,
+ .name = "vangogh",
+ .host_bridge_id = HOST_BRIDGE_VGH,
+ .pgfsm_base = ACP5X_PGFSM_BASE,
+ .ext_intr_stat = ACP5X_EXT_INTR_STAT,
+ .dsp_intr_base = ACP5X_DSP_SW_INTR_BASE,
+ .sram_pte_offset = ACP5X_SRAM_PTE_OFFSET,
+ .hw_semaphore_offset = ACP5X_AXI2DAGB_SEM_0,
+ .acp_clkmux_sel = ACP5X_CLKMUX_SEL,
+ .probe_reg_offset = ACP5X_FUTURE_REG_ACLK_0,
+};
+
+static const struct sof_dev_desc vangogh_desc = {
+ .machines = snd_soc_acpi_amd_vangogh_sof_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &vangogh_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "amd/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "amd/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-vangogh.ri",
+ },
+ .nocodec_tplg_filename = "sof-acp.tplg",
+ .ops = &sof_vangogh_ops,
+ .ops_init = sof_vangogh_ops_init,
+};
+
+static int acp_pci_vgh_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+ unsigned int flag;
+
+ if (pci->revision != ACP_VANGOGH_PCI_ID)
+ return -ENODEV;
+
+ flag = snd_amd_acp_find_config(pci);
+ if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC)
+ return -ENODEV;
+
+ return sof_pci_probe(pci, pci_id);
+}
+
+static void acp_pci_vgh_remove(struct pci_dev *pci)
+{
+ sof_pci_remove(pci);
+}
+
+/* PCI IDs */
+static const struct pci_device_id vgh_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, ACP_PCI_DEV_ID),
+ .driver_data = (unsigned long)&vangogh_desc},
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, vgh_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_amd_vgh_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = vgh_pci_ids,
+ .probe = acp_pci_vgh_probe,
+ .remove = acp_pci_vgh_remove,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_amd_vgh_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/amd/rembrandt.c b/sound/soc/sof/amd/rembrandt.c
new file mode 100644
index 000000000..f1d1ba57a
--- /dev/null
+++ b/sound/soc/sof/amd/rembrandt.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Advanced Micro Devices, Inc.
+//
+// Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+
+/*
+ * Hardware interface for Audio DSP on Rembrandt platform
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+
+#include "../ops.h"
+#include "../sof-audio.h"
+#include "acp.h"
+#include "acp-dsp-offset.h"
+
+#define I2S_HS_INSTANCE 0
+#define I2S_BT_INSTANCE 1
+#define I2S_SP_INSTANCE 2
+#define PDM_DMIC_INSTANCE 3
+#define I2S_HS_VIRTUAL_INSTANCE 4
+
+static struct snd_soc_dai_driver rembrandt_sof_dai[] = {
+ [I2S_HS_INSTANCE] = {
+ .id = I2S_HS_INSTANCE,
+ .name = "acp-sof-hs",
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ /* Supporting only stereo for I2S HS controller capture */
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ },
+
+ [I2S_BT_INSTANCE] = {
+ .id = I2S_BT_INSTANCE,
+ .name = "acp-sof-bt",
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ /* Supporting only stereo for I2S BT controller capture */
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ },
+
+ [I2S_SP_INSTANCE] = {
+ .id = I2S_SP_INSTANCE,
+ .name = "acp-sof-sp",
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ /* Supporting only stereo for I2S SP controller capture */
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ },
+
+ [PDM_DMIC_INSTANCE] = {
+ .id = PDM_DMIC_INSTANCE,
+ .name = "acp-sof-dmic",
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 4,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ },
+
+ [I2S_HS_VIRTUAL_INSTANCE] = {
+ .id = I2S_HS_VIRTUAL_INSTANCE,
+ .name = "acp-sof-hs-virtual",
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ },
+};
+
+/* Rembrandt ops */
+struct snd_sof_dsp_ops sof_rembrandt_ops;
+EXPORT_SYMBOL_NS(sof_rembrandt_ops, SND_SOC_SOF_AMD_COMMON);
+
+int sof_rembrandt_ops_init(struct snd_sof_dev *sdev)
+{
+ /* common defaults */
+ memcpy(&sof_rembrandt_ops, &sof_acp_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ sof_rembrandt_ops.drv = rembrandt_sof_dai;
+ sof_rembrandt_ops.num_drv = ARRAY_SIZE(rembrandt_sof_dai);
+
+ return 0;
+}
+
+MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
+MODULE_DESCRIPTION("REMBRANDT SOF Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/renoir.c b/sound/soc/sof/amd/renoir.c
new file mode 100644
index 000000000..47b863f62
--- /dev/null
+++ b/sound/soc/sof/amd/renoir.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2021 Advanced Micro Devices, Inc.
+//
+// Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+
+/*
+ * Hardware interface for Audio DSP on Renoir platform
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+
+#include "../ops.h"
+#include "../sof-audio.h"
+#include "acp.h"
+#include "acp-dsp-offset.h"
+
+#define I2S_BT_INSTANCE 0
+#define I2S_SP_INSTANCE 1
+#define PDM_DMIC_INSTANCE 2
+#define I2S_SP_VIRTUAL_INSTANCE 3
+
+static struct snd_soc_dai_driver renoir_sof_dai[] = {
+ [I2S_BT_INSTANCE] = {
+ .id = I2S_BT_INSTANCE,
+ .name = "acp-sof-bt",
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ /* Supporting only stereo for I2S BT controller capture */
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ },
+
+ [I2S_SP_INSTANCE] = {
+ .id = I2S_SP_INSTANCE,
+ .name = "acp-sof-sp",
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ /* Supporting only stereo for I2S SP controller capture */
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ },
+
+ [PDM_DMIC_INSTANCE] = {
+ .id = PDM_DMIC_INSTANCE,
+ .name = "acp-sof-dmic",
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 4,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ },
+
+ [I2S_SP_VIRTUAL_INSTANCE] = {
+ .id = I2S_SP_VIRTUAL_INSTANCE,
+ .name = "acp-sof-sp-virtual",
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ },
+};
+
+/* Renoir ops */
+struct snd_sof_dsp_ops sof_renoir_ops;
+EXPORT_SYMBOL_NS(sof_renoir_ops, SND_SOC_SOF_AMD_COMMON);
+
+int sof_renoir_ops_init(struct snd_sof_dev *sdev)
+{
+ /* common defaults */
+ memcpy(&sof_renoir_ops, &sof_acp_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ sof_renoir_ops.drv = renoir_sof_dai;
+ sof_renoir_ops.num_drv = ARRAY_SIZE(renoir_sof_dai);
+
+ return 0;
+}
+
+MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
+MODULE_DESCRIPTION("RENOIR SOF Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/amd/vangogh.c b/sound/soc/sof/amd/vangogh.c
new file mode 100644
index 000000000..de15d21aa
--- /dev/null
+++ b/sound/soc/sof/amd/vangogh.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2023 Advanced Micro Devices, Inc.
+//
+// Authors: Venkata Prasad Potturu <venkataprasad.potturu@amd.com>
+
+/*
+ * Hardware interface for Audio DSP on Vangogh platform
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+
+#include "../ops.h"
+#include "../sof-audio.h"
+#include "acp.h"
+#include "acp-dsp-offset.h"
+
+#define I2S_HS_INSTANCE 0
+#define I2S_BT_INSTANCE 1
+#define I2S_SP_INSTANCE 2
+#define PDM_DMIC_INSTANCE 3
+#define I2S_HS_VIRTUAL_INSTANCE 4
+
+static struct snd_soc_dai_driver vangogh_sof_dai[] = {
+ [I2S_HS_INSTANCE] = {
+ .id = I2S_HS_INSTANCE,
+ .name = "acp-sof-hs",
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ /* Supporting only stereo for I2S HS controller capture */
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ },
+
+ [I2S_BT_INSTANCE] = {
+ .id = I2S_BT_INSTANCE,
+ .name = "acp-sof-bt",
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ /* Supporting only stereo for I2S BT controller capture */
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ },
+
+ [I2S_SP_INSTANCE] = {
+ .id = I2S_SP_INSTANCE,
+ .name = "acp-sof-sp",
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ /* Supporting only stereo for I2S SP controller capture */
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ },
+
+ [PDM_DMIC_INSTANCE] = {
+ .id = PDM_DMIC_INSTANCE,
+ .name = "acp-sof-dmic",
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 4,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ },
+
+ [I2S_HS_VIRTUAL_INSTANCE] = {
+ .id = I2S_HS_VIRTUAL_INSTANCE,
+ .name = "acp-sof-hs-virtual",
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ /* Supporting only stereo for I2S HS-Virtual controller capture */
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ },
+};
+
+/* Vangogh ops */
+struct snd_sof_dsp_ops sof_vangogh_ops;
+EXPORT_SYMBOL_NS(sof_vangogh_ops, SND_SOC_SOF_AMD_COMMON);
+
+int sof_vangogh_ops_init(struct snd_sof_dev *sdev)
+{
+ const struct dmi_system_id *dmi_id;
+
+ /* common defaults */
+ memcpy(&sof_vangogh_ops, &sof_acp_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ sof_vangogh_ops.drv = vangogh_sof_dai;
+ sof_vangogh_ops.num_drv = ARRAY_SIZE(vangogh_sof_dai);
+
+ dmi_id = dmi_first_match(acp_sof_quirk_table);
+ if (dmi_id && dmi_id->driver_data)
+ sof_vangogh_ops.load_firmware = acp_sof_load_signed_firmware;
+
+ return 0;
+}
+
+MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
+MODULE_DESCRIPTION("VANGOGH SOF Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/compress.c b/sound/soc/sof/compress.c
new file mode 100644
index 000000000..d7b044f33
--- /dev/null
+++ b/sound/soc/sof/compress.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright 2021 NXP
+//
+// Author: Daniel Baluta <daniel.baluta@nxp.com>
+
+#include <sound/soc.h>
+#include <sound/sof.h>
+#include <sound/compress_driver.h>
+#include "sof-audio.h"
+#include "sof-priv.h"
+#include "sof-utils.h"
+#include "ops.h"
+
+static void sof_set_transferred_bytes(struct sof_compr_stream *sstream,
+ u64 host_pos, u64 buffer_size)
+{
+ u64 prev_pos;
+ unsigned int copied;
+
+ div64_u64_rem(sstream->copied_total, buffer_size, &prev_pos);
+
+ if (host_pos < prev_pos)
+ copied = (buffer_size - prev_pos) + host_pos;
+ else
+ copied = host_pos - prev_pos;
+
+ sstream->copied_total += copied;
+}
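/*
 * Editorial note (not part of the upstream patch): the wrap-around delta
 * above in plain arithmetic. With buffer_size = 4096, copied_total = 7000
 * and a new host_pos of 500, prev_pos = 7000 % 4096 = 2904; host_pos is
 * below prev_pos, so copied = (4096 - 2904) + 500 = 1692 and copied_total
 * becomes 8692, which again maps to offset 500 in the ring buffer.
 */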
+
+static void snd_sof_compr_fragment_elapsed_work(struct work_struct *work)
+{
+ struct snd_sof_pcm_stream *sps =
+ container_of(work, struct snd_sof_pcm_stream,
+ period_elapsed_work);
+
+ snd_compr_fragment_elapsed(sps->cstream);
+}
+
+void snd_sof_compr_init_elapsed_work(struct work_struct *work)
+{
+ INIT_WORK(work, snd_sof_compr_fragment_elapsed_work);
+}
+
+/*
+ * sof compr fragment elapsed handler; this could be called in irq thread context
+ */
+void snd_sof_compr_fragment_elapsed(struct snd_compr_stream *cstream)
+{
+ struct snd_soc_pcm_runtime *rtd;
+ struct snd_compr_runtime *crtd;
+ struct snd_soc_component *component;
+ struct sof_compr_stream *sstream;
+ struct snd_sof_pcm *spcm;
+
+ if (!cstream)
+ return;
+
+ rtd = cstream->private_data;
+ crtd = cstream->runtime;
+ sstream = crtd->private_data;
+ component = snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm) {
+ dev_err(component->dev,
+ "fragment elapsed called for unknown stream!\n");
+ return;
+ }
+
+ sof_set_transferred_bytes(sstream, spcm->stream[cstream->direction].posn.host_posn,
+ crtd->buffer_size);
+
+ /* use the same workqueue-based solution as for PCM, cf. snd_sof_pcm_elapsed */
+ schedule_work(&spcm->stream[cstream->direction].period_elapsed_work);
+}
+
+static int create_page_table(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ unsigned char *dma_area, size_t size)
+{
+ struct snd_dma_buffer *dmab = cstream->runtime->dma_buffer_p;
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+ int dir = cstream->direction;
+ struct snd_sof_pcm *spcm;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ return snd_sof_create_page_table(component->dev, dmab,
+ spcm->stream[dir].page_table.area, size);
+}
+
+static int sof_compr_open(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream)
+{
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+ struct snd_compr_runtime *crtd = cstream->runtime;
+ struct sof_compr_stream *sstream;
+ struct snd_sof_pcm *spcm;
+ int dir;
+
+ sstream = kzalloc(sizeof(*sstream), GFP_KERNEL);
+ if (!sstream)
+ return -ENOMEM;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm) {
+ kfree(sstream);
+ return -EINVAL;
+ }
+
+ dir = cstream->direction;
+
+ if (spcm->stream[dir].cstream) {
+ kfree(sstream);
+ return -EBUSY;
+ }
+
+ spcm->stream[dir].cstream = cstream;
+ spcm->stream[dir].posn.host_posn = 0;
+ spcm->stream[dir].posn.dai_posn = 0;
+ spcm->prepared[dir] = false;
+
+ crtd->private_data = sstream;
+
+ return 0;
+}
+
+static int sof_compr_free(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct sof_compr_stream *sstream = cstream->runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+ struct sof_ipc_stream stream;
+ struct snd_sof_pcm *spcm;
+ int ret = 0;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_FREE;
+ stream.comp_id = spcm->stream[cstream->direction].comp_id;
+
+ if (spcm->prepared[cstream->direction]) {
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, &stream, sizeof(stream));
+ if (!ret)
+ spcm->prepared[cstream->direction] = false;
+ }
+
+ cancel_work_sync(&spcm->stream[cstream->direction].period_elapsed_work);
+ spcm->stream[cstream->direction].cstream = NULL;
+ kfree(sstream);
+
+ return ret;
+}
+
+static int sof_compr_set_params(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream, struct snd_compr_params *params)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+ struct snd_compr_runtime *crtd = cstream->runtime;
+ struct sof_ipc_pcm_params_reply ipc_params_reply;
+ struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
+ struct sof_ipc_fw_version *v = &ready->version;
+ struct sof_compr_stream *sstream;
+ struct sof_ipc_pcm_params *pcm;
+ struct snd_sof_pcm *spcm;
+ size_t ext_data_size;
+ int ret;
+
+ if (v->abi_version < SOF_ABI_VER(3, 22, 0)) {
+ dev_err(component->dev,
+ "Compress params not supported with FW ABI version %d:%d:%d\n",
+ SOF_ABI_VERSION_MAJOR(v->abi_version),
+ SOF_ABI_VERSION_MINOR(v->abi_version),
+ SOF_ABI_VERSION_PATCH(v->abi_version));
+ return -EINVAL;
+ }
+
+ sstream = crtd->private_data;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+
+ if (!spcm)
+ return -EINVAL;
+
+ ext_data_size = sizeof(params->codec);
+
+ if (sizeof(*pcm) + ext_data_size > sdev->ipc->max_payload_size)
+ return -EINVAL;
+
+ pcm = kzalloc(sizeof(*pcm) + ext_data_size, GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ cstream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV_SG;
+ cstream->dma_buffer.dev.dev = sdev->dev;
+ ret = snd_compr_malloc_pages(cstream, crtd->buffer_size);
+ if (ret < 0)
+ goto out;
+
+ ret = create_page_table(component, cstream, crtd->dma_area, crtd->dma_bytes);
+ if (ret < 0)
+ goto out;
+
+ pcm->params.buffer.pages = PFN_UP(crtd->dma_bytes);
+ pcm->hdr.size = sizeof(*pcm) + ext_data_size;
+ pcm->hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_PARAMS;
+
+ pcm->comp_id = spcm->stream[cstream->direction].comp_id;
+ pcm->params.hdr.size = sizeof(pcm->params) + ext_data_size;
+ pcm->params.buffer.phy_addr = spcm->stream[cstream->direction].page_table.addr;
+ pcm->params.buffer.size = crtd->dma_bytes;
+ pcm->params.direction = cstream->direction;
+ pcm->params.channels = params->codec.ch_out;
+ pcm->params.rate = params->codec.sample_rate;
+ pcm->params.buffer_fmt = SOF_IPC_BUFFER_INTERLEAVED;
+ pcm->params.frame_fmt = SOF_IPC_FRAME_S32_LE;
+ pcm->params.sample_container_bytes =
+ snd_pcm_format_physical_width(SNDRV_PCM_FORMAT_S32) >> 3;
+ pcm->params.host_period_bytes = params->buffer.fragment_size;
+ pcm->params.ext_data_length = ext_data_size;
+
+ memcpy((u8 *)pcm->params.ext_data, &params->codec, ext_data_size);
+
+ ret = sof_ipc_tx_message(sdev->ipc, pcm, sizeof(*pcm) + ext_data_size,
+ &ipc_params_reply, sizeof(ipc_params_reply));
+ if (ret < 0) {
+ dev_err(component->dev, "error ipc failed\n");
+ goto out;
+ }
+
+ ret = snd_sof_set_stream_data_offset(sdev, &spcm->stream[cstream->direction],
+ ipc_params_reply.posn_offset);
+ if (ret < 0) {
+ dev_err(component->dev, "Invalid stream data offset for Compr %d\n",
+ spcm->pcm.pcm_id);
+ goto out;
+ }
+
+ sstream->sampling_rate = params->codec.sample_rate;
+ sstream->channels = params->codec.ch_out;
+ sstream->sample_container_bytes = pcm->params.sample_container_bytes;
+
+ spcm->prepared[cstream->direction] = true;
+
+out:
+ kfree(pcm);
+
+ return ret;
+}
+
+static int sof_compr_get_params(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream, struct snd_codec *params)
+{
+ /* TODO: we don't query the supported codecs for now; if the
+ * application asks for an unsupported codec, set_params() will fail.
+ */
+ return 0;
+}
+
+static int sof_compr_trigger(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream, int cmd)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+ struct sof_ipc_stream stream;
+ struct snd_sof_pcm *spcm;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG;
+ stream.comp_id = spcm->stream[cstream->direction].comp_id;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_START;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_PAUSE;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_RELEASE;
+ break;
+ default:
+ dev_err(component->dev, "error: unhandled trigger cmd %d\n", cmd);
+ break;
+ }
+
+ return sof_ipc_tx_message_no_reply(sdev->ipc, &stream, sizeof(stream));
+}
+
+static int sof_compr_copy_playback(struct snd_compr_runtime *rtd,
+ char __user *buf, size_t count)
+{
+ void *ptr;
+ unsigned int offset, n;
+ int ret;
+
+ div_u64_rem(rtd->total_bytes_available, rtd->buffer_size, &offset);
+ ptr = rtd->dma_area + offset;
+ n = rtd->buffer_size - offset;
+
+ if (count < n) {
+ ret = copy_from_user(ptr, buf, count);
+ } else {
+ ret = copy_from_user(ptr, buf, n);
+ ret += copy_from_user(rtd->dma_area, buf + n, count - n);
+ }
+
+ return count - ret;
+}
+
+static int sof_compr_copy_capture(struct snd_compr_runtime *rtd,
+ char __user *buf, size_t count)
+{
+ void *ptr;
+ unsigned int offset, n;
+ int ret;
+
+ div_u64_rem(rtd->total_bytes_transferred, rtd->buffer_size, &offset);
+ ptr = rtd->dma_area + offset;
+ n = rtd->buffer_size - offset;
+
+ if (count < n) {
+ ret = copy_to_user(buf, ptr, count);
+ } else {
+ ret = copy_to_user(buf, ptr, n);
+ ret += copy_to_user(buf + n, rtd->dma_area, count - n);
+ }
+
+ return count - ret;
+}
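/*
 * Editorial note (illustration only): both copy helpers split a request
 * that crosses the end of the ring buffer. With buffer_size = 4096,
 * offset = 3584 and count = 1024, n = 512: the first 512 bytes are copied
 * at dma_area + 3584 and the remaining 512 bytes wrap around to the start
 * of dma_area. The return value is count minus whatever copy_from_user()
 * or copy_to_user() failed to transfer.
 */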
+
+static int sof_compr_copy(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ char __user *buf, size_t count)
+{
+ struct snd_compr_runtime *rtd = cstream->runtime;
+
+ if (count > rtd->buffer_size)
+ count = rtd->buffer_size;
+
+ if (cstream->direction == SND_COMPRESS_PLAYBACK)
+ return sof_compr_copy_playback(rtd, buf, count);
+ else
+ return sof_compr_copy_capture(rtd, buf, count);
+}
+
+static int sof_compr_pointer(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp)
+{
+ struct snd_sof_pcm *spcm;
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+ struct sof_compr_stream *sstream = cstream->runtime->private_data;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ tstamp->sampling_rate = sstream->sampling_rate;
+ tstamp->copied_total = sstream->copied_total;
+ tstamp->pcm_io_frames = div_u64(spcm->stream[cstream->direction].posn.dai_posn,
+ sstream->channels * sstream->sample_container_bytes);
+
+ return 0;
+}
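/*
 * Editorial note: pcm_io_frames above is a plain byte-to-frame conversion.
 * For S32_LE stereo (sample_container_bytes = 4, channels = 2) a dai_posn
 * of 192000 bytes corresponds to 192000 / (2 * 4) = 24000 frames.
 */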
+
+struct snd_compress_ops sof_compressed_ops = {
+ .open = sof_compr_open,
+ .free = sof_compr_free,
+ .set_params = sof_compr_set_params,
+ .get_params = sof_compr_get_params,
+ .trigger = sof_compr_trigger,
+ .pointer = sof_compr_pointer,
+ .copy = sof_compr_copy,
+};
+EXPORT_SYMBOL(sof_compressed_ops);
diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
new file mode 100644
index 000000000..75e13f4fd
--- /dev/null
+++ b/sound/soc/sof/control.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/* Mixer Controls */
+
+#include <linux/pm_runtime.h>
+#include <linux/leds.h>
+#include "sof-priv.h"
+#include "sof-audio.h"
+
+int snd_sof_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *sm = (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = sm->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ if (tplg_ops && tplg_ops->control && tplg_ops->control->volume_get)
+ return tplg_ops->control->volume_get(scontrol, ucontrol);
+
+ return 0;
+}
+
+int snd_sof_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *sm = (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = sm->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ if (tplg_ops && tplg_ops->control && tplg_ops->control->volume_put)
+ return tplg_ops->control->volume_put(scontrol, ucontrol);
+
+ return false;
+}
+
+int snd_sof_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+{
+ struct soc_mixer_control *sm = (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = sm->dobj.private;
+ unsigned int channels = scontrol->num_channels;
+ int platform_max;
+
+ if (!sm->platform_max)
+ sm->platform_max = sm->max;
+ platform_max = sm->platform_max;
+
+ if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume"))
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ else
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+
+ uinfo->count = channels;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = platform_max - sm->min;
+ return 0;
+}
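/*
 * Editorial note: the exported range is normalised to start at 0. For a
 * control with sm->min = -50 and platform_max = 30 the reported range is
 * 0..80; the IPC-specific get/put handlers are expected to translate these
 * values back for the firmware.
 */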
+
+int snd_sof_switch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *sm = (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = sm->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ if (tplg_ops && tplg_ops->control && tplg_ops->control->switch_get)
+ return tplg_ops->control->switch_get(scontrol, ucontrol);
+
+ return 0;
+}
+
+int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *sm = (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = sm->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ if (tplg_ops && tplg_ops->control && tplg_ops->control->switch_put)
+ return tplg_ops->control->switch_put(scontrol, ucontrol);
+
+ return false;
+}
+
+int snd_sof_enum_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_enum *se = (struct soc_enum *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = se->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ if (tplg_ops && tplg_ops->control && tplg_ops->control->enum_get)
+ return tplg_ops->control->enum_get(scontrol, ucontrol);
+
+ return 0;
+}
+
+int snd_sof_enum_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_enum *se = (struct soc_enum *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = se->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ if (tplg_ops && tplg_ops->control && tplg_ops->control->enum_put)
+ return tplg_ops->control->enum_put(scontrol, ucontrol);
+
+ return false;
+}
+
+int snd_sof_bytes_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_bytes_ext *be = (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ if (tplg_ops && tplg_ops->control && tplg_ops->control->bytes_get)
+ return tplg_ops->control->bytes_get(scontrol, ucontrol);
+
+ return 0;
+}
+
+int snd_sof_bytes_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_bytes_ext *be = (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ if (tplg_ops && tplg_ops->control && tplg_ops->control->bytes_put)
+ return tplg_ops->control->bytes_put(scontrol, ucontrol);
+
+ return 0;
+}
+
+int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size)
+{
+ struct soc_bytes_ext *be = (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ /* make sure we have at least a header */
+ if (size < sizeof(struct snd_ctl_tlv))
+ return -EINVAL;
+
+ if (tplg_ops && tplg_ops->control && tplg_ops->control->bytes_ext_put)
+ return tplg_ops->control->bytes_ext_put(scontrol, binary_data, size);
+
+ return 0;
+}
+
+int snd_sof_bytes_ext_volatile_get(struct snd_kcontrol *kcontrol, unsigned int __user *binary_data,
+ unsigned int size)
+{
+ struct soc_bytes_ext *be = (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ int ret, err;
+
+ ret = pm_runtime_resume_and_get(scomp->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err_ratelimited(scomp->dev, "%s: failed to resume %d\n", __func__, ret);
+ return ret;
+ }
+
+ if (tplg_ops && tplg_ops->control && tplg_ops->control->bytes_ext_volatile_get)
+ ret = tplg_ops->control->bytes_ext_volatile_get(scontrol, binary_data, size);
+
+ pm_runtime_mark_last_busy(scomp->dev);
+ err = pm_runtime_put_autosuspend(scomp->dev);
+ if (err < 0)
+ dev_err_ratelimited(scomp->dev, "%s: failed to idle %d\n", __func__, err);
+
+ return ret;
+}
+
+int snd_sof_bytes_ext_get(struct snd_kcontrol *kcontrol,
+ unsigned int __user *binary_data,
+ unsigned int size)
+{
+ struct soc_bytes_ext *be = (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ if (tplg_ops && tplg_ops->control && tplg_ops->control->bytes_ext_get)
+ return tplg_ops->control->bytes_ext_get(scontrol, binary_data, size);
+
+ return 0;
+}
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
new file mode 100644
index 000000000..0938b259f
--- /dev/null
+++ b/sound/soc/sof/core.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <sound/sof.h>
+#include "sof-priv.h"
+#include "ops.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/sof.h>
+
+/* see SOF_DBG_ flags */
+static int sof_core_debug = IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE);
+module_param_named(sof_debug, sof_core_debug, int, 0444);
+MODULE_PARM_DESC(sof_debug, "SOF core debug options (0x0 all off)");
+
+/* SOF defaults if not provided by the platform in ms */
+#define TIMEOUT_DEFAULT_IPC_MS 500
+#define TIMEOUT_DEFAULT_BOOT_MS 2000
+
+/**
+ * sof_debug_check_flag - check if a given flag(s) is set in sof_core_debug
+ * @mask: Flag or combination of flags to check
+ *
+ * Returns true if all bits set in mask are also set in sof_core_debug,
+ * otherwise false
+ */
+bool sof_debug_check_flag(int mask)
+{
+ if ((sof_core_debug & mask) == mask)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(sof_debug_check_flag);
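/*
 * Editorial sketch (usage illustration, not part of the patch): callers
 * normally test a single SOF_DBG_* bit, e.g.
 *
 *	if (sof_debug_check_flag(SOF_DBG_ENABLE_TRACE))
 *		sdev->fw_trace_is_supported = true;
 *
 * Passing a combination of flags only returns true when all of them are
 * set in the sof_debug module parameter.
 */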
+
+/*
+ * FW Panic/fault handling.
+ */
+
+struct sof_panic_msg {
+ u32 id;
+ const char *msg;
+};
+
+/* standard FW panic types */
+static const struct sof_panic_msg panic_msg[] = {
+ {SOF_IPC_PANIC_MEM, "out of memory"},
+ {SOF_IPC_PANIC_WORK, "work subsystem init failed"},
+ {SOF_IPC_PANIC_IPC, "IPC subsystem init failed"},
+ {SOF_IPC_PANIC_ARCH, "arch init failed"},
+ {SOF_IPC_PANIC_PLATFORM, "platform init failed"},
+ {SOF_IPC_PANIC_TASK, "scheduler init failed"},
+ {SOF_IPC_PANIC_EXCEPTION, "runtime exception"},
+ {SOF_IPC_PANIC_DEADLOCK, "deadlock"},
+ {SOF_IPC_PANIC_STACK, "stack overflow"},
+ {SOF_IPC_PANIC_IDLE, "can't enter idle"},
+ {SOF_IPC_PANIC_WFI, "invalid wait state"},
+ {SOF_IPC_PANIC_ASSERT, "assertion failed"},
+};
+
+/**
+ * sof_print_oops_and_stack - Handle the printing of DSP oops and stack trace
+ * @sdev: Pointer to the device's sdev
+ * @level: printk log level to use for the printing
+ * @panic_code: the panic code
+ * @tracep_code: tracepoint code
+ * @oops: Pointer to DSP specific oops data
+ * @panic_info: Pointer to the received panic information message
+ * @stack: Pointer to the call stack data
+ * @stack_words: Number of words in the stack data
+ *
+ * Helper to be called from .dbg_dump callbacks. No error code is
+ * provided; it's left as an exercise for the caller of .dbg_dump
+ * (typically IPC or loader)
+ */
+void sof_print_oops_and_stack(struct snd_sof_dev *sdev, const char *level,
+ u32 panic_code, u32 tracep_code, void *oops,
+ struct sof_ipc_panic_info *panic_info,
+ void *stack, size_t stack_words)
+{
+ u32 code;
+ int i;
+
+ /* is firmware dead ? */
+ if ((panic_code & SOF_IPC_PANIC_MAGIC_MASK) != SOF_IPC_PANIC_MAGIC) {
+ dev_printk(level, sdev->dev, "unexpected fault %#010x trace %#010x\n",
+ panic_code, tracep_code);
+ return; /* no fault ? */
+ }
+
+ code = panic_code & (SOF_IPC_PANIC_MAGIC_MASK | SOF_IPC_PANIC_CODE_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(panic_msg); i++) {
+ if (panic_msg[i].id == code) {
+ dev_printk(level, sdev->dev, "reason: %s (%#x)\n",
+ panic_msg[i].msg, code & SOF_IPC_PANIC_CODE_MASK);
+ dev_printk(level, sdev->dev, "trace point: %#010x\n", tracep_code);
+ goto out;
+ }
+ }
+
+ /* unknown error */
+ dev_printk(level, sdev->dev, "unknown panic code: %#x\n",
+ code & SOF_IPC_PANIC_CODE_MASK);
+ dev_printk(level, sdev->dev, "trace point: %#010x\n", tracep_code);
+
+out:
+ dev_printk(level, sdev->dev, "panic at %s:%d\n", panic_info->filename,
+ panic_info->linenum);
+ sof_oops(sdev, level, oops);
+ sof_stack(sdev, level, oops, stack, stack_words);
+}
+EXPORT_SYMBOL(sof_print_oops_and_stack);
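/*
 * Editorial sketch (assumed callback name, not part of the patch): a
 * platform .dbg_dump callback gathers its architecture-specific oops data
 * and then hands it to this helper, roughly:
 *
 *	static void my_dbg_dump(struct snd_sof_dev *sdev, u32 flags)
 *	{
 *		struct sof_ipc_dsp_oops_xtensa xoops;
 *		struct sof_ipc_panic_info panic_info;
 *		u32 stack[16], panic_code, tracep_code;
 *
 *		... read xoops, panic_info, stack and both codes here ...
 *		sof_print_oops_and_stack(sdev, KERN_ERR, panic_code,
 *					 tracep_code, &xoops, &panic_info,
 *					 stack, ARRAY_SIZE(stack));
 *	}
 *
 * See imx8_dump() in sound/soc/sof/imx/imx-common.c later in this patch
 * for a real caller.
 */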
+
+/* Helper to manage DSP state */
+void sof_set_fw_state(struct snd_sof_dev *sdev, enum sof_fw_state new_state)
+{
+ if (sdev->fw_state == new_state)
+ return;
+
+ dev_dbg(sdev->dev, "fw_state change: %d -> %d\n", sdev->fw_state, new_state);
+ sdev->fw_state = new_state;
+
+ switch (new_state) {
+ case SOF_FW_BOOT_NOT_STARTED:
+ case SOF_FW_BOOT_COMPLETE:
+ case SOF_FW_CRASHED:
+ sof_client_fw_state_dispatcher(sdev);
+ fallthrough;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(sof_set_fw_state);
+
+/*
+ * FW Boot State Transition Diagram
+ *
+ * +----------------------------------------------------------------------+
+ * | |
+ * ------------------ ------------------ |
+ * | | | | |
+ * | BOOT_FAILED |<-------| READY_FAILED | |
+ * | |<--+ | | ------------------ |
+ * ------------------ | ------------------ | | |
+ * ^ | ^ | CRASHED |---+ |
+ * | | | | | | |
+ * (FW Boot Timeout) | (FW_READY FAIL) ------------------ | |
+ * | | | ^ | |
+ * | | | |(DSP Panic) | |
+ * ------------------ | | ------------------ | |
+ * | | | | | | | |
+ * | IN_PROGRESS |---------------+------------->| COMPLETE | | |
+ * | | (FW Boot OK) (FW_READY OK) | | | |
+ * ------------------ | ------------------ | |
+ * ^ | | | |
+ * | | | | |
+ * (FW Loading OK) | (System Suspend/Runtime Suspend)
+ * | | | | |
+ * | (FW Loading Fail) | | |
+ * ------------------ | ------------------ | | |
+ * | | | | |<-----+ | |
+ * | PREPARE |---+ | NOT_STARTED |<---------------------+ |
+ * | | | |<--------------------------+
+ * ------------------ ------------------
+ * | ^ | ^
+ * | | | |
+ * | +-----------------------+ |
+ * | (DSP Probe OK) |
+ * | |
+ * | |
+ * +------------------------------------+
+ * (System Suspend/Runtime Suspend)
+ */
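/*
 * Editorial note: the transitions in the diagram are driven by
 * sof_set_fw_state() calls. In the probe path below, a successful boot
 * walks:
 *
 *	NOT_STARTED -> PREPARE      (after snd_sof_probe())
 *	PREPARE     -> IN_PROGRESS  (after snd_sof_load_firmware())
 *	IN_PROGRESS -> COMPLETE     (handled by the IPC/loader code once the
 *				     FW_READY message arrives, not shown in
 *				     this file)
 */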
+
+static int sof_probe_continue(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ int ret;
+
+ /* probe the DSP hardware */
+ ret = snd_sof_probe(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to probe DSP %d\n", ret);
+ goto probe_err;
+ }
+
+ sof_set_fw_state(sdev, SOF_FW_BOOT_PREPARE);
+
+ /* check machine info */
+ ret = sof_machine_check(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to get machine info %d\n",
+ ret);
+ goto dsp_err;
+ }
+
+ /* set up platform component driver */
+ snd_sof_new_platform_drv(sdev);
+
+ if (sdev->dspless_mode_selected) {
+ sof_set_fw_state(sdev, SOF_DSPLESS_MODE);
+ goto skip_dsp_init;
+ }
+
+ /* register any debug/trace capabilities */
+ ret = snd_sof_dbg_init(sdev);
+ if (ret < 0) {
+ /*
+ * debugfs issues are suppressed in snd_sof_dbg_init() since we
+ * cannot rely on debugfs; here we only trap errors due to memory
+ * allocation.
+ */
+ dev_err(sdev->dev, "error: failed to init DSP trace/debug %d\n",
+ ret);
+ goto dbg_err;
+ }
+
+ /* init the IPC */
+ sdev->ipc = snd_sof_ipc_init(sdev);
+ if (!sdev->ipc) {
+ ret = -ENOMEM;
+ dev_err(sdev->dev, "error: failed to init DSP IPC %d\n", ret);
+ goto ipc_err;
+ }
+
+ /* load the firmware */
+ ret = snd_sof_load_firmware(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to load DSP firmware %d\n",
+ ret);
+ sof_set_fw_state(sdev, SOF_FW_BOOT_FAILED);
+ goto fw_load_err;
+ }
+
+ sof_set_fw_state(sdev, SOF_FW_BOOT_IN_PROGRESS);
+
+ /*
+ * Boot the firmware. The FW boot status will be modified
+ * in snd_sof_run_firmware() depending on the outcome.
+ */
+ ret = snd_sof_run_firmware(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to boot DSP firmware %d\n",
+ ret);
+ sof_set_fw_state(sdev, SOF_FW_BOOT_FAILED);
+ goto fw_run_err;
+ }
+
+ if (sof_debug_check_flag(SOF_DBG_ENABLE_TRACE)) {
+ sdev->fw_trace_is_supported = true;
+
+ /* init firmware tracing */
+ ret = sof_fw_trace_init(sdev);
+ if (ret < 0) {
+ /* non fatal */
+ dev_warn(sdev->dev, "failed to initialize firmware tracing %d\n",
+ ret);
+ }
+ } else {
+ dev_dbg(sdev->dev, "SOF firmware trace disabled\n");
+ }
+
+skip_dsp_init:
+ /* hereafter all FW boot flows are for PM reasons */
+ sdev->first_boot = false;
+
+ /* now register audio DSP platform driver and dai */
+ ret = devm_snd_soc_register_component(sdev->dev, &sdev->plat_drv,
+ sof_ops(sdev)->drv,
+ sof_ops(sdev)->num_drv);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to register DSP DAI driver %d\n", ret);
+ goto fw_trace_err;
+ }
+
+ ret = snd_sof_machine_register(sdev, plat_data);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to register machine driver %d\n", ret);
+ goto fw_trace_err;
+ }
+
+ ret = sof_register_clients(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to register clients %d\n", ret);
+ goto sof_machine_err;
+ }
+
+ /*
+ * Some platforms in SOF, e.g. BYT, may not have their platform PM
+ * callbacks set. Increment the usage count to prevent the device from
+ * entering runtime suspend.
+ */
+ if (!sof_ops(sdev)->runtime_suspend || !sof_ops(sdev)->runtime_resume)
+ pm_runtime_get_noresume(sdev->dev);
+
+ if (plat_data->sof_probe_complete)
+ plat_data->sof_probe_complete(sdev->dev);
+
+ sdev->probe_completed = true;
+
+ return 0;
+
+sof_machine_err:
+ snd_sof_machine_unregister(sdev, plat_data);
+fw_trace_err:
+ sof_fw_trace_free(sdev);
+fw_run_err:
+ snd_sof_fw_unload(sdev);
+fw_load_err:
+ snd_sof_ipc_free(sdev);
+ipc_err:
+dbg_err:
+ snd_sof_free_debug(sdev);
+dsp_err:
+ snd_sof_remove(sdev);
+probe_err:
+ sof_ops_free(sdev);
+
+ /* all resources freed, update state to match */
+ sof_set_fw_state(sdev, SOF_FW_BOOT_NOT_STARTED);
+ sdev->first_boot = true;
+
+ return ret;
+}
+
+static void sof_probe_work(struct work_struct *work)
+{
+ struct snd_sof_dev *sdev =
+ container_of(work, struct snd_sof_dev, probe_work);
+ int ret;
+
+ ret = sof_probe_continue(sdev);
+ if (ret < 0) {
+ /* errors cannot be propagated, log */
+ dev_err(sdev->dev, "error: %s failed err: %d\n", __func__, ret);
+ }
+}
+
+int snd_sof_device_probe(struct device *dev, struct snd_sof_pdata *plat_data)
+{
+ struct snd_sof_dev *sdev;
+ int ret;
+
+ sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ /* initialize sof device */
+ sdev->dev = dev;
+
+ /* initialize default DSP power state */
+ sdev->dsp_power_state.state = SOF_DSP_PM_D0;
+
+ sdev->pdata = plat_data;
+ sdev->first_boot = true;
+ dev_set_drvdata(dev, sdev);
+
+ if (sof_core_debug)
+ dev_info(dev, "sof_debug value: %#x\n", sof_core_debug);
+
+ if (sof_debug_check_flag(SOF_DBG_DSPLESS_MODE)) {
+ if (plat_data->desc->dspless_mode_supported) {
+ dev_info(dev, "Switching to DSPless mode\n");
+ sdev->dspless_mode_selected = true;
+ } else {
+ dev_info(dev, "DSPless mode is not supported by the platform\n");
+ }
+ }
+
+ /* check IPC support */
+ if (!(BIT(plat_data->ipc_type) & plat_data->desc->ipc_supported_mask)) {
+ dev_err(dev, "ipc_type %d is not supported on this platform, mask is %#x\n",
+ plat_data->ipc_type, plat_data->desc->ipc_supported_mask);
+ return -EINVAL;
+ }
+
+ /* init ops, if necessary */
+ ret = sof_ops_init(sdev);
+ if (ret < 0)
+ return ret;
+
+ /* check all mandatory ops */
+ if (!sof_ops(sdev) || !sof_ops(sdev)->probe) {
+ sof_ops_free(sdev);
+ dev_err(dev, "missing mandatory ops\n");
+ return -EINVAL;
+ }
+
+ if (!sdev->dspless_mode_selected &&
+ (!sof_ops(sdev)->run || !sof_ops(sdev)->block_read ||
+ !sof_ops(sdev)->block_write || !sof_ops(sdev)->send_msg ||
+ !sof_ops(sdev)->load_firmware || !sof_ops(sdev)->ipc_msg_data)) {
+ sof_ops_free(sdev);
+ dev_err(dev, "missing mandatory DSP ops\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&sdev->pcm_list);
+ INIT_LIST_HEAD(&sdev->kcontrol_list);
+ INIT_LIST_HEAD(&sdev->widget_list);
+ INIT_LIST_HEAD(&sdev->pipeline_list);
+ INIT_LIST_HEAD(&sdev->dai_list);
+ INIT_LIST_HEAD(&sdev->dai_link_list);
+ INIT_LIST_HEAD(&sdev->route_list);
+ INIT_LIST_HEAD(&sdev->ipc_client_list);
+ INIT_LIST_HEAD(&sdev->ipc_rx_handler_list);
+ INIT_LIST_HEAD(&sdev->fw_state_handler_list);
+ spin_lock_init(&sdev->ipc_lock);
+ spin_lock_init(&sdev->hw_lock);
+ mutex_init(&sdev->power_state_access);
+ mutex_init(&sdev->ipc_client_mutex);
+ mutex_init(&sdev->client_event_handler_mutex);
+
+ /* set default timeouts if none provided */
+ if (plat_data->desc->ipc_timeout == 0)
+ sdev->ipc_timeout = TIMEOUT_DEFAULT_IPC_MS;
+ else
+ sdev->ipc_timeout = plat_data->desc->ipc_timeout;
+ if (plat_data->desc->boot_timeout == 0)
+ sdev->boot_timeout = TIMEOUT_DEFAULT_BOOT_MS;
+ else
+ sdev->boot_timeout = plat_data->desc->boot_timeout;
+
+ sof_set_fw_state(sdev, SOF_FW_BOOT_NOT_STARTED);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)) {
+ INIT_WORK(&sdev->probe_work, sof_probe_work);
+ schedule_work(&sdev->probe_work);
+ return 0;
+ }
+
+ return sof_probe_continue(sdev);
+}
+EXPORT_SYMBOL(snd_sof_device_probe);
+
+bool snd_sof_device_probe_completed(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+
+ return sdev->probe_completed;
+}
+EXPORT_SYMBOL(snd_sof_device_probe_completed);
+
+int snd_sof_device_remove(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ int ret;
+ bool aborted = false;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+ aborted = cancel_work_sync(&sdev->probe_work);
+
+ /*
+ * Unregister any registered client device first before IPC and debugfs
+ * to allow client drivers to be removed cleanly
+ */
+ sof_unregister_clients(sdev);
+
+ /*
+ * Unregister machine driver. This will unbind the snd_card which
+ * will remove the component driver and unload the topology
+ * before freeing the snd_card.
+ */
+ snd_sof_machine_unregister(sdev, pdata);
+
+ if (sdev->fw_state > SOF_FW_BOOT_NOT_STARTED) {
+ sof_fw_trace_free(sdev);
+ ret = snd_sof_dsp_power_down_notify(sdev);
+ if (ret < 0)
+ dev_warn(dev, "error: %d failed to prepare DSP for device removal",
+ ret);
+
+ snd_sof_ipc_free(sdev);
+ snd_sof_free_debug(sdev);
+ snd_sof_remove(sdev);
+ sof_ops_free(sdev);
+ } else if (aborted) {
+ /* probe_work never ran */
+ sof_ops_free(sdev);
+ }
+
+ /* release firmware */
+ snd_sof_fw_unload(sdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_device_remove);
+
+int snd_sof_device_shutdown(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+ cancel_work_sync(&sdev->probe_work);
+
+ if (sdev->fw_state == SOF_FW_BOOT_COMPLETE) {
+ sof_fw_trace_free(sdev);
+ return snd_sof_shutdown(sdev);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_device_shutdown);
+
+MODULE_AUTHOR("Liam Girdwood");
+MODULE_DESCRIPTION("Sound Open Firmware (SOF) Core");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:sof-audio");
+MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
new file mode 100644
index 000000000..d547318e0
--- /dev/null
+++ b/sound/soc/sof/debug.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+// Generic debug routines used to export DSP MMIO and memories to userspace
+// for firmware debugging.
+//
+
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <sound/sof/ext_manifest.h>
+#include <sound/sof/debug.h>
+#include "sof-priv.h"
+#include "ops.h"
+
+static ssize_t sof_dfsentry_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ size_t size;
+ char *string;
+ int ret;
+
+ string = kzalloc(count+1, GFP_KERNEL);
+ if (!string)
+ return -ENOMEM;
+
+ size = simple_write_to_buffer(string, count, ppos, buffer, count);
+ ret = size;
+
+ kfree(string);
+ return ret;
+}
+
+static ssize_t sof_dfsentry_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ loff_t pos = *ppos;
+ size_t size_ret;
+ int skip = 0;
+ int size;
+ u8 *buf;
+
+ size = dfse->size;
+
+ /* validate position & count */
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= size || !count)
+ return 0;
+ /* find the minimum. min() is not used since it adds sparse warnings */
+ if (count > size - pos)
+ count = size - pos;
+
+ /* align io read start to u32 multiple */
+ pos = ALIGN_DOWN(pos, 4);
+
+ /* intermediate buffer size must be u32 multiple */
+ size = ALIGN(count, 4);
+
+ /* if start position is unaligned, read extra u32 */
+ if (unlikely(pos != *ppos)) {
+ skip = *ppos - pos;
+ if (pos + size + 4 < dfse->size)
+ size += 4;
+ }
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (dfse->type == SOF_DFSENTRY_TYPE_IOMEM) {
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+ /*
+ * If the DSP is active: copy from IO.
+ * If the DSP is suspended:
+ * - Copy from IO if the memory is always accessible.
+ * - Otherwise, copy from cached buffer.
+ */
+ if (pm_runtime_active(sdev->dev) ||
+ dfse->access_type == SOF_DEBUGFS_ACCESS_ALWAYS) {
+ memcpy_fromio(buf, dfse->io_mem + pos, size);
+ } else {
+ dev_info(sdev->dev,
+ "Copying cached debugfs data\n");
+ memcpy(buf, dfse->cache_buf + pos, size);
+ }
+#else
+ /* if the DSP is in D3 */
+ if (!pm_runtime_active(sdev->dev) &&
+ dfse->access_type == SOF_DEBUGFS_ACCESS_D0_ONLY) {
+ dev_err(sdev->dev,
+ "error: debugfs entry cannot be read in DSP D3\n");
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ memcpy_fromio(buf, dfse->io_mem + pos, size);
+#endif
+ } else {
+ memcpy(buf, ((u8 *)(dfse->buf) + pos), size);
+ }
+
+ /* copy to userspace */
+ size_ret = copy_to_user(buffer, buf + skip, count);
+
+ kfree(buf);
+
+ /* update count & position if copy succeeded */
+ if (size_ret)
+ return -EFAULT;
+
+ *ppos = pos + count;
+
+ return count;
+}
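/*
 * Editorial note: a worked example of the alignment handling above. For a
 * read of count = 6 at *ppos = 10, pos is aligned down to 8 (skip = 2) and
 * size = ALIGN(6, 4) = 8; because pos != *ppos an extra u32 is read
 * (size = 12), so the IO copy covers bytes 8..19 and the 6 requested bytes
 * are returned to userspace from buf + skip.
 */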
+
+static const struct file_operations sof_dfs_fops = {
+ .open = simple_open,
+ .read = sof_dfsentry_read,
+ .llseek = default_llseek,
+ .write = sof_dfsentry_write,
+};
+
+/* create FS entry for debug files that can expose DSP memories, registers */
+static int snd_sof_debugfs_io_item(struct snd_sof_dev *sdev,
+ void __iomem *base, size_t size,
+ const char *name,
+ enum sof_debugfs_access_type access_type)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ if (!sdev)
+ return -EINVAL;
+
+ dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
+ if (!dfse)
+ return -ENOMEM;
+
+ dfse->type = SOF_DFSENTRY_TYPE_IOMEM;
+ dfse->io_mem = base;
+ dfse->size = size;
+ dfse->sdev = sdev;
+ dfse->access_type = access_type;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+ /*
+ * allocate cache buffer that will be used to save the mem window
+ * contents prior to suspend
+ */
+ if (access_type == SOF_DEBUGFS_ACCESS_D0_ONLY) {
+ dfse->cache_buf = devm_kzalloc(sdev->dev, size, GFP_KERNEL);
+ if (!dfse->cache_buf)
+ return -ENOMEM;
+ }
+#endif
+
+ debugfs_create_file(name, 0444, sdev->debugfs_root, dfse,
+ &sof_dfs_fops);
+
+ /* add to dfsentry list */
+ list_add(&dfse->list, &sdev->dfsentry_list);
+
+ return 0;
+}
+
+int snd_sof_debugfs_add_region_item_iomem(struct snd_sof_dev *sdev,
+ enum snd_sof_fw_blk_type blk_type, u32 offset,
+ size_t size, const char *name,
+ enum sof_debugfs_access_type access_type)
+{
+ int bar = snd_sof_dsp_get_bar_index(sdev, blk_type);
+
+ if (bar < 0)
+ return bar;
+
+ return snd_sof_debugfs_io_item(sdev, sdev->bar[bar] + offset, size, name,
+ access_type);
+}
+EXPORT_SYMBOL_GPL(snd_sof_debugfs_add_region_item_iomem);
+
+/* create FS entry for debug files to expose kernel memory */
+int snd_sof_debugfs_buf_item(struct snd_sof_dev *sdev,
+ void *base, size_t size,
+ const char *name, mode_t mode)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ if (!sdev)
+ return -EINVAL;
+
+ dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
+ if (!dfse)
+ return -ENOMEM;
+
+ dfse->type = SOF_DFSENTRY_TYPE_BUF;
+ dfse->buf = base;
+ dfse->size = size;
+ dfse->sdev = sdev;
+
+ debugfs_create_file(name, mode, sdev->debugfs_root, dfse,
+ &sof_dfs_fops);
+ /* add to dfsentry list */
+ list_add(&dfse->list, &sdev->dfsentry_list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_sof_debugfs_buf_item);
+
+static int memory_info_update(struct snd_sof_dev *sdev, char *buf, size_t buff_size)
+{
+ struct sof_ipc_cmd_hdr msg = {
+ .size = sizeof(struct sof_ipc_cmd_hdr),
+ .cmd = SOF_IPC_GLB_DEBUG | SOF_IPC_DEBUG_MEM_USAGE,
+ };
+ struct sof_ipc_dbg_mem_usage *reply;
+ int len;
+ int ret;
+ int i;
+
+ reply = kmalloc(SOF_IPC_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!reply)
+ return -ENOMEM;
+
+ ret = pm_runtime_resume_and_get(sdev->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err(sdev->dev, "error: enabling device failed: %d\n", ret);
+ goto error;
+ }
+
+ ret = sof_ipc_tx_message(sdev->ipc, &msg, msg.size, reply, SOF_IPC_MSG_MAX_SIZE);
+ pm_runtime_mark_last_busy(sdev->dev);
+ pm_runtime_put_autosuspend(sdev->dev);
+ if (ret < 0 || reply->rhdr.error < 0) {
+ ret = min(ret, reply->rhdr.error);
+ dev_err(sdev->dev, "error: reading memory info failed, %d\n", ret);
+ goto error;
+ }
+
+ if (struct_size(reply, elems, reply->num_elems) != reply->rhdr.hdr.size) {
+ dev_err(sdev->dev, "error: invalid memory info ipc struct size, %d\n",
+ reply->rhdr.hdr.size);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ for (i = 0, len = 0; i < reply->num_elems; i++) {
+ ret = scnprintf(buf + len, buff_size - len, "zone %d.%d used %#8x free %#8x\n",
+ reply->elems[i].zone, reply->elems[i].id,
+ reply->elems[i].used, reply->elems[i].free);
+ if (ret < 0)
+ goto error;
+ len += ret;
+ }
+
+ ret = len;
+error:
+ kfree(reply);
+ return ret;
+}
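/*
 * Editorial note: each reported element becomes one line of the debugfs
 * file, e.g. (illustrative values only):
 *
 *	zone 0.1 used   0x2000 free   0x6000
 */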
+
+static ssize_t memory_info_read(struct file *file, char __user *to, size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ int data_length;
+
+ /* read memory info from FW only once for each file read */
+ if (!*ppos) {
+ dfse->buf_data_size = 0;
+ data_length = memory_info_update(sdev, dfse->buf, dfse->size);
+ if (data_length < 0)
+ return data_length;
+ dfse->buf_data_size = data_length;
+ }
+
+ return simple_read_from_buffer(to, count, ppos, dfse->buf, dfse->buf_data_size);
+}
+
+static int memory_info_open(struct inode *inode, struct file *file)
+{
+ struct snd_sof_dfsentry *dfse = inode->i_private;
+ struct snd_sof_dev *sdev = dfse->sdev;
+
+ file->private_data = dfse;
+
+ /* allocate buffer memory only in first open run, to save memory when unused */
+ if (!dfse->buf) {
+ dfse->buf = devm_kmalloc(sdev->dev, PAGE_SIZE, GFP_KERNEL);
+ if (!dfse->buf)
+ return -ENOMEM;
+ dfse->size = PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+static const struct file_operations memory_info_fops = {
+ .open = memory_info_open,
+ .read = memory_info_read,
+ .llseek = default_llseek,
+};
+
+int snd_sof_dbg_memory_info_init(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
+ if (!dfse)
+ return -ENOMEM;
+
+ /* don't allocate buffer before first usage, to save memory when unused */
+ dfse->type = SOF_DFSENTRY_TYPE_BUF;
+ dfse->sdev = sdev;
+
+ debugfs_create_file("memory_info", 0444, sdev->debugfs_root, dfse, &memory_info_fops);
+
+ /* add to dfsentry list */
+ list_add(&dfse->list, &sdev->dfsentry_list);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_sof_dbg_memory_info_init);
+
+int snd_sof_dbg_init(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_dsp_ops *ops = sof_ops(sdev);
+ const struct snd_sof_debugfs_map *map;
+ int i;
+ int err;
+
+ /* use "sof" as top level debugFS dir */
+ sdev->debugfs_root = debugfs_create_dir("sof", NULL);
+
+ /* init dfsentry list */
+ INIT_LIST_HEAD(&sdev->dfsentry_list);
+
+ /* create debugFS files for platform specific MMIO/DSP memories */
+ for (i = 0; i < ops->debug_map_count; i++) {
+ map = &ops->debug_map[i];
+
+ err = snd_sof_debugfs_io_item(sdev, sdev->bar[map->bar] +
+ map->offset, map->size,
+ map->name, map->access_type);
+ /* errors are only due to memory allocation, not debugfs */
+ if (err < 0)
+ return err;
+ }
+
+ return snd_sof_debugfs_buf_item(sdev, &sdev->fw_state,
+ sizeof(sdev->fw_state),
+ "fw_state", 0444);
+}
+EXPORT_SYMBOL_GPL(snd_sof_dbg_init);
+
+void snd_sof_free_debug(struct snd_sof_dev *sdev)
+{
+ debugfs_remove_recursive(sdev->debugfs_root);
+}
+EXPORT_SYMBOL_GPL(snd_sof_free_debug);
+
+static const struct soc_fw_state_info {
+ enum sof_fw_state state;
+ const char *name;
+} fw_state_dbg[] = {
+ {SOF_FW_BOOT_NOT_STARTED, "SOF_FW_BOOT_NOT_STARTED"},
+ {SOF_DSPLESS_MODE, "SOF_DSPLESS_MODE"},
+ {SOF_FW_BOOT_PREPARE, "SOF_FW_BOOT_PREPARE"},
+ {SOF_FW_BOOT_IN_PROGRESS, "SOF_FW_BOOT_IN_PROGRESS"},
+ {SOF_FW_BOOT_FAILED, "SOF_FW_BOOT_FAILED"},
+ {SOF_FW_BOOT_READY_FAILED, "SOF_FW_BOOT_READY_FAILED"},
+ {SOF_FW_BOOT_READY_OK, "SOF_FW_BOOT_READY_OK"},
+ {SOF_FW_BOOT_COMPLETE, "SOF_FW_BOOT_COMPLETE"},
+ {SOF_FW_CRASHED, "SOF_FW_CRASHED"},
+};
+
+static void snd_sof_dbg_print_fw_state(struct snd_sof_dev *sdev, const char *level)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_state_dbg); i++) {
+ if (sdev->fw_state == fw_state_dbg[i].state) {
+ dev_printk(level, sdev->dev, "fw_state: %s (%d)\n",
+ fw_state_dbg[i].name, i);
+ return;
+ }
+ }
+
+ dev_printk(level, sdev->dev, "fw_state: UNKNOWN (%d)\n", sdev->fw_state);
+}
+
+void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, const char *msg, u32 flags)
+{
+ char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
+ bool print_all = sof_debug_check_flag(SOF_DBG_PRINT_ALL_DUMPS);
+
+ if (flags & SOF_DBG_DUMP_OPTIONAL && !print_all)
+ return;
+
+ if (sof_ops(sdev)->dbg_dump && !sdev->dbg_dump_printed) {
+ dev_printk(level, sdev->dev,
+ "------------[ DSP dump start ]------------\n");
+ if (msg)
+ dev_printk(level, sdev->dev, "%s\n", msg);
+ snd_sof_dbg_print_fw_state(sdev, level);
+ sof_ops(sdev)->dbg_dump(sdev, flags);
+ dev_printk(level, sdev->dev,
+ "------------[ DSP dump end ]------------\n");
+ if (!print_all)
+ sdev->dbg_dump_printed = true;
+ } else if (msg) {
+ dev_printk(level, sdev->dev, "%s\n", msg);
+ }
+}
+EXPORT_SYMBOL(snd_sof_dsp_dbg_dump);
+
+static void snd_sof_ipc_dump(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->ipc_dump && !sdev->ipc_dump_printed) {
+ dev_err(sdev->dev, "------------[ IPC dump start ]------------\n");
+ sof_ops(sdev)->ipc_dump(sdev);
+ dev_err(sdev->dev, "------------[ IPC dump end ]------------\n");
+ if (!sof_debug_check_flag(SOF_DBG_PRINT_ALL_DUMPS))
+ sdev->ipc_dump_printed = true;
+ }
+}
+
+void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev, const char *msg)
+{
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_RETAIN_DSP_CONTEXT) ||
+ sof_debug_check_flag(SOF_DBG_RETAIN_CTX)) {
+ /* should we prevent DSP entering D3 ? */
+ if (!sdev->ipc_dump_printed)
+ dev_info(sdev->dev,
+ "Attempting to prevent DSP from entering D3 state to preserve context\n");
+ pm_runtime_get_if_in_use(sdev->dev);
+ }
+
+ /* dump vital information to the logs */
+ snd_sof_ipc_dump(sdev);
+ snd_sof_dsp_dbg_dump(sdev, msg, SOF_DBG_DUMP_REGS | SOF_DBG_DUMP_MBOX);
+ sof_fw_trace_fw_crashed(sdev);
+}
+EXPORT_SYMBOL(snd_sof_handle_fw_exception);
diff --git a/sound/soc/sof/imx/Kconfig b/sound/soc/sof/imx/Kconfig
new file mode 100644
index 000000000..4751b04d5
--- /dev/null
+++ b/sound/soc/sof/imx/Kconfig
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+config SND_SOC_SOF_IMX_TOPLEVEL
+ bool "SOF support for NXP i.MX audio DSPs"
+ depends on ARM64 || COMPILE_TEST
+ depends on SND_SOC_SOF_OF
+ help
+ This adds support for Sound Open Firmware for NXP i.MX platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+if SND_SOC_SOF_IMX_TOPLEVEL
+
+config SND_SOC_SOF_IMX_COMMON
+ tristate
+ select SND_SOC_SOF_OF_DEV
+ select SND_SOC_SOF
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_XTENSA
+ select SND_SOC_SOF_COMPRESS
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_IMX8
+ tristate "SOF support for i.MX8"
+ depends on IMX_SCU
+ depends on IMX_DSP
+ select SND_SOC_SOF_IMX_COMMON
+ help
+ This adds support for Sound Open Firmware for NXP i.MX8 platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_IMX8M
+ tristate "SOF support for i.MX8M"
+ depends on IMX_DSP
+ select SND_SOC_SOF_IMX_COMMON
+ help
+ This adds support for Sound Open Firmware for NXP i.MX8M platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_IMX8ULP
+ tristate "SOF support for i.MX8ULP"
+ depends on IMX_DSP
+ select SND_SOC_SOF_IMX_COMMON
+ help
+ This adds support for Sound Open Firmware for NXP i.MX8ULP platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+endif ## SND_SOC_SOF_IMX_TOPLEVEL
diff --git a/sound/soc/sof/imx/Makefile b/sound/soc/sof/imx/Makefile
new file mode 100644
index 000000000..798b43a41
--- /dev/null
+++ b/sound/soc/sof/imx/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+snd-sof-imx8-objs := imx8.o
+snd-sof-imx8m-objs := imx8m.o
+snd-sof-imx8ulp-objs := imx8ulp.o
+
+snd-sof-imx-common-objs := imx-common.o
+
+obj-$(CONFIG_SND_SOC_SOF_IMX8) += snd-sof-imx8.o
+obj-$(CONFIG_SND_SOC_SOF_IMX8M) += snd-sof-imx8m.o
+obj-$(CONFIG_SND_SOC_SOF_IMX8ULP) += snd-sof-imx8ulp.o
+obj-$(CONFIG_SND_SOC_SOF_IMX_COMMON) += imx-common.o
diff --git a/sound/soc/sof/imx/imx-common.c b/sound/soc/sof/imx/imx-common.c
new file mode 100644
index 000000000..36e3d414a
--- /dev/null
+++ b/sound/soc/sof/imx/imx-common.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright 2020 NXP
+//
+// Common helpers for the audio DSP on i.MX8
+
+#include <linux/module.h>
+#include <sound/sof/xtensa.h>
+#include "../ops.h"
+
+#include "imx-common.h"
+
+/**
+ * imx8_get_registers() - This function is called in case of DSP oops
+ * in order to gather information about the registers, filename,
+ * line number and stack.
+ * @sdev: SOF device
+ * @xoops: Stores information about registers.
+ * @panic_info: Stores information about filename and line number.
+ * @stack: Stores the stack dump.
+ * @stack_words: Size of the stack dump.
+ */
+void imx8_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
+ /* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+ offset += xoops->arch_hdr.totalsize;
+ sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
+}
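/*
 * Editorial note: the three reads above walk a packed layout in the
 * exception region of the mailbox, starting at sdev->dsp_oops_offset:
 *
 *	+--------------------------------+ dsp_oops_offset
 *	| struct sof_ipc_dsp_oops_xtensa |
 *	+--------------------------------+ + xoops->arch_hdr.totalsize
 *	| struct sof_ipc_panic_info      |
 *	+--------------------------------+ + sizeof(*panic_info)
 *	| stack dump (stack_words * u32) |
 *	+--------------------------------+
 */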
+
+/**
+ * imx8_dump() - This function is called when a panic message is
+ * received from the firmware.
+ * @sdev: SOF device
+ * @flags: parameter not used but required by ops prototype
+ */
+void imx8_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[IMX8_STACK_DUMP_SIZE];
+ u32 status;
+
+ /* Get information about the panic status from the debug box area.
+ * Compute the trace point based on the status.
+ */
+ sof_mailbox_read(sdev, sdev->debug_box.offset + 0x4, &status, 4);
+
+ /* Get information about the registers, the filename and line
+ * number and the stack.
+ */
+ imx8_get_registers(sdev, &xoops, &panic_info, stack,
+ IMX8_STACK_DUMP_SIZE);
+
+ /* Print the information to the console */
+ sof_print_oops_and_stack(sdev, KERN_ERR, status, status, &xoops,
+ &panic_info, stack, IMX8_STACK_DUMP_SIZE);
+}
+EXPORT_SYMBOL(imx8_dump);
+
+int imx8_parse_clocks(struct snd_sof_dev *sdev, struct imx_clocks *clks)
+{
+ int ret;
+
+ ret = devm_clk_bulk_get(sdev->dev, clks->num_dsp_clks, clks->dsp_clks);
+ if (ret)
+ dev_err(sdev->dev, "Failed to request DSP clocks\n");
+
+ return ret;
+}
+EXPORT_SYMBOL(imx8_parse_clocks);
+
+int imx8_enable_clocks(struct snd_sof_dev *sdev, struct imx_clocks *clks)
+{
+ return clk_bulk_prepare_enable(clks->num_dsp_clks, clks->dsp_clks);
+}
+EXPORT_SYMBOL(imx8_enable_clocks);
+
+void imx8_disable_clocks(struct snd_sof_dev *sdev, struct imx_clocks *clks)
+{
+ clk_bulk_disable_unprepare(clks->num_dsp_clks, clks->dsp_clks);
+}
+EXPORT_SYMBOL(imx8_disable_clocks);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/imx/imx-common.h b/sound/soc/sof/imx/imx-common.h
new file mode 100644
index 000000000..ec4b3a5c7
--- /dev/null
+++ b/sound/soc/sof/imx/imx-common.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef __IMX_COMMON_H__
+#define __IMX_COMMON_H__
+
+#include <linux/clk.h>
+
+#define EXCEPT_MAX_HDR_SIZE 0x400
+#define IMX8_STACK_DUMP_SIZE 32
+
+void imx8_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words);
+
+void imx8_dump(struct snd_sof_dev *sdev, u32 flags);
+
+struct imx_clocks {
+ struct clk_bulk_data *dsp_clks;
+ int num_dsp_clks;
+};
+
+int imx8_parse_clocks(struct snd_sof_dev *sdev, struct imx_clocks *clks);
+int imx8_enable_clocks(struct snd_sof_dev *sdev, struct imx_clocks *clks);
+void imx8_disable_clocks(struct snd_sof_dev *sdev, struct imx_clocks *clks);
+
+#endif
diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
new file mode 100644
index 000000000..2844d9a80
--- /dev/null
+++ b/sound/soc/sof/imx/imx8.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright 2019 NXP
+//
+// Author: Daniel Baluta <daniel.baluta@nxp.com>
+//
+// Hardware interface for audio DSP on i.MX8
+
+#include <linux/firmware.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pm_domain.h>
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/dsp.h>
+
+#include <linux/firmware/imx/svc/misc.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include "../ops.h"
+#include "../sof-of-dev.h"
+#include "imx-common.h"
+
+/* DSP memories */
+#define IRAM_OFFSET 0x10000
+#define IRAM_SIZE (2 * 1024)
+#define DRAM0_OFFSET 0x0
+#define DRAM0_SIZE (32 * 1024)
+#define DRAM1_OFFSET 0x8000
+#define DRAM1_SIZE (32 * 1024)
+#define SYSRAM_OFFSET 0x18000
+#define SYSRAM_SIZE (256 * 1024)
+#define SYSROM_OFFSET 0x58000
+#define SYSROM_SIZE (192 * 1024)
+
+#define RESET_VECTOR_VADDR 0x596f8000
+
+#define MBOX_OFFSET 0x800000
+#define MBOX_SIZE 0x1000
+
+/* DSP clocks */
+static struct clk_bulk_data imx8_dsp_clks[] = {
+ { .id = "ipg" },
+ { .id = "ocram" },
+ { .id = "core" },
+};
+
+struct imx8_priv {
+ struct device *dev;
+ struct snd_sof_dev *sdev;
+
+ /* DSP IPC handler */
+ struct imx_dsp_ipc *dsp_ipc;
+ struct platform_device *ipc_dev;
+
+ /* System Controller IPC handler */
+ struct imx_sc_ipc *sc_ipc;
+
+ /* Power domain handling */
+ int num_domains;
+ struct device **pd_dev;
+ struct device_link **link;
+
+ struct imx_clocks *clks;
+};
+
+static int imx8_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+
+static int imx8_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+
+static void imx8_dsp_handle_reply(struct imx_dsp_ipc *ipc)
+{
+ struct imx8_priv *priv = imx_dsp_get_data(ipc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sdev->ipc_lock, flags);
+ snd_sof_ipc_process_reply(priv->sdev, 0);
+ spin_unlock_irqrestore(&priv->sdev->ipc_lock, flags);
+}
+
+static void imx8_dsp_handle_request(struct imx_dsp_ipc *ipc)
+{
+ struct imx8_priv *priv = imx_dsp_get_data(ipc);
+ u32 p; /* panic code */
+
+ /* Read the message from the debug box. */
+ sof_mailbox_read(priv->sdev, priv->sdev->debug_box.offset + 4, &p, sizeof(p));
+
+ /* Check to see if the message is a panic code (0x0dead***) */
+ if ((p & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC)
+ snd_sof_dsp_panic(priv->sdev, p, true);
+ else
+ snd_sof_ipc_msgs_rx(priv->sdev);
+}
+
+static struct imx_dsp_ops dsp_ops = {
+ .handle_reply = imx8_dsp_handle_reply,
+ .handle_request = imx8_dsp_handle_request,
+};
+
+static int imx8_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct imx8_priv *priv = sdev->pdata->hw_pdata;
+
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ imx_dsp_ring_doorbell(priv->dsp_ipc, 0);
+
+ return 0;
+}
+
+/*
+ * DSP control.
+ */
+static int imx8x_run(struct snd_sof_dev *sdev)
+{
+ struct imx8_priv *dsp_priv = sdev->pdata->hw_pdata;
+ int ret;
+
+ ret = imx_sc_misc_set_control(dsp_priv->sc_ipc, IMX_SC_R_DSP,
+ IMX_SC_C_OFS_SEL, 1);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Error system address offset source select\n");
+ return ret;
+ }
+
+ ret = imx_sc_misc_set_control(dsp_priv->sc_ipc, IMX_SC_R_DSP,
+ IMX_SC_C_OFS_AUDIO, 0x80);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Error system address offset of AUDIO\n");
+ return ret;
+ }
+
+ ret = imx_sc_misc_set_control(dsp_priv->sc_ipc, IMX_SC_R_DSP,
+ IMX_SC_C_OFS_PERIPH, 0x5A);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Error system address offset of PERIPH %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = imx_sc_misc_set_control(dsp_priv->sc_ipc, IMX_SC_R_DSP,
+ IMX_SC_C_OFS_IRQ, 0x51);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Error system address offset of IRQ\n");
+ return ret;
+ }
+
+ imx_sc_pm_cpu_start(dsp_priv->sc_ipc, IMX_SC_R_DSP, true,
+ RESET_VECTOR_VADDR);
+
+ return 0;
+}
+
+static int imx8_run(struct snd_sof_dev *sdev)
+{
+ struct imx8_priv *dsp_priv = sdev->pdata->hw_pdata;
+ int ret;
+
+ ret = imx_sc_misc_set_control(dsp_priv->sc_ipc, IMX_SC_R_DSP,
+ IMX_SC_C_OFS_SEL, 0);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Error system address offset source select\n");
+ return ret;
+ }
+
+ imx_sc_pm_cpu_start(dsp_priv->sc_ipc, IMX_SC_R_DSP, true,
+ RESET_VECTOR_VADDR);
+
+ return 0;
+}
+
+static int imx8_probe(struct snd_sof_dev *sdev)
+{
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *res_node;
+ struct resource *mmio;
+ struct imx8_priv *priv;
+ struct resource res;
+ u32 base, size;
+ int ret = 0;
+ int i;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clks = devm_kzalloc(&pdev->dev, sizeof(*priv->clks), GFP_KERNEL);
+ if (!priv->clks)
+ return -ENOMEM;
+
+ sdev->num_cores = 1;
+ sdev->pdata->hw_pdata = priv;
+ priv->dev = sdev->dev;
+ priv->sdev = sdev;
+
+ /* power up device associated power domains */
+ priv->num_domains = of_count_phandle_with_args(np, "power-domains",
+ "#power-domain-cells");
+ if (priv->num_domains < 0) {
+ dev_err(sdev->dev, "no power-domains property in %pOF\n", np);
+ return priv->num_domains;
+ }
+
+ priv->pd_dev = devm_kmalloc_array(&pdev->dev, priv->num_domains,
+ sizeof(*priv->pd_dev), GFP_KERNEL);
+ if (!priv->pd_dev)
+ return -ENOMEM;
+
+ priv->link = devm_kmalloc_array(&pdev->dev, priv->num_domains,
+ sizeof(*priv->link), GFP_KERNEL);
+ if (!priv->link)
+ return -ENOMEM;
+
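+ /*
+ * Attach each power domain individually and link it to this device;
+ * the stateless, RPM-active device links keep the domains powered
+ * whenever the SOF device is runtime active.
+ */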
+ for (i = 0; i < priv->num_domains; i++) {
+ priv->pd_dev[i] = dev_pm_domain_attach_by_id(&pdev->dev, i);
+ if (IS_ERR(priv->pd_dev[i])) {
+ ret = PTR_ERR(priv->pd_dev[i]);
+ goto exit_unroll_pm;
+ }
+ priv->link[i] = device_link_add(&pdev->dev, priv->pd_dev[i],
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!priv->link[i]) {
+ ret = -ENOMEM;
+ dev_pm_domain_detach(priv->pd_dev[i], false);
+ goto exit_unroll_pm;
+ }
+ }
+
+ ret = imx_scu_get_handle(&priv->sc_ipc);
+ if (ret) {
+ dev_err(sdev->dev, "Cannot obtain SCU handle (err = %d)\n",
+ ret);
+ goto exit_unroll_pm;
+ }
+
+ priv->ipc_dev = platform_device_register_data(sdev->dev, "imx-dsp",
+ PLATFORM_DEVID_NONE,
+ pdev, sizeof(*pdev));
+ if (IS_ERR(priv->ipc_dev)) {
+ ret = PTR_ERR(priv->ipc_dev);
+ goto exit_unroll_pm;
+ }
+
+ priv->dsp_ipc = dev_get_drvdata(&priv->ipc_dev->dev);
+ if (!priv->dsp_ipc) {
+ /* DSP IPC driver not probed yet, try later */
+ ret = -EPROBE_DEFER;
+ dev_err(sdev->dev, "Failed to get drvdata\n");
+ goto exit_pdev_unregister;
+ }
+
+ imx_dsp_set_data(priv->dsp_ipc, priv);
+ priv->dsp_ipc->ops = &dsp_ops;
+
+ /* DSP base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get DSP base at idx 0\n");
+ ret = -EINVAL;
+ goto exit_pdev_unregister;
+ }
+
+ sdev->bar[SOF_FW_BLK_TYPE_IRAM] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[SOF_FW_BLK_TYPE_IRAM]) {
+ dev_err(sdev->dev, "failed to ioremap base 0x%x size 0x%x\n",
+ base, size);
+ ret = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+ sdev->mmio_bar = SOF_FW_BLK_TYPE_IRAM;
+
+ res_node = of_parse_phandle(np, "memory-region", 0);
+ if (!res_node) {
+ dev_err(&pdev->dev, "failed to get memory region node\n");
+ ret = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+
+ ret = of_address_to_resource(res_node, 0, &res);
+ of_node_put(res_node);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get reserved region address\n");
+ goto exit_pdev_unregister;
+ }
+
+ sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap_wc(sdev->dev, res.start,
+ resource_size(&res));
+ if (!sdev->bar[SOF_FW_BLK_TYPE_SRAM]) {
+ dev_err(sdev->dev, "failed to ioremap mem 0x%x size 0x%x\n",
+ base, size);
+ ret = -ENOMEM;
+ goto exit_pdev_unregister;
+ }
+ sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ /* init clocks info */
+ priv->clks->dsp_clks = imx8_dsp_clks;
+ priv->clks->num_dsp_clks = ARRAY_SIZE(imx8_dsp_clks);
+
+ ret = imx8_parse_clocks(sdev, priv->clks);
+ if (ret < 0)
+ goto exit_pdev_unregister;
+
+ ret = imx8_enable_clocks(sdev, priv->clks);
+ if (ret < 0)
+ goto exit_pdev_unregister;
+
+ return 0;
+
+exit_pdev_unregister:
+ platform_device_unregister(priv->ipc_dev);
+exit_unroll_pm:
+ while (--i >= 0) {
+ device_link_del(priv->link[i]);
+ dev_pm_domain_detach(priv->pd_dev[i], false);
+ }
+
+ return ret;
+}
+
+static int imx8_remove(struct snd_sof_dev *sdev)
+{
+ struct imx8_priv *priv = sdev->pdata->hw_pdata;
+ int i;
+
+ imx8_disable_clocks(sdev, priv->clks);
+ platform_device_unregister(priv->ipc_dev);
+
+ for (i = 0; i < priv->num_domains; i++) {
+ device_link_del(priv->link[i]);
+ dev_pm_domain_detach(priv->pd_dev[i], false);
+ }
+
+ return 0;
+}
+
+/* on i.MX8 there is a 1:1 match between type and BAR idx */
+static int imx8_get_bar_index(struct snd_sof_dev *sdev, u32 type)
+{
+ /* Only IRAM and SRAM bars are valid */
+ switch (type) {
+ case SOF_FW_BLK_TYPE_IRAM:
+ case SOF_FW_BLK_TYPE_SRAM:
+ return type;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void imx8_suspend(struct snd_sof_dev *sdev)
+{
+ int i;
+ struct imx8_priv *priv = (struct imx8_priv *)sdev->pdata->hw_pdata;
+
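+ /*
+ * Free all MU doorbell channels before gating the DSP clocks so that
+ * no further IPC interrupts are expected from the suspended DSP.
+ */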
+ for (i = 0; i < DSP_MU_CHAN_NUM; i++)
+ imx_dsp_free_channel(priv->dsp_ipc, i);
+
+ imx8_disable_clocks(sdev, priv->clks);
+}
+
+static int imx8_resume(struct snd_sof_dev *sdev)
+{
+ struct imx8_priv *priv = (struct imx8_priv *)sdev->pdata->hw_pdata;
+ int ret;
+ int i;
+
+ ret = imx8_enable_clocks(sdev, priv->clks);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < DSP_MU_CHAN_NUM; i++)
+ imx_dsp_request_channel(priv->dsp_ipc, i);
+
+ return 0;
+}
+
+static int imx8_dsp_runtime_resume(struct snd_sof_dev *sdev)
+{
+ int ret;
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+
+ ret = imx8_resume(sdev);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+static int imx8_dsp_runtime_suspend(struct snd_sof_dev *sdev)
+{
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = SOF_DSP_PM_D3,
+ };
+
+ imx8_suspend(sdev);
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+static int imx8_dsp_suspend(struct snd_sof_dev *sdev, unsigned int target_state)
+{
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = target_state,
+ };
+
+ if (!pm_runtime_suspended(sdev->dev))
+ imx8_suspend(sdev);
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+static int imx8_dsp_resume(struct snd_sof_dev *sdev)
+{
+ int ret;
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+
+ ret = imx8_resume(sdev);
+ if (ret < 0)
+ return ret;
+
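+ /*
+ * If the device was runtime suspended before system suspend, re-sync
+ * the runtime PM state here: the clocks and MU channels were restored
+ * above, so mark the device active again.
+ */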
+ if (pm_runtime_suspended(sdev->dev)) {
+ pm_runtime_disable(sdev->dev);
+ pm_runtime_set_active(sdev->dev);
+ pm_runtime_mark_last_busy(sdev->dev);
+ pm_runtime_enable(sdev->dev);
+ pm_runtime_idle(sdev->dev);
+ }
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+static struct snd_soc_dai_driver imx8_dai[] = {
+{
+ .name = "esai0",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "sai1",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+},
+};
+
+static int imx8_dsp_set_power_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ sdev->dsp_power_state = *target_state;
+
+ return 0;
+}
+
+/* i.MX8 ops */
+static struct snd_sof_dsp_ops sof_imx8_ops = {
+ /* probe and remove */
+ .probe = imx8_probe,
+ .remove = imx8_remove,
+ /* DSP core boot */
+ .run = imx8_run,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* ipc */
+ .send_msg = imx8_send_msg,
+ .get_mailbox_offset = imx8_get_mailbox_offset,
+ .get_window_offset = imx8_get_window_offset,
+
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ .get_bar_index = imx8_get_bar_index,
+
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* Debug information */
+ .dbg_dump = imx8_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_close = sof_stream_pcm_close,
+
+ /* Firmware ops */
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+
+ /* DAI drivers */
+ .drv = imx8_dai,
+ .num_drv = ARRAY_SIZE(imx8_dai),
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+
+ /* PM */
+ .runtime_suspend = imx8_dsp_runtime_suspend,
+ .runtime_resume = imx8_dsp_runtime_resume,
+
+ .suspend = imx8_dsp_suspend,
+ .resume = imx8_dsp_resume,
+
+ .set_power_state = imx8_dsp_set_power_state,
+};
+
+/* i.MX8X ops */
+static struct snd_sof_dsp_ops sof_imx8x_ops = {
+ /* probe and remove */
+ .probe = imx8_probe,
+ .remove = imx8_remove,
+ /* DSP core boot */
+ .run = imx8x_run,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* ipc */
+ .send_msg = imx8_send_msg,
+ .get_mailbox_offset = imx8_get_mailbox_offset,
+ .get_window_offset = imx8_get_window_offset,
+
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ .get_bar_index = imx8_get_bar_index,
+
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* Debug information */
+ .dbg_dump = imx8_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_close = sof_stream_pcm_close,
+
+ /* Firmware ops */
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+
+ /* DAI drivers */
+ .drv = imx8_dai,
+ .num_drv = ARRAY_SIZE(imx8_dai),
+
+ /* PM */
+ .runtime_suspend = imx8_dsp_runtime_suspend,
+ .runtime_resume = imx8_dsp_runtime_resume,
+
+ .suspend = imx8_dsp_suspend,
+ .resume = imx8_dsp_resume,
+
+ .set_power_state = imx8_dsp_set_power_state,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP
+};
+
+static struct sof_dev_desc sof_of_imx8qxp_desc = {
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "imx/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "imx/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-imx8x.ri",
+ },
+ .nocodec_tplg_filename = "sof-imx8-nocodec.tplg",
+ .ops = &sof_imx8x_ops,
+};
+
+static struct sof_dev_desc sof_of_imx8qm_desc = {
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "imx/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "imx/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-imx8.ri",
+ },
+ .nocodec_tplg_filename = "sof-imx8-nocodec.tplg",
+ .ops = &sof_imx8_ops,
+};
+
+static const struct of_device_id sof_of_imx8_ids[] = {
+ { .compatible = "fsl,imx8qxp-dsp", .data = &sof_of_imx8qxp_desc},
+ { .compatible = "fsl,imx8qm-dsp", .data = &sof_of_imx8qm_desc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, sof_of_imx8_ids);
+
+/* DT driver definition */
+static struct platform_driver snd_sof_of_imx8_driver = {
+ .probe = sof_of_probe,
+ .remove = sof_of_remove,
+ .driver = {
+ .name = "sof-audio-of-imx8",
+ .pm = &sof_of_pm,
+ .of_match_table = sof_of_imx8_ids,
+ },
+};
+module_platform_driver(snd_sof_of_imx8_driver);
+
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c
new file mode 100644
index 000000000..1243f8a61
--- /dev/null
+++ b/sound/soc/sof/imx/imx8m.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright 2020 NXP
+//
+// Author: Daniel Baluta <daniel.baluta@nxp.com>
+//
+// Hardware interface for audio DSP on i.MX8M
+
+#include <linux/bits.h>
+#include <linux/firmware.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include <linux/firmware/imx/dsp.h>
+
+#include "../ops.h"
+#include "../sof-of-dev.h"
+#include "imx-common.h"
+
+#define MBOX_OFFSET 0x800000
+#define MBOX_SIZE 0x1000
+
+static struct clk_bulk_data imx8m_dsp_clks[] = {
+ { .id = "ipg" },
+ { .id = "ocram" },
+ { .id = "core" },
+};
+
+/* DAP registers */
+#define IMX8M_DAP_DEBUG 0x28800000
+#define IMX8M_DAP_DEBUG_SIZE (64 * 1024)
+#define IMX8M_DAP_PWRCTL (0x4000 + 0x3020)
+#define IMX8M_PWRCTL_CORERESET BIT(16)
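+/*
+ * PWRCTL lives in the DAP (Debug Access Port) window, which is mapped
+ * separately from the DSP MMIO in imx8m_probe() and is only used to
+ * assert/release the HiFi4 core reset in imx8m_reset().
+ */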
+
+/* DSP audio mix registers */
+#define AudioDSP_REG0 0x100
+#define AudioDSP_REG1 0x104
+#define AudioDSP_REG2 0x108
+#define AudioDSP_REG3 0x10c
+
+#define AudioDSP_REG2_RUNSTALL BIT(5)
+
+struct imx8m_priv {
+ struct device *dev;
+ struct snd_sof_dev *sdev;
+
+ /* DSP IPC handler */
+ struct imx_dsp_ipc *dsp_ipc;
+ struct platform_device *ipc_dev;
+
+ struct imx_clocks *clks;
+
+ void __iomem *dap;
+ struct regmap *regmap;
+};
+
+static int imx8m_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+
+static int imx8m_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+
+static void imx8m_dsp_handle_reply(struct imx_dsp_ipc *ipc)
+{
+ struct imx8m_priv *priv = imx_dsp_get_data(ipc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sdev->ipc_lock, flags);
+ snd_sof_ipc_process_reply(priv->sdev, 0);
+ spin_unlock_irqrestore(&priv->sdev->ipc_lock, flags);
+}
+
+static void imx8m_dsp_handle_request(struct imx_dsp_ipc *ipc)
+{
+ struct imx8m_priv *priv = imx_dsp_get_data(ipc);
+ u32 p; /* Panic code */
+
+ /* Read the message from the debug box. */
+ sof_mailbox_read(priv->sdev, priv->sdev->debug_box.offset + 4, &p, sizeof(p));
+
+ /* Check to see if the message is a panic code (0x0dead***) */
+ if ((p & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC)
+ snd_sof_dsp_panic(priv->sdev, p, true);
+ else
+ snd_sof_ipc_msgs_rx(priv->sdev);
+}
+
+static struct imx_dsp_ops imx8m_dsp_ops = {
+ .handle_reply = imx8m_dsp_handle_reply,
+ .handle_request = imx8m_dsp_handle_request,
+};
+
+static int imx8m_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct imx8m_priv *priv = sdev->pdata->hw_pdata;
+
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ imx_dsp_ring_doorbell(priv->dsp_ipc, 0);
+
+ return 0;
+}
+
+/*
+ * DSP control.
+ */
+static int imx8m_run(struct snd_sof_dev *sdev)
+{
+ struct imx8m_priv *priv = (struct imx8m_priv *)sdev->pdata->hw_pdata;
+
+ regmap_update_bits(priv->regmap, AudioDSP_REG2, AudioDSP_REG2_RUNSTALL, 0);
+
+ return 0;
+}
+
+static int imx8m_reset(struct snd_sof_dev *sdev)
+{
+ struct imx8m_priv *priv = (struct imx8m_priv *)sdev->pdata->hw_pdata;
+ u32 pwrctl;
+
+ /* put DSP into reset and stall */
+ pwrctl = readl(priv->dap + IMX8M_DAP_PWRCTL);
+ pwrctl |= IMX8M_PWRCTL_CORERESET;
+ writel(pwrctl, priv->dap + IMX8M_DAP_PWRCTL);
+
+ /* keep reset asserted for 10 cycles */
+ usleep_range(1, 2);
+
+ regmap_update_bits(priv->regmap, AudioDSP_REG2,
+ AudioDSP_REG2_RUNSTALL, AudioDSP_REG2_RUNSTALL);
+
+ /* take the DSP out of reset and keep stalled for FW loading */
+ pwrctl = readl(priv->dap + IMX8M_DAP_PWRCTL);
+ pwrctl &= ~IMX8M_PWRCTL_CORERESET;
+ writel(pwrctl, priv->dap + IMX8M_DAP_PWRCTL);
+
+ return 0;
+}
+
+static int imx8m_probe(struct snd_sof_dev *sdev)
+{
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *res_node;
+ struct resource *mmio;
+ struct imx8m_priv *priv;
+ struct resource res;
+ u32 base, size;
+ int ret = 0;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clks = devm_kzalloc(&pdev->dev, sizeof(*priv->clks), GFP_KERNEL);
+ if (!priv->clks)
+ return -ENOMEM;
+
+ sdev->num_cores = 1;
+ sdev->pdata->hw_pdata = priv;
+ priv->dev = sdev->dev;
+ priv->sdev = sdev;
+
+ priv->ipc_dev = platform_device_register_data(sdev->dev, "imx-dsp",
+ PLATFORM_DEVID_NONE,
+ pdev, sizeof(*pdev));
+ if (IS_ERR(priv->ipc_dev))
+ return PTR_ERR(priv->ipc_dev);
+
+ priv->dsp_ipc = dev_get_drvdata(&priv->ipc_dev->dev);
+ if (!priv->dsp_ipc) {
+ /* DSP IPC driver not probed yet, try later */
+ ret = -EPROBE_DEFER;
+ dev_err(sdev->dev, "Failed to get drvdata\n");
+ goto exit_pdev_unregister;
+ }
+
+ imx_dsp_set_data(priv->dsp_ipc, priv);
+ priv->dsp_ipc->ops = &imx8m_dsp_ops;
+
+ /* DSP base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get DSP base at idx 0\n");
+ ret = -EINVAL;
+ goto exit_pdev_unregister;
+ }
+
+ priv->dap = devm_ioremap(sdev->dev, IMX8M_DAP_DEBUG, IMX8M_DAP_DEBUG_SIZE);
+ if (!priv->dap) {
+ dev_err(sdev->dev, "error: failed to map DAP debug memory area");
+ ret = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+
+ sdev->bar[SOF_FW_BLK_TYPE_IRAM] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[SOF_FW_BLK_TYPE_IRAM]) {
+ dev_err(sdev->dev, "failed to ioremap base 0x%x size 0x%x\n",
+ base, size);
+ ret = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+ sdev->mmio_bar = SOF_FW_BLK_TYPE_IRAM;
+
+ res_node = of_parse_phandle(np, "memory-region", 0);
+ if (!res_node) {
+ dev_err(&pdev->dev, "failed to get memory region node\n");
+ ret = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+
+ ret = of_address_to_resource(res_node, 0, &res);
+ of_node_put(res_node);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get reserved region address\n");
+ goto exit_pdev_unregister;
+ }
+
+ sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap_wc(sdev->dev, res.start,
+ resource_size(&res));
+ if (!sdev->bar[SOF_FW_BLK_TYPE_SRAM]) {
+ dev_err(sdev->dev, "failed to ioremap mem 0x%x size 0x%x\n",
+ base, size);
+ ret = -ENOMEM;
+ goto exit_pdev_unregister;
+ }
+ sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ priv->regmap = syscon_regmap_lookup_by_compatible("fsl,dsp-ctrl");
+ if (IS_ERR(priv->regmap)) {
+ dev_err(sdev->dev, "cannot find dsp-ctrl registers");
+ ret = PTR_ERR(priv->regmap);
+ goto exit_pdev_unregister;
+ }
+
+ /* init clocks info */
+ priv->clks->dsp_clks = imx8m_dsp_clks;
+ priv->clks->num_dsp_clks = ARRAY_SIZE(imx8m_dsp_clks);
+
+ ret = imx8_parse_clocks(sdev, priv->clks);
+ if (ret < 0)
+ goto exit_pdev_unregister;
+
+ ret = imx8_enable_clocks(sdev, priv->clks);
+ if (ret < 0)
+ goto exit_pdev_unregister;
+
+ return 0;
+
+exit_pdev_unregister:
+ platform_device_unregister(priv->ipc_dev);
+ return ret;
+}
+
+static int imx8m_remove(struct snd_sof_dev *sdev)
+{
+ struct imx8m_priv *priv = sdev->pdata->hw_pdata;
+
+ imx8_disable_clocks(sdev, priv->clks);
+ platform_device_unregister(priv->ipc_dev);
+
+ return 0;
+}
+
+/* on i.MX8M there is a 1:1 match between type and BAR idx */
+static int imx8m_get_bar_index(struct snd_sof_dev *sdev, u32 type)
+{
+ /* Only IRAM and SRAM bars are valid */
+ switch (type) {
+ case SOF_FW_BLK_TYPE_IRAM:
+ case SOF_FW_BLK_TYPE_SRAM:
+ return type;
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct snd_soc_dai_driver imx8m_dai[] = {
+{
+ .name = "sai1",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+},
+{
+ .name = "sai3",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+},
+};
+
+static int imx8m_dsp_set_power_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ sdev->dsp_power_state = *target_state;
+
+ return 0;
+}
+
+static int imx8m_resume(struct snd_sof_dev *sdev)
+{
+ struct imx8m_priv *priv = (struct imx8m_priv *)sdev->pdata->hw_pdata;
+ int ret;
+ int i;
+
+ ret = imx8_enable_clocks(sdev, priv->clks);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < DSP_MU_CHAN_NUM; i++)
+ imx_dsp_request_channel(priv->dsp_ipc, i);
+
+ return 0;
+}
+
+static void imx8m_suspend(struct snd_sof_dev *sdev)
+{
+ struct imx8m_priv *priv = (struct imx8m_priv *)sdev->pdata->hw_pdata;
+ int i;
+
+ for (i = 0; i < DSP_MU_CHAN_NUM; i++)
+ imx_dsp_free_channel(priv->dsp_ipc, i);
+
+ imx8_disable_clocks(sdev, priv->clks);
+}
+
+static int imx8m_dsp_runtime_resume(struct snd_sof_dev *sdev)
+{
+ int ret;
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+
+ ret = imx8m_resume(sdev);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+static int imx8m_dsp_runtime_suspend(struct snd_sof_dev *sdev)
+{
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = SOF_DSP_PM_D3,
+ };
+
+ imx8m_suspend(sdev);
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+static int imx8m_dsp_resume(struct snd_sof_dev *sdev)
+{
+ int ret;
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+
+ ret = imx8m_resume(sdev);
+ if (ret < 0)
+ return ret;
+
+ if (pm_runtime_suspended(sdev->dev)) {
+ pm_runtime_disable(sdev->dev);
+ pm_runtime_set_active(sdev->dev);
+ pm_runtime_mark_last_busy(sdev->dev);
+ pm_runtime_enable(sdev->dev);
+ pm_runtime_idle(sdev->dev);
+ }
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+static int imx8m_dsp_suspend(struct snd_sof_dev *sdev, unsigned int target_state)
+{
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = target_state,
+ };
+
+ if (!pm_runtime_suspended(sdev->dev))
+ imx8m_suspend(sdev);
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+/* i.MX8M ops */
+static struct snd_sof_dsp_ops sof_imx8m_ops = {
+ /* probe and remove */
+ .probe = imx8m_probe,
+ .remove = imx8m_remove,
+ /* DSP core boot */
+ .run = imx8m_run,
+ .reset = imx8m_reset,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* ipc */
+ .send_msg = imx8m_send_msg,
+ .get_mailbox_offset = imx8m_get_mailbox_offset,
+ .get_window_offset = imx8m_get_window_offset,
+
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ .get_bar_index = imx8m_get_bar_index,
+
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* Debug information */
+ .dbg_dump = imx8_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_close = sof_stream_pcm_close,
+ /* Firmware ops */
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+
+ /* DAI drivers */
+ .drv = imx8m_dai,
+ .num_drv = ARRAY_SIZE(imx8m_dai),
+
+ .suspend = imx8m_dsp_suspend,
+ .resume = imx8m_dsp_resume,
+
+ .runtime_suspend = imx8m_dsp_runtime_suspend,
+ .runtime_resume = imx8m_dsp_runtime_resume,
+
+ .set_power_state = imx8m_dsp_set_power_state,
+
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+};
+
+static struct sof_dev_desc sof_of_imx8mp_desc = {
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "imx/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "imx/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-imx8m.ri",
+ },
+ .nocodec_tplg_filename = "sof-imx8-nocodec.tplg",
+ .ops = &sof_imx8m_ops,
+};
+
+static const struct of_device_id sof_of_imx8m_ids[] = {
+ { .compatible = "fsl,imx8mp-dsp", .data = &sof_of_imx8mp_desc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, sof_of_imx8m_ids);
+
+/* DT driver definition */
+static struct platform_driver snd_sof_of_imx8m_driver = {
+ .probe = sof_of_probe,
+ .remove = sof_of_remove,
+ .driver = {
+ .name = "sof-audio-of-imx8m",
+ .pm = &sof_of_pm,
+ .of_match_table = sof_of_imx8m_ids,
+ },
+};
+module_platform_driver(snd_sof_of_imx8m_driver);
+
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/imx/imx8ulp.c b/sound/soc/sof/imx/imx8ulp.c
new file mode 100644
index 000000000..4a562c985
--- /dev/null
+++ b/sound/soc/sof/imx/imx8ulp.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright 2021-2022 NXP
+//
+// Author: Peng Zhang <peng.zhang_8@nxp.com>
+//
+// Hardware interface for audio DSP on i.MX8ULP
+
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/firmware.h>
+#include <linux/firmware/imx/dsp.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/svc/misc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+
+#include "../ops.h"
+#include "../sof-of-dev.h"
+#include "imx-common.h"
+
+#define FSL_SIP_HIFI_XRDC 0xc200000e
+
+/* SIM Domain register */
+#define SYSCTRL0 0x8
+#define EXECUTE_BIT BIT(13)
+#define RESET_BIT BIT(16)
+#define HIFI4_CLK_BIT BIT(17)
+#define PB_CLK_BIT BIT(18)
+#define PLAT_CLK_BIT BIT(19)
+#define DEBUG_LOGIC_BIT BIT(25)
+
+#define MBOX_OFFSET 0x800000
+#define MBOX_SIZE 0x1000
+
+static struct clk_bulk_data imx8ulp_dsp_clks[] = {
+ { .id = "core" },
+ { .id = "ipg" },
+ { .id = "ocram" },
+ { .id = "mu" },
+};
+
+struct imx8ulp_priv {
+ struct device *dev;
+ struct snd_sof_dev *sdev;
+
+ /* DSP IPC handler */
+ struct imx_dsp_ipc *dsp_ipc;
+ struct platform_device *ipc_dev;
+
+ struct regmap *regmap;
+ struct imx_clocks *clks;
+};
+
+static void imx8ulp_sim_lpav_start(struct imx8ulp_priv *priv)
+{
+ /* Controls the HiFi4 DSP Reset: 1 in reset, 0 out of reset */
+ regmap_update_bits(priv->regmap, SYSCTRL0, RESET_BIT, 0);
+
+ /* Reset HiFi4 DSP Debug logic: 1 debug reset, 0 out of reset */
+ regmap_update_bits(priv->regmap, SYSCTRL0, DEBUG_LOGIC_BIT, 0);
+
+ /* Stall HIFI4 DSP Execution: 1 stall, 0 run */
+ regmap_update_bits(priv->regmap, SYSCTRL0, EXECUTE_BIT, 0);
+}
+
+static int imx8ulp_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+
+static int imx8ulp_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+
+static void imx8ulp_dsp_handle_reply(struct imx_dsp_ipc *ipc)
+{
+ struct imx8ulp_priv *priv = imx_dsp_get_data(ipc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sdev->ipc_lock, flags);
+
+ snd_sof_ipc_process_reply(priv->sdev, 0);
+
+ spin_unlock_irqrestore(&priv->sdev->ipc_lock, flags);
+}
+
+static void imx8ulp_dsp_handle_request(struct imx_dsp_ipc *ipc)
+{
+ struct imx8ulp_priv *priv = imx_dsp_get_data(ipc);
+ u32 p; /* panic code */
+
+ /* Read the message from the debug box. */
+ sof_mailbox_read(priv->sdev, priv->sdev->debug_box.offset + 4, &p, sizeof(p));
+
+ /* Check to see if the message is a panic code (0x0dead***) */
+ if ((p & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC)
+ snd_sof_dsp_panic(priv->sdev, p, true);
+ else
+ snd_sof_ipc_msgs_rx(priv->sdev);
+}
+
+static struct imx_dsp_ops dsp_ops = {
+ .handle_reply = imx8ulp_dsp_handle_reply,
+ .handle_request = imx8ulp_dsp_handle_request,
+};
+
+static int imx8ulp_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct imx8ulp_priv *priv = sdev->pdata->hw_pdata;
+
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ imx_dsp_ring_doorbell(priv->dsp_ipc, 0);
+
+ return 0;
+}
+
+static int imx8ulp_run(struct snd_sof_dev *sdev)
+{
+ struct imx8ulp_priv *priv = sdev->pdata->hw_pdata;
+
+ imx8ulp_sim_lpav_start(priv);
+
+ return 0;
+}
+
+static int imx8ulp_reset(struct snd_sof_dev *sdev)
+{
+ struct imx8ulp_priv *priv = sdev->pdata->hw_pdata;
+ struct arm_smccc_res smc_resource;
+
+ /* HiFi4 Platform Clock Enable: 1 enabled, 0 disabled */
+ regmap_update_bits(priv->regmap, SYSCTRL0, PLAT_CLK_BIT, PLAT_CLK_BIT);
+
+ /* HiFi4 PBCLK clock enable: 1 enabled, 0 disabled */
+ regmap_update_bits(priv->regmap, SYSCTRL0, PB_CLK_BIT, PB_CLK_BIT);
+
+ /* HiFi4 Clock Enable: 1 enabled, 0 disabled */
+ regmap_update_bits(priv->regmap, SYSCTRL0, HIFI4_CLK_BIT, HIFI4_CLK_BIT);
+
+ regmap_update_bits(priv->regmap, SYSCTRL0, RESET_BIT, RESET_BIT);
+ usleep_range(1, 2);
+
+ /* Stall HIFI4 DSP Execution: 1 stall, 0 not stall */
+ regmap_update_bits(priv->regmap, SYSCTRL0, EXECUTE_BIT, EXECUTE_BIT);
+ usleep_range(1, 2);
+
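+ /*
+ * SiP call into the secure firmware; judging by the FSL_SIP_HIFI_XRDC
+ * ID this presumably sets up XRDC (resource domain controller) access
+ * rights for the HiFi4 before it starts fetching code.
+ */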
+ arm_smccc_smc(FSL_SIP_HIFI_XRDC, 0, 0, 0, 0, 0, 0, 0, &smc_resource);
+
+ return 0;
+}
+
+static int imx8ulp_probe(struct snd_sof_dev *sdev)
+{
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *res_node;
+ struct resource *mmio;
+ struct imx8ulp_priv *priv;
+ struct resource res;
+ u32 base, size;
+ int ret = 0;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clks = devm_kzalloc(&pdev->dev, sizeof(*priv->clks), GFP_KERNEL);
+ if (!priv->clks)
+ return -ENOMEM;
+
+ sdev->num_cores = 1;
+ sdev->pdata->hw_pdata = priv;
+ priv->dev = sdev->dev;
+ priv->sdev = sdev;
+
+ /* The System Integration Module (SIM) controls the DSP configuration */
+ priv->regmap = syscon_regmap_lookup_by_phandle(np, "fsl,dsp-ctrl");
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->ipc_dev = platform_device_register_data(sdev->dev, "imx-dsp",
+ PLATFORM_DEVID_NONE,
+ pdev, sizeof(*pdev));
+ if (IS_ERR(priv->ipc_dev))
+ return PTR_ERR(priv->ipc_dev);
+
+ priv->dsp_ipc = dev_get_drvdata(&priv->ipc_dev->dev);
+ if (!priv->dsp_ipc) {
+ /* DSP IPC driver not probed yet, try later */
+ ret = -EPROBE_DEFER;
+ dev_err(sdev->dev, "Failed to get drvdata\n");
+ goto exit_pdev_unregister;
+ }
+
+ imx_dsp_set_data(priv->dsp_ipc, priv);
+ priv->dsp_ipc->ops = &dsp_ops;
+
+ /* DSP base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get DSP base at idx 0\n");
+ ret = -EINVAL;
+ goto exit_pdev_unregister;
+ }
+
+ sdev->bar[SOF_FW_BLK_TYPE_IRAM] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[SOF_FW_BLK_TYPE_IRAM]) {
+ dev_err(sdev->dev, "failed to ioremap base 0x%x size 0x%x\n",
+ base, size);
+ ret = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+ sdev->mmio_bar = SOF_FW_BLK_TYPE_IRAM;
+
+ res_node = of_parse_phandle(np, "memory-reserved", 0);
+ if (!res_node) {
+ dev_err(&pdev->dev, "failed to get memory region node\n");
+ ret = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+
+ ret = of_address_to_resource(res_node, 0, &res);
+ of_node_put(res_node);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get reserved region address\n");
+ goto exit_pdev_unregister;
+ }
+
+ sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap_wc(sdev->dev, res.start,
+ resource_size(&res));
+ if (!sdev->bar[SOF_FW_BLK_TYPE_SRAM]) {
+ dev_err(sdev->dev, "failed to ioremap mem 0x%x size 0x%x\n",
+ base, size);
+ ret = -ENOMEM;
+ goto exit_pdev_unregister;
+ }
+ sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
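+ /*
+ * Hook the reserved memory region up to this device so that DMA
+ * allocations for the DSP can be served from the carved-out area
+ * described in the device tree.
+ */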
+ ret = of_reserved_mem_device_init(sdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init reserved memory region %d\n", ret);
+ goto exit_pdev_unregister;
+ }
+
+ priv->clks->dsp_clks = imx8ulp_dsp_clks;
+ priv->clks->num_dsp_clks = ARRAY_SIZE(imx8ulp_dsp_clks);
+
+ ret = imx8_parse_clocks(sdev, priv->clks);
+ if (ret < 0)
+ goto exit_pdev_unregister;
+
+ ret = imx8_enable_clocks(sdev, priv->clks);
+ if (ret < 0)
+ goto exit_pdev_unregister;
+
+ return 0;
+
+exit_pdev_unregister:
+ platform_device_unregister(priv->ipc_dev);
+
+ return ret;
+}
+
+static int imx8ulp_remove(struct snd_sof_dev *sdev)
+{
+ struct imx8ulp_priv *priv = sdev->pdata->hw_pdata;
+
+ imx8_disable_clocks(sdev, priv->clks);
+ platform_device_unregister(priv->ipc_dev);
+
+ return 0;
+}
+
+/* on i.MX8ULP there is a 1:1 match between type and BAR idx */
+static int imx8ulp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
+{
+ return type;
+}
+
+static int imx8ulp_suspend(struct snd_sof_dev *sdev)
+{
+ int i;
+ struct imx8ulp_priv *priv = (struct imx8ulp_priv *)sdev->pdata->hw_pdata;
+
+ /* Stall the DSP; it is released again in .run() */
+ regmap_update_bits(priv->regmap, SYSCTRL0, EXECUTE_BIT, EXECUTE_BIT);
+
+ for (i = 0; i < DSP_MU_CHAN_NUM; i++)
+ imx_dsp_free_channel(priv->dsp_ipc, i);
+
+ imx8_disable_clocks(sdev, priv->clks);
+
+ return 0;
+}
+
+static int imx8ulp_resume(struct snd_sof_dev *sdev)
+{
+ struct imx8ulp_priv *priv = (struct imx8ulp_priv *)sdev->pdata->hw_pdata;
+ int i;
+
+ imx8_enable_clocks(sdev, priv->clks);
+
+ for (i = 0; i < DSP_MU_CHAN_NUM; i++)
+ imx_dsp_request_channel(priv->dsp_ipc, i);
+
+ return 0;
+}
+
+static int imx8ulp_dsp_runtime_resume(struct snd_sof_dev *sdev)
+{
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = SOF_DSP_PM_D0,
+ .substate = 0,
+ };
+
+ imx8ulp_resume(sdev);
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+static int imx8ulp_dsp_runtime_suspend(struct snd_sof_dev *sdev)
+{
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = SOF_DSP_PM_D3,
+ .substate = 0,
+ };
+
+ imx8ulp_suspend(sdev);
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+static int imx8ulp_dsp_suspend(struct snd_sof_dev *sdev, unsigned int target_state)
+{
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = target_state,
+ .substate = 0,
+ };
+
+ if (!pm_runtime_suspended(sdev->dev))
+ imx8ulp_suspend(sdev);
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+static int imx8ulp_dsp_resume(struct snd_sof_dev *sdev)
+{
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = SOF_DSP_PM_D0,
+ .substate = 0,
+ };
+
+ imx8ulp_resume(sdev);
+
+ if (pm_runtime_suspended(sdev->dev)) {
+ pm_runtime_disable(sdev->dev);
+ pm_runtime_set_active(sdev->dev);
+ pm_runtime_mark_last_busy(sdev->dev);
+ pm_runtime_enable(sdev->dev);
+ pm_runtime_idle(sdev->dev);
+ }
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+static struct snd_soc_dai_driver imx8ulp_dai[] = {
+ {
+ .name = "sai5",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+ },
+ {
+ .name = "sai6",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+ },
+};
+
+static int imx8ulp_dsp_set_power_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ sdev->dsp_power_state = *target_state;
+
+ return 0;
+}
+
+/* i.MX8ULP ops */
+static struct snd_sof_dsp_ops sof_imx8ulp_ops = {
+ /* probe and remove */
+ .probe = imx8ulp_probe,
+ .remove = imx8ulp_remove,
+ /* DSP core boot */
+ .run = imx8ulp_run,
+ .reset = imx8ulp_reset,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Module IO */
+ .read64 = sof_io_read64,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* ipc */
+ .send_msg = imx8ulp_send_msg,
+ .get_mailbox_offset = imx8ulp_get_mailbox_offset,
+ .get_window_offset = imx8ulp_get_window_offset,
+
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_close = sof_stream_pcm_close,
+
+ /* module loading */
+ .get_bar_index = imx8ulp_get_bar_index,
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* Debug information */
+ .dbg_dump = imx8_dump,
+
+ /* Firmware ops */
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+
+ /* DAI drivers */
+ .drv = imx8ulp_dai,
+ .num_drv = ARRAY_SIZE(imx8ulp_dai),
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+
+ /* PM */
+ .runtime_suspend = imx8ulp_dsp_runtime_suspend,
+ .runtime_resume = imx8ulp_dsp_runtime_resume,
+
+ .suspend = imx8ulp_dsp_suspend,
+ .resume = imx8ulp_dsp_resume,
+
+ .set_power_state = imx8ulp_dsp_set_power_state,
+};
+
+static struct sof_dev_desc sof_of_imx8ulp_desc = {
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "imx/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "imx/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-imx8ulp.ri",
+ },
+ .nocodec_tplg_filename = "sof-imx8ulp-nocodec.tplg",
+ .ops = &sof_imx8ulp_ops,
+};
+
+static const struct of_device_id sof_of_imx8ulp_ids[] = {
+ { .compatible = "fsl,imx8ulp-dsp", .data = &sof_of_imx8ulp_desc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, sof_of_imx8ulp_ids);
+
+/* DT driver definition */
+static struct platform_driver snd_sof_of_imx8ulp_driver = {
+ .probe = sof_of_probe,
+ .remove = sof_of_remove,
+ .driver = {
+ .name = "sof-audio-of-imx8ulp",
+ .pm = &sof_of_pm,
+ .of_match_table = sof_of_imx8ulp_ids,
+ },
+};
+module_platform_driver(snd_sof_of_imx8ulp_driver);
+
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig
new file mode 100644
index 000000000..9d0107932
--- /dev/null
+++ b/sound/soc/sof/intel/Kconfig
@@ -0,0 +1,364 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SND_SOC_SOF_INTEL_TOPLEVEL
+ bool "SOF support for Intel audio DSPs"
+ depends on X86 || COMPILE_TEST
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+if SND_SOC_SOF_INTEL_TOPLEVEL
+
+config SND_SOC_SOF_INTEL_HIFI_EP_IPC
+ tristate
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_INTEL_ATOM_HIFI_EP
+ tristate
+ select SND_SOC_SOF_INTEL_COMMON
+ select SND_SOC_SOF_INTEL_HIFI_EP_IPC
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_INTEL_COMMON
+ tristate
+ select SND_SOC_SOF
+ select SND_SOC_ACPI_INTEL_MATCH
+ select SND_SOC_SOF_XTENSA
+ select SND_SOC_INTEL_MACH
+ select SND_SOC_ACPI if ACPI
+ select SND_INTEL_DSP_CONFIG
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+if SND_SOC_SOF_ACPI
+
+config SND_SOC_SOF_BAYTRAIL
+ tristate "SOF support for Baytrail, Braswell and Cherrytrail"
+ default SND_SOC_SOF_ACPI
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_COMMON
+ select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
+ select SND_SOC_SOF_ACPI_DEV
+ select IOSF_MBI if X86 && PCI
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Baytrail, Braswell or Cherrytrail processors.
+ This option can coexist in the same build with the legacy Atom/SST
+ drivers, which are currently the default but will be deprecated
+ at some point.
+ Existing firmware/topology binaries and UCM configurations,
+ typically located in the root file system, are already
+ compatible with both the SOF and Atom/SST legacy drivers.
+ This is a recommended option for distributions.
+ Say Y if you want to enable SOF on Baytrail/Cherrytrail.
+ If unsure select "N".
+
+config SND_SOC_SOF_BROADWELL
+ tristate "SOF support for Broadwell"
+ default SND_SOC_SOF_ACPI
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_COMMON
+ select SND_SOC_SOF_INTEL_HIFI_EP_IPC
+ select SND_SOC_SOF_ACPI_DEV
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Broadwell processors.
+ This option can coexist in the same build with the default 'catpt'
+ driver.
+ Existing firmware/topology binaries and UCM configurations, typically
+ located in the root file system, are already compatible with both the
+ SOF and catpt drivers.
+ SOF does not fully support Broadwell and has limitations related to
+ DMA and suspend-resume, so this is not a recommended option for
+ distributions.
+ Say Y if you want to enable SOF on Broadwell.
+ If unsure select "N".
+
+endif ## SND_SOC_SOF_ACPI
+
+if SND_SOC_SOF_PCI
+
+config SND_SOC_SOF_MERRIFIELD
+ tristate "SOF support for Tangier/Merrifield"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_PCI_DEV
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Tangier/Merrifield processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_INTEL_SKL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_SKYLAKE
+ tristate "SOF support for SkyLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_SKL
+ help
+ This adds support for the Intel(R) platforms using the SkyLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+ This is intended only for developers and is not a recommended option for distros.
+
+config SND_SOC_SOF_KABYLAKE
+ tristate "SOF support for KabyLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_SKL
+ help
+ This adds support for the Intel(R) platforms using the KabyLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+ This is intended only for developers and is not a recommended option for distros.
+
+config SND_SOC_SOF_INTEL_APL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_APOLLOLAKE
+ tristate "SOF support for Apollolake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_APL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Apollolake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_GEMINILAKE
+ tristate "SOF support for GeminiLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_APL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Geminilake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_INTEL_CNL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_CANNONLAKE
+ tristate "SOF support for Cannonlake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_CNL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Cannonlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_COFFEELAKE
+ tristate "SOF support for CoffeeLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_CNL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Coffeelake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_COMETLAKE
+ tristate "SOF support for CometLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_CNL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Cometlake processors.
+ If unsure select "N".
+
+config SND_SOC_SOF_INTEL_ICL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_ICELAKE
+ tristate "SOF support for Icelake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_ICL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Icelake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_JASPERLAKE
+ tristate "SOF support for JasperLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_ICL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the JasperLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_INTEL_TGL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_TIGERLAKE
+ tristate "SOF support for Tigerlake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_TGL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Tigerlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_ELKHARTLAKE
+ tristate "SOF support for ElkhartLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_TGL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the ElkhartLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_ALDERLAKE
+ tristate "SOF support for Alderlake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_TGL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Alderlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_INTEL_MTL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_METEORLAKE
+ tristate "SOF support for Meteorlake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_MTL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Meteorlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_INTEL_LNL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_LUNARLAKE
+ tristate "SOF support for Lunarlake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_LNL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Lunarlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_HDA_COMMON
+ tristate
+ select SND_SOC_SOF_INTEL_COMMON
+ select SND_SOC_SOF_PCI_DEV
+ select SND_INTEL_DSP_CONFIG
+ select SND_SOC_SOF_HDA_LINK_BASELINE
+ select SND_SOC_SOF_HDA_PROBES
+ select SND_SOC_SOF_HDA_MLINK if SND_SOC_SOF_HDA_LINK
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_HDA_MLINK
+ tristate
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+if SND_SOC_SOF_HDA_COMMON
+
+config SND_SOC_SOF_HDA_LINK
+ bool "SOF support for HDA Links(HDA/HDMI)"
+ help
+ This adds support for HDA links(HDA/HDMI) with Sound Open Firmware
+ for Intel(R) platforms.
+ Say Y if you want to enable HDA links with SOF.
+ If unsure select "N".
+
+config SND_SOC_SOF_HDA_AUDIO_CODEC
+ bool "SOF support for HDAudio codecs"
+ depends on SND_SOC_SOF_HDA_LINK
+ select SND_SOC_SOF_PROBE_WORK_QUEUE
+ help
+ This adds support for HDAudio codecs with Sound Open Firmware
+ for Intel(R) platforms.
+ Say Y if you want to enable HDAudio codecs with SOF.
+ If unsure select "N".
+
+endif ## SND_SOC_SOF_HDA_COMMON
+
+config SND_SOC_SOF_HDA_LINK_BASELINE
+ tristate
+ select SND_SOC_SOF_HDA if SND_SOC_SOF_HDA_LINK
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_HDA
+ tristate
+ select SND_HDA_EXT_CORE if SND_SOC_SOF_HDA_LINK
+ select SND_SOC_HDAC_HDA if SND_SOC_SOF_HDA_AUDIO_CODEC
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_HDA_PROBES
+ tristate
+ select SND_SOC_SOF_DEBUG_PROBES
+ help
+ This option enables data probing for Intel(R) Skylake and newer
+ (HDA) platforms.
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ tristate
+ select SOUNDWIRE_INTEL if SND_SOC_SOF_INTEL_SOUNDWIRE != n
+ select SND_INTEL_SOUNDWIRE_ACPI if SND_SOC_SOF_INTEL_SOUNDWIRE != n
+
+config SND_SOC_SOF_INTEL_SOUNDWIRE
+ tristate "SOF support for SoundWire"
+ default SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ depends on SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ depends on ACPI && SOUNDWIRE
+ depends on !(SOUNDWIRE=m && SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=y)
+ help
+ This adds support for SoundWire with Sound Open Firmware
+ for Intel(R) platforms.
+ Say Y if you want to enable SoundWire links with SOF.
+ If unsure select "N".
+
+endif ## SND_SOC_SOF_PCI
+
+endif ## SND_SOC_SOF_INTEL_TOPLEVEL
diff --git a/sound/soc/sof/intel/Makefile b/sound/soc/sof/intel/Makefile
new file mode 100644
index 000000000..030574dbc
--- /dev/null
+++ b/sound/soc/sof/intel/Makefile
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+snd-sof-acpi-intel-byt-objs := byt.o
+snd-sof-acpi-intel-bdw-objs := bdw.o
+
+snd-sof-intel-hda-common-objs := hda.o hda-loader.o hda-stream.o hda-trace.o \
+ hda-dsp.o hda-ipc.o hda-ctrl.o hda-pcm.o \
+ hda-dai.o hda-dai-ops.o hda-bus.o \
+ skl.o hda-loader-skl.o \
+ apl.o cnl.o tgl.o icl.o mtl.o lnl.o hda-common-ops.o
+
+snd-sof-intel-hda-mlink-objs := hda-mlink.o
+
+snd-sof-intel-hda-common-$(CONFIG_SND_SOC_SOF_HDA_PROBES) += hda-probes.o
+
+snd-sof-intel-hda-objs := hda-codec.o
+
+snd-sof-intel-atom-objs := atom.o
+
+obj-$(CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP) += snd-sof-intel-atom.o
+obj-$(CONFIG_SND_SOC_SOF_BAYTRAIL) += snd-sof-acpi-intel-byt.o
+obj-$(CONFIG_SND_SOC_SOF_BROADWELL) += snd-sof-acpi-intel-bdw.o
+obj-$(CONFIG_SND_SOC_SOF_HDA_COMMON) += snd-sof-intel-hda-common.o
+obj-$(CONFIG_SND_SOC_SOF_HDA_MLINK) += snd-sof-intel-hda-mlink.o
+obj-$(CONFIG_SND_SOC_SOF_HDA) += snd-sof-intel-hda.o
+
+snd-sof-pci-intel-tng-objs := pci-tng.o
+snd-sof-pci-intel-skl-objs := pci-skl.o
+snd-sof-pci-intel-apl-objs := pci-apl.o
+snd-sof-pci-intel-cnl-objs := pci-cnl.o
+snd-sof-pci-intel-icl-objs := pci-icl.o
+snd-sof-pci-intel-tgl-objs := pci-tgl.o
+snd-sof-pci-intel-mtl-objs := pci-mtl.o
+snd-sof-pci-intel-lnl-objs := pci-lnl.o
+
+obj-$(CONFIG_SND_SOC_SOF_MERRIFIELD) += snd-sof-pci-intel-tng.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_SKL) += snd-sof-pci-intel-skl.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_APL) += snd-sof-pci-intel-apl.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_CNL) += snd-sof-pci-intel-cnl.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_ICL) += snd-sof-pci-intel-icl.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_TGL) += snd-sof-pci-intel-tgl.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_MTL) += snd-sof-pci-intel-mtl.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_LNL) += snd-sof-pci-intel-lnl.o
diff --git a/sound/soc/sof/intel/apl.c b/sound/soc/sof/intel/apl.c
new file mode 100644
index 000000000..e1f25a8f0
--- /dev/null
+++ b/sound/soc/sof/intel/apl.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Apollolake and GeminiLake
+ */
+
+#include <sound/sof/ext_manifest4.h>
+#include "../ipc4-priv.h"
+#include "../sof-priv.h"
+#include "hda.h"
+#include "../sof-audio.h"
+
+static const struct snd_sof_debugfs_map apl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+/* apollolake ops */
+struct snd_sof_dsp_ops sof_apl_ops;
+EXPORT_SYMBOL_NS(sof_apl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int sof_apl_ops_init(struct snd_sof_dev *sdev)
+{
+ /* common defaults */
+ memcpy(&sof_apl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
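+ /*
+ * Everything below only overrides or extends the common HDA ops copied
+ * above; untouched fields keep the generic HDA defaults.
+ */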
+
+ /* probe/remove/shutdown */
+ sof_apl_ops.shutdown = hda_dsp_shutdown;
+
+ if (sdev->pdata->ipc_type == SOF_IPC) {
+ /* doorbell */
+ sof_apl_ops.irq_thread = hda_dsp_ipc_irq_thread;
+
+ /* ipc */
+ sof_apl_ops.send_msg = hda_dsp_ipc_send_msg;
+
+ /* debug */
+ sof_apl_ops.ipc_dump = hda_ipc_dump;
+
+ sof_apl_ops.set_power_state = hda_dsp_set_power_state_ipc3;
+ }
+
+ if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
+
+ ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_1_5;
+
+ /* External library loading support */
+ ipc4_data->load_library = hda_dsp_ipc4_load_library;
+
+ /* doorbell */
+ sof_apl_ops.irq_thread = hda_dsp_ipc4_irq_thread;
+
+ /* ipc */
+ sof_apl_ops.send_msg = hda_dsp_ipc4_send_msg;
+
+ /* debug */
+ sof_apl_ops.ipc_dump = hda_ipc4_dump;
+
+ sof_apl_ops.set_power_state = hda_dsp_set_power_state_ipc4;
+ }
+
+ /* set DAI driver ops */
+ hda_set_dai_drv_ops(sdev, &sof_apl_ops);
+
+ /* debug */
+ sof_apl_ops.debug_map = apl_dsp_debugfs;
+ sof_apl_ops.debug_map_count = ARRAY_SIZE(apl_dsp_debugfs);
+
+ /* firmware run */
+ sof_apl_ops.run = hda_dsp_cl_boot_firmware;
+
+ /* pre/post fw run */
+ sof_apl_ops.post_fw_run = hda_dsp_post_fw_run;
+
+ /* dsp core get/put */
+ sof_apl_ops.core_get = hda_dsp_core_get;
+
+ return 0;
+};
+EXPORT_SYMBOL_NS(sof_apl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc apl_chip_info = {
+ /* Apollolake */
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(1, 0),
+ .ipc_req = HDA_DSP_REG_HIPCI,
+ .ipc_req_mask = HDA_DSP_REG_HIPCI_BUSY,
+ .ipc_ack = HDA_DSP_REG_HIPCIE,
+ .ipc_ack_mask = HDA_DSP_REG_HIPCIE_DONE,
+ .ipc_ctl = HDA_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 150,
+ .ssp_count = APL_SSP_COUNT,
+ .ssp_base_offset = APL_SSP_BASE_OFFSET,
+ .d0i3_offset = SOF_HDA_VS_D0I3C,
+ .quirks = SOF_INTEL_PROCEN_FMT_QUIRK,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_1_5_PLUS,
+};
+EXPORT_SYMBOL_NS(apl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/atom.c b/sound/soc/sof/intel/atom.c
new file mode 100644
index 000000000..bd9789b48
--- /dev/null
+++ b/sound/soc/sof/intel/atom.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2021 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Atom devices
+ */
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/intel-dsp-config.h>
+#include "../ops.h"
+#include "shim.h"
+#include "atom.h"
+#include "../sof-acpi-dev.h"
+#include "../sof-audio.h"
+#include "../../intel/common/soc-intel-quirks.h"
+
+static void atom_host_done(struct snd_sof_dev *sdev);
+static void atom_dsp_done(struct snd_sof_dev *sdev);
+
+/*
+ * Debug
+ */
+
+static void atom_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
+	/* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* note: variable AR register array is not read */
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+ offset += xoops->arch_hdr.totalsize;
+ sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
+}
+
+void atom_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[STACK_DUMP_SIZE];
+ u64 status, panic, imrd, imrx;
+
+ /* now try generic SOF status messages */
+ status = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCD);
+ panic = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCX);
+ atom_get_registers(sdev, &xoops, &panic_info, stack,
+ STACK_DUMP_SIZE);
+ sof_print_oops_and_stack(sdev, KERN_ERR, status, panic, &xoops,
+ &panic_info, stack, STACK_DUMP_SIZE);
+
+ /* provide some context for firmware debug */
+ imrx = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IMRX);
+ imrd = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IMRD);
+ dev_err(sdev->dev,
+ "error: ipc host -> DSP: pending %s complete %s raw 0x%llx\n",
+ (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
+ (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ dev_err(sdev->dev,
+ "error: mask host: pending %s complete %s raw 0x%llx\n",
+ (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
+ (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ dev_err(sdev->dev,
+ "error: ipc DSP -> host: pending %s complete %s raw 0x%llx\n",
+ (status & SHIM_IPCD_BUSY) ? "yes" : "no",
+ (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ dev_err(sdev->dev,
+ "error: mask DSP: pending %s complete %s raw 0x%llx\n",
+ (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
+ (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
+}
+EXPORT_SYMBOL_NS(atom_dump, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+/*
+ * IPC Doorbell IRQ handler and thread.
+ */
+
+irqreturn_t atom_irq_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u64 ipcx, ipcd;
+ int ret = IRQ_NONE;
+
+ ipcx = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCX);
+ ipcd = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCD);
+
+ if (ipcx & SHIM_BYT_IPCX_DONE) {
+
+ /* reply message from DSP, Mask Done interrupt first */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR,
+ SHIM_IMRX,
+ SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (ipcd & SHIM_BYT_IPCD_BUSY) {
+
+ /* new message from DSP, Mask Busy interrupt first */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR,
+ SHIM_IMRX,
+ SHIM_IMRX_BUSY,
+ SHIM_IMRX_BUSY);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_NS(atom_irq_handler, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+irqreturn_t atom_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u64 ipcx, ipcd;
+
+ ipcx = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCX);
+ ipcd = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCD);
+
+ /* reply message from DSP */
+ if (ipcx & SHIM_BYT_IPCX_DONE) {
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+		/*
+		 * Handle an immediate reply from the DSP core. If the message
+		 * is found, the done bit is set in cmd_done, which is called at
+		 * the end of the message processing function; otherwise set it
+		 * here, because the done bit cannot be set in the cmd_done
+		 * function triggered by the message.
+		 */
+ snd_sof_ipc_process_reply(sdev, ipcx);
+
+ atom_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ }
+
+ /* new message from DSP */
+ if (ipcd & SHIM_BYT_IPCD_BUSY) {
+
+ /* Handle messages from DSP Core */
+ if ((ipcd & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(sdev, PANIC_OFFSET(ipcd) + MBOX_OFFSET,
+ true);
+ } else {
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ atom_host_done(sdev);
+ }
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_NS(atom_irq_thread, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+int atom_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ /* unmask and prepare to receive Done interrupt */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_DONE, 0);
+
+ /* send the message */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write64(sdev, DSP_BAR, SHIM_IPCX, SHIM_BYT_IPCX_BUSY);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(atom_send_msg, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+int atom_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+EXPORT_SYMBOL_NS(atom_get_mailbox_offset, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+int atom_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+EXPORT_SYMBOL_NS(atom_get_window_offset, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+static void atom_host_done(struct snd_sof_dev *sdev)
+{
+ /* clear BUSY bit and set DONE bit - accept new messages */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR, SHIM_IPCD,
+ SHIM_BYT_IPCD_BUSY |
+ SHIM_BYT_IPCD_DONE,
+ SHIM_BYT_IPCD_DONE);
+
+ /* unmask and prepare to receive next new message */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY, 0);
+}
+
+static void atom_dsp_done(struct snd_sof_dev *sdev)
+{
+ /* clear DONE bit - tell DSP we have completed */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR, SHIM_IPCX,
+ SHIM_BYT_IPCX_DONE, 0);
+}
+
+/*
+ * DSP control.
+ */
+
+int atom_run(struct snd_sof_dev *sdev)
+{
+ int tries = 10;
+
+ /* release stall and wait to unstall */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_STALL, 0x0);
+ while (tries--) {
+ if (!(snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_CSR) &
+ SHIM_BYT_CSR_PWAITMODE))
+ break;
+ msleep(100);
+ }
+ if (tries < 0)
+ return -ENODEV;
+
+ /* return init core mask */
+ return 1;
+}
+EXPORT_SYMBOL_NS(atom_run, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+int atom_reset(struct snd_sof_dev *sdev)
+{
+ /* put DSP into reset, set reset vector and stall */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL |
+ SHIM_BYT_CSR_STALL,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL |
+ SHIM_BYT_CSR_STALL);
+
+ usleep_range(10, 15);
+
+ /* take DSP out of reset and keep stalled for FW loading */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(atom_reset, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
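+/*
+ * Build a platform-specific topology filename by inserting the SSP suffix
+ * before the .tplg extension, e.g. a name like "foo.tplg" combined with
+ * "ssp0" becomes "foo-ssp0.tplg".
+ */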
+static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
+ const char *sof_tplg_filename,
+ const char *ssp_str)
+{
+ const char *tplg_filename = NULL;
+ const char *split_ext;
+ char *filename, *tmp;
+
+ filename = kstrdup(sof_tplg_filename, GFP_KERNEL);
+ if (!filename)
+ return NULL;
+
+ /* this assumes a .tplg extension */
+ tmp = filename;
+ split_ext = strsep(&tmp, ".");
+ if (split_ext)
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s-%s.tplg",
+ split_ext, ssp_str);
+ kfree(filename);
+
+ return tplg_filename;
+}
+
+struct snd_soc_acpi_mach *atom_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_soc_acpi_mach *mach;
+ struct platform_device *pdev;
+ const char *tplg_filename;
+
+ mach = snd_soc_acpi_find_machine(desc->machines);
+ if (!mach) {
+ dev_warn(sdev->dev, "warning: No matching ASoC machine driver found\n");
+ return NULL;
+ }
+
+ pdev = to_platform_device(sdev->dev);
+ if (soc_intel_is_byt_cr(pdev)) {
+ dev_dbg(sdev->dev,
+ "BYT-CR detected, SSP0 used instead of SSP2\n");
+
+ tplg_filename = fixup_tplg_name(sdev,
+ mach->sof_tplg_filename,
+ "ssp0");
+ } else {
+ tplg_filename = mach->sof_tplg_filename;
+ }
+
+ if (!tplg_filename) {
+ dev_dbg(sdev->dev,
+ "error: no topology filename\n");
+ return NULL;
+ }
+
+ sof_pdata->tplg_filename = tplg_filename;
+ mach->mach_params.acpi_ipc_irq_index = desc->irqindex_host_ipc;
+
+ return mach;
+}
+EXPORT_SYMBOL_NS(atom_machine_select, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+/* Atom DAIs */
+struct snd_soc_dai_driver atom_dai[] = {
+{
+ .name = "ssp0-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp1-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp2-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ }
+},
+{
+ .name = "ssp3-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp4-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp5-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+};
+EXPORT_SYMBOL_NS(atom_dai, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+void atom_set_mach_params(struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct snd_soc_acpi_mach_params *mach_params;
+
+ mach_params = &mach->mach_params;
+ mach_params->platform = dev_name(sdev->dev);
+ mach_params->num_dai_drivers = desc->ops->num_drv;
+ mach_params->dai_drivers = desc->ops->drv;
+}
+EXPORT_SYMBOL_NS(atom_set_mach_params, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/atom.h b/sound/soc/sof/intel/atom.h
new file mode 100644
index 000000000..b965e5e08
--- /dev/null
+++ b/sound/soc/sof/intel/atom.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2017-2021 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_ATOM_H
+#define __SOF_INTEL_ATOM_H
+
+/* DSP memories */
+#define IRAM_OFFSET 0x0C0000
+#define IRAM_SIZE (80 * 1024)
+#define DRAM_OFFSET 0x100000
+#define DRAM_SIZE (160 * 1024)
+#define SHIM_OFFSET 0x140000
+#define SHIM_SIZE_BYT 0x100
+#define SHIM_SIZE_CHT 0x118
+#define MBOX_OFFSET 0x144000
+#define MBOX_SIZE 0x1000
+#define EXCEPT_OFFSET 0x800
+#define EXCEPT_MAX_HDR_SIZE 0x400
+
+/* DSP peripherals */
+#define DMAC0_OFFSET 0x098000
+#define DMAC1_OFFSET 0x09c000
+#define DMAC2_OFFSET 0x094000
+#define DMAC_SIZE 0x420
+#define SSP0_OFFSET 0x0a0000
+#define SSP1_OFFSET 0x0a1000
+#define SSP2_OFFSET 0x0a2000
+#define SSP3_OFFSET 0x0a4000
+#define SSP4_OFFSET 0x0a5000
+#define SSP5_OFFSET 0x0a6000
+#define SSP_SIZE 0x100
+
+#define STACK_DUMP_SIZE 32
+
+#define PCI_BAR_SIZE 0x200000
+
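+/* the firmware panic offset is carried in bits 47:32 of the 64-bit IPCD value */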
+#define PANIC_OFFSET(x) (((x) & GENMASK_ULL(47, 32)) >> 32)
+
+/*
+ * Debug
+ */
+
+#define MBOX_DUMP_SIZE 0x30
+
+/* BARs */
+#define DSP_BAR 0
+#define PCI_BAR 1
+#define IMR_BAR 2
+
+irqreturn_t atom_irq_handler(int irq, void *context);
+irqreturn_t atom_irq_thread(int irq, void *context);
+
+int atom_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
+int atom_get_mailbox_offset(struct snd_sof_dev *sdev);
+int atom_get_window_offset(struct snd_sof_dev *sdev, u32 id);
+
+int atom_run(struct snd_sof_dev *sdev);
+int atom_reset(struct snd_sof_dev *sdev);
+void atom_dump(struct snd_sof_dev *sdev, u32 flags);
+
+struct snd_soc_acpi_mach *atom_machine_select(struct snd_sof_dev *sdev);
+void atom_set_mach_params(struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev);
+
+extern struct snd_soc_dai_driver atom_dai[];
+
+#endif
diff --git a/sound/soc/sof/intel/bdw.c b/sound/soc/sof/intel/bdw.c
new file mode 100644
index 000000000..812a49b1d
--- /dev/null
+++ b/sound/soc/sof/intel/bdw.c
@@ -0,0 +1,699 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Broadwell
+ */
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/intel-dsp-config.h>
+#include "../ops.h"
+#include "shim.h"
+#include "../sof-acpi-dev.h"
+#include "../sof-audio.h"
+
+/* BARs */
+#define BDW_DSP_BAR 0
+#define BDW_PCI_BAR 1
+
+/*
+ * Debug
+ */
+
+/* DSP memories for BDW */
+#define IRAM_OFFSET 0xA0000
+#define BDW_IRAM_SIZE (10 * 32 * 1024)
+#define DRAM_OFFSET 0x00000
+#define BDW_DRAM_SIZE (20 * 32 * 1024)
+#define SHIM_OFFSET 0xFB000
+#define SHIM_SIZE 0x100
+#define MBOX_OFFSET 0x9E000
+#define MBOX_SIZE 0x1000
+#define MBOX_DUMP_SIZE 0x30
+#define EXCEPT_OFFSET 0x800
+#define EXCEPT_MAX_HDR_SIZE 0x400
+
+/* DSP peripherals */
+#define DMAC0_OFFSET 0xFE000
+#define DMAC1_OFFSET 0xFF000
+#define DMAC_SIZE 0x420
+#define SSP0_OFFSET 0xFC000
+#define SSP1_OFFSET 0xFD000
+#define SSP_SIZE 0x100
+
+#define BDW_STACK_DUMP_SIZE 32
+
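+/* the firmware panic offset is carried in the low 16 bits of the IPCX value */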
+#define BDW_PANIC_OFFSET(x) ((x) & 0xFFFF)
+
+static const struct snd_sof_debugfs_map bdw_debugfs[] = {
+ {"dmac0", BDW_DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", BDW_DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", BDW_DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", BDW_DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", BDW_DSP_BAR, IRAM_OFFSET, BDW_IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", BDW_DSP_BAR, DRAM_OFFSET, BDW_DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", BDW_DSP_BAR, SHIM_OFFSET, SHIM_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void bdw_host_done(struct snd_sof_dev *sdev);
+static void bdw_dsp_done(struct snd_sof_dev *sdev);
+
+/*
+ * DSP Control.
+ */
+
+static int bdw_run(struct snd_sof_dev *sdev)
+{
+ /* set opportunistic mode on engine 0,1 for all channels */
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_HMDC,
+ SHIM_HMDC_HDDA_E0_ALLCH |
+ SHIM_HMDC_HDDA_E1_ALLCH, 0);
+
+ /* set DSP to RUN */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_STALL, 0x0);
+
+ /* return init core mask */
+ return 1;
+}
+
+static int bdw_reset(struct snd_sof_dev *sdev)
+{
+ /* put DSP into reset and stall */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_RST | SHIM_CSR_STALL,
+ SHIM_CSR_RST | SHIM_CSR_STALL);
+
+ /* keep in reset for 10ms */
+ mdelay(10);
+
+ /* take DSP out of reset and keep stalled for FW loading */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_RST | SHIM_CSR_STALL,
+ SHIM_CSR_STALL);
+
+ return 0;
+}
+
+static int bdw_set_dsp_D0(struct snd_sof_dev *sdev)
+{
+ int tries = 10;
+ u32 reg;
+
+ /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
+ PCI_VDRTCL2_DCLCGE |
+ PCI_VDRTCL2_DTCGE, 0);
+
+ /* Disable D3PG (VDRTCTL0.D3PGD = 1) */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL0,
+ PCI_VDRTCL0_D3PGD, PCI_VDRTCL0_D3PGD);
+
+ /* Set D0 state */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_PMCS,
+ PCI_PMCS_PS_MASK, 0);
+
+ /* check that ADSP shim is enabled */
+ while (tries--) {
+ reg = readl(sdev->bar[BDW_PCI_BAR] + PCI_PMCS)
+ & PCI_PMCS_PS_MASK;
+ if (reg == 0)
+ goto finish;
+
+ msleep(20);
+ }
+
+ return -ENODEV;
+
+finish:
+ /*
+ * select SSP1 19.2MHz base clock, SSP clock 0,
+ * turn off Low Power Clock
+ */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_S1IOCS | SHIM_CSR_SBCS1 |
+ SHIM_CSR_LPCS, 0x0);
+
+	/* stall DSP core, set clk to 192/96 MHz */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
+ SHIM_CSR, SHIM_CSR_STALL |
+ SHIM_CSR_DCS_MASK,
+ SHIM_CSR_STALL |
+ SHIM_CSR_DCS(4));
+
+ /* Set 24MHz MCLK, prevent local clock gating, enable SSP0 clock */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CLKCTL,
+ SHIM_CLKCTL_MASK |
+ SHIM_CLKCTL_DCPLCG |
+ SHIM_CLKCTL_SCOE0,
+ SHIM_CLKCTL_MASK |
+ SHIM_CLKCTL_DCPLCG |
+ SHIM_CLKCTL_SCOE0);
+
+ /* Stall and reset core, set CSR */
+ bdw_reset(sdev);
+
+ /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
+ PCI_VDRTCL2_DCLCGE |
+ PCI_VDRTCL2_DTCGE,
+ PCI_VDRTCL2_DCLCGE |
+ PCI_VDRTCL2_DTCGE);
+
+ usleep_range(50, 55);
+
+ /* switch on audio PLL */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
+ PCI_VDRTCL2_APLLSE_MASK, 0);
+
+	/*
+	 * Set default power gating control and enable power gating control
+	 * for all blocks. That is, the blocks cannot be accessed; each block
+	 * must be enabled before it is accessed.
+	 */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL0,
+ 0xfffffffC, 0x0);
+
+ /* disable DMA finish function for SSP0 & SSP1 */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR2,
+ SHIM_CSR2_SDFD_SSP1,
+ SHIM_CSR2_SDFD_SSP1);
+
+	/* set on-demand mode on engine 0,1 for all channels */
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_HMDC,
+ SHIM_HMDC_HDDA_E0_ALLCH |
+ SHIM_HMDC_HDDA_E1_ALLCH,
+ SHIM_HMDC_HDDA_E0_ALLCH |
+ SHIM_HMDC_HDDA_E1_ALLCH);
+
+ /* Enable Interrupt from both sides */
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_IMRX,
+ (SHIM_IMRX_BUSY | SHIM_IMRX_DONE), 0x0);
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_IMRD,
+ (SHIM_IMRD_DONE | SHIM_IMRD_BUSY |
+ SHIM_IMRD_SSP0 | SHIM_IMRD_DMAC), 0x0);
+
+ /* clear IPC registers */
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCX, 0x0);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCD, 0x0);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, 0x80, 0x6);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, 0xe0, 0x300a);
+
+ return 0;
+}
+
+static void bdw_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
+ /* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* note: variable AR register array is not read */
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+ offset += xoops->arch_hdr.totalsize;
+ sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
+}
+
+static void bdw_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[BDW_STACK_DUMP_SIZE];
+ u32 status, panic, imrx, imrd;
+
+ /* now try generic SOF status messages */
+ status = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);
+ panic = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCX);
+ bdw_get_registers(sdev, &xoops, &panic_info, stack,
+ BDW_STACK_DUMP_SIZE);
+ sof_print_oops_and_stack(sdev, KERN_ERR, status, panic, &xoops,
+ &panic_info, stack, BDW_STACK_DUMP_SIZE);
+
+ /* provide some context for firmware debug */
+ imrx = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IMRX);
+ imrd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IMRD);
+ dev_err(sdev->dev,
+ "error: ipc host -> DSP: pending %s complete %s raw 0x%8.8x\n",
+ (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
+ (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ dev_err(sdev->dev,
+ "error: mask host: pending %s complete %s raw 0x%8.8x\n",
+ (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
+ (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ dev_err(sdev->dev,
+ "error: ipc DSP -> host: pending %s complete %s raw 0x%8.8x\n",
+ (status & SHIM_IPCD_BUSY) ? "yes" : "no",
+ (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ dev_err(sdev->dev,
+ "error: mask DSP: pending %s complete %s raw 0x%8.8x\n",
+ (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
+ (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
+}
+
+/*
+ * IPC Doorbell IRQ handler and thread.
+ */
+
+static irqreturn_t bdw_irq_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 isr;
+ int ret = IRQ_NONE;
+
+ /* Interrupt arrived, check src */
+ isr = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_ISRX);
+ if (isr & (SHIM_ISRX_DONE | SHIM_ISRX_BUSY))
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
+}
+
+static irqreturn_t bdw_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 ipcx, ipcd, imrx;
+
+ imrx = snd_sof_dsp_read64(sdev, BDW_DSP_BAR, SHIM_IMRX);
+ ipcx = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCX);
+
+ /* reply message from DSP */
+ if (ipcx & SHIM_IPCX_DONE &&
+ !(imrx & SHIM_IMRX_DONE)) {
+ /* Mask Done interrupt before return */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
+ SHIM_IMRX, SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+		/*
+		 * Handle an immediate reply from the DSP core. If the message
+		 * is found, the done bit is set in cmd_done, which is called at
+		 * the end of the message processing function; otherwise set it
+		 * here, because the done bit cannot be set in the cmd_done
+		 * function triggered by the message.
+		 */
+ snd_sof_ipc_process_reply(sdev, ipcx);
+
+ bdw_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ }
+
+ ipcd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);
+
+ /* new message from DSP */
+ if (ipcd & SHIM_IPCD_BUSY &&
+ !(imrx & SHIM_IMRX_BUSY)) {
+ /* Mask Busy interrupt before return */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
+ SHIM_IMRX, SHIM_IMRX_BUSY,
+ SHIM_IMRX_BUSY);
+
+ /* Handle messages from DSP Core */
+ if ((ipcd & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(sdev, BDW_PANIC_OFFSET(ipcx) + MBOX_OFFSET,
+ true);
+ } else {
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ bdw_host_done(sdev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * IPC Mailbox IO
+ */
+
+static int bdw_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ /* send the message */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCX, SHIM_IPCX_BUSY);
+
+ return 0;
+}
+
+static int bdw_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+
+static int bdw_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+
+static void bdw_host_done(struct snd_sof_dev *sdev)
+{
+ /* clear BUSY bit and set DONE bit - accept new messages */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IPCD,
+ SHIM_IPCD_BUSY | SHIM_IPCD_DONE,
+ SHIM_IPCD_DONE);
+
+ /* unmask busy interrupt */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY, 0);
+}
+
+static void bdw_dsp_done(struct snd_sof_dev *sdev)
+{
+ /* clear DONE bit - tell DSP we have completed */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IPCX,
+ SHIM_IPCX_DONE, 0);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_DONE, 0);
+}
+
+/*
+ * Probe and remove.
+ */
+static int bdw_probe(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ const struct sof_intel_dsp_desc *chip;
+ struct resource *mmio;
+ u32 base, size;
+ int ret;
+
+ chip = get_chip_info(sdev->pdata);
+ if (!chip) {
+ dev_err(sdev->dev, "error: no such device supported\n");
+ return -EIO;
+ }
+
+ sdev->num_cores = chip->cores_num;
+
+ /* LPE base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_lpe_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get LPE base at idx %d\n",
+ desc->resindex_lpe_base);
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
+ sdev->bar[BDW_DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BDW_DSP_BAR]) {
+ dev_err(sdev->dev,
+ "error: failed to ioremap LPE base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[BDW_DSP_BAR]);
+
+ /* TODO: add offsets */
+ sdev->mmio_bar = BDW_DSP_BAR;
+ sdev->mailbox_bar = BDW_DSP_BAR;
+ sdev->dsp_oops_offset = MBOX_OFFSET;
+
+ /* PCI base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_pcicfg_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get PCI base at idx %d\n",
+ desc->resindex_pcicfg_base);
+ return -ENODEV;
+ }
+
+ dev_dbg(sdev->dev, "PCI base at 0x%x size 0x%x", base, size);
+ sdev->bar[BDW_PCI_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BDW_PCI_BAR]) {
+ dev_err(sdev->dev,
+ "error: failed to ioremap PCI base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "PCI VADDR %p\n", sdev->bar[BDW_PCI_BAR]);
+
+ /* register our IRQ */
+ sdev->ipc_irq = platform_get_irq(pdev, desc->irqindex_host_ipc);
+ if (sdev->ipc_irq < 0)
+ return sdev->ipc_irq;
+
+ dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
+ ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
+ bdw_irq_handler, bdw_irq_thread,
+ IRQF_SHARED, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ return ret;
+ }
+
+ /* enable the DSP SHIM */
+ ret = bdw_set_dsp_D0(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DSP D0\n");
+ return ret;
+ }
+
+ /* DSP DMA can only access low 31 bits of host memory */
+ ret = dma_coerce_mask_and_coherent(sdev->dev, DMA_BIT_MASK(31));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
+ return ret;
+ }
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return ret;
+}
+
+static struct snd_soc_acpi_mach *bdw_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_soc_acpi_mach *mach;
+
+ mach = snd_soc_acpi_find_machine(desc->machines);
+ if (!mach) {
+ dev_warn(sdev->dev, "warning: No matching ASoC machine driver found\n");
+ return NULL;
+ }
+
+ sof_pdata->tplg_filename = mach->sof_tplg_filename;
+ mach->mach_params.acpi_ipc_irq_index = desc->irqindex_host_ipc;
+
+ return mach;
+}
+
+static void bdw_set_mach_params(struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct snd_soc_acpi_mach_params *mach_params;
+
+ mach_params = &mach->mach_params;
+ mach_params->platform = dev_name(sdev->dev);
+ mach_params->num_dai_drivers = desc->ops->num_drv;
+ mach_params->dai_drivers = desc->ops->drv;
+}
+
+/* Broadwell DAIs */
+static struct snd_soc_dai_driver bdw_dai[] = {
+{
+ .name = "ssp0-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp1-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+};
+
+/* broadwell ops */
+static struct snd_sof_dsp_ops sof_bdw_ops = {
+	/* Device init */
+ .probe = bdw_probe,
+
+ /* DSP Core Control */
+ .run = bdw_run,
+ .reset = bdw_reset,
+
+ /* Register IO uses direct mmio */
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* ipc */
+ .send_msg = bdw_send_msg,
+ .get_mailbox_offset = bdw_get_mailbox_offset,
+ .get_window_offset = bdw_get_window_offset,
+
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ /* machine driver */
+ .machine_select = bdw_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = bdw_set_mach_params,
+
+ /* debug */
+ .debug_map = bdw_debugfs,
+ .debug_map_count = ARRAY_SIZE(bdw_debugfs),
+ .dbg_dump = bdw_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_close = sof_stream_pcm_close,
+
+	/* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* DAI drivers */
+ .drv = bdw_dai,
+ .num_drv = ARRAY_SIZE(bdw_dai),
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
+
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+};
+
+static const struct sof_intel_dsp_desc bdw_chip_info = {
+ .cores_num = 1,
+ .host_managed_cores_mask = 1,
+ .hw_ip_version = SOF_INTEL_BROADWELL,
+};
+
+static const struct sof_dev_desc sof_acpi_broadwell_desc = {
+ .machines = snd_soc_acpi_intel_broadwell_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = 0,
+ .chip_info = &bdw_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-bdw.ri",
+ },
+ .nocodec_tplg_filename = "sof-bdw-nocodec.tplg",
+ .ops = &sof_bdw_ops,
+};
+
+static const struct acpi_device_id sof_broadwell_match[] = {
+ { "INT3438", (unsigned long)&sof_acpi_broadwell_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, sof_broadwell_match);
+
+static int sof_broadwell_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct acpi_device_id *id;
+ const struct sof_dev_desc *desc;
+ int ret;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return -ENODEV;
+
+ ret = snd_intel_acpi_dsp_driver_probe(dev, id->id);
+ if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+ dev_dbg(dev, "SOF ACPI driver not selected, aborting probe\n");
+ return -ENODEV;
+ }
+
+ desc = (const struct sof_dev_desc *)id->driver_data;
+ return sof_acpi_probe(pdev, desc);
+}
+
+/* acpi_driver definition */
+static struct platform_driver snd_sof_acpi_intel_bdw_driver = {
+ .probe = sof_broadwell_probe,
+ .remove = sof_acpi_remove,
+ .driver = {
+ .name = "sof-audio-acpi-intel-bdw",
+ .pm = &sof_acpi_pm,
+ .acpi_match_table = sof_broadwell_match,
+ },
+};
+module_platform_driver(snd_sof_acpi_intel_bdw_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_IMPORT_NS(SND_SOC_SOF_ACPI_DEV);
diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c
new file mode 100644
index 000000000..faf223b38
--- /dev/null
+++ b/sound/soc/sof/intel/byt.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Baytrail, Braswell and Cherrytrail.
+ */
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/intel-dsp-config.h>
+#include "../ops.h"
+#include "atom.h"
+#include "shim.h"
+#include "../sof-acpi-dev.h"
+#include "../sof-audio.h"
+#include "../../intel/common/soc-intel-quirks.h"
+
+static const struct snd_sof_debugfs_map byt_debugfs[] = {
+ {"dmac0", DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp2", DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", DSP_BAR, SHIM_OFFSET, SHIM_SIZE_BYT,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static const struct snd_sof_debugfs_map cht_debugfs[] = {
+ {"dmac0", DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac2", DSP_BAR, DMAC2_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp2", DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp3", DSP_BAR, SSP3_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp4", DSP_BAR, SSP4_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp5", DSP_BAR, SSP5_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", DSP_BAR, SHIM_OFFSET, SHIM_SIZE_CHT,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void byt_reset_dsp_disable_int(struct snd_sof_dev *sdev)
+{
+ /* Disable Interrupt from both sides */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRX, 0x3, 0x3);
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRD, 0x3, 0x3);
+
+ /* Put DSP into reset, set reset vector */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL);
+}
+
+static int byt_suspend(struct snd_sof_dev *sdev, u32 target_state)
+{
+ byt_reset_dsp_disable_int(sdev);
+
+ return 0;
+}
+
+static int byt_resume(struct snd_sof_dev *sdev)
+{
+ /* enable BUSY and disable DONE Interrupt by default */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ return 0;
+}
+
+static int byt_remove(struct snd_sof_dev *sdev)
+{
+ byt_reset_dsp_disable_int(sdev);
+
+ return 0;
+}
+
+static int byt_acpi_probe(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ const struct sof_intel_dsp_desc *chip;
+ struct resource *mmio;
+ u32 base, size;
+ int ret;
+
+ chip = get_chip_info(sdev->pdata);
+ if (!chip) {
+ dev_err(sdev->dev, "error: no such device supported\n");
+ return -EIO;
+ }
+
+ sdev->num_cores = chip->cores_num;
+
+ /* DSP DMA can only access low 31 bits of host memory */
+ ret = dma_coerce_mask_and_coherent(sdev->dev, DMA_BIT_MASK(31));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
+ return ret;
+ }
+
+ /* LPE base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_lpe_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get LPE base at idx %d\n",
+ desc->resindex_lpe_base);
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
+ sdev->bar[DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[DSP_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap LPE base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[DSP_BAR]);
+
+ /* TODO: add offsets */
+ sdev->mmio_bar = DSP_BAR;
+ sdev->mailbox_bar = DSP_BAR;
+
+ /* IMR base - optional */
+ if (desc->resindex_imr_base == -1)
+ goto irq;
+
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_imr_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get IMR base at idx %d\n",
+ desc->resindex_imr_base);
+ return -ENODEV;
+ }
+
+ /* some BIOSes don't map IMR */
+ if (base == 0x55aa55aa || base == 0x0) {
+ dev_info(sdev->dev, "IMR not set by BIOS. Ignoring\n");
+ goto irq;
+ }
+
+ dev_dbg(sdev->dev, "IMR base at 0x%x size 0x%x", base, size);
+ sdev->bar[IMR_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[IMR_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap IMR base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "IMR VADDR %p\n", sdev->bar[IMR_BAR]);
+
+irq:
+ /* register our IRQ */
+ sdev->ipc_irq = platform_get_irq(pdev, desc->irqindex_host_ipc);
+ if (sdev->ipc_irq < 0)
+ return sdev->ipc_irq;
+
+ dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
+ ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
+ atom_irq_handler, atom_irq_thread,
+ IRQF_SHARED, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ return ret;
+ }
+
+ /* enable BUSY and disable DONE Interrupt by default */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return ret;
+}
+
+/* baytrail ops */
+static struct snd_sof_dsp_ops sof_byt_ops = {
+ /* device init */
+ .probe = byt_acpi_probe,
+ .remove = byt_remove,
+
+ /* DSP core boot / reset */
+ .run = atom_run,
+ .reset = atom_reset,
+
+ /* Register IO uses direct mmio */
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* doorbell */
+ .irq_handler = atom_irq_handler,
+ .irq_thread = atom_irq_thread,
+
+ /* ipc */
+ .send_msg = atom_send_msg,
+ .get_mailbox_offset = atom_get_mailbox_offset,
+ .get_window_offset = atom_get_window_offset,
+
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ /* machine driver */
+ .machine_select = atom_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = atom_set_mach_params,
+
+ /* debug */
+ .debug_map = byt_debugfs,
+ .debug_map_count = ARRAY_SIZE(byt_debugfs),
+ .dbg_dump = atom_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_close = sof_stream_pcm_close,
+
+	/* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* PM */
+ .suspend = byt_suspend,
+ .resume = byt_resume,
+
+ /* DAI drivers */
+ .drv = atom_dai,
+	.num_drv = 3, /* we have only 3 SSPs on BYT */
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
+
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+};
+
+static const struct sof_intel_dsp_desc byt_chip_info = {
+ .cores_num = 1,
+ .host_managed_cores_mask = 1,
+ .hw_ip_version = SOF_INTEL_BAYTRAIL,
+};
+
+/* cherrytrail and braswell ops */
+static struct snd_sof_dsp_ops sof_cht_ops = {
+ /* device init */
+ .probe = byt_acpi_probe,
+ .remove = byt_remove,
+
+ /* DSP core boot / reset */
+ .run = atom_run,
+ .reset = atom_reset,
+
+ /* Register IO uses direct mmio */
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* doorbell */
+ .irq_handler = atom_irq_handler,
+ .irq_thread = atom_irq_thread,
+
+ /* ipc */
+ .send_msg = atom_send_msg,
+ .get_mailbox_offset = atom_get_mailbox_offset,
+ .get_window_offset = atom_get_window_offset,
+
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ /* machine driver */
+ .machine_select = atom_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = atom_set_mach_params,
+
+ /* debug */
+ .debug_map = cht_debugfs,
+ .debug_map_count = ARRAY_SIZE(cht_debugfs),
+ .dbg_dump = atom_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_close = sof_stream_pcm_close,
+
+	/* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* PM */
+ .suspend = byt_suspend,
+ .resume = byt_resume,
+
+ /* DAI drivers */
+ .drv = atom_dai,
+ /* all 6 SSPs may be available for cherrytrail */
+ .num_drv = 6,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
+
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+};
+
+static const struct sof_intel_dsp_desc cht_chip_info = {
+ .cores_num = 1,
+ .host_managed_cores_mask = 1,
+ .hw_ip_version = SOF_INTEL_BAYTRAIL,
+};
+
+/* BYTCR uses different IRQ index */
+static const struct sof_dev_desc sof_acpi_baytrailcr_desc = {
+ .machines = snd_soc_acpi_intel_baytrail_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = 2,
+ .irqindex_host_ipc = 0,
+ .chip_info = &byt_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-byt.ri",
+ },
+ .nocodec_tplg_filename = "sof-byt-nocodec.tplg",
+ .ops = &sof_byt_ops,
+};
+
+static const struct sof_dev_desc sof_acpi_baytrail_desc = {
+ .machines = snd_soc_acpi_intel_baytrail_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = 2,
+ .irqindex_host_ipc = 5,
+ .chip_info = &byt_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-byt.ri",
+ },
+ .nocodec_tplg_filename = "sof-byt-nocodec.tplg",
+ .ops = &sof_byt_ops,
+};
+
+static const struct sof_dev_desc sof_acpi_cherrytrail_desc = {
+ .machines = snd_soc_acpi_intel_cherrytrail_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = 2,
+ .irqindex_host_ipc = 5,
+ .chip_info = &cht_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-cht.ri",
+ },
+ .nocodec_tplg_filename = "sof-cht-nocodec.tplg",
+ .ops = &sof_cht_ops,
+};
+
+static const struct acpi_device_id sof_baytrail_match[] = {
+ { "80860F28", (unsigned long)&sof_acpi_baytrail_desc },
+ { "808622A8", (unsigned long)&sof_acpi_cherrytrail_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, sof_baytrail_match);
+
+static int sof_baytrail_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct sof_dev_desc *desc;
+ const struct acpi_device_id *id;
+ int ret;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return -ENODEV;
+
+ ret = snd_intel_acpi_dsp_driver_probe(dev, id->id);
+ if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+ dev_dbg(dev, "SOF ACPI driver not selected, aborting probe\n");
+ return -ENODEV;
+ }
+
+ desc = (const struct sof_dev_desc *)id->driver_data;
+ if (desc == &sof_acpi_baytrail_desc && soc_intel_is_byt_cr(pdev))
+ desc = &sof_acpi_baytrailcr_desc;
+
+ return sof_acpi_probe(pdev, desc);
+}
+
+/* acpi_driver definition */
+static struct platform_driver snd_sof_acpi_intel_byt_driver = {
+ .probe = sof_baytrail_probe,
+ .remove = sof_acpi_remove,
+ .driver = {
+ .name = "sof-audio-acpi-intel-byt",
+ .pm = &sof_acpi_pm,
+ .acpi_match_table = sof_baytrail_match,
+ },
+};
+module_platform_driver(snd_sof_acpi_intel_byt_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_IMPORT_NS(SND_SOC_SOF_ACPI_DEV);
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
new file mode 100644
index 000000000..c6fbf4285
--- /dev/null
+++ b/sound/soc/sof/intel/cnl.c
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Cannonlake.
+ */
+
+#include <sound/sof/ext_manifest4.h>
+#include <sound/sof/ipc4/header.h>
+#include <trace/events/sof_intel.h>
+#include "../ipc4-priv.h"
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+#include "../sof-audio.h"
+
+static const struct snd_sof_debugfs_map cnl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void cnl_ipc_host_done(struct snd_sof_dev *sdev);
+static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev);
+
+irqreturn_t cnl_ipc4_irq_thread(int irq, void *context)
+{
+ struct sof_ipc4_msg notification_data = {{ 0 }};
+ struct snd_sof_dev *sdev = context;
+ bool ack_received = false;
+ bool ipc_irq = false;
+ u32 hipcida, hipctdr;
+
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
+ if (hipcida & CNL_DSP_REG_HIPCIDA_DONE) {
+ /* DSP received the message */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCCTL,
+ CNL_DSP_REG_HIPCCTL_DONE, 0);
+ cnl_ipc_dsp_done(sdev);
+
+ ipc_irq = true;
+ ack_received = true;
+ }
+
+ if (hipctdr & CNL_DSP_REG_HIPCTDR_BUSY) {
+ /* Message from DSP (reply or notification) */
+ u32 hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCTDD);
+ u32 primary = hipctdr & CNL_DSP_REG_HIPCTDR_MSG_MASK;
+ u32 extension = hipctdd & CNL_DSP_REG_HIPCTDD_MSG_MASK;
+
+ if (primary & SOF_IPC4_MSG_DIR_MASK) {
+ /* Reply received */
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;
+
+ data->primary = primary;
+ data->extension = extension;
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+ snd_sof_ipc_get_reply(sdev);
+ cnl_ipc_host_done(sdev);
+ snd_sof_ipc_reply(sdev, data->primary);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev,
+ "IPC reply before FW_READY: %#x|%#x\n",
+ primary, extension);
+ }
+ } else {
+ /* Notification received */
+ notification_data.primary = primary;
+ notification_data.extension = extension;
+
+ sdev->ipc->msg.rx_data = &notification_data;
+ snd_sof_ipc_msgs_rx(sdev);
+ sdev->ipc->msg.rx_data = NULL;
+
+ /* Let DSP know that we have finished processing the message */
+ cnl_ipc_host_done(sdev);
+ }
+
+ ipc_irq = true;
+ }
+
+ if (!ipc_irq)
+ /* This interrupt is not shared so no need to return IRQ_NONE. */
+ dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");
+
+ if (ack_received) {
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ if (hdev->delayed_ipc_tx_msg)
+ cnl_ipc4_send_msg(sdev, hdev->delayed_ipc_tx_msg);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 hipci;
+ u32 hipcida;
+ u32 hipctdr;
+ u32 hipctdd;
+ u32 msg;
+ u32 msg_ext;
+ bool ipc_irq = false;
+
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
+ hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDD);
+ hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR);
+
+ /* reply message from DSP */
+ if (hipcida & CNL_DSP_REG_HIPCIDA_DONE) {
+ msg_ext = hipci & CNL_DSP_REG_HIPCIDR_MSG_MASK;
+ msg = hipcida & CNL_DSP_REG_HIPCIDA_MSG_MASK;
+
+ trace_sof_intel_ipc_firmware_response(sdev, msg, msg_ext);
+
+ /* mask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCCTL,
+ CNL_DSP_REG_HIPCCTL_DONE, 0);
+
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ spin_lock_irq(&sdev->ipc_lock);
+
+ /* handle immediate reply from DSP core */
+ hda_dsp_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, msg);
+
+ cnl_ipc_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev, "IPC reply before FW_READY: %#x\n",
+ msg);
+ }
+
+ ipc_irq = true;
+ }
+
+ /* new message from DSP */
+ if (hipctdr & CNL_DSP_REG_HIPCTDR_BUSY) {
+ msg = hipctdr & CNL_DSP_REG_HIPCTDR_MSG_MASK;
+ msg_ext = hipctdd & CNL_DSP_REG_HIPCTDD_MSG_MASK;
+
+ trace_sof_intel_ipc_firmware_initiated(sdev, msg, msg_ext);
+
+ /* handle messages from DSP */
+ if ((hipctdr & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ bool non_recoverable = true;
+
+			/*
+			 * This is a PANIC message!
+			 *
+			 * If it arrives during firmware boot and this is not the
+			 * last boot attempt, change non_recoverable to false, as
+			 * the DSP might be able to boot in the next iteration(s).
+			 */
+ if (sdev->fw_state == SOF_FW_BOOT_IN_PROGRESS &&
+ hda->boot_iteration < HDA_FW_BOOT_ATTEMPTS)
+ non_recoverable = false;
+
+ snd_sof_dsp_panic(sdev, HDA_DSP_PANIC_OFFSET(msg_ext),
+ non_recoverable);
+ } else {
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ cnl_ipc_host_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ if (!ipc_irq) {
+ /*
+ * This interrupt is not shared so no need to return IRQ_NONE.
+ */
+ dev_dbg_ratelimited(sdev->dev,
+ "nothing to do in IPC IRQ thread\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void cnl_ipc_host_done(struct snd_sof_dev *sdev)
+{
+	/*
+	 * Clear the busy interrupt to tell the DSP controller that this
+	 * interrupt has been accepted, so it is not triggered again.
+	 */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCTDR,
+ CNL_DSP_REG_HIPCTDR_BUSY,
+ CNL_DSP_REG_HIPCTDR_BUSY);
+	/*
+	 * Set the done bit to acknowledge to the DSP that the message has
+	 * been processed and to send the reply message to the DSP.
+	 */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCTDA,
+ CNL_DSP_REG_HIPCTDA_DONE,
+ CNL_DSP_REG_HIPCTDA_DONE);
+}
+
+static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev)
+{
+	/*
+	 * Set the DONE bit to tell the DSP that we have received and
+	 * processed its reply, so it does not send another reply to the host.
+	 */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCIDA,
+ CNL_DSP_REG_HIPCIDA_DONE,
+ CNL_DSP_REG_HIPCIDA_DONE);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCCTL,
+ CNL_DSP_REG_HIPCCTL_DONE,
+ CNL_DSP_REG_HIPCCTL_DONE);
+}
+
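+/*
+ * Compress an IPC into the register-only "compact" form: the command goes
+ * into the primary (DR) register and the payload into the extended data
+ * (DD) register. Only the PM_GATE IPC is handled this way.
+ */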
+static bool cnl_compact_ipc_compress(struct snd_sof_ipc_msg *msg,
+ u32 *dr, u32 *dd)
+{
+ struct sof_ipc_pm_gate *pm_gate = msg->msg_data;
+
+ if (pm_gate->hdr.cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
+ /* send the compact message via the primary register */
+ *dr = HDA_IPC_MSG_COMPACT | HDA_IPC_PM_GATE;
+
+ /* send payload via the extended data register */
+ *dd = pm_gate->flags;
+
+ return true;
+ }
+
+ return false;
+}
+
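+/*
+ * If the previous message has not been acked yet, park the new message in
+ * delayed_ipc_tx_msg; it is sent from the IRQ thread once the DONE
+ * interrupt for the pending message arrives.
+ */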
+int cnl_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ struct sof_ipc4_msg *msg_data = msg->msg_data;
+
+ if (hda_ipc4_tx_is_busy(sdev)) {
+ hdev->delayed_ipc_tx_msg = msg;
+ return 0;
+ }
+
+ hdev->delayed_ipc_tx_msg = NULL;
+
+ /* send the message via mailbox */
+ if (msg_data->data_size)
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
+ msg_data->data_size);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD, msg_data->extension);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ msg_data->primary | CNL_DSP_REG_HIPCIDR_BUSY);
+
+ hda_dsp_ipc4_schedule_d0i3_work(hdev, msg);
+
+ return 0;
+}
+
+int cnl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ struct sof_ipc_cmd_hdr *hdr;
+ u32 dr = 0;
+ u32 dd = 0;
+
+	/*
+	 * Currently the only compact IPC supported is the PM_GATE IPC,
+	 * which is used for transitioning the DSP between the D0I0 and
+	 * D0I3 states, and it is sent only during the set_power_state()
+	 * op. Therefore, there will never be a case where a compact IPC
+	 * results in the DSP exiting D0I3 without the host and FW being
+	 * in sync.
+	 */
+ if (cnl_compact_ipc_compress(msg, &dr, &dd)) {
+ /* send the message via IPC registers */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD,
+ dd);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ CNL_DSP_REG_HIPCIDR_BUSY | dr);
+ return 0;
+ }
+
+ /* send the message via mailbox */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ CNL_DSP_REG_HIPCIDR_BUSY);
+
+ hdr = msg->msg_data;
+
+ /*
+ * Use mod_delayed_work() to schedule the delayed work
+ * to avoid scheduling multiple workqueue items when
+	 * IPCs are sent at a high rate. mod_delayed_work()
+ * modifies the timer if the work is pending.
+ * Also, a new delayed work should not be queued after the
+ * CTX_SAVE IPC, which is sent before the DSP enters D3.
+ */
+ if (hdr->cmd != (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE))
+ mod_delayed_work(system_wq, &hdev->d0i3_work,
+ msecs_to_jiffies(SOF_HDA_D0I3_WORK_DELAY_MS));
+
+ return 0;
+}
+
+void cnl_ipc_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipcctl;
+ u32 hipcida;
+ u32 hipctdr;
+
+ hda_ipc_irq_dump(sdev);
+
+ /* read IPC status */
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCCTL);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
+
+ /* dump the IPC regs */
+ /* TODO: parse the raw msg */
+ dev_err(sdev->dev,
+ "error: host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
+ hipcida, hipctdr, hipcctl);
+}
+
+void cnl_ipc4_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipcidr, hipcidd, hipcida, hipctdr, hipctdd, hipctda, hipcctl;
+
+ hda_ipc_irq_dump(sdev);
+
+ hipcidr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR);
+ hipcidd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD);
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
+ hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDD);
+ hipctda = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDA);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCCTL);
+
+ /* dump the IPC regs */
+ /* TODO: parse the raw msg */
+ dev_err(sdev->dev,
+ "Host IPC initiator: %#x|%#x|%#x, target: %#x|%#x|%#x, ctl: %#x\n",
+ hipcidr, hipcidd, hipcida, hipctdr, hipctdd, hipctda, hipcctl);
+}
+
+/* cannonlake ops */
+struct snd_sof_dsp_ops sof_cnl_ops;
+EXPORT_SYMBOL_NS(sof_cnl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int sof_cnl_ops_init(struct snd_sof_dev *sdev)
+{
+ /* common defaults */
+ memcpy(&sof_cnl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ /* probe/remove/shutdown */
+ sof_cnl_ops.shutdown = hda_dsp_shutdown;
+
+ /* ipc */
+ if (sdev->pdata->ipc_type == SOF_IPC) {
+ /* doorbell */
+ sof_cnl_ops.irq_thread = cnl_ipc_irq_thread;
+
+ /* ipc */
+ sof_cnl_ops.send_msg = cnl_ipc_send_msg;
+
+ /* debug */
+ sof_cnl_ops.ipc_dump = cnl_ipc_dump;
+
+ sof_cnl_ops.set_power_state = hda_dsp_set_power_state_ipc3;
+ }
+
+ if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
+
+ ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_1_8;
+
+ /* External library loading support */
+ ipc4_data->load_library = hda_dsp_ipc4_load_library;
+
+ /* doorbell */
+ sof_cnl_ops.irq_thread = cnl_ipc4_irq_thread;
+
+ /* ipc */
+ sof_cnl_ops.send_msg = cnl_ipc4_send_msg;
+
+ /* debug */
+ sof_cnl_ops.ipc_dump = cnl_ipc4_dump;
+
+ sof_cnl_ops.set_power_state = hda_dsp_set_power_state_ipc4;
+ }
+
+ /* set DAI driver ops */
+ hda_set_dai_drv_ops(sdev, &sof_cnl_ops);
+
+ /* debug */
+ sof_cnl_ops.debug_map = cnl_dsp_debugfs;
+ sof_cnl_ops.debug_map_count = ARRAY_SIZE(cnl_dsp_debugfs);
+
+ /* pre/post fw run */
+ sof_cnl_ops.post_fw_run = hda_dsp_post_fw_run;
+
+ /* firmware run */
+ sof_cnl_ops.run = hda_dsp_cl_boot_firmware;
+
+ /* dsp core get/put */
+ sof_cnl_ops.core_get = hda_dsp_core_get;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(sof_cnl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc cnl_chip_info = {
+ /* Cannonlake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(3, 0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = CNL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .d0i3_offset = SOF_HDA_VS_D0I3C,
+ .read_sdw_lcount = hda_sdw_check_lcount_common,
+ .enable_sdw_irq = hda_common_enable_sdw_irq,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_1_8,
+};
+EXPORT_SYMBOL_NS(cnl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+/*
+ * JasperLake is technically derived from IceLake and should be
+ * described in icl.c. However, since JasperLake was designed with
+ * two cores, it cannot support the IceLake-specific power-up sequences,
+ * which rely on core 3. To simplify, JasperLake uses the CannonLake ops
+ * and is described in cnl.c.
+ */
+const struct sof_intel_dsp_desc jsl_chip_info = {
+ /* Jasperlake */
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(1, 0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = ICL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .d0i3_offset = SOF_HDA_VS_D0I3C,
+ .read_sdw_lcount = hda_sdw_check_lcount_common,
+ .enable_sdw_irq = hda_common_enable_sdw_irq,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_2_0,
+};
+EXPORT_SYMBOL_NS(jsl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/ext_manifest.h b/sound/soc/sof/intel/ext_manifest.h
new file mode 100644
index 000000000..2dfae9285
--- /dev/null
+++ b/sound/soc/sof/intel/ext_manifest.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2020 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * The Intel extended manifest is an extra place to store Intel cAVS-specific
+ * metadata about the firmware; for example, the LPRO/HPRO configuration is
+ * Intel cAVS specific. This part of the output binary is not signed.
+ */
+
+#ifndef __INTEL_CAVS_EXT_MANIFEST_H__
+#define __INTEL_CAVS_EXT_MANIFEST_H__
+
+#include <sound/sof/ext_manifest.h>
+
+/* EXT_MAN_ELEM_PLATFORM_CONFIG_DATA element identifiers */
+enum sof_cavs_config_elem_type {
+ SOF_EXT_MAN_CAVS_CONFIG_EMPTY = 0,
+ SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO = 1,
+ SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE = 2,
+ SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE = 3,
+};
+
+/* EXT_MAN_ELEM_PLATFORM_CONFIG_DATA elements */
+struct sof_ext_man_cavs_config_data {
+ struct sof_ext_man_elem_header hdr;
+
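+ /* variable-length list of config elements; see sof_cavs_config_elem_type above */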
+ struct sof_config_elem elems[];
+} __packed;
+
+#endif /* __INTEL_CAVS_EXT_MANIFEST_H__ */
diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c
new file mode 100644
index 000000000..fc63085d2
--- /dev/null
+++ b/sound/soc/sof/intel/hda-bus.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Keyon Jie <yang.jie@linux.intel.com>
+
+#include <linux/io.h>
+#include <sound/hdaudio.h>
+#include <sound/hda_i915.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_register.h>
+#include "../sof-priv.h"
+#include "hda.h"
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+#include "../../codecs/hdac_hda.h"
+#define sof_hda_ext_ops snd_soc_hdac_hda_get_ops()
+
+static void update_codec_wake_enable(struct hdac_bus *bus, unsigned int addr, bool link_power)
+{
+ unsigned int mask = snd_hdac_chip_readw(bus, WAKEEN);
+
+ if (link_power)
+ mask &= ~BIT(addr);
+ else
+ mask |= BIT(addr);
+
+ snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
+}
+
+static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable)
+{
+ struct hdac_bus *bus = codec->bus;
+ bool oldstate = test_bit(codec->addr, &bus->codec_powered);
+
+ snd_hdac_ext_bus_link_power(codec, enable);
+
+ if (enable == oldstate)
+ return;
+
+ /*
+ * Both the codec driver and the controller can hold references to
+ * display power. To avoid unnecessary power-up/down cycles, the
+ * controller doesn't immediately release its reference.
+ *
+ * If the codec driver powers down the link, release
+ * the controller reference as well.
+ */
+ if (codec->addr == HDA_IDISP_ADDR && !enable)
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
+
+ /* WAKEEN needs to be set for disabled links */
+ update_codec_wake_enable(bus, codec->addr, enable);
+}
+
+static const struct hdac_bus_ops bus_core_ops = {
+ .command = snd_hdac_bus_send_cmd,
+ .get_response = snd_hdac_bus_get_response,
+ .link_power = sof_hda_bus_link_power,
+};
+#endif
+
+/*
+ * This can be used both with and without HDA link support.
+ */
+void sof_hda_bus_init(struct snd_sof_dev *sdev, struct device *dev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_LINK)
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+ snd_hdac_ext_bus_init(bus, dev, &bus_core_ops, sof_hda_ext_ops);
+#else
+ snd_hdac_ext_bus_init(bus, dev, NULL, NULL);
+#endif
+#else
+
+ memset(bus, 0, sizeof(*bus));
+ bus->dev = dev;
+
+ INIT_LIST_HEAD(&bus->stream_list);
+
+ bus->irq = -1;
+
+ /*
+ * There is only one HDA bus at the moment; keep the index as 0.
+ * This needs to be fixed when more than one HDA bus is supported.
+ */
+ bus->idx = 0;
+
+ spin_lock_init(&bus->reg_lock);
+#endif /* CONFIG_SND_SOC_SOF_HDA_LINK */
+}
+
+void sof_hda_bus_exit(struct snd_sof_dev *sdev)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_LINK)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ snd_hdac_ext_bus_exit(bus);
+#endif
+}
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
new file mode 100644
index 000000000..328d7c227
--- /dev/null
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Keyon Jie <yang.jie@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_i915.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "hda.h"
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+#include "../../codecs/hdac_hda.h"
+
+#define CODEC_PROBE_RETRIES 3
+
+#define IDISP_VID_INTEL 0x80860000
+
+static int hda_codec_mask = -1;
+module_param_named(codec_mask, hda_codec_mask, int, 0444);
+MODULE_PARM_DESC(codec_mask, "SOF HDA codec mask for probing");
+
+/* load the legacy HDA codec driver */
+static int request_codec_module(struct hda_codec *codec)
+{
+#ifdef MODULE
+ char alias[MODULE_NAME_LEN];
+ const char *mod = NULL;
+
+ switch (codec->probe_id) {
+ case HDA_CODEC_ID_GENERIC:
+#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
+ mod = "snd-hda-codec-generic";
+#endif
+ break;
+ default:
+ snd_hdac_codec_modalias(&codec->core, alias, sizeof(alias));
+ mod = alias;
+ break;
+ }
+
+ if (mod) {
+ dev_dbg(&codec->core.dev, "loading codec module: %s\n", mod);
+ request_module(mod);
+ }
+#endif /* MODULE */
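+ /* device_attach() returns 1 if a driver was bound, 0 if no matching driver was found, or a negative error code */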
+ return device_attach(hda_codec_dev(codec));
+}
+
+static int hda_codec_load_module(struct hda_codec *codec)
+{
+ int ret;
+
+ ret = snd_hdac_device_register(&codec->core);
+ if (ret) {
+ dev_err(&codec->core.dev, "failed to register hdac device\n");
+ put_device(&codec->core.dev);
+ return ret;
+ }
+
+ ret = request_codec_module(codec);
+ if (ret <= 0) {
+ codec->probe_id = HDA_CODEC_ID_GENERIC;
+ ret = request_codec_module(codec);
+ }
+
+ return ret;
+}
+
+/* enable controller wake up event for all codecs with jack connectors */
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ struct hda_bus *hbus = sof_to_hbus(sdev);
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hda_codec *codec;
+ unsigned int mask = 0;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return;
+
+ if (enable) {
+ list_for_each_codec(codec, hbus)
+ if (codec->jacktbl.used)
+ mask |= BIT(codec->core.addr);
+ }
+
+ snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_jack_wake_enable, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+/* check jack status after resuming from suspend mode */
+void hda_codec_jack_check(struct snd_sof_dev *sdev)
+{
+ struct hda_bus *hbus = sof_to_hbus(sdev);
+ struct hda_codec *codec;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return;
+
+ list_for_each_codec(codec, hbus)
+ /*
+ * Wake up all jack-detecting codecs regardless of whether an event
+ * has been recorded in STATESTS
+ */
+ if (codec->jacktbl.used)
+ pm_request_resume(&codec->core.dev);
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_jack_check, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+#if IS_ENABLED(CONFIG_SND_HDA_GENERIC)
+#define is_generic_config(bus) \
+ ((bus)->modelname && !strcmp((bus)->modelname, "generic"))
+#else
+#define is_generic_config(x) 0
+#endif
+
+static struct hda_codec *hda_codec_device_init(struct hdac_bus *bus, int addr, int type)
+{
+ struct hda_codec *codec;
+
+ codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "ehdaudio%dD%d", bus->idx, addr);
+ if (IS_ERR(codec)) {
+ dev_err(bus->dev, "device init failed for hdac device\n");
+ return codec;
+ }
+
+ codec->core.type = type;
+
+ return codec;
+}
+
+/* probe individual codec */
+static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
+{
+ struct hdac_hda_priv *hda_priv;
+ struct hda_bus *hbus = sof_to_hbus(sdev);
+ struct hda_codec *codec;
+ u32 hda_cmd = (address << 28) | (AC_NODE_ROOT << 20) |
+ (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
+ u32 resp = -1;
+ int ret, retry = 0;
+
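+ /* read the vendor ID parameter of the root node to verify that a codec responds at this address */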
+ do {
+ mutex_lock(&hbus->core.cmd_mutex);
+ snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
+ snd_hdac_bus_get_response(&hbus->core, address, &resp);
+ mutex_unlock(&hbus->core.cmd_mutex);
+ } while (resp == -1 && retry++ < CODEC_PROBE_RETRIES);
+
+ if (resp == -1)
+ return -EIO;
+ dev_dbg(sdev->dev, "HDA codec #%d probed OK: response: %x\n",
+ address, resp);
+
+ hda_priv = devm_kzalloc(sdev->dev, sizeof(*hda_priv), GFP_KERNEL);
+ if (!hda_priv)
+ return -ENOMEM;
+
+ codec = hda_codec_device_init(&hbus->core, address, HDA_DEV_LEGACY);
+ ret = PTR_ERR_OR_ZERO(codec);
+ if (ret < 0)
+ return ret;
+
+ hda_priv->codec = codec;
+ dev_set_drvdata(&codec->core.dev, hda_priv);
+
+ if ((resp & 0xFFFF0000) == IDISP_VID_INTEL) {
+ if (!hbus->core.audio_component) {
+ dev_dbg(sdev->dev,
+ "iDisp hw present but no driver\n");
+ ret = -ENOENT;
+ goto out;
+ }
+ hda_priv->need_display_power = true;
+ }
+
+ if (is_generic_config(hbus))
+ codec->probe_id = HDA_CODEC_ID_GENERIC;
+ else
+ codec->probe_id = 0;
+
+ ret = hda_codec_load_module(codec);
+ /*
+ * handle ret==0 (no driver bound) as an error, but pass
+ * other return codes without modification
+ */
+ if (ret == 0)
+ ret = -ENOENT;
+
+out:
+ if (ret < 0) {
+ snd_hdac_device_unregister(&codec->core);
+ put_device(&codec->core.dev);
+ }
+
+ return ret;
+}
+
+/* Codec initialization */
+void hda_codec_probe_bus(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int i, ret;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return;
+
+ /* probe codecs in available slots */
+ for (i = 0; i < HDA_MAX_CODECS; i++) {
+
+ if (!(bus->codec_mask & (1 << i)))
+ continue;
+
+ ret = hda_codec_probe(sdev, i);
+ if (ret < 0) {
+ dev_warn(bus->dev, "codec #%d probe error, ret: %d\n",
+ i, ret);
+ bus->codec_mask &= ~BIT(i);
+ }
+ }
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_probe_bus, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+void hda_codec_check_for_state_change(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ unsigned int codec_mask;
+
+ codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+ if (codec_mask) {
+ hda_codec_jack_check(sdev);
+ snd_hdac_chip_writew(bus, STATESTS, codec_mask);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_check_for_state_change, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+void hda_codec_detect_mask(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return;
+
+ /* Accept unsolicited responses */
+ snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, AZX_GCTL_UNSOL);
+
+ /* detect codecs */
+ if (!bus->codec_mask) {
+ bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+ dev_dbg(bus->dev, "codec_mask = 0x%lx\n", bus->codec_mask);
+ }
+
+ if (hda_codec_mask != -1) {
+ bus->codec_mask &= hda_codec_mask;
+ dev_dbg(bus->dev, "filtered codec_mask = 0x%lx\n",
+ bus->codec_mask);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_detect_mask, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+void hda_codec_init_cmd_io(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return;
+
+ /* initialize the codec command I/O */
+ snd_hdac_bus_init_cmd_io(bus);
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_init_cmd_io, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+void hda_codec_resume_cmd_io(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return;
+
+ /* set up CORB/RIRB buffers if they were on before suspend */
+ if (bus->cmd_dma_state)
+ snd_hdac_bus_init_cmd_io(bus);
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_resume_cmd_io, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+void hda_codec_stop_cmd_io(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return;
+
+ /* stop the codec command I/O */
+ snd_hdac_bus_stop_cmd_io(bus);
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_stop_cmd_io, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+void hda_codec_suspend_cmd_io(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return;
+
+ /* stop the CORB/RIRB DMA if it is on */
+ if (bus->cmd_dma_state)
+ snd_hdac_bus_stop_cmd_io(bus);
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_suspend_cmd_io, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+void hda_codec_rirb_status_clear(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return;
+
+ /* clear rirb status */
+ snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_rirb_status_clear, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+void hda_codec_set_codec_wakeup(struct snd_sof_dev *sdev, bool status)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return;
+
+ snd_hdac_set_codec_wakeup(bus, status);
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_set_codec_wakeup, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+bool hda_codec_check_rirb_status(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ bool active = false;
+ u32 rirb_status;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return false;
+
+ rirb_status = snd_hdac_chip_readb(bus, RIRBSTS);
+ if (rirb_status & RIRB_INT_MASK) {
+ /*
+ * Clearing the interrupt status here ensures
+ * that no interrupt gets masked after the RIRB
+ * write pointer is read in snd_hdac_bus_update_rirb.
+ */
+ snd_hdac_chip_writeb(bus, RIRBSTS,
+ RIRB_INT_MASK);
+ active = true;
+ if (rirb_status & RIRB_INT_RESPONSE)
+ snd_hdac_bus_update_rirb(bus);
+ }
+ return active;
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_check_rirb_status, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+void hda_codec_device_remove(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return;
+
+ /* codec removal, invoke bus_device_remove */
+ snd_hdac_ext_bus_device_remove(bus);
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_device_remove, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+#endif /* CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC) && IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
+
+void hda_codec_i915_display_power(struct snd_sof_dev *sdev, bool enable)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return;
+
+ if (HDA_IDISP_CODEC(bus->codec_mask)) {
+ dev_dbg(bus->dev, "Turning i915 HDAC power %d\n", enable);
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, enable);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_i915_display_power, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+
+int hda_codec_i915_init(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int ret;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return 0;
+
+ /* i915 exposes an HDA codec for HDMI audio */
+ ret = snd_hdac_i915_init(bus);
+ if (ret < 0)
+ return ret;
+
+ /* codec_mask not yet known, power up for probe */
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_i915_init, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+
+int hda_codec_i915_exit(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ return 0;
+
+ if (!bus->audio_component)
+ return 0;
+
+ /* power down unconditionally */
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
+
+ return snd_hdac_i915_exit(bus);
+}
+EXPORT_SYMBOL_NS_GPL(hda_codec_i915_exit, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+
+#endif
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/hda-common-ops.c b/sound/soc/sof/intel/hda-common-ops.c
new file mode 100644
index 000000000..8e1cd0bab
--- /dev/null
+++ b/sound/soc/sof/intel/hda-common-ops.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+
+/*
+ * common ops for SKL+ HDAudio platforms
+ */
+
+#include "../sof-priv.h"
+#include "hda.h"
+#include "../sof-audio.h"
+
+struct snd_sof_dsp_ops sof_hda_common_ops = {
+ /* probe/remove/shutdown */
+ .probe = hda_dsp_probe,
+ .remove = hda_dsp_remove,
+
+ /* Register IO uses direct mmio */
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* ipc */
+ .get_mailbox_offset = hda_dsp_ipc_get_mailbox_offset,
+ .get_window_offset = hda_dsp_ipc_get_window_offset,
+
+ .ipc_msg_data = hda_ipc_msg_data,
+ .set_stream_data_offset = hda_set_stream_data_offset,
+
+ /* machine driver */
+ .machine_select = hda_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = hda_set_mach_params,
+
+ /* debug */
+ .dbg_dump = hda_dsp_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* stream callbacks */
+ .pcm_open = hda_dsp_pcm_open,
+ .pcm_close = hda_dsp_pcm_close,
+ .pcm_hw_params = hda_dsp_pcm_hw_params,
+ .pcm_hw_free = hda_dsp_stream_hw_free,
+ .pcm_trigger = hda_dsp_pcm_trigger,
+ .pcm_pointer = hda_dsp_pcm_pointer,
+ .pcm_ack = hda_dsp_pcm_ack,
+
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_raw,
+
+ /* pre/post fw run */
+ .pre_fw_run = hda_dsp_pre_fw_run,
+
+ /* firmware run */
+ .run = hda_dsp_cl_boot_firmware,
+
+ /* parse platform specific extended manifest */
+ .parse_platform_ext_manifest = hda_dsp_ext_man_get_cavs_config_data,
+
+ /* dsp core get/put */
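+ /* core_get/core_put are not set here; the platform-specific ops init fills them in where needed */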
+
+ /* trace callback */
+ .trace_init = hda_dsp_trace_init,
+ .trace_release = hda_dsp_trace_release,
+ .trace_trigger = hda_dsp_trace_trigger,
+
+ /* client ops */
+ .register_ipc_clients = hda_register_clients,
+ .unregister_ipc_clients = hda_unregister_clients,
+
+ /* DAI drivers */
+ .drv = skl_dai,
+ .num_drv = SOF_SKL_NUM_DAIS,
+
+ /* PM */
+ .suspend = hda_dsp_suspend,
+ .resume = hda_dsp_resume,
+ .runtime_suspend = hda_dsp_runtime_suspend,
+ .runtime_resume = hda_dsp_runtime_resume,
+ .runtime_idle = hda_dsp_runtime_idle,
+ .set_hw_params_upon_resume = hda_dsp_set_hw_params_upon_resume,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+};
diff --git a/sound/soc/sof/intel/hda-ctrl.c b/sound/soc/sof/intel/hda-ctrl.c
new file mode 100644
index 000000000..84bf01bd3
--- /dev/null
+++ b/sound/soc/sof/intel/hda-ctrl.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/hda_component.h>
+#include <sound/hda-mlink.h>
+#include "../ops.h"
+#include "hda.h"
+
+/*
+ * HDA Operations.
+ */
+
+int hda_dsp_ctrl_link_reset(struct snd_sof_dev *sdev, bool reset)
+{
+ unsigned long timeout;
+ u32 gctl = 0;
+ u32 val;
+
+ /* 0 to enter reset and 1 to exit reset */
+ val = reset ? 0 : SOF_HDA_GCTL_RESET;
+
+ /* enter/exit HDA controller reset */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCTL,
+ SOF_HDA_GCTL_RESET, val);
+
+ /* wait to enter/exit reset */
+ timeout = jiffies + msecs_to_jiffies(HDA_DSP_CTRL_RESET_TIMEOUT);
+ while (time_before(jiffies, timeout)) {
+ gctl = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCTL);
+ if ((gctl & SOF_HDA_GCTL_RESET) == val)
+ return 0;
+ usleep_range(500, 1000);
+ }
+
+ /* enter/exit reset failed */
+ dev_err(sdev->dev, "error: failed to %s HDA controller gctl 0x%x\n",
+ reset ? "reset" : "ready", gctl);
+ return -EIO;
+}
+
+int hda_dsp_ctrl_get_caps(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ u32 cap, offset, feature;
+ int count = 0;
+ int ret;
+
+ /*
+ * On some devices, one reset cycle is necessary before reading
+ * capabilities
+ */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0)
+ return ret;
+ ret = hda_dsp_ctrl_link_reset(sdev, false);
+ if (ret < 0)
+ return ret;
+
+ offset = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_LLCH);
+
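+ /* walk the capability list: each header encodes the capability ID and the offset of the next entry */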
+ do {
+ dev_dbg(sdev->dev, "checking for capabilities at offset 0x%x\n",
+ offset & SOF_HDA_CAP_NEXT_MASK);
+
+ cap = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, offset);
+
+ if (cap == -1) {
+ dev_dbg(bus->dev, "Invalid capability reg read\n");
+ break;
+ }
+
+ feature = (cap & SOF_HDA_CAP_ID_MASK) >> SOF_HDA_CAP_ID_OFF;
+
+ switch (feature) {
+ case SOF_HDA_PP_CAP_ID:
+ dev_dbg(sdev->dev, "found DSP capability at 0x%x\n",
+ offset);
+ bus->ppcap = bus->remap_addr + offset;
+ sdev->bar[HDA_DSP_PP_BAR] = bus->ppcap;
+ break;
+ case SOF_HDA_SPIB_CAP_ID:
+ dev_dbg(sdev->dev, "found SPIB capability at 0x%x\n",
+ offset);
+ bus->spbcap = bus->remap_addr + offset;
+ sdev->bar[HDA_DSP_SPIB_BAR] = bus->spbcap;
+ break;
+ case SOF_HDA_DRSM_CAP_ID:
+ dev_dbg(sdev->dev, "found DRSM capability at 0x%x\n",
+ offset);
+ bus->drsmcap = bus->remap_addr + offset;
+ sdev->bar[HDA_DSP_DRSM_BAR] = bus->drsmcap;
+ break;
+ case SOF_HDA_GTS_CAP_ID:
+ dev_dbg(sdev->dev, "found GTS capability at 0x%x\n",
+ offset);
+ bus->gtscap = bus->remap_addr + offset;
+ break;
+ case SOF_HDA_ML_CAP_ID:
+ dev_dbg(sdev->dev, "found ML capability at 0x%x\n",
+ offset);
+ bus->mlcap = bus->remap_addr + offset;
+ break;
+ default:
+ dev_dbg(sdev->dev, "found capability %d at 0x%x\n",
+ feature, offset);
+ break;
+ }
+
+ offset = cap & SOF_HDA_CAP_NEXT_MASK;
+ } while (count++ <= SOF_HDA_MAX_CAPS && offset);
+
+ return 0;
+}
+
+void hda_dsp_ctrl_ppcap_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? SOF_HDA_PPCTL_GPROCEN : 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_GPROCEN, val);
+}
+
+void hda_dsp_ctrl_ppcap_int_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? SOF_HDA_PPCTL_PIE : 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_PIE, val);
+}
+
+void hda_dsp_ctrl_misc_clock_gating(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? PCI_CGCTL_MISCBDCGE_MASK : 0;
+
+ snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_MISCBDCGE_MASK, val);
+}
+
+/*
+ * enable/disable audio dsp clock gating and power gating bits.
+ * This allows the HW to opportunistically power and clock gate
+ * the audio dsp when it is idle
+ */
+int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ u32 val;
+
+ /* enable/disable audio dsp clock gating */
+ val = enable ? PCI_CGCTL_ADSPDCGE : 0;
+ snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_ADSPDCGE, val);
+
+ /* disable DMI link L1 entry when requested, but re-enable it only if it wasn't disabled previously */
+ val = enable ? HDA_VS_INTEL_EM2_L1SEN : 0;
+ if (!enable || !hda->l1_disabled)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, val);
+
+ /* enable/disable audio dsp power gating */
+ val = enable ? 0 : PCI_PGCTL_ADSPPGD;
+ snd_sof_pci_update_bits(sdev, PCI_PGCTL, PCI_PGCTL_ADSPPGD, val);
+
+ return 0;
+}
+
+int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *stream;
+ int sd_offset, ret = 0;
+
+ if (bus->chip_init)
+ return 0;
+
+ hda_codec_set_codec_wakeup(sdev, true);
+
+ hda_dsp_ctrl_misc_clock_gating(sdev, false);
+
+ /* reset HDA controller */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to reset HDA controller\n");
+ goto err;
+ }
+
+ usleep_range(500, 1000);
+
+ /* exit HDA controller reset */
+ ret = hda_dsp_ctrl_link_reset(sdev, false);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to exit HDA controller reset\n");
+ goto err;
+ }
+ usleep_range(1000, 1200);
+
+ hda_codec_detect_mask(sdev);
+
+ /* clear stream status */
+ list_for_each_entry(stream, &bus->stream_list, list) {
+ sd_offset = SOF_STREAM_SD_OFFSET(stream);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+ }
+
+ /* clear WAKESTS */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
+ SOF_HDA_WAKESTS_INT_MASK);
+
+ hda_codec_rirb_status_clear(sdev);
+
+ /* clear interrupt status register */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_ALL_STREAM);
+
+ hda_codec_init_cmd_io(sdev);
+
+ /* enable CIE and GIE interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN);
+
+ /* program the position buffer */
+ if (bus->use_posbuf && bus->posbuf.addr) {
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE,
+ (u32)bus->posbuf.addr);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPUBASE,
+ upper_32_bits(bus->posbuf.addr));
+ }
+
+ hda_bus_ml_reset_losidv(bus);
+
+ bus->chip_init = true;
+
+err:
+ hda_dsp_ctrl_misc_clock_gating(sdev, true);
+
+ hda_codec_set_codec_wakeup(sdev, false);
+
+ return ret;
+}
+
+void hda_dsp_ctrl_stop_chip(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *stream;
+ int sd_offset;
+
+ if (!bus->chip_init)
+ return;
+
+ /* disable interrupts in stream descriptor */
+ list_for_each_entry(stream, &bus->stream_list, list) {
+ sd_offset = SOF_STREAM_SD_OFFSET(stream);
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset +
+ SOF_HDA_ADSP_REG_SD_CTL,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ 0);
+ }
+
+ /* disable SIE for all streams */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_ALL_STREAM, 0);
+
+ /* disable controller CIE and GIE */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
+ 0);
+
+ /* clear stream status */
+ list_for_each_entry(stream, &bus->stream_list, list) {
+ sd_offset = SOF_STREAM_SD_OFFSET(stream);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+ }
+
+ /* clear WAKESTS */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
+ SOF_HDA_WAKESTS_INT_MASK);
+
+ hda_codec_rirb_status_clear(sdev);
+
+ /* clear interrupt status register */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_ALL_STREAM);
+
+ hda_codec_stop_cmd_io(sdev);
+
+ /* disable position buffer */
+ if (bus->use_posbuf && bus->posbuf.addr) {
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_ADSP_DPLBASE, 0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_ADSP_DPUBASE, 0);
+ }
+
+ bus->chip_init = false;
+}
diff --git a/sound/soc/sof/intel/hda-dai-ops.c b/sound/soc/sof/intel/hda-dai-ops.c
new file mode 100644
index 000000000..494ced2b7
--- /dev/null
+++ b/sound/soc/sof/intel/hda-dai-ops.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+
+#include <sound/pcm_params.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda-mlink.h>
+#include <sound/sof/ipc4/header.h>
+#include <uapi/sound/sof/header.h>
+#include "../ipc4-priv.h"
+#include "../ipc4-topology.h"
+#include "../sof-priv.h"
+#include "../sof-audio.h"
+#include "hda.h"
+
+/* These ops are only applicable for the HDA DAI's in their current form */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_LINK)
+/*
+ * This function checks if the host DMA channel corresponding
+ * to the link DMA stream_tag argument is assigned to one
+ * of the FEs connected to the BE DAI.
+ */
+static bool hda_check_fes(struct snd_soc_pcm_runtime *rtd,
+ int dir, int stream_tag)
+{
+ struct snd_pcm_substream *fe_substream;
+ struct hdac_stream *fe_hstream;
+ struct snd_soc_dpcm *dpcm;
+
+ for_each_dpcm_fe(rtd, dir, dpcm) {
+ fe_substream = snd_soc_dpcm_get_substream(dpcm->fe, dir);
+ fe_hstream = fe_substream->runtime->private_data;
+ if (fe_hstream->stream_tag == stream_tag)
+ return true;
+ }
+
+ return false;
+}
+
+static struct hdac_ext_stream *
+hda_link_stream_assign(struct hdac_bus *bus, struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct sof_intel_hda_stream *hda_stream;
+ const struct sof_intel_dsp_desc *chip;
+ struct snd_sof_dev *sdev;
+ struct hdac_ext_stream *res = NULL;
+ struct hdac_stream *hstream = NULL;
+
+ int stream_dir = substream->stream;
+
+ if (!bus->ppcap) {
+ dev_err(bus->dev, "stream type not supported\n");
+ return NULL;
+ }
+
+ spin_lock_irq(&bus->reg_lock);
+ list_for_each_entry(hstream, &bus->stream_list, list) {
+ struct hdac_ext_stream *hext_stream =
+ stream_to_hdac_ext_stream(hstream);
+ if (hstream->direction != substream->stream)
+ continue;
+
+ hda_stream = hstream_to_sof_hda_stream(hext_stream);
+ sdev = hda_stream->sdev;
+ chip = get_chip_info(sdev->pdata);
+
+ /* check if link is available */
+ if (!hext_stream->link_locked) {
+ /*
+ * choose the first available link for platforms that do not have the
+ * PROCEN_FMT_QUIRK set.
+ */
+ if (!(chip->quirks & SOF_INTEL_PROCEN_FMT_QUIRK)) {
+ res = hext_stream;
+ break;
+ }
+
+ if (hstream->opened) {
+ /*
+ * check if the stream tag matches the stream
+ * tag of one of the connected FEs
+ */
+ if (hda_check_fes(rtd, stream_dir,
+ hstream->stream_tag)) {
+ res = hext_stream;
+ break;
+ }
+ } else {
+ res = hext_stream;
+
+ /*
+ * This must be a hostless stream.
+ * So reserve the host DMA channel.
+ */
+ hda_stream->host_reserved = 1;
+ break;
+ }
+ }
+ }
+
+ if (res) {
+ /* Make sure that host and link DMA is decoupled. */
+ snd_hdac_ext_stream_decouple_locked(bus, res, true);
+
+ res->link_locked = 1;
+ res->link_substream = substream;
+ }
+ spin_unlock_irq(&bus->reg_lock);
+
+ return res;
+}
+
+static struct hdac_ext_stream *hda_get_hext_stream(struct snd_sof_dev *sdev,
+ struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream)
+{
+ return snd_soc_dai_get_dma_data(cpu_dai, substream);
+}
+
+static struct hdac_ext_stream *hda_ipc4_get_hext_stream(struct snd_sof_dev *sdev,
+ struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_sof_widget *pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+ struct snd_sof_widget *swidget;
+ struct snd_soc_dapm_widget *w;
+
+ w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
+ swidget = w->dobj.private;
+ pipe_widget = swidget->spipe->pipe_widget;
+ pipeline = pipe_widget->private;
+
+ /* mark pipeline so that it can be skipped during FE trigger */
+ pipeline->skip_during_fe_trigger = true;
+
+ return snd_soc_dai_get_dma_data(cpu_dai, substream);
+}
+
+static struct hdac_ext_stream *hda_assign_hext_stream(struct snd_sof_dev *sdev,
+ struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *dai;
+ struct hdac_ext_stream *hext_stream;
+
+ /* only allocate a stream_tag for the first DAI in the dailink */
+ dai = asoc_rtd_to_cpu(rtd, 0);
+ if (dai == cpu_dai)
+ hext_stream = hda_link_stream_assign(sof_to_bus(sdev), substream);
+ else
+ hext_stream = snd_soc_dai_get_dma_data(dai, substream);
+
+ if (!hext_stream)
+ return NULL;
+
+ snd_soc_dai_set_dma_data(cpu_dai, substream, (void *)hext_stream);
+
+ return hext_stream;
+}
+
+static void hda_release_hext_stream(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_ext_stream *hext_stream = hda_get_hext_stream(sdev, cpu_dai, substream);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *dai;
+
+ /* only release a stream_tag for the first DAI in the dailink */
+ dai = asoc_rtd_to_cpu(rtd, 0);
+ if (dai == cpu_dai)
+ snd_hdac_ext_stream_release(hext_stream, HDAC_EXT_STREAM_TYPE_LINK);
+ snd_soc_dai_set_dma_data(cpu_dai, substream, NULL);
+}
+
+static void hda_setup_hext_stream(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream,
+ unsigned int format_val)
+{
+ snd_hdac_ext_stream_setup(hext_stream, format_val);
+}
+
+static void hda_reset_hext_stream(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream)
+{
+ snd_hdac_ext_stream_reset(hext_stream);
+}
+
+static void hda_codec_dai_set_stream(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct hdac_stream *hstream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+
+ /* set the hdac_stream in the codec dai */
+ snd_soc_dai_set_stream(codec_dai, hstream, substream->stream);
+}
+
+static unsigned int hda_calc_stream_format(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ unsigned int link_bps;
+ unsigned int format_val;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ link_bps = codec_dai->driver->playback.sig_bits;
+ else
+ link_bps = codec_dai->driver->capture.sig_bits;
+
+ format_val = snd_hdac_calc_stream_format(params_rate(params), params_channels(params),
+ params_format(params), link_bps, 0);
+
+ dev_dbg(sdev->dev, "format_val=%#x, rate=%d, ch=%d, format=%d\n", format_val,
+ params_rate(params), params_channels(params), params_format(params));
+
+ return format_val;
+}
+
+static struct hdac_ext_link *hda_get_hlink(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ return snd_hdac_ext_bus_get_hlink_by_name(bus, codec_dai->component->name);
+}
+
+static unsigned int generic_calc_stream_format(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ unsigned int format_val;
+
+ format_val = snd_hdac_calc_stream_format(params_rate(params), params_channels(params),
+ params_format(params),
+ params_physical_width(params),
+ 0);
+
+ dev_dbg(sdev->dev, "format_val=%#x, rate=%d, ch=%d, format=%d\n", format_val,
+ params_rate(params), params_channels(params), params_format(params));
+
+ return format_val;
+}
+
+static unsigned int dmic_calc_stream_format(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ unsigned int format_val;
+ snd_pcm_format_t format;
+ unsigned int channels;
+ unsigned int width;
+
+ channels = params_channels(params);
+ format = params_format(params);
+ width = params_physical_width(params);
+
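+ /*
+ * For 16-bit DMIC capture, two 16-bit samples are packed per 32-bit
+ * container: widen the format to S32_LE and halve the channel count,
+ * which keeps the overall data rate unchanged.
+ */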
+ if (format == SNDRV_PCM_FORMAT_S16_LE) {
+ format = SNDRV_PCM_FORMAT_S32_LE;
+ channels /= 2;
+ width = 32;
+ }
+
+ format_val = snd_hdac_calc_stream_format(params_rate(params), channels,
+ format,
+ width,
+ 0);
+
+ dev_dbg(sdev->dev, "format_val=%#x, rate=%d, ch=%d, format=%d\n", format_val,
+ params_rate(params), channels, format);
+
+ return format_val;
+}
+
+static struct hdac_ext_link *ssp_get_hlink(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ return hdac_bus_eml_ssp_get_hlink(bus);
+}
+
+static struct hdac_ext_link *dmic_get_hlink(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ return hdac_bus_eml_dmic_get_hlink(bus);
+}
+
+static struct hdac_ext_link *sdw_get_hlink(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ return hdac_bus_eml_sdw_get_hlink(bus);
+}
+
+static int hda_ipc4_pre_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct snd_sof_widget *pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+ struct snd_sof_widget *swidget;
+ struct snd_soc_dapm_widget *w;
+ int ret = 0;
+
+ w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
+ swidget = w->dobj.private;
+ pipe_widget = swidget->spipe->pipe_widget;
+ pipeline = pipe_widget->private;
+
+ if (pipe_widget->instance_id < 0)
+ return 0;
+
+ mutex_lock(&ipc4_data->pipeline_state_mutex);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ ret = sof_ipc4_set_pipeline_state(sdev, pipe_widget->instance_id,
+ SOF_IPC4_PIPE_PAUSED);
+ if (ret < 0)
+ goto out;
+
+ pipeline->state = SOF_IPC4_PIPE_PAUSED;
+ break;
+ default:
+ dev_err(sdev->dev, "unknown trigger command %d\n", cmd);
+ ret = -EINVAL;
+ }
+out:
+ mutex_unlock(&ipc4_data->pipeline_state_mutex);
+ return ret;
+}
+
+static int hda_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ struct hdac_ext_stream *hext_stream = snd_soc_dai_get_dma_data(cpu_dai, substream);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ snd_hdac_ext_stream_start(hext_stream);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ snd_hdac_ext_stream_clear(hext_stream);
+ break;
+ default:
+ dev_err(sdev->dev, "unknown trigger command %d\n", cmd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hda_ipc4_post_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct snd_sof_widget *pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+ struct snd_sof_widget *swidget;
+ struct snd_soc_dapm_widget *w;
+ int ret = 0;
+
+ w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
+ swidget = w->dobj.private;
+ pipe_widget = swidget->spipe->pipe_widget;
+ pipeline = pipe_widget->private;
+
+ if (pipe_widget->instance_id < 0)
+ return 0;
+
+ mutex_lock(&ipc4_data->pipeline_state_mutex);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
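+ /* the pipeline must be in the PAUSED state before it can be moved to RUNNING */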
+ if (pipeline->state != SOF_IPC4_PIPE_PAUSED) {
+ ret = sof_ipc4_set_pipeline_state(sdev, pipe_widget->instance_id,
+ SOF_IPC4_PIPE_PAUSED);
+ if (ret < 0)
+ goto out;
+ pipeline->state = SOF_IPC4_PIPE_PAUSED;
+ }
+
+ ret = sof_ipc4_set_pipeline_state(sdev, pipe_widget->instance_id,
+ SOF_IPC4_PIPE_RUNNING);
+ if (ret < 0)
+ goto out;
+ pipeline->state = SOF_IPC4_PIPE_RUNNING;
+ swidget->spipe->started_count++;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = sof_ipc4_set_pipeline_state(sdev, pipe_widget->instance_id,
+ SOF_IPC4_PIPE_RUNNING);
+ if (ret < 0)
+ goto out;
+ pipeline->state = SOF_IPC4_PIPE_RUNNING;
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ /*
+ * The STOP/SUSPEND trigger is invoked only once, after all users of this pipeline
+ * have been stopped. So clear started_count so that the pipeline can be reset.
+ */
+ swidget->spipe->started_count = 0;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ break;
+ default:
+ dev_err(sdev->dev, "unknown trigger command %d\n", cmd);
+ ret = -EINVAL;
+ break;
+ }
+out:
+ mutex_unlock(&ipc4_data->pipeline_state_mutex);
+ return ret;
+}
+
+static struct hdac_ext_stream *sdw_hda_ipc4_get_hext_stream(struct snd_sof_dev *sdev,
+ struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
+ struct snd_sof_widget *swidget = w->dobj.private;
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_ipc4_copier *ipc4_copier = dai->private;
+ struct sof_ipc4_alh_configuration_blob *blob;
+
+ blob = (struct sof_ipc4_alh_configuration_blob *)ipc4_copier->copier_config;
+
+ /*
+ * Starting with ACE_2_0, re-setting the device_count is mandatory to avoid using
+ * the multi-gateway firmware configuration. The DMA hardware can take care of
+ * multiple links without needing any firmware assistance
+ */
+ blob->alh_cfg.device_count = 1;
+
+ return hda_ipc4_get_hext_stream(sdev, cpu_dai, substream);
+}
+
+static const struct hda_dai_widget_dma_ops hda_ipc4_dma_ops = {
+ .get_hext_stream = hda_ipc4_get_hext_stream,
+ .assign_hext_stream = hda_assign_hext_stream,
+ .release_hext_stream = hda_release_hext_stream,
+ .setup_hext_stream = hda_setup_hext_stream,
+ .reset_hext_stream = hda_reset_hext_stream,
+ .pre_trigger = hda_ipc4_pre_trigger,
+ .trigger = hda_trigger,
+ .post_trigger = hda_ipc4_post_trigger,
+ .codec_dai_set_stream = hda_codec_dai_set_stream,
+ .calc_stream_format = hda_calc_stream_format,
+ .get_hlink = hda_get_hlink,
+};
+
+static const struct hda_dai_widget_dma_ops ssp_ipc4_dma_ops = {
+ .get_hext_stream = hda_ipc4_get_hext_stream,
+ .assign_hext_stream = hda_assign_hext_stream,
+ .release_hext_stream = hda_release_hext_stream,
+ .setup_hext_stream = hda_setup_hext_stream,
+ .reset_hext_stream = hda_reset_hext_stream,
+ .pre_trigger = hda_ipc4_pre_trigger,
+ .trigger = hda_trigger,
+ .post_trigger = hda_ipc4_post_trigger,
+ .calc_stream_format = generic_calc_stream_format,
+ .get_hlink = ssp_get_hlink,
+};
+
+static const struct hda_dai_widget_dma_ops dmic_ipc4_dma_ops = {
+ .get_hext_stream = hda_ipc4_get_hext_stream,
+ .assign_hext_stream = hda_assign_hext_stream,
+ .release_hext_stream = hda_release_hext_stream,
+ .setup_hext_stream = hda_setup_hext_stream,
+ .reset_hext_stream = hda_reset_hext_stream,
+ .pre_trigger = hda_ipc4_pre_trigger,
+ .trigger = hda_trigger,
+ .post_trigger = hda_ipc4_post_trigger,
+ .calc_stream_format = dmic_calc_stream_format,
+ .get_hlink = dmic_get_hlink,
+};
+
+static const struct hda_dai_widget_dma_ops sdw_ipc4_dma_ops = {
+ .get_hext_stream = sdw_hda_ipc4_get_hext_stream,
+ .assign_hext_stream = hda_assign_hext_stream,
+ .release_hext_stream = hda_release_hext_stream,
+ .setup_hext_stream = hda_setup_hext_stream,
+ .reset_hext_stream = hda_reset_hext_stream,
+ .pre_trigger = hda_ipc4_pre_trigger,
+ .trigger = hda_trigger,
+ .post_trigger = hda_ipc4_post_trigger,
+ .calc_stream_format = generic_calc_stream_format,
+ .get_hlink = sdw_get_hlink,
+};
+
+static const struct hda_dai_widget_dma_ops hda_ipc4_chain_dma_ops = {
+ .get_hext_stream = hda_get_hext_stream,
+ .assign_hext_stream = hda_assign_hext_stream,
+ .release_hext_stream = hda_release_hext_stream,
+ .setup_hext_stream = hda_setup_hext_stream,
+ .reset_hext_stream = hda_reset_hext_stream,
+ .trigger = hda_trigger,
+ .codec_dai_set_stream = hda_codec_dai_set_stream,
+ .calc_stream_format = hda_calc_stream_format,
+ .get_hlink = hda_get_hlink,
+};
+
+static int hda_ipc3_post_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ struct hdac_ext_stream *hext_stream = hda_get_hext_stream(sdev, cpu_dai, substream);
+ struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ {
+ struct snd_sof_dai_config_data data = { 0 };
+ int ret;
+
+ data.dai_data = DMA_CHAN_INVALID;
+ ret = hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_FREE, &data);
+ if (ret < 0)
+ return ret;
+
+ if (cmd == SNDRV_PCM_TRIGGER_STOP)
+ return hda_link_dma_cleanup(substream, hext_stream, cpu_dai);
+
+ break;
+ }
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_PAUSE, NULL);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct hda_dai_widget_dma_ops hda_ipc3_dma_ops = {
+ .get_hext_stream = hda_get_hext_stream,
+ .assign_hext_stream = hda_assign_hext_stream,
+ .release_hext_stream = hda_release_hext_stream,
+ .setup_hext_stream = hda_setup_hext_stream,
+ .reset_hext_stream = hda_reset_hext_stream,
+ .trigger = hda_trigger,
+ .post_trigger = hda_ipc3_post_trigger,
+ .codec_dai_set_stream = hda_codec_dai_set_stream,
+ .calc_stream_format = hda_calc_stream_format,
+ .get_hlink = hda_get_hlink,
+};
+
+static struct hdac_ext_stream *
+hda_dspless_get_hext_stream(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+
+ return stream_to_hdac_ext_stream(hstream);
+}
+
+static void hda_dspless_setup_hext_stream(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream,
+ unsigned int format_val)
+{
+ /*
+ * Save the format_val, which was adjusted by the maxbps of the codec.
+ * This information is not available on the FE side since dummy_codec
+ * is used there.
+ */
+ hext_stream->hstream.format_val = format_val;
+}
+
+static const struct hda_dai_widget_dma_ops hda_dspless_dma_ops = {
+ .get_hext_stream = hda_dspless_get_hext_stream,
+ .setup_hext_stream = hda_dspless_setup_hext_stream,
+ .codec_dai_set_stream = hda_codec_dai_set_stream,
+ .calc_stream_format = hda_calc_stream_format,
+ .get_hlink = hda_get_hlink,
+};
+
+#endif
+
+const struct hda_dai_widget_dma_ops *
+hda_select_dai_widget_ops(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_LINK)
+ struct snd_sof_dai *sdai;
+
+ if (sdev->dspless_mode_selected)
+ return &hda_dspless_dma_ops;
+
+ sdai = swidget->private;
+
+ switch (sdev->pdata->ipc_type) {
+ case SOF_IPC:
+ {
+ struct sof_dai_private_data *private = sdai->private;
+
+ if (private->dai_config->type == SOF_DAI_INTEL_HDA)
+ return &hda_ipc3_dma_ops;
+ break;
+ }
+ case SOF_INTEL_IPC4:
+ {
+ struct sof_ipc4_copier *ipc4_copier = sdai->private;
+ const struct sof_intel_dsp_desc *chip;
+
+ chip = get_chip_info(sdev->pdata);
+
+ switch (ipc4_copier->dai_type) {
+ case SOF_DAI_INTEL_HDA:
+ {
+ struct snd_sof_widget *pipe_widget = swidget->spipe->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
+
+ if (pipeline->use_chain_dma)
+ return &hda_ipc4_chain_dma_ops;
+
+ return &hda_ipc4_dma_ops;
+ }
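+ /*
+ * On platforms older than ACE 2.0, SSP, DMIC and ALH DAIs are not
+ * handled through HDaudio link DMA, so no DMA ops are returned.
+ */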
+ case SOF_DAI_INTEL_SSP:
+ if (chip->hw_ip_version < SOF_INTEL_ACE_2_0)
+ return NULL;
+ return &ssp_ipc4_dma_ops;
+ case SOF_DAI_INTEL_DMIC:
+ if (chip->hw_ip_version < SOF_INTEL_ACE_2_0)
+ return NULL;
+ return &dmic_ipc4_dma_ops;
+ case SOF_DAI_INTEL_ALH:
+ if (chip->hw_ip_version < SOF_INTEL_ACE_2_0)
+ return NULL;
+ return &sdw_ipc4_dma_ops;
+
+ default:
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+#endif
+ return NULL;
+}
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
new file mode 100644
index 000000000..f3cefd866
--- /dev/null
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Keyon Jie <yang.jie@linux.intel.com>
+//
+
+#include <sound/pcm_params.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda-mlink.h>
+#include <sound/hda_register.h>
+#include <sound/intel-nhlt.h>
+#include <sound/sof/ipc4/header.h>
+#include <uapi/sound/sof/header.h>
+#include "../ipc4-priv.h"
+#include "../ipc4-topology.h"
+#include "../sof-priv.h"
+#include "../sof-audio.h"
+#include "hda.h"
+
+/*
+ * The default method is to fetch the NHLT from the BIOS. With this parameter
+ * set, it is possible to override that with the NHLT in the SOF topology
+ * manifest.
+ */
+static bool hda_use_tplg_nhlt;
+module_param_named(sof_use_tplg_nhlt, hda_use_tplg_nhlt, bool, 0444);
+MODULE_PARM_DESC(sof_use_tplg_nhlt, "SOF topology nhlt override");
+
+static struct snd_sof_dev *widget_to_sdev(struct snd_soc_dapm_widget *w)
+{
+ struct snd_sof_widget *swidget = w->dobj.private;
+ struct snd_soc_component *component = swidget->scomp;
+
+ return snd_soc_component_get_drvdata(component);
+}
+
+int hda_dai_config(struct snd_soc_dapm_widget *w, unsigned int flags,
+ struct snd_sof_dai_config_data *data)
+{
+ struct snd_sof_widget *swidget = w->dobj.private;
+ const struct sof_ipc_tplg_ops *tplg_ops;
+ struct snd_sof_dev *sdev;
+ int ret;
+
+ if (!swidget)
+ return 0;
+
+ sdev = widget_to_sdev(w);
+ tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ if (tplg_ops && tplg_ops->dai_config) {
+ ret = tplg_ops->dai_config(sdev, swidget, flags, data);
+ if (ret < 0) {
+ dev_err(sdev->dev, "DAI config with flags %x failed for widget %s\n",
+ flags, w->name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_LINK)
+
+static struct snd_sof_dev *dai_to_sdev(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
+
+ return widget_to_sdev(w);
+}
+
+static const struct hda_dai_widget_dma_ops *
+hda_dai_get_ops(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai)
+{
+ struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
+ struct snd_sof_widget *swidget = w->dobj.private;
+ struct snd_sof_dev *sdev;
+ struct snd_sof_dai *sdai;
+
+ sdev = widget_to_sdev(w);
+
+ /*
+ * The swidget parameter of hda_select_dai_widget_ops() is ignored in
+ * the case of DSPless mode.
+ */
+ if (sdev->dspless_mode_selected)
+ return hda_select_dai_widget_ops(sdev, NULL);
+
+ sdai = swidget->private;
+
+ /* select and set the DAI widget ops if not set already */
+ if (!sdai->platform_private) {
+ const struct hda_dai_widget_dma_ops *ops =
+ hda_select_dai_widget_ops(sdev, swidget);
+ if (!ops)
+ return NULL;
+
+ /* check if mandatory ops are set */
+ if (!ops->get_hext_stream)
+ return NULL;
+
+ sdai->platform_private = ops;
+ }
+
+ return sdai->platform_private;
+}
+
+int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream,
+ struct snd_soc_dai *cpu_dai)
+{
+ const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, cpu_dai);
+ struct sof_intel_hda_stream *hda_stream;
+ struct hdac_ext_link *hlink;
+ struct snd_sof_dev *sdev;
+ int stream_tag;
+
+ if (!ops) {
+ dev_err(cpu_dai->dev, "DAI widget ops not set\n");
+ return -EINVAL;
+ }
+
+ sdev = dai_to_sdev(substream, cpu_dai);
+
+ hlink = ops->get_hlink(sdev, substream);
+ if (!hlink)
+ return -EINVAL;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ stream_tag = hdac_stream(hext_stream)->stream_tag;
+ snd_hdac_ext_bus_link_clear_stream_id(hlink, stream_tag);
+ }
+
+ if (ops->release_hext_stream)
+ ops->release_hext_stream(sdev, cpu_dai, substream);
+
+ hext_stream->link_prepared = 0;
+
+ /* free the host DMA channel reserved by hostless streams */
+ hda_stream = hstream_to_sof_hda_stream(hext_stream);
+ hda_stream->host_reserved = 0;
+
+ return 0;
+}
+
+static int hda_link_dma_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *cpu_dai)
+{
+ const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, cpu_dai);
+ struct hdac_ext_stream *hext_stream;
+ struct hdac_stream *hstream;
+ struct hdac_ext_link *hlink;
+ struct snd_sof_dev *sdev;
+ int stream_tag;
+
+ if (!ops) {
+ dev_err(cpu_dai->dev, "DAI widget ops not set\n");
+ return -EINVAL;
+ }
+
+ sdev = dai_to_sdev(substream, cpu_dai);
+
+ hlink = ops->get_hlink(sdev, substream);
+ if (!hlink)
+ return -EINVAL;
+
+ hext_stream = ops->get_hext_stream(sdev, cpu_dai, substream);
+
+ if (!hext_stream) {
+ if (ops->assign_hext_stream)
+ hext_stream = ops->assign_hext_stream(sdev, cpu_dai, substream);
+ }
+
+ if (!hext_stream)
+ return -EBUSY;
+
+ hstream = &hext_stream->hstream;
+ stream_tag = hstream->stream_tag;
+
+ if (hext_stream->hstream.direction == SNDRV_PCM_STREAM_PLAYBACK)
+ snd_hdac_ext_bus_link_set_stream_id(hlink, stream_tag);
+
+ /* set the hdac_stream in the codec dai */
+ if (ops->codec_dai_set_stream)
+ ops->codec_dai_set_stream(sdev, substream, hstream);
+
+ if (ops->reset_hext_stream)
+ ops->reset_hext_stream(sdev, hext_stream);
+
+ if (ops->calc_stream_format && ops->setup_hext_stream) {
+ unsigned int format_val = ops->calc_stream_format(sdev, substream, params);
+
+ ops->setup_hext_stream(sdev, hext_stream, format_val);
+ }
+
+ hext_stream->link_prepared = 1;
+
+ return 0;
+}
+
+static int __maybe_unused hda_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, cpu_dai);
+ struct hdac_ext_stream *hext_stream;
+ struct snd_sof_dev *sdev = dai_to_sdev(substream, cpu_dai);
+
+ if (!ops) {
+ dev_err(cpu_dai->dev, "DAI widget ops not set\n");
+ return -EINVAL;
+ }
+
+ hext_stream = ops->get_hext_stream(sdev, cpu_dai, substream);
+ if (!hext_stream)
+ return 0;
+
+ return hda_link_dma_cleanup(substream, hext_stream, cpu_dai);
+}
+
+static int __maybe_unused hda_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(dai, substream->stream);
+ const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, dai);
+ struct hdac_ext_stream *hext_stream;
+ struct snd_sof_dai_config_data data = { 0 };
+ unsigned int flags = SOF_DAI_CONFIG_FLAGS_HW_PARAMS;
+ struct snd_sof_dev *sdev = widget_to_sdev(w);
+ int ret;
+
+ if (!ops) {
+ dev_err(sdev->dev, "DAI widget ops not set\n");
+ return -EINVAL;
+ }
+
+ hext_stream = ops->get_hext_stream(sdev, dai, substream);
+ if (hext_stream && hext_stream->link_prepared)
+ return 0;
+
+ ret = hda_link_dma_hw_params(substream, params, dai);
+ if (ret < 0)
+ return ret;
+
+ hext_stream = ops->get_hext_stream(sdev, dai, substream);
+
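+ /* request the 2-step stop quirk and pass the zero-based stream tag as DAI-specific data */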
+ flags |= SOF_DAI_CONFIG_FLAGS_2_STEP_STOP << SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT;
+ data.dai_data = hdac_stream(hext_stream)->stream_tag - 1;
+
+ return hda_dai_config(w, flags, &data);
+}
+
+/*
+ * In contrast to IPC3, the dai trigger in IPC4 mixes pipeline state changes
+ * (over the IPC channel) and DMA state changes (direct host register changes).
+ */
+static int __maybe_unused hda_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, dai);
+ struct hdac_ext_stream *hext_stream;
+ struct snd_sof_dev *sdev;
+ int ret;
+
+ if (!ops) {
+ dev_err(dai->dev, "DAI widget ops not set\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dai->dev, "cmd=%d dai %s direction %d\n", cmd,
+ dai->name, substream->stream);
+
+ sdev = dai_to_sdev(substream, dai);
+
+ hext_stream = ops->get_hext_stream(sdev, dai, substream);
+ if (!hext_stream)
+ return -EINVAL;
+
+ if (ops->pre_trigger) {
+ ret = ops->pre_trigger(sdev, dai, substream, cmd);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ops->trigger) {
+ ret = ops->trigger(sdev, dai, substream, cmd);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ops->post_trigger) {
+ ret = ops->post_trigger(sdev, dai, substream, cmd);
+ if (ret < 0)
+ return ret;
+ }
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ ret = hda_link_dma_cleanup(substream, hext_stream, dai);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: failed to clean up link DMA\n", __func__);
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+
+static int hda_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ int stream = substream->stream;
+
+ return hda_dai_hw_params(substream, &rtd->dpcm[stream].hw_params, dai);
+}
+
+static const struct snd_soc_dai_ops hda_dai_ops = {
+ .hw_params = hda_dai_hw_params,
+ .hw_free = hda_dai_hw_free,
+ .trigger = hda_dai_trigger,
+ .prepare = hda_dai_prepare,
+};
+
+#endif
+
+static struct sof_ipc4_copier *widget_to_copier(struct snd_soc_dapm_widget *w)
+{
+ struct snd_sof_widget *swidget = w->dobj.private;
+ struct snd_sof_dai *sdai = swidget->private;
+ struct sof_ipc4_copier *ipc4_copier = (struct sof_ipc4_copier *)sdai->private;
+
+ return ipc4_copier;
+}
+
+static int non_hda_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
+ struct sof_ipc4_dma_config_tlv *dma_config_tlv;
+ const struct hda_dai_widget_dma_ops *ops;
+ struct sof_ipc4_dma_config *dma_config;
+ struct sof_ipc4_copier *ipc4_copier;
+ struct hdac_ext_stream *hext_stream;
+ struct hdac_stream *hstream;
+ struct snd_sof_dev *sdev;
+ int stream_id;
+ int ret;
+
+ ops = hda_dai_get_ops(substream, cpu_dai);
+ if (!ops) {
+ dev_err(cpu_dai->dev, "DAI widget ops not set\n");
+ return -EINVAL;
+ }
+
+ /* use HDaudio stream handling */
+ ret = hda_dai_hw_params(substream, params, cpu_dai);
+ if (ret < 0) {
+ dev_err(cpu_dai->dev, "%s: hda_dai_hw_params failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ /* get stream_id */
+ sdev = widget_to_sdev(w);
+ hext_stream = ops->get_hext_stream(sdev, cpu_dai, substream);
+
+ if (!hext_stream) {
+ dev_err(cpu_dai->dev, "%s: no hext_stream found\n", __func__);
+ return -ENODEV;
+ }
+
+ hstream = &hext_stream->hstream;
+ stream_id = hstream->stream_tag;
+
+ if (!stream_id) {
+ dev_err(cpu_dai->dev, "%s: no stream_id allocated\n", __func__);
+ return -ENODEV;
+ }
+
+ /* configure TLV */
+ ipc4_copier = widget_to_copier(w);
+
+ dma_config_tlv = &ipc4_copier->dma_config_tlv;
+ dma_config_tlv->type = SOF_IPC4_GTW_DMA_CONFIG_ID;
+ /* dma_config_priv_size is zero */
+ dma_config_tlv->length = sizeof(dma_config_tlv->dma_config);
+
+ dma_config = &dma_config_tlv->dma_config;
+
+ dma_config->dma_method = SOF_IPC4_DMA_METHOD_HDA;
+ dma_config->pre_allocated_by_host = 1;
+ dma_config->dma_channel_id = stream_id - 1;
+ dma_config->stream_id = stream_id;
+ dma_config->dma_stream_channel_map.device_count = 0; /* mapping not used */
+ dma_config->dma_priv_config_size = 0;
+
+ return 0;
+}
+
+static int non_hda_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ int stream = substream->stream;
+
+ return non_hda_dai_hw_params(substream, &rtd->dpcm[stream].hw_params, cpu_dai);
+}
+
+static const struct snd_soc_dai_ops ssp_dai_ops = {
+ .hw_params = non_hda_dai_hw_params,
+ .hw_free = hda_dai_hw_free,
+ .trigger = hda_dai_trigger,
+ .prepare = non_hda_dai_prepare,
+};
+
+static const struct snd_soc_dai_ops dmic_dai_ops = {
+ .hw_params = non_hda_dai_hw_params,
+ .hw_free = hda_dai_hw_free,
+ .trigger = hda_dai_trigger,
+ .prepare = non_hda_dai_prepare,
+};
+
+int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *cpu_dai,
+ int link_id)
+{
+ struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
+ const struct hda_dai_widget_dma_ops *ops;
+ struct hdac_ext_stream *hext_stream;
+ struct snd_sof_dev *sdev;
+ int ret;
+
+ ret = non_hda_dai_hw_params(substream, params, cpu_dai);
+ if (ret < 0) {
+ dev_err(cpu_dai->dev, "%s: non_hda_dai_hw_params failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ ops = hda_dai_get_ops(substream, cpu_dai);
+ sdev = widget_to_sdev(w);
+ hext_stream = ops->get_hext_stream(sdev, cpu_dai, substream);
+
+ if (!hext_stream)
+ return -ENODEV;
+
+ /* in the case of SoundWire we need to program the PCMSyCM registers */
+ ret = hdac_bus_eml_sdw_map_stream_ch(sof_to_bus(sdev), link_id, cpu_dai->id,
+ GENMASK(params_channels(params) - 1, 0),
+ hdac_stream(hext_stream)->stream_tag,
+ substream->stream);
+ if (ret < 0) {
+ dev_err(cpu_dai->dev, "%s: hdac_bus_eml_sdw_map_stream_ch failed %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int sdw_hda_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai,
+ int link_id)
+{
+ struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
+ struct snd_sof_dev *sdev;
+ int ret;
+
+ ret = hda_dai_hw_free(substream, cpu_dai);
+ if (ret < 0) {
+ dev_err(cpu_dai->dev, "%s: hda_dai_hw_free failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ sdev = widget_to_sdev(w);
+
+ /* in the case of SoundWire we need to reset the PCMSyCM registers */
+ ret = hdac_bus_eml_sdw_map_stream_ch(sof_to_bus(sdev), link_id, cpu_dai->id,
+ 0, 0, substream->stream);
+ if (ret < 0) {
+ dev_err(cpu_dai->dev, "%s: hdac_bus_eml_sdw_map_stream_ch failed %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int sdw_hda_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *cpu_dai)
+{
+ return hda_dai_trigger(substream, cmd, cpu_dai);
+}
+
+static int hda_dai_suspend(struct hdac_bus *bus)
+{
+ struct snd_soc_pcm_runtime *rtd;
+ struct hdac_ext_stream *hext_stream;
+ struct hdac_stream *s;
+ int ret;
+
+ /* set internal flag for BE */
+ list_for_each_entry(s, &bus->stream_list, list) {
+
+ hext_stream = stream_to_hdac_ext_stream(s);
+
+ /*
+ * clear the stream. This should already be taken care of for
+ * running streams when the SUSPEND trigger is called. But paused
+ * streams do not get suspended, so this needs to be done
+ * explicitly during suspend.
+ */
+ if (hext_stream->link_substream) {
+ const struct hda_dai_widget_dma_ops *ops;
+ struct snd_sof_widget *swidget;
+ struct snd_soc_dapm_widget *w;
+ struct snd_soc_dai *cpu_dai;
+ struct snd_sof_dev *sdev;
+ struct snd_sof_dai *sdai;
+
+ rtd = asoc_substream_to_rtd(hext_stream->link_substream);
+ cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ w = snd_soc_dai_get_widget(cpu_dai, hdac_stream(hext_stream)->direction);
+ swidget = w->dobj.private;
+ sdev = widget_to_sdev(w);
+ sdai = swidget->private;
+ ops = sdai->platform_private;
+
+ ret = hda_link_dma_cleanup(hext_stream->link_substream,
+ hext_stream,
+ cpu_dai);
+ if (ret < 0)
+ return ret;
+
+ /* for consistency with TRIGGER_SUSPEND */
+ if (ops->post_trigger) {
+ ret = ops->post_trigger(sdev, cpu_dai,
+ hext_stream->link_substream,
+ SNDRV_PCM_TRIGGER_SUSPEND);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void ssp_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops)
+{
+ const struct sof_intel_dsp_desc *chip;
+ int i;
+
+ chip = get_chip_info(sdev->pdata);
+
+ if (chip->hw_ip_version >= SOF_INTEL_ACE_2_0) {
+ for (i = 0; i < ops->num_drv; i++) {
+ if (strstr(ops->drv[i].name, "SSP"))
+ ops->drv[i].ops = &ssp_dai_ops;
+ }
+ }
+}
+
+static void dmic_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops)
+{
+ const struct sof_intel_dsp_desc *chip;
+ int i;
+
+ chip = get_chip_info(sdev->pdata);
+
+ if (chip->hw_ip_version >= SOF_INTEL_ACE_2_0) {
+ for (i = 0; i < ops->num_drv; i++) {
+ if (strstr(ops->drv[i].name, "DMIC"))
+ ops->drv[i].ops = &dmic_dai_ops;
+ }
+ }
+}
+
+#else
+
+static inline void ssp_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops) {}
+static inline void dmic_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops) {}
+
+#endif /* CONFIG_SND_SOC_SOF_HDA_LINK */
+
+void hda_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops)
+{
+ int i;
+
+ for (i = 0; i < ops->num_drv; i++) {
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+ if (strstr(ops->drv[i].name, "iDisp") ||
+ strstr(ops->drv[i].name, "Analog") ||
+ strstr(ops->drv[i].name, "Digital"))
+ ops->drv[i].ops = &hda_dai_ops;
+#endif
+ }
+
+ ssp_set_dai_drv_ops(sdev, ops);
+ dmic_set_dai_drv_ops(sdev, ops);
+
+ if (sdev->pdata->ipc_type == SOF_INTEL_IPC4 && !hda_use_tplg_nhlt) {
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+
+ ipc4_data->nhlt = intel_nhlt_init(sdev->dev);
+ }
+}
+
+void hda_ops_free(struct snd_sof_dev *sdev)
+{
+ if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+
+ if (!hda_use_tplg_nhlt)
+ intel_nhlt_free(ipc4_data->nhlt);
+ }
+}
+EXPORT_SYMBOL_NS(hda_ops_free, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+/*
+ * Common DAI driver for skl+ platforms.
+ * Some products that use this DAI array only physically have a subset of
+ * the DAIs, but no harm is done here by adding the whole set.
+ */
+struct snd_soc_dai_driver skl_dai[] = {
+{
+ .name = "SSP0 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP1 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP2 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP3 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP4 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP5 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "DMIC01 Pin",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 4,
+ },
+},
+{
+ .name = "DMIC16k Pin",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 4,
+ },
+},
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+{
+ .name = "iDisp1 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "iDisp2 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "iDisp3 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "iDisp4 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "Analog CPU DAI",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+},
+{
+ .name = "Digital CPU DAI",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+},
+{
+ .name = "Alt Analog CPU DAI",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+},
+#endif
+};
+
+int hda_dsp_dais_suspend(struct snd_sof_dev *sdev)
+{
+ /*
+ * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
+ * does not throw the TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
+ * Since the component suspend is called last, we can trap this corner case
+ * and force the DAIs to release their resources.
+ */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_LINK)
+ int ret;
+
+ ret = hda_dai_suspend(sof_to_bus(sdev));
+ if (ret < 0)
+ return ret;
+#endif
+
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
new file mode 100644
index 000000000..44f39a520
--- /dev/null
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -0,0 +1,1110 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/hda-mlink.h>
+#include <trace/events/sof_intel.h>
+#include "../sof-audio.h"
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+
+static bool hda_enable_trace_D0I3_S0;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
+module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
+MODULE_PARM_DESC(enable_trace_D0I3_S0,
+ "SOF HDA enable trace when the DSP is in D0I3 in S0");
+#endif
+
+/*
+ * DSP Core control.
+ */
+
+static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ u32 adspcs;
+ u32 reset;
+ int ret;
+
+ /* set reset bits for cores */
+ reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ reset, reset);
+
+ /* poll with timeout to check if operation successful */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ ((adspcs & reset) == reset),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
+
+ /* has core entered reset ? */
+ adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS);
+ if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
+ HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
+ dev_err(sdev->dev,
+ "error: reset enter failed: core_mask %x adspcs 0x%x\n",
+ core_mask, adspcs);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ unsigned int crst;
+ u32 adspcs;
+ int ret;
+
+ /* clear reset bits for cores */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CRST_MASK(core_mask),
+ 0);
+
+ /* poll with timeout to check if operation successful */
+ crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ !(adspcs & crst),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
+
+ /* has core left reset ? */
+ adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS);
+ if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
+ dev_err(sdev->dev,
+ "error: reset leave failed: core_mask %x adspcs 0x%x\n",
+ core_mask, adspcs);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ /* stall core */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+
+ /* set reset state */
+ return hda_dsp_core_reset_enter(sdev, core_mask);
+}
+
+bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ int val;
+ bool is_enable;
+
+ val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
+
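+/* evaluates to true only when every bit selected by field(m) is set in v */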
+#define MASK_IS_EQUAL(v, m, field) ({ \
+ u32 _m = field(m); \
+ ((v) & _m) == _m; \
+})
+
+ is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
+ MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
+ !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
+ !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+
+#undef MASK_IS_EQUAL
+
+ dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
+ is_enable, core_mask);
+
+ return is_enable;
+}
+
+int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ int ret;
+
+ /* leave reset state */
+ ret = hda_dsp_core_reset_leave(sdev, core_mask);
+ if (ret < 0)
+ return ret;
+
+ /* run core */
+ dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
+ 0);
+
+ /* is core now running ? */
+ if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
+ hda_dsp_core_stall_reset(sdev, core_mask);
+ dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
+ core_mask);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/*
+ * Power Management.
+ */
+
+int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ unsigned int cpa;
+ u32 adspcs;
+ int ret;
+
+ /* restrict core_mask to host managed cores mask */
+ core_mask &= chip->host_managed_cores_mask;
+ /* return if core_mask is not valid */
+ if (!core_mask)
+ return 0;
+
+ /* update bits */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_SPA_MASK(core_mask),
+ HDA_DSP_ADSPCS_SPA_MASK(core_mask));
+
+ /* poll with timeout to check if operation successful */
+ cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ (adspcs & cpa) == cpa,
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
+
+ /* did core power up ? */
+ adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS);
+ if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
+ HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
+ dev_err(sdev->dev,
+ "error: power up core failed core_mask %xadspcs 0x%x\n",
+ core_mask, adspcs);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ u32 adspcs;
+ int ret;
+
+ /* update bits */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ !(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+
+ return ret;
+}
+
+int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ int ret;
+
+ /* restrict core_mask to host managed cores mask */
+ core_mask &= chip->host_managed_cores_mask;
+
+ /* return if core_mask is not valid or cores are already enabled */
+ if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
+ return 0;
+
+ /* power up */
+ ret = hda_dsp_core_power_up(sdev, core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
+ core_mask);
+ return ret;
+ }
+
+ return hda_dsp_core_run(sdev, core_mask);
+}
+
+int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
+ unsigned int core_mask)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ int ret;
+
+ /* restrict core_mask to host managed cores mask */
+ core_mask &= chip->host_managed_cores_mask;
+
+ /* return if core_mask is not valid */
+ if (!core_mask)
+ return 0;
+
+ /* place core in reset prior to power down */
+ ret = hda_dsp_core_stall_reset(sdev, core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
+ core_mask);
+ return ret;
+ }
+
+ /* power down core */
+ ret = hda_dsp_core_power_down(sdev, core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
+ core_mask, ret);
+ return ret;
+ }
+
+ /* make sure we are in OFF state */
+ if (hda_dsp_core_is_enabled(sdev, core_mask)) {
+ dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
+ core_mask, ret);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ if (sdev->dspless_mode_selected)
+ return;
+
+ /* enable IPC DONE and BUSY interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
+ HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);
+
+ /* enable IPC interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
+}
+
+void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ if (sdev->dspless_mode_selected)
+ return;
+
+ /* disable IPC interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_IPC, 0);
+
+ /* disable IPC BUSY and DONE interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
+}
+
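+/* poll D0I3C until the Command-In-Progress (CIP) bit clears or the retry count is exhausted */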
+static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
+{
+ int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_intel_dsp_desc *chip;
+
+ chip = get_chip_info(pdata);
+ while (snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset) &
+ SOF_HDA_VS_D0I3C_CIP) {
+ if (!retry--)
+ return -ETIMEDOUT;
+ usleep_range(10, 15);
+ }
+
+ return 0;
+}
+
+static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
+{
+ const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm);
+
+ if (pm_ops && pm_ops->set_pm_gate)
+ return pm_ops->set_pm_gate(sdev, flags);
+
+ return 0;
+}
+
+static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_intel_dsp_desc *chip;
+ int ret;
+ u8 reg;
+
+ chip = get_chip_info(pdata);
+
+ /* Write to D0I3C after Command-In-Progress bit is cleared */
+ ret = hda_dsp_wait_d0i3c_done(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "CIP timeout before D0I3C update!\n");
+ return ret;
+ }
+
+ /* Update D0I3C register */
+ snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
+ SOF_HDA_VS_D0I3C_I3, value);
+
+ /*
+ * The value written to the D0I3C::I3 bit may not be taken into account immediately.
+ * A delay is recommended before checking if D0I3C::CIP is cleared
+ */
+ usleep_range(30, 40);
+
+ /* Wait for cmd in progress to be cleared before exiting the function */
+ ret = hda_dsp_wait_d0i3c_done(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "CIP timeout after D0I3C update!\n");
+ return ret;
+ }
+
+ reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
+ /* Confirm d0i3 state changed with paranoia check */
+ if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) {
+ dev_err(sdev->dev, "failed to update D0I3C!\n");
+ return -EIO;
+ }
+
+ trace_sof_intel_D0I3C_updated(sdev, reg);
+
+ return 0;
+}
+
+/*
+ * d0i3 streaming is enabled if all the active streams can
+ * work in d0i3 state and playback is enabled
+ */
+static bool hda_dsp_d0i3_streaming_applicable(struct snd_sof_dev *sdev)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_sof_pcm *spcm;
+ bool playback_active = false;
+ int dir;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ for_each_pcm_streams(dir) {
+ substream = spcm->stream[dir].substream;
+ if (!substream || !substream->runtime)
+ continue;
+
+ if (!spcm->stream[dir].d0i3_compatible)
+ return false;
+
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+ playback_active = true;
+ }
+ }
+
+ return playback_active;
+}
+
+static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ u32 flags = 0;
+ int ret;
+ u8 value = 0;
+
+ /*
+ * Sanity check for illegal state transitions
+ * The only allowed transitions are:
+ * 1. D3 -> D0I0
+ * 2. D0I0 -> D0I3
+ * 3. D0I3 -> D0I0
+ */
+ switch (sdev->dsp_power_state.state) {
+ case SOF_DSP_PM_D0:
+ /* Follow the sequence below for D0 substate transitions */
+ break;
+ case SOF_DSP_PM_D3:
+ /* Follow regular flow for D3 -> D0 transition */
+ return 0;
+ default:
+ dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
+ sdev->dsp_power_state.state, target_state->state);
+ return -EINVAL;
+ }
+
+ /* Set flags and register value for D0 target substate */
+ if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
+ value = SOF_HDA_VS_D0I3C_I3;
+
+ /*
+ * Trace DMA needs to be disabled when the DSP enters
+ * D0I3 for S0Ix suspend, but it can be kept enabled
+ * when the DSP enters D0I3 while the system is in S0
+ * for debug purposes.
+ */
+ if (!sdev->fw_trace_is_supported ||
+ !hda_enable_trace_D0I3_S0 ||
+ sdev->system_suspend_target != SOF_SUSPEND_NONE)
+ flags = HDA_PM_NO_DMA_TRACE;
+
+ if (hda_dsp_d0i3_streaming_applicable(sdev))
+ flags |= HDA_PM_PG_STREAMING;
+ } else {
+ /* prevent power gating in D0I0 */
+ flags = HDA_PM_PPG;
+ }
+
+ /* update D0I3C register */
+ ret = hda_dsp_update_d0i3c_register(sdev, value);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Notify the DSP of the state change.
+ * If this IPC fails, revert the D0I3C register update in order
+ * to prevent partial state change.
+ */
+ ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: PM_GATE ipc error %d\n", ret);
+ goto revert;
+ }
+
+ return ret;
+
+revert:
+ /* fallback to the previous register value */
+ value = value ? 0 : SOF_HDA_VS_D0I3C_I3;
+
+ /*
+ * This write can also fail, but return the original IPC error
+ * to signal that the state change failed.
+ */
+ hda_dsp_update_d0i3c_register(sdev, value);
+
+ return ret;
+}
+
+/* helper to log DSP state */
+static void hda_dsp_state_log(struct snd_sof_dev *sdev)
+{
+ switch (sdev->dsp_power_state.state) {
+ case SOF_DSP_PM_D0:
+ switch (sdev->dsp_power_state.substate) {
+ case SOF_HDA_DSP_PM_D0I0:
+ dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
+ break;
+ case SOF_HDA_DSP_PM_D0I3:
+ dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
+ break;
+ default:
+ dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
+ sdev->dsp_power_state.substate);
+ break;
+ }
+ break;
+ case SOF_DSP_PM_D1:
+ dev_dbg(sdev->dev, "Current DSP power state: D1\n");
+ break;
+ case SOF_DSP_PM_D2:
+ dev_dbg(sdev->dev, "Current DSP power state: D2\n");
+ break;
+ case SOF_DSP_PM_D3:
+ dev_dbg(sdev->dev, "Current DSP power state: D3\n");
+ break;
+ default:
+ dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
+ sdev->dsp_power_state.state);
+ break;
+ }
+}
+
+/*
+ * All DSP power state transitions are initiated by the driver.
+ * If the requested state change fails, the error is simply returned.
+ * Further state transitions are attempted only when the set_power_save() op
+ * is called again either because of a new IPC sent to the DSP or
+ * during system suspend/resume.
+ */
+static int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ int ret = 0;
+
+ switch (target_state->state) {
+ case SOF_DSP_PM_D0:
+ ret = hda_dsp_set_D0_state(sdev, target_state);
+ break;
+ case SOF_DSP_PM_D3:
+ /* The only allowed transition is: D0I0 -> D3 */
+ if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
+ sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
+ break;
+
+ dev_err(sdev->dev,
+ "error: transition from %d to %d not allowed\n",
+ sdev->dsp_power_state.state, target_state->state);
+ return -EINVAL;
+ default:
+ dev_err(sdev->dev, "error: target state unsupported %d\n",
+ target_state->state);
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "failed to set requested target DSP state %d substate %d\n",
+ target_state->state, target_state->substate);
+ return ret;
+ }
+
+ sdev->dsp_power_state = *target_state;
+ hda_dsp_state_log(sdev);
+ return ret;
+}
+
+int hda_dsp_set_power_state_ipc3(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ /*
+ * When the DSP is already in D0I3 and the target state is D0I3,
+ * it could be the case that the DSP is in D0I3 during S0
+ * and the system is suspending to S0Ix. Therefore,
+ * hda_dsp_set_D0_state() must be called to disable trace DMA
+ * by sending the PM_GATE IPC to the FW.
+ */
+ if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
+ sdev->system_suspend_target == SOF_SUSPEND_S0IX)
+ return hda_dsp_set_power_state(sdev, target_state);
+
+ /*
+ * For all other cases, return without doing anything if
+ * the DSP is already in the target state.
+ */
+ if (target_state->state == sdev->dsp_power_state.state &&
+ target_state->substate == sdev->dsp_power_state.substate)
+ return 0;
+
+ return hda_dsp_set_power_state(sdev, target_state);
+}
+
+int hda_dsp_set_power_state_ipc4(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ /* Return without doing anything if the DSP is already in the target state */
+ if (target_state->state == sdev->dsp_power_state.state &&
+ target_state->substate == sdev->dsp_power_state.substate)
+ return 0;
+
+ return hda_dsp_set_power_state(sdev, target_state);
+}
+
+/*
+ * Audio DSP states may transition as below:
+ *
+ * Opportunistic D0I3 in S0
+ * Runtime +---------------------+ Delayed D0i3 work timeout
+ * suspend | +--------------------+
+ * +------------+ D0I0(active) | |
+ * | | <---------------+ |
+ * | +--------> | New IPC | |
+ * | |Runtime +--^--+---------^--+--+ (via mailbox) | |
+ * | |resume | | | | | |
+ * | | | | | | | |
+ * | | System| | | | | |
+ * | | resume| | S3/S0IX | | | |
+ * | | | | suspend | | S0IX | |
+ * | | | | | |suspend | |
+ * | | | | | | | |
+ * | | | | | | | |
+ * +-v---+-----------+--v-------+ | | +------+----v----+
+ * | | | +-----------> |
+ * | D3 (suspended) | | | D0I3 |
+ * | | +--------------+ |
+ * | | System resume | |
+ * +----------------------------+ +----------------+
+ *
+ * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
+ * ignored the suspend trigger. Otherwise the DSP
+ * is in D3.
+ */
+
+static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int ret, j;
+
+ /*
+ * The memory used for IMR boot loses its content in states deeper than S3,
+ * so we must not try IMR boot on the next power up (as it will fail).
+ *
+ * In case of a firmware crash or boot failure, set skip_imr_boot to true
+ * as well in order to re-load the firmware and do a 'cold' boot.
+ */
+ if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
+ sdev->fw_state == SOF_FW_CRASHED ||
+ sdev->fw_state == SOF_FW_BOOT_FAILED)
+ hda->skip_imr_boot = true;
+
+ ret = chip->disable_interrupts(sdev);
+ if (ret < 0)
+ return ret;
+
+ hda_codec_jack_wake_enable(sdev, runtime_suspend);
+
+ /* power down all hda links */
+ hda_bus_ml_suspend(bus);
+
+ if (sdev->dspless_mode_selected)
+ goto skip_dsp;
+
+ ret = chip->power_down_dsp(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to power down DSP during suspend\n");
+ return ret;
+ }
+
+ /* reset ref counts for all cores */
+ for (j = 0; j < chip->cores_num; j++)
+ sdev->dsp_core_ref_count[j] = 0;
+
+ /* disable ppcap interrupt */
+ hda_dsp_ctrl_ppcap_enable(sdev, false);
+ hda_dsp_ctrl_ppcap_int_enable(sdev, false);
+skip_dsp:
+
+ /* disable hda bus irq and streams */
+ hda_dsp_ctrl_stop_chip(sdev);
+
+ /* disable LP retention mode */
+ snd_sof_pci_update_bits(sdev, PCI_PGCTL,
+ PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);
+
+ /* reset controller */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to reset controller during suspend\n");
+ return ret;
+ }
+
+ /* display codec can be powered off after link reset */
+ hda_codec_i915_display_power(sdev, false);
+
+ return 0;
+}
+
+static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
+{
+ int ret;
+
+ /* display codec must be powered before link reset */
+ hda_codec_i915_display_power(sdev, true);
+
+ /*
+ * clear TCSEL to clear playback on some HD Audio
+ * codecs. PCI TCSEL is defined in the Intel manuals.
+ */
+ snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
+
+ /* reset and start hda controller */
+ ret = hda_dsp_ctrl_init_chip(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to start controller after resume\n");
+ goto cleanup;
+ }
+
+ /* check jack status */
+ if (runtime_resume) {
+ hda_codec_jack_wake_enable(sdev, false);
+ if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
+ hda_codec_jack_check(sdev);
+ }
+
+ if (!sdev->dspless_mode_selected) {
+ /* enable ppcap interrupt */
+ hda_dsp_ctrl_ppcap_enable(sdev, true);
+ hda_dsp_ctrl_ppcap_int_enable(sdev, true);
+ }
+
+cleanup:
+ /* display codec can be powered off after controller init */
+ hda_codec_i915_display_power(sdev, false);
+
+ return 0;
+}
+
+int hda_dsp_resume(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ .substate = SOF_HDA_DSP_PM_D0I0,
+ };
+ int ret;
+
+ /* resume from D0I3 */
+ if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
+ ret = hda_bus_ml_resume(bus);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error %d in %s: failed to power up links",
+ ret, __func__);
+ return ret;
+ }
+
+ /* set up CORB/RIRB buffers if command I/O was on before suspend */
+ hda_codec_resume_cmd_io(sdev);
+
+ /* Set DSP power state */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_state);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
+ target_state.state, target_state.substate);
+ return ret;
+ }
+
+ /* restore L1SEN bit */
+ if (hda->l1_disabled)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, 0);
+
+ /* restore and disable the system wakeup */
+ pci_restore_state(pci);
+ disable_irq_wake(pci->irq);
+ return 0;
+ }
+
+ /* init hda controller. DSP cores will be powered up during fw boot */
+ ret = hda_resume(sdev, false);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
+}
+
+int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
+{
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+ int ret;
+
+ /* init hda controller. DSP cores will be powered up during fw boot */
+ ret = hda_resume(sdev, true);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
+}
+
+int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *hbus = sof_to_bus(sdev);
+
+ if (hbus->codec_powered) {
+ dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
+ (unsigned int)hbus->codec_powered);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D3,
+ };
+ int ret;
+
+ if (!sdev->dspless_mode_selected) {
+ /* cancel any attempt for DSP D0I3 */
+ cancel_delayed_work_sync(&hda->d0i3_work);
+ }
+
+ /* stop hda controller and power dsp off */
+ ret = hda_suspend(sdev, true);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
+}
+
+int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = target_state,
+ .substate = target_state == SOF_DSP_PM_D0 ?
+ SOF_HDA_DSP_PM_D0I3 : 0,
+ };
+ int ret;
+
+ if (!sdev->dspless_mode_selected) {
+ /* cancel any attempt for DSP D0I3 */
+ cancel_delayed_work_sync(&hda->d0i3_work);
+ }
+
+ if (target_state == SOF_DSP_PM_D0) {
+ /* Set DSP power state */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
+ target_dsp_state.state,
+ target_dsp_state.substate);
+ return ret;
+ }
+
+ /* enable L1SEN to make sure the system can enter S0Ix */
+ if (hda->l1_disabled)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN);
+
+ /* stop the CORB/RIRB DMA if it is On */
+ hda_codec_suspend_cmd_io(sdev);
+
+ /* no link can be powered in s0ix state */
+ ret = hda_bus_ml_suspend(bus);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error %d in %s: failed to power down links",
+ ret, __func__);
+ return ret;
+ }
+
+ /* enable the system waking up via IPC IRQ */
+ enable_irq_wake(pci->irq);
+ pci_save_state(pci);
+ return 0;
+ }
+
+ /* stop hda controller and power dsp off */
+ ret = hda_suspend(sdev, false);
+ if (ret < 0) {
+ dev_err(bus->dev, "error: suspending dsp\n");
+ return ret;
+ }
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
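+/* return a bitmap of streams that still have the DMA start bit set in their SD control register */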
+static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *s;
+ unsigned int active_streams = 0;
+ int sd_offset;
+ u32 val;
+
+ list_for_each_entry(s, &bus->stream_list, list) {
+ sd_offset = SOF_STREAM_SD_OFFSET(s);
+ val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ sd_offset);
+ if (val & SOF_HDA_SD_CTL_DMA_START)
+ active_streams |= BIT(s->index);
+ }
+
+ return active_streams;
+}
+
+static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ /*
+ * Do not assume a certain timing between the prior
+ * suspend flow and the running of this quirk function.
+ * This is needed if the controller was just put
+ * into reset before calling this function.
+ */
+ usleep_range(500, 1000);
+
+ /*
+ * Take controller out of reset to flush DMA
+ * transactions.
+ */
+ ret = hda_dsp_ctrl_link_reset(sdev, false);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(500, 1000);
+
+ /* Restore state for shutdown, back to reset */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev)
+{
+ unsigned int active_streams;
+ int ret, ret2;
+
+ /* check if DMA cleanup has been successful */
+ active_streams = hda_dsp_check_for_dma_streams(sdev);
+
+ sdev->system_suspend_target = SOF_SUSPEND_S3;
+ ret = snd_sof_suspend(sdev->dev);
+
+ if (active_streams) {
+ dev_warn(sdev->dev,
+ "There were active DSP streams (%#x) at shutdown, trying to recover\n",
+ active_streams);
+ ret2 = hda_dsp_s5_quirk(sdev);
+ if (ret2 < 0)
+ dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2);
+ }
+
+ return ret;
+}
+
+int hda_dsp_shutdown(struct snd_sof_dev *sdev)
+{
+ sdev->system_suspend_target = SOF_SUSPEND_S3;
+ return snd_sof_suspend(sdev->dev);
+}
+
+int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ /* make sure all DAI resources are freed */
+ ret = hda_dsp_dais_suspend(sdev);
+ if (ret < 0)
+ dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__);
+
+ return ret;
+}
+
+void hda_dsp_d0i3_work(struct work_struct *work)
+{
+ struct sof_intel_hda_dev *hdev = container_of(work,
+ struct sof_intel_hda_dev,
+ d0i3_work.work);
+ struct hdac_bus *bus = &hdev->hbus.core;
+ struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
+ struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ .substate = SOF_HDA_DSP_PM_D0I3,
+ };
+ int ret;
+
+ /* DSP can enter D0I3 iff only D0I3-compatible streams are active */
+ if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
+ /* remain in D0I0 */
+ return;
+
+ /* This can fail but error cannot be propagated */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_state);
+ if (ret < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: failed to set DSP state %d substate %d\n",
+ target_state.state, target_state.substate);
+}
+
+int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
+{
+ const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
+ int ret, ret1;
+
+ /* power up core */
+ ret = hda_dsp_enable_core(sdev, BIT(core));
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to power up core %d with err: %d\n",
+ core, ret);
+ return ret;
+ }
+
+ /* No need to send IPC for primary core or if FW boot is not complete */
+ if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
+ return 0;
+
+ /* No need to continue if the set_core_state op is not available */
+ if (!pm_ops->set_core_state)
+ return 0;
+
+ /* Now notify DSP for secondary cores */
+ ret = pm_ops->set_core_state(sdev, core, true);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to enable secondary core '%d' failed with %d\n",
+ core, ret);
+ goto power_down;
+ }
+
+ return ret;
+
+power_down:
+ /* power down core if it is host managed and return the original error if this fails too */
+ ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core));
+ if (ret1 < 0)
+ dev_err(sdev->dev, "failed to power down core: %d with err: %d\n", core, ret1);
+
+ return ret;
+}
+
+int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev)
+{
+ hda_sdw_int_enable(sdev, false);
+ hda_dsp_ipc_int_disable(sdev);
+
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c
new file mode 100644
index 000000000..a838dddb1
--- /dev/null
+++ b/sound/soc/sof/intel/hda-ipc.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/sof/ipc4/header.h>
+#include <trace/events/sof_intel.h>
+#include "../ops.h"
+#include "hda.h"
+
+static void hda_dsp_ipc_host_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * tell DSP cmd is done - clear busy
+ * interrupt and send reply msg to dsp
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCT,
+ HDA_DSP_REG_HIPCT_BUSY,
+ HDA_DSP_REG_HIPCT_BUSY);
+
+ /* unmask BUSY interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_BUSY,
+ HDA_DSP_REG_HIPCCTL_BUSY);
+}
+
+static void hda_dsp_ipc_dsp_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * set DONE bit - tell the DSP we have received the reply msg and
+ * processed it; it should not send further replies to the host
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCIE,
+ HDA_DSP_REG_HIPCIE_DONE,
+ HDA_DSP_REG_HIPCIE_DONE);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_DONE,
+ HDA_DSP_REG_HIPCCTL_DONE);
+}
+
+int hda_dsp_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ /* send IPC message to DSP */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI,
+ HDA_DSP_REG_HIPCI_BUSY);
+
+ return 0;
+}
+
+static inline bool hda_dsp_ipc4_pm_msg(u32 primary)
+{
+ /* pm setting is only supported by module msg */
+ if (SOF_IPC4_MSG_IS_MODULE_MSG(primary) != SOF_IPC4_MODULE_MSG)
+ return false;
+
+ if (SOF_IPC4_MSG_TYPE_GET(primary) == SOF_IPC4_MOD_SET_DX ||
+ SOF_IPC4_MSG_TYPE_GET(primary) == SOF_IPC4_MOD_SET_D0IX)
+ return true;
+
+ return false;
+}
+
+void hda_dsp_ipc4_schedule_d0i3_work(struct sof_intel_hda_dev *hdev,
+ struct snd_sof_ipc_msg *msg)
+{
+ struct sof_ipc4_msg *msg_data = msg->msg_data;
+
+ /* Schedule a delayed work for d0i3 entry after sending non-pm ipc msg */
+ if (hda_dsp_ipc4_pm_msg(msg_data->primary))
+ return;
+
+ mod_delayed_work(system_wq, &hdev->d0i3_work,
+ msecs_to_jiffies(SOF_HDA_D0I3_WORK_DELAY_MS));
+}
+
+int hda_dsp_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ struct sof_ipc4_msg *msg_data = msg->msg_data;
+
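+ /* defer the message if a previous IPC is still being processed; it is sent once the ACK arrives */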
+ if (hda_ipc4_tx_is_busy(sdev)) {
+ hdev->delayed_ipc_tx_msg = msg;
+ return 0;
+ }
+
+ hdev->delayed_ipc_tx_msg = NULL;
+
+ /* send the message via mailbox */
+ if (msg_data->data_size)
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
+ msg_data->data_size);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE, msg_data->extension);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI,
+ msg_data->primary | HDA_DSP_REG_HIPCI_BUSY);
+
+ hda_dsp_ipc4_schedule_d0i3_work(hdev, msg);
+
+ return 0;
+}
+
+void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply reply;
+ struct sof_ipc_cmd_hdr *hdr;
+
+ /*
+ * Sometimes an unexpected reply IPC arrives that does not belong
+ * to any of the IPCs sent from the driver.
+ * In this case, the driver must ignore the IPC.
+ */
+ if (!msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
+ return;
+ }
+
+ hdr = msg->msg_data;
+ if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE) ||
+ hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
+ /*
+ * memory windows are powered off before sending IPC reply,
+ * so we can't read the mailbox for CTX_SAVE and PM_GATE
+ * replies.
+ */
+ reply.error = 0;
+ reply.hdr.cmd = SOF_IPC_GLB_REPLY;
+ reply.hdr.size = sizeof(reply);
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+
+ msg->reply_error = 0;
+ } else {
+ snd_sof_ipc_get_reply(sdev);
+ }
+}
+
+irqreturn_t hda_dsp_ipc4_irq_thread(int irq, void *context)
+{
+ struct sof_ipc4_msg notification_data = {{ 0 }};
+ struct snd_sof_dev *sdev = context;
+ bool ack_received = false;
+ bool ipc_irq = false;
+ u32 hipcie, hipct;
+
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
+ hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
+
+ if (hipcie & HDA_DSP_REG_HIPCIE_DONE) {
+ /* DSP received the message */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_DONE, 0);
+ hda_dsp_ipc_dsp_done(sdev);
+
+ ipc_irq = true;
+ ack_received = true;
+ }
+
+ if (hipct & HDA_DSP_REG_HIPCT_BUSY) {
+ /* Message from DSP (reply or notification) */
+ u32 hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCTE);
+ u32 primary = hipct & HDA_DSP_REG_HIPCT_MSG_MASK;
+ u32 extension = hipcte & HDA_DSP_REG_HIPCTE_MSG_MASK;
+
+ /* mask BUSY interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_BUSY, 0);
+
+ if (primary & SOF_IPC4_MSG_DIR_MASK) {
+ /* Reply received */
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;
+
+ data->primary = primary;
+ data->extension = extension;
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+ snd_sof_ipc_get_reply(sdev);
+ hda_dsp_ipc_host_done(sdev);
+ snd_sof_ipc_reply(sdev, data->primary);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev,
+ "IPC reply before FW_READY: %#x|%#x\n",
+ primary, extension);
+ }
+ } else {
+ /* Notification received */
+
+ notification_data.primary = primary;
+ notification_data.extension = extension;
+ sdev->ipc->msg.rx_data = &notification_data;
+ snd_sof_ipc_msgs_rx(sdev);
+ sdev->ipc->msg.rx_data = NULL;
+
+ /* Let DSP know that we have finished processing the message */
+ hda_dsp_ipc_host_done(sdev);
+ }
+
+ ipc_irq = true;
+ }
+
+ if (!ipc_irq)
+ /* This interrupt is not shared so no need to return IRQ_NONE. */
+ dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");
+
+ if (ack_received) {
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ if (hdev->delayed_ipc_tx_msg)
+ hda_dsp_ipc4_send_msg(sdev, hdev->delayed_ipc_tx_msg);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* IPC handler thread */
+irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 hipci;
+ u32 hipcie;
+ u32 hipct;
+ u32 hipcte;
+ u32 msg;
+ u32 msg_ext;
+ bool ipc_irq = false;
+
+ /* read IPC status */
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCIE);
+ hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
+ hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI);
+ hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCTE);
+
+ /* is this a reply message from the DSP */
+ if (hipcie & HDA_DSP_REG_HIPCIE_DONE) {
+ msg = hipci & HDA_DSP_REG_HIPCI_MSG_MASK;
+ msg_ext = hipcie & HDA_DSP_REG_HIPCIE_MSG_MASK;
+
+ trace_sof_intel_ipc_firmware_response(sdev, msg, msg_ext);
+
+ /* mask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_DONE, 0);
+
+ /*
+ * Make sure the interrupt thread cannot be preempted between
+ * waking up the sender and re-enabling the interrupt. Also
+ * protect against a theoretical race with sof_ipc_tx_message():
+ * if the DSP is fast enough to receive an IPC message, reply to
+ * it, and the host interrupt processing calls this function on
+ * a different core from the one, where the sending is taking
+ * place, the message might not yet be marked as expecting a
+ * reply.
+ */
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ spin_lock_irq(&sdev->ipc_lock);
+
+ /* handle immediate reply from DSP core */
+ hda_dsp_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, msg);
+
+ /* set the done bit */
+ hda_dsp_ipc_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev, "IPC reply before FW_READY: %#x\n",
+ msg);
+ }
+
+ ipc_irq = true;
+ }
+
+ /* is this a new message from DSP */
+ if (hipct & HDA_DSP_REG_HIPCT_BUSY) {
+ msg = hipct & HDA_DSP_REG_HIPCT_MSG_MASK;
+ msg_ext = hipcte & HDA_DSP_REG_HIPCTE_MSG_MASK;
+
+ trace_sof_intel_ipc_firmware_initiated(sdev, msg, msg_ext);
+
+ /* mask BUSY interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_BUSY, 0);
+
+ /* handle messages from DSP */
+ if ((hipct & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ bool non_recoverable = true;
+
+ /*
+ * This is a PANIC message!
+ *
+ * If it is arriving during firmware boot and it is not
+ * the last boot attempt then change the non_recoverable
+ * to false as the DSP might be able to boot in the next
+ * iteration(s)
+ */
+ if (sdev->fw_state == SOF_FW_BOOT_IN_PROGRESS &&
+ hda->boot_iteration < HDA_FW_BOOT_ATTEMPTS)
+ non_recoverable = false;
+
+ snd_sof_dsp_panic(sdev, HDA_DSP_PANIC_OFFSET(msg_ext),
+ non_recoverable);
+ } else {
+ /* normal message - process normally */
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ hda_dsp_ipc_host_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ if (!ipc_irq) {
+ /*
+ * This interrupt is not shared so no need to return IRQ_NONE.
+ */
+ dev_dbg_ratelimited(sdev->dev,
+ "nothing to do in IPC IRQ thread\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Check if an IPC IRQ occurred */
+bool hda_dsp_check_ipc_irq(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ bool ret = false;
+ u32 irq_status;
+
+ if (sdev->dspless_mode_selected)
+ return false;
+
+ /* store status */
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS);
+ trace_sof_intel_hda_irq_ipc_check(sdev, irq_status);
+
+ /* invalid message ? */
+ if (irq_status == 0xffffffff)
+ goto out;
+
+ /* IPC message ? */
+ if (irq_status & HDA_DSP_ADSPIS_IPC)
+ ret = true;
+
+ /* CLDMA message ? */
+ if (irq_status & HDA_DSP_ADSPIS_CL_DMA) {
+ hda->code_loading = 0;
+ wake_up(&hda->waitq);
+ ret = false;
+ }
+
+out:
+ return ret;
+}
+
+int hda_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return HDA_DSP_MBOX_UPLINK_OFFSET;
+}
+
+int hda_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return SRAM_WINDOW_OFFSET(id);
+}
+
+int hda_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ void *p, size_t sz)
+{
+ if (!sps || !sdev->stream_box.size) {
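+ /* no stream context available: read from the generic DSP mailbox */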
+ sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
+ } else {
+ struct snd_pcm_substream *substream = sps->substream;
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct sof_intel_hda_stream *hda_stream;
+
+ hda_stream = container_of(hstream,
+ struct sof_intel_hda_stream,
+ hext_stream.hstream);
+
+ /* The stream might already be closed */
+ if (!hstream)
+ return -ESTRPIPE;
+
+ sof_mailbox_read(sdev, hda_stream->sof_intel_stream.posn_offset, p, sz);
+ }
+
+ return 0;
+}
+
+int hda_set_stream_data_offset(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ size_t posn_offset)
+{
+ struct snd_pcm_substream *substream = sps->substream;
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct sof_intel_hda_stream *hda_stream;
+
+ hda_stream = container_of(hstream, struct sof_intel_hda_stream,
+ hext_stream.hstream);
+
+ /* check for unaligned offset or overflow */
+ if (posn_offset > sdev->stream_box.size ||
+ posn_offset % sizeof(struct sof_ipc_stream_posn) != 0)
+ return -EINVAL;
+
+ hda_stream->sof_intel_stream.posn_offset = sdev->stream_box.offset + posn_offset;
+
+ dev_dbg(sdev->dev, "pcm: stream dir %d, posn mailbox offset is %zu",
+ substream->stream, hda_stream->sof_intel_stream.posn_offset);
+
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-ipc.h b/sound/soc/sof/intel/hda-ipc.h
new file mode 100644
index 000000000..8ec5e9f6f
--- /dev/null
+++ b/sound/soc/sof/intel/hda-ipc.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2019 Intel Corporation. All rights reserved.
+ *
+ * Author: Keyon Jie <yang.jie@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_HDA_IPC_H
+#define __SOF_INTEL_HDA_IPC_H
+
+/*
+ * Primary register, mapped to:
+ * - DIPCTDR (HIPCIDR) in sideband IPC (cAVS 1.8+)
+ * - DIPCT in cAVS 1.5 IPC
+ *
+ * Secondary register, mapped to:
+ * - DIPCTDD (HIPCIDD) in sideband IPC (cAVS 1.8+)
+ * - DIPCTE in cAVS 1.5 IPC
+ */
+
+/* Common bits in primary register */
+
+/* Reserved for doorbell */
+#define HDA_IPC_RSVD_31 BIT(31)
+/* Target, 0 - normal message, 1 - compact message (cAVS compatible) */
+#define HDA_IPC_MSG_COMPACT BIT(30)
+/* Direction, 0 - request, 1 - response */
+#define HDA_IPC_RSP BIT(29)
+
+#define HDA_IPC_TYPE_SHIFT 24
+#define HDA_IPC_TYPE_MASK GENMASK(28, 24)
+#define HDA_IPC_TYPE(x) ((x) << HDA_IPC_TYPE_SHIFT)
+
+#define HDA_IPC_PM_GATE HDA_IPC_TYPE(0x8U)
+
+/* Command specific payload bits in secondary register */
+
+/* Disable DMA tracing (0 - keep tracing, 1 - to disable DMA trace) */
+#define HDA_PM_NO_DMA_TRACE BIT(4)
+/* Prevent clock gating (0 - cg allowed, 1 - DSP clock always on) */
+#define HDA_PM_PCG BIT(3)
+/* Prevent power gating (0 - deep power state transitions allowed, 1 - prevent deep power states) */
+#define HDA_PM_PPG BIT(2)
+/* Indicates whether streaming is active */
+#define HDA_PM_PG_STREAMING BIT(1)
+#define HDA_PM_PG_RSVD BIT(0)
+
+irqreturn_t cnl_ipc_irq_thread(int irq, void *context);
+int cnl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
+void cnl_ipc_dump(struct snd_sof_dev *sdev);
+void cnl_ipc4_dump(struct snd_sof_dev *sdev);
+
+#endif
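The macros in this header split a power-management IPC across the two registers described at the top of the file: the primary word carries the message target and type, the secondary word carries the PM_GATE payload bits. A minimal sketch of how they might be combined (the helper name is illustrative, not part of this patch):

/* sketch: compose a compact PM_GATE request that blocks clock and power gating */
static void hda_ipc_sketch_pm_gate(u32 *primary, u32 *payload)
{
	/* primary register: compact message of type PM_GATE */
	*primary = HDA_IPC_MSG_COMPACT | HDA_IPC_PM_GATE;

	/* secondary register: keep the DSP clock on and prevent deep power states */
	*payload = HDA_PM_PCG | HDA_PM_PPG;
}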
diff --git a/sound/soc/sof/intel/hda-loader-skl.c b/sound/soc/sof/intel/hda-loader-skl.c
new file mode 100644
index 000000000..1e77ca936
--- /dev/null
+++ b/sound/soc/sof/intel/hda-loader-skl.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
+//
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/sof.h>
+#include <sound/pcm_params.h>
+
+#include "../sof-priv.h"
+#include "../ops.h"
+#include "hda.h"
+
+#define HDA_SKL_WAIT_TIMEOUT 500 /* 500 msec */
+#define HDA_SKL_CLDMA_MAX_BUFFER_SIZE (32 * PAGE_SIZE)
+
+/* Stream Reset */
+#define HDA_CL_SD_CTL_SRST_SHIFT 0
+#define HDA_CL_SD_CTL_SRST(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_SRST_SHIFT)
+
+/* Stream Run */
+#define HDA_CL_SD_CTL_RUN_SHIFT 1
+#define HDA_CL_SD_CTL_RUN(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_RUN_SHIFT)
+
+/* Interrupt On Completion Enable */
+#define HDA_CL_SD_CTL_IOCE_SHIFT 2
+#define HDA_CL_SD_CTL_IOCE(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_IOCE_SHIFT)
+
+/* FIFO Error Interrupt Enable */
+#define HDA_CL_SD_CTL_FEIE_SHIFT 3
+#define HDA_CL_SD_CTL_FEIE(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_FEIE_SHIFT)
+
+/* Descriptor Error Interrupt Enable */
+#define HDA_CL_SD_CTL_DEIE_SHIFT 4
+#define HDA_CL_SD_CTL_DEIE(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_DEIE_SHIFT)
+
+/* FIFO Limit Change */
+#define HDA_CL_SD_CTL_FIFOLC_SHIFT 5
+#define HDA_CL_SD_CTL_FIFOLC(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_FIFOLC_SHIFT)
+
+/* Stripe Control */
+#define HDA_CL_SD_CTL_STRIPE_SHIFT 16
+#define HDA_CL_SD_CTL_STRIPE(x) (((x) & 0x3) << \
+ HDA_CL_SD_CTL_STRIPE_SHIFT)
+
+/* Traffic Priority */
+#define HDA_CL_SD_CTL_TP_SHIFT 18
+#define HDA_CL_SD_CTL_TP(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_TP_SHIFT)
+
+/* Bidirectional Direction Control */
+#define HDA_CL_SD_CTL_DIR_SHIFT 19
+#define HDA_CL_SD_CTL_DIR(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_DIR_SHIFT)
+
+/* Stream Number */
+#define HDA_CL_SD_CTL_STRM_SHIFT 20
+#define HDA_CL_SD_CTL_STRM(x) (((x) & 0xf) << \
+ HDA_CL_SD_CTL_STRM_SHIFT)
+
+#define HDA_CL_SD_CTL_INT(x) \
+ (HDA_CL_SD_CTL_IOCE(x) | \
+ HDA_CL_SD_CTL_FEIE(x) | \
+ HDA_CL_SD_CTL_DEIE(x))
+
+#define HDA_CL_SD_CTL_INT_MASK \
+ (HDA_CL_SD_CTL_IOCE(1) | \
+ HDA_CL_SD_CTL_FEIE(1) | \
+ HDA_CL_SD_CTL_DEIE(1))
+
+#define DMA_ADDRESS_128_BITS_ALIGNMENT 7
+#define BDL_ALIGN(x) ((x) >> DMA_ADDRESS_128_BITS_ALIGNMENT)
+
+/* Buffer Descriptor List Lower Base Address */
+#define HDA_CL_SD_BDLPLBA_SHIFT 7
+#define HDA_CL_SD_BDLPLBA_MASK GENMASK(31, 7)
+#define HDA_CL_SD_BDLPLBA(x) \
+ ((BDL_ALIGN(lower_32_bits(x)) << HDA_CL_SD_BDLPLBA_SHIFT) & \
+ HDA_CL_SD_BDLPLBA_MASK)
+
+/* Buffer Descriptor List Upper Base Address */
+#define HDA_CL_SD_BDLPUBA(x) \
+ (upper_32_bits(x))
+
+/* Software Position in Buffer Enable */
+#define HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT 0
+#define HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_MASK \
+ (1 << HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT)
+
+#define HDA_CL_SPBFIFO_SPBFCCTL_SPIBE(x) \
+ (((x) << HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT) & \
+ HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_MASK)
+
+#define HDA_CL_DMA_SD_INT_COMPLETE 0x4
+
+static int cl_skl_cldma_setup_bdle(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab_data,
+ __le32 **bdlp, int size, int with_ioc)
+{
+ phys_addr_t addr = virt_to_phys(dmab_data->area);
+ __le32 *bdl = *bdlp;
+
+ /*
+ * This code is simplified by using one fragment of physical memory and assuming
+ * all the code fits. This could be improved with scatter-gather but the firmware
+ * size is limited by DSP memory anyway
+ */
+ bdl[0] = cpu_to_le32(lower_32_bits(addr));
+ bdl[1] = cpu_to_le32(upper_32_bits(addr));
+ bdl[2] = cpu_to_le32(size);
+ bdl[3] = (!with_ioc) ? 0 : cpu_to_le32(0x01);
+
+ return 1; /* one fragment */
+}
+
+static void cl_skl_cldma_stream_run(struct snd_sof_dev *sdev, bool enable)
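The comment in cl_skl_cldma_setup_bdle() notes that a single physically contiguous fragment is assumed; with scatter-gather, each mapped chunk would need its own four-word BDL entry and the function would return the entry count. A hedged sketch of that variant (not part of this driver, assuming an already DMA-mapped sg_table):

/* sketch: fill one BDL entry per scatter-gather chunk, raise IOC only on the last one */
static int cldma_setup_bdle_sg(struct sg_table *sgt, __le32 *bdl)
{
	struct scatterlist *sg;
	int i, frags = 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		dma_addr_t addr = sg_dma_address(sg);

		bdl[0] = cpu_to_le32(lower_32_bits(addr));
		bdl[1] = cpu_to_le32(upper_32_bits(addr));
		bdl[2] = cpu_to_le32(sg_dma_len(sg));
		bdl[3] = cpu_to_le32(i == sgt->nents - 1 ? 0x01 : 0);
		bdl += 4;
		frags++;
	}

	return frags;
}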
+{
+ int sd_offset = SOF_HDA_ADSP_LOADER_BASE;
+ unsigned char val;
+ int retries;
+ u32 run = enable ? 0x1 : 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_CTL,
+ HDA_CL_SD_CTL_RUN(1), HDA_CL_SD_CTL_RUN(run));
+
+ retries = 300;
+ do {
+ udelay(3);
+
+ /* waiting for hardware to report the stream Run bit set */
+ val = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_CTL);
+ val &= HDA_CL_SD_CTL_RUN(1);
+ if (enable && val)
+ break;
+ else if (!enable && !val)
+ break;
+ } while (--retries);
+
+ if (retries == 0)
+ dev_err(sdev->dev, "%s: failed to set Run bit=%d enable=%d\n",
+ __func__, val, enable);
+}
+
+static void cl_skl_cldma_stream_clear(struct snd_sof_dev *sdev)
+{
+ int sd_offset = SOF_HDA_ADSP_LOADER_BASE;
+
+ /* make sure Run bit is cleared before setting stream register */
+ cl_skl_cldma_stream_run(sdev, 0);
+
+ /* Disable the Interrupt On Completion, FIFO Error Interrupt,
+ * Descriptor Error Interrupt and set the cldma stream number to 0.
+ */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_CTL,
+ HDA_CL_SD_CTL_INT_MASK, HDA_CL_SD_CTL_INT(0));
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_CTL,
+ HDA_CL_SD_CTL_STRM(0xf), HDA_CL_SD_CTL_STRM(0));
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL, HDA_CL_SD_BDLPLBA(0));
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU, 0);
+
+ /* Set the Cyclic Buffer Length to 0. */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_CBL, 0);
+ /* Set the Last Valid Index. */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_LVI, 0);
+}
+
+static void cl_skl_cldma_setup_spb(struct snd_sof_dev *sdev,
+ unsigned int size, bool enable)
+{
+ int sd_offset = SOF_DSP_REG_CL_SPBFIFO;
+
+ if (enable)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
+ HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
+ HDA_CL_SPBFIFO_SPBFCCTL_SPIBE(1));
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SPBFIFO_SPIB, size);
+}
+
+static void cl_skl_cldma_set_intr(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? HDA_DSP_ADSPIC_CL_DMA : 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_CL_DMA, val);
+}
+
+static void cl_skl_cldma_cleanup_spb(struct snd_sof_dev *sdev)
+{
+ int sd_offset = SOF_DSP_REG_CL_SPBFIFO;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
+ HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
+ HDA_CL_SPBFIFO_SPBFCCTL_SPIBE(0));
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SPBFIFO_SPIB, 0);
+}
+
+static void cl_skl_cldma_setup_controller(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab_bdl,
+ unsigned int max_size, u32 count)
+{
+ int sd_offset = SOF_HDA_ADSP_LOADER_BASE;
+
+ /* Clear the stream first and then set it. */
+ cl_skl_cldma_stream_clear(sdev);
+
+ /* setting the stream register */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL,
+ HDA_CL_SD_BDLPLBA(dmab_bdl->addr));
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU,
+ HDA_CL_SD_BDLPUBA(dmab_bdl->addr));
+
+ /* Set the Cyclic Buffer Length. */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_CBL, max_size);
+ /* Set the Last Valid Index. */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_LVI, count - 1);
+
+ /* Set the Interrupt On Completion, FIFO Error Interrupt,
+ * Descriptor Error Interrupt and the cldma stream number.
+ */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_CTL,
+ HDA_CL_SD_CTL_INT_MASK, HDA_CL_SD_CTL_INT(1));
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_CTL,
+ HDA_CL_SD_CTL_STRM(0xf),
+ HDA_CL_SD_CTL_STRM(1));
+}
+
+static int cl_stream_prepare_skl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct snd_dma_buffer *dmab_bdl)
+{
+ unsigned int bufsize = HDA_SKL_CLDMA_MAX_BUFFER_SIZE;
+ __le32 *bdl;
+ int frags;
+ int ret;
+
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev, bufsize, dmab);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: failed to alloc fw buffer: %x\n", __func__, ret);
+ return ret;
+ }
+
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev, bufsize, dmab_bdl);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: failed to alloc blde: %x\n", __func__, ret);
+ snd_dma_free_pages(dmab);
+ return ret;
+ }
+
+ bdl = (__le32 *)dmab_bdl->area;
+ frags = cl_skl_cldma_setup_bdle(sdev, dmab, &bdl, bufsize, 1);
+ cl_skl_cldma_setup_controller(sdev, dmab_bdl, bufsize, frags);
+
+ return ret;
+}
+
+static void cl_cleanup_skl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct snd_dma_buffer *dmab_bdl)
+{
+ cl_skl_cldma_cleanup_spb(sdev);
+ cl_skl_cldma_stream_clear(sdev);
+ snd_dma_free_pages(dmab);
+ snd_dma_free_pages(dmab_bdl);
+}
+
+static int cl_dsp_init_skl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct snd_dma_buffer *dmab_bdl)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ unsigned int status;
+ u32 flags;
+ int ret;
+
+ /* Check whether the init_core is already enabled: if it is, reset it and
+ * make it run; if not, power it down and then enable it again.
+ */
+ if (hda_dsp_core_is_enabled(sdev, chip->init_core_mask)) {
+ /* if enabled, reset it, and run the init_core. */
+ ret = hda_dsp_core_stall_reset(sdev, chip->init_core_mask);
+ if (ret < 0)
+ goto err;
+
+ ret = hda_dsp_core_run(sdev, chip->init_core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: dsp core start failed %d\n", __func__, ret);
+ goto err;
+ }
+ } else {
+ /* if not enabled, power it down first and then power it up and run
+ * the init_core.
+ */
+ ret = hda_dsp_core_reset_power_down(sdev, chip->init_core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: dsp core0 disable fail: %d\n", __func__, ret);
+ goto err;
+ }
+ ret = hda_dsp_enable_core(sdev, chip->init_core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: dsp core0 enable fail: %d\n", __func__, ret);
+ goto err;
+ }
+ }
+
+ /* prepare DMA for code loader stream */
+ ret = cl_stream_prepare_skl(sdev, dmab, dmab_bdl);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: dma prepare fw loading err: %x\n", __func__, ret);
+ return ret;
+ }
+
+ /* enable the interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
+
+ /* enable IPC DONE interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ HDA_DSP_REG_HIPCCTL_DONE,
+ HDA_DSP_REG_HIPCCTL_DONE);
+
+ /* enable IPC BUSY interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ HDA_DSP_REG_HIPCCTL_BUSY,
+ HDA_DSP_REG_HIPCCTL_BUSY);
+
+ /* polling the ROM init status information. */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ chip->rom_status_reg, status,
+ (FSR_TO_STATE_CODE(status)
+ == FSR_STATE_INIT_DONE),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ chip->rom_init_timeout *
+ USEC_PER_MSEC);
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX;
+
+ snd_sof_dsp_dbg_dump(sdev, "Boot failed\n", flags);
+ cl_cleanup_skl(sdev, dmab, dmab_bdl);
+ hda_dsp_core_reset_power_down(sdev, chip->init_core_mask);
+ return ret;
+}
+
+static void cl_skl_cldma_fill_buffer(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ unsigned int bufsize,
+ unsigned int copysize,
+ const void *curr_pos,
+ bool intr_enable)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+
+ /* copy the image into the buffer with the maximum buffer size. */
+ unsigned int size = (bufsize == copysize) ? bufsize : copysize;
+
+ memcpy(dmab->area, curr_pos, size);
+
+ /* Set the wait condition for every load. */
+ hda->code_loading = 1;
+
+ /* Set the interrupt. */
+ if (intr_enable)
+ cl_skl_cldma_set_intr(sdev, true);
+
+ /* Set the SPB. */
+ cl_skl_cldma_setup_spb(sdev, size, true);
+
+ /* Trigger the code loading stream. */
+ cl_skl_cldma_stream_run(sdev, true);
+}
+
+static int cl_skl_cldma_wait_interruptible(struct snd_sof_dev *sdev,
+ bool intr_wait)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ int sd_offset = SOF_HDA_ADSP_LOADER_BASE;
+ u8 cl_dma_intr_status;
+
+ /*
+ * Wait for CLDMA interrupt to inform the binary segment transfer is
+ * complete.
+ */
+ if (!wait_event_timeout(hda->waitq, !hda->code_loading,
+ msecs_to_jiffies(HDA_SKL_WAIT_TIMEOUT))) {
+ dev_err(sdev->dev, "cldma copy timeout\n");
+ dev_err(sdev->dev, "ROM code=%#x: FW status=%#x\n",
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_ROM_ERROR),
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg));
+ return -EIO;
+ }
+
+ /* now check DMA interrupt status */
+ cl_dma_intr_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_STS);
+
+ if (!(cl_dma_intr_status & HDA_CL_DMA_SD_INT_COMPLETE)) {
+ dev_err(sdev->dev, "cldma copy failed\n");
+ return -EIO;
+ }
+
+ dev_dbg(sdev->dev, "cldma buffer copy complete\n");
+ return 0;
+}
+
+static int
+cl_skl_cldma_copy_to_buf(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ const void *bin,
+ u32 total_size, u32 bufsize)
+{
+ unsigned int bytes_left = total_size;
+ const void *curr_pos = bin;
+ int ret;
+
+ if (!total_size)
+ return -EINVAL;
+
+ while (bytes_left > 0) {
+ if (bytes_left > bufsize) {
+ dev_dbg(sdev->dev, "cldma copy %#x bytes\n", bufsize);
+
+ cl_skl_cldma_fill_buffer(sdev, dmab, bufsize, bufsize, curr_pos, true);
+
+ ret = cl_skl_cldma_wait_interruptible(sdev, false);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: fw failed to load. %#x bytes remaining\n",
+ __func__, bytes_left);
+ return ret;
+ }
+
+ bytes_left -= bufsize;
+ curr_pos += bufsize;
+ } else {
+ dev_dbg(sdev->dev, "cldma copy %#x bytes\n", bytes_left);
+
+ cl_skl_cldma_set_intr(sdev, false);
+ cl_skl_cldma_fill_buffer(sdev, dmab, bufsize, bytes_left, curr_pos, false);
+ return 0;
+ }
+ }
+
+ return bytes_left;
+}
+
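As a worked example of the loop above: with the 32 * PAGE_SIZE CLDMA buffer (128 KiB on 4 KiB pages), a hypothetical 400 KiB firmware image is transferred as three full 128 KiB copies, each waited on via the CLDMA interrupt, followed by a final 16 KiB copy issued with the interrupt disabled. The function returns immediately after starting that last chunk; its completion is observed by the caller polling the ROM status register instead.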
+static int cl_copy_fw_skl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab)
+{
+ const struct firmware *fw = sdev->basefw.fw;
+ struct firmware stripped_firmware;
+ unsigned int bufsize = HDA_SKL_CLDMA_MAX_BUFFER_SIZE;
+ int ret;
+
+ stripped_firmware.data = fw->data + sdev->basefw.payload_offset;
+ stripped_firmware.size = fw->size - sdev->basefw.payload_offset;
+
+ dev_dbg(sdev->dev, "firmware size: %#zx buffer size %#x\n", fw->size, bufsize);
+
+ ret = cl_skl_cldma_copy_to_buf(sdev, dmab, stripped_firmware.data,
+ stripped_firmware.size, bufsize);
+ if (ret < 0)
+ dev_err(sdev->dev, "%s: fw copy failed %d\n", __func__, ret);
+
+ return ret;
+}
+
+int hda_dsp_cl_boot_firmware_skl(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ struct snd_dma_buffer dmab_bdl;
+ struct snd_dma_buffer dmab;
+ unsigned int reg;
+ u32 flags;
+ int ret;
+
+ ret = cl_dsp_init_skl(sdev, &dmab, &dmab_bdl);
+
+ /* retry enabling the core and the ROM load; a second attempt has been seen to help */
+ if (ret < 0) {
+ ret = cl_dsp_init_skl(sdev, &dmab, &dmab_bdl);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Error code=%#x: FW status=%#x\n",
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_ROM_ERROR),
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg));
+ dev_err(sdev->dev, "Core En/ROM load fail:%d\n", ret);
+ return ret;
+ }
+ }
+
+ dev_dbg(sdev->dev, "ROM init successful\n");
+
+ /* at this point DSP ROM has been initialized and should be ready for
+ * code loading and firmware boot
+ */
+ ret = cl_copy_fw_skl(sdev, &dmab);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: load firmware failed : %d\n", __func__, ret);
+ goto err;
+ }
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ chip->rom_status_reg, reg,
+ (FSR_TO_STATE_CODE(reg)
+ == FSR_STATE_ROM_BASEFW_ENTERED),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_BASEFW_TIMEOUT_US);
+
+ dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
+
+ cl_skl_cldma_stream_run(sdev, false);
+ cl_cleanup_skl(sdev, &dmab, &dmab_bdl);
+
+ if (!ret)
+ return chip->init_core_mask;
+
+ return ret;
+
+err:
+ flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX;
+
+ snd_sof_dsp_dbg_dump(sdev, "Boot failed\n", flags);
+
+ /* power down DSP */
+ hda_dsp_core_reset_power_down(sdev, chip->init_core_mask);
+ cl_skl_cldma_stream_run(sdev, false);
+ cl_cleanup_skl(sdev, &dmab, &dmab_bdl);
+
+ dev_err(sdev->dev, "%s: load fw failed err: %d\n", __func__, ret);
+ return ret;
+}
diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
new file mode 100644
index 000000000..50ce6b190
--- /dev/null
+++ b/sound/soc/sof/intel/hda-loader.c
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for HDA DSP code loader
+ */
+
+#include <linux/firmware.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/sof.h>
+#include <sound/sof/ipc4/header.h>
+#include "ext_manifest.h"
+#include "../ipc4-priv.h"
+#include "../ops.h"
+#include "../sof-priv.h"
+#include "hda.h"
+
+static void hda_ssp_set_cbp_cfp(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ int i;
+
+ /* DSP is powered up, set all SSPs to clock consumer/codec provider mode */
+ for (i = 0; i < chip->ssp_count; i++) {
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ chip->ssp_base_offset
+ + i * SSP_DEV_MEM_SIZE
+ + SSP_SSC1_OFFSET,
+ SSP_SET_CBP_CFP,
+ SSP_SET_CBP_CFP);
+ }
+}
+
+struct hdac_ext_stream *hda_cl_stream_prepare(struct snd_sof_dev *sdev, unsigned int format,
+ unsigned int size, struct snd_dma_buffer *dmab,
+ int direction)
+{
+ struct hdac_ext_stream *hext_stream;
+ struct hdac_stream *hstream;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ int ret;
+
+ hext_stream = hda_dsp_stream_get(sdev, direction, 0);
+
+ if (!hext_stream) {
+ dev_err(sdev->dev, "error: no stream available\n");
+ return ERR_PTR(-ENODEV);
+ }
+ hstream = &hext_stream->hstream;
+ hstream->substream = NULL;
+
+ /* allocate DMA buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: memory alloc failed: %d\n", ret);
+ goto out_put;
+ }
+
+ hstream->period_bytes = 0;/* initialize period_bytes */
+ hstream->format_val = format;
+ hstream->bufsize = size;
+
+ if (direction == SNDRV_PCM_STREAM_CAPTURE) {
+ ret = hda_dsp_iccmax_stream_hw_params(sdev, hext_stream, dmab, NULL);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: iccmax stream prepare failed: %d\n", ret);
+ goto out_free;
+ }
+ } else {
+ ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
+ goto out_free;
+ }
+ hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_ENABLE, size);
+ }
+
+ return hext_stream;
+
+out_free:
+ snd_dma_free_pages(dmab);
+out_put:
+ hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
+ return ERR_PTR(ret);
+}
+
+/*
+ * The first boot sequence has some extra steps:
+ * power on all host-managed cores, unstall/run only the boot core to boot the
+ * DSP, then power off any non-boot cores that were powered on.
+ */
+int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ unsigned int status, target_status;
+ u32 flags, ipc_hdr, j;
+ unsigned long mask;
+ char *dump_msg;
+ int ret;
+
+ /* step 1: power up corex */
+ ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
+ goto err;
+ }
+
+ hda_ssp_set_cbp_cfp(sdev);
+
+ /* step 2: Send ROM_CONTROL command (stream_tag is ignored for IMR boot) */
+ ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
+ if (!imr_boot)
+ ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);
+
+ /* step 3: unset core 0 reset state & unstall/run core 0 */
+ ret = hda_dsp_core_run(sdev, chip->init_core_mask);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev,
+ "error: dsp core start failed %d\n", ret);
+ ret = -EIO;
+ goto err;
+ }
+
+ /* step 4: wait for IPC DONE bit from ROM */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ chip->ipc_ack, status,
+ ((status & chip->ipc_ack_mask)
+ == chip->ipc_ack_mask),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_INIT_TIMEOUT_US);
+
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev,
+ "error: %s: timeout for HIPCIE done\n",
+ __func__);
+ goto err;
+ }
+
+ /* set DONE bit to clear the reply IPC message */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ chip->ipc_ack,
+ chip->ipc_ack_mask,
+ chip->ipc_ack_mask);
+
+ /* step 5: power down cores that are no longer needed */
+ ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask &
+ ~(chip->init_core_mask));
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev,
+ "error: dsp core x power down failed\n");
+ goto err;
+ }
+
+ /* step 6: enable IPC interrupts */
+ hda_dsp_ipc_int_enable(sdev);
+
+ /*
+ * step 7:
+ * - Cold/Full boot: wait for ROM init to proceed to download the firmware
+ * - IMR boot: wait for ROM firmware entered (firmware booted up from IMR)
+ */
+ if (imr_boot)
+ target_status = FSR_STATE_FW_ENTERED;
+ else
+ target_status = FSR_STATE_INIT_DONE;
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ chip->rom_status_reg, status,
+ (FSR_TO_STATE_CODE(status) == target_status),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ chip->rom_init_timeout *
+ USEC_PER_MSEC);
+ if (!ret) {
+ /* set enabled cores mask and increment ref count for cores in init_core_mask */
+ sdev->enabled_cores_mask |= chip->init_core_mask;
+ mask = sdev->enabled_cores_mask;
+ for_each_set_bit(j, &mask, SOF_MAX_DSP_NUM_CORES)
+ sdev->dsp_core_ref_count[j]++;
+ return 0;
+ }
+
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev,
+ "%s: timeout with rom_status_reg (%#x) read\n",
+ __func__, chip->rom_status_reg);
+
+err:
+ flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;
+
+ /* after max boot attempts make sure that the dump is printed */
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ flags &= ~SOF_DBG_DUMP_OPTIONAL;
+
+ dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
+ hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
+ snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
+ hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);
+
+ kfree(dump_msg);
+ return ret;
+}
+
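Step 2 above encodes the code-loader stream tag in bits 9 and up of the ROM_CONTROL request. The tag is zero-based in the register, so for a purge (non-IMR) boot with a hypothetical stream_tag of 3 the header gains (3 - 1) << 9 = 0x400, OR-ed together with chip->ipc_req_mask, HDA_DSP_ROM_IPC_CONTROL and HDA_DSP_ROM_IPC_PURGE_FW; for IMR boot the stream bits are omitted entirely.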
+static int cl_trigger(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream, int cmd)
+{
+ struct hdac_stream *hstream = &hext_stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+
+ /* code loader is special case that reuses stream ops */
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ 1 << hstream->index,
+ 1 << hstream->index);
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ hstream->running = true;
+ return 0;
+ default:
+ return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
+ }
+}
+
+int hda_cl_cleanup(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
+ struct hdac_ext_stream *hext_stream)
+{
+ struct hdac_stream *hstream = &hext_stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ int ret = 0;
+
+ if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
+ else
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_SD_CTL_DMA_START, 0);
+
+ hda_dsp_stream_put(sdev, hstream->direction, hstream->stream_tag);
+ hstream->running = 0;
+ hstream->substream = NULL;
+
+ /* reset BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL, 0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU, 0);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0);
+ snd_dma_free_pages(dmab);
+ dmab->area = NULL;
+ hstream->bufsize = 0;
+ hstream->format_val = 0;
+
+ return ret;
+}
+
+int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ unsigned int reg;
+ int ret, status;
+
+ ret = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_START);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: DMA trigger start failed\n");
+ return ret;
+ }
+
+ status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ chip->rom_status_reg, reg,
+ (FSR_TO_STATE_CODE(reg) == FSR_STATE_FW_ENTERED),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_BASEFW_TIMEOUT_US);
+
+ /*
+ * even in case of errors we still need to stop the DMAs,
+ * but we return the initial error should the DMA stop also fail
+ */
+
+ if (status < 0) {
+ dev_err(sdev->dev,
+ "%s: timeout with rom_status_reg (%#x) read\n",
+ __func__, chip->rom_status_reg);
+ }
+
+ ret = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: DMA trigger stop failed\n");
+ if (!status)
+ status = ret;
+ }
+
+ return status;
+}
+
+int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
+{
+ struct hdac_ext_stream *iccmax_stream;
+ struct snd_dma_buffer dmab_bdl;
+ int ret, ret1;
+ u8 original_gb;
+
+ /* save the original LTRP guardband value */
+ original_gb = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP) &
+ HDA_VS_INTEL_LTRP_GB_MASK;
+
+ /*
+ * Prepare capture stream for ICCMAX. We do not need to store
+ * the data, so use a buffer of PAGE_SIZE for receiving.
+ */
+ iccmax_stream = hda_cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT, PAGE_SIZE,
+ &dmab_bdl, SNDRV_PCM_STREAM_CAPTURE);
+ if (IS_ERR(iccmax_stream)) {
+ dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n");
+ return PTR_ERR(iccmax_stream);
+ }
+
+ ret = hda_dsp_cl_boot_firmware(sdev);
+
+ /*
+ * Perform iccmax stream cleanup. This should be done even if firmware loading fails.
+ * If the cleanup also fails, we return the initial error
+ */
+ ret1 = hda_cl_cleanup(sdev, &dmab_bdl, iccmax_stream);
+ if (ret1 < 0) {
+ dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n");
+
+ /* set return value to indicate cleanup failure */
+ if (!ret)
+ ret = ret1;
+ }
+
+ /* restore the original guardband value after FW boot */
+ snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP,
+ HDA_VS_INTEL_LTRP_GB_MASK, original_gb);
+
+ return ret;
+}
+
+static int hda_dsp_boot_imr(struct snd_sof_dev *sdev)
+{
+ const struct sof_intel_dsp_desc *chip_info;
+ int ret;
+
+ chip_info = get_chip_info(sdev->pdata);
+ if (chip_info->cl_init)
+ ret = chip_info->cl_init(sdev, 0, true);
+ else
+ ret = -EINVAL;
+
+ if (!ret)
+ hda_sdw_process_wakeen(sdev);
+
+ return ret;
+}
+
+int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const struct sof_dev_desc *desc = plat_data->desc;
+ const struct sof_intel_dsp_desc *chip_info;
+ struct hdac_ext_stream *hext_stream;
+ struct firmware stripped_firmware;
+ struct snd_dma_buffer dmab;
+ int ret, ret1, i;
+
+ if (hda->imrboot_supported && !sdev->first_boot && !hda->skip_imr_boot) {
+ dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
+ hda->boot_iteration = 0;
+ ret = hda_dsp_boot_imr(sdev);
+ if (!ret) {
+ hda->booted_from_imr = true;
+ return 0;
+ }
+
+ dev_warn(sdev->dev, "IMR restore failed, trying to cold boot\n");
+ }
+
+ hda->booted_from_imr = false;
+
+ chip_info = desc->chip_info;
+
+ if (sdev->basefw.fw->size <= sdev->basefw.payload_offset) {
+ dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
+ return -EINVAL;
+ }
+
+ stripped_firmware.data = sdev->basefw.fw->data + sdev->basefw.payload_offset;
+ stripped_firmware.size = sdev->basefw.fw->size - sdev->basefw.payload_offset;
+
+ /* init for booting wait */
+ init_waitqueue_head(&sdev->boot_wait);
+
+ /* prepare DMA for code loader stream */
+ hext_stream = hda_cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT,
+ stripped_firmware.size,
+ &dmab, SNDRV_PCM_STREAM_PLAYBACK);
+ if (IS_ERR(hext_stream)) {
+ dev_err(sdev->dev, "error: dma prepare for fw loading failed\n");
+ return PTR_ERR(hext_stream);
+ }
+
+ memcpy(dmab.area, stripped_firmware.data,
+ stripped_firmware.size);
+
+ /* try ROM init a few times before giving up */
+ for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
+ dev_dbg(sdev->dev,
+ "Attempting iteration %d of Core En/ROM load...\n", i);
+
+ hda->boot_iteration = i + 1;
+ if (chip_info->cl_init)
+ ret = chip_info->cl_init(sdev, hext_stream->hstream.stream_tag, false);
+ else
+ ret = -EINVAL;
+
+ /* don't retry anymore if successful */
+ if (!ret)
+ break;
+ }
+
+ if (i == HDA_FW_BOOT_ATTEMPTS) {
+ dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
+ i, ret);
+ goto cleanup;
+ }
+
+ /*
+ * When a SoundWire link is in clock stop state, a Slave
+ * device may trigger in-band wakes for events such as jack
+ * insertion or acoustic event detection. This event will lead
+ * to a WAKEEN interrupt, handled by the PCI device and routed
+ * to PME if the PCI device is in D3. The resume function of the
+ * audio PCI driver will be invoked by ACPI for the PME event and
+ * will initialize the device and process the WAKEEN interrupt.
+ *
+ * The WAKEEN interrupt should be processed ASAP to prevent an
+ * interrupt flood, otherwise other interrupts, such as IPC,
+ * cannot work normally. The WAKEEN is handled after the ROM
+ * is initialized successfully, which ensures power rails are
+ * enabled before accessing the SoundWire SHIM registers
+ */
+ if (!sdev->first_boot)
+ hda_sdw_process_wakeen(sdev);
+
+ /*
+ * Set the boot_iteration to the last attempt, indicating that the
+ * DSP ROM has been initialized and no further boot retries will be
+ * attempted from this point on.
+ *
+ * Continue with code loading and firmware boot
+ */
+ hda->boot_iteration = HDA_FW_BOOT_ATTEMPTS;
+ ret = hda_cl_copy_fw(sdev, hext_stream);
+ if (!ret) {
+ dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
+ hda->skip_imr_boot = false;
+ } else {
+ snd_sof_dsp_dbg_dump(sdev, "Firmware download failed",
+ SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX);
+ hda->skip_imr_boot = true;
+ }
+
+cleanup:
+ /*
+ * Perform codeloader stream cleanup.
+ * This should be done even if firmware loading fails.
+ * If the cleanup also fails, we return the initial error
+ */
+ ret1 = hda_cl_cleanup(sdev, &dmab, hext_stream);
+ if (ret1 < 0) {
+ dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");
+
+ /* set return value to indicate cleanup failure */
+ if (!ret)
+ ret = ret1;
+ }
+
+ /*
+ * return the init core mask if both the fw copy
+ * and the stream cleanup are successful
+ */
+ if (!ret)
+ return chip_info->init_core_mask;
+
+ /* disable DSP */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR,
+ SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_GPROCEN, 0);
+ return ret;
+}
+
+int hda_dsp_ipc4_load_library(struct snd_sof_dev *sdev,
+ struct sof_ipc4_fw_library *fw_lib, bool reload)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_ext_stream *hext_stream;
+ struct firmware stripped_firmware;
+ struct sof_ipc4_msg msg = {};
+ struct snd_dma_buffer dmab;
+ int ret, ret1;
+
+ /* IMR booting will restore the libraries as well, skip the loading */
+ if (reload && hda->booted_from_imr)
+ return 0;
+
+ /* the fw_lib has been verified during loading, we can trust the validity here */
+ stripped_firmware.data = fw_lib->sof_fw.fw->data + fw_lib->sof_fw.payload_offset;
+ stripped_firmware.size = fw_lib->sof_fw.fw->size - fw_lib->sof_fw.payload_offset;
+
+ /* prepare DMA for code loader stream */
+ hext_stream = hda_cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT,
+ stripped_firmware.size,
+ &dmab, SNDRV_PCM_STREAM_PLAYBACK);
+ if (IS_ERR(hext_stream)) {
+ dev_err(sdev->dev, "%s: DMA prepare failed\n", __func__);
+ return PTR_ERR(hext_stream);
+ }
+
+ memcpy(dmab.area, stripped_firmware.data, stripped_firmware.size);
+
+ msg.primary = hext_stream->hstream.stream_tag - 1;
+ msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY);
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
+ msg.primary |= SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID(fw_lib->id);
+
+ ret = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_START);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: DMA trigger start failed\n", __func__);
+ goto cleanup;
+ }
+
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
+
+ ret1 = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
+ if (ret1 < 0) {
+ dev_err(sdev->dev, "%s: DMA trigger stop failed\n", __func__);
+ if (!ret)
+ ret = ret1;
+ }
+
+cleanup:
+ /* clean up even in case of error and return the first error */
+ ret1 = hda_cl_cleanup(sdev, &dmab, hext_stream);
+ if (ret1 < 0) {
+ dev_err(sdev->dev, "%s: Code loader DSP cleanup failed\n", __func__);
+
+ /* set return value to indicate cleanup failure */
+ if (!ret)
+ ret = ret1;
+ }
+
+ return ret;
+}
+
+/* pre fw run operations */
+int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev)
+{
+ /* disable clock gating and power gating */
+ return hda_dsp_ctrl_clock_power_gating(sdev, false);
+}
+
+/* post fw run operations */
+int hda_dsp_post_fw_run(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ if (sdev->first_boot) {
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ ret = hda_sdw_startup(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: could not startup SoundWire links\n");
+ return ret;
+ }
+
+ /* Check if IMR boot is usable */
+ if (!sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT) &&
+ (sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT ||
+ sdev->pdata->ipc_type == SOF_INTEL_IPC4))
+ hdev->imrboot_supported = true;
+ }
+
+ hda_sdw_int_enable(sdev, true);
+
+ /* re-enable clock gating and power gating */
+ return hda_dsp_ctrl_clock_power_gating(sdev, true);
+}
+
+int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct sof_ext_man_cavs_config_data *config_data =
+ container_of(hdr, struct sof_ext_man_cavs_config_data, hdr);
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ int i, elem_num;
+
+ /* calculate total number of config data elements */
+ elem_num = (hdr->size - sizeof(struct sof_ext_man_elem_header))
+ / sizeof(struct sof_config_elem);
+ if (elem_num <= 0) {
+ dev_err(sdev->dev, "cavs config data is inconsistent: %d\n", elem_num);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < elem_num; i++)
+ switch (config_data->elems[i].token) {
+ case SOF_EXT_MAN_CAVS_CONFIG_EMPTY:
+ /* skip empty token */
+ break;
+ case SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO:
+ hda->clk_config_lpro = config_data->elems[i].value;
+ dev_dbg(sdev->dev, "FW clock config: %s\n",
+ hda->clk_config_lpro ? "LPRO" : "HPRO");
+ break;
+ case SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE:
+ case SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE:
+ /* These elements are defined but not used yet. No warning is required */
+ break;
+ default:
+ dev_info(sdev->dev, "unsupported token type: %d\n",
+ config_data->elems[i].token);
+ }
+
+ return 0;
+}
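The element count computed in hda_dsp_ext_man_get_cavs_config_data() is simple arithmetic over the manifest blob: for instance, a hdr->size equal to sizeof(struct sof_ext_man_elem_header) + 3 * sizeof(struct sof_config_elem) yields elem_num = 3, and the switch then dispatches each of the three token/value pairs (empty, LPRO clock config, or the not-yet-used mailbox sizes).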
diff --git a/sound/soc/sof/intel/hda-mlink.c b/sound/soc/sof/intel/hda-mlink.c
new file mode 100644
index 000000000..b592e687a
--- /dev/null
+++ b/sound/soc/sof/intel/hda-mlink.c
@@ -0,0 +1,974 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+
+/*
+ * Management of HDaudio multi-link (capabilities, power, coupling)
+ */
+
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/hda-mlink.h>
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_MLINK)
+
+/* worst-case number of sublinks is used for sublink refcount array allocation only */
+#define HDAML_MAX_SUBLINKS (AZX_ML_LCTL_CPA_SHIFT - AZX_ML_LCTL_SPA_SHIFT)
+
+/**
+ * struct hdac_ext2_link - HDAudio extended+alternate link
+ *
+ * @hext_link: hdac_ext_link
+ * @alt: flag set for alternate extended links
+ * @intc: boolean for interrupt capable
+ * @ofls: boolean for offload support
+ * @lss: boolean for link synchronization capabilities
+ * @slcount: sublink count
+ * @elid: extended link ID (AZX_REG_ML_LEPTR_ID_ defines)
+ * @elver: extended link version
+ * @leptr: extended link pointer
+ * @eml_lock: mutual exclusion to access shared registers e.g. CPA/SPA bits
+ * in LCTL register
+ * @sublink_ref_count: array of refcounts, required to power-manage sublinks independently
+ * @base_ptr: pointer to shim/ip/shim_vs space
+ * @instance_offset: offset between each of @slcount instances managed by link
+ * @shim_offset: offset to SHIM register base
+ * @ip_offset: offset to IP register base
+ * @shim_vs_offset: offset to vendor-specific (VS) SHIM base
+ */
+struct hdac_ext2_link {
+ struct hdac_ext_link hext_link;
+
+ /* read directly from LCAP register */
+ bool alt;
+ bool intc;
+ bool ofls;
+ bool lss;
+ int slcount;
+ int elid;
+ int elver;
+ u32 leptr;
+
+ struct mutex eml_lock; /* prevent concurrent access to e.g. CPA/SPA */
+ int sublink_ref_count[HDAML_MAX_SUBLINKS];
+
+ /* internal values computed from LCAP contents */
+ void __iomem *base_ptr;
+ u32 instance_offset;
+ u32 shim_offset;
+ u32 ip_offset;
+ u32 shim_vs_offset;
+};
+
+#define hdac_ext_link_to_ext2(h) container_of(h, struct hdac_ext2_link, hext_link)
+
+#define AZX_REG_SDW_INSTANCE_OFFSET 0x8000
+#define AZX_REG_SDW_SHIM_OFFSET 0x0
+#define AZX_REG_SDW_IP_OFFSET 0x100
+#define AZX_REG_SDW_VS_SHIM_OFFSET 0x6000
+#define AZX_REG_SDW_SHIM_PCMSyCM(y) (0x16 + 0x4 * (y))
+
+/* only one instance supported */
+#define AZX_REG_INTEL_DMIC_SHIM_OFFSET 0x0
+#define AZX_REG_INTEL_DMIC_IP_OFFSET 0x100
+#define AZX_REG_INTEL_DMIC_VS_SHIM_OFFSET 0x6000
+
+#define AZX_REG_INTEL_SSP_INSTANCE_OFFSET 0x1000
+#define AZX_REG_INTEL_SSP_SHIM_OFFSET 0x0
+#define AZX_REG_INTEL_SSP_IP_OFFSET 0x100
+#define AZX_REG_INTEL_SSP_VS_SHIM_OFFSET 0xC00
+
+/* only one instance supported */
+#define AZX_REG_INTEL_UAOL_SHIM_OFFSET 0x0
+#define AZX_REG_INTEL_UAOL_IP_OFFSET 0x100
+#define AZX_REG_INTEL_UAOL_VS_SHIM_OFFSET 0xC00
+
+/* HDAML section - this part follows sequences in the hardware specification,
+ * including naming conventions and the use of the hdaml_ prefix.
+ * The code is intentionally minimal with limited dependencies on frameworks or
+ * helpers. Locking and scanning lists are handled at a higher level
+ */
+
+static int hdaml_lnk_enum(struct device *dev, struct hdac_ext2_link *h2link,
+ void __iomem *remap_addr, void __iomem *ml_addr, int link_idx)
+{
+ struct hdac_ext_link *hlink = &h2link->hext_link;
+ u32 base_offset;
+
+ hlink->lcaps = readl(ml_addr + AZX_REG_ML_LCAP);
+
+ h2link->alt = FIELD_GET(AZX_ML_HDA_LCAP_ALT, hlink->lcaps);
+
+ /* handle alternate extensions */
+ if (!h2link->alt) {
+ h2link->slcount = 1;
+
+ /*
+ * LSDIID is initialized by hardware for the HDaudio link;
+ * it needs to be set up by software for alternate links
+ */
+ hlink->lsdiid = readw(ml_addr + AZX_REG_ML_LSDIID);
+
+ dev_dbg(dev, "Link %d: HDAudio - lsdiid=%d\n",
+ link_idx, hlink->lsdiid);
+
+ return 0;
+ }
+
+ h2link->intc = FIELD_GET(AZX_ML_HDA_LCAP_INTC, hlink->lcaps);
+ h2link->ofls = FIELD_GET(AZX_ML_HDA_LCAP_OFLS, hlink->lcaps);
+ h2link->lss = FIELD_GET(AZX_ML_HDA_LCAP_LSS, hlink->lcaps);
+
+ /* read slcount (increment due to zero-based hardware representation) */
+ h2link->slcount = FIELD_GET(AZX_ML_HDA_LCAP_SLCOUNT, hlink->lcaps) + 1;
+ dev_dbg(dev, "Link %d: HDAudio extended - sublink count %d\n",
+ link_idx, h2link->slcount);
+
+ /* find IP ID and offsets */
+ h2link->leptr = readl(ml_addr + AZX_REG_ML_LEPTR);
+
+ h2link->elid = FIELD_GET(AZX_REG_ML_LEPTR_ID, h2link->leptr);
+
+ base_offset = FIELD_GET(AZX_REG_ML_LEPTR_PTR, h2link->leptr);
+ h2link->base_ptr = remap_addr + base_offset;
+
+ switch (h2link->elid) {
+ case AZX_REG_ML_LEPTR_ID_SDW:
+ h2link->instance_offset = AZX_REG_SDW_INSTANCE_OFFSET;
+ h2link->shim_offset = AZX_REG_SDW_SHIM_OFFSET;
+ h2link->ip_offset = AZX_REG_SDW_IP_OFFSET;
+ h2link->shim_vs_offset = AZX_REG_SDW_VS_SHIM_OFFSET;
+ dev_dbg(dev, "Link %d: HDAudio extended - SoundWire alternate link, leptr.ptr %#x\n",
+ link_idx, base_offset);
+ break;
+ case AZX_REG_ML_LEPTR_ID_INTEL_DMIC:
+ h2link->shim_offset = AZX_REG_INTEL_DMIC_SHIM_OFFSET;
+ h2link->ip_offset = AZX_REG_INTEL_DMIC_IP_OFFSET;
+ h2link->shim_vs_offset = AZX_REG_INTEL_DMIC_VS_SHIM_OFFSET;
+ dev_dbg(dev, "Link %d: HDAudio extended - INTEL DMIC alternate link, leptr.ptr %#x\n",
+ link_idx, base_offset);
+ break;
+ case AZX_REG_ML_LEPTR_ID_INTEL_SSP:
+ h2link->instance_offset = AZX_REG_INTEL_SSP_INSTANCE_OFFSET;
+ h2link->shim_offset = AZX_REG_INTEL_SSP_SHIM_OFFSET;
+ h2link->ip_offset = AZX_REG_INTEL_SSP_IP_OFFSET;
+ h2link->shim_vs_offset = AZX_REG_INTEL_SSP_VS_SHIM_OFFSET;
+ dev_dbg(dev, "Link %d: HDAudio extended - INTEL SSP alternate link, leptr.ptr %#x\n",
+ link_idx, base_offset);
+ break;
+ case AZX_REG_ML_LEPTR_ID_INTEL_UAOL:
+ h2link->shim_offset = AZX_REG_INTEL_UAOL_SHIM_OFFSET;
+ h2link->ip_offset = AZX_REG_INTEL_UAOL_IP_OFFSET;
+ h2link->shim_vs_offset = AZX_REG_INTEL_UAOL_VS_SHIM_OFFSET;
+ dev_dbg(dev, "Link %d: HDAudio extended - INTEL UAOL alternate link, leptr.ptr %#x\n",
+ link_idx, base_offset);
+ break;
+ default:
+ dev_err(dev, "Link %d: HDAudio extended - Unsupported alternate link, leptr.id=%#02x value\n",
+ link_idx, h2link->elid);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Hardware recommendations are to wait ~10us before checking any hardware transition
+ * reported by bits changing status.
+ * This value does not need to be super-precise; a slack of 5us is perfectly acceptable.
+ * The worst case is about 1ms before an issue is reported
+ */
+#define HDAML_POLL_DELAY_MIN_US 10
+#define HDAML_POLL_DELAY_SLACK_US 5
+#define HDAML_POLL_DELAY_RETRY 100
+
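These constants bound the polling helpers below: up to HDAML_POLL_DELAY_RETRY iterations of a 10-15 us sleep gives a worst-case wait of roughly 100 * 10 us = 1 ms (about 1.5 ms with full slack) before -EIO or -EAGAIN is returned, matching the estimate in the comment above.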
+static int check_sublink_power(u32 __iomem *lctl, int sublink, bool enabled)
+{
+ int mask = BIT(sublink) << AZX_ML_LCTL_CPA_SHIFT;
+ int retry = HDAML_POLL_DELAY_RETRY;
+ u32 val;
+
+ usleep_range(HDAML_POLL_DELAY_MIN_US,
+ HDAML_POLL_DELAY_MIN_US + HDAML_POLL_DELAY_SLACK_US);
+ do {
+ val = readl(lctl);
+ if (enabled) {
+ if (val & mask)
+ return 0;
+ } else {
+ if (!(val & mask))
+ return 0;
+ }
+ usleep_range(HDAML_POLL_DELAY_MIN_US,
+ HDAML_POLL_DELAY_MIN_US + HDAML_POLL_DELAY_SLACK_US);
+
+ } while (--retry);
+
+ return -EIO;
+}
+
+static int hdaml_link_init(u32 __iomem *lctl, int sublink)
+{
+ u32 val;
+ u32 mask = BIT(sublink) << AZX_ML_LCTL_SPA_SHIFT;
+
+ val = readl(lctl);
+ val |= mask;
+
+ writel(val, lctl);
+
+ return check_sublink_power(lctl, sublink, true);
+}
+
+static int hdaml_link_shutdown(u32 __iomem *lctl, int sublink)
+{
+ u32 val;
+ u32 mask;
+
+ val = readl(lctl);
+ mask = BIT(sublink) << AZX_ML_LCTL_SPA_SHIFT;
+ val &= ~mask;
+
+ writel(val, lctl);
+
+ return check_sublink_power(lctl, sublink, false);
+}
+
+static void hdaml_link_enable_interrupt(u32 __iomem *lctl, bool enable)
+{
+ u32 val;
+
+ val = readl(lctl);
+ if (enable)
+ val |= AZX_ML_LCTL_INTEN;
+ else
+ val &= ~AZX_ML_LCTL_INTEN;
+
+ writel(val, lctl);
+}
+
+static bool hdaml_link_check_interrupt(u32 __iomem *lctl)
+{
+ u32 val;
+
+ val = readl(lctl);
+
+ return val & AZX_ML_LCTL_INTSTS;
+}
+
+static int hdaml_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
+{
+ int timeout = HDAML_POLL_DELAY_RETRY;
+ u32 reg_read;
+
+ do {
+ reg_read = readl(base + offset);
+ if ((reg_read & mask) == target)
+ return 0;
+
+ timeout--;
+ usleep_range(HDAML_POLL_DELAY_MIN_US,
+ HDAML_POLL_DELAY_MIN_US + HDAML_POLL_DELAY_SLACK_US);
+ } while (timeout != 0);
+
+ return -EAGAIN;
+}
+
+static void hdaml_link_set_syncprd(u32 __iomem *lsync, u32 syncprd)
+{
+ u32 val;
+
+ val = readl(lsync);
+ val &= ~AZX_REG_ML_LSYNC_SYNCPRD;
+ val |= (syncprd & AZX_REG_ML_LSYNC_SYNCPRD);
+
+ /*
+ * set SYNCPU but do not wait. The bit is cleared by hardware when
+ * the link becomes active.
+ */
+ val |= AZX_REG_ML_LSYNC_SYNCPU;
+
+ writel(val, lsync);
+}
+
+static int hdaml_link_wait_syncpu(u32 __iomem *lsync)
+{
+ return hdaml_wait_bit(lsync, 0, AZX_REG_ML_LSYNC_SYNCPU, 0);
+}
+
+static void hdaml_link_sync_arm(u32 __iomem *lsync, int sublink)
+{
+ u32 val;
+
+ val = readl(lsync);
+ val |= (AZX_REG_ML_LSYNC_CMDSYNC << sublink);
+
+ writel(val, lsync);
+}
+
+static void hdaml_link_sync_go(u32 __iomem *lsync)
+{
+ u32 val;
+
+ val = readl(lsync);
+ val |= AZX_REG_ML_LSYNC_SYNCGO;
+
+ writel(val, lsync);
+}
+
+static bool hdaml_link_check_cmdsync(u32 __iomem *lsync, u32 cmdsync_mask)
+{
+ u32 val;
+
+ val = readl(lsync);
+
+ return !!(val & cmdsync_mask);
+}
+
+static u16 hdaml_link_get_lsdiid(u16 __iomem *lsdiid)
+{
+ return readw(lsdiid);
+}
+
+static void hdaml_link_set_lsdiid(u16 __iomem *lsdiid, int dev_num)
+{
+ u16 val;
+
+ val = readw(lsdiid);
+ val |= BIT(dev_num);
+
+ writew(val, lsdiid);
+}
+
+static void hdaml_shim_map_stream_ch(u16 __iomem *pcmsycm, int lchan, int hchan,
+ int stream_id, int dir)
+{
+ u16 val;
+
+ val = readw(pcmsycm);
+
+ u16p_replace_bits(&val, lchan, GENMASK(3, 0));
+ u16p_replace_bits(&val, hchan, GENMASK(7, 4));
+ u16p_replace_bits(&val, stream_id, GENMASK(13, 8));
+ u16p_replace_bits(&val, dir, BIT(15));
+
+ writew(val, pcmsycm);
+}
+
+static void hdaml_lctl_offload_enable(u32 __iomem *lctl, bool enable)
+{
+ u32 val = readl(lctl);
+
+ if (enable)
+ val |= AZX_ML_LCTL_OFLEN;
+ else
+ val &= ~AZX_ML_LCTL_OFLEN;
+
+ writel(val, lctl);
+}
+
+/* END HDAML section */
+
+static int hda_ml_alloc_h2link(struct hdac_bus *bus, int index)
+{
+ struct hdac_ext2_link *h2link;
+ struct hdac_ext_link *hlink;
+ int ret;
+
+ h2link = kzalloc(sizeof(*h2link), GFP_KERNEL);
+ if (!h2link)
+ return -ENOMEM;
+
+ /* basic initialization */
+ hlink = &h2link->hext_link;
+
+ hlink->index = index;
+ hlink->bus = bus;
+ hlink->ml_addr = bus->mlcap + AZX_ML_BASE + (AZX_ML_INTERVAL * index);
+
+ ret = hdaml_lnk_enum(bus->dev, h2link, bus->remap_addr, hlink->ml_addr, index);
+ if (ret < 0) {
+ kfree(h2link);
+ return ret;
+ }
+
+ mutex_init(&h2link->eml_lock);
+
+ list_add_tail(&hlink->list, &bus->hlink_list);
+
+ /*
+ * Regular HDaudio links are powered on by default, so the
+ * refcount needs to be initialized.
+ */
+ if (!h2link->alt)
+ hlink->ref_count = 1;
+
+ return 0;
+}
+
+int hda_bus_ml_init(struct hdac_bus *bus)
+{
+ u32 link_count;
+ int ret;
+ int i;
+
+ if (!bus->mlcap)
+ return 0;
+
+ link_count = readl(bus->mlcap + AZX_REG_ML_MLCD) + 1;
+
+ dev_dbg(bus->dev, "HDAudio Multi-Link count: %d\n", link_count);
+
+ for (i = 0; i < link_count; i++) {
+ ret = hda_ml_alloc_h2link(bus, i);
+ if (ret < 0) {
+ hda_bus_ml_free(bus);
+ return ret;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_NS(hda_bus_ml_init, SND_SOC_SOF_HDA_MLINK);
+
+void hda_bus_ml_free(struct hdac_bus *bus)
+{
+ struct hdac_ext_link *hlink, *_h;
+ struct hdac_ext2_link *h2link;
+
+ if (!bus->mlcap)
+ return;
+
+ list_for_each_entry_safe(hlink, _h, &bus->hlink_list, list) {
+ list_del(&hlink->list);
+ h2link = hdac_ext_link_to_ext2(hlink);
+
+ mutex_destroy(&h2link->eml_lock);
+ kfree(h2link);
+ }
+}
+EXPORT_SYMBOL_NS(hda_bus_ml_free, SND_SOC_SOF_HDA_MLINK);
+
+static struct hdac_ext2_link *
+find_ext2_link(struct hdac_bus *bus, bool alt, int elid)
+{
+ struct hdac_ext_link *hlink;
+
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
+ struct hdac_ext2_link *h2link = hdac_ext_link_to_ext2(hlink);
+
+ if (h2link->alt == alt && h2link->elid == elid)
+ return h2link;
+ }
+
+ return NULL;
+}
+
+int hdac_bus_eml_get_count(struct hdac_bus *bus, bool alt, int elid)
+{
+ struct hdac_ext2_link *h2link;
+
+ h2link = find_ext2_link(bus, alt, elid);
+ if (!h2link)
+ return 0;
+
+ return h2link->slcount;
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_get_count, SND_SOC_SOF_HDA_MLINK);
+
+void hdac_bus_eml_enable_interrupt(struct hdac_bus *bus, bool alt, int elid, bool enable)
+{
+ struct hdac_ext2_link *h2link;
+ struct hdac_ext_link *hlink;
+
+ h2link = find_ext2_link(bus, alt, elid);
+ if (!h2link)
+ return;
+
+ if (!h2link->intc)
+ return;
+
+ hlink = &h2link->hext_link;
+
+ mutex_lock(&h2link->eml_lock);
+
+ hdaml_link_enable_interrupt(hlink->ml_addr + AZX_REG_ML_LCTL, enable);
+
+ mutex_unlock(&h2link->eml_lock);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_enable_interrupt, SND_SOC_SOF_HDA_MLINK);
+
+bool hdac_bus_eml_check_interrupt(struct hdac_bus *bus, bool alt, int elid)
+{
+ struct hdac_ext2_link *h2link;
+ struct hdac_ext_link *hlink;
+
+ h2link = find_ext2_link(bus, alt, elid);
+ if (!h2link)
+ return false;
+
+ if (!h2link->intc)
+ return false;
+
+ hlink = &h2link->hext_link;
+
+ return hdaml_link_check_interrupt(hlink->ml_addr + AZX_REG_ML_LCTL);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_check_interrupt, SND_SOC_SOF_HDA_MLINK);
+
+int hdac_bus_eml_set_syncprd_unlocked(struct hdac_bus *bus, bool alt, int elid, u32 syncprd)
+{
+ struct hdac_ext2_link *h2link;
+ struct hdac_ext_link *hlink;
+
+ h2link = find_ext2_link(bus, alt, elid);
+ if (!h2link)
+ return 0;
+
+ if (!h2link->lss)
+ return 0;
+
+ hlink = &h2link->hext_link;
+
+ hdaml_link_set_syncprd(hlink->ml_addr + AZX_REG_ML_LSYNC, syncprd);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_set_syncprd_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+int hdac_bus_eml_sdw_set_syncprd_unlocked(struct hdac_bus *bus, u32 syncprd)
+{
+ return hdac_bus_eml_set_syncprd_unlocked(bus, true, AZX_REG_ML_LEPTR_ID_SDW, syncprd);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_set_syncprd_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+int hdac_bus_eml_wait_syncpu_unlocked(struct hdac_bus *bus, bool alt, int elid)
+{
+ struct hdac_ext2_link *h2link;
+ struct hdac_ext_link *hlink;
+
+ h2link = find_ext2_link(bus, alt, elid);
+ if (!h2link)
+ return 0;
+
+ if (!h2link->lss)
+ return 0;
+
+ hlink = &h2link->hext_link;
+
+ return hdaml_link_wait_syncpu(hlink->ml_addr + AZX_REG_ML_LSYNC);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_wait_syncpu_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+int hdac_bus_eml_sdw_wait_syncpu_unlocked(struct hdac_bus *bus)
+{
+ return hdac_bus_eml_wait_syncpu_unlocked(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_wait_syncpu_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+void hdac_bus_eml_sync_arm_unlocked(struct hdac_bus *bus, bool alt, int elid, int sublink)
+{
+ struct hdac_ext2_link *h2link;
+ struct hdac_ext_link *hlink;
+
+ h2link = find_ext2_link(bus, alt, elid);
+ if (!h2link)
+ return;
+
+ if (!h2link->lss)
+ return;
+
+ hlink = &h2link->hext_link;
+
+ hdaml_link_sync_arm(hlink->ml_addr + AZX_REG_ML_LSYNC, sublink);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sync_arm_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+void hdac_bus_eml_sdw_sync_arm_unlocked(struct hdac_bus *bus, int sublink)
+{
+ hdac_bus_eml_sync_arm_unlocked(bus, true, AZX_REG_ML_LEPTR_ID_SDW, sublink);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_sync_arm_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+int hdac_bus_eml_sync_go_unlocked(struct hdac_bus *bus, bool alt, int elid)
+{
+ struct hdac_ext2_link *h2link;
+ struct hdac_ext_link *hlink;
+
+ h2link = find_ext2_link(bus, alt, elid);
+ if (!h2link)
+ return 0;
+
+ if (!h2link->lss)
+ return 0;
+
+ hlink = &h2link->hext_link;
+
+ hdaml_link_sync_go(hlink->ml_addr + AZX_REG_ML_LSYNC);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sync_go_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+int hdac_bus_eml_sdw_sync_go_unlocked(struct hdac_bus *bus)
+{
+ return hdac_bus_eml_sync_go_unlocked(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_sync_go_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+bool hdac_bus_eml_check_cmdsync_unlocked(struct hdac_bus *bus, bool alt, int elid)
+{
+ struct hdac_ext2_link *h2link;
+ struct hdac_ext_link *hlink;
+ u32 cmdsync_mask;
+
+ h2link = find_ext2_link(bus, alt, elid);
+ if (!h2link)
+ return 0;
+
+ if (!h2link->lss)
+ return 0;
+
+ hlink = &h2link->hext_link;
+
+ cmdsync_mask = GENMASK(AZX_REG_ML_LSYNC_CMDSYNC_SHIFT + h2link->slcount - 1,
+ AZX_REG_ML_LSYNC_CMDSYNC_SHIFT);
+
+ return hdaml_link_check_cmdsync(hlink->ml_addr + AZX_REG_ML_LSYNC,
+ cmdsync_mask);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_check_cmdsync_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+bool hdac_bus_eml_sdw_check_cmdsync_unlocked(struct hdac_bus *bus)
+{
+ return hdac_bus_eml_check_cmdsync_unlocked(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_check_cmdsync_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+static int hdac_bus_eml_power_up_base(struct hdac_bus *bus, bool alt, int elid, int sublink,
+ bool eml_lock)
+{
+ struct hdac_ext2_link *h2link;
+ struct hdac_ext_link *hlink;
+ int ret = 0;
+
+ h2link = find_ext2_link(bus, alt, elid);
+ if (!h2link)
+ return -ENODEV;
+
+ if (sublink >= h2link->slcount)
+ return -EINVAL;
+
+ hlink = &h2link->hext_link;
+
+ if (eml_lock)
+ mutex_lock(&h2link->eml_lock);
+
+ if (!alt) {
+ if (++hlink->ref_count > 1)
+ goto skip_init;
+ } else {
+ if (++h2link->sublink_ref_count[sublink] > 1)
+ goto skip_init;
+ }
+
+ ret = hdaml_link_init(hlink->ml_addr + AZX_REG_ML_LCTL, sublink);
+
+skip_init:
+ if (eml_lock)
+ mutex_unlock(&h2link->eml_lock);
+
+ return ret;
+}
+
+int hdac_bus_eml_power_up(struct hdac_bus *bus, bool alt, int elid, int sublink)
+{
+ return hdac_bus_eml_power_up_base(bus, alt, elid, sublink, true);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_power_up, SND_SOC_SOF_HDA_MLINK);
+
+int hdac_bus_eml_power_up_unlocked(struct hdac_bus *bus, bool alt, int elid, int sublink)
+{
+ return hdac_bus_eml_power_up_base(bus, alt, elid, sublink, false);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_power_up_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+static int hdac_bus_eml_power_down_base(struct hdac_bus *bus, bool alt, int elid, int sublink,
+ bool eml_lock)
+{
+ struct hdac_ext2_link *h2link;
+ struct hdac_ext_link *hlink;
+ int ret = 0;
+
+ h2link = find_ext2_link(bus, alt, elid);
+ if (!h2link)
+ return -ENODEV;
+
+ if (sublink >= h2link->slcount)
+ return -EINVAL;
+
+ hlink = &h2link->hext_link;
+
+ if (eml_lock)
+ mutex_lock(&h2link->eml_lock);
+
+ if (!alt) {
+ if (--hlink->ref_count > 0)
+ goto skip_shutdown;
+ } else {
+ if (--h2link->sublink_ref_count[sublink] > 0)
+ goto skip_shutdown;
+ }
+ ret = hdaml_link_shutdown(hlink->ml_addr + AZX_REG_ML_LCTL, sublink);
+
+skip_shutdown:
+ if (eml_lock)
+ mutex_unlock(&h2link->eml_lock);
+
+ return ret;
+}
+
+int hdac_bus_eml_power_down(struct hdac_bus *bus, bool alt, int elid, int sublink)
+{
+ return hdac_bus_eml_power_down_base(bus, alt, elid, sublink, true);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_power_down, SND_SOC_SOF_HDA_MLINK);
+
+int hdac_bus_eml_power_down_unlocked(struct hdac_bus *bus, bool alt, int elid, int sublink)
+{
+ return hdac_bus_eml_power_down_base(bus, alt, elid, sublink, false);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_power_down_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+int hdac_bus_eml_sdw_power_up_unlocked(struct hdac_bus *bus, int sublink)
+{
+ return hdac_bus_eml_power_up_unlocked(bus, true, AZX_REG_ML_LEPTR_ID_SDW, sublink);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_power_up_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+int hdac_bus_eml_sdw_power_down_unlocked(struct hdac_bus *bus, int sublink)
+{
+ return hdac_bus_eml_power_down_unlocked(bus, true, AZX_REG_ML_LEPTR_ID_SDW, sublink);
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_power_down_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+int hdac_bus_eml_sdw_get_lsdiid_unlocked(struct hdac_bus *bus, int sublink, u16 *lsdiid)
+{
+ struct hdac_ext2_link *h2link;
+ struct hdac_ext_link *hlink;
+
+ h2link = find_ext2_link(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+ if (!h2link)
+ return -ENODEV;
+
+ hlink = &h2link->hext_link;
+
+ *lsdiid = hdaml_link_get_lsdiid(hlink->ml_addr + AZX_REG_ML_LSDIID_OFFSET(sublink));
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_get_lsdiid_unlocked, SND_SOC_SOF_HDA_MLINK);
+
+int hdac_bus_eml_sdw_set_lsdiid(struct hdac_bus *bus, int sublink, int dev_num)
+{
+ struct hdac_ext2_link *h2link;
+ struct hdac_ext_link *hlink;
+
+ h2link = find_ext2_link(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+ if (!h2link)
+ return -ENODEV;
+
+ hlink = &h2link->hext_link;
+
+ mutex_lock(&h2link->eml_lock);
+
+ hdaml_link_set_lsdiid(hlink->ml_addr + AZX_REG_ML_LSDIID_OFFSET(sublink), dev_num);
+
+ mutex_unlock(&h2link->eml_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_set_lsdiid, SND_SOC_SOF_HDA_MLINK);
+
+/*
+ * the 'y' parameter comes from the PCMSyCM hardware register naming. 'y' refers to the
+ * PDI index, i.e. the FIFO used for RX or TX
+ */
+int hdac_bus_eml_sdw_map_stream_ch(struct hdac_bus *bus, int sublink, int y,
+ int channel_mask, int stream_id, int dir)
+{
+ struct hdac_ext2_link *h2link;
+ u16 __iomem *pcmsycm;
+ int hchan;
+ int lchan;
+ u16 val;
+
+ h2link = find_ext2_link(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+ if (!h2link)
+ return -ENODEV;
+
+ pcmsycm = h2link->base_ptr + h2link->shim_offset +
+ h2link->instance_offset * sublink +
+ AZX_REG_SDW_SHIM_PCMSyCM(y);
+
+ if (channel_mask) {
+ hchan = __fls(channel_mask);
+ lchan = __ffs(channel_mask);
+ } else {
+ hchan = 0;
+ lchan = 0;
+ }
+
+ mutex_lock(&h2link->eml_lock);
+
+ hdaml_shim_map_stream_ch(pcmsycm, lchan, hchan,
+ stream_id, dir);
+
+ mutex_unlock(&h2link->eml_lock);
+
+ val = readw(pcmsycm);
+
+ dev_dbg(bus->dev, "sublink %d channel_mask %#x stream_id %d dir %d pcmsycm %#x\n",
+ sublink, channel_mask, stream_id, dir, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_map_stream_ch, SND_SOC_SOF_HDA_MLINK);
+
+void hda_bus_ml_put_all(struct hdac_bus *bus)
+{
+ struct hdac_ext_link *hlink;
+
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
+ struct hdac_ext2_link *h2link = hdac_ext_link_to_ext2(hlink);
+
+ if (!h2link->alt)
+ snd_hdac_ext_bus_link_put(bus, hlink);
+ }
+}
+EXPORT_SYMBOL_NS(hda_bus_ml_put_all, SND_SOC_SOF_HDA_MLINK);
+
+void hda_bus_ml_reset_losidv(struct hdac_bus *bus)
+{
+ struct hdac_ext_link *hlink;
+
+ /* Reset stream-to-link mapping */
+ list_for_each_entry(hlink, &bus->hlink_list, list)
+ writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);
+}
+EXPORT_SYMBOL_NS(hda_bus_ml_reset_losidv, SND_SOC_SOF_HDA_MLINK);
+
+int hda_bus_ml_resume(struct hdac_bus *bus)
+{
+ struct hdac_ext_link *hlink;
+ int ret;
+
+ /* power up links that were active before suspend */
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
+ struct hdac_ext2_link *h2link = hdac_ext_link_to_ext2(hlink);
+
+ if (!h2link->alt && hlink->ref_count) {
+ ret = snd_hdac_ext_bus_link_power_up(hlink);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_NS(hda_bus_ml_resume, SND_SOC_SOF_HDA_MLINK);
+
+int hda_bus_ml_suspend(struct hdac_bus *bus)
+{
+ struct hdac_ext_link *hlink;
+ int ret;
+
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
+ struct hdac_ext2_link *h2link = hdac_ext_link_to_ext2(hlink);
+
+ if (!h2link->alt) {
+ ret = snd_hdac_ext_bus_link_power_down(hlink);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_NS(hda_bus_ml_suspend, SND_SOC_SOF_HDA_MLINK);
+
+struct mutex *hdac_bus_eml_get_mutex(struct hdac_bus *bus, bool alt, int elid)
+{
+ struct hdac_ext2_link *h2link;
+
+ h2link = find_ext2_link(bus, alt, elid);
+ if (!h2link)
+ return NULL;
+
+ return &h2link->eml_lock;
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_get_mutex, SND_SOC_SOF_HDA_MLINK);
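+
+/*
+ * Callers that batch several link updates can take the shared mutex once and
+ * use the *_unlocked helpers above, e.g. (sketch):
+ *
+ *   mutex = hdac_bus_eml_get_mutex(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+ *   mutex_lock(mutex);
+ *   hdac_bus_eml_sdw_sync_arm_unlocked(bus, sublink);
+ *   hdac_bus_eml_sdw_sync_go_unlocked(bus);
+ *   mutex_unlock(mutex);
+ */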
+
+struct hdac_ext_link *hdac_bus_eml_ssp_get_hlink(struct hdac_bus *bus)
+{
+ struct hdac_ext2_link *h2link;
+
+ h2link = find_ext2_link(bus, true, AZX_REG_ML_LEPTR_ID_INTEL_SSP);
+ if (!h2link)
+ return NULL;
+
+ return &h2link->hext_link;
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_ssp_get_hlink, SND_SOC_SOF_HDA_MLINK);
+
+struct hdac_ext_link *hdac_bus_eml_dmic_get_hlink(struct hdac_bus *bus)
+{
+ struct hdac_ext2_link *h2link;
+
+ h2link = find_ext2_link(bus, true, AZX_REG_ML_LEPTR_ID_INTEL_DMIC);
+ if (!h2link)
+ return NULL;
+
+ return &h2link->hext_link;
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_dmic_get_hlink, SND_SOC_SOF_HDA_MLINK);
+
+struct hdac_ext_link *hdac_bus_eml_sdw_get_hlink(struct hdac_bus *bus)
+{
+ struct hdac_ext2_link *h2link;
+
+ h2link = find_ext2_link(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+ if (!h2link)
+ return NULL;
+
+ return &h2link->hext_link;
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_get_hlink, SND_SOC_SOF_HDA_MLINK);
+
+int hdac_bus_eml_enable_offload(struct hdac_bus *bus, bool alt, int elid, bool enable)
+{
+ struct hdac_ext2_link *h2link;
+ struct hdac_ext_link *hlink;
+
+ h2link = find_ext2_link(bus, alt, elid);
+ if (!h2link)
+ return -ENODEV;
+
+ if (!h2link->ofls)
+ return 0;
+
+ hlink = &h2link->hext_link;
+
+ mutex_lock(&h2link->eml_lock);
+
+ hdaml_lctl_offload_enable(hlink->ml_addr + AZX_REG_ML_LCTL, enable);
+
+ mutex_unlock(&h2link->eml_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_enable_offload, SND_SOC_SOF_HDA_MLINK);
+
+#endif
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
new file mode 100644
index 000000000..f23c72cdf
--- /dev/null
+++ b/sound/soc/sof/intel/hda-pcm.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <linux/moduleparam.h>
+#include <sound/hda_register.h>
+#include <sound/pcm_params.h>
+#include <trace/events/sof_intel.h>
+#include "../sof-audio.h"
+#include "../ops.h"
+#include "hda.h"
+
+#define SDnFMT_BASE(x) ((x) << 14)
+#define SDnFMT_MULT(x) (((x) - 1) << 11)
+#define SDnFMT_DIV(x) (((x) - 1) << 8)
+#define SDnFMT_BITS(x) ((x) << 4)
+#define SDnFMT_CHAN(x) ((x) << 0)
+
+static bool hda_always_enable_dmi_l1;
+module_param_named(always_enable_dmi_l1, hda_always_enable_dmi_l1, bool, 0444);
+MODULE_PARM_DESC(always_enable_dmi_l1, "SOF HDA always enable DMI L1");
+
+static bool hda_disable_rewinds;
+module_param_named(disable_rewinds, hda_disable_rewinds, bool, 0444);
+MODULE_PARM_DESC(disable_rewinds, "SOF HDA disable rewinds");
+
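+/*
+ * Map a PCM rate to the SDnFMT base/mult/div fields. The two HDA base rates
+ * are 48 kHz (BASE(0)) and 44.1 kHz (BASE(1)), so for example
+ * 32 kHz = 48 kHz * 2 / 3 -> SDnFMT_MULT(2) | SDnFMT_DIV(3) and
+ * 22.05 kHz = 44.1 kHz / 2 -> SDnFMT_BASE(1) | SDnFMT_DIV(2).
+ */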
+u32 hda_dsp_get_mult_div(struct snd_sof_dev *sdev, int rate)
+{
+ switch (rate) {
+ case 8000:
+ return SDnFMT_DIV(6);
+ case 9600:
+ return SDnFMT_DIV(5);
+ case 11025:
+ return SDnFMT_BASE(1) | SDnFMT_DIV(4);
+ case 16000:
+ return SDnFMT_DIV(3);
+ case 22050:
+ return SDnFMT_BASE(1) | SDnFMT_DIV(2);
+ case 32000:
+ return SDnFMT_DIV(3) | SDnFMT_MULT(2);
+ case 44100:
+ return SDnFMT_BASE(1);
+ case 48000:
+ return 0;
+ case 88200:
+ return SDnFMT_BASE(1) | SDnFMT_MULT(2);
+ case 96000:
+ return SDnFMT_MULT(2);
+ case 176400:
+ return SDnFMT_BASE(1) | SDnFMT_MULT(4);
+ case 192000:
+ return SDnFMT_MULT(4);
+ default:
+  dev_warn(sdev->dev, "can't find div rate %d, using 48 kHz\n",
+   rate);
+  return 0; /* use 48 kHz if not found */
+ }
+}
+
+u32 hda_dsp_get_bits(struct snd_sof_dev *sdev, int sample_bits)
+{
+ switch (sample_bits) {
+ case 8:
+ return SDnFMT_BITS(0);
+ case 16:
+ return SDnFMT_BITS(1);
+ case 20:
+ return SDnFMT_BITS(2);
+ case 24:
+ return SDnFMT_BITS(3);
+ case 32:
+ return SDnFMT_BITS(4);
+ default:
+  dev_warn(sdev->dev, "can't find %d bits, using 16-bit format\n",
+   sample_bits);
+  return SDnFMT_BITS(1); /* use 16-bit format if not found */
+ }
+}
+
+int hda_dsp_pcm_hw_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream);
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct snd_dma_buffer *dmab;
+ int ret;
+
+ hstream->substream = substream;
+
+ dmab = substream->runtime->dma_buffer_p;
+
+ /*
+  * In DSPless mode, keep the format_val already set from the codec
+  * requirements (link_bps adjusted); otherwise derive it from the
+  * hw_params below.
+  */
+ if (!sdev->dspless_mode_selected) {
+ u32 rate = hda_dsp_get_mult_div(sdev, params_rate(params));
+ u32 bits = hda_dsp_get_bits(sdev, params_width(params));
+
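+  /*
+   * SDnFMT layout per the macros above: rate fields in bits 14:8,
+   * sample size in bits 7:4, (channels - 1) in bits 3:0;
+   * e.g. 48 kHz/16-bit/stereo resolves to 0x11.
+   */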
+ hstream->format_val = rate | bits | (params_channels(params) - 1);
+ }
+
+ hstream->bufsize = params_buffer_bytes(params);
+ hstream->period_bytes = params_period_bytes(params);
+ hstream->no_period_wakeup =
+ (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
+ (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
+
+ ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, params);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
+ return ret;
+ }
+
+ /* enable SPIB when rewinds are disabled */
+ if (hda_disable_rewinds)
+ hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_ENABLE, 0);
+ else
+ hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
+
+ if (hda)
+ platform_params->no_ipc_position = hda->no_ipc_position;
+
+ platform_params->stream_tag = hstream->stream_tag;
+
+ return 0;
+}
+
+/* update SPIB register with appl position */
+int hda_dsp_pcm_ack(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ ssize_t appl_pos, buf_size;
+ u32 spib;
+
+ appl_pos = frames_to_bytes(runtime, runtime->control->appl_ptr);
+ buf_size = frames_to_bytes(runtime, runtime->buffer_size);
+
+ spib = appl_pos % buf_size;
+
+ /* Allowable value for SPIB is 1 byte to max buffer size */
+ if (!spib)
+ spib = buf_size;
+
+ sof_io_write(sdev, hstream->spib_addr, spib);
+
+ return 0;
+}
+
+int hda_dsp_pcm_trigger(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream);
+
+ return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
+}
+
+snd_pcm_uframes_t hda_dsp_pcm_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_component *scomp = sdev->component;
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct snd_sof_pcm *spcm;
+ snd_pcm_uframes_t pos;
+
+ spcm = snd_sof_find_spcm_dai(scomp, rtd);
+ if (!spcm) {
+ dev_warn_ratelimited(sdev->dev, "warn: can't find PCM with DAI ID %d\n",
+ rtd->dai_link->id);
+ return 0;
+ }
+
+ if (hda && !hda->no_ipc_position) {
+ /* read position from IPC position */
+ pos = spcm->stream[substream->stream].posn.host_posn;
+ goto found;
+ }
+
+ pos = hda_dsp_stream_get_position(hstream, substream->stream, true);
+found:
+ pos = bytes_to_frames(substream->runtime, pos);
+
+ trace_sof_intel_hda_dsp_pcm(sdev, hstream, substream, pos);
+ return pos;
+}
+
+int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_component *scomp = sdev->component;
+ struct hdac_ext_stream *dsp_stream;
+ struct snd_sof_pcm *spcm;
+ int direction = substream->stream;
+ u32 flags = 0;
+
+ spcm = snd_sof_find_spcm_dai(scomp, rtd);
+ if (!spcm) {
+ dev_err(sdev->dev, "error: can't find PCM with DAI ID %d\n", rtd->dai_link->id);
+ return -EINVAL;
+ }
+
+ /*
+ * if we want the .ack to work, we need to prevent the control from being mapped.
+ * The status can still be mapped.
+ */
+ if (hda_disable_rewinds)
+ runtime->hw.info |= SNDRV_PCM_INFO_NO_REWINDS | SNDRV_PCM_INFO_SYNC_APPLPTR;
+
+ /*
+ * All playback streams are DMI L1 capable, capture streams need
+ * pause push/release to be disabled
+ */
+ if (hda_always_enable_dmi_l1 && direction == SNDRV_PCM_STREAM_CAPTURE)
+ runtime->hw.info &= ~SNDRV_PCM_INFO_PAUSE;
+
+ if (hda_always_enable_dmi_l1 ||
+ direction == SNDRV_PCM_STREAM_PLAYBACK ||
+ spcm->stream[substream->stream].d0i3_compatible)
+ flags |= SOF_HDA_STREAM_DMI_L1_COMPATIBLE;
+
+ dsp_stream = hda_dsp_stream_get(sdev, direction, flags);
+ if (!dsp_stream) {
+ dev_err(sdev->dev, "error: no stream available\n");
+ return -ENODEV;
+ }
+
+ /* minimum as per HDA spec */
+ snd_pcm_hw_constraint_step(substream->runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 4);
+
+ /* avoid circular buffer wrap in middle of period */
+ snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+
+ /* Only S16 and S32 supported by HDA hardware when used without DSP */
+ if (sdev->dspless_mode_selected)
+ snd_pcm_hw_constraint_mask64(substream->runtime, SNDRV_PCM_HW_PARAM_FORMAT,
+ SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S32);
+
+ /* binding pcm substream to hda stream */
+ substream->runtime->private_data = &dsp_stream->hstream;
+ return 0;
+}
+
+int hda_dsp_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ int direction = substream->stream;
+ int ret;
+
+ ret = hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
+ if (ret) {
+ dev_dbg(sdev->dev, "stream %s not opened!\n", substream->name);
+ return -ENODEV;
+ }
+
+ /* unbind the pcm substream from the hda stream */
+ substream->runtime->private_data = NULL;
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-probes.c b/sound/soc/sof/intel/hda-probes.c
new file mode 100644
index 000000000..56a533c63
--- /dev/null
+++ b/sound/soc/sof/intel/hda-probes.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2019-2021 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+// Converted to SOF client:
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/soc.h>
+#include "../sof-priv.h"
+#include "../sof-client-probes.h"
+#include "../sof-client.h"
+#include "hda.h"
+
+static inline struct hdac_ext_stream *
+hda_compr_get_stream(struct snd_compr_stream *cstream)
+{
+ return cstream->runtime->private_data;
+}
+
+static int hda_probes_compr_startup(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai, u32 *stream_id)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+ struct hdac_ext_stream *hext_stream;
+
+ hext_stream = hda_dsp_stream_get(sdev, cstream->direction, 0);
+ if (!hext_stream)
+ return -EBUSY;
+
+ hdac_stream(hext_stream)->curr_pos = 0;
+ hdac_stream(hext_stream)->cstream = cstream;
+ cstream->runtime->private_data = hext_stream;
+
+ *stream_id = hdac_stream(hext_stream)->stream_tag;
+
+ return 0;
+}
+
+static int hda_probes_compr_shutdown(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *hext_stream = hda_compr_get_stream(cstream);
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+ int ret;
+
+ ret = hda_dsp_stream_put(sdev, cstream->direction,
+ hdac_stream(hext_stream)->stream_tag);
+ if (ret < 0) {
+ dev_dbg(sdev->dev, "stream put failed: %d\n", ret);
+ return ret;
+ }
+
+ hdac_stream(hext_stream)->cstream = NULL;
+ cstream->runtime->private_data = NULL;
+
+ return 0;
+}
+
+static int hda_probes_compr_set_params(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *hext_stream = hda_compr_get_stream(cstream);
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+ struct hdac_stream *hstream = hdac_stream(hext_stream);
+ struct snd_dma_buffer *dmab;
+ u32 bits, rate;
+ int bps, ret;
+
+ dmab = cstream->runtime->dma_buffer_p;
+ /* compr params do not store bit depth, default to S32_LE */
+ bps = snd_pcm_format_physical_width(SNDRV_PCM_FORMAT_S32_LE);
+ if (bps < 0)
+ return bps;
+ bits = hda_dsp_get_bits(sdev, bps);
+ rate = hda_dsp_get_mult_div(sdev, params->codec.sample_rate);
+
+ hstream->format_val = rate | bits | (params->codec.ch_out - 1);
+ hstream->bufsize = cstream->runtime->buffer_size;
+ hstream->period_bytes = cstream->runtime->fragment_size;
+ hstream->no_period_wakeup = 0;
+
+ ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hda_probes_compr_trigger(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *hext_stream = hda_compr_get_stream(cstream);
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+
+ return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
+}
+
+static int hda_probes_compr_pointer(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *hext_stream = hda_compr_get_stream(cstream);
+ struct snd_soc_pcm_stream *pstream;
+
+ pstream = &dai->driver->capture;
+ tstamp->copied_total = hdac_stream(hext_stream)->curr_pos;
+ tstamp->sampling_rate = snd_pcm_rate_bit_to_rate(pstream->rates);
+
+ return 0;
+}
+
+/* SOF client implementation */
+static const struct sof_probes_host_ops hda_probes_ops = {
+ .startup = hda_probes_compr_startup,
+ .shutdown = hda_probes_compr_shutdown,
+ .set_params = hda_probes_compr_set_params,
+ .trigger = hda_probes_compr_trigger,
+ .pointer = hda_probes_compr_pointer,
+};
+
+int hda_probes_register(struct snd_sof_dev *sdev)
+{
+ return sof_client_dev_register(sdev, "hda-probes", 0, &hda_probes_ops,
+ sizeof(hda_probes_ops));
+}
+
+void hda_probes_unregister(struct snd_sof_dev *sdev)
+{
+ sof_client_dev_unregister(sdev, "hda-probes", 0);
+}
+
+MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
new file mode 100644
index 000000000..0b0087abc
--- /dev/null
+++ b/sound/soc/sof/intel/hda-stream.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/sof.h>
+#include <trace/events/sof_intel.h>
+#include "../ops.h"
+#include "../sof-audio.h"
+#include "hda.h"
+
+#define HDA_LTRP_GB_VALUE_US 95
+
+static inline const char *hda_hstream_direction_str(struct hdac_stream *hstream)
+{
+ if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
+ return "Playback";
+ else
+ return "Capture";
+}
+
+static char *hda_hstream_dbg_get_stream_info_str(struct hdac_stream *hstream)
+{
+ struct snd_soc_pcm_runtime *rtd;
+
+ if (hstream->substream)
+ rtd = asoc_substream_to_rtd(hstream->substream);
+ else if (hstream->cstream)
+ rtd = hstream->cstream->private_data;
+ else
+ /* Non audio DMA user, like dma-trace */
+ return kasprintf(GFP_KERNEL, "-- (%s, stream_tag: %u)",
+ hda_hstream_direction_str(hstream),
+ hstream->stream_tag);
+
+ return kasprintf(GFP_KERNEL, "dai_link \"%s\" (%s, stream_tag: %u)",
+ rtd->dai_link->name, hda_hstream_direction_str(hstream),
+ hstream->stream_tag);
+}
+
+/*
+ * set up one of BDL entries for a stream
+ */
+static int hda_setup_bdle(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct hdac_stream *hstream,
+ struct sof_intel_dsp_bdl **bdlp,
+ int offset, int size, int ioc)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct sof_intel_dsp_bdl *bdl = *bdlp;
+
+ while (size > 0) {
+ dma_addr_t addr;
+ int chunk;
+
+ if (hstream->frags >= HDA_DSP_MAX_BDL_ENTRIES) {
+ dev_err(sdev->dev, "error: stream frags exceeded\n");
+ return -EINVAL;
+ }
+
+ addr = snd_sgbuf_get_addr(dmab, offset);
+ /* program BDL addr */
+ bdl->addr_l = cpu_to_le32(lower_32_bits(addr));
+ bdl->addr_h = cpu_to_le32(upper_32_bits(addr));
+ /* program BDL size */
+ chunk = snd_sgbuf_get_chunk_size(dmab, offset, size);
+ /* one BDLE should not cross 4K boundary */
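+  /*
+   * e.g. at offset 0xf00 only 0x100 bytes fit before the 4 KiB
+   * boundary; the chunk is clamped and the remainder becomes the
+   * next BDL entry on the following iteration.
+   */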
+ if (bus->align_bdle_4k) {
+ u32 remain = 0x1000 - (offset & 0xfff);
+
+ if (chunk > remain)
+ chunk = remain;
+ }
+ bdl->size = cpu_to_le32(chunk);
+ /* only program IOC when the whole segment is processed */
+ size -= chunk;
+ bdl->ioc = (size || !ioc) ? 0 : cpu_to_le32(0x01);
+ bdl++;
+ hstream->frags++;
+ offset += chunk;
+ }
+
+ *bdlp = bdl;
+ return offset;
+}
+
+/*
+ * set up Buffer Descriptor List (BDL) for host memory transfer
+ * BDL describes the location of the individual buffers and is little endian.
+ */
+int hda_dsp_stream_setup_bdl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct hdac_stream *hstream)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct sof_intel_dsp_bdl *bdl;
+ int i, offset, period_bytes, periods;
+ int remain, ioc;
+
+ period_bytes = hstream->period_bytes;
+ dev_dbg(sdev->dev, "period_bytes:0x%x\n", period_bytes);
+ if (!period_bytes)
+ period_bytes = hstream->bufsize;
+
+ periods = hstream->bufsize / period_bytes;
+
+ dev_dbg(sdev->dev, "periods:%d\n", periods);
+
+ remain = hstream->bufsize % period_bytes;
+ if (remain)
+ periods++;
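+ /*
+  * e.g. a 0x9000 byte buffer with 0x4000 byte periods yields two full
+  * entries plus one 0x1000 byte tail entry.
+  */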
+
+ /* program the initial BDL entries */
+ bdl = (struct sof_intel_dsp_bdl *)hstream->bdl.area;
+ offset = 0;
+ hstream->frags = 0;
+
+ /*
+  * set IOC if the position IPC is not used
+  * and period wakeups are needed.
+  */
+ ioc = hda->no_ipc_position ?
+ !hstream->no_period_wakeup : 0;
+
+ for (i = 0; i < periods; i++) {
+ if (i == (periods - 1) && remain)
+ /* set the last small entry */
+ offset = hda_setup_bdle(sdev, dmab,
+ hstream, &bdl, offset,
+ remain, 0);
+ else
+ offset = hda_setup_bdle(sdev, dmab,
+ hstream, &bdl, offset,
+ period_bytes, ioc);
+ }
+
+ return offset;
+}
+
+int hda_dsp_stream_spib_config(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream,
+ int enable, u32 size)
+{
+ struct hdac_stream *hstream = &hext_stream->hstream;
+ u32 mask;
+
+ if (!sdev->bar[HDA_DSP_SPIB_BAR]) {
+ dev_err(sdev->dev, "error: address of spib capability is NULL\n");
+ return -EINVAL;
+ }
+
+ mask = (1 << hstream->index);
+
+ /* enable/disable SPIB for the stream */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_SPIB_BAR,
+ SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL, mask,
+ enable << hstream->index);
+
+ /* set the SPIB value */
+ sof_io_write(sdev, hstream->spib_addr, size);
+
+ return 0;
+}
+
+/* get next unused stream */
+struct hdac_ext_stream *
+hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction, u32 flags)
+{
+ const struct sof_intel_dsp_desc *chip_info = get_chip_info(sdev->pdata);
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct sof_intel_hda_stream *hda_stream;
+ struct hdac_ext_stream *hext_stream = NULL;
+ struct hdac_stream *s;
+
+ spin_lock_irq(&bus->reg_lock);
+
+ /* get an unused stream */
+ list_for_each_entry(s, &bus->stream_list, list) {
+ if (s->direction == direction && !s->opened) {
+ hext_stream = stream_to_hdac_ext_stream(s);
+ hda_stream = container_of(hext_stream,
+ struct sof_intel_hda_stream,
+ hext_stream);
+ /* check if the host DMA channel is reserved */
+ if (hda_stream->host_reserved)
+ continue;
+
+ s->opened = true;
+ break;
+ }
+ }
+
+ spin_unlock_irq(&bus->reg_lock);
+
+ /* stream found ? */
+ if (!hext_stream) {
+ dev_err(sdev->dev, "error: no free %s streams\n",
+ direction == SNDRV_PCM_STREAM_PLAYBACK ?
+ "playback" : "capture");
+ return hext_stream;
+ }
+
+ hda_stream->flags = flags;
+
+ /*
+ * Prevent DMI Link L1 entry for streams that don't support it.
+ * Workaround to address a known issue with host DMA that results
+ * in xruns during pause/release in capture scenarios. This is not needed for the ACE IP.
+ */
+ if (chip_info->hw_ip_version < SOF_INTEL_ACE_1_0 &&
+ !(flags & SOF_HDA_STREAM_DMI_L1_COMPATIBLE)) {
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, 0);
+ hda->l1_disabled = true;
+ }
+
+ return hext_stream;
+}
+
+/* free a stream */
+int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag)
+{
+ const struct sof_intel_dsp_desc *chip_info = get_chip_info(sdev->pdata);
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct sof_intel_hda_stream *hda_stream;
+ struct hdac_ext_stream *hext_stream;
+ struct hdac_stream *s;
+ bool dmi_l1_enable = true;
+ bool found = false;
+
+ spin_lock_irq(&bus->reg_lock);
+
+ /*
+ * close stream matching the stream tag and check if there are any open streams
+ * that are DMI L1 incompatible.
+ */
+ list_for_each_entry(s, &bus->stream_list, list) {
+ hext_stream = stream_to_hdac_ext_stream(s);
+ hda_stream = container_of(hext_stream, struct sof_intel_hda_stream, hext_stream);
+
+ if (!s->opened)
+ continue;
+
+ if (s->direction == direction && s->stream_tag == stream_tag) {
+ s->opened = false;
+ found = true;
+ } else if (!(hda_stream->flags & SOF_HDA_STREAM_DMI_L1_COMPATIBLE)) {
+ dmi_l1_enable = false;
+ }
+ }
+
+ spin_unlock_irq(&bus->reg_lock);
+
+ /* Enable DMI L1 if permitted */
+ if (chip_info->hw_ip_version < SOF_INTEL_ACE_1_0 && dmi_l1_enable) {
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN);
+ hda->l1_disabled = false;
+ }
+
+ if (!found) {
+ dev_err(sdev->dev, "%s: stream_tag %d not opened!\n",
+ __func__, stream_tag);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int hda_dsp_stream_reset(struct snd_sof_dev *sdev, struct hdac_stream *hstream)
+{
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ int timeout = HDA_DSP_STREAM_RESET_TIMEOUT;
+ u32 val;
+
+ /* enter stream reset */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset, SOF_STREAM_SD_OFFSET_CRST,
+ SOF_STREAM_SD_OFFSET_CRST);
+ do {
+ val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, sd_offset);
+ if (val & SOF_STREAM_SD_OFFSET_CRST)
+ break;
+ } while (--timeout);
+ if (timeout == 0) {
+ dev_err(sdev->dev, "timeout waiting for stream reset\n");
+ return -ETIMEDOUT;
+ }
+
+ timeout = HDA_DSP_STREAM_RESET_TIMEOUT;
+
+ /* exit stream reset and wait to read a zero before reading any other register */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset, SOF_STREAM_SD_OFFSET_CRST, 0x0);
+
+ /* wait for hardware to report that stream is out of reset */
+ udelay(3);
+ do {
+ val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, sd_offset);
+ if ((val & SOF_STREAM_SD_OFFSET_CRST) == 0)
+ break;
+ } while (--timeout);
+ if (timeout == 0) {
+ dev_err(sdev->dev, "timeout waiting for stream to exit reset\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream, int cmd)
+{
+ struct hdac_stream *hstream = &hext_stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ u32 dma_start = SOF_HDA_SD_CTL_DMA_START;
+ int ret = 0;
+ u32 run;
+
+ /* cmd must be for audio stream */
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
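+  /*
+   * With the DSP in use the host DMA is left untouched across
+   * pause push/release (pause is handled elsewhere); in DSPless
+   * mode pause is treated like a regular start/stop of the DMA.
+   */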
+ if (!sdev->dspless_mode_selected)
+ break;
+ fallthrough;
+ case SNDRV_PCM_TRIGGER_START:
+ if (hstream->running)
+ break;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ 1 << hstream->index,
+ 1 << hstream->index);
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev,
+ HDA_DSP_HDA_BAR,
+ sd_offset, run,
+ ((run & dma_start) == dma_start),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_STREAM_RUN_TIMEOUT);
+
+ if (ret >= 0)
+ hstream->running = true;
+
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (!sdev->dspless_mode_selected)
+ break;
+ fallthrough;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK, 0x0);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
+ sd_offset, run,
+ !(run & dma_start),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_STREAM_RUN_TIMEOUT);
+
+ if (ret >= 0) {
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ hstream->running = false;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_INTCTL,
+ 1 << hstream->index, 0x0);
+ }
+ break;
+ default:
+ dev_err(sdev->dev, "error: unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ if (ret < 0) {
+ char *stream_name = hda_hstream_dbg_get_stream_info_str(hstream);
+
+ dev_err(sdev->dev,
+ "%s: cmd %d on %s: timeout on STREAM_SD_OFFSET read\n",
+ __func__, cmd, stream_name ? stream_name : "unknown stream");
+ kfree(stream_name);
+ }
+
+ return ret;
+}
+
+/* minimal recommended programming for ICCMAX stream */
+int hda_dsp_iccmax_stream_hw_params(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params)
+{
+ struct hdac_stream *hstream;
+ int sd_offset;
+ u32 mask;
+ int ret;
+
+ if (!hext_stream) {
+  dev_err(sdev->dev, "error: no stream available\n");
+  return -ENODEV;
+ }
+
+ if (!dmab) {
+  dev_err(sdev->dev, "error: no dma buffer allocated!\n");
+  return -ENODEV;
+ }
+
+ hstream = &hext_stream->hstream;
+ sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ mask = BIT(hstream->index);
+
+ if (hstream->posbuf)
+ *hstream->posbuf = 0;
+
+ /* reset BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL,
+ 0x0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU,
+ 0x0);
+
+ hstream->frags = 0;
+
+ ret = hda_dsp_stream_setup_bdl(sdev, dmab, hstream);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: set up of BDL failed\n");
+ return ret;
+ }
+
+ /* program BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL,
+ (u32)hstream->bdl.addr);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU,
+ upper_32_bits(hstream->bdl.addr));
+
+ /* program cyclic buffer length */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_CBL,
+ hstream->bufsize);
+
+ /* program last valid index */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_LVI,
+ 0xffff, (hstream->frags - 1));
+
+ /* decouple host and link DMA, enable DSP features */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, mask);
+
+ /* Follow HW recommendation to set the guardband value to 95us during FW boot */
+ snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP,
+ HDA_VS_INTEL_LTRP_GB_MASK, HDA_LTRP_GB_VALUE_US);
+
+ /* start DMA */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_SD_CTL_DMA_START, SOF_HDA_SD_CTL_DMA_START);
+
+ return 0;
+}
+
+/*
+ * prepare for common hdac registers settings, for both code loader
+ * and normal stream.
+ */
+int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params)
+{
+ const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *hstream;
+ int sd_offset, ret;
+ u32 dma_start = SOF_HDA_SD_CTL_DMA_START;
+ u32 mask;
+ u32 run;
+
+ if (!hext_stream) {
+ dev_err(sdev->dev, "error: no stream available\n");
+ return -ENODEV;
+ }
+
+ if (!dmab) {
+ dev_err(sdev->dev, "error: no dma buffer allocated!\n");
+ return -ENODEV;
+ }
+
+ hstream = &hext_stream->hstream;
+ sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ mask = BIT(hstream->index);
+
+ /* decouple host and link DMA if the DSP is used */
+ if (!sdev->dspless_mode_selected)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, mask);
+
+ /* clear stream status */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_DMA_SD_INT_MASK |
+ SOF_HDA_SD_CTL_DMA_START, 0);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
+ sd_offset, run,
+ !(run & dma_start),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_STREAM_RUN_TIMEOUT);
+
+ if (ret < 0) {
+ char *stream_name = hda_hstream_dbg_get_stream_info_str(hstream);
+
+ dev_err(sdev->dev,
+ "%s: on %s: timeout on STREAM_SD_OFFSET read1\n",
+ __func__, stream_name ? stream_name : "unknown stream");
+ kfree(stream_name);
+ return ret;
+ }
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ /* stream reset */
+ ret = hda_dsp_stream_reset(sdev, hstream);
+ if (ret < 0)
+ return ret;
+
+ if (hstream->posbuf)
+ *hstream->posbuf = 0;
+
+ /* reset BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL,
+ 0x0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU,
+ 0x0);
+
+ /* clear stream status */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_DMA_SD_INT_MASK |
+ SOF_HDA_SD_CTL_DMA_START, 0);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
+ sd_offset, run,
+ !(run & dma_start),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_STREAM_RUN_TIMEOUT);
+
+ if (ret < 0) {
+ char *stream_name = hda_hstream_dbg_get_stream_info_str(hstream);
+
+ dev_err(sdev->dev,
+   "%s: on %s: timeout on STREAM_SD_OFFSET read2\n",
+ __func__, stream_name ? stream_name : "unknown stream");
+ kfree(stream_name);
+ return ret;
+ }
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ hstream->frags = 0;
+
+ ret = hda_dsp_stream_setup_bdl(sdev, dmab, hstream);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: set up of BDL failed\n");
+ return ret;
+ }
+
+ /* program stream tag to set up stream descriptor for DMA */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_SD_CTL_STREAM_TAG_MASK,
+ hstream->stream_tag <<
+ SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT);
+
+ /* program cyclic buffer length */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_CBL,
+ hstream->bufsize);
+
+ /*
+ * Recommended hardware programming sequence for HDAudio DMA format
+ * on earlier platforms - this is not needed on newer platforms
+ *
+ * 1. Put DMA into coupled mode by clearing PPCTL.PROCEN bit
+ * for corresponding stream index before the time of writing
+ * format to SDxFMT register.
+ * 2. Write SDxFMT
+ * 3. Set PPCTL.PROCEN bit for corresponding stream index to
+ * enable decoupled mode
+ */
+
+ if (!sdev->dspless_mode_selected && (chip->quirks & SOF_INTEL_PROCEN_FMT_QUIRK))
+ /* couple host and link DMA, disable DSP features */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, 0);
+
+ /* program stream format */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset +
+ SOF_HDA_ADSP_REG_SD_FORMAT,
+ 0xffff, hstream->format_val);
+
+ if (!sdev->dspless_mode_selected && (chip->quirks & SOF_INTEL_PROCEN_FMT_QUIRK))
+ /* decouple host and link DMA, enable DSP features */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, mask);
+
+ /* program last valid index */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_LVI,
+ 0xffff, (hstream->frags - 1));
+
+ /* program BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL,
+ (u32)hstream->bdl.addr);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU,
+ upper_32_bits(hstream->bdl.addr));
+
+ /* enable position buffer, if needed */
+ if (bus->use_posbuf && bus->posbuf.addr &&
+ !(snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE)
+ & SOF_HDA_ADSP_DPLBASE_ENABLE)) {
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPUBASE,
+ upper_32_bits(bus->posbuf.addr));
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE,
+ (u32)bus->posbuf.addr |
+ SOF_HDA_ADSP_DPLBASE_ENABLE);
+ }
+
+ /* set interrupt enable bits */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ /* read FIFO size */
+ if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ hstream->fifo_size =
+ snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ sd_offset +
+ SOF_HDA_ADSP_REG_SD_FIFOSIZE);
+ hstream->fifo_size &= 0xffff;
+ hstream->fifo_size += 1;
+ } else {
+ hstream->fifo_size = 0;
+ }
+
+ return ret;
+}
+
+int hda_dsp_stream_hw_free(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *hext_stream = container_of(hstream,
+ struct hdac_ext_stream,
+ hstream);
+ int ret;
+
+ ret = hda_dsp_stream_reset(sdev, hstream);
+ if (ret < 0)
+ return ret;
+
+ if (!sdev->dspless_mode_selected) {
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ u32 mask = BIT(hstream->index);
+
+ spin_lock_irq(&bus->reg_lock);
+ /* couple host and link DMA if link DMA channel is idle */
+ if (!hext_stream->link_locked)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR,
+ SOF_HDA_REG_PP_PPCTL, mask, 0);
+ spin_unlock_irq(&bus->reg_lock);
+ }
+
+ hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
+
+ hstream->substream = NULL;
+
+ return 0;
+}
+
+bool hda_dsp_check_stream_irq(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ bool ret = false;
+ u32 status;
+
+ /* The function can be called at irq thread, so use spin_lock_irq */
+ spin_lock_irq(&bus->reg_lock);
+
+ status = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS);
+
+ trace_sof_intel_hda_dsp_check_stream_irq(sdev, status);
+
+ /* if the register is inaccessible, ignore it */
+ if (status != 0xffffffff)
+ ret = true;
+
+ spin_unlock_irq(&bus->reg_lock);
+
+ return ret;
+}
+
+static void
+hda_dsp_compr_bytes_transferred(struct hdac_stream *hstream, int direction)
+{
+ u64 buffer_size = hstream->bufsize;
+ u64 prev_pos, pos, num_bytes;
+
+ div64_u64_rem(hstream->curr_pos, buffer_size, &prev_pos);
+ pos = hda_dsp_stream_get_position(hstream, direction, false);
+
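+ /* if the new position is behind the previous one, the ring buffer wrapped */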
+ if (pos < prev_pos)
+ num_bytes = (buffer_size - prev_pos) + pos;
+ else
+ num_bytes = pos - prev_pos;
+
+ hstream->curr_pos += num_bytes;
+}
+
+static bool hda_dsp_stream_check(struct hdac_bus *bus, u32 status)
+{
+ struct sof_intel_hda_dev *sof_hda = bus_to_sof_hda(bus);
+ struct hdac_stream *s;
+ bool active = false;
+ u32 sd_status;
+
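+ /* status is a snapshot of INTSTS: one bit per stream with a pending IRQ */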
+ list_for_each_entry(s, &bus->stream_list, list) {
+ if (status & BIT(s->index) && s->opened) {
+ sd_status = readb(s->sd_addr + SOF_HDA_ADSP_REG_SD_STS);
+
+ trace_sof_intel_hda_dsp_stream_status(bus->dev, s, sd_status);
+
+ writeb(sd_status, s->sd_addr + SOF_HDA_ADSP_REG_SD_STS);
+
+ active = true;
+ if ((!s->substream && !s->cstream) ||
+ !s->running ||
+ (sd_status & SOF_HDA_CL_DMA_SD_INT_COMPLETE) == 0)
+ continue;
+
+   /* Inform ALSA only if the position is not reported via IPC */
+ if (s->substream && sof_hda->no_ipc_position) {
+ snd_sof_pcm_period_elapsed(s->substream);
+ } else if (s->cstream) {
+ hda_dsp_compr_bytes_transferred(s, s->cstream->direction);
+ snd_compr_fragment_elapsed(s->cstream);
+ }
+ }
+ }
+
+ return active;
+}
+
+irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ bool active;
+ u32 status;
+ int i;
+
+ /*
+ * Loop 10 times to handle missed interrupts caused by
+ * unsolicited responses from the codec
+ */
+ for (i = 0, active = true; i < 10 && active; i++) {
+ spin_lock_irq(&bus->reg_lock);
+
+ status = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS);
+
+ /* check streams */
+ active = hda_dsp_stream_check(bus, status);
+
+ /* check and clear RIRB interrupt */
+  if (status & AZX_INT_CTRL_EN)
+   active |= hda_codec_check_rirb_status(sdev);
+ spin_unlock_irq(&bus->reg_lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int hda_dsp_stream_init(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_stream *hext_stream;
+ struct hdac_stream *hstream;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ struct sof_intel_hda_dev *sof_hda = bus_to_sof_hda(bus);
+ int sd_offset;
+ int i, num_playback, num_capture, num_total, ret;
+ u32 gcap;
+
+ gcap = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCAP);
+ dev_dbg(sdev->dev, "hda global caps = 0x%x\n", gcap);
+
+ /* get stream count from GCAP */
+ num_capture = (gcap >> 8) & 0x0f;
+ num_playback = (gcap >> 12) & 0x0f;
+ num_total = num_playback + num_capture;
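+ /*
+  * GCAP bits 11:8 hold the capture stream count and bits 15:12 the
+  * playback stream count, e.g. a GCAP of 0x9600 decodes to 6 capture
+  * and 9 playback streams.
+  */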
+
+ dev_dbg(sdev->dev, "detected %d playback and %d capture streams\n",
+ num_playback, num_capture);
+
+ if (num_playback >= SOF_HDA_PLAYBACK_STREAMS) {
+ dev_err(sdev->dev, "error: too many playback streams %d\n",
+ num_playback);
+ return -EINVAL;
+ }
+
+ if (num_capture >= SOF_HDA_CAPTURE_STREAMS) {
+  dev_err(sdev->dev, "error: too many capture streams %d\n",
+   num_capture);
+ return -EINVAL;
+ }
+
+ /*
+ * mem alloc for the position buffer
+ * TODO: check position buffer update
+ */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ SOF_HDA_DPIB_ENTRY_SIZE * num_total,
+ &bus->posbuf);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: posbuffer dma alloc failed\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * mem alloc for the CORB/RIRB ringbuffers - this will be used only for
+ * HDAudio codecs
+ */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ PAGE_SIZE, &bus->rb);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: RB alloc failed\n");
+ return -ENOMEM;
+ }
+
+ /* create capture and playback streams */
+ for (i = 0; i < num_total; i++) {
+ struct sof_intel_hda_stream *hda_stream;
+
+ hda_stream = devm_kzalloc(sdev->dev, sizeof(*hda_stream),
+ GFP_KERNEL);
+ if (!hda_stream)
+ return -ENOMEM;
+
+ hda_stream->sdev = sdev;
+
+ hext_stream = &hda_stream->hext_stream;
+
+ if (sdev->bar[HDA_DSP_PP_BAR]) {
+ hext_stream->pphc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPHC_BASE + SOF_HDA_PPHC_INTERVAL * i;
+
+ hext_stream->pplc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPLC_BASE + SOF_HDA_PPLC_MULTI * num_total +
+ SOF_HDA_PPLC_INTERVAL * i;
+ }
+
+ hstream = &hext_stream->hstream;
+
+ /* do we support SPIB */
+ if (sdev->bar[HDA_DSP_SPIB_BAR]) {
+ hstream->spib_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_SPIB;
+
+ hstream->fifo_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_MAXFIFO;
+ }
+
+ hstream->bus = bus;
+ hstream->sd_int_sta_mask = 1 << i;
+ hstream->index = i;
+ sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ hstream->sd_addr = sdev->bar[HDA_DSP_HDA_BAR] + sd_offset;
+ hstream->opened = false;
+ hstream->running = false;
+
+ if (i < num_capture) {
+ hstream->stream_tag = i + 1;
+ hstream->direction = SNDRV_PCM_STREAM_CAPTURE;
+ } else {
+ hstream->stream_tag = i - num_capture + 1;
+ hstream->direction = SNDRV_PCM_STREAM_PLAYBACK;
+ }
+
+ /* mem alloc for stream BDL */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ HDA_DSP_BDL_SIZE, &hstream->bdl);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: stream bdl dma alloc failed\n");
+ return -ENOMEM;
+ }
+
+ hstream->posbuf = (__le32 *)(bus->posbuf.area +
+ (hstream->index) * 8);
+
+ list_add_tail(&hstream->list, &bus->stream_list);
+ }
+
+ /* store total stream count (playback + capture) from GCAP */
+ sof_hda->stream_max = num_total;
+
+ return 0;
+}
+
+void hda_dsp_stream_free(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *s, *_s;
+ struct hdac_ext_stream *hext_stream;
+ struct sof_intel_hda_stream *hda_stream;
+
+ /* free position buffer */
+ if (bus->posbuf.area)
+ snd_dma_free_pages(&bus->posbuf);
+
+ /* free CORB/RIRB buffer - only used for HDaudio codecs */
+ if (bus->rb.area)
+ snd_dma_free_pages(&bus->rb);
+
+ list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
+ /* TODO: decouple */
+
+ /* free bdl buffer */
+ if (s->bdl.area)
+ snd_dma_free_pages(&s->bdl);
+ list_del(&s->list);
+ hext_stream = stream_to_hdac_ext_stream(s);
+ hda_stream = container_of(hext_stream, struct sof_intel_hda_stream,
+ hext_stream);
+ devm_kfree(sdev->dev, hda_stream);
+ }
+}
+
+snd_pcm_uframes_t hda_dsp_stream_get_position(struct hdac_stream *hstream,
+ int direction, bool can_sleep)
+{
+ struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream);
+ struct sof_intel_hda_stream *hda_stream = hstream_to_sof_hda_stream(hext_stream);
+ struct snd_sof_dev *sdev = hda_stream->sdev;
+ snd_pcm_uframes_t pos;
+
+ switch (sof_hda_position_quirk) {
+ case SOF_HDA_POSITION_QUIRK_USE_SKYLAKE_LEGACY:
+ /*
+ * This legacy code, inherited from the Skylake driver,
+ * mixes DPIB registers and DPIB DDR updates and
+ * does not seem to follow any known hardware recommendations.
+   * It's not clear, e.g., why there is a different flow for
+   * capture and playback; the only information that matters is
+   * which traffic class is used, and since all SOF-enabled
+   * platforms only support VC0, the workaround was likely
+   * unnecessary and quite possibly wrong.
+ */
+
+ /* DPIB/posbuf position mode:
+ * For Playback, Use DPIB register from HDA space which
+ * reflects the actual data transferred.
+ * For Capture, Use the position buffer for pointer, as DPIB
+ * is not accurate enough, its update may be completed
+ * earlier than the data written to DDR.
+ */
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ AZX_REG_VS_SDXDPIB_XBASE +
+ (AZX_REG_VS_SDXDPIB_XINTERVAL *
+ hstream->index));
+ } else {
+   /*
+    * For capture streams, an additional workaround is needed
+    * to fix incorrect position reporting:
+ *
+ * 1. Wait at least 20us before reading position buffer after
+ * the interrupt generated(IOC), to make sure position update
+ * happens on frame boundary i.e. 20.833uSec for 48KHz.
+ * 2. Perform a dummy Read to DPIB register to flush DMA
+ * position value.
+ * 3. Read the DMA Position from posbuf. Now the readback
+ * value should be >= period boundary.
+ */
+ if (can_sleep)
+ usleep_range(20, 21);
+
+ snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ AZX_REG_VS_SDXDPIB_XBASE +
+ (AZX_REG_VS_SDXDPIB_XINTERVAL *
+ hstream->index));
+ pos = snd_hdac_stream_get_pos_posbuf(hstream);
+ }
+ break;
+ case SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS:
+ /*
+ * In case VC1 traffic is disabled this is the recommended option
+ */
+ pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ AZX_REG_VS_SDXDPIB_XBASE +
+ (AZX_REG_VS_SDXDPIB_XINTERVAL *
+ hstream->index));
+ break;
+ case SOF_HDA_POSITION_QUIRK_USE_DPIB_DDR_UPDATE:
+ /*
+ * This is the recommended option when VC1 is enabled.
+   * While this isn't needed for SOF platforms, it's added for
+   * consistency and debugging.
+ */
+ pos = snd_hdac_stream_get_pos_posbuf(hstream);
+ break;
+ default:
+ dev_err_once(sdev->dev, "hda_position_quirk value %d not supported\n",
+ sof_hda_position_quirk);
+ pos = 0;
+ break;
+ }
+
+ if (pos >= hstream->bufsize)
+ pos = 0;
+
+ return pos;
+}
diff --git a/sound/soc/sof/intel/hda-trace.c b/sound/soc/sof/intel/hda-trace.c
new file mode 100644
index 000000000..cbb9bd777
--- /dev/null
+++ b/sound/soc/sof/intel/hda-trace.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/hdaudio_ext.h>
+#include "../ops.h"
+#include "hda.h"
+
+static int hda_dsp_trace_prepare(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_ext_stream *hext_stream = hda->dtrace_stream;
+ struct hdac_stream *hstream = &hext_stream->hstream;
+ int ret;
+
+ hstream->period_bytes = 0; /* initialize period_bytes */
+ hstream->bufsize = dmab->bytes;
+
+ ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
+
+ return ret;
+}
+
+int hda_dsp_trace_init(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
+ struct sof_ipc_dma_trace_params_ext *dtrace_params)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ int ret;
+
+ hda->dtrace_stream = hda_dsp_stream_get(sdev, SNDRV_PCM_STREAM_CAPTURE,
+ SOF_HDA_STREAM_DMI_L1_COMPATIBLE);
+
+ if (!hda->dtrace_stream) {
+ dev_err(sdev->dev,
+ "error: no available capture stream for DMA trace\n");
+ return -ENODEV;
+ }
+
+ dtrace_params->stream_tag = hda->dtrace_stream->hstream.stream_tag;
+
+ /*
+ * initialize capture stream, set BDL address and return corresponding
+ * stream tag which will be sent to the firmware by IPC message.
+ */
+ ret = hda_dsp_trace_prepare(sdev, dmab);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac trace init failed: %d\n", ret);
+ hda_dsp_stream_put(sdev, SNDRV_PCM_STREAM_CAPTURE,
+ dtrace_params->stream_tag);
+ hda->dtrace_stream = NULL;
+ dtrace_params->stream_tag = 0;
+ }
+
+ return ret;
+}
+
+int hda_dsp_trace_release(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_stream *hstream;
+
+ if (hda->dtrace_stream) {
+ hstream = &hda->dtrace_stream->hstream;
+ hda_dsp_stream_put(sdev,
+ SNDRV_PCM_STREAM_CAPTURE,
+ hstream->stream_tag);
+ hda->dtrace_stream = NULL;
+ return 0;
+ }
+
+ dev_dbg(sdev->dev, "DMA trace stream is not opened!\n");
+ return -ENODEV;
+}
+
+int hda_dsp_trace_trigger(struct snd_sof_dev *sdev, int cmd)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+
+ return hda_dsp_stream_trigger(sdev, hda->dtrace_stream, cmd);
+}
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
new file mode 100644
index 000000000..15e6779ef
--- /dev/null
+++ b/sound/soc/sof/intel/hda.c
@@ -0,0 +1,1768 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_intel.h>
+#include <sound/intel-dsp-config.h>
+#include <sound/intel-nhlt.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include <sound/hda-mlink.h>
+#include "../sof-audio.h"
+#include "../sof-pci-dev.h"
+#include "../ops.h"
+#include "hda.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/sof_intel.h>
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+#include <sound/soc-acpi-intel-match.h>
+#endif
+
+/* platform specific devices */
+#include "shim.h"
+
+#define EXCEPT_MAX_HDR_SIZE 0x400
+#define HDA_EXT_ROM_STATUS_SIZE 8
+
+static u32 hda_get_interface_mask(struct snd_sof_dev *sdev)
+{
+ const struct sof_intel_dsp_desc *chip;
+ u32 interface_mask[2] = { 0 };
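+ /* index 0: interfaces usable with the DSP, index 1: DSPless mode */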
+
+ chip = get_chip_info(sdev->pdata);
+ switch (chip->hw_ip_version) {
+ case SOF_INTEL_TANGIER:
+ case SOF_INTEL_BAYTRAIL:
+ case SOF_INTEL_BROADWELL:
+ interface_mask[0] = BIT(SOF_DAI_INTEL_SSP);
+ break;
+ case SOF_INTEL_CAVS_1_5:
+ case SOF_INTEL_CAVS_1_5_PLUS:
+ interface_mask[0] = BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
+ BIT(SOF_DAI_INTEL_HDA);
+ interface_mask[1] = BIT(SOF_DAI_INTEL_HDA);
+ break;
+ case SOF_INTEL_CAVS_1_8:
+ case SOF_INTEL_CAVS_2_0:
+ case SOF_INTEL_CAVS_2_5:
+ case SOF_INTEL_ACE_1_0:
+ interface_mask[0] = BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
+ BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH);
+ interface_mask[1] = BIT(SOF_DAI_INTEL_HDA);
+ break;
+ case SOF_INTEL_ACE_2_0:
+ interface_mask[0] = BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
+ BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH);
+ interface_mask[1] = interface_mask[0]; /* all interfaces accessible without DSP */
+ break;
+ default:
+ break;
+ }
+
+ return interface_mask[sdev->dspless_mode_selected];
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+
+/*
+ * The default for SoundWire clock stop quirks is to power gate the IP
+ * and do a Bus Reset. This will need to be modified when the DSP
+ * needs to remain in D0i3, so that the Master does not lose context
+ * and enumeration is not required on clock restart.
+ */
+static int sdw_clock_stop_quirks = SDW_INTEL_CLK_STOP_BUS_RESET;
+module_param(sdw_clock_stop_quirks, int, 0444);
+MODULE_PARM_DESC(sdw_clock_stop_quirks, "SOF SoundWire clock stop quirks");
+
+static int sdw_params_stream(struct device *dev,
+ struct sdw_intel_stream_params_data *params_data)
+{
+ struct snd_soc_dai *d = params_data->dai;
+ struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(d, params_data->substream->stream);
+ struct snd_sof_dai_config_data data = { 0 };
+
+ data.dai_index = (params_data->link_id << 8) | d->id;
+ data.dai_data = params_data->alh_stream_id;
+
+ return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_PARAMS, &data);
+}
+
+struct sdw_intel_ops sdw_callback = {
+ .params_stream = sdw_params_stream,
+};
+
+static int sdw_ace2x_params_stream(struct device *dev,
+ struct sdw_intel_stream_params_data *params_data)
+{
+ return sdw_hda_dai_hw_params(params_data->substream,
+ params_data->hw_params,
+ params_data->dai,
+ params_data->link_id);
+}
+
+static int sdw_ace2x_free_stream(struct device *dev,
+ struct sdw_intel_stream_free_data *free_data)
+{
+ return sdw_hda_dai_hw_free(free_data->substream,
+ free_data->dai,
+ free_data->link_id);
+}
+
+static int sdw_ace2x_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
+{
+ return sdw_hda_dai_trigger(substream, cmd, dai);
+}
+
+static struct sdw_intel_ops sdw_ace2x_callback = {
+ .params_stream = sdw_ace2x_params_stream,
+ .free_stream = sdw_ace2x_free_stream,
+ .trigger = sdw_ace2x_trigger,
+};
+
+void hda_common_enable_sdw_irq(struct snd_sof_dev *sdev, bool enable)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ if (!hdev->sdw)
+ return;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC2,
+ HDA_DSP_REG_ADSPIC2_SNDW,
+ enable ? HDA_DSP_REG_ADSPIC2_SNDW : 0);
+}
+
+void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 interface_mask = hda_get_interface_mask(sdev);
+ const struct sof_intel_dsp_desc *chip;
+
+ if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
+ return;
+
+ chip = get_chip_info(sdev->pdata);
+ if (chip && chip->enable_sdw_irq)
+ chip->enable_sdw_irq(sdev, enable);
+}
+
+static int hda_sdw_acpi_scan(struct snd_sof_dev *sdev)
+{
+ u32 interface_mask = hda_get_interface_mask(sdev);
+ struct sof_intel_hda_dev *hdev;
+ acpi_handle handle;
+ int ret;
+
+ if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
+ return -EINVAL;
+
+ handle = ACPI_HANDLE(sdev->dev);
+
+ /* save ACPI info for the probe step */
+ hdev = sdev->pdata->hw_pdata;
+
+ ret = sdw_intel_acpi_scan(handle, &hdev->info);
+ if (ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hda_sdw_probe(struct snd_sof_dev *sdev)
+{
+ const struct sof_intel_dsp_desc *chip;
+ struct sof_intel_hda_dev *hdev;
+ struct sdw_intel_res res;
+ void *sdw;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ memset(&res, 0, sizeof(res));
+
+ chip = get_chip_info(sdev->pdata);
+ if (chip->hw_ip_version < SOF_INTEL_ACE_2_0) {
+ res.mmio_base = sdev->bar[HDA_DSP_BAR];
+ res.hw_ops = &sdw_intel_cnl_hw_ops;
+ res.shim_base = hdev->desc->sdw_shim_base;
+ res.alh_base = hdev->desc->sdw_alh_base;
+ res.ext = false;
+ res.ops = &sdw_callback;
+ } else {
+ /*
+ * retrieve eml_lock needed to protect shared registers
+ * in the HDaudio multi-link areas
+ */
+ res.eml_lock = hdac_bus_eml_get_mutex(sof_to_bus(sdev), true,
+ AZX_REG_ML_LEPTR_ID_SDW);
+ if (!res.eml_lock)
+ return -ENODEV;
+
+ res.mmio_base = sdev->bar[HDA_DSP_HDA_BAR];
+ /*
+ * the SHIM and SoundWire register offsets are link-specific
+ * and will be determined when adding auxiliary devices
+ */
+ res.hw_ops = &sdw_intel_lnl_hw_ops;
+ res.ext = true;
+ res.ops = &sdw_ace2x_callback;
+ }
+ res.irq = sdev->ipc_irq;
+ res.handle = hdev->info.handle;
+ res.parent = sdev->dev;
+
+ res.dev = sdev->dev;
+ res.clock_stop_quirks = sdw_clock_stop_quirks;
+ res.hbus = sof_to_bus(sdev);
+
+ /*
+ * ops and arg fields are not populated for now,
+ * they will be needed when the DAI callbacks are
+ * provided
+ */
+
+ /* we could filter links here if needed, e.g. for quirks */
+ res.count = hdev->info.count;
+ res.link_mask = hdev->info.link_mask;
+
+ sdw = sdw_intel_probe(&res);
+ if (!sdw) {
+ dev_err(sdev->dev, "error: SoundWire probe failed\n");
+ return -EINVAL;
+ }
+
+ /* save context */
+ hdev->sdw = sdw;
+
+ return 0;
+}
+
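+/*
+ * Sanity check of the SoundWire link count: the number of links described
+ * by the BIOS (ctx->count) must not exceed what the hardware reports, read
+ * either from the SHIM LCAP register (common variant) or from the HDaudio
+ * extended multi-link capability (_ext variant below).
+ */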
+int hda_sdw_check_lcount_common(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ struct sdw_intel_ctx *ctx;
+ u32 caps;
+
+ hdev = sdev->pdata->hw_pdata;
+ ctx = hdev->sdw;
+
+ caps = snd_sof_dsp_read(sdev, HDA_DSP_BAR, ctx->shim_base + SDW_SHIM_LCAP);
+ caps &= SDW_SHIM_LCAP_LCOUNT_MASK;
+
+ /* Check HW supported vs property value */
+ if (caps < ctx->count) {
+ dev_err(sdev->dev,
+ "%s: BIOS master count %d is larger than hardware capabilities %d\n",
+ __func__, ctx->count, caps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hda_sdw_check_lcount_ext(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ struct sdw_intel_ctx *ctx;
+ struct hdac_bus *bus;
+ u32 slcount;
+
+ bus = sof_to_bus(sdev);
+
+ hdev = sdev->pdata->hw_pdata;
+ ctx = hdev->sdw;
+
+ slcount = hdac_bus_eml_get_count(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+
+ /* Check HW supported vs property value */
+ if (slcount < ctx->count) {
+ dev_err(sdev->dev,
+ "%s: BIOS master count %d is larger than hardware capabilities %d\n",
+ __func__, ctx->count, slcount);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hda_sdw_check_lcount(struct snd_sof_dev *sdev)
+{
+ const struct sof_intel_dsp_desc *chip;
+
+ chip = get_chip_info(sdev->pdata);
+ if (chip && chip->read_sdw_lcount)
+ return chip->read_sdw_lcount(sdev);
+
+ return 0;
+}
+
+int hda_sdw_startup(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ int ret;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ if (!hdev->sdw)
+ return 0;
+
+ if (pdata->machine && !pdata->machine->mach_params.link_mask)
+ return 0;
+
+ ret = hda_sdw_check_lcount(sdev);
+ if (ret < 0)
+ return ret;
+
+ return sdw_intel_startup(hdev->sdw);
+}
+
+static int hda_sdw_exit(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ hda_sdw_int_enable(sdev, false);
+
+ if (hdev->sdw)
+ sdw_intel_exit(hdev->sdw);
+ hdev->sdw = NULL;
+
+ return 0;
+}
+
+bool hda_common_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ bool ret = false;
+ u32 irq_status;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ if (!hdev->sdw)
+ return ret;
+
+ /* store status */
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS2);
+
+ /* invalid message ? */
+ if (irq_status == 0xffffffff)
+ goto out;
+
+ /* SDW message ? */
+ if (irq_status & HDA_DSP_REG_ADSPIS2_SNDW)
+ ret = true;
+
+out:
+ return ret;
+}
+
+static bool hda_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ u32 interface_mask = hda_get_interface_mask(sdev);
+ const struct sof_intel_dsp_desc *chip;
+
+ if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
+ return false;
+
+ chip = get_chip_info(sdev->pdata);
+ if (chip && chip->check_sdw_irq)
+ return chip->check_sdw_irq(sdev);
+
+ return false;
+}
+
+static irqreturn_t hda_dsp_sdw_thread(int irq, void *context)
+{
+ return sdw_intel_thread(irq, context);
+}
+
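+/*
+ * A non-zero SDW_SHIM_WAKESTS means at least one SoundWire peripheral
+ * has signalled a wake event that needs to be handled.
+ */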
+bool hda_sdw_check_wakeen_irq_common(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+ if (hdev->sdw &&
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ hdev->desc->sdw_shim_base + SDW_SHIM_WAKESTS))
+ return true;
+
+ return false;
+}
+
+static bool hda_sdw_check_wakeen_irq(struct snd_sof_dev *sdev)
+{
+ u32 interface_mask = hda_get_interface_mask(sdev);
+ const struct sof_intel_dsp_desc *chip;
+
+ if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
+ return false;
+
+ chip = get_chip_info(sdev->pdata);
+ if (chip && chip->check_sdw_wakeen_irq)
+ return chip->check_sdw_wakeen_irq(sdev);
+
+ return false;
+}
+
+void hda_sdw_process_wakeen(struct snd_sof_dev *sdev)
+{
+ u32 interface_mask = hda_get_interface_mask(sdev);
+ struct sof_intel_hda_dev *hdev;
+
+ if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
+ return;
+
+ hdev = sdev->pdata->hw_pdata;
+ if (!hdev->sdw)
+ return;
+
+ sdw_intel_process_wakeen_event(hdev->sdw);
+}
+
+#else /* IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE) */
+static inline int hda_sdw_acpi_scan(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline int hda_sdw_probe(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline int hda_sdw_exit(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline bool hda_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ return false;
+}
+
+static inline irqreturn_t hda_dsp_sdw_thread(int irq, void *context)
+{
+ return IRQ_HANDLED;
+}
+
+static inline bool hda_sdw_check_wakeen_irq(struct snd_sof_dev *sdev)
+{
+ return false;
+}
+
+#endif /* IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE) */
+
+/*
+ * Debug
+ */
+
+struct hda_dsp_msg_code {
+ u32 code;
+ const char *text;
+};
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
+static bool hda_use_msi = true;
+module_param_named(use_msi, hda_use_msi, bool, 0444);
+MODULE_PARM_DESC(use_msi, "SOF HDA use PCI MSI mode");
+#else
+#define hda_use_msi (1)
+#endif
+
+int sof_hda_position_quirk = SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS;
+module_param_named(position_quirk, sof_hda_position_quirk, int, 0444);
+MODULE_PARM_DESC(position_quirk, "SOF HDaudio position quirk");
+
+static char *hda_model;
+module_param(hda_model, charp, 0444);
+MODULE_PARM_DESC(hda_model, "Use the given HDA board model.");
+
+static int dmic_num_override = -1;
+module_param_named(dmic_num, dmic_num_override, int, 0444);
+MODULE_PARM_DESC(dmic_num, "SOF HDA DMIC number");
+
+static int mclk_id_override = -1;
+module_param_named(mclk_id, mclk_id_override, int, 0444);
+MODULE_PARM_DESC(mclk_id, "SOF SSP mclk_id");
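+/*
+ * Usage note (illustrative): these parameters are evaluated once during
+ * probe. For example dmic_num=2 forces a 2-DMIC topology selection no
+ * matter what the NHLT table reports, and mclk_id=1 overrides the MCLK id
+ * programmed by the topology.
+ */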
+
+static const struct hda_dsp_msg_code hda_dsp_rom_fw_error_texts[] = {
+ {HDA_DSP_ROM_CSE_ERROR, "error: cse error"},
+ {HDA_DSP_ROM_CSE_WRONG_RESPONSE, "error: cse wrong response"},
+ {HDA_DSP_ROM_IMR_TO_SMALL, "error: IMR too small"},
+ {HDA_DSP_ROM_BASE_FW_NOT_FOUND, "error: base fw not found"},
+ {HDA_DSP_ROM_CSE_VALIDATION_FAILED, "error: signature verification failed"},
+ {HDA_DSP_ROM_IPC_FATAL_ERROR, "error: ipc fatal error"},
+ {HDA_DSP_ROM_L2_CACHE_ERROR, "error: L2 cache error"},
+ {HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL, "error: load offset too small"},
+ {HDA_DSP_ROM_API_PTR_INVALID, "error: API ptr invalid"},
+ {HDA_DSP_ROM_BASEFW_INCOMPAT, "error: base fw incompatible"},
+ {HDA_DSP_ROM_UNHANDLED_INTERRUPT, "error: unhandled interrupt"},
+ {HDA_DSP_ROM_MEMORY_HOLE_ECC, "error: ECC memory hole"},
+ {HDA_DSP_ROM_KERNEL_EXCEPTION, "error: kernel exception"},
+ {HDA_DSP_ROM_USER_EXCEPTION, "error: user exception"},
+ {HDA_DSP_ROM_UNEXPECTED_RESET, "error: unexpected reset"},
+ {HDA_DSP_ROM_NULL_FW_ENTRY, "error: null FW entry point"},
+};
+
+#define FSR_ROM_STATE_ENTRY(state) {FSR_STATE_ROM_##state, #state}
+static const struct hda_dsp_msg_code fsr_rom_state_names[] = {
+ FSR_ROM_STATE_ENTRY(INIT),
+ FSR_ROM_STATE_ENTRY(INIT_DONE),
+ FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED),
+ FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED),
+ FSR_ROM_STATE_ENTRY(FW_FW_LOADED),
+ FSR_ROM_STATE_ENTRY(FW_ENTERED),
+ FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK),
+ FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET),
+ FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT),
+ FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT_DONE),
+ /* CSE states */
+ FSR_ROM_STATE_ENTRY(CSE_IMR_REQUEST),
+ FSR_ROM_STATE_ENTRY(CSE_IMR_GRANTED),
+ FSR_ROM_STATE_ENTRY(CSE_VALIDATE_IMAGE_REQUEST),
+ FSR_ROM_STATE_ENTRY(CSE_IMAGE_VALIDATED),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_IFACE_INIT),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_RESET_PHASE_1),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL_ENTRY),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_DOWN),
+};
+
+#define FSR_BRINGUP_STATE_ENTRY(state) {FSR_STATE_BRINGUP_##state, #state}
+static const struct hda_dsp_msg_code fsr_bringup_state_names[] = {
+ FSR_BRINGUP_STATE_ENTRY(INIT),
+ FSR_BRINGUP_STATE_ENTRY(INIT_DONE),
+ FSR_BRINGUP_STATE_ENTRY(HPSRAM_LOAD),
+ FSR_BRINGUP_STATE_ENTRY(UNPACK_START),
+ FSR_BRINGUP_STATE_ENTRY(IMR_RESTORE),
+ FSR_BRINGUP_STATE_ENTRY(FW_ENTERED),
+};
+
+#define FSR_WAIT_STATE_ENTRY(state) {FSR_WAIT_FOR_##state, #state}
+static const struct hda_dsp_msg_code fsr_wait_state_names[] = {
+ FSR_WAIT_STATE_ENTRY(IPC_BUSY),
+ FSR_WAIT_STATE_ENTRY(IPC_DONE),
+ FSR_WAIT_STATE_ENTRY(CACHE_INVALIDATION),
+ FSR_WAIT_STATE_ENTRY(LP_SRAM_OFF),
+ FSR_WAIT_STATE_ENTRY(DMA_BUFFER_FULL),
+ FSR_WAIT_STATE_ENTRY(CSE_CSR),
+};
+
+#define FSR_MODULE_NAME_ENTRY(mod) [FSR_MOD_##mod] = #mod
+static const char * const fsr_module_names[] = {
+ FSR_MODULE_NAME_ENTRY(ROM),
+ FSR_MODULE_NAME_ENTRY(ROM_BYP),
+ FSR_MODULE_NAME_ENTRY(BASE_FW),
+ FSR_MODULE_NAME_ENTRY(LP_BOOT),
+ FSR_MODULE_NAME_ENTRY(BRNGUP),
+ FSR_MODULE_NAME_ENTRY(ROM_EXT),
+};
+
+static const char *
+hda_dsp_get_state_text(u32 code, const struct hda_dsp_msg_code *msg_code,
+ size_t array_size)
+{
+ int i;
+
+ for (i = 0; i < array_size; i++) {
+ if (code == msg_code[i].code)
+ return msg_code[i].text;
+ }
+
+ return NULL;
+}
+
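+/*
+ * Decode the ROM/firmware status register: the module field selects which
+ * state-name table applies (bring-up vs ROM), the optional wait-state field
+ * names what the firmware is blocked on, and the register at offset +4
+ * carries an additional error/status code.
+ */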
+static void hda_dsp_get_state(struct snd_sof_dev *sdev, const char *level)
+{
+ const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
+ const char *state_text, *error_text, *module_text;
+ u32 fsr, state, wait_state, module, error_code;
+
+ fsr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg);
+ state = FSR_TO_STATE_CODE(fsr);
+ wait_state = FSR_TO_WAIT_STATE_CODE(fsr);
+ module = FSR_TO_MODULE_CODE(fsr);
+
+ if (module > FSR_MOD_ROM_EXT)
+ module_text = "unknown";
+ else
+ module_text = fsr_module_names[module];
+
+ if (module == FSR_MOD_BRNGUP)
+ state_text = hda_dsp_get_state_text(state, fsr_bringup_state_names,
+ ARRAY_SIZE(fsr_bringup_state_names));
+ else
+ state_text = hda_dsp_get_state_text(state, fsr_rom_state_names,
+ ARRAY_SIZE(fsr_rom_state_names));
+
+ /* not for us, must be generic sof message */
+ if (!state_text) {
+ dev_printk(level, sdev->dev, "%#010x: unknown ROM status value\n", fsr);
+ return;
+ }
+
+ if (wait_state) {
+ const char *wait_state_text;
+
+ wait_state_text = hda_dsp_get_state_text(wait_state, fsr_wait_state_names,
+ ARRAY_SIZE(fsr_wait_state_names));
+ if (!wait_state_text)
+ wait_state_text = "unknown";
+
+ dev_printk(level, sdev->dev,
+ "%#010x: module: %s, state: %s, waiting for: %s, %s\n",
+ fsr, module_text, state_text, wait_state_text,
+ fsr & FSR_HALTED ? "not running" : "running");
+ } else {
+ dev_printk(level, sdev->dev, "%#010x: module: %s, state: %s, %s\n",
+ fsr, module_text, state_text,
+ fsr & FSR_HALTED ? "not running" : "running");
+ }
+
+ error_code = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + 4);
+ if (!error_code)
+ return;
+
+ error_text = hda_dsp_get_state_text(error_code, hda_dsp_rom_fw_error_texts,
+ ARRAY_SIZE(hda_dsp_rom_fw_error_texts));
+ if (!error_text)
+ error_text = "unknown";
+
+ if (state == FSR_STATE_FW_ENTERED)
+ dev_printk(level, sdev->dev, "status code: %#x (%s)\n", error_code,
+ error_text);
+ else
+ dev_printk(level, sdev->dev, "error code: %#x (%s)\n", error_code,
+ error_text);
+}
+
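+/*
+ * The exception area in the mailbox is laid out as the xtensa register
+ * dump, followed by the panic info block, followed by the stack dump;
+ * the reads below walk through it sequentially from dsp_oops_offset.
+ */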
+static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
+ /* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* note: variable AR register array is not read */
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+ offset += xoops->arch_hdr.totalsize;
+ sof_block_read(sdev, sdev->mmio_bar, offset,
+ panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ sof_block_read(sdev, sdev->mmio_bar, offset, stack,
+ stack_words * sizeof(u32));
+}
+
+/* dump the first 8 dwords representing the extended ROM status */
+static void hda_dsp_dump_ext_rom_status(struct snd_sof_dev *sdev, const char *level,
+ u32 flags)
+{
+ const struct sof_intel_dsp_desc *chip;
+ char msg[128];
+ int len = 0;
+ u32 value;
+ int i;
+
+ chip = get_chip_info(sdev->pdata);
+ for (i = 0; i < HDA_EXT_ROM_STATUS_SIZE; i++) {
+ value = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + i * 0x4);
+ len += scnprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
+ }
+
+ dev_printk(level, sdev->dev, "extended rom status: %s", msg);
+}
+
+void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[HDA_DSP_STACK_DUMP_SIZE];
+
+ /* print ROM/FW status */
+ hda_dsp_get_state(sdev, level);
+
+ /* The firmware register dump is only available with IPC3 */
+ if (flags & SOF_DBG_DUMP_REGS && sdev->pdata->ipc_type == SOF_IPC) {
+ u32 status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_STATUS);
+ u32 panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_TRACEP);
+
+ hda_dsp_get_registers(sdev, &xoops, &panic_info, stack,
+ HDA_DSP_STACK_DUMP_SIZE);
+ sof_print_oops_and_stack(sdev, level, status, panic, &xoops,
+ &panic_info, stack, HDA_DSP_STACK_DUMP_SIZE);
+ } else {
+ hda_dsp_dump_ext_rom_status(sdev, level, flags);
+ }
+}
+
+static bool hda_check_ipc_irq(struct snd_sof_dev *sdev)
+{
+ const struct sof_intel_dsp_desc *chip;
+
+ chip = get_chip_info(sdev->pdata);
+ if (chip && chip->check_ipc_irq)
+ return chip->check_ipc_irq(sdev);
+
+ return false;
+}
+
+void hda_ipc_irq_dump(struct snd_sof_dev *sdev)
+{
+ u32 adspis;
+ u32 intsts;
+ u32 intctl;
+ u32 ppsts;
+ u8 rirbsts;
+
+ /* read key IRQ stats and config registers */
+ adspis = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS);
+ intsts = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS);
+ intctl = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL);
+ ppsts = snd_sof_dsp_read(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPSTS);
+ rirbsts = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, AZX_REG_RIRBSTS);
+
+ dev_err(sdev->dev, "hda irq intsts 0x%8.8x intctl 0x%8.8x rirb %2.2x\n",
+ intsts, intctl, rirbsts);
+ dev_err(sdev->dev, "dsp irq ppsts 0x%8.8x adspis 0x%8.8x\n", ppsts, adspis);
+}
+
+void hda_ipc_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipcie;
+ u32 hipct;
+ u32 hipcctl;
+
+ hda_ipc_irq_dump(sdev);
+
+ /* read IPC status */
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
+ hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL);
+
+ /* dump the IPC regs */
+ /* TODO: parse the raw msg */
+ dev_err(sdev->dev, "host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
+ hipcie, hipct, hipcctl);
+}
+
+void hda_ipc4_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipci, hipcie, hipct, hipcte, hipcctl;
+
+ hda_ipc_irq_dump(sdev);
+
+ hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI);
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
+ hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
+ hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCTE);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL);
+
+ /* dump the IPC regs */
+ /* TODO: parse the raw msg */
+ dev_err(sdev->dev, "Host IPC initiator: %#x|%#x, target: %#x|%#x, ctl: %#x\n",
+ hipci, hipcie, hipct, hipcte, hipcctl);
+}
+
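+/*
+ * Report whether the previous host-initiated IPC message is still pending,
+ * i.e. the BUSY bit in the IPC request register has not been cleared yet.
+ */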
+bool hda_ipc4_tx_is_busy(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ u32 val;
+
+ val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->ipc_req);
+
+ return !!(val & chip->ipc_req_mask);
+}
+
+static int hda_init(struct snd_sof_dev *sdev)
+{
+ struct hda_bus *hbus;
+ struct hdac_bus *bus;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ int ret;
+
+ hbus = sof_to_hbus(sdev);
+ bus = sof_to_bus(sdev);
+
+ /* HDA bus init */
+ sof_hda_bus_init(sdev, &pci->dev);
+
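+ /*
+ * Position reporting: with the default DPIB quirk the legacy DMA
+ * position buffer is not used, stream positions are read from the
+ * DPIB registers instead.
+ */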
+ if (sof_hda_position_quirk == SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS)
+ bus->use_posbuf = 0;
+ else
+ bus->use_posbuf = 1;
+ bus->bdl_pos_adj = 0;
+ bus->sync_write = 1;
+
+ mutex_init(&hbus->prepare_mutex);
+ hbus->pci = pci;
+ hbus->mixer_assigned = -1;
+ hbus->modelname = hda_model;
+
+ /* initialise hdac bus */
+ bus->addr = pci_resource_start(pci, 0);
+ bus->remap_addr = pci_ioremap_bar(pci, 0);
+ if (!bus->remap_addr) {
+ dev_err(bus->dev, "error: ioremap error\n");
+ return -ENXIO;
+ }
+
+ /* HDA base */
+ sdev->bar[HDA_DSP_HDA_BAR] = bus->remap_addr;
+
+ /* init i915 and HDMI codecs */
+ ret = hda_codec_i915_init(sdev);
+ if (ret < 0)
+ dev_warn(sdev->dev, "init of i915 and HDMI codec failed\n");
+
+ /* get controller capabilities */
+ ret = hda_dsp_ctrl_get_caps(sdev);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: get caps error\n");
+
+ return ret;
+}
+
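+/*
+ * DMIC count comes from the NHLT table, can be overridden by the dmic_num
+ * module parameter and is clamped to the 0..4 range handled by the
+ * topology naming scheme.
+ */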
+static int check_dmic_num(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ struct nhlt_acpi_table *nhlt;
+ int dmic_num = 0;
+
+ nhlt = hdev->nhlt;
+ if (nhlt)
+ dmic_num = intel_nhlt_get_dmic_geo(sdev->dev, nhlt);
+
+ /* allow for module parameter override */
+ if (dmic_num_override != -1) {
+ dev_dbg(sdev->dev,
+ "overriding DMICs detected in NHLT tables %d by kernel param %d\n",
+ dmic_num, dmic_num_override);
+ dmic_num = dmic_num_override;
+ }
+
+ if (dmic_num < 0 || dmic_num > 4) {
+ dev_dbg(sdev->dev, "invalid dmic_number %d\n", dmic_num);
+ dmic_num = 0;
+ }
+
+ return dmic_num;
+}
+
+static int check_nhlt_ssp_mask(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ struct nhlt_acpi_table *nhlt;
+ int ssp_mask = 0;
+
+ nhlt = hdev->nhlt;
+ if (!nhlt)
+ return ssp_mask;
+
+ if (intel_nhlt_has_endpoint_type(nhlt, NHLT_LINK_SSP)) {
+ ssp_mask = intel_nhlt_ssp_endpoint_mask(nhlt, NHLT_DEVICE_I2S);
+ if (ssp_mask)
+ dev_info(sdev->dev, "NHLT_DEVICE_I2S detected, ssp_mask %#x\n", ssp_mask);
+ }
+
+ return ssp_mask;
+}
+
+static int check_nhlt_ssp_mclk_mask(struct snd_sof_dev *sdev, int ssp_num)
+{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ struct nhlt_acpi_table *nhlt;
+
+ nhlt = hdev->nhlt;
+ if (!nhlt)
+ return 0;
+
+ return intel_nhlt_ssp_mclk_mask(nhlt, ssp_num);
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC) || IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+
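+/*
+ * Build a topology file name from the default one by inserting the iDisp
+ * and DMIC suffixes before the .tplg extension; e.g. an illustrative base
+ * name "sof-abc.tplg" becomes "sof-abc-idisp-2ch.tplg" when an iDisp codec
+ * and two DMICs are present.
+ */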
+static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
+ const char *sof_tplg_filename,
+ const char *idisp_str,
+ const char *dmic_str)
+{
+ const char *tplg_filename = NULL;
+ char *filename, *tmp;
+ const char *split_ext;
+
+ filename = kstrdup(sof_tplg_filename, GFP_KERNEL);
+ if (!filename)
+ return NULL;
+
+ /* this assumes a .tplg extension */
+ tmp = filename;
+ split_ext = strsep(&tmp, ".");
+ if (split_ext)
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s%s%s.tplg",
+ split_ext, idisp_str, dmic_str);
+ kfree(filename);
+
+ return tplg_filename;
+}
+
+static int dmic_detect_topology_fixup(struct snd_sof_dev *sdev,
+ const char **tplg_filename,
+ const char *idisp_str,
+ int *dmic_found,
+ bool tplg_fixup)
+{
+ const char *dmic_str;
+ int dmic_num;
+
+ /* first check for DMICs (using NHLT or module parameter) */
+ dmic_num = check_dmic_num(sdev);
+
+ switch (dmic_num) {
+ case 1:
+ dmic_str = "-1ch";
+ break;
+ case 2:
+ dmic_str = "-2ch";
+ break;
+ case 3:
+ dmic_str = "-3ch";
+ break;
+ case 4:
+ dmic_str = "-4ch";
+ break;
+ default:
+ dmic_num = 0;
+ dmic_str = "";
+ break;
+ }
+
+ if (tplg_fixup) {
+ const char *default_tplg_filename = *tplg_filename;
+ const char *fixed_tplg_filename;
+
+ fixed_tplg_filename = fixup_tplg_name(sdev, default_tplg_filename,
+ idisp_str, dmic_str);
+ if (!fixed_tplg_filename)
+ return -ENOMEM;
+ *tplg_filename = fixed_tplg_filename;
+ }
+
+ dev_info(sdev->dev, "DMICs detected in NHLT tables: %d\n", dmic_num);
+ *dmic_found = dmic_num;
+
+ return 0;
+}
+#endif
+
+static int hda_init_caps(struct snd_sof_dev *sdev)
+{
+ u32 interface_mask = hda_get_interface_mask(sdev);
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ struct sof_intel_hda_dev *hdev = pdata->hw_pdata;
+ u32 link_mask;
+ int ret = 0;
+
+ /* check if dsp is there */
+ if (bus->ppcap)
+ dev_dbg(sdev->dev, "PP capability, will probe DSP later.\n");
+
+ /* Init HDA controller after i915 init */
+ ret = hda_dsp_ctrl_init_chip(sdev);
+ if (ret < 0) {
+ dev_err(bus->dev, "error: init chip failed with ret: %d\n",
+ ret);
+ return ret;
+ }
+
+ hda_bus_ml_init(bus);
+
+ /* Skip SoundWire if it is not supported */
+ if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
+ goto skip_soundwire;
+
+ /* scan SoundWire capabilities exposed by DSDT */
+ ret = hda_sdw_acpi_scan(sdev);
+ if (ret < 0) {
+ dev_dbg(sdev->dev, "skipping SoundWire, not detected with ACPI scan\n");
+ goto skip_soundwire;
+ }
+
+ link_mask = hdev->info.link_mask;
+ if (!link_mask) {
+ dev_dbg(sdev->dev, "skipping SoundWire, no links enabled\n");
+ goto skip_soundwire;
+ }
+
+ /*
+ * probe/allocate SoundWire resources.
+ * The hardware configuration takes place in hda_sdw_startup
+ * after power rails are enabled.
+ * It's entirely possible to have a mix of I2S/DMIC/SoundWire
+ * devices, so we allocate the resources in all cases.
+ */
+ ret = hda_sdw_probe(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: SoundWire probe error\n");
+ return ret;
+ }
+
+skip_soundwire:
+
+ /* create codec instances */
+ hda_codec_probe_bus(sdev);
+
+ if (!HDA_IDISP_CODEC(bus->codec_mask))
+ hda_codec_i915_display_power(sdev, false);
+
+ hda_bus_ml_put_all(bus);
+
+ return 0;
+}
+
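+/*
+ * Top half: only check the controller Global Interrupt Status and mask the
+ * global interrupt enable; the actual handling of stream, IPC, SoundWire
+ * and codec events is done in the threaded handler, which re-enables the
+ * global interrupt when it is done.
+ */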
+static irqreturn_t hda_dsp_interrupt_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+
+ /*
+ * Get global interrupt status. It includes all hardware interrupt
+ * sources in the Intel HD Audio controller.
+ */
+ if (snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS) &
+ SOF_HDA_INTSTS_GIS) {
+
+ /* disable GIE interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_INTCTL,
+ SOF_HDA_INT_GLOBAL_EN,
+ 0);
+
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t hda_dsp_interrupt_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ /* deal with streams and controller first */
+ if (hda_dsp_check_stream_irq(sdev)) {
+ trace_sof_intel_hda_irq(sdev, "stream");
+ hda_dsp_stream_threaded_handler(irq, sdev);
+ }
+
+ if (hda_check_ipc_irq(sdev)) {
+ trace_sof_intel_hda_irq(sdev, "ipc");
+ sof_ops(sdev)->irq_thread(irq, sdev);
+ }
+
+ if (hda_dsp_check_sdw_irq(sdev)) {
+ trace_sof_intel_hda_irq(sdev, "sdw");
+ hda_dsp_sdw_thread(irq, hdev->sdw);
+ }
+
+ if (hda_sdw_check_wakeen_irq(sdev)) {
+ trace_sof_intel_hda_irq(sdev, "wakeen");
+ hda_sdw_process_wakeen(sdev);
+ }
+
+ hda_codec_check_for_state_change(sdev);
+
+ /* enable GIE interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_INTCTL,
+ SOF_HDA_INT_GLOBAL_EN,
+ SOF_HDA_INT_GLOBAL_EN);
+
+ return IRQ_HANDLED;
+}
+
+int hda_dsp_probe(struct snd_sof_dev *sdev)
+{
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ struct sof_intel_hda_dev *hdev;
+ struct hdac_bus *bus;
+ const struct sof_intel_dsp_desc *chip;
+ int ret = 0;
+
+ if (!sdev->dspless_mode_selected) {
+ /*
+ * detect DSP by checking class/subclass/prog-id information
+ * class=04 subclass 03 prog-if 00: no DSP, legacy driver is required
+ * class=04 subclass 01 prog-if 00: DSP is present
+ * (and may be required e.g. for DMIC or SSP support)
+ * class=04 subclass 03 prog-if 80: either of DSP or legacy mode works
+ */
+ if (pci->class == 0x040300) {
+ dev_err(sdev->dev, "the DSP is not enabled on this platform, aborting probe\n");
+ return -ENODEV;
+ } else if (pci->class != 0x040100 && pci->class != 0x040380) {
+ dev_err(sdev->dev, "unknown PCI class/subclass/prog-if 0x%06x found, aborting probe\n",
+ pci->class);
+ return -ENODEV;
+ }
+ dev_info(sdev->dev, "DSP detected with PCI class/subclass/prog-if 0x%06x\n",
+ pci->class);
+ }
+
+ chip = get_chip_info(sdev->pdata);
+ if (!chip) {
+ dev_err(sdev->dev, "error: no such device supported, chip id:%x\n",
+ pci->device);
+ ret = -EIO;
+ goto err;
+ }
+
+ sdev->num_cores = chip->cores_num;
+
+ hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+ sdev->pdata->hw_pdata = hdev;
+ hdev->desc = chip;
+
+ hdev->dmic_dev = platform_device_register_data(sdev->dev, "dmic-codec",
+ PLATFORM_DEVID_NONE,
+ NULL, 0);
+ if (IS_ERR(hdev->dmic_dev)) {
+ dev_err(sdev->dev, "error: failed to create DMIC device\n");
+ return PTR_ERR(hdev->dmic_dev);
+ }
+
+ /*
+ * use position update IPC if either it is forced
+ * or we don't have other choice
+ */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_FORCE_IPC_POSITION)
+ hdev->no_ipc_position = 0;
+#else
+ hdev->no_ipc_position = sof_ops(sdev)->pcm_pointer ? 1 : 0;
+#endif
+
+ if (sdev->dspless_mode_selected)
+ hdev->no_ipc_position = 1;
+
+ /* set up HDA base */
+ bus = sof_to_bus(sdev);
+ ret = hda_init(sdev);
+ if (ret < 0)
+ goto hdac_bus_unmap;
+
+ if (sdev->dspless_mode_selected)
+ goto skip_dsp_setup;
+
+ /* DSP base */
+ sdev->bar[HDA_DSP_BAR] = pci_ioremap_bar(pci, HDA_DSP_BAR);
+ if (!sdev->bar[HDA_DSP_BAR]) {
+ dev_err(sdev->dev, "error: ioremap error\n");
+ ret = -ENXIO;
+ goto hdac_bus_unmap;
+ }
+
+ sdev->mmio_bar = HDA_DSP_BAR;
+ sdev->mailbox_bar = HDA_DSP_BAR;
+skip_dsp_setup:
+
+ /* allow 64bit DMA address if supported by H/W */
+ if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(64))) {
+ dev_dbg(sdev->dev, "DMA mask is 32 bit\n");
+ dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32));
+ }
+ dma_set_max_seg_size(&pci->dev, UINT_MAX);
+
+ /* init streams */
+ ret = hda_dsp_stream_init(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to init streams\n");
+ /*
+ * not all errors are due to memory issues, but trying
+ * to free everything does not harm
+ */
+ goto free_streams;
+ }
+
+ /*
+ * register our IRQ
+ * let's try to enable msi firstly
+ * if it fails, use legacy interrupt mode
+ * TODO: support msi multiple vectors
+ */
+ if (hda_use_msi && pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI) > 0) {
+ dev_info(sdev->dev, "use msi interrupt mode\n");
+ sdev->ipc_irq = pci_irq_vector(pci, 0);
+ /* initialised to "false" by kzalloc() */
+ sdev->msi_enabled = true;
+ }
+
+ if (!sdev->msi_enabled) {
+ dev_info(sdev->dev, "use legacy interrupt mode\n");
+ /*
+ * in IO-APIC mode, hda->irq and ipc_irq are using the same
+ * irq number of pci->irq
+ */
+ sdev->ipc_irq = pci->irq;
+ }
+
+ dev_dbg(sdev->dev, "using IPC IRQ %d\n", sdev->ipc_irq);
+ ret = request_threaded_irq(sdev->ipc_irq, hda_dsp_interrupt_handler,
+ hda_dsp_interrupt_thread,
+ IRQF_SHARED, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IPC IRQ %d\n",
+ sdev->ipc_irq);
+ goto free_irq_vector;
+ }
+
+ pci_set_master(pci);
+ synchronize_irq(pci->irq);
+
+ /*
+ * clear TCSEL to clear playback on some HD Audio
+ * codecs. PCI TCSEL is defined in the Intel manuals.
+ */
+ snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
+
+ /* init HDA capabilities */
+ ret = hda_init_caps(sdev);
+ if (ret < 0)
+ goto free_ipc_irq;
+
+ if (!sdev->dspless_mode_selected) {
+ /* enable ppcap interrupt */
+ hda_dsp_ctrl_ppcap_enable(sdev, true);
+ hda_dsp_ctrl_ppcap_int_enable(sdev, true);
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = HDA_DSP_MBOX_UPLINK_OFFSET;
+
+ INIT_DELAYED_WORK(&hdev->d0i3_work, hda_dsp_d0i3_work);
+ }
+
+ init_waitqueue_head(&hdev->waitq);
+
+ hdev->nhlt = intel_nhlt_init(sdev->dev);
+
+ return 0;
+
+free_ipc_irq:
+ free_irq(sdev->ipc_irq, sdev);
+free_irq_vector:
+ if (sdev->msi_enabled)
+ pci_free_irq_vectors(pci);
+free_streams:
+ hda_dsp_stream_free(sdev);
+/* dsp_unmap: not currently used */
+ if (!sdev->dspless_mode_selected)
+ iounmap(sdev->bar[HDA_DSP_BAR]);
+hdac_bus_unmap:
+ platform_device_unregister(hdev->dmic_dev);
+ iounmap(bus->remap_addr);
+ hda_codec_i915_exit(sdev);
+err:
+ return ret;
+}
+
+int hda_dsp_remove(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ struct nhlt_acpi_table *nhlt = hda->nhlt;
+
+ if (nhlt)
+ intel_nhlt_free(nhlt);
+
+ if (!sdev->dspless_mode_selected)
+ /* cancel any attempt for DSP D0I3 */
+ cancel_delayed_work_sync(&hda->d0i3_work);
+
+ hda_codec_device_remove(sdev);
+
+ hda_sdw_exit(sdev);
+
+ if (!IS_ERR_OR_NULL(hda->dmic_dev))
+ platform_device_unregister(hda->dmic_dev);
+
+ if (!sdev->dspless_mode_selected) {
+ /* disable DSP IRQ */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_PIE, 0);
+ }
+
+ /* disable CIE and GIE interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN, 0);
+
+ if (sdev->dspless_mode_selected)
+ goto skip_disable_dsp;
+
+ /* no need to check for error as the DSP will be disabled anyway */
+ if (chip && chip->power_down_dsp)
+ chip->power_down_dsp(sdev);
+
+ /* disable DSP */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_GPROCEN, 0);
+
+skip_disable_dsp:
+ free_irq(sdev->ipc_irq, sdev);
+ if (sdev->msi_enabled)
+ pci_free_irq_vectors(pci);
+
+ hda_dsp_stream_free(sdev);
+
+ hda_bus_ml_free(sof_to_bus(sdev));
+
+ if (!sdev->dspless_mode_selected)
+ iounmap(sdev->bar[HDA_DSP_BAR]);
+
+ iounmap(bus->remap_addr);
+
+ sof_hda_bus_exit(sdev);
+
+ hda_codec_i915_exit(sdev);
+
+ return 0;
+}
+
+int hda_power_down_dsp(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ return hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+static void hda_generic_machine_select(struct snd_sof_dev *sdev,
+ struct snd_soc_acpi_mach **mach)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct snd_soc_acpi_mach_params *mach_params;
+ struct snd_soc_acpi_mach *hda_mach;
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const char *tplg_filename;
+ const char *idisp_str;
+ int dmic_num = 0;
+ int codec_num = 0;
+ int ret;
+ int i;
+
+ /* codec detection */
+ if (!bus->codec_mask) {
+ dev_info(bus->dev, "no hda codecs found!\n");
+ } else {
+ dev_info(bus->dev, "hda codecs found, mask %lx\n",
+ bus->codec_mask);
+
+ for (i = 0; i < HDA_MAX_CODECS; i++) {
+ if (bus->codec_mask & (1 << i))
+ codec_num++;
+ }
+
+ /*
+ * If no machine driver is found, the generic hda
+ * machine driver can handle:
+ * - one HDMI codec, and/or
+ * - one external HDAudio codec
+ */
+ if (!*mach && codec_num <= 2) {
+ bool tplg_fixup;
+
+ hda_mach = snd_soc_acpi_intel_hda_machines;
+
+ dev_info(bus->dev, "using HDA machine driver %s now\n",
+ hda_mach->drv_name);
+
+ if (codec_num == 1 && HDA_IDISP_CODEC(bus->codec_mask))
+ idisp_str = "-idisp";
+ else
+ idisp_str = "";
+
+ /* topology: use the info from hda_machines */
+ if (pdata->tplg_filename) {
+ tplg_fixup = false;
+ tplg_filename = pdata->tplg_filename;
+ } else {
+ tplg_fixup = true;
+ tplg_filename = hda_mach->sof_tplg_filename;
+ }
+ ret = dmic_detect_topology_fixup(sdev, &tplg_filename, idisp_str, &dmic_num,
+ tplg_fixup);
+ if (ret < 0)
+ return;
+
+ hda_mach->mach_params.dmic_num = dmic_num;
+ pdata->tplg_filename = tplg_filename;
+
+ if (codec_num == 2 ||
+ (codec_num == 1 && !HDA_IDISP_CODEC(bus->codec_mask))) {
+ /*
+ * Prevent SoundWire links from starting when an external
+ * HDaudio codec is used
+ */
+ hda_mach->mach_params.link_mask = 0;
+ } else {
+ /*
+ * Allow SoundWire links to start when no external HDaudio codec
+ * was detected. This will not create a SoundWire card but
+ * will help detect if any SoundWire codec reports as ATTACHED.
+ */
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ hda_mach->mach_params.link_mask = hdev->info.link_mask;
+ }
+
+ *mach = hda_mach;
+ }
+ }
+
+ /* used by hda machine driver to create dai links */
+ if (*mach) {
+ mach_params = &(*mach)->mach_params;
+ mach_params->codec_mask = bus->codec_mask;
+ mach_params->common_hdmi_codec_drv = true;
+ }
+}
+#else
+static void hda_generic_machine_select(struct snd_sof_dev *sdev,
+ struct snd_soc_acpi_mach **mach)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+
+static struct snd_soc_acpi_mach *hda_sdw_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct snd_soc_acpi_link_adr *link;
+ struct snd_soc_acpi_mach *mach;
+ struct sof_intel_hda_dev *hdev;
+ u32 link_mask;
+ int i;
+
+ hdev = pdata->hw_pdata;
+ link_mask = hdev->info.link_mask;
+
+ /*
+ * Select SoundWire machine driver if needed using the
+ * alternate tables. This case deals with SoundWire-only
+ * machines, for mixed cases with I2C/I2S the detection relies
+ * on the HID list.
+ */
+ if (link_mask) {
+ for (mach = pdata->desc->alt_machines;
+ mach && mach->link_mask; mach++) {
+ /*
+ * On some platforms such as Up Extreme all links
+ * are enabled but only one link can be used by
+ * external codec. Instead of exact match of two masks,
+ * first check whether link_mask of mach is subset of
+ * link_mask supported by hw and then go on searching
+ * link_adr
+ */
+ if (~link_mask & mach->link_mask)
+ continue;
+
+ /* No need to match adr if there are no links defined */
+ if (!mach->links)
+ break;
+
+ link = mach->links;
+ for (i = 0; i < hdev->info.count && link->num_adr;
+ i++, link++) {
+ /*
+ * Try next machine if any expected Slaves
+ * are not found on this link.
+ */
+ if (!snd_soc_acpi_sdw_link_slaves_found(sdev->dev, link,
+ hdev->sdw->ids,
+ hdev->sdw->num_slaves))
+ break;
+ }
+ /* Found if all Slaves are checked */
+ if (i == hdev->info.count || !link->num_adr)
+ break;
+ }
+ if (mach && mach->link_mask) {
+ int dmic_num = 0;
+ bool tplg_fixup;
+ const char *tplg_filename;
+
+ mach->mach_params.links = mach->links;
+ mach->mach_params.link_mask = mach->link_mask;
+ mach->mach_params.platform = dev_name(sdev->dev);
+
+ if (pdata->tplg_filename) {
+ tplg_fixup = false;
+ } else {
+ tplg_fixup = true;
+ tplg_filename = mach->sof_tplg_filename;
+ }
+
+ /*
+ * DMICs use up to 4 pins and are typically pin-muxed with SoundWire
+ * link 2 and 3, or link 1 and 2, thus we only try to enable dmics
+ * if all conditions are true:
+ * a) 2 or fewer links are used by SoundWire
+ * b) the NHLT table reports the presence of microphones
+ */
+ if (hweight_long(mach->link_mask) <= 2) {
+ int ret;
+
+ ret = dmic_detect_topology_fixup(sdev, &tplg_filename, "",
+ &dmic_num, tplg_fixup);
+ if (ret < 0)
+ return NULL;
+ }
+ if (tplg_fixup)
+ pdata->tplg_filename = tplg_filename;
+ mach->mach_params.dmic_num = dmic_num;
+
+ dev_dbg(sdev->dev,
+ "SoundWire machine driver %s topology %s\n",
+ mach->drv_name,
+ pdata->tplg_filename);
+
+ return mach;
+ }
+
+ dev_info(sdev->dev, "No SoundWire machine driver found\n");
+ }
+
+ return NULL;
+}
+#else
+static struct snd_soc_acpi_mach *hda_sdw_machine_select(struct snd_sof_dev *sdev)
+{
+ return NULL;
+}
+#endif
+
+void hda_set_mach_params(struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct snd_soc_acpi_mach_params *mach_params;
+
+ mach_params = &mach->mach_params;
+ mach_params->platform = dev_name(sdev->dev);
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ mach_params->num_dai_drivers = SOF_SKL_NUM_DAIS_NOCODEC;
+ else
+ mach_params->num_dai_drivers = desc->ops->num_drv;
+ mach_params->dai_drivers = desc->ops->drv;
+}
+
+struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
+{
+ u32 interface_mask = hda_get_interface_mask(sdev);
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_soc_acpi_mach *mach = NULL;
+ const char *tplg_filename;
+
+ /* Try I2S or DMIC if it is supported */
+ if (interface_mask & (BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC)))
+ mach = snd_soc_acpi_find_machine(desc->machines);
+
+ if (mach) {
+ bool add_extension = false;
+ bool tplg_fixup = false;
+
+ /*
+ * If tplg file name is overridden, use it instead of
+ * the one set in mach table
+ */
+ if (!sof_pdata->tplg_filename) {
+ sof_pdata->tplg_filename = mach->sof_tplg_filename;
+ tplg_fixup = true;
+ }
+
+ /* report to machine driver if any DMICs are found */
+ mach->mach_params.dmic_num = check_dmic_num(sdev);
+
+ if (tplg_fixup &&
+ mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER &&
+ mach->mach_params.dmic_num) {
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s%s%d%s",
+ sof_pdata->tplg_filename,
+ "-dmic",
+ mach->mach_params.dmic_num,
+ "ch");
+ if (!tplg_filename)
+ return NULL;
+
+ sof_pdata->tplg_filename = tplg_filename;
+ add_extension = true;
+ }
+
+ if (mach->link_mask) {
+ mach->mach_params.links = mach->links;
+ mach->mach_params.link_mask = mach->link_mask;
+ }
+
+ /* report SSP link mask to machine driver */
+ mach->mach_params.i2s_link_mask = check_nhlt_ssp_mask(sdev);
+
+ if (tplg_fixup &&
+ mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER &&
+ mach->mach_params.i2s_link_mask) {
+ const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
+ int ssp_num;
+ int mclk_mask;
+
+ if (hweight_long(mach->mach_params.i2s_link_mask) > 1 &&
+ !(mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_SSP_MSB))
+ dev_warn(sdev->dev, "More than one SSP exposed by NHLT, choosing MSB\n");
+
+ /* fls returns 1-based results, SSPs indices are 0-based */
+ ssp_num = fls(mach->mach_params.i2s_link_mask) - 1;
+
+ if (ssp_num >= chip->ssp_count) {
+ dev_err(sdev->dev, "Invalid SSP %d, max on this platform is %d\n",
+ ssp_num, chip->ssp_count);
+ return NULL;
+ }
+
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s%s%d",
+ sof_pdata->tplg_filename,
+ "-ssp",
+ ssp_num);
+ if (!tplg_filename)
+ return NULL;
+
+ sof_pdata->tplg_filename = tplg_filename;
+ add_extension = true;
+
+ mclk_mask = check_nhlt_ssp_mclk_mask(sdev, ssp_num);
+
+ if (mclk_mask < 0) {
+ dev_err(sdev->dev, "Invalid MCLK configuration\n");
+ return NULL;
+ }
+
+ dev_dbg(sdev->dev, "MCLK mask %#x found in NHLT\n", mclk_mask);
+
+ if (mclk_mask) {
+ dev_info(sdev->dev, "Overriding topology with MCLK mask %#x from NHLT\n", mclk_mask);
+ sdev->mclk_id_override = true;
+ sdev->mclk_id_quirk = (mclk_mask & BIT(0)) ? 0 : 1;
+ }
+ }
+
+ if (tplg_fixup && add_extension) {
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s%s",
+ sof_pdata->tplg_filename,
+ ".tplg");
+ if (!tplg_filename)
+ return NULL;
+
+ sof_pdata->tplg_filename = tplg_filename;
+ }
+
+ /* check if mclk_id should be modified from topology defaults */
+ if (mclk_id_override >= 0) {
+ dev_info(sdev->dev, "Overriding topology with MCLK %d from kernel_parameter\n", mclk_id_override);
+ sdev->mclk_id_override = true;
+ sdev->mclk_id_quirk = mclk_id_override;
+ }
+ }
+
+ /* If I2S fails, try SoundWire if it is supported */
+ if (!mach && (interface_mask & BIT(SOF_DAI_INTEL_ALH)))
+ mach = hda_sdw_machine_select(sdev);
+
+ /*
+ * Choose HDA generic machine driver if mach is NULL.
+ * Otherwise, set certain mach params.
+ */
+ hda_generic_machine_select(sdev, &mach);
+ if (!mach)
+ dev_warn(sdev->dev, "warning: No matching ASoC machine driver found\n");
+
+ return mach;
+}
+
+int hda_pci_intel_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+ int ret;
+
+ ret = snd_intel_dsp_driver_probe(pci);
+ if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+ dev_dbg(&pci->dev, "SOF PCI driver not selected, aborting probe\n");
+ return -ENODEV;
+ }
+
+ return sof_pci_probe(pci, pci_id);
+}
+EXPORT_SYMBOL_NS(hda_pci_intel_probe, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int hda_register_clients(struct snd_sof_dev *sdev)
+{
+ return hda_probes_register(sdev);
+}
+
+void hda_unregister_clients(struct snd_sof_dev *sdev)
+{
+ hda_probes_unregister(sdev);
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
+MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC);
+MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_IMPORT_NS(SND_INTEL_SOUNDWIRE_ACPI);
+MODULE_IMPORT_NS(SOUNDWIRE_INTEL_INIT);
+MODULE_IMPORT_NS(SOUNDWIRE_INTEL);
+MODULE_IMPORT_NS(SND_SOC_SOF_HDA_MLINK);
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
new file mode 100644
index 000000000..5c517ec57
--- /dev/null
+++ b/sound/soc/sof/intel/hda.h
@@ -0,0 +1,990 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_HDA_H
+#define __SOF_INTEL_HDA_H
+
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_intel.h>
+#include <sound/compress_driver.h>
+#include <sound/hda_codec.h>
+#include <sound/hdaudio_ext.h>
+#include "../sof-client-probes.h"
+#include "../sof-audio.h"
+#include "shim.h"
+
+/* PCI registers */
+#define PCI_TCSEL 0x44
+#define PCI_PGCTL PCI_TCSEL
+#define PCI_CGCTL 0x48
+
+/* PCI_PGCTL bits */
+#define PCI_PGCTL_ADSPPGD BIT(2)
+#define PCI_PGCTL_LSRMD_MASK BIT(4)
+
+/* PCI_CGCTL bits */
+#define PCI_CGCTL_MISCBDCGE_MASK BIT(6)
+#define PCI_CGCTL_ADSPDCGE BIT(1)
+
+/* Legacy HDA registers and bits used - widths are variable */
+#define SOF_HDA_GCAP 0x0
+#define SOF_HDA_GCTL 0x8
+/* accept unsol. response enable */
+#define SOF_HDA_GCTL_UNSOL BIT(8)
+#define SOF_HDA_LLCH 0x14
+#define SOF_HDA_INTCTL 0x20
+#define SOF_HDA_INTSTS 0x24
+#define SOF_HDA_WAKESTS 0x0E
+#define SOF_HDA_WAKESTS_INT_MASK ((1 << 8) - 1)
+#define SOF_HDA_RIRBSTS 0x5d
+
+/* SOF_HDA_GCTL register bits */
+#define SOF_HDA_GCTL_RESET BIT(0)
+
+/* SOF_HDA_INTCTL regs */
+#define SOF_HDA_INT_GLOBAL_EN BIT(31)
+#define SOF_HDA_INT_CTRL_EN BIT(30)
+#define SOF_HDA_INT_ALL_STREAM 0xff
+
+/* SOF_HDA_INTSTS regs */
+#define SOF_HDA_INTSTS_GIS BIT(31)
+
+#define SOF_HDA_MAX_CAPS 10
+#define SOF_HDA_CAP_ID_OFF 16
+#define SOF_HDA_CAP_ID_MASK GENMASK(SOF_HDA_CAP_ID_OFF + 11,\
+ SOF_HDA_CAP_ID_OFF)
+#define SOF_HDA_CAP_NEXT_MASK 0xFFFF
+
+#define SOF_HDA_GTS_CAP_ID 0x1
+#define SOF_HDA_ML_CAP_ID 0x2
+
+#define SOF_HDA_PP_CAP_ID 0x3
+#define SOF_HDA_REG_PP_PPCH 0x10
+#define SOF_HDA_REG_PP_PPCTL 0x04
+#define SOF_HDA_REG_PP_PPSTS 0x08
+#define SOF_HDA_PPCTL_PIE BIT(31)
+#define SOF_HDA_PPCTL_GPROCEN BIT(30)
+
+/* Vendor Specific Registers */
+#define SOF_HDA_VS_D0I3C 0x104A
+
+/* D0I3C Register fields */
+#define SOF_HDA_VS_D0I3C_CIP BIT(0) /* Command-In-Progress */
+#define SOF_HDA_VS_D0I3C_I3 BIT(2) /* D0i3 enable bit */
+
+/* DPIB entry size: 8 Bytes = 2 DWords */
+#define SOF_HDA_DPIB_ENTRY_SIZE 0x8
+
+#define SOF_HDA_SPIB_CAP_ID 0x4
+#define SOF_HDA_DRSM_CAP_ID 0x5
+
+#define SOF_HDA_SPIB_BASE 0x08
+#define SOF_HDA_SPIB_INTERVAL 0x08
+#define SOF_HDA_SPIB_SPIB 0x00
+#define SOF_HDA_SPIB_MAXFIFO 0x04
+
+#define SOF_HDA_PPHC_BASE 0x10
+#define SOF_HDA_PPHC_INTERVAL 0x10
+
+#define SOF_HDA_PPLC_BASE 0x10
+#define SOF_HDA_PPLC_MULTI 0x10
+#define SOF_HDA_PPLC_INTERVAL 0x10
+
+#define SOF_HDA_DRSM_BASE 0x08
+#define SOF_HDA_DRSM_INTERVAL 0x08
+
+/* Descriptor error interrupt */
+#define SOF_HDA_CL_DMA_SD_INT_DESC_ERR 0x10
+
+/* FIFO error interrupt */
+#define SOF_HDA_CL_DMA_SD_INT_FIFO_ERR 0x08
+
+/* Buffer completion interrupt */
+#define SOF_HDA_CL_DMA_SD_INT_COMPLETE 0x04
+
+#define SOF_HDA_CL_DMA_SD_INT_MASK \
+ (SOF_HDA_CL_DMA_SD_INT_DESC_ERR | \
+ SOF_HDA_CL_DMA_SD_INT_FIFO_ERR | \
+ SOF_HDA_CL_DMA_SD_INT_COMPLETE)
+#define SOF_HDA_SD_CTL_DMA_START 0x02 /* Stream DMA start bit */
+
+/* Intel HD Audio Code Loader DMA Registers */
+#define SOF_HDA_ADSP_LOADER_BASE 0x80
+#define SOF_HDA_ADSP_DPLBASE 0x70
+#define SOF_HDA_ADSP_DPUBASE 0x74
+#define SOF_HDA_ADSP_DPLBASE_ENABLE 0x01
+
+/* Stream Registers */
+#define SOF_HDA_ADSP_REG_SD_CTL 0x00
+#define SOF_HDA_ADSP_REG_SD_STS 0x03
+#define SOF_HDA_ADSP_REG_SD_LPIB 0x04
+#define SOF_HDA_ADSP_REG_SD_CBL 0x08
+#define SOF_HDA_ADSP_REG_SD_LVI 0x0C
+#define SOF_HDA_ADSP_REG_SD_FIFOW 0x0E
+#define SOF_HDA_ADSP_REG_SD_FIFOSIZE 0x10
+#define SOF_HDA_ADSP_REG_SD_FORMAT 0x12
+#define SOF_HDA_ADSP_REG_SD_FIFOL 0x14
+#define SOF_HDA_ADSP_REG_SD_BDLPL 0x18
+#define SOF_HDA_ADSP_REG_SD_BDLPU 0x1C
+#define SOF_HDA_ADSP_SD_ENTRY_SIZE 0x20
+
+/* CL: Software Position Based FIFO Capability Registers */
+#define SOF_DSP_REG_CL_SPBFIFO \
+ (SOF_HDA_ADSP_LOADER_BASE + 0x20)
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCH 0x0
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL 0x4
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_SPIB 0x8
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_MAXFIFOS 0xc
+
+/* Stream Number */
+#define SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT 20
+#define SOF_HDA_CL_SD_CTL_STREAM_TAG_MASK \
+ GENMASK(SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT + 3,\
+ SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT)
+
+#define HDA_DSP_HDA_BAR 0
+#define HDA_DSP_PP_BAR 1
+#define HDA_DSP_SPIB_BAR 2
+#define HDA_DSP_DRSM_BAR 3
+#define HDA_DSP_BAR 4
+
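+/* DSP memory windows in the DSP BAR: window n starts at 0x80000 + n * 0x20000 */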
+#define SRAM_WINDOW_OFFSET(x) (0x80000 + (x) * 0x20000)
+
+#define HDA_DSP_MBOX_OFFSET SRAM_WINDOW_OFFSET(0)
+
+#define HDA_DSP_PANIC_OFFSET(x) \
+ (((x) & 0xFFFFFF) + HDA_DSP_MBOX_OFFSET)
+
+/* SRAM window 0 FW "registers" */
+#define HDA_DSP_SRAM_REG_ROM_STATUS (HDA_DSP_MBOX_OFFSET + 0x0)
+#define HDA_DSP_SRAM_REG_ROM_ERROR (HDA_DSP_MBOX_OFFSET + 0x4)
+/* FW and ROM share offset 4 */
+#define HDA_DSP_SRAM_REG_FW_STATUS (HDA_DSP_MBOX_OFFSET + 0x4)
+#define HDA_DSP_SRAM_REG_FW_TRACEP (HDA_DSP_MBOX_OFFSET + 0x8)
+#define HDA_DSP_SRAM_REG_FW_END (HDA_DSP_MBOX_OFFSET + 0xc)
+
+#define HDA_DSP_MBOX_UPLINK_OFFSET 0x81000
+
+#define HDA_DSP_STREAM_RESET_TIMEOUT 300
+/*
+ * Timeout in us, for setting the stream RUN bit, during
+ * start/stop the stream. The timeout expires if new RUN bit
+ * value cannot be read back within the specified time.
+ */
+#define HDA_DSP_STREAM_RUN_TIMEOUT 300
+
+#define HDA_DSP_SPIB_ENABLE 1
+#define HDA_DSP_SPIB_DISABLE 0
+
+#define SOF_HDA_MAX_BUFFER_SIZE (32 * PAGE_SIZE)
+
+#define HDA_DSP_STACK_DUMP_SIZE 32
+
+/* ROM/FW status register */
+#define FSR_STATE_MASK GENMASK(23, 0)
+#define FSR_WAIT_STATE_MASK GENMASK(27, 24)
+#define FSR_MODULE_MASK GENMASK(30, 28)
+#define FSR_HALTED BIT(31)
+#define FSR_TO_STATE_CODE(x) ((x) & FSR_STATE_MASK)
+#define FSR_TO_WAIT_STATE_CODE(x) (((x) & FSR_WAIT_STATE_MASK) >> 24)
+#define FSR_TO_MODULE_CODE(x) (((x) & FSR_MODULE_MASK) >> 28)
+
+/* Wait states */
+#define FSR_WAIT_FOR_IPC_BUSY 0x1
+#define FSR_WAIT_FOR_IPC_DONE 0x2
+#define FSR_WAIT_FOR_CACHE_INVALIDATION 0x3
+#define FSR_WAIT_FOR_LP_SRAM_OFF 0x4
+#define FSR_WAIT_FOR_DMA_BUFFER_FULL 0x5
+#define FSR_WAIT_FOR_CSE_CSR 0x6
+
+/* Module codes */
+#define FSR_MOD_ROM 0x0
+#define FSR_MOD_ROM_BYP 0x1
+#define FSR_MOD_BASE_FW 0x2
+#define FSR_MOD_LP_BOOT 0x3
+#define FSR_MOD_BRNGUP 0x4
+#define FSR_MOD_ROM_EXT 0x5
+
+/* State codes (module dependent) */
+/* Module independent states */
+#define FSR_STATE_INIT 0x0
+#define FSR_STATE_INIT_DONE 0x1
+#define FSR_STATE_FW_ENTERED 0x5
+
+/* ROM states */
+#define FSR_STATE_ROM_INIT FSR_STATE_INIT
+#define FSR_STATE_ROM_INIT_DONE FSR_STATE_INIT_DONE
+#define FSR_STATE_ROM_CSE_MANIFEST_LOADED 0x2
+#define FSR_STATE_ROM_FW_MANIFEST_LOADED 0x3
+#define FSR_STATE_ROM_FW_FW_LOADED 0x4
+#define FSR_STATE_ROM_FW_ENTERED FSR_STATE_FW_ENTERED
+#define FSR_STATE_ROM_VERIFY_FEATURE_MASK 0x6
+#define FSR_STATE_ROM_GET_LOAD_OFFSET 0x7
+#define FSR_STATE_ROM_FETCH_ROM_EXT 0x8
+#define FSR_STATE_ROM_FETCH_ROM_EXT_DONE 0x9
+#define FSR_STATE_ROM_BASEFW_ENTERED 0xf /* SKL */
+
+/* (ROM) CSE states */
+#define FSR_STATE_ROM_CSE_IMR_REQUEST 0x10
+#define FSR_STATE_ROM_CSE_IMR_GRANTED 0x11
+#define FSR_STATE_ROM_CSE_VALIDATE_IMAGE_REQUEST 0x12
+#define FSR_STATE_ROM_CSE_IMAGE_VALIDATED 0x13
+
+#define FSR_STATE_ROM_CSE_IPC_IFACE_INIT 0x20
+#define FSR_STATE_ROM_CSE_IPC_RESET_PHASE_1 0x21
+#define FSR_STATE_ROM_CSE_IPC_OPERATIONAL_ENTRY 0x22
+#define FSR_STATE_ROM_CSE_IPC_OPERATIONAL 0x23
+#define FSR_STATE_ROM_CSE_IPC_DOWN 0x24
+
+/* BRINGUP (or BRNGUP) states */
+#define FSR_STATE_BRINGUP_INIT FSR_STATE_INIT
+#define FSR_STATE_BRINGUP_INIT_DONE FSR_STATE_INIT_DONE
+#define FSR_STATE_BRINGUP_HPSRAM_LOAD 0x2
+#define FSR_STATE_BRINGUP_UNPACK_START 0x3
+#define FSR_STATE_BRINGUP_IMR_RESTORE 0x4
+#define FSR_STATE_BRINGUP_FW_ENTERED FSR_STATE_FW_ENTERED
+
+/* ROM status/error values */
+#define HDA_DSP_ROM_CSE_ERROR 40
+#define HDA_DSP_ROM_CSE_WRONG_RESPONSE 41
+#define HDA_DSP_ROM_IMR_TO_SMALL 42
+#define HDA_DSP_ROM_BASE_FW_NOT_FOUND 43
+#define HDA_DSP_ROM_CSE_VALIDATION_FAILED 44
+#define HDA_DSP_ROM_IPC_FATAL_ERROR 45
+#define HDA_DSP_ROM_L2_CACHE_ERROR 46
+#define HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL 47
+#define HDA_DSP_ROM_API_PTR_INVALID 50
+#define HDA_DSP_ROM_BASEFW_INCOMPAT 51
+#define HDA_DSP_ROM_UNHANDLED_INTERRUPT 0xBEE00000
+#define HDA_DSP_ROM_MEMORY_HOLE_ECC 0xECC00000
+#define HDA_DSP_ROM_KERNEL_EXCEPTION 0xCAFE0000
+#define HDA_DSP_ROM_USER_EXCEPTION 0xBEEF0000
+#define HDA_DSP_ROM_UNEXPECTED_RESET 0xDECAF000
+#define HDA_DSP_ROM_NULL_FW_ENTRY 0x4c4c4e55
+
+#define HDA_DSP_ROM_IPC_CONTROL 0x01000000
+#define HDA_DSP_ROM_IPC_PURGE_FW 0x00004000
+
+/* various timeout values */
+#define HDA_DSP_PU_TIMEOUT 50
+#define HDA_DSP_PD_TIMEOUT 50
+#define HDA_DSP_RESET_TIMEOUT_US 50000
+#define HDA_DSP_BASEFW_TIMEOUT_US 3000000
+#define HDA_DSP_INIT_TIMEOUT_US 500000
+#define HDA_DSP_CTRL_RESET_TIMEOUT 100
+#define HDA_DSP_WAIT_TIMEOUT 500 /* 500 msec */
+#define HDA_DSP_REG_POLL_INTERVAL_US 500 /* 0.5 msec */
+#define HDA_DSP_REG_POLL_RETRY_COUNT 50
+
+#define HDA_DSP_ADSPIC_IPC BIT(0)
+#define HDA_DSP_ADSPIS_IPC BIT(0)
+
+/* Intel HD Audio General DSP Registers */
+#define HDA_DSP_GEN_BASE 0x0
+#define HDA_DSP_REG_ADSPCS (HDA_DSP_GEN_BASE + 0x04)
+#define HDA_DSP_REG_ADSPIC (HDA_DSP_GEN_BASE + 0x08)
+#define HDA_DSP_REG_ADSPIS (HDA_DSP_GEN_BASE + 0x0C)
+#define HDA_DSP_REG_ADSPIC2 (HDA_DSP_GEN_BASE + 0x10)
+#define HDA_DSP_REG_ADSPIS2 (HDA_DSP_GEN_BASE + 0x14)
+
+#define HDA_DSP_REG_ADSPIC2_SNDW BIT(5)
+#define HDA_DSP_REG_ADSPIS2_SNDW BIT(5)
+
+/* Intel HD Audio Inter-Processor Communication Registers */
+#define HDA_DSP_IPC_BASE 0x40
+#define HDA_DSP_REG_HIPCT (HDA_DSP_IPC_BASE + 0x00)
+#define HDA_DSP_REG_HIPCTE (HDA_DSP_IPC_BASE + 0x04)
+#define HDA_DSP_REG_HIPCI (HDA_DSP_IPC_BASE + 0x08)
+#define HDA_DSP_REG_HIPCIE (HDA_DSP_IPC_BASE + 0x0C)
+#define HDA_DSP_REG_HIPCCTL (HDA_DSP_IPC_BASE + 0x10)
+
+/* Intel Vendor Specific Registers */
+#define HDA_VS_INTEL_EM2 0x1030
+#define HDA_VS_INTEL_EM2_L1SEN BIT(13)
+#define HDA_VS_INTEL_LTRP 0x1048
+#define HDA_VS_INTEL_LTRP_GB_MASK 0x3F
+
+/* HIPCI */
+#define HDA_DSP_REG_HIPCI_BUSY BIT(31)
+#define HDA_DSP_REG_HIPCI_MSG_MASK 0x7FFFFFFF
+
+/* HIPCIE */
+#define HDA_DSP_REG_HIPCIE_DONE BIT(30)
+#define HDA_DSP_REG_HIPCIE_MSG_MASK 0x3FFFFFFF
+
+/* HIPCCTL */
+#define HDA_DSP_REG_HIPCCTL_DONE BIT(1)
+#define HDA_DSP_REG_HIPCCTL_BUSY BIT(0)
+
+/* HIPCT */
+#define HDA_DSP_REG_HIPCT_BUSY BIT(31)
+#define HDA_DSP_REG_HIPCT_MSG_MASK 0x7FFFFFFF
+
+/* HIPCTE */
+#define HDA_DSP_REG_HIPCTE_MSG_MASK 0x3FFFFFFF
+
+#define HDA_DSP_ADSPIC_CL_DMA BIT(1)
+#define HDA_DSP_ADSPIS_CL_DMA BIT(1)
+
+/* Delay before scheduling D0i3 entry */
+#define BXT_D0I3_DELAY 5000
+
+#define FW_CL_STREAM_NUMBER 0x1
+#define HDA_FW_BOOT_ATTEMPTS 3
+
+/* ADSPCS - Audio DSP Control & Status */
+
+/*
+ * Core Reset - asserted high
+ * CRST Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_CRST_SHIFT 0
+#define HDA_DSP_ADSPCS_CRST_MASK(cm) ((cm) << HDA_DSP_ADSPCS_CRST_SHIFT)
+
+/*
+ * Core run/stall - when set to '1' core is stalled
+ * CSTALL Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_CSTALL_SHIFT 8
+#define HDA_DSP_ADSPCS_CSTALL_MASK(cm) ((cm) << HDA_DSP_ADSPCS_CSTALL_SHIFT)
+
+/*
+ * Set Power Active - when set to '1' turn cores on
+ * SPA Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_SPA_SHIFT 16
+#define HDA_DSP_ADSPCS_SPA_MASK(cm) ((cm) << HDA_DSP_ADSPCS_SPA_SHIFT)
+
+/*
+ * Current Power Active - power status of cores, set by hardware
+ * CPA Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_CPA_SHIFT 24
+#define HDA_DSP_ADSPCS_CPA_MASK(cm) ((cm) << HDA_DSP_ADSPCS_CPA_SHIFT)
+
+/*
+ * Mask for a given number of cores
+ * nc = number of supported cores
+ */
+#define SOF_DSP_CORES_MASK(nc) GENMASK(((nc) - 1), 0)
+
+/* Intel HD Audio Inter-Processor Communication Registers for Cannonlake*/
+#define CNL_DSP_IPC_BASE 0xc0
+#define CNL_DSP_REG_HIPCTDR (CNL_DSP_IPC_BASE + 0x00)
+#define CNL_DSP_REG_HIPCTDA (CNL_DSP_IPC_BASE + 0x04)
+#define CNL_DSP_REG_HIPCTDD (CNL_DSP_IPC_BASE + 0x08)
+#define CNL_DSP_REG_HIPCIDR (CNL_DSP_IPC_BASE + 0x10)
+#define CNL_DSP_REG_HIPCIDA (CNL_DSP_IPC_BASE + 0x14)
+#define CNL_DSP_REG_HIPCIDD (CNL_DSP_IPC_BASE + 0x18)
+#define CNL_DSP_REG_HIPCCTL (CNL_DSP_IPC_BASE + 0x28)
+
+/* HIPCI */
+#define CNL_DSP_REG_HIPCIDR_BUSY BIT(31)
+#define CNL_DSP_REG_HIPCIDR_MSG_MASK 0x7FFFFFFF
+
+/* HIPCIE */
+#define CNL_DSP_REG_HIPCIDA_DONE BIT(31)
+#define CNL_DSP_REG_HIPCIDA_MSG_MASK 0x7FFFFFFF
+
+/* HIPCCTL */
+#define CNL_DSP_REG_HIPCCTL_DONE BIT(1)
+#define CNL_DSP_REG_HIPCCTL_BUSY BIT(0)
+
+/* HIPCT */
+#define CNL_DSP_REG_HIPCTDR_BUSY BIT(31)
+#define CNL_DSP_REG_HIPCTDR_MSG_MASK 0x7FFFFFFF
+
+/* HIPCTDA */
+#define CNL_DSP_REG_HIPCTDA_DONE BIT(31)
+#define CNL_DSP_REG_HIPCTDA_MSG_MASK 0x7FFFFFFF
+
+/* HIPCTDD */
+#define CNL_DSP_REG_HIPCTDD_MSG_MASK 0x7FFFFFFF
+
+/* BDL */
+#define HDA_DSP_BDL_SIZE 4096
+#define HDA_DSP_MAX_BDL_ENTRIES \
+ (HDA_DSP_BDL_SIZE / sizeof(struct sof_intel_dsp_bdl))
+
+/* Number of DAIs */
+#define SOF_SKL_NUM_DAIS_NOCODEC 8
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+#define SOF_SKL_NUM_DAIS 15
+#else
+#define SOF_SKL_NUM_DAIS SOF_SKL_NUM_DAIS_NOCODEC
+#endif
+
+/* Intel HD Audio SRAM Window 0 */
+#define HDA_DSP_SRAM_REG_ROM_STATUS_SKL 0x8000
+#define HDA_ADSP_SRAM0_BASE_SKL 0x8000
+
+/* Firmware status window */
+#define HDA_ADSP_FW_STATUS_SKL HDA_ADSP_SRAM0_BASE_SKL
+#define HDA_ADSP_ERROR_CODE_SKL (HDA_ADSP_FW_STATUS_SKL + 0x4)
+
+/* Host Device Memory Space */
+#define APL_SSP_BASE_OFFSET 0x2000
+#define CNL_SSP_BASE_OFFSET 0x10000
+
+/* Host Device Memory Size of a Single SSP */
+#define SSP_DEV_MEM_SIZE 0x1000
+
+/* SSP Count of the Platform */
+#define APL_SSP_COUNT 6
+#define CNL_SSP_COUNT 3
+#define ICL_SSP_COUNT 6
+#define TGL_SSP_COUNT 3
+#define MTL_SSP_COUNT 3
+
+/* SSP Registers */
+#define SSP_SSC1_OFFSET 0x4
+#define SSP_SET_SCLK_CONSUMER BIT(25)
+#define SSP_SET_SFRM_CONSUMER BIT(24)
+#define SSP_SET_CBP_CFP (SSP_SET_SCLK_CONSUMER | SSP_SET_SFRM_CONSUMER)
+
+#define HDA_IDISP_ADDR 2
+#define HDA_IDISP_CODEC(x) ((x) & BIT(HDA_IDISP_ADDR))
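+
+/*
+ * Illustrative note (not part of the upstream header): the iDisp (HDMI/DP)
+ * codec is expected at HDA address 2, so HDA_IDISP_CODEC(codec_mask) is
+ * non-zero whenever bit 2 of the bus codec mask is set, e.g.
+ * HDA_IDISP_CODEC(0x5) != 0 while HDA_IDISP_CODEC(0x3) == 0.
+ */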
+
+struct sof_intel_dsp_bdl {
+ __le32 addr_l;
+ __le32 addr_h;
+ __le32 size;
+ __le32 ioc;
+} __attribute__((packed));
+
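+/*
+ * Illustrative note (not part of the upstream header): each BDL entry above is
+ * four __le32 fields, i.e. 16 bytes, so HDA_DSP_MAX_BDL_ENTRIES works out to
+ * HDA_DSP_BDL_SIZE / 16 = 4096 / 16 = 256 entries.
+ */
+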
+#define SOF_HDA_PLAYBACK_STREAMS 16
+#define SOF_HDA_CAPTURE_STREAMS 16
+#define SOF_HDA_PLAYBACK 0
+#define SOF_HDA_CAPTURE 1
+
+/* stream flags */
+#define SOF_HDA_STREAM_DMI_L1_COMPATIBLE 1
+
+/*
+ * Time in ms for opportunistic D0I3 entry delay.
+ * This has been deliberately chosen to be long to avoid race conditions.
+ * Could be optimized in future.
+ */
+#define SOF_HDA_D0I3_WORK_DELAY_MS 5000
+
+/* HDA DSP D0 substate */
+enum sof_hda_D0_substate {
+ SOF_HDA_DSP_PM_D0I0, /* default D0 substate */
+ SOF_HDA_DSP_PM_D0I3, /* low power D0 substate */
+};
+
+/* represents DSP HDA controller frontend - i.e. host facing control */
+struct sof_intel_hda_dev {
+ bool imrboot_supported;
+ bool skip_imr_boot;
+ bool booted_from_imr;
+
+ int boot_iteration;
+
+ struct hda_bus hbus;
+
+ /* hw config */
+ const struct sof_intel_dsp_desc *desc;
+
+ /* trace */
+ struct hdac_ext_stream *dtrace_stream;
+
+ /* if position update IPC needed */
+ u32 no_ipc_position;
+
+ /* the maximum number of streams (playback + capture) supported */
+ u32 stream_max;
+
+ /* PM related */
+ bool l1_disabled; /* is DMI link L1 disabled? */
+
+ /* DMIC device */
+ struct platform_device *dmic_dev;
+
+ /* delayed work to enter D0I3 opportunistically */
+ struct delayed_work d0i3_work;
+
+ /* ACPI information stored between scan and probe steps */
+ struct sdw_intel_acpi_info info;
+
+ /* sdw context allocated by SoundWire driver */
+ struct sdw_intel_ctx *sdw;
+
+ /* FW clock config, 0:HPRO, 1:LPRO */
+ bool clk_config_lpro;
+
+ wait_queue_head_t waitq;
+ bool code_loading;
+
+ /* Intel NHLT information */
+ struct nhlt_acpi_table *nhlt;
+
+ /*
+ * Pointing to the IPC message if immediate sending was not possible
+ * because the downlink communication channel was BUSY at the time.
+ * The message will be re-tried when the channel becomes free (the ACK
+ * is received from the DSP for the previous message)
+ */
+ struct snd_sof_ipc_msg *delayed_ipc_tx_msg;
+};
+
+static inline struct hdac_bus *sof_to_bus(struct snd_sof_dev *s)
+{
+ struct sof_intel_hda_dev *hda = s->pdata->hw_pdata;
+
+ return &hda->hbus.core;
+}
+
+static inline struct hda_bus *sof_to_hbus(struct snd_sof_dev *s)
+{
+ struct sof_intel_hda_dev *hda = s->pdata->hw_pdata;
+
+ return &hda->hbus;
+}
+
+struct sof_intel_hda_stream {
+ struct snd_sof_dev *sdev;
+ struct hdac_ext_stream hext_stream;
+ struct sof_intel_stream sof_intel_stream;
+ int host_reserved; /* reserve host DMA channel */
+ u32 flags;
+};
+
+#define hstream_to_sof_hda_stream(hstream) \
+ container_of(hstream, struct sof_intel_hda_stream, hext_stream)
+
+#define bus_to_sof_hda(bus) \
+ container_of(bus, struct sof_intel_hda_dev, hbus.core)
+
+#define SOF_STREAM_SD_OFFSET(s) \
+ (SOF_HDA_ADSP_SD_ENTRY_SIZE * ((s)->index) \
+ + SOF_HDA_ADSP_LOADER_BASE)
+
+#define SOF_STREAM_SD_OFFSET_CRST 0x1
+
+/*
+ * DSP Core services.
+ */
+int hda_dsp_probe(struct snd_sof_dev *sdev);
+int hda_dsp_remove(struct snd_sof_dev *sdev);
+int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
+ unsigned int core_mask);
+int hda_power_down_dsp(struct snd_sof_dev *sdev);
+int hda_dsp_core_get(struct snd_sof_dev *sdev, int core);
+void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev);
+void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev);
+bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask);
+
+int hda_dsp_set_power_state_ipc3(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state);
+int hda_dsp_set_power_state_ipc4(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state);
+
+int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state);
+int hda_dsp_resume(struct snd_sof_dev *sdev);
+int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev);
+int hda_dsp_runtime_resume(struct snd_sof_dev *sdev);
+int hda_dsp_runtime_idle(struct snd_sof_dev *sdev);
+int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev);
+int hda_dsp_shutdown(struct snd_sof_dev *sdev);
+int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev);
+void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags);
+void hda_ipc_dump(struct snd_sof_dev *sdev);
+void hda_ipc_irq_dump(struct snd_sof_dev *sdev);
+void hda_dsp_d0i3_work(struct work_struct *work);
+int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev);
+
+/*
+ * DSP PCM Operations.
+ */
+u32 hda_dsp_get_mult_div(struct snd_sof_dev *sdev, int rate);
+u32 hda_dsp_get_bits(struct snd_sof_dev *sdev, int sample_bits);
+int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int hda_dsp_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int hda_dsp_pcm_hw_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params);
+int hda_dsp_stream_hw_free(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int hda_dsp_pcm_trigger(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t hda_dsp_pcm_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int hda_dsp_pcm_ack(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream);
+
+/*
+ * DSP Stream Operations.
+ */
+
+int hda_dsp_stream_init(struct snd_sof_dev *sdev);
+void hda_dsp_stream_free(struct snd_sof_dev *sdev);
+int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params);
+int hda_dsp_iccmax_stream_hw_params(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params);
+int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream, int cmd);
+irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context);
+int hda_dsp_stream_setup_bdl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct hdac_stream *hstream);
+bool hda_dsp_check_ipc_irq(struct snd_sof_dev *sdev);
+bool hda_dsp_check_stream_irq(struct snd_sof_dev *sdev);
+
+snd_pcm_uframes_t hda_dsp_stream_get_position(struct hdac_stream *hstream,
+ int direction, bool can_sleep);
+
+struct hdac_ext_stream *
+ hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction, u32 flags);
+int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag);
+int hda_dsp_stream_spib_config(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream,
+ int enable, u32 size);
+
+int hda_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ void *p, size_t sz);
+int hda_set_stream_data_offset(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ size_t posn_offset);
+
+/*
+ * DSP IPC Operations.
+ */
+int hda_dsp_ipc_send_msg(struct snd_sof_dev *sdev,
+ struct snd_sof_ipc_msg *msg);
+void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev);
+int hda_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev);
+int hda_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id);
+
+irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context);
+int hda_dsp_ipc_cmd_done(struct snd_sof_dev *sdev, int dir);
+
+/*
+ * DSP Code loader.
+ */
+int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev);
+int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev);
+int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream);
+struct hdac_ext_stream *hda_cl_stream_prepare(struct snd_sof_dev *sdev, unsigned int format,
+ unsigned int size, struct snd_dma_buffer *dmab,
+ int direction);
+int hda_cl_cleanup(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
+ struct hdac_ext_stream *hext_stream);
+int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot);
+#define HDA_CL_STREAM_FORMAT 0x40
+
+/* pre and post fw run ops */
+int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev);
+int hda_dsp_post_fw_run(struct snd_sof_dev *sdev);
+
+/* parse platform specific ext manifest ops */
+int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr);
+
+/*
+ * HDA Controller Operations.
+ */
+int hda_dsp_ctrl_get_caps(struct snd_sof_dev *sdev);
+void hda_dsp_ctrl_ppcap_enable(struct snd_sof_dev *sdev, bool enable);
+void hda_dsp_ctrl_ppcap_int_enable(struct snd_sof_dev *sdev, bool enable);
+int hda_dsp_ctrl_link_reset(struct snd_sof_dev *sdev, bool reset);
+void hda_dsp_ctrl_misc_clock_gating(struct snd_sof_dev *sdev, bool enable);
+int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable);
+int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev);
+void hda_dsp_ctrl_stop_chip(struct snd_sof_dev *sdev);
+/*
+ * HDA bus operations.
+ */
+void sof_hda_bus_init(struct snd_sof_dev *sdev, struct device *dev);
+void sof_hda_bus_exit(struct snd_sof_dev *sdev);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+/*
+ * HDA Codec operations.
+ */
+void hda_codec_probe_bus(struct snd_sof_dev *sdev);
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable);
+void hda_codec_jack_check(struct snd_sof_dev *sdev);
+void hda_codec_check_for_state_change(struct snd_sof_dev *sdev);
+void hda_codec_init_cmd_io(struct snd_sof_dev *sdev);
+void hda_codec_resume_cmd_io(struct snd_sof_dev *sdev);
+void hda_codec_stop_cmd_io(struct snd_sof_dev *sdev);
+void hda_codec_suspend_cmd_io(struct snd_sof_dev *sdev);
+void hda_codec_detect_mask(struct snd_sof_dev *sdev);
+void hda_codec_rirb_status_clear(struct snd_sof_dev *sdev);
+bool hda_codec_check_rirb_status(struct snd_sof_dev *sdev);
+void hda_codec_set_codec_wakeup(struct snd_sof_dev *sdev, bool status);
+void hda_codec_device_remove(struct snd_sof_dev *sdev);
+
+#else
+
+static inline void hda_codec_probe_bus(struct snd_sof_dev *sdev) { }
+static inline void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable) { }
+static inline void hda_codec_jack_check(struct snd_sof_dev *sdev) { }
+static inline void hda_codec_check_for_state_change(struct snd_sof_dev *sdev) { }
+static inline void hda_codec_init_cmd_io(struct snd_sof_dev *sdev) { }
+static inline void hda_codec_resume_cmd_io(struct snd_sof_dev *sdev) { }
+static inline void hda_codec_stop_cmd_io(struct snd_sof_dev *sdev) { }
+static inline void hda_codec_suspend_cmd_io(struct snd_sof_dev *sdev) { }
+static inline void hda_codec_detect_mask(struct snd_sof_dev *sdev) { }
+static inline void hda_codec_rirb_status_clear(struct snd_sof_dev *sdev) { }
+static inline bool hda_codec_check_rirb_status(struct snd_sof_dev *sdev) { return false; }
+static inline void hda_codec_set_codec_wakeup(struct snd_sof_dev *sdev, bool status) { }
+static inline void hda_codec_device_remove(struct snd_sof_dev *sdev) { }
+
+#endif /* CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC) && IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
+
+void hda_codec_i915_display_power(struct snd_sof_dev *sdev, bool enable);
+int hda_codec_i915_init(struct snd_sof_dev *sdev);
+int hda_codec_i915_exit(struct snd_sof_dev *sdev);
+
+#else
+
+static inline void hda_codec_i915_display_power(struct snd_sof_dev *sdev, bool enable) { }
+static inline int hda_codec_i915_init(struct snd_sof_dev *sdev) { return 0; }
+static inline int hda_codec_i915_exit(struct snd_sof_dev *sdev) { return 0; }
+
+#endif
+
+/*
+ * Trace Control.
+ */
+int hda_dsp_trace_init(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
+ struct sof_ipc_dma_trace_params_ext *dtrace_params);
+int hda_dsp_trace_release(struct snd_sof_dev *sdev);
+int hda_dsp_trace_trigger(struct snd_sof_dev *sdev, int cmd);
+
+/*
+ * SoundWire support
+ */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+
+int hda_sdw_check_lcount_common(struct snd_sof_dev *sdev);
+int hda_sdw_check_lcount_ext(struct snd_sof_dev *sdev);
+int hda_sdw_startup(struct snd_sof_dev *sdev);
+void hda_common_enable_sdw_irq(struct snd_sof_dev *sdev, bool enable);
+void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable);
+bool hda_sdw_check_wakeen_irq_common(struct snd_sof_dev *sdev);
+void hda_sdw_process_wakeen(struct snd_sof_dev *sdev);
+bool hda_common_check_sdw_irq(struct snd_sof_dev *sdev);
+
+#else
+
+static inline int hda_sdw_check_lcount_common(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline int hda_sdw_check_lcount_ext(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline int hda_sdw_startup(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline void hda_common_enable_sdw_irq(struct snd_sof_dev *sdev, bool enable)
+{
+}
+
+static inline void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable)
+{
+}
+
+static inline bool hda_sdw_check_wakeen_irq_common(struct snd_sof_dev *sdev)
+{
+ return false;
+}
+
+static inline void hda_sdw_process_wakeen(struct snd_sof_dev *sdev)
+{
+}
+
+static inline bool hda_common_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ return false;
+}
+
+#endif
+
+int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *cpu_dai,
+ int link_id);
+
+int sdw_hda_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai,
+ int link_id);
+
+int sdw_hda_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *cpu_dai);
+
+/* common dai driver */
+extern struct snd_soc_dai_driver skl_dai[];
+int hda_dsp_dais_suspend(struct snd_sof_dev *sdev);
+
+/*
+ * Platform Specific HW abstraction Ops.
+ */
+extern struct snd_sof_dsp_ops sof_hda_common_ops;
+
+extern struct snd_sof_dsp_ops sof_skl_ops;
+int sof_skl_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_apl_ops;
+int sof_apl_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_cnl_ops;
+int sof_cnl_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_tgl_ops;
+int sof_tgl_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_icl_ops;
+int sof_icl_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_mtl_ops;
+int sof_mtl_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_lnl_ops;
+int sof_lnl_ops_init(struct snd_sof_dev *sdev);
+
+extern const struct sof_intel_dsp_desc skl_chip_info;
+extern const struct sof_intel_dsp_desc apl_chip_info;
+extern const struct sof_intel_dsp_desc cnl_chip_info;
+extern const struct sof_intel_dsp_desc icl_chip_info;
+extern const struct sof_intel_dsp_desc tgl_chip_info;
+extern const struct sof_intel_dsp_desc tglh_chip_info;
+extern const struct sof_intel_dsp_desc ehl_chip_info;
+extern const struct sof_intel_dsp_desc jsl_chip_info;
+extern const struct sof_intel_dsp_desc adls_chip_info;
+extern const struct sof_intel_dsp_desc mtl_chip_info;
+extern const struct sof_intel_dsp_desc lnl_chip_info;
+
+/* Probes support */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+int hda_probes_register(struct snd_sof_dev *sdev);
+void hda_probes_unregister(struct snd_sof_dev *sdev);
+#else
+static inline int hda_probes_register(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline void hda_probes_unregister(struct snd_sof_dev *sdev)
+{
+}
+#endif /* CONFIG_SND_SOC_SOF_HDA_PROBES */
+
+/* SOF client registration for HDA platforms */
+int hda_register_clients(struct snd_sof_dev *sdev);
+void hda_unregister_clients(struct snd_sof_dev *sdev);
+
+/* machine driver select */
+struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev);
+void hda_set_mach_params(struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev);
+
+/* PCI driver selection and probe */
+int hda_pci_intel_probe(struct pci_dev *pci, const struct pci_device_id *pci_id);
+
+struct snd_sof_dai;
+struct sof_ipc_dai_config;
+
+#define SOF_HDA_POSITION_QUIRK_USE_SKYLAKE_LEGACY (0) /* previous implementation */
+#define SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS (1) /* recommended if VC0 only */
+#define SOF_HDA_POSITION_QUIRK_USE_DPIB_DDR_UPDATE (2) /* recommended with VC0 or VC1 */
+
+extern int sof_hda_position_quirk;
+
+void hda_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops);
+void hda_ops_free(struct snd_sof_dev *sdev);
+
+/* SKL/KBL */
+int hda_dsp_cl_boot_firmware_skl(struct snd_sof_dev *sdev);
+int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask);
+
+/* IPC4 */
+irqreturn_t cnl_ipc4_irq_thread(int irq, void *context);
+int cnl_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
+irqreturn_t hda_dsp_ipc4_irq_thread(int irq, void *context);
+bool hda_ipc4_tx_is_busy(struct snd_sof_dev *sdev);
+void hda_dsp_ipc4_schedule_d0i3_work(struct sof_intel_hda_dev *hdev,
+ struct snd_sof_ipc_msg *msg);
+int hda_dsp_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
+void hda_ipc4_dump(struct snd_sof_dev *sdev);
+extern struct sdw_intel_ops sdw_callback;
+
+struct sof_ipc4_fw_library;
+int hda_dsp_ipc4_load_library(struct snd_sof_dev *sdev,
+ struct sof_ipc4_fw_library *fw_lib, bool reload);
+
+/**
+ * struct hda_dai_widget_dma_ops - DAI DMA ops, optional by default unless specified otherwise
+ * @get_hext_stream: Mandatory function pointer to get the saved pointer to struct hdac_ext_stream
+ * @assign_hext_stream: Function pointer to assign a hdac_ext_stream
+ * @release_hext_stream: Function pointer to release the hdac_ext_stream
+ * @setup_hext_stream: Function pointer for hdac_ext_stream setup
+ * @reset_hext_stream: Function pointer for hdac_ext_stream reset
+ * @pre_trigger: Function pointer for DAI DMA pre-trigger actions
+ * @trigger: Function pointer for DAI DMA trigger actions
+ * @post_trigger: Function pointer for DAI DMA post-trigger actions
+ * @codec_dai_set_stream: Function pointer to set codec-side stream information
+ * @calc_stream_format: Function pointer to determine the stream format from hw_params
+ * and, for HDaudio codec DAIs, from the .sig bits
+ * @get_hlink: Mandatory function pointer to retrieve hlink, mainly to program LOSIDV
+ * for legacy HDaudio links or program HDaudio Extended Link registers.
+ */
+struct hda_dai_widget_dma_ops {
+ struct hdac_ext_stream *(*get_hext_stream)(struct snd_sof_dev *sdev,
+ struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream);
+ struct hdac_ext_stream *(*assign_hext_stream)(struct snd_sof_dev *sdev,
+ struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream);
+ void (*release_hext_stream)(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream);
+ void (*setup_hext_stream)(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream,
+ unsigned int format_val);
+ void (*reset_hext_stream)(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream);
+ int (*pre_trigger)(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream, int cmd);
+ int (*trigger)(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream, int cmd);
+ int (*post_trigger)(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream, int cmd);
+ void (*codec_dai_set_stream)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct hdac_stream *hstream);
+ unsigned int (*calc_stream_format)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+ struct hdac_ext_link * (*get_hlink)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+};
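+
+/*
+ * Illustrative note (not part of the upstream header): based on the kernel-doc
+ * above, a typical (assumed) call order for these ops is get_hext_stream() or
+ * assign_hext_stream() at hw_params time, setup_hext_stream() with the value
+ * returned by calc_stream_format(), pre_trigger() -> trigger() -> post_trigger()
+ * around DMA start/stop, and release_hext_stream()/reset_hext_stream() on
+ * hw_free; see hda-dai-ops.c for the actual sequencing.
+ */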
+
+const struct hda_dai_widget_dma_ops *
+hda_select_dai_widget_ops(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget);
+int hda_dai_config(struct snd_soc_dapm_widget *w, unsigned int flags,
+ struct snd_sof_dai_config_data *data);
+int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream,
+ struct snd_soc_dai *cpu_dai);
+
+#endif
diff --git a/sound/soc/sof/intel/icl.c b/sound/soc/sof/intel/icl.c
new file mode 100644
index 000000000..7ac10167a
--- /dev/null
+++ b/sound/soc/sof/intel/icl.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright(c) 2020 Intel Corporation. All rights reserved.
+//
+// Author: Fred Oh <fred.oh@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on IceLake.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kconfig.h>
+#include <linux/export.h>
+#include <linux/bits.h>
+#include "../ipc4-priv.h"
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+#include "../sof-audio.h"
+
+#define ICL_DSP_HPRO_CORE_ID 3
+
+static const struct snd_sof_debugfs_map icl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static int icl_dsp_core_stall(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ /* make sure core_mask only contains host-managed cores */
+ core_mask &= chip->host_managed_cores_mask;
+ if (!core_mask) {
+ dev_err(sdev->dev, "error: core_mask is not in host managed cores\n");
+ return -EINVAL;
+ }
+
+ /* stall core */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+
+ return 0;
+}
+
+/*
+ * post fw run operation for ICL.
+ * Core 3 will be powered up and in stall when HPRO is enabled
+ */
+static int icl_dsp_post_fw_run(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ int ret;
+
+ if (sdev->first_boot) {
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ ret = hda_sdw_startup(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: could not startup SoundWire links\n");
+ return ret;
+ }
+
+ /* Check if IMR boot is usable */
+ if (!sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT) &&
+ sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT)
+ hdev->imrboot_supported = true;
+ }
+
+ hda_sdw_int_enable(sdev, true);
+
+ /*
+ * The recommended HW programming sequence for ICL is to
+ * power up core 3 and keep it in stall if HPRO is enabled.
+ */
+ if (!hda->clk_config_lpro) {
+ ret = hda_dsp_enable_core(sdev, BIT(ICL_DSP_HPRO_CORE_ID));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core power up failed on core %d\n",
+ ICL_DSP_HPRO_CORE_ID);
+ return ret;
+ }
+
+ sdev->enabled_cores_mask |= BIT(ICL_DSP_HPRO_CORE_ID);
+ sdev->dsp_core_ref_count[ICL_DSP_HPRO_CORE_ID]++;
+
+ snd_sof_dsp_stall(sdev, BIT(ICL_DSP_HPRO_CORE_ID));
+ }
+
+ /* re-enable clock gating and power gating */
+ return hda_dsp_ctrl_clock_power_gating(sdev, true);
+}
+
+/* Icelake ops */
+struct snd_sof_dsp_ops sof_icl_ops;
+EXPORT_SYMBOL_NS(sof_icl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int sof_icl_ops_init(struct snd_sof_dev *sdev)
+{
+ /* common defaults */
+ memcpy(&sof_icl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ /* probe/remove/shutdown */
+ sof_icl_ops.shutdown = hda_dsp_shutdown;
+
+ if (sdev->pdata->ipc_type == SOF_IPC) {
+ /* doorbell */
+ sof_icl_ops.irq_thread = cnl_ipc_irq_thread;
+
+ /* ipc */
+ sof_icl_ops.send_msg = cnl_ipc_send_msg;
+
+ /* debug */
+ sof_icl_ops.ipc_dump = cnl_ipc_dump;
+
+ sof_icl_ops.set_power_state = hda_dsp_set_power_state_ipc3;
+ }
+
+ if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
+
+ ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_2;
+
+ /* External library loading support */
+ ipc4_data->load_library = hda_dsp_ipc4_load_library;
+
+ /* doorbell */
+ sof_icl_ops.irq_thread = cnl_ipc4_irq_thread;
+
+ /* ipc */
+ sof_icl_ops.send_msg = cnl_ipc4_send_msg;
+
+ /* debug */
+ sof_icl_ops.ipc_dump = cnl_ipc4_dump;
+
+ sof_icl_ops.set_power_state = hda_dsp_set_power_state_ipc4;
+ }
+
+ /* debug */
+ sof_icl_ops.debug_map = icl_dsp_debugfs;
+ sof_icl_ops.debug_map_count = ARRAY_SIZE(icl_dsp_debugfs);
+
+ /* pre/post fw run */
+ sof_icl_ops.post_fw_run = icl_dsp_post_fw_run;
+
+ /* firmware run */
+ sof_icl_ops.run = hda_dsp_cl_boot_firmware_iccmax;
+ sof_icl_ops.stall = icl_dsp_core_stall;
+
+ /* dsp core get/put */
+ sof_icl_ops.core_get = hda_dsp_core_get;
+
+ /* set DAI driver ops */
+ hda_set_dai_drv_ops(sdev, &sof_icl_ops);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(sof_icl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc icl_chip_info = {
+ /* Icelake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(3, 0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = ICL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .d0i3_offset = SOF_HDA_VS_D0I3C,
+ .read_sdw_lcount = hda_sdw_check_lcount_common,
+ .enable_sdw_irq = hda_common_enable_sdw_irq,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_2_0,
+};
+EXPORT_SYMBOL_NS(icl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/lnl.c b/sound/soc/sof/intel/lnl.c
new file mode 100644
index 000000000..db94b45e5
--- /dev/null
+++ b/sound/soc/sof/intel/lnl.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright(c) 2023 Intel Corporation. All rights reserved.
+
+/*
+ * Hardware interface for audio DSP on LunarLake.
+ */
+
+#include <linux/firmware.h>
+#include <sound/hda_register.h>
+#include <sound/sof/ipc4/header.h>
+#include <trace/events/sof_intel.h>
+#include "../ipc4-priv.h"
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+#include "../sof-audio.h"
+#include "mtl.h"
+#include <sound/hda-mlink.h>
+
+/* LunarLake ops */
+struct snd_sof_dsp_ops sof_lnl_ops;
+EXPORT_SYMBOL_NS(sof_lnl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+static const struct snd_sof_debugfs_map lnl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+/* this allows the DSP to set up DMIC/SSP */
+static int hdac_bus_offload_dmic_ssp(struct hdac_bus *bus)
+{
+ int ret;
+
+ ret = hdac_bus_eml_enable_offload(bus, true, AZX_REG_ML_LEPTR_ID_INTEL_SSP, true);
+ if (ret < 0)
+ return ret;
+
+ ret = hdac_bus_eml_enable_offload(bus, true, AZX_REG_ML_LEPTR_ID_INTEL_DMIC, true);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int lnl_hda_dsp_probe(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ ret = hda_dsp_probe(sdev);
+ if (ret < 0)
+ return ret;
+
+ return hdac_bus_offload_dmic_ssp(sof_to_bus(sdev));
+}
+
+static int lnl_hda_dsp_resume(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ ret = hda_dsp_resume(sdev);
+ if (ret < 0)
+ return ret;
+
+ return hdac_bus_offload_dmic_ssp(sof_to_bus(sdev));
+}
+
+static int lnl_hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ ret = hda_dsp_runtime_resume(sdev);
+ if (ret < 0)
+ return ret;
+
+ return hdac_bus_offload_dmic_ssp(sof_to_bus(sdev));
+}
+
+int sof_lnl_ops_init(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ /* common defaults */
+ memcpy(&sof_lnl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ /* probe */
+ sof_lnl_ops.probe = lnl_hda_dsp_probe;
+
+ /* shutdown */
+ sof_lnl_ops.shutdown = hda_dsp_shutdown;
+
+ /* doorbell */
+ sof_lnl_ops.irq_thread = mtl_ipc_irq_thread;
+
+ /* ipc */
+ sof_lnl_ops.send_msg = mtl_ipc_send_msg;
+ sof_lnl_ops.get_mailbox_offset = mtl_dsp_ipc_get_mailbox_offset;
+ sof_lnl_ops.get_window_offset = mtl_dsp_ipc_get_window_offset;
+
+ /* debug */
+ sof_lnl_ops.debug_map = lnl_dsp_debugfs;
+ sof_lnl_ops.debug_map_count = ARRAY_SIZE(lnl_dsp_debugfs);
+ sof_lnl_ops.dbg_dump = mtl_dsp_dump;
+ sof_lnl_ops.ipc_dump = mtl_ipc_dump;
+
+ /* pre/post fw run */
+ sof_lnl_ops.pre_fw_run = mtl_dsp_pre_fw_run;
+ sof_lnl_ops.post_fw_run = mtl_dsp_post_fw_run;
+
+ /* parse platform specific extended manifest */
+ sof_lnl_ops.parse_platform_ext_manifest = NULL;
+
+ /* dsp core get/put */
+ /* TODO: add core_get and core_put */
+
+ /* PM */
+ sof_lnl_ops.resume = lnl_hda_dsp_resume;
+ sof_lnl_ops.runtime_resume = lnl_hda_dsp_runtime_resume;
+
+ sof_lnl_ops.get_stream_position = mtl_dsp_get_stream_hda_link_position;
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(struct sof_ipc4_fw_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
+
+ ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_2;
+
+ /* External library loading support */
+ ipc4_data->load_library = hda_dsp_ipc4_load_library;
+
+ /* set DAI ops */
+ hda_set_dai_drv_ops(sdev, &sof_lnl_ops);
+
+ sof_lnl_ops.set_power_state = hda_dsp_set_power_state_ipc4;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(sof_lnl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+/* Check if an SDW IRQ occurred */
+static bool lnl_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ return hdac_bus_eml_check_interrupt(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+}
+
+static void lnl_enable_sdw_irq(struct snd_sof_dev *sdev, bool enable)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ hdac_bus_eml_enable_interrupt(bus, true, AZX_REG_ML_LEPTR_ID_SDW, enable);
+}
+
+static int lnl_dsp_disable_interrupts(struct snd_sof_dev *sdev)
+{
+ lnl_enable_sdw_irq(sdev, false);
+ mtl_disable_ipc_interrupts(sdev);
+ return mtl_enable_interrupts(sdev, false);
+}
+
+const struct sof_intel_dsp_desc lnl_chip_info = {
+ .cores_num = 5,
+ .init_core_mask = BIT(0),
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = MTL_DSP_REG_HFIPCXIDR,
+ .ipc_req_mask = MTL_DSP_REG_HFIPCXIDR_BUSY,
+ .ipc_ack = MTL_DSP_REG_HFIPCXIDA,
+ .ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
+ .ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
+ .rom_status_reg = MTL_DSP_ROM_STS,
+ .rom_init_timeout = 300,
+ .ssp_count = MTL_SSP_COUNT,
+ .d0i3_offset = MTL_HDA_VS_D0I3C,
+ .read_sdw_lcount = hda_sdw_check_lcount_ext,
+ .enable_sdw_irq = lnl_enable_sdw_irq,
+ .check_sdw_irq = lnl_dsp_check_sdw_irq,
+ .check_ipc_irq = mtl_dsp_check_ipc_irq,
+ .cl_init = mtl_dsp_cl_init,
+ .power_down_dsp = mtl_power_down_dsp,
+ .disable_interrupts = lnl_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_ACE_2_0,
+};
+EXPORT_SYMBOL_NS(lnl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/mtl.c b/sound/soc/sof/intel/mtl.c
new file mode 100644
index 000000000..f9412517e
--- /dev/null
+++ b/sound/soc/sof/intel/mtl.c
@@ -0,0 +1,745 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+// Authors: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Meteorlake.
+ */
+
+#include <linux/firmware.h>
+#include <sound/sof/ipc4/header.h>
+#include <trace/events/sof_intel.h>
+#include "../ipc4-priv.h"
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+#include "../sof-audio.h"
+#include "mtl.h"
+
+static const struct snd_sof_debugfs_map mtl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void mtl_ipc_host_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * clear the busy interrupt to tell the DSP controller that this
+ * interrupt has been accepted and must not be triggered again
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDR,
+ MTL_DSP_REG_HFIPCXTDR_BUSY, MTL_DSP_REG_HFIPCXTDR_BUSY);
+ /*
+ * clear the busy bit to ack to the DSP that the msg has been processed
+ * and to send the reply msg to the DSP
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDA,
+ MTL_DSP_REG_HFIPCXTDA_BUSY, 0);
+}
+
+static void mtl_ipc_dsp_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * set the DONE bit - tell the DSP that the host has received and processed
+ * its reply msg, and that no more replies should be sent to the host
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDA,
+ MTL_DSP_REG_HFIPCXIDA_DONE, MTL_DSP_REG_HFIPCXIDA_DONE);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXCTL,
+ MTL_DSP_REG_HFIPCXCTL_DONE, MTL_DSP_REG_HFIPCXCTL_DONE);
+}
+
+/* Check if an IPC IRQ occurred */
+bool mtl_dsp_check_ipc_irq(struct snd_sof_dev *sdev)
+{
+ u32 irq_status;
+ u32 hfintipptr;
+
+ if (sdev->dspless_mode_selected)
+ return false;
+
+ /* read Interrupt IP Pointer */
+ hfintipptr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFINTIPPTR) & MTL_HFINTIPPTR_PTR_MASK;
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, hfintipptr + MTL_DSP_IRQSTS);
+
+ trace_sof_intel_hda_irq_ipc_check(sdev, irq_status);
+
+ if (irq_status != U32_MAX && (irq_status & MTL_DSP_IRQSTS_IPC))
+ return true;
+
+ return false;
+}
+
+/* Check if an SDW IRQ occurred */
+static bool mtl_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ u32 irq_status;
+ u32 hfintipptr;
+
+ /* read Interrupt IP Pointer */
+ hfintipptr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFINTIPPTR) & MTL_HFINTIPPTR_PTR_MASK;
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, hfintipptr + MTL_DSP_IRQSTS);
+
+ if (irq_status != U32_MAX && (irq_status & MTL_DSP_IRQSTS_SDW))
+ return true;
+
+ return false;
+}
+
+int mtl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ struct sof_ipc4_msg *msg_data = msg->msg_data;
+
+ if (hda_ipc4_tx_is_busy(sdev)) {
+ hdev->delayed_ipc_tx_msg = msg;
+ return 0;
+ }
+
+ hdev->delayed_ipc_tx_msg = NULL;
+
+ /* send the message via mailbox */
+ if (msg_data->data_size)
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
+ msg_data->data_size);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDDY,
+ msg_data->extension);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDR,
+ msg_data->primary | MTL_DSP_REG_HFIPCXIDR_BUSY);
+
+ hda_dsp_ipc4_schedule_d0i3_work(hdev, msg);
+
+ return 0;
+}
+
+void mtl_enable_ipc_interrupts(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ if (sdev->dspless_mode_selected)
+ return;
+
+ /* enable IPC DONE and BUSY interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ MTL_DSP_REG_HFIPCXCTL_BUSY | MTL_DSP_REG_HFIPCXCTL_DONE,
+ MTL_DSP_REG_HFIPCXCTL_BUSY | MTL_DSP_REG_HFIPCXCTL_DONE);
+}
+
+void mtl_disable_ipc_interrupts(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ if (sdev->dspless_mode_selected)
+ return;
+
+ /* disable IPC DONE and BUSY interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ MTL_DSP_REG_HFIPCXCTL_BUSY | MTL_DSP_REG_HFIPCXCTL_DONE, 0);
+}
+
+static void mtl_enable_sdw_irq(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 hipcie;
+ u32 mask;
+ u32 val;
+ int ret;
+
+ if (sdev->dspless_mode_selected)
+ return;
+
+ /* Enable/Disable SoundWire interrupt */
+ mask = MTL_DSP_REG_HfSNDWIE_IE_MASK;
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfSNDWIE, mask, val);
+
+ /* check if operation was successful */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfSNDWIE, hipcie,
+ (hipcie & mask) == val,
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to set SoundWire IPC interrupt %s\n",
+ enable ? "enable" : "disable");
+}
+
+int mtl_enable_interrupts(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 hfintipptr;
+ u32 irqinten;
+ u32 hipcie;
+ u32 mask;
+ u32 val;
+ int ret;
+
+ if (sdev->dspless_mode_selected)
+ return 0;
+
+ /* read Interrupt IP Pointer */
+ hfintipptr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFINTIPPTR) & MTL_HFINTIPPTR_PTR_MASK;
+
+ /* Enable/Disable Host IPC and SOUNDWIRE */
+ mask = MTL_IRQ_INTEN_L_HOST_IPC_MASK | MTL_IRQ_INTEN_L_SOUNDWIRE_MASK;
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, hfintipptr, mask, val);
+
+ /* check if operation was successful */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, hfintipptr, irqinten,
+ (irqinten & mask) == val,
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to %s Host IPC and/or SOUNDWIRE\n",
+ enable ? "enable" : "disable");
+ return ret;
+ }
+
+ /* Enable/Disable Host IPC interrupt */
+ mask = MTL_DSP_REG_HfHIPCIE_IE_MASK;
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfHIPCIE, mask, val);
+
+ /* check if operation was successful */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfHIPCIE, hipcie,
+ (hipcie & mask) == val,
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to set Host IPC interrupt %s\n",
+ enable ? "enable" : "disable");
+ return ret;
+ }
+
+ return ret;
+}
+
+/* pre fw run operations */
+int mtl_dsp_pre_fw_run(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ u32 dsphfpwrsts;
+ u32 dsphfdsscs;
+ u32 cpa;
+ u32 pgs;
+ int ret;
+
+ /* Set the DSP subsystem power on */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_HFDSSCS,
+ MTL_HFDSSCS_SPA_MASK, MTL_HFDSSCS_SPA_MASK);
+
+ /* Wait for unstable CPA read (1 then 0 then 1) just after setting SPA bit */
+ usleep_range(1000, 1010);
+
+ /* poll with timeout to check if operation successful */
+ cpa = MTL_HFDSSCS_CPA_MASK;
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_HFDSSCS, dsphfdsscs,
+ (dsphfdsscs & cpa) == cpa, HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to enable DSP subsystem\n");
+ return ret;
+ }
+
+ /* Power up gated-DSP-0 domain in order to access the DSP shim register block. */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_HFPWRCTL,
+ MTL_HFPWRCTL_WPDSPHPXPG, MTL_HFPWRCTL_WPDSPHPXPG);
+
+ usleep_range(1000, 1010);
+
+ /* poll with timeout to check if operation successful */
+ pgs = MTL_HFPWRSTS_DSPHPXPGS_MASK;
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_HFPWRSTS, dsphfpwrsts,
+ (dsphfpwrsts & pgs) == pgs,
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to power up gated DSP domain\n");
+
+ /* if SoundWire is used, make sure it is not power-gated */
+ if (hdev->info.handle && hdev->info.link_mask > 0)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_HFPWRCTL,
+ MTL_HfPWRCTL_WPIOXPG(1), MTL_HfPWRCTL_WPIOXPG(1));
+
+ return ret;
+}
+
+int mtl_dsp_post_fw_run(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ if (sdev->first_boot) {
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ ret = hda_sdw_startup(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "could not startup SoundWire links\n");
+ return ret;
+ }
+
+ /* Check if IMR boot is usable */
+ if (!sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT))
+ hdev->imrboot_supported = true;
+ }
+
+ hda_sdw_int_enable(sdev, true);
+ return 0;
+}
+
+void mtl_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
+ u32 romdbgsts;
+ u32 romdbgerr;
+ u32 fwsts;
+ u32 fwlec;
+
+ fwsts = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_ROM_STS);
+ fwlec = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_ROM_ERROR);
+ romdbgsts = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFFLGPXQWY);
+ romdbgerr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFFLGPXQWY_ERROR);
+
+ dev_err(sdev->dev, "ROM status: %#x, ROM error: %#x\n", fwsts, fwlec);
+ dev_err(sdev->dev, "ROM debug status: %#x, ROM debug error: %#x\n", romdbgsts,
+ romdbgerr);
+ romdbgsts = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFFLGPXQWY + 0x8 * 3);
+ dev_printk(level, sdev->dev, "ROM feature bit%s enabled\n",
+ romdbgsts & BIT(24) ? "" : " not");
+}
+
+static bool mtl_dsp_primary_core_is_enabled(struct snd_sof_dev *sdev)
+{
+ int val;
+
+ val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE);
+ if (val != U32_MAX && val & MTL_DSP2CXCTL_PRIMARY_CORE_CPA_MASK)
+ return true;
+
+ return false;
+}
+
+static int mtl_dsp_core_power_up(struct snd_sof_dev *sdev, int core)
+{
+ unsigned int cpa;
+ u32 dspcxctl;
+ int ret;
+
+ /* Only the primary core can be powered up by the host */
+ if (core != SOF_DSP_PRIMARY_CORE || mtl_dsp_primary_core_is_enabled(sdev))
+ return 0;
+
+ /* Program the owner of the IP & shim registers (10: Host CPU) */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE,
+ MTL_DSP2CXCTL_PRIMARY_CORE_OSEL,
+ 0x2 << MTL_DSP2CXCTL_PRIMARY_CORE_OSEL_SHIFT);
+
+ /* enable SPA bit */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE,
+ MTL_DSP2CXCTL_PRIMARY_CORE_SPA_MASK,
+ MTL_DSP2CXCTL_PRIMARY_CORE_SPA_MASK);
+
+ /* Wait for unstable CPA read (1 then 0 then 1) just after setting SPA bit */
+ usleep_range(1000, 1010);
+
+ /* poll with timeout to check if operation successful */
+ cpa = MTL_DSP2CXCTL_PRIMARY_CORE_CPA_MASK;
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE, dspcxctl,
+ (dspcxctl & cpa) == cpa, HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: timeout on MTL_DSP2CXCTL_PRIMARY_CORE read\n",
+ __func__);
+ return ret;
+ }
+
+ /* set primary core mask and refcount to 1 */
+ sdev->enabled_cores_mask = BIT(SOF_DSP_PRIMARY_CORE);
+ sdev->dsp_core_ref_count[SOF_DSP_PRIMARY_CORE] = 1;
+
+ return 0;
+}
+
+static int mtl_dsp_core_power_down(struct snd_sof_dev *sdev, int core)
+{
+ u32 dspcxctl;
+ int ret;
+
+ /* Only the primary core can be powered down by the host */
+ if (core != SOF_DSP_PRIMARY_CORE || !mtl_dsp_primary_core_is_enabled(sdev))
+ return 0;
+
+ /* disable SPA bit */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE,
+ MTL_DSP2CXCTL_PRIMARY_CORE_SPA_MASK, 0);
+
+ /* Wait for unstable CPA read (0 then 1 then 0) just after clearing the SPA bit */
+ usleep_range(1000, 1010);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE, dspcxctl,
+ !(dspcxctl & MTL_DSP2CXCTL_PRIMARY_CORE_CPA_MASK),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to power down primary core\n");
+ return ret;
+ }
+
+ sdev->enabled_cores_mask = 0;
+ sdev->dsp_core_ref_count[SOF_DSP_PRIMARY_CORE] = 0;
+
+ return 0;
+}
+
+int mtl_power_down_dsp(struct snd_sof_dev *sdev)
+{
+ u32 dsphfdsscs, cpa;
+ int ret;
+
+ /* first power down core */
+ ret = mtl_dsp_core_power_down(sdev, SOF_DSP_PRIMARY_CORE);
+ if (ret) {
+ dev_err(sdev->dev, "mtl dsp power down error, %d\n", ret);
+ return ret;
+ }
+
+ /* Set the DSP subsystem power down */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_HFDSSCS,
+ MTL_HFDSSCS_SPA_MASK, 0);
+
+ /* Wait for unstable CPA read (0 then 1 then 0) just after clearing the SPA bit */
+ usleep_range(1000, 1010);
+
+ /* poll with timeout to check if operation successful */
+ cpa = MTL_HFDSSCS_CPA_MASK;
+ dsphfdsscs = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFDSSCS);
+ return snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_HFDSSCS, dsphfdsscs,
+ (dsphfdsscs & cpa) == 0, HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+}
+
+int mtl_dsp_cl_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ unsigned int status;
+ u32 ipc_hdr;
+ int ret;
+
+ /* step 1: purge FW request */
+ ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
+ if (!imr_boot)
+ ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);
+
+ /* step 2: power up primary core */
+ ret = mtl_dsp_core_power_up(sdev, SOF_DSP_PRIMARY_CORE);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev, "dsp core 0/1 power up failed\n");
+ goto err;
+ }
+
+ dev_dbg(sdev->dev, "Primary core power up successful\n");
+
+ /* step 3: wait for IPC DONE bit from ROM */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, chip->ipc_ack, status,
+ ((status & chip->ipc_ack_mask) == chip->ipc_ack_mask),
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_INIT_TIMEOUT_US);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev, "timeout waiting for purge IPC done\n");
+ goto err;
+ }
+
+ /* set DONE bit to clear the reply IPC message */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR, chip->ipc_ack, chip->ipc_ack_mask,
+ chip->ipc_ack_mask);
+
+ /* step 4: enable interrupts */
+ ret = mtl_enable_interrupts(sdev, true);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev, "%s: failed to enable interrupts\n", __func__);
+ goto err;
+ }
+
+ mtl_enable_ipc_interrupts(sdev);
+
+ /*
+ * ACE workaround: don't wait for ROM INIT.
+ * The platform cannot catch ROM_INIT_DONE because of a very short
+ * timing window. Follow the recommendations and skip this part.
+ */
+
+ return 0;
+
+err:
+ snd_sof_dsp_dbg_dump(sdev, "MTL DSP init fail", 0);
+ mtl_dsp_core_power_down(sdev, SOF_DSP_PRIMARY_CORE);
+ return ret;
+}
+
+irqreturn_t mtl_ipc_irq_thread(int irq, void *context)
+{
+ struct sof_ipc4_msg notification_data = {{ 0 }};
+ struct snd_sof_dev *sdev = context;
+ bool ack_received = false;
+ bool ipc_irq = false;
+ u32 hipcida;
+ u32 hipctdr;
+
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDA);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDR);
+
+ /* reply message from DSP */
+ if (hipcida & MTL_DSP_REG_HFIPCXIDA_DONE) {
+ /* DSP received the message */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXCTL,
+ MTL_DSP_REG_HFIPCXCTL_DONE, 0);
+
+ mtl_ipc_dsp_done(sdev);
+
+ ipc_irq = true;
+ ack_received = true;
+ }
+
+ if (hipctdr & MTL_DSP_REG_HFIPCXTDR_BUSY) {
+ /* Message from DSP (reply or notification) */
+ u32 extension = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDDY);
+ u32 primary = hipctdr & MTL_DSP_REG_HFIPCXTDR_MSG_MASK;
+
+ /*
+ * The ACE fw sends a new fw ipc message to the host to
+ * report the status of the last host ipc message
+ */
+ if (primary & SOF_IPC4_MSG_DIR_MASK) {
+ /* Reply received */
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;
+
+ data->primary = primary;
+ data->extension = extension;
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+ snd_sof_ipc_get_reply(sdev);
+ mtl_ipc_host_done(sdev);
+ snd_sof_ipc_reply(sdev, data->primary);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev,
+ "IPC reply before FW_READY: %#x|%#x\n",
+ primary, extension);
+ }
+ } else {
+ /* Notification received */
+ notification_data.primary = primary;
+ notification_data.extension = extension;
+
+ sdev->ipc->msg.rx_data = &notification_data;
+ snd_sof_ipc_msgs_rx(sdev);
+ sdev->ipc->msg.rx_data = NULL;
+
+ mtl_ipc_host_done(sdev);
+ }
+
+ ipc_irq = true;
+ }
+
+ if (!ipc_irq) {
+ /* This interrupt is not shared so no need to return IRQ_NONE. */
+ dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");
+ }
+
+ if (ack_received) {
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ if (hdev->delayed_ipc_tx_msg)
+ mtl_ipc_send_msg(sdev, hdev->delayed_ipc_tx_msg);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int mtl_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MTL_DSP_MBOX_UPLINK_OFFSET;
+}
+
+int mtl_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MTL_SRAM_WINDOW_OFFSET(id);
+}
+
+void mtl_ipc_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipcidr, hipcidd, hipcida, hipctdr, hipctdd, hipctda, hipcctl;
+
+ hipcidr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDR);
+ hipcidd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDDY);
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDA);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDR);
+ hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDDY);
+ hipctda = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDA);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXCTL);
+
+ dev_err(sdev->dev,
+ "Host IPC initiator: %#x|%#x|%#x, target: %#x|%#x|%#x, ctl: %#x\n",
+ hipcidr, hipcidd, hipcida, hipctdr, hipctdd, hipctda, hipcctl);
+}
+
+static int mtl_dsp_disable_interrupts(struct snd_sof_dev *sdev)
+{
+ mtl_enable_sdw_irq(sdev, false);
+ mtl_disable_ipc_interrupts(sdev);
+ return mtl_enable_interrupts(sdev, false);
+}
+
+u64 mtl_dsp_get_stream_hda_link_position(struct snd_sof_dev *sdev,
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ u32 llp_l, llp_u;
+
+ llp_l = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, MTL_PPLCLLPL(hstream->index));
+ llp_u = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, MTL_PPLCLLPU(hstream->index));
+ return ((u64)llp_u << 32) | llp_l;
+}
+
+static int mtl_dsp_core_get(struct snd_sof_dev *sdev, int core)
+{
+ const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
+
+ if (core == SOF_DSP_PRIMARY_CORE)
+ return mtl_dsp_core_power_up(sdev, SOF_DSP_PRIMARY_CORE);
+
+ if (pm_ops->set_core_state)
+ return pm_ops->set_core_state(sdev, core, true);
+
+ return 0;
+}
+
+static int mtl_dsp_core_put(struct snd_sof_dev *sdev, int core)
+{
+ const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
+ int ret;
+
+ if (pm_ops->set_core_state) {
+ ret = pm_ops->set_core_state(sdev, core, false);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (core == SOF_DSP_PRIMARY_CORE)
+ return mtl_dsp_core_power_down(sdev, SOF_DSP_PRIMARY_CORE);
+
+ return 0;
+}
+
+/* Meteorlake ops */
+struct snd_sof_dsp_ops sof_mtl_ops;
+EXPORT_SYMBOL_NS(sof_mtl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int sof_mtl_ops_init(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ /* common defaults */
+ memcpy(&sof_mtl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ /* shutdown */
+ sof_mtl_ops.shutdown = hda_dsp_shutdown;
+
+ /* doorbell */
+ sof_mtl_ops.irq_thread = mtl_ipc_irq_thread;
+
+ /* ipc */
+ sof_mtl_ops.send_msg = mtl_ipc_send_msg;
+ sof_mtl_ops.get_mailbox_offset = mtl_dsp_ipc_get_mailbox_offset;
+ sof_mtl_ops.get_window_offset = mtl_dsp_ipc_get_window_offset;
+
+ /* debug */
+ sof_mtl_ops.debug_map = mtl_dsp_debugfs;
+ sof_mtl_ops.debug_map_count = ARRAY_SIZE(mtl_dsp_debugfs);
+ sof_mtl_ops.dbg_dump = mtl_dsp_dump;
+ sof_mtl_ops.ipc_dump = mtl_ipc_dump;
+
+ /* pre/post fw run */
+ sof_mtl_ops.pre_fw_run = mtl_dsp_pre_fw_run;
+ sof_mtl_ops.post_fw_run = mtl_dsp_post_fw_run;
+
+ /* parse platform specific extended manifest */
+ sof_mtl_ops.parse_platform_ext_manifest = NULL;
+
+ /* dsp core get/put */
+ sof_mtl_ops.core_get = mtl_dsp_core_get;
+ sof_mtl_ops.core_put = mtl_dsp_core_put;
+
+ sof_mtl_ops.get_stream_position = mtl_dsp_get_stream_hda_link_position;
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(struct sof_ipc4_fw_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
+
+ ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_2;
+
+ /* External library loading support */
+ ipc4_data->load_library = hda_dsp_ipc4_load_library;
+
+ /* set DAI ops */
+ hda_set_dai_drv_ops(sdev, &sof_mtl_ops);
+
+ sof_mtl_ops.set_power_state = hda_dsp_set_power_state_ipc4;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(sof_mtl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc mtl_chip_info = {
+ .cores_num = 3,
+ .init_core_mask = BIT(0),
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = MTL_DSP_REG_HFIPCXIDR,
+ .ipc_req_mask = MTL_DSP_REG_HFIPCXIDR_BUSY,
+ .ipc_ack = MTL_DSP_REG_HFIPCXIDA,
+ .ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
+ .ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
+ .rom_status_reg = MTL_DSP_ROM_STS,
+ .rom_init_timeout = 300,
+ .ssp_count = MTL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE_ACE,
+ .sdw_alh_base = SDW_ALH_BASE_ACE,
+ .d0i3_offset = MTL_HDA_VS_D0I3C,
+ .read_sdw_lcount = hda_sdw_check_lcount_common,
+ .enable_sdw_irq = mtl_enable_sdw_irq,
+ .check_sdw_irq = mtl_dsp_check_sdw_irq,
+ .check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
+ .check_ipc_irq = mtl_dsp_check_ipc_irq,
+ .cl_init = mtl_dsp_cl_init,
+ .power_down_dsp = mtl_power_down_dsp,
+ .disable_interrupts = mtl_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_ACE_1_0,
+};
+EXPORT_SYMBOL_NS(mtl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/mtl.h b/sound/soc/sof/intel/mtl.h
new file mode 100644
index 000000000..95696b3d7
--- /dev/null
+++ b/sound/soc/sof/intel/mtl.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2020-2022 Intel Corporation. All rights reserved.
+ */
+
+/* HDA Registers */
+#define MTL_PPLCLLPL_BASE 0x948
+#define MTL_PPLCLLPU_STRIDE 0x10
+#define MTL_PPLCLLPL(x) (MTL_PPLCLLPL_BASE + (x) * MTL_PPLCLLPU_STRIDE)
+#define MTL_PPLCLLPU(x) (MTL_PPLCLLPL_BASE + 0x4 + (x) * MTL_PPLCLLPU_STRIDE)
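+
+/*
+ * Illustrative note (not part of the upstream header): with the 0x10 stride,
+ * stream index 2 maps to MTL_PPLCLLPL(2) == 0x968 and MTL_PPLCLLPU(2) == 0x96c;
+ * mtl_dsp_get_stream_hda_link_position() combines the two 32-bit reads into a
+ * 64-bit link position.
+ */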
+
+/* DSP Registers */
+#define MTL_HFDSSCS 0x1000
+#define MTL_HFDSSCS_SPA_MASK BIT(16)
+#define MTL_HFDSSCS_CPA_MASK BIT(24)
+#define MTL_HFSNDWIE 0x114C
+#define MTL_HFPWRCTL 0x1D18
+#define MTL_HfPWRCTL_WPIOXPG(x) BIT((x) + 8)
+#define MTL_HFPWRCTL_WPDSPHPXPG BIT(0)
+#define MTL_HFPWRSTS 0x1D1C
+#define MTL_HFPWRSTS_DSPHPXPGS_MASK BIT(0)
+#define MTL_HFINTIPPTR 0x1108
+#define MTL_IRQ_INTEN_L_HOST_IPC_MASK BIT(0)
+#define MTL_IRQ_INTEN_L_SOUNDWIRE_MASK BIT(6)
+#define MTL_HFINTIPPTR_PTR_MASK GENMASK(20, 0)
+
+#define MTL_HDA_VS_D0I3C 0x1D4A
+
+#define MTL_DSP2CXCAP_PRIMARY_CORE 0x178D00
+#define MTL_DSP2CXCTL_PRIMARY_CORE 0x178D04
+#define MTL_DSP2CXCTL_PRIMARY_CORE_SPA_MASK BIT(0)
+#define MTL_DSP2CXCTL_PRIMARY_CORE_CPA_MASK BIT(8)
+#define MTL_DSP2CXCTL_PRIMARY_CORE_OSEL GENMASK(25, 24)
+#define MTL_DSP2CXCTL_PRIMARY_CORE_OSEL_SHIFT 24
+
+/* IPC Registers */
+#define MTL_DSP_REG_HFIPCXTDR 0x73200
+#define MTL_DSP_REG_HFIPCXTDR_BUSY BIT(31)
+#define MTL_DSP_REG_HFIPCXTDR_MSG_MASK GENMASK(30, 0)
+#define MTL_DSP_REG_HFIPCXTDA 0x73204
+#define MTL_DSP_REG_HFIPCXTDA_BUSY BIT(31)
+#define MTL_DSP_REG_HFIPCXIDR 0x73210
+#define MTL_DSP_REG_HFIPCXIDR_BUSY BIT(31)
+#define MTL_DSP_REG_HFIPCXIDR_MSG_MASK GENMASK(30, 0)
+#define MTL_DSP_REG_HFIPCXIDA 0x73214
+#define MTL_DSP_REG_HFIPCXIDA_DONE BIT(31)
+#define MTL_DSP_REG_HFIPCXIDA_MSG_MASK GENMASK(30, 0)
+#define MTL_DSP_REG_HFIPCXCTL 0x73228
+#define MTL_DSP_REG_HFIPCXCTL_BUSY BIT(0)
+#define MTL_DSP_REG_HFIPCXCTL_DONE BIT(1)
+#define MTL_DSP_REG_HFIPCXTDDY 0x73300
+#define MTL_DSP_REG_HFIPCXIDDY 0x73380
+#define MTL_DSP_REG_HfHIPCIE 0x1140
+#define MTL_DSP_REG_HfHIPCIE_IE_MASK BIT(0)
+#define MTL_DSP_REG_HfSNDWIE 0x114C
+#define MTL_DSP_REG_HfSNDWIE_IE_MASK GENMASK(3, 0)
+
+#define MTL_DSP_IRQSTS 0x20
+#define MTL_DSP_IRQSTS_IPC BIT(0)
+#define MTL_DSP_IRQSTS_SDW BIT(6)
+
+#define MTL_DSP_REG_POLL_INTERVAL_US 10 /* 10 us */
+
+/* Memory windows */
+#define MTL_SRAM_WINDOW_OFFSET(x) (0x180000 + 0x8000 * (x))
+
+#define MTL_DSP_MBOX_UPLINK_OFFSET (MTL_SRAM_WINDOW_OFFSET(0) + 0x1000)
+#define MTL_DSP_MBOX_UPLINK_SIZE 0x1000
+#define MTL_DSP_MBOX_DOWNLINK_OFFSET MTL_SRAM_WINDOW_OFFSET(1)
+#define MTL_DSP_MBOX_DOWNLINK_SIZE 0x1000
+
+/* FW registers */
+#define MTL_DSP_ROM_STS MTL_SRAM_WINDOW_OFFSET(0) /* ROM status */
+#define MTL_DSP_ROM_ERROR (MTL_SRAM_WINDOW_OFFSET(0) + 0x4) /* ROM error code */
+
+#define MTL_DSP_REG_HFFLGPXQWY 0x163200 /* ROM debug status */
+#define MTL_DSP_REG_HFFLGPXQWY_ERROR 0x163204 /* ROM debug error code */
+#define MTL_DSP_REG_HfIMRIS1 0x162088
+#define MTL_DSP_REG_HfIMRIS1_IU_MASK BIT(0)
+
+bool mtl_dsp_check_ipc_irq(struct snd_sof_dev *sdev);
+int mtl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
+
+void mtl_enable_ipc_interrupts(struct snd_sof_dev *sdev);
+void mtl_disable_ipc_interrupts(struct snd_sof_dev *sdev);
+
+int mtl_enable_interrupts(struct snd_sof_dev *sdev, bool enable);
+
+int mtl_dsp_pre_fw_run(struct snd_sof_dev *sdev);
+int mtl_dsp_post_fw_run(struct snd_sof_dev *sdev);
+void mtl_dsp_dump(struct snd_sof_dev *sdev, u32 flags);
+
+int mtl_power_down_dsp(struct snd_sof_dev *sdev);
+int mtl_dsp_cl_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot);
+
+irqreturn_t mtl_ipc_irq_thread(int irq, void *context);
+
+int mtl_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev);
+int mtl_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id);
+
+void mtl_ipc_dump(struct snd_sof_dev *sdev);
+
+u64 mtl_dsp_get_stream_hda_link_position(struct snd_sof_dev *sdev,
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
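For reference, the memory-window macros in mtl.h are plain arithmetic; the standalone sketch below expands them to concrete offsets (the macro is copied locally so the snippet compiles on its own; it is an illustration of the layout, not part of the patch).

#include <assert.h>

#define SRAM_WINDOW_OFFSET(x)  (0x180000 + 0x8000 * (x))  /* local copy of MTL_SRAM_WINDOW_OFFSET() */

int main(void)
{
	assert(SRAM_WINDOW_OFFSET(0) == 0x180000);            /* ROM status at the start of window 0 */
	assert(SRAM_WINDOW_OFFSET(0) + 0x4 == 0x180004);      /* ROM error code follows it */
	assert(SRAM_WINDOW_OFFSET(0) + 0x1000 == 0x181000);   /* uplink mailbox */
	assert(SRAM_WINDOW_OFFSET(1) == 0x188000);            /* downlink mailbox, start of window 1 */
	return 0;
}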
diff --git a/sound/soc/sof/intel/pci-apl.c b/sound/soc/sof/intel/pci-apl.c
new file mode 100644
index 000000000..460f87f25
--- /dev/null
+++ b/sound/soc/sof/intel/pci-apl.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2021 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+
+static const struct sof_dev_desc bxt_desc = {
+ .machines = snd_soc_acpi_intel_bxt_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &apl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/apl",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/apl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-apl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-apl-nocodec.tplg",
+ .ops = &sof_apl_ops,
+ .ops_init = sof_apl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc glk_desc = {
+ .machines = snd_soc_acpi_intel_glk_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &apl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/glk",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/glk",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-glk.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-glk-nocodec.tplg",
+ .ops = &sof_apl_ops,
+ .ops_init = sof_apl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, HDA_APL, &bxt_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_GML, &glk_desc) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_apl_driver = {
+ .name = "sof-audio-pci-intel-apl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_apl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
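Each sof_pci_ids[] entry above stores a pointer to its sof_dev_desc through PCI_DEVICE_DATA(), and the shared probe path recovers it from the matched ID. The sketch below shows only that general driver_data round trip; the function name is illustrative, and the real handling lives in the common sof-pci-dev.c/hda.c probe helpers.

static int example_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
	/* PCI_DEVICE_DATA() placed the descriptor pointer in driver_data */
	const struct sof_dev_desc *desc =
		(const struct sof_dev_desc *)pci_id->driver_data;

	dev_dbg(&pci->dev, "nocodec topology: %s\n", desc->nocodec_tplg_filename);

	/* ... hand 'desc' over to the common SOF PCI probe code ... */
	return 0;
}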
diff --git a/sound/soc/sof/intel/pci-cnl.c b/sound/soc/sof/intel/pci-cnl.c
new file mode 100644
index 000000000..e2c50e7b0
--- /dev/null
+++ b/sound/soc/sof/intel/pci-cnl.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+
+static const struct sof_dev_desc cnl_desc = {
+ .machines = snd_soc_acpi_intel_cnl_machines,
+ .alt_machines = snd_soc_acpi_intel_cnl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &cnl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/cnl",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/cnl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-cnl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .ops_init = sof_cnl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc cfl_desc = {
+ .machines = snd_soc_acpi_intel_cfl_machines,
+ .alt_machines = snd_soc_acpi_intel_cfl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &cnl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/cnl",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/cnl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-cfl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .ops_init = sof_cnl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc cml_desc = {
+ .machines = snd_soc_acpi_intel_cml_machines,
+ .alt_machines = snd_soc_acpi_intel_cml_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &cnl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/cnl",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/cnl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-cml.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .ops_init = sof_cnl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, HDA_CNL_LP, &cnl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_CNL_H, &cfl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_CML_LP, &cml_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_CML_H, &cml_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_CML_S, &cml_desc) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_cnl_driver = {
+ .name = "sof-audio-pci-intel-cnl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_cnl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-icl.c b/sound/soc/sof/intel/pci-icl.c
new file mode 100644
index 000000000..0a65df3ed
--- /dev/null
+++ b/sound/soc/sof/intel/pci-icl.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2021 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+
+static const struct sof_dev_desc icl_desc = {
+ .machines = snd_soc_acpi_intel_icl_machines,
+ .alt_machines = snd_soc_acpi_intel_icl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &icl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/icl",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/icl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-icl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-icl-nocodec.tplg",
+ .ops = &sof_icl_ops,
+ .ops_init = sof_icl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc jsl_desc = {
+ .machines = snd_soc_acpi_intel_jsl_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &jsl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/jsl",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/jsl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-jsl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-jsl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .ops_init = sof_cnl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, HDA_ICL_LP, &icl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_ICL_H, &icl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_ICL_N, &jsl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_JSL_N, &jsl_desc) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_icl_driver = {
+ .name = "sof-audio-pci-intel-icl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_icl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-lnl.c b/sound/soc/sof/intel/pci-lnl.c
new file mode 100644
index 000000000..1b12c280e
--- /dev/null
+++ b/sound/soc/sof/intel/pci-lnl.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2023 Intel Corporation. All rights reserved.
+//
+// Author: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+#include "mtl.h"
+
+static const struct sof_dev_desc lnl_desc = {
+ .use_acpi_target_states = true,
+ .machines = snd_soc_acpi_intel_lnl_machines,
+ .alt_machines = snd_soc_acpi_intel_lnl_sdw_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &lnl_chip_info,
+ .ipc_supported_mask = BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_INTEL_IPC4,
+ .dspless_mode_supported = true,
+ .default_fw_path = {
+ [SOF_INTEL_IPC4] = "intel/sof-ipc4/lnl",
+ },
+ .default_tplg_path = {
+ [SOF_INTEL_IPC4] = "intel/sof-ace-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_INTEL_IPC4] = "sof-lnl.ri",
+ },
+ .nocodec_tplg_filename = "sof-lnl-nocodec.tplg",
+ .ops = &sof_lnl_ops,
+ .ops_init = sof_lnl_ops_init,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, HDA_LNL_P, &lnl_desc) }, /* LNL-P */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_lnl_driver = {
+ .name = "sof-audio-pci-intel-lnl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_lnl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-mtl.c b/sound/soc/sof/intel/pci-mtl.c
new file mode 100644
index 000000000..7868b0827
--- /dev/null
+++ b/sound/soc/sof/intel/pci-mtl.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
+//
+// Author: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+#include "mtl.h"
+
+static const struct sof_dev_desc mtl_desc = {
+ .use_acpi_target_states = true,
+ .machines = snd_soc_acpi_intel_mtl_machines,
+ .alt_machines = snd_soc_acpi_intel_mtl_sdw_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &mtl_chip_info,
+ .ipc_supported_mask = BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_INTEL_IPC4,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_INTEL_IPC4] = "intel/sof-ipc4/mtl",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/sof-ipc4-lib/mtl",
+ },
+ .default_tplg_path = {
+ [SOF_INTEL_IPC4] = "intel/sof-ace-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_INTEL_IPC4] = "sof-mtl.ri",
+ },
+ .nocodec_tplg_filename = "sof-mtl-nocodec.tplg",
+ .ops = &sof_mtl_ops,
+ .ops_init = sof_mtl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, HDA_MTL, &mtl_desc) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_mtl_driver = {
+ .name = "sof-audio-pci-intel-mtl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_mtl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-skl.c b/sound/soc/sof/intel/pci-skl.c
new file mode 100644
index 000000000..a6588b138
--- /dev/null
+++ b/sound/soc/sof/intel/pci-skl.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+
+static struct sof_dev_desc skl_desc = {
+ .machines = snd_soc_acpi_intel_skl_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .chip_info = &skl_chip_info,
+ .irqindex_host_ipc = -1,
+ .ipc_supported_mask = BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_INTEL_IPC4,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_INTEL_IPC4] = "intel/avs/skl",
+ },
+ .default_tplg_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-skl-nocodec.tplg",
+ .ops = &sof_skl_ops,
+ .ops_init = sof_skl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static struct sof_dev_desc kbl_desc = {
+ .machines = snd_soc_acpi_intel_kbl_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .chip_info = &skl_chip_info,
+ .irqindex_host_ipc = -1,
+ .ipc_supported_mask = BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_INTEL_IPC4,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_INTEL_IPC4] = "intel/avs/kbl",
+ },
+ .default_tplg_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-kbl-nocodec.tplg",
+ .ops = &sof_skl_ops,
+ .ops_init = sof_skl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_KBL_LP, &kbl_desc) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_skl_driver = {
+ .name = "sof-audio-pci-intel-skl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_skl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
new file mode 100644
index 000000000..d688f9373
--- /dev/null
+++ b/sound/soc/sof/intel/pci-tgl.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2021 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+
+static const struct sof_dev_desc tgl_desc = {
+ .machines = snd_soc_acpi_intel_tgl_machines,
+ .alt_machines = snd_soc_acpi_intel_tgl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &tgl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/tgl",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/tgl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-tgl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc tglh_desc = {
+ .machines = snd_soc_acpi_intel_tgl_machines,
+ .alt_machines = snd_soc_acpi_intel_tgl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &tglh_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/tgl-h",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/tgl-h",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-tgl-h.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc ehl_desc = {
+ .machines = snd_soc_acpi_intel_ehl_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &ehl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/ehl",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/ehl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-ehl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-ehl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc adls_desc = {
+ .machines = snd_soc_acpi_intel_adl_machines,
+ .alt_machines = snd_soc_acpi_intel_adl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &adls_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/adl-s",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/adl-s",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-adl-s.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc adl_desc = {
+ .machines = snd_soc_acpi_intel_adl_machines,
+ .alt_machines = snd_soc_acpi_intel_adl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &tgl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/adl",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/adl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-adl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc adl_n_desc = {
+ .machines = snd_soc_acpi_intel_adl_machines,
+ .alt_machines = snd_soc_acpi_intel_adl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &tgl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/adl-n",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/adl-n",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-adl-n.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc rpls_desc = {
+ .machines = snd_soc_acpi_intel_rpl_machines,
+ .alt_machines = snd_soc_acpi_intel_rpl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &adls_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/rpl-s",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/rpl-s",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-rpl-s.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc rpl_desc = {
+ .machines = snd_soc_acpi_intel_rpl_machines,
+ .alt_machines = snd_soc_acpi_intel_rpl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &tgl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .dspless_mode_supported = true, /* Only supported for HDaudio */
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/rpl",
+ },
+ .default_lib_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-lib/rpl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-rpl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, HDA_TGL_LP, &tgl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_TGL_H, &tglh_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_EHL_0, &ehl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_EHL_3, &ehl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_ADL_S, &adls_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_RPL_S, &rpls_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_ADL_P, &adl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_ADL_PS, &adl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_RPL_P_0, &rpl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_RPL_P_1, &rpl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_ADL_M, &adl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_ADL_PX, &adl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_RPL_M, &rpl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_RPL_PX, &rpl_desc) },
+ { PCI_DEVICE_DATA(INTEL, HDA_ADL_N, &adl_n_desc) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_tgl_driver = {
+ .name = "sof-audio-pci-intel-tgl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_tgl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-tng.c b/sound/soc/sof/intel/pci-tng.c
new file mode 100644
index 000000000..4ae4fe17c
--- /dev/null
+++ b/sound/soc/sof/intel/pci-tng.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2021 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "atom.h"
+#include "../sof-pci-dev.h"
+#include "../sof-audio.h"
+
+/* platform specific devices */
+#include "shim.h"
+
+static struct snd_soc_acpi_mach sof_tng_machines[] = {
+ {
+ .id = "INT343A",
+ .drv_name = "edison",
+ .sof_tplg_filename = "sof-byt.tplg",
+ },
+ {}
+};
+
+static const struct snd_sof_debugfs_map tng_debugfs[] = {
+ {"dmac0", DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp2", DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", DSP_BAR, SHIM_OFFSET, SHIM_SIZE_BYT,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static int tangier_pci_probe(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_intel_dsp_desc *chip;
+ u32 base, size;
+ int ret;
+
+ chip = get_chip_info(sdev->pdata);
+ if (!chip) {
+ dev_err(sdev->dev, "error: no such device supported\n");
+ return -EIO;
+ }
+
+ sdev->num_cores = chip->cores_num;
+
+ /* DSP DMA can only access low 31 bits of host memory */
+ ret = dma_coerce_mask_and_coherent(&pci->dev, DMA_BIT_MASK(31));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
+ return ret;
+ }
+
+ /* LPE base */
+ base = pci_resource_start(pci, desc->resindex_lpe_base) - IRAM_OFFSET;
+ size = PCI_BAR_SIZE;
+
+ dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
+ sdev->bar[DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[DSP_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap LPE base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[DSP_BAR]);
+
+ /* IMR base - optional */
+ if (desc->resindex_imr_base == -1)
+ goto irq;
+
+ base = pci_resource_start(pci, desc->resindex_imr_base);
+ size = pci_resource_len(pci, desc->resindex_imr_base);
+
+ /* some BIOSes don't map IMR */
+ if (base == 0x55aa55aa || base == 0x0) {
+ dev_info(sdev->dev, "IMR not set by BIOS. Ignoring\n");
+ goto irq;
+ }
+
+ dev_dbg(sdev->dev, "IMR base at 0x%x size 0x%x", base, size);
+ sdev->bar[IMR_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[IMR_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap IMR base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "IMR VADDR %p\n", sdev->bar[IMR_BAR]);
+
+irq:
+ /* register our IRQ */
+ sdev->ipc_irq = pci->irq;
+ dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
+ ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
+ atom_irq_handler, atom_irq_thread,
+ 0, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ return ret;
+ }
+
+ /* enable BUSY and disable DONE Interrupt by default */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return ret;
+}
+
+struct snd_sof_dsp_ops sof_tng_ops = {
+ /* device init */
+ .probe = tangier_pci_probe,
+
+ /* DSP core boot / reset */
+ .run = atom_run,
+ .reset = atom_reset,
+
+ /* Register IO uses direct mmio */
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* doorbell */
+ .irq_handler = atom_irq_handler,
+ .irq_thread = atom_irq_thread,
+
+ /* ipc */
+ .send_msg = atom_send_msg,
+ .get_mailbox_offset = atom_get_mailbox_offset,
+ .get_window_offset = atom_get_window_offset,
+
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ /* machine driver */
+ .machine_select = atom_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = atom_set_mach_params,
+
+ /* debug */
+ .debug_map = tng_debugfs,
+ .debug_map_count = ARRAY_SIZE(tng_debugfs),
+ .dbg_dump = atom_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_close = sof_stream_pcm_close,
+
+ /* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* DAI drivers */
+ .drv = atom_dai,
+ .num_drv = 3, /* we have only 3 SSPs on byt */
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
+
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+};
+
+const struct sof_intel_dsp_desc tng_chip_info = {
+ .cores_num = 1,
+ .host_managed_cores_mask = 1,
+ .hw_ip_version = SOF_INTEL_TANGIER,
+};
+
+static const struct sof_dev_desc tng_desc = {
+ .machines = sof_tng_machines,
+ .resindex_lpe_base = 3, /* IRAM, but subtract IRAM offset */
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = 0,
+ .irqindex_host_ipc = -1,
+ .chip_info = &tng_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-byt.ri",
+ },
+ .nocodec_tplg_filename = "sof-byt.tplg",
+ .ops = &sof_tng_ops,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, SST_TNG, &tng_desc) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_tng_driver = {
+ .name = "sof-audio-pci-intel-tng",
+ .id_table = sof_pci_ids,
+ .probe = sof_pci_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_tng_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
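tangier_pci_probe() above programs SHIM_IMRX with snd_sof_dsp_update_bits64(), passing mask = BUSY | DONE and value = DONE. Update-bits helpers follow the usual read-modify-write pattern; the hypothetical helper below only illustrates how the mask/value pair selects which bits change, it is not the SOF implementation.

/* not the SOF helper, just the underlying read-modify-write idea */
static inline u64 rmw64(u64 old, u64 mask, u64 value)
{
	return (old & ~mask) | (value & mask);
}

/*
 * With mask = SHIM_IMRX_BUSY | SHIM_IMRX_DONE and value = SHIM_IMRX_DONE:
 *   BUSY mask bit cleared -> BUSY interrupts are enabled,
 *   DONE mask bit set     -> DONE interrupts stay masked,
 *   all other IMRX bits are left untouched.
 */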
diff --git a/sound/soc/sof/intel/shim.h b/sound/soc/sof/intel/shim.h
new file mode 100644
index 000000000..9515d753c
--- /dev/null
+++ b/sound/soc/sof/intel/shim.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_SHIM_H
+#define __SOF_INTEL_SHIM_H
+
+enum sof_intel_hw_ip_version {
+ SOF_INTEL_TANGIER,
+ SOF_INTEL_BAYTRAIL,
+ SOF_INTEL_BROADWELL,
+ SOF_INTEL_CAVS_1_5, /* SkyLake, KabyLake, AmberLake */
+ SOF_INTEL_CAVS_1_5_PLUS,/* ApolloLake, GeminiLake */
+ SOF_INTEL_CAVS_1_8, /* CannonLake, CometLake, CoffeeLake */
+ SOF_INTEL_CAVS_2_0, /* IceLake, JasperLake */
+ SOF_INTEL_CAVS_2_5, /* TigerLake, AlderLake */
+ SOF_INTEL_ACE_1_0, /* MeteorLake */
+ SOF_INTEL_ACE_2_0, /* LunarLake */
+};
+
+/*
+ * SHIM registers for BYT, BSW, CHT, BDW
+ */
+
+#define SHIM_CSR (SHIM_OFFSET + 0x00)
+#define SHIM_PISR (SHIM_OFFSET + 0x08)
+#define SHIM_PIMR (SHIM_OFFSET + 0x10)
+#define SHIM_ISRX (SHIM_OFFSET + 0x18)
+#define SHIM_ISRD (SHIM_OFFSET + 0x20)
+#define SHIM_IMRX (SHIM_OFFSET + 0x28)
+#define SHIM_IMRD (SHIM_OFFSET + 0x30)
+#define SHIM_IPCX (SHIM_OFFSET + 0x38)
+#define SHIM_IPCD (SHIM_OFFSET + 0x40)
+#define SHIM_ISRSC (SHIM_OFFSET + 0x48)
+#define SHIM_ISRLPESC (SHIM_OFFSET + 0x50)
+#define SHIM_IMRSC (SHIM_OFFSET + 0x58)
+#define SHIM_IMRLPESC (SHIM_OFFSET + 0x60)
+#define SHIM_IPCSC (SHIM_OFFSET + 0x68)
+#define SHIM_IPCLPESC (SHIM_OFFSET + 0x70)
+#define SHIM_CLKCTL (SHIM_OFFSET + 0x78)
+#define SHIM_CSR2 (SHIM_OFFSET + 0x80)
+#define SHIM_LTRC (SHIM_OFFSET + 0xE0)
+#define SHIM_HMDC (SHIM_OFFSET + 0xE8)
+
+#define SHIM_PWMCTRL 0x1000
+
+/*
+ * SST SHIM register bits for BYT, BSW, CHT, BDW
+ * Register bit naming and functionality can differ between devices.
+ */
+
+/* CSR / CS */
+#define SHIM_CSR_RST BIT(1)
+#define SHIM_CSR_SBCS0 BIT(2)
+#define SHIM_CSR_SBCS1 BIT(3)
+#define SHIM_CSR_DCS(x) ((x) << 4)
+#define SHIM_CSR_DCS_MASK (0x7 << 4)
+#define SHIM_CSR_STALL BIT(10)
+#define SHIM_CSR_S0IOCS BIT(21)
+#define SHIM_CSR_S1IOCS BIT(23)
+#define SHIM_CSR_LPCS BIT(31)
+#define SHIM_CSR_24MHZ_LPCS \
+ (SHIM_CSR_SBCS0 | SHIM_CSR_SBCS1 | SHIM_CSR_LPCS)
+#define SHIM_CSR_24MHZ_NO_LPCS (SHIM_CSR_SBCS0 | SHIM_CSR_SBCS1)
+#define SHIM_BYT_CSR_RST BIT(0)
+#define SHIM_BYT_CSR_VECTOR_SEL BIT(1)
+#define SHIM_BYT_CSR_STALL BIT(2)
+#define SHIM_BYT_CSR_PWAITMODE BIT(3)
+
+/* ISRX / ISC */
+#define SHIM_ISRX_BUSY BIT(1)
+#define SHIM_ISRX_DONE BIT(0)
+#define SHIM_BYT_ISRX_REQUEST BIT(1)
+
+/* ISRD / ISD */
+#define SHIM_ISRD_BUSY BIT(1)
+#define SHIM_ISRD_DONE BIT(0)
+
+/* IMRX / IMC */
+#define SHIM_IMRX_BUSY BIT(1)
+#define SHIM_IMRX_DONE BIT(0)
+#define SHIM_BYT_IMRX_REQUEST BIT(1)
+
+/* IMRD / IMD */
+#define SHIM_IMRD_DONE BIT(0)
+#define SHIM_IMRD_BUSY BIT(1)
+#define SHIM_IMRD_SSP0 BIT(16)
+#define SHIM_IMRD_DMAC0 BIT(21)
+#define SHIM_IMRD_DMAC1 BIT(22)
+#define SHIM_IMRD_DMAC (SHIM_IMRD_DMAC0 | SHIM_IMRD_DMAC1)
+
+/* IPCX / IPCC */
+#define SHIM_IPCX_DONE BIT(30)
+#define SHIM_IPCX_BUSY BIT(31)
+#define SHIM_BYT_IPCX_DONE BIT_ULL(62)
+#define SHIM_BYT_IPCX_BUSY BIT_ULL(63)
+
+/* IPCD */
+#define SHIM_IPCD_DONE BIT(30)
+#define SHIM_IPCD_BUSY BIT(31)
+#define SHIM_BYT_IPCD_DONE BIT_ULL(62)
+#define SHIM_BYT_IPCD_BUSY BIT_ULL(63)
+
+/* CLKCTL */
+#define SHIM_CLKCTL_SMOS(x) ((x) << 24)
+#define SHIM_CLKCTL_MASK (3 << 24)
+#define SHIM_CLKCTL_DCPLCG BIT(18)
+#define SHIM_CLKCTL_SCOE1 BIT(17)
+#define SHIM_CLKCTL_SCOE0 BIT(16)
+
+/* CSR2 / CS2 */
+#define SHIM_CSR2_SDFD_SSP0 BIT(1)
+#define SHIM_CSR2_SDFD_SSP1 BIT(2)
+
+/* LTRC */
+#define SHIM_LTRC_VAL(x) ((x) << 0)
+
+/* HMDC */
+#define SHIM_HMDC_HDDA0(x) ((x) << 0)
+#define SHIM_HMDC_HDDA1(x) ((x) << 7)
+#define SHIM_HMDC_HDDA_E0_CH0 1
+#define SHIM_HMDC_HDDA_E0_CH1 2
+#define SHIM_HMDC_HDDA_E0_CH2 4
+#define SHIM_HMDC_HDDA_E0_CH3 8
+#define SHIM_HMDC_HDDA_E1_CH0 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH0)
+#define SHIM_HMDC_HDDA_E1_CH1 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH1)
+#define SHIM_HMDC_HDDA_E1_CH2 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH2)
+#define SHIM_HMDC_HDDA_E1_CH3 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH3)
+#define SHIM_HMDC_HDDA_E0_ALLCH \
+ (SHIM_HMDC_HDDA_E0_CH0 | SHIM_HMDC_HDDA_E0_CH1 | \
+ SHIM_HMDC_HDDA_E0_CH2 | SHIM_HMDC_HDDA_E0_CH3)
+#define SHIM_HMDC_HDDA_E1_ALLCH \
+ (SHIM_HMDC_HDDA_E1_CH0 | SHIM_HMDC_HDDA_E1_CH1 | \
+ SHIM_HMDC_HDDA_E1_CH2 | SHIM_HMDC_HDDA_E1_CH3)
+
+/* Audio DSP PCI registers */
+#define PCI_VDRTCTL0 0xa0
+#define PCI_VDRTCTL1 0xa4
+#define PCI_VDRTCTL2 0xa8
+#define PCI_VDRTCTL3 0xaC
+
+/* VDRTCTL0 */
+#define PCI_VDRTCL0_D3PGD BIT(0)
+#define PCI_VDRTCL0_D3SRAMPGD BIT(1)
+#define PCI_VDRTCL0_DSRAMPGE_SHIFT 12
+#define PCI_VDRTCL0_DSRAMPGE_MASK GENMASK(PCI_VDRTCL0_DSRAMPGE_SHIFT + 19,\
+ PCI_VDRTCL0_DSRAMPGE_SHIFT)
+#define PCI_VDRTCL0_ISRAMPGE_SHIFT 2
+#define PCI_VDRTCL0_ISRAMPGE_MASK GENMASK(PCI_VDRTCL0_ISRAMPGE_SHIFT + 9,\
+ PCI_VDRTCL0_ISRAMPGE_SHIFT)
+
+/* VDRTCTL2 */
+#define PCI_VDRTCL2_DCLCGE BIT(1)
+#define PCI_VDRTCL2_DTCGE BIT(10)
+#define PCI_VDRTCL2_APLLSE_MASK BIT(31)
+
+/* PMCS */
+#define PCI_PMCS 0x84
+#define PCI_PMCS_PS_MASK 0x3
+
+/* Intel quirks */
+#define SOF_INTEL_PROCEN_FMT_QUIRK BIT(0)
+
+/* DSP hardware descriptor */
+struct sof_intel_dsp_desc {
+ int cores_num;
+ int host_managed_cores_mask;
+ int init_core_mask; /* cores available after fw boot */
+ int ipc_req;
+ int ipc_req_mask;
+ int ipc_ack;
+ int ipc_ack_mask;
+ int ipc_ctl;
+ int rom_status_reg;
+ int rom_init_timeout;
+ int ssp_count; /* ssp count of the platform */
+ int ssp_base_offset; /* base address of the SSPs */
+ u32 sdw_shim_base;
+ u32 sdw_alh_base;
+ u32 d0i3_offset;
+ u32 quirks;
+ enum sof_intel_hw_ip_version hw_ip_version;
+ int (*read_sdw_lcount)(struct snd_sof_dev *sdev);
+ void (*enable_sdw_irq)(struct snd_sof_dev *sdev, bool enable);
+ bool (*check_sdw_irq)(struct snd_sof_dev *sdev);
+ bool (*check_sdw_wakeen_irq)(struct snd_sof_dev *sdev);
+ bool (*check_ipc_irq)(struct snd_sof_dev *sdev);
+ int (*power_down_dsp)(struct snd_sof_dev *sdev);
+ int (*disable_interrupts)(struct snd_sof_dev *sdev);
+ int (*cl_init)(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot);
+};
+
+extern struct snd_sof_dsp_ops sof_tng_ops;
+
+extern const struct sof_intel_dsp_desc tng_chip_info;
+
+struct sof_intel_stream {
+ size_t posn_offset;
+};
+
+static inline const struct sof_intel_dsp_desc *get_chip_info(struct snd_sof_pdata *pdata)
+{
+ const struct sof_dev_desc *desc = pdata->desc;
+
+ return desc->chip_info;
+}
+
+#endif
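The VDRTCTL0 power-gating masks above are built with GENMASK() relative to their shift; expanded (GENMASK(h, l) covers bits h..l inclusive), they come out as follows.

/*
 *   PCI_VDRTCL0_DSRAMPGE_MASK = GENMASK(31, 12) = 0xfffff000   (20 bits)
 *   PCI_VDRTCL0_ISRAMPGE_MASK = GENMASK(11,  2) = 0x00000ffc   (10 bits)
 */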
diff --git a/sound/soc/sof/intel/skl.c b/sound/soc/sof/intel/skl.c
new file mode 100644
index 000000000..d24e64e71
--- /dev/null
+++ b/sound/soc/sof/intel/skl.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
+//
+
+/*
+ * Hardware interface for audio DSP on Skylake and Kabylake.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/pcm_params.h>
+#include <sound/sof.h>
+#include <sound/sof/ext_manifest4.h>
+
+#include "../sof-priv.h"
+#include "../ipc4-priv.h"
+#include "../ops.h"
+#include "hda.h"
+#include "../sof-audio.h"
+
+#define SRAM_MEMORY_WINDOW_BASE 0x8000
+
+static const __maybe_unused struct snd_sof_debugfs_map skl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000},
+};
+
+static int skl_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return SRAM_MEMORY_WINDOW_BASE + (0x2000 * id);
+}
+
+static int skl_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return SRAM_MEMORY_WINDOW_BASE + 0x1000;
+}
+
+/* Skylake ops */
+struct snd_sof_dsp_ops sof_skl_ops;
+EXPORT_SYMBOL_NS(sof_skl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int sof_skl_ops_init(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ /* common defaults */
+ memcpy(&sof_skl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ /* probe/remove/shutdown */
+ sof_skl_ops.shutdown = hda_dsp_shutdown;
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET_CAVS_1_5;
+
+ ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_1_5;
+
+ sof_skl_ops.get_window_offset = skl_dsp_ipc_get_window_offset;
+ sof_skl_ops.get_mailbox_offset = skl_dsp_ipc_get_mailbox_offset;
+
+ /* doorbell */
+ sof_skl_ops.irq_thread = hda_dsp_ipc4_irq_thread;
+
+ /* ipc */
+ sof_skl_ops.send_msg = hda_dsp_ipc4_send_msg;
+
+ /* set DAI driver ops */
+ hda_set_dai_drv_ops(sdev, &sof_skl_ops);
+
+ /* debug */
+ sof_skl_ops.debug_map = skl_dsp_debugfs;
+ sof_skl_ops.debug_map_count = ARRAY_SIZE(skl_dsp_debugfs);
+ sof_skl_ops.ipc_dump = hda_ipc4_dump;
+
+ /* firmware run */
+ sof_skl_ops.run = hda_dsp_cl_boot_firmware_skl;
+
+ /* pre/post fw run */
+ sof_skl_ops.post_fw_run = hda_dsp_post_fw_run;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(sof_skl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc skl_chip_info = {
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(1, 0),
+ .ipc_req = HDA_DSP_REG_HIPCI,
+ .ipc_req_mask = HDA_DSP_REG_HIPCI_BUSY,
+ .ipc_ack = HDA_DSP_REG_HIPCIE,
+ .ipc_ack_mask = HDA_DSP_REG_HIPCIE_DONE,
+ .ipc_ctl = HDA_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS_SKL,
+ .rom_init_timeout = 300,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_1_5,
+};
+EXPORT_SYMBOL_NS(skl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
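For reference, the window helpers above resolve to fixed offsets inside the DSP SRAM window space:

/*
 *   skl_dsp_ipc_get_window_offset(sdev, 0) -> 0x8000
 *   skl_dsp_ipc_get_window_offset(sdev, 1) -> 0xa000
 *   skl_dsp_ipc_get_window_offset(sdev, 2) -> 0xc000
 *   skl_dsp_ipc_get_mailbox_offset(sdev)   -> 0x9000  (window 0 + 0x1000)
 */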
diff --git a/sound/soc/sof/intel/tgl.c b/sound/soc/sof/intel/tgl.c
new file mode 100644
index 000000000..bb9f20253
--- /dev/null
+++ b/sound/soc/sof/intel/tgl.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright(c) 2020 Intel Corporation. All rights reserved.
+//
+// Authors: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Tigerlake.
+ */
+
+#include <sound/sof/ext_manifest4.h>
+#include "../ipc4-priv.h"
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+#include "../sof-audio.h"
+
+static const struct snd_sof_debugfs_map tgl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static int tgl_dsp_core_get(struct snd_sof_dev *sdev, int core)
+{
+ const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
+
+ /* power up primary core if not already powered up and return */
+ if (core == SOF_DSP_PRIMARY_CORE)
+ return hda_dsp_enable_core(sdev, BIT(core));
+
+ if (pm_ops->set_core_state)
+ return pm_ops->set_core_state(sdev, core, true);
+
+ return 0;
+}
+
+static int tgl_dsp_core_put(struct snd_sof_dev *sdev, int core)
+{
+ const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
+ int ret;
+
+ if (pm_ops->set_core_state) {
+ ret = pm_ops->set_core_state(sdev, core, false);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* power down primary core and return */
+ if (core == SOF_DSP_PRIMARY_CORE)
+ return hda_dsp_core_reset_power_down(sdev, BIT(core));
+
+ return 0;
+}
+
+/* Tigerlake ops */
+struct snd_sof_dsp_ops sof_tgl_ops;
+EXPORT_SYMBOL_NS(sof_tgl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int sof_tgl_ops_init(struct snd_sof_dev *sdev)
+{
+ /* common defaults */
+ memcpy(&sof_tgl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ /* probe/remove/shutdown */
+ sof_tgl_ops.shutdown = hda_dsp_shutdown_dma_flush;
+
+ if (sdev->pdata->ipc_type == SOF_IPC) {
+ /* doorbell */
+ sof_tgl_ops.irq_thread = cnl_ipc_irq_thread;
+
+ /* ipc */
+ sof_tgl_ops.send_msg = cnl_ipc_send_msg;
+
+ /* debug */
+ sof_tgl_ops.ipc_dump = cnl_ipc_dump;
+
+ sof_tgl_ops.set_power_state = hda_dsp_set_power_state_ipc3;
+ }
+
+ if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
+
+ ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_2;
+
+ /* External library loading support */
+ ipc4_data->load_library = hda_dsp_ipc4_load_library;
+
+ /* doorbell */
+ sof_tgl_ops.irq_thread = cnl_ipc4_irq_thread;
+
+ /* ipc */
+ sof_tgl_ops.send_msg = cnl_ipc4_send_msg;
+
+ /* debug */
+ sof_tgl_ops.ipc_dump = cnl_ipc4_dump;
+
+ sof_tgl_ops.set_power_state = hda_dsp_set_power_state_ipc4;
+ }
+
+ /* set DAI driver ops */
+ hda_set_dai_drv_ops(sdev, &sof_tgl_ops);
+
+ /* debug */
+ sof_tgl_ops.debug_map = tgl_dsp_debugfs;
+ sof_tgl_ops.debug_map_count = ARRAY_SIZE(tgl_dsp_debugfs);
+
+ /* pre/post fw run */
+ sof_tgl_ops.post_fw_run = hda_dsp_post_fw_run;
+
+ /* firmware run */
+ sof_tgl_ops.run = hda_dsp_cl_boot_firmware_iccmax;
+
+ /* dsp core get/put */
+ sof_tgl_ops.core_get = tgl_dsp_core_get;
+ sof_tgl_ops.core_put = tgl_dsp_core_put;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(sof_tgl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc tgl_chip_info = {
+ /* Tigerlake, Alderlake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = TGL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .d0i3_offset = SOF_HDA_VS_D0I3C,
+ .read_sdw_lcount = hda_sdw_check_lcount_common,
+ .enable_sdw_irq = hda_common_enable_sdw_irq,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_2_5,
+};
+EXPORT_SYMBOL_NS(tgl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc tglh_chip_info = {
+ /* Tigerlake-H */
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = TGL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .d0i3_offset = SOF_HDA_VS_D0I3C,
+ .read_sdw_lcount = hda_sdw_check_lcount_common,
+ .enable_sdw_irq = hda_common_enable_sdw_irq,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_2_5,
+};
+EXPORT_SYMBOL_NS(tglh_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc ehl_chip_info = {
+ /* Elkhartlake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = TGL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .d0i3_offset = SOF_HDA_VS_D0I3C,
+ .read_sdw_lcount = hda_sdw_check_lcount_common,
+ .enable_sdw_irq = hda_common_enable_sdw_irq,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_2_5,
+};
+EXPORT_SYMBOL_NS(ehl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc adls_chip_info = {
+ /* Alderlake-S */
+ .cores_num = 2,
+ .init_core_mask = BIT(0),
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = TGL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .d0i3_offset = SOF_HDA_VS_D0I3C,
+ .read_sdw_lcount = hda_sdw_check_lcount_common,
+ .enable_sdw_irq = hda_common_enable_sdw_irq,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_2_5,
+};
+EXPORT_SYMBOL_NS(adls_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
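tgl_dsp_core_get()/tgl_dsp_core_put() power the primary core directly and drive secondary cores over IPC; callers are expected to balance the two. A minimal usage sketch, assuming the ops are reached through the generic snd_sof_dsp_core_get()/snd_sof_dsp_core_put() wrappers and with an illustrative caller name:

static int example_run_on_core(struct snd_sof_dev *sdev, int core)
{
	int ret;

	ret = snd_sof_dsp_core_get(sdev, core);	/* ends up in tgl_dsp_core_get() */
	if (ret < 0)
		return ret;

	/* ... do work that needs 'core' powered up ... */

	snd_sof_dsp_core_put(sdev, core);	/* balanced put */
	return 0;
}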
diff --git a/sound/soc/sof/iomem-utils.c b/sound/soc/sof/iomem-utils.c
new file mode 100644
index 000000000..3f57f6cf6
--- /dev/null
+++ b/sound/soc/sof/iomem-utils.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
+//
+// Author: Keyon Jie <yang.jie@linux.intel.com>
+//
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/platform_device.h>
+#include <asm/unaligned.h>
+#include <sound/soc.h>
+#include <sound/sof.h>
+#include "sof-priv.h"
+#include "ops.h"
+
+/*
+ * Register IO
+ *
+ * The sof_io_xyz() wrappers are typically referenced in snd_sof_dsp_ops
+ * structures and cannot be inlined.
+ */
+
+void sof_io_write(struct snd_sof_dev *sdev, void __iomem *addr, u32 value)
+{
+ writel(value, addr);
+}
+EXPORT_SYMBOL(sof_io_write);
+
+u32 sof_io_read(struct snd_sof_dev *sdev, void __iomem *addr)
+{
+ return readl(addr);
+}
+EXPORT_SYMBOL(sof_io_read);
+
+void sof_io_write64(struct snd_sof_dev *sdev, void __iomem *addr, u64 value)
+{
+ writeq(value, addr);
+}
+EXPORT_SYMBOL(sof_io_write64);
+
+u64 sof_io_read64(struct snd_sof_dev *sdev, void __iomem *addr)
+{
+ return readq(addr);
+}
+EXPORT_SYMBOL(sof_io_read64);
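+
+/*
+ * Minimal usage sketch: platform drivers typically reference these
+ * wrappers from their snd_sof_dsp_ops, roughly as below (field names
+ * assumed from sof-priv.h):
+ *
+ *	static struct snd_sof_dsp_ops example_ops = {
+ *		.write	 = sof_io_write,
+ *		.read	 = sof_io_read,
+ *		.write64 = sof_io_write64,
+ *		.read64	 = sof_io_read64,
+ *	};
+ */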
+
+/*
+ * IPC Mailbox IO
+ */
+
+void sof_mailbox_write(struct snd_sof_dev *sdev, u32 offset,
+ void *message, size_t bytes)
+{
+ void __iomem *dest = sdev->bar[sdev->mailbox_bar] + offset;
+
+ memcpy_toio(dest, message, bytes);
+}
+EXPORT_SYMBOL(sof_mailbox_write);
+
+void sof_mailbox_read(struct snd_sof_dev *sdev, u32 offset,
+ void *message, size_t bytes)
+{
+ void __iomem *src = sdev->bar[sdev->mailbox_bar] + offset;
+
+ memcpy_fromio(message, src, bytes);
+}
+EXPORT_SYMBOL(sof_mailbox_read);
+
+/*
+ * Memory copy.
+ */
+
+int sof_block_write(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
+ u32 offset, void *src, size_t size)
+{
+ int bar = snd_sof_dsp_get_bar_index(sdev, blk_type);
+ const u8 *src_byte = src;
+ void __iomem *dest;
+ u32 affected_mask;
+ u32 tmp;
+ int m, n;
+
+ if (bar < 0)
+ return bar;
+
+ dest = sdev->bar[bar] + offset;
+
+ m = size / 4;
+ n = size % 4;
+
+	/* __iowrite32_copy() takes the count in 32-bit words, hence size / 4 */
+ __iowrite32_copy(dest, src, m);
+
+ if (n) {
+ affected_mask = (1 << (8 * n)) - 1;
+
+		/* Read the last 32-bit word from dest, modify only the
+		 * affected bytes and write the word back; the unaffected
+		 * bytes must keep their previous value.
+		 */
+ tmp = ioread32(dest + m * 4);
+ tmp &= ~affected_mask;
+
+ tmp |= *(u32 *)(src_byte + m * 4) & affected_mask;
+ iowrite32(tmp, dest + m * 4);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_block_write);
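+
+/*
+ * Worked example for the partial trailing word above, assuming a
+ * little-endian system: with size = 10, m = 2 full words are copied by
+ * __iowrite32_copy() and n = 2 bytes remain, so affected_mask is
+ * (1 << 16) - 1 = 0xffff. Only the two low-order bytes of the last
+ * 32-bit word at dest (its first two byte addresses) are replaced; the
+ * other two bytes keep their previous value.
+ */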
+
+int sof_block_read(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
+ u32 offset, void *dest, size_t size)
+{
+ int bar = snd_sof_dsp_get_bar_index(sdev, blk_type);
+
+ if (bar < 0)
+ return bar;
+
+ memcpy_fromio(dest, sdev->bar[bar] + offset, size);
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_block_read);
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
new file mode 100644
index 000000000..b53abc923
--- /dev/null
+++ b/sound/soc/sof/ipc.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+// Generic IPC layer that can work over MMIO and SPI/I2C. PHY layer provided
+// by platform driver code.
+//
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ops.h"
+
+/**
+ * sof_ipc_send_msg - generic function to prepare and send one IPC message
+ * @sdev: pointer to SOF core device struct
+ * @msg_data: pointer to a message to send
+ * @msg_bytes: number of bytes in the message
+ * @reply_bytes: number of bytes available for the reply.
+ *		 The buffer for the reply data is not passed to this
+ *		 function; the available size is informational only, for
+ *		 use by the reply handling functions.
+ *
+ * On success the function returns 0, otherwise negative error number.
+ *
+ * Note: higher level sdev->ipc->tx_mutex must be held to make sure that
+ * transfers are synchronized.
+ */
+int sof_ipc_send_msg(struct snd_sof_dev *sdev, void *msg_data, size_t msg_bytes,
+ size_t reply_bytes)
+{
+ struct snd_sof_ipc *ipc = sdev->ipc;
+ struct snd_sof_ipc_msg *msg;
+ int ret;
+
+ if (ipc->disable_ipc_tx || sdev->fw_state != SOF_FW_BOOT_COMPLETE)
+ return -ENODEV;
+
+ /*
+ * The spin-lock is needed to protect message objects against other
+ * atomic contexts.
+ */
+ spin_lock_irq(&sdev->ipc_lock);
+
+ /* initialise the message */
+ msg = &ipc->msg;
+
+ /* attach message data */
+ msg->msg_data = msg_data;
+ msg->msg_size = msg_bytes;
+
+ msg->reply_size = reply_bytes;
+ msg->reply_error = 0;
+
+ sdev->msg = msg;
+
+ ret = snd_sof_dsp_send_msg(sdev, msg);
+ /* Next reply that we receive will be related to this message */
+ if (!ret)
+ msg->ipc_complete = false;
+
+ spin_unlock_irq(&sdev->ipc_lock);
+
+ return ret;
+}
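+
+/*
+ * Usage sketch: an IPC backend's tx_msg implementation is expected to
+ * call this with sdev->ipc->tx_mutex held and then wait for completion,
+ * roughly as below (the timeout value is backend-specific):
+ *
+ *	mutex_lock(&sdev->ipc->tx_mutex);
+ *	ret = sof_ipc_send_msg(sdev, msg, msg_bytes, reply_bytes);
+ *	if (!ret)
+ *		wait_event_timeout(sdev->ipc->msg.waitq,
+ *				   sdev->ipc->msg.ipc_complete,
+ *				   msecs_to_jiffies(timeout));
+ *	mutex_unlock(&sdev->ipc->tx_mutex);
+ */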
+
+/* send IPC message from host to DSP */
+int sof_ipc_tx_message(struct snd_sof_ipc *ipc, void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes)
+{
+ if (msg_bytes > ipc->max_payload_size ||
+ reply_bytes > ipc->max_payload_size)
+ return -ENOBUFS;
+
+ return ipc->ops->tx_msg(ipc->sdev, msg_data, msg_bytes, reply_data,
+ reply_bytes, false);
+}
+EXPORT_SYMBOL(sof_ipc_tx_message);
+
+/* IPC set or get data from host to DSP */
+int sof_ipc_set_get_data(struct snd_sof_ipc *ipc, void *msg_data,
+ size_t msg_bytes, bool set)
+{
+ return ipc->ops->set_get_data(ipc->sdev, msg_data, msg_bytes, set);
+}
+EXPORT_SYMBOL(sof_ipc_set_get_data);
+
+/*
+ * send IPC message from host to DSP without modifying the DSP state.
+ * This is used for IPCs that can be handled by the DSP
+ * even in a low-power D0 substate.
+ */
+int sof_ipc_tx_message_no_pm(struct snd_sof_ipc *ipc, void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes)
+{
+ if (msg_bytes > ipc->max_payload_size ||
+ reply_bytes > ipc->max_payload_size)
+ return -ENOBUFS;
+
+ return ipc->ops->tx_msg(ipc->sdev, msg_data, msg_bytes, reply_data,
+ reply_bytes, true);
+}
+EXPORT_SYMBOL(sof_ipc_tx_message_no_pm);
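+
+/*
+ * Caller-side sketch, using IPC3 structures for illustration only (the
+ * command shown is just an example): a sender fills an IPC struct whose
+ * header carries the command and size, then passes matching reply space:
+ *
+ *	struct sof_ipc_cmd_hdr hdr = {
+ *		.size = sizeof(hdr),
+ *		.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE,
+ *	};
+ *	struct sof_ipc_reply reply;
+ *
+ *	ret = sof_ipc_tx_message(sdev->ipc, &hdr, hdr.size,
+ *				 &reply, sizeof(reply));
+ */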
+
+/* Generic helper function to retrieve the reply */
+void snd_sof_ipc_get_reply(struct snd_sof_dev *sdev)
+{
+	/*
+	 * An unexpected reply IPC may arrive that does not belong to any
+	 * message sent by the driver. In that case the driver must ignore
+	 * the IPC.
+	 */
+ if (!sdev->msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
+ return;
+ }
+
+ sdev->msg->reply_error = sdev->ipc->ops->get_reply(sdev);
+}
+EXPORT_SYMBOL(snd_sof_ipc_get_reply);
+
+/* handle reply message from DSP */
+void snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct snd_sof_ipc_msg *msg = &sdev->ipc->msg;
+
+ if (msg->ipc_complete) {
+ dev_dbg(sdev->dev,
+ "no reply expected, received 0x%x, will be ignored",
+ msg_id);
+ return;
+ }
+
+	/* wake up any waiters on this message so they can pick up the result */
+ msg->ipc_complete = true;
+ wake_up(&msg->waitq);
+}
+EXPORT_SYMBOL(snd_sof_ipc_reply);
+
+struct snd_sof_ipc *snd_sof_ipc_init(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc *ipc;
+ struct snd_sof_ipc_msg *msg;
+ const struct sof_ipc_ops *ops;
+
+ ipc = devm_kzalloc(sdev->dev, sizeof(*ipc), GFP_KERNEL);
+ if (!ipc)
+ return NULL;
+
+ mutex_init(&ipc->tx_mutex);
+ ipc->sdev = sdev;
+ msg = &ipc->msg;
+
+	/* indicate that we are not currently sending a message */
+ msg->ipc_complete = true;
+
+ init_waitqueue_head(&msg->waitq);
+
+ switch (sdev->pdata->ipc_type) {
+#if defined(CONFIG_SND_SOC_SOF_IPC3)
+ case SOF_IPC:
+ ops = &ipc3_ops;
+ break;
+#endif
+#if defined(CONFIG_SND_SOC_SOF_INTEL_IPC4)
+ case SOF_INTEL_IPC4:
+ ops = &ipc4_ops;
+ break;
+#endif
+ default:
+ dev_err(sdev->dev, "Not supported IPC version: %d\n",
+ sdev->pdata->ipc_type);
+ return NULL;
+ }
+
+ /* check for mandatory ops */
+ if (!ops->tx_msg || !ops->rx_msg || !ops->set_get_data || !ops->get_reply) {
+ dev_err(sdev->dev, "Missing IPC message handling ops\n");
+ return NULL;
+ }
+
+ if (!ops->fw_loader || !ops->fw_loader->validate ||
+ !ops->fw_loader->parse_ext_manifest) {
+ dev_err(sdev->dev, "Missing IPC firmware loading ops\n");
+ return NULL;
+ }
+
+ if (!ops->pcm) {
+ dev_err(sdev->dev, "Missing IPC PCM ops\n");
+ return NULL;
+ }
+
+ if (!ops->tplg || !ops->tplg->widget || !ops->tplg->control) {
+ dev_err(sdev->dev, "Missing IPC topology ops\n");
+ return NULL;
+ }
+
+ if (ops->fw_tracing && (!ops->fw_tracing->init || !ops->fw_tracing->suspend ||
+ !ops->fw_tracing->resume)) {
+ dev_err(sdev->dev, "Missing firmware tracing ops\n");
+ return NULL;
+ }
+
+ if (ops->init && ops->init(sdev))
+ return NULL;
+
+ ipc->ops = ops;
+
+ return ipc;
+}
+EXPORT_SYMBOL(snd_sof_ipc_init);
+
+void snd_sof_ipc_free(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc *ipc = sdev->ipc;
+
+ if (!ipc)
+ return;
+
+ /* disable sending of ipc's */
+ mutex_lock(&ipc->tx_mutex);
+ ipc->disable_ipc_tx = true;
+ mutex_unlock(&ipc->tx_mutex);
+
+ if (ipc->ops->exit)
+ ipc->ops->exit(sdev);
+}
+EXPORT_SYMBOL(snd_sof_ipc_free);
diff --git a/sound/soc/sof/ipc3-control.c b/sound/soc/sof/ipc3-control.c
new file mode 100644
index 000000000..a8deec7dc
--- /dev/null
+++ b/sound/soc/sof/ipc3-control.c
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2021 Intel Corporation. All rights reserved.
+//
+//
+
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ipc3-priv.h"
+
+/* IPC set()/get() for kcontrols. */
+static int sof_ipc3_set_get_kcontrol_data(struct snd_sof_control *scontrol,
+ bool set, bool lock)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scontrol->scomp);
+ struct sof_ipc_ctrl_data *cdata = scontrol->ipc_control_data;
+ const struct sof_ipc_ops *iops = sdev->ipc->ops;
+ enum sof_ipc_ctrl_type ctrl_type;
+ struct snd_sof_widget *swidget;
+ bool widget_found = false;
+ u32 ipc_cmd, msg_bytes;
+ int ret = 0;
+
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (swidget->comp_id == scontrol->comp_id) {
+ widget_found = true;
+ break;
+ }
+ }
+
+ if (!widget_found) {
+ dev_err(sdev->dev, "%s: can't find widget with id %d\n", __func__,
+ scontrol->comp_id);
+ return -EINVAL;
+ }
+
+ if (lock)
+ mutex_lock(&swidget->setup_mutex);
+ else
+ lockdep_assert_held(&swidget->setup_mutex);
+
+ /*
+ * Volatile controls should always be part of static pipelines and the
+ * widget use_count would always be > 0 in this case. For the others,
+ * just return the cached value if the widget is not set up.
+ */
+ if (!swidget->use_count)
+ goto unlock;
+
+ /*
+ * Select the IPC cmd and the ctrl_type based on the ctrl_cmd and the
+ * direction
+	 * Note: SOF_CTRL_TYPE_VALUE_COMP_* is currently neither used nor
+	 * supported as a ctrl_type
+ */
+ if (cdata->cmd == SOF_CTRL_CMD_BINARY) {
+ ipc_cmd = set ? SOF_IPC_COMP_SET_DATA : SOF_IPC_COMP_GET_DATA;
+ ctrl_type = set ? SOF_CTRL_TYPE_DATA_SET : SOF_CTRL_TYPE_DATA_GET;
+ } else {
+ ipc_cmd = set ? SOF_IPC_COMP_SET_VALUE : SOF_IPC_COMP_GET_VALUE;
+ ctrl_type = set ? SOF_CTRL_TYPE_VALUE_CHAN_SET : SOF_CTRL_TYPE_VALUE_CHAN_GET;
+ }
+
+ cdata->rhdr.hdr.cmd = SOF_IPC_GLB_COMP_MSG | ipc_cmd;
+ cdata->type = ctrl_type;
+ cdata->comp_id = scontrol->comp_id;
+ cdata->msg_index = 0;
+
+ /* calculate header and data size */
+ switch (cdata->type) {
+ case SOF_CTRL_TYPE_VALUE_CHAN_GET:
+ case SOF_CTRL_TYPE_VALUE_CHAN_SET:
+ cdata->num_elems = scontrol->num_channels;
+
+ msg_bytes = scontrol->num_channels *
+ sizeof(struct sof_ipc_ctrl_value_chan);
+ msg_bytes += sizeof(struct sof_ipc_ctrl_data);
+ break;
+ case SOF_CTRL_TYPE_DATA_GET:
+ case SOF_CTRL_TYPE_DATA_SET:
+ cdata->num_elems = cdata->data->size;
+
+ msg_bytes = cdata->data->size;
+ msg_bytes += sizeof(struct sof_ipc_ctrl_data) +
+ sizeof(struct sof_abi_hdr);
+ break;
+ default:
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ cdata->rhdr.hdr.size = msg_bytes;
+ cdata->elems_remaining = 0;
+
+ ret = iops->set_get_data(sdev, cdata, cdata->rhdr.hdr.size, set);
+ if (!set)
+ goto unlock;
+
+ /* It is a set-data operation, and we have a backup that we can restore */
+ if (ret < 0) {
+ if (!scontrol->old_ipc_control_data)
+ goto unlock;
+ /*
+ * Current ipc_control_data is not valid, we use the last known good
+ * configuration
+ */
+ memcpy(scontrol->ipc_control_data, scontrol->old_ipc_control_data,
+ scontrol->max_size);
+ kfree(scontrol->old_ipc_control_data);
+ scontrol->old_ipc_control_data = NULL;
+ /* Send the last known good configuration to firmware */
+ ret = iops->set_get_data(sdev, cdata, cdata->rhdr.hdr.size, set);
+ if (ret < 0)
+ goto unlock;
+ }
+
+unlock:
+ if (lock)
+ mutex_unlock(&swidget->setup_mutex);
+
+ return ret;
+}
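+
+/*
+ * Size arithmetic example for the message built above: for a 2-channel
+ * volume control the payload is sizeof(struct sof_ipc_ctrl_data) +
+ * 2 * sizeof(struct sof_ipc_ctrl_value_chan); for a binary control it
+ * is sizeof(struct sof_ipc_ctrl_data) + sizeof(struct sof_abi_hdr) +
+ * cdata->data->size.
+ */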
+
+static void sof_ipc3_refresh_control(struct snd_sof_control *scontrol)
+{
+ struct sof_ipc_ctrl_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ int ret;
+
+ if (!scontrol->comp_data_dirty)
+ return;
+
+ if (!pm_runtime_active(scomp->dev))
+ return;
+
+ /* set the ABI header values */
+ cdata->data->magic = SOF_ABI_MAGIC;
+ cdata->data->abi = SOF_ABI_VERSION;
+
+ /* refresh the component data from DSP */
+ scontrol->comp_data_dirty = false;
+ ret = sof_ipc3_set_get_kcontrol_data(scontrol, false, true);
+ if (ret < 0) {
+ dev_err(scomp->dev, "Failed to get control data: %d\n", ret);
+
+ /* Set the flag to re-try next time to get the data */
+ scontrol->comp_data_dirty = true;
+ }
+}
+
+static int sof_ipc3_volume_get(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc_ctrl_data *cdata = scontrol->ipc_control_data;
+ unsigned int channels = scontrol->num_channels;
+ unsigned int i;
+
+ sof_ipc3_refresh_control(scontrol);
+
+ /* read back each channel */
+ for (i = 0; i < channels; i++)
+ ucontrol->value.integer.value[i] = ipc_to_mixer(cdata->chanv[i].value,
+ scontrol->volume_table,
+ scontrol->max + 1);
+
+ return 0;
+}
+
+static bool sof_ipc3_volume_put(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc_ctrl_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ unsigned int channels = scontrol->num_channels;
+ unsigned int i;
+ bool change = false;
+
+ /* update each channel */
+ for (i = 0; i < channels; i++) {
+ u32 value = mixer_to_ipc(ucontrol->value.integer.value[i],
+ scontrol->volume_table, scontrol->max + 1);
+
+ change = change || (value != cdata->chanv[i].value);
+ cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
+ }
+
+ /* notify DSP of mixer updates */
+ if (pm_runtime_active(scomp->dev)) {
+ int ret = sof_ipc3_set_get_kcontrol_data(scontrol, true, true);
+
+ if (ret < 0) {
+ dev_err(scomp->dev, "Failed to set mixer updates for %s\n",
+ scontrol->name);
+ return false;
+ }
+ }
+
+ return change;
+}
+
+static int sof_ipc3_switch_get(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc_ctrl_data *cdata = scontrol->ipc_control_data;
+ unsigned int channels = scontrol->num_channels;
+ unsigned int i;
+
+ sof_ipc3_refresh_control(scontrol);
+
+ /* read back each channel */
+ for (i = 0; i < channels; i++)
+ ucontrol->value.integer.value[i] = cdata->chanv[i].value;
+
+ return 0;
+}
+
+static bool sof_ipc3_switch_put(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc_ctrl_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ unsigned int channels = scontrol->num_channels;
+ unsigned int i;
+ bool change = false;
+ u32 value;
+
+ /* update each channel */
+ for (i = 0; i < channels; i++) {
+ value = ucontrol->value.integer.value[i];
+ change = change || (value != cdata->chanv[i].value);
+ cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
+ }
+
+ /* notify DSP of mixer updates */
+ if (pm_runtime_active(scomp->dev)) {
+ int ret = sof_ipc3_set_get_kcontrol_data(scontrol, true, true);
+
+ if (ret < 0) {
+ dev_err(scomp->dev, "Failed to set mixer updates for %s\n",
+ scontrol->name);
+ return false;
+ }
+ }
+
+ return change;
+}
+
+static int sof_ipc3_enum_get(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc_ctrl_data *cdata = scontrol->ipc_control_data;
+ unsigned int channels = scontrol->num_channels;
+ unsigned int i;
+
+ sof_ipc3_refresh_control(scontrol);
+
+ /* read back each channel */
+ for (i = 0; i < channels; i++)
+ ucontrol->value.enumerated.item[i] = cdata->chanv[i].value;
+
+ return 0;
+}
+
+static bool sof_ipc3_enum_put(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc_ctrl_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ unsigned int channels = scontrol->num_channels;
+ unsigned int i;
+ bool change = false;
+ u32 value;
+
+ /* update each channel */
+ for (i = 0; i < channels; i++) {
+ value = ucontrol->value.enumerated.item[i];
+ change = change || (value != cdata->chanv[i].value);
+ cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
+ }
+
+ /* notify DSP of enum updates */
+ if (pm_runtime_active(scomp->dev)) {
+ int ret = sof_ipc3_set_get_kcontrol_data(scontrol, true, true);
+
+ if (ret < 0) {
+ dev_err(scomp->dev, "Failed to set enum updates for %s\n",
+ scontrol->name);
+ return false;
+ }
+ }
+
+ return change;
+}
+
+static int sof_ipc3_bytes_get(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc_ctrl_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_abi_hdr *data = cdata->data;
+ size_t size;
+
+ sof_ipc3_refresh_control(scontrol);
+
+ if (scontrol->max_size > sizeof(ucontrol->value.bytes.data)) {
+ dev_err_ratelimited(scomp->dev, "data max %zu exceeds ucontrol data array size\n",
+ scontrol->max_size);
+ return -EINVAL;
+ }
+
+ /* be->max has been verified to be >= sizeof(struct sof_abi_hdr) */
+ if (data->size > scontrol->max_size - sizeof(*data)) {
+ dev_err_ratelimited(scomp->dev,
+ "%u bytes of control data is invalid, max is %zu\n",
+ data->size, scontrol->max_size - sizeof(*data));
+ return -EINVAL;
+ }
+
+ size = data->size + sizeof(*data);
+
+ /* copy back to kcontrol */
+ memcpy(ucontrol->value.bytes.data, data, size);
+
+ return 0;
+}
+
+static int sof_ipc3_bytes_put(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc_ctrl_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_abi_hdr *data = cdata->data;
+ size_t size;
+
+ if (scontrol->max_size > sizeof(ucontrol->value.bytes.data)) {
+ dev_err_ratelimited(scomp->dev, "data max %zu exceeds ucontrol data array size\n",
+ scontrol->max_size);
+ return -EINVAL;
+ }
+
+ /* scontrol->max_size has been verified to be >= sizeof(struct sof_abi_hdr) */
+ if (data->size > scontrol->max_size - sizeof(*data)) {
+ dev_err_ratelimited(scomp->dev, "data size too big %u bytes max is %zu\n",
+ data->size, scontrol->max_size - sizeof(*data));
+ return -EINVAL;
+ }
+
+ size = data->size + sizeof(*data);
+
+ /* copy from kcontrol */
+ memcpy(data, ucontrol->value.bytes.data, size);
+
+ /* notify DSP of byte control updates */
+ if (pm_runtime_active(scomp->dev))
+ return sof_ipc3_set_get_kcontrol_data(scontrol, true, true);
+
+ return 0;
+}
+
+static int sof_ipc3_bytes_ext_put(struct snd_sof_control *scontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size)
+{
+ const struct snd_ctl_tlv __user *tlvd = (const struct snd_ctl_tlv __user *)binary_data;
+ struct sof_ipc_ctrl_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_ctl_tlv header;
+ int ret = -EINVAL;
+
+	/*
+	 * The bytes data starts with a snd_ctl_tlv header; its length
+	 * field (in bytes) tells us how much data to copy from
+	 * tlvd->tlv.
+	 */
+ if (copy_from_user(&header, tlvd, sizeof(struct snd_ctl_tlv)))
+ return -EFAULT;
+
+ /* make sure TLV info is consistent */
+ if (header.length + sizeof(struct snd_ctl_tlv) > size) {
+ dev_err_ratelimited(scomp->dev, "Inconsistent TLV, data %d + header %zu > %d\n",
+ header.length, sizeof(struct snd_ctl_tlv), size);
+ return -EINVAL;
+ }
+
+ /* be->max is coming from topology */
+ if (header.length > scontrol->max_size) {
+ dev_err_ratelimited(scomp->dev, "Bytes data size %d exceeds max %zu\n",
+ header.length, scontrol->max_size);
+ return -EINVAL;
+ }
+
+ /* Check that header id matches the command */
+ if (header.numid != cdata->cmd) {
+ dev_err_ratelimited(scomp->dev, "Incorrect command for bytes put %d\n",
+ header.numid);
+ return -EINVAL;
+ }
+
+ if (!scontrol->old_ipc_control_data) {
+ /* Create a backup of the current, valid bytes control */
+ scontrol->old_ipc_control_data = kmemdup(scontrol->ipc_control_data,
+ scontrol->max_size, GFP_KERNEL);
+ if (!scontrol->old_ipc_control_data)
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(cdata->data, tlvd->tlv, header.length)) {
+ ret = -EFAULT;
+ goto err_restore;
+ }
+
+ if (cdata->data->magic != SOF_ABI_MAGIC) {
+ dev_err_ratelimited(scomp->dev, "Wrong ABI magic 0x%08x\n", cdata->data->magic);
+ goto err_restore;
+ }
+
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION, cdata->data->abi)) {
+ dev_err_ratelimited(scomp->dev, "Incompatible ABI version 0x%08x\n",
+ cdata->data->abi);
+ goto err_restore;
+ }
+
+ /* be->max has been verified to be >= sizeof(struct sof_abi_hdr) */
+ if (cdata->data->size > scontrol->max_size - sizeof(struct sof_abi_hdr)) {
+ dev_err_ratelimited(scomp->dev, "Mismatch in ABI data size (truncated?)\n");
+ goto err_restore;
+ }
+
+ /* notify DSP of byte control updates */
+ if (pm_runtime_active(scomp->dev)) {
+ /* Actually send the data to the DSP; this is an opportunity to validate the data */
+ return sof_ipc3_set_get_kcontrol_data(scontrol, true, true);
+ }
+
+ return 0;
+
+err_restore:
+ /* If we have an issue, we restore the old, valid bytes control data */
+ if (scontrol->old_ipc_control_data) {
+ memcpy(cdata->data, scontrol->old_ipc_control_data, scontrol->max_size);
+ kfree(scontrol->old_ipc_control_data);
+ scontrol->old_ipc_control_data = NULL;
+ }
+ return ret;
+}
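+
+/*
+ * Layout sketch of the user-space buffer parsed above:
+ *
+ *	struct snd_ctl_tlv	.numid = control command (SOF_CTRL_CMD_BINARY),
+ *				.length = number of bytes that follow
+ *	struct sof_abi_hdr	ABI magic/version and .size of the payload
+ *	u8 data[abi_hdr.size]	component-specific blob
+ *
+ * header.length therefore covers the ABI header plus the blob and must
+ * not exceed scontrol->max_size.
+ */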
+
+static int _sof_ipc3_bytes_ext_get(struct snd_sof_control *scontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size, bool from_dsp)
+{
+ struct snd_ctl_tlv __user *tlvd = (struct snd_ctl_tlv __user *)binary_data;
+ struct sof_ipc_ctrl_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_ctl_tlv header;
+ size_t data_size;
+
+ /*
+ * Decrement the limit by ext bytes header size to
+ * ensure the user space buffer is not exceeded.
+ */
+ if (size < sizeof(struct snd_ctl_tlv))
+ return -ENOSPC;
+
+ size -= sizeof(struct snd_ctl_tlv);
+
+ /* set the ABI header values */
+ cdata->data->magic = SOF_ABI_MAGIC;
+ cdata->data->abi = SOF_ABI_VERSION;
+
+ /* get all the component data from DSP */
+ if (from_dsp) {
+ int ret = sof_ipc3_set_get_kcontrol_data(scontrol, false, true);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ /* check data size doesn't exceed max coming from topology */
+ if (cdata->data->size > scontrol->max_size - sizeof(struct sof_abi_hdr)) {
+ dev_err_ratelimited(scomp->dev, "User data size %d exceeds max size %zu\n",
+ cdata->data->size,
+ scontrol->max_size - sizeof(struct sof_abi_hdr));
+ return -EINVAL;
+ }
+
+ data_size = cdata->data->size + sizeof(struct sof_abi_hdr);
+
+ /* make sure we don't exceed size provided by user space for data */
+ if (data_size > size)
+ return -ENOSPC;
+
+ header.numid = cdata->cmd;
+ header.length = data_size;
+ if (copy_to_user(tlvd, &header, sizeof(struct snd_ctl_tlv)))
+ return -EFAULT;
+
+ if (copy_to_user(tlvd->tlv, cdata->data, data_size))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sof_ipc3_bytes_ext_get(struct snd_sof_control *scontrol,
+ const unsigned int __user *binary_data, unsigned int size)
+{
+ return _sof_ipc3_bytes_ext_get(scontrol, binary_data, size, false);
+}
+
+static int sof_ipc3_bytes_ext_volatile_get(struct snd_sof_control *scontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size)
+{
+ return _sof_ipc3_bytes_ext_get(scontrol, binary_data, size, true);
+}
+
+static void snd_sof_update_control(struct snd_sof_control *scontrol,
+ struct sof_ipc_ctrl_data *cdata)
+{
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_ipc_ctrl_data *local_cdata;
+ int i;
+
+ local_cdata = scontrol->ipc_control_data;
+
+ if (cdata->cmd == SOF_CTRL_CMD_BINARY) {
+ if (cdata->num_elems != local_cdata->data->size) {
+ dev_err(scomp->dev, "cdata binary size mismatch %u - %u\n",
+ cdata->num_elems, local_cdata->data->size);
+ return;
+ }
+
+ /* copy the new binary data */
+ memcpy(local_cdata->data, cdata->data, cdata->num_elems);
+ } else if (cdata->num_elems != scontrol->num_channels) {
+ dev_err(scomp->dev, "cdata channel count mismatch %u - %d\n",
+ cdata->num_elems, scontrol->num_channels);
+ } else {
+ /* copy the new values */
+ for (i = 0; i < cdata->num_elems; i++)
+ local_cdata->chanv[i].value = cdata->chanv[i].value;
+ }
+}
+
+static void sof_ipc3_control_update(struct snd_sof_dev *sdev, void *ipc_control_message)
+{
+ struct sof_ipc_ctrl_data *cdata = ipc_control_message;
+ struct snd_soc_dapm_widget *widget;
+ struct snd_sof_control *scontrol;
+ struct snd_sof_widget *swidget;
+ struct snd_kcontrol *kc = NULL;
+ struct soc_mixer_control *sm;
+ struct soc_bytes_ext *be;
+ size_t expected_size;
+ struct soc_enum *se;
+ bool found = false;
+ int i, type;
+
+ if (cdata->type == SOF_CTRL_TYPE_VALUE_COMP_GET ||
+ cdata->type == SOF_CTRL_TYPE_VALUE_COMP_SET) {
+ dev_err(sdev->dev, "Component data is not supported in control notification\n");
+ return;
+ }
+
+ /* Find the swidget first */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (swidget->comp_id == cdata->comp_id) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return;
+
+ /* Translate SOF cmd to TPLG type */
+ switch (cdata->cmd) {
+ case SOF_CTRL_CMD_VOLUME:
+ case SOF_CTRL_CMD_SWITCH:
+ type = SND_SOC_TPLG_TYPE_MIXER;
+ break;
+ case SOF_CTRL_CMD_BINARY:
+ type = SND_SOC_TPLG_TYPE_BYTES;
+ break;
+ case SOF_CTRL_CMD_ENUM:
+ type = SND_SOC_TPLG_TYPE_ENUM;
+ break;
+ default:
+ dev_err(sdev->dev, "Unknown cmd %u in %s\n", cdata->cmd, __func__);
+ return;
+ }
+
+ widget = swidget->widget;
+ for (i = 0; i < widget->num_kcontrols; i++) {
+ /* skip non matching types or non matching indexes within type */
+ if (widget->dobj.widget.kcontrol_type[i] == type &&
+ widget->kcontrol_news[i].index == cdata->index) {
+ kc = widget->kcontrols[i];
+ break;
+ }
+ }
+
+ if (!kc)
+ return;
+
+ switch (cdata->cmd) {
+ case SOF_CTRL_CMD_VOLUME:
+ case SOF_CTRL_CMD_SWITCH:
+ sm = (struct soc_mixer_control *)kc->private_value;
+ scontrol = sm->dobj.private;
+ break;
+ case SOF_CTRL_CMD_BINARY:
+ be = (struct soc_bytes_ext *)kc->private_value;
+ scontrol = be->dobj.private;
+ break;
+ case SOF_CTRL_CMD_ENUM:
+ se = (struct soc_enum *)kc->private_value;
+ scontrol = se->dobj.private;
+ break;
+ default:
+ return;
+ }
+
+ expected_size = sizeof(struct sof_ipc_ctrl_data);
+ switch (cdata->type) {
+ case SOF_CTRL_TYPE_VALUE_CHAN_GET:
+ case SOF_CTRL_TYPE_VALUE_CHAN_SET:
+ expected_size += cdata->num_elems *
+ sizeof(struct sof_ipc_ctrl_value_chan);
+ break;
+ case SOF_CTRL_TYPE_DATA_GET:
+ case SOF_CTRL_TYPE_DATA_SET:
+ expected_size += cdata->num_elems + sizeof(struct sof_abi_hdr);
+ break;
+ default:
+ return;
+ }
+
+ if (cdata->rhdr.hdr.size != expected_size) {
+ dev_err(sdev->dev, "Component notification size mismatch\n");
+ return;
+ }
+
+ if (cdata->num_elems)
+ /*
+ * The message includes the updated value/data, update the
+ * control's local cache using the received notification
+ */
+ snd_sof_update_control(scontrol, cdata);
+ else
+ /* Mark the scontrol that the value/data is changed in SOF */
+ scontrol->comp_data_dirty = true;
+
+ snd_ctl_notify_one(swidget->scomp->card->snd_card, SNDRV_CTL_EVENT_MASK_VALUE, kc, 0);
+}
+
+static int sof_ipc3_widget_kcontrol_setup(struct snd_sof_dev *sdev,
+ struct snd_sof_widget *swidget)
+{
+ struct snd_sof_control *scontrol;
+ int ret;
+
+ /* set up all controls for the widget */
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list)
+ if (scontrol->comp_id == swidget->comp_id) {
+ /* set kcontrol data in DSP */
+ ret = sof_ipc3_set_get_kcontrol_data(scontrol, true, false);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "kcontrol %d set up failed for widget %s\n",
+ scontrol->comp_id, swidget->widget->name);
+ return ret;
+ }
+
+ /*
+ * Read back the data from the DSP for static widgets.
+ * This is particularly useful for binary kcontrols
+ * associated with static pipeline widgets to initialize
+ * the data size to match that in the DSP.
+ */
+ if (swidget->dynamic_pipeline_widget)
+ continue;
+
+ ret = sof_ipc3_set_get_kcontrol_data(scontrol, false, false);
+ if (ret < 0)
+ dev_warn(sdev->dev,
+ "kcontrol %d read failed for widget %s\n",
+ scontrol->comp_id, swidget->widget->name);
+ }
+
+ return 0;
+}
+
+static int
+sof_ipc3_set_up_volume_table(struct snd_sof_control *scontrol, int tlv[SOF_TLV_ITEMS], int size)
+{
+ int i;
+
+ /* init the volume table */
+ scontrol->volume_table = kcalloc(size, sizeof(u32), GFP_KERNEL);
+ if (!scontrol->volume_table)
+ return -ENOMEM;
+
+ /* populate the volume table */
+ for (i = 0; i < size ; i++)
+ scontrol->volume_table[i] = vol_compute_gain(i, tlv);
+
+ return 0;
+}
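+
+/*
+ * The table maps each kcontrol step i to a fixed-point linear gain
+ * derived from the dB range declared in the topology TLV, so the
+ * volume_put()/volume_get() handlers above reduce to table lookups via
+ * mixer_to_ipc()/ipc_to_mixer() instead of recomputing gains at runtime.
+ */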
+
+const struct sof_ipc_tplg_control_ops tplg_ipc3_control_ops = {
+ .volume_put = sof_ipc3_volume_put,
+ .volume_get = sof_ipc3_volume_get,
+ .switch_put = sof_ipc3_switch_put,
+ .switch_get = sof_ipc3_switch_get,
+ .enum_put = sof_ipc3_enum_put,
+ .enum_get = sof_ipc3_enum_get,
+ .bytes_put = sof_ipc3_bytes_put,
+ .bytes_get = sof_ipc3_bytes_get,
+ .bytes_ext_put = sof_ipc3_bytes_ext_put,
+ .bytes_ext_get = sof_ipc3_bytes_ext_get,
+ .bytes_ext_volatile_get = sof_ipc3_bytes_ext_volatile_get,
+ .update = sof_ipc3_control_update,
+ .widget_kcontrol_setup = sof_ipc3_widget_kcontrol_setup,
+ .set_up_volume_table = sof_ipc3_set_up_volume_table,
+};
diff --git a/sound/soc/sof/ipc3-dtrace.c b/sound/soc/sof/ipc3-dtrace.c
new file mode 100644
index 000000000..bd07f0472
--- /dev/null
+++ b/sound/soc/sof/ipc3-dtrace.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+
+#include <linux/debugfs.h>
+#include <linux/sched/signal.h>
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ops.h"
+#include "sof-utils.h"
+#include "ipc3-priv.h"
+
+#define TRACE_FILTER_ELEMENTS_PER_ENTRY 4
+#define TRACE_FILTER_MAX_CONFIG_STRING_LENGTH 1024
+
+enum sof_dtrace_state {
+ SOF_DTRACE_DISABLED,
+ SOF_DTRACE_STOPPED,
+ SOF_DTRACE_INITIALIZING,
+ SOF_DTRACE_ENABLED,
+};
+
+struct sof_dtrace_priv {
+ struct snd_dma_buffer dmatb;
+ struct snd_dma_buffer dmatp;
+ int dma_trace_pages;
+ wait_queue_head_t trace_sleep;
+ u32 host_offset;
+ bool dtrace_error;
+ bool dtrace_draining;
+ enum sof_dtrace_state dtrace_state;
+};
+
+static bool trace_pos_update_expected(struct sof_dtrace_priv *priv)
+{
+ if (priv->dtrace_state == SOF_DTRACE_ENABLED ||
+ priv->dtrace_state == SOF_DTRACE_INITIALIZING)
+ return true;
+
+ return false;
+}
+
+static int trace_filter_append_elem(struct snd_sof_dev *sdev, u32 key, u32 value,
+ struct sof_ipc_trace_filter_elem *elem_list,
+ int capacity, int *counter)
+{
+ if (*counter >= capacity)
+ return -ENOMEM;
+
+ elem_list[*counter].key = key;
+ elem_list[*counter].value = value;
+ ++*counter;
+
+ return 0;
+}
+
+static int trace_filter_parse_entry(struct snd_sof_dev *sdev, const char *line,
+ struct sof_ipc_trace_filter_elem *elem,
+ int capacity, int *counter)
+{
+ int log_level, pipe_id, comp_id, read, ret;
+ int len = strlen(line);
+ int cnt = *counter;
+ u32 uuid_id;
+
+ /* ignore empty content */
+ ret = sscanf(line, " %n", &read);
+ if (!ret && read == len)
+ return len;
+
+ ret = sscanf(line, " %d %x %d %d %n", &log_level, &uuid_id, &pipe_id, &comp_id, &read);
+ if (ret != TRACE_FILTER_ELEMENTS_PER_ENTRY || read != len) {
+ dev_err(sdev->dev, "Invalid trace filter entry '%s'\n", line);
+ return -EINVAL;
+ }
+
+ if (uuid_id > 0) {
+ ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_UUID,
+ uuid_id, elem, capacity, &cnt);
+ if (ret)
+ return ret;
+ }
+ if (pipe_id >= 0) {
+ ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_PIPE,
+ pipe_id, elem, capacity, &cnt);
+ if (ret)
+ return ret;
+ }
+ if (comp_id >= 0) {
+ ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_COMP,
+ comp_id, elem, capacity, &cnt);
+ if (ret)
+ return ret;
+ }
+
+ ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_SET_LEVEL |
+ SOF_IPC_TRACE_FILTER_ELEM_FIN,
+ log_level, elem, capacity, &cnt);
+ if (ret)
+ return ret;
+
+ /* update counter only when parsing whole entry passed */
+ *counter = cnt;
+
+ return len;
+}
+
+static int trace_filter_parse(struct snd_sof_dev *sdev, char *string,
+ int *out_elem_cnt,
+ struct sof_ipc_trace_filter_elem **out)
+{
+ static const char entry_delimiter[] = ";";
+ char *entry = string;
+ int capacity = 0;
+ int entry_len;
+ int cnt = 0;
+
+	/*
+	 * Each entry contains between 1 and TRACE_FILTER_ELEMENTS_PER_ENTRY
+	 * IPC elements, depending on its content. Calculate the IPC element
+	 * capacity for the input string assuming every element is set.
+	 */
+ while (entry) {
+ capacity += TRACE_FILTER_ELEMENTS_PER_ENTRY;
+ entry = strchr(entry + 1, entry_delimiter[0]);
+ }
+ *out = kmalloc(capacity * sizeof(**out), GFP_KERNEL);
+ if (!*out)
+ return -ENOMEM;
+
+ /* split input string by ';', and parse each entry separately in trace_filter_parse_entry */
+ while ((entry = strsep(&string, entry_delimiter))) {
+ entry_len = trace_filter_parse_entry(sdev, entry, *out, capacity, &cnt);
+ if (entry_len < 0) {
+ dev_err(sdev->dev,
+ "Parsing filter entry '%s' failed with %d\n",
+ entry, entry_len);
+ return -EINVAL;
+ }
+ }
+
+ *out_elem_cnt = cnt;
+
+ return 0;
+}
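+
+/*
+ * Filter string example (illustrative values): writing
+ *
+ *	"3 0x2b 1 4; 1 0 -1 -1"
+ *
+ * to the debugfs "filter" file requests log level 3 for UUID key 0x2b,
+ * pipeline 1, component 4, while the second entry carries only the
+ * SET_LEVEL element (level 1) since its UUID/pipeline/component fields
+ * are unset. With one ';' separator the capacity computed above is
+ * 2 * TRACE_FILTER_ELEMENTS_PER_ENTRY = 8 elements, of which 5 are used.
+ */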
+
+static int ipc3_trace_update_filter(struct snd_sof_dev *sdev, int num_elems,
+ struct sof_ipc_trace_filter_elem *elems)
+{
+ struct sof_ipc_trace_filter *msg;
+ size_t size;
+ int ret;
+
+ size = struct_size(msg, elems, num_elems);
+ if (size > SOF_IPC_MSG_MAX_SIZE)
+ return -EINVAL;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->hdr.size = size;
+ msg->hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_FILTER_UPDATE;
+ msg->elem_cnt = num_elems;
+ memcpy(&msg->elems[0], elems, num_elems * sizeof(*elems));
+
+ ret = pm_runtime_resume_and_get(sdev->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err(sdev->dev, "enabling device failed: %d\n", ret);
+ goto error;
+ }
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, msg, msg->hdr.size);
+ pm_runtime_mark_last_busy(sdev->dev);
+ pm_runtime_put_autosuspend(sdev->dev);
+
+error:
+ kfree(msg);
+ return ret;
+}
+
+static ssize_t dfsentry_trace_filter_write(struct file *file, const char __user *from,
+ size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct sof_ipc_trace_filter_elem *elems = NULL;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ int num_elems;
+ char *string;
+ int ret;
+
+ if (count > TRACE_FILTER_MAX_CONFIG_STRING_LENGTH) {
+ dev_err(sdev->dev, "%s too long input, %zu > %d\n", __func__, count,
+ TRACE_FILTER_MAX_CONFIG_STRING_LENGTH);
+ return -EINVAL;
+ }
+
+ string = memdup_user_nul(from, count);
+ if (IS_ERR(string))
+ return PTR_ERR(string);
+
+ ret = trace_filter_parse(sdev, string, &num_elems, &elems);
+ if (ret < 0)
+ goto error;
+
+ if (num_elems) {
+ ret = ipc3_trace_update_filter(sdev, num_elems, elems);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Filter update failed: %d\n", ret);
+ goto error;
+ }
+ }
+ ret = count;
+error:
+ kfree(string);
+ kfree(elems);
+ return ret;
+}
+
+static const struct file_operations sof_dfs_trace_filter_fops = {
+ .open = simple_open,
+ .write = dfsentry_trace_filter_write,
+ .llseek = default_llseek,
+};
+
+static int debugfs_create_trace_filter(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
+ if (!dfse)
+ return -ENOMEM;
+
+ dfse->sdev = sdev;
+ dfse->type = SOF_DFSENTRY_TYPE_BUF;
+
+ debugfs_create_file("filter", 0200, sdev->debugfs_root, dfse,
+ &sof_dfs_trace_filter_fops);
+ /* add to dfsentry list */
+ list_add(&dfse->list, &sdev->dfsentry_list);
+
+ return 0;
+}
+
+static bool sof_dtrace_set_host_offset(struct sof_dtrace_priv *priv, u32 new_offset)
+{
+ u32 host_offset = READ_ONCE(priv->host_offset);
+
+ if (host_offset != new_offset) {
+		/* This is a bit paranoid and unlikely to be needed */
+ u32 ret = cmpxchg(&priv->host_offset, host_offset, new_offset);
+
+ if (ret == host_offset)
+ return true;
+ }
+
+ return false;
+}
+
+static size_t sof_dtrace_avail(struct snd_sof_dev *sdev,
+ loff_t pos, size_t buffer_size)
+{
+ struct sof_dtrace_priv *priv = sdev->fw_trace_data;
+ loff_t host_offset = READ_ONCE(priv->host_offset);
+
+	/*
+	 * If the host offset is less than the local pos, the write pointer
+	 * of the host DMA buffer has wrapped. Output the trace data at the
+	 * end of the host DMA buffer first.
+	 */
+ if (host_offset < pos)
+ return buffer_size - pos;
+
+ /* If there is available trace data now, it is unnecessary to wait. */
+ if (host_offset > pos)
+ return host_offset - pos;
+
+ return 0;
+}
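+
+/*
+ * Worked example: with a 16 KiB buffer, a local read position of 12 KiB
+ * and a host write offset of 4 KiB, the host pointer has wrapped, so
+ * the 4 KiB at the tail of the buffer (16 KiB - 12 KiB) is reported
+ * first; the data at the start of the buffer is picked up on the next
+ * read once pos wraps back to 0.
+ */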
+
+static size_t sof_wait_dtrace_avail(struct snd_sof_dev *sdev, loff_t pos,
+ size_t buffer_size)
+{
+ size_t ret = sof_dtrace_avail(sdev, pos, buffer_size);
+ struct sof_dtrace_priv *priv = sdev->fw_trace_data;
+ wait_queue_entry_t wait;
+
+ /* data immediately available */
+ if (ret)
+ return ret;
+
+ if (priv->dtrace_draining && !trace_pos_update_expected(priv)) {
+ /*
+ * tracing has ended and all traces have been
+ * read by client, return EOF
+ */
+ priv->dtrace_draining = false;
+ return 0;
+ }
+
+ /* wait for available trace data from FW */
+ init_waitqueue_entry(&wait, current);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&priv->trace_sleep, &wait);
+
+ if (!signal_pending(current)) {
+ /* set timeout to max value, no error code */
+ schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+ }
+ remove_wait_queue(&priv->trace_sleep, &wait);
+
+ return sof_dtrace_avail(sdev, pos, buffer_size);
+}
+
+static ssize_t dfsentry_dtrace_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ struct sof_dtrace_priv *priv = sdev->fw_trace_data;
+ unsigned long rem;
+ loff_t lpos = *ppos;
+ size_t avail, buffer_size = dfse->size;
+ u64 lpos_64;
+
+ /* make sure we know about any failures on the DSP side */
+ priv->dtrace_error = false;
+
+ /* check pos and count */
+ if (lpos < 0)
+ return -EINVAL;
+ if (!count)
+ return 0;
+
+ /* check for buffer wrap and count overflow */
+ lpos_64 = lpos;
+ lpos = do_div(lpos_64, buffer_size);
+
+ /* get available count based on current host offset */
+ avail = sof_wait_dtrace_avail(sdev, lpos, buffer_size);
+ if (priv->dtrace_error) {
+ dev_err(sdev->dev, "trace IO error\n");
+ return -EIO;
+ }
+
+ /* no new trace data */
+ if (!avail)
+ return 0;
+
+ /* make sure count is <= avail */
+ if (count > avail)
+ count = avail;
+
+ /*
+ * make sure that all trace data is available for the CPU as the trace
+ * data buffer might be allocated from non consistent memory.
+ * Note: snd_dma_buffer_sync() is called for normal audio playback and
+ * capture streams also.
+ */
+ snd_dma_buffer_sync(&priv->dmatb, SNDRV_DMA_SYNC_CPU);
+ /* copy available trace data to debugfs */
+ rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
+ if (rem)
+ return -EFAULT;
+
+ *ppos += count;
+
+ /* move debugfs reading position */
+ return count;
+}
+
+static int dfsentry_dtrace_release(struct inode *inode, struct file *file)
+{
+ struct snd_sof_dfsentry *dfse = inode->i_private;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ struct sof_dtrace_priv *priv = sdev->fw_trace_data;
+
+ /* avoid duplicate traces at next open */
+ if (priv->dtrace_state != SOF_DTRACE_ENABLED)
+ sof_dtrace_set_host_offset(priv, 0);
+
+ return 0;
+}
+
+static const struct file_operations sof_dfs_dtrace_fops = {
+ .open = simple_open,
+ .read = dfsentry_dtrace_read,
+ .llseek = default_llseek,
+ .release = dfsentry_dtrace_release,
+};
+
+static int debugfs_create_dtrace(struct snd_sof_dev *sdev)
+{
+ struct sof_dtrace_priv *priv;
+ struct snd_sof_dfsentry *dfse;
+ int ret;
+
+ if (!sdev)
+ return -EINVAL;
+
+ priv = sdev->fw_trace_data;
+
+ ret = debugfs_create_trace_filter(sdev);
+ if (ret < 0)
+ dev_warn(sdev->dev, "failed to create filter debugfs file: %d", ret);
+
+ dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
+ if (!dfse)
+ return -ENOMEM;
+
+ dfse->type = SOF_DFSENTRY_TYPE_BUF;
+ dfse->buf = priv->dmatb.area;
+ dfse->size = priv->dmatb.bytes;
+ dfse->sdev = sdev;
+
+ debugfs_create_file("trace", 0444, sdev->debugfs_root, dfse,
+ &sof_dfs_dtrace_fops);
+
+ return 0;
+}
+
+static int ipc3_dtrace_enable(struct snd_sof_dev *sdev)
+{
+ struct sof_dtrace_priv *priv = sdev->fw_trace_data;
+ struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
+ struct sof_ipc_fw_version *v = &ready->version;
+ struct sof_ipc_dma_trace_params_ext params;
+ int ret;
+
+ if (!sdev->fw_trace_is_supported)
+ return 0;
+
+ if (priv->dtrace_state == SOF_DTRACE_ENABLED || !priv->dma_trace_pages)
+ return -EINVAL;
+
+ if (priv->dtrace_state == SOF_DTRACE_STOPPED)
+ goto start;
+
+ /* set IPC parameters */
+ params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG;
+ /* PARAMS_EXT is only supported from ABI 3.7.0 onwards */
+ if (v->abi_version >= SOF_ABI_VER(3, 7, 0)) {
+ params.hdr.size = sizeof(struct sof_ipc_dma_trace_params_ext);
+ params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS_EXT;
+ params.timestamp_ns = ktime_get(); /* in nanosecond */
+ } else {
+ params.hdr.size = sizeof(struct sof_ipc_dma_trace_params);
+ params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS;
+ }
+ params.buffer.phy_addr = priv->dmatp.addr;
+ params.buffer.size = priv->dmatb.bytes;
+ params.buffer.pages = priv->dma_trace_pages;
+ params.stream_tag = 0;
+
+ sof_dtrace_set_host_offset(priv, 0);
+ priv->dtrace_draining = false;
+
+ ret = sof_dtrace_host_init(sdev, &priv->dmatb, &params);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Host dtrace init failed: %d\n", ret);
+ return ret;
+ }
+ dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);
+
+ /* send IPC to the DSP */
+ priv->dtrace_state = SOF_DTRACE_INITIALIZING;
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, &params, sizeof(params));
+ if (ret < 0) {
+ dev_err(sdev->dev, "can't set params for DMA for trace %d\n", ret);
+ goto trace_release;
+ }
+
+start:
+ priv->dtrace_state = SOF_DTRACE_ENABLED;
+
+ ret = sof_dtrace_host_trigger(sdev, SNDRV_PCM_TRIGGER_START);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Host dtrace trigger start failed: %d\n", ret);
+ goto trace_release;
+ }
+
+ return 0;
+
+trace_release:
+ priv->dtrace_state = SOF_DTRACE_DISABLED;
+ sof_dtrace_host_release(sdev);
+ return ret;
+}
+
+static int ipc3_dtrace_init(struct snd_sof_dev *sdev)
+{
+ struct sof_dtrace_priv *priv;
+ int ret;
+
+ /* dtrace is only supported with SOF_IPC */
+ if (sdev->pdata->ipc_type != SOF_IPC)
+ return -EOPNOTSUPP;
+
+ if (sdev->fw_trace_data) {
+ dev_err(sdev->dev, "fw_trace_data has been already allocated\n");
+ return -EBUSY;
+ }
+
+ priv = devm_kzalloc(sdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ sdev->fw_trace_data = priv;
+
+	/* mark tracing as disabled before starting initialization */
+ priv->dtrace_state = SOF_DTRACE_DISABLED;
+
+ /* allocate trace page table buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
+ PAGE_SIZE, &priv->dmatp);
+ if (ret < 0) {
+ dev_err(sdev->dev, "can't alloc page table for trace %d\n", ret);
+ return ret;
+ }
+
+ /* allocate trace data buffer */
+ ret = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
+ DMA_FROM_DEVICE, DMA_BUF_SIZE_FOR_TRACE,
+ &priv->dmatb);
+ if (ret < 0) {
+ dev_err(sdev->dev, "can't alloc buffer for trace %d\n", ret);
+ goto page_err;
+ }
+
+ /* create compressed page table for audio firmware */
+ ret = snd_sof_create_page_table(sdev->dev, &priv->dmatb,
+ priv->dmatp.area, priv->dmatb.bytes);
+ if (ret < 0)
+ goto table_err;
+
+ priv->dma_trace_pages = ret;
+ dev_dbg(sdev->dev, "dma_trace_pages: %d\n", priv->dma_trace_pages);
+
+ if (sdev->first_boot) {
+ ret = debugfs_create_dtrace(sdev);
+ if (ret < 0)
+ goto table_err;
+ }
+
+ init_waitqueue_head(&priv->trace_sleep);
+
+ ret = ipc3_dtrace_enable(sdev);
+ if (ret < 0)
+ goto table_err;
+
+ return 0;
+table_err:
+ priv->dma_trace_pages = 0;
+ snd_dma_free_pages(&priv->dmatb);
+page_err:
+ snd_dma_free_pages(&priv->dmatp);
+ return ret;
+}
+
+int ipc3_dtrace_posn_update(struct snd_sof_dev *sdev,
+ struct sof_ipc_dma_trace_posn *posn)
+{
+ struct sof_dtrace_priv *priv = sdev->fw_trace_data;
+
+ if (!sdev->fw_trace_is_supported)
+ return 0;
+
+ if (trace_pos_update_expected(priv) &&
+ sof_dtrace_set_host_offset(priv, posn->host_offset))
+ wake_up(&priv->trace_sleep);
+
+ if (posn->overflow != 0)
+ dev_err(sdev->dev,
+ "DSP trace buffer overflow %u bytes. Total messages %d\n",
+ posn->overflow, posn->messages);
+
+ return 0;
+}
+
+/* an error has occurred within the DSP that prevents further trace */
+static void ipc3_dtrace_fw_crashed(struct snd_sof_dev *sdev)
+{
+ struct sof_dtrace_priv *priv = sdev->fw_trace_data;
+
+ if (priv->dtrace_state == SOF_DTRACE_ENABLED) {
+ priv->dtrace_error = true;
+ wake_up(&priv->trace_sleep);
+ }
+}
+
+static void ipc3_dtrace_release(struct snd_sof_dev *sdev, bool only_stop)
+{
+ struct sof_dtrace_priv *priv = sdev->fw_trace_data;
+ struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
+ struct sof_ipc_fw_version *v = &ready->version;
+ struct sof_ipc_cmd_hdr hdr;
+ int ret;
+
+ if (!sdev->fw_trace_is_supported || priv->dtrace_state == SOF_DTRACE_DISABLED)
+ return;
+
+ ret = sof_dtrace_host_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
+ if (ret < 0)
+ dev_err(sdev->dev, "Host dtrace trigger stop failed: %d\n", ret);
+ priv->dtrace_state = SOF_DTRACE_STOPPED;
+
+ /*
+ * stop and free trace DMA in the DSP. TRACE_DMA_FREE is only supported from
+ * ABI 3.20.0 onwards
+ */
+ if (v->abi_version >= SOF_ABI_VER(3, 20, 0)) {
+ hdr.size = sizeof(hdr);
+ hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_DMA_FREE;
+
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, &hdr, hdr.size);
+ if (ret < 0)
+ dev_err(sdev->dev, "DMA_TRACE_FREE failed with error: %d\n", ret);
+ }
+
+ if (only_stop)
+ goto out;
+
+ ret = sof_dtrace_host_release(sdev);
+ if (ret < 0)
+ dev_err(sdev->dev, "Host dtrace release failed %d\n", ret);
+
+ priv->dtrace_state = SOF_DTRACE_DISABLED;
+
+out:
+ priv->dtrace_draining = true;
+ wake_up(&priv->trace_sleep);
+}
+
+static void ipc3_dtrace_suspend(struct snd_sof_dev *sdev, pm_message_t pm_state)
+{
+ ipc3_dtrace_release(sdev, pm_state.event == SOF_DSP_PM_D0);
+}
+
+static int ipc3_dtrace_resume(struct snd_sof_dev *sdev)
+{
+ return ipc3_dtrace_enable(sdev);
+}
+
+static void ipc3_dtrace_free(struct snd_sof_dev *sdev)
+{
+ struct sof_dtrace_priv *priv = sdev->fw_trace_data;
+
+ /* release trace */
+ ipc3_dtrace_release(sdev, false);
+
+ if (priv->dma_trace_pages) {
+ snd_dma_free_pages(&priv->dmatb);
+ snd_dma_free_pages(&priv->dmatp);
+ priv->dma_trace_pages = 0;
+ }
+}
+
+const struct sof_ipc_fw_tracing_ops ipc3_dtrace_ops = {
+ .init = ipc3_dtrace_init,
+ .free = ipc3_dtrace_free,
+ .fw_crashed = ipc3_dtrace_fw_crashed,
+ .suspend = ipc3_dtrace_suspend,
+ .resume = ipc3_dtrace_resume,
+};
diff --git a/sound/soc/sof/ipc3-loader.c b/sound/soc/sof/ipc3-loader.c
new file mode 100644
index 000000000..28218766d
--- /dev/null
+++ b/sound/soc/sof/ipc3-loader.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+
+#include <linux/firmware.h>
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ipc3-priv.h"
+#include "ops.h"
+
+static int ipc3_fw_ext_man_get_version(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct sof_ext_man_fw_version *v =
+ container_of(hdr, struct sof_ext_man_fw_version, hdr);
+
+ memcpy(&sdev->fw_ready.version, &v->version, sizeof(v->version));
+ sdev->fw_ready.flags = v->flags;
+
+ /* log ABI versions and check FW compatibility */
+ return sof_ipc3_validate_fw_version(sdev);
+}
+
+static int ipc3_fw_ext_man_get_windows(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct sof_ext_man_window *w;
+
+ w = container_of(hdr, struct sof_ext_man_window, hdr);
+
+ return sof_ipc3_get_ext_windows(sdev, &w->ipc_window.ext_hdr);
+}
+
+static int ipc3_fw_ext_man_get_cc_info(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct sof_ext_man_cc_version *cc;
+
+ cc = container_of(hdr, struct sof_ext_man_cc_version, hdr);
+
+ return sof_ipc3_get_cc_info(sdev, &cc->cc_version.ext_hdr);
+}
+
+static int ipc3_fw_ext_man_get_dbg_abi_info(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct ext_man_dbg_abi *dbg_abi =
+ container_of(hdr, struct ext_man_dbg_abi, hdr);
+
+ if (sdev->first_boot)
+ dev_dbg(sdev->dev,
+ "Firmware: DBG_ABI %d:%d:%d\n",
+ SOF_ABI_VERSION_MAJOR(dbg_abi->dbg_abi.abi_dbg_version),
+ SOF_ABI_VERSION_MINOR(dbg_abi->dbg_abi.abi_dbg_version),
+ SOF_ABI_VERSION_PATCH(dbg_abi->dbg_abi.abi_dbg_version));
+
+ return 0;
+}
+
+static int ipc3_fw_ext_man_get_config_data(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct sof_ext_man_config_data *config =
+ container_of(hdr, struct sof_ext_man_config_data, hdr);
+ const struct sof_config_elem *elem;
+ int elems_counter;
+ int elems_size;
+ int ret = 0;
+ int i;
+
+ /* calculate elements counter */
+ elems_size = config->hdr.size - sizeof(struct sof_ext_man_elem_header);
+ elems_counter = elems_size / sizeof(struct sof_config_elem);
+
+ dev_dbg(sdev->dev, "manifest can hold up to %d config elements\n", elems_counter);
+
+ for (i = 0; i < elems_counter; ++i) {
+ elem = &config->elems[i];
+ dev_dbg(sdev->dev, "get index %d token %d val %d\n",
+ i, elem->token, elem->value);
+ switch (elem->token) {
+ case SOF_EXT_MAN_CONFIG_EMPTY:
+ /* unused memory space is zero filled - mapped to EMPTY elements */
+ break;
+ case SOF_EXT_MAN_CONFIG_IPC_MSG_SIZE:
+ /* TODO: use ipc msg size from config data */
+ break;
+ case SOF_EXT_MAN_CONFIG_MEMORY_USAGE_SCAN:
+ if (sdev->first_boot && elem->value)
+ ret = snd_sof_dbg_memory_info_init(sdev);
+ break;
+ default:
+ dev_info(sdev->dev,
+ "Unknown firmware configuration token %d value %d",
+ elem->token, elem->value);
+ break;
+ }
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "%s: processing failed for token %d value %#x, %d\n",
+ __func__, elem->token, elem->value, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t ipc3_fw_ext_man_size(struct snd_sof_dev *sdev, const struct firmware *fw)
+{
+ const struct sof_ext_man_header *head;
+
+ head = (struct sof_ext_man_header *)fw->data;
+
+	/*
+	 * Make sure the firmware is large enough to contain the extended
+	 * manifest header; this prevents reading beyond the allocated
+	 * memory through `head` in the following step.
+	 */
+ if (fw->size < sizeof(*head))
+ return -EINVAL;
+
+	/*
+	 * When the firmware starts with an extended manifest, its first
+	 * u32 must be equal to SOF_EXT_MAN_MAGIC_NUMBER.
+	 */
+ if (head->magic == SOF_EXT_MAN_MAGIC_NUMBER)
+ return head->full_size;
+
+	/* otherwise the given firmware does not have an extended manifest */
+ dev_dbg(sdev->dev, "Unexpected extended manifest magic number: %#x\n",
+ head->magic);
+ return 0;
+}
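+
+/*
+ * Return convention above: a negative value means the image is too
+ * small to inspect, 0 means no extended manifest is present, and a
+ * positive value is head->full_size, i.e. the number of bytes the
+ * caller must skip before the base firmware image begins.
+ */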
+
+static size_t sof_ipc3_fw_parse_ext_man(struct snd_sof_dev *sdev)
+{
+ const struct firmware *fw = sdev->basefw.fw;
+ const struct sof_ext_man_elem_header *elem_hdr;
+ const struct sof_ext_man_header *head;
+ ssize_t ext_man_size;
+ ssize_t remaining;
+ uintptr_t iptr;
+ int ret = 0;
+
+ head = (struct sof_ext_man_header *)fw->data;
+ remaining = head->full_size - head->header_size;
+ ext_man_size = ipc3_fw_ext_man_size(sdev, fw);
+
+ /* Assert firmware starts with extended manifest */
+ if (ext_man_size <= 0)
+ return ext_man_size;
+
+ /* incompatible version */
+ if (SOF_EXT_MAN_VERSION_INCOMPATIBLE(SOF_EXT_MAN_VERSION,
+ head->header_version)) {
+ dev_err(sdev->dev,
+ "extended manifest version %#x differ from used %#x\n",
+ head->header_version, SOF_EXT_MAN_VERSION);
+ return -EINVAL;
+ }
+
+ /* get first extended manifest element header */
+ iptr = (uintptr_t)fw->data + head->header_size;
+
+ while (remaining > sizeof(*elem_hdr)) {
+ elem_hdr = (struct sof_ext_man_elem_header *)iptr;
+
+ dev_dbg(sdev->dev, "found sof_ext_man header type %d size %#x\n",
+ elem_hdr->type, elem_hdr->size);
+
+ if (elem_hdr->size < sizeof(*elem_hdr) ||
+ elem_hdr->size > remaining) {
+ dev_err(sdev->dev,
+ "invalid sof_ext_man header size, type %d size %#x\n",
+ elem_hdr->type, elem_hdr->size);
+ return -EINVAL;
+ }
+
+ /* process structure data */
+ switch (elem_hdr->type) {
+ case SOF_EXT_MAN_ELEM_FW_VERSION:
+ ret = ipc3_fw_ext_man_get_version(sdev, elem_hdr);
+ break;
+ case SOF_EXT_MAN_ELEM_WINDOW:
+ ret = ipc3_fw_ext_man_get_windows(sdev, elem_hdr);
+ break;
+ case SOF_EXT_MAN_ELEM_CC_VERSION:
+ ret = ipc3_fw_ext_man_get_cc_info(sdev, elem_hdr);
+ break;
+ case SOF_EXT_MAN_ELEM_DBG_ABI:
+ ret = ipc3_fw_ext_man_get_dbg_abi_info(sdev, elem_hdr);
+ break;
+ case SOF_EXT_MAN_ELEM_CONFIG_DATA:
+ ret = ipc3_fw_ext_man_get_config_data(sdev, elem_hdr);
+ break;
+ case SOF_EXT_MAN_ELEM_PLATFORM_CONFIG_DATA:
+ ret = snd_sof_dsp_parse_platform_ext_manifest(sdev, elem_hdr);
+ break;
+ default:
+ dev_info(sdev->dev,
+ "unknown sof_ext_man header type %d size %#x\n",
+ elem_hdr->type, elem_hdr->size);
+ break;
+ }
+
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "failed to parse sof_ext_man header type %d size %#x\n",
+ elem_hdr->type, elem_hdr->size);
+ return ret;
+ }
+
+ remaining -= elem_hdr->size;
+ iptr += elem_hdr->size;
+ }
+
+ if (remaining) {
+ dev_err(sdev->dev, "error: sof_ext_man header is inconsistent\n");
+ return -EINVAL;
+ }
+
+ return ext_man_size;
+}
+
+/* generic module parser for memory-mapped DSPs */
+static int sof_ipc3_parse_module_memcpy(struct snd_sof_dev *sdev,
+ struct snd_sof_mod_hdr *module)
+{
+ struct snd_sof_blk_hdr *block;
+ int count, ret;
+ u32 offset;
+ size_t remaining;
+
+ dev_dbg(sdev->dev, "new module size %#x blocks %#x type %#x\n",
+ module->size, module->num_blocks, module->type);
+
+ block = (struct snd_sof_blk_hdr *)((u8 *)module + sizeof(*module));
+
+ /* module->size doesn't include header size */
+ remaining = module->size;
+ for (count = 0; count < module->num_blocks; count++) {
+ /* check for wrap */
+ if (remaining < sizeof(*block)) {
+ dev_err(sdev->dev, "not enough data remaining\n");
+ return -EINVAL;
+ }
+
+ /* minus header size of block */
+ remaining -= sizeof(*block);
+
+ if (block->size == 0) {
+ dev_warn(sdev->dev,
+ "warning: block %d size zero\n", count);
+ dev_warn(sdev->dev, " type %#x offset %#x\n",
+ block->type, block->offset);
+ continue;
+ }
+
+ switch (block->type) {
+ case SOF_FW_BLK_TYPE_RSRVD0:
+ case SOF_FW_BLK_TYPE_ROM...SOF_FW_BLK_TYPE_RSRVD14:
+ continue; /* not handled atm */
+ case SOF_FW_BLK_TYPE_IRAM:
+ case SOF_FW_BLK_TYPE_DRAM:
+ case SOF_FW_BLK_TYPE_SRAM:
+ offset = block->offset;
+ break;
+ default:
+ dev_err(sdev->dev, "%s: bad type %#x for block %#x\n",
+ __func__, block->type, count);
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev, "block %d type %#x size %#x ==> offset %#x\n",
+ count, block->type, block->size, offset);
+
+ /* checking block->size to avoid unaligned access */
+ if (block->size % sizeof(u32)) {
+ dev_err(sdev->dev, "%s: invalid block size %#x\n",
+ __func__, block->size);
+ return -EINVAL;
+ }
+ ret = snd_sof_dsp_block_write(sdev, block->type, offset,
+ block + 1, block->size);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: write to block type %#x failed\n",
+ __func__, block->type);
+ return ret;
+ }
+
+ if (remaining < block->size) {
+ dev_err(sdev->dev, "%s: not enough data remaining\n", __func__);
+ return -EINVAL;
+ }
+
+ /* minus body size of block */
+ remaining -= block->size;
+ /* next block */
+ block = (struct snd_sof_blk_hdr *)((u8 *)block + sizeof(*block)
+ + block->size);
+ }
+
+ return 0;
+}
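+
+/*
+ * Image layout walked by the parsers (sketch inferred from the parsing
+ * code above and below):
+ *
+ *	struct snd_sof_fw_header	sig, file_size, num_modules, abi
+ *	  struct snd_sof_mod_hdr	module 0: size, num_blocks, type
+ *	    struct snd_sof_blk_hdr	block 0: type, size, offset
+ *	      u8 data[block->size]	payload copied to DSP memory
+ *	    struct snd_sof_blk_hdr	block 1 ...
+ *	  struct snd_sof_mod_hdr	module 1 ...
+ *
+ * The function above walks the blocks of one module; the loader below
+ * walks the modules of the whole image, where module->size covers the
+ * blocks but not the module header itself.
+ */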
+
+static int sof_ipc3_load_fw_to_dsp(struct snd_sof_dev *sdev)
+{
+ u32 payload_offset = sdev->basefw.payload_offset;
+ const struct firmware *fw = sdev->basefw.fw;
+ struct snd_sof_fw_header *header;
+ struct snd_sof_mod_hdr *module;
+ int (*load_module)(struct snd_sof_dev *sof_dev, struct snd_sof_mod_hdr *hdr);
+ size_t remaining;
+ int ret, count;
+
+ if (!fw)
+ return -EINVAL;
+
+ header = (struct snd_sof_fw_header *)(fw->data + payload_offset);
+ load_module = sof_ops(sdev)->load_module;
+ if (!load_module) {
+ dev_dbg(sdev->dev, "Using generic module loading\n");
+ load_module = sof_ipc3_parse_module_memcpy;
+ } else {
+ dev_dbg(sdev->dev, "Using custom module loading\n");
+ }
+
+ /* parse each module */
+ module = (struct snd_sof_mod_hdr *)(fw->data + payload_offset + sizeof(*header));
+ remaining = fw->size - sizeof(*header) - payload_offset;
+ /* check for wrap */
+ if (remaining > fw->size) {
+ dev_err(sdev->dev, "%s: fw size smaller than header size\n", __func__);
+ return -EINVAL;
+ }
+
+ for (count = 0; count < header->num_modules; count++) {
+ /* check for wrap */
+ if (remaining < sizeof(*module)) {
+ dev_err(sdev->dev, "%s: not enough data for a module\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* minus header size of module */
+ remaining -= sizeof(*module);
+
+ /* module */
+ ret = load_module(sdev, module);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: invalid module %d\n", __func__, count);
+ return ret;
+ }
+
+ if (remaining < module->size) {
+ dev_err(sdev->dev, "%s: not enough data remaining\n", __func__);
+ return -EINVAL;
+ }
+
+ /* minus body size of module */
+ remaining -= module->size;
+ module = (struct snd_sof_mod_hdr *)((u8 *)module +
+ sizeof(*module) + module->size);
+ }
+
+ return 0;
+}
+
+static int sof_ipc3_validate_firmware(struct snd_sof_dev *sdev)
+{
+ u32 payload_offset = sdev->basefw.payload_offset;
+ const struct firmware *fw = sdev->basefw.fw;
+ struct snd_sof_fw_header *header;
+ size_t fw_size = fw->size - payload_offset;
+
+ if (fw->size <= payload_offset) {
+ dev_err(sdev->dev,
+ "firmware size must be greater than firmware offset\n");
+ return -EINVAL;
+ }
+
+ /* Read the header information from the data pointer */
+ header = (struct snd_sof_fw_header *)(fw->data + payload_offset);
+
+ /* verify FW sig */
+ if (strncmp(header->sig, SND_SOF_FW_SIG, SND_SOF_FW_SIG_SIZE) != 0) {
+ dev_err(sdev->dev, "invalid firmware signature\n");
+ return -EINVAL;
+ }
+
+ /* check size is valid */
+ if (fw_size != header->file_size + sizeof(*header)) {
+ dev_err(sdev->dev,
+ "invalid filesize mismatch got 0x%zx expected 0x%zx\n",
+ fw_size, header->file_size + sizeof(*header));
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev, "header size=0x%x modules=0x%x abi=0x%x size=%zu\n",
+ header->file_size, header->num_modules,
+ header->abi, sizeof(*header));
+
+ return 0;
+}
+
+const struct sof_ipc_fw_loader_ops ipc3_loader_ops = {
+ .validate = sof_ipc3_validate_firmware,
+ .parse_ext_manifest = sof_ipc3_fw_parse_ext_man,
+ .load_fw_to_dsp = sof_ipc3_load_fw_to_dsp,
+};
diff --git a/sound/soc/sof/ipc3-pcm.c b/sound/soc/sof/ipc3-pcm.c
new file mode 100644
index 000000000..cb58ee8c1
--- /dev/null
+++ b/sound/soc/sof/ipc3-pcm.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2021 Intel Corporation. All rights reserved.
+//
+//
+
+#include <sound/pcm_params.h>
+#include "ipc3-priv.h"
+#include "ops.h"
+#include "sof-priv.h"
+#include "sof-audio.h"
+
+static int sof_ipc3_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct sof_ipc_stream stream;
+ struct snd_sof_pcm *spcm;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ if (!spcm->prepared[substream->stream])
+ return 0;
+
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_FREE;
+ stream.comp_id = spcm->stream[substream->stream].comp_id;
+
+ /* send IPC to the DSP */
+ return sof_ipc_tx_message_no_reply(sdev->ipc, &stream, sizeof(stream));
+}
+
+static int sof_ipc3_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct sof_ipc_pcm_params_reply ipc_params_reply;
+ struct sof_ipc_pcm_params pcm;
+ struct snd_sof_pcm *spcm;
+ int ret;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ memset(&pcm, 0, sizeof(pcm));
+
+ /* number of pages should be rounded up */
+ pcm.params.buffer.pages = PFN_UP(runtime->dma_bytes);
+
+ /* set IPC PCM parameters */
+ pcm.hdr.size = sizeof(pcm);
+ pcm.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_PARAMS;
+ pcm.comp_id = spcm->stream[substream->stream].comp_id;
+ pcm.params.hdr.size = sizeof(pcm.params);
+ pcm.params.buffer.phy_addr = spcm->stream[substream->stream].page_table.addr;
+ pcm.params.buffer.size = runtime->dma_bytes;
+ pcm.params.direction = substream->stream;
+ pcm.params.sample_valid_bytes = params_width(params) >> 3;
+ pcm.params.buffer_fmt = SOF_IPC_BUFFER_INTERLEAVED;
+ pcm.params.rate = params_rate(params);
+ pcm.params.channels = params_channels(params);
+ pcm.params.host_period_bytes = params_period_bytes(params);
+
+ /* container size */
+ ret = snd_pcm_format_physical_width(params_format(params));
+ if (ret < 0)
+ return ret;
+ pcm.params.sample_container_bytes = ret >> 3;
+
+ /* format */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S16_LE;
+ break;
+ case SNDRV_PCM_FORMAT_S24:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S24_4LE;
+ break;
+ case SNDRV_PCM_FORMAT_S32:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S32_LE;
+ break;
+ case SNDRV_PCM_FORMAT_FLOAT:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_FLOAT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Update the IPC message with information from the platform */
+ pcm.params.stream_tag = platform_params->stream_tag;
+
+ if (platform_params->use_phy_address)
+ pcm.params.buffer.phy_addr = platform_params->phy_addr;
+
+ if (platform_params->no_ipc_position) {
+ /* For older ABIs set host_period_bytes to zero to inform
+ * FW we don't want position updates. Newer versions use
+ * no_stream_position for this purpose.
+ */
+ if (v->abi_version < SOF_ABI_VER(3, 10, 0))
+ pcm.params.host_period_bytes = 0;
+ else
+ pcm.params.no_stream_position = 1;
+ }
+
+ if (platform_params->cont_update_posn)
+ pcm.params.cont_update_posn = 1;
+
+ dev_dbg(component->dev, "stream_tag %d", pcm.params.stream_tag);
+
+ /* send hw_params IPC to the DSP */
+ ret = sof_ipc_tx_message(sdev->ipc, &pcm, sizeof(pcm),
+ &ipc_params_reply, sizeof(ipc_params_reply));
+ if (ret < 0) {
+ dev_err(component->dev, "HW params ipc failed for stream %d\n",
+ pcm.params.stream_tag);
+ return ret;
+ }
+
+ ret = snd_sof_set_stream_data_offset(sdev, &spcm->stream[substream->stream],
+ ipc_params_reply.posn_offset);
+ if (ret < 0) {
+ dev_err(component->dev, "%s: invalid stream data offset for PCM %d\n",
+ __func__, spcm->pcm.pcm_id);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int sof_ipc3_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct sof_ipc_stream stream;
+ struct snd_sof_pcm *spcm;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG;
+ stream.comp_id = spcm->stream[substream->stream].comp_id;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_PAUSE;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_RELEASE;
+ break;
+ case SNDRV_PCM_TRIGGER_START:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_START;
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ fallthrough;
+ case SNDRV_PCM_TRIGGER_STOP:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
+ break;
+ default:
+ dev_err(component->dev, "Unhandled trigger cmd %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* send IPC to the DSP */
+ return sof_ipc_tx_message_no_reply(sdev->ipc, &stream, sizeof(stream));
+}
+
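+/* select the SSP DAI config whose fsync rate matches the requested PCM rate, if any */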
+static void ssp_dai_config_pcm_params_match(struct snd_sof_dev *sdev, const char *link_name,
+ struct snd_pcm_hw_params *params)
+{
+ struct sof_ipc_dai_config *config;
+ struct snd_sof_dai *dai;
+ int i;
+
+ /*
+ * Search for all matching DAIs as we can have both playback and capture DAI
+ * associated with the same link.
+ */
+ list_for_each_entry(dai, &sdev->dai_list, list) {
+ if (!dai->name || strcmp(link_name, dai->name))
+ continue;
+ for (i = 0; i < dai->number_configs; i++) {
+ struct sof_dai_private_data *private = dai->private;
+
+ config = &private->dai_config[i];
+ if (config->ssp.fsync_rate == params_rate(params)) {
+ dev_dbg(sdev->dev, "DAI config %d matches pcm hw params\n", i);
+ dai->current_config = i;
+ break;
+ }
+ }
+ }
+}
+
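+/* fix up the BE DAI link format, rate and channels from the topology DAI config */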
+static int sof_ipc3_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
+ struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_sof_dai *dai = snd_sof_find_dai(component, (char *)rtd->dai_link->name);
+ struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct sof_dai_private_data *private;
+ struct snd_soc_dpcm *dpcm;
+
+ if (!dai) {
+ dev_err(component->dev, "%s: No DAI found with name %s\n", __func__,
+ rtd->dai_link->name);
+ return -EINVAL;
+ }
+
+ private = dai->private;
+ if (!private) {
+ dev_err(component->dev, "%s: No private data found for DAI %s\n", __func__,
+ rtd->dai_link->name);
+ return -EINVAL;
+ }
+
+ /* read format from topology */
+ snd_mask_none(fmt);
+
+ switch (private->comp_dai->config.frame_fmt) {
+ case SOF_IPC_FRAME_S16_LE:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ break;
+ case SOF_IPC_FRAME_S24_4LE:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ break;
+ case SOF_IPC_FRAME_S32_LE:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S32_LE);
+ break;
+ default:
+ dev_err(component->dev, "No available DAI format!\n");
+ return -EINVAL;
+ }
+
+ /* read rate and channels from topology */
+ switch (private->dai_config->type) {
+ case SOF_DAI_INTEL_SSP:
+ /* search for config to pcm params match, if not found use default */
+ ssp_dai_config_pcm_params_match(sdev, (char *)rtd->dai_link->name, params);
+
+ rate->min = private->dai_config[dai->current_config].ssp.fsync_rate;
+ rate->max = private->dai_config[dai->current_config].ssp.fsync_rate;
+ channels->min = private->dai_config[dai->current_config].ssp.tdm_slots;
+ channels->max = private->dai_config[dai->current_config].ssp.tdm_slots;
+
+ dev_dbg(component->dev, "rate_min: %d rate_max: %d\n", rate->min, rate->max);
+ dev_dbg(component->dev, "channels_min: %d channels_max: %d\n",
+ channels->min, channels->max);
+
+ break;
+ case SOF_DAI_INTEL_DMIC:
+ /* DMIC only supports 16 or 32 bit formats */
+ if (private->comp_dai->config.frame_fmt == SOF_IPC_FRAME_S24_4LE) {
+ dev_err(component->dev, "Invalid fmt %d for DAI type %d\n",
+ private->comp_dai->config.frame_fmt,
+ private->dai_config->type);
+ }
+ break;
+ case SOF_DAI_INTEL_HDA:
+ /*
+ * HDAudio does not follow the default trigger
+ * sequence due to firmware implementation
+ */
+ for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
+ struct snd_soc_pcm_runtime *fe = dpcm->fe;
+
+ fe->dai_link->trigger[SNDRV_PCM_STREAM_PLAYBACK] =
+ SND_SOC_DPCM_TRIGGER_POST;
+ }
+ break;
+ case SOF_DAI_INTEL_ALH:
+ /*
+ * The DAI may run with a different channel count than the
+ * front end, so take the DAI channel count from the topology
+ */
+ channels->min = private->dai_config->alh.channels;
+ channels->max = private->dai_config->alh.channels;
+ break;
+ case SOF_DAI_IMX_ESAI:
+ rate->min = private->dai_config->esai.fsync_rate;
+ rate->max = private->dai_config->esai.fsync_rate;
+ channels->min = private->dai_config->esai.tdm_slots;
+ channels->max = private->dai_config->esai.tdm_slots;
+
+ dev_dbg(component->dev, "rate_min: %d rate_max: %d\n", rate->min, rate->max);
+ dev_dbg(component->dev, "channels_min: %d channels_max: %d\n",
+ channels->min, channels->max);
+ break;
+ case SOF_DAI_MEDIATEK_AFE:
+ rate->min = private->dai_config->afe.rate;
+ rate->max = private->dai_config->afe.rate;
+ channels->min = private->dai_config->afe.channels;
+ channels->max = private->dai_config->afe.channels;
+
+ snd_mask_none(fmt);
+
+ switch (private->dai_config->afe.format) {
+ case SOF_IPC_FRAME_S16_LE:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ break;
+ case SOF_IPC_FRAME_S24_4LE:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ break;
+ case SOF_IPC_FRAME_S32_LE:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S32_LE);
+ break;
+ default:
+ dev_err(component->dev, "Not available format!\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(component->dev, "rate_min: %d rate_max: %d\n", rate->min, rate->max);
+ dev_dbg(component->dev, "channels_min: %d channels_max: %d\n",
+ channels->min, channels->max);
+ break;
+ case SOF_DAI_IMX_SAI:
+ rate->min = private->dai_config->sai.fsync_rate;
+ rate->max = private->dai_config->sai.fsync_rate;
+ channels->min = private->dai_config->sai.tdm_slots;
+ channels->max = private->dai_config->sai.tdm_slots;
+
+ dev_dbg(component->dev, "rate_min: %d rate_max: %d\n", rate->min, rate->max);
+ dev_dbg(component->dev, "channels_min: %d channels_max: %d\n",
+ channels->min, channels->max);
+ break;
+ case SOF_DAI_AMD_BT:
+ rate->min = private->dai_config->acpbt.fsync_rate;
+ rate->max = private->dai_config->acpbt.fsync_rate;
+ channels->min = private->dai_config->acpbt.tdm_slots;
+ channels->max = private->dai_config->acpbt.tdm_slots;
+
+ dev_dbg(component->dev,
+ "AMD_BT rate_min: %d rate_max: %d\n", rate->min, rate->max);
+ dev_dbg(component->dev, "AMD_BT channels_min: %d channels_max: %d\n",
+ channels->min, channels->max);
+ break;
+ case SOF_DAI_AMD_SP:
+ case SOF_DAI_AMD_SP_VIRTUAL:
+ rate->min = private->dai_config->acpsp.fsync_rate;
+ rate->max = private->dai_config->acpsp.fsync_rate;
+ channels->min = private->dai_config->acpsp.tdm_slots;
+ channels->max = private->dai_config->acpsp.tdm_slots;
+
+ dev_dbg(component->dev,
+ "AMD_SP rate_min: %d rate_max: %d\n", rate->min, rate->max);
+ dev_dbg(component->dev, "AMD_SP channels_min: %d channels_max: %d\n",
+ channels->min, channels->max);
+ break;
+ case SOF_DAI_AMD_HS:
+ case SOF_DAI_AMD_HS_VIRTUAL:
+ rate->min = private->dai_config->acphs.fsync_rate;
+ rate->max = private->dai_config->acphs.fsync_rate;
+ channels->min = private->dai_config->acphs.tdm_slots;
+ channels->max = private->dai_config->acphs.tdm_slots;
+
+ dev_dbg(component->dev,
+ "AMD_HS channel_max: %d rate_max: %d\n", channels->max, rate->max);
+ break;
+ case SOF_DAI_AMD_DMIC:
+ rate->min = private->dai_config->acpdmic.pdm_rate;
+ rate->max = private->dai_config->acpdmic.pdm_rate;
+ channels->min = private->dai_config->acpdmic.pdm_ch;
+ channels->max = private->dai_config->acpdmic.pdm_ch;
+
+ dev_dbg(component->dev,
+ "AMD_DMIC rate_min: %d rate_max: %d\n", rate->min, rate->max);
+ dev_dbg(component->dev, "AMD_DMIC channels_min: %d channels_max: %d\n",
+ channels->min, channels->max);
+ break;
+ default:
+ dev_err(component->dev, "Invalid DAI type %d\n", private->dai_config->type);
+ break;
+ }
+
+ return 0;
+}
+
+const struct sof_ipc_pcm_ops ipc3_pcm_ops = {
+ .hw_params = sof_ipc3_pcm_hw_params,
+ .hw_free = sof_ipc3_pcm_hw_free,
+ .trigger = sof_ipc3_pcm_trigger,
+ .dai_link_fixup = sof_ipc3_pcm_dai_link_fixup,
+ .reset_hw_params_during_stop = true,
+};
diff --git a/sound/soc/sof/ipc3-priv.h b/sound/soc/sof/ipc3-priv.h
new file mode 100644
index 000000000..0bbca418e
--- /dev/null
+++ b/sound/soc/sof/ipc3-priv.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2021 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __SOUND_SOC_SOF_IPC3_PRIV_H
+#define __SOUND_SOC_SOF_IPC3_PRIV_H
+
+#include "sof-priv.h"
+
+/* IPC3 specific ops */
+extern const struct sof_ipc_pcm_ops ipc3_pcm_ops;
+extern const struct sof_ipc_tplg_ops ipc3_tplg_ops;
+extern const struct sof_ipc_tplg_control_ops tplg_ipc3_control_ops;
+extern const struct sof_ipc_fw_loader_ops ipc3_loader_ops;
+extern const struct sof_ipc_fw_tracing_ops ipc3_dtrace_ops;
+
+/* helpers for fw_ready and ext_manifest parsing */
+int sof_ipc3_get_ext_windows(struct snd_sof_dev *sdev,
+ const struct sof_ipc_ext_data_hdr *ext_hdr);
+int sof_ipc3_get_cc_info(struct snd_sof_dev *sdev,
+ const struct sof_ipc_ext_data_hdr *ext_hdr);
+int sof_ipc3_validate_fw_version(struct snd_sof_dev *sdev);
+
+/* dtrace position update */
+int ipc3_dtrace_posn_update(struct snd_sof_dev *sdev,
+ struct sof_ipc_dma_trace_posn *posn);
+/* RX handler backend */
+void sof_ipc3_do_rx_work(struct snd_sof_dev *sdev, struct sof_ipc_cmd_hdr *hdr, void *msg_buf);
+
+/* dtrace platform callback wrappers */
+static inline int sof_dtrace_host_init(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmatb,
+ struct sof_ipc_dma_trace_params_ext *dtrace_params)
+{
+ struct snd_sof_dsp_ops *dsp_ops = sdev->pdata->desc->ops;
+
+ if (dsp_ops->trace_init)
+ return dsp_ops->trace_init(sdev, dmatb, dtrace_params);
+
+ return 0;
+}
+
+static inline int sof_dtrace_host_release(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_dsp_ops *dsp_ops = sdev->pdata->desc->ops;
+
+ if (dsp_ops->trace_release)
+ return dsp_ops->trace_release(sdev);
+
+ return 0;
+}
+
+static inline int sof_dtrace_host_trigger(struct snd_sof_dev *sdev, int cmd)
+{
+ struct snd_sof_dsp_ops *dsp_ops = sdev->pdata->desc->ops;
+
+ if (dsp_ops->trace_trigger)
+ return dsp_ops->trace_trigger(sdev, cmd);
+
+ return 0;
+}
+
+#endif
diff --git a/sound/soc/sof/ipc3-topology.c b/sound/soc/sof/ipc3-topology.c
new file mode 100644
index 000000000..2c7a5e7a3
--- /dev/null
+++ b/sound/soc/sof/ipc3-topology.c
@@ -0,0 +1,2582 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2021 Intel Corporation. All rights reserved.
+//
+//
+
+#include <uapi/sound/sof/tokens.h>
+#include <sound/pcm_params.h>
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ipc3-priv.h"
+#include "ops.h"
+
+/* Full volume for default values */
+#define VOL_ZERO_DB BIT(VOLUME_FWL)
+
+/* size of tplg ABI in bytes */
+#define SOF_IPC3_TPLG_ABI_SIZE 3
+
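+/* per-kcontrol data gathered when building a process component IPC message */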
+struct sof_widget_data {
+ int ctrl_type;
+ int ipc_cmd;
+ void *pdata;
+ size_t pdata_size;
+ struct snd_sof_control *control;
+};
+
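+/* maps a topology process name to its IPC process type and component type */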
+struct sof_process_types {
+ const char *name;
+ enum sof_ipc_process_type type;
+ enum sof_comp_type comp_type;
+};
+
+static const struct sof_process_types sof_process[] = {
+ {"EQFIR", SOF_PROCESS_EQFIR, SOF_COMP_EQ_FIR},
+ {"EQIIR", SOF_PROCESS_EQIIR, SOF_COMP_EQ_IIR},
+ {"KEYWORD_DETECT", SOF_PROCESS_KEYWORD_DETECT, SOF_COMP_KEYWORD_DETECT},
+ {"KPB", SOF_PROCESS_KPB, SOF_COMP_KPB},
+ {"CHAN_SELECTOR", SOF_PROCESS_CHAN_SELECTOR, SOF_COMP_SELECTOR},
+ {"MUX", SOF_PROCESS_MUX, SOF_COMP_MUX},
+ {"DEMUX", SOF_PROCESS_DEMUX, SOF_COMP_DEMUX},
+ {"DCBLOCK", SOF_PROCESS_DCBLOCK, SOF_COMP_DCBLOCK},
+ {"SMART_AMP", SOF_PROCESS_SMART_AMP, SOF_COMP_SMART_AMP},
+};
+
+static enum sof_ipc_process_type find_process(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sof_process); i++) {
+ if (strcmp(name, sof_process[i].name) == 0)
+ return sof_process[i].type;
+ }
+
+ return SOF_PROCESS_NONE;
+}
+
+static int get_token_process_type(void *elem, void *object, u32 offset)
+{
+ u32 *val = (u32 *)((u8 *)object + offset);
+
+ *val = find_process((const char *)elem);
+ return 0;
+}
+
+/* Buffers */
+static const struct sof_topology_token buffer_tokens[] = {
+ {SOF_TKN_BUF_SIZE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_buffer, size)},
+ {SOF_TKN_BUF_CAPS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_buffer, caps)},
+};
+
+/* DAI */
+static const struct sof_topology_token dai_tokens[] = {
+ {SOF_TKN_DAI_TYPE, SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_dai_type,
+ offsetof(struct sof_ipc_comp_dai, type)},
+ {SOF_TKN_DAI_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_dai, dai_index)},
+ {SOF_TKN_DAI_DIRECTION, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_dai, direction)},
+};
+
+/* BE DAI link */
+static const struct sof_topology_token dai_link_tokens[] = {
+ {SOF_TKN_DAI_TYPE, SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_dai_type,
+ offsetof(struct sof_ipc_dai_config, type)},
+ {SOF_TKN_DAI_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_config, dai_index)},
+};
+
+/* scheduling */
+static const struct sof_topology_token sched_tokens[] = {
+ {SOF_TKN_SCHED_PERIOD, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, period)},
+ {SOF_TKN_SCHED_PRIORITY, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, priority)},
+ {SOF_TKN_SCHED_MIPS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, period_mips)},
+ {SOF_TKN_SCHED_CORE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, core)},
+ {SOF_TKN_SCHED_FRAMES, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, frames_per_sched)},
+ {SOF_TKN_SCHED_TIME_DOMAIN, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, time_domain)},
+};
+
+static const struct sof_topology_token pipeline_tokens[] = {
+ {SOF_TKN_SCHED_DYNAMIC_PIPELINE, SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_widget, dynamic_pipeline_widget)},
+
+};
+
+/* volume */
+static const struct sof_topology_token volume_tokens[] = {
+ {SOF_TKN_VOLUME_RAMP_STEP_TYPE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_volume, ramp)},
+ {SOF_TKN_VOLUME_RAMP_STEP_MS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_volume, initial_ramp)},
+};
+
+/* SRC */
+static const struct sof_topology_token src_tokens[] = {
+ {SOF_TKN_SRC_RATE_IN, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_src, source_rate)},
+ {SOF_TKN_SRC_RATE_OUT, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_src, sink_rate)},
+};
+
+/* ASRC */
+static const struct sof_topology_token asrc_tokens[] = {
+ {SOF_TKN_ASRC_RATE_IN, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_asrc, source_rate)},
+ {SOF_TKN_ASRC_RATE_OUT, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_asrc, sink_rate)},
+ {SOF_TKN_ASRC_ASYNCHRONOUS_MODE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_asrc, asynchronous_mode)},
+ {SOF_TKN_ASRC_OPERATION_MODE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_asrc, operation_mode)},
+};
+
+/* EFFECT */
+static const struct sof_topology_token process_tokens[] = {
+ {SOF_TKN_PROCESS_TYPE, SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_process_type,
+ offsetof(struct sof_ipc_comp_process, type)},
+};
+
+/* PCM */
+static const struct sof_topology_token pcm_tokens[] = {
+ {SOF_TKN_PCM_DMAC_CONFIG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_host, dmac_config)},
+};
+
+/* Generic components */
+static const struct sof_topology_token comp_tokens[] = {
+ {SOF_TKN_COMP_PERIOD_SINK_COUNT, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_config, periods_sink)},
+ {SOF_TKN_COMP_PERIOD_SOURCE_COUNT, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_config, periods_source)},
+ {SOF_TKN_COMP_FORMAT,
+ SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_comp_format,
+ offsetof(struct sof_ipc_comp_config, frame_fmt)},
+};
+
+/* SSP */
+static const struct sof_topology_token ssp_tokens[] = {
+ {SOF_TKN_INTEL_SSP_CLKS_CONTROL, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_ssp_params, clks_control)},
+ {SOF_TKN_INTEL_SSP_MCLK_ID, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_ssp_params, mclk_id)},
+ {SOF_TKN_INTEL_SSP_SAMPLE_BITS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_ssp_params, sample_valid_bits)},
+ {SOF_TKN_INTEL_SSP_FRAME_PULSE_WIDTH, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_ssp_params, frame_pulse_width)},
+ {SOF_TKN_INTEL_SSP_QUIRKS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_ssp_params, quirks)},
+ {SOF_TKN_INTEL_SSP_TDM_PADDING_PER_SLOT, SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct sof_ipc_dai_ssp_params, tdm_per_slot_padding_flag)},
+ {SOF_TKN_INTEL_SSP_BCLK_DELAY, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_ssp_params, bclk_delay)},
+};
+
+/* ALH */
+static const struct sof_topology_token alh_tokens[] = {
+ {SOF_TKN_INTEL_ALH_RATE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_alh_params, rate)},
+ {SOF_TKN_INTEL_ALH_CH, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_alh_params, channels)},
+};
+
+/* DMIC */
+static const struct sof_topology_token dmic_tokens[] = {
+ {SOF_TKN_INTEL_DMIC_DRIVER_VERSION, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, driver_ipc_version)},
+ {SOF_TKN_INTEL_DMIC_CLK_MIN, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, pdmclk_min)},
+ {SOF_TKN_INTEL_DMIC_CLK_MAX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, pdmclk_max)},
+ {SOF_TKN_INTEL_DMIC_SAMPLE_RATE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, fifo_fs)},
+ {SOF_TKN_INTEL_DMIC_DUTY_MIN, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_params, duty_min)},
+ {SOF_TKN_INTEL_DMIC_DUTY_MAX, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_params, duty_max)},
+ {SOF_TKN_INTEL_DMIC_NUM_PDM_ACTIVE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, num_pdm_active)},
+ {SOF_TKN_INTEL_DMIC_FIFO_WORD_LENGTH, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_params, fifo_bits)},
+ {SOF_TKN_INTEL_DMIC_UNMUTE_RAMP_TIME_MS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, unmute_ramp_time)},
+};
+
+/* ESAI */
+static const struct sof_topology_token esai_tokens[] = {
+ {SOF_TKN_IMX_ESAI_MCLK_ID, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_esai_params, mclk_id)},
+};
+
+/* SAI */
+static const struct sof_topology_token sai_tokens[] = {
+ {SOF_TKN_IMX_SAI_MCLK_ID, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_sai_params, mclk_id)},
+};
+
+/*
+ * DMIC PDM Tokens
+ * SOF_TKN_INTEL_DMIC_PDM_CTRL_ID should be the first token: parsing it
+ * increments the PDM index, which determines the offset used for the
+ * remaining PDM tokens
+ */
+static const struct sof_topology_token dmic_pdm_tokens[] = {
+ {SOF_TKN_INTEL_DMIC_PDM_CTRL_ID, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, id)},
+ {SOF_TKN_INTEL_DMIC_PDM_MIC_A_Enable, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, enable_mic_a)},
+ {SOF_TKN_INTEL_DMIC_PDM_MIC_B_Enable, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, enable_mic_b)},
+ {SOF_TKN_INTEL_DMIC_PDM_POLARITY_A, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, polarity_mic_a)},
+ {SOF_TKN_INTEL_DMIC_PDM_POLARITY_B, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, polarity_mic_b)},
+ {SOF_TKN_INTEL_DMIC_PDM_CLK_EDGE, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, clk_edge)},
+ {SOF_TKN_INTEL_DMIC_PDM_SKEW, SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, skew)},
+};
+
+/* HDA */
+static const struct sof_topology_token hda_tokens[] = {
+ {SOF_TKN_INTEL_HDA_RATE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_hda_params, rate)},
+ {SOF_TKN_INTEL_HDA_CH, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_hda_params, channels)},
+};
+
+/* AFE */
+static const struct sof_topology_token afe_tokens[] = {
+ {SOF_TKN_MEDIATEK_AFE_RATE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_mtk_afe_params, rate)},
+ {SOF_TKN_MEDIATEK_AFE_CH, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_mtk_afe_params, channels)},
+ {SOF_TKN_MEDIATEK_AFE_FORMAT, SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_comp_format,
+ offsetof(struct sof_ipc_dai_mtk_afe_params, format)},
+};
+
+/* ACPDMIC */
+static const struct sof_topology_token acpdmic_tokens[] = {
+ {SOF_TKN_AMD_ACPDMIC_RATE,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_acpdmic_params, pdm_rate)},
+ {SOF_TKN_AMD_ACPDMIC_CH,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_acpdmic_params, pdm_ch)},
+};
+
+/* ACPI2S */
+static const struct sof_topology_token acpi2s_tokens[] = {
+ {SOF_TKN_AMD_ACPI2S_RATE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_acp_params, fsync_rate)},
+ {SOF_TKN_AMD_ACPI2S_CH, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_acp_params, tdm_slots)},
+ {SOF_TKN_AMD_ACPI2S_TDM_MODE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_acp_params, tdm_mode)},
+};
+
+/* Core tokens */
+static const struct sof_topology_token core_tokens[] = {
+ {SOF_TKN_COMP_CORE_ID, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp, core)},
+};
+
+/* Component extended tokens */
+static const struct sof_topology_token comp_ext_tokens[] = {
+ {SOF_TKN_COMP_UUID, SND_SOC_TPLG_TUPLE_TYPE_UUID, get_token_uuid,
+ offsetof(struct snd_sof_widget, uuid)},
+};
+
+static const struct sof_token_info ipc3_token_list[SOF_TOKEN_COUNT] = {
+ [SOF_PCM_TOKENS] = {"PCM tokens", pcm_tokens, ARRAY_SIZE(pcm_tokens)},
+ [SOF_PIPELINE_TOKENS] = {"Pipeline tokens", pipeline_tokens, ARRAY_SIZE(pipeline_tokens)},
+ [SOF_SCHED_TOKENS] = {"Scheduler tokens", sched_tokens, ARRAY_SIZE(sched_tokens)},
+ [SOF_COMP_TOKENS] = {"Comp tokens", comp_tokens, ARRAY_SIZE(comp_tokens)},
+ [SOF_CORE_TOKENS] = {"Core tokens", core_tokens, ARRAY_SIZE(core_tokens)},
+ [SOF_COMP_EXT_TOKENS] = {"AFE tokens", comp_ext_tokens, ARRAY_SIZE(comp_ext_tokens)},
+ [SOF_BUFFER_TOKENS] = {"Buffer tokens", buffer_tokens, ARRAY_SIZE(buffer_tokens)},
+ [SOF_VOLUME_TOKENS] = {"Volume tokens", volume_tokens, ARRAY_SIZE(volume_tokens)},
+ [SOF_SRC_TOKENS] = {"SRC tokens", src_tokens, ARRAY_SIZE(src_tokens)},
+ [SOF_ASRC_TOKENS] = {"ASRC tokens", asrc_tokens, ARRAY_SIZE(asrc_tokens)},
+ [SOF_PROCESS_TOKENS] = {"Process tokens", process_tokens, ARRAY_SIZE(process_tokens)},
+ [SOF_DAI_TOKENS] = {"DAI tokens", dai_tokens, ARRAY_SIZE(dai_tokens)},
+ [SOF_DAI_LINK_TOKENS] = {"DAI link tokens", dai_link_tokens, ARRAY_SIZE(dai_link_tokens)},
+ [SOF_HDA_TOKENS] = {"HDA tokens", hda_tokens, ARRAY_SIZE(hda_tokens)},
+ [SOF_SSP_TOKENS] = {"SSP tokens", ssp_tokens, ARRAY_SIZE(ssp_tokens)},
+ [SOF_ALH_TOKENS] = {"ALH tokens", alh_tokens, ARRAY_SIZE(alh_tokens)},
+ [SOF_DMIC_TOKENS] = {"DMIC tokens", dmic_tokens, ARRAY_SIZE(dmic_tokens)},
+ [SOF_DMIC_PDM_TOKENS] = {"DMIC PDM tokens", dmic_pdm_tokens, ARRAY_SIZE(dmic_pdm_tokens)},
+ [SOF_ESAI_TOKENS] = {"ESAI tokens", esai_tokens, ARRAY_SIZE(esai_tokens)},
+ [SOF_SAI_TOKENS] = {"SAI tokens", sai_tokens, ARRAY_SIZE(sai_tokens)},
+ [SOF_AFE_TOKENS] = {"AFE tokens", afe_tokens, ARRAY_SIZE(afe_tokens)},
+ [SOF_ACPDMIC_TOKENS] = {"ACPDMIC tokens", acpdmic_tokens, ARRAY_SIZE(acpdmic_tokens)},
+ [SOF_ACPI2S_TOKENS] = {"ACPI2S tokens", acpi2s_tokens, ARRAY_SIZE(acpi2s_tokens)},
+};
+
+/**
+ * sof_comp_alloc - allocate and initialize buffer for a new component
+ * @swidget: pointer to struct snd_sof_widget containing extended data
+ * @ipc_size: IPC payload size that will be updated depending on valid
+ * extended data.
+ * @index: ID of the pipeline the component belongs to
+ *
+ * Return: Pointer to the newly allocated component, or NULL on failure.
+ */
+static void *sof_comp_alloc(struct snd_sof_widget *swidget, size_t *ipc_size,
+ int index)
+{
+ struct sof_ipc_comp *comp;
+ size_t total_size = *ipc_size;
+ size_t ext_size = sizeof(swidget->uuid);
+
+ /* only non-zero UUID is valid */
+ if (!guid_is_null(&swidget->uuid))
+ total_size += ext_size;
+
+ comp = kzalloc(total_size, GFP_KERNEL);
+ if (!comp)
+ return NULL;
+
+ /* configure comp new IPC message */
+ comp->hdr.size = total_size;
+ comp->hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_NEW;
+ comp->id = swidget->comp_id;
+ comp->pipeline_id = index;
+ comp->core = swidget->core;
+
+ /* handle the extended data if needed */
+ if (total_size > *ipc_size) {
+ /* append extended data to the end of the component */
+ memcpy((u8 *)comp + *ipc_size, &swidget->uuid, ext_size);
+ comp->ext_data_length = ext_size;
+ }
+
+ /* update ipc_size and return */
+ *ipc_size = total_size;
+ return comp;
+}
+
+static void sof_dbg_comp_config(struct snd_soc_component *scomp, struct sof_ipc_comp_config *config)
+{
+ dev_dbg(scomp->dev, " config: periods snk %d src %d fmt %d\n",
+ config->periods_sink, config->periods_source,
+ config->frame_fmt);
+}
+
+static int sof_ipc3_widget_setup_comp_host(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc_comp_host *host;
+ size_t ipc_size = sizeof(*host);
+ int ret;
+
+ host = sof_comp_alloc(swidget, &ipc_size, swidget->pipeline_id);
+ if (!host)
+ return -ENOMEM;
+ swidget->private = host;
+
+ /* configure host comp IPC message */
+ host->comp.type = SOF_COMP_HOST;
+ host->config.hdr.size = sizeof(host->config);
+
+ if (swidget->id == snd_soc_dapm_aif_out)
+ host->direction = SOF_IPC_STREAM_CAPTURE;
+ else
+ host->direction = SOF_IPC_STREAM_PLAYBACK;
+
+ /* parse one set of pcm_tokens */
+ ret = sof_update_ipc_object(scomp, host, SOF_PCM_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*host), 1);
+ if (ret < 0)
+ goto err;
+
+ /* parse one set of comp_tokens */
+ ret = sof_update_ipc_object(scomp, &host->config, SOF_COMP_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(host->config), 1);
+ if (ret < 0)
+ goto err;
+
+ dev_dbg(scomp->dev, "loaded host %s\n", swidget->widget->name);
+ sof_dbg_comp_config(scomp, &host->config);
+
+ return 0;
+err:
+ kfree(swidget->private);
+ swidget->private = NULL;
+
+ return ret;
+}
+
+static void sof_ipc3_widget_free_comp(struct snd_sof_widget *swidget)
+{
+ kfree(swidget->private);
+}
+
+static int sof_ipc3_widget_setup_comp_tone(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc_comp_tone *tone;
+ size_t ipc_size = sizeof(*tone);
+ int ret;
+
+ tone = sof_comp_alloc(swidget, &ipc_size, swidget->pipeline_id);
+ if (!tone)
+ return -ENOMEM;
+
+ swidget->private = tone;
+
+ /* configure siggen IPC message */
+ tone->comp.type = SOF_COMP_TONE;
+ tone->config.hdr.size = sizeof(tone->config);
+
+ /* parse one set of comp tokens */
+ ret = sof_update_ipc_object(scomp, &tone->config, SOF_COMP_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(tone->config), 1);
+ if (ret < 0) {
+ kfree(swidget->private);
+ swidget->private = NULL;
+ return ret;
+ }
+
+ dev_dbg(scomp->dev, "tone %s: frequency %d amplitude %d\n",
+ swidget->widget->name, tone->frequency, tone->amplitude);
+ sof_dbg_comp_config(scomp, &tone->config);
+
+ return 0;
+}
+
+static int sof_ipc3_widget_setup_comp_mixer(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc_comp_mixer *mixer;
+ size_t ipc_size = sizeof(*mixer);
+ int ret;
+
+ mixer = sof_comp_alloc(swidget, &ipc_size, swidget->pipeline_id);
+ if (!mixer)
+ return -ENOMEM;
+
+ swidget->private = mixer;
+
+ /* configure mixer IPC message */
+ mixer->comp.type = SOF_COMP_MIXER;
+ mixer->config.hdr.size = sizeof(mixer->config);
+
+ /* parse one set of comp tokens */
+ ret = sof_update_ipc_object(scomp, &mixer->config, SOF_COMP_TOKENS,
+ swidget->tuples, swidget->num_tuples,
+ sizeof(mixer->config), 1);
+ if (ret < 0) {
+ kfree(swidget->private);
+ swidget->private = NULL;
+
+ return ret;
+ }
+
+ dev_dbg(scomp->dev, "loaded mixer %s\n", swidget->widget->name);
+ sof_dbg_comp_config(scomp, &mixer->config);
+
+ return 0;
+}
+
+static int sof_ipc3_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_pipeline *spipe = swidget->spipe;
+ struct sof_ipc_pipe_new *pipeline;
+ struct snd_sof_widget *comp_swidget;
+ int ret;
+
+ pipeline = kzalloc(sizeof(*pipeline), GFP_KERNEL);
+ if (!pipeline)
+ return -ENOMEM;
+
+ /* configure pipeline IPC message */
+ pipeline->hdr.size = sizeof(*pipeline);
+ pipeline->hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_PIPE_NEW;
+ pipeline->pipeline_id = swidget->pipeline_id;
+ pipeline->comp_id = swidget->comp_id;
+
+ swidget->private = pipeline;
+
+ /* component at start of pipeline is our stream id */
+ comp_swidget = snd_sof_find_swidget(scomp, swidget->widget->sname);
+ if (!comp_swidget) {
+ dev_err(scomp->dev, "scheduler %s refers to non existent widget %s\n",
+ swidget->widget->name, swidget->widget->sname);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ pipeline->sched_id = comp_swidget->comp_id;
+
+ /* parse one set of scheduler tokens */
+ ret = sof_update_ipc_object(scomp, pipeline, SOF_SCHED_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*pipeline), 1);
+ if (ret < 0)
+ goto err;
+
+ /* parse one set of pipeline tokens */
+ ret = sof_update_ipc_object(scomp, swidget, SOF_PIPELINE_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*swidget), 1);
+ if (ret < 0)
+ goto err;
+
+ if (sof_debug_check_flag(SOF_DBG_DISABLE_MULTICORE))
+ pipeline->core = SOF_DSP_PRIMARY_CORE;
+
+ if (sof_debug_check_flag(SOF_DBG_DYNAMIC_PIPELINES_OVERRIDE))
+ swidget->dynamic_pipeline_widget =
+ sof_debug_check_flag(SOF_DBG_DYNAMIC_PIPELINES_ENABLE);
+
+ dev_dbg(scomp->dev, "pipeline %s: period %d pri %d mips %d core %d frames %d dynamic %d\n",
+ swidget->widget->name, pipeline->period, pipeline->priority,
+ pipeline->period_mips, pipeline->core, pipeline->frames_per_sched,
+ swidget->dynamic_pipeline_widget);
+
+ swidget->core = pipeline->core;
+ spipe->core_mask |= BIT(pipeline->core);
+
+ return 0;
+
+err:
+ kfree(swidget->private);
+ swidget->private = NULL;
+
+ return ret;
+}
+
+static int sof_ipc3_widget_setup_comp_buffer(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc_buffer *buffer;
+ int ret;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ swidget->private = buffer;
+
+ /* configure buffer IPC message */
+ buffer->comp.hdr.size = sizeof(*buffer);
+ buffer->comp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_BUFFER_NEW;
+ buffer->comp.id = swidget->comp_id;
+ buffer->comp.type = SOF_COMP_BUFFER;
+ buffer->comp.pipeline_id = swidget->pipeline_id;
+ buffer->comp.core = swidget->core;
+
+ /* parse one set of buffer tokens */
+ ret = sof_update_ipc_object(scomp, buffer, SOF_BUFFER_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*buffer), 1);
+ if (ret < 0) {
+ kfree(swidget->private);
+ swidget->private = NULL;
+ return ret;
+ }
+
+ dev_dbg(scomp->dev, "buffer %s: size %d caps 0x%x\n",
+ swidget->widget->name, buffer->size, buffer->caps);
+
+ return 0;
+}
+
+static int sof_ipc3_widget_setup_comp_src(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc_comp_src *src;
+ size_t ipc_size = sizeof(*src);
+ int ret;
+
+ src = sof_comp_alloc(swidget, &ipc_size, swidget->pipeline_id);
+ if (!src)
+ return -ENOMEM;
+
+ swidget->private = src;
+
+ /* configure src IPC message */
+ src->comp.type = SOF_COMP_SRC;
+ src->config.hdr.size = sizeof(src->config);
+
+ /* parse one set of src tokens */
+ ret = sof_update_ipc_object(scomp, src, SOF_SRC_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*src), 1);
+ if (ret < 0)
+ goto err;
+
+ /* parse one set of comp tokens */
+ ret = sof_update_ipc_object(scomp, &src->config, SOF_COMP_TOKENS,
+ swidget->tuples, swidget->num_tuples, sizeof(src->config), 1);
+ if (ret < 0)
+ goto err;
+
+ dev_dbg(scomp->dev, "src %s: source rate %d sink rate %d\n",
+ swidget->widget->name, src->source_rate, src->sink_rate);
+ sof_dbg_comp_config(scomp, &src->config);
+
+ return 0;
+err:
+ kfree(swidget->private);
+ swidget->private = NULL;
+
+ return ret;
+}
+
+static int sof_ipc3_widget_setup_comp_asrc(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc_comp_asrc *asrc;
+ size_t ipc_size = sizeof(*asrc);
+ int ret;
+
+ asrc = sof_comp_alloc(swidget, &ipc_size, swidget->pipeline_id);
+ if (!asrc)
+ return -ENOMEM;
+
+ swidget->private = asrc;
+
+ /* configure ASRC IPC message */
+ asrc->comp.type = SOF_COMP_ASRC;
+ asrc->config.hdr.size = sizeof(asrc->config);
+
+ /* parse one set of asrc tokens */
+ ret = sof_update_ipc_object(scomp, asrc, SOF_ASRC_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*asrc), 1);
+ if (ret < 0)
+ goto err;
+
+ /* parse one set of comp tokens */
+ ret = sof_update_ipc_object(scomp, &asrc->config, SOF_COMP_TOKENS,
+ swidget->tuples, swidget->num_tuples, sizeof(asrc->config), 1);
+ if (ret < 0)
+ goto err;
+
+ dev_dbg(scomp->dev, "asrc %s: source rate %d sink rate %d asynch %d operation %d\n",
+ swidget->widget->name, asrc->source_rate, asrc->sink_rate,
+ asrc->asynchronous_mode, asrc->operation_mode);
+
+ sof_dbg_comp_config(scomp, &asrc->config);
+
+ return 0;
+err:
+ kfree(swidget->private);
+ swidget->private = NULL;
+
+ return ret;
+}
+
+/*
+ * Mux topology
+ */
+static int sof_ipc3_widget_setup_comp_mux(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc_comp_mux *mux;
+ size_t ipc_size = sizeof(*mux);
+ int ret;
+
+ mux = sof_comp_alloc(swidget, &ipc_size, swidget->pipeline_id);
+ if (!mux)
+ return -ENOMEM;
+
+ swidget->private = mux;
+
+ /* configure mux IPC message */
+ mux->comp.type = SOF_COMP_MUX;
+ mux->config.hdr.size = sizeof(mux->config);
+
+ /* parse one set of comp tokens */
+ ret = sof_update_ipc_object(scomp, &mux->config, SOF_COMP_TOKENS,
+ swidget->tuples, swidget->num_tuples, sizeof(mux->config), 1);
+ if (ret < 0) {
+ kfree(swidget->private);
+ swidget->private = NULL;
+ return ret;
+ }
+
+ dev_dbg(scomp->dev, "loaded mux %s\n", swidget->widget->name);
+ sof_dbg_comp_config(scomp, &mux->config);
+
+ return 0;
+}
+
+/*
+ * PGA Topology
+ */
+
+static int sof_ipc3_widget_setup_comp_pga(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_comp_volume *volume;
+ struct snd_sof_control *scontrol;
+ size_t ipc_size = sizeof(*volume);
+ int min_step, max_step;
+ int ret;
+
+ volume = sof_comp_alloc(swidget, &ipc_size, swidget->pipeline_id);
+ if (!volume)
+ return -ENOMEM;
+
+ swidget->private = volume;
+
+ /* configure volume IPC message */
+ volume->comp.type = SOF_COMP_VOLUME;
+ volume->config.hdr.size = sizeof(volume->config);
+
+ /* parse one set of volume tokens */
+ ret = sof_update_ipc_object(scomp, volume, SOF_VOLUME_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*volume), 1);
+ if (ret < 0)
+ goto err;
+
+ /* parse one set of comp tokens */
+ ret = sof_update_ipc_object(scomp, &volume->config, SOF_COMP_TOKENS,
+ swidget->tuples, swidget->num_tuples,
+ sizeof(volume->config), 1);
+ if (ret < 0)
+ goto err;
+
+ dev_dbg(scomp->dev, "loaded PGA %s\n", swidget->widget->name);
+ sof_dbg_comp_config(scomp, &volume->config);
+
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list) {
+ if (scontrol->comp_id == swidget->comp_id &&
+ scontrol->volume_table) {
+ min_step = scontrol->min_volume_step;
+ max_step = scontrol->max_volume_step;
+ volume->min_value = scontrol->volume_table[min_step];
+ volume->max_value = scontrol->volume_table[max_step];
+ volume->channels = scontrol->num_channels;
+ break;
+ }
+ }
+
+ return 0;
+err:
+ kfree(swidget->private);
+ swidget->private = NULL;
+
+ return ret;
+}
+
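+/* collect the IPC control data pointer, size and set-command for each widget kcontrol */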
+static int sof_get_control_data(struct snd_soc_component *scomp,
+ struct snd_soc_dapm_widget *widget,
+ struct sof_widget_data *wdata, size_t *size)
+{
+ const struct snd_kcontrol_new *kc;
+ struct sof_ipc_ctrl_data *cdata;
+ struct soc_mixer_control *sm;
+ struct soc_bytes_ext *sbe;
+ struct soc_enum *se;
+ int i;
+
+ *size = 0;
+
+ for (i = 0; i < widget->num_kcontrols; i++) {
+ kc = &widget->kcontrol_news[i];
+
+ switch (widget->dobj.widget.kcontrol_type[i]) {
+ case SND_SOC_TPLG_TYPE_MIXER:
+ sm = (struct soc_mixer_control *)kc->private_value;
+ wdata[i].control = sm->dobj.private;
+ break;
+ case SND_SOC_TPLG_TYPE_BYTES:
+ sbe = (struct soc_bytes_ext *)kc->private_value;
+ wdata[i].control = sbe->dobj.private;
+ break;
+ case SND_SOC_TPLG_TYPE_ENUM:
+ se = (struct soc_enum *)kc->private_value;
+ wdata[i].control = se->dobj.private;
+ break;
+ default:
+ dev_err(scomp->dev, "Unknown kcontrol type %u in widget %s\n",
+ widget->dobj.widget.kcontrol_type[i], widget->name);
+ return -EINVAL;
+ }
+
+ if (!wdata[i].control) {
+ dev_err(scomp->dev, "No scontrol for widget %s\n", widget->name);
+ return -EINVAL;
+ }
+
+ cdata = wdata[i].control->ipc_control_data;
+
+ if (widget->dobj.widget.kcontrol_type[i] == SND_SOC_TPLG_TYPE_BYTES) {
+ /* make sure data is valid - data can be updated at runtime */
+ if (cdata->data->magic != SOF_ABI_MAGIC)
+ return -EINVAL;
+
+ wdata[i].pdata = cdata->data->data;
+ wdata[i].pdata_size = cdata->data->size;
+ } else {
+ /* points to the control data union */
+ wdata[i].pdata = cdata->chanv;
+ /*
+ * wdata[i].control->size is calculated with struct_size
+ * and includes the size of struct sof_ipc_ctrl_data
+ */
+ wdata[i].pdata_size = wdata[i].control->size -
+ sizeof(struct sof_ipc_ctrl_data);
+ }
+
+ *size += wdata[i].pdata_size;
+
+ /* get data type */
+ switch (cdata->cmd) {
+ case SOF_CTRL_CMD_VOLUME:
+ case SOF_CTRL_CMD_ENUM:
+ case SOF_CTRL_CMD_SWITCH:
+ wdata[i].ipc_cmd = SOF_IPC_COMP_SET_VALUE;
+ wdata[i].ctrl_type = SOF_CTRL_TYPE_VALUE_CHAN_SET;
+ break;
+ case SOF_CTRL_CMD_BINARY:
+ wdata[i].ipc_cmd = SOF_IPC_COMP_SET_DATA;
+ wdata[i].ctrl_type = SOF_CTRL_TYPE_DATA_SET;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
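+/* build the process component IPC, appending kcontrol private data when it fits in one message */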
+static int sof_process_load(struct snd_soc_component *scomp,
+ struct snd_sof_widget *swidget, int type)
+{
+ struct snd_soc_dapm_widget *widget = swidget->widget;
+ struct sof_ipc_comp_process *process;
+ struct sof_widget_data *wdata = NULL;
+ size_t ipc_data_size = 0;
+ size_t ipc_size;
+ int offset = 0;
+ int ret;
+ int i;
+
+ /* allocate struct for widget control data sizes and types */
+ if (widget->num_kcontrols) {
+ wdata = kcalloc(widget->num_kcontrols, sizeof(*wdata), GFP_KERNEL);
+ if (!wdata)
+ return -ENOMEM;
+
+ /* get possible component controls and get size of all pdata */
+ ret = sof_get_control_data(scomp, widget, wdata, &ipc_data_size);
+ if (ret < 0)
+ goto out;
+ }
+
+ ipc_size = sizeof(struct sof_ipc_comp_process) + ipc_data_size;
+
+ /* we are exceeding max ipc size, config needs to be sent separately */
+ if (ipc_size > SOF_IPC_MSG_MAX_SIZE) {
+ ipc_size -= ipc_data_size;
+ ipc_data_size = 0;
+ }
+
+ process = sof_comp_alloc(swidget, &ipc_size, swidget->pipeline_id);
+ if (!process) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ swidget->private = process;
+
+ /* configure process IPC message */
+ process->comp.type = type;
+ process->config.hdr.size = sizeof(process->config);
+
+ /* parse one set of comp tokens */
+ ret = sof_update_ipc_object(scomp, &process->config, SOF_COMP_TOKENS,
+ swidget->tuples, swidget->num_tuples,
+ sizeof(process->config), 1);
+ if (ret < 0)
+ goto err;
+
+ dev_dbg(scomp->dev, "loaded process %s\n", swidget->widget->name);
+ sof_dbg_comp_config(scomp, &process->config);
+
+ /*
+ * If private data was found in the controls, copy it: walk the
+ * component controls and append each control's pdata after the
+ * process config
+ */
+ if (ipc_data_size) {
+ for (i = 0; i < widget->num_kcontrols; i++) {
+ if (!wdata[i].pdata_size)
+ continue;
+
+ memcpy(&process->data[offset], wdata[i].pdata,
+ wdata[i].pdata_size);
+ offset += wdata[i].pdata_size;
+ }
+ }
+
+ process->size = ipc_data_size;
+
+ kfree(wdata);
+
+ return 0;
+err:
+ kfree(swidget->private);
+ swidget->private = NULL;
+out:
+ kfree(wdata);
+ return ret;
+}
+
+static enum sof_comp_type find_process_comp_type(enum sof_ipc_process_type type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sof_process); i++) {
+ if (sof_process[i].type == type)
+ return sof_process[i].comp_type;
+ }
+
+ return SOF_COMP_NONE;
+}
+
+/*
+ * Processing Component Topology - can be "effect", "codec", or general
+ * "processing".
+ */
+
+static int sof_widget_update_ipc_comp_process(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc_comp_process config;
+ int ret;
+
+ memset(&config, 0, sizeof(config));
+ config.comp.core = swidget->core;
+
+ /* parse one set of process tokens */
+ ret = sof_update_ipc_object(scomp, &config, SOF_PROCESS_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(config), 1);
+ if (ret < 0)
+ return ret;
+
+ /* now load process specific data and send IPC */
+ return sof_process_load(scomp, swidget, find_process_comp_type(config.type));
+}
+
+static int sof_link_hda_load(struct snd_soc_component *scomp, struct snd_sof_dai_link *slink,
+ struct sof_ipc_dai_config *config, struct snd_sof_dai *dai)
+{
+ struct sof_dai_private_data *private = dai->private;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* init IPC */
+ memset(&config->hda, 0, sizeof(config->hda));
+ config->hdr.size = size;
+
+ /* parse one set of HDA tokens */
+ ret = sof_update_ipc_object(scomp, &config->hda, SOF_HDA_TOKENS, slink->tuples,
+ slink->num_tuples, size, 1);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(scomp->dev, "HDA config rate %d channels %d\n",
+ config->hda.rate, config->hda.channels);
+
+ config->hda.link_dma_ch = DMA_CHAN_INVALID;
+
+ dai->number_configs = 1;
+ dai->current_config = 0;
+ private->dai_config = kmemdup(config, size, GFP_KERNEL);
+ if (!private->dai_config)
+ return -ENOMEM;
+
+ return 0;
+}
+
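+/* translate topology clock provider/consumer and inversion settings into the IPC DAI format */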
+static void sof_dai_set_format(struct snd_soc_tplg_hw_config *hw_config,
+ struct sof_ipc_dai_config *config)
+{
+ /* clock directions wrt codec */
+ config->format &= ~SOF_DAI_FMT_CLOCK_PROVIDER_MASK;
+ if (hw_config->bclk_provider == SND_SOC_TPLG_BCLK_CP) {
+ /* codec is bclk provider */
+ if (hw_config->fsync_provider == SND_SOC_TPLG_FSYNC_CP)
+ config->format |= SOF_DAI_FMT_CBP_CFP;
+ else
+ config->format |= SOF_DAI_FMT_CBP_CFC;
+ } else {
+ /* codec is bclk consumer */
+ if (hw_config->fsync_provider == SND_SOC_TPLG_FSYNC_CP)
+ config->format |= SOF_DAI_FMT_CBC_CFP;
+ else
+ config->format |= SOF_DAI_FMT_CBC_CFC;
+ }
+
+ /* inverted clocks ? */
+ config->format &= ~SOF_DAI_FMT_INV_MASK;
+ if (hw_config->invert_bclk) {
+ if (hw_config->invert_fsync)
+ config->format |= SOF_DAI_FMT_IB_IF;
+ else
+ config->format |= SOF_DAI_FMT_IB_NF;
+ } else {
+ if (hw_config->invert_fsync)
+ config->format |= SOF_DAI_FMT_NB_IF;
+ else
+ config->format |= SOF_DAI_FMT_NB_NF;
+ }
+}
+
+static int sof_link_sai_load(struct snd_soc_component *scomp, struct snd_sof_dai_link *slink,
+ struct sof_ipc_dai_config *config, struct snd_sof_dai *dai)
+{
+ struct snd_soc_tplg_hw_config *hw_config = slink->hw_configs;
+ struct sof_dai_private_data *private = dai->private;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* handle clock provider/consumer roles and inverted clocks */
+ sof_dai_set_format(hw_config, config);
+
+ /* init IPC */
+ memset(&config->sai, 0, sizeof(config->sai));
+ config->hdr.size = size;
+
+ /* parse one set of SAI tokens */
+ ret = sof_update_ipc_object(scomp, &config->sai, SOF_SAI_TOKENS, slink->tuples,
+ slink->num_tuples, size, 1);
+ if (ret < 0)
+ return ret;
+
+ config->sai.mclk_rate = le32_to_cpu(hw_config->mclk_rate);
+ config->sai.bclk_rate = le32_to_cpu(hw_config->bclk_rate);
+ config->sai.fsync_rate = le32_to_cpu(hw_config->fsync_rate);
+ config->sai.mclk_direction = hw_config->mclk_direction;
+
+ config->sai.tdm_slots = le32_to_cpu(hw_config->tdm_slots);
+ config->sai.tdm_slot_width = le32_to_cpu(hw_config->tdm_slot_width);
+ config->sai.rx_slots = le32_to_cpu(hw_config->rx_slots);
+ config->sai.tx_slots = le32_to_cpu(hw_config->tx_slots);
+
+ dev_info(scomp->dev,
+ "tplg: config SAI%d fmt 0x%x mclk %d width %d slots %d mclk id %d\n",
+ config->dai_index, config->format,
+ config->sai.mclk_rate, config->sai.tdm_slot_width,
+ config->sai.tdm_slots, config->sai.mclk_id);
+
+ if (config->sai.tdm_slots < 1 || config->sai.tdm_slots > 8) {
+ dev_err(scomp->dev, "Invalid channel count for SAI%d\n", config->dai_index);
+ return -EINVAL;
+ }
+
+ dai->number_configs = 1;
+ dai->current_config = 0;
+ private->dai_config = kmemdup(config, size, GFP_KERNEL);
+ if (!private->dai_config)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int sof_link_esai_load(struct snd_soc_component *scomp, struct snd_sof_dai_link *slink,
+ struct sof_ipc_dai_config *config, struct snd_sof_dai *dai)
+{
+ struct snd_soc_tplg_hw_config *hw_config = slink->hw_configs;
+ struct sof_dai_private_data *private = dai->private;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* handle clock provider/consumer roles and inverted clocks */
+ sof_dai_set_format(hw_config, config);
+
+ /* init IPC */
+ memset(&config->esai, 0, sizeof(config->esai));
+ config->hdr.size = size;
+
+ /* parse one set of ESAI tokens */
+ ret = sof_update_ipc_object(scomp, &config->esai, SOF_ESAI_TOKENS, slink->tuples,
+ slink->num_tuples, size, 1);
+ if (ret < 0)
+ return ret;
+
+ config->esai.mclk_rate = le32_to_cpu(hw_config->mclk_rate);
+ config->esai.bclk_rate = le32_to_cpu(hw_config->bclk_rate);
+ config->esai.fsync_rate = le32_to_cpu(hw_config->fsync_rate);
+ config->esai.mclk_direction = hw_config->mclk_direction;
+ config->esai.tdm_slots = le32_to_cpu(hw_config->tdm_slots);
+ config->esai.tdm_slot_width = le32_to_cpu(hw_config->tdm_slot_width);
+ config->esai.rx_slots = le32_to_cpu(hw_config->rx_slots);
+ config->esai.tx_slots = le32_to_cpu(hw_config->tx_slots);
+
+ dev_info(scomp->dev,
+ "tplg: config ESAI%d fmt 0x%x mclk %d width %d slots %d mclk id %d\n",
+ config->dai_index, config->format,
+ config->esai.mclk_rate, config->esai.tdm_slot_width,
+ config->esai.tdm_slots, config->esai.mclk_id);
+
+ if (config->esai.tdm_slots < 1 || config->esai.tdm_slots > 8) {
+ dev_err(scomp->dev, "Invalid channel count for ESAI%d\n", config->dai_index);
+ return -EINVAL;
+ }
+
+ dai->number_configs = 1;
+ dai->current_config = 0;
+ private->dai_config = kmemdup(config, size, GFP_KERNEL);
+ if (!private->dai_config)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int sof_link_acp_dmic_load(struct snd_soc_component *scomp, struct snd_sof_dai_link *slink,
+ struct sof_ipc_dai_config *config, struct snd_sof_dai *dai)
+{
+ struct snd_soc_tplg_hw_config *hw_config = slink->hw_configs;
+ struct sof_dai_private_data *private = dai->private;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* handle clock provider/consumer roles and inverted clocks */
+ sof_dai_set_format(hw_config, config);
+
+ config->hdr.size = size;
+
+ /* parse the required set of ACPDMIC tokens based on num_hw_cfgs */
+ ret = sof_update_ipc_object(scomp, &config->acpdmic, SOF_ACPDMIC_TOKENS, slink->tuples,
+ slink->num_tuples, size, slink->num_hw_configs);
+ if (ret < 0)
+ return ret;
+
+ dev_info(scomp->dev, "ACP_DMIC config ACP%d channel %d rate %d\n",
+ config->dai_index, config->acpdmic.pdm_ch,
+ config->acpdmic.pdm_rate);
+
+ dai->number_configs = 1;
+ dai->current_config = 0;
+ private->dai_config = kmemdup(config, size, GFP_KERNEL);
+ if (!private->dai_config)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int sof_link_acp_bt_load(struct snd_soc_component *scomp, struct snd_sof_dai_link *slink,
+ struct sof_ipc_dai_config *config, struct snd_sof_dai *dai)
+{
+ struct snd_soc_tplg_hw_config *hw_config = slink->hw_configs;
+ struct sof_dai_private_data *private = dai->private;
+ u32 size = sizeof(*config);
+
+ /* handle master/slave and inverted clocks */
+ sof_dai_set_format(hw_config, config);
+
+ /* init IPC */
+ memset(&config->acpbt, 0, sizeof(config->acpbt));
+ config->hdr.size = size;
+
+ config->acpbt.fsync_rate = le32_to_cpu(hw_config->fsync_rate);
+ config->acpbt.tdm_slots = le32_to_cpu(hw_config->tdm_slots);
+
+ dev_info(scomp->dev, "ACP_BT config ACP%d channel %d rate %d\n",
+ config->dai_index, config->acpbt.tdm_slots,
+ config->acpbt.fsync_rate);
+
+ dai->number_configs = 1;
+ dai->current_config = 0;
+ private->dai_config = kmemdup(config, size, GFP_KERNEL);
+ if (!private->dai_config)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int sof_link_acp_sp_load(struct snd_soc_component *scomp, struct snd_sof_dai_link *slink,
+ struct sof_ipc_dai_config *config, struct snd_sof_dai *dai)
+{
+ struct snd_soc_tplg_hw_config *hw_config = slink->hw_configs;
+ struct sof_dai_private_data *private = dai->private;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* handle master/slave and inverted clocks */
+ sof_dai_set_format(hw_config, config);
+
+ /* init IPC */
+ memset(&config->acpsp, 0, sizeof(config->acpsp));
+ config->hdr.size = size;
+
+ ret = sof_update_ipc_object(scomp, &config->acpsp, SOF_ACPI2S_TOKENS, slink->tuples,
+ slink->num_tuples, size, slink->num_hw_configs);
+ if (ret < 0)
+ return ret;
+
+ dev_info(scomp->dev, "ACP_SP config ACP%d channel %d rate %d tdm_mode %d\n",
+ config->dai_index, config->acpsp.tdm_slots,
+ config->acpsp.fsync_rate, config->acpsp.tdm_mode);
+
+ dai->number_configs = 1;
+ dai->current_config = 0;
+ private->dai_config = kmemdup(config, size, GFP_KERNEL);
+ if (!private->dai_config)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int sof_link_acp_hs_load(struct snd_soc_component *scomp, struct snd_sof_dai_link *slink,
+ struct sof_ipc_dai_config *config, struct snd_sof_dai *dai)
+{
+ struct snd_soc_tplg_hw_config *hw_config = slink->hw_configs;
+ struct sof_dai_private_data *private = dai->private;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* Configures the DAI hardware format and inverted clocks */
+ sof_dai_set_format(hw_config, config);
+
+ /* init IPC */
+ memset(&config->acphs, 0, sizeof(config->acphs));
+ config->hdr.size = size;
+
+ ret = sof_update_ipc_object(scomp, &config->acphs, SOF_ACPI2S_TOKENS, slink->tuples,
+ slink->num_tuples, size, slink->num_hw_configs);
+ if (ret < 0)
+ return ret;
+
+ dev_info(scomp->dev, "ACP_HS config ACP%d channel %d rate %d tdm_mode %d\n",
+ config->dai_index, config->acphs.tdm_slots,
+ config->acphs.fsync_rate, config->acphs.tdm_mode);
+
+ dai->number_configs = 1;
+ dai->current_config = 0;
+ private->dai_config = kmemdup(config, size, GFP_KERNEL);
+ if (!private->dai_config)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int sof_link_afe_load(struct snd_soc_component *scomp, struct snd_sof_dai_link *slink,
+ struct sof_ipc_dai_config *config, struct snd_sof_dai *dai)
+{
+ struct sof_dai_private_data *private = dai->private;
+ u32 size = sizeof(*config);
+ int ret;
+
+ config->hdr.size = size;
+
+ /* parse the required set of AFE tokens based on num_hw_cfgs */
+ ret = sof_update_ipc_object(scomp, &config->afe, SOF_AFE_TOKENS, slink->tuples,
+ slink->num_tuples, size, slink->num_hw_configs);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(scomp->dev, "AFE config rate %d channels %d format:%d\n",
+ config->afe.rate, config->afe.channels, config->afe.format);
+
+ config->afe.stream_id = DMA_CHAN_INVALID;
+
+ dai->number_configs = 1;
+ dai->current_config = 0;
+ private->dai_config = kmemdup(config, size, GFP_KERNEL);
+ if (!private->dai_config)
+ return -ENOMEM;
+
+ return 0;
+}
+
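+/*
+ * SSP links can carry several hw_configs. One set of SSP tokens is parsed per
+ * hw_config, then the remaining clock and slot fields are copied from each
+ * hw_config and validated (fsync 8 kHz..192 kHz, 1..8 TDM slots). The config
+ * whose id matches default_hw_cfg_id becomes the DAI's current_config.
+ */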
+static int sof_link_ssp_load(struct snd_soc_component *scomp, struct snd_sof_dai_link *slink,
+ struct sof_ipc_dai_config *config, struct snd_sof_dai *dai)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_hw_config *hw_config = slink->hw_configs;
+ struct sof_dai_private_data *private = dai->private;
+ u32 size = sizeof(*config);
+ int current_config = 0;
+ int i, ret;
+
+ /*
+ * Parse the common data; there should be one set of common data per hw_config.
+ */
+ ret = sof_update_ipc_object(scomp, &config->ssp, SOF_SSP_TOKENS, slink->tuples,
+ slink->num_tuples, size, slink->num_hw_configs);
+ if (ret < 0)
+ return ret;
+
+ /* process all possible hw configs */
+ for (i = 0; i < slink->num_hw_configs; i++) {
+ if (le32_to_cpu(hw_config[i].id) == slink->default_hw_cfg_id)
+ current_config = i;
+
+ /* handle master/slave and inverted clocks */
+ sof_dai_set_format(&hw_config[i], &config[i]);
+
+ config[i].hdr.size = size;
+
+ if (sdev->mclk_id_override) {
+ dev_dbg(scomp->dev, "tplg: overriding topology mclk_id %d by quirk %d\n",
+ config[i].ssp.mclk_id, sdev->mclk_id_quirk);
+ config[i].ssp.mclk_id = sdev->mclk_id_quirk;
+ }
+
+ /* copy differentiating hw configs to ipc structs */
+ config[i].ssp.mclk_rate = le32_to_cpu(hw_config[i].mclk_rate);
+ config[i].ssp.bclk_rate = le32_to_cpu(hw_config[i].bclk_rate);
+ config[i].ssp.fsync_rate = le32_to_cpu(hw_config[i].fsync_rate);
+ config[i].ssp.tdm_slots = le32_to_cpu(hw_config[i].tdm_slots);
+ config[i].ssp.tdm_slot_width = le32_to_cpu(hw_config[i].tdm_slot_width);
+ config[i].ssp.mclk_direction = hw_config[i].mclk_direction;
+ config[i].ssp.rx_slots = le32_to_cpu(hw_config[i].rx_slots);
+ config[i].ssp.tx_slots = le32_to_cpu(hw_config[i].tx_slots);
+
+ dev_dbg(scomp->dev, "tplg: config SSP%d fmt %#x mclk %d bclk %d fclk %d width (%d)%d slots %d mclk id %d quirks %d clks_control %#x\n",
+ config[i].dai_index, config[i].format,
+ config[i].ssp.mclk_rate, config[i].ssp.bclk_rate,
+ config[i].ssp.fsync_rate, config[i].ssp.sample_valid_bits,
+ config[i].ssp.tdm_slot_width, config[i].ssp.tdm_slots,
+ config[i].ssp.mclk_id, config[i].ssp.quirks, config[i].ssp.clks_control);
+
+ /* validate SSP fsync rate and channel count */
+ if (config[i].ssp.fsync_rate < 8000 || config[i].ssp.fsync_rate > 192000) {
+ dev_err(scomp->dev, "Invalid fsync rate for SSP%d\n", config[i].dai_index);
+ return -EINVAL;
+ }
+
+ if (config[i].ssp.tdm_slots < 1 || config[i].ssp.tdm_slots > 8) {
+ dev_err(scomp->dev, "Invalid channel count for SSP%d\n",
+ config[i].dai_index);
+ return -EINVAL;
+ }
+ }
+
+ dai->number_configs = slink->num_hw_configs;
+ dai->current_config = current_config;
+ private->dai_config = kmemdup(config, size * slink->num_hw_configs, GFP_KERNEL);
+ if (!private->dai_config)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int sof_link_dmic_load(struct snd_soc_component *scomp, struct snd_sof_dai_link *slink,
+ struct sof_ipc_dai_config *config, struct snd_sof_dai *dai)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_dai_private_data *private = dai->private;
+ struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
+ struct sof_ipc_fw_version *v = &ready->version;
+ size_t size = sizeof(*config);
+ int i, ret;
+
+ /* Ensure the entire DMIC config struct is zeroed out */
+ memset(&config->dmic, 0, sizeof(config->dmic));
+
+ /* parse the required set of DMIC tokens based on num_hw_cfgs */
+ ret = sof_update_ipc_object(scomp, &config->dmic, SOF_DMIC_TOKENS, slink->tuples,
+ slink->num_tuples, size, slink->num_hw_configs);
+ if (ret < 0)
+ return ret;
+
+ /* parse the required set of DMIC PDM tokens based on the number of active PDMs */
+ ret = sof_update_ipc_object(scomp, &config->dmic.pdm[0], SOF_DMIC_PDM_TOKENS,
+ slink->tuples, slink->num_tuples,
+ sizeof(struct sof_ipc_dai_dmic_pdm_ctrl),
+ config->dmic.num_pdm_active);
+ if (ret < 0)
+ return ret;
+
+ /* set IPC header size */
+ config->hdr.size = size;
+
+ /* debug messages */
+ dev_dbg(scomp->dev, "tplg: config DMIC%d driver version %d\n",
+ config->dai_index, config->dmic.driver_ipc_version);
+ dev_dbg(scomp->dev, "pdmclk_min %d pdm_clkmax %d duty_min %d\n",
+ config->dmic.pdmclk_min, config->dmic.pdmclk_max,
+ config->dmic.duty_min);
+ dev_dbg(scomp->dev, "duty_max %d fifo_fs %d num_pdms active %d\n",
+ config->dmic.duty_max, config->dmic.fifo_fs,
+ config->dmic.num_pdm_active);
+ dev_dbg(scomp->dev, "fifo word length %d\n", config->dmic.fifo_bits);
+
+ for (i = 0; i < config->dmic.num_pdm_active; i++) {
+ dev_dbg(scomp->dev, "pdm %d mic a %d mic b %d\n",
+ config->dmic.pdm[i].id,
+ config->dmic.pdm[i].enable_mic_a,
+ config->dmic.pdm[i].enable_mic_b);
+ dev_dbg(scomp->dev, "pdm %d polarity a %d polarity b %d\n",
+ config->dmic.pdm[i].id,
+ config->dmic.pdm[i].polarity_mic_a,
+ config->dmic.pdm[i].polarity_mic_b);
+ dev_dbg(scomp->dev, "pdm %d clk_edge %d skew %d\n",
+ config->dmic.pdm[i].id,
+ config->dmic.pdm[i].clk_edge,
+ config->dmic.pdm[i].skew);
+ }
+
+ /*
+ * This takes care of the backwards-compatible handling of fifo_bits_b,
+ * which is deprecated since firmware ABI version 3.0.1.
+ */
+ if (SOF_ABI_VER(v->major, v->minor, v->micro) < SOF_ABI_VER(3, 0, 1))
+ config->dmic.fifo_bits_b = config->dmic.fifo_bits;
+
+ dai->number_configs = 1;
+ dai->current_config = 0;
+ private->dai_config = kmemdup(config, size, GFP_KERNEL);
+ if (!private->dai_config)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int sof_link_alh_load(struct snd_soc_component *scomp, struct snd_sof_dai_link *slink,
+ struct sof_ipc_dai_config *config, struct snd_sof_dai *dai)
+{
+ struct sof_dai_private_data *private = dai->private;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* parse the required set of ALH tokens based on num_hw_cfgs */
+ ret = sof_update_ipc_object(scomp, &config->alh, SOF_ALH_TOKENS, slink->tuples,
+ slink->num_tuples, size, slink->num_hw_configs);
+ if (ret < 0)
+ return ret;
+
+ /* init IPC */
+ config->hdr.size = size;
+
+ /* set config for all DAIs with a name matching the link name */
+ dai->number_configs = 1;
+ dai->current_config = 0;
+ private->dai_config = kmemdup(config, size, GFP_KERNEL);
+ if (!private->dai_config)
+ return -ENOMEM;
+
+ return 0;
+}
+
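+/*
+ * Set up an IPC3 DAI widget: allocate the per-DAI private data, build the
+ * SOF_COMP_DAI comp from the DAI and comp tokens, then walk the DAI link list
+ * and build one DAI_CONFIG per hw_config for the matching link, dispatching to
+ * the type-specific link load handler above.
+ */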
+static int sof_ipc3_widget_setup_comp_dai(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_dai_private_data *private;
+ struct sof_ipc_comp_dai *comp_dai;
+ size_t ipc_size = sizeof(*comp_dai);
+ struct sof_ipc_dai_config *config;
+ struct snd_sof_dai_link *slink;
+ int ret;
+
+ private = kzalloc(sizeof(*private), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+
+ dai->private = private;
+
+ private->comp_dai = sof_comp_alloc(swidget, &ipc_size, swidget->pipeline_id);
+ if (!private->comp_dai) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ /* configure dai IPC message */
+ comp_dai = private->comp_dai;
+ comp_dai->comp.type = SOF_COMP_DAI;
+ comp_dai->config.hdr.size = sizeof(comp_dai->config);
+
+ /* parse one set of DAI tokens */
+ ret = sof_update_ipc_object(scomp, comp_dai, SOF_DAI_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*comp_dai), 1);
+ if (ret < 0)
+ goto free;
+
+ /* update comp_tokens */
+ ret = sof_update_ipc_object(scomp, &comp_dai->config, SOF_COMP_TOKENS,
+ swidget->tuples, swidget->num_tuples,
+ sizeof(comp_dai->config), 1);
+ if (ret < 0)
+ goto free;
+
+ dev_dbg(scomp->dev, "dai %s: type %d index %d\n",
+ swidget->widget->name, comp_dai->type, comp_dai->dai_index);
+ sof_dbg_comp_config(scomp, &comp_dai->config);
+
+ /* now update DAI config */
+ list_for_each_entry(slink, &sdev->dai_link_list, list) {
+ struct sof_ipc_dai_config common_config;
+ int i;
+
+ if (strcmp(slink->link->name, dai->name))
+ continue;
+
+ /* Reserve memory for all hw configs, eventually freed by widget */
+ config = kcalloc(slink->num_hw_configs, sizeof(*config), GFP_KERNEL);
+ if (!config) {
+ ret = -ENOMEM;
+ goto free_comp;
+ }
+
+ /* parse one set of DAI link tokens */
+ ret = sof_update_ipc_object(scomp, &common_config, SOF_DAI_LINK_TOKENS,
+ slink->tuples, slink->num_tuples,
+ sizeof(common_config), 1);
+ if (ret < 0)
+ goto free_config;
+
+ for (i = 0; i < slink->num_hw_configs; i++) {
+ config[i].hdr.cmd = SOF_IPC_GLB_DAI_MSG | SOF_IPC_DAI_CONFIG;
+ config[i].format = le32_to_cpu(slink->hw_configs[i].fmt);
+ config[i].type = common_config.type;
+ config[i].dai_index = comp_dai->dai_index;
+ }
+
+ switch (common_config.type) {
+ case SOF_DAI_INTEL_SSP:
+ ret = sof_link_ssp_load(scomp, slink, config, dai);
+ break;
+ case SOF_DAI_INTEL_DMIC:
+ ret = sof_link_dmic_load(scomp, slink, config, dai);
+ break;
+ case SOF_DAI_INTEL_HDA:
+ ret = sof_link_hda_load(scomp, slink, config, dai);
+ break;
+ case SOF_DAI_INTEL_ALH:
+ ret = sof_link_alh_load(scomp, slink, config, dai);
+ break;
+ case SOF_DAI_IMX_SAI:
+ ret = sof_link_sai_load(scomp, slink, config, dai);
+ break;
+ case SOF_DAI_IMX_ESAI:
+ ret = sof_link_esai_load(scomp, slink, config, dai);
+ break;
+ case SOF_DAI_AMD_BT:
+ ret = sof_link_acp_bt_load(scomp, slink, config, dai);
+ break;
+ case SOF_DAI_AMD_SP:
+ case SOF_DAI_AMD_SP_VIRTUAL:
+ ret = sof_link_acp_sp_load(scomp, slink, config, dai);
+ break;
+ case SOF_DAI_AMD_HS:
+ case SOF_DAI_AMD_HS_VIRTUAL:
+ ret = sof_link_acp_hs_load(scomp, slink, config, dai);
+ break;
+ case SOF_DAI_AMD_DMIC:
+ ret = sof_link_acp_dmic_load(scomp, slink, config, dai);
+ break;
+ case SOF_DAI_MEDIATEK_AFE:
+ ret = sof_link_afe_load(scomp, slink, config, dai);
+ break;
+ default:
+ break;
+ }
+ if (ret < 0) {
+ dev_err(scomp->dev, "failed to load config for dai %s\n", dai->name);
+ goto free_config;
+ }
+
+ kfree(config);
+ }
+
+ return 0;
+free_config:
+ kfree(config);
+free_comp:
+ kfree(comp_dai);
+free:
+ kfree(private);
+ dai->private = NULL;
+ return ret;
+}
+
+static void sof_ipc3_widget_free_comp_dai(struct snd_sof_widget *swidget)
+{
+ switch (swidget->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ {
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_dai_private_data *dai_data;
+
+ if (!dai)
+ return;
+
+ dai_data = dai->private;
+ if (dai_data) {
+ kfree(dai_data->comp_dai);
+ kfree(dai_data->dai_config);
+ kfree(dai_data);
+ }
+ kfree(dai);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static int sof_ipc3_route_setup(struct snd_sof_dev *sdev, struct snd_sof_route *sroute)
+{
+ struct sof_ipc_pipe_comp_connect connect;
+ int ret;
+
+ connect.hdr.size = sizeof(connect);
+ connect.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_CONNECT;
+ connect.source_id = sroute->src_widget->comp_id;
+ connect.sink_id = sroute->sink_widget->comp_id;
+
+ dev_dbg(sdev->dev, "setting up route %s -> %s\n",
+ sroute->src_widget->widget->name,
+ sroute->sink_widget->widget->name);
+
+ /* send ipc */
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, &connect, sizeof(connect));
+ if (ret < 0)
+ dev_err(sdev->dev, "%s: route %s -> %s failed\n", __func__,
+ sroute->src_widget->widget->name, sroute->sink_widget->widget->name);
+
+ return ret;
+}
+
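+/*
+ * Bytes controls embed a struct sof_abi_hdr blob in the topology private data.
+ * Validate the sizes against max_size, check the ABI magic and version, and
+ * make sure the blob size matches priv_size before keeping the copy in
+ * ipc_control_data.
+ */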
+static int sof_ipc3_control_load_bytes(struct snd_sof_dev *sdev, struct snd_sof_control *scontrol)
+{
+ struct sof_ipc_ctrl_data *cdata;
+ size_t priv_size_check;
+ int ret;
+
+ if (scontrol->max_size < (sizeof(*cdata) + sizeof(struct sof_abi_hdr))) {
+ dev_err(sdev->dev, "%s: insufficient size for a bytes control: %zu.\n",
+ __func__, scontrol->max_size);
+ return -EINVAL;
+ }
+
+ if (scontrol->priv_size > scontrol->max_size - sizeof(*cdata)) {
+ dev_err(sdev->dev,
+ "%s: bytes data size %zu exceeds max %zu.\n", __func__,
+ scontrol->priv_size, scontrol->max_size - sizeof(*cdata));
+ return -EINVAL;
+ }
+
+ scontrol->ipc_control_data = kzalloc(scontrol->max_size, GFP_KERNEL);
+ if (!scontrol->ipc_control_data)
+ return -ENOMEM;
+
+ scontrol->size = sizeof(struct sof_ipc_ctrl_data) + scontrol->priv_size;
+
+ cdata = scontrol->ipc_control_data;
+ cdata->cmd = SOF_CTRL_CMD_BINARY;
+ cdata->index = scontrol->index;
+
+ if (scontrol->priv_size > 0) {
+ memcpy(cdata->data, scontrol->priv, scontrol->priv_size);
+ kfree(scontrol->priv);
+ scontrol->priv = NULL;
+
+ if (cdata->data->magic != SOF_ABI_MAGIC) {
+ dev_err(sdev->dev, "Wrong ABI magic 0x%08x.\n", cdata->data->magic);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION, cdata->data->abi)) {
+ dev_err(sdev->dev, "Incompatible ABI version 0x%08x.\n",
+ cdata->data->abi);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ priv_size_check = cdata->data->size + sizeof(struct sof_abi_hdr);
+ if (priv_size_check != scontrol->priv_size) {
+ dev_err(sdev->dev, "Conflict in bytes (%zu) vs. priv size (%zu).\n",
+ priv_size_check, scontrol->priv_size);
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ kfree(scontrol->ipc_control_data);
+ scontrol->ipc_control_data = NULL;
+ return ret;
+}
+
+static int sof_ipc3_control_load_volume(struct snd_sof_dev *sdev, struct snd_sof_control *scontrol)
+{
+ struct sof_ipc_ctrl_data *cdata;
+ int i;
+
+ /* init the volume get/put data */
+ scontrol->size = struct_size(cdata, chanv, scontrol->num_channels);
+
+ scontrol->ipc_control_data = kzalloc(scontrol->size, GFP_KERNEL);
+ if (!scontrol->ipc_control_data)
+ return -ENOMEM;
+
+ cdata = scontrol->ipc_control_data;
+ cdata->index = scontrol->index;
+
+ /* set cmd for mixer control */
+ if (scontrol->max == 1) {
+ cdata->cmd = SOF_CTRL_CMD_SWITCH;
+ return 0;
+ }
+
+ cdata->cmd = SOF_CTRL_CMD_VOLUME;
+
+ /* set default volume values to 0dB in control */
+ for (i = 0; i < scontrol->num_channels; i++) {
+ cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = VOL_ZERO_DB;
+ }
+
+ return 0;
+}
+
+static int sof_ipc3_control_load_enum(struct snd_sof_dev *sdev, struct snd_sof_control *scontrol)
+{
+ struct sof_ipc_ctrl_data *cdata;
+
+ /* init the enum get/put data */
+ scontrol->size = struct_size(cdata, chanv, scontrol->num_channels);
+
+ scontrol->ipc_control_data = kzalloc(scontrol->size, GFP_KERNEL);
+ if (!scontrol->ipc_control_data)
+ return -ENOMEM;
+
+ cdata = scontrol->ipc_control_data;
+ cdata->index = scontrol->index;
+ cdata->cmd = SOF_CTRL_CMD_ENUM;
+
+ return 0;
+}
+
+static int sof_ipc3_control_setup(struct snd_sof_dev *sdev, struct snd_sof_control *scontrol)
+{
+ switch (scontrol->info_type) {
+ case SND_SOC_TPLG_CTL_VOLSW:
+ case SND_SOC_TPLG_CTL_VOLSW_SX:
+ case SND_SOC_TPLG_CTL_VOLSW_XR_SX:
+ return sof_ipc3_control_load_volume(sdev, scontrol);
+ case SND_SOC_TPLG_CTL_BYTES:
+ return sof_ipc3_control_load_bytes(sdev, scontrol);
+ case SND_SOC_TPLG_CTL_ENUM:
+ case SND_SOC_TPLG_CTL_ENUM_VALUE:
+ return sof_ipc3_control_load_enum(sdev, scontrol);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int sof_ipc3_control_free(struct snd_sof_dev *sdev, struct snd_sof_control *scontrol)
+{
+ struct sof_ipc_free fcomp;
+
+ fcomp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_FREE;
+ fcomp.hdr.size = sizeof(fcomp);
+ fcomp.id = scontrol->comp_id;
+
+ /* send IPC to the DSP */
+ return sof_ipc_tx_message_no_reply(sdev->ipc, &fcomp, sizeof(fcomp));
+}
+
+/* send pcm params ipc */
+static int sof_ipc3_keyword_detect_pcm_params(struct snd_sof_widget *swidget, int dir)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_pcm_hw_params *params;
+ struct sof_ipc_pcm_params pcm;
+ struct snd_sof_pcm *spcm;
+ int ret;
+
+ /* get runtime PCM params using widget's stream name */
+ spcm = snd_sof_find_spcm_name(scomp, swidget->widget->sname);
+ if (!spcm) {
+ dev_err(scomp->dev, "Cannot find PCM for %s\n", swidget->widget->name);
+ return -EINVAL;
+ }
+
+ params = &spcm->params[dir];
+
+ /* set IPC PCM params */
+ memset(&pcm, 0, sizeof(pcm));
+ pcm.hdr.size = sizeof(pcm);
+ pcm.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_PARAMS;
+ pcm.comp_id = swidget->comp_id;
+ pcm.params.hdr.size = sizeof(pcm.params);
+ pcm.params.direction = dir;
+ pcm.params.sample_valid_bytes = params_width(params) >> 3;
+ pcm.params.buffer_fmt = SOF_IPC_BUFFER_INTERLEAVED;
+ pcm.params.rate = params_rate(params);
+ pcm.params.channels = params_channels(params);
+ pcm.params.host_period_bytes = params_period_bytes(params);
+
+ /* set format */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S16_LE;
+ break;
+ case SNDRV_PCM_FORMAT_S24:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S24_4LE;
+ break;
+ case SNDRV_PCM_FORMAT_S32:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S32_LE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, &pcm, sizeof(pcm));
+ if (ret < 0)
+ dev_err(scomp->dev, "%s: PCM params failed for %s\n", __func__,
+ swidget->widget->name);
+
+ return ret;
+}
+
+/* send stream trigger ipc */
+static int sof_ipc3_keyword_detect_trigger(struct snd_sof_widget *swidget, int cmd)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_stream stream;
+ int ret;
+
+ /* set IPC stream params */
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | cmd;
+ stream.comp_id = swidget->comp_id;
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, &stream, sizeof(stream));
+ if (ret < 0)
+ dev_err(scomp->dev, "%s: Failed to trigger %s\n", __func__, swidget->widget->name);
+
+ return ret;
+}
+
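+/*
+ * DAPM event handler for the keyword detect pipeline: on PRE_PMU send the
+ * capture PCM params followed by TRIG_START, on POST_PMD send TRIG_STOP and
+ * then PCM_FREE. Both events are ignored when the stream was kept running
+ * across suspend (suspend_ignored).
+ */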
+static int sof_ipc3_keyword_dapm_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_sof_widget *swidget = w->dobj.private;
+ struct snd_soc_component *scomp;
+ int stream = SNDRV_PCM_STREAM_CAPTURE;
+ struct snd_sof_pcm *spcm;
+ int ret = 0;
+
+ if (!swidget)
+ return 0;
+
+ scomp = swidget->scomp;
+
+ dev_dbg(scomp->dev, "received event %d for widget %s\n",
+ event, w->name);
+
+ /* get runtime PCM params using widget's stream name */
+ spcm = snd_sof_find_spcm_name(scomp, swidget->widget->sname);
+ if (!spcm) {
+ dev_err(scomp->dev, "%s: Cannot find PCM for %s\n", __func__,
+ swidget->widget->name);
+ return -EINVAL;
+ }
+
+ /* process events */
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (spcm->stream[stream].suspend_ignored) {
+ dev_dbg(scomp->dev, "PRE_PMU event ignored, KWD pipeline is already RUNNING\n");
+ return 0;
+ }
+
+ /* set pcm params */
+ ret = sof_ipc3_keyword_detect_pcm_params(swidget, stream);
+ if (ret < 0) {
+ dev_err(scomp->dev, "%s: Failed to set pcm params for widget %s\n",
+ __func__, swidget->widget->name);
+ break;
+ }
+
+ /* start trigger */
+ ret = sof_ipc3_keyword_detect_trigger(swidget, SOF_IPC_STREAM_TRIG_START);
+ if (ret < 0)
+ dev_err(scomp->dev, "%s: Failed to trigger widget %s\n", __func__,
+ swidget->widget->name);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (spcm->stream[stream].suspend_ignored) {
+ dev_dbg(scomp->dev,
+ "POST_PMD event ignored, KWD pipeline will remain RUNNING\n");
+ return 0;
+ }
+
+ /* stop trigger */
+ ret = sof_ipc3_keyword_detect_trigger(swidget, SOF_IPC_STREAM_TRIG_STOP);
+ if (ret < 0)
+ dev_err(scomp->dev, "%s: Failed to trigger widget %s\n", __func__,
+ swidget->widget->name);
+
+ /* pcm free */
+ ret = sof_ipc3_keyword_detect_trigger(swidget, SOF_IPC_STREAM_PCM_FREE);
+ if (ret < 0)
+ dev_err(scomp->dev, "%s: Failed to free PCM for widget %s\n", __func__,
+ swidget->widget->name);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* event handlers for keyword detect component */
+static const struct snd_soc_tplg_widget_events sof_kwd_events[] = {
+ {SOF_KEYWORD_DETECT_DAPM_EVENT, sof_ipc3_keyword_dapm_event},
+};
+
+static int sof_ipc3_widget_bind_event(struct snd_soc_component *scomp,
+ struct snd_sof_widget *swidget, u16 event_type)
+{
+ struct sof_ipc_comp *ipc_comp;
+
+ /* validate widget event type */
+ switch (event_type) {
+ case SOF_KEYWORD_DETECT_DAPM_EVENT:
+ /* only KEYWORD_DETECT comps should handle this */
+ if (swidget->id != snd_soc_dapm_effect)
+ break;
+
+ ipc_comp = swidget->private;
+ if (ipc_comp && ipc_comp->type != SOF_COMP_KEYWORD_DETECT)
+ break;
+
+ /* bind event to keyword detect comp */
+ return snd_soc_tplg_widget_bind_event(swidget->widget, sof_kwd_events,
+ ARRAY_SIZE(sof_kwd_events), event_type);
+ default:
+ break;
+ }
+
+ dev_err(scomp->dev, "Invalid event type %d for widget %s\n", event_type,
+ swidget->widget->name);
+
+ return -EINVAL;
+}
+
+static int sof_ipc3_complete_pipeline(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget)
+{
+ struct sof_ipc_pipe_ready ready;
+ int ret;
+
+ dev_dbg(sdev->dev, "tplg: complete pipeline %s id %d\n",
+ swidget->widget->name, swidget->comp_id);
+
+ memset(&ready, 0, sizeof(ready));
+ ready.hdr.size = sizeof(ready);
+ ready.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_PIPE_COMPLETE;
+ ready.comp_id = swidget->comp_id;
+
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, &ready, sizeof(ready));
+ if (ret < 0)
+ return ret;
+
+ return 1;
+}
+
+static int sof_ipc3_widget_free(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget)
+{
+ struct sof_ipc_free ipc_free = {
+ .hdr = {
+ .size = sizeof(ipc_free),
+ .cmd = SOF_IPC_GLB_TPLG_MSG,
+ },
+ .id = swidget->comp_id,
+ };
+ int ret;
+
+ if (!swidget->private)
+ return 0;
+
+ switch (swidget->id) {
+ case snd_soc_dapm_scheduler:
+ {
+ ipc_free.hdr.cmd |= SOF_IPC_TPLG_PIPE_FREE;
+ break;
+ }
+ case snd_soc_dapm_buffer:
+ ipc_free.hdr.cmd |= SOF_IPC_TPLG_BUFFER_FREE;
+ break;
+ default:
+ ipc_free.hdr.cmd |= SOF_IPC_TPLG_COMP_FREE;
+ break;
+ }
+
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, &ipc_free, sizeof(ipc_free));
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to free widget %s\n", swidget->widget->name);
+
+ return ret;
+}
+
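+/*
+ * Send the DAI_CONFIG IPC using the DAI's currently selected config. Type
+ * specific runtime data is patched in first (HDA link DMA channel, ALH stream
+ * id and dai_index), the flags are merged so that BE quirks survive the FE
+ * hw_params, and the IPC is only sent while the widget is set up in the DSP.
+ */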
+static int sof_ipc3_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget,
+ unsigned int flags, struct snd_sof_dai_config_data *data)
+{
+ struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_dai_private_data *private;
+ struct sof_ipc_dai_config *config;
+ int ret = 0;
+
+ if (!dai || !dai->private) {
+ dev_err(sdev->dev, "No private data for DAI %s\n", swidget->widget->name);
+ return -EINVAL;
+ }
+
+ private = dai->private;
+ if (!private->dai_config) {
+ dev_err(sdev->dev, "No config for DAI %s\n", dai->name);
+ return -EINVAL;
+ }
+
+ config = &private->dai_config[dai->current_config];
+ if (!config) {
+ dev_err(sdev->dev, "Invalid current config for DAI %s\n", dai->name);
+ return -EINVAL;
+ }
+
+ switch (config->type) {
+ case SOF_DAI_INTEL_SSP:
+ /*
+ * DAI_CONFIG IPC during hw_params/hw_free for SSP DAIs is not supported in
+ * older firmware.
+ */
+ if (v->abi_version < SOF_ABI_VER(3, 18, 0) &&
+ ((flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) ||
+ (flags & SOF_DAI_CONFIG_FLAGS_HW_FREE)))
+ return 0;
+ break;
+ case SOF_DAI_INTEL_HDA:
+ if (data)
+ config->hda.link_dma_ch = data->dai_data;
+ break;
+ case SOF_DAI_INTEL_ALH:
+ if (data) {
+ /* save the dai_index during hw_params and reuse it for hw_free */
+ if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS)
+ config->dai_index = data->dai_index;
+ config->alh.stream_id = data->dai_data;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * The dai_config op is invoked several times and the flags argument varies as below:
+ * BE DAI hw_params: When the op is invoked during the BE DAI hw_params, flags contains
+ * SOF_DAI_CONFIG_FLAGS_HW_PARAMS along with quirks
+ * FE DAI hw_params: When invoked during FE DAI hw_params after the DAI widget has
+ * just been set up in the DSP, flags is set to SOF_DAI_CONFIG_FLAGS_HW_PARAMS with no
+ * quirks
+ * BE DAI trigger: When invoked during the BE DAI trigger, flags is set to
+ * SOF_DAI_CONFIG_FLAGS_PAUSE and contains no quirks
+ * BE DAI hw_free: When invoked during the BE DAI hw_free, flags is set to
+ * SOF_DAI_CONFIG_FLAGS_HW_FREE and contains no quirks
+ * FE DAI hw_free: When invoked during the FE DAI hw_free, flags is set to
+ * SOF_DAI_CONFIG_FLAGS_HW_FREE and contains no quirks
+ *
+ * The DAI_CONFIG IPC is sent to the DSP, only after the widget is set up during the FE
+ * DAI hw_params. But since the BE DAI hw_params precedes the FE DAI hw_params, the quirks
+ * need to be preserved when assigning the flags before sending the IPC.
+ * For the case of PAUSE/HW_FREE, since there are no quirks, flags can be used as is.
+ */
+
+ if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) {
+ /* Clear stale command */
+ config->flags &= ~SOF_DAI_CONFIG_FLAGS_CMD_MASK;
+ config->flags |= flags;
+ } else {
+ config->flags = flags;
+ }
+
+ /* only send the IPC if the widget is set up in the DSP */
+ if (swidget->use_count > 0) {
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, config, config->hdr.size);
+ if (ret < 0)
+ dev_err(sdev->dev, "Failed to set dai config for %s\n", dai->name);
+
+ /* clear the flags once the IPC has been sent even if it fails */
+ config->flags = SOF_DAI_CONFIG_FLAGS_NONE;
+ }
+
+ return ret;
+}
+
+static int sof_ipc3_widget_setup(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget)
+{
+ int ret;
+
+ if (!swidget->private)
+ return 0;
+
+ switch (swidget->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ {
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_dai_private_data *dai_data = dai->private;
+ struct sof_ipc_comp *comp = &dai_data->comp_dai->comp;
+
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, dai_data->comp_dai, comp->hdr.size);
+ break;
+ }
+ case snd_soc_dapm_scheduler:
+ {
+ struct sof_ipc_pipe_new *pipeline;
+
+ pipeline = swidget->private;
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, pipeline, sizeof(*pipeline));
+ break;
+ }
+ default:
+ {
+ struct sof_ipc_cmd_hdr *hdr;
+
+ hdr = swidget->private;
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, swidget->private, hdr->size);
+ break;
+ }
+ }
+ if (ret < 0)
+ dev_err(sdev->dev, "Failed to setup widget %s\n", swidget->widget->name);
+
+ return ret;
+}
+
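+/*
+ * Restore the topology in three passes: set up the widgets (static pipelines
+ * only, unless verifying), restore the buffer/component routes, then send
+ * PIPE_COMPLETE for every scheduler widget. With pre-3.19 ABI firmware the
+ * scheduler widgets themselves are only set up in the completion pass.
+ */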
+static int sof_ipc3_set_up_all_pipelines(struct snd_sof_dev *sdev, bool verify)
+{
+ struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
+ struct snd_sof_widget *swidget;
+ struct snd_sof_route *sroute;
+ int ret;
+
+ /* restore pipeline components */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ /* only set up the widgets belonging to static pipelines */
+ if (!verify && swidget->dynamic_pipeline_widget)
+ continue;
+
+ /*
+ * For older firmware, skip scheduler widgets in this loop;
+ * sof_widget_setup() will be called in the 'complete pipeline' loop instead.
+ */
+ if (v->abi_version < SOF_ABI_VER(3, 19, 0) &&
+ swidget->id == snd_soc_dapm_scheduler)
+ continue;
+
+ /* update DAI config. The IPC will be sent in sof_widget_setup() */
+ if (WIDGET_IS_DAI(swidget->id)) {
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_dai_private_data *private;
+ struct sof_ipc_dai_config *config;
+
+ if (!dai || !dai->private)
+ continue;
+ private = dai->private;
+ if (!private->dai_config)
+ continue;
+
+ config = private->dai_config;
+ /*
+ * The link DMA channel would be invalidated for running
+ * streams but not for streams that were in the PAUSED
+ * state during suspend. So invalidate it here before setting
+ * the dai config in the DSP.
+ */
+ if (config->type == SOF_DAI_INTEL_HDA)
+ config->hda.link_dma_ch = DMA_CHAN_INVALID;
+ }
+
+ ret = sof_widget_setup(sdev, swidget);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* restore pipeline connections */
+ list_for_each_entry(sroute, &sdev->route_list, list) {
+ /* only set up routes belonging to static pipelines */
+ if (!verify && (sroute->src_widget->dynamic_pipeline_widget ||
+ sroute->sink_widget->dynamic_pipeline_widget))
+ continue;
+
+ /*
+ * For virtual routes, neither the sink nor the source is a buffer. IPC3 only supports
+ * connections between a buffer and a component. Ignore the rest.
+ */
+ if (sroute->src_widget->id != snd_soc_dapm_buffer &&
+ sroute->sink_widget->id != snd_soc_dapm_buffer)
+ continue;
+
+ ret = sof_route_setup(sdev, sroute->src_widget->widget,
+ sroute->sink_widget->widget);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: route set up failed\n", __func__);
+ return ret;
+ }
+ }
+
+ /* complete pipeline */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ switch (swidget->id) {
+ case snd_soc_dapm_scheduler:
+ /* only complete static pipelines */
+ if (!verify && swidget->dynamic_pipeline_widget)
+ continue;
+
+ if (v->abi_version < SOF_ABI_VER(3, 19, 0)) {
+ ret = sof_widget_setup(sdev, swidget);
+ if (ret < 0)
+ return ret;
+ }
+
+ swidget->spipe->complete = sof_ipc3_complete_pipeline(sdev, swidget);
+ if (swidget->spipe->complete < 0)
+ return swidget->spipe->complete;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Free the PCM and its associated widgets, and set the prepared flag to false, for all
+ * PCMs that did not get suspended (e.g. paused streams) so the widgets can be set up
+ * again during resume.
+ */
+static int sof_tear_down_left_over_pipelines(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_widget *swidget;
+ struct snd_sof_pcm *spcm;
+ int dir, ret;
+
+ /*
+ * free all PCMs and their associated DAPM widgets if their connected DAPM widget
+ * list is not NULL. This should only be true for paused streams at this point.
+ * This is equivalent to the handling of FE DAI suspend trigger for running streams.
+ */
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ for_each_pcm_streams(dir) {
+ struct snd_pcm_substream *substream = spcm->stream[dir].substream;
+
+ if (!substream || !substream->runtime || spcm->stream[dir].suspend_ignored)
+ continue;
+
+ if (spcm->stream[dir].list) {
+ ret = sof_pcm_stream_free(sdev, substream, spcm, dir, true);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+
+ /*
+ * free any left over DAI widgets. This is equivalent to the handling of suspend trigger
+ * for the BE DAI for running streams.
+ */
+ list_for_each_entry(swidget, &sdev->widget_list, list)
+ if (WIDGET_IS_DAI(swidget->id) && swidget->use_count == 1) {
+ ret = sof_widget_free(sdev, swidget);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * For older firmware, this function doesn't free widgets for static pipelines during suspend.
+ * It only resets use_count for all widgets.
+ */
+static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verify)
+{
+ struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
+ struct snd_sof_widget *swidget;
+ struct snd_sof_route *sroute;
+ bool dyn_widgets = false;
+ int ret;
+
+ /*
+ * This function is called during suspend and for one-time topology verification during
+ * first boot. In both cases, there is no need to protect swidget->use_count and
+ * sroute->setup because during suspend all running streams are suspended and during
+ * topology loading the sound card is not yet available for opening PCMs.
+ */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (swidget->dynamic_pipeline_widget) {
+ dyn_widgets = true;
+ continue;
+ }
+
+ /* Do not free widgets for static pipelines with FW older than SOF 2.2 */
+ if (!verify && !swidget->dynamic_pipeline_widget &&
+ SOF_FW_VER(v->major, v->minor, v->micro) < SOF_FW_VER(2, 2, 0)) {
+ mutex_lock(&swidget->setup_mutex);
+ swidget->use_count = 0;
+ mutex_unlock(&swidget->setup_mutex);
+ if (swidget->spipe)
+ swidget->spipe->complete = 0;
+ continue;
+ }
+
+ ret = sof_widget_free(sdev, swidget);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Tear down all pipelines associated with PCMs that did not get suspended
+ * and unset the prepare flag so that they can be set up again during resume.
+ * Skip this step for older firmware unless topology has any
+ * dynamic pipeline (in which case the step is mandatory).
+ */
+ if (!verify && (dyn_widgets || SOF_FW_VER(v->major, v->minor, v->micro) >=
+ SOF_FW_VER(2, 2, 0))) {
+ ret = sof_tear_down_left_over_pipelines(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to tear down paused pipelines\n");
+ return ret;
+ }
+ }
+
+ list_for_each_entry(sroute, &sdev->route_list, list)
+ sroute->setup = false;
+
+ /*
+ * before suspending, make sure the refcounts are all zeroed out. There's no way
+ * to recover at this point but this will help root cause bad sequences leading to
+ * more issues on resume
+ */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (swidget->use_count != 0) {
+ dev_err(sdev->dev, "%s: widget %s is still in use: count %d\n",
+ __func__, swidget->widget->name, swidget->use_count);
+ }
+ }
+
+ return 0;
+}
+
+static int sof_ipc3_dai_get_clk(struct snd_sof_dev *sdev, struct snd_sof_dai *dai, int clk_type)
+{
+ struct sof_dai_private_data *private = dai->private;
+
+ if (!private || !private->dai_config)
+ return 0;
+
+ switch (private->dai_config->type) {
+ case SOF_DAI_INTEL_SSP:
+ switch (clk_type) {
+ case SOF_DAI_CLK_INTEL_SSP_MCLK:
+ return private->dai_config->ssp.mclk_rate;
+ case SOF_DAI_CLK_INTEL_SSP_BCLK:
+ return private->dai_config->ssp.bclk_rate;
+ default:
+ break;
+ }
+ dev_err(sdev->dev, "fail to get SSP clk %d rate\n", clk_type);
+ break;
+ default:
+ /* not yet implemented for platforms other than the above */
+ dev_err(sdev->dev, "DAI type %d not supported yet!\n", private->dai_config->type);
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int sof_ipc3_parse_manifest(struct snd_soc_component *scomp, int index,
+ struct snd_soc_tplg_manifest *man)
+{
+ u32 size = le32_to_cpu(man->priv.size);
+ u32 abi_version;
+
+ /* backward compatible with tplg without ABI info */
+ if (!size) {
+ dev_dbg(scomp->dev, "No topology ABI info\n");
+ return 0;
+ }
+
+ if (size != SOF_IPC3_TPLG_ABI_SIZE) {
+ dev_err(scomp->dev, "%s: Invalid topology ABI size: %u\n",
+ __func__, size);
+ return -EINVAL;
+ }
+
+ dev_info(scomp->dev,
+ "Topology: ABI %d:%d:%d Kernel ABI %d:%d:%d\n",
+ man->priv.data[0], man->priv.data[1], man->priv.data[2],
+ SOF_ABI_MAJOR, SOF_ABI_MINOR, SOF_ABI_PATCH);
+
+ abi_version = SOF_ABI_VER(man->priv.data[0], man->priv.data[1], man->priv.data[2]);
+
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION, abi_version)) {
+ dev_err(scomp->dev, "%s: Incompatible topology ABI version\n", __func__);
+ return -EINVAL;
+ }
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_STRICT_ABI_CHECKS) &&
+ SOF_ABI_VERSION_MINOR(abi_version) > SOF_ABI_MINOR) {
+ dev_err(scomp->dev, "%s: Topology ABI is more recent than kernel\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sof_ipc3_link_setup(struct snd_sof_dev *sdev, struct snd_soc_dai_link *link)
+{
+ if (link->no_pcm)
+ return 0;
+
+ /*
+ * Set the default trigger order for all links. Exceptions to
+ * the rule will be handled in sof_pcm_dai_link_fixup().
+ * For playback, the sequence is: start FE, start BE, stop BE,
+ * stop FE; for capture, the sequence is inverted: start BE,
+ * start FE, stop FE, stop BE.
+ */
+ link->trigger[SNDRV_PCM_STREAM_PLAYBACK] = SND_SOC_DPCM_TRIGGER_PRE;
+ link->trigger[SNDRV_PCM_STREAM_CAPTURE] = SND_SOC_DPCM_TRIGGER_POST;
+
+ return 0;
+}
+
+/* token list for each topology object */
+static enum sof_tokens host_token_list[] = {
+ SOF_CORE_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+ SOF_PCM_TOKENS,
+ SOF_COMP_TOKENS,
+};
+
+static enum sof_tokens comp_generic_token_list[] = {
+ SOF_CORE_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+ SOF_COMP_TOKENS,
+};
+
+static enum sof_tokens buffer_token_list[] = {
+ SOF_BUFFER_TOKENS,
+};
+
+static enum sof_tokens pipeline_token_list[] = {
+ SOF_CORE_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+ SOF_PIPELINE_TOKENS,
+ SOF_SCHED_TOKENS,
+};
+
+static enum sof_tokens asrc_token_list[] = {
+ SOF_CORE_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+ SOF_ASRC_TOKENS,
+ SOF_COMP_TOKENS,
+};
+
+static enum sof_tokens src_token_list[] = {
+ SOF_CORE_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+ SOF_SRC_TOKENS,
+ SOF_COMP_TOKENS
+};
+
+static enum sof_tokens pga_token_list[] = {
+ SOF_CORE_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+ SOF_VOLUME_TOKENS,
+ SOF_COMP_TOKENS,
+};
+
+static enum sof_tokens dai_token_list[] = {
+ SOF_CORE_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+ SOF_DAI_TOKENS,
+ SOF_COMP_TOKENS,
+};
+
+static enum sof_tokens process_token_list[] = {
+ SOF_CORE_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+ SOF_PROCESS_TOKENS,
+ SOF_COMP_TOKENS,
+};
+
+static const struct sof_ipc_tplg_widget_ops tplg_ipc3_widget_ops[SND_SOC_DAPM_TYPE_COUNT] = {
+ [snd_soc_dapm_aif_in] = {sof_ipc3_widget_setup_comp_host, sof_ipc3_widget_free_comp,
+ host_token_list, ARRAY_SIZE(host_token_list), NULL},
+ [snd_soc_dapm_aif_out] = {sof_ipc3_widget_setup_comp_host, sof_ipc3_widget_free_comp,
+ host_token_list, ARRAY_SIZE(host_token_list), NULL},
+
+ [snd_soc_dapm_dai_in] = {sof_ipc3_widget_setup_comp_dai, sof_ipc3_widget_free_comp_dai,
+ dai_token_list, ARRAY_SIZE(dai_token_list), NULL},
+ [snd_soc_dapm_dai_out] = {sof_ipc3_widget_setup_comp_dai, sof_ipc3_widget_free_comp_dai,
+ dai_token_list, ARRAY_SIZE(dai_token_list), NULL},
+ [snd_soc_dapm_buffer] = {sof_ipc3_widget_setup_comp_buffer, sof_ipc3_widget_free_comp,
+ buffer_token_list, ARRAY_SIZE(buffer_token_list), NULL},
+ [snd_soc_dapm_mixer] = {sof_ipc3_widget_setup_comp_mixer, sof_ipc3_widget_free_comp,
+ comp_generic_token_list, ARRAY_SIZE(comp_generic_token_list),
+ NULL},
+ [snd_soc_dapm_src] = {sof_ipc3_widget_setup_comp_src, sof_ipc3_widget_free_comp,
+ src_token_list, ARRAY_SIZE(src_token_list), NULL},
+ [snd_soc_dapm_asrc] = {sof_ipc3_widget_setup_comp_asrc, sof_ipc3_widget_free_comp,
+ asrc_token_list, ARRAY_SIZE(asrc_token_list), NULL},
+ [snd_soc_dapm_siggen] = {sof_ipc3_widget_setup_comp_tone, sof_ipc3_widget_free_comp,
+ comp_generic_token_list, ARRAY_SIZE(comp_generic_token_list),
+ NULL},
+ [snd_soc_dapm_scheduler] = {sof_ipc3_widget_setup_comp_pipeline, sof_ipc3_widget_free_comp,
+ pipeline_token_list, ARRAY_SIZE(pipeline_token_list), NULL},
+ [snd_soc_dapm_pga] = {sof_ipc3_widget_setup_comp_pga, sof_ipc3_widget_free_comp,
+ pga_token_list, ARRAY_SIZE(pga_token_list), NULL},
+ [snd_soc_dapm_mux] = {sof_ipc3_widget_setup_comp_mux, sof_ipc3_widget_free_comp,
+ comp_generic_token_list, ARRAY_SIZE(comp_generic_token_list), NULL},
+ [snd_soc_dapm_demux] = {sof_ipc3_widget_setup_comp_mux, sof_ipc3_widget_free_comp,
+ comp_generic_token_list, ARRAY_SIZE(comp_generic_token_list),
+ NULL},
+ [snd_soc_dapm_effect] = {sof_widget_update_ipc_comp_process, sof_ipc3_widget_free_comp,
+ process_token_list, ARRAY_SIZE(process_token_list),
+ sof_ipc3_widget_bind_event},
+};
+
+const struct sof_ipc_tplg_ops ipc3_tplg_ops = {
+ .widget = tplg_ipc3_widget_ops,
+ .control = &tplg_ipc3_control_ops,
+ .route_setup = sof_ipc3_route_setup,
+ .control_setup = sof_ipc3_control_setup,
+ .control_free = sof_ipc3_control_free,
+ .pipeline_complete = sof_ipc3_complete_pipeline,
+ .token_list = ipc3_token_list,
+ .widget_free = sof_ipc3_widget_free,
+ .widget_setup = sof_ipc3_widget_setup,
+ .dai_config = sof_ipc3_dai_config,
+ .dai_get_clk = sof_ipc3_dai_get_clk,
+ .set_up_all_pipelines = sof_ipc3_set_up_all_pipelines,
+ .tear_down_all_pipelines = sof_ipc3_tear_down_all_pipelines,
+ .parse_manifest = sof_ipc3_parse_manifest,
+ .link_setup = sof_ipc3_link_setup,
+};
diff --git a/sound/soc/sof/ipc3.c b/sound/soc/sof/ipc3.c
new file mode 100644
index 000000000..fb40378ad
--- /dev/null
+++ b/sound/soc/sof/ipc3.c
@@ -0,0 +1,1161 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2021 Intel Corporation. All rights reserved.
+//
+//
+
+#include <sound/sof/stream.h>
+#include <sound/sof/control.h>
+#include <trace/events/sof.h>
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ipc3-priv.h"
+#include "ops.h"
+
+typedef void (*ipc3_rx_callback)(struct snd_sof_dev *sdev, void *msg_buf);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_VERBOSE_IPC)
+static void ipc3_log_header(struct device *dev, u8 *text, u32 cmd)
+{
+ u8 *str;
+ u8 *str2 = NULL;
+ u32 glb;
+ u32 type;
+ bool is_sof_ipc_stream_position = false;
+
+ glb = cmd & SOF_GLB_TYPE_MASK;
+ type = cmd & SOF_CMD_TYPE_MASK;
+
+ switch (glb) {
+ case SOF_IPC_GLB_REPLY:
+ str = "GLB_REPLY"; break;
+ case SOF_IPC_GLB_COMPOUND:
+ str = "GLB_COMPOUND"; break;
+ case SOF_IPC_GLB_TPLG_MSG:
+ str = "GLB_TPLG_MSG";
+ switch (type) {
+ case SOF_IPC_TPLG_COMP_NEW:
+ str2 = "COMP_NEW"; break;
+ case SOF_IPC_TPLG_COMP_FREE:
+ str2 = "COMP_FREE"; break;
+ case SOF_IPC_TPLG_COMP_CONNECT:
+ str2 = "COMP_CONNECT"; break;
+ case SOF_IPC_TPLG_PIPE_NEW:
+ str2 = "PIPE_NEW"; break;
+ case SOF_IPC_TPLG_PIPE_FREE:
+ str2 = "PIPE_FREE"; break;
+ case SOF_IPC_TPLG_PIPE_CONNECT:
+ str2 = "PIPE_CONNECT"; break;
+ case SOF_IPC_TPLG_PIPE_COMPLETE:
+ str2 = "PIPE_COMPLETE"; break;
+ case SOF_IPC_TPLG_BUFFER_NEW:
+ str2 = "BUFFER_NEW"; break;
+ case SOF_IPC_TPLG_BUFFER_FREE:
+ str2 = "BUFFER_FREE"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_PM_MSG:
+ str = "GLB_PM_MSG";
+ switch (type) {
+ case SOF_IPC_PM_CTX_SAVE:
+ str2 = "CTX_SAVE"; break;
+ case SOF_IPC_PM_CTX_RESTORE:
+ str2 = "CTX_RESTORE"; break;
+ case SOF_IPC_PM_CTX_SIZE:
+ str2 = "CTX_SIZE"; break;
+ case SOF_IPC_PM_CLK_SET:
+ str2 = "CLK_SET"; break;
+ case SOF_IPC_PM_CLK_GET:
+ str2 = "CLK_GET"; break;
+ case SOF_IPC_PM_CLK_REQ:
+ str2 = "CLK_REQ"; break;
+ case SOF_IPC_PM_CORE_ENABLE:
+ str2 = "CORE_ENABLE"; break;
+ case SOF_IPC_PM_GATE:
+ str2 = "GATE"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_COMP_MSG:
+ str = "GLB_COMP_MSG";
+ switch (type) {
+ case SOF_IPC_COMP_SET_VALUE:
+ str2 = "SET_VALUE"; break;
+ case SOF_IPC_COMP_GET_VALUE:
+ str2 = "GET_VALUE"; break;
+ case SOF_IPC_COMP_SET_DATA:
+ str2 = "SET_DATA"; break;
+ case SOF_IPC_COMP_GET_DATA:
+ str2 = "GET_DATA"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_STREAM_MSG:
+ str = "GLB_STREAM_MSG";
+ switch (type) {
+ case SOF_IPC_STREAM_PCM_PARAMS:
+ str2 = "PCM_PARAMS"; break;
+ case SOF_IPC_STREAM_PCM_PARAMS_REPLY:
+ str2 = "PCM_REPLY"; break;
+ case SOF_IPC_STREAM_PCM_FREE:
+ str2 = "PCM_FREE"; break;
+ case SOF_IPC_STREAM_TRIG_START:
+ str2 = "TRIG_START"; break;
+ case SOF_IPC_STREAM_TRIG_STOP:
+ str2 = "TRIG_STOP"; break;
+ case SOF_IPC_STREAM_TRIG_PAUSE:
+ str2 = "TRIG_PAUSE"; break;
+ case SOF_IPC_STREAM_TRIG_RELEASE:
+ str2 = "TRIG_RELEASE"; break;
+ case SOF_IPC_STREAM_TRIG_DRAIN:
+ str2 = "TRIG_DRAIN"; break;
+ case SOF_IPC_STREAM_TRIG_XRUN:
+ str2 = "TRIG_XRUN"; break;
+ case SOF_IPC_STREAM_POSITION:
+ is_sof_ipc_stream_position = true;
+ str2 = "POSITION"; break;
+ case SOF_IPC_STREAM_VORBIS_PARAMS:
+ str2 = "VORBIS_PARAMS"; break;
+ case SOF_IPC_STREAM_VORBIS_FREE:
+ str2 = "VORBIS_FREE"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_FW_READY:
+ str = "FW_READY"; break;
+ case SOF_IPC_GLB_DAI_MSG:
+ str = "GLB_DAI_MSG";
+ switch (type) {
+ case SOF_IPC_DAI_CONFIG:
+ str2 = "CONFIG"; break;
+ case SOF_IPC_DAI_LOOPBACK:
+ str2 = "LOOPBACK"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_TRACE_MSG:
+ str = "GLB_TRACE_MSG";
+ switch (type) {
+ case SOF_IPC_TRACE_DMA_PARAMS:
+ str2 = "DMA_PARAMS"; break;
+ case SOF_IPC_TRACE_DMA_POSITION:
+ if (!sof_debug_check_flag(SOF_DBG_PRINT_DMA_POSITION_UPDATE_LOGS))
+ return;
+ str2 = "DMA_POSITION"; break;
+ case SOF_IPC_TRACE_DMA_PARAMS_EXT:
+ str2 = "DMA_PARAMS_EXT"; break;
+ case SOF_IPC_TRACE_FILTER_UPDATE:
+ str2 = "FILTER_UPDATE"; break;
+ case SOF_IPC_TRACE_DMA_FREE:
+ str2 = "DMA_FREE"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_TEST_MSG:
+ str = "GLB_TEST_MSG";
+ switch (type) {
+ case SOF_IPC_TEST_IPC_FLOOD:
+ str2 = "IPC_FLOOD"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_DEBUG:
+ str = "GLB_DEBUG";
+ switch (type) {
+ case SOF_IPC_DEBUG_MEM_USAGE:
+ str2 = "MEM_USAGE"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_PROBE:
+ str = "GLB_PROBE";
+ switch (type) {
+ case SOF_IPC_PROBE_INIT:
+ str2 = "INIT"; break;
+ case SOF_IPC_PROBE_DEINIT:
+ str2 = "DEINIT"; break;
+ case SOF_IPC_PROBE_DMA_ADD:
+ str2 = "DMA_ADD"; break;
+ case SOF_IPC_PROBE_DMA_INFO:
+ str2 = "DMA_INFO"; break;
+ case SOF_IPC_PROBE_DMA_REMOVE:
+ str2 = "DMA_REMOVE"; break;
+ case SOF_IPC_PROBE_POINT_ADD:
+ str2 = "POINT_ADD"; break;
+ case SOF_IPC_PROBE_POINT_INFO:
+ str2 = "POINT_INFO"; break;
+ case SOF_IPC_PROBE_POINT_REMOVE:
+ str2 = "POINT_REMOVE"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ default:
+ str = "unknown GLB command"; break;
+ }
+
+ if (str2) {
+ if (is_sof_ipc_stream_position)
+ trace_sof_stream_position_ipc_rx(dev);
+ else
+ dev_dbg(dev, "%s: 0x%x: %s: %s\n", text, cmd, str, str2);
+ } else {
+ dev_dbg(dev, "%s: 0x%x: %s\n", text, cmd, str);
+ }
+}
+#else
+static inline void ipc3_log_header(struct device *dev, u8 *text, u32 cmd)
+{
+ if ((cmd & SOF_GLB_TYPE_MASK) != SOF_IPC_GLB_TRACE_MSG)
+ dev_dbg(dev, "%s: 0x%x\n", text, cmd);
+}
+#endif
+
+static void sof_ipc3_dump_payload(struct snd_sof_dev *sdev,
+ void *ipc_data, size_t size)
+{
+ printk(KERN_DEBUG "Size of payload following the header: %zu\n", size);
+ print_hex_dump_debug("Message payload: ", DUMP_PREFIX_OFFSET,
+ 16, 4, ipc_data, size, false);
+}
+
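+/*
+ * Read the generic reply header from the host mailbox first to get the error
+ * code and reply size; if a larger reply is expected and the reported size
+ * fits the buffer, read the full payload in a second mailbox read.
+ */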
+static int sof_ipc3_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply *reply;
+ int ret = 0;
+
+ /* get the generic reply */
+ reply = msg->reply_data;
+ snd_sof_dsp_mailbox_read(sdev, sdev->host_box.offset, reply, sizeof(*reply));
+
+ if (reply->error < 0)
+ return reply->error;
+
+ if (!reply->hdr.size) {
+ /* Reply should always be >= sizeof(struct sof_ipc_reply) */
+ if (msg->reply_size)
+ dev_err(sdev->dev,
+ "empty reply received, expected %zu bytes\n",
+ msg->reply_size);
+ else
+ dev_err(sdev->dev, "empty reply received\n");
+
+ return -EINVAL;
+ }
+
+ if (msg->reply_size > 0) {
+ if (reply->hdr.size == msg->reply_size) {
+ ret = 0;
+ } else if (reply->hdr.size < msg->reply_size) {
+ dev_dbg(sdev->dev,
+ "reply size (%u) is less than expected (%zu)\n",
+ reply->hdr.size, msg->reply_size);
+
+ msg->reply_size = reply->hdr.size;
+ ret = 0;
+ } else {
+ dev_err(sdev->dev,
+ "reply size (%u) exceeds the buffer size (%zu)\n",
+ reply->hdr.size, msg->reply_size);
+ ret = -EINVAL;
+ }
+
+ /*
+ * get the full message if reply->hdr.size <= msg->reply_size
+ * and the reply->hdr.size > sizeof(struct sof_ipc_reply)
+ */
+ if (!ret && msg->reply_size > sizeof(*reply))
+ snd_sof_dsp_mailbox_read(sdev, sdev->host_box.offset,
+ msg->reply_data, msg->reply_size);
+ }
+
+ return ret;
+}
+
+/* wait for IPC message reply */
+static int ipc3_wait_tx_done(struct snd_sof_ipc *ipc, void *reply_data)
+{
+ struct snd_sof_ipc_msg *msg = &ipc->msg;
+ struct sof_ipc_cmd_hdr *hdr = msg->msg_data;
+ struct snd_sof_dev *sdev = ipc->sdev;
+ int ret;
+
+ /* wait for DSP IPC completion */
+ ret = wait_event_timeout(msg->waitq, msg->ipc_complete,
+ msecs_to_jiffies(sdev->ipc_timeout));
+
+ if (ret == 0) {
+ dev_err(sdev->dev,
+ "ipc tx timed out for %#x (msg/reply size: %d/%zu)\n",
+ hdr->cmd, hdr->size, msg->reply_size);
+ snd_sof_handle_fw_exception(ipc->sdev, "IPC timeout");
+ ret = -ETIMEDOUT;
+ } else {
+ ret = msg->reply_error;
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "ipc tx error for %#x (msg/reply size: %d/%zu): %d\n",
+ hdr->cmd, hdr->size, msg->reply_size, ret);
+ } else {
+ if (sof_debug_check_flag(SOF_DBG_PRINT_IPC_SUCCESS_LOGS))
+ ipc3_log_header(sdev->dev, "ipc tx succeeded", hdr->cmd);
+ if (reply_data && msg->reply_size)
+ /* copy the data returned from DSP */
+ memcpy(reply_data, msg->reply_data,
+ msg->reply_size);
+ }
+
+ /* re-enable dumps after successful IPC tx */
+ if (sdev->ipc_dump_printed) {
+ sdev->dbg_dump_printed = false;
+ sdev->ipc_dump_printed = false;
+ }
+ }
+
+ return ret;
+}
+
+/* send IPC message from host to DSP */
+static int ipc3_tx_msg_unlocked(struct snd_sof_ipc *ipc,
+ void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes)
+{
+ struct sof_ipc_cmd_hdr *hdr = msg_data;
+ struct snd_sof_dev *sdev = ipc->sdev;
+ int ret;
+
+ ipc3_log_header(sdev->dev, "ipc tx", hdr->cmd);
+
+ ret = sof_ipc_send_msg(sdev, msg_data, msg_bytes, reply_bytes);
+
+ if (ret) {
+ dev_err_ratelimited(sdev->dev,
+ "%s: ipc message send for %#x failed: %d\n",
+ __func__, hdr->cmd, ret);
+ return ret;
+ }
+
+ /* now wait for completion */
+ return ipc3_wait_tx_done(ipc, reply_data);
+}
+
+static int sof_ipc3_tx_msg(struct snd_sof_dev *sdev, void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes, bool no_pm)
+{
+ struct snd_sof_ipc *ipc = sdev->ipc;
+ int ret;
+
+ if (!msg_data || msg_bytes < sizeof(struct sof_ipc_cmd_hdr)) {
+ dev_err_ratelimited(sdev->dev, "No IPC message to send\n");
+ return -EINVAL;
+ }
+
+ if (!no_pm) {
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+
+ /* ensure the DSP is in D0 before sending a new IPC */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_state);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: resuming DSP failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ /* Serialise IPC TX */
+ mutex_lock(&ipc->tx_mutex);
+
+ ret = ipc3_tx_msg_unlocked(ipc, msg_data, msg_bytes, reply_data, reply_bytes);
+
+ if (sof_debug_check_flag(SOF_DBG_DUMP_IPC_MESSAGE_PAYLOAD)) {
+ size_t payload_bytes, header_bytes;
+ char *payload = NULL;
+
+ /* payload is indicated by non-zero msg/reply_bytes */
+ if (msg_bytes > sizeof(struct sof_ipc_cmd_hdr)) {
+ payload = msg_data;
+
+ header_bytes = sizeof(struct sof_ipc_cmd_hdr);
+ payload_bytes = msg_bytes - header_bytes;
+ } else if (reply_bytes > sizeof(struct sof_ipc_reply)) {
+ payload = reply_data;
+
+ header_bytes = sizeof(struct sof_ipc_reply);
+ payload_bytes = reply_bytes - header_bytes;
+ }
+
+ if (payload) {
+ payload += header_bytes;
+ sof_ipc3_dump_payload(sdev, payload, payload_bytes);
+ }
+ }
+
+ mutex_unlock(&ipc->tx_mutex);
+
+ return ret;
+}
+
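+/*
+ * Large control data is split into chunks no bigger than max_payload_size:
+ * the control header is replicated in each chunk, and msg_index together with
+ * elems_remaining lets the firmware reassemble the full blob. Channel value
+ * and binary data controls use different header sizes, hence the distinct
+ * src/dst pointers.
+ */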
+static int sof_ipc3_set_get_data(struct snd_sof_dev *sdev, void *data, size_t data_bytes,
+ bool set)
+{
+ size_t msg_bytes, hdr_bytes, payload_size, send_bytes;
+ struct sof_ipc_ctrl_data *cdata = data;
+ struct sof_ipc_ctrl_data *cdata_chunk;
+ struct snd_sof_ipc *ipc = sdev->ipc;
+ size_t offset = 0;
+ u8 *src, *dst;
+ u32 num_msg;
+ int ret = 0;
+ int i;
+
+ if (!cdata || data_bytes < sizeof(*cdata))
+ return -EINVAL;
+
+ if ((cdata->rhdr.hdr.cmd & SOF_GLB_TYPE_MASK) != SOF_IPC_GLB_COMP_MSG) {
+ dev_err(sdev->dev, "%s: Not supported message type of %#x\n",
+ __func__, cdata->rhdr.hdr.cmd);
+ return -EINVAL;
+ }
+
+ /* send normal size ipc in one part */
+ if (cdata->rhdr.hdr.size <= ipc->max_payload_size)
+ return sof_ipc3_tx_msg(sdev, cdata, cdata->rhdr.hdr.size,
+ cdata, cdata->rhdr.hdr.size, false);
+
+ cdata_chunk = kzalloc(ipc->max_payload_size, GFP_KERNEL);
+ if (!cdata_chunk)
+ return -ENOMEM;
+
+ switch (cdata->type) {
+ case SOF_CTRL_TYPE_VALUE_CHAN_GET:
+ case SOF_CTRL_TYPE_VALUE_CHAN_SET:
+ hdr_bytes = sizeof(struct sof_ipc_ctrl_data);
+ if (set) {
+ src = (u8 *)cdata->chanv;
+ dst = (u8 *)cdata_chunk->chanv;
+ } else {
+ src = (u8 *)cdata_chunk->chanv;
+ dst = (u8 *)cdata->chanv;
+ }
+ break;
+ case SOF_CTRL_TYPE_DATA_GET:
+ case SOF_CTRL_TYPE_DATA_SET:
+ hdr_bytes = sizeof(struct sof_ipc_ctrl_data) + sizeof(struct sof_abi_hdr);
+ if (set) {
+ src = (u8 *)cdata->data->data;
+ dst = (u8 *)cdata_chunk->data->data;
+ } else {
+ src = (u8 *)cdata_chunk->data->data;
+ dst = (u8 *)cdata->data->data;
+ }
+ break;
+ default:
+ kfree(cdata_chunk);
+ return -EINVAL;
+ }
+
+ msg_bytes = cdata->rhdr.hdr.size - hdr_bytes;
+ payload_size = ipc->max_payload_size - hdr_bytes;
+ num_msg = DIV_ROUND_UP(msg_bytes, payload_size);
+
+ /* copy the header data */
+ memcpy(cdata_chunk, cdata, hdr_bytes);
+
+ /* Serialise IPC TX */
+ mutex_lock(&sdev->ipc->tx_mutex);
+
+ /* copy the payload data in a loop */
+ for (i = 0; i < num_msg; i++) {
+ send_bytes = min(msg_bytes, payload_size);
+ cdata_chunk->num_elems = send_bytes;
+ cdata_chunk->rhdr.hdr.size = hdr_bytes + send_bytes;
+ cdata_chunk->msg_index = i;
+ msg_bytes -= send_bytes;
+ cdata_chunk->elems_remaining = msg_bytes;
+
+ if (set)
+ memcpy(dst, src + offset, send_bytes);
+
+ ret = ipc3_tx_msg_unlocked(sdev->ipc,
+ cdata_chunk, cdata_chunk->rhdr.hdr.size,
+ cdata_chunk, cdata_chunk->rhdr.hdr.size);
+ if (ret < 0)
+ break;
+
+ if (!set)
+ memcpy(dst + offset, src, send_bytes);
+
+ offset += payload_size;
+ }
+
+ if (sof_debug_check_flag(SOF_DBG_DUMP_IPC_MESSAGE_PAYLOAD)) {
+ size_t header_bytes = sizeof(struct sof_ipc_reply);
+ char *payload = (char *)cdata;
+
+ payload += header_bytes;
+ sof_ipc3_dump_payload(sdev, payload, data_bytes - header_bytes);
+ }
+
+ mutex_unlock(&sdev->ipc->tx_mutex);
+
+ kfree(cdata_chunk);
+
+ return ret;
+}
+
+int sof_ipc3_get_ext_windows(struct snd_sof_dev *sdev,
+ const struct sof_ipc_ext_data_hdr *ext_hdr)
+{
+ const struct sof_ipc_window *w =
+ container_of(ext_hdr, struct sof_ipc_window, ext_hdr);
+
+ if (w->num_windows == 0 || w->num_windows > SOF_IPC_MAX_ELEMS)
+ return -EINVAL;
+
+ if (sdev->info_window) {
+ if (memcmp(sdev->info_window, w, ext_hdr->hdr.size)) {
+ dev_err(sdev->dev, "mismatch between window descriptor from extended manifest and mailbox");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ /* keep a local copy of the data */
+ sdev->info_window = devm_kmemdup(sdev->dev, w, ext_hdr->hdr.size, GFP_KERNEL);
+ if (!sdev->info_window)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int sof_ipc3_get_cc_info(struct snd_sof_dev *sdev,
+ const struct sof_ipc_ext_data_hdr *ext_hdr)
+{
+ int ret;
+
+ const struct sof_ipc_cc_version *cc =
+ container_of(ext_hdr, struct sof_ipc_cc_version, ext_hdr);
+
+ if (sdev->cc_version) {
+ if (memcmp(sdev->cc_version, cc, cc->ext_hdr.hdr.size)) {
+ dev_err(sdev->dev,
+ "Receive diverged cc_version descriptions");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ dev_dbg(sdev->dev,
+ "Firmware info: used compiler %s %d:%d:%d%s used optimization flags %s\n",
+ cc->name, cc->major, cc->minor, cc->micro, cc->desc, cc->optim);
+
+ /* create read-only cc_version debugfs to store compiler version info */
+ /* use local copy of the cc_version to prevent data corruption */
+ if (sdev->first_boot) {
+ sdev->cc_version = devm_kmemdup(sdev->dev, cc, cc->ext_hdr.hdr.size, GFP_KERNEL);
+ if (!sdev->cc_version)
+ return -ENOMEM;
+
+ ret = snd_sof_debugfs_buf_item(sdev, sdev->cc_version,
+ cc->ext_hdr.hdr.size,
+ "cc_version", 0444);
+
+ /* errors are only due to memory allocation, not debugfs */
+ if (ret < 0) {
+ dev_err(sdev->dev, "snd_sof_debugfs_buf_item failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* parse the extended FW boot data structures from FW boot message */
+static int ipc3_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 offset)
+{
+ struct sof_ipc_ext_data_hdr *ext_hdr;
+ void *ext_data;
+ int ret = 0;
+
+ ext_data = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!ext_data)
+ return -ENOMEM;
+
+ /* get first header */
+ snd_sof_dsp_block_read(sdev, SOF_FW_BLK_TYPE_SRAM, offset, ext_data,
+ sizeof(*ext_hdr));
+ ext_hdr = ext_data;
+
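+	/*
+	 * The list of extended data structures is terminated by a header whose
+	 * cmd field is no longer SOF_IPC_FW_READY.
+	 */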
+ while (ext_hdr->hdr.cmd == SOF_IPC_FW_READY) {
+ /* read in ext structure */
+ snd_sof_dsp_block_read(sdev, SOF_FW_BLK_TYPE_SRAM,
+ offset + sizeof(*ext_hdr),
+ (void *)((u8 *)ext_data + sizeof(*ext_hdr)),
+ ext_hdr->hdr.size - sizeof(*ext_hdr));
+
+ dev_dbg(sdev->dev, "found ext header type %d size 0x%x\n",
+ ext_hdr->type, ext_hdr->hdr.size);
+
+ /* process structure data */
+ switch (ext_hdr->type) {
+ case SOF_IPC_EXT_WINDOW:
+ ret = sof_ipc3_get_ext_windows(sdev, ext_hdr);
+ break;
+ case SOF_IPC_EXT_CC_INFO:
+ ret = sof_ipc3_get_cc_info(sdev, ext_hdr);
+ break;
+ case SOF_IPC_EXT_UNUSED:
+ case SOF_IPC_EXT_PROBE_INFO:
+ case SOF_IPC_EXT_USER_ABI_INFO:
+ /* They are supported but we don't do anything here */
+ break;
+ default:
+ dev_info(sdev->dev, "unknown ext header type %d size 0x%x\n",
+ ext_hdr->type, ext_hdr->hdr.size);
+ ret = 0;
+ break;
+ }
+
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to parse ext data type %d\n",
+ ext_hdr->type);
+ break;
+ }
+
+ /* move to next header */
+ offset += ext_hdr->hdr.size;
+ snd_sof_dsp_block_read(sdev, SOF_FW_BLK_TYPE_SRAM, offset, ext_data,
+ sizeof(*ext_hdr));
+ ext_hdr = ext_data;
+ }
+
+ kfree(ext_data);
+ return ret;
+}
+
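+/*
+ * Map the window descriptors received from the firmware to the driver's
+ * mailbox, stream, debug and exception regions and expose each region in
+ * debugfs.
+ */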
+static void ipc3_get_windows(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_window_elem *elem;
+ u32 outbox_offset = 0;
+ u32 stream_offset = 0;
+ u32 inbox_offset = 0;
+ u32 outbox_size = 0;
+ u32 stream_size = 0;
+ u32 inbox_size = 0;
+ u32 debug_size = 0;
+ u32 debug_offset = 0;
+ int window_offset;
+ int i;
+
+ if (!sdev->info_window) {
+ dev_err(sdev->dev, "%s: No window info present\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < sdev->info_window->num_windows; i++) {
+ elem = &sdev->info_window->window[i];
+
+ window_offset = snd_sof_dsp_get_window_offset(sdev, elem->id);
+ if (window_offset < 0) {
+ dev_warn(sdev->dev, "No offset for window %d\n", elem->id);
+ continue;
+ }
+
+ switch (elem->type) {
+ case SOF_IPC_REGION_UPBOX:
+ inbox_offset = window_offset + elem->offset;
+ inbox_size = elem->size;
+ snd_sof_debugfs_add_region_item(sdev, SOF_FW_BLK_TYPE_SRAM,
+ inbox_offset,
+ elem->size, "inbox",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_DOWNBOX:
+ outbox_offset = window_offset + elem->offset;
+ outbox_size = elem->size;
+ snd_sof_debugfs_add_region_item(sdev, SOF_FW_BLK_TYPE_SRAM,
+ outbox_offset,
+ elem->size, "outbox",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_TRACE:
+ snd_sof_debugfs_add_region_item(sdev, SOF_FW_BLK_TYPE_SRAM,
+ window_offset + elem->offset,
+ elem->size, "etrace",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_DEBUG:
+ debug_offset = window_offset + elem->offset;
+ debug_size = elem->size;
+ snd_sof_debugfs_add_region_item(sdev, SOF_FW_BLK_TYPE_SRAM,
+ window_offset + elem->offset,
+ elem->size, "debug",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_STREAM:
+ stream_offset = window_offset + elem->offset;
+ stream_size = elem->size;
+ snd_sof_debugfs_add_region_item(sdev, SOF_FW_BLK_TYPE_SRAM,
+ stream_offset,
+ elem->size, "stream",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_REGS:
+ snd_sof_debugfs_add_region_item(sdev, SOF_FW_BLK_TYPE_SRAM,
+ window_offset + elem->offset,
+ elem->size, "regs",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_EXCEPTION:
+ sdev->dsp_oops_offset = window_offset + elem->offset;
+ snd_sof_debugfs_add_region_item(sdev, SOF_FW_BLK_TYPE_SRAM,
+ window_offset + elem->offset,
+ elem->size, "exception",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ default:
+ dev_err(sdev->dev, "%s: Illegal window info: %u\n",
+ __func__, elem->type);
+ return;
+ }
+ }
+
+ if (outbox_size == 0 || inbox_size == 0) {
+ dev_err(sdev->dev, "%s: Illegal mailbox window\n", __func__);
+ return;
+ }
+
+ sdev->dsp_box.offset = inbox_offset;
+ sdev->dsp_box.size = inbox_size;
+
+ sdev->host_box.offset = outbox_offset;
+ sdev->host_box.size = outbox_size;
+
+ sdev->stream_box.offset = stream_offset;
+ sdev->stream_box.size = stream_size;
+
+ sdev->debug_box.offset = debug_offset;
+ sdev->debug_box.size = debug_size;
+
+ dev_dbg(sdev->dev, " mailbox upstream 0x%x - size 0x%x\n",
+ inbox_offset, inbox_size);
+ dev_dbg(sdev->dev, " mailbox downstream 0x%x - size 0x%x\n",
+ outbox_offset, outbox_size);
+ dev_dbg(sdev->dev, " stream region 0x%x - size 0x%x\n",
+ stream_offset, stream_size);
+ dev_dbg(sdev->dev, " debug region 0x%x - size 0x%x\n",
+ debug_offset, debug_size);
+}
+
+static int ipc3_init_reply_data_buffer(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = &sdev->ipc->msg;
+
+ msg->reply_data = devm_kzalloc(sdev->dev, SOF_IPC_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!msg->reply_data)
+ return -ENOMEM;
+
+ sdev->ipc->max_payload_size = SOF_IPC_MSG_MAX_SIZE;
+
+ return 0;
+}
+
+int sof_ipc3_validate_fw_version(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
+ struct sof_ipc_fw_version *v = &ready->version;
+
+ dev_info(sdev->dev,
+ "Firmware info: version %d:%d:%d-%s\n", v->major, v->minor,
+ v->micro, v->tag);
+ dev_info(sdev->dev,
+ "Firmware: ABI %d:%d:%d Kernel ABI %d:%d:%d\n",
+ SOF_ABI_VERSION_MAJOR(v->abi_version),
+ SOF_ABI_VERSION_MINOR(v->abi_version),
+ SOF_ABI_VERSION_PATCH(v->abi_version),
+ SOF_ABI_MAJOR, SOF_ABI_MINOR, SOF_ABI_PATCH);
+
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION, v->abi_version)) {
+ dev_err(sdev->dev, "incompatible FW ABI version\n");
+ return -EINVAL;
+ }
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_STRICT_ABI_CHECKS) &&
+ SOF_ABI_VERSION_MINOR(v->abi_version) > SOF_ABI_MINOR) {
+ dev_err(sdev->dev, "FW ABI is more recent than kernel\n");
+ return -EINVAL;
+ }
+
+ if (ready->flags & SOF_IPC_INFO_BUILD) {
+ dev_info(sdev->dev,
+ "Firmware debug build %d on %s-%s - options:\n"
+ " GDB: %s\n"
+ " lock debug: %s\n"
+ " lock vdebug: %s\n",
+ v->build, v->date, v->time,
+ (ready->flags & SOF_IPC_INFO_GDB) ?
+ "enabled" : "disabled",
+ (ready->flags & SOF_IPC_INFO_LOCKS) ?
+ "enabled" : "disabled",
+ (ready->flags & SOF_IPC_INFO_LOCKSV) ?
+ "enabled" : "disabled");
+ }
+
+ /* copy the fw_version into debugfs at first boot */
+ memcpy(&sdev->fw_version, v, sizeof(*v));
+
+ return 0;
+}
+
+static int ipc3_fw_ready(struct snd_sof_dev *sdev, u32 cmd)
+{
+ struct sof_ipc_fw_ready *fw_ready = &sdev->fw_ready;
+ int offset;
+ int ret;
+
+ /* mailbox must be on 4k boundary */
+ offset = snd_sof_dsp_get_mailbox_offset(sdev);
+ if (offset < 0) {
+ dev_err(sdev->dev, "%s: no mailbox offset\n", __func__);
+ return offset;
+ }
+
+ dev_dbg(sdev->dev, "DSP is ready 0x%8.8x offset 0x%x\n", cmd, offset);
+
+ /* no need to re-check version/ABI for subsequent boots */
+ if (!sdev->first_boot)
+ return 0;
+
+ /*
+	 * copy data from the DSP FW ready offset; the subsequent BLK_TYPE_SRAM
+	 * reads below do not need separate error handling
+ */
+ ret = snd_sof_dsp_block_read(sdev, SOF_FW_BLK_TYPE_SRAM, offset, fw_ready,
+ sizeof(*fw_ready));
+ if (ret) {
+ dev_err(sdev->dev,
+ "Unable to read fw_ready, read from TYPE_SRAM failed\n");
+ return ret;
+ }
+
+ /* make sure ABI version is compatible */
+ ret = sof_ipc3_validate_fw_version(sdev);
+ if (ret < 0)
+ return ret;
+
+ /* now check for extended data */
+ ipc3_fw_parse_ext_data(sdev, offset + sizeof(struct sof_ipc_fw_ready));
+
+ ipc3_get_windows(sdev);
+
+ return ipc3_init_reply_data_buffer(sdev);
+}
+
+/* IPC stream position. */
+static void ipc3_period_elapsed(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct snd_soc_component *scomp = sdev->component;
+ struct snd_sof_pcm_stream *stream;
+ struct sof_ipc_stream_posn posn;
+ struct snd_sof_pcm *spcm;
+ int direction, ret;
+
+ spcm = snd_sof_find_spcm_comp(scomp, msg_id, &direction);
+ if (!spcm) {
+ dev_err(sdev->dev, "period elapsed for unknown stream, msg_id %d\n",
+ msg_id);
+ return;
+ }
+
+ stream = &spcm->stream[direction];
+ ret = snd_sof_ipc_msg_data(sdev, stream, &posn, sizeof(posn));
+ if (ret < 0) {
+ dev_warn(sdev->dev, "failed to read stream position: %d\n", ret);
+ return;
+ }
+
+ trace_sof_ipc3_period_elapsed_position(sdev, &posn);
+
+ memcpy(&stream->posn, &posn, sizeof(posn));
+
+ if (spcm->pcm.compress)
+ snd_sof_compr_fragment_elapsed(stream->cstream);
+ else if (stream->substream->runtime &&
+ !stream->substream->runtime->no_period_wakeup)
+ /* only inform ALSA for period_wakeup mode */
+ snd_sof_pcm_period_elapsed(stream->substream);
+}
+
+/* DSP notifies host of an XRUN within FW */
+static void ipc3_xrun(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct snd_soc_component *scomp = sdev->component;
+ struct snd_sof_pcm_stream *stream;
+ struct sof_ipc_stream_posn posn;
+ struct snd_sof_pcm *spcm;
+ int direction, ret;
+
+ spcm = snd_sof_find_spcm_comp(scomp, msg_id, &direction);
+ if (!spcm) {
+ dev_err(sdev->dev, "XRUN for unknown stream, msg_id %d\n",
+ msg_id);
+ return;
+ }
+
+ stream = &spcm->stream[direction];
+ ret = snd_sof_ipc_msg_data(sdev, stream, &posn, sizeof(posn));
+ if (ret < 0) {
+ dev_warn(sdev->dev, "failed to read overrun position: %d\n", ret);
+ return;
+ }
+
+ dev_dbg(sdev->dev, "posn XRUN: host %llx comp %d size %d\n",
+ posn.host_posn, posn.xrun_comp_id, posn.xrun_size);
+
+#if defined(CONFIG_SND_SOC_SOF_DEBUG_XRUN_STOP)
+ /* stop PCM on XRUN - used for pipeline debug */
+ memcpy(&stream->posn, &posn, sizeof(posn));
+ snd_pcm_stop_xrun(stream->substream);
+#endif
+}
+
+/* stream notifications from firmware */
+static void ipc3_stream_message(struct snd_sof_dev *sdev, void *msg_buf)
+{
+ struct sof_ipc_cmd_hdr *hdr = msg_buf;
+ u32 msg_type = hdr->cmd & SOF_CMD_TYPE_MASK;
+ u32 msg_id = SOF_IPC_MESSAGE_ID(hdr->cmd);
+
+ switch (msg_type) {
+ case SOF_IPC_STREAM_POSITION:
+ ipc3_period_elapsed(sdev, msg_id);
+ break;
+ case SOF_IPC_STREAM_TRIG_XRUN:
+ ipc3_xrun(sdev, msg_id);
+ break;
+ default:
+ dev_err(sdev->dev, "unhandled stream message %#x\n",
+ msg_id);
+ break;
+ }
+}
+
+/* component notifications from firmware */
+static void ipc3_comp_notification(struct snd_sof_dev *sdev, void *msg_buf)
+{
+ const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
+ struct sof_ipc_cmd_hdr *hdr = msg_buf;
+ u32 msg_type = hdr->cmd & SOF_CMD_TYPE_MASK;
+
+ switch (msg_type) {
+ case SOF_IPC_COMP_GET_VALUE:
+ case SOF_IPC_COMP_GET_DATA:
+ break;
+ default:
+ dev_err(sdev->dev, "unhandled component message %#x\n", msg_type);
+ return;
+ }
+
+ if (tplg_ops->control->update)
+ tplg_ops->control->update(sdev, msg_buf);
+}
+
+static void ipc3_trace_message(struct snd_sof_dev *sdev, void *msg_buf)
+{
+ struct sof_ipc_cmd_hdr *hdr = msg_buf;
+ u32 msg_type = hdr->cmd & SOF_CMD_TYPE_MASK;
+
+ switch (msg_type) {
+ case SOF_IPC_TRACE_DMA_POSITION:
+ ipc3_dtrace_posn_update(sdev, msg_buf);
+ break;
+ default:
+ dev_err(sdev->dev, "unhandled trace message %#x\n", msg_type);
+ break;
+ }
+}
+
+void sof_ipc3_do_rx_work(struct snd_sof_dev *sdev, struct sof_ipc_cmd_hdr *hdr, void *msg_buf)
+{
+ ipc3_rx_callback rx_callback = NULL;
+ u32 cmd;
+ int err;
+
+ ipc3_log_header(sdev->dev, "ipc rx", hdr->cmd);
+
+ if (hdr->size < sizeof(*hdr) || hdr->size > SOF_IPC_MSG_MAX_SIZE) {
+ dev_err(sdev->dev, "The received message size is invalid: %u\n",
+ hdr->size);
+ return;
+ }
+
+ cmd = hdr->cmd & SOF_GLB_TYPE_MASK;
+
+ /* check message type */
+ switch (cmd) {
+ case SOF_IPC_GLB_REPLY:
+ dev_err(sdev->dev, "ipc reply unknown\n");
+ break;
+ case SOF_IPC_FW_READY:
+ /* check for FW boot completion */
+ if (sdev->fw_state == SOF_FW_BOOT_IN_PROGRESS) {
+ err = ipc3_fw_ready(sdev, cmd);
+ if (err < 0)
+ sof_set_fw_state(sdev, SOF_FW_BOOT_READY_FAILED);
+ else
+ sof_set_fw_state(sdev, SOF_FW_BOOT_READY_OK);
+
+ /* wake up firmware loader */
+ wake_up(&sdev->boot_wait);
+ }
+ break;
+ case SOF_IPC_GLB_COMPOUND:
+ case SOF_IPC_GLB_TPLG_MSG:
+ case SOF_IPC_GLB_PM_MSG:
+ break;
+ case SOF_IPC_GLB_COMP_MSG:
+ rx_callback = ipc3_comp_notification;
+ break;
+ case SOF_IPC_GLB_STREAM_MSG:
+ rx_callback = ipc3_stream_message;
+ break;
+ case SOF_IPC_GLB_TRACE_MSG:
+ rx_callback = ipc3_trace_message;
+ break;
+ default:
+ dev_err(sdev->dev, "%s: Unknown DSP message: 0x%x\n", __func__, cmd);
+ break;
+ }
+
+ /* Call local handler for the message */
+ if (rx_callback)
+ rx_callback(sdev, msg_buf);
+
+ /* Notify registered clients */
+ sof_client_ipc_rx_dispatcher(sdev, msg_buf);
+
+ ipc3_log_header(sdev->dev, "ipc rx done", hdr->cmd);
+}
+EXPORT_SYMBOL(sof_ipc3_do_rx_work);
+
+/* DSP firmware has sent host a message */
+static void sof_ipc3_rx_msg(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_cmd_hdr hdr;
+ void *msg_buf;
+ int err;
+
+ /* read back header */
+ err = snd_sof_ipc_msg_data(sdev, NULL, &hdr, sizeof(hdr));
+ if (err < 0) {
+ dev_warn(sdev->dev, "failed to read IPC header: %d\n", err);
+ return;
+ }
+
+ if (hdr.size < sizeof(hdr)) {
+ dev_err(sdev->dev, "The received message size is invalid\n");
+ return;
+ }
+
+ /* read the full message */
+ msg_buf = kmalloc(hdr.size, GFP_KERNEL);
+ if (!msg_buf)
+ return;
+
+ err = snd_sof_ipc_msg_data(sdev, NULL, msg_buf, hdr.size);
+ if (err < 0) {
+ dev_err(sdev->dev, "%s: Failed to read message: %d\n", __func__, err);
+ kfree(msg_buf);
+ return;
+ }
+
+ sof_ipc3_do_rx_work(sdev, &hdr, msg_buf);
+
+ kfree(msg_buf);
+}
+
+static int sof_ipc3_set_core_state(struct snd_sof_dev *sdev, int core_idx, bool on)
+{
+ struct sof_ipc_pm_core_config core_cfg = {
+ .hdr.size = sizeof(core_cfg),
+ .hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CORE_ENABLE,
+ };
+
+ if (on)
+ core_cfg.enable_mask = sdev->enabled_cores_mask | BIT(core_idx);
+ else
+ core_cfg.enable_mask = sdev->enabled_cores_mask & ~BIT(core_idx);
+
+ return sof_ipc3_tx_msg(sdev, &core_cfg, sizeof(core_cfg), NULL, 0, false);
+}
+
+static int sof_ipc3_ctx_ipc(struct snd_sof_dev *sdev, int cmd)
+{
+ struct sof_ipc_pm_ctx pm_ctx = {
+ .hdr.size = sizeof(pm_ctx),
+ .hdr.cmd = SOF_IPC_GLB_PM_MSG | cmd,
+ };
+
+ /* send ctx save ipc to dsp */
+ return sof_ipc3_tx_msg(sdev, &pm_ctx, sizeof(pm_ctx), NULL, 0, false);
+}
+
+static int sof_ipc3_ctx_save(struct snd_sof_dev *sdev)
+{
+ return sof_ipc3_ctx_ipc(sdev, SOF_IPC_PM_CTX_SAVE);
+}
+
+static int sof_ipc3_ctx_restore(struct snd_sof_dev *sdev)
+{
+ return sof_ipc3_ctx_ipc(sdev, SOF_IPC_PM_CTX_RESTORE);
+}
+
+static int sof_ipc3_set_pm_gate(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_pm_gate pm_gate;
+
+ memset(&pm_gate, 0, sizeof(pm_gate));
+
+ /* configure pm_gate ipc message */
+ pm_gate.hdr.size = sizeof(pm_gate);
+ pm_gate.hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE;
+ pm_gate.flags = flags;
+
+ /* send pm_gate ipc to dsp */
+ return sof_ipc_tx_message_no_pm_no_reply(sdev->ipc, &pm_gate, sizeof(pm_gate));
+}
+
+static const struct sof_ipc_pm_ops ipc3_pm_ops = {
+ .ctx_save = sof_ipc3_ctx_save,
+ .ctx_restore = sof_ipc3_ctx_restore,
+ .set_core_state = sof_ipc3_set_core_state,
+ .set_pm_gate = sof_ipc3_set_pm_gate,
+};
+
+const struct sof_ipc_ops ipc3_ops = {
+ .tplg = &ipc3_tplg_ops,
+ .pm = &ipc3_pm_ops,
+ .pcm = &ipc3_pcm_ops,
+ .fw_loader = &ipc3_loader_ops,
+ .fw_tracing = &ipc3_dtrace_ops,
+
+ .tx_msg = sof_ipc3_tx_msg,
+ .rx_msg = sof_ipc3_rx_msg,
+ .set_get_data = sof_ipc3_set_get_data,
+ .get_reply = sof_ipc3_get_reply,
+};
diff --git a/sound/soc/sof/ipc4-control.c b/sound/soc/sof/ipc4-control.c
new file mode 100644
index 000000000..e4ce1b53f
--- /dev/null
+++ b/sound/soc/sof/ipc4-control.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+//
+
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ipc4-priv.h"
+#include "ipc4-topology.h"
+
+static int sof_ipc4_set_get_kcontrol_data(struct snd_sof_control *scontrol,
+ bool set, bool lock)
+{
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_ops *iops = sdev->ipc->ops;
+ struct sof_ipc4_msg *msg = &cdata->msg;
+ struct snd_sof_widget *swidget;
+ bool widget_found = false;
+ int ret = 0;
+
+ /* find widget associated with the control */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (swidget->comp_id == scontrol->comp_id) {
+ widget_found = true;
+ break;
+ }
+ }
+
+ if (!widget_found) {
+ dev_err(scomp->dev, "Failed to find widget for kcontrol %s\n", scontrol->name);
+ return -ENOENT;
+ }
+
+ if (lock)
+ mutex_lock(&swidget->setup_mutex);
+ else
+ lockdep_assert_held(&swidget->setup_mutex);
+
+ /*
+ * Volatile controls should always be part of static pipelines and the
+ * widget use_count would always be > 0 in this case. For the others,
+ * just return the cached value if the widget is not set up.
+ */
+ if (!swidget->use_count)
+ goto unlock;
+
+ msg->primary &= ~SOF_IPC4_MOD_INSTANCE_MASK;
+ msg->primary |= SOF_IPC4_MOD_INSTANCE(swidget->instance_id);
+
+ ret = iops->set_get_data(sdev, msg, msg->data_size, set);
+ if (!set)
+ goto unlock;
+
+ /* It is a set-data operation, and we have a valid backup that we can restore */
+ if (ret < 0) {
+ if (!scontrol->old_ipc_control_data)
+ goto unlock;
+ /*
+ * Current ipc_control_data is not valid, we use the last known good
+ * configuration
+ */
+ memcpy(scontrol->ipc_control_data, scontrol->old_ipc_control_data,
+ scontrol->max_size);
+ kfree(scontrol->old_ipc_control_data);
+ scontrol->old_ipc_control_data = NULL;
+ /* Send the last known good configuration to firmware */
+ ret = iops->set_get_data(sdev, msg, msg->data_size, set);
+ if (ret < 0)
+ goto unlock;
+ }
+
+unlock:
+ if (lock)
+ mutex_unlock(&swidget->setup_mutex);
+
+ return ret;
+}
+
+static int
+sof_ipc4_set_volume_data(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget,
+ struct snd_sof_control *scontrol, bool lock)
+{
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct sof_ipc4_gain *gain = swidget->private;
+ struct sof_ipc4_msg *msg = &cdata->msg;
+ struct sof_ipc4_gain_params params;
+ bool all_channels_equal = true;
+ u32 value;
+ int ret, i;
+
+ /* check if all channel values are equal */
+ value = cdata->chanv[0].value;
+ for (i = 1; i < scontrol->num_channels; i++) {
+ if (cdata->chanv[i].value != value) {
+ all_channels_equal = false;
+ break;
+ }
+ }
+
+ /*
+ * notify DSP with a single IPC message if all channel values are equal. Otherwise send
+ * a separate IPC for each channel.
+ */
+ for (i = 0; i < scontrol->num_channels; i++) {
+ if (all_channels_equal) {
+ params.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
+ params.init_val = cdata->chanv[0].value;
+ } else {
+ params.channels = cdata->chanv[i].channel;
+ params.init_val = cdata->chanv[i].value;
+ }
+
+ /* set curve type and duration from topology */
+ params.curve_duration_l = gain->data.params.curve_duration_l;
+ params.curve_duration_h = gain->data.params.curve_duration_h;
+ params.curve_type = gain->data.params.curve_type;
+
+ msg->data_ptr = &params;
+ msg->data_size = sizeof(params);
+
+ ret = sof_ipc4_set_get_kcontrol_data(scontrol, true, lock);
+ msg->data_ptr = NULL;
+ msg->data_size = 0;
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to set volume update for %s\n",
+ scontrol->name);
+ return ret;
+ }
+
+ if (all_channels_equal)
+ break;
+ }
+
+ return 0;
+}
+
+static bool sof_ipc4_volume_put(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ unsigned int channels = scontrol->num_channels;
+ struct snd_sof_widget *swidget;
+ bool widget_found = false;
+ bool change = false;
+ unsigned int i;
+ int ret;
+
+ /* update each channel */
+ for (i = 0; i < channels; i++) {
+ u32 value = mixer_to_ipc(ucontrol->value.integer.value[i],
+ scontrol->volume_table, scontrol->max + 1);
+
+ change = change || (value != cdata->chanv[i].value);
+ cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
+ }
+
+ if (!pm_runtime_active(scomp->dev))
+ return change;
+
+ /* find widget associated with the control */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (swidget->comp_id == scontrol->comp_id) {
+ widget_found = true;
+ break;
+ }
+ }
+
+ if (!widget_found) {
+ dev_err(scomp->dev, "Failed to find widget for kcontrol %s\n", scontrol->name);
+ return false;
+ }
+
+ ret = sof_ipc4_set_volume_data(sdev, swidget, scontrol, true);
+ if (ret < 0)
+ return false;
+
+ return change;
+}
+
+static int sof_ipc4_volume_get(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ unsigned int channels = scontrol->num_channels;
+ unsigned int i;
+
+ for (i = 0; i < channels; i++)
+ ucontrol->value.integer.value[i] = ipc_to_mixer(cdata->chanv[i].value,
+ scontrol->volume_table,
+ scontrol->max + 1);
+
+ return 0;
+}
+
+static int sof_ipc4_set_get_bytes_data(struct snd_sof_dev *sdev,
+ struct snd_sof_control *scontrol,
+ bool set, bool lock)
+{
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct sof_abi_hdr *data = cdata->data;
+ struct sof_ipc4_msg *msg = &cdata->msg;
+ int ret = 0;
+
+ /* Send the new data to the firmware only if it is powered up */
+ if (set && !pm_runtime_active(sdev->dev))
+ return 0;
+
+ msg->extension = SOF_IPC4_MOD_EXT_MSG_PARAM_ID(data->type);
+
+ msg->data_ptr = data->data;
+ msg->data_size = data->size;
+
+ ret = sof_ipc4_set_get_kcontrol_data(scontrol, set, lock);
+ if (ret < 0)
+ dev_err(sdev->dev, "Failed to %s for %s\n",
+ set ? "set bytes update" : "get bytes",
+ scontrol->name);
+
+ msg->data_ptr = NULL;
+ msg->data_size = 0;
+
+ return ret;
+}
+
+static int sof_ipc4_bytes_put(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_abi_hdr *data = cdata->data;
+ size_t size;
+
+ if (scontrol->max_size > sizeof(ucontrol->value.bytes.data)) {
+ dev_err_ratelimited(scomp->dev,
+ "data max %zu exceeds ucontrol data array size\n",
+ scontrol->max_size);
+ return -EINVAL;
+ }
+
+ /* scontrol->max_size has been verified to be >= sizeof(struct sof_abi_hdr) */
+ if (data->size > scontrol->max_size - sizeof(*data)) {
+ dev_err_ratelimited(scomp->dev,
+ "data size too big %u bytes max is %zu\n",
+ data->size, scontrol->max_size - sizeof(*data));
+ return -EINVAL;
+ }
+
+ size = data->size + sizeof(*data);
+
+ /* copy from kcontrol */
+ memcpy(data, ucontrol->value.bytes.data, size);
+
+ sof_ipc4_set_get_bytes_data(sdev, scontrol, true, true);
+
+ return 0;
+}
+
+static int sof_ipc4_bytes_get(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_abi_hdr *data = cdata->data;
+ size_t size;
+
+ if (scontrol->max_size > sizeof(ucontrol->value.bytes.data)) {
+ dev_err_ratelimited(scomp->dev, "data max %zu exceeds ucontrol data array size\n",
+ scontrol->max_size);
+ return -EINVAL;
+ }
+
+ if (data->size > scontrol->max_size - sizeof(*data)) {
+ dev_err_ratelimited(scomp->dev,
+ "%u bytes of control data is invalid, max is %zu\n",
+ data->size, scontrol->max_size - sizeof(*data));
+ return -EINVAL;
+ }
+
+ size = data->size + sizeof(*data);
+
+ /* copy back to kcontrol */
+ memcpy(ucontrol->value.bytes.data, data, size);
+
+ return 0;
+}
+
+static int sof_ipc4_bytes_ext_put(struct snd_sof_control *scontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size)
+{
+ struct snd_ctl_tlv __user *tlvd = (struct snd_ctl_tlv __user *)binary_data;
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_abi_hdr *data = cdata->data;
+ struct sof_abi_hdr abi_hdr;
+ struct snd_ctl_tlv header;
+
+ /*
+ * The beginning of bytes data contains a header from where
+ * the length (as bytes) is needed to know the correct copy
+ * length of data from tlvd->tlv.
+ */
+ if (copy_from_user(&header, tlvd, sizeof(struct snd_ctl_tlv)))
+ return -EFAULT;
+
+ /* make sure TLV info is consistent */
+ if (header.length + sizeof(struct snd_ctl_tlv) > size) {
+ dev_err_ratelimited(scomp->dev,
+ "Inconsistent TLV, data %d + header %zu > %d\n",
+ header.length, sizeof(struct snd_ctl_tlv), size);
+ return -EINVAL;
+ }
+
+ /* be->max is coming from topology */
+ if (header.length > scontrol->max_size) {
+ dev_err_ratelimited(scomp->dev,
+ "Bytes data size %d exceeds max %zu\n",
+ header.length, scontrol->max_size);
+ return -EINVAL;
+ }
+
+ /* Verify the ABI header first */
+ if (copy_from_user(&abi_hdr, tlvd->tlv, sizeof(abi_hdr)))
+ return -EFAULT;
+
+ if (abi_hdr.magic != SOF_IPC4_ABI_MAGIC) {
+ dev_err_ratelimited(scomp->dev, "Wrong ABI magic 0x%08x\n",
+ abi_hdr.magic);
+ return -EINVAL;
+ }
+
+ if (abi_hdr.size > scontrol->max_size - sizeof(abi_hdr)) {
+ dev_err_ratelimited(scomp->dev,
+ "%u bytes of control data is invalid, max is %zu\n",
+ abi_hdr.size, scontrol->max_size - sizeof(abi_hdr));
+ return -EINVAL;
+ }
+
+ if (!scontrol->old_ipc_control_data) {
+ /* Create a backup of the current, valid bytes control */
+ scontrol->old_ipc_control_data = kmemdup(scontrol->ipc_control_data,
+ scontrol->max_size, GFP_KERNEL);
+ if (!scontrol->old_ipc_control_data)
+ return -ENOMEM;
+ }
+
+ /* Copy the whole binary data which includes the ABI header and the payload */
+ if (copy_from_user(data, tlvd->tlv, header.length)) {
+ memcpy(scontrol->ipc_control_data, scontrol->old_ipc_control_data,
+ scontrol->max_size);
+ kfree(scontrol->old_ipc_control_data);
+ scontrol->old_ipc_control_data = NULL;
+ return -EFAULT;
+ }
+
+ return sof_ipc4_set_get_bytes_data(sdev, scontrol, true, true);
+}
+
+static int _sof_ipc4_bytes_ext_get(struct snd_sof_control *scontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size, bool from_dsp)
+{
+ struct snd_ctl_tlv __user *tlvd = (struct snd_ctl_tlv __user *)binary_data;
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_abi_hdr *data = cdata->data;
+ struct snd_ctl_tlv header;
+ size_t data_size;
+
+ /*
+ * Decrement the limit by ext bytes header size to ensure the user space
+ * buffer is not exceeded.
+ */
+ if (size < sizeof(struct snd_ctl_tlv))
+ return -ENOSPC;
+
+ size -= sizeof(struct snd_ctl_tlv);
+
+ /* get all the component data from DSP */
+ if (from_dsp) {
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ int ret = sof_ipc4_set_get_bytes_data(sdev, scontrol, false, true);
+
+ if (ret < 0)
+ return ret;
+
+ /* Set the ABI magic (if the control is not initialized) */
+ data->magic = SOF_IPC4_ABI_MAGIC;
+ }
+
+ if (data->size > scontrol->max_size - sizeof(*data)) {
+ dev_err_ratelimited(scomp->dev,
+ "%u bytes of control data is invalid, max is %zu\n",
+ data->size, scontrol->max_size - sizeof(*data));
+ return -EINVAL;
+ }
+
+ data_size = data->size + sizeof(struct sof_abi_hdr);
+
+ /* make sure we don't exceed size provided by user space for data */
+ if (data_size > size)
+ return -ENOSPC;
+
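+	/*
+	 * The buffer returned to user space is a snd_ctl_tlv header (numid set
+	 * to the component id, length to the ABI header + payload size)
+	 * followed by the sof_abi_hdr and the control payload itself.
+	 */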
+ header.numid = scontrol->comp_id;
+ header.length = data_size;
+
+ if (copy_to_user(tlvd, &header, sizeof(struct snd_ctl_tlv)))
+ return -EFAULT;
+
+ if (copy_to_user(tlvd->tlv, data, data_size))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sof_ipc4_bytes_ext_get(struct snd_sof_control *scontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size)
+{
+ return _sof_ipc4_bytes_ext_get(scontrol, binary_data, size, false);
+}
+
+static int sof_ipc4_bytes_ext_volatile_get(struct snd_sof_control *scontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size)
+{
+ return _sof_ipc4_bytes_ext_get(scontrol, binary_data, size, true);
+}
+
+/* set up all controls for the widget */
+static int sof_ipc4_widget_kcontrol_setup(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget)
+{
+ struct snd_sof_control *scontrol;
+ int ret = 0;
+
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list) {
+ if (scontrol->comp_id == swidget->comp_id) {
+ switch (scontrol->info_type) {
+ case SND_SOC_TPLG_CTL_VOLSW:
+ case SND_SOC_TPLG_CTL_VOLSW_SX:
+ case SND_SOC_TPLG_CTL_VOLSW_XR_SX:
+ ret = sof_ipc4_set_volume_data(sdev, swidget,
+ scontrol, false);
+ break;
+ case SND_SOC_TPLG_CTL_BYTES:
+ ret = sof_ipc4_set_get_bytes_data(sdev, scontrol,
+ true, false);
+ break;
+ default:
+ break;
+ }
+
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "kcontrol %d set up failed for widget %s\n",
+ scontrol->comp_id, swidget->widget->name);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+sof_ipc4_set_up_volume_table(struct snd_sof_control *scontrol, int tlv[SOF_TLV_ITEMS], int size)
+{
+ int i;
+
+ /* init the volume table */
+ scontrol->volume_table = kcalloc(size, sizeof(u32), GFP_KERNEL);
+ if (!scontrol->volume_table)
+ return -ENOMEM;
+
+ /* populate the volume table */
+ for (i = 0; i < size ; i++) {
+ u32 val = vol_compute_gain(i, tlv);
+ u64 q31val = ((u64)val) << 15; /* Can be over Q1.31, need to saturate */
+
+ scontrol->volume_table[i] = q31val > SOF_IPC4_VOL_ZERO_DB ?
+ SOF_IPC4_VOL_ZERO_DB : q31val;
+ }
+
+ return 0;
+}
+
+const struct sof_ipc_tplg_control_ops tplg_ipc4_control_ops = {
+ .volume_put = sof_ipc4_volume_put,
+ .volume_get = sof_ipc4_volume_get,
+ .bytes_put = sof_ipc4_bytes_put,
+ .bytes_get = sof_ipc4_bytes_get,
+ .bytes_ext_put = sof_ipc4_bytes_ext_put,
+ .bytes_ext_get = sof_ipc4_bytes_ext_get,
+ .bytes_ext_volatile_get = sof_ipc4_bytes_ext_volatile_get,
+ .widget_kcontrol_setup = sof_ipc4_widget_kcontrol_setup,
+ .set_up_volume_table = sof_ipc4_set_up_volume_table,
+};
diff --git a/sound/soc/sof/ipc4-fw-reg.h b/sound/soc/sof/ipc4-fw-reg.h
new file mode 100644
index 000000000..7226161e5
--- /dev/null
+++ b/sound/soc/sof/ipc4-fw-reg.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __IPC4_FW_REG_H__
+#define __IPC4_FW_REG_H__
+
+#define SOF_IPC4_INVALID_STREAM_POSITION ULLONG_MAX
+
+/**
+ * struct sof_ipc4_pipeline_registers - Pipeline start and end information in fw
+ * @stream_start_offset: Stream start offset (LPIB) reported by mixin
+ * module allocated on pipeline attached to Host Output Gateway when
+ * first data is being mixed to mixout module. When data is not mixed
+ * (right after creation/after reset) value "(u64)-1" is reported
+ * @stream_end_offset: Stream end offset (LPIB) reported by mixin
+ * module allocated on pipeline attached to Host Output Gateway
+ * during transition from RUNNING to PAUSED. When data is not mixed
+ * (right after creation or after reset) value "(u64)-1" is reported.
+ *	When the first data is mixed, value "0" is reported.
+ */
+struct sof_ipc4_pipeline_registers {
+ u64 stream_start_offset;
+ u64 stream_end_offset;
+} __packed __aligned(4);
+
+#define SOF_IPC4_PV_MAX_SUPPORTED_CHANNELS 8
+
+/**
+ * struct sof_ipc4_peak_volume_regs - Volume information in fw
+ * @peak_meter: Peak volume value in fw
+ * @current_volume: Current volume value in fw
+ * @target_volume: Target volume value in fw
+ */
+struct sof_ipc4_peak_volume_regs {
+ u32 peak_meter[SOF_IPC4_PV_MAX_SUPPORTED_CHANNELS];
+ u32 current_volume[SOF_IPC4_PV_MAX_SUPPORTED_CHANNELS];
+ u32 target_volume[SOF_IPC4_PV_MAX_SUPPORTED_CHANNELS];
+} __packed __aligned(4);
+
+/**
+ * struct sof_ipc4_llp_reading - Llp information in fw
+ * @llp_l: Lower part of 64-bit LLP
+ * @llp_u: Upper part of 64-bit LLP
+ * @wclk_l: Lower part of 64-bit Wallclock
+ * @wclk_u: Upper part of 64-bit Wallclock
+ */
+struct sof_ipc4_llp_reading {
+ u32 llp_l;
+ u32 llp_u;
+ u32 wclk_l;
+ u32 wclk_u;
+} __packed __aligned(4);
+
+/**
+ * struct sof_ipc4_llp_reading_extended - Extended llp info
+ * @llp_reading: Llp information in memory window
+ * @tpd_low: Total processed data (low part)
+ * @tpd_high: Total processed data (high part)
+ */
+struct sof_ipc4_llp_reading_extended {
+ struct sof_ipc4_llp_reading llp_reading;
+ u32 tpd_low;
+ u32 tpd_high;
+} __packed __aligned(4);
+
+/**
+ * struct sof_ipc4_llp_reading_slot - Llp slot information in memory window
+ * @node_id: Dai gateway node id
+ * @reading: Llp information in memory window
+ */
+struct sof_ipc4_llp_reading_slot {
+ u32 node_id;
+ struct sof_ipc4_llp_reading reading;
+} __packed __aligned(4);
+
+/* ROM information */
+#define SOF_IPC4_FW_FUSE_VALUE_MASK GENMASK(7, 0)
+#define SOF_IPC4_FW_LOAD_METHOD_MASK BIT(8)
+#define SOF_IPC4_FW_DOWNLINK_IPC_USE_DMA_MASK BIT(9)
+#define SOF_IPC4_FW_LOAD_METHOD_REV_MASK GENMASK(11, 10)
+#define SOF_IPC4_FW_REVISION_MIN_MASK GENMASK(15, 12)
+#define SOF_IPC4_FW_REVISION_MAJ_MASK GENMASK(19, 16)
+#define SOF_IPC4_FW_VERSION_MIN_MASK GENMASK(23, 20)
+#define SOF_IPC4_FW_VERSION_MAJ_MASK GENMASK(27, 24)
+
+/* Number of DSP cores supported in FW Regs. */
+#define SOF_IPC4_MAX_SUPPORTED_ADSP_CORES 8
+
+/* Number of host pipeline registers slots in FW Regs. */
+#define SOF_IPC4_MAX_PIPELINE_REG_SLOTS 16
+
+/* Number of PeakVol registers slots in FW Regs. */
+#define SOF_IPC4_MAX_PEAK_VOL_REG_SLOTS 16
+
+/* Number of GPDMA LLP Reading slots in FW Regs. */
+#define SOF_IPC4_MAX_LLP_GPDMA_READING_SLOTS 24
+
+/* Number of Aggregated SNDW Reading slots in FW Regs. */
+#define SOF_IPC4_MAX_LLP_SNDW_READING_SLOTS 15
+
+/* Current ABI version of the Fw registers layout. */
+#define SOF_IPC4_FW_REGS_ABI_VER 1
+
+/**
+ * struct sof_ipc4_fw_registers - FW Registers exposes additional
+ * DSP / FW state information to the driver
+ * @fw_status: Current ROM / FW status
+ * @lec: Last ROM / FW error code
+ * @fps: Current DSP clock status
+ * @lnec: Last Native Error Code (from external library)
+ * @ltr: Copy of LTRC HW register value (FW only)
+ * @rsvd0: Reserved0
+ * @rom_info: ROM info
+ * @abi_ver: Version of the layout, set to the current FW_REGS_ABI_VER
+ * @slave_core_sts: Slave core states
+ * @rsvd2: Reserved2
+ * @pipeline_regs: State of pipelines attached to host output gateways
+ * @peak_vol_regs: State of PeakVol instances indexed by the PeakVol's instance_id
+ * @llp_gpdma_reading_slots: LLP Readings for single link gateways
+ * @llp_sndw_reading_slots: SNDW aggregated link gateways
+ * @llp_evad_reading_slot: LLP Readings for EVAD gateway
+ */
+struct sof_ipc4_fw_registers {
+ u32 fw_status;
+ u32 lec;
+ u32 fps;
+ u32 lnec;
+ u32 ltr;
+ u32 rsvd0;
+ u32 rom_info;
+ u32 abi_ver;
+ u8 slave_core_sts[SOF_IPC4_MAX_SUPPORTED_ADSP_CORES];
+ u32 rsvd2[6];
+
+ struct sof_ipc4_pipeline_registers
+ pipeline_regs[SOF_IPC4_MAX_PIPELINE_REG_SLOTS];
+
+ struct sof_ipc4_peak_volume_regs
+ peak_vol_regs[SOF_IPC4_MAX_PEAK_VOL_REG_SLOTS];
+
+ struct sof_ipc4_llp_reading_slot
+ llp_gpdma_reading_slots[SOF_IPC4_MAX_LLP_GPDMA_READING_SLOTS];
+
+ struct sof_ipc4_llp_reading_slot
+ llp_sndw_reading_slots[SOF_IPC4_MAX_LLP_SNDW_READING_SLOTS];
+
+ struct sof_ipc4_llp_reading_slot llp_evad_reading_slot;
+} __packed __aligned(4);
+
+#endif
diff --git a/sound/soc/sof/ipc4-loader.c b/sound/soc/sof/ipc4-loader.c
new file mode 100644
index 000000000..4d37e89b5
--- /dev/null
+++ b/sound/soc/sof/ipc4-loader.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+
+#include <linux/firmware.h>
+#include <sound/sof/ext_manifest4.h>
+#include <sound/sof/ipc4/header.h>
+#include <trace/events/sof.h>
+#include "ipc4-priv.h"
+#include "sof-audio.h"
+#include "sof-priv.h"
+#include "ops.h"
+
+/* The module ID includes the id of the library it is part of at offset 12 */
+#define SOF_IPC4_MOD_LIB_ID_SHIFT 12
+
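+/*
+ * Image layout as parsed below: the extended manifest header and its data come
+ * first, followed by manifest_fw_hdr_offset bytes, the firmware binary header,
+ * the module entries (fw_header->len bytes from the start of the binary
+ * header) and finally the per-module configuration table. The return value is
+ * the extended manifest length, which the callers use as the payload offset
+ * of the image.
+ */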
+static ssize_t sof_ipc4_fw_parse_ext_man(struct snd_sof_dev *sdev,
+ struct sof_ipc4_fw_library *fw_lib)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ const struct firmware *fw = fw_lib->sof_fw.fw;
+ struct sof_man4_fw_binary_header *fw_header;
+ struct sof_ext_manifest4_hdr *ext_man_hdr;
+ struct sof_man4_module_config *fm_config;
+ struct sof_ipc4_fw_module *fw_module;
+ struct sof_man4_module *fm_entry;
+ ssize_t remaining;
+ u32 fw_hdr_offset;
+ int i;
+
+ if (!ipc4_data) {
+ dev_err(sdev->dev, "%s: ipc4_data is not available\n", __func__);
+ return -EINVAL;
+ }
+
+ remaining = fw->size;
+ if (remaining <= sizeof(*ext_man_hdr)) {
+ dev_err(sdev->dev, "Firmware size is too small: %zu\n", remaining);
+ return -EINVAL;
+ }
+
+ ext_man_hdr = (struct sof_ext_manifest4_hdr *)fw->data;
+
+ /*
+ * At the start of the firmware image we must have an extended manifest.
+ * Verify that the magic number is correct.
+ */
+ if (ext_man_hdr->id != SOF_EXT_MAN4_MAGIC_NUMBER) {
+ dev_err(sdev->dev,
+ "Unexpected extended manifest magic number: %#x\n",
+ ext_man_hdr->id);
+ return -EINVAL;
+ }
+
+ fw_hdr_offset = ipc4_data->manifest_fw_hdr_offset;
+ if (!fw_hdr_offset)
+ return -EINVAL;
+
+ if (remaining <= ext_man_hdr->len + fw_hdr_offset + sizeof(*fw_header)) {
+ dev_err(sdev->dev, "Invalid firmware size %zu, should be at least %zu\n",
+ remaining, ext_man_hdr->len + fw_hdr_offset + sizeof(*fw_header));
+ return -EINVAL;
+ }
+
+ fw_header = (struct sof_man4_fw_binary_header *)
+ (fw->data + ext_man_hdr->len + fw_hdr_offset);
+ remaining -= (ext_man_hdr->len + fw_hdr_offset);
+
+ if (remaining <= fw_header->len) {
+ dev_err(sdev->dev, "Invalid fw_header->len %u\n", fw_header->len);
+ return -EINVAL;
+ }
+
+ dev_info(sdev->dev, "Loaded firmware library: %s, version: %u.%u.%u.%u\n",
+ fw_header->name, fw_header->major_version, fw_header->minor_version,
+ fw_header->hotfix_version, fw_header->build_version);
+ dev_dbg(sdev->dev, "Header length: %u, module count: %u\n",
+ fw_header->len, fw_header->num_module_entries);
+
+ fw_lib->modules = devm_kmalloc_array(sdev->dev, fw_header->num_module_entries,
+ sizeof(*fw_module), GFP_KERNEL);
+ if (!fw_lib->modules)
+ return -ENOMEM;
+
+ fw_lib->name = fw_header->name;
+ fw_lib->num_modules = fw_header->num_module_entries;
+ fw_module = fw_lib->modules;
+
+ fm_entry = (struct sof_man4_module *)((u8 *)fw_header + fw_header->len);
+ remaining -= fw_header->len;
+
+ if (remaining < fw_header->num_module_entries * sizeof(*fm_entry)) {
+ dev_err(sdev->dev, "Invalid num_module_entries %u\n",
+ fw_header->num_module_entries);
+ return -EINVAL;
+ }
+
+ fm_config = (struct sof_man4_module_config *)
+ (fm_entry + fw_header->num_module_entries);
+ remaining -= (fw_header->num_module_entries * sizeof(*fm_entry));
+ for (i = 0; i < fw_header->num_module_entries; i++) {
+ memcpy(&fw_module->man4_module_entry, fm_entry, sizeof(*fm_entry));
+
+ if (fm_entry->cfg_count) {
+ if (remaining < (fm_entry->cfg_offset + fm_entry->cfg_count) *
+ sizeof(*fm_config)) {
+ dev_err(sdev->dev, "Invalid module cfg_offset %u\n",
+ fm_entry->cfg_offset);
+ return -EINVAL;
+ }
+
+ fw_module->fw_mod_cfg = &fm_config[fm_entry->cfg_offset];
+
+ dev_dbg(sdev->dev,
+ "module %s: UUID %pUL cfg_count: %u, bss_size: %#x\n",
+ fm_entry->name, &fm_entry->uuid, fm_entry->cfg_count,
+ fm_config[fm_entry->cfg_offset].is_bytes);
+ } else {
+ dev_dbg(sdev->dev, "module %s: UUID %pUL\n", fm_entry->name,
+ &fm_entry->uuid);
+ }
+
+ fw_module->man4_module_entry.id = i;
+ ida_init(&fw_module->m_ida);
+ fw_module->private = NULL;
+
+ fw_module++;
+ fm_entry++;
+ }
+
+ return ext_man_hdr->len;
+}
+
+static size_t sof_ipc4_fw_parse_basefw_ext_man(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct sof_ipc4_fw_library *fw_lib;
+ ssize_t payload_offset;
+ int ret;
+
+ fw_lib = devm_kzalloc(sdev->dev, sizeof(*fw_lib), GFP_KERNEL);
+ if (!fw_lib)
+ return -ENOMEM;
+
+ fw_lib->sof_fw.fw = sdev->basefw.fw;
+
+ payload_offset = sof_ipc4_fw_parse_ext_man(sdev, fw_lib);
+ if (payload_offset > 0) {
+ fw_lib->sof_fw.payload_offset = payload_offset;
+
+ /* basefw ID is 0 */
+ fw_lib->id = 0;
+ ret = xa_insert(&ipc4_data->fw_lib_xa, 0, fw_lib, GFP_KERNEL);
+ if (ret)
+ return ret;
+ }
+
+ return payload_offset;
+}
+
+static int sof_ipc4_load_library_by_uuid(struct snd_sof_dev *sdev,
+ unsigned long lib_id, const guid_t *uuid)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct sof_ipc4_fw_library *fw_lib;
+ const char *fw_filename;
+ ssize_t payload_offset;
+ int ret, i, err;
+
+ if (!sdev->pdata->fw_lib_prefix) {
+ dev_err(sdev->dev,
+ "Library loading is not supported due to not set library path\n");
+ return -EINVAL;
+ }
+
+ if (!ipc4_data->load_library) {
+ dev_err(sdev->dev, "Library loading is not supported on this platform\n");
+ return -EOPNOTSUPP;
+ }
+
+ fw_lib = devm_kzalloc(sdev->dev, sizeof(*fw_lib), GFP_KERNEL);
+ if (!fw_lib)
+ return -ENOMEM;
+
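+	/*
+	 * Libraries are looked up by UUID under the platform's library prefix,
+	 * so the file requested below is <fw_lib_prefix>/<UUID>.bin.
+	 */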
+ fw_filename = kasprintf(GFP_KERNEL, "%s/%pUL.bin",
+ sdev->pdata->fw_lib_prefix, uuid);
+ if (!fw_filename) {
+ ret = -ENOMEM;
+ goto free_fw_lib;
+ }
+
+ ret = request_firmware(&fw_lib->sof_fw.fw, fw_filename, sdev->dev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Library file '%s' is missing\n", fw_filename);
+ goto free_filename;
+ } else {
+ dev_dbg(sdev->dev, "Library file '%s' loaded\n", fw_filename);
+ }
+
+ payload_offset = sof_ipc4_fw_parse_ext_man(sdev, fw_lib);
+ if (payload_offset <= 0) {
+ if (!payload_offset)
+ ret = -EINVAL;
+ else
+ ret = payload_offset;
+
+ goto release;
+ }
+
+ fw_lib->sof_fw.payload_offset = payload_offset;
+ fw_lib->id = lib_id;
+
+ /* Fix up the module ID numbers within the library */
+ for (i = 0; i < fw_lib->num_modules; i++)
+ fw_lib->modules[i].man4_module_entry.id |= (lib_id << SOF_IPC4_MOD_LIB_ID_SHIFT);
+
+ /*
+ * Make sure that the DSP is booted and stays up while attempting the
+ * loading the library for the first time
+ */
+ ret = pm_runtime_resume_and_get(sdev->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err_ratelimited(sdev->dev, "%s: pm_runtime resume failed: %d\n",
+ __func__, ret);
+ goto release;
+ }
+
+ ret = ipc4_data->load_library(sdev, fw_lib, false);
+
+ pm_runtime_mark_last_busy(sdev->dev);
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+ dev_err_ratelimited(sdev->dev, "%s: pm_runtime idle failed: %d\n",
+ __func__, err);
+
+ if (ret)
+ goto release;
+
+ ret = xa_insert(&ipc4_data->fw_lib_xa, lib_id, fw_lib, GFP_KERNEL);
+ if (unlikely(ret))
+ goto release;
+
+ kfree(fw_filename);
+
+ return 0;
+
+release:
+ release_firmware(fw_lib->sof_fw.fw);
+ /* Allocated within sof_ipc4_fw_parse_ext_man() */
+ devm_kfree(sdev->dev, fw_lib->modules);
+free_filename:
+ kfree(fw_filename);
+free_fw_lib:
+ devm_kfree(sdev->dev, fw_lib);
+
+ return ret;
+}
+
+struct sof_ipc4_fw_module *sof_ipc4_find_module_by_uuid(struct snd_sof_dev *sdev,
+ const guid_t *uuid)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct sof_ipc4_fw_library *fw_lib;
+ unsigned long lib_id;
+ int i, ret;
+
+ if (guid_is_null(uuid))
+ return NULL;
+
+ xa_for_each(&ipc4_data->fw_lib_xa, lib_id, fw_lib) {
+ for (i = 0; i < fw_lib->num_modules; i++) {
+ if (guid_equal(uuid, &fw_lib->modules[i].man4_module_entry.uuid))
+ return &fw_lib->modules[i];
+ }
+ }
+
+ /*
+	 * Do not attempt to load an external library if the maximum number of
+	 * firmware libraries has already been loaded
+ */
+ if ((lib_id + 1) == ipc4_data->max_libs_count) {
+ dev_err(sdev->dev,
+ "%s: Maximum allowed number of libraries reached (%u)\n",
+ __func__, ipc4_data->max_libs_count);
+ return NULL;
+ }
+
+ /* The module cannot be found, try to load it as a library */
+ ret = sof_ipc4_load_library_by_uuid(sdev, lib_id + 1, uuid);
+ if (ret)
+ return NULL;
+
+ /* Look for the module in the newly loaded library, it should be available now */
+ xa_for_each_start(&ipc4_data->fw_lib_xa, lib_id, fw_lib, lib_id) {
+ for (i = 0; i < fw_lib->num_modules; i++) {
+ if (guid_equal(uuid, &fw_lib->modules[i].man4_module_entry.uuid))
+ return &fw_lib->modules[i];
+ }
+ }
+
+ return NULL;
+}
+
+static int sof_ipc4_validate_firmware(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ u32 fw_hdr_offset = ipc4_data->manifest_fw_hdr_offset;
+ struct sof_man4_fw_binary_header *fw_header;
+ const struct firmware *fw = sdev->basefw.fw;
+ struct sof_ext_manifest4_hdr *ext_man_hdr;
+
+ ext_man_hdr = (struct sof_ext_manifest4_hdr *)fw->data;
+ fw_header = (struct sof_man4_fw_binary_header *)
+ (fw->data + ext_man_hdr->len + fw_hdr_offset);
+
+ /* TODO: Add firmware verification code here */
+
+ dev_dbg(sdev->dev, "Validated firmware version: %u.%u.%u.%u\n",
+ fw_header->major_version, fw_header->minor_version,
+ fw_header->hotfix_version, fw_header->build_version);
+
+ return 0;
+}
+
+int sof_ipc4_query_fw_configuration(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ const struct sof_ipc_ops *iops = sdev->ipc->ops;
+ struct sof_ipc4_fw_version *fw_ver;
+ struct sof_ipc4_tuple *tuple;
+ struct sof_ipc4_msg msg;
+ size_t offset = 0;
+ int ret;
+
+ /* Get the firmware configuration */
+ msg.primary = SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MOD_ID(SOF_IPC4_MOD_INIT_BASEFW_MOD_ID);
+ msg.primary |= SOF_IPC4_MOD_INSTANCE(SOF_IPC4_MOD_INIT_BASEFW_INSTANCE_ID);
+ msg.extension = SOF_IPC4_MOD_EXT_MSG_PARAM_ID(SOF_IPC4_FW_PARAM_FW_CONFIG);
+
+ msg.data_size = sdev->ipc->max_payload_size;
+ msg.data_ptr = kzalloc(msg.data_size, GFP_KERNEL);
+ if (!msg.data_ptr)
+ return -ENOMEM;
+
+ ret = iops->set_get_data(sdev, &msg, msg.data_size, false);
+ if (ret)
+ goto out;
+
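+	/*
+	 * The reply payload is a sequence of sof_ipc4_tuple entries
+	 * ({ type, size, value[] }); walk it and pick out the entries the
+	 * driver cares about.
+	 */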
+ while (offset < msg.data_size) {
+ tuple = (struct sof_ipc4_tuple *)((u8 *)msg.data_ptr + offset);
+
+ switch (tuple->type) {
+ case SOF_IPC4_FW_CFG_FW_VERSION:
+ fw_ver = (struct sof_ipc4_fw_version *)tuple->value;
+
+ dev_info(sdev->dev,
+ "Booted firmware version: %u.%u.%u.%u\n",
+ fw_ver->major, fw_ver->minor, fw_ver->hotfix,
+ fw_ver->build);
+ break;
+ case SOF_IPC4_FW_CFG_DL_MAILBOX_BYTES:
+ trace_sof_ipc4_fw_config(sdev, "DL mailbox size", *tuple->value);
+ break;
+ case SOF_IPC4_FW_CFG_UL_MAILBOX_BYTES:
+ trace_sof_ipc4_fw_config(sdev, "UL mailbox size", *tuple->value);
+ break;
+ case SOF_IPC4_FW_CFG_TRACE_LOG_BYTES:
+ trace_sof_ipc4_fw_config(sdev, "Trace log size", *tuple->value);
+ ipc4_data->mtrace_log_bytes = *tuple->value;
+ break;
+ case SOF_IPC4_FW_CFG_MAX_LIBS_COUNT:
+ trace_sof_ipc4_fw_config(sdev, "maximum number of libraries",
+ *tuple->value);
+ ipc4_data->max_libs_count = *tuple->value;
+ if (!ipc4_data->max_libs_count)
+ ipc4_data->max_libs_count = 1;
+ break;
+ case SOF_IPC4_FW_CFG_MAX_PPL_COUNT:
+ ipc4_data->max_num_pipelines = *tuple->value;
+ trace_sof_ipc4_fw_config(sdev, "Max PPL count %d",
+ ipc4_data->max_num_pipelines);
+ if (ipc4_data->max_num_pipelines <= 0) {
+ dev_err(sdev->dev, "Invalid max_num_pipelines %d",
+ ipc4_data->max_num_pipelines);
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+ offset += sizeof(*tuple) + tuple->size;
+ }
+
+out:
+ kfree(msg.data_ptr);
+
+ return ret;
+}
+
+int sof_ipc4_reload_fw_libraries(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct sof_ipc4_fw_library *fw_lib;
+ unsigned long lib_id;
+ int ret = 0;
+
+ xa_for_each_start(&ipc4_data->fw_lib_xa, lib_id, fw_lib, 1) {
+ ret = ipc4_data->load_library(sdev, fw_lib, true);
+ if (ret) {
+ dev_err(sdev->dev, "%s: Failed to reload library: %s, %d\n",
+ __func__, fw_lib->name, ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * sof_ipc4_update_cpc_from_manifest - Update the cpc in base config from manifest
+ * @sdev: SOF device
+ * @fw_module: pointer struct sof_ipc4_fw_module to parse
+ * @basecfg: Pointer to the base_config to update
+ */
+void sof_ipc4_update_cpc_from_manifest(struct snd_sof_dev *sdev,
+ struct sof_ipc4_fw_module *fw_module,
+ struct sof_ipc4_base_module_cfg *basecfg)
+{
+ const struct sof_man4_module_config *fw_mod_cfg;
+ u32 cpc_pick = 0;
+ u32 max_cpc = 0;
+ const char *msg;
+ int i;
+
+ if (!fw_module->fw_mod_cfg) {
+ msg = "No mod_cfg available for CPC lookup in the firmware file's manifest";
+ goto no_cpc;
+ }
+
+ /*
+ * Find the best matching (highest) CPC value based on the module's
+ * IBS/OBS configuration inferred from the audio format selection.
+ *
+ * The CPC value in each module config entry has been measured and
+	 * recorded as an IBS/OBS/CPC triplet and stored in the firmware file's
+	 * manifest.
+ */
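+	/*
+	 * Example: with config entries {ibs 384, obs 384, cpc 5000} and
+	 * {ibs 384, obs 384, cpc 6000}, a request for ibs/obs of 384/384 picks
+	 * the higher measured value, cpc = 6000.
+	 */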
+ fw_mod_cfg = fw_module->fw_mod_cfg;
+ for (i = 0; i < fw_module->man4_module_entry.cfg_count; i++) {
+ if (basecfg->obs == fw_mod_cfg[i].obs &&
+ basecfg->ibs == fw_mod_cfg[i].ibs &&
+ cpc_pick < fw_mod_cfg[i].cpc)
+ cpc_pick = fw_mod_cfg[i].cpc;
+
+ if (max_cpc < fw_mod_cfg[i].cpc)
+ max_cpc = fw_mod_cfg[i].cpc;
+ }
+
+ basecfg->cpc = cpc_pick;
+
+ /* We have a matching configuration for CPC */
+ if (basecfg->cpc)
+ return;
+
+ /*
+ * No matching IBS/OBS found, the firmware manifest is missing
+ * information in the module's module configuration table.
+ */
+ if (!max_cpc)
+ msg = "No CPC value available in the firmware file's manifest";
+ else if (!cpc_pick)
+ msg = "No CPC match in the firmware file's manifest";
+
+no_cpc:
+ dev_dbg(sdev->dev, "%s (UUID: %pUL): %s (ibs/obs: %u/%u)\n",
+ fw_module->man4_module_entry.name,
+ &fw_module->man4_module_entry.uuid, msg, basecfg->ibs,
+ basecfg->obs);
+}
+
+const struct sof_ipc_fw_loader_ops ipc4_loader_ops = {
+ .validate = sof_ipc4_validate_firmware,
+ .parse_ext_manifest = sof_ipc4_fw_parse_basefw_ext_man,
+};
diff --git a/sound/soc/sof/ipc4-mtrace.c b/sound/soc/sof/ipc4-mtrace.c
new file mode 100644
index 000000000..2b4659a17
--- /dev/null
+++ b/sound/soc/sof/ipc4-mtrace.c
@@ -0,0 +1,681 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+
+#include <linux/debugfs.h>
+#include <linux/sched/signal.h>
+#include <sound/sof/ipc4/header.h>
+#include "sof-priv.h"
+#include "ipc4-priv.h"
+
+/*
+ * debug info window is organized in 16 (equal sized) pages:
+ *
+ * ------------------------
+ * | Page0 - descriptors |
+ * ------------------------
+ * | Page1 - slot0 |
+ * ------------------------
+ * | Page2 - slot1 |
+ * ------------------------
+ * | ... |
+ * ------------------------
+ * | Page14 - slot13 |
+ * ------------------------
+ * | Page15 - slot14 |
+ * ------------------------
+ *
+ * The slot size == page size
+ *
+ * The first page contains descriptors for the remaining 15 slots.
+ * The slot descriptor is:
+ * u32 res_id;
+ * u32 type;
+ * u32 vma;
+ *
+ * Log buffer slots have the following layout:
+ * u32 host_read_ptr;
+ * u32 dsp_write_ptr;
+ * u8 buffer[];
+ *
+ * The two pointers are offsets within the buffer.
+ */
+
+#define SOF_MTRACE_DESCRIPTOR_SIZE 12 /* 3 x u32 */
+
+#define FW_EPOCH_DELTA 11644473600LL
+
+#define INVALID_SLOT_OFFSET 0xffffffff
+#define MAX_ALLOWED_LIBRARIES 16
+#define MAX_MTRACE_SLOTS 15
+
+#define SOF_MTRACE_PAGE_SIZE 0x1000
+#define SOF_MTRACE_SLOT_SIZE SOF_MTRACE_PAGE_SIZE
+
+/* debug log slot types */
+#define SOF_MTRACE_SLOT_UNUSED 0x00000000
+#define SOF_MTRACE_SLOT_CRITICAL_LOG 0x54524300 /* byte 0: core ID */
+#define SOF_MTRACE_SLOT_DEBUG_LOG 0x474f4c00 /* byte 0: core ID */
+#define SOF_MTRACE_SLOT_GDB_STUB 0x42444700
+#define SOF_MTRACE_SLOT_TELEMETRY 0x4c455400
+#define SOF_MTRACE_SLOT_BROKEN 0x44414544
+ /* for debug and critical types */
+#define SOF_MTRACE_SLOT_CORE_MASK GENMASK(7, 0)
+#define SOF_MTRACE_SLOT_TYPE_MASK GENMASK(31, 8)
+
+#define DEFAULT_AGING_TIMER_PERIOD_MS 0x100
+#define DEFAULT_FIFO_FULL_TIMER_PERIOD_MS 0x1000
+
+/* ipc4 log level and source definitions for logs_priorities_mask */
+#define SOF_MTRACE_LOG_LEVEL_CRITICAL BIT(0)
+#define SOF_MTRACE_LOG_LEVEL_ERROR BIT(1)
+#define SOF_MTRACE_LOG_LEVEL_WARNING BIT(2)
+#define SOF_MTRACE_LOG_LEVEL_INFO BIT(3)
+#define SOF_MTRACE_LOG_LEVEL_VERBOSE BIT(4)
+#define SOF_MTRACE_LOG_SOURCE_INFRA BIT(5) /* log source 0 */
+#define SOF_MTRACE_LOG_SOURCE_HAL BIT(6)
+#define SOF_MTRACE_LOG_SOURCE_MODULE BIT(7)
+#define SOF_MTRACE_LOG_SOURCE_AUDIO BIT(8)
+#define SOF_MTRACE_LOG_SOURCE_SCHEDULER BIT(9)
+#define SOF_MTRACE_LOG_SOURCE_ULP_INFRA BIT(10)
+#define SOF_MTRACE_LOG_SOURCE_ULP_MODULE BIT(11)
+#define SOF_MTRACE_LOG_SOURCE_VISION BIT(12) /* log source 7 */
+#define DEFAULT_LOGS_PRIORITIES_MASK (SOF_MTRACE_LOG_LEVEL_CRITICAL | \
+ SOF_MTRACE_LOG_LEVEL_ERROR | \
+ SOF_MTRACE_LOG_LEVEL_WARNING | \
+ SOF_MTRACE_LOG_LEVEL_INFO | \
+ SOF_MTRACE_LOG_SOURCE_INFRA | \
+ SOF_MTRACE_LOG_SOURCE_HAL | \
+ SOF_MTRACE_LOG_SOURCE_MODULE | \
+ SOF_MTRACE_LOG_SOURCE_AUDIO)
+
+struct sof_log_state_info {
+ u32 aging_timer_period;
+ u32 fifo_full_timer_period;
+ u32 enable;
+ u32 logs_priorities_mask[MAX_ALLOWED_LIBRARIES];
+} __packed;
+
+enum sof_mtrace_state {
+ SOF_MTRACE_DISABLED,
+ SOF_MTRACE_INITIALIZING,
+ SOF_MTRACE_ENABLED,
+};
+
+struct sof_mtrace_core_data {
+ struct snd_sof_dev *sdev;
+
+ int id;
+ u32 slot_offset;
+ void *log_buffer;
+ struct mutex buffer_lock; /* for log_buffer alloc/free */
+ u32 host_read_ptr;
+ u32 dsp_write_ptr;
+	/* set when a position update IPC arrives before the slot offset has been queried */
+ bool delayed_pos_update;
+ wait_queue_head_t trace_sleep;
+};
+
+struct sof_mtrace_priv {
+ struct snd_sof_dev *sdev;
+ enum sof_mtrace_state mtrace_state;
+ struct sof_log_state_info state_info;
+
+ struct sof_mtrace_core_data cores[];
+};
+
+static int sof_ipc4_mtrace_dfs_open(struct inode *inode, struct file *file)
+{
+ struct sof_mtrace_core_data *core_data = inode->i_private;
+ int ret;
+
+ mutex_lock(&core_data->buffer_lock);
+
+ if (core_data->log_buffer) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (unlikely(ret))
+ goto out;
+
+ core_data->log_buffer = kmalloc(SOF_MTRACE_SLOT_SIZE, GFP_KERNEL);
+ if (!core_data->log_buffer) {
+ debugfs_file_put(file->f_path.dentry);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = simple_open(inode, file);
+ if (ret) {
+ kfree(core_data->log_buffer);
+ debugfs_file_put(file->f_path.dentry);
+ }
+
+out:
+ mutex_unlock(&core_data->buffer_lock);
+
+ return ret;
+}
+
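+/*
+ * Block until new trace data is produced for this core (the DSP write pointer
+ * differs from the host read pointer), the waiter is woken up (position update
+ * or mtrace disable) or a signal is pending. Returns true only if new data is
+ * available.
+ */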
+static bool sof_wait_mtrace_avail(struct sof_mtrace_core_data *core_data)
+{
+ wait_queue_entry_t wait;
+
+ /* data immediately available */
+ if (core_data->host_read_ptr != core_data->dsp_write_ptr)
+ return true;
+
+ /* wait for available trace data from FW */
+ init_waitqueue_entry(&wait, current);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&core_data->trace_sleep, &wait);
+
+ if (!signal_pending(current)) {
+ /* set timeout to max value, no error code */
+ schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+ }
+ remove_wait_queue(&core_data->trace_sleep, &wait);
+
+ if (core_data->host_read_ptr != core_data->dsp_write_ptr)
+ return true;
+
+ return false;
+}
+
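+/*
+ * Read handler for the "coreN" debugfs files: each read copies a leading u32
+ * with the number of valid log bytes, followed by that many bytes taken from
+ * the circular slot buffer between host_read_ptr and dsp_write_ptr. The
+ * host_read_ptr in the slot is advanced afterwards so the firmware can reuse
+ * the space.
+ */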
+static ssize_t sof_ipc4_mtrace_dfs_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct sof_mtrace_core_data *core_data = file->private_data;
+ u32 log_buffer_offset, log_buffer_size, read_ptr, write_ptr;
+ struct snd_sof_dev *sdev = core_data->sdev;
+ struct sof_mtrace_priv *priv = sdev->fw_trace_data;
+ void *log_buffer = core_data->log_buffer;
+ loff_t lpos = *ppos;
+ u32 avail;
+ int ret;
+
+ /* check pos and count */
+ if (lpos < 0)
+ return -EINVAL;
+ if (!count || count < sizeof(avail))
+ return 0;
+
+ /* get available count based on current host offset */
+ if (!sof_wait_mtrace_avail(core_data)) {
+ /* No data available */
+ avail = 0;
+ if (copy_to_user(buffer, &avail, sizeof(avail)))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ if (core_data->slot_offset == INVALID_SLOT_OFFSET)
+ return 0;
+
+	/* The log data buffer starts after the two pointers in the slot */
+ log_buffer_offset = core_data->slot_offset + (sizeof(u32) * 2);
+ /* The log data size excludes the pointers */
+ log_buffer_size = SOF_MTRACE_SLOT_SIZE - (sizeof(u32) * 2);
+
+ read_ptr = core_data->host_read_ptr;
+ write_ptr = core_data->dsp_write_ptr;
+
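+	/*
+	 * Determine the amount of new data in the circular buffer; when the
+	 * write pointer has wrapped around (read_ptr >= write_ptr) the data
+	 * spans the end and the start of the buffer. For example, with
+	 * log_buffer_size 0xff8, read_ptr 0xf00 and write_ptr 0x100:
+	 * avail = 0xff8 - 0xf00 + 0x100 = 0x1f8 bytes.
+	 */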
+ if (read_ptr < write_ptr)
+ avail = write_ptr - read_ptr;
+ else
+ avail = log_buffer_size - read_ptr + write_ptr;
+
+ if (!avail)
+ return 0;
+
+ if (avail > log_buffer_size)
+ avail = log_buffer_size;
+
+	/* Need space for the leading u32 carrying the avail value */
+ if (avail > count - sizeof(avail))
+ avail = count - sizeof(avail);
+
+ if (sof_debug_check_flag(SOF_DBG_PRINT_DMA_POSITION_UPDATE_LOGS))
+ dev_dbg(sdev->dev,
+ "core%d, host read: %#x, dsp write: %#x, avail: %#x\n",
+ core_data->id, read_ptr, write_ptr, avail);
+
+ if (read_ptr < write_ptr) {
+ /* Read data between read pointer and write pointer */
+ sof_mailbox_read(sdev, log_buffer_offset + read_ptr, log_buffer, avail);
+ } else {
+ /* read from read pointer to end of the slot */
+ sof_mailbox_read(sdev, log_buffer_offset + read_ptr, log_buffer,
+ avail - write_ptr);
+ /* read from slot start to write pointer */
+ if (write_ptr)
+ sof_mailbox_read(sdev, log_buffer_offset,
+ (u8 *)(log_buffer) + avail - write_ptr,
+ write_ptr);
+ }
+
+ /* first write the number of bytes we have gathered */
+ ret = copy_to_user(buffer, &avail, sizeof(avail));
+ if (ret)
+ return -EFAULT;
+
+ /* Followed by the data itself */
+ ret = copy_to_user(buffer + sizeof(avail), log_buffer, avail);
+ if (ret)
+ return -EFAULT;
+
+ /* Update the host_read_ptr in the slot for this core */
+ read_ptr += avail;
+ if (read_ptr >= log_buffer_size)
+ read_ptr -= log_buffer_size;
+ sof_mailbox_write(sdev, core_data->slot_offset, &read_ptr, sizeof(read_ptr));
+
+ /* Only update the host_read_ptr if mtrace is enabled */
+ if (priv->mtrace_state != SOF_MTRACE_DISABLED)
+ core_data->host_read_ptr = read_ptr;
+
+	/*
+	 * Advance the position by the full count so that user space issues a
+	 * new read for the next chunk; the data cannot be streamed because of
+	 * the leading byte-count value.
+	 */
+ *ppos += count;
+
+ return count;
+}
+
+static int sof_ipc4_mtrace_dfs_release(struct inode *inode, struct file *file)
+{
+ struct sof_mtrace_core_data *core_data = inode->i_private;
+
+ debugfs_file_put(file->f_path.dentry);
+
+ mutex_lock(&core_data->buffer_lock);
+ kfree(core_data->log_buffer);
+ core_data->log_buffer = NULL;
+ mutex_unlock(&core_data->buffer_lock);
+
+ return 0;
+}
+
+static const struct file_operations sof_dfs_mtrace_fops = {
+ .open = sof_ipc4_mtrace_dfs_open,
+ .read = sof_ipc4_mtrace_dfs_read,
+ .llseek = default_llseek,
+ .release = sof_ipc4_mtrace_dfs_release,
+
+ .owner = THIS_MODULE,
+};
+
+static ssize_t sof_ipc4_priority_mask_dfs_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ struct sof_mtrace_priv *priv = file->private_data;
+ int i, ret, offset, remaining;
+ char *buf;
+
+	/*
+	 * one entry (14 chars + newline = 15):
+	 * " 0: 0x000001ef"
+	 *
+	 * 16 entries * 15 + terminating NUL = 241
+	 */
+ buf = kzalloc(241, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < MAX_ALLOWED_LIBRARIES; i++) {
+ offset = strlen(buf);
+ remaining = 241 - offset;
+ snprintf(buf + offset, remaining, "%2d: 0x%08x\n", i,
+ priv->state_info.logs_priorities_mask[i]);
+ }
+
+ ret = simple_read_from_buffer(to, count, ppos, buf, strlen(buf));
+
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t sof_ipc4_priority_mask_dfs_write(struct file *file,
+ const char __user *from,
+ size_t count, loff_t *ppos)
+{
+ struct sof_mtrace_priv *priv = file->private_data;
+ unsigned int id;
+ char *buf;
+ u32 mask;
+ int ret;
+
+ /*
+ * To update Nth mask entry, write:
+ * "N,0x1234" or "N,1234" to the debugfs file
+ * The mask will be interpreted as hexadecimal number
+ */
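+	/* e.g. writing "3,0x1ff" sets logs_priorities_mask[3] to 0x1ff */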
+ buf = memdup_user_nul(from, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = sscanf(buf, "%u,0x%x", &id, &mask);
+ if (ret != 2) {
+ ret = sscanf(buf, "%u,%x", &id, &mask);
+ if (ret != 2) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (id >= MAX_ALLOWED_LIBRARIES) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->state_info.logs_priorities_mask[id] = mask;
+ ret = count;
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations sof_dfs_priority_mask_fops = {
+ .open = simple_open,
+ .read = sof_ipc4_priority_mask_dfs_read,
+ .write = sof_ipc4_priority_mask_dfs_write,
+ .llseek = default_llseek,
+
+ .owner = THIS_MODULE,
+};
+
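+/*
+ * Expose the logger controls under <debugfs_root>/mtrace/: the
+ * aging_timer_period, fifo_full_timer_period and logs_priorities_mask
+ * parameters, plus one read-only "coreN" file per DSP core for retrieving
+ * the log data.
+ */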
+static int mtrace_debugfs_create(struct snd_sof_dev *sdev)
+{
+ struct sof_mtrace_priv *priv = sdev->fw_trace_data;
+ struct dentry *dfs_root;
+ char dfs_name[100];
+ int i;
+
+ dfs_root = debugfs_create_dir("mtrace", sdev->debugfs_root);
+ if (IS_ERR_OR_NULL(dfs_root))
+ return 0;
+
+ /* Create files for the logging parameters */
+ debugfs_create_u32("aging_timer_period", 0644, dfs_root,
+ &priv->state_info.aging_timer_period);
+ debugfs_create_u32("fifo_full_timer_period", 0644, dfs_root,
+ &priv->state_info.fifo_full_timer_period);
+ debugfs_create_file("logs_priorities_mask", 0644, dfs_root, priv,
+ &sof_dfs_priority_mask_fops);
+
+ /* Separate log files per core */
+ for (i = 0; i < sdev->num_cores; i++) {
+ snprintf(dfs_name, sizeof(dfs_name), "core%d", i);
+ debugfs_create_file(dfs_name, 0444, dfs_root, &priv->cores[i],
+ &sof_dfs_mtrace_fops);
+ }
+
+ return 0;
+}
+
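+/*
+ * Enabling the firmware logger takes two IPC messages: the current system
+ * time is sent first (SOF_IPC4_FW_PARAM_SYSTEM_TIME), then the logging
+ * parameters are applied with SOF_IPC4_FW_PARAM_ENABLE_LOGS.
+ */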
+static int ipc4_mtrace_enable(struct snd_sof_dev *sdev)
+{
+ struct sof_mtrace_priv *priv = sdev->fw_trace_data;
+ const struct sof_ipc_ops *iops = sdev->ipc->ops;
+ struct sof_ipc4_msg msg;
+ u64 system_time;
+ ktime_t kt;
+ int ret;
+
+ if (priv->mtrace_state != SOF_MTRACE_DISABLED)
+ return 0;
+
+ msg.primary = SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MOD_ID(SOF_IPC4_MOD_INIT_BASEFW_MOD_ID);
+ msg.primary |= SOF_IPC4_MOD_INSTANCE(SOF_IPC4_MOD_INIT_BASEFW_INSTANCE_ID);
+ msg.extension = SOF_IPC4_MOD_EXT_MSG_PARAM_ID(SOF_IPC4_FW_PARAM_SYSTEM_TIME);
+
+ /* The system time is in usec, UTC, epoch is 1601-01-01 00:00:00 */
+ kt = ktime_add_us(ktime_get_real(), FW_EPOCH_DELTA * USEC_PER_SEC);
+ system_time = ktime_to_us(kt);
+ msg.data_size = sizeof(system_time);
+ msg.data_ptr = &system_time;
+ ret = iops->set_get_data(sdev, &msg, msg.data_size, true);
+ if (ret)
+ return ret;
+
+ msg.extension = SOF_IPC4_MOD_EXT_MSG_PARAM_ID(SOF_IPC4_FW_PARAM_ENABLE_LOGS);
+
+ priv->state_info.enable = 1;
+
+ msg.data_size = sizeof(priv->state_info);
+ msg.data_ptr = &priv->state_info;
+
+ priv->mtrace_state = SOF_MTRACE_INITIALIZING;
+ ret = iops->set_get_data(sdev, &msg, msg.data_size, true);
+ if (ret) {
+ priv->mtrace_state = SOF_MTRACE_DISABLED;
+ return ret;
+ }
+
+ priv->mtrace_state = SOF_MTRACE_ENABLED;
+
+ return 0;
+}
+
+static void ipc4_mtrace_disable(struct snd_sof_dev *sdev)
+{
+ struct sof_mtrace_priv *priv = sdev->fw_trace_data;
+ const struct sof_ipc_ops *iops = sdev->ipc->ops;
+ struct sof_ipc4_msg msg;
+ int i;
+
+ if (priv->mtrace_state == SOF_MTRACE_DISABLED)
+ return;
+
+ msg.primary = SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MOD_ID(SOF_IPC4_MOD_INIT_BASEFW_MOD_ID);
+ msg.primary |= SOF_IPC4_MOD_INSTANCE(SOF_IPC4_MOD_INIT_BASEFW_INSTANCE_ID);
+ msg.extension = SOF_IPC4_MOD_EXT_MSG_PARAM_ID(SOF_IPC4_FW_PARAM_ENABLE_LOGS);
+
+ priv->state_info.enable = 0;
+
+ msg.data_size = sizeof(priv->state_info);
+ msg.data_ptr = &priv->state_info;
+ iops->set_get_data(sdev, &msg, msg.data_size, true);
+
+ priv->mtrace_state = SOF_MTRACE_DISABLED;
+
+ for (i = 0; i < sdev->num_cores; i++) {
+ struct sof_mtrace_core_data *core_data = &priv->cores[i];
+
+ core_data->host_read_ptr = 0;
+ core_data->dsp_write_ptr = 0;
+ wake_up(&core_data->trace_sleep);
+ }
+}
+
+/*
+ * Each DSP core logs to a dedicated slot.
+ * Parse the slot descriptors at debug_box offset to find the debug log slots
+ * and map them to cores.
+ * There are 15 slots and therefore 15 descriptors to check (MAX_MTRACE_SLOTS)
+ */
+static void sof_mtrace_find_core_slots(struct snd_sof_dev *sdev)
+{
+ struct sof_mtrace_priv *priv = sdev->fw_trace_data;
+ struct sof_mtrace_core_data *core_data;
+ u32 slot_desc_type_offset, type, core;
+ int i;
+
+ for (i = 0; i < MAX_MTRACE_SLOTS; i++) {
+ /* The type is the second u32 in the slot descriptor */
+ slot_desc_type_offset = sdev->debug_box.offset;
+ slot_desc_type_offset += SOF_MTRACE_DESCRIPTOR_SIZE * i + sizeof(u32);
+ sof_mailbox_read(sdev, slot_desc_type_offset, &type, sizeof(type));
+
+ if ((type & SOF_MTRACE_SLOT_TYPE_MASK) == SOF_MTRACE_SLOT_DEBUG_LOG) {
+ core = type & SOF_MTRACE_SLOT_CORE_MASK;
+
+ if (core >= sdev->num_cores) {
+ dev_dbg(sdev->dev, "core%u is invalid for slot%d\n",
+ core, i);
+ continue;
+ }
+
+ core_data = &priv->cores[core];
+ /*
+			 * The area reserved for the descriptors has the same
+			 * size as a slot.
+ * In other words: slot0 starts at
+ * debug_box + SOF_MTRACE_SLOT_SIZE offset
+ */
+ core_data->slot_offset = sdev->debug_box.offset;
+ core_data->slot_offset += SOF_MTRACE_SLOT_SIZE * (i + 1);
+ dev_dbg(sdev->dev, "slot%d is used for core%u\n", i, core);
+ if (core_data->delayed_pos_update) {
+ sof_ipc4_mtrace_update_pos(sdev, core);
+ core_data->delayed_pos_update = false;
+ }
+ } else if (type) {
+ dev_dbg(sdev->dev, "slot%d is not a log slot (%#x)\n", i, type);
+ }
+ }
+}
+
+static int ipc4_mtrace_init(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct sof_mtrace_priv *priv;
+ int i, ret;
+
+ if (sdev->fw_trace_data) {
+ dev_err(sdev->dev, "fw_trace_data has been already allocated\n");
+ return -EBUSY;
+ }
+
+ if (!ipc4_data->mtrace_log_bytes ||
+ ipc4_data->mtrace_type != SOF_IPC4_MTRACE_INTEL_CAVS_2) {
+ sdev->fw_trace_is_supported = false;
+ return 0;
+ }
+
+ priv = devm_kzalloc(sdev->dev, struct_size(priv, cores, sdev->num_cores),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ sdev->fw_trace_data = priv;
+
+ /* Set initial values for mtrace parameters */
+ priv->state_info.aging_timer_period = DEFAULT_AGING_TIMER_PERIOD_MS;
+ priv->state_info.fifo_full_timer_period = DEFAULT_FIFO_FULL_TIMER_PERIOD_MS;
+ /* Only enable basefw logs initially (index 0 is always basefw) */
+ priv->state_info.logs_priorities_mask[0] = DEFAULT_LOGS_PRIORITIES_MASK;
+
+ for (i = 0; i < sdev->num_cores; i++) {
+ struct sof_mtrace_core_data *core_data = &priv->cores[i];
+
+ init_waitqueue_head(&core_data->trace_sleep);
+ mutex_init(&core_data->buffer_lock);
+ core_data->sdev = sdev;
+ core_data->id = i;
+ }
+
+ ret = ipc4_mtrace_enable(sdev);
+ if (ret) {
+ /*
+ * Mark firmware tracing as not supported and return 0 to not
+ * block the whole audio stack
+ */
+ sdev->fw_trace_is_supported = false;
+ dev_dbg(sdev->dev, "initialization failed, fw tracing is disabled\n");
+ return 0;
+ }
+
+ sof_mtrace_find_core_slots(sdev);
+
+ ret = mtrace_debugfs_create(sdev);
+ if (ret)
+ ipc4_mtrace_disable(sdev);
+
+ return ret;
+}
+
+static void ipc4_mtrace_free(struct snd_sof_dev *sdev)
+{
+ ipc4_mtrace_disable(sdev);
+}
+
+static int sof_ipc4_mtrace_update_pos_all_cores(struct snd_sof_dev *sdev)
+{
+ int i;
+
+ for (i = 0; i < sdev->num_cores; i++)
+ sof_ipc4_mtrace_update_pos(sdev, i);
+
+ return 0;
+}
+
+int sof_ipc4_mtrace_update_pos(struct snd_sof_dev *sdev, int core)
+{
+ struct sof_mtrace_priv *priv = sdev->fw_trace_data;
+ struct sof_mtrace_core_data *core_data;
+
+ if (!sdev->fw_trace_is_supported ||
+ priv->mtrace_state == SOF_MTRACE_DISABLED)
+ return 0;
+
+ if (core >= sdev->num_cores)
+ return -EINVAL;
+
+ core_data = &priv->cores[core];
+
+ if (core_data->slot_offset == INVALID_SLOT_OFFSET) {
+ core_data->delayed_pos_update = true;
+ return 0;
+ }
+
+ /* Read out the dsp_write_ptr from the slot for this core */
+ sof_mailbox_read(sdev, core_data->slot_offset + sizeof(u32),
+ &core_data->dsp_write_ptr, 4);
+ core_data->dsp_write_ptr -= core_data->dsp_write_ptr % 4;
+
+ if (sof_debug_check_flag(SOF_DBG_PRINT_DMA_POSITION_UPDATE_LOGS))
+ dev_dbg(sdev->dev, "core%d, host read: %#x, dsp write: %#x",
+ core, core_data->host_read_ptr, core_data->dsp_write_ptr);
+
+ wake_up(&core_data->trace_sleep);
+
+ return 0;
+}
+
+static void ipc4_mtrace_fw_crashed(struct snd_sof_dev *sdev)
+{
+ /*
+ * The DSP might not be able to send SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS
+ * messages anymore, so check the log buffer status on all
+ * cores and process any pending messages.
+ */
+ sof_ipc4_mtrace_update_pos_all_cores(sdev);
+}
+
+static int ipc4_mtrace_resume(struct snd_sof_dev *sdev)
+{
+ return ipc4_mtrace_enable(sdev);
+}
+
+static void ipc4_mtrace_suspend(struct snd_sof_dev *sdev, pm_message_t pm_state)
+{
+ ipc4_mtrace_disable(sdev);
+}
+
+const struct sof_ipc_fw_tracing_ops ipc4_mtrace_ops = {
+ .init = ipc4_mtrace_init,
+ .free = ipc4_mtrace_free,
+ .fw_crashed = ipc4_mtrace_fw_crashed,
+ .suspend = ipc4_mtrace_suspend,
+ .resume = ipc4_mtrace_resume,
+};
diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
new file mode 100644
index 000000000..db19cd03e
--- /dev/null
+++ b/sound/soc/sof/ipc4-pcm.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+
+#include <sound/pcm_params.h>
+#include <sound/sof/ipc4/header.h>
+#include "sof-audio.h"
+#include "sof-priv.h"
+#include "ops.h"
+#include "ipc4-priv.h"
+#include "ipc4-topology.h"
+#include "ipc4-fw-reg.h"
+
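+/*
+ * Set the state of one or more pipelines with a single IPC. A single pipeline
+ * is triggered directly; for multiple pipelines the
+ * SOF_IPC4_GLB_PIPE_STATE_EXT_MULTI extension is used and the payload carries
+ * the pipeline count followed by the pipeline instance IDs.
+ */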
+static int sof_ipc4_set_multi_pipeline_state(struct snd_sof_dev *sdev, u32 state,
+ struct ipc4_pipeline_set_state_data *trigger_list)
+{
+ struct sof_ipc4_msg msg = {{ 0 }};
+ u32 primary, ipc_size;
+
+ /* trigger a single pipeline */
+ if (trigger_list->count == 1)
+ return sof_ipc4_set_pipeline_state(sdev, trigger_list->pipeline_instance_ids[0],
+ state);
+
+ primary = state;
+ primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_SET_PIPELINE_STATE);
+ primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
+ msg.primary = primary;
+
+ /* trigger multiple pipelines with a single IPC */
+ msg.extension = SOF_IPC4_GLB_PIPE_STATE_EXT_MULTI;
+
+ /* ipc_size includes the count and the pipeline IDs for the number of pipelines */
+ ipc_size = sizeof(u32) * (trigger_list->count + 1);
+ msg.data_size = ipc_size;
+ msg.data_ptr = trigger_list;
+
+ return sof_ipc_tx_message_no_reply(sdev->ipc, &msg, ipc_size);
+}
+
+int sof_ipc4_set_pipeline_state(struct snd_sof_dev *sdev, u32 instance_id, u32 state)
+{
+ struct sof_ipc4_msg msg = {{ 0 }};
+ u32 primary;
+
+ dev_dbg(sdev->dev, "ipc4 set pipeline instance %d state %d", instance_id, state);
+
+ primary = state;
+ primary |= SOF_IPC4_GLB_PIPE_STATE_ID(instance_id);
+ primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_SET_PIPELINE_STATE);
+ primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
+
+ msg.primary = primary;
+
+ return sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
+}
+EXPORT_SYMBOL(sof_ipc4_set_pipeline_state);
+
+static void
+sof_ipc4_add_pipeline_to_trigger_list(struct snd_sof_dev *sdev, int state,
+ struct snd_sof_pipeline *spipe,
+ struct ipc4_pipeline_set_state_data *trigger_list)
+{
+ struct snd_sof_widget *pipe_widget = spipe->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
+
+ if (pipeline->skip_during_fe_trigger && state != SOF_IPC4_PIPE_RESET)
+ return;
+
+ switch (state) {
+ case SOF_IPC4_PIPE_RUNNING:
+ /*
+ * Trigger pipeline if all PCMs containing it are paused or if it is RUNNING
+ * for the first time
+ */
+ if (spipe->started_count == spipe->paused_count)
+ trigger_list->pipeline_instance_ids[trigger_list->count++] =
+ pipe_widget->instance_id;
+ break;
+ case SOF_IPC4_PIPE_RESET:
+ /* RESET if the pipeline is neither running nor paused */
+ if (!spipe->started_count && !spipe->paused_count)
+ trigger_list->pipeline_instance_ids[trigger_list->count++] =
+ pipe_widget->instance_id;
+ break;
+ case SOF_IPC4_PIPE_PAUSED:
+ /* Pause the pipeline only when its started_count is 1 more than paused_count */
+ if (spipe->paused_count == (spipe->started_count - 1))
+ trigger_list->pipeline_instance_ids[trigger_list->count++] =
+ pipe_widget->instance_id;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+sof_ipc4_update_pipeline_state(struct snd_sof_dev *sdev, int state, int cmd,
+ struct snd_sof_pipeline *spipe,
+ struct ipc4_pipeline_set_state_data *trigger_list)
+{
+ struct snd_sof_widget *pipe_widget = spipe->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
+ int i;
+
+ if (pipeline->skip_during_fe_trigger && state != SOF_IPC4_PIPE_RESET)
+ return;
+
+ /* set state for pipeline if it was just triggered */
+ for (i = 0; i < trigger_list->count; i++) {
+ if (trigger_list->pipeline_instance_ids[i] == pipe_widget->instance_id) {
+ pipeline->state = state;
+ break;
+ }
+ }
+
+ switch (state) {
+ case SOF_IPC4_PIPE_PAUSED:
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ /*
+ * increment paused_count if the PAUSED is the final state during
+ * the PAUSE trigger
+ */
+ spipe->paused_count++;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ /*
+ * decrement started_count if PAUSED is the final state during the
+ * STOP trigger
+ */
+ spipe->started_count--;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SOF_IPC4_PIPE_RUNNING:
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ /* decrement paused_count for RELEASE */
+ spipe->paused_count--;
+ break;
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ /* increment started_count for START/RESUME */
+ spipe->started_count++;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * The picture below represents the pipeline state machine wrt PCM actions corresponding to the
+ * triggers and ioctls
+ * +---------------+
+ * | |
+ * | INIT |
+ * | |
+ * +-------+-------+
+ * |
+ * |
+ * | START
+ * |
+ * |
+ * +----------------+ +------v-------+ +-------------+
+ * | | START | | HW_FREE | |
+ * | RUNNING <-------------+ PAUSED +--------------> + RESET |
+ * | | PAUSE | | | |
+ * +------+---------+ RELEASE +---------+----+ +-------------+
+ * | ^
+ * | |
+ * | |
+ * | |
+ * | PAUSE |
+ * +---------------------------------+
+ * STOP/SUSPEND
+ *
+ * Note that during system suspend, the suspend trigger is followed by a hw_free in
+ * sof_pcm_trigger(). So, the final state during suspend would be RESET.
+ * Also, since the SOF driver doesn't support full resume, streams would be restarted with the
+ * prepare ioctl before the START trigger.
+ */
+
+/*
+ * Chained DMA is a special case where there is no processing on the
+ * DSP. The samples are just moved by the host-side DMA to a single
+ * buffer on the DSP and from there directly to the link DMA. However,
+ * the SOF driver model has two notional pipelines, one at the host DAI
+ * and another at the link DAI. Both must have the use_chain_dma
+ * attribute set.
+ */
+
+static int sof_ipc4_chain_dma_trigger(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream_pipeline_list *pipeline_list,
+ int state, int cmd)
+{
+ bool allocate, enable, set_fifo_size;
+ struct sof_ipc4_msg msg = {{ 0 }};
+ int i;
+
+ switch (state) {
+ case SOF_IPC4_PIPE_RUNNING: /* Allocate and start chained dma */
+ allocate = true;
+ enable = true;
+ /*
+ * SOF assumes creation of a new stream from the presence of fifo_size
+		 * in the message, so we must leave it out in the pause release case.
+ */
+ if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE)
+ set_fifo_size = false;
+ else
+ set_fifo_size = true;
+ break;
+ case SOF_IPC4_PIPE_PAUSED: /* Disable chained DMA. */
+ allocate = true;
+ enable = false;
+ set_fifo_size = false;
+ break;
+ case SOF_IPC4_PIPE_RESET: /* Disable and free chained DMA. */
+ allocate = false;
+ enable = false;
+ set_fifo_size = false;
+ break;
+ default:
+ dev_err(sdev->dev, "Unexpected state %d", state);
+ return -EINVAL;
+ }
+
+ msg.primary = SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_CHAIN_DMA);
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
+
+ /*
+	 * To set up the DMA chain, the host DMA ID and SCS setting
+	 * are retrieved from the host pipeline configuration. Likewise,
+ * the link DMA ID and fifo_size are retrieved from the link
+ * pipeline configuration.
+ */
+ for (i = 0; i < pipeline_list->count; i++) {
+ struct snd_sof_pipeline *spipe = pipeline_list->pipelines[i];
+ struct snd_sof_widget *pipe_widget = spipe->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
+
+ if (!pipeline->use_chain_dma) {
+ dev_err(sdev->dev,
+ "All pipelines in chained DMA stream should have use_chain_dma attribute set.");
+ return -EINVAL;
+ }
+
+ msg.primary |= pipeline->msg.primary;
+
+ /* Add fifo_size (actually DMA buffer size) field to the message */
+ if (set_fifo_size)
+ msg.extension |= pipeline->msg.extension;
+ }
+
+ if (allocate)
+ msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_ALLOCATE_MASK;
+
+ if (enable)
+ msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_ENABLE_MASK;
+
+ return sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
+}
+
+static int sof_ipc4_trigger_pipelines(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int state, int cmd)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_pcm_stream_pipeline_list *pipeline_list;
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct ipc4_pipeline_set_state_data *trigger_list;
+ struct snd_sof_widget *pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+ struct snd_sof_pipeline *spipe;
+ struct snd_sof_pcm *spcm;
+ int ret;
+ int i;
+
+ dev_dbg(sdev->dev, "trigger cmd: %d state: %d\n", cmd, state);
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ pipeline_list = &spcm->stream[substream->stream].pipeline_list;
+
+ /* nothing to trigger if the list is empty */
+ if (!pipeline_list->pipelines || !pipeline_list->count)
+ return 0;
+
+ spipe = pipeline_list->pipelines[0];
+ pipe_widget = spipe->pipe_widget;
+ pipeline = pipe_widget->private;
+
+ /*
+	 * If the use_chain_dma attribute is set, we proceed to the chained DMA
+	 * trigger function, which handles the rest for the substream.
+ */
+ if (pipeline->use_chain_dma)
+ return sof_ipc4_chain_dma_trigger(sdev, pipeline_list, state, cmd);
+
+ /* allocate memory for the pipeline data */
+ trigger_list = kzalloc(struct_size(trigger_list, pipeline_instance_ids,
+ pipeline_list->count), GFP_KERNEL);
+ if (!trigger_list)
+ return -ENOMEM;
+
+ mutex_lock(&ipc4_data->pipeline_state_mutex);
+
+ /*
+ * IPC4 requires pipelines to be triggered in order starting at the sink and
+ * walking all the way to the source. So traverse the pipeline_list in the order
+	 * sink->source when starting PCMs and in the reverse order to pause/stop PCMs.
+	 * Skip the pipelines that have their skip_during_fe_trigger flag set. If there is a fork
+	 * in the pipeline, the order of triggering between the left/right paths will be
+	 * non-deterministic, but the sink->source trigger order would still be guaranteed
+	 * for each fork independently.
+ */
+ if (state == SOF_IPC4_PIPE_RUNNING || state == SOF_IPC4_PIPE_RESET)
+ for (i = pipeline_list->count - 1; i >= 0; i--) {
+ spipe = pipeline_list->pipelines[i];
+ sof_ipc4_add_pipeline_to_trigger_list(sdev, state, spipe, trigger_list);
+ }
+ else
+ for (i = 0; i < pipeline_list->count; i++) {
+ spipe = pipeline_list->pipelines[i];
+ sof_ipc4_add_pipeline_to_trigger_list(sdev, state, spipe, trigger_list);
+ }
+
+ /* return if all pipelines are in the requested state already */
+ if (!trigger_list->count) {
+ ret = 0;
+ goto free;
+ }
+
+ /* no need to pause before reset or before pause release */
+ if (state == SOF_IPC4_PIPE_RESET || cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE)
+ goto skip_pause_transition;
+
+ /*
+ * set paused state for pipelines if the final state is PAUSED or when the pipeline
+ * is set to RUNNING for the first time after the PCM is started.
+ */
+ ret = sof_ipc4_set_multi_pipeline_state(sdev, SOF_IPC4_PIPE_PAUSED, trigger_list);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to pause all pipelines\n");
+ goto free;
+ }
+
+ /* update PAUSED state for all pipelines just triggered */
+	for (i = 0; i < pipeline_list->count; i++) {
+ spipe = pipeline_list->pipelines[i];
+ sof_ipc4_update_pipeline_state(sdev, SOF_IPC4_PIPE_PAUSED, cmd, spipe,
+ trigger_list);
+ }
+
+ /* return if this is the final state */
+ if (state == SOF_IPC4_PIPE_PAUSED)
+ goto free;
+skip_pause_transition:
+ /* else set the RUNNING/RESET state in the DSP */
+ ret = sof_ipc4_set_multi_pipeline_state(sdev, state, trigger_list);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to set final state %d for all pipelines\n", state);
+ goto free;
+ }
+
+ /* update RUNNING/RESET state for all pipelines that were just triggered */
+ for (i = 0; i < pipeline_list->count; i++) {
+ spipe = pipeline_list->pipelines[i];
+ sof_ipc4_update_pipeline_state(sdev, state, cmd, spipe, trigger_list);
+ }
+
+free:
+ mutex_unlock(&ipc4_data->pipeline_state_mutex);
+ kfree(trigger_list);
+ return ret;
+}
+
+static int sof_ipc4_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ int state;
+
+ /* determine the pipeline state */
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ state = SOF_IPC4_PIPE_PAUSED;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_START:
+ state = SOF_IPC4_PIPE_RUNNING;
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ state = SOF_IPC4_PIPE_PAUSED;
+ break;
+ default:
+ dev_err(component->dev, "%s: unhandled trigger cmd %d\n", __func__, cmd);
+ return -EINVAL;
+ }
+
+ /* set the pipeline state */
+ return sof_ipc4_trigger_pipelines(component, substream, state, cmd);
+}
+
+static int sof_ipc4_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ /* command is not relevant with RESET, so just pass 0 */
+ return sof_ipc4_trigger_pipelines(component, substream, SOF_IPC4_PIPE_RESET, 0);
+}
+
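+/*
+ * Find the DAI link by name and, among its hw_configs, pick the one whose
+ * fsync rate matches the PCM rate; mark it as the current config for all
+ * DAIs with a matching name.
+ */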
+static void ipc4_ssp_dai_config_pcm_params_match(struct snd_sof_dev *sdev, const char *link_name,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_sof_dai_link *slink;
+ struct snd_sof_dai *dai;
+ bool dai_link_found = false;
+ int i;
+
+ list_for_each_entry(slink, &sdev->dai_link_list, list) {
+ if (!strcmp(slink->link->name, link_name)) {
+ dai_link_found = true;
+ break;
+ }
+ }
+
+ if (!dai_link_found)
+ return;
+
+ for (i = 0; i < slink->num_hw_configs; i++) {
+ struct snd_soc_tplg_hw_config *hw_config = &slink->hw_configs[i];
+
+ if (params_rate(params) == le32_to_cpu(hw_config->fsync_rate)) {
+ /* set current config for all DAI's with matching name */
+ list_for_each_entry(dai, &sdev->dai_list, list)
+ if (!strcmp(slink->link->name, dai->name))
+ dai->current_config = le32_to_cpu(hw_config->id);
+ break;
+ }
+ }
+}
+
+/*
+ * Fixup DAI link parameters for sampling rate based on
+ * DAI copier configuration.
+ */
+static int sof_ipc4_pcm_dai_link_fixup_rate(struct snd_sof_dev *sdev,
+ struct snd_pcm_hw_params *params,
+ struct sof_ipc4_copier *ipc4_copier)
+{
+ struct sof_ipc4_pin_format *pin_fmts = ipc4_copier->available_fmt.input_pin_fmts;
+ struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ int num_input_formats = ipc4_copier->available_fmt.num_input_formats;
+ unsigned int fe_rate = params_rate(params);
+ bool fe_be_rate_match = false;
+ bool single_be_rate = true;
+ unsigned int be_rate;
+ int i;
+
+ /*
+ * Copier does not change sampling rate, so we
+ * need to only consider the input pin information.
+ */
+ for (i = 0; i < num_input_formats; i++) {
+ unsigned int val = pin_fmts[i].audio_fmt.sampling_frequency;
+
+ if (i == 0)
+ be_rate = val;
+ else if (val != be_rate)
+ single_be_rate = false;
+
+ if (val == fe_rate) {
+ fe_be_rate_match = true;
+ break;
+ }
+ }
+
+ /*
+	 * If the BE rate is different from the FE rate, the topology must
+	 * contain an SRC. But we do require the topology to
+ * define a single rate in the DAI copier config in
+ * this case (FE rate may be variable).
+ */
+ if (!fe_be_rate_match) {
+ if (!single_be_rate) {
+ dev_err(sdev->dev, "Unable to select sampling rate for DAI link\n");
+ return -EINVAL;
+ }
+
+ rate->min = be_rate;
+ rate->max = rate->min;
+ }
+
+ return 0;
+}
+
+static int sof_ipc4_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
+ struct snd_sof_dai *dai = snd_sof_find_dai(component, rtd->dai_link->name);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct sof_ipc4_copier *ipc4_copier;
+ bool use_chain_dma = false;
+ int dir;
+
+ if (!dai) {
+ dev_err(component->dev, "%s: No DAI found with name %s\n", __func__,
+ rtd->dai_link->name);
+ return -EINVAL;
+ }
+
+ ipc4_copier = dai->private;
+ if (!ipc4_copier) {
+ dev_err(component->dev, "%s: No private data found for DAI %s\n",
+ __func__, rtd->dai_link->name);
+ return -EINVAL;
+ }
+
+ for_each_pcm_streams(dir) {
+ struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, dir);
+
+ if (w) {
+ struct snd_sof_widget *swidget = w->dobj.private;
+ struct snd_sof_widget *pipe_widget = swidget->spipe->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
+
+ if (pipeline->use_chain_dma)
+ use_chain_dma = true;
+ }
+ }
+
+ /* Chain DMA does not use copiers, so no fixup needed */
+ if (!use_chain_dma) {
+ int ret = sof_ipc4_pcm_dai_link_fixup_rate(sdev, params, ipc4_copier);
+
+ if (ret)
+ return ret;
+ }
+
+ switch (ipc4_copier->dai_type) {
+ case SOF_DAI_INTEL_SSP:
+ ipc4_ssp_dai_config_pcm_params_match(sdev, (char *)rtd->dai_link->name, params);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void sof_ipc4_pcm_free(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm)
+{
+ struct snd_sof_pcm_stream_pipeline_list *pipeline_list;
+ int stream;
+
+ for_each_pcm_streams(stream) {
+ pipeline_list = &spcm->stream[stream].pipeline_list;
+ kfree(pipeline_list->pipelines);
+ pipeline_list->pipelines = NULL;
+ kfree(spcm->stream[stream].private);
+ spcm->stream[stream].private = NULL;
+ }
+}
+
+static int sof_ipc4_pcm_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm)
+{
+ struct snd_sof_pcm_stream_pipeline_list *pipeline_list;
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct sof_ipc4_timestamp_info *stream_info;
+ bool support_info = true;
+ u32 abi_version;
+ u32 abi_offset;
+ int stream;
+
+ abi_offset = offsetof(struct sof_ipc4_fw_registers, abi_ver);
+ sof_mailbox_read(sdev, sdev->fw_info_box.offset + abi_offset, &abi_version,
+ sizeof(abi_version));
+
+ if (abi_version < SOF_IPC4_FW_REGS_ABI_VER)
+ support_info = false;
+
+ for_each_pcm_streams(stream) {
+ pipeline_list = &spcm->stream[stream].pipeline_list;
+
+ /* allocate memory for max number of pipeline IDs */
+ pipeline_list->pipelines = kcalloc(ipc4_data->max_num_pipelines,
+ sizeof(struct snd_sof_widget *), GFP_KERNEL);
+ if (!pipeline_list->pipelines) {
+ sof_ipc4_pcm_free(sdev, spcm);
+ return -ENOMEM;
+ }
+
+ if (!support_info)
+ continue;
+
+ stream_info = kzalloc(sizeof(*stream_info), GFP_KERNEL);
+ if (!stream_info) {
+ sof_ipc4_pcm_free(sdev, spcm);
+ return -ENOMEM;
+ }
+
+ spcm->stream[stream].private = stream_info;
+ }
+
+ return 0;
+}
+
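+/*
+ * Locate the host and dai copiers of the stream and the LLP reading slot in
+ * the firmware registers window used by the dai: the GPDMA slots are checked
+ * first, then the aggregated SoundWire slots, then the EVAD slot. If no slot
+ * matches the dai gateway node_id, llp_offset is reset to 0 and the default
+ * HDA position reporting is used instead.
+ */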
+static void sof_ipc4_build_time_info(struct snd_sof_dev *sdev, struct snd_sof_pcm_stream *spcm)
+{
+ struct sof_ipc4_copier *host_copier = NULL;
+ struct sof_ipc4_copier *dai_copier = NULL;
+ struct sof_ipc4_llp_reading_slot llp_slot;
+ struct sof_ipc4_timestamp_info *info;
+ struct snd_soc_dapm_widget *widget;
+ struct snd_sof_dai *dai;
+ int i;
+
+ /* find host & dai to locate info in memory window */
+ for_each_dapm_widgets(spcm->list, i, widget) {
+ struct snd_sof_widget *swidget = widget->dobj.private;
+
+ if (!swidget)
+ continue;
+
+ if (WIDGET_IS_AIF(swidget->widget->id)) {
+ host_copier = swidget->private;
+ } else if (WIDGET_IS_DAI(swidget->widget->id)) {
+ dai = swidget->private;
+ dai_copier = dai->private;
+ }
+ }
+
+ /* both host and dai copier must be valid for time_info */
+ if (!host_copier || !dai_copier) {
+		dev_err(sdev->dev, "host or dai copier not found\n");
+ return;
+ }
+
+ info = spcm->private;
+ info->host_copier = host_copier;
+ info->dai_copier = dai_copier;
+ info->llp_offset = offsetof(struct sof_ipc4_fw_registers, llp_gpdma_reading_slots) +
+ sdev->fw_info_box.offset;
+
+ /* find llp slot used by current dai */
+ for (i = 0; i < SOF_IPC4_MAX_LLP_GPDMA_READING_SLOTS; i++) {
+ sof_mailbox_read(sdev, info->llp_offset, &llp_slot, sizeof(llp_slot));
+ if (llp_slot.node_id == dai_copier->data.gtw_cfg.node_id)
+ break;
+
+ info->llp_offset += sizeof(llp_slot);
+ }
+
+ if (i < SOF_IPC4_MAX_LLP_GPDMA_READING_SLOTS)
+ return;
+
+ /* if no llp gpdma slot is used, check aggregated sdw slot */
+ info->llp_offset = offsetof(struct sof_ipc4_fw_registers, llp_sndw_reading_slots) +
+ sdev->fw_info_box.offset;
+ for (i = 0; i < SOF_IPC4_MAX_LLP_SNDW_READING_SLOTS; i++) {
+ sof_mailbox_read(sdev, info->llp_offset, &llp_slot, sizeof(llp_slot));
+ if (llp_slot.node_id == dai_copier->data.gtw_cfg.node_id)
+ break;
+
+ info->llp_offset += sizeof(llp_slot);
+ }
+
+ if (i < SOF_IPC4_MAX_LLP_SNDW_READING_SLOTS)
+ return;
+
+ /* check EVAD slot */
+ info->llp_offset = offsetof(struct sof_ipc4_fw_registers, llp_evad_reading_slot) +
+ sdev->fw_info_box.offset;
+ sof_mailbox_read(sdev, info->llp_offset, &llp_slot, sizeof(llp_slot));
+ if (llp_slot.node_id != dai_copier->data.gtw_cfg.node_id) {
+		dev_info(sdev->dev, "no LLP slot found, falling back to the default HDA path\n");
+ info->llp_offset = 0;
+ }
+}
+
+static int sof_ipc4_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct sof_ipc4_timestamp_info *time_info;
+ struct snd_sof_pcm *spcm;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ time_info = spcm->stream[substream->stream].private;
+ /* delay calculation is not supported by current fw_reg ABI */
+ if (!time_info)
+ return 0;
+
+ time_info->stream_start_offset = SOF_IPC4_INVALID_STREAM_POSITION;
+ time_info->llp_offset = 0;
+
+ sof_ipc4_build_time_info(sdev, &spcm->stream[substream->stream]);
+
+ return 0;
+}
+
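+/*
+ * Read the pipeline registers slot corresponding to the host copier's gateway
+ * node index from the firmware registers window and convert the reported
+ * stream_start_offset to a sample count using the dai copier output format
+ * (bytes per frame).
+ */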
+static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_sof_pcm_stream *stream,
+ struct sof_ipc4_timestamp_info *time_info)
+{
+ struct sof_ipc4_copier *host_copier = time_info->host_copier;
+ struct sof_ipc4_copier *dai_copier = time_info->dai_copier;
+ struct sof_ipc4_pipeline_registers ppl_reg;
+ u64 stream_start_position;
+ u32 dai_sample_size;
+ u32 ch, node_index;
+ u32 offset;
+
+ if (!host_copier || !dai_copier)
+ return -EINVAL;
+
+ if (host_copier->data.gtw_cfg.node_id == SOF_IPC4_INVALID_NODE_ID)
+ return -EINVAL;
+
+ node_index = SOF_IPC4_NODE_INDEX(host_copier->data.gtw_cfg.node_id);
+ offset = offsetof(struct sof_ipc4_fw_registers, pipeline_regs) + node_index * sizeof(ppl_reg);
+ sof_mailbox_read(sdev, sdev->fw_info_box.offset + offset, &ppl_reg, sizeof(ppl_reg));
+ if (ppl_reg.stream_start_offset == SOF_IPC4_INVALID_STREAM_POSITION)
+ return -EINVAL;
+
+ stream_start_position = ppl_reg.stream_start_offset;
+ ch = dai_copier->data.out_format.fmt_cfg;
+ ch = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(ch);
+ dai_sample_size = (dai_copier->data.out_format.bit_depth >> 3) * ch;
+ /* convert offset to sample count */
+ do_div(stream_start_position, dai_sample_size);
+ time_info->stream_start_offset = stream_start_position;
+
+ return 0;
+}
+
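+/*
+ * Estimate the stream delay from the difference between the host hw_ptr and
+ * the link position: the link position comes either from the firmware LLP
+ * counter in the memory window or, for HDaudio links, from the hardware
+ * registers, and is normalized by subtracting stream_start_offset before the
+ * comparison. Both pointers may wrap at runtime->boundary.
+ */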
+static snd_pcm_sframes_t sof_ipc4_pcm_delay(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct sof_ipc4_timestamp_info *time_info;
+ struct sof_ipc4_llp_reading_slot llp;
+ snd_pcm_uframes_t head_ptr, tail_ptr;
+ struct snd_sof_pcm_stream *stream;
+ struct snd_sof_pcm *spcm;
+ u64 tmp_ptr;
+ int ret;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return 0;
+
+ stream = &spcm->stream[substream->stream];
+ time_info = stream->private;
+ if (!time_info)
+ return 0;
+
+ /*
+	 * stream_start_offset is written to the memory window by the FW based on
+	 * pipeline statistics and it may be invalid if the host queries it before
+	 * the statistics are complete. It will not change after the first initialization.
+ */
+ if (time_info->stream_start_offset == SOF_IPC4_INVALID_STREAM_POSITION) {
+ ret = sof_ipc4_get_stream_start_offset(sdev, substream, stream, time_info);
+ if (ret < 0)
+ return 0;
+ }
+
+ /*
+	 * HDaudio links don't support the LLP counter reported by firmware;
+	 * the link position is read directly from hardware registers.
+ */
+ if (!time_info->llp_offset) {
+ tmp_ptr = snd_sof_pcm_get_stream_position(sdev, component, substream);
+ if (!tmp_ptr)
+ return 0;
+ } else {
+ sof_mailbox_read(sdev, time_info->llp_offset, &llp, sizeof(llp));
+ tmp_ptr = ((u64)llp.reading.llp_u << 32) | llp.reading.llp_l;
+ }
+
+	/*
+	 * In two cases the dai dma position is not accurate:
+	 * (1) the dai pipeline is started before the host pipeline
+	 * (2) multiple streams are mixed into one; each stream has the same dai dma position
+	 *
+	 * The firmware calculates a correct stream_start_offset for all cases, including the
+	 * two above. The driver subtracts stream_start_offset from the dai dma position to
+	 * get an accurate one.
+	 */
+ tmp_ptr -= time_info->stream_start_offset;
+
+	/* Calculate the delay taking into account that both pointers can wrap */
+ div64_u64_rem(tmp_ptr, substream->runtime->boundary, &tmp_ptr);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ head_ptr = substream->runtime->status->hw_ptr;
+ tail_ptr = tmp_ptr;
+ } else {
+ head_ptr = tmp_ptr;
+ tail_ptr = substream->runtime->status->hw_ptr;
+ }
+
+ if (head_ptr < tail_ptr)
+ return substream->runtime->boundary - tail_ptr + head_ptr;
+
+ return head_ptr - tail_ptr;
+}
+
+const struct sof_ipc_pcm_ops ipc4_pcm_ops = {
+ .hw_params = sof_ipc4_pcm_hw_params,
+ .trigger = sof_ipc4_pcm_trigger,
+ .hw_free = sof_ipc4_pcm_hw_free,
+ .dai_link_fixup = sof_ipc4_pcm_dai_link_fixup,
+ .pcm_setup = sof_ipc4_pcm_setup,
+ .pcm_free = sof_ipc4_pcm_free,
+ .delay = sof_ipc4_pcm_delay,
+ .ipc_first_on_start = true,
+ .platform_stop_during_hw_free = true,
+};
diff --git a/sound/soc/sof/ipc4-priv.h b/sound/soc/sof/ipc4-priv.h
new file mode 100644
index 000000000..a5d0b2eae
--- /dev/null
+++ b/sound/soc/sof/ipc4-priv.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __SOUND_SOC_SOF_IPC4_PRIV_H
+#define __SOUND_SOC_SOF_IPC4_PRIV_H
+
+#include <linux/idr.h>
+#include <sound/sof/ext_manifest4.h>
+#include "sof-priv.h"
+
+/* The DSP window indices are fixed */
+#define SOF_IPC4_INBOX_WINDOW_IDX 0
+#define SOF_IPC4_OUTBOX_WINDOW_IDX 1
+#define SOF_IPC4_DEBUG_WINDOW_IDX 2
+
+enum sof_ipc4_mtrace_type {
+ SOF_IPC4_MTRACE_NOT_AVAILABLE = 0,
+ SOF_IPC4_MTRACE_INTEL_CAVS_1_5,
+ SOF_IPC4_MTRACE_INTEL_CAVS_1_8,
+ SOF_IPC4_MTRACE_INTEL_CAVS_2,
+};
+
+/**
+ * struct sof_ipc4_fw_module - IPC4 module info
+ * @man4_module_entry: Module info
+ * @fw_mod_cfg: Pointer to the start of the module's config
+ * @m_ida: Module instance ID allocator
+ * @private: Module private data
+ */
+struct sof_ipc4_fw_module {
+ struct sof_man4_module man4_module_entry;
+ const struct sof_man4_module_config *fw_mod_cfg;
+ struct ida m_ida;
+ void *private;
+};
+
+/**
+ * struct sof_ipc4_fw_library - IPC4 library information
+ * @sof_fw: SOF Firmware of the library
+ * @sof_fw: SOF firmware of the library
+ * @name: Name of the library
+ * @id: Library ID. 0 is reserved for basefw, external libraries must have a unique
+ *	ID number between 1 and (sof_ipc4_fw_data.max_libs_count - 1)
+ *	Note: sof_ipc4_fw_data.max_libs_count == 1 implies that external libraries
+ *	are not supported
+ * @num_modules: Number of FW modules in the library
+ * @modules: Array of FW modules
+ */
+struct sof_ipc4_fw_library {
+ struct sof_firmware sof_fw;
+ const char *name;
+ u32 id;
+ int num_modules;
+ struct sof_ipc4_fw_module *modules;
+};
+
+/**
+ * struct sof_ipc4_fw_data - IPC4-specific data
+ * @manifest_fw_hdr_offset: FW header offset in the manifest
+ * @fw_lib_xa: XArray for firmware libraries, including basefw (ID = 0)
+ * Used to store the FW libraries and to manage the unique IDs of the
+ * libraries.
+ * @nhlt: NHLT table either from the BIOS or the topology manifest
+ * @mtrace_type: mtrace type supported on the booted platform
+ * @mtrace_log_bytes: log bytes as reported by the firmware via fw_config reply
+ * @max_num_pipelines: max number of pipelines
+ * @max_libs_count: Maximum number of libraries supported by the FW including the
+ * base firmware
+ *
+ * @load_library: Callback function for platform dependent library loading
+ * @pipeline_state_mutex: Mutex to protect pipeline triggers, ref counts, states and deletion
+ */
+struct sof_ipc4_fw_data {
+ u32 manifest_fw_hdr_offset;
+ struct xarray fw_lib_xa;
+ void *nhlt;
+ enum sof_ipc4_mtrace_type mtrace_type;
+ u32 mtrace_log_bytes;
+ int max_num_pipelines;
+ u32 max_libs_count;
+
+ int (*load_library)(struct snd_sof_dev *sdev,
+ struct sof_ipc4_fw_library *fw_lib, bool reload);
+ struct mutex pipeline_state_mutex; /* protect pipeline triggers, ref counts and states */
+};
+
+/**
+ * struct sof_ipc4_timestamp_info - IPC4 timestamp info
+ * @host_copier: the host copier of the pcm stream
+ * @dai_copier: the dai copier of the pcm stream
+ * @stream_start_offset: reported by fw in memory window
+ * @llp_offset: llp offset in memory window
+ */
+struct sof_ipc4_timestamp_info {
+ struct sof_ipc4_copier *host_copier;
+ struct sof_ipc4_copier *dai_copier;
+ u64 stream_start_offset;
+ u32 llp_offset;
+};
+
+extern const struct sof_ipc_fw_loader_ops ipc4_loader_ops;
+extern const struct sof_ipc_tplg_ops ipc4_tplg_ops;
+extern const struct sof_ipc_tplg_control_ops tplg_ipc4_control_ops;
+extern const struct sof_ipc_pcm_ops ipc4_pcm_ops;
+extern const struct sof_ipc_fw_tracing_ops ipc4_mtrace_ops;
+
+int sof_ipc4_set_pipeline_state(struct snd_sof_dev *sdev, u32 id, u32 state);
+int sof_ipc4_mtrace_update_pos(struct snd_sof_dev *sdev, int core);
+
+int sof_ipc4_query_fw_configuration(struct snd_sof_dev *sdev);
+int sof_ipc4_reload_fw_libraries(struct snd_sof_dev *sdev);
+struct sof_ipc4_fw_module *sof_ipc4_find_module_by_uuid(struct snd_sof_dev *sdev,
+ const guid_t *uuid);
+
+struct sof_ipc4_base_module_cfg;
+void sof_ipc4_update_cpc_from_manifest(struct snd_sof_dev *sdev,
+ struct sof_ipc4_fw_module *fw_module,
+ struct sof_ipc4_base_module_cfg *basecfg);
+
+#endif
diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
new file mode 100644
index 000000000..2c075afd2
--- /dev/null
+++ b/sound/soc/sof/ipc4-topology.c
@@ -0,0 +1,3044 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+//
+#include <linux/bitfield.h>
+#include <uapi/sound/sof/tokens.h>
+#include <sound/pcm_params.h>
+#include <sound/sof/ext_manifest4.h>
+#include <sound/intel-nhlt.h>
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ipc4-priv.h"
+#include "ipc4-topology.h"
+#include "ops.h"
+
+/*
+ * The ignore_cpc flag can be used to ignore the CPC value for all modules by
+ * using 0 instead.
+ * The CPC is sent to the firmware along with the SOF_IPC4_MOD_INIT_INSTANCE
+ * message and it is used for clock scaling.
+ * 0 as CPC value will instruct the firmware to use maximum frequency, thus
+ * deactivating the clock scaling.
+ */
+static bool ignore_cpc;
+module_param_named(ipc4_ignore_cpc, ignore_cpc, bool, 0444);
+MODULE_PARM_DESC(ipc4_ignore_cpc,
+ "Ignore CPC values. This option will disable clock scaling in firmware.");
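+/*
+ * Example (module name assumed; this file is normally built into snd_sof):
+ *   modprobe snd_sof ipc4_ignore_cpc=1
+ */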
+
+#define SOF_IPC4_GAIN_PARAM_ID 0
+#define SOF_IPC4_TPLG_ABI_SIZE 6
+#define SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS 2
+
+static DEFINE_IDA(alh_group_ida);
+static DEFINE_IDA(pipeline_ida);
+
+static const struct sof_topology_token ipc4_sched_tokens[] = {
+ {SOF_TKN_SCHED_LP_MODE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pipeline, lp_mode)},
+ {SOF_TKN_SCHED_USE_CHAIN_DMA, SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct sof_ipc4_pipeline, use_chain_dma)},
+ {SOF_TKN_SCHED_CORE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pipeline, core_id)},
+};
+
+static const struct sof_topology_token pipeline_tokens[] = {
+ {SOF_TKN_SCHED_DYNAMIC_PIPELINE, SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_widget, dynamic_pipeline_widget)},
+};
+
+static const struct sof_topology_token ipc4_comp_tokens[] = {
+ {SOF_TKN_COMP_IS_PAGES, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_base_module_cfg, is_pages)},
+};
+
+static const struct sof_topology_token ipc4_in_audio_format_tokens[] = {
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IN_RATE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, audio_fmt.sampling_frequency)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IN_BIT_DEPTH, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, audio_fmt.bit_depth)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IN_CH_MAP, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, audio_fmt.ch_map)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IN_CH_CFG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, audio_fmt.ch_cfg)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IN_INTERLEAVING_STYLE, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32, offsetof(struct sof_ipc4_pin_format,
+ audio_fmt.interleaving_style)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IN_FMT_CFG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, audio_fmt.fmt_cfg)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_INPUT_PIN_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, pin_index)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IBS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, buffer_size)},
+};
+
+static const struct sof_topology_token ipc4_out_audio_format_tokens[] = {
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_RATE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, audio_fmt.sampling_frequency)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_BIT_DEPTH, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, audio_fmt.bit_depth)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CH_MAP, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, audio_fmt.ch_map)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CH_CFG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, audio_fmt.ch_cfg)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_INTERLEAVING_STYLE, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32, offsetof(struct sof_ipc4_pin_format,
+ audio_fmt.interleaving_style)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_FMT_CFG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, audio_fmt.fmt_cfg)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OUTPUT_PIN_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, pin_index)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OBS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pin_format, buffer_size)},
+};
+
+static const struct sof_topology_token ipc4_copier_deep_buffer_tokens[] = {
+ {SOF_TKN_INTEL_COPIER_DEEP_BUFFER_DMA_MS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32, 0},
+};
+
+static const struct sof_topology_token ipc4_copier_tokens[] = {
+ {SOF_TKN_INTEL_COPIER_NODE_TYPE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32, 0},
+};
+
+static const struct sof_topology_token ipc4_audio_fmt_num_tokens[] = {
+ {SOF_TKN_COMP_NUM_INPUT_AUDIO_FORMATS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_available_audio_format, num_input_formats)},
+ {SOF_TKN_COMP_NUM_OUTPUT_AUDIO_FORMATS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_available_audio_format, num_output_formats)},
+};
+
+static const struct sof_topology_token dai_tokens[] = {
+ {SOF_TKN_DAI_TYPE, SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_dai_type,
+ offsetof(struct sof_ipc4_copier, dai_type)},
+ {SOF_TKN_DAI_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_copier, dai_index)},
+};
+
+/* Component extended tokens */
+static const struct sof_topology_token comp_ext_tokens[] = {
+ {SOF_TKN_COMP_UUID, SND_SOC_TPLG_TUPLE_TYPE_UUID, get_token_uuid,
+ offsetof(struct snd_sof_widget, uuid)},
+ {SOF_TKN_COMP_CORE_ID, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct snd_sof_widget, core)},
+};
+
+static const struct sof_topology_token gain_tokens[] = {
+ {SOF_TKN_GAIN_RAMP_TYPE, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32, offsetof(struct sof_ipc4_gain_params, curve_type)},
+ {SOF_TKN_GAIN_RAMP_DURATION,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_gain_params, curve_duration_l)},
+ {SOF_TKN_GAIN_VAL, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32, offsetof(struct sof_ipc4_gain_params, init_val)},
+};
+
+/* SRC */
+static const struct sof_topology_token src_tokens[] = {
+ {SOF_TKN_SRC_RATE_OUT, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_src_data, sink_rate)},
+};
+
+static const struct sof_token_info ipc4_token_list[SOF_TOKEN_COUNT] = {
+ [SOF_DAI_TOKENS] = {"DAI tokens", dai_tokens, ARRAY_SIZE(dai_tokens)},
+ [SOF_PIPELINE_TOKENS] = {"Pipeline tokens", pipeline_tokens, ARRAY_SIZE(pipeline_tokens)},
+ [SOF_SCHED_TOKENS] = {"Scheduler tokens", ipc4_sched_tokens,
+ ARRAY_SIZE(ipc4_sched_tokens)},
+ [SOF_COMP_EXT_TOKENS] = {"Comp extended tokens", comp_ext_tokens,
+ ARRAY_SIZE(comp_ext_tokens)},
+ [SOF_COMP_TOKENS] = {"IPC4 Component tokens",
+ ipc4_comp_tokens, ARRAY_SIZE(ipc4_comp_tokens)},
+ [SOF_IN_AUDIO_FORMAT_TOKENS] = {"IPC4 Input Audio format tokens",
+ ipc4_in_audio_format_tokens, ARRAY_SIZE(ipc4_in_audio_format_tokens)},
+ [SOF_OUT_AUDIO_FORMAT_TOKENS] = {"IPC4 Output Audio format tokens",
+ ipc4_out_audio_format_tokens, ARRAY_SIZE(ipc4_out_audio_format_tokens)},
+ [SOF_COPIER_DEEP_BUFFER_TOKENS] = {"IPC4 Copier deep buffer tokens",
+ ipc4_copier_deep_buffer_tokens, ARRAY_SIZE(ipc4_copier_deep_buffer_tokens)},
+ [SOF_COPIER_TOKENS] = {"IPC4 Copier tokens", ipc4_copier_tokens,
+ ARRAY_SIZE(ipc4_copier_tokens)},
+ [SOF_AUDIO_FMT_NUM_TOKENS] = {"IPC4 Audio format number tokens",
+ ipc4_audio_fmt_num_tokens, ARRAY_SIZE(ipc4_audio_fmt_num_tokens)},
+ [SOF_GAIN_TOKENS] = {"Gain tokens", gain_tokens, ARRAY_SIZE(gain_tokens)},
+ [SOF_SRC_TOKENS] = {"SRC tokens", src_tokens, ARRAY_SIZE(src_tokens)},
+};
+
+static void sof_ipc4_dbg_audio_format(struct device *dev, struct sof_ipc4_pin_format *pin_fmt,
+ int num_formats)
+{
+ int i;
+
+ for (i = 0; i < num_formats; i++) {
+		struct sof_ipc4_audio_format *fmt = &pin_fmt[i].audio_fmt;
+
+		dev_dbg(dev,
+ "Pin index #%d: %uHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x) buffer size %d\n",
+ pin_fmt[i].pin_index, fmt->sampling_frequency, fmt->bit_depth, fmt->ch_map,
+ fmt->ch_cfg, fmt->interleaving_style, fmt->fmt_cfg,
+ pin_fmt[i].buffer_size);
+ }
+}
+
+static const struct sof_ipc4_audio_format *
+sof_ipc4_get_input_pin_audio_fmt(struct snd_sof_widget *swidget, int pin_index)
+{
+ struct sof_ipc4_base_module_cfg_ext *base_cfg_ext;
+ struct sof_ipc4_process *process;
+ int i;
+
+ if (swidget->id != snd_soc_dapm_effect) {
+ struct sof_ipc4_base_module_cfg *base = swidget->private;
+
+ /* For non-process modules, base module config format is used for all input pins */
+ return &base->audio_fmt;
+ }
+
+ process = swidget->private;
+ base_cfg_ext = process->base_config_ext;
+
+ /*
+ * If there are multiple input formats available for a pin, the first available format
+ * is chosen.
+ */
+ for (i = 0; i < base_cfg_ext->num_input_pin_fmts; i++) {
+ struct sof_ipc4_pin_format *pin_format = &base_cfg_ext->pin_formats[i];
+
+ if (pin_format->pin_index == pin_index)
+ return &pin_format->audio_fmt;
+ }
+
+ return NULL;
+}
+
+/**
+ * sof_ipc4_get_audio_fmt - get available audio formats from swidget->tuples
+ * @scomp: pointer to SOC component
+ * @swidget: pointer to struct snd_sof_widget containing tuples
+ * @available_fmt: pointer to struct sof_ipc4_available_audio_format being filled in
+ * @module_base_cfg: Pointer to the base_config in the module init IPC payload
+ *
+ * Return: 0 if successful
+ */
+static int sof_ipc4_get_audio_fmt(struct snd_soc_component *scomp,
+ struct snd_sof_widget *swidget,
+ struct sof_ipc4_available_audio_format *available_fmt,
+ struct sof_ipc4_base_module_cfg *module_base_cfg)
+{
+ struct sof_ipc4_pin_format *in_format = NULL;
+ struct sof_ipc4_pin_format *out_format;
+ int ret;
+
+ ret = sof_update_ipc_object(scomp, available_fmt,
+ SOF_AUDIO_FMT_NUM_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*available_fmt), 1);
+ if (ret) {
+ dev_err(scomp->dev, "Failed to parse audio format token count\n");
+ return ret;
+ }
+
+ if (!available_fmt->num_input_formats && !available_fmt->num_output_formats) {
+ dev_err(scomp->dev, "No input/output pin formats set in topology\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(scomp->dev,
+ "Number of input audio formats: %d. Number of output audio formats: %d\n",
+ available_fmt->num_input_formats, available_fmt->num_output_formats);
+
+ /* set is_pages in the module's base_config */
+ ret = sof_update_ipc_object(scomp, module_base_cfg, SOF_COMP_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*module_base_cfg), 1);
+ if (ret) {
+ dev_err(scomp->dev, "parse comp tokens for %s failed, error: %d\n",
+ swidget->widget->name, ret);
+ return ret;
+ }
+
+ dev_dbg(scomp->dev, "widget %s: is_pages: %d\n", swidget->widget->name,
+ module_base_cfg->is_pages);
+
+ if (available_fmt->num_input_formats) {
+ in_format = kcalloc(available_fmt->num_input_formats,
+ sizeof(*in_format), GFP_KERNEL);
+ if (!in_format)
+ return -ENOMEM;
+ available_fmt->input_pin_fmts = in_format;
+
+ ret = sof_update_ipc_object(scomp, in_format,
+ SOF_IN_AUDIO_FORMAT_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*in_format),
+ available_fmt->num_input_formats);
+ if (ret) {
+ dev_err(scomp->dev, "parse input audio fmt tokens failed %d\n", ret);
+ goto err_in;
+ }
+
+ dev_dbg(scomp->dev, "Input audio formats for %s\n", swidget->widget->name);
+ sof_ipc4_dbg_audio_format(scomp->dev, in_format,
+ available_fmt->num_input_formats);
+ }
+
+ if (available_fmt->num_output_formats) {
+ out_format = kcalloc(available_fmt->num_output_formats, sizeof(*out_format),
+ GFP_KERNEL);
+ if (!out_format) {
+ ret = -ENOMEM;
+ goto err_in;
+ }
+
+ ret = sof_update_ipc_object(scomp, out_format,
+ SOF_OUT_AUDIO_FORMAT_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*out_format),
+ available_fmt->num_output_formats);
+ if (ret) {
+ dev_err(scomp->dev, "parse output audio fmt tokens failed\n");
+ goto err_out;
+ }
+
+ available_fmt->output_pin_fmts = out_format;
+ dev_dbg(scomp->dev, "Output audio formats for %s\n", swidget->widget->name);
+ sof_ipc4_dbg_audio_format(scomp->dev, out_format,
+ available_fmt->num_output_formats);
+ }
+
+ return 0;
+
+err_out:
+ kfree(out_format);
+err_in:
+ kfree(in_format);
+ available_fmt->input_pin_fmts = NULL;
+ return ret;
+}
+
+/* release the memory allocated in sof_ipc4_get_audio_fmt */
+static void sof_ipc4_free_audio_fmt(struct sof_ipc4_available_audio_format *available_fmt)
+{
+ kfree(available_fmt->output_pin_fmts);
+ available_fmt->output_pin_fmts = NULL;
+ kfree(available_fmt->input_pin_fmts);
+ available_fmt->input_pin_fmts = NULL;
+}
+
+static void sof_ipc4_widget_free_comp_pipeline(struct snd_sof_widget *swidget)
+{
+ kfree(swidget->private);
+}
+
+static int sof_ipc4_widget_set_module_info(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+
+ swidget->module_info = sof_ipc4_find_module_by_uuid(sdev, &swidget->uuid);
+
+ if (swidget->module_info)
+ return 0;
+
+ dev_err(sdev->dev, "failed to find module info for widget %s with UUID %pUL\n",
+ swidget->widget->name, &swidget->uuid);
+ return -EINVAL;
+}
+
+static int sof_ipc4_widget_setup_msg(struct snd_sof_widget *swidget, struct sof_ipc4_msg *msg)
+{
+ struct sof_ipc4_fw_module *fw_module;
+ uint32_t type;
+ int ret;
+
+ ret = sof_ipc4_widget_set_module_info(swidget);
+ if (ret)
+ return ret;
+
+ fw_module = swidget->module_info;
+
+ msg->primary = fw_module->man4_module_entry.id;
+ msg->primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_INIT_INSTANCE);
+ msg->primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg->primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+
+ msg->extension = SOF_IPC4_MOD_EXT_CORE_ID(swidget->core);
+
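+ /* select the DP processing domain if the module manifest has the DP flag set */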
+ type = (fw_module->man4_module_entry.type & SOF_IPC4_MODULE_DP) ? 1 : 0;
+ msg->extension |= SOF_IPC4_MOD_EXT_DOMAIN(type);
+
+ return 0;
+}
+
+static void sof_ipc4_widget_update_kcontrol_module_id(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_fw_module *fw_module = swidget->module_info;
+ struct snd_sof_control *scontrol;
+
+ /* update module ID for all kcontrols for this widget */
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list) {
+ if (scontrol->comp_id == swidget->comp_id) {
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct sof_ipc4_msg *msg = &cdata->msg;
+
+ msg->primary |= fw_module->man4_module_entry.id;
+ }
+ }
+}
+
+static int sof_ipc4_widget_setup_pcm(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_available_audio_format *available_fmt;
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc4_copier *ipc4_copier;
+ int node_type = 0;
+ int ret;
+
+ ipc4_copier = kzalloc(sizeof(*ipc4_copier), GFP_KERNEL);
+ if (!ipc4_copier)
+ return -ENOMEM;
+
+ swidget->private = ipc4_copier;
+ available_fmt = &ipc4_copier->available_fmt;
+
+ dev_dbg(scomp->dev, "Updating IPC structure for %s\n", swidget->widget->name);
+
+ ret = sof_ipc4_get_audio_fmt(scomp, swidget, available_fmt,
+ &ipc4_copier->data.base_config);
+ if (ret)
+ goto free_copier;
+
+ /*
+ * This callback is used by host copier and module-to-module copier,
+ * and only host copier needs to set gtw_cfg.
+ */
+ if (!WIDGET_IS_AIF(swidget->id))
+ goto skip_gtw_cfg;
+
+ ret = sof_update_ipc_object(scomp, &node_type,
+ SOF_COPIER_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(node_type), 1);
+ if (ret) {
+ dev_err(scomp->dev, "parse host copier node type token failed %d\n",
+ ret);
+ goto free_available_fmt;
+ }
+ dev_dbg(scomp->dev, "host copier '%s' node_type %u\n", swidget->widget->name, node_type);
+
+skip_gtw_cfg:
+ ipc4_copier->gtw_attr = kzalloc(sizeof(*ipc4_copier->gtw_attr), GFP_KERNEL);
+ if (!ipc4_copier->gtw_attr) {
+ ret = -ENOMEM;
+ goto free_available_fmt;
+ }
+
+ ipc4_copier->copier_config = (uint32_t *)ipc4_copier->gtw_attr;
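+ /* the gateway config length is expressed in dwords, hence the divide by four (>> 2) */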
+ ipc4_copier->data.gtw_cfg.config_length =
+ sizeof(struct sof_ipc4_gtw_attributes) >> 2;
+
+ switch (swidget->id) {
+ case snd_soc_dapm_aif_in:
+ case snd_soc_dapm_aif_out:
+ ipc4_copier->data.gtw_cfg.node_id = SOF_IPC4_NODE_TYPE(node_type);
+ break;
+ case snd_soc_dapm_buffer:
+ ipc4_copier->data.gtw_cfg.node_id = SOF_IPC4_INVALID_NODE_ID;
+ ipc4_copier->ipc_config_size = 0;
+ break;
+ default:
+ dev_err(scomp->dev, "invalid widget type %d\n", swidget->id);
+ ret = -EINVAL;
+ goto free_gtw_attr;
+ }
+
+ /* set up module info and message header */
+ ret = sof_ipc4_widget_setup_msg(swidget, &ipc4_copier->msg);
+ if (ret)
+ goto free_gtw_attr;
+
+ return 0;
+
+free_gtw_attr:
+ kfree(ipc4_copier->gtw_attr);
+free_available_fmt:
+ sof_ipc4_free_audio_fmt(available_fmt);
+free_copier:
+ kfree(ipc4_copier);
+ swidget->private = NULL;
+ return ret;
+}
+
+static void sof_ipc4_widget_free_comp_pcm(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_copier *ipc4_copier = swidget->private;
+ struct sof_ipc4_available_audio_format *available_fmt;
+
+ if (!ipc4_copier)
+ return;
+
+ available_fmt = &ipc4_copier->available_fmt;
+ kfree(available_fmt->output_pin_fmts);
+ kfree(ipc4_copier->gtw_attr);
+ kfree(ipc4_copier);
+ swidget->private = NULL;
+}
+
+static int sof_ipc4_widget_setup_comp_dai(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_available_audio_format *available_fmt;
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_ipc4_copier *ipc4_copier;
+ struct snd_sof_widget *pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+ int node_type = 0;
+ int ret;
+
+ ipc4_copier = kzalloc(sizeof(*ipc4_copier), GFP_KERNEL);
+ if (!ipc4_copier)
+ return -ENOMEM;
+
+ available_fmt = &ipc4_copier->available_fmt;
+
+ dev_dbg(scomp->dev, "Updating IPC structure for %s\n", swidget->widget->name);
+
+ ret = sof_ipc4_get_audio_fmt(scomp, swidget, available_fmt,
+ &ipc4_copier->data.base_config);
+ if (ret)
+ goto free_copier;
+
+ ret = sof_update_ipc_object(scomp, &node_type,
+ SOF_COPIER_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(node_type), 1);
+ if (ret) {
+ dev_err(scomp->dev, "parse dai node type failed %d\n", ret);
+ goto free_available_fmt;
+ }
+
+ ret = sof_update_ipc_object(scomp, ipc4_copier,
+ SOF_DAI_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(u32), 1);
+ if (ret) {
+ dev_err(scomp->dev, "parse dai copier node token failed %d\n", ret);
+ goto free_available_fmt;
+ }
+
+ dev_dbg(scomp->dev, "dai %s node_type %u dai_type %u dai_index %d\n", swidget->widget->name,
+ node_type, ipc4_copier->dai_type, ipc4_copier->dai_index);
+
+ ipc4_copier->data.gtw_cfg.node_id = SOF_IPC4_NODE_TYPE(node_type);
+
+ pipe_widget = swidget->spipe->pipe_widget;
+ pipeline = pipe_widget->private;
+ if (pipeline->use_chain_dma && ipc4_copier->dai_type != SOF_DAI_INTEL_HDA) {
+ dev_err(scomp->dev,
+ "Bad DAI type '%d', Chained DMA is only supported by HDA DAIs (%d).\n",
+ ipc4_copier->dai_type, SOF_DAI_INTEL_HDA);
+ ret = -ENODEV;
+ goto free_available_fmt;
+ }
+
+ switch (ipc4_copier->dai_type) {
+ case SOF_DAI_INTEL_ALH:
+ {
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_alh_configuration_blob *blob;
+ struct snd_soc_dapm_path *p;
+ struct snd_sof_widget *w;
+ int src_num = 0;
+
+ snd_soc_dapm_widget_for_each_source_path(swidget->widget, p)
+ src_num++;
+
+ if (swidget->id == snd_soc_dapm_dai_in && src_num == 0) {
+ /*
+ * The blob will not be used if the ALH copier is playback direction
+ * and doesn't connect to any source.
+ * It is fine to call kfree(ipc4_copier->copier_config) since
+ * ipc4_copier->copier_config is null.
+ */
+ ret = 0;
+ break;
+ }
+
+ blob = kzalloc(sizeof(*blob), GFP_KERNEL);
+ if (!blob) {
+ ret = -ENOMEM;
+ goto free_available_fmt;
+ }
+
+ list_for_each_entry(w, &sdev->widget_list, list) {
+ if (w->widget->sname &&
+ strcmp(w->widget->sname, swidget->widget->sname))
+ continue;
+
+ blob->alh_cfg.device_count++;
+ }
+
+ ipc4_copier->copier_config = (uint32_t *)blob;
+ ipc4_copier->data.gtw_cfg.config_length = sizeof(*blob) >> 2;
+ break;
+ }
+ case SOF_DAI_INTEL_SSP:
+ /* set SSP DAI index as the node_id */
+ ipc4_copier->data.gtw_cfg.node_id |=
+ SOF_IPC4_NODE_INDEX_INTEL_SSP(ipc4_copier->dai_index);
+ break;
+ case SOF_DAI_INTEL_DMIC:
+ /* set DMIC DAI index as the node_id */
+ ipc4_copier->data.gtw_cfg.node_id |=
+ SOF_IPC4_NODE_INDEX_INTEL_DMIC(ipc4_copier->dai_index);
+ break;
+ default:
+ ipc4_copier->gtw_attr = kzalloc(sizeof(*ipc4_copier->gtw_attr), GFP_KERNEL);
+ if (!ipc4_copier->gtw_attr) {
+ ret = -ENOMEM;
+ goto free_available_fmt;
+ }
+
+ ipc4_copier->copier_config = (uint32_t *)ipc4_copier->gtw_attr;
+ ipc4_copier->data.gtw_cfg.config_length =
+ sizeof(struct sof_ipc4_gtw_attributes) >> 2;
+ break;
+ }
+
+ dai->scomp = scomp;
+ dai->private = ipc4_copier;
+
+ /* set up module info and message header */
+ ret = sof_ipc4_widget_setup_msg(swidget, &ipc4_copier->msg);
+ if (ret)
+ goto free_copier_config;
+
+ return 0;
+
+free_copier_config:
+ kfree(ipc4_copier->copier_config);
+free_available_fmt:
+ sof_ipc4_free_audio_fmt(available_fmt);
+free_copier:
+ kfree(ipc4_copier);
+ dai->private = NULL;
+ dai->scomp = NULL;
+ return ret;
+}
+
+static void sof_ipc4_widget_free_comp_dai(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_available_audio_format *available_fmt;
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_ipc4_copier *ipc4_copier;
+
+ if (!dai)
+ return;
+
+ if (!dai->private) {
+ kfree(dai);
+ swidget->private = NULL;
+ return;
+ }
+
+ ipc4_copier = dai->private;
+ available_fmt = &ipc4_copier->available_fmt;
+
+ kfree(available_fmt->output_pin_fmts);
+ if (ipc4_copier->dai_type != SOF_DAI_INTEL_SSP &&
+ ipc4_copier->dai_type != SOF_DAI_INTEL_DMIC)
+ kfree(ipc4_copier->copier_config);
+ kfree(dai->private);
+ kfree(dai);
+ swidget->private = NULL;
+}
+
+static int sof_ipc4_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc4_pipeline *pipeline;
+ struct snd_sof_pipeline *spipe = swidget->spipe;
+ int ret;
+
+ pipeline = kzalloc(sizeof(*pipeline), GFP_KERNEL);
+ if (!pipeline)
+ return -ENOMEM;
+
+ ret = sof_update_ipc_object(scomp, pipeline, SOF_SCHED_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*pipeline), 1);
+ if (ret) {
+ dev_err(scomp->dev, "parsing scheduler tokens failed\n");
+ goto err;
+ }
+
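+ /* the scheduler widget runs on the pipeline core; track it in the pipeline's core mask */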
+ swidget->core = pipeline->core_id;
+ spipe->core_mask |= BIT(pipeline->core_id);
+
+ if (pipeline->use_chain_dma) {
+ dev_dbg(scomp->dev, "Set up chain DMA for %s\n", swidget->widget->name);
+ swidget->private = pipeline;
+ return 0;
+ }
+
+ /* parse one set of pipeline tokens */
+ ret = sof_update_ipc_object(scomp, swidget, SOF_PIPELINE_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*swidget), 1);
+ if (ret) {
+ dev_err(scomp->dev, "parsing pipeline tokens failed\n");
+ goto err;
+ }
+
+ /* TODO: Get priority from topology */
+ pipeline->priority = 0;
+
+ dev_dbg(scomp->dev, "pipeline '%s': id %d, pri %d, core_id %u, lp mode %d\n",
+ swidget->widget->name, swidget->pipeline_id,
+ pipeline->priority, pipeline->core_id, pipeline->lp_mode);
+
+ swidget->private = pipeline;
+
+ pipeline->msg.primary = SOF_IPC4_GLB_PIPE_PRIORITY(pipeline->priority);
+ pipeline->msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_CREATE_PIPELINE);
+ pipeline->msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ pipeline->msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
+
+ pipeline->msg.extension = pipeline->lp_mode;
+ pipeline->msg.extension |= SOF_IPC4_GLB_PIPE_EXT_CORE_ID(pipeline->core_id);
+ pipeline->state = SOF_IPC4_PIPE_UNINITIALIZED;
+
+ return 0;
+err:
+ kfree(pipeline);
+ return ret;
+}
+
+static int sof_ipc4_widget_setup_comp_pga(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc4_gain *gain;
+ int ret;
+
+ gain = kzalloc(sizeof(*gain), GFP_KERNEL);
+ if (!gain)
+ return -ENOMEM;
+
+ swidget->private = gain;
+
+ gain->data.params.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
+ gain->data.params.init_val = SOF_IPC4_VOL_ZERO_DB;
+
+ ret = sof_ipc4_get_audio_fmt(scomp, swidget, &gain->available_fmt, &gain->data.base_config);
+ if (ret)
+ goto err;
+
+ ret = sof_update_ipc_object(scomp, &gain->data.params, SOF_GAIN_TOKENS,
+ swidget->tuples, swidget->num_tuples, sizeof(gain->data), 1);
+ if (ret) {
+ dev_err(scomp->dev, "Parsing gain tokens failed\n");
+ goto err;
+ }
+
+ dev_dbg(scomp->dev,
+ "pga widget %s: ramp type: %d, ramp duration %d, initial gain value: %#x\n",
+ swidget->widget->name, gain->data.params.curve_type,
+ gain->data.params.curve_duration_l, gain->data.params.init_val);
+
+ ret = sof_ipc4_widget_setup_msg(swidget, &gain->msg);
+ if (ret)
+ goto err;
+
+ sof_ipc4_widget_update_kcontrol_module_id(swidget);
+
+ return 0;
+err:
+ sof_ipc4_free_audio_fmt(&gain->available_fmt);
+ kfree(gain);
+ swidget->private = NULL;
+ return ret;
+}
+
+static void sof_ipc4_widget_free_comp_pga(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_gain *gain = swidget->private;
+
+ if (!gain)
+ return;
+
+ sof_ipc4_free_audio_fmt(&gain->available_fmt);
+ kfree(swidget->private);
+ swidget->private = NULL;
+}
+
+static int sof_ipc4_widget_setup_comp_mixer(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc4_mixer *mixer;
+ int ret;
+
+ dev_dbg(scomp->dev, "Updating IPC structure for %s\n", swidget->widget->name);
+
+ mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
+ if (!mixer)
+ return -ENOMEM;
+
+ swidget->private = mixer;
+
+ ret = sof_ipc4_get_audio_fmt(scomp, swidget, &mixer->available_fmt,
+ &mixer->base_config);
+ if (ret)
+ goto err;
+
+ ret = sof_ipc4_widget_setup_msg(swidget, &mixer->msg);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ sof_ipc4_free_audio_fmt(&mixer->available_fmt);
+ kfree(mixer);
+ swidget->private = NULL;
+ return ret;
+}
+
+static int sof_ipc4_widget_setup_comp_src(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_pipeline *spipe = swidget->spipe;
+ struct sof_ipc4_src *src;
+ int ret;
+
+ dev_dbg(scomp->dev, "Updating IPC structure for %s\n", swidget->widget->name);
+
+ src = kzalloc(sizeof(*src), GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+
+ swidget->private = src;
+
+ ret = sof_ipc4_get_audio_fmt(scomp, swidget, &src->available_fmt,
+ &src->data.base_config);
+ if (ret)
+ goto err;
+
+ ret = sof_update_ipc_object(scomp, &src->data, SOF_SRC_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*src), 1);
+ if (ret) {
+ dev_err(scomp->dev, "Parsing SRC tokens failed\n");
+ goto err;
+ }
+
+ spipe->core_mask |= BIT(swidget->core);
+
+ dev_dbg(scomp->dev, "SRC sink rate %d\n", src->data.sink_rate);
+
+ ret = sof_ipc4_widget_setup_msg(swidget, &src->msg);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ sof_ipc4_free_audio_fmt(&src->available_fmt);
+ kfree(src);
+ swidget->private = NULL;
+ return ret;
+}
+
+static void sof_ipc4_widget_free_comp_src(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_src *src = swidget->private;
+
+ if (!src)
+ return;
+
+ sof_ipc4_free_audio_fmt(&src->available_fmt);
+ kfree(swidget->private);
+ swidget->private = NULL;
+}
+
+static void sof_ipc4_widget_free_comp_mixer(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_mixer *mixer = swidget->private;
+
+ if (!mixer)
+ return;
+
+ sof_ipc4_free_audio_fmt(&mixer->available_fmt);
+ kfree(swidget->private);
+ swidget->private = NULL;
+}
+
+/*
+ * Add support for process modules. Process modules are defined as snd_soc_dapm_effect modules.
+ */
+static int sof_ipc4_widget_setup_comp_process(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc4_fw_module *fw_module;
+ struct snd_sof_pipeline *spipe = swidget->spipe;
+ struct sof_ipc4_process *process;
+ void *cfg;
+ int ret;
+
+ process = kzalloc(sizeof(*process), GFP_KERNEL);
+ if (!process)
+ return -ENOMEM;
+
+ swidget->private = process;
+
+ ret = sof_ipc4_get_audio_fmt(scomp, swidget, &process->available_fmt,
+ &process->base_config);
+ if (ret)
+ goto err;
+
+ ret = sof_ipc4_widget_setup_msg(swidget, &process->msg);
+ if (ret)
+ goto err;
+
+ /* parse process init module payload config type from module info */
+ fw_module = swidget->module_info;
+ process->init_config = FIELD_GET(SOF_IPC4_MODULE_INIT_CONFIG_MASK,
+ fw_module->man4_module_entry.type);
+
+ process->ipc_config_size = sizeof(struct sof_ipc4_base_module_cfg);
+
+ /* allocate memory for base config extension if needed */
+ if (process->init_config == SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT) {
+ struct sof_ipc4_base_module_cfg_ext *base_cfg_ext;
+ u32 ext_size = struct_size(base_cfg_ext, pin_formats,
+ size_add(swidget->num_input_pins,
+ swidget->num_output_pins));
+
+ base_cfg_ext = kzalloc(ext_size, GFP_KERNEL);
+ if (!base_cfg_ext) {
+ ret = -ENOMEM;
+ goto free_available_fmt;
+ }
+
+ base_cfg_ext->num_input_pin_fmts = swidget->num_input_pins;
+ base_cfg_ext->num_output_pin_fmts = swidget->num_output_pins;
+ process->base_config_ext = base_cfg_ext;
+ process->base_config_ext_size = ext_size;
+ process->ipc_config_size += ext_size;
+ }
+
+ cfg = kzalloc(process->ipc_config_size, GFP_KERNEL);
+ if (!cfg) {
+ ret = -ENOMEM;
+ goto free_base_cfg_ext;
+ }
+
+ process->ipc_config_data = cfg;
+
+ sof_ipc4_widget_update_kcontrol_module_id(swidget);
+
+ /* set pipeline core mask to keep track of the core the module is scheduled to run on */
+ spipe->core_mask |= BIT(swidget->core);
+
+ return 0;
+free_base_cfg_ext:
+ kfree(process->base_config_ext);
+ process->base_config_ext = NULL;
+free_available_fmt:
+ sof_ipc4_free_audio_fmt(&process->available_fmt);
+err:
+ kfree(process);
+ swidget->private = NULL;
+ return ret;
+}
+
+static void sof_ipc4_widget_free_comp_process(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_process *process = swidget->private;
+
+ if (!process)
+ return;
+
+ kfree(process->ipc_config_data);
+ kfree(process->base_config_ext);
+ sof_ipc4_free_audio_fmt(&process->available_fmt);
+ kfree(swidget->private);
+ swidget->private = NULL;
+}
+
+static void
+sof_ipc4_update_resource_usage(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget,
+ struct sof_ipc4_base_module_cfg *base_config)
+{
+ struct sof_ipc4_fw_module *fw_module = swidget->module_info;
+ struct snd_sof_widget *pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+ int task_mem, queue_mem;
+ int ibs, bss, total;
+
+ ibs = base_config->ibs;
+ bss = base_config->is_pages;
+
+ task_mem = SOF_IPC4_PIPELINE_OBJECT_SIZE;
+ task_mem += SOF_IPC4_MODULE_INSTANCE_LIST_ITEM_SIZE + bss;
+
+ if (fw_module->man4_module_entry.type & SOF_IPC4_MODULE_LL) {
+ task_mem += SOF_IPC4_FW_ROUNDUP(SOF_IPC4_LL_TASK_OBJECT_SIZE);
+ task_mem += SOF_IPC4_FW_MAX_QUEUE_COUNT * SOF_IPC4_MODULE_INSTANCE_LIST_ITEM_SIZE;
+ task_mem += SOF_IPC4_LL_TASK_LIST_ITEM_SIZE;
+ } else {
+ task_mem += SOF_IPC4_FW_ROUNDUP(SOF_IPC4_DP_TASK_OBJECT_SIZE);
+ task_mem += SOF_IPC4_DP_TASK_LIST_SIZE;
+ }
+
+ ibs = SOF_IPC4_FW_ROUNDUP(ibs);
+ queue_mem = SOF_IPC4_FW_MAX_QUEUE_COUNT * (SOF_IPC4_DATA_QUEUE_OBJECT_SIZE + ibs);
+
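+ /* convert the combined task and queue memory requirement to firmware pages */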
+ total = SOF_IPC4_FW_PAGE(task_mem + queue_mem);
+
+ pipe_widget = swidget->spipe->pipe_widget;
+ pipeline = pipe_widget->private;
+ pipeline->mem_usage += total;
+
+ /* Update base_config->cpc from the module manifest */
+ sof_ipc4_update_cpc_from_manifest(sdev, fw_module, base_config);
+
+ if (ignore_cpc) {
+ dev_dbg(sdev->dev, "%s: ibs / obs: %u / %u, forcing cpc to 0 from %u\n",
+ swidget->widget->name, base_config->ibs, base_config->obs,
+ base_config->cpc);
+ base_config->cpc = 0;
+ } else {
+ dev_dbg(sdev->dev, "%s: ibs / obs / cpc: %u / %u / %u\n",
+ swidget->widget->name, base_config->ibs, base_config->obs,
+ base_config->cpc);
+ }
+}
+
+static int sof_ipc4_widget_assign_instance_id(struct snd_sof_dev *sdev,
+ struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_fw_module *fw_module = swidget->module_info;
+ int max_instances = fw_module->man4_module_entry.instance_max_count;
+
+ swidget->instance_id = ida_alloc_max(&fw_module->m_ida, max_instances, GFP_KERNEL);
+ if (swidget->instance_id < 0) {
+ dev_err(sdev->dev, "failed to assign instance id for widget %s",
+ swidget->widget->name);
+ return swidget->instance_id;
+ }
+
+ return 0;
+}
+
+/* update hw_params based on the audio stream format */
+static int sof_ipc4_update_hw_params(struct snd_sof_dev *sdev, struct snd_pcm_hw_params *params,
+ struct sof_ipc4_audio_format *fmt)
+{
+ snd_pcm_format_t snd_fmt;
+ struct snd_interval *i;
+ struct snd_mask *m;
+ int valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
+ unsigned int channels, rate;
+
+ switch (valid_bits) {
+ case 16:
+ snd_fmt = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ case 24:
+ snd_fmt = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 32:
+ snd_fmt = SNDRV_PCM_FORMAT_S32_LE;
+ break;
+ default:
+ dev_err(sdev->dev, "invalid PCM valid_bits %d\n", valid_bits);
+ return -EINVAL;
+ }
+
+ m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ snd_mask_none(m);
+ snd_mask_set_format(m, snd_fmt);
+
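+ /* constrain rate and channel count to the single values given by the stream format */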
+ rate = fmt->sampling_frequency;
+ i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ i->min = rate;
+ i->max = rate;
+
+ channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg);
+ i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ i->min = channels;
+ i->max = channels;
+
+ return 0;
+}
+
+static bool sof_ipc4_is_single_format(struct snd_sof_dev *sdev,
+ struct sof_ipc4_pin_format *pin_fmts, u32 pin_fmts_size)
+{
+ struct sof_ipc4_audio_format *fmt;
+ u32 rate, channels, valid_bits;
+ int i;
+
+ fmt = &pin_fmts[0].audio_fmt;
+ rate = fmt->sampling_frequency;
+ channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg);
+ valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
+
+ /* check if all formats in the list have the same rate, channels and valid bit depth */
+ for (i = 1; i < pin_fmts_size; i++) {
+ u32 _rate, _channels, _valid_bits;
+
+ fmt = &pin_fmts[i].audio_fmt;
+ _rate = fmt->sampling_frequency;
+ _channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg);
+ _valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
+
+ if (_rate != rate || _channels != channels || _valid_bits != valid_bits)
+ return false;
+ }
+
+ return true;
+}
+
+static int sof_ipc4_init_output_audio_fmt(struct snd_sof_dev *sdev,
+ struct sof_ipc4_base_module_cfg *base_config,
+ struct sof_ipc4_available_audio_format *available_fmt,
+ u32 out_ref_rate, u32 out_ref_channels,
+ u32 out_ref_valid_bits)
+{
+ struct sof_ipc4_audio_format *out_fmt;
+ bool single_format;
+ int i;
+
+ if (!available_fmt->num_output_formats)
+ return -EINVAL;
+
+ single_format = sof_ipc4_is_single_format(sdev, available_fmt->output_pin_fmts,
+ available_fmt->num_output_formats);
+
+ /* pick the first format if there's only one available or if all formats are the same */
+ if (single_format) {
+ base_config->obs = available_fmt->output_pin_fmts[0].buffer_size;
+ return 0;
+ }
+
+ /*
+ * if there are multiple output formats, then choose the output format that matches
+ * the reference params
+ */
+ for (i = 0; i < available_fmt->num_output_formats; i++) {
+ u32 _out_rate, _out_channels, _out_valid_bits;
+
+ out_fmt = &available_fmt->output_pin_fmts[i].audio_fmt;
+ _out_rate = out_fmt->sampling_frequency;
+ _out_channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(out_fmt->fmt_cfg);
+ _out_valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(out_fmt->fmt_cfg);
+
+ if (_out_rate == out_ref_rate && _out_channels == out_ref_channels &&
+ _out_valid_bits == out_ref_valid_bits) {
+ base_config->obs = available_fmt->output_pin_fmts[i].buffer_size;
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int sof_ipc4_get_valid_bits(struct snd_sof_dev *sdev, struct snd_pcm_hw_params *params)
+{
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ return 16;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ return 24;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ return 32;
+ default:
+ dev_err(sdev->dev, "invalid pcm frame format %d\n", params_format(params));
+ return -EINVAL;
+ }
+}
+
+static int sof_ipc4_init_input_audio_fmt(struct snd_sof_dev *sdev,
+ struct snd_sof_widget *swidget,
+ struct sof_ipc4_base_module_cfg *base_config,
+ struct snd_pcm_hw_params *params,
+ struct sof_ipc4_available_audio_format *available_fmt)
+{
+ struct sof_ipc4_pin_format *pin_fmts = available_fmt->input_pin_fmts;
+ u32 pin_fmts_size = available_fmt->num_input_formats;
+ u32 valid_bits;
+ u32 channels;
+ u32 rate;
+ bool single_format;
+ int sample_valid_bits;
+ int i = 0;
+
+ if (!available_fmt->num_input_formats) {
+ dev_err(sdev->dev, "no input formats for %s\n", swidget->widget->name);
+ return -EINVAL;
+ }
+
+ single_format = sof_ipc4_is_single_format(sdev, available_fmt->input_pin_fmts,
+ available_fmt->num_input_formats);
+ if (single_format)
+ goto in_fmt;
+
+ sample_valid_bits = sof_ipc4_get_valid_bits(sdev, params);
+ if (sample_valid_bits < 0)
+ return sample_valid_bits;
+
+ /*
+ * Search supported input audio formats with pin index 0 to match rate, channels and
+ * sample_valid_bits from reference params
+ */
+ for (i = 0; i < pin_fmts_size; i++) {
+ struct sof_ipc4_audio_format *fmt = &pin_fmts[i].audio_fmt;
+
+ if (pin_fmts[i].pin_index)
+ continue;
+
+ rate = fmt->sampling_frequency;
+ channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg);
+ valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
+ if (params_rate(params) == rate && params_channels(params) == channels &&
+ sample_valid_bits == valid_bits) {
+ dev_dbg(sdev->dev, "matched audio format index for %uHz, %ubit, %u channels: %d\n",
+ rate, valid_bits, channels, i);
+ break;
+ }
+ }
+
+ if (i == pin_fmts_size) {
+ dev_err(sdev->dev, "%s: Unsupported audio format: %uHz, %ubit, %u channels\n",
+ __func__, params_rate(params), sample_valid_bits, params_channels(params));
+ return -EINVAL;
+ }
+
+in_fmt:
+ /* copy input format */
+ if (available_fmt->num_input_formats && i < available_fmt->num_input_formats) {
+ memcpy(&base_config->audio_fmt, &available_fmt->input_pin_fmts[i].audio_fmt,
+ sizeof(struct sof_ipc4_audio_format));
+
+ /* set base_cfg ibs/obs */
+ base_config->ibs = available_fmt->input_pin_fmts[i].buffer_size;
+
+ dev_dbg(sdev->dev, "Init input audio formats for %s\n", swidget->widget->name);
+ sof_ipc4_dbg_audio_format(sdev->dev, &available_fmt->input_pin_fmts[i], 1);
+ }
+
+ return i;
+}
+
+static void sof_ipc4_unprepare_copier_module(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_copier *ipc4_copier = NULL;
+ struct snd_sof_widget *pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+
+ /* reset pipeline memory usage */
+ pipe_widget = swidget->spipe->pipe_widget;
+ pipeline = pipe_widget->private;
+ pipeline->mem_usage = 0;
+
+ if (WIDGET_IS_AIF(swidget->id) || swidget->id == snd_soc_dapm_buffer) {
+ if (pipeline->use_chain_dma) {
+ pipeline->msg.primary = 0;
+ pipeline->msg.extension = 0;
+ }
+ ipc4_copier = swidget->private;
+ } else if (WIDGET_IS_DAI(swidget->id)) {
+ struct snd_sof_dai *dai = swidget->private;
+
+ ipc4_copier = dai->private;
+
+ if (pipeline->use_chain_dma) {
+ pipeline->msg.primary = 0;
+ pipeline->msg.extension = 0;
+ }
+
+ if (ipc4_copier->dai_type == SOF_DAI_INTEL_ALH) {
+ struct sof_ipc4_copier_data *copier_data = &ipc4_copier->data;
+ struct sof_ipc4_alh_configuration_blob *blob;
+ unsigned int group_id;
+
+ blob = (struct sof_ipc4_alh_configuration_blob *)ipc4_copier->copier_config;
+ if (blob->alh_cfg.device_count > 1) {
+ group_id = SOF_IPC4_NODE_INDEX(ipc4_copier->data.gtw_cfg.node_id) -
+ ALH_MULTI_GTW_BASE;
+ ida_free(&alh_group_ida, group_id);
+ }
+
+ /* clear the node ID */
+ copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK;
+ }
+ }
+
+ if (ipc4_copier) {
+ kfree(ipc4_copier->ipc_config_data);
+ ipc4_copier->ipc_config_data = NULL;
+ ipc4_copier->ipc_config_size = 0;
+ }
+}
+
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_INTEL_NHLT)
+static int snd_sof_get_hw_config_params(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
+ int *sample_rate, int *channel_count, int *bit_depth)
+{
+ struct snd_soc_tplg_hw_config *hw_config;
+ struct snd_sof_dai_link *slink;
+ bool dai_link_found = false;
+ bool hw_cfg_found = false;
+ int i;
+
+ /* get current hw_config from link */
+ list_for_each_entry(slink, &sdev->dai_link_list, list) {
+ if (!strcmp(slink->link->name, dai->name)) {
+ dai_link_found = true;
+ break;
+ }
+ }
+
+ if (!dai_link_found) {
+ dev_err(sdev->dev, "%s: no DAI link found for DAI %s\n", __func__, dai->name);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < slink->num_hw_configs; i++) {
+ hw_config = &slink->hw_configs[i];
+ if (dai->current_config == le32_to_cpu(hw_config->id)) {
+ hw_cfg_found = true;
+ break;
+ }
+ }
+
+ if (!hw_cfg_found) {
+ dev_err(sdev->dev, "%s: no matching hw_config found for DAI %s\n", __func__,
+ dai->name);
+ return -EINVAL;
+ }
+
+ *bit_depth = le32_to_cpu(hw_config->tdm_slot_width);
+ *channel_count = le32_to_cpu(hw_config->tdm_slots);
+ *sample_rate = le32_to_cpu(hw_config->fsync_rate);
+
+ dev_dbg(sdev->dev, "sample rate: %d sample width: %d channels: %d\n",
+ *sample_rate, *bit_depth, *channel_count);
+
+ return 0;
+}
+
+static int snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
+ struct snd_pcm_hw_params *params, u32 dai_index,
+ u32 linktype, u8 dir, u32 **dst, u32 *len)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct nhlt_specific_cfg *cfg;
+ int sample_rate, channel_count;
+ int bit_depth, ret;
+ u32 nhlt_type;
+
+ /* convert to NHLT type */
+ switch (linktype) {
+ case SOF_DAI_INTEL_DMIC:
+ nhlt_type = NHLT_LINK_DMIC;
+ bit_depth = params_width(params);
+ channel_count = params_channels(params);
+ sample_rate = params_rate(params);
+ break;
+ case SOF_DAI_INTEL_SSP:
+ nhlt_type = NHLT_LINK_SSP;
+ ret = snd_sof_get_hw_config_params(sdev, dai, &sample_rate, &channel_count,
+ &bit_depth);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return 0;
+ }
+
+ dev_dbg(sdev->dev, "dai index %d nhlt type %d direction %d\n",
+ dai_index, nhlt_type, dir);
+
+ /* find NHLT blob with matching params */
+ cfg = intel_nhlt_get_endpoint_blob(sdev->dev, ipc4_data->nhlt, dai_index, nhlt_type,
+ bit_depth, bit_depth, channel_count, sample_rate,
+ dir, 0);
+
+ if (!cfg) {
+ dev_err(sdev->dev,
+ "no matching blob for sample rate: %d sample width: %d channels: %d\n",
+ sample_rate, bit_depth, channel_count);
+ return -EINVAL;
+ }
+
+ /* config length should be in dwords */
+ *len = cfg->size >> 2;
+ *dst = (u32 *)cfg->caps;
+
+ return 0;
+}
+#else
+static int snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
+ struct snd_pcm_hw_params *params, u32 dai_index,
+ u32 linktype, u8 dir, u32 **dst, u32 *len)
+{
+ return 0;
+}
+#endif
+
+static bool sof_ipc4_copier_is_single_format(struct snd_sof_dev *sdev,
+ struct sof_ipc4_pin_format *pin_fmts,
+ u32 pin_fmts_size)
+{
+ struct sof_ipc4_audio_format *fmt;
+ u32 valid_bits;
+ int i;
+
+ fmt = &pin_fmts[0].audio_fmt;
+ valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
+
+ /* check if all formats in the list have the same valid bit depth */
+ for (i = 1; i < pin_fmts_size; i++) {
+ u32 _valid_bits;
+
+ fmt = &pin_fmts[i].audio_fmt;
+ _valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
+
+ if (_valid_bits != valid_bits)
+ return false;
+ }
+
+ return true;
+}
+
+static int
+sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params,
+ struct snd_pcm_hw_params *pipeline_params, int dir)
+{
+ struct sof_ipc4_available_audio_format *available_fmt;
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_copier_data *copier_data;
+ struct snd_pcm_hw_params *ref_params;
+ struct sof_ipc4_copier *ipc4_copier;
+ struct snd_sof_dai *dai;
+ u32 gtw_cfg_config_length;
+ u32 dma_config_tlv_size = 0;
+ void **ipc_config_data;
+ int *ipc_config_size;
+ u32 **data;
+ int ipc_size, ret, out_ref_valid_bits;
+ u32 out_ref_rate, out_ref_channels;
+ u32 deep_buffer_dma_ms = 0;
+ int output_fmt_index;
+ bool single_output_format;
+
+ dev_dbg(sdev->dev, "copier %s, type %d", swidget->widget->name, swidget->id);
+
+ switch (swidget->id) {
+ case snd_soc_dapm_aif_in:
+ case snd_soc_dapm_aif_out:
+ {
+ struct sof_ipc4_gtw_attributes *gtw_attr;
+ struct snd_sof_widget *pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+
+ /* parse the deep buffer dma size */
+ ret = sof_update_ipc_object(scomp, &deep_buffer_dma_ms,
+ SOF_COPIER_DEEP_BUFFER_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(u32), 1);
+ if (ret) {
+ dev_err(scomp->dev, "Failed to parse deep buffer dma size for %s\n",
+ swidget->widget->name);
+ return ret;
+ }
+
+ ipc4_copier = (struct sof_ipc4_copier *)swidget->private;
+ gtw_attr = ipc4_copier->gtw_attr;
+ copier_data = &ipc4_copier->data;
+ available_fmt = &ipc4_copier->available_fmt;
+
+ pipe_widget = swidget->spipe->pipe_widget;
+ pipeline = pipe_widget->private;
+
+ if (pipeline->use_chain_dma) {
+ u32 host_dma_id;
+ u32 fifo_size;
+
+ host_dma_id = platform_params->stream_tag - 1;
+ pipeline->msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_HOST_ID(host_dma_id);
+
+ /* Set SCS bit for S16_LE format only */
+ if (params_format(fe_params) == SNDRV_PCM_FORMAT_S16_LE)
+ pipeline->msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_SCS_MASK;
+
+ /*
+ * Despite its name, the bitfield 'fifo_size' is used to define the DMA
+ * buffer size. The expression calculates a 2 ms buffer size.
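+ * For example, at 48000 Hz, 2 channels and 32-bit physical width this
+ * gives DIV_ROUND_UP(2 * 48000 * 2 * 32, 8000) = 768 bytes.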
+ */
+ fifo_size = DIV_ROUND_UP((SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS *
+ params_rate(fe_params) *
+ params_channels(fe_params) *
+ params_physical_width(fe_params)), 8000);
+ pipeline->msg.extension |= SOF_IPC4_GLB_EXT_CHAIN_DMA_FIFO_SIZE(fifo_size);
+
+ /*
+ * Chain DMA does not support stream timestamping; set node_id to invalid
+ * so that the code in sof_ipc4_get_stream_start_offset() is skipped.
+ */
+ copier_data->gtw_cfg.node_id = SOF_IPC4_INVALID_NODE_ID;
+
+ return 0;
+ }
+
+ /*
+ * Use the input_pin_fmts to match pcm params for playback and the output_pin_fmts
+ * for capture.
+ */
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+ ref_params = fe_params;
+ else
+ ref_params = pipeline_params;
+
+ copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK;
+ copier_data->gtw_cfg.node_id |=
+ SOF_IPC4_NODE_INDEX(platform_params->stream_tag - 1);
+
+ /* set gateway attributes */
+ gtw_attr->lp_buffer_alloc = pipeline->lp_mode;
+ break;
+ }
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ {
+ struct snd_sof_widget *pipe_widget = swidget->spipe->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
+
+ if (pipeline->use_chain_dma)
+ return 0;
+
+ dai = swidget->private;
+
+ ipc4_copier = (struct sof_ipc4_copier *)dai->private;
+ copier_data = &ipc4_copier->data;
+ available_fmt = &ipc4_copier->available_fmt;
+
+ /*
+ * When there is format conversion within a pipeline, the number of supported
+ * output formats is typically limited to just 1 for the DAI copiers. But when there
+ * is no format conversion, the DAI copier's input format must match that of the
+ * FE hw_params for capture and the pipeline params for playback.
+ */
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+ ref_params = pipeline_params;
+ else
+ ref_params = fe_params;
+
+ ret = snd_sof_get_nhlt_endpoint_data(sdev, dai, fe_params, ipc4_copier->dai_index,
+ ipc4_copier->dai_type, dir,
+ &ipc4_copier->copier_config,
+ &copier_data->gtw_cfg.config_length);
+ if (ret < 0)
+ return ret;
+
+ break;
+ }
+ case snd_soc_dapm_buffer:
+ {
+ ipc4_copier = (struct sof_ipc4_copier *)swidget->private;
+ copier_data = &ipc4_copier->data;
+ available_fmt = &ipc4_copier->available_fmt;
+ ref_params = pipeline_params;
+
+ break;
+ }
+ default:
+ dev_err(sdev->dev, "unsupported type %d for copier %s",
+ swidget->id, swidget->widget->name);
+ return -EINVAL;
+ }
+
+ /* set input and output audio formats */
+ ret = sof_ipc4_init_input_audio_fmt(sdev, swidget, &copier_data->base_config, ref_params,
+ available_fmt);
+ if (ret < 0)
+ return ret;
+
+ /* set the reference params for output format selection */
+ single_output_format = sof_ipc4_copier_is_single_format(sdev,
+ available_fmt->output_pin_fmts,
+ available_fmt->num_output_formats);
+ switch (swidget->id) {
+ case snd_soc_dapm_aif_in:
+ case snd_soc_dapm_dai_out:
+ case snd_soc_dapm_buffer:
+ {
+ struct sof_ipc4_audio_format *in_fmt;
+
+ in_fmt = &available_fmt->input_pin_fmts[ret].audio_fmt;
+ out_ref_rate = in_fmt->sampling_frequency;
+ out_ref_channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(in_fmt->fmt_cfg);
+
+ if (!single_output_format)
+ out_ref_valid_bits =
+ SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(in_fmt->fmt_cfg);
+ break;
+ }
+ case snd_soc_dapm_aif_out:
+ case snd_soc_dapm_dai_in:
+ out_ref_rate = params_rate(fe_params);
+ out_ref_channels = params_channels(fe_params);
+ if (!single_output_format) {
+ out_ref_valid_bits = sof_ipc4_get_valid_bits(sdev, fe_params);
+ if (out_ref_valid_bits < 0)
+ return out_ref_valid_bits;
+ }
+ break;
+ default:
+ /*
+ * An unsupported type should have been caught by the previous switch
+ * statement's default case; this should never happen in practice.
+ */
+ return -EINVAL;
+ }
+
+ /*
+ * if the output format is the same across all available output formats, choose
+ * that as the reference.
+ */
+ if (single_output_format) {
+ struct sof_ipc4_audio_format *out_fmt;
+
+ out_fmt = &available_fmt->output_pin_fmts[0].audio_fmt;
+ out_ref_valid_bits =
+ SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(out_fmt->fmt_cfg);
+ }
+
+ dev_dbg(sdev->dev, "copier %s: reference output rate %d, channels %d valid_bits %d\n",
+ swidget->widget->name, out_ref_rate, out_ref_channels, out_ref_valid_bits);
+
+ output_fmt_index = sof_ipc4_init_output_audio_fmt(sdev, &copier_data->base_config,
+ available_fmt, out_ref_rate,
+ out_ref_channels, out_ref_valid_bits);
+ if (output_fmt_index < 0) {
+ dev_err(sdev->dev, "Failed to initialize output format for %s",
+ swidget->widget->name);
+ return output_fmt_index;
+ }
+
+ /*
+ * Set the output format. Current topology defines pin 0 input and output formats in pairs.
+ * This assumes that the pin 0 formats are defined before all other pins.
+ * So pick the output audio format with the same index as the chosen
+ * input format. This logic will need to be updated when the format definitions
+ * in topology change.
+ */
+ memcpy(&copier_data->out_format,
+ &available_fmt->output_pin_fmts[output_fmt_index].audio_fmt,
+ sizeof(struct sof_ipc4_audio_format));
+ dev_dbg(sdev->dev, "Output audio format for %s\n", swidget->widget->name);
+ sof_ipc4_dbg_audio_format(sdev->dev, &available_fmt->output_pin_fmts[output_fmt_index], 1);
+
+ switch (swidget->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ {
+ /*
+ * Only SOF_DAI_INTEL_ALH needs copier_data to set up its blob, which is
+ * why only the ALH DAI's blob is populated after sof_ipc4_init_input_audio_fmt().
+ */
+ if (ipc4_copier->dai_type == SOF_DAI_INTEL_ALH) {
+ struct sof_ipc4_alh_configuration_blob *blob;
+ struct sof_ipc4_copier_data *alh_data;
+ struct sof_ipc4_copier *alh_copier;
+ struct snd_sof_widget *w;
+ u32 ch_count = 0;
+ u32 ch_mask = 0;
+ u32 ch_map;
+ u32 step;
+ u32 mask;
+ int i;
+
+ blob = (struct sof_ipc4_alh_configuration_blob *)ipc4_copier->copier_config;
+
+ blob->gw_attr.lp_buffer_alloc = 0;
+
+ /* Get channel_mask from ch_map */
+ ch_map = copier_data->base_config.audio_fmt.ch_map;
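+ /* each 4-bit nibble of ch_map describes one channel slot; 0xf marks an unused slot */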
+ for (i = 0; ch_map; i++) {
+ if ((ch_map & 0xf) != 0xf) {
+ ch_mask |= BIT(i);
+ ch_count++;
+ }
+ ch_map >>= 4;
+ }
+
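+ /* the active channels are split evenly across the aggregated DAIs for capture */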
+ step = ch_count / blob->alh_cfg.device_count;
+ mask = GENMASK(step - 1, 0);
+ /*
+ * Set each gtw_cfg.node_id to blob->alh_cfg.mapping[]
+ * for all widgets with the same stream name
+ */
+ i = 0;
+ list_for_each_entry(w, &sdev->widget_list, list) {
+ if (w->widget->sname &&
+ strcmp(w->widget->sname, swidget->widget->sname))
+ continue;
+
+ dai = w->private;
+ alh_copier = (struct sof_ipc4_copier *)dai->private;
+ alh_data = &alh_copier->data;
+ blob->alh_cfg.mapping[i].device = alh_data->gtw_cfg.node_id;
+ /*
+ * Set the same channel mask for playback as the audio data is
+ * duplicated for all speakers. For capture, split the channels
+ * among the aggregated DAIs. For example, with 4 channels on 2
+ * aggregated DAIs, the channel_mask should be 0x3 and 0xc for the
+ * two DAIs.
+ * The channel masks used depend on the cpu_dais used in the
+ * dailink at the machine driver level, which actually comes from
+ * the tables in soc_acpi files depending on the _ADR and devID
+ * registers for each codec.
+ */
+ if (w->id == snd_soc_dapm_dai_in)
+ blob->alh_cfg.mapping[i].channel_mask = ch_mask;
+ else
+ blob->alh_cfg.mapping[i].channel_mask = mask << (step * i);
+
+ i++;
+ }
+ if (blob->alh_cfg.device_count > 1) {
+ int group_id;
+
+ group_id = ida_alloc_max(&alh_group_ida, ALH_MULTI_GTW_COUNT - 1,
+ GFP_KERNEL);
+
+ if (group_id < 0)
+ return group_id;
+
+ /* add multi-gateway base */
+ group_id += ALH_MULTI_GTW_BASE;
+ copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK;
+ copier_data->gtw_cfg.node_id |= SOF_IPC4_NODE_INDEX(group_id);
+ }
+ }
+ }
+ }
+
+ /* modify the input params for the next widget */
+ ret = sof_ipc4_update_hw_params(sdev, pipeline_params, &copier_data->out_format);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the gateway dma_buffer_size to 2ms buffer size to meet the FW expectation. In the
+ * deep buffer case, set the dma_buffer_size depending on the deep_buffer_dma_ms set
+ * in topology.
+ */
+ switch (swidget->id) {
+ case snd_soc_dapm_dai_in:
+ copier_data->gtw_cfg.dma_buffer_size =
+ SOF_IPC4_MIN_DMA_BUFFER_SIZE * copier_data->base_config.ibs;
+ break;
+ case snd_soc_dapm_aif_in:
+ copier_data->gtw_cfg.dma_buffer_size =
+ max((u32)SOF_IPC4_MIN_DMA_BUFFER_SIZE, deep_buffer_dma_ms) *
+ copier_data->base_config.ibs;
+ break;
+ case snd_soc_dapm_dai_out:
+ case snd_soc_dapm_aif_out:
+ copier_data->gtw_cfg.dma_buffer_size =
+ SOF_IPC4_MIN_DMA_BUFFER_SIZE * copier_data->base_config.obs;
+ break;
+ default:
+ break;
+ }
+
+ data = &ipc4_copier->copier_config;
+ ipc_config_size = &ipc4_copier->ipc_config_size;
+ ipc_config_data = &ipc4_copier->ipc_config_data;
+
+ /* config_length is DWORD based */
+ gtw_cfg_config_length = copier_data->gtw_cfg.config_length * 4;
+ ipc_size = sizeof(*copier_data) + gtw_cfg_config_length;
+
+ if (ipc4_copier->dma_config_tlv.type == SOF_IPC4_GTW_DMA_CONFIG_ID &&
+ ipc4_copier->dma_config_tlv.length) {
+ dma_config_tlv_size = sizeof(ipc4_copier->dma_config_tlv) +
+ ipc4_copier->dma_config_tlv.dma_config.dma_priv_config_size;
+
+ /* paranoia check on TLV size/length */
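+ /* the TLV 'length' field does not count the type and length words themselves */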
+ if (dma_config_tlv_size != ipc4_copier->dma_config_tlv.length +
+ sizeof(uint32_t) * 2) {
+ dev_err(sdev->dev, "Invalid configuration, TLV size %d length %d\n",
+ dma_config_tlv_size, ipc4_copier->dma_config_tlv.length);
+ return -EINVAL;
+ }
+
+ ipc_size += dma_config_tlv_size;
+
+ /* we also need to increase the size at the gtw level */
+ copier_data->gtw_cfg.config_length += dma_config_tlv_size / 4;
+ }
+
+ dev_dbg(sdev->dev, "copier %s, IPC size is %d", swidget->widget->name, ipc_size);
+
+ *ipc_config_data = kzalloc(ipc_size, GFP_KERNEL);
+ if (!*ipc_config_data)
+ return -ENOMEM;
+
+ *ipc_config_size = ipc_size;
+
+ /* update pipeline memory usage */
+ sof_ipc4_update_resource_usage(sdev, swidget, &copier_data->base_config);
+
+ /* copy IPC data */
+ memcpy(*ipc_config_data, (void *)copier_data, sizeof(*copier_data));
+ if (gtw_cfg_config_length)
+ memcpy(*ipc_config_data + sizeof(*copier_data),
+ *data, gtw_cfg_config_length);
+
+ /* add DMA Config TLV, if configured */
+ if (dma_config_tlv_size)
+ memcpy(*ipc_config_data + sizeof(*copier_data) +
+ gtw_cfg_config_length,
+ &ipc4_copier->dma_config_tlv, dma_config_tlv_size);
+
+ /*
+ * Restore the gateway config length now that the IPC payload is prepared.
+ * This avoids counting the DMA config TLV multiple times.
+ */
+ copier_data->gtw_cfg.config_length = gtw_cfg_config_length / 4;
+
+ return 0;
+}
+
+static int sof_ipc4_prepare_gain_module(struct snd_sof_widget *swidget,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params,
+ struct snd_pcm_hw_params *pipeline_params, int dir)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_gain *gain = swidget->private;
+ struct sof_ipc4_available_audio_format *available_fmt = &gain->available_fmt;
+ struct sof_ipc4_audio_format *in_fmt;
+ u32 out_ref_rate, out_ref_channels, out_ref_valid_bits;
+ int ret;
+
+ ret = sof_ipc4_init_input_audio_fmt(sdev, swidget, &gain->data.base_config,
+ pipeline_params, available_fmt);
+ if (ret < 0)
+ return ret;
+
+ in_fmt = &available_fmt->input_pin_fmts[ret].audio_fmt;
+ out_ref_rate = in_fmt->sampling_frequency;
+ out_ref_channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(in_fmt->fmt_cfg);
+ out_ref_valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(in_fmt->fmt_cfg);
+
+ ret = sof_ipc4_init_output_audio_fmt(sdev, &gain->data.base_config, available_fmt,
+ out_ref_rate, out_ref_channels, out_ref_valid_bits);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to initialize output format for %s",
+ swidget->widget->name);
+ return ret;
+ }
+
+ /* update pipeline memory usage */
+ sof_ipc4_update_resource_usage(sdev, swidget, &gain->data.base_config);
+
+ return 0;
+}
+
+static int sof_ipc4_prepare_mixer_module(struct snd_sof_widget *swidget,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params,
+ struct snd_pcm_hw_params *pipeline_params, int dir)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_mixer *mixer = swidget->private;
+ struct sof_ipc4_available_audio_format *available_fmt = &mixer->available_fmt;
+ struct sof_ipc4_audio_format *in_fmt;
+ u32 out_ref_rate, out_ref_channels, out_ref_valid_bits;
+ int ret;
+
+ ret = sof_ipc4_init_input_audio_fmt(sdev, swidget, &mixer->base_config,
+ pipeline_params, available_fmt);
+ if (ret < 0)
+ return ret;
+
+ in_fmt = &available_fmt->input_pin_fmts[ret].audio_fmt;
+ out_ref_rate = in_fmt->sampling_frequency;
+ out_ref_channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(in_fmt->fmt_cfg);
+ out_ref_valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(in_fmt->fmt_cfg);
+
+ ret = sof_ipc4_init_output_audio_fmt(sdev, &mixer->base_config, available_fmt,
+ out_ref_rate, out_ref_channels, out_ref_valid_bits);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to initialize output format for %s",
+ swidget->widget->name);
+ return ret;
+ }
+
+ /* update pipeline memory usage */
+ sof_ipc4_update_resource_usage(sdev, swidget, &mixer->base_config);
+
+ return 0;
+}
+
+static int sof_ipc4_prepare_src_module(struct snd_sof_widget *swidget,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params,
+ struct snd_pcm_hw_params *pipeline_params, int dir)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_src *src = swidget->private;
+ struct sof_ipc4_available_audio_format *available_fmt = &src->available_fmt;
+ struct sof_ipc4_audio_format *out_audio_fmt;
+ struct sof_ipc4_audio_format *in_audio_fmt;
+ u32 out_ref_rate, out_ref_channels, out_ref_valid_bits;
+ int output_format_index, input_format_index;
+
+ input_format_index = sof_ipc4_init_input_audio_fmt(sdev, swidget, &src->data.base_config,
+ pipeline_params, available_fmt);
+ if (input_format_index < 0)
+ return input_format_index;
+
+ /*
+ * For playback, the SRC sink rate will be configured based on the requested output
+ * format, which is restricted to only deal with DAI's with a single format for now.
+ */
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK && available_fmt->num_output_formats > 1) {
+ dev_err(sdev->dev, "Invalid number of output formats: %d for SRC %s\n",
+ available_fmt->num_output_formats, swidget->widget->name);
+ return -EINVAL;
+ }
+
+ /*
+ * SRC does not perform format conversion, so the output channels and valid bit depth must
+ * be the same as that of the input.
+ */
+ in_audio_fmt = &available_fmt->input_pin_fmts[input_format_index].audio_fmt;
+ out_ref_channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(in_audio_fmt->fmt_cfg);
+ out_ref_valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(in_audio_fmt->fmt_cfg);
+
+ /*
+ * For capture, the SRC module should convert the rate to match the rate requested by the
+ * PCM hw_params. Set the reference params based on the fe_params unconditionally as it
+ * will be ignored for playback anyway.
+ */
+ out_ref_rate = params_rate(fe_params);
+
+ output_format_index = sof_ipc4_init_output_audio_fmt(sdev, &src->data.base_config,
+ available_fmt, out_ref_rate,
+ out_ref_channels, out_ref_valid_bits);
+ if (output_format_index < 0) {
+ dev_err(sdev->dev, "Failed to initialize output format for %s",
+ swidget->widget->name);
+ return output_format_index;
+ }
+
+ /* update pipeline memory usage */
+ sof_ipc4_update_resource_usage(sdev, swidget, &src->data.base_config);
+
+ out_audio_fmt = &available_fmt->output_pin_fmts[output_format_index].audio_fmt;
+ src->data.sink_rate = out_audio_fmt->sampling_frequency;
+
+ /* update pipeline_params for sink widgets */
+ return sof_ipc4_update_hw_params(sdev, pipeline_params, out_audio_fmt);
+}
+
+static int
+sof_ipc4_process_set_pin_formats(struct snd_sof_widget *swidget, int pin_type)
+{
+ struct sof_ipc4_process *process = swidget->private;
+ struct sof_ipc4_base_module_cfg_ext *base_cfg_ext = process->base_config_ext;
+ struct sof_ipc4_available_audio_format *available_fmt = &process->available_fmt;
+ struct sof_ipc4_pin_format *pin_format, *format_list_to_search;
+ struct snd_soc_component *scomp = swidget->scomp;
+ int num_pins, format_list_count;
+ int pin_format_offset = 0;
+ int i, j;
+
+ /* set number of pins, offset of pin format and format list to search based on pin type */
+ if (pin_type == SOF_PIN_TYPE_INPUT) {
+ num_pins = swidget->num_input_pins;
+ format_list_to_search = available_fmt->input_pin_fmts;
+ format_list_count = available_fmt->num_input_formats;
+ } else {
+ num_pins = swidget->num_output_pins;
+ pin_format_offset = swidget->num_input_pins;
+ format_list_to_search = available_fmt->output_pin_fmts;
+ format_list_count = available_fmt->num_output_formats;
+ }
+
+ for (i = pin_format_offset; i < num_pins + pin_format_offset; i++) {
+ pin_format = &base_cfg_ext->pin_formats[i];
+
+ /* Pin 0 audio formats are derived from the base config input/output format */
+ if (i == pin_format_offset) {
+ if (pin_type == SOF_PIN_TYPE_INPUT) {
+ pin_format->buffer_size = process->base_config.ibs;
+ pin_format->audio_fmt = process->base_config.audio_fmt;
+ } else {
+ pin_format->buffer_size = process->base_config.obs;
+ pin_format->audio_fmt = process->output_format;
+ }
+ continue;
+ }
+
+ /*
+ * For all other pins, find the pin formats from those set in topology. If there
+ * is more than one format specified for a pin, this will pick the first available
+ * one.
+ */
+ for (j = 0; j < format_list_count; j++) {
+ struct sof_ipc4_pin_format *pin_format_item = &format_list_to_search[j];
+
+ if (pin_format_item->pin_index == i - pin_format_offset) {
+ *pin_format = *pin_format_item;
+ break;
+ }
+ }
+
+ if (j == format_list_count) {
+ dev_err(scomp->dev, "%s pin %d format not found for %s\n",
+ (pin_type == SOF_PIN_TYPE_INPUT) ? "input" : "output",
+ i - pin_format_offset, swidget->widget->name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int sof_ipc4_process_add_base_cfg_extn(struct snd_sof_widget *swidget)
+{
+ int ret, i;
+
+ /* copy input and output pin formats */
+ for (i = 0; i <= SOF_PIN_TYPE_OUTPUT; i++) {
+ ret = sof_ipc4_process_set_pin_formats(swidget, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sof_ipc4_prepare_process_module(struct snd_sof_widget *swidget,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params,
+ struct snd_pcm_hw_params *pipeline_params, int dir)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_process *process = swidget->private;
+ struct sof_ipc4_available_audio_format *available_fmt = &process->available_fmt;
+ struct sof_ipc4_audio_format *in_fmt;
+ u32 out_ref_rate, out_ref_channels, out_ref_valid_bits;
+ void *cfg = process->ipc_config_data;
+ int output_fmt_index;
+ int ret;
+
+ ret = sof_ipc4_init_input_audio_fmt(sdev, swidget, &process->base_config,
+ pipeline_params, available_fmt);
+ if (ret < 0)
+ return ret;
+
+ in_fmt = &available_fmt->input_pin_fmts[ret].audio_fmt;
+ out_ref_rate = in_fmt->sampling_frequency;
+ out_ref_channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(in_fmt->fmt_cfg);
+ out_ref_valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(in_fmt->fmt_cfg);
+
+ output_fmt_index = sof_ipc4_init_output_audio_fmt(sdev, &process->base_config,
+ available_fmt, out_ref_rate,
+ out_ref_channels, out_ref_valid_bits);
+ if (output_fmt_index < 0 && available_fmt->num_output_formats) {
+ dev_err(sdev->dev, "Failed to initialize output format for %s",
+ swidget->widget->name);
+ return output_fmt_index;
+ }
+
+ /* copy Pin 0 output format */
+ if (available_fmt->num_output_formats &&
+ output_fmt_index < available_fmt->num_output_formats &&
+ !available_fmt->output_pin_fmts[output_fmt_index].pin_index) {
+ memcpy(&process->output_format,
+ &available_fmt->output_pin_fmts[output_fmt_index].audio_fmt,
+ sizeof(struct sof_ipc4_audio_format));
+
+ /* modify the pipeline params with the pin 0 output format */
+ ret = sof_ipc4_update_hw_params(sdev, pipeline_params, &process->output_format);
+ if (ret)
+ return ret;
+ }
+
+ /* update pipeline memory usage */
+ sof_ipc4_update_resource_usage(sdev, swidget, &process->base_config);
+
+ /* ipc_config_data is composed of the base_config followed by an optional extension */
+ memcpy(cfg, &process->base_config, sizeof(struct sof_ipc4_base_module_cfg));
+ cfg += sizeof(struct sof_ipc4_base_module_cfg);
+
+ if (process->init_config == SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT) {
+ struct sof_ipc4_base_module_cfg_ext *base_cfg_ext = process->base_config_ext;
+
+ ret = sof_ipc4_process_add_base_cfg_extn(swidget);
+ if (ret < 0)
+ return ret;
+
+ memcpy(cfg, base_cfg_ext, process->base_config_ext_size);
+ }
+
+ return 0;
+}
+
+static int sof_ipc4_control_load_volume(struct snd_sof_dev *sdev, struct snd_sof_control *scontrol)
+{
+ struct sof_ipc4_control_data *control_data;
+ struct sof_ipc4_msg *msg;
+ int i;
+
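+ /* the IPC payload holds the control data header plus one channel/value pair per channel */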
+ scontrol->size = struct_size(control_data, chanv, scontrol->num_channels);
+
+ /* scontrol->ipc_control_data will be freed in sof_control_unload */
+ scontrol->ipc_control_data = kzalloc(scontrol->size, GFP_KERNEL);
+ if (!scontrol->ipc_control_data)
+ return -ENOMEM;
+
+ control_data = scontrol->ipc_control_data;
+ control_data->index = scontrol->index;
+
+ msg = &control_data->msg;
+ msg->primary = SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_LARGE_CONFIG_SET);
+ msg->primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg->primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+
+ msg->extension = SOF_IPC4_MOD_EXT_MSG_PARAM_ID(SOF_IPC4_GAIN_PARAM_ID);
+
+ /* set default volume values to 0dB in control */
+ for (i = 0; i < scontrol->num_channels; i++) {
+ control_data->chanv[i].channel = i;
+ control_data->chanv[i].value = SOF_IPC4_VOL_ZERO_DB;
+ }
+
+ return 0;
+}
+
+static int sof_ipc4_control_load_bytes(struct snd_sof_dev *sdev, struct snd_sof_control *scontrol)
+{
+ struct sof_ipc4_control_data *control_data;
+ struct sof_ipc4_msg *msg;
+ int ret;
+
+ if (scontrol->max_size < (sizeof(*control_data) + sizeof(struct sof_abi_hdr))) {
+ dev_err(sdev->dev, "insufficient size for a bytes control %s: %zu.\n",
+ scontrol->name, scontrol->max_size);
+ return -EINVAL;
+ }
+
+ if (scontrol->priv_size > scontrol->max_size - sizeof(*control_data)) {
+ dev_err(sdev->dev, "scontrol %s bytes data size %zu exceeds max %zu.\n",
+ scontrol->name, scontrol->priv_size,
+ scontrol->max_size - sizeof(*control_data));
+ return -EINVAL;
+ }
+
+ scontrol->size = sizeof(struct sof_ipc4_control_data) + scontrol->priv_size;
+
+ scontrol->ipc_control_data = kzalloc(scontrol->max_size, GFP_KERNEL);
+ if (!scontrol->ipc_control_data)
+ return -ENOMEM;
+
+ control_data = scontrol->ipc_control_data;
+ control_data->index = scontrol->index;
+ if (scontrol->priv_size > 0) {
+ memcpy(control_data->data, scontrol->priv, scontrol->priv_size);
+ kfree(scontrol->priv);
+ scontrol->priv = NULL;
+
+ if (control_data->data->magic != SOF_IPC4_ABI_MAGIC) {
+ dev_err(sdev->dev, "Wrong ABI magic (%#x) for control: %s\n",
+ control_data->data->magic, scontrol->name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* TODO: check the ABI version */
+
+ if (control_data->data->size + sizeof(struct sof_abi_hdr) !=
+ scontrol->priv_size) {
+ dev_err(sdev->dev, "Control %s conflict in bytes %zu vs. priv size %zu.\n",
+ scontrol->name,
+ control_data->data->size + sizeof(struct sof_abi_hdr),
+ scontrol->priv_size);
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
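+	/* build the IPC4 MOD_LARGE_CONFIG_SET header used to send the bytes payload */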
+ msg = &control_data->msg;
+ msg->primary = SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_LARGE_CONFIG_SET);
+ msg->primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg->primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+
+ return 0;
+
+err:
+ kfree(scontrol->ipc_control_data);
+ scontrol->ipc_control_data = NULL;
+ return ret;
+}
+
+static int sof_ipc4_control_setup(struct snd_sof_dev *sdev, struct snd_sof_control *scontrol)
+{
+ switch (scontrol->info_type) {
+ case SND_SOC_TPLG_CTL_VOLSW:
+ case SND_SOC_TPLG_CTL_VOLSW_SX:
+ case SND_SOC_TPLG_CTL_VOLSW_XR_SX:
+ return sof_ipc4_control_load_volume(sdev, scontrol);
+ case SND_SOC_TPLG_CTL_BYTES:
+ return sof_ipc4_control_load_bytes(sdev, scontrol);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int sof_ipc4_widget_setup(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget)
+{
+ struct snd_sof_widget *pipe_widget = swidget->spipe->pipe_widget;
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct sof_ipc4_pipeline *pipeline;
+ struct sof_ipc4_msg *msg;
+ void *ipc_data = NULL;
+ u32 ipc_size = 0;
+ int ret;
+
+ switch (swidget->id) {
+ case snd_soc_dapm_scheduler:
+ pipeline = swidget->private;
+
+ if (pipeline->use_chain_dma) {
+ dev_warn(sdev->dev, "use_chain_dma set for scheduler %s",
+ swidget->widget->name);
+ return 0;
+ }
+
+ dev_dbg(sdev->dev, "pipeline: %d memory pages: %d\n", swidget->pipeline_id,
+ pipeline->mem_usage);
+
+ msg = &pipeline->msg;
+ msg->primary |= pipeline->mem_usage;
+
+ swidget->instance_id = ida_alloc_max(&pipeline_ida, ipc4_data->max_num_pipelines,
+ GFP_KERNEL);
+ if (swidget->instance_id < 0) {
+ dev_err(sdev->dev, "failed to assign pipeline id for %s: %d\n",
+ swidget->widget->name, swidget->instance_id);
+ return swidget->instance_id;
+ }
+ msg->primary &= ~SOF_IPC4_GLB_PIPE_INSTANCE_MASK;
+ msg->primary |= SOF_IPC4_GLB_PIPE_INSTANCE_ID(swidget->instance_id);
+ break;
+ case snd_soc_dapm_aif_in:
+ case snd_soc_dapm_aif_out:
+ case snd_soc_dapm_buffer:
+ {
+ struct sof_ipc4_copier *ipc4_copier = swidget->private;
+
+ pipeline = pipe_widget->private;
+ if (pipeline->use_chain_dma)
+ return 0;
+
+ ipc_size = ipc4_copier->ipc_config_size;
+ ipc_data = ipc4_copier->ipc_config_data;
+
+ msg = &ipc4_copier->msg;
+ break;
+ }
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ {
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_ipc4_copier *ipc4_copier = dai->private;
+
+ pipeline = pipe_widget->private;
+ if (pipeline->use_chain_dma)
+ return 0;
+
+ ipc_size = ipc4_copier->ipc_config_size;
+ ipc_data = ipc4_copier->ipc_config_data;
+
+ msg = &ipc4_copier->msg;
+ break;
+ }
+ case snd_soc_dapm_pga:
+ {
+ struct sof_ipc4_gain *gain = swidget->private;
+
+ ipc_size = sizeof(gain->data);
+ ipc_data = &gain->data;
+
+ msg = &gain->msg;
+ break;
+ }
+ case snd_soc_dapm_mixer:
+ {
+ struct sof_ipc4_mixer *mixer = swidget->private;
+
+ ipc_size = sizeof(mixer->base_config);
+ ipc_data = &mixer->base_config;
+
+ msg = &mixer->msg;
+ break;
+ }
+ case snd_soc_dapm_src:
+ {
+ struct sof_ipc4_src *src = swidget->private;
+
+ ipc_size = sizeof(src->data);
+ ipc_data = &src->data;
+
+ msg = &src->msg;
+ break;
+ }
+ case snd_soc_dapm_effect:
+ {
+ struct sof_ipc4_process *process = swidget->private;
+
+ if (!process->ipc_config_size) {
+ dev_err(sdev->dev, "module %s has no config data!\n",
+ swidget->widget->name);
+ return -EINVAL;
+ }
+
+ ipc_size = process->ipc_config_size;
+ ipc_data = process->ipc_config_data;
+
+ msg = &process->msg;
+ break;
+ }
+ default:
+ dev_err(sdev->dev, "widget type %d not supported", swidget->id);
+ return -EINVAL;
+ }
+
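+	/*
+	 * For all non-pipeline widgets, allocate a module instance ID and patch
+	 * it, the IPC payload size and the owning pipeline ID into the
+	 * init-instance message.
+	 */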
+ if (swidget->id != snd_soc_dapm_scheduler) {
+ ret = sof_ipc4_widget_assign_instance_id(sdev, swidget);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to assign instance id for %s\n",
+ swidget->widget->name);
+ return ret;
+ }
+
+ msg->primary &= ~SOF_IPC4_MOD_INSTANCE_MASK;
+ msg->primary |= SOF_IPC4_MOD_INSTANCE(swidget->instance_id);
+
+ msg->extension &= ~SOF_IPC4_MOD_EXT_PARAM_SIZE_MASK;
+ msg->extension |= ipc_size >> 2;
+
+ msg->extension &= ~SOF_IPC4_MOD_EXT_PPL_ID_MASK;
+ msg->extension |= SOF_IPC4_MOD_EXT_PPL_ID(pipe_widget->instance_id);
+ }
+ dev_dbg(sdev->dev, "Create widget %s instance %d - pipe %d - core %d\n",
+ swidget->widget->name, swidget->instance_id, swidget->pipeline_id, swidget->core);
+
+ msg->data_size = ipc_size;
+ msg->data_ptr = ipc_data;
+
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, msg, ipc_size);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to create module %s\n", swidget->widget->name);
+
+ if (swidget->id != snd_soc_dapm_scheduler) {
+ struct sof_ipc4_fw_module *fw_module = swidget->module_info;
+
+ ida_free(&fw_module->m_ida, swidget->instance_id);
+ } else {
+ ida_free(&pipeline_ida, swidget->instance_id);
+ }
+ }
+
+ return ret;
+}
+
+static int sof_ipc4_widget_free(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_fw_module *fw_module = swidget->module_info;
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ int ret = 0;
+
+ mutex_lock(&ipc4_data->pipeline_state_mutex);
+
+ /* freeing a pipeline frees all the widgets associated with it */
+ if (swidget->id == snd_soc_dapm_scheduler) {
+ struct sof_ipc4_pipeline *pipeline = swidget->private;
+ struct sof_ipc4_msg msg = {{ 0 }};
+ u32 header;
+
+ if (pipeline->use_chain_dma) {
+ dev_warn(sdev->dev, "use_chain_dma set for scheduler %s",
+ swidget->widget->name);
+ mutex_unlock(&ipc4_data->pipeline_state_mutex);
+ return 0;
+ }
+
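+		/* build the GLB_DELETE_PIPELINE request for this pipeline instance */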
+ header = SOF_IPC4_GLB_PIPE_INSTANCE_ID(swidget->instance_id);
+ header |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_DELETE_PIPELINE);
+ header |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ header |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
+
+ msg.primary = header;
+
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to free pipeline widget %s\n",
+ swidget->widget->name);
+
+ pipeline->mem_usage = 0;
+ pipeline->state = SOF_IPC4_PIPE_UNINITIALIZED;
+ ida_free(&pipeline_ida, swidget->instance_id);
+ swidget->instance_id = -EINVAL;
+ } else {
+ struct snd_sof_widget *pipe_widget = swidget->spipe->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
+
+ if (!pipeline->use_chain_dma)
+ ida_free(&fw_module->m_ida, swidget->instance_id);
+ }
+
+ mutex_unlock(&ipc4_data->pipeline_state_mutex);
+
+ return ret;
+}
+
+static int sof_ipc4_get_queue_id(struct snd_sof_widget *src_widget,
+ struct snd_sof_widget *sink_widget, bool pin_type)
+{
+ struct snd_sof_widget *current_swidget;
+ struct snd_soc_component *scomp;
+ struct ida *queue_ida;
+ const char *buddy_name;
+ char **pin_binding;
+ u32 num_pins;
+ int i;
+
+ if (pin_type == SOF_PIN_TYPE_OUTPUT) {
+ current_swidget = src_widget;
+ pin_binding = src_widget->output_pin_binding;
+ queue_ida = &src_widget->output_queue_ida;
+ num_pins = src_widget->num_output_pins;
+ buddy_name = sink_widget->widget->name;
+ } else {
+ current_swidget = sink_widget;
+ pin_binding = sink_widget->input_pin_binding;
+ queue_ida = &sink_widget->input_queue_ida;
+ num_pins = sink_widget->num_input_pins;
+ buddy_name = src_widget->widget->name;
+ }
+
+ scomp = current_swidget->scomp;
+
+ if (num_pins < 1) {
+ dev_err(scomp->dev, "invalid %s num_pins: %d for queue allocation for %s\n",
+ (pin_type == SOF_PIN_TYPE_OUTPUT ? "output" : "input"),
+ num_pins, current_swidget->widget->name);
+ return -EINVAL;
+ }
+
+ /* If there is only one input/output pin, queue id must be 0 */
+ if (num_pins == 1)
+ return 0;
+
+ /* Allocate queue ID from pin binding array if it is defined in topology. */
+ if (pin_binding) {
+ for (i = 0; i < num_pins; i++) {
+ if (!strcmp(pin_binding[i], buddy_name))
+ return i;
+ }
+ /*
+		 * Fail if no queue ID is found in the pin binding array, so that the
+		 * pin binding array and ida are never mixed for queue ID allocation.
+ */
+ dev_err(scomp->dev, "no %s queue id found from pin binding array for %s\n",
+ (pin_type == SOF_PIN_TYPE_OUTPUT ? "output" : "input"),
+ current_swidget->widget->name);
+ return -EINVAL;
+ }
+
+ /* If no pin binding array specified in topology, use ida to allocate one */
+ return ida_alloc_max(queue_ida, num_pins, GFP_KERNEL);
+}
+
+static void sof_ipc4_put_queue_id(struct snd_sof_widget *swidget, int queue_id,
+ bool pin_type)
+{
+ struct ida *queue_ida;
+ char **pin_binding;
+ int num_pins;
+
+ if (pin_type == SOF_PIN_TYPE_OUTPUT) {
+ pin_binding = swidget->output_pin_binding;
+ queue_ida = &swidget->output_queue_ida;
+ num_pins = swidget->num_output_pins;
+ } else {
+ pin_binding = swidget->input_pin_binding;
+ queue_ida = &swidget->input_queue_ida;
+ num_pins = swidget->num_input_pins;
+ }
+
+ /* Nothing to free if queue ID is not allocated with ida. */
+ if (num_pins == 1 || pin_binding)
+ return;
+
+ ida_free(queue_ida, queue_id);
+}
+
+static int sof_ipc4_set_copier_sink_format(struct snd_sof_dev *sdev,
+ struct snd_sof_widget *src_widget,
+ struct snd_sof_widget *sink_widget,
+ int sink_id)
+{
+ struct sof_ipc4_copier_config_set_sink_format format;
+ const struct sof_ipc_ops *iops = sdev->ipc->ops;
+ struct sof_ipc4_base_module_cfg *src_config;
+ const struct sof_ipc4_audio_format *pin_fmt;
+ struct sof_ipc4_fw_module *fw_module;
+ struct sof_ipc4_msg msg = {{ 0 }};
+
+ dev_dbg(sdev->dev, "%s set copier sink %d format\n",
+ src_widget->widget->name, sink_id);
+
+ if (WIDGET_IS_DAI(src_widget->id)) {
+ struct snd_sof_dai *dai = src_widget->private;
+
+ src_config = dai->private;
+ } else {
+ src_config = src_widget->private;
+ }
+
+ fw_module = src_widget->module_info;
+
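+	/*
+	 * The source format comes from the source copier's base config, the
+	 * sink format from the sink widget's input pin format.
+	 */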
+ format.sink_id = sink_id;
+ memcpy(&format.source_fmt, &src_config->audio_fmt, sizeof(format.source_fmt));
+
+ pin_fmt = sof_ipc4_get_input_pin_audio_fmt(sink_widget, sink_id);
+ if (!pin_fmt) {
+ dev_err(sdev->dev, "Unable to get pin %d format for %s",
+ sink_id, sink_widget->widget->name);
+ return -EINVAL;
+ }
+
+ memcpy(&format.sink_fmt, pin_fmt, sizeof(format.sink_fmt));
+
+ msg.data_size = sizeof(format);
+ msg.data_ptr = &format;
+
+ msg.primary = fw_module->man4_module_entry.id;
+ msg.primary |= SOF_IPC4_MOD_INSTANCE(src_widget->instance_id);
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+
+ msg.extension =
+ SOF_IPC4_MOD_EXT_MSG_PARAM_ID(SOF_IPC4_COPIER_MODULE_CFG_PARAM_SET_SINK_FORMAT);
+
+ return iops->set_get_data(sdev, &msg, msg.data_size, true);
+}
+
+static int sof_ipc4_route_setup(struct snd_sof_dev *sdev, struct snd_sof_route *sroute)
+{
+ struct snd_sof_widget *src_widget = sroute->src_widget;
+ struct snd_sof_widget *sink_widget = sroute->sink_widget;
+ struct snd_sof_widget *src_pipe_widget = src_widget->spipe->pipe_widget;
+ struct snd_sof_widget *sink_pipe_widget = sink_widget->spipe->pipe_widget;
+ struct sof_ipc4_fw_module *src_fw_module = src_widget->module_info;
+ struct sof_ipc4_fw_module *sink_fw_module = sink_widget->module_info;
+ struct sof_ipc4_pipeline *src_pipeline = src_pipe_widget->private;
+ struct sof_ipc4_pipeline *sink_pipeline = sink_pipe_widget->private;
+ struct sof_ipc4_msg msg = {{ 0 }};
+ u32 header, extension;
+ int ret;
+
+ /* no route set up if chain DMA is used */
+ if (src_pipeline->use_chain_dma || sink_pipeline->use_chain_dma) {
+ if (!src_pipeline->use_chain_dma || !sink_pipeline->use_chain_dma) {
+ dev_err(sdev->dev,
+ "use_chain_dma must be set for both src %s and sink %s pipelines\n",
+ src_widget->widget->name, sink_widget->widget->name);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ if (!src_fw_module || !sink_fw_module) {
+ dev_err(sdev->dev,
+ "cannot bind %s -> %s, no firmware module for: %s%s\n",
+ src_widget->widget->name, sink_widget->widget->name,
+ src_fw_module ? "" : " source",
+ sink_fw_module ? "" : " sink");
+
+ return -ENODEV;
+ }
+
+ sroute->src_queue_id = sof_ipc4_get_queue_id(src_widget, sink_widget,
+ SOF_PIN_TYPE_OUTPUT);
+ if (sroute->src_queue_id < 0) {
+ dev_err(sdev->dev, "failed to get queue ID for source widget: %s\n",
+ src_widget->widget->name);
+ return sroute->src_queue_id;
+ }
+
+ sroute->dst_queue_id = sof_ipc4_get_queue_id(src_widget, sink_widget,
+ SOF_PIN_TYPE_INPUT);
+ if (sroute->dst_queue_id < 0) {
+ dev_err(sdev->dev, "failed to get queue ID for sink widget: %s\n",
+ sink_widget->widget->name);
+ sof_ipc4_put_queue_id(src_widget, sroute->src_queue_id,
+ SOF_PIN_TYPE_OUTPUT);
+ return sroute->dst_queue_id;
+ }
+
+ /* Pin 0 format is already set during copier module init */
+ if (sroute->src_queue_id > 0 && WIDGET_IS_COPIER(src_widget->id)) {
+ ret = sof_ipc4_set_copier_sink_format(sdev, src_widget, sink_widget,
+ sroute->src_queue_id);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to set sink format for %s source queue ID %d\n",
+ src_widget->widget->name, sroute->src_queue_id);
+ goto out;
+ }
+ }
+
+ dev_dbg(sdev->dev, "bind %s:%d -> %s:%d\n",
+ src_widget->widget->name, sroute->src_queue_id,
+ sink_widget->widget->name, sroute->dst_queue_id);
+
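+	/*
+	 * Build the MOD_BIND request: the source module/instance goes in the
+	 * primary word, the sink module/instance and both queue IDs in the
+	 * extension.
+	 */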
+ header = src_fw_module->man4_module_entry.id;
+ header |= SOF_IPC4_MOD_INSTANCE(src_widget->instance_id);
+ header |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_BIND);
+ header |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ header |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+
+ extension = sink_fw_module->man4_module_entry.id;
+ extension |= SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE(sink_widget->instance_id);
+ extension |= SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID(sroute->dst_queue_id);
+ extension |= SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID(sroute->src_queue_id);
+
+ msg.primary = header;
+ msg.extension = extension;
+
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to bind modules %s:%d -> %s:%d\n",
+ src_widget->widget->name, sroute->src_queue_id,
+ sink_widget->widget->name, sroute->dst_queue_id);
+ goto out;
+ }
+
+ return ret;
+
+out:
+ sof_ipc4_put_queue_id(src_widget, sroute->src_queue_id, SOF_PIN_TYPE_OUTPUT);
+ sof_ipc4_put_queue_id(sink_widget, sroute->dst_queue_id, SOF_PIN_TYPE_INPUT);
+ return ret;
+}
+
+static int sof_ipc4_route_free(struct snd_sof_dev *sdev, struct snd_sof_route *sroute)
+{
+ struct snd_sof_widget *src_widget = sroute->src_widget;
+ struct snd_sof_widget *sink_widget = sroute->sink_widget;
+ struct sof_ipc4_fw_module *src_fw_module = src_widget->module_info;
+ struct sof_ipc4_fw_module *sink_fw_module = sink_widget->module_info;
+ struct sof_ipc4_msg msg = {{ 0 }};
+ struct snd_sof_widget *src_pipe_widget = src_widget->spipe->pipe_widget;
+ struct snd_sof_widget *sink_pipe_widget = sink_widget->spipe->pipe_widget;
+ struct sof_ipc4_pipeline *src_pipeline = src_pipe_widget->private;
+ struct sof_ipc4_pipeline *sink_pipeline = sink_pipe_widget->private;
+ u32 header, extension;
+ int ret = 0;
+
+ /* no route is set up if chain DMA is used */
+ if (src_pipeline->use_chain_dma || sink_pipeline->use_chain_dma)
+ return 0;
+
+ dev_dbg(sdev->dev, "unbind modules %s:%d -> %s:%d\n",
+ src_widget->widget->name, sroute->src_queue_id,
+ sink_widget->widget->name, sroute->dst_queue_id);
+
+ /*
+ * routes belonging to the same pipeline will be disconnected by the FW when the pipeline
+ * is freed. So avoid sending this IPC which will be ignored by the FW anyway.
+ */
+ if (src_widget->spipe->pipe_widget == sink_widget->spipe->pipe_widget)
+ goto out;
+
+ header = src_fw_module->man4_module_entry.id;
+ header |= SOF_IPC4_MOD_INSTANCE(src_widget->instance_id);
+ header |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_UNBIND);
+ header |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ header |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+
+ extension = sink_fw_module->man4_module_entry.id;
+ extension |= SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE(sink_widget->instance_id);
+ extension |= SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID(sroute->dst_queue_id);
+ extension |= SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID(sroute->src_queue_id);
+
+ msg.primary = header;
+ msg.extension = extension;
+
+ ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to unbind modules %s:%d -> %s:%d\n",
+ src_widget->widget->name, sroute->src_queue_id,
+ sink_widget->widget->name, sroute->dst_queue_id);
+out:
+ sof_ipc4_put_queue_id(sink_widget, sroute->dst_queue_id, SOF_PIN_TYPE_INPUT);
+ sof_ipc4_put_queue_id(src_widget, sroute->src_queue_id, SOF_PIN_TYPE_OUTPUT);
+
+ return ret;
+}
+
+static int sof_ipc4_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget,
+ unsigned int flags, struct snd_sof_dai_config_data *data)
+{
+ struct snd_sof_widget *pipe_widget = swidget->spipe->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_ipc4_gtw_attributes *gtw_attr;
+ struct sof_ipc4_copier_data *copier_data;
+ struct sof_ipc4_copier *ipc4_copier;
+
+ if (!dai || !dai->private) {
+ dev_err(sdev->dev, "Invalid DAI or DAI private data for %s\n",
+ swidget->widget->name);
+ return -EINVAL;
+ }
+
+ ipc4_copier = (struct sof_ipc4_copier *)dai->private;
+ copier_data = &ipc4_copier->data;
+
+ if (!data)
+ return 0;
+
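+	/*
+	 * Update the copier gateway node ID (or the chain DMA link ID) based
+	 * on the DAI type.
+	 */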
+ switch (ipc4_copier->dai_type) {
+ case SOF_DAI_INTEL_HDA:
+ if (pipeline->use_chain_dma) {
+ pipeline->msg.primary &= ~SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK;
+ pipeline->msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_LINK_ID(data->dai_data);
+ break;
+ }
+ gtw_attr = ipc4_copier->gtw_attr;
+ gtw_attr->lp_buffer_alloc = pipeline->lp_mode;
+ fallthrough;
+ case SOF_DAI_INTEL_ALH:
+ /*
+ * Do not clear the node ID when this op is invoked with
+ * SOF_DAI_CONFIG_FLAGS_HW_FREE. It is needed to free the group_ida during
+ * unprepare.
+ */
+ if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) {
+ copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK;
+ copier_data->gtw_cfg.node_id |= SOF_IPC4_NODE_INDEX(data->dai_data);
+ }
+ break;
+ case SOF_DAI_INTEL_DMIC:
+ case SOF_DAI_INTEL_SSP:
+ /* nothing to do for SSP/DMIC */
+ break;
+ default:
+ dev_err(sdev->dev, "%s: unsupported dai type %d\n", __func__,
+ ipc4_copier->dai_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sof_ipc4_parse_manifest(struct snd_soc_component *scomp, int index,
+ struct snd_soc_tplg_manifest *man)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct sof_manifest_tlv *manifest_tlv;
+ struct sof_manifest *manifest;
+ u32 size = le32_to_cpu(man->priv.size);
+ u8 *man_ptr = man->priv.data;
+ u32 len_check;
+ int i;
+
+ if (!size || size < SOF_IPC4_TPLG_ABI_SIZE) {
+ dev_err(scomp->dev, "%s: Invalid topology ABI size: %u\n",
+ __func__, size);
+ return -EINVAL;
+ }
+
+ manifest = (struct sof_manifest *)man_ptr;
+
+ dev_info(scomp->dev,
+ "Topology: ABI %d:%d:%d Kernel ABI %u:%u:%u\n",
+ le16_to_cpu(manifest->abi_major), le16_to_cpu(manifest->abi_minor),
+ le16_to_cpu(manifest->abi_patch),
+ SOF_ABI_MAJOR, SOF_ABI_MINOR, SOF_ABI_PATCH);
+
+ /* TODO: Add ABI compatibility check */
+
+ /* no more data after the ABI version */
+ if (size <= SOF_IPC4_TPLG_ABI_SIZE)
+ return 0;
+
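+	/*
+	 * Walk the manifest TLV items, validating the accumulated length
+	 * against the private data size before using each item.
+	 */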
+ manifest_tlv = manifest->items;
+ len_check = sizeof(struct sof_manifest);
+ for (i = 0; i < le16_to_cpu(manifest->count); i++) {
+ len_check += sizeof(struct sof_manifest_tlv) + le32_to_cpu(manifest_tlv->size);
+ if (len_check > size)
+ return -EINVAL;
+
+ switch (le32_to_cpu(manifest_tlv->type)) {
+ case SOF_MANIFEST_DATA_TYPE_NHLT:
+ /* no NHLT in BIOS, so use the one from topology manifest */
+ if (ipc4_data->nhlt)
+ break;
+ ipc4_data->nhlt = devm_kmemdup(sdev->dev, manifest_tlv->data,
+ le32_to_cpu(manifest_tlv->size), GFP_KERNEL);
+ if (!ipc4_data->nhlt)
+ return -ENOMEM;
+ break;
+ default:
+			dev_warn(scomp->dev, "Skipping unknown manifest data type %d\n",
+				 le32_to_cpu(manifest_tlv->type));
+ break;
+ }
+ man_ptr += sizeof(struct sof_manifest_tlv) + le32_to_cpu(manifest_tlv->size);
+ manifest_tlv = (struct sof_manifest_tlv *)man_ptr;
+ }
+
+ return 0;
+}
+
+static int sof_ipc4_dai_get_clk(struct snd_sof_dev *sdev, struct snd_sof_dai *dai, int clk_type)
+{
+ struct sof_ipc4_copier *ipc4_copier = dai->private;
+ struct snd_soc_tplg_hw_config *hw_config;
+ struct snd_sof_dai_link *slink;
+ bool dai_link_found = false;
+ bool hw_cfg_found = false;
+ int i;
+
+ if (!ipc4_copier)
+ return 0;
+
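+	/* look up the DAI link and hw_config that match this DAI */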
+ list_for_each_entry(slink, &sdev->dai_link_list, list) {
+ if (!strcmp(slink->link->name, dai->name)) {
+ dai_link_found = true;
+ break;
+ }
+ }
+
+ if (!dai_link_found) {
+ dev_err(sdev->dev, "no DAI link found for DAI %s\n", dai->name);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < slink->num_hw_configs; i++) {
+ hw_config = &slink->hw_configs[i];
+ if (dai->current_config == le32_to_cpu(hw_config->id)) {
+ hw_cfg_found = true;
+ break;
+ }
+ }
+
+ if (!hw_cfg_found) {
+ dev_err(sdev->dev, "no matching hw_config found for DAI %s\n", dai->name);
+ return -EINVAL;
+ }
+
+ switch (ipc4_copier->dai_type) {
+ case SOF_DAI_INTEL_SSP:
+ switch (clk_type) {
+ case SOF_DAI_CLK_INTEL_SSP_MCLK:
+ return le32_to_cpu(hw_config->mclk_rate);
+ case SOF_DAI_CLK_INTEL_SSP_BCLK:
+ return le32_to_cpu(hw_config->bclk_rate);
+ default:
+ dev_err(sdev->dev, "Invalid clk type for SSP %d\n", clk_type);
+ break;
+ }
+ break;
+ default:
+ dev_err(sdev->dev, "DAI type %d not supported yet!\n", ipc4_copier->dai_type);
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int sof_ipc4_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verify)
+{
+ struct snd_sof_pcm *spcm;
+ int dir, ret;
+
+ /*
+	 * This function is called during system suspend; we need to make sure
+	 * that all streams have been freed up.
+	 * Freeing might have been skipped when an xrun happened right at the
+	 * start of the suspend and sent a SNDRV_PCM_TRIGGER_STOP to the active
+	 * stream. That calls sof_pcm_stream_free() with
+	 * free_widget_list = false, which would leave the kernel and firmware
+	 * out of sync across suspend/resume.
+	 *
+	 * This also makes sure that paused streams are handled correctly.
+ */
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ for_each_pcm_streams(dir) {
+ struct snd_pcm_substream *substream = spcm->stream[dir].substream;
+
+ if (!substream || !substream->runtime || spcm->stream[dir].suspend_ignored)
+ continue;
+
+ if (spcm->stream[dir].list) {
+ ret = sof_pcm_stream_free(sdev, substream, spcm, dir, true);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static int sof_ipc4_link_setup(struct snd_sof_dev *sdev, struct snd_soc_dai_link *link)
+{
+ if (link->no_pcm)
+ return 0;
+
+ /*
+	 * Set the default trigger order for all links; exceptions to
+	 * the rule will be handled in sof_pcm_dai_link_fixup().
+	 * For playback the sequence is: start BE, start FE,
+	 * stop FE, stop BE. For capture the sequence is inverted:
+	 * start FE, start BE, stop BE, stop FE.
+ */
+ link->trigger[SNDRV_PCM_STREAM_PLAYBACK] = SND_SOC_DPCM_TRIGGER_POST;
+ link->trigger[SNDRV_PCM_STREAM_CAPTURE] = SND_SOC_DPCM_TRIGGER_PRE;
+
+ return 0;
+}
+
+static enum sof_tokens common_copier_token_list[] = {
+ SOF_COMP_TOKENS,
+ SOF_AUDIO_FMT_NUM_TOKENS,
+ SOF_IN_AUDIO_FORMAT_TOKENS,
+ SOF_OUT_AUDIO_FORMAT_TOKENS,
+ SOF_COPIER_DEEP_BUFFER_TOKENS,
+ SOF_COPIER_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+};
+
+static enum sof_tokens pipeline_token_list[] = {
+ SOF_SCHED_TOKENS,
+ SOF_PIPELINE_TOKENS,
+};
+
+static enum sof_tokens dai_token_list[] = {
+ SOF_COMP_TOKENS,
+ SOF_AUDIO_FMT_NUM_TOKENS,
+ SOF_IN_AUDIO_FORMAT_TOKENS,
+ SOF_OUT_AUDIO_FORMAT_TOKENS,
+ SOF_COPIER_TOKENS,
+ SOF_DAI_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+};
+
+static enum sof_tokens pga_token_list[] = {
+ SOF_COMP_TOKENS,
+ SOF_GAIN_TOKENS,
+ SOF_AUDIO_FMT_NUM_TOKENS,
+ SOF_IN_AUDIO_FORMAT_TOKENS,
+ SOF_OUT_AUDIO_FORMAT_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+};
+
+static enum sof_tokens mixer_token_list[] = {
+ SOF_COMP_TOKENS,
+ SOF_AUDIO_FMT_NUM_TOKENS,
+ SOF_IN_AUDIO_FORMAT_TOKENS,
+ SOF_OUT_AUDIO_FORMAT_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+};
+
+static enum sof_tokens src_token_list[] = {
+ SOF_COMP_TOKENS,
+ SOF_SRC_TOKENS,
+ SOF_AUDIO_FMT_NUM_TOKENS,
+ SOF_IN_AUDIO_FORMAT_TOKENS,
+ SOF_OUT_AUDIO_FORMAT_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+};
+
+static enum sof_tokens process_token_list[] = {
+ SOF_COMP_TOKENS,
+ SOF_AUDIO_FMT_NUM_TOKENS,
+ SOF_IN_AUDIO_FORMAT_TOKENS,
+ SOF_OUT_AUDIO_FORMAT_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+};
+
+static const struct sof_ipc_tplg_widget_ops tplg_ipc4_widget_ops[SND_SOC_DAPM_TYPE_COUNT] = {
+ [snd_soc_dapm_aif_in] = {sof_ipc4_widget_setup_pcm, sof_ipc4_widget_free_comp_pcm,
+ common_copier_token_list, ARRAY_SIZE(common_copier_token_list),
+ NULL, sof_ipc4_prepare_copier_module,
+ sof_ipc4_unprepare_copier_module},
+ [snd_soc_dapm_aif_out] = {sof_ipc4_widget_setup_pcm, sof_ipc4_widget_free_comp_pcm,
+ common_copier_token_list, ARRAY_SIZE(common_copier_token_list),
+ NULL, sof_ipc4_prepare_copier_module,
+ sof_ipc4_unprepare_copier_module},
+ [snd_soc_dapm_dai_in] = {sof_ipc4_widget_setup_comp_dai, sof_ipc4_widget_free_comp_dai,
+ dai_token_list, ARRAY_SIZE(dai_token_list), NULL,
+ sof_ipc4_prepare_copier_module,
+ sof_ipc4_unprepare_copier_module},
+ [snd_soc_dapm_dai_out] = {sof_ipc4_widget_setup_comp_dai, sof_ipc4_widget_free_comp_dai,
+ dai_token_list, ARRAY_SIZE(dai_token_list), NULL,
+ sof_ipc4_prepare_copier_module,
+ sof_ipc4_unprepare_copier_module},
+ [snd_soc_dapm_buffer] = {sof_ipc4_widget_setup_pcm, sof_ipc4_widget_free_comp_pcm,
+ common_copier_token_list, ARRAY_SIZE(common_copier_token_list),
+ NULL, sof_ipc4_prepare_copier_module,
+ sof_ipc4_unprepare_copier_module},
+ [snd_soc_dapm_scheduler] = {sof_ipc4_widget_setup_comp_pipeline,
+ sof_ipc4_widget_free_comp_pipeline,
+ pipeline_token_list, ARRAY_SIZE(pipeline_token_list), NULL,
+ NULL, NULL},
+ [snd_soc_dapm_pga] = {sof_ipc4_widget_setup_comp_pga, sof_ipc4_widget_free_comp_pga,
+ pga_token_list, ARRAY_SIZE(pga_token_list), NULL,
+ sof_ipc4_prepare_gain_module,
+ NULL},
+ [snd_soc_dapm_mixer] = {sof_ipc4_widget_setup_comp_mixer, sof_ipc4_widget_free_comp_mixer,
+ mixer_token_list, ARRAY_SIZE(mixer_token_list),
+ NULL, sof_ipc4_prepare_mixer_module,
+ NULL},
+ [snd_soc_dapm_src] = {sof_ipc4_widget_setup_comp_src, sof_ipc4_widget_free_comp_src,
+ src_token_list, ARRAY_SIZE(src_token_list),
+ NULL, sof_ipc4_prepare_src_module,
+ NULL},
+ [snd_soc_dapm_effect] = {sof_ipc4_widget_setup_comp_process,
+ sof_ipc4_widget_free_comp_process,
+ process_token_list, ARRAY_SIZE(process_token_list),
+ NULL, sof_ipc4_prepare_process_module,
+ NULL},
+};
+
+const struct sof_ipc_tplg_ops ipc4_tplg_ops = {
+ .widget = tplg_ipc4_widget_ops,
+ .token_list = ipc4_token_list,
+ .control_setup = sof_ipc4_control_setup,
+ .control = &tplg_ipc4_control_ops,
+ .widget_setup = sof_ipc4_widget_setup,
+ .widget_free = sof_ipc4_widget_free,
+ .route_setup = sof_ipc4_route_setup,
+ .route_free = sof_ipc4_route_free,
+ .dai_config = sof_ipc4_dai_config,
+ .parse_manifest = sof_ipc4_parse_manifest,
+ .dai_get_clk = sof_ipc4_dai_get_clk,
+ .tear_down_all_pipelines = sof_ipc4_tear_down_all_pipelines,
+ .link_setup = sof_ipc4_link_setup,
+};
diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
new file mode 100644
index 000000000..21436657a
--- /dev/null
+++ b/sound/soc/sof/ipc4-topology.h
@@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_IPC4_TOPOLOGY_H__
+#define __INCLUDE_SOUND_SOF_IPC4_TOPOLOGY_H__
+
+#include <sound/sof/ipc4/header.h>
+
+#define SOF_IPC4_FW_PAGE_SIZE BIT(12)
+#define SOF_IPC4_FW_PAGE(x) ((((x) + BIT(12) - 1) & ~(BIT(12) - 1)) >> 12)
+#define SOF_IPC4_FW_ROUNDUP(x) (((x) + BIT(6) - 1) & (~(BIT(6) - 1)))
+
+#define SOF_IPC4_MODULE_LOAD_TYPE GENMASK(3, 0)
+#define SOF_IPC4_MODULE_AUTO_START BIT(4)
+/*
+ * Two module scheduling domains in the firmware:
+ * LL domain - low latency domain
+ * DP domain - data processing domain
+ * The LL flag should always be set to the inverse of the DP flag
+ */
+#define SOF_IPC4_MODULE_LL BIT(5)
+#define SOF_IPC4_MODULE_DP BIT(6)
+#define SOF_IPC4_MODULE_LIB_CODE BIT(7)
+#define SOF_IPC4_MODULE_INIT_CONFIG_MASK GENMASK(11, 8)
+
+#define SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG 0
+#define SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT 1
+
+#define SOF_IPC4_MODULE_INSTANCE_LIST_ITEM_SIZE 12
+#define SOF_IPC4_PIPELINE_OBJECT_SIZE 448
+#define SOF_IPC4_DATA_QUEUE_OBJECT_SIZE 128
+#define SOF_IPC4_LL_TASK_OBJECT_SIZE 72
+#define SOF_IPC4_DP_TASK_OBJECT_SIZE 104
+#define SOF_IPC4_DP_TASK_LIST_SIZE (12 + 8)
+#define SOF_IPC4_LL_TASK_LIST_ITEM_SIZE 12
+#define SOF_IPC4_FW_MAX_PAGE_COUNT 20
+#define SOF_IPC4_FW_MAX_QUEUE_COUNT 8
+
+/* Node index and mask applicable for host copier and ALH/HDA type DAI copiers */
+#define SOF_IPC4_NODE_INDEX_MASK 0xFF
+#define SOF_IPC4_NODE_INDEX(x) ((x) & SOF_IPC4_NODE_INDEX_MASK)
+#define SOF_IPC4_NODE_TYPE(x) ((x) << 8)
+
+/* Node ID for SSP type DAI copiers */
+#define SOF_IPC4_NODE_INDEX_INTEL_SSP(x) (((x) & 0xf) << 4)
+
+/* Node ID for DMIC type DAI copiers */
+#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) ((x) & 0x7)
+
+#define SOF_IPC4_GAIN_ALL_CHANNELS_MASK 0xffffffff
+#define SOF_IPC4_VOL_ZERO_DB 0x7fffffff
+
+#define SOF_IPC4_DMA_DEVICE_MAX_COUNT 16
+
+#define SOF_IPC4_INVALID_NODE_ID 0xffffffff
+
+/* FW requires minimum 2ms DMA buffer size */
+#define SOF_IPC4_MIN_DMA_BUFFER_SIZE 2
+
+/*
+ * The base of multi-gateways. Multi-gateways addressing starts from
+ * ALH_MULTI_GTW_BASE and there are ALH_MULTI_GTW_COUNT multi-sources
+ * and ALH_MULTI_GTW_COUNT multi-sinks available.
+ * Addressing is continuous from ALH_MULTI_GTW_BASE to
+ * ALH_MULTI_GTW_BASE + ALH_MULTI_GTW_COUNT - 1.
+ */
+#define ALH_MULTI_GTW_BASE 0x50
+/* A magic number from FW */
+#define ALH_MULTI_GTW_COUNT 8
+
+enum sof_ipc4_copier_module_config_params {
+/*
+ * Use LARGE_CONFIG_SET to initialize timestamp event. Ipc mailbox must
+ * contain properly built CopierConfigTimestampInitData struct.
+ */
+ SOF_IPC4_COPIER_MODULE_CFG_PARAM_TIMESTAMP_INIT = 1,
+/*
+ * Use LARGE_CONFIG_SET to initialize a copier sink. The IPC mailbox must
+ * contain a properly built CopierConfigSetSinkFormat struct.
+ */
+ SOF_IPC4_COPIER_MODULE_CFG_PARAM_SET_SINK_FORMAT,
+/*
+ * Use LARGE_CONFIG_SET to initialize and enable the copier data segment
+ * event. The IPC mailbox must contain a properly built DataSegmentEnabled struct.
+ */
+ SOF_IPC4_COPIER_MODULE_CFG_PARAM_DATA_SEGMENT_ENABLED,
+/*
+ * Use LARGE_CONFIG_GET to retrieve the Linear Link Position (LLP) value for
+ * non-HD-A gateways.
+ */
+ SOF_IPC4_COPIER_MODULE_CFG_PARAM_LLP_READING,
+/*
+ * Use LARGE_CONFIG_GET to retrieve the Linear Link Position (LLP) value for
+ * non-HD-A gateways and the corresponding total processed data.
+ */
+ SOF_IPC4_COPIER_MODULE_CFG_PARAM_LLP_READING_EXTENDED,
+/*
+ * Use LARGE_CONFIG_SET to set up attenuation on output pins. Data is a single uint32_t.
+ * Note: this config is only allowed when the output pin is set up for 32-bit
+ * samples and the source is connected to a gateway.
+ */
+ SOF_IPC4_COPIER_MODULE_CFG_ATTENUATION,
+};
+
+struct sof_ipc4_copier_config_set_sink_format {
+/* ID of the sink */
+ u32 sink_id;
+/*
+ * Input format used by the source.
+ * Note: this must match the format already in use if the source has been initialized.
+ */
+ struct sof_ipc4_audio_format source_fmt;
+/* Output format used by the sink */
+ struct sof_ipc4_audio_format sink_fmt;
+} __packed __aligned(4);
+
+/**
+ * struct sof_ipc4_pipeline - pipeline config data
+ * @priority: Priority of this pipeline
+ * @lp_mode: Low power mode
+ * @mem_usage: Memory usage
+ * @core_id: Target core for the pipeline
+ * @state: Pipeline state
+ * @use_chain_dma: flag to indicate if the firmware shall use chained DMA
+ * @msg: message structure for pipeline
+ * @skip_during_fe_trigger: skip triggering this pipeline during the FE DAI trigger
+ */
+struct sof_ipc4_pipeline {
+ uint32_t priority;
+ uint32_t lp_mode;
+ uint32_t mem_usage;
+ uint32_t core_id;
+ int state;
+ bool use_chain_dma;
+ struct sof_ipc4_msg msg;
+ bool skip_during_fe_trigger;
+};
+
+/**
+ * struct ipc4_pipeline_set_state_data - multi-pipeline trigger (set state) IPC data
+ * @count: Number of pipelines to be triggered
+ * @pipeline_instance_ids: Flexible array of IDs of the pipelines to be triggered
+ */
+struct ipc4_pipeline_set_state_data {
+ u32 count;
+ DECLARE_FLEX_ARRAY(u32, pipeline_instance_ids);
+} __packed;
+
+/**
+ * struct sof_ipc4_pin_format - Module pin format
+ * @pin_index: pin index
+ * @buffer_size: buffer size in bytes
+ * @audio_fmt: audio format for the pin
+ *
+ * This structure can be used for both output and input pins; the pin_index is relative to the
+ * pin type, i.e. output/input pin
+ */
+struct sof_ipc4_pin_format {
+ u32 pin_index;
+ u32 buffer_size;
+ struct sof_ipc4_audio_format audio_fmt;
+};
+
+/**
+ * struct sof_ipc4_available_audio_format - Available audio formats
+ * @output_pin_fmts: Available output pin formats
+ * @input_pin_fmts: Available input pin formats
+ * @num_input_formats: Number of input pin formats
+ * @num_output_formats: Number of output pin formats
+ */
+struct sof_ipc4_available_audio_format {
+ struct sof_ipc4_pin_format *output_pin_fmts;
+ struct sof_ipc4_pin_format *input_pin_fmts;
+ u32 num_input_formats;
+ u32 num_output_formats;
+};
+
+/**
+ * struct sof_copier_gateway_cfg - IPC gateway configuration
+ * @node_id: ID of Gateway Node
+ * @dma_buffer_size: Preferred Gateway DMA buffer size (in bytes)
+ * @config_length: Length of the gateway node configuration blob specified in @config_data
+ * @config_data: Gateway node configuration blob
+ */
+struct sof_copier_gateway_cfg {
+ uint32_t node_id;
+ uint32_t dma_buffer_size;
+ uint32_t config_length;
+ uint32_t config_data[];
+};
+
+/**
+ * struct sof_ipc4_copier_data - IPC data for copier
+ * @base_config: Base configuration including input audio format
+ * @out_format: Output audio format
+ * @copier_feature_mask: Copier feature mask
+ * @gtw_cfg: Gateway configuration
+ */
+struct sof_ipc4_copier_data {
+ struct sof_ipc4_base_module_cfg base_config;
+ struct sof_ipc4_audio_format out_format;
+ uint32_t copier_feature_mask;
+ struct sof_copier_gateway_cfg gtw_cfg;
+};
+
+/**
+ * struct sof_ipc4_gtw_attributes - Gateway attributes
+ * @lp_buffer_alloc: Gateway data requested in low power memory
+ * @alloc_from_reg_file: Gateway data requested in register file memory
+ * @rsvd: reserved for future use
+ */
+struct sof_ipc4_gtw_attributes {
+ uint32_t lp_buffer_alloc : 1;
+ uint32_t alloc_from_reg_file : 1;
+ uint32_t rsvd : 30;
+};
+
+/**
+ * struct sof_ipc4_dma_device_stream_ch_map - abstract representation of
+ * channel mapping to DMAs
+ * @device: representation of hardware device address or FIFO
+ * @channel_mask: channels handled by @device. Channels are expected to be
+ * contiguous
+ */
+struct sof_ipc4_dma_device_stream_ch_map {
+ uint32_t device;
+ uint32_t channel_mask;
+};
+
+/**
+ * struct sof_ipc4_dma_stream_ch_map - DMA configuration data
+ * @device_count: Number of valid items in the mapping array
+ * @mapping: device address and channel mask
+ */
+struct sof_ipc4_dma_stream_ch_map {
+ uint32_t device_count;
+ struct sof_ipc4_dma_device_stream_ch_map mapping[SOF_IPC4_DMA_DEVICE_MAX_COUNT];
+} __packed;
+
+#define SOF_IPC4_DMA_METHOD_HDA 1
+#define SOF_IPC4_DMA_METHOD_GPDMA 2 /* defined for consistency but not used */
+
+/**
+ * struct sof_ipc4_dma_config - DMA configuration
+ * @dma_method: HDAudio or GPDMA
+ * @pre_allocated_by_host: 1 if host driver allocates DMA channels, 0 otherwise
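+ * @rsvd: reserved for future use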
+ * @dma_channel_id: for HDaudio defined as @stream_id - 1
+ * @stream_id: HDaudio stream tag
+ * @dma_stream_channel_map: array of device/channel mappings
+ * @dma_priv_config_size: currently not used
+ * @dma_priv_config: currently not used
+ */
+struct sof_ipc4_dma_config {
+ uint8_t dma_method;
+ uint8_t pre_allocated_by_host;
+ uint16_t rsvd;
+ uint32_t dma_channel_id;
+ uint32_t stream_id;
+ struct sof_ipc4_dma_stream_ch_map dma_stream_channel_map;
+ uint32_t dma_priv_config_size;
+ uint8_t dma_priv_config[];
+} __packed;
+
+#define SOF_IPC4_GTW_DMA_CONFIG_ID 0x1000
+
+/**
+ * struct sof_ipc4_dma_config_tlv - DMA configuration TLV
+ * @type: set to SOF_IPC4_GTW_DMA_CONFIG_ID
+ * @length: sizeof(struct sof_ipc4_dma_config) + dma_config.dma_priv_config_size
+ * @dma_config: actual DMA configuration
+ */
+struct sof_ipc4_dma_config_tlv {
+ uint32_t type;
+ uint32_t length;
+ struct sof_ipc4_dma_config dma_config;
+} __packed;
+
+/**
+ * struct sof_ipc4_alh_configuration_blob - ALH configuration blob
+ * @gw_attr: Gateway attributes
+ * @alh_cfg: ALH configuration data
+ */
+struct sof_ipc4_alh_configuration_blob {
+ struct sof_ipc4_gtw_attributes gw_attr;
+ struct sof_ipc4_dma_stream_ch_map alh_cfg;
+};
+
+/**
+ * struct sof_ipc4_copier - copier config data
+ * @data: IPC copier data
+ * @copier_config: Copier + blob
+ * @ipc_config_size: Size of copier_config
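+ * @ipc_config_data: Pointer to the IPC payload sent when the copier module is created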
+ * @available_fmt: Available audio format
+ * @frame_fmt: frame format
+ * @msg: message structure for copier
+ * @gtw_attr: Gateway attributes for copier blob
+ * @dai_type: DAI type
+ * @dai_index: DAI index
+ * @dma_config_tlv: DMA configuration
+ */
+struct sof_ipc4_copier {
+ struct sof_ipc4_copier_data data;
+ u32 *copier_config;
+ uint32_t ipc_config_size;
+ void *ipc_config_data;
+ struct sof_ipc4_available_audio_format available_fmt;
+ u32 frame_fmt;
+ struct sof_ipc4_msg msg;
+ struct sof_ipc4_gtw_attributes *gtw_attr;
+ u32 dai_type;
+ int dai_index;
+ struct sof_ipc4_dma_config_tlv dma_config_tlv;
+};
+
+/**
+ * struct sof_ipc4_ctrl_value_chan - generic channel mapped value data
+ * @channel: Channel ID
+ * @value: gain value
+ */
+struct sof_ipc4_ctrl_value_chan {
+ u32 channel;
+ u32 value;
+};
+
+/**
+ * struct sof_ipc4_control_data - IPC data for kcontrol IO
+ * @msg: message structure for kcontrol IO
+ * @index: pipeline ID
+ * @chanv: channel ID and value array used by volume type controls
+ * @data: data for binary kcontrols
+ */
+struct sof_ipc4_control_data {
+ struct sof_ipc4_msg msg;
+ int index;
+
+ union {
+ DECLARE_FLEX_ARRAY(struct sof_ipc4_ctrl_value_chan, chanv);
+ DECLARE_FLEX_ARRAY(struct sof_abi_hdr, data);
+ };
+};
+
+/**
+ * struct sof_ipc4_gain_params - IPC gain parameters
+ * @channels: Channels
+ * @init_val: Initial value
+ * @curve_type: Curve type
+ * @reserved: reserved for future use
+ * @curve_duration_l: Curve duration low part
+ * @curve_duration_h: Curve duration high part
+ */
+struct sof_ipc4_gain_params {
+ uint32_t channels;
+ uint32_t init_val;
+ uint32_t curve_type;
+ uint32_t reserved;
+ uint32_t curve_duration_l;
+ uint32_t curve_duration_h;
+} __packed __aligned(4);
+
+/**
+ * struct sof_ipc4_gain_data - IPC gain init blob
+ * @base_config: IPC base config data
+ * @params: Initial parameters for the gain module
+ */
+struct sof_ipc4_gain_data {
+ struct sof_ipc4_base_module_cfg base_config;
+ struct sof_ipc4_gain_params params;
+} __packed __aligned(4);
+
+/**
+ * struct sof_ipc4_gain - gain config data
+ * @data: IPC gain blob
+ * @available_fmt: Available audio format
+ * @msg: message structure for gain
+ */
+struct sof_ipc4_gain {
+ struct sof_ipc4_gain_data data;
+ struct sof_ipc4_available_audio_format available_fmt;
+ struct sof_ipc4_msg msg;
+};
+
+/**
+ * struct sof_ipc4_mixer - mixer config data
+ * @base_config: IPC base config data
+ * @available_fmt: Available audio format
+ * @msg: IPC4 message struct containing header and data info
+ */
+struct sof_ipc4_mixer {
+ struct sof_ipc4_base_module_cfg base_config;
+ struct sof_ipc4_available_audio_format available_fmt;
+ struct sof_ipc4_msg msg;
+};
+
+/**
+ * struct sof_ipc4_src_data - IPC data for SRC
+ * @base_config: IPC base config data
+ * @sink_rate: Output rate for sink module
+ */
+struct sof_ipc4_src_data {
+ struct sof_ipc4_base_module_cfg base_config;
+ uint32_t sink_rate;
+} __packed __aligned(4);
+
+/**
+ * struct sof_ipc4_src - SRC config data
+ * @data: IPC data for the SRC module
+ * @available_fmt: Available audio format
+ * @msg: IPC4 message struct containing header and data info
+ */
+struct sof_ipc4_src {
+ struct sof_ipc4_src_data data;
+ struct sof_ipc4_available_audio_format available_fmt;
+ struct sof_ipc4_msg msg;
+};
+
+/**
+ * struct sof_ipc4_base_module_cfg_ext - base module config extension containing the pin format
+ * information for the module. @num_input_pin_fmts and @num_output_pin_fmts cannot both be 0
+ * for a module.
+ * @num_input_pin_fmts: number of input pin formats in the @pin_formats array
+ * @num_output_pin_fmts: number of output pin formats in the @pin_formats array
+ * @reserved: reserved for future use
+ * @pin_formats: flexible array consisting of @num_input_pin_fmts input pin format items followed
+ * by @num_output_pin_fmts output pin format items
+ */
+struct sof_ipc4_base_module_cfg_ext {
+ u16 num_input_pin_fmts;
+ u16 num_output_pin_fmts;
+ u8 reserved[12];
+ DECLARE_FLEX_ARRAY(struct sof_ipc4_pin_format, pin_formats);
+} __packed;
+
+/**
+ * struct sof_ipc4_process - process config data
+ * @base_config: IPC base config data
+ * @base_config_ext: Base config extension data for module init
+ * @output_format: Output audio format
+ * @available_fmt: Available audio format
+ * @ipc_config_data: Process module config data
+ * @ipc_config_size: Size of process module config data
+ * @msg: IPC4 message struct containing header and data info
+ * @base_config_ext_size: Size of the base config extension data in bytes
+ * @init_config: Module init config type (SOF_IPC4_MODULE_INIT_CONFIG_TYPE_*)
+ */
+struct sof_ipc4_process {
+ struct sof_ipc4_base_module_cfg base_config;
+ struct sof_ipc4_base_module_cfg_ext *base_config_ext;
+ struct sof_ipc4_audio_format output_format;
+ struct sof_ipc4_available_audio_format available_fmt;
+ void *ipc_config_data;
+ uint32_t ipc_config_size;
+ struct sof_ipc4_msg msg;
+ u32 base_config_ext_size;
+ u32 init_config;
+};
+
+#endif
diff --git a/sound/soc/sof/ipc4.c b/sound/soc/sof/ipc4.c
new file mode 100644
index 000000000..1b0949673
--- /dev/null
+++ b/sound/soc/sof/ipc4.c
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+// Authors: Rander Wang <rander.wang@linux.intel.com>
+// Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+//
+#include <linux/firmware.h>
+#include <sound/sof/header.h>
+#include <sound/sof/ipc4/header.h>
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ipc4-fw-reg.h"
+#include "ipc4-priv.h"
+#include "ops.h"
+
+static const struct sof_ipc4_fw_status {
+ int status;
+ char *msg;
+} ipc4_status[] = {
+ {0, "The operation was successful"},
+ {1, "Invalid parameter specified"},
+ {2, "Unknown message type specified"},
+ {3, "Not enough space in the IPC reply buffer to complete the request"},
+ {4, "The system or resource is busy"},
+ {5, "Replaced ADSP IPC PENDING (unused)"},
+ {6, "Unknown error while processing the request"},
+ {7, "Unsupported operation requested"},
+ {8, "Reserved (ADSP_STAGE_UNINITIALIZED removed)"},
+ {9, "Specified resource not found"},
+ {10, "A resource's ID requested to be created is already assigned"},
+ {11, "Reserved (ADSP_IPC_OUT_OF_MIPS removed)"},
+ {12, "Required resource is in invalid state"},
+ {13, "Requested power transition failed to complete"},
+ {14, "Manifest of the library being loaded is invalid"},
+ {15, "Requested service or data is unavailable on the target platform"},
+ {42, "Library target address is out of storage memory range"},
+ {43, "Reserved"},
+ {44, "Image verification by CSE failed"},
+ {100, "General module management error"},
+ {101, "Module loading failed"},
+ {102, "Integrity check of the loaded module content failed"},
+ {103, "Attempt to unload code of the module in use"},
+ {104, "Other failure of module instance initialization request"},
+ {105, "Reserved (ADSP_IPC_OUT_OF_MIPS removed)"},
+ {106, "Reserved (ADSP_IPC_CONFIG_GET_ERROR removed)"},
+ {107, "Reserved (ADSP_IPC_CONFIG_SET_ERROR removed)"},
+ {108, "Reserved (ADSP_IPC_LARGE_CONFIG_GET_ERROR removed)"},
+ {109, "Reserved (ADSP_IPC_LARGE_CONFIG_SET_ERROR removed)"},
+ {110, "Invalid (out of range) module ID provided"},
+ {111, "Invalid module instance ID provided"},
+ {112, "Invalid queue (pin) ID provided"},
+ {113, "Invalid destination queue (pin) ID provided"},
+ {114, "Reserved (ADSP_IPC_BIND_UNBIND_DST_SINK_UNSUPPORTED removed)"},
+ {115, "Reserved (ADSP_IPC_UNLOAD_INST_EXISTS removed)"},
+ {116, "Invalid target code ID provided"},
+ {117, "Injection DMA buffer is too small for probing the input pin"},
+ {118, "Extraction DMA buffer is too small for probing the output pin"},
+ {120, "Invalid ID of configuration item provided in TLV list"},
+ {121, "Invalid length of configuration item provided in TLV list"},
+ {122, "Invalid structure of configuration item provided"},
+ {140, "Initialization of DMA Gateway failed"},
+ {141, "Invalid ID of gateway provided"},
+ {142, "Setting state of DMA Gateway failed"},
+ {143, "DMA_CONTROL message targeting gateway not allocated yet"},
+ {150, "Attempt to configure SCLK while I2S port is running"},
+ {151, "Attempt to configure MCLK while I2S port is running"},
+ {152, "Attempt to stop SCLK that is not running"},
+ {153, "Attempt to stop MCLK that is not running"},
+ {160, "Reserved (ADSP_IPC_PIPELINE_NOT_INITIALIZED removed)"},
+ {161, "Reserved (ADSP_IPC_PIPELINE_NOT_EXIST removed)"},
+ {162, "Reserved (ADSP_IPC_PIPELINE_SAVE_FAILED removed)"},
+ {163, "Reserved (ADSP_IPC_PIPELINE_RESTORE_FAILED removed)"},
+ {165, "Reserved (ADSP_IPC_PIPELINE_ALREADY_EXISTS removed)"},
+};
+
+static int sof_ipc4_check_reply_status(struct snd_sof_dev *sdev, u32 status)
+{
+ int i, ret;
+
+ status &= SOF_IPC4_REPLY_STATUS;
+
+ if (!status)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ipc4_status); i++) {
+ if (ipc4_status[i].status == status) {
+ dev_err(sdev->dev, "FW reported error: %u - %s\n",
+ status, ipc4_status[i].msg);
+ goto to_errno;
+ }
+ }
+
+ if (i == ARRAY_SIZE(ipc4_status))
+ dev_err(sdev->dev, "FW reported error: %u - Unknown\n", status);
+
+to_errno:
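+	/* map the IPC4 status code onto a standard errno value */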
+ switch (status) {
+ case 8:
+ case 11:
+ case 105 ... 109:
+ case 114 ... 115:
+ case 160 ... 163:
+ case 165:
+ ret = -ENOENT;
+ break;
+ case 4:
+ case 150:
+ case 151:
+ ret = -EBUSY;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_VERBOSE_IPC)
+#define DBG_IPC4_MSG_TYPE_ENTRY(type) [SOF_IPC4_##type] = #type
+static const char * const ipc4_dbg_mod_msg_type[] = {
+ DBG_IPC4_MSG_TYPE_ENTRY(MOD_INIT_INSTANCE),
+ DBG_IPC4_MSG_TYPE_ENTRY(MOD_CONFIG_GET),
+ DBG_IPC4_MSG_TYPE_ENTRY(MOD_CONFIG_SET),
+ DBG_IPC4_MSG_TYPE_ENTRY(MOD_LARGE_CONFIG_GET),
+ DBG_IPC4_MSG_TYPE_ENTRY(MOD_LARGE_CONFIG_SET),
+ DBG_IPC4_MSG_TYPE_ENTRY(MOD_BIND),
+ DBG_IPC4_MSG_TYPE_ENTRY(MOD_UNBIND),
+ DBG_IPC4_MSG_TYPE_ENTRY(MOD_SET_DX),
+ DBG_IPC4_MSG_TYPE_ENTRY(MOD_SET_D0IX),
+ DBG_IPC4_MSG_TYPE_ENTRY(MOD_ENTER_MODULE_RESTORE),
+ DBG_IPC4_MSG_TYPE_ENTRY(MOD_EXIT_MODULE_RESTORE),
+ DBG_IPC4_MSG_TYPE_ENTRY(MOD_DELETE_INSTANCE),
+};
+
+static const char * const ipc4_dbg_glb_msg_type[] = {
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_BOOT_CONFIG),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_ROM_CONTROL),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_IPCGATEWAY_CMD),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_PERF_MEASUREMENTS_CMD),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_CHAIN_DMA),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_LOAD_MULTIPLE_MODULES),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_UNLOAD_MULTIPLE_MODULES),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_CREATE_PIPELINE),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_DELETE_PIPELINE),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_SET_PIPELINE_STATE),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_GET_PIPELINE_STATE),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_GET_PIPELINE_CONTEXT_SIZE),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_SAVE_PIPELINE),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_RESTORE_PIPELINE),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_LOAD_LIBRARY),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_INTERNAL_MESSAGE),
+ DBG_IPC4_MSG_TYPE_ENTRY(GLB_NOTIFICATION),
+};
+
+#define DBG_IPC4_NOTIFICATION_TYPE_ENTRY(type) [SOF_IPC4_NOTIFY_##type] = #type
+static const char * const ipc4_dbg_notification_type[] = {
+ DBG_IPC4_NOTIFICATION_TYPE_ENTRY(PHRASE_DETECTED),
+ DBG_IPC4_NOTIFICATION_TYPE_ENTRY(RESOURCE_EVENT),
+ DBG_IPC4_NOTIFICATION_TYPE_ENTRY(LOG_BUFFER_STATUS),
+ DBG_IPC4_NOTIFICATION_TYPE_ENTRY(TIMESTAMP_CAPTURED),
+ DBG_IPC4_NOTIFICATION_TYPE_ENTRY(FW_READY),
+ DBG_IPC4_NOTIFICATION_TYPE_ENTRY(FW_AUD_CLASS_RESULT),
+ DBG_IPC4_NOTIFICATION_TYPE_ENTRY(EXCEPTION_CAUGHT),
+ DBG_IPC4_NOTIFICATION_TYPE_ENTRY(MODULE_NOTIFICATION),
+ DBG_IPC4_NOTIFICATION_TYPE_ENTRY(PROBE_DATA_AVAILABLE),
+ DBG_IPC4_NOTIFICATION_TYPE_ENTRY(ASYNC_MSG_SRVC_MESSAGE),
+};
+
+static void sof_ipc4_log_header(struct device *dev, u8 *text, struct sof_ipc4_msg *msg,
+ bool data_size_valid)
+{
+ u32 val, type;
+ const u8 *str2 = NULL;
+ const u8 *str = NULL;
+
+ val = msg->primary & SOF_IPC4_MSG_TARGET_MASK;
+ type = SOF_IPC4_MSG_TYPE_GET(msg->primary);
+
+ if (val == SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG)) {
+ /* Module message */
+ if (type < SOF_IPC4_MOD_TYPE_LAST)
+ str = ipc4_dbg_mod_msg_type[type];
+ if (!str)
+ str = "Unknown Module message type";
+ } else {
+ /* Global FW message */
+ if (type < SOF_IPC4_GLB_TYPE_LAST)
+ str = ipc4_dbg_glb_msg_type[type];
+ if (!str)
+ str = "Unknown Global message type";
+
+ if (type == SOF_IPC4_GLB_NOTIFICATION) {
+ /* Notification message */
+ u32 notif = SOF_IPC4_NOTIFICATION_TYPE_GET(msg->primary);
+
+ /* Do not print log buffer notification if not desired */
+ if (notif == SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS &&
+ !sof_debug_check_flag(SOF_DBG_PRINT_DMA_POSITION_UPDATE_LOGS))
+ return;
+
+ if (notif < SOF_IPC4_NOTIFY_TYPE_LAST)
+ str2 = ipc4_dbg_notification_type[notif];
+ if (!str2)
+ str2 = "Unknown Global notification";
+ }
+ }
+
+ if (str2) {
+ if (data_size_valid && msg->data_size)
+ dev_dbg(dev, "%s: %#x|%#x: %s|%s [data size: %zu]\n",
+ text, msg->primary, msg->extension, str, str2,
+ msg->data_size);
+ else
+ dev_dbg(dev, "%s: %#x|%#x: %s|%s\n", text, msg->primary,
+ msg->extension, str, str2);
+ } else {
+ if (data_size_valid && msg->data_size)
+ dev_dbg(dev, "%s: %#x|%#x: %s [data size: %zu]\n",
+ text, msg->primary, msg->extension, str,
+ msg->data_size);
+ else
+ dev_dbg(dev, "%s: %#x|%#x: %s\n", text, msg->primary,
+ msg->extension, str);
+ }
+}
+#else /* CONFIG_SND_SOC_SOF_DEBUG_VERBOSE_IPC */
+static void sof_ipc4_log_header(struct device *dev, u8 *text, struct sof_ipc4_msg *msg,
+ bool data_size_valid)
+{
+ /* Do not print log buffer notification if not desired */
+ if (!sof_debug_check_flag(SOF_DBG_PRINT_DMA_POSITION_UPDATE_LOGS) &&
+ !SOF_IPC4_MSG_IS_MODULE_MSG(msg->primary) &&
+ SOF_IPC4_MSG_TYPE_GET(msg->primary) == SOF_IPC4_GLB_NOTIFICATION &&
+ SOF_IPC4_NOTIFICATION_TYPE_GET(msg->primary) == SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS)
+ return;
+
+ if (data_size_valid && msg->data_size)
+ dev_dbg(dev, "%s: %#x|%#x [data size: %zu]\n", text,
+ msg->primary, msg->extension, msg->data_size);
+ else
+ dev_dbg(dev, "%s: %#x|%#x\n", text, msg->primary, msg->extension);
+}
+#endif
+
+static void sof_ipc4_dump_payload(struct snd_sof_dev *sdev,
+ void *ipc_data, size_t size)
+{
+ print_hex_dump_debug("Message payload: ", DUMP_PREFIX_OFFSET,
+ 16, 4, ipc_data, size, false);
+}
+
+static int sof_ipc4_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc4_msg *ipc4_reply;
+ int ret;
+
+ /* get the generic reply */
+ ipc4_reply = msg->reply_data;
+
+ sof_ipc4_log_header(sdev->dev, "ipc tx reply", ipc4_reply, false);
+
+ ret = sof_ipc4_check_reply_status(sdev, ipc4_reply->primary);
+ if (ret)
+ return ret;
+
+	/* No other information is expected for replies other than LARGE_CONFIG_GET */
+ if (!msg->reply_size || !SOF_IPC4_MSG_IS_MODULE_MSG(ipc4_reply->primary) ||
+ (SOF_IPC4_MSG_TYPE_GET(ipc4_reply->primary) != SOF_IPC4_MOD_LARGE_CONFIG_GET))
+ return 0;
+
+ /* Read the requested payload */
+ snd_sof_dsp_mailbox_read(sdev, sdev->dsp_box.offset, ipc4_reply->data_ptr,
+ msg->reply_size);
+
+ return 0;
+}
+
+/* wait for IPC message reply */
+static int ipc4_wait_tx_done(struct snd_sof_ipc *ipc, void *reply_data)
+{
+ struct snd_sof_ipc_msg *msg = &ipc->msg;
+ struct sof_ipc4_msg *ipc4_msg = msg->msg_data;
+ struct snd_sof_dev *sdev = ipc->sdev;
+ int ret;
+
+ /* wait for DSP IPC completion */
+ ret = wait_event_timeout(msg->waitq, msg->ipc_complete,
+ msecs_to_jiffies(sdev->ipc_timeout));
+ if (ret == 0) {
+ dev_err(sdev->dev, "ipc timed out for %#x|%#x\n",
+ ipc4_msg->primary, ipc4_msg->extension);
+ snd_sof_handle_fw_exception(ipc->sdev, "IPC timeout");
+ return -ETIMEDOUT;
+ }
+
+ if (msg->reply_error) {
+ dev_err(sdev->dev, "ipc error for msg %#x|%#x\n",
+ ipc4_msg->primary, ipc4_msg->extension);
+ ret = msg->reply_error;
+ } else {
+ if (reply_data) {
+ struct sof_ipc4_msg *ipc4_reply = msg->reply_data;
+ struct sof_ipc4_msg *ipc4_reply_data = reply_data;
+
+ /* Copy the header */
+ ipc4_reply_data->header_u64 = ipc4_reply->header_u64;
+ if (msg->reply_size && ipc4_reply_data->data_ptr) {
+ /* copy the payload returned from DSP */
+ memcpy(ipc4_reply_data->data_ptr, ipc4_reply->data_ptr,
+ msg->reply_size);
+ ipc4_reply_data->data_size = msg->reply_size;
+ }
+ }
+
+ ret = 0;
+ sof_ipc4_log_header(sdev->dev, "ipc tx done ", ipc4_msg, true);
+ }
+
+ /* re-enable dumps after successful IPC tx */
+ if (sdev->ipc_dump_printed) {
+ sdev->dbg_dump_printed = false;
+ sdev->ipc_dump_printed = false;
+ }
+
+ return ret;
+}
+
+static int ipc4_tx_msg_unlocked(struct snd_sof_ipc *ipc,
+ void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes)
+{
+ struct sof_ipc4_msg *ipc4_msg = msg_data;
+ struct snd_sof_dev *sdev = ipc->sdev;
+ int ret;
+
+ if (msg_bytes > ipc->max_payload_size || reply_bytes > ipc->max_payload_size)
+ return -EINVAL;
+
+ sof_ipc4_log_header(sdev->dev, "ipc tx ", msg_data, true);
+
+ ret = sof_ipc_send_msg(sdev, msg_data, msg_bytes, reply_bytes);
+ if (ret) {
+ dev_err_ratelimited(sdev->dev,
+ "%s: ipc message send for %#x|%#x failed: %d\n",
+ __func__, ipc4_msg->primary, ipc4_msg->extension, ret);
+ return ret;
+ }
+
+ /* now wait for completion */
+ return ipc4_wait_tx_done(ipc, reply_data);
+}
+
+static int sof_ipc4_tx_msg(struct snd_sof_dev *sdev, void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes, bool no_pm)
+{
+ struct snd_sof_ipc *ipc = sdev->ipc;
+ int ret;
+
+ if (!msg_data)
+ return -EINVAL;
+
+ if (!no_pm) {
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+
+ /* ensure the DSP is in D0i0 before sending a new IPC */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_state);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Serialise IPC TX */
+ mutex_lock(&ipc->tx_mutex);
+
+ ret = ipc4_tx_msg_unlocked(ipc, msg_data, msg_bytes, reply_data, reply_bytes);
+
+ if (sof_debug_check_flag(SOF_DBG_DUMP_IPC_MESSAGE_PAYLOAD)) {
+ struct sof_ipc4_msg *msg = NULL;
+
+		/* payload is indicated by non-zero msg/reply_bytes */
+ if (msg_bytes)
+ msg = msg_data;
+ else if (reply_bytes)
+ msg = reply_data;
+
+ if (msg)
+ sof_ipc4_dump_payload(sdev, msg->data_ptr, msg->data_size);
+ }
+
+ mutex_unlock(&ipc->tx_mutex);
+
+ return ret;
+}
+
+static int sof_ipc4_set_get_data(struct snd_sof_dev *sdev, void *data,
+ size_t payload_bytes, bool set)
+{
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+ size_t payload_limit = sdev->ipc->max_payload_size;
+ struct sof_ipc4_msg *ipc4_msg = data;
+ struct sof_ipc4_msg tx = {{ 0 }};
+ struct sof_ipc4_msg rx = {{ 0 }};
+ size_t remaining = payload_bytes;
+ size_t offset = 0;
+ size_t chunk_size;
+ int ret;
+
+ if (!data)
+ return -EINVAL;
+
+ if ((ipc4_msg->primary & SOF_IPC4_MSG_TARGET_MASK) !=
+ SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG))
+ return -EINVAL;
+
+ ipc4_msg->primary &= ~SOF_IPC4_MSG_TYPE_MASK;
+ tx.primary = ipc4_msg->primary;
+ tx.extension = ipc4_msg->extension;
+
+ if (set)
+ tx.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_LARGE_CONFIG_SET);
+ else
+ tx.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_LARGE_CONFIG_GET);
+
+ tx.extension &= ~SOF_IPC4_MOD_EXT_MSG_SIZE_MASK;
+ tx.extension |= SOF_IPC4_MOD_EXT_MSG_SIZE(payload_bytes);
+
+ tx.extension |= SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK(1);
+
+ /* ensure the DSP is in D0i0 before sending IPC */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_state);
+ if (ret < 0)
+ return ret;
+
+ /* Serialise IPC TX */
+ mutex_lock(&sdev->ipc->tx_mutex);
+
+ do {
+ size_t tx_size, rx_size;
+
+ if (remaining > payload_limit) {
+ chunk_size = payload_limit;
+ } else {
+ chunk_size = remaining;
+ if (set)
+ tx.extension |= SOF_IPC4_MOD_EXT_MSG_LAST_BLOCK(1);
+ }
+
+ if (offset) {
+ tx.extension &= ~SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK_MASK;
+ tx.extension &= ~SOF_IPC4_MOD_EXT_MSG_SIZE_MASK;
+ tx.extension |= SOF_IPC4_MOD_EXT_MSG_SIZE(offset);
+ }
+
+ if (set) {
+ tx.data_size = chunk_size;
+ tx.data_ptr = ipc4_msg->data_ptr + offset;
+
+ tx_size = chunk_size;
+ rx_size = 0;
+ } else {
+ rx.primary = 0;
+ rx.extension = 0;
+ rx.data_size = chunk_size;
+ rx.data_ptr = ipc4_msg->data_ptr + offset;
+
+ tx_size = 0;
+ rx_size = chunk_size;
+ }
+
+ /* Send the message for the current chunk */
+ ret = ipc4_tx_msg_unlocked(sdev->ipc, &tx, tx_size, &rx, rx_size);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "%s: large config %s failed at offset %zu: %d\n",
+ __func__, set ? "set" : "get", offset, ret);
+ goto out;
+ }
+
+ if (!set && rx.extension & SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK_MASK) {
+ /* Verify the firmware reported total payload size */
+ rx_size = rx.extension & SOF_IPC4_MOD_EXT_MSG_SIZE_MASK;
+
+ if (rx_size > payload_bytes) {
+ dev_err(sdev->dev,
+ "%s: Receive buffer (%zu) is too small for %zu\n",
+ __func__, payload_bytes, rx_size);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (rx_size < chunk_size) {
+ chunk_size = rx_size;
+ remaining = rx_size;
+ } else if (rx_size < payload_bytes) {
+ remaining = rx_size;
+ }
+ }
+
+ offset += chunk_size;
+ remaining -= chunk_size;
+ } while (remaining);
+
+ /* Adjust the received data size if needed */
+ if (!set && payload_bytes != offset)
+ ipc4_msg->data_size = offset;
+
+ if (sof_debug_check_flag(SOF_DBG_DUMP_IPC_MESSAGE_PAYLOAD))
+ sof_ipc4_dump_payload(sdev, ipc4_msg->data_ptr, ipc4_msg->data_size);
+
+out:
+ mutex_unlock(&sdev->ipc->tx_mutex);
+
+ return ret;
+}
+
+static int sof_ipc4_init_msg_memory(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_msg *ipc4_msg;
+ struct snd_sof_ipc_msg *msg = &sdev->ipc->msg;
+
+ /* TODO: get max_payload_size from firmware */
+ sdev->ipc->max_payload_size = SOF_IPC4_MSG_MAX_SIZE;
+
+ /* Allocate memory for the ipc4 container and the maximum payload */
+ msg->reply_data = devm_kzalloc(sdev->dev, sdev->ipc->max_payload_size +
+ sizeof(struct sof_ipc4_msg), GFP_KERNEL);
+ if (!msg->reply_data)
+ return -ENOMEM;
+
+ ipc4_msg = msg->reply_data;
+ ipc4_msg->data_ptr = msg->reply_data + sizeof(struct sof_ipc4_msg);
+
+ return 0;
+}
+
+static int ipc4_fw_ready(struct snd_sof_dev *sdev, struct sof_ipc4_msg *ipc4_msg)
+{
+ int inbox_offset, inbox_size, outbox_offset, outbox_size;
+
+ /* no need to re-check version/ABI for subsequent boots */
+ if (!sdev->first_boot)
+ return 0;
+
+ /* Set up the windows for IPC communication */
+ inbox_offset = snd_sof_dsp_get_mailbox_offset(sdev);
+ if (inbox_offset < 0) {
+ dev_err(sdev->dev, "%s: No mailbox offset\n", __func__);
+ return inbox_offset;
+ }
+ inbox_size = SOF_IPC4_MSG_MAX_SIZE;
+ outbox_offset = snd_sof_dsp_get_window_offset(sdev, SOF_IPC4_OUTBOX_WINDOW_IDX);
+ outbox_size = SOF_IPC4_MSG_MAX_SIZE;
+
+ sdev->fw_info_box.offset = snd_sof_dsp_get_window_offset(sdev, SOF_IPC4_INBOX_WINDOW_IDX);
+ sdev->fw_info_box.size = sizeof(struct sof_ipc4_fw_registers);
+ sdev->dsp_box.offset = inbox_offset;
+ sdev->dsp_box.size = inbox_size;
+ sdev->host_box.offset = outbox_offset;
+ sdev->host_box.size = outbox_size;
+
+ sdev->debug_box.offset = snd_sof_dsp_get_window_offset(sdev,
+ SOF_IPC4_DEBUG_WINDOW_IDX);
+
+ dev_dbg(sdev->dev, "mailbox upstream 0x%x - size 0x%x\n",
+ inbox_offset, inbox_size);
+ dev_dbg(sdev->dev, "mailbox downstream 0x%x - size 0x%x\n",
+ outbox_offset, outbox_size);
+ dev_dbg(sdev->dev, "debug box 0x%x\n", sdev->debug_box.offset);
+
+ return sof_ipc4_init_msg_memory(sdev);
+}
+
+static void sof_ipc4_rx_msg(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_msg *ipc4_msg = sdev->ipc->msg.rx_data;
+ size_t data_size = 0;
+ int err;
+
+ if (!ipc4_msg || !SOF_IPC4_MSG_IS_NOTIFICATION(ipc4_msg->primary))
+ return;
+
+ ipc4_msg->data_ptr = NULL;
+ ipc4_msg->data_size = 0;
+
+ sof_ipc4_log_header(sdev->dev, "ipc rx ", ipc4_msg, false);
+
+ switch (SOF_IPC4_NOTIFICATION_TYPE_GET(ipc4_msg->primary)) {
+ case SOF_IPC4_NOTIFY_FW_READY:
+ /* check for FW boot completion */
+ if (sdev->fw_state == SOF_FW_BOOT_IN_PROGRESS) {
+ err = ipc4_fw_ready(sdev, ipc4_msg);
+ if (err < 0)
+ sof_set_fw_state(sdev, SOF_FW_BOOT_READY_FAILED);
+ else
+ sof_set_fw_state(sdev, SOF_FW_BOOT_READY_OK);
+
+ /* wake up firmware loader */
+ wake_up(&sdev->boot_wait);
+ }
+
+ break;
+ case SOF_IPC4_NOTIFY_RESOURCE_EVENT:
+ data_size = sizeof(struct sof_ipc4_notify_resource_data);
+ break;
+ case SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS:
+ sof_ipc4_mtrace_update_pos(sdev, SOF_IPC4_LOG_CORE_GET(ipc4_msg->primary));
+ break;
+ case SOF_IPC4_NOTIFY_EXCEPTION_CAUGHT:
+ snd_sof_dsp_panic(sdev, 0, true);
+ break;
+ default:
+ dev_dbg(sdev->dev, "Unhandled DSP message: %#x|%#x\n",
+ ipc4_msg->primary, ipc4_msg->extension);
+ break;
+ }
+
+ if (data_size) {
+ ipc4_msg->data_ptr = kmalloc(data_size, GFP_KERNEL);
+ if (!ipc4_msg->data_ptr)
+ return;
+
+ ipc4_msg->data_size = data_size;
+ snd_sof_ipc_msg_data(sdev, NULL, ipc4_msg->data_ptr, ipc4_msg->data_size);
+ }
+
+ sof_ipc4_log_header(sdev->dev, "ipc rx done ", ipc4_msg, true);
+
+ if (data_size) {
+ kfree(ipc4_msg->data_ptr);
+ ipc4_msg->data_ptr = NULL;
+ ipc4_msg->data_size = 0;
+ }
+}
+
+static int sof_ipc4_set_core_state(struct snd_sof_dev *sdev, int core_idx, bool on)
+{
+ struct sof_ipc4_dx_state_info dx_state;
+ struct sof_ipc4_msg msg;
+
+ dx_state.core_mask = BIT(core_idx);
+ if (on)
+ dx_state.dx_mask = BIT(core_idx);
+ else
+ dx_state.dx_mask = 0;
+
+ msg.primary = SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_SET_DX);
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+ msg.extension = 0;
+ msg.data_ptr = &dx_state;
+ msg.data_size = sizeof(dx_state);
+
+ return sof_ipc4_tx_msg(sdev, &msg, msg.data_size, NULL, 0, false);
+}
+
+/*
+ * The context save callback sends a message notifying the firmware that the
+ * primary core is about to be turned off. The firmware uses this as an
+ * indication to prepare for a full power down and, when supported, for a
+ * subsequent IMR boot.
+ *
+ * Note: IPC4 has no message to restore context, so no context restore
+ * callback is implemented.
+ */
+static int sof_ipc4_ctx_save(struct snd_sof_dev *sdev)
+{
+ return sof_ipc4_set_core_state(sdev, SOF_DSP_PRIMARY_CORE, false);
+}
+
+static int sof_ipc4_set_pm_gate(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc4_msg msg = {{0}};
+
+ msg.primary = SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_SET_D0IX);
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+ msg.extension = flags;
+
+ return sof_ipc4_tx_msg(sdev, &msg, 0, NULL, 0, true);
+}
+
+static const struct sof_ipc_pm_ops ipc4_pm_ops = {
+ .ctx_save = sof_ipc4_ctx_save,
+ .set_core_state = sof_ipc4_set_core_state,
+ .set_pm_gate = sof_ipc4_set_pm_gate,
+};
+
+static int sof_ipc4_init(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+
+ mutex_init(&ipc4_data->pipeline_state_mutex);
+
+ xa_init_flags(&ipc4_data->fw_lib_xa, XA_FLAGS_ALLOC);
+
+ return 0;
+}
+
+static void sof_ipc4_exit(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct sof_ipc4_fw_library *fw_lib;
+ unsigned long lib_id;
+
+ xa_for_each(&ipc4_data->fw_lib_xa, lib_id, fw_lib) {
+ /*
+ * The basefw (ID == 0) is handled by generic code, it is not
+ * loaded by IPC4 code.
+ */
+ if (lib_id != 0)
+ release_firmware(fw_lib->sof_fw.fw);
+
+ fw_lib->sof_fw.fw = NULL;
+ }
+
+ xa_destroy(&ipc4_data->fw_lib_xa);
+}
+
+static int sof_ipc4_post_boot(struct snd_sof_dev *sdev)
+{
+ if (sdev->first_boot)
+ return sof_ipc4_query_fw_configuration(sdev);
+
+ return sof_ipc4_reload_fw_libraries(sdev);
+}
+
+const struct sof_ipc_ops ipc4_ops = {
+ .init = sof_ipc4_init,
+ .exit = sof_ipc4_exit,
+ .post_fw_boot = sof_ipc4_post_boot,
+ .tx_msg = sof_ipc4_tx_msg,
+ .rx_msg = sof_ipc4_rx_msg,
+ .set_get_data = sof_ipc4_set_get_data,
+ .get_reply = sof_ipc4_get_reply,
+ .pm = &ipc4_pm_ops,
+ .fw_loader = &ipc4_loader_ops,
+ .tplg = &ipc4_tplg_ops,
+ .pcm = &ipc4_pcm_ops,
+ .fw_tracing = &ipc4_mtrace_ops,
+};
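
For readers tracing the TX path above (sof_ipc4_tx_msg -> ipc4_tx_msg_unlocked -> ipc4_wait_tx_done), here is a minimal, hypothetical sketch of how client code built on the generic IPC layer could send an IPC4 request; it is not part of this patch. It assumes the generic sof_ipc_tx_message() helper, which dispatches to the .tx_msg op registered in ipc4_ops above, and the example_* name is invented for illustration.

#include <sound/sof/ipc4/header.h>
#include "sof-priv.h"

/*
 * Illustrative only: a real request would also encode the message type and
 * the module/instance IDs in the primary/extension words.
 */
static int example_send_module_request(struct snd_sof_dev *sdev,
				       void *payload, size_t bytes)
{
	struct sof_ipc4_msg msg = {{ 0 }};

	msg.primary = SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
	msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
	msg.data_ptr = payload;
	msg.data_size = bytes;

	/* serialised, power-managed TX; no reply payload expected */
	return sof_ipc_tx_message(sdev->ipc, &msg, msg.data_size, NULL, 0);
}
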
diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
new file mode 100644
index 000000000..2f8555f11
--- /dev/null
+++ b/sound/soc/sof/loader.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+// Generic firmware loader.
+//
+
+#include <linux/firmware.h>
+#include "sof-priv.h"
+#include "ops.h"
+
+int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const char *fw_filename;
+ ssize_t ext_man_size;
+ int ret;
+
+ /* Don't request firmware again if firmware is already requested */
+ if (sdev->basefw.fw)
+ return 0;
+
+ fw_filename = kasprintf(GFP_KERNEL, "%s/%s",
+ plat_data->fw_filename_prefix,
+ plat_data->fw_filename);
+ if (!fw_filename)
+ return -ENOMEM;
+
+ ret = request_firmware(&sdev->basefw.fw, fw_filename, sdev->dev);
+
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: sof firmware file is missing, you might need to\n");
+ dev_err(sdev->dev,
+ " download it from https://github.com/thesofproject/sof-bin/\n");
+ goto err;
+ } else {
+ dev_dbg(sdev->dev, "request_firmware %s successful\n",
+ fw_filename);
+ }
+
+ /* check for extended manifest */
+ ext_man_size = sdev->ipc->ops->fw_loader->parse_ext_manifest(sdev);
+ if (ext_man_size > 0) {
+ /* extended manifest found: set the payload offset so it is skipped during load */
+ sdev->basefw.payload_offset = ext_man_size;
+ } else if (!ext_man_size) {
+ /* No extended manifest, so nothing to skip during FW load */
+ dev_dbg(sdev->dev, "firmware doesn't contain extended manifest\n");
+ } else {
+ ret = ext_man_size;
+ dev_err(sdev->dev, "error: firmware %s contains unsupported or invalid extended manifest: %d\n",
+ fw_filename, ret);
+ }
+
+err:
+ kfree(fw_filename);
+
+ return ret;
+}
+EXPORT_SYMBOL(snd_sof_load_firmware_raw);
+
+int snd_sof_load_firmware_memcpy(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ ret = snd_sof_load_firmware_raw(sdev);
+ if (ret < 0)
+ return ret;
+
+ /* make sure the FW header and file are valid */
+ ret = sdev->ipc->ops->fw_loader->validate(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: invalid FW header\n");
+ goto error;
+ }
+
+ /* prepare the DSP for FW loading */
+ ret = snd_sof_dsp_reset(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to reset DSP\n");
+ goto error;
+ }
+
+ /* parse and load firmware modules to DSP */
+ if (sdev->ipc->ops->fw_loader->load_fw_to_dsp) {
+ ret = sdev->ipc->ops->fw_loader->load_fw_to_dsp(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Firmware loading failed\n");
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ release_firmware(sdev->basefw.fw);
+ sdev->basefw.fw = NULL;
+ return ret;
+
+}
+EXPORT_SYMBOL(snd_sof_load_firmware_memcpy);
+
+int snd_sof_run_firmware(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ init_waitqueue_head(&sdev->boot_wait);
+
+ /* (re-)enable dsp dump */
+ sdev->dbg_dump_printed = false;
+ sdev->ipc_dump_printed = false;
+
+ /* create read-only fw_version debugfs to store boot version info */
+ if (sdev->first_boot) {
+ ret = snd_sof_debugfs_buf_item(sdev, &sdev->fw_version,
+ sizeof(sdev->fw_version),
+ "fw_version", 0444);
+ /* errors are only due to memory allocation, not debugfs */
+ if (ret < 0) {
+ dev_err(sdev->dev, "snd_sof_debugfs_buf_item failed\n");
+ return ret;
+ }
+ }
+
+ /* perform pre fw run operations */
+ ret = snd_sof_dsp_pre_fw_run(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed pre fw run op\n");
+ return ret;
+ }
+
+ dev_dbg(sdev->dev, "booting DSP firmware\n");
+
+ /* boot the firmware on the DSP */
+ ret = snd_sof_dsp_run(sdev);
+ if (ret < 0) {
+ snd_sof_dsp_dbg_dump(sdev, "Failed to start DSP",
+ SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_PCI);
+ return ret;
+ }
+
+ /*
+ * now wait for the DSP to boot. There are 3 possible outcomes:
+ * 1. Boot wait times out indicating FW boot failure.
+ * 2. FW boots successfully and fw_ready op succeeds.
+ * 3. FW boots but fw_ready op fails.
+ */
+ ret = wait_event_timeout(sdev->boot_wait,
+ sdev->fw_state > SOF_FW_BOOT_IN_PROGRESS,
+ msecs_to_jiffies(sdev->boot_timeout));
+ if (ret == 0) {
+ snd_sof_dsp_dbg_dump(sdev, "Firmware boot failure due to timeout",
+ SOF_DBG_DUMP_REGS | SOF_DBG_DUMP_MBOX |
+ SOF_DBG_DUMP_TEXT | SOF_DBG_DUMP_PCI);
+ return -EIO;
+ }
+
+ if (sdev->fw_state == SOF_FW_BOOT_READY_FAILED)
+ return -EIO; /* FW boots but fw_ready op failed */
+
+ dev_dbg(sdev->dev, "firmware boot complete\n");
+ sof_set_fw_state(sdev, SOF_FW_BOOT_COMPLETE);
+
+ /* perform post fw run operations */
+ ret = snd_sof_dsp_post_fw_run(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed post fw run op\n");
+ return ret;
+ }
+
+ if (sdev->ipc->ops->post_fw_boot)
+ return sdev->ipc->ops->post_fw_boot(sdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_run_firmware);
+
+void snd_sof_fw_unload(struct snd_sof_dev *sdev)
+{
+ /* TODO: support module unloading at runtime */
+ release_firmware(sdev->basefw.fw);
+ sdev->basefw.fw = NULL;
+}
+EXPORT_SYMBOL(snd_sof_fw_unload);
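
The helpers exported above are not normally called directly by platform code; a platform driver plugs one of them into its .load_firmware op (as the MediaTek drivers later in this patch do with snd_sof_load_firmware_memcpy), and the SOF core invokes it during probe before booting the DSP with snd_sof_run_firmware(). A minimal sketch, with an invented example_dsp_ops name:

#include "sof-priv.h"

/* Illustrative only: wire the generic memcpy loader into a platform's ops */
static struct snd_sof_dsp_ops example_dsp_ops = {
	/* ... probe/run/IPC callbacks elided ... */
	.load_firmware = snd_sof_load_firmware_memcpy,
};
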
diff --git a/sound/soc/sof/mediatek/Kconfig b/sound/soc/sof/mediatek/Kconfig
new file mode 100644
index 000000000..4a2eddf60
--- /dev/null
+++ b/sound/soc/sof/mediatek/Kconfig
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+config SND_SOC_SOF_MTK_TOPLEVEL
+ bool "SOF support for MTK audio DSPs"
+ depends on ARM64 || COMPILE_TEST
+ depends on SND_SOC_SOF_OF
+ help
+ This adds support for Sound Open Firmware for Mediatek platforms.
+ It is the top-level option for all Mediatek platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+if SND_SOC_SOF_MTK_TOPLEVEL
+config SND_SOC_SOF_MTK_COMMON
+ tristate
+ select SND_SOC_SOF_OF_DEV
+ select SND_SOC_SOF
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_XTENSA
+ select SND_SOC_SOF_COMPRESS
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_MT8186
+ tristate "SOF support for MT8186 audio DSP"
+ select SND_SOC_SOF_MTK_COMMON
+ depends on MTK_ADSP_IPC
+ help
+ This adds support for Sound Open Firmware for Mediatek platforms
+ using the mt8186 processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_MT8195
+ tristate "SOF support for MT8195 audio DSP"
+ select SND_SOC_SOF_MTK_COMMON
+ depends on MTK_ADSP_IPC
+ help
+ This adds support for Sound Open Firmware for Mediatek platforms
+ using the mt8195 processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+endif ## SND_SOC_SOF_MTK_TOPLEVEL
diff --git a/sound/soc/sof/mediatek/Makefile b/sound/soc/sof/mediatek/Makefile
new file mode 100644
index 000000000..29c5afb2f
--- /dev/null
+++ b/sound/soc/sof/mediatek/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+obj-$(CONFIG_SND_SOC_SOF_MTK_COMMON) += mtk-adsp-common.o
+obj-$(CONFIG_SND_SOC_SOF_MT8195) += mt8195/
+obj-$(CONFIG_SND_SOC_SOF_MT8186) += mt8186/
diff --git a/sound/soc/sof/mediatek/adsp_helper.h b/sound/soc/sof/mediatek/adsp_helper.h
new file mode 100644
index 000000000..d41e904e6
--- /dev/null
+++ b/sound/soc/sof/mediatek/adsp_helper.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2021 MediaTek Corporation. All rights reserved.
+ */
+
+#ifndef __MTK_ADSP_HELPER_H__
+#define __MTK_ADSP_HELPER_H__
+
+#include <linux/firmware/mediatek/mtk-adsp-ipc.h>
+
+/*
+ * Main ADSP chip data structure shared by the MediaTek SOF platform drivers.
+ */
+struct mtk_adsp_chip_info {
+ phys_addr_t pa_sram;
+ phys_addr_t pa_dram; /* adsp dram physical base */
+ phys_addr_t pa_shared_dram; /* adsp shared dram physical base */
+ phys_addr_t pa_cfgreg;
+ u32 sramsize;
+ u32 dramsize;
+ u32 cfgregsize;
+ u32 shared_size;
+ void __iomem *va_sram; /* corresponding to pa_sram */
+ void __iomem *va_dram; /* corresponding to pa_dram */
+ void __iomem *va_cfgreg;
+ void __iomem *shared_sram; /* part of va_sram */
+ void __iomem *shared_dram; /* part of va_dram */
+ phys_addr_t adsp_bootup_addr;
+ int dram_offset; /* dram offset between system and dsp view */
+
+ phys_addr_t pa_secreg;
+ u32 secregsize;
+ void __iomem *va_secreg;
+
+ phys_addr_t pa_busreg;
+ u32 busregsize;
+ void __iomem *va_busreg;
+};
+
+struct adsp_priv {
+ struct device *dev;
+ struct snd_sof_dev *sdev;
+ struct mtk_adsp_ipc *dsp_ipc;
+ struct platform_device *ipc_dev;
+ struct mtk_adsp_chip_info *adsp;
+ struct clk **clk;
+ u32 (*ap2adsp_addr)(u32 addr, void *data);
+ u32 (*adsp2ap_addr)(u32 addr, void *data);
+
+ void *private_data;
+};
+
+#endif
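
The ap2adsp_addr/adsp2ap_addr callbacks above are left to the platform code. A hypothetical sketch (not part of this header) of how they could translate DRAM addresses between the AP and DSP views, assuming dram_offset holds the AP-view DRAM base minus the DSP-view DRAM base as set up by the platform remap code:

/* Illustrative only: the example_* names are invented */
static u32 example_ap2adsp_addr(u32 addr, void *data)
{
	struct mtk_adsp_chip_info *adsp = data;

	return addr - adsp->dram_offset;	/* AP view -> DSP view */
}

static u32 example_adsp2ap_addr(u32 addr, void *data)
{
	struct mtk_adsp_chip_info *adsp = data;

	return addr + adsp->dram_offset;	/* DSP view -> AP view */
}
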
diff --git a/sound/soc/sof/mediatek/mt8186/Makefile b/sound/soc/sof/mediatek/mt8186/Makefile
new file mode 100644
index 000000000..c1f5fc4e2
--- /dev/null
+++ b/sound/soc/sof/mediatek/mt8186/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+snd-sof-mt8186-objs := mt8186.o mt8186-clk.o mt8186-loader.o
+obj-$(CONFIG_SND_SOC_SOF_MT8186) += snd-sof-mt8186.o
+
diff --git a/sound/soc/sof/mediatek/mt8186/mt8186-clk.c b/sound/soc/sof/mediatek/mt8186/mt8186-clk.c
new file mode 100644
index 000000000..cb2ab5884
--- /dev/null
+++ b/sound/soc/sof/mediatek/mt8186/mt8186-clk.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright(c) 2022 Mediatek Corporation. All rights reserved.
+//
+// Author: Allen-KH Cheng <allen-kh.cheng@mediatek.com>
+// Tinghan Shen <tinghan.shen@mediatek.com>
+//
+// Hardware interface for mt8186 DSP clock
+
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include "../../sof-audio.h"
+#include "../../ops.h"
+#include "../adsp_helper.h"
+#include "mt8186.h"
+#include "mt8186-clk.h"
+
+static const char *adsp_clks[ADSP_CLK_MAX] = {
+ [CLK_TOP_AUDIODSP] = "audiodsp",
+ [CLK_TOP_ADSP_BUS] = "adsp_bus",
+};
+
+int mt8186_adsp_init_clock(struct snd_sof_dev *sdev)
+{
+ struct adsp_priv *priv = sdev->pdata->hw_pdata;
+ struct device *dev = sdev->dev;
+ int i;
+
+ priv->clk = devm_kcalloc(dev, ADSP_CLK_MAX, sizeof(*priv->clk), GFP_KERNEL);
+ if (!priv->clk)
+ return -ENOMEM;
+
+ for (i = 0; i < ADSP_CLK_MAX; i++) {
+ priv->clk[i] = devm_clk_get(dev, adsp_clks[i]);
+
+ if (IS_ERR(priv->clk[i]))
+ return PTR_ERR(priv->clk[i]);
+ }
+
+ return 0;
+}
+
+static int adsp_enable_all_clock(struct snd_sof_dev *sdev)
+{
+ struct adsp_priv *priv = sdev->pdata->hw_pdata;
+ struct device *dev = sdev->dev;
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk[CLK_TOP_AUDIODSP]);
+ if (ret) {
+ dev_err(dev, "%s clk_prepare_enable(audiodsp) fail %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->clk[CLK_TOP_ADSP_BUS]);
+ if (ret) {
+ dev_err(dev, "%s clk_prepare_enable(adsp_bus) fail %d\n",
+ __func__, ret);
+ clk_disable_unprepare(priv->clk[CLK_TOP_AUDIODSP]);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void adsp_disable_all_clock(struct snd_sof_dev *sdev)
+{
+ struct adsp_priv *priv = sdev->pdata->hw_pdata;
+
+ clk_disable_unprepare(priv->clk[CLK_TOP_ADSP_BUS]);
+ clk_disable_unprepare(priv->clk[CLK_TOP_AUDIODSP]);
+}
+
+int mt8186_adsp_clock_on(struct snd_sof_dev *sdev)
+{
+ struct device *dev = sdev->dev;
+ int ret;
+
+ ret = adsp_enable_all_clock(sdev);
+ if (ret) {
+ dev_err(dev, "adsp_enable_all_clock() failed: %d\n", ret);
+ return ret;
+ }
+ snd_sof_dsp_write(sdev, DSP_REG_BAR, ADSP_CK_EN,
+ UART_EN | DMA_EN | TIMER_EN | COREDBG_EN | CORE_CLK_EN);
+ snd_sof_dsp_write(sdev, DSP_REG_BAR, ADSP_UART_CTRL,
+ UART_BCLK_CG | UART_RSTN);
+
+ return 0;
+}
+
+void mt8186_adsp_clock_off(struct snd_sof_dev *sdev)
+{
+ snd_sof_dsp_write(sdev, DSP_REG_BAR, ADSP_CK_EN, 0);
+ snd_sof_dsp_write(sdev, DSP_REG_BAR, ADSP_UART_CTRL, 0);
+ adsp_disable_all_clock(sdev);
+}
+
diff --git a/sound/soc/sof/mediatek/mt8186/mt8186-clk.h b/sound/soc/sof/mediatek/mt8186/mt8186-clk.h
new file mode 100644
index 000000000..89c23caf0
--- /dev/null
+++ b/sound/soc/sof/mediatek/mt8186/mt8186-clk.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+/*
+ * Copyright (c) 2022 MediaTek Corporation. All rights reserved.
+ *
+ * Header file for the mt8186 DSP clock definition
+ */
+
+#ifndef __MT8186_CLK_H
+#define __MT8186_CLK_H
+
+struct snd_sof_dev;
+
+/* DSP clock */
+enum adsp_clk_id {
+ CLK_TOP_AUDIODSP,
+ CLK_TOP_ADSP_BUS,
+ ADSP_CLK_MAX
+};
+
+int mt8186_adsp_init_clock(struct snd_sof_dev *sdev);
+int mt8186_adsp_clock_on(struct snd_sof_dev *sdev);
+void mt8186_adsp_clock_off(struct snd_sof_dev *sdev);
+#endif
diff --git a/sound/soc/sof/mediatek/mt8186/mt8186-loader.c b/sound/soc/sof/mediatek/mt8186/mt8186-loader.c
new file mode 100644
index 000000000..946e6c432
--- /dev/null
+++ b/sound/soc/sof/mediatek/mt8186/mt8186-loader.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright (c) 2022 Mediatek Corporation. All rights reserved.
+//
+// Author: Allen-KH Cheng <allen-kh.cheng@mediatek.com>
+// Tinghan Shen <tinghan.shen@mediatek.com>
+//
+// Hardware interface for mt8186 DSP code loader
+
+#include <sound/sof.h>
+#include "mt8186.h"
+#include "../../ops.h"
+
+void mt8186_sof_hifixdsp_boot_sequence(struct snd_sof_dev *sdev, u32 boot_addr)
+{
+ /* set RUNSTALL to stop core */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_HIFI_IO_CONFIG,
+ RUNSTALL, RUNSTALL);
+
+ /* enable mbox 0 & 1 IRQ */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_MBOX_IRQ_EN,
+ DSP_MBOX0_IRQ_EN | DSP_MBOX1_IRQ_EN,
+ DSP_MBOX0_IRQ_EN | DSP_MBOX1_IRQ_EN);
+
+ /* set core boot address */
+ snd_sof_dsp_write(sdev, DSP_SECREG_BAR, ADSP_ALTVEC_C0, boot_addr);
+ snd_sof_dsp_write(sdev, DSP_SECREG_BAR, ADSP_ALTVECSEL, ADSP_ALTVECSEL_C0);
+
+ /* assert core reset */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_CFGREG_SW_RSTN,
+ SW_RSTN_C0 | SW_DBG_RSTN_C0,
+ SW_RSTN_C0 | SW_DBG_RSTN_C0);
+
+ /* hardware requirement */
+ udelay(1);
+
+ /* release core reset */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_CFGREG_SW_RSTN,
+ SW_RSTN_C0 | SW_DBG_RSTN_C0,
+ 0);
+
+ /* clear RUNSTALL (bit31) to start core */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_HIFI_IO_CONFIG,
+ RUNSTALL, 0);
+}
+
+void mt8186_sof_hifixdsp_shutdown(struct snd_sof_dev *sdev)
+{
+ /* set RUNSTALL to stop core */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_HIFI_IO_CONFIG,
+ RUNSTALL, RUNSTALL);
+
+ /* assert core reset */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_CFGREG_SW_RSTN,
+ SW_RSTN_C0 | SW_DBG_RSTN_C0,
+ SW_RSTN_C0 | SW_DBG_RSTN_C0);
+}
+
diff --git a/sound/soc/sof/mediatek/mt8186/mt8186.c b/sound/soc/sof/mediatek/mt8186/mt8186.c
new file mode 100644
index 000000000..f587edf9e
--- /dev/null
+++ b/sound/soc/sof/mediatek/mt8186/mt8186.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright(c) 2022 Mediatek Inc. All rights reserved.
+//
+// Author: Allen-KH Cheng <allen-kh.cheng@mediatek.com>
+// Tinghan Shen <tinghan.shen@mediatek.com>
+
+/*
+ * Hardware interface for audio DSP on mt8186
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/module.h>
+
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include "../../ops.h"
+#include "../../sof-of-dev.h"
+#include "../../sof-audio.h"
+#include "../adsp_helper.h"
+#include "../mtk-adsp-common.h"
+#include "mt8186.h"
+#include "mt8186-clk.h"
+
+static int mt8186_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+
+static int mt8186_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+
+static int mt8186_send_msg(struct snd_sof_dev *sdev,
+ struct snd_sof_ipc_msg *msg)
+{
+ struct adsp_priv *priv = sdev->pdata->hw_pdata;
+
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+
+ return mtk_adsp_ipc_send(priv->dsp_ipc, MTK_ADSP_IPC_REQ, MTK_ADSP_IPC_OP_REQ);
+}
+
+static void mt8186_dsp_handle_reply(struct mtk_adsp_ipc *ipc)
+{
+ struct adsp_priv *priv = mtk_adsp_ipc_get_data(ipc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sdev->ipc_lock, flags);
+ snd_sof_ipc_process_reply(priv->sdev, 0);
+ spin_unlock_irqrestore(&priv->sdev->ipc_lock, flags);
+}
+
+static void mt8186_dsp_handle_request(struct mtk_adsp_ipc *ipc)
+{
+ struct adsp_priv *priv = mtk_adsp_ipc_get_data(ipc);
+ u32 p; /* panic code */
+ int ret;
+
+ /* Read the message from the debug box. */
+ sof_mailbox_read(priv->sdev, priv->sdev->debug_box.offset + 4,
+ &p, sizeof(p));
+
+ /* Check to see if the message is a panic code 0x0dead*** */
+ if ((p & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(priv->sdev, p, true);
+ } else {
+ snd_sof_ipc_msgs_rx(priv->sdev);
+
+ /* tell DSP cmd is done */
+ ret = mtk_adsp_ipc_send(priv->dsp_ipc, MTK_ADSP_IPC_RSP, MTK_ADSP_IPC_OP_RSP);
+ if (ret)
+ dev_err(priv->dev, "failed to send ipc response");
+ }
+}
+
+static struct mtk_adsp_ipc_ops dsp_ops = {
+ .handle_reply = mt8186_dsp_handle_reply,
+ .handle_request = mt8186_dsp_handle_request,
+};
+
+static int platform_parse_resource(struct platform_device *pdev, void *data)
+{
+ struct resource *mmio;
+ struct resource res;
+ struct device_node *mem_region;
+ struct device *dev = &pdev->dev;
+ struct mtk_adsp_chip_info *adsp = data;
+ int ret;
+
+ mem_region = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!mem_region) {
+ dev_err(dev, "no dma memory-region phandle\n");
+ return -ENODEV;
+ }
+
+ ret = of_address_to_resource(mem_region, 0, &res);
+ of_node_put(mem_region);
+ if (ret) {
+ dev_err(dev, "of_address_to_resource dma failed\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "DMA %pR\n", &res);
+
+ adsp->pa_shared_dram = (phys_addr_t)res.start;
+ adsp->shared_size = resource_size(&res);
+ if (adsp->pa_shared_dram & DRAM_REMAP_MASK) {
+ dev_err(dev, "adsp shared dma memory(%#x) is not 4K-aligned\n",
+ (u32)adsp->pa_shared_dram);
+ return -EINVAL;
+ }
+
+ ret = of_reserved_mem_device_init(dev);
+ if (ret) {
+ dev_err(dev, "of_reserved_mem_device_init failed\n");
+ return ret;
+ }
+
+ mem_region = of_parse_phandle(dev->of_node, "memory-region", 1);
+ if (!mem_region) {
+ dev_err(dev, "no memory-region sysmem phandle\n");
+ return -ENODEV;
+ }
+
+ ret = of_address_to_resource(mem_region, 0, &res);
+ of_node_put(mem_region);
+ if (ret) {
+ dev_err(dev, "of_address_to_resource sysmem failed\n");
+ return ret;
+ }
+
+ adsp->pa_dram = (phys_addr_t)res.start;
+ if (adsp->pa_dram & DRAM_REMAP_MASK) {
+ dev_err(dev, "adsp memory(%#x) is not 4K-aligned\n",
+ (u32)adsp->pa_dram);
+ return -EINVAL;
+ }
+
+ adsp->dramsize = resource_size(&res);
+ if (adsp->dramsize < TOTAL_SIZE_SHARED_DRAM_FROM_TAIL) {
+ dev_err(dev, "adsp memory (%#x) is too small for the shared region\n",
+ adsp->dramsize);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "dram pbase=%pa size=%#x\n", &adsp->pa_dram, adsp->dramsize);
+
+ mmio = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ if (!mmio) {
+ dev_err(dev, "no ADSP-CFG register resource\n");
+ return -ENXIO;
+ }
+
+ adsp->va_cfgreg = devm_ioremap_resource(dev, mmio);
+ if (IS_ERR(adsp->va_cfgreg))
+ return PTR_ERR(adsp->va_cfgreg);
+
+ adsp->pa_cfgreg = (phys_addr_t)mmio->start;
+ adsp->cfgregsize = resource_size(mmio);
+
+ dev_dbg(dev, "cfgreg pbase=%pa size=%#x\n", &adsp->pa_cfgreg, adsp->cfgregsize);
+
+ mmio = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ if (!mmio) {
+ dev_err(dev, "no SRAM resource\n");
+ return -ENXIO;
+ }
+
+ adsp->pa_sram = (phys_addr_t)mmio->start;
+ adsp->sramsize = resource_size(mmio);
+
+ dev_dbg(dev, "sram pbase=%pa size=%#x\n", &adsp->pa_sram, adsp->sramsize);
+
+ mmio = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sec");
+ if (!mmio) {
+ dev_err(dev, "no SEC register resource\n");
+ return -ENXIO;
+ }
+
+ adsp->va_secreg = devm_ioremap_resource(dev, mmio);
+ if (IS_ERR(adsp->va_secreg))
+ return PTR_ERR(adsp->va_secreg);
+
+ adsp->pa_secreg = (phys_addr_t)mmio->start;
+ adsp->secregsize = resource_size(mmio);
+
+ dev_dbg(dev, "secreg pbase=%pa size=%#x\n", &adsp->pa_secreg, adsp->secregsize);
+
+ mmio = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bus");
+ if (!mmio) {
+ dev_err(dev, "no BUS register resource\n");
+ return -ENXIO;
+ }
+
+ adsp->va_busreg = devm_ioremap_resource(dev, mmio);
+ if (IS_ERR(adsp->va_busreg))
+ return PTR_ERR(adsp->va_busreg);
+
+ adsp->pa_busreg = (phys_addr_t)mmio->start;
+ adsp->busregsize = resource_size(mmio);
+
+ dev_dbg(dev, "busreg pbase=%pa size=%#x\n", &adsp->pa_busreg, adsp->busregsize);
+
+ return 0;
+}
+
+static void adsp_sram_power_on(struct snd_sof_dev *sdev)
+{
+ snd_sof_dsp_update_bits(sdev, DSP_BUSREG_BAR, ADSP_SRAM_POOL_CON,
+ DSP_SRAM_POOL_PD_MASK, 0);
+}
+
+static void adsp_sram_power_off(struct snd_sof_dev *sdev)
+{
+ snd_sof_dsp_update_bits(sdev, DSP_BUSREG_BAR, ADSP_SRAM_POOL_CON,
+ DSP_SRAM_POOL_PD_MASK, DSP_SRAM_POOL_PD_MASK);
+}
+
+/* Init the basic DSP DRAM address */
+static int adsp_memory_remap_init(struct snd_sof_dev *sdev, struct mtk_adsp_chip_info *adsp)
+{
+ u32 offset;
+
+ offset = adsp->pa_dram - DRAM_PHYS_BASE_FROM_DSP_VIEW;
+ adsp->dram_offset = offset;
+ offset >>= DRAM_REMAP_SHIFT;
+
+ dev_dbg(sdev->dev, "adsp->pa_dram %pa, offset %#x\n", &adsp->pa_dram, offset);
+
+ snd_sof_dsp_write(sdev, DSP_BUSREG_BAR, DSP_C0_EMI_MAP_ADDR, offset);
+ snd_sof_dsp_write(sdev, DSP_BUSREG_BAR, DSP_C0_DMAEMI_MAP_ADDR, offset);
+
+ if (offset != snd_sof_dsp_read(sdev, DSP_BUSREG_BAR, DSP_C0_EMI_MAP_ADDR) ||
+ offset != snd_sof_dsp_read(sdev, DSP_BUSREG_BAR, DSP_C0_DMAEMI_MAP_ADDR)) {
+ dev_err(sdev->dev, "emi remap fail\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int adsp_shared_base_ioremap(struct platform_device *pdev, void *data)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_adsp_chip_info *adsp = data;
+
+ /* remap shared-dram base to be non-cacheable */
+ adsp->shared_dram = devm_ioremap(dev, adsp->pa_shared_dram,
+ adsp->shared_size);
+ if (!adsp->shared_dram) {
+ dev_err(dev, "failed to ioremap base %pa size %#x\n",
+ &adsp->pa_shared_dram, adsp->shared_size);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "shared-dram vbase=%p, phy addr :%pa, size=%#x\n",
+ adsp->shared_dram, &adsp->pa_shared_dram, adsp->shared_size);
+
+ return 0;
+}
+
+static int mt8186_run(struct snd_sof_dev *sdev)
+{
+ u32 adsp_bootup_addr;
+
+ adsp_bootup_addr = SRAM_PHYS_BASE_FROM_DSP_VIEW;
+ dev_dbg(sdev->dev, "HIFIxDSP boot from base : 0x%08X\n", adsp_bootup_addr);
+ mt8186_sof_hifixdsp_boot_sequence(sdev, adsp_bootup_addr);
+
+ return 0;
+}
+
+static int mt8186_dsp_probe(struct snd_sof_dev *sdev)
+{
+ struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev);
+ struct adsp_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ sdev->pdata->hw_pdata = priv;
+ priv->dev = sdev->dev;
+ priv->sdev = sdev;
+
+ priv->adsp = devm_kzalloc(&pdev->dev, sizeof(struct mtk_adsp_chip_info), GFP_KERNEL);
+ if (!priv->adsp)
+ return -ENOMEM;
+
+ ret = platform_parse_resource(pdev, priv->adsp);
+ if (ret)
+ return ret;
+
+ sdev->bar[SOF_FW_BLK_TYPE_IRAM] = devm_ioremap(sdev->dev,
+ priv->adsp->pa_sram,
+ priv->adsp->sramsize);
+ if (!sdev->bar[SOF_FW_BLK_TYPE_IRAM]) {
+ dev_err(sdev->dev, "failed to ioremap base %pa size %#x\n",
+ &priv->adsp->pa_sram, priv->adsp->sramsize);
+ return -ENOMEM;
+ }
+
+ priv->adsp->va_sram = sdev->bar[SOF_FW_BLK_TYPE_IRAM];
+
+ sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap(sdev->dev,
+ priv->adsp->pa_dram,
+ priv->adsp->dramsize);
+
+ if (!sdev->bar[SOF_FW_BLK_TYPE_SRAM]) {
+ dev_err(sdev->dev, "failed to ioremap base %pa size %#x\n",
+ &priv->adsp->pa_dram, priv->adsp->dramsize);
+ return -ENOMEM;
+ }
+
+ priv->adsp->va_dram = sdev->bar[SOF_FW_BLK_TYPE_SRAM];
+
+ ret = adsp_shared_base_ioremap(pdev, priv->adsp);
+ if (ret) {
+ dev_err(sdev->dev, "adsp_shared_base_ioremap fail!\n");
+ return ret;
+ }
+
+ sdev->bar[DSP_REG_BAR] = priv->adsp->va_cfgreg;
+ sdev->bar[DSP_SECREG_BAR] = priv->adsp->va_secreg;
+ sdev->bar[DSP_BUSREG_BAR] = priv->adsp->va_busreg;
+
+ sdev->mmio_bar = SOF_FW_BLK_TYPE_SRAM;
+ sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = mt8186_get_mailbox_offset(sdev);
+
+ ret = adsp_memory_remap_init(sdev, priv->adsp);
+ if (ret) {
+ dev_err(sdev->dev, "adsp_memory_remap_init fail!\n");
+ return ret;
+ }
+
+ /* enable adsp clock before touching registers */
+ ret = mt8186_adsp_init_clock(sdev);
+ if (ret) {
+ dev_err(sdev->dev, "mt8186_adsp_init_clock failed\n");
+ return ret;
+ }
+
+ ret = mt8186_adsp_clock_on(sdev);
+ if (ret) {
+ dev_err(sdev->dev, "mt8186_adsp_clock_on fail!\n");
+ return ret;
+ }
+
+ adsp_sram_power_on(sdev);
+
+ priv->ipc_dev = platform_device_register_data(&pdev->dev, "mtk-adsp-ipc",
+ PLATFORM_DEVID_NONE,
+ pdev, sizeof(*pdev));
+ if (IS_ERR(priv->ipc_dev)) {
+ ret = PTR_ERR(priv->ipc_dev);
+ dev_err(sdev->dev, "failed to create mtk-adsp-ipc device\n");
+ goto err_adsp_off;
+ }
+
+ priv->dsp_ipc = dev_get_drvdata(&priv->ipc_dev->dev);
+ if (!priv->dsp_ipc) {
+ ret = -EPROBE_DEFER;
+ dev_err(sdev->dev, "failed to get drvdata\n");
+ goto exit_pdev_unregister;
+ }
+
+ mtk_adsp_ipc_set_data(priv->dsp_ipc, priv);
+ priv->dsp_ipc->ops = &dsp_ops;
+
+ return 0;
+
+exit_pdev_unregister:
+ platform_device_unregister(priv->ipc_dev);
+err_adsp_off:
+ adsp_sram_power_off(sdev);
+ mt8186_adsp_clock_off(sdev);
+
+ return ret;
+}
+
+static int mt8186_dsp_remove(struct snd_sof_dev *sdev)
+{
+ struct adsp_priv *priv = sdev->pdata->hw_pdata;
+
+ platform_device_unregister(priv->ipc_dev);
+ mt8186_sof_hifixdsp_shutdown(sdev);
+ adsp_sram_power_off(sdev);
+ mt8186_adsp_clock_off(sdev);
+
+ return 0;
+}
+
+static int mt8186_dsp_shutdown(struct snd_sof_dev *sdev)
+{
+ return snd_sof_suspend(sdev->dev);
+}
+
+static int mt8186_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
+{
+ mt8186_sof_hifixdsp_shutdown(sdev);
+ adsp_sram_power_off(sdev);
+ mt8186_adsp_clock_off(sdev);
+
+ return 0;
+}
+
+static int mt8186_dsp_resume(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ ret = mt8186_adsp_clock_on(sdev);
+ if (ret) {
+ dev_err(sdev->dev, "mt8186_adsp_clock_on fail!\n");
+ return ret;
+ }
+
+ adsp_sram_power_on(sdev);
+
+ return ret;
+}
+
+/* on mt8186 there is a 1:1 mapping between block type and BAR index */
+static int mt8186_get_bar_index(struct snd_sof_dev *sdev, u32 type)
+{
+ return type;
+}
+
+static int mt8186_pcm_hw_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params)
+{
+ platform_params->cont_update_posn = 1;
+
+ return 0;
+}
+
+static snd_pcm_uframes_t mt8186_pcm_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ int ret;
+ snd_pcm_uframes_t pos;
+ struct snd_sof_pcm *spcm;
+ struct sof_ipc_stream_posn posn;
+ struct snd_sof_pcm_stream *stream;
+ struct snd_soc_component *scomp = sdev->component;
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+
+ spcm = snd_sof_find_spcm_dai(scomp, rtd);
+ if (!spcm) {
+ dev_warn_ratelimited(sdev->dev, "warn: can't find PCM with DAI ID %d\n",
+ rtd->dai_link->id);
+ return 0;
+ }
+
+ stream = &spcm->stream[substream->stream];
+ ret = snd_sof_ipc_msg_data(sdev, stream, &posn, sizeof(posn));
+ if (ret < 0) {
+ dev_warn(sdev->dev, "failed to read stream position: %d\n", ret);
+ return 0;
+ }
+
+ memcpy(&stream->posn, &posn, sizeof(posn));
+ pos = spcm->stream[substream->stream].posn.host_posn;
+ pos = bytes_to_frames(substream->runtime, pos);
+
+ return pos;
+}
+
+static void mt8186_adsp_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ u32 dbg_pc, dbg_data, dbg_inst, dbg_ls0stat, dbg_status, faultinfo;
+
+ /* dump debug registers */
+ dbg_pc = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGPC);
+ dbg_data = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGDATA);
+ dbg_inst = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGINST);
+ dbg_ls0stat = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGLS0STAT);
+ dbg_status = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGSTATUS);
+ faultinfo = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PFAULTINFO);
+
+ dev_info(sdev->dev, "adsp dump : pc %#x, data %#x, dbg_inst %#x,",
+ dbg_pc, dbg_data, dbg_inst);
+ dev_info(sdev->dev, "ls0stat %#x, status %#x, faultinfo %#x",
+ dbg_ls0stat, dbg_status, faultinfo);
+
+ mtk_adsp_dump(sdev, flags);
+}
+
+static struct snd_soc_dai_driver mt8186_dai[] = {
+{
+ .name = "SOF_DL1",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+},
+{
+ .name = "SOF_DL2",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+},
+{
+ .name = "SOF_UL1",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+},
+{
+ .name = "SOF_UL2",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+},
+};
+
+/* mt8186 ops */
+static struct snd_sof_dsp_ops sof_mt8186_ops = {
+ /* probe and remove */
+ .probe = mt8186_dsp_probe,
+ .remove = mt8186_dsp_remove,
+ .shutdown = mt8186_dsp_shutdown,
+
+ /* DSP core boot */
+ .run = mt8186_run,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* ipc */
+ .send_msg = mt8186_send_msg,
+ .get_mailbox_offset = mt8186_get_mailbox_offset,
+ .get_window_offset = mt8186_get_window_offset,
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ /* misc */
+ .get_bar_index = mt8186_get_bar_index,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_hw_params = mt8186_pcm_hw_params,
+ .pcm_pointer = mt8186_pcm_pointer,
+ .pcm_close = sof_stream_pcm_close,
+
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* Firmware ops */
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+
+ /* DAI drivers */
+ .drv = mt8186_dai,
+ .num_drv = ARRAY_SIZE(mt8186_dai),
+
+ /* Debug information */
+ .dbg_dump = mt8186_adsp_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* PM */
+ .suspend = mt8186_dsp_suspend,
+ .resume = mt8186_dsp_resume,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+};
+
+static struct snd_sof_of_mach sof_mt8186_machs[] = {
+ {
+ .compatible = "mediatek,mt8186",
+ .sof_tplg_filename = "sof-mt8186.tplg",
+ },
+ {}
+};
+
+static const struct sof_dev_desc sof_of_mt8186_desc = {
+ .of_machines = sof_mt8186_machs,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "mediatek/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "mediatek/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-mt8186.ri",
+ },
+ .nocodec_tplg_filename = "sof-mt8186-nocodec.tplg",
+ .ops = &sof_mt8186_ops,
+};
+
+/*
+ * DL2, DL3, UL4 and UL5 are registered as SOF FEs, so the corresponding
+ * SOF BEs are created here to complete the pipeline.
+ */
+static struct snd_soc_dai_driver mt8188_dai[] = {
+{
+ .name = "SOF_DL2",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+},
+{
+ .name = "SOF_DL3",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+},
+{
+ .name = "SOF_UL4",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+},
+{
+ .name = "SOF_UL5",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+},
+};
+
+/* mt8188 ops */
+static struct snd_sof_dsp_ops sof_mt8188_ops;
+
+static int sof_mt8188_ops_init(struct snd_sof_dev *sdev)
+{
+ /* common defaults */
+ memcpy(&sof_mt8188_ops, &sof_mt8186_ops, sizeof(sof_mt8188_ops));
+
+ sof_mt8188_ops.drv = mt8188_dai;
+ sof_mt8188_ops.num_drv = ARRAY_SIZE(mt8188_dai);
+
+ return 0;
+}
+
+static struct snd_sof_of_mach sof_mt8188_machs[] = {
+ {
+ .compatible = "mediatek,mt8188",
+ .sof_tplg_filename = "sof-mt8188.tplg",
+ },
+ {}
+};
+
+static const struct sof_dev_desc sof_of_mt8188_desc = {
+ .of_machines = sof_mt8188_machs,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "mediatek/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "mediatek/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-mt8188.ri",
+ },
+ .nocodec_tplg_filename = "sof-mt8188-nocodec.tplg",
+ .ops = &sof_mt8188_ops,
+ .ops_init = sof_mt8188_ops_init,
+};
+
+static const struct of_device_id sof_of_mt8186_ids[] = {
+ { .compatible = "mediatek,mt8186-dsp", .data = &sof_of_mt8186_desc},
+ { .compatible = "mediatek,mt8188-dsp", .data = &sof_of_mt8188_desc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, sof_of_mt8186_ids);
+
+/* DT driver definition */
+static struct platform_driver snd_sof_of_mt8186_driver = {
+ .probe = sof_of_probe,
+ .remove = sof_of_remove,
+ .shutdown = sof_of_shutdown,
+ .driver = {
+ .name = "sof-audio-of-mt8186",
+ .pm = &sof_of_pm,
+ .of_match_table = sof_of_mt8186_ids,
+ },
+};
+module_platform_driver(snd_sof_of_mt8186_driver);
+
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_IMPORT_NS(SND_SOC_SOF_MTK_COMMON);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/mediatek/mt8186/mt8186.h b/sound/soc/sof/mediatek/mt8186/mt8186.h
new file mode 100644
index 000000000..91323f492
--- /dev/null
+++ b/sound/soc/sof/mediatek/mt8186/mt8186.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+/*
+ * Copyright (c) 2022 MediaTek Corporation. All rights reserved.
+ *
+ * Header file for the mt8186 DSP register definition
+ */
+
+#ifndef __MT8186_H
+#define __MT8186_H
+
+struct mtk_adsp_chip_info;
+struct snd_sof_dev;
+
+#define DSP_REG_BAR 4
+#define DSP_SECREG_BAR 5
+#define DSP_BUSREG_BAR 6
+
+/*****************************************************************************
+ * R E G I S T E R TABLE
+ *****************************************************************************/
+/* dsp cfg */
+#define ADSP_CFGREG_SW_RSTN 0x0000
+#define SW_DBG_RSTN_C0 BIT(0)
+#define SW_RSTN_C0 BIT(4)
+#define ADSP_HIFI_IO_CONFIG 0x000C
+#define TRACEMEMREADY BIT(15)
+#define RUNSTALL BIT(31)
+#define ADSP_IRQ_MASK 0x0030
+#define ADSP_DVFSRC_REQ 0x0040
+#define ADSP_DDREN_REQ_0 0x0044
+#define ADSP_SEMAPHORE 0x0064
+#define ADSP_WDT_CON_C0 0x007C
+#define ADSP_MBOX_IRQ_EN 0x009C
+#define DSP_MBOX0_IRQ_EN BIT(0)
+#define DSP_MBOX1_IRQ_EN BIT(1)
+#define DSP_MBOX2_IRQ_EN BIT(2)
+#define DSP_MBOX3_IRQ_EN BIT(3)
+#define DSP_MBOX4_IRQ_EN BIT(4)
+#define DSP_PDEBUGPC 0x013C
+#define DSP_PDEBUGDATA 0x0140
+#define DSP_PDEBUGINST 0x0144
+#define DSP_PDEBUGLS0STAT 0x0148
+#define DSP_PDEBUGSTATUS 0x014C
+#define DSP_PFAULTINFO 0x0150
+#define ADSP_CK_EN 0x1000
+#define CORE_CLK_EN BIT(0)
+#define COREDBG_EN BIT(1)
+#define TIMER_EN BIT(3)
+#define DMA_EN BIT(4)
+#define UART_EN BIT(5)
+#define ADSP_UART_CTRL 0x1010
+#define UART_BCLK_CG BIT(0)
+#define UART_RSTN BIT(3)
+
+/* dsp sec */
+#define ADSP_PRID 0x0
+#define ADSP_ALTVEC_C0 0x04
+#define ADSP_ALTVECSEL 0x0C
+#define MT8188_ADSP_ALTVECSEL_C0 BIT(0)
+#define MT8186_ADSP_ALTVECSEL_C0 BIT(1)
+
+/*
+ * On MT8188, BIT(1) is not evaluated and on MT8186 BIT(0) is not evaluated,
+ * so the driver can safely set both bits regardless of the SoC.
+ */
+#define ADSP_ALTVECSEL_C0 (MT8188_ADSP_ALTVECSEL_C0 | \
+ MT8186_ADSP_ALTVECSEL_C0)
+
+/* dsp bus */
+#define ADSP_SRAM_POOL_CON 0x190
+#define DSP_SRAM_POOL_PD_MASK 0xF00F /* [0:3] and [12:15] */
+#define DSP_C0_EMI_MAP_ADDR 0xA00 /* ADSP Core0 To EMI Address Remap */
+#define DSP_C0_DMAEMI_MAP_ADDR 0xA08 /* DMA0 To EMI Address Remap */
+
+/* DSP memories */
+#define MBOX_OFFSET 0x500000 /* DRAM */
+#define MBOX_SIZE 0x1000 /* consistent with the value in memory.h of the SOF firmware */
+#define DSP_DRAM_SIZE 0xA00000 /* 10M */
+
+/* remap dram between AP and DSP view, 4 KB aligned */
+#define SRAM_PHYS_BASE_FROM_DSP_VIEW 0x4E100000 /* MT8186 DSP view */
+#define DRAM_PHYS_BASE_FROM_DSP_VIEW 0x60000000 /* MT8186 DSP view */
+#define DRAM_REMAP_SHIFT 12
+#define DRAM_REMAP_MASK 0xFFF
+
+#define SIZE_SHARED_DRAM_DL 0x40000 /* Shared buffer for Downlink */
+#define SIZE_SHARED_DRAM_UL 0x40000 /* Shared buffer for Uplink */
+#define TOTAL_SIZE_SHARED_DRAM_FROM_TAIL (SIZE_SHARED_DRAM_DL + SIZE_SHARED_DRAM_UL)
+
+void mt8186_sof_hifixdsp_boot_sequence(struct snd_sof_dev *sdev, u32 boot_addr);
+void mt8186_sof_hifixdsp_shutdown(struct snd_sof_dev *sdev);
+#endif
diff --git a/sound/soc/sof/mediatek/mt8195/Makefile b/sound/soc/sof/mediatek/mt8195/Makefile
new file mode 100644
index 000000000..afc4f21fc
--- /dev/null
+++ b/sound/soc/sof/mediatek/mt8195/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+snd-sof-mt8195-objs := mt8195.o mt8195-clk.o mt8195-loader.o
+obj-$(CONFIG_SND_SOC_SOF_MT8195) += snd-sof-mt8195.o
diff --git a/sound/soc/sof/mediatek/mt8195/mt8195-clk.c b/sound/soc/sof/mediatek/mt8195/mt8195-clk.c
new file mode 100644
index 000000000..7cffcad00
--- /dev/null
+++ b/sound/soc/sof/mediatek/mt8195/mt8195-clk.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright(c) 2021 Mediatek Corporation. All rights reserved.
+//
+// Author: YC Hung <yc.hung@mediatek.com>
+//
+// Hardware interface for mt8195 DSP clock
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include "mt8195.h"
+#include "mt8195-clk.h"
+#include "../adsp_helper.h"
+#include "../../sof-audio.h"
+
+static const char *adsp_clks[ADSP_CLK_MAX] = {
+ [CLK_TOP_ADSP] = "adsp_sel",
+ [CLK_TOP_CLK26M] = "clk26m_ck",
+ [CLK_TOP_AUDIO_LOCAL_BUS] = "audio_local_bus",
+ [CLK_TOP_MAINPLL_D7_D2] = "mainpll_d7_d2",
+ [CLK_SCP_ADSP_AUDIODSP] = "scp_adsp_audiodsp",
+ [CLK_TOP_AUDIO_H] = "audio_h",
+};
+
+int mt8195_adsp_init_clock(struct snd_sof_dev *sdev)
+{
+ struct device *dev = sdev->dev;
+ struct adsp_priv *priv = sdev->pdata->hw_pdata;
+ int i;
+
+ priv->clk = devm_kcalloc(dev, ADSP_CLK_MAX, sizeof(*priv->clk), GFP_KERNEL);
+
+ if (!priv->clk)
+ return -ENOMEM;
+
+ for (i = 0; i < ADSP_CLK_MAX; i++) {
+ priv->clk[i] = devm_clk_get(dev, adsp_clks[i]);
+ if (IS_ERR(priv->clk[i]))
+ return PTR_ERR(priv->clk[i]);
+ }
+
+ return 0;
+}
+
+static int adsp_enable_all_clock(struct snd_sof_dev *sdev)
+{
+ struct device *dev = sdev->dev;
+ struct adsp_priv *priv = sdev->pdata->hw_pdata;
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk[CLK_TOP_MAINPLL_D7_D2]);
+ if (ret) {
+ dev_err(dev, "%s clk_prepare_enable(mainpll_d7_d2) fail %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->clk[CLK_TOP_ADSP]);
+ if (ret) {
+ dev_err(dev, "%s clk_prepare_enable(adsp_sel) fail %d\n",
+ __func__, ret);
+ goto disable_mainpll_d7_d2_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk[CLK_TOP_AUDIO_LOCAL_BUS]);
+ if (ret) {
+ dev_err(dev, "%s clk_prepare_enable(audio_local_bus) fail %d\n",
+ __func__, ret);
+ goto disable_dsp_sel_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk[CLK_SCP_ADSP_AUDIODSP]);
+ if (ret) {
+ dev_err(dev, "%s clk_prepare_enable(scp_adsp_audiodsp) fail %d\n",
+ __func__, ret);
+ goto disable_audio_local_bus_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk[CLK_TOP_AUDIO_H]);
+ if (ret) {
+ dev_err(dev, "%s clk_prepare_enable(audio_h) fail %d\n",
+ __func__, ret);
+ goto disable_scp_adsp_audiodsp_clk;
+ }
+
+ return 0;
+
+disable_scp_adsp_audiodsp_clk:
+ clk_disable_unprepare(priv->clk[CLK_SCP_ADSP_AUDIODSP]);
+disable_audio_local_bus_clk:
+ clk_disable_unprepare(priv->clk[CLK_TOP_AUDIO_LOCAL_BUS]);
+disable_dsp_sel_clk:
+ clk_disable_unprepare(priv->clk[CLK_TOP_ADSP]);
+disable_mainpll_d7_d2_clk:
+ clk_disable_unprepare(priv->clk[CLK_TOP_MAINPLL_D7_D2]);
+
+ return ret;
+}
+
+static void adsp_disable_all_clock(struct snd_sof_dev *sdev)
+{
+ struct adsp_priv *priv = sdev->pdata->hw_pdata;
+
+ clk_disable_unprepare(priv->clk[CLK_TOP_AUDIO_H]);
+ clk_disable_unprepare(priv->clk[CLK_SCP_ADSP_AUDIODSP]);
+ clk_disable_unprepare(priv->clk[CLK_TOP_AUDIO_LOCAL_BUS]);
+ clk_disable_unprepare(priv->clk[CLK_TOP_ADSP]);
+ clk_disable_unprepare(priv->clk[CLK_TOP_MAINPLL_D7_D2]);
+}
+
+static int adsp_default_clk_init(struct snd_sof_dev *sdev, bool enable)
+{
+ struct device *dev = sdev->dev;
+ struct adsp_priv *priv = sdev->pdata->hw_pdata;
+ int ret;
+
+ dev_dbg(dev, "%s: %s\n", __func__, enable ? "on" : "off");
+
+ if (enable) {
+ ret = clk_set_parent(priv->clk[CLK_TOP_ADSP],
+ priv->clk[CLK_TOP_CLK26M]);
+ if (ret) {
+ dev_err(dev, "failed to set dsp_sel to clk26m: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_set_parent(priv->clk[CLK_TOP_AUDIO_LOCAL_BUS],
+ priv->clk[CLK_TOP_MAINPLL_D7_D2]);
+ if (ret) {
+ dev_err(dev, "set audio_local_bus failed %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_set_parent(priv->clk[CLK_TOP_AUDIO_H],
+ priv->clk[CLK_TOP_CLK26M]);
+ if (ret) {
+ dev_err(dev, "set audio_h_sel failed %d\n", ret);
+ return ret;
+ }
+
+ ret = adsp_enable_all_clock(sdev);
+ if (ret) {
+ dev_err(dev, "adsp_enable_all_clock() failed: %d\n", ret);
+ return ret;
+ }
+ } else {
+ adsp_disable_all_clock(sdev);
+ }
+
+ return 0;
+}
+
+int adsp_clock_on(struct snd_sof_dev *sdev)
+{
+ /* Open ADSP clock */
+ return adsp_default_clk_init(sdev, 1);
+}
+
+int adsp_clock_off(struct snd_sof_dev *sdev)
+{
+ /* Close ADSP clock */
+ return adsp_default_clk_init(sdev, 0);
+}
+
diff --git a/sound/soc/sof/mediatek/mt8195/mt8195-clk.h b/sound/soc/sof/mediatek/mt8195/mt8195-clk.h
new file mode 100644
index 000000000..9cc0573d5
--- /dev/null
+++ b/sound/soc/sof/mediatek/mt8195/mt8195-clk.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2021 MediaTek Corporation. All rights reserved.
+ *
+ * Header file for the mt8195 DSP clock definition
+ */
+
+#ifndef __MT8195_CLK_H
+#define __MT8195_CLK_H
+
+struct snd_sof_dev;
+
+/* DSP clock */
+enum adsp_clk_id {
+ CLK_TOP_ADSP,
+ CLK_TOP_CLK26M,
+ CLK_TOP_AUDIO_LOCAL_BUS,
+ CLK_TOP_MAINPLL_D7_D2,
+ CLK_SCP_ADSP_AUDIODSP,
+ CLK_TOP_AUDIO_H,
+ ADSP_CLK_MAX
+};
+
+int mt8195_adsp_init_clock(struct snd_sof_dev *sdev);
+int adsp_clock_on(struct snd_sof_dev *sdev);
+int adsp_clock_off(struct snd_sof_dev *sdev);
+#endif
diff --git a/sound/soc/sof/mediatek/mt8195/mt8195-loader.c b/sound/soc/sof/mediatek/mt8195/mt8195-loader.c
new file mode 100644
index 000000000..4be99ff4e
--- /dev/null
+++ b/sound/soc/sof/mediatek/mt8195/mt8195-loader.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright (c) 2021 Mediatek Corporation. All rights reserved.
+//
+// Author: YC Hung <yc.hung@mediatek.com>
+//
+// Hardware interface for mt8195 DSP code loader
+
+#include <sound/sof.h>
+#include "mt8195.h"
+#include "../../ops.h"
+
+void sof_hifixdsp_boot_sequence(struct snd_sof_dev *sdev, u32 boot_addr)
+{
+ /* ADSP bootup base */
+ snd_sof_dsp_write(sdev, DSP_REG_BAR, DSP_ALTRESETVEC, boot_addr);
+
+ /* pull high RunStall (set bit3 to 1) */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
+ ADSP_RUNSTALL, ADSP_RUNSTALL);
+
+ /* pull high StatVectorSel to use AltResetVec (set bit4 to 1) */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
+ STATVECTOR_SEL, STATVECTOR_SEL);
+
+ /* toggle DReset & BReset */
+ /* pull high DReset & BReset */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
+ ADSP_BRESET_SW | ADSP_DRESET_SW,
+ ADSP_BRESET_SW | ADSP_DRESET_SW);
+
+ /* delay ~1us (10 DSP cycles at 26 MHz), per the IP vendor's suggestion */
+ udelay(1);
+
+ /* pull low DReset & BReset */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
+ ADSP_BRESET_SW | ADSP_DRESET_SW,
+ 0);
+
+ /* Enable PDebug */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_PDEBUGBUS0,
+ PDEBUG_ENABLE,
+ PDEBUG_ENABLE);
+
+ /* release RunStall (set bit3 to 0) */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
+ ADSP_RUNSTALL, 0);
+}
+
+void sof_hifixdsp_shutdown(struct snd_sof_dev *sdev)
+{
+ /* RUN_STALL pull high again to reset */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
+ ADSP_RUNSTALL, ADSP_RUNSTALL);
+
+ /* pull high DReset & BReset */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
+ ADSP_BRESET_SW | ADSP_DRESET_SW,
+ ADSP_BRESET_SW | ADSP_DRESET_SW);
+}
+
diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.c b/sound/soc/sof/mediatek/mt8195/mt8195.c
new file mode 100644
index 000000000..7d6a56855
--- /dev/null
+++ b/sound/soc/sof/mediatek/mt8195/mt8195.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright(c) 2021 Mediatek Inc. All rights reserved.
+//
+// Author: YC Hung <yc.hung@mediatek.com>
+//
+
+/*
+ * Hardware interface for audio DSP on mt8195
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/module.h>
+
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include "../../ops.h"
+#include "../../sof-of-dev.h"
+#include "../../sof-audio.h"
+#include "../adsp_helper.h"
+#include "../mtk-adsp-common.h"
+#include "mt8195.h"
+#include "mt8195-clk.h"
+
+static int mt8195_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+
+static int mt8195_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+
+static int mt8195_send_msg(struct snd_sof_dev *sdev,
+ struct snd_sof_ipc_msg *msg)
+{
+ struct adsp_priv *priv = sdev->pdata->hw_pdata;
+
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+
+ return mtk_adsp_ipc_send(priv->dsp_ipc, MTK_ADSP_IPC_REQ, MTK_ADSP_IPC_OP_REQ);
+}
+
+static void mt8195_dsp_handle_reply(struct mtk_adsp_ipc *ipc)
+{
+ struct adsp_priv *priv = mtk_adsp_ipc_get_data(ipc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sdev->ipc_lock, flags);
+ snd_sof_ipc_process_reply(priv->sdev, 0);
+ spin_unlock_irqrestore(&priv->sdev->ipc_lock, flags);
+}
+
+static void mt8195_dsp_handle_request(struct mtk_adsp_ipc *ipc)
+{
+ struct adsp_priv *priv = mtk_adsp_ipc_get_data(ipc);
+ u32 p; /* panic code */
+ int ret;
+
+ /* Read the message from the debug box. */
+ sof_mailbox_read(priv->sdev, priv->sdev->debug_box.offset + 4,
+ &p, sizeof(p));
+
+ /* Check to see if the message is a panic code 0x0dead*** */
+ if ((p & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(priv->sdev, p, true);
+ } else {
+ snd_sof_ipc_msgs_rx(priv->sdev);
+
+ /* tell DSP cmd is done */
+ ret = mtk_adsp_ipc_send(priv->dsp_ipc, MTK_ADSP_IPC_RSP, MTK_ADSP_IPC_OP_RSP);
+ if (ret)
+ dev_err(priv->dev, "failed to send ipc response");
+ }
+}
+
+static struct mtk_adsp_ipc_ops dsp_ops = {
+ .handle_reply = mt8195_dsp_handle_reply,
+ .handle_request = mt8195_dsp_handle_request,
+};
+
+static int platform_parse_resource(struct platform_device *pdev, void *data)
+{
+ struct resource *mmio;
+ struct resource res;
+ struct device_node *mem_region;
+ struct device *dev = &pdev->dev;
+ struct mtk_adsp_chip_info *adsp = data;
+ int ret;
+
+ mem_region = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!mem_region) {
+ dev_err(dev, "no dma memory-region phandle\n");
+ return -ENODEV;
+ }
+
+ ret = of_address_to_resource(mem_region, 0, &res);
+ of_node_put(mem_region);
+ if (ret) {
+ dev_err(dev, "of_address_to_resource dma failed\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "DMA %pR\n", &res);
+
+ adsp->pa_shared_dram = (phys_addr_t)res.start;
+ adsp->shared_size = resource_size(&res);
+ if (adsp->pa_shared_dram & DRAM_REMAP_MASK) {
+		dev_err(dev, "adsp shared DMA memory (%#x) is not 4K-aligned\n",
+ (u32)adsp->pa_shared_dram);
+ return -EINVAL;
+ }
+
+ ret = of_reserved_mem_device_init(dev);
+ if (ret) {
+ dev_err(dev, "of_reserved_mem_device_init failed\n");
+ return ret;
+ }
+
+ mem_region = of_parse_phandle(dev->of_node, "memory-region", 1);
+ if (!mem_region) {
+ dev_err(dev, "no memory-region sysmem phandle\n");
+ return -ENODEV;
+ }
+
+ ret = of_address_to_resource(mem_region, 0, &res);
+ of_node_put(mem_region);
+ if (ret) {
+ dev_err(dev, "of_address_to_resource sysmem failed\n");
+ return ret;
+ }
+
+ adsp->pa_dram = (phys_addr_t)res.start;
+ adsp->dramsize = resource_size(&res);
+ if (adsp->pa_dram & DRAM_REMAP_MASK) {
+		dev_err(dev, "adsp memory (%#x) is not 4K-aligned\n",
+ (u32)adsp->pa_dram);
+ return -EINVAL;
+ }
+
+ if (adsp->dramsize < TOTAL_SIZE_SHARED_DRAM_FROM_TAIL) {
+		dev_err(dev, "adsp memory (%#x) is too small for the shared region\n",
+ adsp->dramsize);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "dram pbase=%pa, dramsize=%#x\n",
+ &adsp->pa_dram, adsp->dramsize);
+
+ /* Parse CFG base */
+ mmio = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ if (!mmio) {
+ dev_err(dev, "no ADSP-CFG register resource\n");
+ return -ENXIO;
+ }
+ /* remap for DSP register accessing */
+ adsp->va_cfgreg = devm_ioremap_resource(dev, mmio);
+ if (IS_ERR(adsp->va_cfgreg))
+ return PTR_ERR(adsp->va_cfgreg);
+
+ adsp->pa_cfgreg = (phys_addr_t)mmio->start;
+ adsp->cfgregsize = resource_size(mmio);
+
+ dev_dbg(dev, "cfgreg-vbase=%p, cfgregsize=%#x\n",
+ adsp->va_cfgreg, adsp->cfgregsize);
+
+ /* Parse SRAM */
+ mmio = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ if (!mmio) {
+ dev_err(dev, "no SRAM resource\n");
+ return -ENXIO;
+ }
+
+ adsp->pa_sram = (phys_addr_t)mmio->start;
+ adsp->sramsize = resource_size(mmio);
+
+ dev_dbg(dev, "sram pbase=%pa,%#x\n", &adsp->pa_sram, adsp->sramsize);
+
+ return ret;
+}
+
+static int adsp_sram_power_on(struct device *dev, bool on)
+{
+ void __iomem *va_dspsysreg;
+ u32 srampool_con;
+
+ va_dspsysreg = ioremap(ADSP_SRAM_POOL_CON, 0x4);
+ if (!va_dspsysreg) {
+ dev_err(dev, "failed to ioremap sram pool base %#x\n",
+ ADSP_SRAM_POOL_CON);
+ return -ENOMEM;
+ }
+
+ srampool_con = readl(va_dspsysreg);
+ if (on)
+ writel(srampool_con & ~DSP_SRAM_POOL_PD_MASK, va_dspsysreg);
+ else
+ writel(srampool_con | DSP_SRAM_POOL_PD_MASK, va_dspsysreg);
+
+ iounmap(va_dspsysreg);
+ return 0;
+}
+
+/* Init the basic DSP DRAM address */
+static int adsp_memory_remap_init(struct device *dev, struct mtk_adsp_chip_info *adsp)
+{
+ void __iomem *vaddr_emi_map;
+ int offset;
+
+ if (!adsp)
+ return -ENXIO;
+
+ vaddr_emi_map = devm_ioremap(dev, DSP_EMI_MAP_ADDR, 0x4);
+ if (!vaddr_emi_map) {
+ dev_err(dev, "failed to ioremap emi map base %#x\n",
+ DSP_EMI_MAP_ADDR);
+ return -ENOMEM;
+ }
+
+ offset = adsp->pa_dram - DRAM_PHYS_BASE_FROM_DSP_VIEW;
+ adsp->dram_offset = offset;
+ offset >>= DRAM_REMAP_SHIFT;
+ dev_dbg(dev, "adsp->pa_dram %pa, offset %#x\n", &adsp->pa_dram, offset);
+ writel(offset, vaddr_emi_map);
+ if (offset != readl(vaddr_emi_map)) {
+		dev_err(dev, "failed to write emi map: %#x\n", readl(vaddr_emi_map));
+ return -EIO;
+ }
+
+ return 0;
+}
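
A worked example of the remap arithmetic above, wrapped in a hypothetical
helper: the EMI map register takes the AP-to-DSP DRAM offset expressed in
4 KB pages, so a reservation at 0x61000000 against the 0x60000000 DSP-view
base yields 0x01000000 >> 12 = 0x1000.

static inline u32 example_emi_map_value(phys_addr_t pa_dram)
{
	/* e.g. (0x61000000 - 0x60000000) >> DRAM_REMAP_SHIFT == 0x1000 */
	return (u32)((pa_dram - DRAM_PHYS_BASE_FROM_DSP_VIEW) >> DRAM_REMAP_SHIFT);
}
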
+
+static int adsp_shared_base_ioremap(struct platform_device *pdev, void *data)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_adsp_chip_info *adsp = data;
+
+	/* map the shared DRAM region as non-cacheable */
+ adsp->shared_dram = devm_ioremap(dev, adsp->pa_shared_dram,
+ adsp->shared_size);
+ if (!adsp->shared_dram) {
+		dev_err(dev, "failed to ioremap base %pa size %#x\n",
+			&adsp->pa_shared_dram, adsp->shared_size);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "shared-dram vbase=%p, phy addr :%pa, size=%#x\n",
+ adsp->shared_dram, &adsp->pa_shared_dram, adsp->shared_size);
+
+ return 0;
+}
+
+static int mt8195_run(struct snd_sof_dev *sdev)
+{
+ u32 adsp_bootup_addr;
+
+ adsp_bootup_addr = SRAM_PHYS_BASE_FROM_DSP_VIEW;
+	dev_dbg(sdev->dev, "HIFIxDSP boot from base: 0x%08X\n", adsp_bootup_addr);
+ sof_hifixdsp_boot_sequence(sdev, adsp_bootup_addr);
+
+ return 0;
+}
+
+static int mt8195_dsp_probe(struct snd_sof_dev *sdev)
+{
+ struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev);
+ struct adsp_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ sdev->pdata->hw_pdata = priv;
+ priv->dev = sdev->dev;
+ priv->sdev = sdev;
+
+ priv->adsp = devm_kzalloc(&pdev->dev, sizeof(struct mtk_adsp_chip_info), GFP_KERNEL);
+ if (!priv->adsp)
+ return -ENOMEM;
+
+ ret = platform_parse_resource(pdev, priv->adsp);
+ if (ret)
+ return ret;
+
+ ret = mt8195_adsp_init_clock(sdev);
+ if (ret) {
+ dev_err(sdev->dev, "mt8195_adsp_init_clock failed\n");
+ return -EINVAL;
+ }
+
+ ret = adsp_clock_on(sdev);
+ if (ret) {
+		dev_err(sdev->dev, "adsp_clock_on failed\n");
+ return -EINVAL;
+ }
+
+ ret = adsp_sram_power_on(sdev->dev, true);
+ if (ret) {
+		dev_err(sdev->dev, "adsp_sram_power_on failed\n");
+ goto exit_clk_disable;
+ }
+
+ ret = adsp_memory_remap_init(&pdev->dev, priv->adsp);
+ if (ret) {
+		dev_err(sdev->dev, "adsp_memory_remap_init failed\n");
+ goto err_adsp_sram_power_off;
+ }
+
+ sdev->bar[SOF_FW_BLK_TYPE_IRAM] = devm_ioremap(sdev->dev,
+ priv->adsp->pa_sram,
+ priv->adsp->sramsize);
+ if (!sdev->bar[SOF_FW_BLK_TYPE_IRAM]) {
+ dev_err(sdev->dev, "failed to ioremap base %pa size %#x\n",
+ &priv->adsp->pa_sram, priv->adsp->sramsize);
+ ret = -EINVAL;
+ goto err_adsp_sram_power_off;
+ }
+
+ priv->adsp->va_sram = sdev->bar[SOF_FW_BLK_TYPE_IRAM];
+
+ sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap(sdev->dev,
+ priv->adsp->pa_dram,
+ priv->adsp->dramsize);
+ if (!sdev->bar[SOF_FW_BLK_TYPE_SRAM]) {
+ dev_err(sdev->dev, "failed to ioremap base %pa size %#x\n",
+ &priv->adsp->pa_dram, priv->adsp->dramsize);
+ ret = -EINVAL;
+ goto err_adsp_sram_power_off;
+ }
+ priv->adsp->va_dram = sdev->bar[SOF_FW_BLK_TYPE_SRAM];
+
+ ret = adsp_shared_base_ioremap(pdev, priv->adsp);
+ if (ret) {
+		dev_err(sdev->dev, "adsp_shared_base_ioremap failed\n");
+ goto err_adsp_sram_power_off;
+ }
+
+ sdev->bar[DSP_REG_BAR] = priv->adsp->va_cfgreg;
+
+ sdev->mmio_bar = SOF_FW_BLK_TYPE_SRAM;
+ sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = mt8195_get_mailbox_offset(sdev);
+
+ priv->ipc_dev = platform_device_register_data(&pdev->dev, "mtk-adsp-ipc",
+ PLATFORM_DEVID_NONE,
+ pdev, sizeof(*pdev));
+ if (IS_ERR(priv->ipc_dev)) {
+ ret = PTR_ERR(priv->ipc_dev);
+ dev_err(sdev->dev, "failed to register mtk-adsp-ipc device\n");
+ goto err_adsp_sram_power_off;
+ }
+
+ priv->dsp_ipc = dev_get_drvdata(&priv->ipc_dev->dev);
+ if (!priv->dsp_ipc) {
+ ret = -EPROBE_DEFER;
+ dev_err(sdev->dev, "failed to get drvdata\n");
+ goto exit_pdev_unregister;
+ }
+
+ mtk_adsp_ipc_set_data(priv->dsp_ipc, priv);
+ priv->dsp_ipc->ops = &dsp_ops;
+
+ return 0;
+
+exit_pdev_unregister:
+ platform_device_unregister(priv->ipc_dev);
+err_adsp_sram_power_off:
+ adsp_sram_power_on(&pdev->dev, false);
+exit_clk_disable:
+ adsp_clock_off(sdev);
+
+ return ret;
+}
+
+static int mt8195_dsp_shutdown(struct snd_sof_dev *sdev)
+{
+ return snd_sof_suspend(sdev->dev);
+}
+
+static int mt8195_dsp_remove(struct snd_sof_dev *sdev)
+{
+ struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev);
+ struct adsp_priv *priv = sdev->pdata->hw_pdata;
+
+ platform_device_unregister(priv->ipc_dev);
+ adsp_sram_power_on(&pdev->dev, false);
+ adsp_clock_off(sdev);
+
+ return 0;
+}
+
+static int mt8195_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
+{
+ struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev);
+ int ret;
+ u32 reset_sw, dbg_pc;
+
+	/* wait for the DSP to enter idle; time out after 1 second */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, DSP_REG_BAR,
+ DSP_RESET_SW, reset_sw,
+ ((reset_sw & ADSP_PWAIT) == ADSP_PWAIT),
+ SUSPEND_DSP_IDLE_POLL_INTERVAL_US,
+ SUSPEND_DSP_IDLE_TIMEOUT_US);
+ if (ret < 0) {
+ dbg_pc = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGPC);
+		dev_warn(sdev->dev, "dsp not idle, powering off anyway: swrest %#x, pc %#x, ret %d\n",
+ reset_sw, dbg_pc, ret);
+ }
+
+ /* stall and reset dsp */
+ sof_hifixdsp_shutdown(sdev);
+
+ /* power down adsp sram */
+ ret = adsp_sram_power_on(&pdev->dev, false);
+ if (ret) {
+		dev_err(sdev->dev, "adsp_sram_power_off failed\n");
+ return ret;
+ }
+
+ /* turn off adsp clock */
+ return adsp_clock_off(sdev);
+}
+
+static int mt8195_dsp_resume(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ /* turn on adsp clock */
+ ret = adsp_clock_on(sdev);
+ if (ret) {
+		dev_err(sdev->dev, "adsp_clock_on failed\n");
+ return ret;
+ }
+
+ /* power on adsp sram */
+ ret = adsp_sram_power_on(sdev->dev, true);
+ if (ret)
+		dev_err(sdev->dev, "adsp_sram_power_on failed\n");
+
+ return ret;
+}
+
+/* on mt8195 there is a 1:1 mapping between block type and BAR index */
+static int mt8195_get_bar_index(struct snd_sof_dev *sdev, u32 type)
+{
+ return type;
+}
+
+static int mt8195_pcm_hw_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params)
+{
+ platform_params->cont_update_posn = 1;
+
+ return 0;
+}
+
+static snd_pcm_uframes_t mt8195_pcm_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ int ret;
+ snd_pcm_uframes_t pos;
+ struct snd_sof_pcm *spcm;
+ struct sof_ipc_stream_posn posn;
+ struct snd_sof_pcm_stream *stream;
+ struct snd_soc_component *scomp = sdev->component;
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+
+ spcm = snd_sof_find_spcm_dai(scomp, rtd);
+ if (!spcm) {
+ dev_warn_ratelimited(sdev->dev, "warn: can't find PCM with DAI ID %d\n",
+ rtd->dai_link->id);
+ return 0;
+ }
+
+ stream = &spcm->stream[substream->stream];
+ ret = snd_sof_ipc_msg_data(sdev, stream, &posn, sizeof(posn));
+ if (ret < 0) {
+ dev_warn(sdev->dev, "failed to read stream position: %d\n", ret);
+ return 0;
+ }
+
+ memcpy(&stream->posn, &posn, sizeof(posn));
+ pos = spcm->stream[substream->stream].posn.host_posn;
+ pos = bytes_to_frames(substream->runtime, pos);
+
+ return pos;
+}
+
+static void mt8195_adsp_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ u32 dbg_pc, dbg_data, dbg_bus0, dbg_bus1, dbg_inst;
+ u32 dbg_ls0stat, dbg_ls1stat, faultbus, faultinfo, swrest;
+
+ /* dump debug registers */
+ dbg_pc = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGPC);
+ dbg_data = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGDATA);
+ dbg_bus0 = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGBUS0);
+ dbg_bus1 = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGBUS1);
+ dbg_inst = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGINST);
+ dbg_ls0stat = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGLS0STAT);
+ dbg_ls1stat = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGLS1STAT);
+ faultbus = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PFAULTBUS);
+ faultinfo = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PFAULTINFO);
+ swrest = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_RESET_SW);
+
+	dev_info(sdev->dev, "adsp dump: pc %#x, data %#x, bus0 %#x, bus1 %#x, swrest %#x\n",
+		 dbg_pc, dbg_data, dbg_bus0, dbg_bus1, swrest);
+	dev_info(sdev->dev, "dbg_inst %#x, ls0stat %#x, ls1stat %#x, faultbus %#x, faultinfo %#x\n",
+		 dbg_inst, dbg_ls0stat, dbg_ls1stat, faultbus, faultinfo);
+
+ mtk_adsp_dump(sdev, flags);
+}
+
+static struct snd_soc_dai_driver mt8195_dai[] = {
+{
+ .name = "SOF_DL2",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+},
+{
+ .name = "SOF_DL3",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+},
+{
+ .name = "SOF_UL4",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+},
+{
+ .name = "SOF_UL5",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+},
+};
+
+/* mt8195 ops */
+static struct snd_sof_dsp_ops sof_mt8195_ops = {
+ /* probe and remove */
+ .probe = mt8195_dsp_probe,
+ .remove = mt8195_dsp_remove,
+ .shutdown = mt8195_dsp_shutdown,
+
+ /* DSP core boot */
+ .run = mt8195_run,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* ipc */
+ .send_msg = mt8195_send_msg,
+ .get_mailbox_offset = mt8195_get_mailbox_offset,
+ .get_window_offset = mt8195_get_window_offset,
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ /* misc */
+ .get_bar_index = mt8195_get_bar_index,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_hw_params = mt8195_pcm_hw_params,
+ .pcm_pointer = mt8195_pcm_pointer,
+ .pcm_close = sof_stream_pcm_close,
+
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* Firmware ops */
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+
+ /* Debug information */
+ .dbg_dump = mt8195_adsp_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* DAI drivers */
+ .drv = mt8195_dai,
+ .num_drv = ARRAY_SIZE(mt8195_dai),
+
+ /* PM */
+ .suspend = mt8195_dsp_suspend,
+ .resume = mt8195_dsp_resume,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+};
+
+static struct snd_sof_of_mach sof_mt8195_machs[] = {
+ {
+ .compatible = "google,tomato",
+ .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682-dts.tplg"
+ }, {
+ .compatible = "mediatek,mt8195",
+ .sof_tplg_filename = "sof-mt8195.tplg"
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct sof_dev_desc sof_of_mt8195_desc = {
+ .of_machines = sof_mt8195_machs,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "mediatek/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "mediatek/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-mt8195.ri",
+ },
+ .nocodec_tplg_filename = "sof-mt8195-nocodec.tplg",
+ .ops = &sof_mt8195_ops,
+ .ipc_timeout = 1000,
+};
+
+static const struct of_device_id sof_of_mt8195_ids[] = {
+ { .compatible = "mediatek,mt8195-dsp", .data = &sof_of_mt8195_desc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, sof_of_mt8195_ids);
+
+/* DT driver definition */
+static struct platform_driver snd_sof_of_mt8195_driver = {
+ .probe = sof_of_probe,
+ .remove = sof_of_remove,
+ .shutdown = sof_of_shutdown,
+ .driver = {
+ .name = "sof-audio-of-mt8195",
+ .pm = &sof_of_pm,
+ .of_match_table = sof_of_mt8195_ids,
+ },
+};
+module_platform_driver(snd_sof_of_mt8195_driver);
+
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_IMPORT_NS(SND_SOC_SOF_MTK_COMMON);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.h b/sound/soc/sof/mediatek/mt8195/mt8195.h
new file mode 100644
index 000000000..b42291700
--- /dev/null
+++ b/sound/soc/sof/mediatek/mt8195/mt8195.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2021 MediaTek Corporation. All rights reserved.
+ *
+ * Header file for the mt8195 DSP register definition
+ */
+
+#ifndef __MT8195_H
+#define __MT8195_H
+
+struct mtk_adsp_chip_info;
+struct snd_sof_dev;
+
+#define DSP_REG_BASE 0x10803000
+#define SCP_CFGREG_BASE 0x10724000
+#define DSP_SYSAO_BASE 0x1080C000
+
+/*****************************************************************************
+ * R E G I S T E R TABLE
+ *****************************************************************************/
+#define DSP_JTAGMUX 0x0000
+#define DSP_ALTRESETVEC 0x0004
+#define DSP_PDEBUGDATA 0x0008
+#define DSP_PDEBUGBUS0 0x000c
+#define PDEBUG_ENABLE BIT(0)
+#define DSP_PDEBUGBUS1 0x0010
+#define DSP_PDEBUGINST 0x0014
+#define DSP_PDEBUGLS0STAT 0x0018
+#define DSP_PDEBUGLS1STAT 0x001c
+#define DSP_PDEBUGPC 0x0020
+#define DSP_RESET_SW			0x0024	/* software reset */
+#define ADSP_BRESET_SW BIT(0)
+#define ADSP_DRESET_SW BIT(1)
+#define ADSP_RUNSTALL BIT(3)
+#define STATVECTOR_SEL BIT(4)
+#define ADSP_PWAIT BIT(16)
+#define DSP_PFAULTBUS 0x0028
+#define DSP_PFAULTINFO 0x002c
+#define DSP_GPR00 0x0030
+#define DSP_GPR01 0x0034
+#define DSP_GPR02 0x0038
+#define DSP_GPR03 0x003c
+#define DSP_GPR04 0x0040
+#define DSP_GPR05 0x0044
+#define DSP_GPR06 0x0048
+#define DSP_GPR07 0x004c
+#define DSP_GPR08 0x0050
+#define DSP_GPR09 0x0054
+#define DSP_GPR0A 0x0058
+#define DSP_GPR0B 0x005c
+#define DSP_GPR0C 0x0060
+#define DSP_GPR0D 0x0064
+#define DSP_GPR0E 0x0068
+#define DSP_GPR0F 0x006c
+#define DSP_GPR10 0x0070
+#define DSP_GPR11 0x0074
+#define DSP_GPR12 0x0078
+#define DSP_GPR13 0x007c
+#define DSP_GPR14 0x0080
+#define DSP_GPR15 0x0084
+#define DSP_GPR16 0x0088
+#define DSP_GPR17 0x008c
+#define DSP_GPR18 0x0090
+#define DSP_GPR19 0x0094
+#define DSP_GPR1A 0x0098
+#define DSP_GPR1B 0x009c
+#define DSP_GPR1C 0x00a0
+#define DSP_GPR1D 0x00a4
+#define DSP_GPR1E 0x00a8
+#define DSP_GPR1F 0x00ac
+#define DSP_TCM_OFFSET 0x00b0 /* not used */
+#define DSP_DDR_OFFSET 0x00b4 /* not used */
+#define DSP_INTFDSP 0x00d0
+#define DSP_INTFDSP_CLR 0x00d4
+#define DSP_SRAM_PD_SW1 0x00d8
+#define DSP_SRAM_PD_SW2 0x00dc
+#define DSP_OCD 0x00e0
+#define DSP_RG_DSP_IRQ_POL 0x00f0 /* not used */
+#define DSP_DSP_IRQ_EN 0x00f4 /* not used */
+#define DSP_DSP_IRQ_LEVEL 0x00f8 /* not used */
+#define DSP_DSP_IRQ_STATUS 0x00fc /* not used */
+#define DSP_RG_INT2CIRQ 0x0114
+#define DSP_RG_INT_POL_CTL0 0x0120
+#define DSP_RG_INT_EN_CTL0 0x0130
+#define DSP_RG_INT_LV_CTL0 0x0140
+#define DSP_RG_INT_STATUS0 0x0150
+#define DSP_PDEBUGSTATUS0 0x0200
+#define DSP_PDEBUGSTATUS1 0x0204
+#define DSP_PDEBUGSTATUS2 0x0208
+#define DSP_PDEBUGSTATUS3 0x020c
+#define DSP_PDEBUGSTATUS4 0x0210
+#define DSP_PDEBUGSTATUS5 0x0214
+#define DSP_PDEBUGSTATUS6 0x0218
+#define DSP_PDEBUGSTATUS7 0x021c
+#define DSP_DSP2PSRAM_PRIORITY 0x0220 /* not used */
+#define DSP_AUDIO_DSP2SPM_INT 0x0224
+#define DSP_AUDIO_DSP2SPM_INT_ACK 0x0228
+#define DSP_AUDIO_DSP_DEBUG_SEL 0x022C
+#define DSP_AUDIO_DSP_EMI_BASE_ADDR 0x02E0 /* not used */
+#define DSP_AUDIO_DSP_SHARED_IRAM 0x02E4
+#define DSP_AUDIO_DSP_CKCTRL_P2P_CK_CON 0x02F0
+#define DSP_RG_SEMAPHORE00 0x0300
+#define DSP_RG_SEMAPHORE01 0x0304
+#define DSP_RG_SEMAPHORE02 0x0308
+#define DSP_RG_SEMAPHORE03 0x030C
+#define DSP_RG_SEMAPHORE04 0x0310
+#define DSP_RG_SEMAPHORE05 0x0314
+#define DSP_RG_SEMAPHORE06 0x0318
+#define DSP_RG_SEMAPHORE07 0x031C
+#define DSP_RESERVED_0 0x03F0
+#define DSP_RESERVED_1 0x03F4
+
+/* dsp wdt */
+#define DSP_WDT_MODE 0x0400
+
+/* dsp mbox */
+#define DSP_MBOX_IN_CMD 0x00
+#define DSP_MBOX_IN_CMD_CLR 0x04
+#define DSP_MBOX_OUT_CMD 0x1c
+#define DSP_MBOX_OUT_CMD_CLR 0x20
+#define DSP_MBOX_IN_MSG0 0x08
+#define DSP_MBOX_IN_MSG1 0x0C
+#define DSP_MBOX_OUT_MSG0 0x24
+#define DSP_MBOX_OUT_MSG1 0x28
+
+/* dsp sys ao */
+#define ADSP_SRAM_POOL_CON (DSP_SYSAO_BASE + 0x30)
+#define DSP_SRAM_POOL_PD_MASK 0xf
+#define DSP_EMI_MAP_ADDR (DSP_SYSAO_BASE + 0x81c)
+
+/* DSP memories */
+#define MBOX_OFFSET 0x800000 /* DRAM */
+#define MBOX_SIZE			0x1000	/* must match the definition in the SOF firmware's memory.h */
+#define DSP_DRAM_SIZE 0x1000000 /* 16M */
+
+#define DSP_REG_BAR 4
+#define DSP_MBOX0_BAR 5
+#define DSP_MBOX1_BAR 6
+#define DSP_MBOX2_BAR 7
+
+#define SIZE_SHARED_DRAM_DL		0x40000	/* shared buffer for downlink (256 KB) */
+#define SIZE_SHARED_DRAM_UL		0x40000	/* shared buffer for uplink (256 KB) */
+
+#define TOTAL_SIZE_SHARED_DRAM_FROM_TAIL \
+ (SIZE_SHARED_DRAM_DL + SIZE_SHARED_DRAM_UL)
+
+#define SRAM_PHYS_BASE_FROM_DSP_VIEW 0x40000000 /* MT8195 DSP view */
+#define DRAM_PHYS_BASE_FROM_DSP_VIEW 0x60000000 /* MT8195 DSP view */
+
+/* remap DRAM between the AP and DSP views, 4 KB aligned */
+#define DRAM_REMAP_SHIFT 12
+#define DRAM_REMAP_MASK (BIT(DRAM_REMAP_SHIFT) - 1)
+
+/* suspend dsp idle check interval and timeout */
+#define SUSPEND_DSP_IDLE_TIMEOUT_US	1000000	/* max time to wait for DSP idle, 1 sec */
+#define SUSPEND_DSP_IDLE_POLL_INTERVAL_US 500 /* 0.5 msec */
+
+void sof_hifixdsp_boot_sequence(struct snd_sof_dev *sdev, u32 boot_addr);
+void sof_hifixdsp_shutdown(struct snd_sof_dev *sdev);
+#endif
diff --git a/sound/soc/sof/mediatek/mtk-adsp-common.c b/sound/soc/sof/mediatek/mtk-adsp-common.c
new file mode 100644
index 000000000..de8dbe27c
--- /dev/null
+++ b/sound/soc/sof/mediatek/mtk-adsp-common.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 MediaTek Inc. All rights reserved.
+//
+// Author: YC Hung <yc.hung@mediatek.com>
+
+/*
+ * Common helpers for the audio DSP on MediaTek platforms
+ */
+
+#include <linux/module.h>
+#include <sound/sof/xtensa.h>
+#include "../ops.h"
+#include "mtk-adsp-common.h"
+
+/**
+ * mtk_adsp_get_registers() - Called in case of a DSP oops to gather the
+ * register dump, the panic filename and line number, and the stack dump.
+ * @sdev: SOF device
+ * @xoops: Stores the register dump.
+ * @panic_info: Stores the filename and line number.
+ * @stack: Stores the stack dump.
+ * @stack_words: Number of 32-bit words in the stack dump.
+ */
+static void mtk_adsp_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
+ /* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+ offset += xoops->arch_hdr.totalsize;
+ sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
+}
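
The three reads above walk a contiguous layout in the mailbox: the Xtensa
register dump at dsp_oops_offset, the panic info right after the architecture
header, then the stack dump. Below is a hypothetical helper spelling out the
stack offset, assuming the same structures as the function above.

static inline u32 example_stack_offset(const struct sof_ipc_dsp_oops_xtensa *xoops,
				       u32 oops_offset)
{
	/* registers first, then panic info, then the stack words */
	return oops_offset + xoops->arch_hdr.totalsize +
	       sizeof(struct sof_ipc_panic_info);
}
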
+
+/**
+ * mtk_adsp_dump() - This function is called when a panic message is
+ * received from the firmware.
+ * @sdev: SOF device
+ * @flags: parameter not used but required by ops prototype
+ */
+void mtk_adsp_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info = {};
+ u32 stack[MTK_ADSP_STACK_DUMP_SIZE];
+ u32 status;
+
+ /* Get information about the panic status from the debug box area.
+ * Compute the trace point based on the status.
+ */
+ sof_mailbox_read(sdev, sdev->debug_box.offset + 0x4, &status, 4);
+
+ /* Get information about the registers, the filename and line
+ * number and the stack.
+ */
+ mtk_adsp_get_registers(sdev, &xoops, &panic_info, stack,
+ MTK_ADSP_STACK_DUMP_SIZE);
+
+ /* Print the information to the console */
+ sof_print_oops_and_stack(sdev, level, status, status, &xoops, &panic_info,
+ stack, MTK_ADSP_STACK_DUMP_SIZE);
+}
+EXPORT_SYMBOL(mtk_adsp_dump);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/mediatek/mtk-adsp-common.h b/sound/soc/sof/mediatek/mtk-adsp-common.h
new file mode 100644
index 000000000..612cff1f3
--- /dev/null
+++ b/sound/soc/sof/mediatek/mtk-adsp-common.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef __MEDIATEK_ADSP_COMMON_H__
+#define __MEDIATEK_ADSP_COMMON_H__
+
+#define EXCEPT_MAX_HDR_SIZE 0x400
+#define MTK_ADSP_STACK_DUMP_SIZE 32
+
+void mtk_adsp_dump(struct snd_sof_dev *sdev, u32 flags);
+#endif
diff --git a/sound/soc/sof/nocodec.c b/sound/soc/sof/nocodec.c
new file mode 100644
index 000000000..7c5bb9bad
--- /dev/null
+++ b/sound/soc/sof/nocodec.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include "sof-audio.h"
+#include "sof-priv.h"
+
+static struct snd_soc_card sof_nocodec_card = {
+ .name = "nocodec", /* the sof- prefix is added by the core */
+ .topology_shortname = "sof-nocodec",
+ .owner = THIS_MODULE
+};
+
+static int sof_nocodec_bes_setup(struct device *dev,
+ struct snd_soc_dai_driver *drv,
+ struct snd_soc_dai_link *links,
+ int link_num, struct snd_soc_card *card)
+{
+ struct snd_soc_dai_link_component *dlc;
+ int i;
+
+ if (!drv || !links || !card)
+ return -EINVAL;
+
+ /* set up BE dai_links */
+ for (i = 0; i < link_num; i++) {
+ dlc = devm_kcalloc(dev, 2, sizeof(*dlc), GFP_KERNEL);
+ if (!dlc)
+ return -ENOMEM;
+
+ links[i].name = devm_kasprintf(dev, GFP_KERNEL,
+ "NoCodec-%d", i);
+ if (!links[i].name)
+ return -ENOMEM;
+
+ links[i].stream_name = links[i].name;
+
+ links[i].cpus = &dlc[0];
+ links[i].codecs = &asoc_dummy_dlc;
+ links[i].platforms = &dlc[1];
+
+ links[i].num_cpus = 1;
+ links[i].num_codecs = 1;
+ links[i].num_platforms = 1;
+
+ links[i].id = i;
+ links[i].no_pcm = 1;
+ links[i].cpus->dai_name = drv[i].name;
+ links[i].platforms->name = dev_name(dev->parent);
+ if (drv[i].playback.channels_min)
+ links[i].dpcm_playback = 1;
+ if (drv[i].capture.channels_min)
+ links[i].dpcm_capture = 1;
+
+ links[i].be_hw_params_fixup = sof_pcm_dai_link_fixup;
+ }
+
+ card->dai_link = links;
+ card->num_links = link_num;
+
+ return 0;
+}
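
For illustration, the first back-end link produced by the loop above is
roughly equivalent to the initializer sketched below; the cpu/platform names
are filled in at runtime from the DAI driver and the parent device, so they
are only indicated as comments here.

	struct snd_soc_dai_link link0 = {
		.name		= "NoCodec-0",
		.stream_name	= "NoCodec-0",
		.id		= 0,
		.no_pcm		= 1,
		.num_cpus	= 1,
		.num_codecs	= 1,	/* .codecs points at asoc_dummy_dlc */
		.num_platforms	= 1,
		/* .cpus->dai_name comes from drv[0], .platforms->name from dev->parent */
		.be_hw_params_fixup = sof_pcm_dai_link_fixup,
	};
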
+
+static int sof_nocodec_setup(struct device *dev,
+ u32 num_dai_drivers,
+ struct snd_soc_dai_driver *dai_drivers)
+{
+ struct snd_soc_dai_link *links;
+
+ /* create dummy BE dai_links */
+ links = devm_kcalloc(dev, num_dai_drivers, sizeof(struct snd_soc_dai_link), GFP_KERNEL);
+ if (!links)
+ return -ENOMEM;
+
+ return sof_nocodec_bes_setup(dev, dai_drivers, links, num_dai_drivers, &sof_nocodec_card);
+}
+
+static int sof_nocodec_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &sof_nocodec_card;
+ struct snd_soc_acpi_mach *mach;
+ int ret;
+
+ card->dev = &pdev->dev;
+ card->topology_shortname_created = true;
+ mach = pdev->dev.platform_data;
+
+ ret = sof_nocodec_setup(card->dev, mach->mach_params.num_dai_drivers,
+ mach->mach_params.dai_drivers);
+ if (ret < 0)
+ return ret;
+
+ return devm_snd_soc_register_card(&pdev->dev, card);
+}
+
+static struct platform_driver sof_nocodec_audio = {
+ .probe = sof_nocodec_probe,
+ .driver = {
+ .name = "sof-nocodec",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+module_platform_driver(sof_nocodec_audio)
+
+MODULE_DESCRIPTION("ASoC sof nocodec");
+MODULE_AUTHOR("Liam Girdwood");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:sof-nocodec");
diff --git a/sound/soc/sof/ops.c b/sound/soc/sof/ops.c
new file mode 100644
index 000000000..ff066de4c
--- /dev/null
+++ b/sound/soc/sof/ops.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/pci.h>
+#include "ops.h"
+
+static
+bool snd_sof_pci_update_bits_unlocked(struct snd_sof_dev *sdev, u32 offset,
+ u32 mask, u32 value)
+{
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ unsigned int old, new;
+ u32 ret = 0;
+
+ pci_read_config_dword(pci, offset, &ret);
+ old = ret;
+ dev_dbg(sdev->dev, "Debug PCIR: %8.8x at %8.8x\n", old & mask, offset);
+
+ new = (old & ~mask) | (value & mask);
+
+ if (old == new)
+ return false;
+
+ pci_write_config_dword(pci, offset, new);
+ dev_dbg(sdev->dev, "Debug PCIW: %8.8x at %8.8x\n", value,
+ offset);
+
+ return true;
+}
+
+bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
+ u32 mask, u32 value)
+{
+ unsigned long flags;
+ bool change;
+
+ spin_lock_irqsave(&sdev->hw_lock, flags);
+ change = snd_sof_pci_update_bits_unlocked(sdev, offset, mask, value);
+ spin_unlock_irqrestore(&sdev->hw_lock, flags);
+ return change;
+}
+EXPORT_SYMBOL(snd_sof_pci_update_bits);
+
+bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value)
+{
+ unsigned int old, new;
+ u32 ret;
+
+ ret = snd_sof_dsp_read(sdev, bar, offset);
+
+ old = ret;
+ new = (old & ~mask) | (value & mask);
+
+ if (old == new)
+ return false;
+
+ snd_sof_dsp_write(sdev, bar, offset, new);
+
+ return true;
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits_unlocked);
+
+bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u64 mask, u64 value)
+{
+ u64 old, new;
+
+ old = snd_sof_dsp_read64(sdev, bar, offset);
+
+ new = (old & ~mask) | (value & mask);
+
+ if (old == new)
+ return false;
+
+ snd_sof_dsp_write64(sdev, bar, offset, new);
+
+ return true;
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits64_unlocked);
+
+/* This is for register bits with attribute RWC */
+bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
+ u32 mask, u32 value)
+{
+ unsigned long flags;
+ bool change;
+
+ spin_lock_irqsave(&sdev->hw_lock, flags);
+ change = snd_sof_dsp_update_bits_unlocked(sdev, bar, offset, mask,
+ value);
+ spin_unlock_irqrestore(&sdev->hw_lock, flags);
+ return change;
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits);
+
+bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar, u32 offset,
+ u64 mask, u64 value)
+{
+ unsigned long flags;
+ bool change;
+
+ spin_lock_irqsave(&sdev->hw_lock, flags);
+ change = snd_sof_dsp_update_bits64_unlocked(sdev, bar, offset, mask,
+ value);
+ spin_unlock_irqrestore(&sdev->hw_lock, flags);
+ return change;
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits64);
+
+static
+void snd_sof_dsp_update_bits_forced_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value)
+{
+ unsigned int old, new;
+ u32 ret;
+
+ ret = snd_sof_dsp_read(sdev, bar, offset);
+
+ old = ret;
+ new = (old & ~mask) | (value & mask);
+
+ snd_sof_dsp_write(sdev, bar, offset, new);
+}
+
+/* This is for register bits with attribute RWC */
+void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdev->hw_lock, flags);
+ snd_sof_dsp_update_bits_forced_unlocked(sdev, bar, offset, mask, value);
+ spin_unlock_irqrestore(&sdev->hw_lock, flags);
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits_forced);
+
+/**
+ * snd_sof_dsp_panic - handle a received DSP panic message
+ * @sdev: Pointer to the device's sdev
+ * @offset: offset of panic information
+ * @non_recoverable: the panic is fatal, no recovery will be done by the caller
+ */
+void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset, bool non_recoverable)
+{
+ /*
+ * if DSP is not ready and the dsp_oops_offset is not yet set, use the
+ * offset from the panic message.
+ */
+ if (!sdev->dsp_oops_offset)
+ sdev->dsp_oops_offset = offset;
+
+ /*
+ * Print warning if the offset from the panic message differs from
+ * dsp_oops_offset
+ */
+ if (sdev->dsp_oops_offset != offset)
+ dev_warn(sdev->dev,
+ "%s: dsp_oops_offset %zu differs from panic offset %u\n",
+ __func__, sdev->dsp_oops_offset, offset);
+
+ /*
+ * Set the fw_state to crashed only in case of non recoverable DSP panic
+ * event.
+ * Use different message within the snd_sof_dsp_dbg_dump() depending on
+ * the non_recoverable flag.
+ */
+ sdev->dbg_dump_printed = false;
+ if (non_recoverable) {
+ snd_sof_dsp_dbg_dump(sdev, "DSP panic!",
+ SOF_DBG_DUMP_REGS | SOF_DBG_DUMP_MBOX);
+ sof_set_fw_state(sdev, SOF_FW_CRASHED);
+ sof_fw_trace_fw_crashed(sdev);
+ } else {
+ snd_sof_dsp_dbg_dump(sdev,
+ "DSP panic (recovery will be attempted)",
+ SOF_DBG_DUMP_REGS | SOF_DBG_DUMP_MBOX);
+ }
+}
+EXPORT_SYMBOL(snd_sof_dsp_panic);
diff --git a/sound/soc/sof/ops.h b/sound/soc/sof/ops.h
new file mode 100644
index 000000000..9ab7b9be7
--- /dev/null
+++ b/sound/soc/sof/ops.h
@@ -0,0 +1,628 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOUND_SOC_SOF_IO_H
+#define __SOUND_SOC_SOF_IO_H
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <sound/pcm.h>
+#include "sof-priv.h"
+
+#define sof_ops(sdev) \
+ ((sdev)->pdata->desc->ops)
+
+static inline int sof_ops_init(struct snd_sof_dev *sdev)
+{
+ if (sdev->pdata->desc->ops_init)
+ return sdev->pdata->desc->ops_init(sdev);
+
+ return 0;
+}
+
+static inline void sof_ops_free(struct snd_sof_dev *sdev)
+{
+ if (sdev->pdata->desc->ops_free)
+ sdev->pdata->desc->ops_free(sdev);
+}
+
+/* Mandatory operations are verified during probing */
+
+/* init */
+static inline int snd_sof_probe(struct snd_sof_dev *sdev)
+{
+ return sof_ops(sdev)->probe(sdev);
+}
+
+static inline int snd_sof_remove(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->remove)
+ return sof_ops(sdev)->remove(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_shutdown(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->shutdown)
+ return sof_ops(sdev)->shutdown(sdev);
+
+ return 0;
+}
+
+/* control */
+
+/*
+ * snd_sof_dsp_run returns the core mask of the cores that are available
+ * after successful fw boot
+ */
+static inline int snd_sof_dsp_run(struct snd_sof_dev *sdev)
+{
+ return sof_ops(sdev)->run(sdev);
+}
+
+static inline int snd_sof_dsp_stall(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ if (sof_ops(sdev)->stall)
+ return sof_ops(sdev)->stall(sdev, core_mask);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_reset(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->reset)
+ return sof_ops(sdev)->reset(sdev);
+
+ return 0;
+}
+
+/* dsp core get/put */
+static inline int snd_sof_dsp_core_get(struct snd_sof_dev *sdev, int core)
+{
+ if (core > sdev->num_cores - 1) {
+ dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
+ sdev->num_cores);
+ return -EINVAL;
+ }
+
+ if (sof_ops(sdev)->core_get) {
+ int ret;
+
+ /* if current ref_count is > 0, increment it and return */
+ if (sdev->dsp_core_ref_count[core] > 0) {
+ sdev->dsp_core_ref_count[core]++;
+ return 0;
+ }
+
+ /* power up the core */
+ ret = sof_ops(sdev)->core_get(sdev, core);
+ if (ret < 0)
+ return ret;
+
+ /* increment ref_count */
+ sdev->dsp_core_ref_count[core]++;
+
+ /* and update enabled_cores_mask */
+ sdev->enabled_cores_mask |= BIT(core);
+
+ dev_dbg(sdev->dev, "Core %d powered up\n", core);
+ }
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_core_put(struct snd_sof_dev *sdev, int core)
+{
+ if (core > sdev->num_cores - 1) {
+ dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
+ sdev->num_cores);
+ return -EINVAL;
+ }
+
+ if (sof_ops(sdev)->core_put) {
+ int ret;
+
+ /* decrement ref_count and return if it is > 0 */
+ if (--(sdev->dsp_core_ref_count[core]) > 0)
+ return 0;
+
+ /* power down the core */
+ ret = sof_ops(sdev)->core_put(sdev, core);
+ if (ret < 0)
+ return ret;
+
+ /* and update enabled_cores_mask */
+ sdev->enabled_cores_mask &= ~BIT(core);
+
+ dev_dbg(sdev->dev, "Core %d powered down\n", core);
+ }
+
+ return 0;
+}
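
A minimal usage sketch (hypothetical caller) of the refcounted helpers above:
gets and puts must be balanced, and a get on a core that is already powered
only increments the reference count.

static inline int example_use_core(struct snd_sof_dev *sdev, int core)
{
	int ret = snd_sof_dsp_core_get(sdev, core);

	if (ret < 0)
		return ret;

	/* ... do work that requires the core to stay powered ... */

	return snd_sof_dsp_core_put(sdev, core);
}
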
+
+/* pre/post fw load */
+static inline int snd_sof_dsp_pre_fw_run(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->pre_fw_run)
+ return sof_ops(sdev)->pre_fw_run(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_post_fw_run(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->post_fw_run)
+ return sof_ops(sdev)->post_fw_run(sdev);
+
+ return 0;
+}
+
+/* parse platform specific extended manifest */
+static inline int snd_sof_dsp_parse_platform_ext_manifest(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ if (sof_ops(sdev)->parse_platform_ext_manifest)
+ return sof_ops(sdev)->parse_platform_ext_manifest(sdev, hdr);
+
+ return 0;
+}
+
+/* misc */
+
+/**
+ * snd_sof_dsp_get_bar_index - Maps a section type with a BAR index
+ *
+ * @sdev: sof device
+ * @type: section type as described by snd_sof_fw_blk_type
+ *
+ * Returns the corresponding BAR index (a positive integer) or -EINVAL
+ * in case there is no mapping
+ */
+static inline int snd_sof_dsp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
+{
+ if (sof_ops(sdev)->get_bar_index)
+ return sof_ops(sdev)->get_bar_index(sdev, type);
+
+ return sdev->mmio_bar;
+}
+
+static inline int snd_sof_dsp_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->get_mailbox_offset)
+ return sof_ops(sdev)->get_mailbox_offset(sdev);
+
+ dev_err(sdev->dev, "error: %s not defined\n", __func__);
+ return -ENOTSUPP;
+}
+
+static inline int snd_sof_dsp_get_window_offset(struct snd_sof_dev *sdev,
+ u32 id)
+{
+ if (sof_ops(sdev)->get_window_offset)
+ return sof_ops(sdev)->get_window_offset(sdev, id);
+
+ dev_err(sdev->dev, "error: %s not defined\n", __func__);
+ return -ENOTSUPP;
+}
+/* power management */
+static inline int snd_sof_dsp_resume(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->resume)
+ return sof_ops(sdev)->resume(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev,
+ u32 target_state)
+{
+ if (sof_ops(sdev)->suspend)
+ return sof_ops(sdev)->suspend(sdev, target_state);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_runtime_resume(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->runtime_resume)
+ return sof_ops(sdev)->runtime_resume(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_runtime_suspend(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->runtime_suspend)
+ return sof_ops(sdev)->runtime_suspend(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_runtime_idle(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->runtime_idle)
+ return sof_ops(sdev)->runtime_idle(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_hw_params_upon_resume(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->set_hw_params_upon_resume)
+ return sof_ops(sdev)->set_hw_params_upon_resume(sdev);
+ return 0;
+}
+
+static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
+{
+ if (sof_ops(sdev)->set_clk)
+ return sof_ops(sdev)->set_clk(sdev, freq);
+
+ return 0;
+}
+
+static inline int
+snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ int ret = 0;
+
+ mutex_lock(&sdev->power_state_access);
+
+ if (sof_ops(sdev)->set_power_state)
+ ret = sof_ops(sdev)->set_power_state(sdev, target_state);
+
+ mutex_unlock(&sdev->power_state_access);
+
+ return ret;
+}
+
+/* debug */
+void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, const char *msg, u32 flags);
+
+static inline int snd_sof_debugfs_add_region_item(struct snd_sof_dev *sdev,
+ enum snd_sof_fw_blk_type blk_type, u32 offset, size_t size,
+ const char *name, enum sof_debugfs_access_type access_type)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->debugfs_add_region_item)
+ return sof_ops(sdev)->debugfs_add_region_item(sdev, blk_type, offset,
+ size, name, access_type);
+
+ return 0;
+}
+
+/* register IO */
+static inline void snd_sof_dsp_write8(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u8 value)
+{
+ if (sof_ops(sdev)->write8)
+ sof_ops(sdev)->write8(sdev, sdev->bar[bar] + offset, value);
+ else
+ writeb(value, sdev->bar[bar] + offset);
+}
+
+static inline void snd_sof_dsp_write(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 value)
+{
+ if (sof_ops(sdev)->write)
+ sof_ops(sdev)->write(sdev, sdev->bar[bar] + offset, value);
+ else
+ writel(value, sdev->bar[bar] + offset);
+}
+
+static inline void snd_sof_dsp_write64(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u64 value)
+{
+ if (sof_ops(sdev)->write64)
+ sof_ops(sdev)->write64(sdev, sdev->bar[bar] + offset, value);
+ else
+ writeq(value, sdev->bar[bar] + offset);
+}
+
+static inline u8 snd_sof_dsp_read8(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset)
+{
+ if (sof_ops(sdev)->read8)
+ return sof_ops(sdev)->read8(sdev, sdev->bar[bar] + offset);
+ else
+ return readb(sdev->bar[bar] + offset);
+}
+
+static inline u32 snd_sof_dsp_read(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset)
+{
+ if (sof_ops(sdev)->read)
+ return sof_ops(sdev)->read(sdev, sdev->bar[bar] + offset);
+ else
+ return readl(sdev->bar[bar] + offset);
+}
+
+static inline u64 snd_sof_dsp_read64(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset)
+{
+ if (sof_ops(sdev)->read64)
+ return sof_ops(sdev)->read64(sdev, sdev->bar[bar] + offset);
+ else
+ return readq(sdev->bar[bar] + offset);
+}
+
+static inline void snd_sof_dsp_update8(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u8 mask, u8 value)
+{
+ u8 reg;
+
+ reg = snd_sof_dsp_read8(sdev, bar, offset);
+ reg &= ~mask;
+ reg |= value;
+ snd_sof_dsp_write8(sdev, bar, offset, reg);
+}
+
+/* block IO */
+static inline int snd_sof_dsp_block_read(struct snd_sof_dev *sdev,
+ enum snd_sof_fw_blk_type blk_type,
+ u32 offset, void *dest, size_t bytes)
+{
+ return sof_ops(sdev)->block_read(sdev, blk_type, offset, dest, bytes);
+}
+
+static inline int snd_sof_dsp_block_write(struct snd_sof_dev *sdev,
+ enum snd_sof_fw_blk_type blk_type,
+ u32 offset, void *src, size_t bytes)
+{
+ return sof_ops(sdev)->block_write(sdev, blk_type, offset, src, bytes);
+}
+
+/* mailbox IO */
+static inline void snd_sof_dsp_mailbox_read(struct snd_sof_dev *sdev,
+ u32 offset, void *dest, size_t bytes)
+{
+ if (sof_ops(sdev)->mailbox_read)
+ sof_ops(sdev)->mailbox_read(sdev, offset, dest, bytes);
+}
+
+static inline void snd_sof_dsp_mailbox_write(struct snd_sof_dev *sdev,
+ u32 offset, void *src, size_t bytes)
+{
+ if (sof_ops(sdev)->mailbox_write)
+ sof_ops(sdev)->mailbox_write(sdev, offset, src, bytes);
+}
+
+/* ipc */
+static inline int snd_sof_dsp_send_msg(struct snd_sof_dev *sdev,
+ struct snd_sof_ipc_msg *msg)
+{
+ return sof_ops(sdev)->send_msg(sdev, msg);
+}
+
+/* host PCM ops */
+static inline int
+snd_sof_pcm_platform_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_open)
+ return sof_ops(sdev)->pcm_open(sdev, substream);
+
+ return 0;
+}
+
+/* disconnect pcm substream to a host stream */
+static inline int
+snd_sof_pcm_platform_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_close)
+ return sof_ops(sdev)->pcm_close(sdev, substream);
+
+ return 0;
+}
+
+/* host stream hw params */
+static inline int
+snd_sof_pcm_platform_hw_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_params)
+ return sof_ops(sdev)->pcm_hw_params(sdev, substream, params,
+ platform_params);
+
+ return 0;
+}
+
+/* host stream hw free */
+static inline int
+snd_sof_pcm_platform_hw_free(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_free)
+ return sof_ops(sdev)->pcm_hw_free(sdev, substream);
+
+ return 0;
+}
+
+/* host stream trigger */
+static inline int
+snd_sof_pcm_platform_trigger(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_trigger)
+ return sof_ops(sdev)->pcm_trigger(sdev, substream, cmd);
+
+ return 0;
+}
+
+/* Firmware loading */
+static inline int snd_sof_load_firmware(struct snd_sof_dev *sdev)
+{
+ dev_dbg(sdev->dev, "loading firmware\n");
+
+ return sof_ops(sdev)->load_firmware(sdev);
+}
+
+/* host DSP message data */
+static inline int snd_sof_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ void *p, size_t sz)
+{
+ return sof_ops(sdev)->ipc_msg_data(sdev, sps, p, sz);
+}
+/* host side configuration of the stream's data offset in stream mailbox area */
+static inline int
+snd_sof_set_stream_data_offset(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ size_t posn_offset)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->set_stream_data_offset)
+ return sof_ops(sdev)->set_stream_data_offset(sdev, sps,
+ posn_offset);
+
+ return 0;
+}
+
+/* host stream pointer */
+static inline snd_pcm_uframes_t
+snd_sof_pcm_platform_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_pointer)
+ return sof_ops(sdev)->pcm_pointer(sdev, substream);
+
+ return 0;
+}
+
+/* pcm ack */
+static inline int snd_sof_pcm_platform_ack(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_ack)
+ return sof_ops(sdev)->pcm_ack(sdev, substream);
+
+ return 0;
+}
+
+static inline u64 snd_sof_pcm_get_stream_position(struct snd_sof_dev *sdev,
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->get_stream_position)
+ return sof_ops(sdev)->get_stream_position(sdev, component, substream);
+
+ return 0;
+}
+
+/* machine driver */
+static inline int
+snd_sof_machine_register(struct snd_sof_dev *sdev, void *pdata)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->machine_register)
+ return sof_ops(sdev)->machine_register(sdev, pdata);
+
+ return 0;
+}
+
+static inline void
+snd_sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->machine_unregister)
+ sof_ops(sdev)->machine_unregister(sdev, pdata);
+}
+
+static inline struct snd_soc_acpi_mach *
+snd_sof_machine_select(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->machine_select)
+ return sof_ops(sdev)->machine_select(sdev);
+
+ return NULL;
+}
+
+static inline void
+snd_sof_set_mach_params(struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->set_mach_params)
+ sof_ops(sdev)->set_mach_params(mach, sdev);
+}
+
+/**
+ * snd_sof_dsp_read_poll_timeout - Periodically poll a DSP register
+ * until a condition is met or a timeout occurs
+ * @sdev: SOF device
+ * @bar: BAR index of the register
+ * @offset: Register offset to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value of the register is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ *
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ */
+#define snd_sof_dsp_read_poll_timeout(sdev, bar, offset, val, cond, sleep_us, timeout_us) \
+({ \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __sleep_us = (sleep_us); \
+ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ might_sleep_if((__sleep_us) != 0); \
+ for (;;) { \
+ (val) = snd_sof_dsp_read(sdev, bar, offset); \
+ if (cond) { \
+ dev_dbg(sdev->dev, \
+ "FW Poll Status: reg[%#x]=%#x successful\n", \
+ (offset), (val)); \
+ break; \
+ } \
+ if (__timeout_us && \
+ ktime_compare(ktime_get(), __timeout) > 0) { \
+ (val) = snd_sof_dsp_read(sdev, bar, offset); \
+ dev_dbg(sdev->dev, \
+ "FW Poll Status: reg[%#x]=%#x timedout\n", \
+ (offset), (val)); \
+ break; \
+ } \
+ if (__sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
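
A minimal usage sketch of the poll macro above; the BAR index, register offset
and ready bit are hypothetical, but the calling pattern mirrors real users such
as the platform suspend paths.

static inline int example_wait_for_ready(struct snd_sof_dev *sdev)
{
	u32 status;

	/* poll every 500 us, give up after 100 ms */
	return snd_sof_dsp_read_poll_timeout(sdev, 0 /* hypothetical BAR */,
					     0x160 /* hypothetical register */,
					     status, (status & 0x1),
					     500, 100000);
}
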
+
+/* This is for register bits with attribute RWC */
+bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
+ u32 mask, u32 value);
+
+bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value);
+
+bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u64 mask, u64 value);
+
+bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
+ u32 mask, u32 value);
+
+bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u64 mask, u64 value);
+
+void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value);
+
+int snd_sof_dsp_register_poll(struct snd_sof_dev *sdev, u32 bar, u32 offset,
+ u32 mask, u32 target, u32 timeout_ms,
+ u32 interval_us);
+
+void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset, bool non_recoverable);
+#endif
diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
new file mode 100644
index 000000000..d778717ca
--- /dev/null
+++ b/sound/soc/sof/pcm.c
@@ -0,0 +1,736 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+// PCM Layer, interface between ALSA and IPC.
+//
+
+#include <linux/pm_runtime.h>
+#include <sound/pcm_params.h>
+#include <sound/sof.h>
+#include <trace/events/sof.h>
+#include "sof-of-dev.h"
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "sof-utils.h"
+#include "ops.h"
+
+/* Create DMA buffer page table for DSP */
+static int create_page_table(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ unsigned char *dma_area, size_t size)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_pcm *spcm;
+ struct snd_dma_buffer *dmab = snd_pcm_get_dma_buf(substream);
+ int stream = substream->stream;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ return snd_sof_create_page_table(component->dev, dmab,
+ spcm->stream[stream].page_table.area, size);
+}
+
+/*
+ * SOF PCM period elapsed work
+ */
+static void snd_sof_pcm_period_elapsed_work(struct work_struct *work)
+{
+ struct snd_sof_pcm_stream *sps =
+ container_of(work, struct snd_sof_pcm_stream,
+ period_elapsed_work);
+
+ snd_pcm_period_elapsed(sps->substream);
+}
+
+void snd_sof_pcm_init_elapsed_work(struct work_struct *work)
+{
+ INIT_WORK(work, snd_sof_pcm_period_elapsed_work);
+}
+
+/*
+ * SOF PCM period elapsed notification; this may be called from IRQ thread
+ * context.
+ */
+void snd_sof_pcm_period_elapsed(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
+ struct snd_sof_pcm *spcm;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm) {
+ dev_err(component->dev,
+ "error: period elapsed for unknown stream!\n");
+ return;
+ }
+
+	/*
+	 * snd_pcm_period_elapsed() can be called in interrupt context
+	 * before IRQ_HANDLED is returned. Inside snd_pcm_period_elapsed(),
+	 * when the PCM is done draining or an xrun happened, a STOP IPC
+	 * would be sent while the current IPC is still in flight and would
+	 * hit the IPC timeout. To avoid sending an IPC before the previous
+	 * one is handled, schedule a work item here and call
+	 * snd_pcm_period_elapsed() from that work item instead.
+	 */
+ schedule_work(&spcm->stream[substream->stream].period_elapsed_work);
+}
+EXPORT_SYMBOL(snd_sof_pcm_period_elapsed);
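
A minimal sketch of the intended caller (hypothetical platform code): position
or period-done notifications arriving from IPC/IRQ-thread context go through
the helper above, which defers the actual snd_pcm_period_elapsed() call to the
per-stream work item initialized by snd_sof_pcm_init_elapsed_work().

static void example_period_notification(struct snd_pcm_substream *substream)
{
	/* safe from IPC context: the core call is made from a work item */
	snd_sof_pcm_period_elapsed(substream);
}
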
+
+static int
+sof_pcm_setup_connected_widgets(struct snd_sof_dev *sdev, struct snd_soc_pcm_runtime *rtd,
+ struct snd_sof_pcm *spcm, struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params, int dir)
+{
+ struct snd_soc_dai *dai;
+ int ret, j;
+
+ /* query DAPM for list of connected widgets and set them up */
+ for_each_rtd_cpu_dais(rtd, j, dai) {
+ struct snd_soc_dapm_widget_list *list;
+
+ ret = snd_soc_dapm_dai_get_connected_widgets(dai, dir, &list,
+ dpcm_end_walk_at_be);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dai %s has no valid %s path\n", dai->name,
+ dir == SNDRV_PCM_STREAM_PLAYBACK ? "playback" : "capture");
+ return ret;
+ }
+
+ spcm->stream[dir].list = list;
+
+ ret = sof_widget_list_setup(sdev, spcm, params, platform_params, dir);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed widget list set up for pcm %d dir %d\n",
+ spcm->pcm.pcm_id, dir);
+ spcm->stream[dir].list = NULL;
+ snd_soc_dapm_dai_free_widgets(&list);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int sof_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ const struct sof_ipc_pcm_ops *pcm_ops = sof_ipc_get_ops(sdev, pcm);
+ struct snd_sof_platform_stream_params platform_params = { 0 };
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_sof_pcm *spcm;
+ int ret;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ /*
+ * Handle repeated calls to hw_params() without free_pcm() in
+ * between. At least ALSA OSS emulation depends on this.
+ */
+ if (pcm_ops && pcm_ops->hw_free && spcm->prepared[substream->stream]) {
+ ret = pcm_ops->hw_free(component, substream);
+ if (ret < 0)
+ return ret;
+
+ spcm->prepared[substream->stream] = false;
+ }
+
+ dev_dbg(component->dev, "pcm: hw params stream %d dir %d\n",
+ spcm->pcm.pcm_id, substream->stream);
+
+ ret = snd_sof_pcm_platform_hw_params(sdev, substream, params, &platform_params);
+ if (ret < 0) {
+ dev_err(component->dev, "platform hw params failed\n");
+ return ret;
+ }
+
+ /* if this is a repeated hw_params without hw_free, skip setting up widgets */
+ if (!spcm->stream[substream->stream].list) {
+ ret = sof_pcm_setup_connected_widgets(sdev, rtd, spcm, params, &platform_params,
+ substream->stream);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* create compressed page table for audio firmware */
+ if (runtime->buffer_changed) {
+ ret = create_page_table(component, substream, runtime->dma_area,
+ runtime->dma_bytes);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ if (pcm_ops && pcm_ops->hw_params) {
+ ret = pcm_ops->hw_params(component, substream, params, &platform_params);
+ if (ret < 0)
+ return ret;
+ }
+
+ spcm->prepared[substream->stream] = true;
+
+ /* save pcm hw_params */
+ memcpy(&spcm->params[substream->stream], params, sizeof(*params));
+
+ return 0;
+}
+
+static int sof_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ const struct sof_ipc_pcm_ops *pcm_ops = sof_ipc_get_ops(sdev, pcm);
+ struct snd_sof_pcm *spcm;
+ int ret, err = 0;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ dev_dbg(component->dev, "pcm: free stream %d dir %d\n",
+ spcm->pcm.pcm_id, substream->stream);
+
+ if (spcm->prepared[substream->stream]) {
+ /* stop DMA first if needed */
+ if (pcm_ops && pcm_ops->platform_stop_during_hw_free)
+ snd_sof_pcm_platform_trigger(sdev, substream, SNDRV_PCM_TRIGGER_STOP);
+
+ /* free PCM in the DSP */
+ if (pcm_ops && pcm_ops->hw_free) {
+ ret = pcm_ops->hw_free(component, substream);
+ if (ret < 0)
+ err = ret;
+ }
+
+ spcm->prepared[substream->stream] = false;
+ }
+
+ /* reset DMA */
+ ret = snd_sof_pcm_platform_hw_free(sdev, substream);
+ if (ret < 0) {
+ dev_err(component->dev, "error: platform hw free failed\n");
+ err = ret;
+ }
+
+ /* free the DAPM widget list */
+ ret = sof_widget_list_free(sdev, spcm, substream->stream);
+ if (ret < 0)
+ err = ret;
+
+ cancel_work_sync(&spcm->stream[substream->stream].period_elapsed_work);
+
+ return err;
+}
+
+static int sof_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_pcm *spcm;
+ int ret;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ if (spcm->prepared[substream->stream])
+ return 0;
+
+ dev_dbg(component->dev, "pcm: prepare stream %d dir %d\n",
+ spcm->pcm.pcm_id, substream->stream);
+
+ /* set hw_params */
+ ret = sof_pcm_hw_params(component,
+ substream, &spcm->params[substream->stream]);
+ if (ret < 0) {
+		dev_err(component->dev,
+			"error: failed to set pcm hw_params after resume\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * FE dai link trigger actions are always executed in non-atomic context because
+ * they involve IPCs.
+ */
+static int sof_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ const struct sof_ipc_pcm_ops *pcm_ops = sof_ipc_get_ops(sdev, pcm);
+ struct snd_sof_pcm *spcm;
+ bool reset_hw_params = false;
+ bool ipc_first = false;
+ int ret = 0;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ dev_dbg(component->dev, "pcm: trigger stream %d dir %d cmd %d\n",
+ spcm->pcm.pcm_id, substream->stream, cmd);
+
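+ /*
+ * Decide, per trigger command, whether the IPC must be sent before the
+ * platform (DMA) trigger and whether the hw_params must be reset once
+ * the stream has been stopped.
+ */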
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ipc_first = true;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (pcm_ops && pcm_ops->ipc_first_on_start)
+ ipc_first = true;
+ break;
+ case SNDRV_PCM_TRIGGER_START:
+ if (spcm->stream[substream->stream].suspend_ignored) {
+ /*
+ * This case is hit when INFO_RESUME is not supported;
+ * there is no need to re-start streams that remained
+ * enabled in D0ix.
+ */
+ spcm->stream[substream->stream].suspend_ignored = false;
+ return 0;
+ }
+
+ if (pcm_ops && pcm_ops->ipc_first_on_start)
+ ipc_first = true;
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (sdev->system_suspend_target == SOF_SUSPEND_S0IX &&
+ spcm->stream[substream->stream].d0i3_compatible) {
+ /*
+ * trap the event: do not send the trigger stop, so
+ * that the FW pipelines are not stopped, and set the
+ * flag to ignore the upcoming DAPM PM events.
+ */
+ spcm->stream[substream->stream].suspend_ignored = true;
+ return 0;
+ }
+
+ /* On suspend the DMA must be stopped in DSPless mode */
+ if (sdev->dspless_mode_selected)
+ reset_hw_params = true;
+
+ fallthrough;
+ case SNDRV_PCM_TRIGGER_STOP:
+ ipc_first = true;
+ if (pcm_ops && pcm_ops->reset_hw_params_during_stop)
+ reset_hw_params = true;
+ break;
+ default:
+ dev_err(component->dev, "Unhandled trigger cmd %d\n", cmd);
+ return -EINVAL;
+ }
+
+ if (!ipc_first)
+ snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+
+ if (pcm_ops && pcm_ops->trigger)
+ ret = pcm_ops->trigger(component, substream, cmd);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_START:
+ /* invoke platform trigger to start DMA only if pcm_ops is successful */
+ if (ipc_first && !ret)
+ snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_STOP:
+ /* invoke platform trigger to stop DMA even if pcm_ops isn't set or if it failed */
+ if (!pcm_ops || !pcm_ops->platform_stop_during_hw_free)
+ snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+ break;
+ default:
+ break;
+ }
+
+ /* free PCM if reset_hw_params is set and the STOP IPC is successful */
+ if (!ret && reset_hw_params)
+ ret = sof_pcm_stream_free(sdev, substream, spcm, substream->stream, false);
+
+ return ret;
+}
+
+static snd_pcm_uframes_t sof_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ snd_pcm_uframes_t host, dai;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ /* use dsp ops pointer callback directly if set */
+ if (sof_ops(sdev)->pcm_pointer)
+ return sof_ops(sdev)->pcm_pointer(sdev, substream);
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ /* read position from DSP */
+ host = bytes_to_frames(substream->runtime,
+ spcm->stream[substream->stream].posn.host_posn);
+ dai = bytes_to_frames(substream->runtime,
+ spcm->stream[substream->stream].posn.dai_posn);
+
+ trace_sof_pcm_pointer_position(sdev, spcm, substream, host, dai);
+
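+ /* only the host position is reported to the ALSA core; the DAI position is traced above */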
+ return host;
+}
+
+static int sof_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_dsp_ops *ops = sof_ops(sdev);
+ struct snd_sof_pcm *spcm;
+ struct snd_soc_tplg_stream_caps *caps;
+ int ret;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ dev_dbg(component->dev, "pcm: open stream %d dir %d\n",
+ spcm->pcm.pcm_id, substream->stream);
+
+ caps = &spcm->pcm.caps[substream->stream];
+
+ /* set runtime config */
+ runtime->hw.info = ops->hw_info; /* platform-specific */
+
+ /* set any runtime constraints based on topology */
+ runtime->hw.formats = le64_to_cpu(caps->formats);
+ runtime->hw.period_bytes_min = le32_to_cpu(caps->period_size_min);
+ runtime->hw.period_bytes_max = le32_to_cpu(caps->period_size_max);
+ runtime->hw.periods_min = le32_to_cpu(caps->periods_min);
+ runtime->hw.periods_max = le32_to_cpu(caps->periods_max);
+
+ /*
+ * caps->buffer_size_min is not used since the
+ * snd_pcm_hardware structure only defines buffer_bytes_max
+ */
+ runtime->hw.buffer_bytes_max = le32_to_cpu(caps->buffer_size_max);
+
+ dev_dbg(component->dev, "period min %zd max %zd bytes\n",
+ runtime->hw.period_bytes_min,
+ runtime->hw.period_bytes_max);
+ dev_dbg(component->dev, "period count %d max %d\n",
+ runtime->hw.periods_min,
+ runtime->hw.periods_max);
+ dev_dbg(component->dev, "buffer max %zd bytes\n",
+ runtime->hw.buffer_bytes_max);
+
+ /* set wait time - TODO: come from topology */
+ substream->wait_time = 500;
+
+ spcm->stream[substream->stream].posn.host_posn = 0;
+ spcm->stream[substream->stream].posn.dai_posn = 0;
+ spcm->stream[substream->stream].substream = substream;
+ spcm->prepared[substream->stream] = false;
+
+ ret = snd_sof_pcm_platform_open(sdev, substream);
+ if (ret < 0)
+ dev_err(component->dev, "error: pcm open failed %d\n", ret);
+
+ return ret;
+}
+
+static int sof_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ int err;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ dev_dbg(component->dev, "pcm: close stream %d dir %d\n",
+ spcm->pcm.pcm_id, substream->stream);
+
+ err = snd_sof_pcm_platform_close(sdev, substream);
+ if (err < 0) {
+ dev_err(component->dev, "error: pcm close failed %d\n",
+ err);
+ /*
+ * keep going, no point in preventing the close
+ * from happening
+ */
+ }
+
+ return 0;
+}
+
+/*
+ * Pre-allocate playback/capture audio buffer pages.
+ * There is no need to explicitly release the memory preallocated by
+ * sof_pcm_new() in pcm_free: snd_pcm_lib_preallocate_free_for_all() is
+ * called by the core.
+ */
+static int sof_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ struct snd_pcm *pcm = rtd->pcm;
+ struct snd_soc_tplg_stream_caps *caps;
+ int stream = SNDRV_PCM_STREAM_PLAYBACK;
+
+ /* find SOF PCM for this RTD */
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm) {
+ dev_warn(component->dev, "warn: can't find PCM with DAI ID %d\n",
+ rtd->dai_link->id);
+ return 0;
+ }
+
+ dev_dbg(component->dev, "creating new PCM %s\n", spcm->pcm.pcm_name);
+
+ /* do we need to pre-allocate playback audio buffer pages */
+ if (!spcm->pcm.playback)
+ goto capture;
+
+ caps = &spcm->pcm.caps[stream];
+
+ /* pre-allocate playback audio buffer pages */
+ dev_dbg(component->dev,
+ "spcm: allocate %s playback DMA buffer size 0x%x max 0x%x\n",
+ caps->name, caps->buffer_size_min, caps->buffer_size_max);
+
+ if (!pcm->streams[stream].substream) {
+ dev_err(component->dev, "error: NULL playback substream!\n");
+ return -EINVAL;
+ }
+
+ snd_pcm_set_managed_buffer(pcm->streams[stream].substream,
+ SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
+ 0, le32_to_cpu(caps->buffer_size_max));
+capture:
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+
+ /* do we need to pre-allocate capture audio buffer pages */
+ if (!spcm->pcm.capture)
+ return 0;
+
+ caps = &spcm->pcm.caps[stream];
+
+ /* pre-allocate capture audio buffer pages */
+ dev_dbg(component->dev,
+ "spcm: allocate %s capture DMA buffer size 0x%x max 0x%x\n",
+ caps->name, caps->buffer_size_min, caps->buffer_size_max);
+
+ if (!pcm->streams[stream].substream) {
+ dev_err(component->dev, "error: NULL capture substream!\n");
+ return -EINVAL;
+ }
+
+ snd_pcm_set_managed_buffer(pcm->streams[stream].substream,
+ SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
+ 0, le32_to_cpu(caps->buffer_size_max));
+
+ return 0;
+}
+
+/* fixup the BE DAI link to match any values from topology */
+int sof_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
+ struct snd_sof_dai *dai =
+ snd_sof_find_dai(component, (char *)rtd->dai_link->name);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ const struct sof_ipc_pcm_ops *pcm_ops = sof_ipc_get_ops(sdev, pcm);
+
+ /* no topology exists for this BE, try a common configuration */
+ if (!dai) {
+ dev_warn(component->dev,
+ "warning: no topology found for BE DAI %s config\n",
+ rtd->dai_link->name);
+
+ /* set 48k, stereo, 16bits by default */
+ rate->min = 48000;
+ rate->max = 48000;
+
+ channels->min = 2;
+ channels->max = 2;
+
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+
+ return 0;
+ }
+
+ if (pcm_ops && pcm_ops->dai_link_fixup)
+ return pcm_ops->dai_link_fixup(rtd, params);
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_pcm_dai_link_fixup);
+
+static int sof_pcm_probe(struct snd_soc_component *component)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const char *tplg_filename;
+ int ret;
+
+ /*
+ * make sure the device is pm_runtime_active before loading the
+ * topology and initiating IPC or bus transactions
+ */
+ ret = pm_runtime_resume_and_get(component->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ /* load the default topology */
+ sdev->component = component;
+
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s/%s",
+ plat_data->tplg_filename_prefix,
+ plat_data->tplg_filename);
+ if (!tplg_filename) {
+ ret = -ENOMEM;
+ goto pm_error;
+ }
+
+ ret = snd_sof_load_topology(component, tplg_filename);
+ if (ret < 0)
+ dev_err(component->dev, "error: failed to load DSP topology %d\n",
+ ret);
+
+pm_error:
+ pm_runtime_mark_last_busy(component->dev);
+ pm_runtime_put_autosuspend(component->dev);
+
+ return ret;
+}
+
+static void sof_pcm_remove(struct snd_soc_component *component)
+{
+ /* remove topology */
+ snd_soc_tplg_component_remove(component);
+}
+
+static int sof_pcm_ack(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+
+ return snd_sof_pcm_platform_ack(sdev, substream);
+}
+
+static snd_pcm_sframes_t sof_pcm_delay(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ const struct sof_ipc_pcm_ops *pcm_ops = sof_ipc_get_ops(sdev, pcm);
+
+ if (pcm_ops && pcm_ops->delay)
+ return pcm_ops->delay(component, substream);
+
+ return 0;
+}
+
+void snd_sof_new_platform_drv(struct snd_sof_dev *sdev)
+{
+ struct snd_soc_component_driver *pd = &sdev->plat_drv;
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const char *drv_name;
+
+ if (plat_data->machine)
+ drv_name = plat_data->machine->drv_name;
+ else if (plat_data->of_machine)
+ drv_name = plat_data->of_machine->drv_name;
+ else
+ drv_name = NULL;
+
+ pd->name = "sof-audio-component";
+ pd->probe = sof_pcm_probe;
+ pd->remove = sof_pcm_remove;
+ pd->open = sof_pcm_open;
+ pd->close = sof_pcm_close;
+ pd->hw_params = sof_pcm_hw_params;
+ pd->prepare = sof_pcm_prepare;
+ pd->hw_free = sof_pcm_hw_free;
+ pd->trigger = sof_pcm_trigger;
+ pd->pointer = sof_pcm_pointer;
+ pd->ack = sof_pcm_ack;
+ pd->delay = sof_pcm_delay;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMPRESS)
+ pd->compress_ops = &sof_compressed_ops;
+#endif
+
+ pd->pcm_construct = sof_pcm_new;
+ pd->ignore_machine = drv_name;
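+ /* BE PCM device numbers start after the range reserved for FE PCMs */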
+ pd->be_pcm_base = SOF_BE_PCM_BASE;
+ pd->use_dai_pcm_id = true;
+ pd->topology_name_prefix = "sof";
+
+ /* increment module refcount when a pcm is opened */
+ pd->module_get_upon_open = 1;
+
+ pd->legacy_dai_naming = 1;
+
+ /*
+ * The fixup is only needed when the DSP is in use as with the DSPless
+ * mode we are directly using the audio interface
+ */
+ if (!sdev->dspless_mode_selected)
+ pd->be_hw_params_fixup = sof_pcm_dai_link_fixup;
+}
diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
new file mode 100644
index 000000000..704b21413
--- /dev/null
+++ b/sound/soc/sof/pm.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include "ops.h"
+#include "sof-priv.h"
+#include "sof-audio.h"
+
+/*
+ * Helper function to determine the target DSP state during
+ * system suspend. This function only cares about the device
+ * D-states. Platform-specific substates, if any, should be
+ * handled by the platform-specific parts.
+ */
+static u32 snd_sof_dsp_power_target(struct snd_sof_dev *sdev)
+{
+ u32 target_dsp_state;
+
+ switch (sdev->system_suspend_target) {
+ case SOF_SUSPEND_S5:
+ case SOF_SUSPEND_S4:
+ /* DSP should be in D3 if the system is suspending to S3+ */
+ case SOF_SUSPEND_S3:
+ /* DSP should be in D3 if the system is suspending to S3 */
+ target_dsp_state = SOF_DSP_PM_D3;
+ break;
+ case SOF_SUSPEND_S0IX:
+ /*
+ * Currently, the only criterion for retaining the DSP in D0
+ * is that there are streams that ignored the suspend trigger.
+ * Additional criteria such as Soundwire clock-stop mode and
+ * device suspend latency considerations will be added later.
+ */
+ if (snd_sof_stream_suspend_ignored(sdev))
+ target_dsp_state = SOF_DSP_PM_D0;
+ else
+ target_dsp_state = SOF_DSP_PM_D3;
+ break;
+ default:
+ /* This case would be during runtime suspend */
+ target_dsp_state = SOF_DSP_PM_D3;
+ break;
+ }
+
+ return target_dsp_state;
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+static void sof_cache_debugfs(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ list_for_each_entry(dfse, &sdev->dfsentry_list, list) {
+
+ /* nothing to do if debugfs buffer is not IO mem */
+ if (dfse->type == SOF_DFSENTRY_TYPE_BUF)
+ continue;
+
+ /* cache memory that is only accessible in D0 */
+ if (dfse->access_type == SOF_DEBUGFS_ACCESS_D0_ONLY)
+ memcpy_fromio(dfse->cache_buf, dfse->io_mem,
+ dfse->size);
+ }
+}
+#endif
+
+static int sof_resume(struct device *dev, bool runtime_resume)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ u32 old_state = sdev->dsp_power_state.state;
+ int ret;
+
+ /* do nothing if dsp resume callbacks are not set */
+ if (!runtime_resume && !sof_ops(sdev)->resume)
+ return 0;
+
+ if (runtime_resume && !sof_ops(sdev)->runtime_resume)
+ return 0;
+
+ /* DSP was never successfully started, nothing to resume */
+ if (sdev->first_boot)
+ return 0;
+
+ /*
+ * if the runtime_resume flag is set, call the runtime_resume routine
+ * or else call the system resume routine
+ */
+ if (runtime_resume)
+ ret = snd_sof_dsp_runtime_resume(sdev);
+ else
+ ret = snd_sof_dsp_resume(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to power up DSP after resume\n");
+ return ret;
+ }
+
+ if (sdev->dspless_mode_selected) {
+ sof_set_fw_state(sdev, SOF_DSPLESS_MODE);
+ return 0;
+ }
+
+ /*
+ * Nothing further needs to be done for platforms that support the
+ * low-power D0 substate: resume the firmware trace and return when
+ * resuming from the low-power D0 substate.
+ */
+ if (!runtime_resume && sof_ops(sdev)->set_power_state &&
+ old_state == SOF_DSP_PM_D0) {
+ ret = sof_fw_trace_resume(sdev);
+ if (ret < 0)
+ /* non fatal */
+ dev_warn(sdev->dev,
+ "failed to enable trace after resume %d\n", ret);
+ return 0;
+ }
+
+ sof_set_fw_state(sdev, SOF_FW_BOOT_PREPARE);
+
+ /* load the firmware */
+ ret = snd_sof_load_firmware(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to load DSP firmware after resume %d\n",
+ ret);
+ sof_set_fw_state(sdev, SOF_FW_BOOT_FAILED);
+ return ret;
+ }
+
+ sof_set_fw_state(sdev, SOF_FW_BOOT_IN_PROGRESS);
+
+ /*
+ * Boot the firmware. The FW boot status will be modified
+ * in snd_sof_run_firmware() depending on the outcome.
+ */
+ ret = snd_sof_run_firmware(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to boot DSP firmware after resume %d\n",
+ ret);
+ sof_set_fw_state(sdev, SOF_FW_BOOT_FAILED);
+ return ret;
+ }
+
+ /* resume DMA trace */
+ ret = sof_fw_trace_resume(sdev);
+ if (ret < 0) {
+ /* non fatal */
+ dev_warn(sdev->dev,
+ "warning: failed to init trace after resume %d\n",
+ ret);
+ }
+
+ /* restore pipelines */
+ if (tplg_ops && tplg_ops->set_up_all_pipelines) {
+ ret = tplg_ops->set_up_all_pipelines(sdev, false);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to restore pipeline after resume %d\n", ret);
+ goto setup_fail;
+ }
+ }
+
+ /* Notify clients not managed by pm framework about core resume */
+ sof_resume_clients(sdev);
+
+ /* notify DSP of system resume */
+ if (pm_ops && pm_ops->ctx_restore) {
+ ret = pm_ops->ctx_restore(sdev);
+ if (ret < 0)
+ dev_err(sdev->dev, "ctx_restore IPC error during resume: %d\n", ret);
+ }
+
+setup_fail:
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+ if (ret < 0) {
+ /*
+ * Debugfs cannot be read in runtime suspend, so cache
+ * the contents upon failure. This allows possible DSP
+ * coredump information to be captured.
+ */
+ sof_cache_debugfs(sdev);
+ }
+#endif
+
+ return ret;
+}
+
+static int sof_suspend(struct device *dev, bool runtime_suspend)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ pm_message_t pm_state;
+ u32 target_state = snd_sof_dsp_power_target(sdev);
+ u32 old_state = sdev->dsp_power_state.state;
+ int ret;
+
+ /* do nothing if dsp suspend callback is not set */
+ if (!runtime_suspend && !sof_ops(sdev)->suspend)
+ return 0;
+
+ if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
+ return 0;
+
+ /* we need to tear down pipelines only if the DSP hardware is
+ * active, which happens for PCI devices. If the device is
+ * suspended, it is brought back to full power and then
+ * suspended again.
+ */
+ if (tplg_ops && tplg_ops->tear_down_all_pipelines && (old_state == SOF_DSP_PM_D0))
+ tplg_ops->tear_down_all_pipelines(sdev, false);
+
+ if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
+ goto suspend;
+
+ /* prepare for streams to be resumed properly upon resume */
+ if (!runtime_suspend) {
+ ret = snd_sof_dsp_hw_params_upon_resume(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: setting hw_params flag during suspend %d\n",
+ ret);
+ return ret;
+ }
+ }
+
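+ /* the DSP power target is passed as the PM event to the firmware trace and SOF clients */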
+ pm_state.event = target_state;
+
+ /* suspend DMA trace */
+ sof_fw_trace_suspend(sdev, pm_state);
+
+ /* Notify clients not managed by pm framework about core suspend */
+ sof_suspend_clients(sdev, pm_state);
+
+ /* Skip to platform-specific suspend if DSP is entering D0 */
+ if (target_state == SOF_DSP_PM_D0)
+ goto suspend;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+ /* cache debugfs contents during runtime suspend */
+ if (runtime_suspend)
+ sof_cache_debugfs(sdev);
+#endif
+ /* notify DSP of upcoming power down */
+ if (pm_ops && pm_ops->ctx_save) {
+ ret = pm_ops->ctx_save(sdev);
+ if (ret == -EBUSY || ret == -EAGAIN) {
+ /*
+ * runtime PM has logic to handle -EBUSY/-EAGAIN so
+ * pass these errors up
+ */
+ dev_err(sdev->dev, "ctx_save IPC error during suspend: %d\n", ret);
+ return ret;
+ } else if (ret < 0) {
+ /* FW in unexpected state, continue to power down */
+ dev_warn(sdev->dev, "ctx_save IPC error: %d, proceeding with suspend\n",
+ ret);
+ }
+ }
+
+suspend:
+
+ /* return if the DSP was not probed successfully */
+ if (sdev->fw_state == SOF_FW_BOOT_NOT_STARTED)
+ return 0;
+
+ /* platform-specific suspend */
+ if (runtime_suspend)
+ ret = snd_sof_dsp_runtime_suspend(sdev);
+ else
+ ret = snd_sof_dsp_suspend(sdev, target_state);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: failed to power down DSP during suspend %d\n",
+ ret);
+
+ /* Do not reset FW state if DSP is in D0 */
+ if (target_state == SOF_DSP_PM_D0)
+ return ret;
+
+ /* reset FW state */
+ sof_set_fw_state(sdev, SOF_FW_BOOT_NOT_STARTED);
+ sdev->enabled_cores_mask = 0;
+
+ return ret;
+}
+
+int snd_sof_dsp_power_down_notify(struct snd_sof_dev *sdev)
+{
+ const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm);
+
+ /* Notify DSP of upcoming power down */
+ if (sof_ops(sdev)->remove && pm_ops && pm_ops->ctx_save)
+ return pm_ops->ctx_save(sdev);
+
+ return 0;
+}
+
+int snd_sof_runtime_suspend(struct device *dev)
+{
+ return sof_suspend(dev, true);
+}
+EXPORT_SYMBOL(snd_sof_runtime_suspend);
+
+int snd_sof_runtime_idle(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+
+ return snd_sof_dsp_runtime_idle(sdev);
+}
+EXPORT_SYMBOL(snd_sof_runtime_idle);
+
+int snd_sof_runtime_resume(struct device *dev)
+{
+ return sof_resume(dev, true);
+}
+EXPORT_SYMBOL(snd_sof_runtime_resume);
+
+int snd_sof_resume(struct device *dev)
+{
+ return sof_resume(dev, false);
+}
+EXPORT_SYMBOL(snd_sof_resume);
+
+int snd_sof_suspend(struct device *dev)
+{
+ return sof_suspend(dev, false);
+}
+EXPORT_SYMBOL(snd_sof_suspend);
+
+int snd_sof_prepare(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ const struct sof_dev_desc *desc = sdev->pdata->desc;
+
+ /* will suspend to S3 by default */
+ sdev->system_suspend_target = SOF_SUSPEND_S3;
+
+ /*
+ * if the firmware has crashed or failed to boot, keep the default S3
+ * target so that the firmware is rebooted
+ */
+ if (sdev->fw_state == SOF_FW_CRASHED ||
+ sdev->fw_state == SOF_FW_BOOT_FAILED)
+ return 0;
+
+ if (!desc->use_acpi_target_states)
+ return 0;
+
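+ /* map the ACPI target sleep state to the corresponding SOF suspend target */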
+#if defined(CONFIG_ACPI)
+ switch (acpi_target_system_state()) {
+ case ACPI_STATE_S0:
+ sdev->system_suspend_target = SOF_SUSPEND_S0IX;
+ break;
+ case ACPI_STATE_S1:
+ case ACPI_STATE_S2:
+ case ACPI_STATE_S3:
+ sdev->system_suspend_target = SOF_SUSPEND_S3;
+ break;
+ case ACPI_STATE_S4:
+ sdev->system_suspend_target = SOF_SUSPEND_S4;
+ break;
+ case ACPI_STATE_S5:
+ sdev->system_suspend_target = SOF_SUSPEND_S5;
+ break;
+ default:
+ break;
+ }
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_prepare);
+
+void snd_sof_complete(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+
+ sdev->system_suspend_target = SOF_SUSPEND_NONE;
+}
+EXPORT_SYMBOL(snd_sof_complete);
diff --git a/sound/soc/sof/sof-acpi-dev.c b/sound/soc/sof/sof-acpi-dev.c
new file mode 100644
index 000000000..1b04dcb33
--- /dev/null
+++ b/sound/soc/sof/sof-acpi-dev.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/acpi.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../intel/common/soc-intel-quirks.h"
+#include "ops.h"
+#include "sof-acpi-dev.h"
+
+/* platform specific devices */
+#include "intel/shim.h"
+
+static char *fw_path;
+module_param(fw_path, charp, 0444);
+MODULE_PARM_DESC(fw_path, "alternate path for SOF firmware.");
+
+static char *tplg_path;
+module_param(tplg_path, charp, 0444);
+MODULE_PARM_DESC(tplg_path, "alternate path for SOF topology.");
+
+static int sof_acpi_debug;
+module_param_named(sof_acpi_debug, sof_acpi_debug, int, 0444);
+MODULE_PARM_DESC(sof_acpi_debug, "SOF ACPI debug options (0x0 all off)");
+
+#define SOF_ACPI_DISABLE_PM_RUNTIME BIT(0)
+
+const struct dev_pm_ops sof_acpi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(snd_sof_suspend, snd_sof_resume)
+ SET_RUNTIME_PM_OPS(snd_sof_runtime_suspend, snd_sof_runtime_resume,
+ snd_sof_runtime_idle)
+};
+EXPORT_SYMBOL_NS(sof_acpi_pm, SND_SOC_SOF_ACPI_DEV);
+
+static void sof_acpi_probe_complete(struct device *dev)
+{
+ dev_dbg(dev, "Completing SOF ACPI probe");
+
+ if (sof_acpi_debug & SOF_ACPI_DISABLE_PM_RUNTIME)
+ return;
+
+ /* allow runtime_pm */
+ pm_runtime_set_autosuspend_delay(dev, SND_SOF_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+}
+
+int sof_acpi_probe(struct platform_device *pdev, const struct sof_dev_desc *desc)
+{
+ struct device *dev = &pdev->dev;
+ struct snd_sof_pdata *sof_pdata;
+
+ dev_dbg(dev, "ACPI DSP detected");
+
+ sof_pdata = devm_kzalloc(dev, sizeof(*sof_pdata), GFP_KERNEL);
+ if (!sof_pdata)
+ return -ENOMEM;
+
+ if (!desc->ops) {
+ dev_err(dev, "error: no matching ACPI descriptor ops\n");
+ return -ENODEV;
+ }
+
+ sof_pdata->desc = desc;
+ sof_pdata->dev = &pdev->dev;
+ sof_pdata->fw_filename = desc->default_fw_filename[SOF_IPC];
+
+ /* alternate fw and tplg filenames? */
+ if (fw_path)
+ sof_pdata->fw_filename_prefix = fw_path;
+ else
+ sof_pdata->fw_filename_prefix =
+ sof_pdata->desc->default_fw_path[SOF_IPC];
+
+ if (tplg_path)
+ sof_pdata->tplg_filename_prefix = tplg_path;
+ else
+ sof_pdata->tplg_filename_prefix =
+ sof_pdata->desc->default_tplg_path[SOF_IPC];
+
+ /* set callback to be called on successful device probe to enable runtime_pm */
+ sof_pdata->sof_probe_complete = sof_acpi_probe_complete;
+
+ /* call sof helper for DSP hardware probe */
+ return snd_sof_device_probe(dev, sof_pdata);
+}
+EXPORT_SYMBOL_NS(sof_acpi_probe, SND_SOC_SOF_ACPI_DEV);
+
+int sof_acpi_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (!(sof_acpi_debug & SOF_ACPI_DISABLE_PM_RUNTIME))
+ pm_runtime_disable(dev);
+
+ /* call sof helper for DSP hardware remove */
+ snd_sof_device_remove(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(sof_acpi_remove, SND_SOC_SOF_ACPI_DEV);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/sof-acpi-dev.h b/sound/soc/sof/sof-acpi-dev.h
new file mode 100644
index 000000000..5c2b558d2
--- /dev/null
+++ b/sound/soc/sof/sof-acpi-dev.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2021 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __SOUND_SOC_SOF_ACPI_H
+#define __SOUND_SOC_SOF_ACPI_H
+
+extern const struct dev_pm_ops sof_acpi_pm;
+int sof_acpi_probe(struct platform_device *pdev, const struct sof_dev_desc *desc);
+int sof_acpi_remove(struct platform_device *pdev);
+
+#endif
diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
new file mode 100644
index 000000000..77cc64ac7
--- /dev/null
+++ b/sound/soc/sof/sof-audio.c
@@ -0,0 +1,1127 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2019 Intel Corporation. All rights reserved.
+//
+// Author: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+//
+
+#include <linux/bitfield.h>
+#include <trace/events/sof.h>
+#include "sof-audio.h"
+#include "sof-of-dev.h"
+#include "ops.h"
+
+static bool is_virtual_widget(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget *widget,
+ const char *func)
+{
+ switch (widget->id) {
+ case snd_soc_dapm_out_drv:
+ case snd_soc_dapm_output:
+ case snd_soc_dapm_input:
+ dev_dbg(sdev->dev, "%s: %s is a virtual widget\n", func, widget->name);
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void sof_reset_route_setup_status(struct snd_sof_dev *sdev, struct snd_sof_widget *widget)
+{
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ struct snd_sof_route *sroute;
+
+ list_for_each_entry(sroute, &sdev->route_list, list)
+ if (sroute->src_widget == widget || sroute->sink_widget == widget) {
+ if (sroute->setup && tplg_ops && tplg_ops->route_free)
+ tplg_ops->route_free(sdev, sroute);
+
+ sroute->setup = false;
+ }
+}
+
+static int sof_widget_free_unlocked(struct snd_sof_dev *sdev,
+ struct snd_sof_widget *swidget)
+{
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ struct snd_sof_pipeline *spipe = swidget->spipe;
+ struct snd_sof_widget *pipe_widget;
+ int err = 0;
+ int ret;
+
+ if (!swidget->private)
+ return 0;
+
+ trace_sof_widget_free(swidget);
+
+ /* only free when use_count is 0 */
+ if (--swidget->use_count)
+ return 0;
+
+ pipe_widget = swidget->spipe->pipe_widget;
+
+ /* reset route setup status for all routes that contain this widget */
+ sof_reset_route_setup_status(sdev, swidget);
+
+ /* free DAI config and continue to free widget even if it fails */
+ if (WIDGET_IS_DAI(swidget->id)) {
+ struct snd_sof_dai_config_data data;
+ unsigned int flags = SOF_DAI_CONFIG_FLAGS_HW_FREE;
+
+ data.dai_data = DMA_CHAN_INVALID;
+
+ if (tplg_ops && tplg_ops->dai_config) {
+ err = tplg_ops->dai_config(sdev, swidget, flags, &data);
+ if (err < 0)
+ dev_err(sdev->dev, "failed to free config for widget %s\n",
+ swidget->widget->name);
+ }
+ }
+
+ /* continue to disable core even if IPC fails */
+ if (tplg_ops && tplg_ops->widget_free) {
+ ret = tplg_ops->widget_free(sdev, swidget);
+ if (ret < 0 && !err)
+ err = ret;
+ }
+
+ /*
+ * decrement ref count for cores associated with all modules in the pipeline and clear
+ * the complete flag
+ */
+ if (swidget->id == snd_soc_dapm_scheduler) {
+ int i;
+
+ for_each_set_bit(i, &spipe->core_mask, sdev->num_cores) {
+ ret = snd_sof_dsp_core_put(sdev, i);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to disable target core: %d for pipeline %s\n",
+ i, swidget->widget->name);
+ if (!err)
+ err = ret;
+ }
+ }
+ swidget->spipe->complete = 0;
+ }
+
+ /*
+ * free the scheduler widget (same as pipe_widget) associated with the current swidget.
+ * skip for static pipelines
+ */
+ if (swidget->dynamic_pipeline_widget && swidget->id != snd_soc_dapm_scheduler) {
+ ret = sof_widget_free_unlocked(sdev, pipe_widget);
+ if (ret < 0 && !err)
+ err = ret;
+ }
+
+ if (!err)
+ dev_dbg(sdev->dev, "widget %s freed\n", swidget->widget->name);
+
+ return err;
+}
+
+int sof_widget_free(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget)
+{
+ int ret;
+
+ mutex_lock(&swidget->setup_mutex);
+ ret = sof_widget_free_unlocked(sdev, swidget);
+ mutex_unlock(&swidget->setup_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(sof_widget_free);
+
+static int sof_widget_setup_unlocked(struct snd_sof_dev *sdev,
+ struct snd_sof_widget *swidget)
+{
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ struct snd_sof_pipeline *spipe = swidget->spipe;
+ bool use_count_decremented = false;
+ int ret;
+ int i;
+
+ /* skip if there is no private data */
+ if (!swidget->private)
+ return 0;
+
+ trace_sof_widget_setup(swidget);
+
+ /* widget already set up */
+ if (++swidget->use_count > 1)
+ return 0;
+
+ /*
+ * The scheduler widget for a pipeline is not part of the connected DAPM
+ * widget list and it needs to be set up before the widgets in the pipeline
+ * are set up. The use_count for the scheduler widget is incremented for every
+ * widget in a given pipeline to ensure that it is freed only after the last
+ * widget in the pipeline is freed. Skip setting up scheduler widget for static pipelines.
+ */
+ if (swidget->dynamic_pipeline_widget && swidget->id != snd_soc_dapm_scheduler) {
+ if (!swidget->spipe || !swidget->spipe->pipe_widget) {
+ dev_err(sdev->dev, "No pipeline set for %s\n", swidget->widget->name);
+ ret = -EINVAL;
+ goto use_count_dec;
+ }
+
+ ret = sof_widget_setup_unlocked(sdev, swidget->spipe->pipe_widget);
+ if (ret < 0)
+ goto use_count_dec;
+ }
+
+ /* update ref count for cores associated with all modules in the pipeline */
+ if (swidget->id == snd_soc_dapm_scheduler) {
+ for_each_set_bit(i, &spipe->core_mask, sdev->num_cores) {
+ ret = snd_sof_dsp_core_get(sdev, i);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to enable target core %d for pipeline %s\n",
+ i, swidget->widget->name);
+ goto pipe_widget_free;
+ }
+ }
+ }
+
+ /* setup widget in the DSP */
+ if (tplg_ops && tplg_ops->widget_setup) {
+ ret = tplg_ops->widget_setup(sdev, swidget);
+ if (ret < 0)
+ goto pipe_widget_free;
+ }
+
+ /* send config for DAI components */
+ if (WIDGET_IS_DAI(swidget->id)) {
+ unsigned int flags = SOF_DAI_CONFIG_FLAGS_HW_PARAMS;
+
+ /*
+ * The config flags saved during BE DAI hw_params will be used for IPC3. IPC4 does
+ * not use the flags argument.
+ */
+ if (tplg_ops && tplg_ops->dai_config) {
+ ret = tplg_ops->dai_config(sdev, swidget, flags, NULL);
+ if (ret < 0)
+ goto widget_free;
+ }
+ }
+
+ /* restore kcontrols for widget */
+ if (tplg_ops && tplg_ops->control && tplg_ops->control->widget_kcontrol_setup) {
+ ret = tplg_ops->control->widget_kcontrol_setup(sdev, swidget);
+ if (ret < 0)
+ goto widget_free;
+ }
+
+ dev_dbg(sdev->dev, "widget %s setup complete\n", swidget->widget->name);
+
+ return 0;
+
+widget_free:
+ /* widget use_count will be decremented by sof_widget_free() */
+ sof_widget_free_unlocked(sdev, swidget);
+ use_count_decremented = true;
+pipe_widget_free:
+ if (swidget->id != snd_soc_dapm_scheduler) {
+ sof_widget_free_unlocked(sdev, swidget->spipe->pipe_widget);
+ } else {
+ int j;
+
+ /* decrement ref count for all cores that were updated previously */
+ for_each_set_bit(j, &spipe->core_mask, sdev->num_cores) {
+ if (j >= i)
+ break;
+ snd_sof_dsp_core_put(sdev, j);
+ }
+ }
+use_count_dec:
+ if (!use_count_decremented)
+ swidget->use_count--;
+
+ return ret;
+}
+
+int sof_widget_setup(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget)
+{
+ int ret;
+
+ mutex_lock(&swidget->setup_mutex);
+ ret = sof_widget_setup_unlocked(sdev, swidget);
+ mutex_unlock(&swidget->setup_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(sof_widget_setup);
+
+int sof_route_setup(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget *wsource,
+ struct snd_soc_dapm_widget *wsink)
+{
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ struct snd_sof_widget *src_widget = wsource->dobj.private;
+ struct snd_sof_widget *sink_widget = wsink->dobj.private;
+ struct snd_sof_route *sroute;
+ bool route_found = false;
+
+ /* ignore routes involving virtual widgets in topology */
+ if (is_virtual_widget(sdev, src_widget->widget, __func__) ||
+ is_virtual_widget(sdev, sink_widget->widget, __func__))
+ return 0;
+
+ /* find route matching source and sink widgets */
+ list_for_each_entry(sroute, &sdev->route_list, list)
+ if (sroute->src_widget == src_widget && sroute->sink_widget == sink_widget) {
+ route_found = true;
+ break;
+ }
+
+ if (!route_found) {
+ dev_err(sdev->dev, "error: cannot find SOF route for source %s -> %s sink\n",
+ wsource->name, wsink->name);
+ return -EINVAL;
+ }
+
+ /* nothing to do if route is already set up */
+ if (sroute->setup)
+ return 0;
+
+ if (tplg_ops && tplg_ops->route_setup) {
+ int ret = tplg_ops->route_setup(sdev, sroute);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ sroute->setup = true;
+ return 0;
+}
+
+static int sof_setup_pipeline_connections(struct snd_sof_dev *sdev,
+ struct snd_soc_dapm_widget_list *list, int dir)
+{
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ struct snd_soc_dapm_widget *widget;
+ struct snd_sof_route *sroute;
+ struct snd_soc_dapm_path *p;
+ int ret = 0;
+ int i;
+
+ /*
+ * Set up connections between widgets in the sink/source paths based on direction.
+ * Some non-SOF widgets exist in topology either for compatibility or for the
+ * purpose of connecting a pipeline from a host to a DAI in order to receive the DAPM
+ * events. They are not handled by the firmware, so ignore them.
+ */
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
+ for_each_dapm_widgets(list, i, widget) {
+ if (!widget->dobj.private)
+ continue;
+
+ snd_soc_dapm_widget_for_each_sink_path(widget, p) {
+ if (!widget_in_list(list, p->sink))
+ continue;
+
+ if (p->sink->dobj.private) {
+ ret = sof_route_setup(sdev, widget, p->sink);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+ } else {
+ for_each_dapm_widgets(list, i, widget) {
+ if (!widget->dobj.private)
+ continue;
+
+ snd_soc_dapm_widget_for_each_source_path(widget, p) {
+ if (!widget_in_list(list, p->source))
+ continue;
+
+ if (p->source->dobj.private) {
+ ret = sof_route_setup(sdev, p->source, widget);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+ }
+
+ /*
+ * The above loop handles connections between widgets that belong to the DAPM widget list.
+ * This is not sufficient to handle loopback cases between pipelines configured with
+ * different directions, e.g. a sidetone or an amplifier feedback connected to a speaker
+ * protection module.
+ */
+ list_for_each_entry(sroute, &sdev->route_list, list) {
+ bool src_widget_in_dapm_list, sink_widget_in_dapm_list;
+ struct snd_sof_widget *swidget;
+
+ if (sroute->setup)
+ continue;
+
+ src_widget_in_dapm_list = widget_in_list(list, sroute->src_widget->widget);
+ sink_widget_in_dapm_list = widget_in_list(list, sroute->sink_widget->widget);
+
+ /*
+ * if both source and sink are in the DAPM list, the route must already have been
+ * set up above. And if neither is in the DAPM list, the route shouldn't be
+ * handled now.
+ */
+ if (src_widget_in_dapm_list == sink_widget_in_dapm_list)
+ continue;
+
+ /*
+ * At this point either the source widget or the sink widget is in the DAPM list
+ * with a route that might need to be set up. Check the use_count of the widget
+ * that is not in the DAPM list to confirm if it is in use currently before setting
+ * up the route.
+ */
+ if (src_widget_in_dapm_list)
+ swidget = sroute->sink_widget;
+ else
+ swidget = sroute->src_widget;
+
+ mutex_lock(&swidget->setup_mutex);
+ if (!swidget->use_count) {
+ mutex_unlock(&swidget->setup_mutex);
+ continue;
+ }
+
+ if (tplg_ops && tplg_ops->route_setup) {
+ /*
+ * this route will get freed when either the source widget or the sink
+ * widget is freed during hw_free
+ */
+ ret = tplg_ops->route_setup(sdev, sroute);
+ if (!ret)
+ sroute->setup = true;
+ }
+
+ mutex_unlock(&swidget->setup_mutex);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+sof_unprepare_widgets_in_path(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget *widget,
+ struct snd_soc_dapm_widget_list *list)
+{
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ struct snd_sof_widget *swidget = widget->dobj.private;
+ const struct sof_ipc_tplg_widget_ops *widget_ops;
+ struct snd_soc_dapm_path *p;
+
+ if (is_virtual_widget(sdev, widget, __func__))
+ return;
+
+ /* skip if the widget is in use or if it is already unprepared */
+ if (!swidget || !swidget->prepared || swidget->use_count > 0)
+ goto sink_unprepare;
+
+ widget_ops = tplg_ops ? tplg_ops->widget : NULL;
+ if (widget_ops && widget_ops[widget->id].ipc_unprepare)
+ /* unprepare the source widget */
+ widget_ops[widget->id].ipc_unprepare(swidget);
+
+ swidget->prepared = false;
+
+sink_unprepare:
+ /* unprepare all widgets in the sink paths */
+ snd_soc_dapm_widget_for_each_sink_path(widget, p) {
+ if (!widget_in_list(list, p->sink))
+ continue;
+ if (!p->walking && p->sink->dobj.private) {
+ p->walking = true;
+ sof_unprepare_widgets_in_path(sdev, p->sink, list);
+ p->walking = false;
+ }
+ }
+}
+
+static int
+sof_prepare_widgets_in_path(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget *widget,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params,
+ struct snd_pcm_hw_params *pipeline_params, int dir,
+ struct snd_soc_dapm_widget_list *list)
+{
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ struct snd_sof_widget *swidget = widget->dobj.private;
+ const struct sof_ipc_tplg_widget_ops *widget_ops;
+ struct snd_soc_dapm_path *p;
+ int ret;
+
+ if (is_virtual_widget(sdev, widget, __func__))
+ return 0;
+
+ widget_ops = tplg_ops ? tplg_ops->widget : NULL;
+ if (!widget_ops)
+ return 0;
+
+ if (!swidget || !widget_ops[widget->id].ipc_prepare || swidget->prepared)
+ goto sink_prepare;
+
+ /* prepare the source widget */
+ ret = widget_ops[widget->id].ipc_prepare(swidget, fe_params, platform_params,
+ pipeline_params, dir);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to prepare widget %s\n", widget->name);
+ return ret;
+ }
+
+ swidget->prepared = true;
+
+sink_prepare:
+ /* prepare all widgets in the sink paths */
+ snd_soc_dapm_widget_for_each_sink_path(widget, p) {
+ if (!widget_in_list(list, p->sink))
+ continue;
+ if (!p->walking && p->sink->dobj.private) {
+ p->walking = true;
+ ret = sof_prepare_widgets_in_path(sdev, p->sink, fe_params,
+ platform_params, pipeline_params, dir,
+ list);
+ p->walking = false;
+ if (ret < 0) {
+ /* unprepare the source widget */
+ if (widget_ops[widget->id].ipc_unprepare &&
+ swidget && swidget->prepared) {
+ widget_ops[widget->id].ipc_unprepare(swidget);
+ swidget->prepared = false;
+ }
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * free all widgets in the sink path starting from the source widget
+ * (DAI type for capture, AIF type for playback)
+ */
+static int sof_free_widgets_in_path(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget *widget,
+ int dir, struct snd_sof_pcm *spcm)
+{
+ struct snd_soc_dapm_widget_list *list = spcm->stream[dir].list;
+ struct snd_soc_dapm_path *p;
+ int err;
+ int ret = 0;
+
+ if (is_virtual_widget(sdev, widget, __func__))
+ return 0;
+
+ if (widget->dobj.private) {
+ err = sof_widget_free(sdev, widget->dobj.private);
+ if (err < 0)
+ ret = err;
+ }
+
+ /* free all widgets in the sink paths even in case of error to keep use counts balanced */
+ snd_soc_dapm_widget_for_each_sink_path(widget, p) {
+ if (!p->walking) {
+ if (!widget_in_list(list, p->sink))
+ continue;
+
+ p->walking = true;
+
+ err = sof_free_widgets_in_path(sdev, p->sink, dir, spcm);
+ if (err < 0)
+ ret = err;
+ p->walking = false;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * set up all widgets in the sink path starting from the source widget
+ * (DAI type for capture, AIF type for playback).
+ * The error path in this function ensures that all successfully set up widgets get freed.
+ */
+static int sof_set_up_widgets_in_path(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget *widget,
+ int dir, struct snd_sof_pcm *spcm)
+{
+ struct snd_sof_pcm_stream_pipeline_list *pipeline_list = &spcm->stream[dir].pipeline_list;
+ struct snd_soc_dapm_widget_list *list = spcm->stream[dir].list;
+ struct snd_sof_widget *swidget = widget->dobj.private;
+ struct snd_sof_pipeline *spipe;
+ struct snd_soc_dapm_path *p;
+ int ret;
+
+ if (is_virtual_widget(sdev, widget, __func__))
+ return 0;
+
+ if (swidget) {
+ int i;
+
+ ret = sof_widget_setup(sdev, widget->dobj.private);
+ if (ret < 0)
+ return ret;
+
+ /* skip populating the pipelines array if it is NULL */
+ if (!pipeline_list->pipelines)
+ goto sink_setup;
+
+ /*
+ * Add the widget's pipe_widget to the list of pipelines to be triggered if not
+ * already in the list. This will result in the pipelines getting added in the
+ * order source to sink.
+ */
+ for (i = 0; i < pipeline_list->count; i++) {
+ spipe = pipeline_list->pipelines[i];
+ if (spipe == swidget->spipe)
+ break;
+ }
+
+ if (i == pipeline_list->count) {
+ pipeline_list->count++;
+ pipeline_list->pipelines[i] = swidget->spipe;
+ }
+ }
+
+sink_setup:
+ snd_soc_dapm_widget_for_each_sink_path(widget, p) {
+ if (!p->walking) {
+ if (!widget_in_list(list, p->sink))
+ continue;
+
+ p->walking = true;
+
+ ret = sof_set_up_widgets_in_path(sdev, p->sink, dir, spcm);
+ p->walking = false;
+ if (ret < 0) {
+ if (swidget)
+ sof_widget_free(sdev, swidget);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+sof_walk_widgets_in_order(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params, int dir,
+ enum sof_widget_op op)
+{
+ struct snd_soc_dapm_widget_list *list = spcm->stream[dir].list;
+ struct snd_soc_dapm_widget *widget;
+ char *str;
+ int ret = 0;
+ int i;
+
+ if (!list)
+ return 0;
+
+ for_each_dapm_widgets(list, i, widget) {
+ if (is_virtual_widget(sdev, widget, __func__))
+ continue;
+
+ /* starting widget for playback is AIF type */
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK && widget->id != snd_soc_dapm_aif_in)
+ continue;
+
+ /* starting widget for capture is DAI type */
+ if (dir == SNDRV_PCM_STREAM_CAPTURE && widget->id != snd_soc_dapm_dai_out)
+ continue;
+
+ switch (op) {
+ case SOF_WIDGET_SETUP:
+ ret = sof_set_up_widgets_in_path(sdev, widget, dir, spcm);
+ str = "set up";
+ break;
+ case SOF_WIDGET_FREE:
+ ret = sof_free_widgets_in_path(sdev, widget, dir, spcm);
+ str = "free";
+ break;
+ case SOF_WIDGET_PREPARE:
+ {
+ struct snd_pcm_hw_params pipeline_params;
+
+ str = "prepare";
+ /*
+ * When walking the list of connected widgets, the pipeline_params for each
+ * widget is modified by the source widget in the path. Use a local
+ * copy of the runtime params as the pipeline_params so that the runtime
+ * params does not get overwritten.
+ */
+ memcpy(&pipeline_params, fe_params, sizeof(*fe_params));
+
+ ret = sof_prepare_widgets_in_path(sdev, widget, fe_params, platform_params,
+ &pipeline_params, dir, list);
+ break;
+ }
+ case SOF_WIDGET_UNPREPARE:
+ sof_unprepare_widgets_in_path(sdev, widget, list);
+ break;
+ default:
+ dev_err(sdev->dev, "Invalid widget op %d\n", op);
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to %s connected widgets\n", str);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int sof_widget_list_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params,
+ int dir)
+{
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ struct snd_soc_dapm_widget_list *list = spcm->stream[dir].list;
+ struct snd_soc_dapm_widget *widget;
+ int i, ret;
+
+ /* nothing to set up */
+ if (!list)
+ return 0;
+
+ /*
+ * Prepare widgets for set up. The prepare step is used to allocate memory, assign
+ * instance ID and pick the widget configuration based on the runtime PCM params.
+ */
+ ret = sof_walk_widgets_in_order(sdev, spcm, fe_params, platform_params,
+ dir, SOF_WIDGET_PREPARE);
+ if (ret < 0)
+ return ret;
+
+ /* Set up is used to send the IPC to the DSP to create the widget */
+ ret = sof_walk_widgets_in_order(sdev, spcm, fe_params, platform_params,
+ dir, SOF_WIDGET_SETUP);
+ if (ret < 0) {
+ sof_walk_widgets_in_order(sdev, spcm, fe_params, platform_params,
+ dir, SOF_WIDGET_UNPREPARE);
+ return ret;
+ }
+
+ /*
+ * An error while setting up pipeline connections will cause the status of the
+ * routes that were already set up to be reset when the widgets are freed.
+ */
+ ret = sof_setup_pipeline_connections(sdev, list, dir);
+ if (ret < 0)
+ goto widget_free;
+
+ /* complete pipelines */
+ for_each_dapm_widgets(list, i, widget) {
+ struct snd_sof_widget *swidget = widget->dobj.private;
+ struct snd_sof_widget *pipe_widget;
+ struct snd_sof_pipeline *spipe;
+
+ if (!swidget || sdev->dspless_mode_selected)
+ continue;
+
+ spipe = swidget->spipe;
+ if (!spipe) {
+ dev_err(sdev->dev, "no pipeline found for %s\n",
+ swidget->widget->name);
+ ret = -EINVAL;
+ goto widget_free;
+ }
+
+ pipe_widget = spipe->pipe_widget;
+ if (!pipe_widget) {
+ dev_err(sdev->dev, "error: no pipeline widget found for %s\n",
+ swidget->widget->name);
+ ret = -EINVAL;
+ goto widget_free;
+ }
+
+ if (spipe->complete)
+ continue;
+
+ if (tplg_ops && tplg_ops->pipeline_complete) {
+ spipe->complete = tplg_ops->pipeline_complete(sdev, pipe_widget);
+ if (spipe->complete < 0) {
+ ret = spipe->complete;
+ goto widget_free;
+ }
+ }
+ }
+
+ return 0;
+
+widget_free:
+ sof_walk_widgets_in_order(sdev, spcm, fe_params, platform_params, dir,
+ SOF_WIDGET_FREE);
+ sof_walk_widgets_in_order(sdev, spcm, NULL, NULL, dir, SOF_WIDGET_UNPREPARE);
+
+ return ret;
+}
+
+int sof_widget_list_free(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm, int dir)
+{
+ struct snd_sof_pcm_stream_pipeline_list *pipeline_list = &spcm->stream[dir].pipeline_list;
+ struct snd_soc_dapm_widget_list *list = spcm->stream[dir].list;
+ int ret;
+
+ /* nothing to free */
+ if (!list)
+ return 0;
+
+ /* send IPC to free widget in the DSP */
+ ret = sof_walk_widgets_in_order(sdev, spcm, NULL, NULL, dir, SOF_WIDGET_FREE);
+
+ /* unprepare the widget */
+ sof_walk_widgets_in_order(sdev, spcm, NULL, NULL, dir, SOF_WIDGET_UNPREPARE);
+
+ snd_soc_dapm_dai_free_widgets(&list);
+ spcm->stream[dir].list = NULL;
+
+ pipeline_list->count = 0;
+
+ return ret;
+}
+
+/*
+ * helper to determine if there are only D0i3 compatible
+ * streams active
+ */
+bool snd_sof_dsp_only_d0i3_compatible_stream_active(struct snd_sof_dev *sdev)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_sof_pcm *spcm;
+ bool d0i3_compatible_active = false;
+ int dir;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ for_each_pcm_streams(dir) {
+ substream = spcm->stream[dir].substream;
+ if (!substream || !substream->runtime)
+ continue;
+
+ /*
+ * a non-NULL substream->runtime indicates
+ * that the stream is open. No need to check the
+ * stream state.
+ */
+ if (!spcm->stream[dir].d0i3_compatible)
+ return false;
+
+ d0i3_compatible_active = true;
+ }
+ }
+
+ return d0i3_compatible_active;
+}
+EXPORT_SYMBOL(snd_sof_dsp_only_d0i3_compatible_stream_active);
+
+bool snd_sof_stream_suspend_ignored(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pcm *spcm;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ if (spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].suspend_ignored ||
+ spcm->stream[SNDRV_PCM_STREAM_CAPTURE].suspend_ignored)
+ return true;
+ }
+
+ return false;
+}
+
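+/*
+ * Stop the DMA and send PCM_FREE to the DSP if the stream was prepared,
+ * reset the platform DMA and, optionally, free the DAPM widget list for
+ * the given direction.
+ */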
+int sof_pcm_stream_free(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream,
+ struct snd_sof_pcm *spcm, int dir, bool free_widget_list)
+{
+ const struct sof_ipc_pcm_ops *pcm_ops = sof_ipc_get_ops(sdev, pcm);
+ int ret;
+
+ if (spcm->prepared[substream->stream]) {
+ /* stop DMA first if needed */
+ if (pcm_ops && pcm_ops->platform_stop_during_hw_free)
+ snd_sof_pcm_platform_trigger(sdev, substream, SNDRV_PCM_TRIGGER_STOP);
+
+ /* Send PCM_FREE IPC to reset pipeline */
+ if (pcm_ops && pcm_ops->hw_free) {
+ ret = pcm_ops->hw_free(sdev->component, substream);
+ if (ret < 0)
+ return ret;
+ }
+
+ spcm->prepared[substream->stream] = false;
+ }
+
+ /* reset the DMA */
+ ret = snd_sof_pcm_platform_hw_free(sdev, substream);
+ if (ret < 0)
+ return ret;
+
+ /* free widget list */
+ if (free_widget_list) {
+ ret = sof_widget_list_free(sdev, spcm, dir);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to free widgets during suspend\n");
+ }
+
+ return ret;
+}
+
+/*
+ * Generic object lookup APIs.
+ */
+
+struct snd_sof_pcm *snd_sof_find_spcm_name(struct snd_soc_component *scomp,
+ const char *name)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_pcm *spcm;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ /* match with PCM dai name */
+ if (strcmp(spcm->pcm.dai_name, name) == 0)
+ return spcm;
+
+ /* match with playback caps name if set */
+ if (*spcm->pcm.caps[0].name &&
+ !strcmp(spcm->pcm.caps[0].name, name))
+ return spcm;
+
+ /* match with capture caps name if set */
+ if (*spcm->pcm.caps[1].name &&
+ !strcmp(spcm->pcm.caps[1].name, name))
+ return spcm;
+ }
+
+ return NULL;
+}
+
+struct snd_sof_pcm *snd_sof_find_spcm_comp(struct snd_soc_component *scomp,
+ unsigned int comp_id,
+ int *direction)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_pcm *spcm;
+ int dir;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ for_each_pcm_streams(dir) {
+ if (spcm->stream[dir].comp_id == comp_id) {
+ *direction = dir;
+ return spcm;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+struct snd_sof_widget *snd_sof_find_swidget(struct snd_soc_component *scomp,
+ const char *name)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_widget *swidget;
+
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (strcmp(name, swidget->widget->name) == 0)
+ return swidget;
+ }
+
+ return NULL;
+}
+
+/* find widget by stream name and direction */
+struct snd_sof_widget *
+snd_sof_find_swidget_sname(struct snd_soc_component *scomp,
+ const char *pcm_name, int dir)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_widget *swidget;
+ enum snd_soc_dapm_type type;
+
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+ type = snd_soc_dapm_aif_in;
+ else
+ type = snd_soc_dapm_aif_out;
+
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (!strcmp(pcm_name, swidget->widget->sname) &&
+ swidget->id == type)
+ return swidget;
+ }
+
+ return NULL;
+}
+
+struct snd_sof_dai *snd_sof_find_dai(struct snd_soc_component *scomp,
+ const char *name)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_dai *dai;
+
+ list_for_each_entry(dai, &sdev->dai_list, list) {
+ if (dai->name && (strcmp(name, dai->name) == 0))
+ return dai;
+ }
+
+ return NULL;
+}
+
+static int sof_dai_get_clk(struct snd_soc_pcm_runtime *rtd, int clk_type)
+{
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
+ struct snd_sof_dai *dai =
+ snd_sof_find_dai(component, (char *)rtd->dai_link->name);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ /* use the topology-configured clock only if the DAI exists */
+ if (!dai)
+ return 0;
+
+ if (tplg_ops && tplg_ops->dai_get_clk)
+ return tplg_ops->dai_get_clk(sdev, dai, clk_type);
+
+ return 0;
+}
+
+/*
+ * Helper to get SSP MCLK from a pcm_runtime.
+ * Return 0 if it does not exist.
+ */
+int sof_dai_get_mclk(struct snd_soc_pcm_runtime *rtd)
+{
+ return sof_dai_get_clk(rtd, SOF_DAI_CLK_INTEL_SSP_MCLK);
+}
+EXPORT_SYMBOL(sof_dai_get_mclk);
+
+/*
+ * Helper to get SSP BCLK from a pcm_runtime.
+ * Return 0 if it does not exist.
+ */
+int sof_dai_get_bclk(struct snd_soc_pcm_runtime *rtd)
+{
+ return sof_dai_get_clk(rtd, SOF_DAI_CLK_INTEL_SSP_BCLK);
+}
+EXPORT_SYMBOL(sof_dai_get_bclk);
+
+static struct snd_sof_of_mach *sof_of_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_sof_of_mach *mach = desc->of_machines;
+
+ if (!mach)
+ return NULL;
+
+ for (; mach->compatible; mach++) {
+ if (of_machine_is_compatible(mach->compatible)) {
+ sof_pdata->tplg_filename = mach->sof_tplg_filename;
+ if (mach->fw_filename)
+ sof_pdata->fw_filename = mach->fw_filename;
+
+ return mach;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * SOF Driver enumeration.
+ */
+int sof_machine_check(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_soc_acpi_mach *mach;
+
+ if (!IS_ENABLED(CONFIG_SND_SOC_SOF_FORCE_NOCODEC_MODE)) {
+ const struct snd_sof_of_mach *of_mach;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
+ sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
+ goto nocodec;
+
+ /* find machine */
+ mach = snd_sof_machine_select(sdev);
+ if (mach) {
+ sof_pdata->machine = mach;
+
+ if (sof_pdata->subsystem_id_set) {
+ mach->mach_params.subsystem_vendor = sof_pdata->subsystem_vendor;
+ mach->mach_params.subsystem_device = sof_pdata->subsystem_device;
+ mach->mach_params.subsystem_id_set = true;
+ }
+
+ snd_sof_set_mach_params(mach, sdev);
+ return 0;
+ }
+
+ of_mach = sof_of_machine_select(sdev);
+ if (of_mach) {
+ sof_pdata->of_machine = of_mach;
+ return 0;
+ }
+
+ if (!IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC)) {
+ dev_err(sdev->dev, "error: no matching ASoC machine driver found - aborting probe\n");
+ return -ENODEV;
+ }
+ } else {
+ dev_warn(sdev->dev, "Force to use nocodec mode\n");
+ }
+
+nocodec:
+ /* select nocodec mode */
+ dev_warn(sdev->dev, "Using nocodec machine driver\n");
+ mach = devm_kzalloc(sdev->dev, sizeof(*mach), GFP_KERNEL);
+ if (!mach)
+ return -ENOMEM;
+
+ mach->drv_name = "sof-nocodec";
+ if (!sof_pdata->tplg_filename)
+ sof_pdata->tplg_filename = desc->nocodec_tplg_filename;
+
+ sof_pdata->machine = mach;
+ snd_sof_set_mach_params(mach, sdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_machine_check);
+
+int sof_machine_register(struct snd_sof_dev *sdev, void *pdata)
+{
+ struct snd_sof_pdata *plat_data = pdata;
+ const char *drv_name;
+ const void *mach;
+ int size;
+
+ drv_name = plat_data->machine->drv_name;
+ mach = plat_data->machine;
+ size = sizeof(*plat_data->machine);
+
+ /* register machine driver, pass machine info as pdata */
+ plat_data->pdev_mach =
+ platform_device_register_data(sdev->dev, drv_name,
+ PLATFORM_DEVID_NONE, mach, size);
+ if (IS_ERR(plat_data->pdev_mach))
+ return PTR_ERR(plat_data->pdev_mach);
+
+ dev_dbg(sdev->dev, "created machine %s\n",
+ dev_name(&plat_data->pdev_mach->dev));
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_machine_register);
+
+void sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
+{
+ struct snd_sof_pdata *plat_data = pdata;
+
+ if (!IS_ERR_OR_NULL(plat_data->pdev_mach))
+ platform_device_unregister(plat_data->pdev_mach);
+}
+EXPORT_SYMBOL(sof_machine_unregister);
diff --git a/sound/soc/sof/sof-audio.h b/sound/soc/sof/sof-audio.h
new file mode 100644
index 000000000..a6d6bcd00
--- /dev/null
+++ b/sound/soc/sof/sof-audio.h
@@ -0,0 +1,646 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2019 Intel Corporation. All rights reserved.
+ *
+ * Author: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+ */
+
+#ifndef __SOUND_SOC_SOF_AUDIO_H
+#define __SOUND_SOC_SOF_AUDIO_H
+
+#include <linux/workqueue.h>
+
+#include <sound/soc.h>
+#include <sound/control.h>
+#include <sound/sof/stream.h> /* needs to be included before control.h */
+#include <sound/sof/control.h>
+#include <sound/sof/dai.h>
+#include <sound/sof/topology.h>
+#include "sof-priv.h"
+
+#define SOF_AUDIO_PCM_DRV_NAME "sof-audio-component"
+
+/*
+ * The ipc4 firmware only supports up to 8 sink or source pins
+ * per widget, because only 3 bits are used for queue(pin) ID
+ * in ipc4 protocol.
+ */
+#define SOF_WIDGET_MAX_NUM_PINS 8
+
+/* Widget pin type */
+#define SOF_PIN_TYPE_INPUT 0
+#define SOF_PIN_TYPE_OUTPUT 1
+
+/* max number of FE PCMs before BEs */
+#define SOF_BE_PCM_BASE 16
+
+#define DMA_CHAN_INVALID 0xFFFFFFFF
+
+#define WIDGET_IS_DAI(id) ((id) == snd_soc_dapm_dai_in || (id) == snd_soc_dapm_dai_out)
+#define WIDGET_IS_AIF(id) ((id) == snd_soc_dapm_aif_in || (id) == snd_soc_dapm_aif_out)
+#define WIDGET_IS_AIF_OR_DAI(id) (WIDGET_IS_DAI(id) || WIDGET_IS_AIF(id))
+#define WIDGET_IS_COPIER(id) (WIDGET_IS_AIF_OR_DAI(id) || (id) == snd_soc_dapm_buffer)
+
+#define SOF_DAI_CLK_INTEL_SSP_MCLK 0
+#define SOF_DAI_CLK_INTEL_SSP_BCLK 1
+
+enum sof_widget_op {
+ SOF_WIDGET_PREPARE,
+ SOF_WIDGET_SETUP,
+ SOF_WIDGET_FREE,
+ SOF_WIDGET_UNPREPARE,
+};
+
+/*
+ * Setting the volume fractional word length (FWL) to 16 makes the
+ * linear volume gain values use the Qx.16 fixed-point format
+ */
+#define VOLUME_FWL 16
+
+#define SOF_TLV_ITEMS 3
+
+static inline u32 mixer_to_ipc(unsigned int value, u32 *volume_map, int size)
+{
+ if (value >= size)
+ return volume_map[size - 1];
+
+ return volume_map[value];
+}
+
+static inline u32 ipc_to_mixer(u32 value, u32 *volume_map, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (volume_map[i] >= value)
+ return i;
+ }
+
+ return i - 1;
+}
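+
+/*
+ * Editorial worked example, not part of the upstream header. Assuming a
+ * hypothetical three-entry Q1.16 volume table:
+ *
+ *	static u32 volume_map[] = { 0x00000000, 0x00004000, 0x00010000 };
+ *
+ *	mixer_to_ipc(1, volume_map, 3)            returns 0x00004000
+ *	mixer_to_ipc(5, volume_map, 3)            returns 0x00010000 (clamped to the last entry)
+ *	ipc_to_mixer(0x00008000, volume_map, 3)   returns 2 (first entry >= the IPC value)
+ */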
+
+struct snd_sof_widget;
+struct snd_sof_route;
+struct snd_sof_control;
+struct snd_sof_dai;
+struct snd_sof_pcm;
+
+struct snd_sof_dai_config_data {
+ int dai_index;
+ int dai_data; /* contains DAI-specific information */
+};
+
+/**
+ * struct sof_ipc_pcm_ops - IPC-specific PCM ops
+ * @hw_params: Function pointer for hw_params
+ * @hw_free: Function pointer for hw_free
+ * @trigger: Function pointer for trigger
+ * @dai_link_fixup: Function pointer for DAI link fixup
+ * @pcm_setup: Function pointer for IPC-specific PCM set up that can be used for allocating
+ * additional memory in the SOF PCM stream structure
+ * @pcm_free: Function pointer for PCM free that can be used for freeing any
+ * additional memory in the SOF PCM stream structure
+ * @delay: Function pointer for pcm delay calculation
+ * @reset_hw_params_during_stop: Flag indicating whether the hw_params should be reset during the
+ * STOP pcm trigger
+ * @ipc_first_on_start: Send IPC before invoking platform trigger during
+ * START/PAUSE_RELEASE triggers
+ * @platform_stop_during_hw_free: Invoke the platform trigger during hw_free. This is needed for
+ * IPC4 where a pipeline is only paused during stop/pause/suspend
+ * triggers. The FW keeps the host DMA running in this case and
+ * therefore the host must do the same and should stop the DMA during
+ * hw_free.
+ */
+struct sof_ipc_pcm_ops {
+ int (*hw_params)(struct snd_soc_component *component, struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params);
+ int (*hw_free)(struct snd_soc_component *component, struct snd_pcm_substream *substream);
+ int (*trigger)(struct snd_soc_component *component, struct snd_pcm_substream *substream,
+ int cmd);
+ int (*dai_link_fixup)(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params);
+ int (*pcm_setup)(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm);
+ void (*pcm_free)(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm);
+ snd_pcm_sframes_t (*delay)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ bool reset_hw_params_during_stop;
+ bool ipc_first_on_start;
+ bool platform_stop_during_hw_free;
+};
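+
+/*
+ * Editorial sketch, not part of the upstream header: a hypothetical IPC
+ * implementation only fills in the callbacks it needs, since the core checks
+ * each pointer before use. The my_ipc_* callbacks below are illustrative
+ * assumptions, not real symbols.
+ *
+ *	static const struct sof_ipc_pcm_ops my_ipc_pcm_ops = {
+ *		.hw_params = my_ipc_pcm_hw_params,
+ *		.hw_free = my_ipc_pcm_hw_free,
+ *		.trigger = my_ipc_pcm_trigger,
+ *		.reset_hw_params_during_stop = true,
+ *	};
+ */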
+
+/**
+ * struct sof_ipc_tplg_control_ops - IPC-specific ops for topology kcontrol IO
+ */
+struct sof_ipc_tplg_control_ops {
+ bool (*volume_put)(struct snd_sof_control *scontrol, struct snd_ctl_elem_value *ucontrol);
+ int (*volume_get)(struct snd_sof_control *scontrol, struct snd_ctl_elem_value *ucontrol);
+ bool (*switch_put)(struct snd_sof_control *scontrol, struct snd_ctl_elem_value *ucontrol);
+ int (*switch_get)(struct snd_sof_control *scontrol, struct snd_ctl_elem_value *ucontrol);
+ bool (*enum_put)(struct snd_sof_control *scontrol, struct snd_ctl_elem_value *ucontrol);
+ int (*enum_get)(struct snd_sof_control *scontrol, struct snd_ctl_elem_value *ucontrol);
+ int (*bytes_put)(struct snd_sof_control *scontrol, struct snd_ctl_elem_value *ucontrol);
+ int (*bytes_get)(struct snd_sof_control *scontrol, struct snd_ctl_elem_value *ucontrol);
+ int (*bytes_ext_get)(struct snd_sof_control *scontrol,
+ const unsigned int __user *binary_data, unsigned int size);
+ int (*bytes_ext_volatile_get)(struct snd_sof_control *scontrol,
+ const unsigned int __user *binary_data, unsigned int size);
+ int (*bytes_ext_put)(struct snd_sof_control *scontrol,
+ const unsigned int __user *binary_data, unsigned int size);
+ /* update control data based on notification from the DSP */
+ void (*update)(struct snd_sof_dev *sdev, void *ipc_control_message);
+ /* Optional callback to setup kcontrols associated with an swidget */
+ int (*widget_kcontrol_setup)(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget);
+ /* mandatory callback to set up volume table for volume kcontrols */
+ int (*set_up_volume_table)(struct snd_sof_control *scontrol, int tlv[SOF_TLV_ITEMS],
+ int size);
+};
+
+/**
+ * struct sof_ipc_tplg_widget_ops - IPC-specific ops for topology widgets
+ * @ipc_setup: Function pointer for setting up widget IPC params
+ * @ipc_free: Function pointer for freeing widget IPC params
+ * @token_list: List of token IDs that should be parsed for the widget
+ * @token_list_size: number of elements in token_list
+ * @bind_event: Function pointer for binding events to the widget
+ * @ipc_prepare: Optional op for preparing a widget for set up
+ * @ipc_unprepare: Optional op for unpreparing a widget
+ */
+struct sof_ipc_tplg_widget_ops {
+ int (*ipc_setup)(struct snd_sof_widget *swidget);
+ void (*ipc_free)(struct snd_sof_widget *swidget);
+ enum sof_tokens *token_list;
+ int token_list_size;
+ int (*bind_event)(struct snd_soc_component *scomp, struct snd_sof_widget *swidget,
+ u16 event_type);
+ int (*ipc_prepare)(struct snd_sof_widget *swidget,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params,
+ struct snd_pcm_hw_params *source_params, int dir);
+ void (*ipc_unprepare)(struct snd_sof_widget *swidget);
+};
+
+/**
+ * struct sof_ipc_tplg_ops - IPC-specific topology ops
+ * @widget: Array of pointers to IPC-specific ops for widgets. This should always be of size
+ * SND_SOF_DAPM_TYPE_COUNT, i.e. one per widget type. Unsupported widget types will be
+ * initialized to 0.
+ * @control: Pointer to the IPC-specific ops for topology kcontrol IO
+ * @route_setup: Function pointer for setting up pipeline connections
+ * @route_free: Function pointer for freeing pipeline connections.
+ * @token_list: List of all tokens supported by the IPC version. The size of the token_list
+ * array should be SOF_TOKEN_COUNT. The unused elements in the array will be
+ * initialized to 0.
+ * @control_setup: Function pointer for setting up kcontrol IPC-specific data
+ * @control_free: Function pointer for freeing kcontrol IPC-specific data
+ * @pipeline_complete: Function pointer for pipeline complete IPC
+ * @widget_setup: Function pointer for setting up a widget in the DSP
+ * @widget_free: Function pointer for freeing a widget in the DSP
+ * @dai_config: Function pointer for sending DAI config IPC to the DSP
+ * @dai_get_clk: Function pointer for getting the DAI clock setting
+ * @set_up_all_pipelines: Function pointer for setting up all topology pipelines
+ * @tear_down_all_pipelines: Function pointer for tearing down all topology pipelines
+ * @parse_manifest: Function pointer for ipc4 specific parsing of topology manifest
+ * @link_setup: Function pointer for IPC-specific DAI link set up
+ *
+ * Note: function pointers (ops) are optional
+ */
+struct sof_ipc_tplg_ops {
+ const struct sof_ipc_tplg_widget_ops *widget;
+ const struct sof_ipc_tplg_control_ops *control;
+ int (*route_setup)(struct snd_sof_dev *sdev, struct snd_sof_route *sroute);
+ int (*route_free)(struct snd_sof_dev *sdev, struct snd_sof_route *sroute);
+ const struct sof_token_info *token_list;
+ int (*control_setup)(struct snd_sof_dev *sdev, struct snd_sof_control *scontrol);
+ int (*control_free)(struct snd_sof_dev *sdev, struct snd_sof_control *scontrol);
+ int (*pipeline_complete)(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget);
+ int (*widget_setup)(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget);
+ int (*widget_free)(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget);
+ int (*dai_config)(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget,
+ unsigned int flags, struct snd_sof_dai_config_data *data);
+ int (*dai_get_clk)(struct snd_sof_dev *sdev, struct snd_sof_dai *dai, int clk_type);
+ int (*set_up_all_pipelines)(struct snd_sof_dev *sdev, bool verify);
+ int (*tear_down_all_pipelines)(struct snd_sof_dev *sdev, bool verify);
+ int (*parse_manifest)(struct snd_soc_component *scomp, int index,
+ struct snd_soc_tplg_manifest *man);
+ int (*link_setup)(struct snd_sof_dev *sdev, struct snd_soc_dai_link *link);
+};
+
+/** struct snd_sof_tuple - Tuple info
+ * @token: Token ID
+ * @value: union holding either a u32 value or a string
+ */
+struct snd_sof_tuple {
+ u32 token;
+ union {
+ u32 v;
+ const char *s;
+ } value;
+};
+
+/*
+ * List of SOF token IDs. The order of IDs does not matter as token arrays are looked up based on
+ * the ID.
+ */
+enum sof_tokens {
+ SOF_PCM_TOKENS,
+ SOF_PIPELINE_TOKENS,
+ SOF_SCHED_TOKENS,
+ SOF_ASRC_TOKENS,
+ SOF_SRC_TOKENS,
+ SOF_COMP_TOKENS,
+ SOF_BUFFER_TOKENS,
+ SOF_VOLUME_TOKENS,
+ SOF_PROCESS_TOKENS,
+ SOF_DAI_TOKENS,
+ SOF_DAI_LINK_TOKENS,
+ SOF_HDA_TOKENS,
+ SOF_SSP_TOKENS,
+ SOF_ALH_TOKENS,
+ SOF_DMIC_TOKENS,
+ SOF_DMIC_PDM_TOKENS,
+ SOF_ESAI_TOKENS,
+ SOF_SAI_TOKENS,
+ SOF_AFE_TOKENS,
+ SOF_CORE_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+ SOF_IN_AUDIO_FORMAT_TOKENS,
+ SOF_OUT_AUDIO_FORMAT_TOKENS,
+ SOF_COPIER_DEEP_BUFFER_TOKENS,
+ SOF_COPIER_TOKENS,
+ SOF_AUDIO_FMT_NUM_TOKENS,
+ SOF_COPIER_FORMAT_TOKENS,
+ SOF_GAIN_TOKENS,
+ SOF_ACPDMIC_TOKENS,
+ SOF_ACPI2S_TOKENS,
+
+ /* this should be the last */
+ SOF_TOKEN_COUNT,
+};
+
+/**
+ * struct sof_topology_token - SOF topology token definition
+ * @token: Token number
+ * @type: Token type
+ * @get_token: Function pointer to parse the token value and save it in an object
+ * @offset: Offset within an object to save the token value into
+ */
+struct sof_topology_token {
+ u32 token;
+ u32 type;
+ int (*get_token)(void *elem, void *object, u32 offset);
+ u32 offset;
+};
+
+struct sof_token_info {
+ const char *name;
+ const struct sof_topology_token *tokens;
+ int count;
+};
+
+/**
+ * struct snd_sof_pcm_stream_pipeline_list - List of pipelines associated with a PCM stream
+ * @count: number of pipelines in the @pipelines array
+ * @pipelines: array of pipelines
+ */
+struct snd_sof_pcm_stream_pipeline_list {
+ u32 count;
+ struct snd_sof_pipeline **pipelines;
+};
+
+/* PCM stream, mapped to FW component */
+struct snd_sof_pcm_stream {
+ u32 comp_id;
+ struct snd_dma_buffer page_table;
+ struct sof_ipc_stream_posn posn;
+ struct snd_pcm_substream *substream;
+ struct snd_compr_stream *cstream;
+ struct work_struct period_elapsed_work;
+ struct snd_soc_dapm_widget_list *list; /* list of connected DAPM widgets */
+ bool d0i3_compatible; /* DSP can be in D0I3 when this pcm is opened */
+ /*
+ * flag to indicate that the DSP pipelines should be kept
+ * active or not while suspending the stream
+ */
+ bool suspend_ignored;
+ struct snd_sof_pcm_stream_pipeline_list pipeline_list;
+
+ /* used by IPC implementation and core does not touch it */
+ void *private;
+};
+
+/* ALSA SOF PCM device */
+struct snd_sof_pcm {
+ struct snd_soc_component *scomp;
+ struct snd_soc_tplg_pcm pcm;
+ struct snd_sof_pcm_stream stream[2];
+ struct list_head list; /* list in sdev pcm list */
+ struct snd_pcm_hw_params params[2];
+ bool prepared[2]; /* PCM_PARAMS set successfully */
+};
+
+struct snd_sof_led_control {
+ unsigned int use_led;
+ unsigned int direction;
+ int led_value;
+};
+
+/* ALSA SOF Kcontrol device */
+struct snd_sof_control {
+ struct snd_soc_component *scomp;
+ const char *name;
+ int comp_id;
+ int min_volume_step; /* min volume step for volume_table */
+ int max_volume_step; /* max volume step for volume_table */
+ int num_channels;
+ unsigned int access;
+ int info_type;
+ int index; /* pipeline ID */
+ void *priv; /* private data copied from topology */
+ size_t priv_size; /* size of private data */
+ size_t max_size;
+ void *ipc_control_data;
+ void *old_ipc_control_data;
+ int max; /* applicable to volume controls */
+ u32 size; /* cdata size */
+ u32 *volume_table; /* volume table computed from tlv data*/
+
+ struct list_head list; /* list in sdev control list */
+
+ struct snd_sof_led_control led_ctl;
+
+ /* if true, the control's data needs to be updated from Firmware */
+ bool comp_data_dirty;
+};
+
+/** struct snd_sof_dai_link - DAI link info
+ * @tuples: array of parsed tuples
+ * @num_tuples: number of tuples in the tuples array
+ * @link: Pointer to snd_soc_dai_link
+ * @hw_configs: Pointer to hw configs in topology
+ * @num_hw_configs: Number of hw configs in topology
+ * @default_hw_cfg_id: Default hw config ID
+ * @type: DAI type
+ * @list: item in snd_sof_dev dai_link list
+ */
+struct snd_sof_dai_link {
+ struct snd_sof_tuple *tuples;
+ int num_tuples;
+ struct snd_soc_dai_link *link;
+ struct snd_soc_tplg_hw_config *hw_configs;
+ int num_hw_configs;
+ int default_hw_cfg_id;
+ int type;
+ struct list_head list;
+};
+
+/* ASoC SOF DAPM widget */
+struct snd_sof_widget {
+ struct snd_soc_component *scomp;
+ int comp_id;
+ int pipeline_id;
+ /*
+ * the prepared flag indicates that the widget has been prepared to be
+ * set up in the DSP.
+ */
+ bool prepared;
+
+ struct mutex setup_mutex; /* to protect the swidget setup and free operations */
+
+ /*
+ * use_count is protected by the PCM mutex held by the core and by
+ * setup_mutex against non-stream-domain races (kcontrol access, for
+ * example)
+ */
+ int use_count;
+
+ int core;
+ int id; /* id is the DAPM widget type */
+ /*
+ * Instance ID is set dynamically when the widget gets set up in the FW. It should be
+ * unique for each module type across all pipelines. This will not be used in SOF_IPC.
+ */
+ int instance_id;
+
+ /*
+ * Flag indicating if the widget should be set up dynamically when a PCM is opened.
+ * This flag is only set for the scheduler type widget in topology. During topology
+ * loading, this flag is propagated to all the widgets belonging to the same pipeline.
+ * When this flag is not set, a widget is set up at the time of topology loading
+ * and retained until the DSP enters D3. It will need to be set up again when resuming
+ * from D3.
+ */
+ bool dynamic_pipeline_widget;
+
+ struct snd_soc_dapm_widget *widget;
+ struct list_head list; /* list in sdev widget list */
+ struct snd_sof_pipeline *spipe;
+ void *module_info;
+
+ const guid_t uuid;
+
+ int num_tuples;
+ struct snd_sof_tuple *tuples;
+
+ /*
+ * The allowed range for num_input/output_pins is [0, SOF_WIDGET_MAX_NUM_PINS].
+ * Widgets may have zero input or output pins, for example the tone widget has
+ * zero input pins.
+ */
+ u32 num_input_pins;
+ u32 num_output_pins;
+
+ /*
+ * The input/output pin binding arrays take the form of
+ * [widget_name_connected_to_pin0, widget_name_connected_to_pin1, ...],
+ * with the index as the queue ID.
+ *
+ * The arrays are used for special pin binding. Note that even if only one
+ * input/output pin requires special binding, the binding must be defined
+ * for all input/output pins in topology; for pins that are not used, use
+ * the value "NotConnected".
+ *
+ * If pin binding is not defined in topology, there is nothing to parse in
+ * the kernel and input_pin_binding/output_pin_binding will be NULL.
+ */
+ char **input_pin_binding;
+ char **output_pin_binding;
+
+ struct ida output_queue_ida;
+ struct ida input_queue_ida;
+
+ void *private; /* core does not touch this */
+};
+
+/** struct snd_sof_pipeline - ASoC SOF pipeline
+ * @pipe_widget: Pointer to the pipeline widget
+ * @started_count: Number of PCMs that have started this pipeline
+ * @paused_count: Number of PCMs that have started and are currently paused on
+ *		  this pipeline
+ * @complete: flag used to indicate that pipeline set up is complete.
+ * @core_mask: Mask containing target cores for all modules in the pipeline
+ * @list: List item in sdev pipeline_list
+ */
+struct snd_sof_pipeline {
+ struct snd_sof_widget *pipe_widget;
+ int started_count;
+ int paused_count;
+ int complete;
+ unsigned long core_mask;
+ struct list_head list;
+};
+
+/* ASoC SOF DAPM route */
+struct snd_sof_route {
+ struct snd_soc_component *scomp;
+
+ struct snd_soc_dapm_route *route;
+ struct list_head list; /* list in sdev route list */
+ struct snd_sof_widget *src_widget;
+ struct snd_sof_widget *sink_widget;
+ bool setup;
+
+ int src_queue_id;
+ int dst_queue_id;
+
+ void *private;
+};
+
+/* ASoC DAI device */
+struct snd_sof_dai {
+ struct snd_soc_component *scomp;
+ const char *name;
+
+ int number_configs;
+ int current_config;
+ struct list_head list; /* list in sdev dai list */
+ /* core should not touch this */
+ const void *platform_private;
+ void *private;
+};
+
+/*
+ * Kcontrols.
+ */
+
+int snd_sof_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_volume_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+int snd_sof_switch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_enum_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_enum_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_bytes_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_bytes_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size);
+int snd_sof_bytes_ext_get(struct snd_kcontrol *kcontrol,
+ unsigned int __user *binary_data,
+ unsigned int size);
+int snd_sof_bytes_ext_volatile_get(struct snd_kcontrol *kcontrol, unsigned int __user *binary_data,
+ unsigned int size);
+void snd_sof_control_notify(struct snd_sof_dev *sdev,
+ struct sof_ipc_ctrl_data *cdata);
+
+/*
+ * Topology.
+ * There is no snd_sof_free_topology since topology components will
+ * be freed by snd_soc_unregister_component.
+ */
+int snd_sof_load_topology(struct snd_soc_component *scomp, const char *file);
+
+/*
+ * Stream IPC
+ */
+int snd_sof_ipc_stream_posn(struct snd_soc_component *scomp,
+ struct snd_sof_pcm *spcm, int direction,
+ struct sof_ipc_stream_posn *posn);
+
+struct snd_sof_widget *snd_sof_find_swidget(struct snd_soc_component *scomp,
+ const char *name);
+struct snd_sof_widget *
+snd_sof_find_swidget_sname(struct snd_soc_component *scomp,
+ const char *pcm_name, int dir);
+struct snd_sof_dai *snd_sof_find_dai(struct snd_soc_component *scomp,
+ const char *name);
+
+static inline
+struct snd_sof_pcm *snd_sof_find_spcm_dai(struct snd_soc_component *scomp,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_pcm *spcm;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ if (le32_to_cpu(spcm->pcm.dai_id) == rtd->dai_link->id)
+ return spcm;
+ }
+
+ return NULL;
+}
+
+struct snd_sof_pcm *snd_sof_find_spcm_name(struct snd_soc_component *scomp,
+ const char *name);
+struct snd_sof_pcm *snd_sof_find_spcm_comp(struct snd_soc_component *scomp,
+ unsigned int comp_id,
+ int *direction);
+void snd_sof_pcm_period_elapsed(struct snd_pcm_substream *substream);
+void snd_sof_pcm_init_elapsed_work(struct work_struct *work);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMPRESS)
+void snd_sof_compr_fragment_elapsed(struct snd_compr_stream *cstream);
+void snd_sof_compr_init_elapsed_work(struct work_struct *work);
+#else
+static inline void snd_sof_compr_fragment_elapsed(struct snd_compr_stream *cstream) { }
+static inline void snd_sof_compr_init_elapsed_work(struct work_struct *work) { }
+#endif
+
+/* DAI link fixup */
+int sof_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params);
+
+/* PM */
+bool snd_sof_stream_suspend_ignored(struct snd_sof_dev *sdev);
+bool snd_sof_dsp_only_d0i3_compatible_stream_active(struct snd_sof_dev *sdev);
+
+/* Machine driver enumeration */
+int sof_machine_register(struct snd_sof_dev *sdev, void *pdata);
+void sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata);
+
+int sof_widget_setup(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget);
+int sof_widget_free(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget);
+int sof_route_setup(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget *wsource,
+ struct snd_soc_dapm_widget *wsink);
+
+/* PCM */
+int sof_widget_list_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params,
+ int dir);
+int sof_widget_list_free(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm, int dir);
+int sof_pcm_dsp_pcm_free(struct snd_pcm_substream *substream, struct snd_sof_dev *sdev,
+ struct snd_sof_pcm *spcm);
+int sof_pcm_stream_free(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream,
+ struct snd_sof_pcm *spcm, int dir, bool free_widget_list);
+int get_token_u32(void *elem, void *object, u32 offset);
+int get_token_u16(void *elem, void *object, u32 offset);
+int get_token_comp_format(void *elem, void *object, u32 offset);
+int get_token_dai_type(void *elem, void *object, u32 offset);
+int get_token_uuid(void *elem, void *object, u32 offset);
+int get_token_string(void *elem, void *object, u32 offset);
+int sof_update_ipc_object(struct snd_soc_component *scomp, void *object, enum sof_tokens token_id,
+ struct snd_sof_tuple *tuples, int num_tuples,
+ size_t object_size, int token_instance_num);
+u32 vol_compute_gain(u32 value, int *tlv);
+#endif
diff --git a/sound/soc/sof/sof-client-ipc-flood-test.c b/sound/soc/sof/sof-client-ipc-flood-test.c
new file mode 100644
index 000000000..c0d6723ae
--- /dev/null
+++ b/sound/soc/sof/sof-client-ipc-flood-test.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+// Authors: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+//
+
+#include <linux/auxiliary_bus.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <sound/sof/header.h>
+
+#include "sof-client.h"
+
+#define MAX_IPC_FLOOD_DURATION_MS 1000
+#define MAX_IPC_FLOOD_COUNT 10000
+#define IPC_FLOOD_TEST_RESULT_LEN 512
+#define SOF_IPC_CLIENT_SUSPEND_DELAY_MS 3000
+
+#define DEBUGFS_IPC_FLOOD_COUNT "ipc_flood_count"
+#define DEBUGFS_IPC_FLOOD_DURATION "ipc_flood_duration_ms"
+
+struct sof_ipc_flood_priv {
+ struct dentry *dfs_root;
+ struct dentry *dfs_link[2];
+ char *buf;
+};
+
+static int sof_ipc_flood_dfs_open(struct inode *inode, struct file *file)
+{
+ struct sof_client_dev *cdev = inode->i_private;
+ int ret;
+
+ if (sof_client_get_fw_state(cdev) == SOF_FW_CRASHED)
+ return -ENODEV;
+
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (unlikely(ret))
+ return ret;
+
+ ret = simple_open(inode, file);
+ if (ret)
+ debugfs_file_put(file->f_path.dentry);
+
+ return ret;
+}
+
+/*
+ * Helper function to perform the flood test. Only one of the two parameters,
+ * ipc_duration_ms or ipc_count, will be non-zero; it determines the type of test.
+ */
+static int sof_debug_ipc_flood_test(struct sof_client_dev *cdev,
+ bool flood_duration_test,
+ unsigned long ipc_duration_ms,
+ unsigned long ipc_count)
+{
+ struct sof_ipc_flood_priv *priv = cdev->data;
+ struct device *dev = &cdev->auxdev.dev;
+ struct sof_ipc_cmd_hdr hdr;
+ u64 min_response_time = U64_MAX;
+ ktime_t start, end, test_end;
+ u64 avg_response_time = 0;
+ u64 max_response_time = 0;
+ u64 ipc_response_time;
+ int i = 0;
+ int ret;
+
+ /* configure test IPC */
+ hdr.cmd = SOF_IPC_GLB_TEST_MSG | SOF_IPC_TEST_IPC_FLOOD;
+ hdr.size = sizeof(hdr);
+
+ /* set test end time for duration flood test */
+ if (flood_duration_test)
+ test_end = ktime_get_ns() + ipc_duration_ms * NSEC_PER_MSEC;
+
+ /* send test IPC's */
+ while (1) {
+ start = ktime_get();
+ ret = sof_client_ipc_tx_message_no_reply(cdev, &hdr);
+ end = ktime_get();
+
+ if (ret < 0)
+ break;
+
+ /* compute min and max response times */
+ ipc_response_time = ktime_to_ns(ktime_sub(end, start));
+ min_response_time = min(min_response_time, ipc_response_time);
+ max_response_time = max(max_response_time, ipc_response_time);
+
+ /* sum up response times */
+ avg_response_time += ipc_response_time;
+ i++;
+
+ /* test complete? */
+ if (flood_duration_test) {
+ if (ktime_to_ns(end) >= test_end)
+ break;
+ } else {
+ if (i == ipc_count)
+ break;
+ }
+ }
+
+ if (ret < 0)
+ dev_err(dev, "ipc flood test failed at %d iterations\n", i);
+
+ /* return if the first IPC fails */
+ if (!i)
+ return ret;
+
+ /* compute average response time */
+ do_div(avg_response_time, i);
+
+ /* clear previous test output */
+ memset(priv->buf, 0, IPC_FLOOD_TEST_RESULT_LEN);
+
+ if (!ipc_count) {
+ dev_dbg(dev, "IPC Flood test duration: %lums\n", ipc_duration_ms);
+ snprintf(priv->buf, IPC_FLOOD_TEST_RESULT_LEN,
+ "IPC Flood test duration: %lums\n", ipc_duration_ms);
+ }
+
+ dev_dbg(dev, "IPC Flood count: %d, Avg response time: %lluns\n",
+ i, avg_response_time);
+ dev_dbg(dev, "Max response time: %lluns\n", max_response_time);
+ dev_dbg(dev, "Min response time: %lluns\n", min_response_time);
+
+ /* format output string and save test results */
+ snprintf(priv->buf + strlen(priv->buf),
+ IPC_FLOOD_TEST_RESULT_LEN - strlen(priv->buf),
+ "IPC Flood count: %d\nAvg response time: %lluns\n",
+ i, avg_response_time);
+
+ snprintf(priv->buf + strlen(priv->buf),
+ IPC_FLOOD_TEST_RESULT_LEN - strlen(priv->buf),
+ "Max response time: %lluns\nMin response time: %lluns\n",
+ max_response_time, min_response_time);
+
+ return ret;
+}
+
+/*
+ * Writing to the debugfs entry initiates the IPC flood test based on
+ * the IPC count or the duration specified by the user.
+ */
+static ssize_t sof_ipc_flood_dfs_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct sof_client_dev *cdev = file->private_data;
+ struct device *dev = &cdev->auxdev.dev;
+ unsigned long ipc_duration_ms = 0;
+ bool flood_duration_test = false;
+ unsigned long ipc_count = 0;
+ struct dentry *dentry;
+ int err;
+ size_t size;
+ char *string;
+ int ret;
+
+ string = kzalloc(count + 1, GFP_KERNEL);
+ if (!string)
+ return -ENOMEM;
+
+ size = simple_write_to_buffer(string, count, ppos, buffer, count);
+
+ /*
+ * The write op is only supported for the ipc_flood_count and
+ * ipc_flood_duration_ms debugfs entries at the moment.
+ * ipc_flood_count floods the DSP with the specified number of IPCs.
+ * ipc_flood_duration_ms floods the DSP for the duration specified
+ * in the debugfs entry.
+ */
+ dentry = file->f_path.dentry;
+ if (strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_COUNT) &&
+ strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_DURATION)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_DURATION))
+ flood_duration_test = true;
+
+ /* test completion criterion */
+ if (flood_duration_test)
+ ret = kstrtoul(string, 0, &ipc_duration_ms);
+ else
+ ret = kstrtoul(string, 0, &ipc_count);
+ if (ret < 0)
+ goto out;
+
+ /* limit max duration/ipc count for flood test */
+ if (flood_duration_test) {
+ if (!ipc_duration_ms) {
+ ret = size;
+ goto out;
+ }
+
+ /* find the minimum. min() is not used to avoid warnings */
+ if (ipc_duration_ms > MAX_IPC_FLOOD_DURATION_MS)
+ ipc_duration_ms = MAX_IPC_FLOOD_DURATION_MS;
+ } else {
+ if (!ipc_count) {
+ ret = size;
+ goto out;
+ }
+
+ /* find the minimum. min() is not used to avoid warnings */
+ if (ipc_count > MAX_IPC_FLOOD_COUNT)
+ ipc_count = MAX_IPC_FLOOD_COUNT;
+ }
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err_ratelimited(dev, "debugfs write failed to resume %d\n", ret);
+ goto out;
+ }
+
+ /* flood test */
+ ret = sof_debug_ipc_flood_test(cdev, flood_duration_test,
+ ipc_duration_ms, ipc_count);
+
+ pm_runtime_mark_last_busy(dev);
+ err = pm_runtime_put_autosuspend(dev);
+ if (err < 0)
+ dev_err_ratelimited(dev, "debugfs write failed to idle %d\n", err);
+
+ /* return size if test is successful */
+ if (ret >= 0)
+ ret = size;
+out:
+ kfree(string);
+ return ret;
+}
+
+/* return the result of the last IPC flood test */
+static ssize_t sof_ipc_flood_dfs_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct sof_client_dev *cdev = file->private_data;
+ struct sof_ipc_flood_priv *priv = cdev->data;
+ size_t size_ret;
+
+ struct dentry *dentry;
+
+ dentry = file->f_path.dentry;
+ if (!strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_COUNT) ||
+ !strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_DURATION)) {
+ if (*ppos)
+ return 0;
+
+ count = min_t(size_t, count, strlen(priv->buf));
+ size_ret = copy_to_user(buffer, priv->buf, count);
+ if (size_ret)
+ return -EFAULT;
+
+ *ppos += count;
+ return count;
+ }
+ return count;
+}
+
+static int sof_ipc_flood_dfs_release(struct inode *inode, struct file *file)
+{
+ debugfs_file_put(file->f_path.dentry);
+
+ return 0;
+}
+
+static const struct file_operations sof_ipc_flood_fops = {
+ .open = sof_ipc_flood_dfs_open,
+ .read = sof_ipc_flood_dfs_read,
+ .llseek = default_llseek,
+ .write = sof_ipc_flood_dfs_write,
+ .release = sof_ipc_flood_dfs_release,
+
+ .owner = THIS_MODULE,
+};
+
+/*
+ * The IPC test client creates a couple of debugfs entries that will be used
+ * for flood tests. Users can write to these entries to execute the IPC flood test
+ * by specifying either the number of IPCs to flood the DSP with or the duration
+ * (in ms) for which the DSP should be flooded with test IPCs. At the
+ * end of each test, the average, min and max response times are reported back.
+ * The results of the last flood test can be accessed by reading the debugfs
+ * entries.
+ */
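+/*
+ * Editorial usage sketch: the debugfs paths below are assumptions and depend
+ * on the debugfs mount point and the SOF client device name.
+ *
+ *	# flood the DSP with 1000 test IPCs, then read back the statistics
+ *	echo 1000 > /sys/kernel/debug/sof/ipc_flood_count
+ *	cat /sys/kernel/debug/sof/ipc_flood_count
+ *
+ *	# alternatively, flood the DSP for 500 ms
+ *	echo 500 > /sys/kernel/debug/sof/ipc_flood_duration_ms
+ */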
+static int sof_ipc_flood_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct sof_client_dev *cdev = auxiliary_dev_to_sof_client_dev(auxdev);
+ struct dentry *debugfs_root = sof_client_get_debugfs_root(cdev);
+ struct device *dev = &auxdev->dev;
+ struct sof_ipc_flood_priv *priv;
+
+ /* allocate memory for client data */
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->buf = devm_kmalloc(dev, IPC_FLOOD_TEST_RESULT_LEN, GFP_KERNEL);
+ if (!priv->buf)
+ return -ENOMEM;
+
+ cdev->data = priv;
+
+ /* create debugfs root folder with device name under parent SOF dir */
+ priv->dfs_root = debugfs_create_dir(dev_name(dev), debugfs_root);
+ if (!IS_ERR_OR_NULL(priv->dfs_root)) {
+ /* create read-write ipc_flood_count debugfs entry */
+ debugfs_create_file(DEBUGFS_IPC_FLOOD_COUNT, 0644, priv->dfs_root,
+ cdev, &sof_ipc_flood_fops);
+
+ /* create read-write ipc_flood_duration_ms debugfs entry */
+ debugfs_create_file(DEBUGFS_IPC_FLOOD_DURATION, 0644,
+ priv->dfs_root, cdev, &sof_ipc_flood_fops);
+
+ if (auxdev->id == 0) {
+ /*
+ * Create symlinks for backwards compatibility to the
+ * first IPC flood test instance
+ */
+ char target[100];
+
+ snprintf(target, 100, "%s/" DEBUGFS_IPC_FLOOD_COUNT,
+ dev_name(dev));
+ priv->dfs_link[0] =
+ debugfs_create_symlink(DEBUGFS_IPC_FLOOD_COUNT,
+ debugfs_root, target);
+
+ snprintf(target, 100, "%s/" DEBUGFS_IPC_FLOOD_DURATION,
+ dev_name(dev));
+ priv->dfs_link[1] =
+ debugfs_create_symlink(DEBUGFS_IPC_FLOOD_DURATION,
+ debugfs_root, target);
+ }
+ }
+
+ /* enable runtime PM */
+ pm_runtime_set_autosuspend_delay(dev, SOF_IPC_CLIENT_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_idle(dev);
+
+ return 0;
+}
+
+static void sof_ipc_flood_remove(struct auxiliary_device *auxdev)
+{
+ struct sof_client_dev *cdev = auxiliary_dev_to_sof_client_dev(auxdev);
+ struct sof_ipc_flood_priv *priv = cdev->data;
+
+ pm_runtime_disable(&auxdev->dev);
+
+ if (auxdev->id == 0) {
+ debugfs_remove(priv->dfs_link[0]);
+ debugfs_remove(priv->dfs_link[1]);
+ }
+
+ debugfs_remove_recursive(priv->dfs_root);
+}
+
+static const struct auxiliary_device_id sof_ipc_flood_client_id_table[] = {
+ { .name = "snd_sof.ipc_flood" },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, sof_ipc_flood_client_id_table);
+
+/*
+ * No need for driver pm_ops as the generic pm callbacks in the auxiliary bus
+ * type are enough to ensure that the parent SOF device resumes to bring the DSP
+ * back to D0.
+ * Driver name will be set based on KBUILD_MODNAME.
+ */
+static struct auxiliary_driver sof_ipc_flood_client_drv = {
+ .probe = sof_ipc_flood_probe,
+ .remove = sof_ipc_flood_remove,
+
+ .id_table = sof_ipc_flood_client_id_table,
+};
+
+module_auxiliary_driver(sof_ipc_flood_client_drv);
+
+MODULE_DESCRIPTION("SOF IPC Flood Test Client Driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/sof-client-ipc-kernel-injector.c b/sound/soc/sof/sof-client-ipc-kernel-injector.c
new file mode 100644
index 000000000..ad0ed2d57
--- /dev/null
+++ b/sound/soc/sof/sof-client-ipc-kernel-injector.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2023 Google Inc. All rights reserved.
+//
+// Author: Curtis Malainey <cujomalainey@chromium.org>
+//
+
+#include <linux/auxiliary_bus.h>
+#include <linux/debugfs.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <sound/sof/header.h>
+
+#include "sof-client.h"
+
+#define SOF_IPC_CLIENT_SUSPEND_DELAY_MS 3000
+
+struct sof_msg_inject_priv {
+ struct dentry *kernel_dfs_file;
+ size_t max_msg_size;
+
+ void *kernel_buffer;
+};
+
+static int sof_msg_inject_dfs_open(struct inode *inode, struct file *file)
+{
+ int ret = debugfs_file_get(file->f_path.dentry);
+
+ if (unlikely(ret))
+ return ret;
+
+ ret = simple_open(inode, file);
+ if (ret)
+ debugfs_file_put(file->f_path.dentry);
+
+ return ret;
+}
+
+static ssize_t sof_kernel_msg_inject_dfs_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct sof_client_dev *cdev = file->private_data;
+ struct sof_msg_inject_priv *priv = cdev->data;
+ struct sof_ipc_cmd_hdr *hdr = priv->kernel_buffer;
+ struct device *dev = &cdev->auxdev.dev;
+ ssize_t size;
+ int ret;
+
+ if (*ppos)
+ return 0;
+
+ size = simple_write_to_buffer(priv->kernel_buffer, priv->max_msg_size,
+ ppos, buffer, count);
+ if (size < 0)
+ return size;
+ if (size != count)
+ return -EFAULT;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err_ratelimited(dev, "debugfs write failed to resume %d\n", ret);
+ return ret;
+ }
+
+ sof_client_ipc_rx_message(cdev, hdr, priv->kernel_buffer);
+
+ pm_runtime_mark_last_busy(dev);
+ ret = pm_runtime_put_autosuspend(dev);
+ if (ret < 0)
+ dev_err_ratelimited(dev, "debugfs write failed to idle %d\n", ret);
+
+ return count;
+};
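+
+/*
+ * Editorial note, not part of the upstream file: the data written above is
+ * not transmitted to the DSP. It is handed to sof_client_ipc_rx_message(),
+ * so the kernel IPC RX path processes it as if the firmware had sent the
+ * message, which allows notification handling to be exercised without
+ * firmware involvement.
+ */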
+
+static int sof_msg_inject_dfs_release(struct inode *inode, struct file *file)
+{
+ debugfs_file_put(file->f_path.dentry);
+
+ return 0;
+}
+
+static const struct file_operations sof_kernel_msg_inject_fops = {
+ .open = sof_msg_inject_dfs_open,
+ .write = sof_kernel_msg_inject_dfs_write,
+ .release = sof_msg_inject_dfs_release,
+
+ .owner = THIS_MODULE,
+};
+
+static int sof_msg_inject_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct sof_client_dev *cdev = auxiliary_dev_to_sof_client_dev(auxdev);
+ struct dentry *debugfs_root = sof_client_get_debugfs_root(cdev);
+ struct device *dev = &auxdev->dev;
+ struct sof_msg_inject_priv *priv;
+ size_t alloc_size;
+
+ /* allocate memory for client data */
+ priv = devm_kzalloc(&auxdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->max_msg_size = sof_client_get_ipc_max_payload_size(cdev);
+ alloc_size = priv->max_msg_size;
+ priv->kernel_buffer = devm_kmalloc(dev, alloc_size, GFP_KERNEL);
+
+ if (!priv->kernel_buffer)
+ return -ENOMEM;
+
+ cdev->data = priv;
+
+ priv->kernel_dfs_file = debugfs_create_file("kernel_ipc_msg_inject", 0644,
+ debugfs_root, cdev,
+ &sof_kernel_msg_inject_fops);
+
+ /* enable runtime PM */
+ pm_runtime_set_autosuspend_delay(dev, SOF_IPC_CLIENT_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_idle(dev);
+
+ return 0;
+}
+
+static void sof_msg_inject_remove(struct auxiliary_device *auxdev)
+{
+ struct sof_client_dev *cdev = auxiliary_dev_to_sof_client_dev(auxdev);
+ struct sof_msg_inject_priv *priv = cdev->data;
+
+ pm_runtime_disable(&auxdev->dev);
+
+ debugfs_remove(priv->kernel_dfs_file);
+}
+
+static const struct auxiliary_device_id sof_msg_inject_client_id_table[] = {
+ { .name = "snd_sof.kernel_injector" },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, sof_msg_inject_client_id_table);
+
+/*
+ * No need for driver pm_ops as the generic pm callbacks in the auxiliary bus
+ * type are enough to ensure that the parent SOF device resumes to bring the DSP
+ * back to D0.
+ * Driver name will be set based on KBUILD_MODNAME.
+ */
+static struct auxiliary_driver sof_msg_inject_client_drv = {
+ .probe = sof_msg_inject_probe,
+ .remove = sof_msg_inject_remove,
+
+ .id_table = sof_msg_inject_client_id_table,
+};
+
+module_auxiliary_driver(sof_msg_inject_client_drv);
+
+MODULE_DESCRIPTION("SOF IPC Kernel Injector Client Driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/sof-client-ipc-msg-injector.c b/sound/soc/sof/sof-client-ipc-msg-injector.c
new file mode 100644
index 000000000..752d53206
--- /dev/null
+++ b/sound/soc/sof/sof-client-ipc-msg-injector.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+// Author: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+//
+
+#include <linux/auxiliary_bus.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <sound/sof/header.h>
+#include <sound/sof/ipc4/header.h>
+
+#include "sof-client.h"
+
+#define SOF_IPC_CLIENT_SUSPEND_DELAY_MS 3000
+
+struct sof_msg_inject_priv {
+ struct dentry *dfs_file;
+ size_t max_msg_size;
+ enum sof_ipc_type ipc_type;
+
+ void *tx_buffer;
+ void *rx_buffer;
+};
+
+static int sof_msg_inject_dfs_open(struct inode *inode, struct file *file)
+{
+ struct sof_client_dev *cdev = inode->i_private;
+ int ret;
+
+ if (sof_client_get_fw_state(cdev) == SOF_FW_CRASHED)
+ return -ENODEV;
+
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (unlikely(ret))
+ return ret;
+
+ ret = simple_open(inode, file);
+ if (ret)
+ debugfs_file_put(file->f_path.dentry);
+
+ return ret;
+}
+
+static ssize_t sof_msg_inject_dfs_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct sof_client_dev *cdev = file->private_data;
+ struct sof_msg_inject_priv *priv = cdev->data;
+ struct sof_ipc_reply *rhdr = priv->rx_buffer;
+
+ if (!rhdr->hdr.size || !count || *ppos)
+ return 0;
+
+ if (count > rhdr->hdr.size)
+ count = rhdr->hdr.size;
+
+ if (copy_to_user(buffer, priv->rx_buffer, count))
+ return -EFAULT;
+
+ *ppos += count;
+ return count;
+}
+
+static ssize_t sof_msg_inject_ipc4_dfs_read(struct file *file,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct sof_client_dev *cdev = file->private_data;
+ struct sof_msg_inject_priv *priv = cdev->data;
+ struct sof_ipc4_msg *ipc4_msg = priv->rx_buffer;
+ size_t header_size = sizeof(ipc4_msg->header_u64);
+ size_t remaining;
+
+ if (!ipc4_msg->header_u64 || !count || *ppos)
+ return 0;
+
+ /* we need space for the header at minimum (u64) */
+ if (count < header_size)
+ return -ENOSPC;
+
+ remaining = header_size;
+
+ /* only LARGE_CONFIG_GET replies carry a payload */
+ if (SOF_IPC4_MSG_IS_MODULE_MSG(ipc4_msg->primary) &&
+ (SOF_IPC4_MSG_TYPE_GET(ipc4_msg->primary) == SOF_IPC4_MOD_LARGE_CONFIG_GET))
+ remaining += ipc4_msg->data_size;
+
+ if (count > remaining)
+ count = remaining;
+ else if (count < remaining)
+ remaining = count;
+
+ /* copy the header first */
+ if (copy_to_user(buffer, &ipc4_msg->header_u64, header_size))
+ return -EFAULT;
+
+ *ppos += header_size;
+ remaining -= header_size;
+
+ if (!remaining)
+ return count;
+
+ if (remaining > ipc4_msg->data_size)
+ remaining = ipc4_msg->data_size;
+
+ /* Copy the payload */
+ if (copy_to_user(buffer + *ppos, ipc4_msg->data_ptr, remaining))
+ return -EFAULT;
+
+ *ppos += remaining;
+ return count;
+}
+
+static int sof_msg_inject_send_message(struct sof_client_dev *cdev)
+{
+ struct sof_msg_inject_priv *priv = cdev->data;
+ struct device *dev = &cdev->auxdev.dev;
+ int ret, err;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err_ratelimited(dev, "debugfs write failed to resume %d\n", ret);
+ return ret;
+ }
+
+ /* send the message */
+ ret = sof_client_ipc_tx_message(cdev, priv->tx_buffer, priv->rx_buffer,
+ priv->max_msg_size);
+ if (ret)
+ dev_err(dev, "IPC message send failed: %d\n", ret);
+
+ pm_runtime_mark_last_busy(dev);
+ err = pm_runtime_put_autosuspend(dev);
+ if (err < 0)
+ dev_err_ratelimited(dev, "debugfs write failed to idle %d\n", err);
+
+ return ret;
+}
+
+static ssize_t sof_msg_inject_dfs_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct sof_client_dev *cdev = file->private_data;
+ struct sof_msg_inject_priv *priv = cdev->data;
+ ssize_t size;
+ int ret;
+
+ if (*ppos)
+ return 0;
+
+ size = simple_write_to_buffer(priv->tx_buffer, priv->max_msg_size,
+ ppos, buffer, count);
+ if (size < 0)
+ return size;
+ if (size != count)
+ return -EFAULT;
+
+ memset(priv->rx_buffer, 0, priv->max_msg_size);
+
+ ret = sof_msg_inject_send_message(cdev);
+
+ /* return the error code if test failed */
+ if (ret < 0)
+ size = ret;
+
+ return size;
+};
+
+static ssize_t sof_msg_inject_ipc4_dfs_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct sof_client_dev *cdev = file->private_data;
+ struct sof_msg_inject_priv *priv = cdev->data;
+ struct sof_ipc4_msg *ipc4_msg = priv->tx_buffer;
+ size_t data_size;
+ int ret;
+
+ if (*ppos)
+ return 0;
+
+ if (count < sizeof(ipc4_msg->header_u64))
+ return -EINVAL;
+
+ /* copy the header first */
+ if (copy_from_user(&ipc4_msg->header_u64, buffer,
+ sizeof(ipc4_msg->header_u64)))
+ return -EFAULT;
+
+ data_size = count - sizeof(ipc4_msg->header_u64);
+ if (data_size > priv->max_msg_size)
+ return -EINVAL;
+
+ /* Copy the payload */
+ if (copy_from_user(ipc4_msg->data_ptr,
+ buffer + sizeof(ipc4_msg->header_u64), data_size))
+ return -EFAULT;
+
+ ipc4_msg->data_size = data_size;
+
+ /* Initialize the reply storage */
+ ipc4_msg = priv->rx_buffer;
+ ipc4_msg->header_u64 = 0;
+ ipc4_msg->data_size = priv->max_msg_size;
+ memset(ipc4_msg->data_ptr, 0, priv->max_msg_size);
+
+ ret = sof_msg_inject_send_message(cdev);
+
+ /* return the error code if test failed */
+ if (ret < 0)
+ return ret;
+
+ return count;
+};
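+
+/*
+ * Editorial note, not part of the upstream file: a write consists of the
+ * 8-byte IPC4 header (primary and extension words) followed by an optional
+ * payload, as parsed above. The reply can then be read back from the same
+ * debugfs file: the read returns the 8-byte reply header and, for
+ * LARGE_CONFIG_GET replies, the returned payload.
+ */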
+
+static int sof_msg_inject_dfs_release(struct inode *inode, struct file *file)
+{
+ debugfs_file_put(file->f_path.dentry);
+
+ return 0;
+}
+
+static const struct file_operations sof_msg_inject_fops = {
+ .open = sof_msg_inject_dfs_open,
+ .read = sof_msg_inject_dfs_read,
+ .write = sof_msg_inject_dfs_write,
+ .llseek = default_llseek,
+ .release = sof_msg_inject_dfs_release,
+
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations sof_msg_inject_ipc4_fops = {
+ .open = sof_msg_inject_dfs_open,
+ .read = sof_msg_inject_ipc4_dfs_read,
+ .write = sof_msg_inject_ipc4_dfs_write,
+ .llseek = default_llseek,
+ .release = sof_msg_inject_dfs_release,
+
+ .owner = THIS_MODULE,
+};
+
+static int sof_msg_inject_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct sof_client_dev *cdev = auxiliary_dev_to_sof_client_dev(auxdev);
+ struct dentry *debugfs_root = sof_client_get_debugfs_root(cdev);
+ static const struct file_operations *fops;
+ struct device *dev = &auxdev->dev;
+ struct sof_msg_inject_priv *priv;
+ size_t alloc_size;
+
+ /* allocate memory for client data */
+ priv = devm_kzalloc(&auxdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ipc_type = sof_client_get_ipc_type(cdev);
+ priv->max_msg_size = sof_client_get_ipc_max_payload_size(cdev);
+ alloc_size = priv->max_msg_size;
+
+ if (priv->ipc_type == SOF_INTEL_IPC4)
+ alloc_size += sizeof(struct sof_ipc4_msg);
+
+ priv->tx_buffer = devm_kmalloc(dev, alloc_size, GFP_KERNEL);
+ priv->rx_buffer = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
+ if (!priv->tx_buffer || !priv->rx_buffer)
+ return -ENOMEM;
+
+ if (priv->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_msg *ipc4_msg;
+
+ ipc4_msg = priv->tx_buffer;
+ ipc4_msg->data_ptr = priv->tx_buffer + sizeof(struct sof_ipc4_msg);
+
+ ipc4_msg = priv->rx_buffer;
+ ipc4_msg->data_ptr = priv->rx_buffer + sizeof(struct sof_ipc4_msg);
+
+ fops = &sof_msg_inject_ipc4_fops;
+ } else {
+ fops = &sof_msg_inject_fops;
+ }
+
+ cdev->data = priv;
+
+ priv->dfs_file = debugfs_create_file("ipc_msg_inject", 0644, debugfs_root,
+ cdev, fops);
+
+ /* enable runtime PM */
+ pm_runtime_set_autosuspend_delay(dev, SOF_IPC_CLIENT_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_idle(dev);
+
+ return 0;
+}
+
+static void sof_msg_inject_remove(struct auxiliary_device *auxdev)
+{
+ struct sof_client_dev *cdev = auxiliary_dev_to_sof_client_dev(auxdev);
+ struct sof_msg_inject_priv *priv = cdev->data;
+
+ pm_runtime_disable(&auxdev->dev);
+
+ debugfs_remove(priv->dfs_file);
+}
+
+static const struct auxiliary_device_id sof_msg_inject_client_id_table[] = {
+ { .name = "snd_sof.msg_injector" },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, sof_msg_inject_client_id_table);
+
+/*
+ * No need for driver pm_ops as the generic pm callbacks in the auxiliary bus
+ * type are enough to ensure that the parent SOF device resumes to bring the DSP
+ * back to D0.
+ * Driver name will be set based on KBUILD_MODNAME.
+ */
+static struct auxiliary_driver sof_msg_inject_client_drv = {
+ .probe = sof_msg_inject_probe,
+ .remove = sof_msg_inject_remove,
+
+ .id_table = sof_msg_inject_client_id_table,
+};
+
+module_auxiliary_driver(sof_msg_inject_client_drv);
+
+MODULE_DESCRIPTION("SOF IPC Message Injector Client Driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/sof-client-probes-ipc3.c b/sound/soc/sof/sof-client-probes-ipc3.c
new file mode 100644
index 000000000..5e8eb1958
--- /dev/null
+++ b/sound/soc/sof/sof-client-probes-ipc3.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2019-2022 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+// Code moved to this file by:
+// Jyri Sarha <jyri.sarha@intel.com>
+//
+
+#include <linux/stddef.h>
+#include <sound/soc.h>
+#include <sound/sof/header.h>
+#include "sof-client.h"
+#include "sof-client-probes.h"
+
+struct sof_probe_dma {
+ unsigned int stream_tag;
+ unsigned int dma_buffer_size;
+} __packed;
+
+struct sof_ipc_probe_dma_add_params {
+ struct sof_ipc_cmd_hdr hdr;
+ unsigned int num_elems;
+ struct sof_probe_dma dma[];
+} __packed;
+
+struct sof_ipc_probe_info_params {
+ struct sof_ipc_reply rhdr;
+ unsigned int num_elems;
+ union {
+ DECLARE_FLEX_ARRAY(struct sof_probe_dma, dma);
+ DECLARE_FLEX_ARRAY(struct sof_probe_point_desc, desc);
+ };
+} __packed;
+
+struct sof_ipc_probe_point_add_params {
+ struct sof_ipc_cmd_hdr hdr;
+ unsigned int num_elems;
+ struct sof_probe_point_desc desc[];
+} __packed;
+
+struct sof_ipc_probe_point_remove_params {
+ struct sof_ipc_cmd_hdr hdr;
+ unsigned int num_elems;
+ unsigned int buffer_id[];
+} __packed;
+
+/**
+ * ipc3_probes_init - initialize data probing
+ * @cdev: SOF client device
+ * @stream_tag: Extractor stream tag
+ * @buffer_size: DMA buffer size to set for extractor
+ *
+ * The host chooses whether extraction is supported by providing a
+ * valid stream tag to the DSP. Once specified, the stream described
+ * by that tag will be tied to the DSP for extraction for the entire
+ * lifetime of the probe.
+ *
+ * Probing is initialized only once and each INIT request must be
+ * matched by a DEINIT call.
+ */
+static int ipc3_probes_init(struct sof_client_dev *cdev, u32 stream_tag,
+ size_t buffer_size)
+{
+ struct sof_ipc_probe_dma_add_params *msg;
+ size_t size = struct_size(msg, dma, 1);
+ int ret;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->hdr.size = size;
+ msg->hdr.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_INIT;
+ msg->num_elems = 1;
+ msg->dma[0].stream_tag = stream_tag;
+ msg->dma[0].dma_buffer_size = buffer_size;
+
+ ret = sof_client_ipc_tx_message_no_reply(cdev, msg);
+ kfree(msg);
+ return ret;
+}
+
+/**
+ * ipc3_probes_deinit - cleanup after data probing
+ * @cdev: SOF client device
+ *
+ * The host sends a DEINIT request to free the previously initialized probe
+ * on the DSP side once it is no longer needed. Send DEINIT only when no
+ * probes are connected and all injectors are detached.
+ */
+static int ipc3_probes_deinit(struct sof_client_dev *cdev)
+{
+ struct sof_ipc_cmd_hdr msg;
+
+ msg.size = sizeof(msg);
+ msg.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_DEINIT;
+
+ return sof_client_ipc_tx_message_no_reply(cdev, &msg);
+}
+
+static int ipc3_probes_info(struct sof_client_dev *cdev, unsigned int cmd,
+ void **params, size_t *num_params)
+{
+ size_t max_msg_size = sof_client_get_ipc_max_payload_size(cdev);
+ struct sof_ipc_probe_info_params msg = {{{0}}};
+ struct sof_ipc_probe_info_params *reply;
+ size_t bytes;
+ int ret;
+
+ *params = NULL;
+ *num_params = 0;
+
+ reply = kzalloc(max_msg_size, GFP_KERNEL);
+ if (!reply)
+ return -ENOMEM;
+ msg.rhdr.hdr.size = sizeof(msg);
+ msg.rhdr.hdr.cmd = SOF_IPC_GLB_PROBE | cmd;
+
+ ret = sof_client_ipc_tx_message(cdev, &msg, reply, max_msg_size);
+ if (ret < 0 || reply->rhdr.error < 0)
+ goto exit;
+
+ if (!reply->num_elems)
+ goto exit;
+
+ if (cmd == SOF_IPC_PROBE_DMA_INFO)
+ bytes = sizeof(reply->dma[0]);
+ else
+ bytes = sizeof(reply->desc[0]);
+ bytes *= reply->num_elems;
+ *params = kmemdup(&reply->dma[0], bytes, GFP_KERNEL);
+ if (!*params) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ *num_params = reply->num_elems;
+
+exit:
+ kfree(reply);
+ return ret;
+}
+
+/**
+ * ipc3_probes_points_info - retrieve list of active probe points
+ * @cdev: SOF client device
+ * @desc: Returned list of active probes
+ * @num_desc: Returned count of active probes
+ *
+ * The host sends a PROBE_POINT_INFO request to obtain the list of active
+ * probe points, which can be disconnected when a given probe is no longer
+ * required.
+ */
+static int ipc3_probes_points_info(struct sof_client_dev *cdev,
+ struct sof_probe_point_desc **desc,
+ size_t *num_desc)
+{
+ return ipc3_probes_info(cdev, SOF_IPC_PROBE_POINT_INFO,
+ (void **)desc, num_desc);
+}
+
+/**
+ * ipc3_probes_points_add - connect specified probes
+ * @cdev: SOF client device
+ * @desc: List of probe points to connect
+ * @num_desc: Number of elements in @desc
+ *
+ * Dynamically connects to the provided set of endpoints. Immediately
+ * after the connection is established, the host must be prepared to
+ * transfer data from or to the target stream, depending on the probing
+ * purpose.
+ *
+ * Each probe point should be removed using a PROBE_POINT_REMOVE
+ * request when no longer needed.
+ */
+static int ipc3_probes_points_add(struct sof_client_dev *cdev,
+ struct sof_probe_point_desc *desc,
+ size_t num_desc)
+{
+ struct sof_ipc_probe_point_add_params *msg;
+ size_t size = struct_size(msg, desc, num_desc);
+ int ret;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->hdr.size = size;
+ msg->num_elems = num_desc;
+ msg->hdr.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_POINT_ADD;
+ memcpy(&msg->desc[0], desc, size - sizeof(*msg));
+
+ ret = sof_client_ipc_tx_message_no_reply(cdev, msg);
+ kfree(msg);
+ return ret;
+}
+
+/**
+ * ipc3_probes_points_remove - disconnect specified probes
+ * @cdev: SOF client device
+ * @buffer_id: List of probe point buffer IDs to disconnect
+ * @num_buffer_id: Number of elements in @buffer_id
+ *
+ * Removes previously connected probes from the list of active probe
+ * points and frees all resources on the DSP side.
+ */
+static int ipc3_probes_points_remove(struct sof_client_dev *cdev,
+ unsigned int *buffer_id,
+ size_t num_buffer_id)
+{
+ struct sof_ipc_probe_point_remove_params *msg;
+ size_t size = struct_size(msg, buffer_id, num_buffer_id);
+ int ret;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->hdr.size = size;
+ msg->num_elems = num_buffer_id;
+ msg->hdr.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_POINT_REMOVE;
+ memcpy(&msg->buffer_id[0], buffer_id, size - sizeof(*msg));
+
+ ret = sof_client_ipc_tx_message_no_reply(cdev, msg);
+ kfree(msg);
+ return ret;
+}
+
+const struct sof_probes_ipc_ops ipc3_probe_ops = {
+ .init = ipc3_probes_init,
+ .deinit = ipc3_probes_deinit,
+ .points_info = ipc3_probes_points_info,
+ .points_add = ipc3_probes_points_add,
+ .points_remove = ipc3_probes_points_remove,
+};
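+
+/*
+ * Editorial sketch of the expected call order, based on the kernel-doc above
+ * (not part of the upstream file); argument names are placeholders:
+ *
+ *	ops->init(cdev, stream_tag, buffer_size);	 tie the extractor stream to the DSP
+ *	ops->points_add(cdev, desc, num_desc);		 connect probe points
+ *	ops->points_info(cdev, &desc, &num_desc);	 list active probe points
+ *	ops->points_remove(cdev, buffer_ids, num_ids);	 disconnect probe points
+ *	ops->deinit(cdev);				 free DSP-side resources
+ */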
diff --git a/sound/soc/sof/sof-client-probes-ipc4.c b/sound/soc/sof/sof-client-probes-ipc4.c
new file mode 100644
index 000000000..c56a85854
--- /dev/null
+++ b/sound/soc/sof/sof-client-probes-ipc4.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2019-2022 Intel Corporation. All rights reserved.
+//
+// Author: Jyri Sarha <jyri.sarha@intel.com>
+//
+
+#include <sound/soc.h>
+#include <sound/sof/ipc4/header.h>
+#include <uapi/sound/sof/header.h>
+#include "sof-priv.h"
+#include "ipc4-priv.h"
+#include "sof-client.h"
+#include "sof-client-probes.h"
+
+enum sof_ipc4_dma_type {
+ SOF_IPC4_DMA_HDA_HOST_OUTPUT = 0,
+ SOF_IPC4_DMA_HDA_HOST_INPUT = 1,
+ SOF_IPC4_DMA_HDA_LINK_OUTPUT = 8,
+ SOF_IPC4_DMA_HDA_LINK_INPUT = 9,
+ SOF_IPC4_DMA_DMIC_LINK_INPUT = 11,
+ SOF_IPC4_DMA_I2S_LINK_OUTPUT = 12,
+ SOF_IPC4_DMA_I2S_LINK_INPUT = 13,
+};
+
+enum sof_ipc4_probe_runtime_param {
+ SOF_IPC4_PROBE_INJECTION_DMA = 1,
+ SOF_IPC4_PROBE_INJECTION_DMA_DETACH,
+ SOF_IPC4_PROBE_POINTS,
+ SOF_IPC4_PROBE_POINTS_DISCONNECT,
+};
+
+struct sof_ipc4_probe_gtw_cfg {
+ u32 node_id;
+ u32 dma_buffer_size;
+} __packed __aligned(4);
+
+#define SOF_IPC4_PROBE_NODE_ID_INDEX(x) ((x) & GENMASK(7, 0))
+#define SOF_IPC4_PROBE_NODE_ID_TYPE(x) (((x) << 8) & GENMASK(12, 8))
+
+struct sof_ipc4_probe_cfg {
+ struct sof_ipc4_base_module_cfg base;
+ struct sof_ipc4_probe_gtw_cfg gtw_cfg;
+} __packed __aligned(4);
+
+enum sof_ipc4_probe_type {
+ SOF_IPC4_PROBE_TYPE_INPUT = 0,
+ SOF_IPC4_PROBE_TYPE_OUTPUT,
+ SOF_IPC4_PROBE_TYPE_INTERNAL
+};
+
+struct sof_ipc4_probe_point {
+ u32 point_id;
+ u32 purpose;
+ u32 stream_tag;
+} __packed __aligned(4);
+
+#define INVALID_PIPELINE_ID 0xFF
+
+/**
+ * sof_ipc4_probe_get_module_info - Get IPC4 module info for probe module
+ * @cdev: SOF client device
+ * @return: Pointer to IPC4 probe module info
+ *
+ * Look up the IPC4 probe module info based on the hard-coded UUID and
+ * cache the value for future calls.
+ */
+static struct sof_man4_module *sof_ipc4_probe_get_module_info(struct sof_client_dev *cdev)
+{
+ struct sof_probes_priv *priv = cdev->data;
+ struct device *dev = &cdev->auxdev.dev;
+ static const guid_t probe_uuid =
+ GUID_INIT(0x7CAD0808, 0xAB10, 0xCD23,
+ 0xEF, 0x45, 0x12, 0xAB, 0x34, 0xCD, 0x56, 0xEF);
+
+ if (!priv->ipc_priv) {
+ struct sof_ipc4_fw_module *fw_module =
+ sof_client_ipc4_find_module(cdev, &probe_uuid);
+
+ if (!fw_module) {
+ dev_err(dev, "%s: no matching uuid found", __func__);
+ return NULL;
+ }
+
+ priv->ipc_priv = &fw_module->man4_module_entry;
+ }
+
+ return (struct sof_man4_module *)priv->ipc_priv;
+}
+
+/**
+ * ipc4_probes_init - initialize data probing
+ * @cdev: SOF client device
+ * @stream_tag: Extractor stream tag
+ * @buffer_size: DMA buffer size to set for extractor
+ * @return: 0 on success, negative error code on error
+ *
+ * The host chooses whether extraction is supported by providing a valid
+ * stream tag to the DSP. Once specified, the stream described by that
+ * tag is tied to the DSP for extraction for the entire lifetime of the
+ * probe.
+ *
+ * Probing is initialized only once and each INIT request must be
+ * matched by DEINIT call.
+ */
+static int ipc4_probes_init(struct sof_client_dev *cdev, u32 stream_tag,
+ size_t buffer_size)
+{
+ struct sof_man4_module *mentry = sof_ipc4_probe_get_module_info(cdev);
+ struct sof_ipc4_msg msg;
+ struct sof_ipc4_probe_cfg cfg;
+
+ if (!mentry)
+ return -ENODEV;
+
+ memset(&cfg, '\0', sizeof(cfg));
+ cfg.gtw_cfg.node_id = SOF_IPC4_PROBE_NODE_ID_INDEX(stream_tag - 1) |
+ SOF_IPC4_PROBE_NODE_ID_TYPE(SOF_IPC4_DMA_HDA_HOST_INPUT);
+
+ cfg.gtw_cfg.dma_buffer_size = buffer_size;
+
+ msg.primary = mentry->id;
+ msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_INIT_INSTANCE);
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+ msg.extension = SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE(INVALID_PIPELINE_ID);
+ msg.extension |= SOF_IPC4_MOD_EXT_CORE_ID(0);
+
+ msg.data_size = sizeof(cfg);
+ msg.data_ptr = &cfg;
+
+ return sof_client_ipc_tx_message_no_reply(cdev, &msg);
+}
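A worked example may help with the node_id packing above. This is only an illustrative sketch: the stream tag value is hypothetical, and the macros are the ones defined earlier in this file.

/* Sketch: gateway node_id packing as done in ipc4_probes_init(),
 * assuming a hypothetical 1-based extractor stream tag of 3.
 */
static u32 example_probe_node_id(void)
{
	u32 stream_tag = 3;	/* hypothetical tag assigned to the compress stream */
	u32 index = SOF_IPC4_PROBE_NODE_ID_INDEX(stream_tag - 1);		/* 0x002 */
	u32 type = SOF_IPC4_PROBE_NODE_ID_TYPE(SOF_IPC4_DMA_HDA_HOST_INPUT);	/* 0x100 */

	return index | type;	/* 0x102 */
}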
+
+/**
+ * ipc4_probes_deinit - cleanup after data probing
+ * @cdev: SOF client device
+ * @return: 0 on success, negative error code on error
+ *
+ * The host sends a DEINIT request to free the previously initialized
+ * probe on the DSP side once it is no longer needed. DEINIT must only be
+ * sent when no probes are connected and all injectors are detached.
+ */
+static int ipc4_probes_deinit(struct sof_client_dev *cdev)
+{
+ struct sof_man4_module *mentry = sof_ipc4_probe_get_module_info(cdev);
+ struct sof_ipc4_msg msg;
+
+ if (!mentry)
+ return -ENODEV;
+
+ msg.primary = mentry->id;
+ msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_DELETE_INSTANCE);
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+ msg.extension = SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE(INVALID_PIPELINE_ID);
+ msg.extension |= SOF_IPC4_MOD_EXT_CORE_ID(0);
+
+ msg.data_size = 0;
+ msg.data_ptr = NULL;
+
+ return sof_client_ipc_tx_message_no_reply(cdev, &msg);
+}
+
+/**
+ * ipc4_probes_points_info - retrieve list of active probe points
+ * @cdev: SOF client device
+ * @desc: Returned list of active probes
+ * @num_desc: Returned count of active probes
+ * @return: 0 on success, negative error code on error
+ *
+ * Dummy implementation returning an empty list of probes.
+ */
+static int ipc4_probes_points_info(struct sof_client_dev *cdev,
+ struct sof_probe_point_desc **desc,
+ size_t *num_desc)
+{
+ /* TODO: Firmware side implementation needed first */
+ *desc = NULL;
+ *num_desc = 0;
+ return 0;
+}
+
+/**
+ * ipc4_probes_points_add - connect specified probes
+ * @cdev: SOF client device
+ * @desc: List of probe points to connect
+ * @num_desc: Number of elements in @desc
+ * @return: 0 on success, negative error code on error
+ *
+ * Translates the generic probe point presentation to an IPC4
+ * message to dynamically connect the provided set of endpoints.
+ */
+static int ipc4_probes_points_add(struct sof_client_dev *cdev,
+ struct sof_probe_point_desc *desc,
+ size_t num_desc)
+{
+ struct sof_man4_module *mentry = sof_ipc4_probe_get_module_info(cdev);
+ struct sof_ipc4_probe_point *points;
+ struct sof_ipc4_msg msg;
+ int i, ret;
+
+ if (!mentry)
+ return -ENODEV;
+
+	/* The sof_probe_point_desc and sof_ipc4_probe_point structs have
+	 * the same size and their integer members appear in the same
+	 * order with similar meaning, but since there is no performance
+	 * concern the conversion is written out explicitly to leave room
+	 * for future development.
+	 */
+ points = kcalloc(num_desc, sizeof(*points), GFP_KERNEL);
+ if (!points)
+ return -ENOMEM;
+
+ for (i = 0; i < num_desc; i++) {
+ points[i].point_id = desc[i].buffer_id;
+ points[i].purpose = desc[i].purpose;
+ points[i].stream_tag = desc[i].stream_tag;
+ }
+
+ msg.primary = mentry->id;
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+
+ msg.extension = SOF_IPC4_MOD_EXT_MSG_PARAM_ID(SOF_IPC4_PROBE_POINTS);
+
+ msg.data_size = sizeof(*points) * num_desc;
+ msg.data_ptr = points;
+
+ ret = sof_client_ipc_set_get_data(cdev, &msg, true);
+
+ kfree(points);
+
+ return ret;
+}
+
+/**
+ * ipc4_probes_points_remove - disconnect specified probes
+ * @cdev: SOF client device
+ * @buffer_id: List of probe points to disconnect
+ * @num_buffer_id: Number of elements in @buffer_id
+ * @return: 0 on success, negative error code on error
+ *
+ * Converts the generic buffer_id values to IPC4 probe point IDs and
+ * removes the probe points with an IPC4 message.
+ */
+static int ipc4_probes_points_remove(struct sof_client_dev *cdev,
+ unsigned int *buffer_id, size_t num_buffer_id)
+{
+ struct sof_man4_module *mentry = sof_ipc4_probe_get_module_info(cdev);
+ struct sof_ipc4_msg msg;
+ u32 *probe_point_ids;
+ int i, ret;
+
+ if (!mentry)
+ return -ENODEV;
+
+ probe_point_ids = kcalloc(num_buffer_id, sizeof(*probe_point_ids),
+ GFP_KERNEL);
+ if (!probe_point_ids)
+ return -ENOMEM;
+
+ for (i = 0; i < num_buffer_id; i++)
+ probe_point_ids[i] = buffer_id[i];
+
+ msg.primary = mentry->id;
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+
+ msg.extension =
+ SOF_IPC4_MOD_EXT_MSG_PARAM_ID(SOF_IPC4_PROBE_POINTS_DISCONNECT);
+
+ msg.data_size = num_buffer_id * sizeof(*probe_point_ids);
+ msg.data_ptr = probe_point_ids;
+
+ ret = sof_client_ipc_set_get_data(cdev, &msg, true);
+
+ kfree(probe_point_ids);
+
+ return ret;
+}
+
+const struct sof_probes_ipc_ops ipc4_probe_ops = {
+ .init = ipc4_probes_init,
+ .deinit = ipc4_probes_deinit,
+ .points_info = ipc4_probes_points_info,
+ .points_add = ipc4_probes_points_add,
+ .points_remove = ipc4_probes_points_remove,
+};
diff --git a/sound/soc/sof/sof-client-probes.c b/sound/soc/sof/sof-client-probes.c
new file mode 100644
index 000000000..740b63782
--- /dev/null
+++ b/sound/soc/sof/sof-client-probes.c
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2019-2022 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+// SOF client support:
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+//
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/string_helpers.h>
+#include <linux/stddef.h>
+
+#include <sound/soc.h>
+#include <sound/sof/header.h>
+#include "sof-client.h"
+#include "sof-client-probes.h"
+
+#define SOF_PROBES_SUSPEND_DELAY_MS 3000
+/* only extraction supported for now */
+#define SOF_PROBES_NUM_DAI_LINKS 1
+
+#define SOF_PROBES_INVALID_NODE_ID UINT_MAX
+
+static bool __read_mostly sof_probes_enabled;
+module_param_named(enable, sof_probes_enabled, bool, 0444);
+MODULE_PARM_DESC(enable, "Enable SOF probes support");
+
+static int sof_probes_compr_startup(struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_card *card = snd_soc_component_get_drvdata(dai->component);
+ struct sof_client_dev *cdev = snd_soc_card_get_drvdata(card);
+ struct sof_probes_priv *priv = cdev->data;
+ const struct sof_probes_host_ops *ops = priv->host_ops;
+ int ret;
+
+ if (sof_client_get_fw_state(cdev) == SOF_FW_CRASHED)
+ return -ENODEV;
+
+ ret = sof_client_core_module_get(cdev);
+ if (ret)
+ return ret;
+
+ ret = ops->startup(cdev, cstream, dai, &priv->extractor_stream_tag);
+ if (ret) {
+ dev_err(dai->dev, "Failed to startup probe stream: %d\n", ret);
+ priv->extractor_stream_tag = SOF_PROBES_INVALID_NODE_ID;
+ sof_client_core_module_put(cdev);
+ }
+
+ return ret;
+}
+
+static int sof_probes_compr_shutdown(struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_card *card = snd_soc_component_get_drvdata(dai->component);
+ struct sof_client_dev *cdev = snd_soc_card_get_drvdata(card);
+ struct sof_probes_priv *priv = cdev->data;
+ const struct sof_probes_host_ops *ops = priv->host_ops;
+ const struct sof_probes_ipc_ops *ipc = priv->ipc_ops;
+ struct sof_probe_point_desc *desc;
+ size_t num_desc;
+ int i, ret;
+
+ /* disconnect all probe points */
+ ret = ipc->points_info(cdev, &desc, &num_desc);
+ if (ret < 0) {
+ dev_err(dai->dev, "Failed to get probe points: %d\n", ret);
+ goto exit;
+ }
+
+ for (i = 0; i < num_desc; i++)
+ ipc->points_remove(cdev, &desc[i].buffer_id, 1);
+ kfree(desc);
+
+exit:
+ ret = ipc->deinit(cdev);
+ if (ret < 0)
+ dev_err(dai->dev, "Failed to deinit probe: %d\n", ret);
+
+ priv->extractor_stream_tag = SOF_PROBES_INVALID_NODE_ID;
+ snd_compr_free_pages(cstream);
+
+ ret = ops->shutdown(cdev, cstream, dai);
+
+ sof_client_core_module_put(cdev);
+
+ return ret;
+}
+
+static int sof_probes_compr_set_params(struct snd_compr_stream *cstream,
+ struct snd_compr_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_card *card = snd_soc_component_get_drvdata(dai->component);
+ struct sof_client_dev *cdev = snd_soc_card_get_drvdata(card);
+ struct snd_compr_runtime *rtd = cstream->runtime;
+ struct sof_probes_priv *priv = cdev->data;
+ const struct sof_probes_host_ops *ops = priv->host_ops;
+ const struct sof_probes_ipc_ops *ipc = priv->ipc_ops;
+ int ret;
+
+ cstream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV_SG;
+ cstream->dma_buffer.dev.dev = sof_client_get_dma_dev(cdev);
+ ret = snd_compr_malloc_pages(cstream, rtd->buffer_size);
+ if (ret < 0)
+ return ret;
+
+ ret = ops->set_params(cdev, cstream, params, dai);
+ if (ret)
+ return ret;
+
+ ret = ipc->init(cdev, priv->extractor_stream_tag, rtd->dma_bytes);
+ if (ret < 0) {
+ dev_err(dai->dev, "Failed to init probe: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sof_probes_compr_trigger(struct snd_compr_stream *cstream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_card *card = snd_soc_component_get_drvdata(dai->component);
+ struct sof_client_dev *cdev = snd_soc_card_get_drvdata(card);
+ struct sof_probes_priv *priv = cdev->data;
+ const struct sof_probes_host_ops *ops = priv->host_ops;
+
+ return ops->trigger(cdev, cstream, cmd, dai);
+}
+
+static int sof_probes_compr_pointer(struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_card *card = snd_soc_component_get_drvdata(dai->component);
+ struct sof_client_dev *cdev = snd_soc_card_get_drvdata(card);
+ struct sof_probes_priv *priv = cdev->data;
+ const struct sof_probes_host_ops *ops = priv->host_ops;
+
+ return ops->pointer(cdev, cstream, tstamp, dai);
+}
+
+static const struct snd_soc_cdai_ops sof_probes_compr_ops = {
+ .startup = sof_probes_compr_startup,
+ .shutdown = sof_probes_compr_shutdown,
+ .set_params = sof_probes_compr_set_params,
+ .trigger = sof_probes_compr_trigger,
+ .pointer = sof_probes_compr_pointer,
+};
+
+static int sof_probes_compr_copy(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ char __user *buf, size_t count)
+{
+ struct snd_compr_runtime *rtd = cstream->runtime;
+ unsigned int offset, n;
+ void *ptr;
+ int ret;
+
+ if (count > rtd->buffer_size)
+ count = rtd->buffer_size;
+
+ div_u64_rem(rtd->total_bytes_transferred, rtd->buffer_size, &offset);
+ ptr = rtd->dma_area + offset;
+ n = rtd->buffer_size - offset;
+
+ if (count < n) {
+ ret = copy_to_user(buf, ptr, count);
+ } else {
+ ret = copy_to_user(buf, ptr, n);
+ ret += copy_to_user(buf + n, rtd->dma_area, count - n);
+ }
+
+ if (ret)
+ return count - ret;
+ return count;
+}
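The copy handler above treats the DMA area as a ring buffer: the running transfer counter modulo the buffer size gives the read offset, and a read that crosses the end of the buffer is split into two copies. A short sketch with hypothetical numbers:

/* Hypothetical numbers for sof_probes_compr_copy():
 * buffer_size = 4096, total_bytes_transferred = 5000, count = 3500
 *   offset = 5000 % 4096 = 904
 *   n      = 4096 - 904  = 3192	(bytes left before the wrap)
 * count >= n, so the read is split into two copy_to_user() calls:
 *   copy_to_user(buf,        dma_area + 904, 3192);
 *   copy_to_user(buf + 3192, dma_area,        308);
 */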
+
+static const struct snd_compress_ops sof_probes_compressed_ops = {
+ .copy = sof_probes_compr_copy,
+};
+
+static ssize_t sof_probes_dfs_points_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ struct sof_client_dev *cdev = file->private_data;
+ struct sof_probes_priv *priv = cdev->data;
+ struct device *dev = &cdev->auxdev.dev;
+ struct sof_probe_point_desc *desc;
+ const struct sof_probes_ipc_ops *ipc = priv->ipc_ops;
+ int remaining, offset;
+ size_t num_desc;
+ char *buf;
+ int i, ret, err;
+
+ if (priv->extractor_stream_tag == SOF_PROBES_INVALID_NODE_ID) {
+ dev_warn(dev, "no extractor stream running\n");
+ return -ENOENT;
+ }
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err_ratelimited(dev, "debugfs read failed to resume %d\n", ret);
+ goto exit;
+ }
+
+ ret = ipc->points_info(cdev, &desc, &num_desc);
+ if (ret < 0)
+ goto pm_error;
+
+ for (i = 0; i < num_desc; i++) {
+ offset = strlen(buf);
+ remaining = PAGE_SIZE - offset;
+ ret = snprintf(buf + offset, remaining,
+ "Id: %#010x Purpose: %u Node id: %#x\n",
+ desc[i].buffer_id, desc[i].purpose, desc[i].stream_tag);
+ if (ret < 0 || ret >= remaining) {
+ /* truncate the output buffer at the last full line */
+ buf[offset] = '\0';
+ break;
+ }
+ }
+
+ ret = simple_read_from_buffer(to, count, ppos, buf, strlen(buf));
+
+ kfree(desc);
+
+pm_error:
+ pm_runtime_mark_last_busy(dev);
+ err = pm_runtime_put_autosuspend(dev);
+ if (err < 0)
+ dev_err_ratelimited(dev, "debugfs read failed to idle %d\n", err);
+
+exit:
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+sof_probes_dfs_points_write(struct file *file, const char __user *from,
+ size_t count, loff_t *ppos)
+{
+ struct sof_client_dev *cdev = file->private_data;
+ struct sof_probes_priv *priv = cdev->data;
+ const struct sof_probes_ipc_ops *ipc = priv->ipc_ops;
+ struct device *dev = &cdev->auxdev.dev;
+ struct sof_probe_point_desc *desc;
+ u32 num_elems, *array;
+ size_t bytes;
+ int ret, err;
+
+ if (priv->extractor_stream_tag == SOF_PROBES_INVALID_NODE_ID) {
+ dev_warn(dev, "no extractor stream running\n");
+ return -ENOENT;
+ }
+
+ ret = parse_int_array_user(from, count, (int **)&array);
+ if (ret < 0)
+ return ret;
+
+ num_elems = *array;
+ bytes = sizeof(*array) * num_elems;
+ if (bytes % sizeof(*desc)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ desc = (struct sof_probe_point_desc *)&array[1];
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err_ratelimited(dev, "debugfs write failed to resume %d\n", ret);
+ goto exit;
+ }
+
+ ret = ipc->points_add(cdev, desc, bytes / sizeof(*desc));
+ if (!ret)
+ ret = count;
+
+ pm_runtime_mark_last_busy(dev);
+ err = pm_runtime_put_autosuspend(dev);
+ if (err < 0)
+ dev_err_ratelimited(dev, "debugfs write failed to idle %d\n", err);
+exit:
+ kfree(array);
+ return ret;
+}
+
+static const struct file_operations sof_probes_points_fops = {
+ .open = simple_open,
+ .read = sof_probes_dfs_points_read,
+ .write = sof_probes_dfs_points_write,
+ .llseek = default_llseek,
+
+ .owner = THIS_MODULE,
+};
+
+static ssize_t
+sof_probes_dfs_points_remove_write(struct file *file, const char __user *from,
+ size_t count, loff_t *ppos)
+{
+ struct sof_client_dev *cdev = file->private_data;
+ struct sof_probes_priv *priv = cdev->data;
+ const struct sof_probes_ipc_ops *ipc = priv->ipc_ops;
+ struct device *dev = &cdev->auxdev.dev;
+ int ret, err;
+ u32 *array;
+
+ if (priv->extractor_stream_tag == SOF_PROBES_INVALID_NODE_ID) {
+ dev_warn(dev, "no extractor stream running\n");
+ return -ENOENT;
+ }
+
+ ret = parse_int_array_user(from, count, (int **)&array);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err_ratelimited(dev, "debugfs write failed to resume %d\n", ret);
+ goto exit;
+ }
+
+ ret = ipc->points_remove(cdev, &array[1], array[0]);
+ if (!ret)
+ ret = count;
+
+ pm_runtime_mark_last_busy(dev);
+ err = pm_runtime_put_autosuspend(dev);
+ if (err < 0)
+ dev_err_ratelimited(dev, "debugfs write failed to idle %d\n", err);
+exit:
+ kfree(array);
+ return ret;
+}
+
+static const struct file_operations sof_probes_points_remove_fops = {
+ .open = simple_open,
+ .write = sof_probes_dfs_points_remove_write,
+ .llseek = default_llseek,
+
+ .owner = THIS_MODULE,
+};
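For reference, a sketch of how the two debugfs writes above are decoded; the concrete numbers are hypothetical and the purpose encoding depends on the IPC version in use. parse_int_array_user() returns an array whose first element is the count of parsed integers.

/* Hypothetical input layout after parse_int_array_user().
 *
 * probe_points consumes the remaining integers as sof_probe_point_desc
 * triplets, so their count must be a multiple of three
 * (buffer_id, purpose, stream_tag):
 *
 *	u32 add_example[] = { 3, 0x100, 1, 5 };
 *
 * probe_points_remove treats the remaining integers as plain buffer IDs:
 *
 *	u32 remove_example[] = { 1, 0x100 };
 */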
+
+static const struct snd_soc_dai_ops sof_probes_dai_ops = {
+ .compress_new = snd_soc_new_compress,
+};
+
+static struct snd_soc_dai_driver sof_probes_dai_drv[] = {
+{
+ .name = "Probe Extraction CPU DAI",
+ .ops = &sof_probes_dai_ops,
+ .cops = &sof_probes_compr_ops,
+ .capture = {
+ .stream_name = "Probe Extraction",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ },
+},
+};
+
+static const struct snd_soc_component_driver sof_probes_component = {
+ .name = "sof-probes-component",
+ .compress_ops = &sof_probes_compressed_ops,
+ .module_get_upon_open = 1,
+ .legacy_dai_naming = 1,
+};
+
+SND_SOC_DAILINK_DEF(dummy, DAILINK_COMP_ARRAY(COMP_DUMMY()));
+
+static int sof_probes_client_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct sof_client_dev *cdev = auxiliary_dev_to_sof_client_dev(auxdev);
+ struct dentry *dfsroot = sof_client_get_debugfs_root(cdev);
+ struct device *dev = &auxdev->dev;
+ struct snd_soc_dai_link_component platform_component[] = {
+ {
+ .name = dev_name(dev),
+ }
+ };
+ struct snd_soc_card *card;
+ struct sof_probes_priv *priv;
+ struct snd_soc_dai_link_component *cpus;
+ struct sof_probes_host_ops *ops;
+ struct snd_soc_dai_link *links;
+ int ret;
+
+ /* do not set up the probes support if it is not enabled */
+ if (!sof_probes_enabled)
+ return -ENXIO;
+
+ ops = dev_get_platdata(dev);
+ if (!ops) {
+ dev_err(dev, "missing platform data\n");
+ return -ENODEV;
+ }
+ if (!ops->startup || !ops->shutdown || !ops->set_params || !ops->trigger ||
+ !ops->pointer) {
+ dev_err(dev, "missing platform callback(s)\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->host_ops = ops;
+
+ switch (sof_client_get_ipc_type(cdev)) {
+#ifdef CONFIG_SND_SOC_SOF_INTEL_IPC4
+ case SOF_INTEL_IPC4:
+ priv->ipc_ops = &ipc4_probe_ops;
+ break;
+#endif
+#ifdef CONFIG_SND_SOC_SOF_IPC3
+ case SOF_IPC:
+ priv->ipc_ops = &ipc3_probe_ops;
+ break;
+#endif
+ default:
+		dev_err(dev, "Matching IPC ops not found\n");
+ return -ENODEV;
+ }
+
+ cdev->data = priv;
+
+ /* register probes component driver and dai */
+ ret = devm_snd_soc_register_component(dev, &sof_probes_component,
+ sof_probes_dai_drv,
+ ARRAY_SIZE(sof_probes_dai_drv));
+ if (ret < 0) {
+ dev_err(dev, "failed to register SOF probes DAI driver %d\n", ret);
+ return ret;
+ }
+
+ /* set client data */
+ priv->extractor_stream_tag = SOF_PROBES_INVALID_NODE_ID;
+
+ /* create read-write probes_points debugfs entry */
+ priv->dfs_points = debugfs_create_file("probe_points", 0644, dfsroot,
+ cdev, &sof_probes_points_fops);
+
+ /* create read-write probe_points_remove debugfs entry */
+ priv->dfs_points_remove = debugfs_create_file("probe_points_remove", 0644,
+ dfsroot, cdev,
+ &sof_probes_points_remove_fops);
+
+ links = devm_kcalloc(dev, SOF_PROBES_NUM_DAI_LINKS, sizeof(*links), GFP_KERNEL);
+ cpus = devm_kcalloc(dev, SOF_PROBES_NUM_DAI_LINKS, sizeof(*cpus), GFP_KERNEL);
+ if (!links || !cpus) {
+ debugfs_remove(priv->dfs_points);
+ debugfs_remove(priv->dfs_points_remove);
+ return -ENOMEM;
+ }
+
+ /* extraction DAI link */
+ links[0].name = "Compress Probe Capture";
+ links[0].id = 0;
+ links[0].cpus = &cpus[0];
+ links[0].num_cpus = 1;
+ links[0].cpus->dai_name = "Probe Extraction CPU DAI";
+ links[0].codecs = dummy;
+ links[0].num_codecs = 1;
+ links[0].platforms = platform_component;
+ links[0].num_platforms = ARRAY_SIZE(platform_component);
+ links[0].nonatomic = 1;
+
+ card = &priv->card;
+
+ card->dev = dev;
+ card->name = "sof-probes";
+ card->owner = THIS_MODULE;
+ card->num_links = SOF_PROBES_NUM_DAI_LINKS;
+ card->dai_link = links;
+
+ /* set idle_bias_off to prevent the core from resuming the card->dev */
+ card->dapm.idle_bias_off = true;
+
+ snd_soc_card_set_drvdata(card, cdev);
+
+ ret = devm_snd_soc_register_card(dev, card);
+ if (ret < 0) {
+ debugfs_remove(priv->dfs_points);
+ debugfs_remove(priv->dfs_points_remove);
+ dev_err(dev, "Probes card register failed %d\n", ret);
+ return ret;
+ }
+
+ /* enable runtime PM */
+ pm_runtime_set_autosuspend_delay(dev, SOF_PROBES_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_idle(dev);
+
+ return 0;
+}
+
+static void sof_probes_client_remove(struct auxiliary_device *auxdev)
+{
+ struct sof_client_dev *cdev = auxiliary_dev_to_sof_client_dev(auxdev);
+ struct sof_probes_priv *priv = cdev->data;
+
+ if (!sof_probes_enabled)
+ return;
+
+ pm_runtime_disable(&auxdev->dev);
+ debugfs_remove(priv->dfs_points);
+ debugfs_remove(priv->dfs_points_remove);
+}
+
+static const struct auxiliary_device_id sof_probes_client_id_table[] = {
+ { .name = "snd_sof.hda-probes", },
+ { .name = "snd_sof.acp-probes", },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, sof_probes_client_id_table);
+
+/* driver name will be set based on KBUILD_MODNAME */
+static struct auxiliary_driver sof_probes_client_drv = {
+ .probe = sof_probes_client_probe,
+ .remove = sof_probes_client_remove,
+
+ .id_table = sof_probes_client_id_table,
+};
+
+module_auxiliary_driver(sof_probes_client_drv);
+
+MODULE_DESCRIPTION("SOF Probes Client Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/sof-client-probes.h b/sound/soc/sof/sof-client-probes.h
new file mode 100644
index 000000000..da04d65b8
--- /dev/null
+++ b/sound/soc/sof/sof-client-probes.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOF_CLIENT_PROBES_H
+#define __SOF_CLIENT_PROBES_H
+
+struct snd_compr_stream;
+struct snd_compr_tstamp;
+struct snd_compr_params;
+struct sof_client_dev;
+struct snd_soc_dai;
+
+/*
+ * Callbacks used on platforms where the control for audio is split between
+ * DSP and host, like HDA.
+ */
+struct sof_probes_host_ops {
+ int (*startup)(struct sof_client_dev *cdev, struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai, u32 *stream_id);
+ int (*shutdown)(struct sof_client_dev *cdev, struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai);
+ int (*set_params)(struct sof_client_dev *cdev, struct snd_compr_stream *cstream,
+ struct snd_compr_params *params,
+ struct snd_soc_dai *dai);
+ int (*trigger)(struct sof_client_dev *cdev, struct snd_compr_stream *cstream,
+ int cmd, struct snd_soc_dai *dai);
+ int (*pointer)(struct sof_client_dev *cdev, struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp,
+ struct snd_soc_dai *dai);
+};
+
+struct sof_probe_point_desc {
+ unsigned int buffer_id;
+ unsigned int purpose;
+ unsigned int stream_tag;
+} __packed;
+
+struct sof_probes_ipc_ops {
+ int (*init)(struct sof_client_dev *cdev, u32 stream_tag,
+ size_t buffer_size);
+ int (*deinit)(struct sof_client_dev *cdev);
+ int (*points_info)(struct sof_client_dev *cdev,
+ struct sof_probe_point_desc **desc,
+ size_t *num_desc);
+ int (*points_add)(struct sof_client_dev *cdev,
+ struct sof_probe_point_desc *desc,
+ size_t num_desc);
+ int (*points_remove)(struct sof_client_dev *cdev,
+ unsigned int *buffer_id, size_t num_buffer_id);
+};
+
+extern const struct sof_probes_ipc_ops ipc3_probe_ops;
+extern const struct sof_probes_ipc_ops ipc4_probe_ops;
+
+struct sof_probes_priv {
+ struct dentry *dfs_points;
+ struct dentry *dfs_points_remove;
+ u32 extractor_stream_tag;
+ struct snd_soc_card card;
+ void *ipc_priv;
+
+ const struct sof_probes_host_ops *host_ops;
+ const struct sof_probes_ipc_ops *ipc_ops;
+};
+
+#endif
diff --git a/sound/soc/sof/sof-client.c b/sound/soc/sof/sof-client.c
new file mode 100644
index 000000000..284de96e7
--- /dev/null
+++ b/sound/soc/sof/sof-client.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+// Authors: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+//
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <sound/sof/ipc4/header.h>
+#include "ops.h"
+#include "sof-client.h"
+#include "sof-priv.h"
+#include "ipc3-priv.h"
+#include "ipc4-priv.h"
+
+/**
+ * struct sof_ipc_event_entry - IPC client event description
+ * @ipc_msg_type: IPC msg type of the event the client is interested in
+ * @cdev: sof_client_dev of the requesting client
+ * @callback: Callback function of the client
+ * @list: item in SOF core client event list
+ */
+struct sof_ipc_event_entry {
+ u32 ipc_msg_type;
+ struct sof_client_dev *cdev;
+ sof_client_event_callback callback;
+ struct list_head list;
+};
+
+/**
+ * struct sof_state_event_entry - DSP panic event subscription entry
+ * @cdev: sof_client_dev of the requesting client
+ * @callback: Callback function of the client
+ * @list: item in SOF core client event list
+ */
+struct sof_state_event_entry {
+ struct sof_client_dev *cdev;
+ sof_client_fw_state_callback callback;
+ struct list_head list;
+};
+
+static void sof_client_auxdev_release(struct device *dev)
+{
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+ struct sof_client_dev *cdev = auxiliary_dev_to_sof_client_dev(auxdev);
+
+ kfree(cdev->auxdev.dev.platform_data);
+ kfree(cdev);
+}
+
+static int sof_client_dev_add_data(struct sof_client_dev *cdev, const void *data,
+ size_t size)
+{
+ void *d = NULL;
+
+ if (data) {
+ d = kmemdup(data, size, GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+ }
+
+ cdev->auxdev.dev.platform_data = d;
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
+static int sof_register_ipc_flood_test(struct snd_sof_dev *sdev)
+{
+ int ret = 0;
+ int i;
+
+ if (sdev->pdata->ipc_type != SOF_IPC)
+ return 0;
+
+ for (i = 0; i < CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST_NUM; i++) {
+ ret = sof_client_dev_register(sdev, "ipc_flood", i, NULL, 0);
+ if (ret < 0)
+ break;
+ }
+
+ if (ret) {
+ for (; i >= 0; --i)
+ sof_client_dev_unregister(sdev, "ipc_flood", i);
+ }
+
+ return ret;
+}
+
+static void sof_unregister_ipc_flood_test(struct snd_sof_dev *sdev)
+{
+ int i;
+
+ for (i = 0; i < CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST_NUM; i++)
+ sof_client_dev_unregister(sdev, "ipc_flood", i);
+}
+#else
+static inline int sof_register_ipc_flood_test(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline void sof_unregister_ipc_flood_test(struct snd_sof_dev *sdev) {}
+#endif /* CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_MSG_INJECTOR)
+static int sof_register_ipc_msg_injector(struct snd_sof_dev *sdev)
+{
+ return sof_client_dev_register(sdev, "msg_injector", 0, NULL, 0);
+}
+
+static void sof_unregister_ipc_msg_injector(struct snd_sof_dev *sdev)
+{
+ sof_client_dev_unregister(sdev, "msg_injector", 0);
+}
+#else
+static inline int sof_register_ipc_msg_injector(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline void sof_unregister_ipc_msg_injector(struct snd_sof_dev *sdev) {}
+#endif /* CONFIG_SND_SOC_SOF_DEBUG_IPC_MSG_INJECTOR */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_KERNEL_INJECTOR)
+static int sof_register_ipc_kernel_injector(struct snd_sof_dev *sdev)
+{
+ /* Only IPC3 supported right now */
+ if (sdev->pdata->ipc_type != SOF_IPC)
+ return 0;
+
+ return sof_client_dev_register(sdev, "kernel_injector", 0, NULL, 0);
+}
+
+static void sof_unregister_ipc_kernel_injector(struct snd_sof_dev *sdev)
+{
+ sof_client_dev_unregister(sdev, "kernel_injector", 0);
+}
+#else
+static inline int sof_register_ipc_kernel_injector(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline void sof_unregister_ipc_kernel_injector(struct snd_sof_dev *sdev) {}
+#endif /* CONFIG_SND_SOC_SOF_DEBUG_IPC_KERNEL_INJECTOR */
+
+int sof_register_clients(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ if (sdev->dspless_mode_selected)
+ return 0;
+
+ /* Register platform independent client devices */
+ ret = sof_register_ipc_flood_test(sdev);
+ if (ret) {
+ dev_err(sdev->dev, "IPC flood test client registration failed\n");
+ return ret;
+ }
+
+ ret = sof_register_ipc_msg_injector(sdev);
+ if (ret) {
+ dev_err(sdev->dev, "IPC message injector client registration failed\n");
+ goto err_msg_injector;
+ }
+
+ ret = sof_register_ipc_kernel_injector(sdev);
+ if (ret) {
+ dev_err(sdev->dev, "IPC kernel injector client registration failed\n");
+ goto err_kernel_injector;
+ }
+
+	/* Platform dependent client device registration */
+
+ if (sof_ops(sdev) && sof_ops(sdev)->register_ipc_clients)
+ ret = sof_ops(sdev)->register_ipc_clients(sdev);
+
+ if (!ret)
+ return 0;
+
+ sof_unregister_ipc_kernel_injector(sdev);
+
+err_kernel_injector:
+ sof_unregister_ipc_msg_injector(sdev);
+
+err_msg_injector:
+ sof_unregister_ipc_flood_test(sdev);
+
+ return ret;
+}
+
+void sof_unregister_clients(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->unregister_ipc_clients)
+ sof_ops(sdev)->unregister_ipc_clients(sdev);
+
+ sof_unregister_ipc_kernel_injector(sdev);
+ sof_unregister_ipc_msg_injector(sdev);
+ sof_unregister_ipc_flood_test(sdev);
+}
+
+int sof_client_dev_register(struct snd_sof_dev *sdev, const char *name, u32 id,
+ const void *data, size_t size)
+{
+ struct auxiliary_device *auxdev;
+ struct sof_client_dev *cdev;
+ int ret;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ cdev->sdev = sdev;
+ auxdev = &cdev->auxdev;
+ auxdev->name = name;
+ auxdev->dev.parent = sdev->dev;
+ auxdev->dev.release = sof_client_auxdev_release;
+ auxdev->id = id;
+
+ ret = sof_client_dev_add_data(cdev, data, size);
+ if (ret < 0)
+ goto err_dev_add_data;
+
+ ret = auxiliary_device_init(auxdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to initialize client dev %s.%d\n", name, id);
+ goto err_dev_init;
+ }
+
+ ret = auxiliary_device_add(&cdev->auxdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to add client dev %s.%d\n", name, id);
+ /*
+ * sof_client_auxdev_release() will be invoked to free up memory
+ * allocations through put_device()
+ */
+ auxiliary_device_uninit(&cdev->auxdev);
+ return ret;
+ }
+
+ /* add to list of SOF client devices */
+ mutex_lock(&sdev->ipc_client_mutex);
+ list_add(&cdev->list, &sdev->ipc_client_list);
+ mutex_unlock(&sdev->ipc_client_mutex);
+
+ return 0;
+
+err_dev_init:
+ kfree(cdev->auxdev.dev.platform_data);
+
+err_dev_add_data:
+ kfree(cdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_dev_register, SND_SOC_SOF_CLIENT);
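As a usage illustration of the registration helper above, a platform driver could register the probes client roughly as sketched below; the device name follows the probes client's id table, while the ops structure and its contents are an assumption standing in for the real platform callbacks (e.g. the ones in hda-probes.c).

/* Sketch only: registering a probes client device with host ops as
 * platform data. The callback assignments are placeholders.
 */
static const struct sof_probes_host_ops example_probes_ops = {
	/* .startup, .shutdown, .set_params, .trigger, .pointer */
};

static int example_register_probes_client(struct snd_sof_dev *sdev)
{
	return sof_client_dev_register(sdev, "hda-probes", 0,
				       &example_probes_ops,
				       sizeof(example_probes_ops));
}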
+
+void sof_client_dev_unregister(struct snd_sof_dev *sdev, const char *name, u32 id)
+{
+ struct sof_client_dev *cdev;
+
+ mutex_lock(&sdev->ipc_client_mutex);
+
+ /*
+ * sof_client_auxdev_release() will be invoked to free up memory
+ * allocations through put_device()
+ */
+ list_for_each_entry(cdev, &sdev->ipc_client_list, list) {
+ if (!strcmp(cdev->auxdev.name, name) && cdev->auxdev.id == id) {
+ list_del(&cdev->list);
+ auxiliary_device_delete(&cdev->auxdev);
+ auxiliary_device_uninit(&cdev->auxdev);
+ break;
+ }
+ }
+
+ mutex_unlock(&sdev->ipc_client_mutex);
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_dev_unregister, SND_SOC_SOF_CLIENT);
+
+int sof_client_ipc_tx_message(struct sof_client_dev *cdev, void *ipc_msg,
+ void *reply_data, size_t reply_bytes)
+{
+ if (cdev->sdev->pdata->ipc_type == SOF_IPC) {
+ struct sof_ipc_cmd_hdr *hdr = ipc_msg;
+
+ return sof_ipc_tx_message(cdev->sdev->ipc, ipc_msg, hdr->size,
+ reply_data, reply_bytes);
+ } else if (cdev->sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_msg *msg = ipc_msg;
+
+ return sof_ipc_tx_message(cdev->sdev->ipc, ipc_msg, msg->data_size,
+ reply_data, reply_bytes);
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_ipc_tx_message, SND_SOC_SOF_CLIENT);
+
+int sof_client_ipc_rx_message(struct sof_client_dev *cdev, void *ipc_msg, void *msg_buf)
+{
+ if (cdev->sdev->pdata->ipc_type == SOF_IPC) {
+ struct sof_ipc_cmd_hdr *hdr = ipc_msg;
+
+ if (hdr->size < sizeof(hdr)) {
+ dev_err(cdev->sdev->dev, "The received message size is invalid\n");
+ return -EINVAL;
+ }
+
+ sof_ipc3_do_rx_work(cdev->sdev, ipc_msg, msg_buf);
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_ipc_rx_message, SND_SOC_SOF_CLIENT);
+
+int sof_client_ipc_set_get_data(struct sof_client_dev *cdev, void *ipc_msg,
+ bool set)
+{
+ if (cdev->sdev->pdata->ipc_type == SOF_IPC) {
+ struct sof_ipc_cmd_hdr *hdr = ipc_msg;
+
+ return sof_ipc_set_get_data(cdev->sdev->ipc, ipc_msg, hdr->size,
+ set);
+ } else if (cdev->sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_msg *msg = ipc_msg;
+
+ return sof_ipc_set_get_data(cdev->sdev->ipc, ipc_msg,
+ msg->data_size, set);
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_ipc_set_get_data, SND_SOC_SOF_CLIENT);
+
+#ifdef CONFIG_SND_SOC_SOF_INTEL_IPC4
+struct sof_ipc4_fw_module *sof_client_ipc4_find_module(struct sof_client_dev *c, const guid_t *uuid)
+{
+ struct snd_sof_dev *sdev = c->sdev;
+
+ if (sdev->pdata->ipc_type == SOF_INTEL_IPC4)
+ return sof_ipc4_find_module_by_uuid(sdev, uuid);
+ dev_err(sdev->dev, "Only supported with IPC4\n");
+
+ return NULL;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_ipc4_find_module, SND_SOC_SOF_CLIENT);
+#endif
+
+int sof_suspend_clients(struct snd_sof_dev *sdev, pm_message_t state)
+{
+ struct auxiliary_driver *adrv;
+ struct sof_client_dev *cdev;
+
+ mutex_lock(&sdev->ipc_client_mutex);
+
+ list_for_each_entry(cdev, &sdev->ipc_client_list, list) {
+ /* Skip devices without loaded driver */
+ if (!cdev->auxdev.dev.driver)
+ continue;
+
+ adrv = to_auxiliary_drv(cdev->auxdev.dev.driver);
+ if (adrv->suspend)
+ adrv->suspend(&cdev->auxdev, state);
+ }
+
+ mutex_unlock(&sdev->ipc_client_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(sof_suspend_clients, SND_SOC_SOF_CLIENT);
+
+int sof_resume_clients(struct snd_sof_dev *sdev)
+{
+ struct auxiliary_driver *adrv;
+ struct sof_client_dev *cdev;
+
+ mutex_lock(&sdev->ipc_client_mutex);
+
+ list_for_each_entry(cdev, &sdev->ipc_client_list, list) {
+ /* Skip devices without loaded driver */
+ if (!cdev->auxdev.dev.driver)
+ continue;
+
+ adrv = to_auxiliary_drv(cdev->auxdev.dev.driver);
+ if (adrv->resume)
+ adrv->resume(&cdev->auxdev);
+ }
+
+ mutex_unlock(&sdev->ipc_client_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(sof_resume_clients, SND_SOC_SOF_CLIENT);
+
+struct dentry *sof_client_get_debugfs_root(struct sof_client_dev *cdev)
+{
+ return cdev->sdev->debugfs_root;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_get_debugfs_root, SND_SOC_SOF_CLIENT);
+
+/* DMA buffer allocation in client drivers must use the core SOF device */
+struct device *sof_client_get_dma_dev(struct sof_client_dev *cdev)
+{
+ return cdev->sdev->dev;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_get_dma_dev, SND_SOC_SOF_CLIENT);
+
+const struct sof_ipc_fw_version *sof_client_get_fw_version(struct sof_client_dev *cdev)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+
+ return &sdev->fw_ready.version;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_get_fw_version, SND_SOC_SOF_CLIENT);
+
+size_t sof_client_get_ipc_max_payload_size(struct sof_client_dev *cdev)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+
+ return sdev->ipc->max_payload_size;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_get_ipc_max_payload_size, SND_SOC_SOF_CLIENT);
+
+enum sof_ipc_type sof_client_get_ipc_type(struct sof_client_dev *cdev)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+
+ return sdev->pdata->ipc_type;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_get_ipc_type, SND_SOC_SOF_CLIENT);
+
+/* module refcount management of SOF core */
+int sof_client_core_module_get(struct sof_client_dev *cdev)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+
+ if (!try_module_get(sdev->dev->driver->owner))
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_core_module_get, SND_SOC_SOF_CLIENT);
+
+void sof_client_core_module_put(struct sof_client_dev *cdev)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+
+ module_put(sdev->dev->driver->owner);
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_core_module_put, SND_SOC_SOF_CLIENT);
+
+/* IPC event handling */
+void sof_client_ipc_rx_dispatcher(struct snd_sof_dev *sdev, void *msg_buf)
+{
+ struct sof_ipc_event_entry *event;
+ u32 msg_type;
+
+ if (sdev->pdata->ipc_type == SOF_IPC) {
+ struct sof_ipc_cmd_hdr *hdr = msg_buf;
+
+ msg_type = hdr->cmd & SOF_GLB_TYPE_MASK;
+ } else if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_msg *msg = msg_buf;
+
+ msg_type = SOF_IPC4_NOTIFICATION_TYPE_GET(msg->primary);
+ } else {
+ dev_dbg_once(sdev->dev, "Not supported IPC version: %d\n",
+ sdev->pdata->ipc_type);
+ return;
+ }
+
+ mutex_lock(&sdev->client_event_handler_mutex);
+
+ list_for_each_entry(event, &sdev->ipc_rx_handler_list, list) {
+ if (event->ipc_msg_type == msg_type)
+ event->callback(event->cdev, msg_buf);
+ }
+
+ mutex_unlock(&sdev->client_event_handler_mutex);
+}
+
+int sof_client_register_ipc_rx_handler(struct sof_client_dev *cdev,
+ u32 ipc_msg_type,
+ sof_client_event_callback callback)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+ struct sof_ipc_event_entry *event;
+
+ if (!callback)
+ return -EINVAL;
+
+ if (cdev->sdev->pdata->ipc_type == SOF_IPC) {
+ if (!(ipc_msg_type & SOF_GLB_TYPE_MASK))
+ return -EINVAL;
+ } else if (cdev->sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ if (!(ipc_msg_type & SOF_IPC4_NOTIFICATION_TYPE_MASK))
+ return -EINVAL;
+ } else {
+ dev_warn(sdev->dev, "%s: Not supported IPC version: %d\n",
+ __func__, sdev->pdata->ipc_type);
+ return -EINVAL;
+ }
+
+ event = kmalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ event->ipc_msg_type = ipc_msg_type;
+ event->cdev = cdev;
+ event->callback = callback;
+
+ /* add to list of SOF client devices */
+ mutex_lock(&sdev->client_event_handler_mutex);
+ list_add(&event->list, &sdev->ipc_rx_handler_list);
+ mutex_unlock(&sdev->client_event_handler_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_register_ipc_rx_handler, SND_SOC_SOF_CLIENT);
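As a hedged sketch of how a client subscribes to notifications with the helper above, an IPC3 client interested in, say, trace messages could register a callback like this; the callback body is a placeholder.

/* Sketch: subscribing to IPC3 trace notifications from a client driver. */
static void example_trace_rx(struct sof_client_dev *cdev, void *msg_buf)
{
	/* inspect the notification payload here */
}

static int example_subscribe(struct sof_client_dev *cdev)
{
	return sof_client_register_ipc_rx_handler(cdev, SOF_IPC_GLB_TRACE_MSG,
						  example_trace_rx);
}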
+
+void sof_client_unregister_ipc_rx_handler(struct sof_client_dev *cdev,
+ u32 ipc_msg_type)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+ struct sof_ipc_event_entry *event;
+
+ mutex_lock(&sdev->client_event_handler_mutex);
+
+ list_for_each_entry(event, &sdev->ipc_rx_handler_list, list) {
+ if (event->cdev == cdev && event->ipc_msg_type == ipc_msg_type) {
+ list_del(&event->list);
+ kfree(event);
+ break;
+ }
+ }
+
+ mutex_unlock(&sdev->client_event_handler_mutex);
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_unregister_ipc_rx_handler, SND_SOC_SOF_CLIENT);
+
+/* DSP state notification and query */
+void sof_client_fw_state_dispatcher(struct snd_sof_dev *sdev)
+{
+ struct sof_state_event_entry *event;
+
+ mutex_lock(&sdev->client_event_handler_mutex);
+
+ list_for_each_entry(event, &sdev->fw_state_handler_list, list)
+ event->callback(event->cdev, sdev->fw_state);
+
+ mutex_unlock(&sdev->client_event_handler_mutex);
+}
+
+int sof_client_register_fw_state_handler(struct sof_client_dev *cdev,
+ sof_client_fw_state_callback callback)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+ struct sof_state_event_entry *event;
+
+ if (!callback)
+ return -EINVAL;
+
+ event = kmalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ event->cdev = cdev;
+ event->callback = callback;
+
+ /* add to list of SOF client devices */
+ mutex_lock(&sdev->client_event_handler_mutex);
+ list_add(&event->list, &sdev->fw_state_handler_list);
+ mutex_unlock(&sdev->client_event_handler_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_register_fw_state_handler, SND_SOC_SOF_CLIENT);
+
+void sof_client_unregister_fw_state_handler(struct sof_client_dev *cdev)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+ struct sof_state_event_entry *event;
+
+ mutex_lock(&sdev->client_event_handler_mutex);
+
+ list_for_each_entry(event, &sdev->fw_state_handler_list, list) {
+ if (event->cdev == cdev) {
+ list_del(&event->list);
+ kfree(event);
+ break;
+ }
+ }
+
+ mutex_unlock(&sdev->client_event_handler_mutex);
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_unregister_fw_state_handler, SND_SOC_SOF_CLIENT);
+
+enum sof_fw_state sof_client_get_fw_state(struct sof_client_dev *cdev)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+
+ return sdev->fw_state;
+}
+EXPORT_SYMBOL_NS_GPL(sof_client_get_fw_state, SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/sof-client.h b/sound/soc/sof/sof-client.h
new file mode 100644
index 000000000..b6ccc2cd6
--- /dev/null
+++ b/sound/soc/sof/sof-client.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_SOF_CLIENT_H
+#define __SOC_SOF_CLIENT_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <sound/sof.h>
+
+struct sof_ipc_fw_version;
+struct sof_ipc_cmd_hdr;
+struct snd_sof_dev;
+struct dentry;
+
+struct sof_ipc4_fw_module;
+
+/**
+ * struct sof_client_dev - SOF client device
+ * @auxdev: auxiliary device
+ * @sdev: pointer to SOF core device struct
+ * @list: item in SOF core client dev list
+ * @data: device specific data
+ */
+struct sof_client_dev {
+ struct auxiliary_device auxdev;
+ struct snd_sof_dev *sdev;
+ struct list_head list;
+ void *data;
+};
+
+#define sof_client_dev_to_sof_dev(cdev) ((cdev)->sdev)
+
+#define auxiliary_dev_to_sof_client_dev(auxiliary_dev) \
+ container_of(auxiliary_dev, struct sof_client_dev, auxdev)
+
+#define dev_to_sof_client_dev(dev) \
+ container_of(to_auxiliary_dev(dev), struct sof_client_dev, auxdev)
+
+int sof_client_ipc_tx_message(struct sof_client_dev *cdev, void *ipc_msg,
+ void *reply_data, size_t reply_bytes);
+static inline int sof_client_ipc_tx_message_no_reply(struct sof_client_dev *cdev, void *ipc_msg)
+{
+ return sof_client_ipc_tx_message(cdev, ipc_msg, NULL, 0);
+}
+int sof_client_ipc_set_get_data(struct sof_client_dev *cdev, void *ipc_msg,
+ bool set);
+
+struct sof_ipc4_fw_module *sof_client_ipc4_find_module(struct sof_client_dev *c, const guid_t *u);
+
+struct dentry *sof_client_get_debugfs_root(struct sof_client_dev *cdev);
+struct device *sof_client_get_dma_dev(struct sof_client_dev *cdev);
+const struct sof_ipc_fw_version *sof_client_get_fw_version(struct sof_client_dev *cdev);
+size_t sof_client_get_ipc_max_payload_size(struct sof_client_dev *cdev);
+enum sof_ipc_type sof_client_get_ipc_type(struct sof_client_dev *cdev);
+
+/* module refcount management of SOF core */
+int sof_client_core_module_get(struct sof_client_dev *cdev);
+void sof_client_core_module_put(struct sof_client_dev *cdev);
+
+/* IPC notification */
+typedef void (*sof_client_event_callback)(struct sof_client_dev *cdev, void *msg_buf);
+
+int sof_client_register_ipc_rx_handler(struct sof_client_dev *cdev,
+ u32 ipc_msg_type,
+ sof_client_event_callback callback);
+void sof_client_unregister_ipc_rx_handler(struct sof_client_dev *cdev,
+ u32 ipc_msg_type);
+
+/* DSP state notification and query */
+typedef void (*sof_client_fw_state_callback)(struct sof_client_dev *cdev,
+ enum sof_fw_state state);
+
+int sof_client_register_fw_state_handler(struct sof_client_dev *cdev,
+ sof_client_fw_state_callback callback);
+void sof_client_unregister_fw_state_handler(struct sof_client_dev *cdev);
+enum sof_fw_state sof_client_get_fw_state(struct sof_client_dev *cdev);
+int sof_client_ipc_rx_message(struct sof_client_dev *cdev, void *ipc_msg, void *msg_buf);
+
+#endif /* __SOC_SOF_CLIENT_H */
diff --git a/sound/soc/sof/sof-of-dev.c b/sound/soc/sof/sof-of-dev.c
new file mode 100644
index 000000000..53faecced
--- /dev/null
+++ b/sound/soc/sof/sof-of-dev.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright 2019 NXP
+//
+// Author: Daniel Baluta <daniel.baluta@nxp.com>
+//
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pm_runtime.h>
+#include <sound/sof.h>
+
+#include "sof-of-dev.h"
+#include "ops.h"
+
+static char *fw_path;
+module_param(fw_path, charp, 0444);
+MODULE_PARM_DESC(fw_path, "alternate path for SOF firmware.");
+
+static char *tplg_path;
+module_param(tplg_path, charp, 0444);
+MODULE_PARM_DESC(tplg_path, "alternate path for SOF topology.");
+
+const struct dev_pm_ops sof_of_pm = {
+ .prepare = snd_sof_prepare,
+ .complete = snd_sof_complete,
+ SET_SYSTEM_SLEEP_PM_OPS(snd_sof_suspend, snd_sof_resume)
+ SET_RUNTIME_PM_OPS(snd_sof_runtime_suspend, snd_sof_runtime_resume,
+ NULL)
+};
+EXPORT_SYMBOL(sof_of_pm);
+
+static void sof_of_probe_complete(struct device *dev)
+{
+ /* allow runtime_pm */
+ pm_runtime_set_autosuspend_delay(dev, SND_SOF_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+}
+
+int sof_of_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct sof_dev_desc *desc;
+ struct snd_sof_pdata *sof_pdata;
+
+ dev_info(&pdev->dev, "DT DSP detected");
+
+ sof_pdata = devm_kzalloc(dev, sizeof(*sof_pdata), GFP_KERNEL);
+ if (!sof_pdata)
+ return -ENOMEM;
+
+ desc = device_get_match_data(dev);
+ if (!desc)
+ return -ENODEV;
+
+ if (!desc->ops) {
+ dev_err(dev, "error: no matching DT descriptor ops\n");
+ return -ENODEV;
+ }
+
+ sof_pdata->desc = desc;
+ sof_pdata->dev = &pdev->dev;
+ sof_pdata->fw_filename = desc->default_fw_filename[SOF_IPC];
+
+ if (fw_path)
+ sof_pdata->fw_filename_prefix = fw_path;
+ else
+ sof_pdata->fw_filename_prefix = sof_pdata->desc->default_fw_path[SOF_IPC];
+
+ if (tplg_path)
+ sof_pdata->tplg_filename_prefix = tplg_path;
+ else
+ sof_pdata->tplg_filename_prefix = sof_pdata->desc->default_tplg_path[SOF_IPC];
+
+ /* set callback to be called on successful device probe to enable runtime_pm */
+ sof_pdata->sof_probe_complete = sof_of_probe_complete;
+
+ /* call sof helper for DSP hardware probe */
+ return snd_sof_device_probe(dev, sof_pdata);
+}
+EXPORT_SYMBOL(sof_of_probe);
+
+int sof_of_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ /* call sof helper for DSP hardware remove */
+ snd_sof_device_remove(&pdev->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_of_remove);
+
+void sof_of_shutdown(struct platform_device *pdev)
+{
+ snd_sof_device_shutdown(&pdev->dev);
+}
+EXPORT_SYMBOL(sof_of_shutdown);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/sof-of-dev.h b/sound/soc/sof/sof-of-dev.h
new file mode 100644
index 000000000..2948b3a0d
--- /dev/null
+++ b/sound/soc/sof/sof-of-dev.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright 2021 NXP
+ */
+
+#ifndef __SOUND_SOC_SOF_OF_H
+#define __SOUND_SOC_SOF_OF_H
+
+struct snd_sof_of_mach {
+ const char *compatible;
+ const char *drv_name;
+ const char *fw_filename;
+ const char *sof_tplg_filename;
+};
+
+extern const struct dev_pm_ops sof_of_pm;
+
+int sof_of_probe(struct platform_device *pdev);
+int sof_of_remove(struct platform_device *pdev);
+void sof_of_shutdown(struct platform_device *pdev);
+
+#endif
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
new file mode 100644
index 000000000..69a2352f2
--- /dev/null
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/firmware.h>
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_data/x86/soc.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "ops.h"
+#include "sof-pci-dev.h"
+
+static char *fw_path;
+module_param(fw_path, charp, 0444);
+MODULE_PARM_DESC(fw_path, "alternate path for SOF firmware.");
+
+static char *fw_filename;
+module_param(fw_filename, charp, 0444);
+MODULE_PARM_DESC(fw_filename, "alternate filename for SOF firmware.");
+
+static char *lib_path;
+module_param(lib_path, charp, 0444);
+MODULE_PARM_DESC(lib_path, "alternate path for SOF firmware libraries.");
+
+static char *tplg_path;
+module_param(tplg_path, charp, 0444);
+MODULE_PARM_DESC(tplg_path, "alternate path for SOF topology.");
+
+static char *tplg_filename;
+module_param(tplg_filename, charp, 0444);
+MODULE_PARM_DESC(tplg_filename, "alternate filename for SOF topology.");
+
+static int sof_pci_debug;
+module_param_named(sof_pci_debug, sof_pci_debug, int, 0444);
+MODULE_PARM_DESC(sof_pci_debug, "SOF PCI debug options (0x0 all off)");
+
+static int sof_pci_ipc_type = -1;
+module_param_named(ipc_type, sof_pci_ipc_type, int, 0444);
+MODULE_PARM_DESC(ipc_type, "SOF IPC type (0) SOF, (1) Intel CAVS");
+
+static const char *sof_dmi_override_tplg_name;
+static bool sof_dmi_use_community_key;
+
+#define SOF_PCI_DISABLE_PM_RUNTIME BIT(0)
+
+static int sof_tplg_cb(const struct dmi_system_id *id)
+{
+ sof_dmi_override_tplg_name = id->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id sof_tplg_table[] = {
+ {
+ .callback = sof_tplg_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Volteer"),
+ DMI_MATCH(DMI_OEM_STRING, "AUDIO-MAX98373_ALC5682I_I2S_UP4"),
+ },
+ .driver_data = "sof-tgl-rt5682-ssp0-max98373-ssp2.tplg",
+ },
+ {
+ .callback = sof_tplg_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alder Lake Client Platform"),
+ DMI_MATCH(DMI_OEM_STRING, "AUDIO-ADL_MAX98373_ALC5682I_I2S"),
+ },
+ .driver_data = "sof-adl-rt5682-ssp0-max98373-ssp2.tplg",
+ },
+ {
+ .callback = sof_tplg_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Brya"),
+ DMI_MATCH(DMI_OEM_STRING, "AUDIO-MAX98390_ALC5682I_I2S"),
+ },
+ .driver_data = "sof-adl-max98390-ssp2-rt5682-ssp0.tplg",
+ },
+ {
+ .callback = sof_tplg_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Brya"),
+ DMI_MATCH(DMI_OEM_STRING, "AUDIO_AMP-MAX98360_ALC5682VS_I2S_2WAY"),
+ },
+ .driver_data = "sof-adl-max98360a-rt5682-2way.tplg",
+ },
+ {
+ .callback = sof_tplg_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Brya"),
+ DMI_MATCH(DMI_OEM_STRING, "AUDIO-AUDIO_MAX98357_ALC5682I_I2S_2WAY"),
+ },
+ .driver_data = "sof-adl-max98357a-rt5682-2way.tplg",
+ },
+ {
+ .callback = sof_tplg_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Brya"),
+ DMI_MATCH(DMI_OEM_STRING, "AUDIO-MAX98360_ALC5682I_I2S_AMP_SSP2"),
+ },
+ .driver_data = "sof-adl-max98357a-rt5682.tplg",
+ },
+ {}
+};
+
+/* all Up boards use the community key */
+static int up_use_community_key(const struct dmi_system_id *id)
+{
+ sof_dmi_use_community_key = true;
+ return 1;
+}
+
+/*
+ * For ApolloLake Chromebooks we want to force the use of the Intel production key.
+ * All newer platforms use the community key
+ */
+static int chromebook_use_community_key(const struct dmi_system_id *id)
+{
+ if (!soc_intel_is_apl())
+ sof_dmi_use_community_key = true;
+ return 1;
+}
+
+static const struct dmi_system_id community_key_platforms[] = {
+ {
+ .ident = "Up boards",
+ .callback = up_use_community_key,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
+ }
+ },
+ {
+ .ident = "Google Chromebooks",
+ .callback = chromebook_use_community_key,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google"),
+ }
+ },
+ {
+ .ident = "Google firmware",
+ .callback = chromebook_use_community_key,
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VERSION, "Google"),
+ }
+ },
+ {},
+};
+
+const struct dev_pm_ops sof_pci_pm = {
+ .prepare = snd_sof_prepare,
+ .complete = snd_sof_complete,
+ SET_SYSTEM_SLEEP_PM_OPS(snd_sof_suspend, snd_sof_resume)
+ SET_RUNTIME_PM_OPS(snd_sof_runtime_suspend, snd_sof_runtime_resume,
+ snd_sof_runtime_idle)
+};
+EXPORT_SYMBOL_NS(sof_pci_pm, SND_SOC_SOF_PCI_DEV);
+
+static void sof_pci_probe_complete(struct device *dev)
+{
+ dev_dbg(dev, "Completing SOF PCI probe");
+
+ if (sof_pci_debug & SOF_PCI_DISABLE_PM_RUNTIME)
+ return;
+
+ /* allow runtime_pm */
+ pm_runtime_set_autosuspend_delay(dev, SND_SOF_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+
+ /*
+	 * Runtime PM for PCI devices is "forbidden" by default, so call
+	 * pm_runtime_allow() to enable it.
+ */
+ pm_runtime_allow(dev);
+
+	/* mark last_busy for pm_runtime to make sure we do not suspend immediately */
+ pm_runtime_mark_last_busy(dev);
+
+ /* follow recommendation in pci-driver.c to decrement usage counter */
+ pm_runtime_put_noidle(dev);
+}
+
+int sof_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+ struct device *dev = &pci->dev;
+ const struct sof_dev_desc *desc =
+ (const struct sof_dev_desc *)pci_id->driver_data;
+ struct snd_sof_pdata *sof_pdata;
+ int ret;
+
+ dev_dbg(&pci->dev, "PCI DSP detected");
+
+ if (!desc) {
+ dev_err(dev, "error: no matching PCI descriptor\n");
+ return -ENODEV;
+ }
+
+ if (!desc->ops) {
+ dev_err(dev, "error: no matching PCI descriptor ops\n");
+ return -ENODEV;
+ }
+
+ sof_pdata = devm_kzalloc(dev, sizeof(*sof_pdata), GFP_KERNEL);
+ if (!sof_pdata)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pci);
+ if (ret < 0)
+ return ret;
+
+ ret = pci_request_regions(pci, "Audio DSP");
+ if (ret < 0)
+ return ret;
+
+ sof_pdata->name = pci_name(pci);
+
+ /* PCI defines a vendor ID of 0xFFFF as invalid. */
+ if (pci->subsystem_vendor != 0xFFFF) {
+ sof_pdata->subsystem_vendor = pci->subsystem_vendor;
+ sof_pdata->subsystem_device = pci->subsystem_device;
+ sof_pdata->subsystem_id_set = true;
+ }
+
+ sof_pdata->desc = desc;
+ sof_pdata->dev = dev;
+
+ sof_pdata->ipc_type = desc->ipc_default;
+
+ if (sof_pci_ipc_type < 0) {
+ sof_pdata->ipc_type = desc->ipc_default;
+ } else {
+ dev_info(dev, "overriding default IPC %d to requested %d\n",
+ desc->ipc_default, sof_pci_ipc_type);
+ if (sof_pci_ipc_type >= SOF_IPC_TYPE_COUNT) {
+ dev_err(dev, "invalid request value %d\n", sof_pci_ipc_type);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!(BIT(sof_pci_ipc_type) & desc->ipc_supported_mask)) {
+ dev_err(dev, "invalid request value %d, supported mask is %#x\n",
+ sof_pci_ipc_type, desc->ipc_supported_mask);
+ ret = -EINVAL;
+ goto out;
+ }
+ sof_pdata->ipc_type = sof_pci_ipc_type;
+ }
+
+ if (fw_filename) {
+ sof_pdata->fw_filename = fw_filename;
+
+ dev_dbg(dev, "Module parameter used, changed fw filename to %s\n",
+ sof_pdata->fw_filename);
+ } else {
+ sof_pdata->fw_filename = desc->default_fw_filename[sof_pdata->ipc_type];
+ }
+
+ /*
+ * for platforms using the SOF community key, change the
+ * default path automatically to pick the right files from the
+ * linux-firmware tree. This can be overridden with the
+ * fw_path kernel parameter, e.g. for developers.
+ */
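+	/*
+	 * e.g. assuming the usual linux-firmware layout, a default path of
+	 * "intel/sof" would become "intel/sof/community" on such platforms.
+	 */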
+
+	/* alternate fw and tplg filenames? */
+ if (fw_path) {
+ sof_pdata->fw_filename_prefix = fw_path;
+
+ dev_dbg(dev,
+ "Module parameter used, changed fw path to %s\n",
+ sof_pdata->fw_filename_prefix);
+
+ } else if (dmi_check_system(community_key_platforms) && sof_dmi_use_community_key) {
+ sof_pdata->fw_filename_prefix =
+ devm_kasprintf(dev, GFP_KERNEL, "%s/%s",
+ sof_pdata->desc->default_fw_path[sof_pdata->ipc_type],
+ "community");
+
+ dev_dbg(dev,
+ "Platform uses community key, changed fw path to %s\n",
+ sof_pdata->fw_filename_prefix);
+ } else {
+ sof_pdata->fw_filename_prefix =
+ sof_pdata->desc->default_fw_path[sof_pdata->ipc_type];
+ }
+
+ if (lib_path) {
+ sof_pdata->fw_lib_prefix = lib_path;
+
+ dev_dbg(dev, "Module parameter used, changed fw_lib path to %s\n",
+ sof_pdata->fw_lib_prefix);
+
+ } else if (sof_pdata->desc->default_lib_path[sof_pdata->ipc_type]) {
+ if (dmi_check_system(community_key_platforms) && sof_dmi_use_community_key) {
+ sof_pdata->fw_lib_prefix =
+ devm_kasprintf(dev, GFP_KERNEL, "%s/%s",
+ sof_pdata->desc->default_lib_path[sof_pdata->ipc_type],
+ "community");
+
+ dev_dbg(dev,
+ "Platform uses community key, changed fw_lib path to %s\n",
+ sof_pdata->fw_lib_prefix);
+ } else {
+ sof_pdata->fw_lib_prefix =
+ sof_pdata->desc->default_lib_path[sof_pdata->ipc_type];
+ }
+ }
+
+ if (tplg_path)
+ sof_pdata->tplg_filename_prefix = tplg_path;
+ else
+ sof_pdata->tplg_filename_prefix =
+ sof_pdata->desc->default_tplg_path[sof_pdata->ipc_type];
+
+ /*
+ * the topology filename will be provided in the machine descriptor, unless
+ * it is overridden by a module parameter or DMI quirk.
+ */
+ if (tplg_filename) {
+ sof_pdata->tplg_filename = tplg_filename;
+
+ dev_dbg(dev, "Module parameter used, changed tplg filename to %s\n",
+ sof_pdata->tplg_filename);
+ } else {
+ dmi_check_system(sof_tplg_table);
+ if (sof_dmi_override_tplg_name)
+ sof_pdata->tplg_filename = sof_dmi_override_tplg_name;
+ }
+
+ /* set callback to be called on successful device probe to enable runtime_pm */
+ sof_pdata->sof_probe_complete = sof_pci_probe_complete;
+
+ /* call sof helper for DSP hardware probe */
+ ret = snd_sof_device_probe(dev, sof_pdata);
+
+out:
+ if (ret)
+ pci_release_regions(pci);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS(sof_pci_probe, SND_SOC_SOF_PCI_DEV);
+
+void sof_pci_remove(struct pci_dev *pci)
+{
+ /* call sof helper for DSP hardware remove */
+ snd_sof_device_remove(&pci->dev);
+
+ /* follow recommendation in pci-driver.c to increment usage counter */
+ if (snd_sof_device_probe_completed(&pci->dev) &&
+ !(sof_pci_debug & SOF_PCI_DISABLE_PM_RUNTIME))
+ pm_runtime_get_noresume(&pci->dev);
+
+ /* release pci regions and disable device */
+ pci_release_regions(pci);
+}
+EXPORT_SYMBOL_NS(sof_pci_remove, SND_SOC_SOF_PCI_DEV);
+
+void sof_pci_shutdown(struct pci_dev *pci)
+{
+ snd_sof_device_shutdown(&pci->dev);
+}
+EXPORT_SYMBOL_NS(sof_pci_shutdown, SND_SOC_SOF_PCI_DEV);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/sof-pci-dev.h b/sound/soc/sof/sof-pci-dev.h
new file mode 100644
index 000000000..81155a59e
--- /dev/null
+++ b/sound/soc/sof/sof-pci-dev.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2021 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __SOUND_SOC_SOF_PCI_H
+#define __SOUND_SOC_SOF_PCI_H
+
+extern const struct dev_pm_ops sof_pci_pm;
+int sof_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id);
+void sof_pci_remove(struct pci_dev *pci);
+void sof_pci_shutdown(struct pci_dev *pci);
+
+#endif
diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
new file mode 100644
index 000000000..d4f6702e9
--- /dev/null
+++ b/sound/soc/sof/sof-priv.h
@@ -0,0 +1,872 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOUND_SOC_SOF_PRIV_H
+#define __SOUND_SOC_SOF_PRIV_H
+
+#include <linux/device.h>
+#include <sound/hdaudio.h>
+#include <sound/sof.h>
+#include <sound/sof/info.h>
+#include <sound/sof/pm.h>
+#include <sound/sof/trace.h>
+#include <uapi/sound/sof/fw.h>
+#include <sound/sof/ext_manifest.h>
+
+struct snd_sof_pcm_stream;
+
+/* Flag definitions used in sof_core_debug (sof_debug module parameter) */
+#define SOF_DBG_ENABLE_TRACE BIT(0)
+#define SOF_DBG_RETAIN_CTX BIT(1) /* prevent DSP D3 on FW exception */
+#define SOF_DBG_VERIFY_TPLG BIT(2) /* verify topology during load */
+#define SOF_DBG_DYNAMIC_PIPELINES_OVERRIDE BIT(3) /* 0: use topology token
+ * 1: override topology
+ */
+#define SOF_DBG_DYNAMIC_PIPELINES_ENABLE BIT(4) /* 0: use static pipelines
+ * 1: use dynamic pipelines
+ */
+#define SOF_DBG_DISABLE_MULTICORE BIT(5) /* schedule all pipelines/widgets
+ * on primary core
+ */
+#define SOF_DBG_PRINT_ALL_DUMPS BIT(6) /* Print all ipc and dsp dumps */
+#define SOF_DBG_IGNORE_D3_PERSISTENT BIT(7) /* ignore the DSP D3 persistent capability
+ * and always download firmware upon D3 exit
+ */
+#define SOF_DBG_PRINT_DMA_POSITION_UPDATE_LOGS BIT(8) /* print DMA position updates
+ * in dmesg logs
+ */
+#define SOF_DBG_PRINT_IPC_SUCCESS_LOGS BIT(9) /* print IPC success
+ * in dmesg logs
+ */
+#define SOF_DBG_FORCE_NOCODEC BIT(10) /* ignore all codec-related
+ * configurations
+ */
+#define SOF_DBG_DUMP_IPC_MESSAGE_PAYLOAD BIT(11) /* On top of the IPC message header
+ * dump the message payload also
+ */
+#define SOF_DBG_DSPLESS_MODE BIT(15) /* Do not initialize and use the DSP */
+
+/* Flag definitions used for controlling the DSP dump behavior */
+#define SOF_DBG_DUMP_REGS BIT(0)
+#define SOF_DBG_DUMP_MBOX BIT(1)
+#define SOF_DBG_DUMP_TEXT BIT(2)
+#define SOF_DBG_DUMP_PCI BIT(3)
+/* Output this dump (at the DEBUG level) only when SOF_DBG_PRINT_ALL_DUMPS is set */
+#define SOF_DBG_DUMP_OPTIONAL BIT(4)
+
+/* global debug state set by SOF_DBG_ flags */
+bool sof_debug_check_flag(int mask);
+
+/* max BARs mmaped devices can use */
+#define SND_SOF_BARS 8
+
+/* time in ms for runtime suspend delay */
+#define SND_SOF_SUSPEND_DELAY_MS 2000
+
+/* DMA buffer size for trace */
+#define DMA_BUF_SIZE_FOR_TRACE (PAGE_SIZE * 16)
+
+#define SOF_IPC_DSP_REPLY 0
+#define SOF_IPC_HOST_REPLY 1
+
+/* convenience constructor for DAI driver streams */
+#define SOF_DAI_STREAM(sname, scmin, scmax, srates, sfmt) \
+ {.stream_name = sname, .channels_min = scmin, .channels_max = scmax, \
+ .rates = srates, .formats = sfmt}
+
+#define SOF_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_FLOAT)
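+
+/*
+ * e.g. a DAI driver playback stream could be declared as (hypothetical name):
+ * SOF_DAI_STREAM("ssp0 Tx", 1, 8, SNDRV_PCM_RATE_8000_192000, SOF_FORMATS)
+ */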
+
+/* So far the primary core on all DSPs has ID 0 */
+#define SOF_DSP_PRIMARY_CORE 0
+
+/* max number of DSP cores */
+#define SOF_MAX_DSP_NUM_CORES 8
+
+struct sof_dsp_power_state {
+ u32 state;
+ u32 substate; /* platform-specific */
+};
+
+/* System suspend target state */
+enum sof_system_suspend_state {
+ SOF_SUSPEND_NONE = 0,
+ SOF_SUSPEND_S0IX,
+ SOF_SUSPEND_S3,
+ SOF_SUSPEND_S4,
+ SOF_SUSPEND_S5,
+};
+
+enum sof_dfsentry_type {
+ SOF_DFSENTRY_TYPE_IOMEM = 0,
+ SOF_DFSENTRY_TYPE_BUF,
+};
+
+enum sof_debugfs_access_type {
+ SOF_DEBUGFS_ACCESS_ALWAYS = 0,
+ SOF_DEBUGFS_ACCESS_D0_ONLY,
+};
+
+struct sof_compr_stream {
+ u64 copied_total;
+ u32 sampling_rate;
+ u16 channels;
+ u16 sample_container_bytes;
+ size_t posn_offset;
+};
+
+struct snd_sof_dev;
+struct snd_sof_ipc_msg;
+struct snd_sof_ipc;
+struct snd_sof_debugfs_map;
+struct snd_soc_tplg_ops;
+struct snd_soc_component;
+struct snd_sof_pdata;
+
+/**
+ * struct snd_sof_platform_stream_params - platform dependent stream parameters
+ * @stream_tag: Stream tag to use
+ * @use_phy_address: Use the provided @phy_addr for configuration
+ * @phy_addr: Platform dependent address to be used, if @use_phy_address
+ *	      is true
+ * @no_ipc_position: Disable position update IPC from firmware
+ * @cont_update_posn: Continuous position update from firmware
+ */
+struct snd_sof_platform_stream_params {
+ u16 stream_tag;
+ bool use_phy_address;
+ u32 phy_addr;
+ bool no_ipc_position;
+ bool cont_update_posn;
+};
+
+/**
+ * struct sof_firmware - Container struct for SOF firmware
+ * @fw: Pointer to the firmware
+ * @payload_offset: Offset of the data within the loaded firmware image to be
+ * loaded to the DSP (skipping for example ext_manifest section)
+ */
+struct sof_firmware {
+ const struct firmware *fw;
+ u32 payload_offset;
+};
+
+/*
+ * SOF DSP HW abstraction operations.
+ * Used to abstract DSP HW architecture and any IO busses between host CPU
+ * and DSP device(s).
+ */
+struct snd_sof_dsp_ops {
+
+ /* probe/remove/shutdown */
+ int (*probe)(struct snd_sof_dev *sof_dev); /* mandatory */
+ int (*remove)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*shutdown)(struct snd_sof_dev *sof_dev); /* optional */
+
+ /* DSP core boot / reset */
+ int (*run)(struct snd_sof_dev *sof_dev); /* mandatory */
+ int (*stall)(struct snd_sof_dev *sof_dev, unsigned int core_mask); /* optional */
+ int (*reset)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*core_get)(struct snd_sof_dev *sof_dev, int core); /* optional */
+ int (*core_put)(struct snd_sof_dev *sof_dev, int core); /* optional */
+
+ /*
+ * Register IO: only used by respective drivers themselves,
+ * TODO: consider removing these operations and calling respective
+ * implementations directly
+ */
+ void (*write8)(struct snd_sof_dev *sof_dev, void __iomem *addr,
+ u8 value); /* optional */
+ u8 (*read8)(struct snd_sof_dev *sof_dev,
+ void __iomem *addr); /* optional */
+ void (*write)(struct snd_sof_dev *sof_dev, void __iomem *addr,
+ u32 value); /* optional */
+ u32 (*read)(struct snd_sof_dev *sof_dev,
+ void __iomem *addr); /* optional */
+ void (*write64)(struct snd_sof_dev *sof_dev, void __iomem *addr,
+ u64 value); /* optional */
+ u64 (*read64)(struct snd_sof_dev *sof_dev,
+ void __iomem *addr); /* optional */
+
+ /* memcpy IO */
+ int (*block_read)(struct snd_sof_dev *sof_dev,
+ enum snd_sof_fw_blk_type type, u32 offset,
+ void *dest, size_t size); /* mandatory */
+ int (*block_write)(struct snd_sof_dev *sof_dev,
+ enum snd_sof_fw_blk_type type, u32 offset,
+ void *src, size_t size); /* mandatory */
+
+ /* Mailbox IO */
+ void (*mailbox_read)(struct snd_sof_dev *sof_dev,
+ u32 offset, void *dest,
+ size_t size); /* optional */
+ void (*mailbox_write)(struct snd_sof_dev *sof_dev,
+ u32 offset, void *src,
+ size_t size); /* optional */
+
+ /* doorbell */
+ irqreturn_t (*irq_handler)(int irq, void *context); /* optional */
+ irqreturn_t (*irq_thread)(int irq, void *context); /* optional */
+
+ /* ipc */
+ int (*send_msg)(struct snd_sof_dev *sof_dev,
+ struct snd_sof_ipc_msg *msg); /* mandatory */
+
+ /* FW loading */
+ int (*load_firmware)(struct snd_sof_dev *sof_dev); /* mandatory */
+ int (*load_module)(struct snd_sof_dev *sof_dev,
+ struct snd_sof_mod_hdr *hdr); /* optional */
+
+ /* connect pcm substream to a host stream */
+ int (*pcm_open)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream); /* optional */
+	/* disconnect pcm substream from a host stream */
+ int (*pcm_close)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream); /* optional */
+
+ /* host stream hw params */
+ int (*pcm_hw_params)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params); /* optional */
+
+ /* host stream hw_free */
+ int (*pcm_hw_free)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream); /* optional */
+
+ /* host stream trigger */
+ int (*pcm_trigger)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ int cmd); /* optional */
+
+ /* host stream pointer */
+ snd_pcm_uframes_t (*pcm_pointer)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream); /* optional */
+
+ /* pcm ack */
+ int (*pcm_ack)(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream); /* optional */
+
+ /*
+ * optional callback to retrieve the link DMA position for the substream
+ * when the position is not reported in the shared SRAM windows but
+ * instead from a host-accessible hardware counter.
+ */
+ u64 (*get_stream_position)(struct snd_sof_dev *sdev,
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream); /* optional */
+
+ /* host read DSP stream data */
+ int (*ipc_msg_data)(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ void *p, size_t sz); /* mandatory */
+
+ /* host side configuration of the stream's data offset in stream mailbox area */
+ int (*set_stream_data_offset)(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ size_t posn_offset); /* optional */
+
+ /* pre/post firmware run */
+ int (*pre_fw_run)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*post_fw_run)(struct snd_sof_dev *sof_dev); /* optional */
+
+ /* parse platform specific extended manifest, optional */
+ int (*parse_platform_ext_manifest)(struct snd_sof_dev *sof_dev,
+ const struct sof_ext_man_elem_header *hdr);
+
+ /* DSP PM */
+ int (*suspend)(struct snd_sof_dev *sof_dev,
+ u32 target_state); /* optional */
+ int (*resume)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*runtime_suspend)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*runtime_resume)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*runtime_idle)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*set_hw_params_upon_resume)(struct snd_sof_dev *sdev); /* optional */
+ int (*set_power_state)(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state); /* optional */
+
+ /* DSP clocking */
+ int (*set_clk)(struct snd_sof_dev *sof_dev, u32 freq); /* optional */
+
+ /* debug */
+ const struct snd_sof_debugfs_map *debug_map; /* optional */
+ int debug_map_count; /* optional */
+ void (*dbg_dump)(struct snd_sof_dev *sof_dev,
+ u32 flags); /* optional */
+ void (*ipc_dump)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*debugfs_add_region_item)(struct snd_sof_dev *sdev,
+ enum snd_sof_fw_blk_type blk_type, u32 offset,
+ size_t size, const char *name,
+ enum sof_debugfs_access_type access_type); /* optional */
+
+ /* host DMA trace (IPC3) */
+ int (*trace_init)(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmatb,
+ struct sof_ipc_dma_trace_params_ext *dtrace_params); /* optional */
+ int (*trace_release)(struct snd_sof_dev *sdev); /* optional */
+ int (*trace_trigger)(struct snd_sof_dev *sdev,
+ int cmd); /* optional */
+
+ /* misc */
+ int (*get_bar_index)(struct snd_sof_dev *sdev,
+ u32 type); /* optional */
+ int (*get_mailbox_offset)(struct snd_sof_dev *sdev);/* mandatory for common loader code */
+ int (*get_window_offset)(struct snd_sof_dev *sdev,
+ u32 id);/* mandatory for common loader code */
+
+ /* machine driver ops */
+ int (*machine_register)(struct snd_sof_dev *sdev,
+ void *pdata); /* optional */
+ void (*machine_unregister)(struct snd_sof_dev *sdev,
+ void *pdata); /* optional */
+ struct snd_soc_acpi_mach * (*machine_select)(struct snd_sof_dev *sdev); /* optional */
+ void (*set_mach_params)(struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev); /* optional */
+
+ /* IPC client ops */
+ int (*register_ipc_clients)(struct snd_sof_dev *sdev); /* optional */
+ void (*unregister_ipc_clients)(struct snd_sof_dev *sdev); /* optional */
+
+ /* DAI ops */
+ struct snd_soc_dai_driver *drv;
+ int num_drv;
+
+ /* ALSA HW info flags, will be stored in snd_pcm_runtime.hw.info */
+ u32 hw_info;
+
+ const struct dsp_arch_ops *dsp_arch_ops;
+};
+
+/* DSP architecture specific callbacks for oops and stack dumps */
+struct dsp_arch_ops {
+ void (*dsp_oops)(struct snd_sof_dev *sdev, const char *level, void *oops);
+ void (*dsp_stack)(struct snd_sof_dev *sdev, const char *level, void *oops,
+ u32 *stack, u32 stack_words);
+};
+
+#define sof_dsp_arch_ops(sdev) ((sdev)->pdata->desc->ops->dsp_arch_ops)
+
+/* FS entry for debug files that can expose DSP memories, registers */
+struct snd_sof_dfsentry {
+ size_t size;
+ size_t buf_data_size; /* length of buffered data for file read operation */
+ enum sof_dfsentry_type type;
+ /*
+	 * access_type specifies if the DSP resource (memory, register, etc.)
+	 * is always accessible or if it is accessible only when the DSP is
+	 * in D0.
+ */
+ enum sof_debugfs_access_type access_type;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+ char *cache_buf; /* buffer to cache the contents of debugfs memory */
+#endif
+ struct snd_sof_dev *sdev;
+ struct list_head list; /* list in sdev dfsentry list */
+ union {
+ void __iomem *io_mem;
+ void *buf;
+ };
+};
+
+/* Debug mapping for any DSP memory or registers that can be used for debug */
+struct snd_sof_debugfs_map {
+ const char *name;
+ u32 bar;
+ u32 offset;
+ u32 size;
+ /*
+ * access_type specifies if the memory is always accessible
+ * or if it is accessible only when the DSP is in D0.
+ */
+ enum sof_debugfs_access_type access_type;
+};
+
+/* mailbox descriptor, used for host <-> DSP IPC */
+struct snd_sof_mailbox {
+ u32 offset;
+ size_t size;
+};
+
+/* IPC message descriptor for host <-> DSP IO */
+struct snd_sof_ipc_msg {
+ /* message data */
+ void *msg_data;
+ void *reply_data;
+ size_t msg_size;
+ size_t reply_size;
+ int reply_error;
+
+ /* notification, firmware initiated messages */
+ void *rx_data;
+
+ wait_queue_head_t waitq;
+ bool ipc_complete;
+};
+
+/**
+ * struct sof_ipc_fw_tracing_ops - IPC-specific firmware tracing ops
+ * @init: Function pointer for initialization of the tracing
+ * @free: Optional function pointer for freeing of the tracing
+ * @fw_crashed: Optional function pointer to notify the tracing of a firmware crash
+ * @suspend: Function pointer for system/runtime suspend
+ * @resume: Function pointer for system/runtime resume
+ */
+struct sof_ipc_fw_tracing_ops {
+ int (*init)(struct snd_sof_dev *sdev);
+ void (*free)(struct snd_sof_dev *sdev);
+ void (*fw_crashed)(struct snd_sof_dev *sdev);
+ void (*suspend)(struct snd_sof_dev *sdev, pm_message_t pm_state);
+ int (*resume)(struct snd_sof_dev *sdev);
+};
+
+/**
+ * struct sof_ipc_pm_ops - IPC-specific PM ops
+ * @ctx_save: Optional function pointer for context save
+ * @ctx_restore: Optional function pointer for context restore
+ * @set_core_state: Optional function pointer for turning on/off a DSP core
+ * @set_pm_gate: Optional function pointer for pm gate settings
+ */
+struct sof_ipc_pm_ops {
+ int (*ctx_save)(struct snd_sof_dev *sdev);
+ int (*ctx_restore)(struct snd_sof_dev *sdev);
+ int (*set_core_state)(struct snd_sof_dev *sdev, int core_idx, bool on);
+ int (*set_pm_gate)(struct snd_sof_dev *sdev, u32 flags);
+};
+
+/**
+ * struct sof_ipc_fw_loader_ops - IPC/FW-specific loader ops
+ * @validate: Function pointer for validating the firmware image
+ * @parse_ext_manifest: Function pointer for parsing the manifest of the firmware
+ * @load_fw_to_dsp: Optional function pointer for loading the firmware to the
+ * DSP.
+ * The function implements generic, hardware independent way
+ * of loading the initial firmware and its modules (if any).
+ */
+struct sof_ipc_fw_loader_ops {
+ int (*validate)(struct snd_sof_dev *sdev);
+ size_t (*parse_ext_manifest)(struct snd_sof_dev *sdev);
+ int (*load_fw_to_dsp)(struct snd_sof_dev *sdev);
+};
+
+struct sof_ipc_tplg_ops;
+struct sof_ipc_pcm_ops;
+
+/**
+ * struct sof_ipc_ops - IPC-specific ops
+ * @tplg: Pointer to IPC-specific topology ops
+ * @pm: Pointer to PM ops
+ * @pcm: Pointer to PCM ops
+ * @fw_loader: Pointer to Firmware Loader ops
+ * @fw_tracing: Optional pointer to Firmware tracing ops
+ *
+ * @init: Optional pointer for IPC related initialization
+ * @exit: Optional pointer for IPC related cleanup
+ * @post_fw_boot: Optional pointer to execute IPC related tasks after firmware
+ * boot.
+ *
+ * @tx_msg: Function pointer for sending a 'short' IPC message
+ * @set_get_data: Function pointer for set/get data ('large' IPC message). This
+ * function may split up the 'large' message and use the @tx_msg
+ * path to transfer individual chunks, or use other means to transfer
+ * the message.
+ * @get_reply: Function pointer for fetching the reply to
+ * sdev->ipc->msg.reply_data
+ * @rx_msg: Function pointer for handling a received message
+ *
+ * Note: both @tx_msg and @set_get_data are considered TX functions and they are
+ * serialized for the duration of the instructed transfer. A large message sent
+ * via @set_get_data is a single transfer even if at the hardware level it is
+ * handled with multiple chunks.
+ */
+struct sof_ipc_ops {
+ const struct sof_ipc_tplg_ops *tplg;
+ const struct sof_ipc_pm_ops *pm;
+ const struct sof_ipc_pcm_ops *pcm;
+ const struct sof_ipc_fw_loader_ops *fw_loader;
+ const struct sof_ipc_fw_tracing_ops *fw_tracing;
+
+ int (*init)(struct snd_sof_dev *sdev);
+ void (*exit)(struct snd_sof_dev *sdev);
+ int (*post_fw_boot)(struct snd_sof_dev *sdev);
+
+ int (*tx_msg)(struct snd_sof_dev *sdev, void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes, bool no_pm);
+ int (*set_get_data)(struct snd_sof_dev *sdev, void *data, size_t data_bytes,
+ bool set);
+ int (*get_reply)(struct snd_sof_dev *sdev);
+ void (*rx_msg)(struct snd_sof_dev *sdev);
+};
+
+/* SOF generic IPC data */
+struct snd_sof_ipc {
+ struct snd_sof_dev *sdev;
+
+ /* protects messages and the disable flag */
+ struct mutex tx_mutex;
+	/* disables further sending of IPCs */
+ bool disable_ipc_tx;
+
+ /* Maximum allowed size of a single IPC message/reply */
+ size_t max_payload_size;
+
+ struct snd_sof_ipc_msg msg;
+
+ /* IPC ops based on version */
+ const struct sof_ipc_ops *ops;
+};
+
+/* Helper to retrieve the IPC ops */
+#define sof_ipc_get_ops(sdev, ops_name) \
+ (((sdev)->ipc && (sdev)->ipc->ops) ? (sdev)->ipc->ops->ops_name : NULL)
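+/* e.g. const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm); */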
+
+/*
+ * SOF Device Level.
+ */
+struct snd_sof_dev {
+ struct device *dev;
+ spinlock_t ipc_lock; /* lock for IPC users */
+ spinlock_t hw_lock; /* lock for HW IO access */
+
+ /*
+ * When true the DSP is not used.
+	 * It is set when the user sets the SOF_DBG_DSPLESS_MODE flag in the
+	 * sof_debug module parameter and the platform advertises support for
+	 * such a mode (pdata->desc->dspless_mode_supported is true).
+ */
+ bool dspless_mode_selected;
+
+ /* Main, Base firmware image */
+ struct sof_firmware basefw;
+
+ /*
+ * ASoC components. plat_drv fields are set dynamically so
+ * can't use const
+ */
+ struct snd_soc_component_driver plat_drv;
+
+ /* current DSP power state */
+ struct sof_dsp_power_state dsp_power_state;
+ /* mutex to protect the dsp_power_state access */
+ struct mutex power_state_access;
+
+ /* Intended power target of system suspend */
+ enum sof_system_suspend_state system_suspend_target;
+
+ /* DSP firmware boot */
+ wait_queue_head_t boot_wait;
+ enum sof_fw_state fw_state;
+ bool first_boot;
+
+ /* work queue in case the probe is implemented in two steps */
+ struct work_struct probe_work;
+ bool probe_completed;
+
+ /* DSP HW differentiation */
+ struct snd_sof_pdata *pdata;
+
+ /* IPC */
+ struct snd_sof_ipc *ipc;
+ struct snd_sof_mailbox fw_info_box; /* FW shared memory */
+ struct snd_sof_mailbox dsp_box; /* DSP initiated IPC */
+ struct snd_sof_mailbox host_box; /* Host initiated IPC */
+ struct snd_sof_mailbox stream_box; /* Stream position update */
+ struct snd_sof_mailbox debug_box; /* Debug info updates */
+ struct snd_sof_ipc_msg *msg;
+ int ipc_irq;
+ u32 next_comp_id; /* monotonic - reset during S3 */
+
+ /* memory bases for mmaped DSPs - set by dsp_init() */
+ void __iomem *bar[SND_SOF_BARS]; /* DSP base address */
+ int mmio_bar;
+ int mailbox_bar;
+ size_t dsp_oops_offset;
+
+ /* debug */
+ struct dentry *debugfs_root;
+ struct list_head dfsentry_list;
+ bool dbg_dump_printed;
+ bool ipc_dump_printed;
+
+ /* firmware loader */
+ struct sof_ipc_fw_ready fw_ready;
+ struct sof_ipc_fw_version fw_version;
+ struct sof_ipc_cc_version *cc_version;
+
+ /* topology */
+ struct snd_soc_tplg_ops *tplg_ops;
+ struct list_head pcm_list;
+ struct list_head kcontrol_list;
+ struct list_head widget_list;
+ struct list_head pipeline_list;
+ struct list_head dai_list;
+ struct list_head dai_link_list;
+ struct list_head route_list;
+ struct snd_soc_component *component;
+ u32 enabled_cores_mask; /* keep track of enabled cores */
+ bool led_present;
+
+ /* FW configuration */
+ struct sof_ipc_window *info_window;
+
+ /* IPC timeouts in ms */
+ int ipc_timeout;
+ int boot_timeout;
+
+	/* firmware tracing */
+ bool fw_trace_is_supported; /* set with Kconfig or module parameter */
+ void *fw_trace_data; /* private data used by firmware tracing implementation */
+
+ bool msi_enabled;
+
+ /* DSP core context */
+ u32 num_cores;
+
+ /*
+ * ref count per core that will be modified during system suspend/resume and during pcm
+ * hw_params/hw_free. This doesn't need to be protected with a mutex because pcm
+ * hw_params/hw_free are already protected by the PCM mutex in the ALSA framework in
+ * sound/core/ when streams are active and during system suspend/resume, streams are
+ * already suspended.
+ */
+ int dsp_core_ref_count[SOF_MAX_DSP_NUM_CORES];
+
+ /*
+ * Used to keep track of registered IPC client devices so that they can
+ * be removed when the parent SOF module is removed.
+ */
+ struct list_head ipc_client_list;
+
+ /* mutex to protect client list */
+ struct mutex ipc_client_mutex;
+
+ /*
+ * Used for tracking the IPC client's RX registration for DSP initiated
+ * message handling.
+ */
+ struct list_head ipc_rx_handler_list;
+
+ /*
+ * Used for tracking the IPC client's registration for DSP state change
+ * notification
+ */
+ struct list_head fw_state_handler_list;
+
+	/* to protect the ipc_rx_handler_list and fw_state_handler_list lists */
+ struct mutex client_event_handler_mutex;
+
+ /* quirks to override topology values */
+ bool mclk_id_override;
+ u16 mclk_id_quirk; /* same size as in IPC3 definitions */
+
+ void *private; /* core does not touch this */
+};
+
+/*
+ * Device Level.
+ */
+
+int snd_sof_device_probe(struct device *dev, struct snd_sof_pdata *plat_data);
+int snd_sof_device_remove(struct device *dev);
+int snd_sof_device_shutdown(struct device *dev);
+bool snd_sof_device_probe_completed(struct device *dev);
+
+int snd_sof_runtime_suspend(struct device *dev);
+int snd_sof_runtime_resume(struct device *dev);
+int snd_sof_runtime_idle(struct device *dev);
+int snd_sof_resume(struct device *dev);
+int snd_sof_suspend(struct device *dev);
+int snd_sof_dsp_power_down_notify(struct snd_sof_dev *sdev);
+int snd_sof_prepare(struct device *dev);
+void snd_sof_complete(struct device *dev);
+
+void snd_sof_new_platform_drv(struct snd_sof_dev *sdev);
+
+/*
+ * Compress support
+ */
+extern struct snd_compress_ops sof_compressed_ops;
+
+/*
+ * Firmware loading.
+ */
+int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev);
+int snd_sof_load_firmware_memcpy(struct snd_sof_dev *sdev);
+int snd_sof_run_firmware(struct snd_sof_dev *sdev);
+void snd_sof_fw_unload(struct snd_sof_dev *sdev);
+
+/*
+ * IPC low level APIs.
+ */
+struct snd_sof_ipc *snd_sof_ipc_init(struct snd_sof_dev *sdev);
+void snd_sof_ipc_free(struct snd_sof_dev *sdev);
+void snd_sof_ipc_get_reply(struct snd_sof_dev *sdev);
+void snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id);
+static inline void snd_sof_ipc_msgs_rx(struct snd_sof_dev *sdev)
+{
+ sdev->ipc->ops->rx_msg(sdev);
+}
+int sof_ipc_tx_message(struct snd_sof_ipc *ipc, void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes);
+static inline int sof_ipc_tx_message_no_reply(struct snd_sof_ipc *ipc, void *msg_data,
+ size_t msg_bytes)
+{
+ return sof_ipc_tx_message(ipc, msg_data, msg_bytes, NULL, 0);
+}
+int sof_ipc_set_get_data(struct snd_sof_ipc *ipc, void *msg_data,
+ size_t msg_bytes, bool set);
+int sof_ipc_tx_message_no_pm(struct snd_sof_ipc *ipc, void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes);
+static inline int sof_ipc_tx_message_no_pm_no_reply(struct snd_sof_ipc *ipc, void *msg_data,
+ size_t msg_bytes)
+{
+ return sof_ipc_tx_message_no_pm(ipc, msg_data, msg_bytes, NULL, 0);
+}
+int sof_ipc_send_msg(struct snd_sof_dev *sdev, void *msg_data, size_t msg_bytes,
+ size_t reply_bytes);
+
+static inline void snd_sof_ipc_process_reply(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ snd_sof_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, msg_id);
+}
+
+/*
+ * Trace/debug
+ */
+int snd_sof_dbg_init(struct snd_sof_dev *sdev);
+void snd_sof_free_debug(struct snd_sof_dev *sdev);
+int snd_sof_debugfs_buf_item(struct snd_sof_dev *sdev,
+ void *base, size_t size,
+ const char *name, mode_t mode);
+void sof_print_oops_and_stack(struct snd_sof_dev *sdev, const char *level,
+ u32 panic_code, u32 tracep_code, void *oops,
+ struct sof_ipc_panic_info *panic_info,
+ void *stack, size_t stack_words);
+void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev, const char *msg);
+int snd_sof_dbg_memory_info_init(struct snd_sof_dev *sdev);
+int snd_sof_debugfs_add_region_item_iomem(struct snd_sof_dev *sdev,
+ enum snd_sof_fw_blk_type blk_type, u32 offset, size_t size,
+ const char *name, enum sof_debugfs_access_type access_type);
+/* Firmware tracing */
+int sof_fw_trace_init(struct snd_sof_dev *sdev);
+void sof_fw_trace_free(struct snd_sof_dev *sdev);
+void sof_fw_trace_fw_crashed(struct snd_sof_dev *sdev);
+void sof_fw_trace_suspend(struct snd_sof_dev *sdev, pm_message_t pm_state);
+int sof_fw_trace_resume(struct snd_sof_dev *sdev);
+
+/*
+ * DSP Architectures.
+ */
+static inline void sof_stack(struct snd_sof_dev *sdev, const char *level,
+ void *oops, u32 *stack, u32 stack_words)
+{
+ sof_dsp_arch_ops(sdev)->dsp_stack(sdev, level, oops, stack,
+ stack_words);
+}
+
+static inline void sof_oops(struct snd_sof_dev *sdev, const char *level, void *oops)
+{
+ if (sof_dsp_arch_ops(sdev)->dsp_oops)
+ sof_dsp_arch_ops(sdev)->dsp_oops(sdev, level, oops);
+}
+
+extern const struct dsp_arch_ops sof_xtensa_arch_ops;
+
+/*
+ * Firmware state tracking
+ */
+void sof_set_fw_state(struct snd_sof_dev *sdev, enum sof_fw_state new_state);
+
+/*
+ * Utilities
+ */
+void sof_io_write(struct snd_sof_dev *sdev, void __iomem *addr, u32 value);
+void sof_io_write64(struct snd_sof_dev *sdev, void __iomem *addr, u64 value);
+u32 sof_io_read(struct snd_sof_dev *sdev, void __iomem *addr);
+u64 sof_io_read64(struct snd_sof_dev *sdev, void __iomem *addr);
+void sof_mailbox_write(struct snd_sof_dev *sdev, u32 offset,
+ void *message, size_t bytes);
+void sof_mailbox_read(struct snd_sof_dev *sdev, u32 offset,
+ void *message, size_t bytes);
+int sof_block_write(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
+ u32 offset, void *src, size_t size);
+int sof_block_read(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
+ u32 offset, void *dest, size_t size);
+
+int sof_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ void *p, size_t sz);
+int sof_set_stream_data_offset(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ size_t posn_offset);
+
+int sof_stream_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int sof_stream_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+
+int sof_machine_check(struct snd_sof_dev *sdev);
+
+/* SOF client support */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_CLIENT)
+int sof_client_dev_register(struct snd_sof_dev *sdev, const char *name, u32 id,
+ const void *data, size_t size);
+void sof_client_dev_unregister(struct snd_sof_dev *sdev, const char *name, u32 id);
+int sof_register_clients(struct snd_sof_dev *sdev);
+void sof_unregister_clients(struct snd_sof_dev *sdev);
+void sof_client_ipc_rx_dispatcher(struct snd_sof_dev *sdev, void *msg_buf);
+void sof_client_fw_state_dispatcher(struct snd_sof_dev *sdev);
+int sof_suspend_clients(struct snd_sof_dev *sdev, pm_message_t state);
+int sof_resume_clients(struct snd_sof_dev *sdev);
+#else /* CONFIG_SND_SOC_SOF_CLIENT */
+static inline int sof_client_dev_register(struct snd_sof_dev *sdev, const char *name,
+ u32 id, const void *data, size_t size)
+{
+ return 0;
+}
+
+static inline void sof_client_dev_unregister(struct snd_sof_dev *sdev,
+ const char *name, u32 id)
+{
+}
+
+static inline int sof_register_clients(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline void sof_unregister_clients(struct snd_sof_dev *sdev)
+{
+}
+
+static inline void sof_client_ipc_rx_dispatcher(struct snd_sof_dev *sdev, void *msg_buf)
+{
+}
+
+static inline void sof_client_fw_state_dispatcher(struct snd_sof_dev *sdev)
+{
+}
+
+static inline int sof_suspend_clients(struct snd_sof_dev *sdev, pm_message_t state)
+{
+ return 0;
+}
+
+static inline int sof_resume_clients(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+#endif /* CONFIG_SND_SOC_SOF_CLIENT */
+
+/* Main ops for IPC implementations */
+extern const struct sof_ipc_ops ipc3_ops;
+extern const struct sof_ipc_ops ipc4_ops;
+
+#endif
diff --git a/sound/soc/sof/sof-utils.c b/sound/soc/sof/sof-utils.c
new file mode 100644
index 000000000..b6345a734
--- /dev/null
+++ b/sound/soc/sof/sof-utils.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
+//
+// Author: Keyon Jie <yang.jie@linux.intel.com>
+//
+
+#include <asm/unaligned.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <sound/memalloc.h>
+#include <linux/module.h>
+#include "sof-utils.h"
+
+/*
+ * Generic buffer page table creation.
+ * Take each physical page address and drop the least significant unused
+ * bits from each (based on PAGE_SIZE). Then pack the valid page address
+ * bits into a compressed page table.
+ */
+
+int snd_sof_create_page_table(struct device *dev,
+ struct snd_dma_buffer *dmab,
+ unsigned char *page_table, size_t size)
+{
+ int i, pages;
+
+ pages = snd_sgbuf_aligned_pages(size);
+
+ dev_dbg(dev, "generating page table for %p size 0x%zx pages %d\n",
+ dmab->area, size, pages);
+
+ for (i = 0; i < pages; i++) {
+ /*
+ * The number of valid address bits for each page is 20.
+ * idx determines the byte position within page_table
+ * where the current page's address is stored
+ * in the compressed page_table.
+ * This can be calculated by multiplying the page number by 2.5.
+ */
+ u32 idx = (5 * i) >> 1;
+ u32 pfn = snd_sgbuf_get_addr(dmab, i * PAGE_SIZE) >> PAGE_SHIFT;
+ u8 *pg_table;
+
+ pg_table = (u8 *)(page_table + idx);
+
+ /*
+ * pagetable compression:
+ * byte 0 byte 1 byte 2 byte 3 byte 4 byte 5
+ * ___________pfn 0__________ __________pfn 1___________ _pfn 2...
+ * .... .... .... .... .... .... .... .... .... .... ....
+ * It is created by:
+ * 1. set current location to 0, PFN index i to 0
+ * 2. put pfn[i] at current location in Little Endian byte order
+ * 3. calculate an intermediate value as
+ * x = (pfn[i+1] << 4) | (pfn[i] & 0xf)
+ * 4. put x at offset (current location + 2) in LE byte order
+ * 5. increment current location by 5 bytes, increment i by 2
+ * 6. continue to (2)
+ */
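+		/*
+		 * e.g. pfn[0] = 0x12345 and pfn[1] = 0x6789a are packed into
+		 * bytes 45 23 a1 89 67: pfn[0] occupies bits 0..19 and
+		 * pfn[1] occupies bits 20..39.
+		 */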
+ if (i & 1)
+ put_unaligned_le32((pg_table[0] & 0xf) | pfn << 4,
+ pg_table);
+ else
+ put_unaligned_le32(pfn, pg_table);
+ }
+
+ return pages;
+}
+EXPORT_SYMBOL(snd_sof_create_page_table);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/sof-utils.h b/sound/soc/sof/sof-utils.h
new file mode 100644
index 000000000..6f9028938
--- /dev/null
+++ b/sound/soc/sof/sof-utils.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __SOC_SOF_UTILS_H
+#define __SOC_SOF_UTILS_H
+
+struct snd_dma_buffer;
+struct device;
+
+int snd_sof_create_page_table(struct device *dev,
+ struct snd_dma_buffer *dmab,
+ unsigned char *page_table, size_t size);
+
+#endif
diff --git a/sound/soc/sof/stream-ipc.c b/sound/soc/sof/stream-ipc.c
new file mode 100644
index 000000000..216b454f6
--- /dev/null
+++ b/sound/soc/sof/stream-ipc.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2019 Intel Corporation. All rights reserved.
+//
+// Authors: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
+
+/* Generic SOF IPC code */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <sound/pcm.h>
+#include <sound/sof/stream.h>
+
+#include "ops.h"
+#include "sof-priv.h"
+#include "sof-audio.h"
+
+struct sof_stream {
+ size_t posn_offset;
+};
+
+/* Mailbox-based Generic IPC implementation */
+int sof_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ void *p, size_t sz)
+{
+ if (!sps || !sdev->stream_box.size) {
+ snd_sof_dsp_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
+ } else {
+ size_t posn_offset;
+
+ if (sps->substream) {
+ struct sof_stream *stream = sps->substream->runtime->private_data;
+
+ /* The stream might already be closed */
+ if (!stream)
+ return -ESTRPIPE;
+
+ posn_offset = stream->posn_offset;
+ } else {
+
+ struct sof_compr_stream *sstream = sps->cstream->runtime->private_data;
+
+ if (!sstream)
+ return -ESTRPIPE;
+
+ posn_offset = sstream->posn_offset;
+ }
+
+ snd_sof_dsp_mailbox_read(sdev, posn_offset, p, sz);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_ipc_msg_data);
+
+int sof_set_stream_data_offset(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm_stream *sps,
+ size_t posn_offset)
+{
+ /* check if offset is overflow or it is not aligned */
+ if (posn_offset > sdev->stream_box.size ||
+ posn_offset % sizeof(struct sof_ipc_stream_posn) != 0)
+ return -EINVAL;
+
+ posn_offset += sdev->stream_box.offset;
+
+ if (sps->substream) {
+ struct sof_stream *stream = sps->substream->runtime->private_data;
+
+ stream->posn_offset = posn_offset;
+ dev_dbg(sdev->dev, "pcm: stream dir %d, posn mailbox offset is %zu",
+ sps->substream->stream, posn_offset);
+ } else if (sps->cstream) {
+ struct sof_compr_stream *sstream = sps->cstream->runtime->private_data;
+
+ sstream->posn_offset = posn_offset;
+ dev_dbg(sdev->dev, "compr: stream dir %d, posn mailbox offset is %zu",
+ sps->cstream->direction, posn_offset);
+ } else {
+ dev_err(sdev->dev, "No stream opened");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_set_stream_data_offset);
+
+int sof_stream_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct sof_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+
+ if (!stream)
+ return -ENOMEM;
+
+ /* binding pcm substream to hda stream */
+ substream->runtime->private_data = stream;
+
+ /* align to DMA minimum transfer size */
+ snd_pcm_hw_constraint_step(substream->runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 4);
+
+ /* avoid circular buffer wrap in middle of period */
+ snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_stream_pcm_open);
+
+int sof_stream_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct sof_stream *stream = substream->runtime->private_data;
+
+ substream->runtime->private_data = NULL;
+ kfree(stream);
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_stream_pcm_close);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
new file mode 100644
index 000000000..7133ec133
--- /dev/null
+++ b/sound/soc/sof/topology.c
@@ -0,0 +1,2463 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/workqueue.h>
+#include <sound/tlv.h>
+#include <uapi/sound/sof/tokens.h>
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ops.h"
+
+#define COMP_ID_UNASSIGNED 0xffffffff
+/*
+ * Constants used in the computation of linear volume gain from dB gain:
+ * 20th root of 10 in Q1.16 fixed-point notation
+ */
+#define VOL_TWENTIETH_ROOT_OF_TEN 73533
+/* 40th root of 10 in Q1.16 fixed-point notation*/
+#define VOL_FORTIETH_ROOT_OF_TEN 69419
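+/* i.e. 10^(1/20) ~= 1.12202 and 10^(1/40) ~= 1.05925, both scaled by 2^16 */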
+
+/* 0.5 dB step value in topology TLV */
+#define VOL_HALF_DB_STEP 50
+
+/* TLV data items */
+#define TLV_MIN 0
+#define TLV_STEP 1
+#define TLV_MUTE 2
+
+/**
+ * sof_update_ipc_object - Parse multiple sets of tokens within the token array associated with the
+ * token ID.
+ * @scomp: pointer to SOC component
+ * @object: target IPC struct to save the parsed values
+ * @token_id: token ID for the token array to be searched
+ * @tuples: pointer to the tuples array
+ * @num_tuples: number of tuples in the tuples array
+ * @object_size: size of the object
+ * @token_instance_num: number of times the same @token_id needs to be parsed i.e. the function
+ * looks for @token_instance_num of each token in the token array associated
+ * with the @token_id
+ */
+int sof_update_ipc_object(struct snd_soc_component *scomp, void *object, enum sof_tokens token_id,
+ struct snd_sof_tuple *tuples, int num_tuples,
+ size_t object_size, int token_instance_num)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ const struct sof_token_info *token_list;
+ const struct sof_topology_token *tokens;
+ int i, j;
+
+ token_list = tplg_ops ? tplg_ops->token_list : NULL;
+ /* nothing to do if token_list is NULL */
+ if (!token_list)
+ return 0;
+
+ if (token_list[token_id].count < 0) {
+ dev_err(scomp->dev, "Invalid token count for token ID: %d\n", token_id);
+ return -EINVAL;
+ }
+
+ /* No tokens to match */
+ if (!token_list[token_id].count)
+ return 0;
+
+ tokens = token_list[token_id].tokens;
+ if (!tokens) {
+ dev_err(scomp->dev, "Invalid tokens for token id: %d\n", token_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < token_list[token_id].count; i++) {
+ int offset = 0;
+ int num_tokens_matched = 0;
+
+ for (j = 0; j < num_tuples; j++) {
+ if (tokens[i].token == tuples[j].token) {
+ switch (tokens[i].type) {
+ case SND_SOC_TPLG_TUPLE_TYPE_WORD:
+ {
+ u32 *val = (u32 *)((u8 *)object + tokens[i].offset +
+ offset);
+
+ *val = tuples[j].value.v;
+ break;
+ }
+ case SND_SOC_TPLG_TUPLE_TYPE_SHORT:
+ case SND_SOC_TPLG_TUPLE_TYPE_BOOL:
+ {
+ u16 *val = (u16 *)((u8 *)object + tokens[i].offset +
+ offset);
+
+ *val = (u16)tuples[j].value.v;
+ break;
+ }
+ case SND_SOC_TPLG_TUPLE_TYPE_STRING:
+ {
+ if (!tokens[i].get_token) {
+ dev_err(scomp->dev,
+ "get_token not defined for token %d in %s\n",
+ tokens[i].token, token_list[token_id].name);
+ return -EINVAL;
+ }
+
+ tokens[i].get_token((void *)tuples[j].value.s, object,
+ tokens[i].offset + offset);
+ break;
+ }
+ default:
+ break;
+ }
+
+ num_tokens_matched++;
+
+ /* found all required sets of current token. Move to the next one */
+ if (!(num_tokens_matched % token_instance_num))
+ break;
+
+ /* move to the next object */
+ offset += object_size;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static inline int get_tlv_data(const int *p, int tlv[SOF_TLV_ITEMS])
+{
+ /* we only support dB scale TLV type at the moment */
+ if ((int)p[SNDRV_CTL_TLVO_TYPE] != SNDRV_CTL_TLVT_DB_SCALE)
+ return -EINVAL;
+
+ /* min value in topology tlv data is multiplied by 100 */
+ tlv[TLV_MIN] = (int)p[SNDRV_CTL_TLVO_DB_SCALE_MIN] / 100;
+
+ /* volume steps */
+ tlv[TLV_STEP] = (int)(p[SNDRV_CTL_TLVO_DB_SCALE_MUTE_AND_STEP] &
+ TLV_DB_SCALE_MASK);
+
+ /* mute ON/OFF */
+ if ((p[SNDRV_CTL_TLVO_DB_SCALE_MUTE_AND_STEP] &
+ TLV_DB_SCALE_MUTE) == 0)
+ tlv[TLV_MUTE] = 0;
+ else
+ tlv[TLV_MUTE] = 1;
+
+ return 0;
+}
+
+/*
+ * Function to truncate an unsigned 64-bit number
+ * by x bits and return a 32-bit unsigned number. This
+ * function also takes care of rounding while truncating.
+ */
+static inline u32 vol_shift_64(u64 i, u32 x)
+{
+ /* do not truncate more than 32 bits */
+ if (x > 32)
+ x = 32;
+
+ if (x == 0)
+ return (u32)i;
+
+ return (u32)(((i >> (x - 1)) + 1) >> 1);
+}
+
+/*
+ * Function to compute a ^ exp where:
+ * a is a fractional number represented by a fixed-point
+ * integer with a fractional word length of "fwl"
+ * exp is an integer
+ * fwl is the fractional word length
+ * Return value is a fractional number represented by a
+ * fixed-point integer with a fractional word length of "fwl"
+ */
+static u32 vol_pow32(u32 a, int exp, u32 fwl)
+{
+ int i, iter;
+ u32 power = 1 << fwl;
+ u64 numerator;
+
+ /* if exponent is 0, return 1 */
+ if (exp == 0)
+ return power;
+
+ /* determine the number of iterations based on the exponent */
+ if (exp < 0)
+ iter = exp * -1;
+ else
+ iter = exp;
+
+	/* multiply by "a" a total of "iter" times to compute the power */
+ for (i = 0; i < iter; i++) {
+ /*
+ * Product of 2 Qx.fwl fixed-point numbers yields a Q2*x.2*fwl
+ * Truncate product back to fwl fractional bits with rounding
+ */
+ power = vol_shift_64((u64)power * a, fwl);
+ }
+
+ if (exp > 0) {
+ /* if exp is positive, return the result */
+ return power;
+ }
+
+ /* if exp is negative, return the multiplicative inverse */
+ numerator = (u64)1 << (fwl << 1);
+ do_div(numerator, power);
+
+ return (u32)numerator;
+}
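+
+/*
+ * e.g. vol_pow32(VOL_TWENTIETH_ROOT_OF_TEN, 20, fwl) is roughly 10.0 in
+ * Qx.fwl format, since (10^(1/20))^20 = 10, i.e. a +20 dB amplitude gain.
+ */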
+
+/*
+ * Function to calculate volume gain from TLV data.
+ * This function can only handle gain steps that are multiples of 0.5 dB
+ */
+u32 vol_compute_gain(u32 value, int *tlv)
+{
+ int dB_gain;
+ u32 linear_gain;
+ int f_step;
+
+ /* mute volume */
+ if (value == 0 && tlv[TLV_MUTE])
+ return 0;
+
+ /*
+ * compute dB gain from tlv. tlv_step
+ * in topology is multiplied by 100
+ */
+ dB_gain = tlv[TLV_MIN] + (value * tlv[TLV_STEP]) / 100;
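+	/*
+	 * e.g. tlv[TLV_MIN] = -50 and tlv[TLV_STEP] = 50 (0.5 dB steps) with
+	 * a control value of 20 give dB_gain = -50 + (20 * 50) / 100 = -40 dB
+	 */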
+
+ /*
+ * compute linear gain represented by fixed-point
+ * int with VOLUME_FWL fractional bits
+ */
+ linear_gain = vol_pow32(VOL_TWENTIETH_ROOT_OF_TEN, dB_gain, VOLUME_FWL);
+
+ /* extract the fractional part of volume step */
+ f_step = tlv[TLV_STEP] - (tlv[TLV_STEP] / 100);
+
+ /* if volume step is an odd multiple of 0.5 dB */
+ if (f_step == VOL_HALF_DB_STEP && (value & 1))
+ linear_gain = vol_shift_64((u64)linear_gain *
+ VOL_FORTIETH_ROOT_OF_TEN,
+ VOLUME_FWL);
+
+ return linear_gain;
+}
+
+/*
+ * Set up volume table for kcontrols from tlv data
+ * "size" specifies the number of entries in the table
+ */
+static int set_up_volume_table(struct snd_sof_control *scontrol,
+ int tlv[SOF_TLV_ITEMS], int size)
+{
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ if (tplg_ops && tplg_ops->control && tplg_ops->control->set_up_volume_table)
+ return tplg_ops->control->set_up_volume_table(scontrol, tlv, size);
+
+ dev_err(scomp->dev, "Mandatory op %s not set\n", __func__);
+ return -EINVAL;
+}
+
+struct sof_dai_types {
+ const char *name;
+ enum sof_ipc_dai_type type;
+};
+
+static const struct sof_dai_types sof_dais[] = {
+ {"SSP", SOF_DAI_INTEL_SSP},
+ {"HDA", SOF_DAI_INTEL_HDA},
+ {"DMIC", SOF_DAI_INTEL_DMIC},
+ {"ALH", SOF_DAI_INTEL_ALH},
+ {"SAI", SOF_DAI_IMX_SAI},
+ {"ESAI", SOF_DAI_IMX_ESAI},
+ {"ACP", SOF_DAI_AMD_BT},
+ {"ACPSP", SOF_DAI_AMD_SP},
+ {"ACPDMIC", SOF_DAI_AMD_DMIC},
+ {"ACPHS", SOF_DAI_AMD_HS},
+ {"AFE", SOF_DAI_MEDIATEK_AFE},
+ {"ACPSP_VIRTUAL", SOF_DAI_AMD_SP_VIRTUAL},
+ {"ACPHS_VIRTUAL", SOF_DAI_AMD_HS_VIRTUAL},
+
+};
+
+static enum sof_ipc_dai_type find_dai(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sof_dais); i++) {
+ if (strcmp(name, sof_dais[i].name) == 0)
+ return sof_dais[i].type;
+ }
+
+ return SOF_DAI_INTEL_NONE;
+}
+
+/*
+ * Supported Frame format types and lookup, add new ones to end of list.
+ */
+
+struct sof_frame_types {
+ const char *name;
+ enum sof_ipc_frame frame;
+};
+
+static const struct sof_frame_types sof_frames[] = {
+ {"s16le", SOF_IPC_FRAME_S16_LE},
+ {"s24le", SOF_IPC_FRAME_S24_4LE},
+ {"s32le", SOF_IPC_FRAME_S32_LE},
+ {"float", SOF_IPC_FRAME_FLOAT},
+};
+
+static enum sof_ipc_frame find_format(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sof_frames); i++) {
+ if (strcmp(name, sof_frames[i].name) == 0)
+ return sof_frames[i].frame;
+ }
+
+ /* use s32le if nothing is specified */
+ return SOF_IPC_FRAME_S32_LE;
+}
+
+int get_token_u32(void *elem, void *object, u32 offset)
+{
+ struct snd_soc_tplg_vendor_value_elem *velem = elem;
+ u32 *val = (u32 *)((u8 *)object + offset);
+
+ *val = le32_to_cpu(velem->value);
+ return 0;
+}
+
+int get_token_u16(void *elem, void *object, u32 offset)
+{
+ struct snd_soc_tplg_vendor_value_elem *velem = elem;
+ u16 *val = (u16 *)((u8 *)object + offset);
+
+ *val = (u16)le32_to_cpu(velem->value);
+ return 0;
+}
+
+int get_token_uuid(void *elem, void *object, u32 offset)
+{
+ struct snd_soc_tplg_vendor_uuid_elem *velem = elem;
+ u8 *dst = (u8 *)object + offset;
+
+ memcpy(dst, velem->uuid, UUID_SIZE);
+
+ return 0;
+}
+
+/*
+ * The string retrieved from topology is stored on the heap; the owner only
+ * holds a char * member pointing to it.
+ */
+int get_token_string(void *elem, void *object, u32 offset)
+{
+ /* "dst" here points to the char* member of the owner */
+ char **dst = (char **)((u8 *)object + offset);
+
+ *dst = kstrdup(elem, GFP_KERNEL);
+ if (!*dst)
+ return -ENOMEM;
+ return 0;
+};
+
+int get_token_comp_format(void *elem, void *object, u32 offset)
+{
+ u32 *val = (u32 *)((u8 *)object + offset);
+
+ *val = find_format((const char *)elem);
+ return 0;
+}
+
+int get_token_dai_type(void *elem, void *object, u32 offset)
+{
+ u32 *val = (u32 *)((u8 *)object + offset);
+
+ *val = find_dai((const char *)elem);
+ return 0;
+}
+
+/* PCM */
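+/* stream[0] below is the playback direction, stream[1] is capture (SNDRV_PCM_STREAM_*) */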
+static const struct sof_topology_token stream_tokens[] = {
+ {SOF_TKN_STREAM_PLAYBACK_COMPATIBLE_D0I3, SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_pcm, stream[0].d0i3_compatible)},
+ {SOF_TKN_STREAM_CAPTURE_COMPATIBLE_D0I3, SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_pcm, stream[1].d0i3_compatible)},
+};
+
+/* Leds */
+static const struct sof_topology_token led_tokens[] = {
+ {SOF_TKN_MUTE_LED_USE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct snd_sof_led_control, use_led)},
+ {SOF_TKN_MUTE_LED_DIRECTION, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct snd_sof_led_control, direction)},
+};
+
+static const struct sof_topology_token comp_pin_tokens[] = {
+ {SOF_TKN_COMP_NUM_INPUT_PINS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct snd_sof_widget, num_input_pins)},
+ {SOF_TKN_COMP_NUM_OUTPUT_PINS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct snd_sof_widget, num_output_pins)},
+};
+
+static const struct sof_topology_token comp_input_pin_binding_tokens[] = {
+ {SOF_TKN_COMP_INPUT_PIN_BINDING_WNAME, SND_SOC_TPLG_TUPLE_TYPE_STRING,
+ get_token_string, 0},
+};
+
+static const struct sof_topology_token comp_output_pin_binding_tokens[] = {
+ {SOF_TKN_COMP_OUTPUT_PIN_BINDING_WNAME, SND_SOC_TPLG_TUPLE_TYPE_STRING,
+ get_token_string, 0},
+};
+
+/**
+ * sof_parse_uuid_tokens - Parse multiple sets of UUID tokens
+ * @scomp: pointer to soc component
+ * @object: target ipc struct for parsed values
+ * @offset: offset within the object pointer
+ * @tokens: array of struct sof_topology_token containing the tokens to be matched
+ * @num_tokens: number of tokens in tokens array
+ * @array: source pointer to consecutive vendor arrays in topology
+ *
+ * This function parses multiple sets of UUID type tokens in vendor arrays
+ */
+static int sof_parse_uuid_tokens(struct snd_soc_component *scomp,
+ void *object, size_t offset,
+ const struct sof_topology_token *tokens, int num_tokens,
+ struct snd_soc_tplg_vendor_array *array)
+{
+ struct snd_soc_tplg_vendor_uuid_elem *elem;
+ int found = 0;
+ int i, j;
+
+ /* parse element by element */
+ for (i = 0; i < le32_to_cpu(array->num_elems); i++) {
+ elem = &array->uuid[i];
+
+ /* search for token */
+ for (j = 0; j < num_tokens; j++) {
+ /* match token type */
+ if (tokens[j].type != SND_SOC_TPLG_TUPLE_TYPE_UUID)
+ continue;
+
+ /* match token id */
+ if (tokens[j].token != le32_to_cpu(elem->token))
+ continue;
+
+ /* matched - now load token */
+ tokens[j].get_token(elem, object,
+ offset + tokens[j].offset);
+
+ found++;
+ }
+ }
+
+ return found;
+}
+
+/**
+ * sof_copy_tuples - Parse tokens and copy them to the @tuples array
+ * @sdev: pointer to struct snd_sof_dev
+ * @array: source pointer to consecutive vendor arrays in topology
+ * @array_size: size of @array
+ * @token_id: Token ID associated with a token array
+ * @token_instance_num: number of times the same @token_id needs to be parsed i.e. the function
+ * looks for @token_instance_num of each token in the token array associated
+ * with the @token_id
+ * @tuples: tuples array to copy the matched tuples to
+ * @tuples_size: size of @tuples
+ * @num_copied_tuples: pointer to the number of copied tuples in the tuples array
+ *
+ */
+static int sof_copy_tuples(struct snd_sof_dev *sdev, struct snd_soc_tplg_vendor_array *array,
+ int array_size, u32 token_id, int token_instance_num,
+ struct snd_sof_tuple *tuples, int tuples_size, int *num_copied_tuples)
+{
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ const struct sof_token_info *token_list;
+ const struct sof_topology_token *tokens;
+ int found = 0;
+ int num_tokens, asize;
+ int i, j;
+
+ token_list = tplg_ops ? tplg_ops->token_list : NULL;
+ /* nothing to do if token_list is NULL */
+ if (!token_list)
+ return 0;
+
+ if (!tuples || !num_copied_tuples) {
+ dev_err(sdev->dev, "Invalid tuples array\n");
+ return -EINVAL;
+ }
+
+ tokens = token_list[token_id].tokens;
+ num_tokens = token_list[token_id].count;
+
+ if (!tokens) {
+ dev_err(sdev->dev, "No token array defined for token ID: %d\n", token_id);
+ return -EINVAL;
+ }
+
+ /* check if there's space in the tuples array for new tokens */
+ if (*num_copied_tuples >= tuples_size) {
+ dev_err(sdev->dev, "No space in tuples array for new tokens from %s",
+ token_list[token_id].name);
+ return -EINVAL;
+ }
+
+ while (array_size > 0 && found < num_tokens * token_instance_num) {
+ asize = le32_to_cpu(array->size);
+
+ /* validate asize */
+ if (asize < 0) {
+ dev_err(sdev->dev, "Invalid array size 0x%x\n", asize);
+ return -EINVAL;
+ }
+
+ /* make sure there is enough data before parsing */
+ array_size -= asize;
+ if (array_size < 0) {
+ dev_err(sdev->dev, "Invalid array size 0x%x\n", asize);
+ return -EINVAL;
+ }
+
+ /* parse element by element */
+ for (i = 0; i < le32_to_cpu(array->num_elems); i++) {
+ /* search for token */
+ for (j = 0; j < num_tokens; j++) {
+ /* match token type */
+ if (!(tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_WORD ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BYTE ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BOOL ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_STRING))
+ continue;
+
+ if (tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_STRING) {
+ struct snd_soc_tplg_vendor_string_elem *elem;
+
+ elem = &array->string[i];
+
+ /* match token id */
+ if (tokens[j].token != le32_to_cpu(elem->token))
+ continue;
+
+ tuples[*num_copied_tuples].token = tokens[j].token;
+ tuples[*num_copied_tuples].value.s = elem->string;
+ } else {
+ struct snd_soc_tplg_vendor_value_elem *elem;
+
+ elem = &array->value[i];
+
+ /* match token id */
+ if (tokens[j].token != le32_to_cpu(elem->token))
+ continue;
+
+ tuples[*num_copied_tuples].token = tokens[j].token;
+ tuples[*num_copied_tuples].value.v =
+ le32_to_cpu(elem->value);
+ }
+ found++;
+ (*num_copied_tuples)++;
+
+ /* stop if there's no space for any more new tuples */
+ if (*num_copied_tuples == tuples_size)
+ return 0;
+ }
+
+ /* stop when we've found the required token instances */
+ if (found == num_tokens * token_instance_num)
+ return 0;
+ }
+
+ /* next array */
+ array = (struct snd_soc_tplg_vendor_array *)((u8 *)array + asize);
+ }
+
+ return 0;
+}
+
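+/*
+ * Usage sketch (illustrative): callers such as sof_widget_parse_tokens()
+ * below size the destination array from the per-token-ID counts in the IPC
+ * token_list, allocate it with kcalloc() and then call sof_copy_tuples()
+ * once per token ID, e.g.:
+ *
+ *      ret = sof_copy_tuples(sdev, private->array, le32_to_cpu(private->size),
+ *                            token_id, num_sets, swidget->tuples,
+ *                            num_tuples, &swidget->num_tuples);
+ *
+ * Matched tuples are appended and *num_copied_tuples is advanced, so the
+ * same destination array can be filled by repeated calls.
+ */
+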
+/**
+ * sof_parse_string_tokens - Parse string tokens
+ * @scomp: pointer to soc component
+ * @object: target ipc struct for parsed values
+ * @offset: offset within the object pointer
+ * @tokens: array of struct sof_topology_token containing the tokens to be matched
+ * @num_tokens: number of tokens in tokens array
+ * @array: source pointer to consecutive vendor arrays in topology
+ *
+ * This function parses string-type tokens in a vendor array and loads the
+ * matched values into @object.
+ *
+ * Return: the number of tokens matched, or a negative error code on failure.
+ */
+static int sof_parse_string_tokens(struct snd_soc_component *scomp,
+ void *object, int offset,
+ const struct sof_topology_token *tokens, int num_tokens,
+ struct snd_soc_tplg_vendor_array *array)
+{
+ struct snd_soc_tplg_vendor_string_elem *elem;
+ int found = 0;
+ int i, j, ret;
+
+ /* parse element by element */
+ for (i = 0; i < le32_to_cpu(array->num_elems); i++) {
+ elem = &array->string[i];
+
+ /* search for token */
+ for (j = 0; j < num_tokens; j++) {
+ /* match token type */
+ if (tokens[j].type != SND_SOC_TPLG_TUPLE_TYPE_STRING)
+ continue;
+
+ /* match token id */
+ if (tokens[j].token != le32_to_cpu(elem->token))
+ continue;
+
+ /* matched - now load token */
+ ret = tokens[j].get_token(elem->string, object, offset + tokens[j].offset);
+ if (ret < 0)
+ return ret;
+
+ found++;
+ }
+ }
+
+ return found;
+}
+
+/**
+ * sof_parse_word_tokens - Parse word tokens
+ * @scomp: pointer to soc component
+ * @object: target ipc struct for parsed values
+ * @offset: offset within the object pointer
+ * @tokens: array of struct sof_topology_token containing the tokens to be matched
+ * @num_tokens: number of tokens in tokens array
+ * @array: source pointer to consecutive vendor arrays in topology
+ *
+ * This function parses word-type (word, short, byte and bool) tokens in a
+ * vendor array and loads the matched values into @object.
+ *
+ * Return: the number of tokens matched.
+ */
+static int sof_parse_word_tokens(struct snd_soc_component *scomp,
+ void *object, int offset,
+ const struct sof_topology_token *tokens, int num_tokens,
+ struct snd_soc_tplg_vendor_array *array)
+{
+ struct snd_soc_tplg_vendor_value_elem *elem;
+ int found = 0;
+ int i, j;
+
+ /* parse element by element */
+ for (i = 0; i < le32_to_cpu(array->num_elems); i++) {
+ elem = &array->value[i];
+
+ /* search for token */
+ for (j = 0; j < num_tokens; j++) {
+ /* match token type */
+ if (!(tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_WORD ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BYTE ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BOOL))
+ continue;
+
+ /* match token id */
+ if (tokens[j].token != le32_to_cpu(elem->token))
+ continue;
+
+ /* load token */
+ tokens[j].get_token(elem, object, offset + tokens[j].offset);
+
+ found++;
+ }
+ }
+
+ return found;
+}
+
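+/*
+ * Note: the uuid, string and word helpers above return the number of tokens
+ * they matched. Only the string parser can fail, since it is the only one
+ * that checks the return value of its get_token() callback, which is why
+ * sof_parse_token_sets() below only error-checks the string case.
+ */
+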
+/**
+ * sof_parse_token_sets - Parse multiple sets of tokens
+ * @scomp: pointer to soc component
+ * @object: target ipc struct for parsed values
+ * @tokens: token definition array describing what tokens to parse
+ * @count: number of tokens in definition array
+ * @array: source pointer to consecutive vendor arrays in topology
+ * @array_size: total size of @array
+ * @token_instance_num: number of times the same set of tokens needs to be parsed, i.e. the
+ *                      function looks for @token_instance_num matches of each token in @tokens
+ * @object_size: offset to next target ipc struct with multiple sets
+ *
+ * This function parses multiple sets of tokens in vendor arrays into
+ * consecutive ipc structs.
+ */
+static int sof_parse_token_sets(struct snd_soc_component *scomp,
+ void *object, const struct sof_topology_token *tokens,
+ int count, struct snd_soc_tplg_vendor_array *array,
+ int array_size, int token_instance_num, size_t object_size)
+{
+ size_t offset = 0;
+ int found = 0;
+ int total = 0;
+ int asize;
+ int ret;
+
+ while (array_size > 0 && total < count * token_instance_num) {
+ asize = le32_to_cpu(array->size);
+
+ /* validate asize */
+ if (asize < 0) { /* FIXME: A zero-size array makes no sense */
+ dev_err(scomp->dev, "error: invalid array size 0x%x\n",
+ asize);
+ return -EINVAL;
+ }
+
+ /* make sure there is enough data before parsing */
+ array_size -= asize;
+ if (array_size < 0) {
+ dev_err(scomp->dev, "error: invalid array size 0x%x\n",
+ asize);
+ return -EINVAL;
+ }
+
+ /* call correct parser depending on type */
+ switch (le32_to_cpu(array->type)) {
+ case SND_SOC_TPLG_TUPLE_TYPE_UUID:
+ found += sof_parse_uuid_tokens(scomp, object, offset, tokens, count,
+ array);
+ break;
+ case SND_SOC_TPLG_TUPLE_TYPE_STRING:
+
+ ret = sof_parse_string_tokens(scomp, object, offset, tokens, count,
+ array);
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: no memory to copy string token\n");
+ return ret;
+ }
+
+ found += ret;
+ break;
+ case SND_SOC_TPLG_TUPLE_TYPE_BOOL:
+ case SND_SOC_TPLG_TUPLE_TYPE_BYTE:
+ case SND_SOC_TPLG_TUPLE_TYPE_WORD:
+ case SND_SOC_TPLG_TUPLE_TYPE_SHORT:
+ found += sof_parse_word_tokens(scomp, object, offset, tokens, count,
+ array);
+ break;
+ default:
+ dev_err(scomp->dev, "error: unknown token type %d\n",
+ array->type);
+ return -EINVAL;
+ }
+
+ /* next array */
+ array = (struct snd_soc_tplg_vendor_array *)((u8 *)array
+ + asize);
+
+ /* move to next target struct */
+ if (found >= count) {
+ offset += object_size;
+ total += found;
+ found = 0;
+ }
+ }
+
+ return 0;
+}
+
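+/*
+ * The multi-set walk above is what lets one token definition fill several
+ * consecutive objects: once all @count tokens have been found for the
+ * current object, @offset is advanced by @object_size and matching restarts
+ * for the next set. sof_parse_pin_binding() below relies on this with
+ * object_size = sizeof(char *) to collect one binding string per widget pin.
+ */
+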
+/**
+ * sof_parse_tokens - Parse one set of tokens
+ * @scomp: pointer to soc component
+ * @object: target ipc struct for parsed values
+ * @tokens: token definition array describing what tokens to parse
+ * @num_tokens: number of tokens in definition array
+ * @array: source pointer to consecutive vendor arrays in topology
+ * @array_size: total size of @array
+ *
+ * This function parses a single set of tokens in vendor arrays into a
+ * single target ipc struct.
+ */
+static int sof_parse_tokens(struct snd_soc_component *scomp, void *object,
+ const struct sof_topology_token *tokens, int num_tokens,
+ struct snd_soc_tplg_vendor_array *array,
+ int array_size)
+
+{
+	/*
+	 * sof_parse_tokens() is used when the topology contains only a single
+	 * set of tokens to be parsed. The additional parameters passed to
+	 * sof_parse_token_sets() are therefore token_instance_num = 1 (only
+	 * one set) and object_size = 0 (irrelevant, as the target offset is
+	 * never advanced).
+	 */
+ return sof_parse_token_sets(scomp, object, tokens, num_tokens, array,
+ array_size, 1, 0);
+}
+
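+/*
+ * Example (see sof_control_load_volume() below): a single set of LED tokens
+ * is parsed out of a mixer control's private data with
+ *
+ *      sof_parse_tokens(scomp, &scontrol->led_ctl, led_tokens,
+ *                       ARRAY_SIZE(led_tokens), mc->priv.array,
+ *                       le32_to_cpu(mc->priv.size));
+ */
+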
+/*
+ * Standard Kcontrols.
+ */
+
+static int sof_control_load_volume(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_mixer_control *mc =
+ container_of(hdr, struct snd_soc_tplg_mixer_control, hdr);
+ int tlv[SOF_TLV_ITEMS];
+ unsigned int mask;
+ int ret;
+
+ /* validate topology data */
+ if (le32_to_cpu(mc->num_channels) > SND_SOC_TPLG_MAX_CHAN)
+ return -EINVAL;
+
+ /*
+ * If control has more than 2 channels we need to override the info. This is because even if
+ * ASoC layer has defined topology's max channel count to SND_SOC_TPLG_MAX_CHAN = 8, the
+ * pre-defined dapm control types (and related functions) creating the actual control
+ * restrict the channels only to mono or stereo.
+ */
+ if (le32_to_cpu(mc->num_channels) > 2)
+ kc->info = snd_sof_volume_info;
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->min_volume_step = le32_to_cpu(mc->min);
+ scontrol->max_volume_step = le32_to_cpu(mc->max);
+ scontrol->num_channels = le32_to_cpu(mc->num_channels);
+
+ scontrol->max = le32_to_cpu(mc->max);
+ if (le32_to_cpu(mc->max) == 1)
+ goto skip;
+
+ /* extract tlv data */
+ if (!kc->tlv.p || get_tlv_data(kc->tlv.p, tlv) < 0) {
+ dev_err(scomp->dev, "error: invalid TLV data\n");
+ return -EINVAL;
+ }
+
+ /* set up volume table */
+ ret = set_up_volume_table(scontrol, tlv, le32_to_cpu(mc->max) + 1);
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: setting up volume table\n");
+ return ret;
+ }
+
+skip:
+ /* set up possible led control from mixer private data */
+ ret = sof_parse_tokens(scomp, &scontrol->led_ctl, led_tokens,
+ ARRAY_SIZE(led_tokens), mc->priv.array,
+ le32_to_cpu(mc->priv.size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse led tokens failed %d\n",
+ le32_to_cpu(mc->priv.size));
+ goto err;
+ }
+
+ if (scontrol->led_ctl.use_led) {
+ mask = scontrol->led_ctl.direction ? SNDRV_CTL_ELEM_ACCESS_MIC_LED :
+ SNDRV_CTL_ELEM_ACCESS_SPK_LED;
+ scontrol->access &= ~SNDRV_CTL_ELEM_ACCESS_LED_MASK;
+ scontrol->access |= mask;
+ kc->access &= ~SNDRV_CTL_ELEM_ACCESS_LED_MASK;
+ kc->access |= mask;
+ sdev->led_present = true;
+ }
+
+ dev_dbg(scomp->dev, "tplg: load kcontrol index %d chans %d\n",
+ scontrol->comp_id, scontrol->num_channels);
+
+ return 0;
+
+err:
+ if (le32_to_cpu(mc->max) > 1)
+ kfree(scontrol->volume_table);
+
+ return ret;
+}
+
+static int sof_control_load_enum(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_enum_control *ec =
+ container_of(hdr, struct snd_soc_tplg_enum_control, hdr);
+
+ /* validate topology data */
+ if (le32_to_cpu(ec->num_channels) > SND_SOC_TPLG_MAX_CHAN)
+ return -EINVAL;
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->num_channels = le32_to_cpu(ec->num_channels);
+
+ dev_dbg(scomp->dev, "tplg: load kcontrol index %d chans %d comp_id %d\n",
+ scontrol->comp_id, scontrol->num_channels, scontrol->comp_id);
+
+ return 0;
+}
+
+static int sof_control_load_bytes(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_bytes_control *control =
+ container_of(hdr, struct snd_soc_tplg_bytes_control, hdr);
+ struct soc_bytes_ext *sbe = (struct soc_bytes_ext *)kc->private_value;
+ size_t priv_size = le32_to_cpu(control->priv.size);
+
+ scontrol->max_size = sbe->max;
+ scontrol->comp_id = sdev->next_comp_id;
+
+ dev_dbg(scomp->dev, "tplg: load kcontrol index %d\n", scontrol->comp_id);
+
+ /* copy the private data */
+ if (priv_size > 0) {
+ scontrol->priv = kmemdup(control->priv.data, priv_size, GFP_KERNEL);
+ if (!scontrol->priv)
+ return -ENOMEM;
+
+ scontrol->priv_size = priv_size;
+ }
+
+ return 0;
+}
+
+/* external kcontrol init - used for any driver specific init */
+static int sof_control_load(struct snd_soc_component *scomp, int index,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct soc_mixer_control *sm;
+ struct soc_bytes_ext *sbe;
+ struct soc_enum *se;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_dobj *dobj;
+ struct snd_sof_control *scontrol;
+ int ret;
+
+ dev_dbg(scomp->dev, "tplg: load control type %d name : %s\n",
+ hdr->type, hdr->name);
+
+ scontrol = kzalloc(sizeof(*scontrol), GFP_KERNEL);
+ if (!scontrol)
+ return -ENOMEM;
+
+ scontrol->name = kstrdup(hdr->name, GFP_KERNEL);
+ if (!scontrol->name) {
+ kfree(scontrol);
+ return -ENOMEM;
+ }
+
+ scontrol->scomp = scomp;
+ scontrol->access = kc->access;
+ scontrol->info_type = le32_to_cpu(hdr->ops.info);
+ scontrol->index = kc->index;
+
+ switch (le32_to_cpu(hdr->ops.info)) {
+ case SND_SOC_TPLG_CTL_VOLSW:
+ case SND_SOC_TPLG_CTL_VOLSW_SX:
+ case SND_SOC_TPLG_CTL_VOLSW_XR_SX:
+ sm = (struct soc_mixer_control *)kc->private_value;
+ dobj = &sm->dobj;
+ ret = sof_control_load_volume(scomp, scontrol, kc, hdr);
+ break;
+ case SND_SOC_TPLG_CTL_BYTES:
+ sbe = (struct soc_bytes_ext *)kc->private_value;
+ dobj = &sbe->dobj;
+ ret = sof_control_load_bytes(scomp, scontrol, kc, hdr);
+ break;
+ case SND_SOC_TPLG_CTL_ENUM:
+ case SND_SOC_TPLG_CTL_ENUM_VALUE:
+ se = (struct soc_enum *)kc->private_value;
+ dobj = &se->dobj;
+ ret = sof_control_load_enum(scomp, scontrol, kc, hdr);
+ break;
+ case SND_SOC_TPLG_CTL_RANGE:
+ case SND_SOC_TPLG_CTL_STROBE:
+ case SND_SOC_TPLG_DAPM_CTL_VOLSW:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
+ case SND_SOC_TPLG_DAPM_CTL_PIN:
+ default:
+ dev_warn(scomp->dev, "control type not supported %d:%d:%d\n",
+ hdr->ops.get, hdr->ops.put, hdr->ops.info);
+ kfree(scontrol->name);
+ kfree(scontrol);
+ return 0;
+ }
+
+ if (ret < 0) {
+ kfree(scontrol->name);
+ kfree(scontrol);
+ return ret;
+ }
+
+ scontrol->led_ctl.led_value = -1;
+
+ dobj->private = scontrol;
+ list_add(&scontrol->list, &sdev->kcontrol_list);
+ return 0;
+}
+
+static int sof_control_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ struct snd_sof_control *scontrol = dobj->private;
+ int ret = 0;
+
+ dev_dbg(scomp->dev, "tplg: unload control name : %s\n", scontrol->name);
+
+ if (tplg_ops && tplg_ops->control_free) {
+ ret = tplg_ops->control_free(sdev, scontrol);
+ if (ret < 0)
+ dev_err(scomp->dev, "failed to free control: %s\n", scontrol->name);
+ }
+
+ /* free all data before returning in case of error too */
+ kfree(scontrol->ipc_control_data);
+ kfree(scontrol->priv);
+ kfree(scontrol->name);
+ list_del(&scontrol->list);
+ kfree(scontrol);
+
+ return ret;
+}
+
+/*
+ * DAI Topology
+ */
+
+static int sof_connect_dai_widget(struct snd_soc_component *scomp,
+ struct snd_soc_dapm_widget *w,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct snd_sof_dai *dai)
+{
+ struct snd_soc_card *card = scomp->card;
+ struct snd_soc_pcm_runtime *rtd;
+ struct snd_soc_dai *cpu_dai;
+ int stream;
+ int i;
+
+ if (!w->sname) {
+ dev_err(scomp->dev, "Widget %s does not have stream\n", w->name);
+ return -EINVAL;
+ }
+
+ if (w->id == snd_soc_dapm_dai_out)
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+ else if (w->id == snd_soc_dapm_dai_in)
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+ else
+ goto end;
+
+ list_for_each_entry(rtd, &card->rtd_list, list) {
+ /* does stream match DAI link ? */
+ if (!rtd->dai_link->stream_name ||
+ !strstr(rtd->dai_link->stream_name, w->sname))
+ continue;
+
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+			/*
+			 * DAI widgets must be created in the right order
+			 * to ensure the BE connects to the right DAI
+			 * widget.
+			 */
+ if (!snd_soc_dai_get_widget(cpu_dai, stream)) {
+ snd_soc_dai_set_widget(cpu_dai, stream, w);
+ break;
+ }
+ }
+ if (i == rtd->dai_link->num_cpus) {
+ dev_err(scomp->dev, "error: can't find BE for DAI %s\n", w->name);
+
+ return -EINVAL;
+ }
+
+ dai->name = rtd->dai_link->name;
+ dev_dbg(scomp->dev, "tplg: connected widget %s -> DAI link %s\n",
+ w->name, rtd->dai_link->name);
+ }
+end:
+ /* check we have a connection */
+ if (!dai->name) {
+ dev_err(scomp->dev, "error: can't connect DAI %s stream %s\n",
+ w->name, w->sname);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void sof_disconnect_dai_widget(struct snd_soc_component *scomp,
+ struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_card *card = scomp->card;
+ struct snd_soc_pcm_runtime *rtd;
+ const char *sname = w->sname;
+ struct snd_soc_dai *cpu_dai;
+ int i, stream;
+
+ if (!sname)
+ return;
+
+ if (w->id == snd_soc_dapm_dai_out)
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+ else if (w->id == snd_soc_dapm_dai_in)
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+ else
+ return;
+
+ list_for_each_entry(rtd, &card->rtd_list, list) {
+ /* does stream match DAI link ? */
+ if (!rtd->dai_link->stream_name ||
+ !strstr(rtd->dai_link->stream_name, sname))
+ continue;
+
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai)
+ if (snd_soc_dai_get_widget(cpu_dai, stream) == w) {
+ snd_soc_dai_set_widget(cpu_dai, stream, NULL);
+ break;
+ }
+ }
+}
+
+/* bind PCM ID to host component ID */
+static int spcm_bind(struct snd_soc_component *scomp, struct snd_sof_pcm *spcm,
+ int dir)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_widget *host_widget;
+
+ if (sdev->dspless_mode_selected)
+ return 0;
+
+ host_widget = snd_sof_find_swidget_sname(scomp,
+ spcm->pcm.caps[dir].name,
+ dir);
+ if (!host_widget) {
+ dev_err(scomp->dev, "can't find host comp to bind pcm\n");
+ return -EINVAL;
+ }
+
+ spcm->stream[dir].comp_id = host_widget->comp_id;
+
+ return 0;
+}
+
+static int sof_get_token_value(u32 token_id, struct snd_sof_tuple *tuples, int num_tuples)
+{
+ int i;
+
+ if (!tuples)
+ return -EINVAL;
+
+ for (i = 0; i < num_tuples; i++) {
+ if (tuples[i].token == token_id)
+ return tuples[i].value.v;
+ }
+
+ return -EINVAL;
+}
+
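+/*
+ * Note: sof_get_token_value() returns the token value on success and -EINVAL
+ * when the token is absent, so it is only usable for tokens whose values are
+ * never negative. Callers such as the SOF_TKN_COMP_CORE_ID lookup in
+ * sof_widget_ready() therefore treat any negative return as "token not set".
+ */
+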
+static int sof_widget_parse_tokens(struct snd_soc_component *scomp, struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ enum sof_tokens *object_token_list, int count)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ const struct sof_token_info *token_list;
+ int num_tuples = 0;
+ int ret, i;
+
+ token_list = tplg_ops ? tplg_ops->token_list : NULL;
+ /* nothing to do if token_list is NULL */
+ if (!token_list)
+ return 0;
+
+ if (count > 0 && !object_token_list) {
+ dev_err(scomp->dev, "No token list for widget %s\n", swidget->widget->name);
+ return -EINVAL;
+ }
+
+ /* calculate max size of tuples array */
+ for (i = 0; i < count; i++)
+ num_tuples += token_list[object_token_list[i]].count;
+
+ /* allocate memory for tuples array */
+ swidget->tuples = kcalloc(num_tuples, sizeof(*swidget->tuples), GFP_KERNEL);
+ if (!swidget->tuples)
+ return -ENOMEM;
+
+ /* parse token list for widget */
+ for (i = 0; i < count; i++) {
+ int num_sets = 1;
+
+ if (object_token_list[i] >= SOF_TOKEN_COUNT) {
+ dev_err(scomp->dev, "Invalid token id %d for widget %s\n",
+ object_token_list[i], swidget->widget->name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (object_token_list[i]) {
+ case SOF_COMP_EXT_TOKENS:
+ /* parse and save UUID in swidget */
+ ret = sof_parse_tokens(scomp, swidget,
+ token_list[object_token_list[i]].tokens,
+ token_list[object_token_list[i]].count,
+ private->array, le32_to_cpu(private->size));
+ if (ret < 0) {
+ dev_err(scomp->dev, "Failed parsing %s for widget %s\n",
+ token_list[object_token_list[i]].name,
+ swidget->widget->name);
+ goto err;
+ }
+
+ continue;
+ case SOF_IN_AUDIO_FORMAT_TOKENS:
+ num_sets = sof_get_token_value(SOF_TKN_COMP_NUM_INPUT_AUDIO_FORMATS,
+ swidget->tuples, swidget->num_tuples);
+ if (num_sets < 0) {
+ dev_err(sdev->dev, "Invalid input audio format count for %s\n",
+ swidget->widget->name);
+ ret = num_sets;
+ goto err;
+ }
+ break;
+ case SOF_OUT_AUDIO_FORMAT_TOKENS:
+ num_sets = sof_get_token_value(SOF_TKN_COMP_NUM_OUTPUT_AUDIO_FORMATS,
+ swidget->tuples, swidget->num_tuples);
+ if (num_sets < 0) {
+ dev_err(sdev->dev, "Invalid output audio format count for %s\n",
+ swidget->widget->name);
+ ret = num_sets;
+ goto err;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (num_sets > 1) {
+ struct snd_sof_tuple *new_tuples;
+
+ num_tuples += token_list[object_token_list[i]].count * (num_sets - 1);
+ new_tuples = krealloc(swidget->tuples,
+ sizeof(*new_tuples) * num_tuples, GFP_KERNEL);
+ if (!new_tuples) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ swidget->tuples = new_tuples;
+ }
+
+ /* copy one set of tuples per token ID into swidget->tuples */
+ ret = sof_copy_tuples(sdev, private->array, le32_to_cpu(private->size),
+ object_token_list[i], num_sets, swidget->tuples,
+ num_tuples, &swidget->num_tuples);
+ if (ret < 0) {
+ dev_err(scomp->dev, "Failed parsing %s for widget %s err: %d\n",
+ token_list[object_token_list[i]].name, swidget->widget->name, ret);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ kfree(swidget->tuples);
+ return ret;
+}
+
+static void sof_free_pin_binding(struct snd_sof_widget *swidget,
+ bool pin_type)
+{
+ char **pin_binding;
+ u32 num_pins;
+ int i;
+
+ if (pin_type == SOF_PIN_TYPE_INPUT) {
+ pin_binding = swidget->input_pin_binding;
+ num_pins = swidget->num_input_pins;
+ } else {
+ pin_binding = swidget->output_pin_binding;
+ num_pins = swidget->num_output_pins;
+ }
+
+ if (pin_binding) {
+ for (i = 0; i < num_pins; i++)
+ kfree(pin_binding[i]);
+ }
+
+ kfree(pin_binding);
+}
+
+static int sof_parse_pin_binding(struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_private *priv, bool pin_type)
+{
+ const struct sof_topology_token *pin_binding_token;
+ char *pin_binding[SOF_WIDGET_MAX_NUM_PINS];
+ int token_count;
+ u32 num_pins;
+ char **pb;
+ int ret;
+ int i;
+
+ if (pin_type == SOF_PIN_TYPE_INPUT) {
+ num_pins = swidget->num_input_pins;
+ pin_binding_token = comp_input_pin_binding_tokens;
+ token_count = ARRAY_SIZE(comp_input_pin_binding_tokens);
+ } else {
+ num_pins = swidget->num_output_pins;
+ pin_binding_token = comp_output_pin_binding_tokens;
+ token_count = ARRAY_SIZE(comp_output_pin_binding_tokens);
+ }
+
+ memset(pin_binding, 0, SOF_WIDGET_MAX_NUM_PINS * sizeof(char *));
+ ret = sof_parse_token_sets(swidget->scomp, pin_binding, pin_binding_token,
+ token_count, priv->array, le32_to_cpu(priv->size),
+ num_pins, sizeof(char *));
+ if (ret < 0)
+ goto err;
+
+ /* copy pin binding array to swidget only if it is defined in topology */
+ if (pin_binding[0]) {
+ pb = kmemdup(pin_binding, num_pins * sizeof(char *), GFP_KERNEL);
+ if (!pb) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ if (pin_type == SOF_PIN_TYPE_INPUT)
+ swidget->input_pin_binding = pb;
+ else
+ swidget->output_pin_binding = pb;
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < num_pins; i++)
+ kfree(pin_binding[i]);
+
+ return ret;
+}
+
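+/*
+ * Pin-binding strings are first collected into an on-stack array and are
+ * duplicated into swidget->input_pin_binding/output_pin_binding only when
+ * the topology actually defines them (pin_binding[0] is non-NULL), so a
+ * missing binding is not treated as an error. The duplicated strings are
+ * released by sof_free_pin_binding() when the widget is unloaded.
+ */
+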
+static int get_w_no_wname_in_long_name(void *elem, void *object, u32 offset)
+{
+ struct snd_soc_tplg_vendor_value_elem *velem = elem;
+ struct snd_soc_dapm_widget *w = object;
+
+ w->no_wname_in_kcontrol_name = !!le32_to_cpu(velem->value);
+ return 0;
+}
+
+static const struct sof_topology_token dapm_widget_tokens[] = {
+ {SOF_TKN_COMP_NO_WNAME_IN_KCONTROL_NAME, SND_SOC_TPLG_TUPLE_TYPE_BOOL,
+ get_w_no_wname_in_long_name, 0}
+};
+
+/* external widget init - used for any driver specific init */
+static int sof_widget_ready(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dapm_widget *w,
+ struct snd_soc_tplg_dapm_widget *tw)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ const struct sof_ipc_tplg_widget_ops *widget_ops;
+ struct snd_soc_tplg_private *priv = &tw->priv;
+ enum sof_tokens *token_list = NULL;
+ struct snd_sof_widget *swidget;
+ struct snd_sof_dai *dai;
+ int token_list_size = 0;
+ int ret = 0;
+
+ swidget = kzalloc(sizeof(*swidget), GFP_KERNEL);
+ if (!swidget)
+ return -ENOMEM;
+
+ swidget->scomp = scomp;
+ swidget->widget = w;
+ swidget->comp_id = sdev->next_comp_id++;
+ swidget->id = w->id;
+ swidget->pipeline_id = index;
+ swidget->private = NULL;
+ mutex_init(&swidget->setup_mutex);
+
+ ida_init(&swidget->output_queue_ida);
+ ida_init(&swidget->input_queue_ida);
+
+ ret = sof_parse_tokens(scomp, w, dapm_widget_tokens, ARRAY_SIZE(dapm_widget_tokens),
+ priv->array, le32_to_cpu(priv->size));
+ if (ret < 0) {
+ dev_err(scomp->dev, "failed to parse dapm widget tokens for %s\n",
+ w->name);
+ goto widget_free;
+ }
+
+ ret = sof_parse_tokens(scomp, swidget, comp_pin_tokens,
+ ARRAY_SIZE(comp_pin_tokens), priv->array,
+ le32_to_cpu(priv->size));
+ if (ret < 0) {
+ dev_err(scomp->dev, "failed to parse component pin tokens for %s\n",
+ w->name);
+ goto widget_free;
+ }
+
+ if (swidget->num_input_pins > SOF_WIDGET_MAX_NUM_PINS ||
+ swidget->num_output_pins > SOF_WIDGET_MAX_NUM_PINS) {
+ dev_err(scomp->dev, "invalid pins for %s: [input: %d, output: %d]\n",
+ swidget->widget->name, swidget->num_input_pins, swidget->num_output_pins);
+ ret = -EINVAL;
+ goto widget_free;
+ }
+
+ if (swidget->num_input_pins > 1) {
+ ret = sof_parse_pin_binding(swidget, priv, SOF_PIN_TYPE_INPUT);
+ /* on parsing error, pin binding is not allocated, nothing to free. */
+ if (ret < 0) {
+ dev_err(scomp->dev, "failed to parse input pin binding for %s\n",
+ w->name);
+ goto widget_free;
+ }
+ }
+
+ if (swidget->num_output_pins > 1) {
+ ret = sof_parse_pin_binding(swidget, priv, SOF_PIN_TYPE_OUTPUT);
+ /* on parsing error, pin binding is not allocated, nothing to free. */
+ if (ret < 0) {
+ dev_err(scomp->dev, "failed to parse output pin binding for %s\n",
+ w->name);
+ goto widget_free;
+ }
+ }
+
+ dev_dbg(scomp->dev,
+ "tplg: widget %d (%s) is ready [type: %d, pipe: %d, pins: %d / %d, stream: %s]\n",
+ swidget->comp_id, w->name, swidget->id, index,
+ swidget->num_input_pins, swidget->num_output_pins,
+ strnlen(w->sname, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) > 0 ? w->sname : "none");
+
+ widget_ops = tplg_ops ? tplg_ops->widget : NULL;
+ if (widget_ops) {
+ token_list = widget_ops[w->id].token_list;
+ token_list_size = widget_ops[w->id].token_list_size;
+ }
+
+ /* handle any special case widgets */
+ switch (w->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ dai = kzalloc(sizeof(*dai), GFP_KERNEL);
+ if (!dai) {
+ ret = -ENOMEM;
+ goto widget_free;
+ }
+
+ ret = sof_widget_parse_tokens(scomp, swidget, tw, token_list, token_list_size);
+ if (!ret)
+ ret = sof_connect_dai_widget(scomp, w, tw, dai);
+ if (ret < 0) {
+ kfree(dai);
+ break;
+ }
+ list_add(&dai->list, &sdev->dai_list);
+ swidget->private = dai;
+ break;
+ case snd_soc_dapm_effect:
+ /* check we have some tokens - we need at least process type */
+ if (le32_to_cpu(tw->priv.size) == 0) {
+ dev_err(scomp->dev, "error: process tokens not found\n");
+ ret = -EINVAL;
+ break;
+ }
+ ret = sof_widget_parse_tokens(scomp, swidget, tw, token_list, token_list_size);
+ break;
+ case snd_soc_dapm_pga:
+ if (!le32_to_cpu(tw->num_kcontrols)) {
+ dev_err(scomp->dev, "invalid kcontrol count %d for volume\n",
+ tw->num_kcontrols);
+ ret = -EINVAL;
+ break;
+ }
+
+ fallthrough;
+ case snd_soc_dapm_mixer:
+ case snd_soc_dapm_buffer:
+ case snd_soc_dapm_scheduler:
+ case snd_soc_dapm_aif_out:
+ case snd_soc_dapm_aif_in:
+ case snd_soc_dapm_src:
+ case snd_soc_dapm_asrc:
+ case snd_soc_dapm_siggen:
+ case snd_soc_dapm_mux:
+ case snd_soc_dapm_demux:
+ ret = sof_widget_parse_tokens(scomp, swidget, tw, token_list, token_list_size);
+ break;
+ case snd_soc_dapm_switch:
+ case snd_soc_dapm_dai_link:
+ case snd_soc_dapm_kcontrol:
+ default:
+ dev_dbg(scomp->dev, "widget type %d name %s not handled\n", swidget->id, tw->name);
+ break;
+ }
+
+ /* check token parsing reply */
+ if (ret < 0) {
+ dev_err(scomp->dev,
+ "error: failed to add widget id %d type %d name : %s stream %s\n",
+ tw->shift, swidget->id, tw->name,
+ strnlen(tw->sname, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) > 0
+ ? tw->sname : "none");
+ goto widget_free;
+ }
+
+ if (sof_debug_check_flag(SOF_DBG_DISABLE_MULTICORE)) {
+ swidget->core = SOF_DSP_PRIMARY_CORE;
+ } else {
+ int core = sof_get_token_value(SOF_TKN_COMP_CORE_ID, swidget->tuples,
+ swidget->num_tuples);
+
+ if (core >= 0)
+ swidget->core = core;
+ }
+
+ /* bind widget to external event */
+ if (tw->event_type) {
+ if (widget_ops && widget_ops[w->id].bind_event) {
+ ret = widget_ops[w->id].bind_event(scomp, swidget,
+ le16_to_cpu(tw->event_type));
+ if (ret) {
+ dev_err(scomp->dev, "widget event binding failed for %s\n",
+ swidget->widget->name);
+ goto free;
+ }
+ }
+ }
+
+ /* create and add pipeline for scheduler type widgets */
+ if (w->id == snd_soc_dapm_scheduler) {
+ struct snd_sof_pipeline *spipe;
+
+ spipe = kzalloc(sizeof(*spipe), GFP_KERNEL);
+ if (!spipe) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ spipe->pipe_widget = swidget;
+ swidget->spipe = spipe;
+ list_add(&spipe->list, &sdev->pipeline_list);
+ }
+
+ w->dobj.private = swidget;
+ list_add(&swidget->list, &sdev->widget_list);
+ return ret;
+free:
+ kfree(swidget->private);
+ kfree(swidget->tuples);
+widget_free:
+ kfree(swidget);
+ return ret;
+}
+
+static int sof_route_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_route *sroute;
+
+ sroute = dobj->private;
+ if (!sroute)
+ return 0;
+
+ /* free sroute and its private data */
+ kfree(sroute->private);
+ list_del(&sroute->list);
+ kfree(sroute);
+
+ return 0;
+}
+
+static int sof_widget_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ const struct sof_ipc_tplg_widget_ops *widget_ops;
+ const struct snd_kcontrol_new *kc;
+ struct snd_soc_dapm_widget *widget;
+ struct snd_sof_control *scontrol;
+ struct snd_sof_widget *swidget;
+ struct soc_mixer_control *sm;
+ struct soc_bytes_ext *sbe;
+ struct snd_sof_dai *dai;
+ struct soc_enum *se;
+ int i;
+
+ swidget = dobj->private;
+ if (!swidget)
+ return 0;
+
+ widget = swidget->widget;
+
+ switch (swidget->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ dai = swidget->private;
+
+ if (dai)
+ list_del(&dai->list);
+
+ sof_disconnect_dai_widget(scomp, widget);
+
+ break;
+ case snd_soc_dapm_scheduler:
+ {
+ struct snd_sof_pipeline *spipe = swidget->spipe;
+
+ list_del(&spipe->list);
+ kfree(spipe);
+ swidget->spipe = NULL;
+ break;
+ }
+ default:
+ break;
+ }
+ for (i = 0; i < widget->num_kcontrols; i++) {
+ kc = &widget->kcontrol_news[i];
+ switch (widget->dobj.widget.kcontrol_type[i]) {
+ case SND_SOC_TPLG_TYPE_MIXER:
+ sm = (struct soc_mixer_control *)kc->private_value;
+ scontrol = sm->dobj.private;
+ if (sm->max > 1)
+ kfree(scontrol->volume_table);
+ break;
+ case SND_SOC_TPLG_TYPE_ENUM:
+ se = (struct soc_enum *)kc->private_value;
+ scontrol = se->dobj.private;
+ break;
+ case SND_SOC_TPLG_TYPE_BYTES:
+ sbe = (struct soc_bytes_ext *)kc->private_value;
+ scontrol = sbe->dobj.private;
+ break;
+ default:
+ dev_warn(scomp->dev, "unsupported kcontrol_type\n");
+ goto out;
+ }
+ kfree(scontrol->ipc_control_data);
+ list_del(&scontrol->list);
+ kfree(scontrol->name);
+ kfree(scontrol);
+ }
+
+out:
+ /* free IPC related data */
+ widget_ops = tplg_ops ? tplg_ops->widget : NULL;
+ if (widget_ops && widget_ops[swidget->id].ipc_free)
+ widget_ops[swidget->id].ipc_free(swidget);
+
+ ida_destroy(&swidget->output_queue_ida);
+ ida_destroy(&swidget->input_queue_ida);
+
+ sof_free_pin_binding(swidget, SOF_PIN_TYPE_INPUT);
+ sof_free_pin_binding(swidget, SOF_PIN_TYPE_OUTPUT);
+
+ kfree(swidget->tuples);
+
+ /* remove and free swidget object */
+ list_del(&swidget->list);
+ kfree(swidget);
+
+ return 0;
+}
+
+/*
+ * DAI HW configuration.
+ */
+
+/* FE DAI - used for any driver specific init */
+static int sof_dai_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_driver *dai_drv,
+ struct snd_soc_tplg_pcm *pcm, struct snd_soc_dai *dai)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_pcm_ops *ipc_pcm_ops = sof_ipc_get_ops(sdev, pcm);
+ struct snd_soc_tplg_stream_caps *caps;
+ struct snd_soc_tplg_private *private = &pcm->priv;
+ struct snd_sof_pcm *spcm;
+ int stream;
+ int ret;
+
+ /* nothing to do for BEs atm */
+ if (!pcm)
+ return 0;
+
+ spcm = kzalloc(sizeof(*spcm), GFP_KERNEL);
+ if (!spcm)
+ return -ENOMEM;
+
+ spcm->scomp = scomp;
+
+ for_each_pcm_streams(stream) {
+ spcm->stream[stream].comp_id = COMP_ID_UNASSIGNED;
+ if (pcm->compress)
+ snd_sof_compr_init_elapsed_work(&spcm->stream[stream].period_elapsed_work);
+ else
+ snd_sof_pcm_init_elapsed_work(&spcm->stream[stream].period_elapsed_work);
+ }
+
+ spcm->pcm = *pcm;
+ dev_dbg(scomp->dev, "tplg: load pcm %s\n", pcm->dai_name);
+
+ /* perform pcm set op */
+ if (ipc_pcm_ops && ipc_pcm_ops->pcm_setup) {
+ ret = ipc_pcm_ops->pcm_setup(sdev, spcm);
+ if (ret < 0) {
+ kfree(spcm);
+ return ret;
+ }
+ }
+
+ dai_drv->dobj.private = spcm;
+ list_add(&spcm->list, &sdev->pcm_list);
+
+ ret = sof_parse_tokens(scomp, spcm, stream_tokens,
+ ARRAY_SIZE(stream_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret) {
+ dev_err(scomp->dev, "error: parse stream tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ /* do we need to allocate playback PCM DMA pages */
+ if (!spcm->pcm.playback)
+ goto capture;
+
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+
+ caps = &spcm->pcm.caps[stream];
+
+ /* allocate playback page table buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
+ PAGE_SIZE, &spcm->stream[stream].page_table);
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: can't alloc page table for %s %d\n",
+ caps->name, ret);
+
+ return ret;
+ }
+
+ /* bind pcm to host comp */
+ ret = spcm_bind(scomp, spcm, stream);
+ if (ret) {
+ dev_err(scomp->dev,
+ "error: can't bind pcm to host\n");
+ goto free_playback_tables;
+ }
+
+capture:
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+
+ /* do we need to allocate capture PCM DMA pages */
+ if (!spcm->pcm.capture)
+ return ret;
+
+ caps = &spcm->pcm.caps[stream];
+
+ /* allocate capture page table buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
+ PAGE_SIZE, &spcm->stream[stream].page_table);
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: can't alloc page table for %s %d\n",
+ caps->name, ret);
+ goto free_playback_tables;
+ }
+
+ /* bind pcm to host comp */
+ ret = spcm_bind(scomp, spcm, stream);
+ if (ret) {
+ dev_err(scomp->dev,
+ "error: can't bind pcm to host\n");
+ snd_dma_free_pages(&spcm->stream[stream].page_table);
+ goto free_playback_tables;
+ }
+
+ return ret;
+
+free_playback_tables:
+ if (spcm->pcm.playback)
+ snd_dma_free_pages(&spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].page_table);
+
+ return ret;
+}
+
+static int sof_dai_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_pcm_ops *ipc_pcm_ops = sof_ipc_get_ops(sdev, pcm);
+ struct snd_sof_pcm *spcm = dobj->private;
+
+ /* free PCM DMA pages */
+ if (spcm->pcm.playback)
+ snd_dma_free_pages(&spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].page_table);
+
+ if (spcm->pcm.capture)
+ snd_dma_free_pages(&spcm->stream[SNDRV_PCM_STREAM_CAPTURE].page_table);
+
+ /* perform pcm free op */
+ if (ipc_pcm_ops && ipc_pcm_ops->pcm_free)
+ ipc_pcm_ops->pcm_free(sdev, spcm);
+
+ /* remove from list and free spcm */
+ list_del(&spcm->list);
+ kfree(spcm);
+
+ return 0;
+}
+
+static const struct sof_topology_token common_dai_link_tokens[] = {
+ {SOF_TKN_DAI_TYPE, SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_dai_type,
+ offsetof(struct snd_sof_dai_link, type)},
+};
+
+/* DAI link - used for any driver specific init */
+static int sof_link_load(struct snd_soc_component *scomp, int index, struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ const struct sof_token_info *token_list;
+ struct snd_sof_dai_link *slink;
+ u32 token_id = 0;
+ int num_tuples = 0;
+ int ret, num_sets;
+
+ if (!link->platforms) {
+ dev_err(scomp->dev, "error: no platforms\n");
+ return -EINVAL;
+ }
+ link->platforms->name = dev_name(scomp->dev);
+
+ if (tplg_ops && tplg_ops->link_setup) {
+ ret = tplg_ops->link_setup(sdev, link);
+ if (ret < 0)
+ return ret;
+ }
+
+	/* Set nonatomic property for FE dai links as their trigger action involves IPCs */
+ if (!link->no_pcm) {
+ link->nonatomic = true;
+ return 0;
+ }
+
+ /* check we have some tokens - we need at least DAI type */
+ if (le32_to_cpu(private->size) == 0) {
+ dev_err(scomp->dev, "error: expected tokens for DAI, none found\n");
+ return -EINVAL;
+ }
+
+ slink = kzalloc(sizeof(*slink), GFP_KERNEL);
+ if (!slink)
+ return -ENOMEM;
+
+ slink->num_hw_configs = le32_to_cpu(cfg->num_hw_configs);
+ slink->hw_configs = kmemdup(cfg->hw_config,
+ sizeof(*slink->hw_configs) * slink->num_hw_configs,
+ GFP_KERNEL);
+ if (!slink->hw_configs) {
+ kfree(slink);
+ return -ENOMEM;
+ }
+
+ slink->default_hw_cfg_id = le32_to_cpu(cfg->default_hw_config_id);
+ slink->link = link;
+
+ dev_dbg(scomp->dev, "tplg: %d hw_configs found, default id: %d for dai link %s!\n",
+ slink->num_hw_configs, slink->default_hw_cfg_id, link->name);
+
+ ret = sof_parse_tokens(scomp, slink, common_dai_link_tokens,
+ ARRAY_SIZE(common_dai_link_tokens),
+ private->array, le32_to_cpu(private->size));
+ if (ret < 0) {
+		dev_err(scomp->dev, "Failed to parse common DAI link tokens\n");
+ kfree(slink->hw_configs);
+ kfree(slink);
+ return ret;
+ }
+
+ token_list = tplg_ops ? tplg_ops->token_list : NULL;
+ if (!token_list)
+ goto out;
+
+ /* calculate size of tuples array */
+ num_tuples += token_list[SOF_DAI_LINK_TOKENS].count;
+ num_sets = slink->num_hw_configs;
+ switch (slink->type) {
+ case SOF_DAI_INTEL_SSP:
+ token_id = SOF_SSP_TOKENS;
+ num_tuples += token_list[SOF_SSP_TOKENS].count * slink->num_hw_configs;
+ break;
+ case SOF_DAI_INTEL_DMIC:
+ token_id = SOF_DMIC_TOKENS;
+ num_tuples += token_list[SOF_DMIC_TOKENS].count;
+
+ /* Allocate memory for max PDM controllers */
+ num_tuples += token_list[SOF_DMIC_PDM_TOKENS].count * SOF_DAI_INTEL_DMIC_NUM_CTRL;
+ break;
+ case SOF_DAI_INTEL_HDA:
+ token_id = SOF_HDA_TOKENS;
+ num_tuples += token_list[SOF_HDA_TOKENS].count;
+ break;
+ case SOF_DAI_INTEL_ALH:
+ token_id = SOF_ALH_TOKENS;
+ num_tuples += token_list[SOF_ALH_TOKENS].count;
+ break;
+ case SOF_DAI_IMX_SAI:
+ token_id = SOF_SAI_TOKENS;
+ num_tuples += token_list[SOF_SAI_TOKENS].count;
+ break;
+ case SOF_DAI_IMX_ESAI:
+ token_id = SOF_ESAI_TOKENS;
+ num_tuples += token_list[SOF_ESAI_TOKENS].count;
+ break;
+ case SOF_DAI_MEDIATEK_AFE:
+ token_id = SOF_AFE_TOKENS;
+ num_tuples += token_list[SOF_AFE_TOKENS].count;
+ break;
+ case SOF_DAI_AMD_DMIC:
+ token_id = SOF_ACPDMIC_TOKENS;
+ num_tuples += token_list[SOF_ACPDMIC_TOKENS].count;
+ break;
+ case SOF_DAI_AMD_SP:
+ case SOF_DAI_AMD_HS:
+ case SOF_DAI_AMD_SP_VIRTUAL:
+ case SOF_DAI_AMD_HS_VIRTUAL:
+ token_id = SOF_ACPI2S_TOKENS;
+ num_tuples += token_list[SOF_ACPI2S_TOKENS].count;
+ break;
+ default:
+ break;
+ }
+
+ /* allocate memory for tuples array */
+ slink->tuples = kcalloc(num_tuples, sizeof(*slink->tuples), GFP_KERNEL);
+ if (!slink->tuples) {
+ kfree(slink->hw_configs);
+ kfree(slink);
+ return -ENOMEM;
+ }
+
+ if (token_list[SOF_DAI_LINK_TOKENS].tokens) {
+ /* parse one set of DAI link tokens */
+ ret = sof_copy_tuples(sdev, private->array, le32_to_cpu(private->size),
+ SOF_DAI_LINK_TOKENS, 1, slink->tuples,
+ num_tuples, &slink->num_tuples);
+ if (ret < 0) {
+ dev_err(scomp->dev, "failed to parse %s for dai link %s\n",
+ token_list[SOF_DAI_LINK_TOKENS].name, link->name);
+ goto err;
+ }
+ }
+
+ /* nothing more to do if there are no DAI type-specific tokens defined */
+ if (!token_id || !token_list[token_id].tokens)
+ goto out;
+
+ /* parse "num_sets" sets of DAI-specific tokens */
+ ret = sof_copy_tuples(sdev, private->array, le32_to_cpu(private->size),
+ token_id, num_sets, slink->tuples, num_tuples, &slink->num_tuples);
+ if (ret < 0) {
+ dev_err(scomp->dev, "failed to parse %s for dai link %s\n",
+ token_list[token_id].name, link->name);
+ goto err;
+ }
+
+ /* for DMIC, also parse all sets of DMIC PDM tokens based on active PDM count */
+ if (token_id == SOF_DMIC_TOKENS) {
+ num_sets = sof_get_token_value(SOF_TKN_INTEL_DMIC_NUM_PDM_ACTIVE,
+ slink->tuples, slink->num_tuples);
+
+ if (num_sets < 0) {
+ dev_err(sdev->dev, "Invalid active PDM count for %s\n", link->name);
+ ret = num_sets;
+ goto err;
+ }
+
+ ret = sof_copy_tuples(sdev, private->array, le32_to_cpu(private->size),
+ SOF_DMIC_PDM_TOKENS, num_sets, slink->tuples,
+ num_tuples, &slink->num_tuples);
+ if (ret < 0) {
+ dev_err(scomp->dev, "failed to parse %s for dai link %s\n",
+ token_list[SOF_DMIC_PDM_TOKENS].name, link->name);
+ goto err;
+ }
+ }
+out:
+ link->dobj.private = slink;
+ list_add(&slink->list, &sdev->dai_link_list);
+
+ return 0;
+
+err:
+ kfree(slink->tuples);
+ kfree(slink->hw_configs);
+ kfree(slink);
+
+ return ret;
+}
+
+static int sof_link_unload(struct snd_soc_component *scomp, struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_dai_link *slink = dobj->private;
+
+ if (!slink)
+ return 0;
+
+ kfree(slink->tuples);
+ list_del(&slink->list);
+ kfree(slink->hw_configs);
+ kfree(slink);
+ dobj->private = NULL;
+
+ return 0;
+}
+
+/* DAPM route - used for any driver specific init */
+static int sof_route_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dapm_route *route)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_widget *source_swidget, *sink_swidget;
+ struct snd_soc_dobj *dobj = &route->dobj;
+ struct snd_sof_route *sroute;
+ int ret = 0;
+
+ /* allocate memory for sroute and connect */
+ sroute = kzalloc(sizeof(*sroute), GFP_KERNEL);
+ if (!sroute)
+ return -ENOMEM;
+
+ sroute->scomp = scomp;
+ dev_dbg(scomp->dev, "sink %s control %s source %s\n",
+ route->sink, route->control ? route->control : "none",
+ route->source);
+
+ /* source component */
+ source_swidget = snd_sof_find_swidget(scomp, (char *)route->source);
+ if (!source_swidget) {
+ dev_err(scomp->dev, "error: source %s not found\n",
+ route->source);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Virtual widgets of type output/out_drv may be added in topology
+ * for compatibility. These are not handled by the FW.
+ * So, don't send routes whose source/sink widget is of such types
+ * to the DSP.
+ */
+ if (source_swidget->id == snd_soc_dapm_out_drv ||
+ source_swidget->id == snd_soc_dapm_output)
+ goto err;
+
+ /* sink component */
+ sink_swidget = snd_sof_find_swidget(scomp, (char *)route->sink);
+ if (!sink_swidget) {
+ dev_err(scomp->dev, "error: sink %s not found\n",
+ route->sink);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Don't send routes whose sink widget is of type
+ * output or out_drv to the DSP
+ */
+ if (sink_swidget->id == snd_soc_dapm_out_drv ||
+ sink_swidget->id == snd_soc_dapm_output)
+ goto err;
+
+ sroute->route = route;
+ dobj->private = sroute;
+ sroute->src_widget = source_swidget;
+ sroute->sink_widget = sink_swidget;
+
+ /* add route to route list */
+ list_add(&sroute->list, &sdev->route_list);
+
+ return 0;
+err:
+ kfree(sroute);
+ return ret;
+}
+
+/**
+ * sof_set_widget_pipeline - Set pipeline for a component
+ * @sdev: pointer to struct snd_sof_dev
+ * @spipe: pointer to struct snd_sof_pipeline
+ * @swidget: pointer to struct snd_sof_widget that has the same pipeline ID as @pipe_widget
+ *
+ * The function checks if @swidget is associated with any volatile controls. If so, marking
+ * it as a dynamic pipeline widget is disallowed.
+ *
+ * Return: 0 if successful, -EINVAL on error.
+ */
+static int sof_set_widget_pipeline(struct snd_sof_dev *sdev, struct snd_sof_pipeline *spipe,
+ struct snd_sof_widget *swidget)
+{
+ struct snd_sof_widget *pipe_widget = spipe->pipe_widget;
+ struct snd_sof_control *scontrol;
+
+ if (pipe_widget->dynamic_pipeline_widget) {
+ /* dynamic widgets cannot have volatile kcontrols */
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list)
+ if (scontrol->comp_id == swidget->comp_id &&
+ (scontrol->access & SNDRV_CTL_ELEM_ACCESS_VOLATILE)) {
+ dev_err(sdev->dev,
+ "error: volatile control found for dynamic widget %s\n",
+ swidget->widget->name);
+ return -EINVAL;
+ }
+ }
+
+	/* set the pipeline and apply the dynamic_pipeline_widget flag */
+ swidget->spipe = spipe;
+ swidget->dynamic_pipeline_widget = pipe_widget->dynamic_pipeline_widget;
+
+ return 0;
+}
+
+/* completion - called at completion of firmware loading */
+static int sof_complete(struct snd_soc_component *scomp)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ const struct sof_ipc_tplg_widget_ops *widget_ops;
+ struct snd_sof_control *scontrol;
+ struct snd_sof_pipeline *spipe;
+ int ret;
+
+ widget_ops = tplg_ops ? tplg_ops->widget : NULL;
+
+ /* first update all control IPC structures based on the IPC version */
+ if (tplg_ops && tplg_ops->control_setup)
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list) {
+ ret = tplg_ops->control_setup(sdev, scontrol);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed updating IPC struct for control %s\n",
+ scontrol->name);
+ return ret;
+ }
+ }
+
+ /* set up the IPC structures for the pipeline widgets */
+ list_for_each_entry(spipe, &sdev->pipeline_list, list) {
+ struct snd_sof_widget *pipe_widget = spipe->pipe_widget;
+ struct snd_sof_widget *swidget;
+
+ pipe_widget->instance_id = -EINVAL;
+
+ /* Update the scheduler widget's IPC structure */
+ if (widget_ops && widget_ops[pipe_widget->id].ipc_setup) {
+ ret = widget_ops[pipe_widget->id].ipc_setup(pipe_widget);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed updating IPC struct for %s\n",
+ pipe_widget->widget->name);
+ return ret;
+ }
+ }
+
+ /* set the pipeline and update the IPC structure for the non scheduler widgets */
+ list_for_each_entry(swidget, &sdev->widget_list, list)
+ if (swidget->widget->id != snd_soc_dapm_scheduler &&
+ swidget->pipeline_id == pipe_widget->pipeline_id) {
+ ret = sof_set_widget_pipeline(sdev, spipe, swidget);
+ if (ret < 0)
+ return ret;
+
+ if (widget_ops && widget_ops[swidget->id].ipc_setup) {
+ ret = widget_ops[swidget->id].ipc_setup(swidget);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "failed updating IPC struct for %s\n",
+ swidget->widget->name);
+ return ret;
+ }
+ }
+ }
+ }
+
+ /* verify topology components loading including dynamic pipelines */
+ if (sof_debug_check_flag(SOF_DBG_VERIFY_TPLG)) {
+ if (tplg_ops && tplg_ops->set_up_all_pipelines &&
+ tplg_ops->tear_down_all_pipelines) {
+ ret = tplg_ops->set_up_all_pipelines(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to set up all topology pipelines: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = tplg_ops->tear_down_all_pipelines(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to tear down topology pipelines: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ /* set up static pipelines */
+ if (tplg_ops && tplg_ops->set_up_all_pipelines)
+ return tplg_ops->set_up_all_pipelines(sdev, false);
+
+ return 0;
+}
+
+/* manifest - optional to inform component of manifest */
+static int sof_manifest(struct snd_soc_component *scomp, int index,
+ struct snd_soc_tplg_manifest *man)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+
+ if (tplg_ops && tplg_ops->parse_manifest)
+ return tplg_ops->parse_manifest(scomp, index, man);
+
+ return 0;
+}
+
+/* vendor specific kcontrol handlers available for binding */
+static const struct snd_soc_tplg_kcontrol_ops sof_io_ops[] = {
+ {SOF_TPLG_KCTL_VOL_ID, snd_sof_volume_get, snd_sof_volume_put},
+ {SOF_TPLG_KCTL_BYTES_ID, snd_sof_bytes_get, snd_sof_bytes_put},
+ {SOF_TPLG_KCTL_ENUM_ID, snd_sof_enum_get, snd_sof_enum_put},
+ {SOF_TPLG_KCTL_SWITCH_ID, snd_sof_switch_get, snd_sof_switch_put},
+};
+
+/* vendor specific bytes ext handlers available for binding */
+static const struct snd_soc_tplg_bytes_ext_ops sof_bytes_ext_ops[] = {
+ {SOF_TPLG_KCTL_BYTES_ID, snd_sof_bytes_ext_get, snd_sof_bytes_ext_put},
+ {SOF_TPLG_KCTL_BYTES_VOLATILE_RO, snd_sof_bytes_ext_volatile_get},
+};
+
+static struct snd_soc_tplg_ops sof_tplg_ops = {
+ /* external kcontrol init - used for any driver specific init */
+ .control_load = sof_control_load,
+ .control_unload = sof_control_unload,
+
+	/* external DAPM route init - used for any driver specific init */
+ .dapm_route_load = sof_route_load,
+ .dapm_route_unload = sof_route_unload,
+
+ /* external widget init - used for any driver specific init */
+ /* .widget_load is not currently used */
+ .widget_ready = sof_widget_ready,
+ .widget_unload = sof_widget_unload,
+
+ /* FE DAI - used for any driver specific init */
+ .dai_load = sof_dai_load,
+ .dai_unload = sof_dai_unload,
+
+ /* DAI link - used for any driver specific init */
+ .link_load = sof_link_load,
+ .link_unload = sof_link_unload,
+
+ /* completion - called at completion of firmware loading */
+ .complete = sof_complete,
+
+ /* manifest - optional to inform component of manifest */
+ .manifest = sof_manifest,
+
+ /* vendor specific kcontrol handlers available for binding */
+ .io_ops = sof_io_ops,
+ .io_ops_count = ARRAY_SIZE(sof_io_ops),
+
+ /* vendor specific bytes ext handlers available for binding */
+ .bytes_ext_ops = sof_bytes_ext_ops,
+ .bytes_ext_ops_count = ARRAY_SIZE(sof_bytes_ext_ops),
+};
+
+static int snd_sof_dspless_kcontrol(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return 0;
+}
+
+static const struct snd_soc_tplg_kcontrol_ops sof_dspless_io_ops[] = {
+ {SOF_TPLG_KCTL_VOL_ID, snd_sof_dspless_kcontrol, snd_sof_dspless_kcontrol},
+ {SOF_TPLG_KCTL_BYTES_ID, snd_sof_dspless_kcontrol, snd_sof_dspless_kcontrol},
+ {SOF_TPLG_KCTL_ENUM_ID, snd_sof_dspless_kcontrol, snd_sof_dspless_kcontrol},
+ {SOF_TPLG_KCTL_SWITCH_ID, snd_sof_dspless_kcontrol, snd_sof_dspless_kcontrol},
+};
+
+static int snd_sof_dspless_bytes_ext_get(struct snd_kcontrol *kcontrol,
+ unsigned int __user *binary_data,
+ unsigned int size)
+{
+ return 0;
+}
+
+static int snd_sof_dspless_bytes_ext_put(struct snd_kcontrol *kcontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size)
+{
+ return 0;
+}
+
+static const struct snd_soc_tplg_bytes_ext_ops sof_dspless_bytes_ext_ops[] = {
+ {SOF_TPLG_KCTL_BYTES_ID, snd_sof_dspless_bytes_ext_get, snd_sof_dspless_bytes_ext_put},
+ {SOF_TPLG_KCTL_BYTES_VOLATILE_RO, snd_sof_dspless_bytes_ext_get},
+};
+
+/* external widget init - used for any driver specific init */
+static int sof_dspless_widget_ready(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dapm_widget *w,
+ struct snd_soc_tplg_dapm_widget *tw)
+{
+ if (WIDGET_IS_DAI(w->id)) {
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_widget *swidget;
+ struct snd_sof_dai dai;
+ int ret;
+
+ swidget = kzalloc(sizeof(*swidget), GFP_KERNEL);
+ if (!swidget)
+ return -ENOMEM;
+
+ memset(&dai, 0, sizeof(dai));
+
+ ret = sof_connect_dai_widget(scomp, w, tw, &dai);
+ if (ret) {
+ kfree(swidget);
+ return ret;
+ }
+
+ swidget->scomp = scomp;
+ swidget->widget = w;
+ mutex_init(&swidget->setup_mutex);
+ w->dobj.private = swidget;
+ list_add(&swidget->list, &sdev->widget_list);
+ }
+
+ return 0;
+}
+
+static int sof_dspless_widget_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_soc_dapm_widget *w = container_of(dobj, struct snd_soc_dapm_widget, dobj);
+
+ if (WIDGET_IS_DAI(w->id)) {
+ struct snd_sof_widget *swidget = dobj->private;
+
+ sof_disconnect_dai_widget(scomp, w);
+
+ if (!swidget)
+ return 0;
+
+ /* remove and free swidget object */
+ list_del(&swidget->list);
+ kfree(swidget);
+ }
+
+ return 0;
+}
+
+static int sof_dspless_link_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg)
+{
+ link->platforms->name = dev_name(scomp->dev);
+
+ /* Set nonatomic property for FE dai links for FE-BE compatibility */
+ if (!link->no_pcm)
+ link->nonatomic = true;
+
+ return 0;
+}
+
+static struct snd_soc_tplg_ops sof_dspless_tplg_ops = {
+ /* external widget init - used for any driver specific init */
+ .widget_ready = sof_dspless_widget_ready,
+ .widget_unload = sof_dspless_widget_unload,
+
+ /* FE DAI - used for any driver specific init */
+ .dai_load = sof_dai_load,
+ .dai_unload = sof_dai_unload,
+
+ /* DAI link - used for any driver specific init */
+ .link_load = sof_dspless_link_load,
+
+ /* vendor specific kcontrol handlers available for binding */
+ .io_ops = sof_dspless_io_ops,
+ .io_ops_count = ARRAY_SIZE(sof_dspless_io_ops),
+
+ /* vendor specific bytes ext handlers available for binding */
+ .bytes_ext_ops = sof_dspless_bytes_ext_ops,
+ .bytes_ext_ops_count = ARRAY_SIZE(sof_dspless_bytes_ext_ops),
+};
+
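+/*
+ * Request the topology file from user space and hand it to the ASoC topology
+ * core. The ops table passed to snd_soc_tplg_component_load() depends on
+ * sdev->dspless_mode_selected: in DSP-less mode only the reduced
+ * sof_dspless_tplg_ops above are used. When loading succeeds and a control
+ * requested an LED, snd_ctl_led_request() is called.
+ */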
+int snd_sof_load_topology(struct snd_soc_component *scomp, const char *file)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct firmware *fw;
+ int ret;
+
+ dev_dbg(scomp->dev, "loading topology:%s\n", file);
+
+ ret = request_firmware(&fw, file, scomp->dev);
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: tplg request firmware %s failed err: %d\n",
+ file, ret);
+ dev_err(scomp->dev,
+ "you may need to download the firmware from https://github.com/thesofproject/sof-bin/\n");
+ return ret;
+ }
+
+ if (sdev->dspless_mode_selected)
+ ret = snd_soc_tplg_component_load(scomp, &sof_dspless_tplg_ops, fw);
+ else
+ ret = snd_soc_tplg_component_load(scomp, &sof_tplg_ops, fw);
+
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: tplg component load failed %d\n",
+ ret);
+ ret = -EINVAL;
+ }
+
+ release_firmware(fw);
+
+ if (ret >= 0 && sdev->led_present)
+ ret = snd_ctl_led_request();
+
+ return ret;
+}
+EXPORT_SYMBOL(snd_sof_load_topology);
diff --git a/sound/soc/sof/trace.c b/sound/soc/sof/trace.c
new file mode 100644
index 000000000..b2ab51e52
--- /dev/null
+++ b/sound/soc/sof/trace.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+
+#include "sof-priv.h"
+
+int sof_fw_trace_init(struct snd_sof_dev *sdev)
+{
+ const struct sof_ipc_fw_tracing_ops *fw_tracing = sof_ipc_get_ops(sdev, fw_tracing);
+
+ if (!fw_tracing) {
+ dev_info(sdev->dev, "Firmware tracing is not available\n");
+ sdev->fw_trace_is_supported = false;
+
+ return 0;
+ }
+
+ return fw_tracing->init(sdev);
+}
+
+void sof_fw_trace_free(struct snd_sof_dev *sdev)
+{
+ if (!sdev->fw_trace_is_supported)
+ return;
+
+ if (sdev->ipc->ops->fw_tracing->free)
+ sdev->ipc->ops->fw_tracing->free(sdev);
+}
+
+void sof_fw_trace_fw_crashed(struct snd_sof_dev *sdev)
+{
+ if (!sdev->fw_trace_is_supported)
+ return;
+
+ if (sdev->ipc->ops->fw_tracing->fw_crashed)
+ sdev->ipc->ops->fw_tracing->fw_crashed(sdev);
+}
+
+void sof_fw_trace_suspend(struct snd_sof_dev *sdev, pm_message_t pm_state)
+{
+ if (!sdev->fw_trace_is_supported)
+ return;
+
+ sdev->ipc->ops->fw_tracing->suspend(sdev, pm_state);
+}
+
+int sof_fw_trace_resume(struct snd_sof_dev *sdev)
+{
+ if (!sdev->fw_trace_is_supported)
+ return 0;
+
+ return sdev->ipc->ops->fw_tracing->resume(sdev);
+}
diff --git a/sound/soc/sof/xtensa/Kconfig b/sound/soc/sof/xtensa/Kconfig
new file mode 100644
index 000000000..defd6d3dc
--- /dev/null
+++ b/sound/soc/sof/xtensa/Kconfig
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SND_SOC_SOF_XTENSA
+ tristate
diff --git a/sound/soc/sof/xtensa/Makefile b/sound/soc/sof/xtensa/Makefile
new file mode 100644
index 000000000..b8376ea04
--- /dev/null
+++ b/sound/soc/sof/xtensa/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+snd-sof-xtensa-dsp-objs := core.o
+
+obj-$(CONFIG_SND_SOC_SOF_XTENSA) += snd-sof-xtensa-dsp.o
diff --git a/sound/soc/sof/xtensa/core.c b/sound/soc/sof/xtensa/core.c
new file mode 100644
index 000000000..bebbe3a28
--- /dev/null
+++ b/sound/soc/sof/xtensa/core.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Pan Xiuli <xiuli.pan@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include "../sof-priv.h"
+
+struct xtensa_exception_cause {
+ u32 id;
+ const char *msg;
+ const char *description;
+};
+
+/*
+ * From 4.4.1.5 table 4-64 Exception Causes of Xtensa
+ * Instruction Set Architecture (ISA) Reference Manual
+ */
+static const struct xtensa_exception_cause xtensa_exception_causes[] = {
+ {0, "IllegalInstructionCause", "Illegal instruction"},
+ {1, "SyscallCause", "SYSCALL instruction"},
+ {2, "InstructionFetchErrorCause",
+ "Processor internal physical address or data error during instruction fetch"},
+ {3, "LoadStoreErrorCause",
+ "Processor internal physical address or data error during load or store"},
+ {4, "Level1InterruptCause",
+ "Level-1 interrupt as indicated by set level-1 bits in the INTERRUPT register"},
+ {5, "AllocaCause",
+ "MOVSP instruction, if caller’s registers are not in the register file"},
+ {6, "IntegerDivideByZeroCause",
+ "QUOS, QUOU, REMS, or REMU divisor operand is zero"},
+ {8, "PrivilegedCause",
+	 "Attempt to execute a privileged operation when CRING != 0"},
+ {9, "LoadStoreAlignmentCause", "Load or store to an unaligned address"},
+ {12, "InstrPIFDataErrorCause",
+ "PIF data error during instruction fetch"},
+ {13, "LoadStorePIFDataErrorCause",
+ "Synchronous PIF data error during LoadStore access"},
+ {14, "InstrPIFAddrErrorCause",
+ "PIF address error during instruction fetch"},
+ {15, "LoadStorePIFAddrErrorCause",
+ "Synchronous PIF address error during LoadStore access"},
+ {16, "InstTLBMissCause", "Error during Instruction TLB refill"},
+ {17, "InstTLBMultiHitCause",
+ "Multiple instruction TLB entries matched"},
+ {18, "InstFetchPrivilegeCause",
+ "An instruction fetch referenced a virtual address at a ring level less than CRING"},
+ {20, "InstFetchProhibitedCause",
+ "An instruction fetch referenced a page mapped with an attribute that does not permit instruction fetch"},
+ {24, "LoadStoreTLBMissCause",
+ "Error during TLB refill for a load or store"},
+ {25, "LoadStoreTLBMultiHitCause",
+ "Multiple TLB entries matched for a load or store"},
+ {26, "LoadStorePrivilegeCause",
+ "A load or store referenced a virtual address at a ring level less than CRING"},
+ {28, "LoadProhibitedCause",
+ "A load referenced a page mapped with an attribute that does not permit loads"},
+ {32, "Coprocessor0Disabled",
+ "Coprocessor 0 instruction when cp0 disabled"},
+ {33, "Coprocessor1Disabled",
+ "Coprocessor 1 instruction when cp1 disabled"},
+ {34, "Coprocessor2Disabled",
+ "Coprocessor 2 instruction when cp2 disabled"},
+ {35, "Coprocessor3Disabled",
+ "Coprocessor 3 instruction when cp3 disabled"},
+ {36, "Coprocessor4Disabled",
+ "Coprocessor 4 instruction when cp4 disabled"},
+ {37, "Coprocessor5Disabled",
+ "Coprocessor 5 instruction when cp5 disabled"},
+ {38, "Coprocessor6Disabled",
+ "Coprocessor 6 instruction when cp6 disabled"},
+ {39, "Coprocessor7Disabled",
+ "Coprocessor 7 instruction when cp7 disabled"},
+};
+
+/* only Xtensa oops handling is needed at the moment */
+static void xtensa_dsp_oops(struct snd_sof_dev *sdev, const char *level, void *oops)
+{
+ struct sof_ipc_dsp_oops_xtensa *xoops = oops;
+ int i;
+
+ dev_printk(level, sdev->dev, "error: DSP Firmware Oops\n");
+ for (i = 0; i < ARRAY_SIZE(xtensa_exception_causes); i++) {
+ if (xtensa_exception_causes[i].id == xoops->exccause) {
+ dev_printk(level, sdev->dev,
+ "error: Exception Cause: %s, %s\n",
+ xtensa_exception_causes[i].msg,
+ xtensa_exception_causes[i].description);
+ }
+ }
+ dev_printk(level, sdev->dev,
+ "EXCCAUSE 0x%8.8x EXCVADDR 0x%8.8x PS 0x%8.8x SAR 0x%8.8x\n",
+ xoops->exccause, xoops->excvaddr, xoops->ps, xoops->sar);
+ dev_printk(level, sdev->dev,
+ "EPC1 0x%8.8x EPC2 0x%8.8x EPC3 0x%8.8x EPC4 0x%8.8x\n",
+ xoops->epc1, xoops->epc2, xoops->epc3, xoops->epc4);
+ dev_printk(level, sdev->dev,
+ "EPC5 0x%8.8x EPC6 0x%8.8x EPC7 0x%8.8x DEPC 0x%8.8x\n",
+ xoops->epc5, xoops->epc6, xoops->epc7, xoops->depc);
+ dev_printk(level, sdev->dev,
+ "EPS2 0x%8.8x EPS3 0x%8.8x EPS4 0x%8.8x EPS5 0x%8.8x\n",
+ xoops->eps2, xoops->eps3, xoops->eps4, xoops->eps5);
+ dev_printk(level, sdev->dev,
+ "EPS6 0x%8.8x EPS7 0x%8.8x INTENABL 0x%8.8x INTERRU 0x%8.8x\n",
+ xoops->eps6, xoops->eps7, xoops->intenable, xoops->interrupt);
+}
+
+static void xtensa_stack(struct snd_sof_dev *sdev, const char *level, void *oops,
+ u32 *stack, u32 stack_words)
+{
+ struct sof_ipc_dsp_oops_xtensa *xoops = oops;
+ u32 stack_ptr = xoops->plat_hdr.stackptr;
+ /* 4 words * 8 hex chars + 3 separating spaces + 1 terminating NUL */
+ unsigned char buf[4 * 8 + 3 + 1];
+ int i;
+
+ dev_printk(level, sdev->dev, "stack dump from 0x%8.8x\n", stack_ptr);
+
+ /*
+ * example output:
+ * 0x0049fbb0: 8000f2d0 0049fc00 6f6c6c61 00632e63
+ */
+ for (i = 0; i < stack_words; i += 4) {
+ hex_dump_to_buffer(stack + i, 16, 16, 4,
+ buf, sizeof(buf), false);
+ dev_printk(level, sdev->dev, "0x%08x: %s\n", stack_ptr + i * 4, buf);
+ }
+}
+
+const struct dsp_arch_ops sof_xtensa_arch_ops = {
+ .dsp_oops = xtensa_dsp_oops,
+ .dsp_stack = xtensa_stack,
+};
+EXPORT_SYMBOL_NS(sof_xtensa_arch_ops, SND_SOC_SOF_XTENSA);
+
+MODULE_DESCRIPTION("SOF Xtensa DSP support");
+MODULE_LICENSE("Dual BSD/GPL");
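
Because the ops table is exported in the SND_SOC_SOF_XTENSA symbol namespace, a platform driver that wants these oops and stack handlers must select SND_SOC_SOF_XTENSA in its Kconfig entry, import the namespace, and point its DSP ops at the exported table. The sketch below is illustrative only: my_platform_ops is hypothetical, and it assumes the core's struct snd_sof_dsp_ops carries a dsp_arch_ops pointer as the hook used to reach these handlers.

/*
 * Illustrative only: wiring the exported Xtensa handlers into a
 * hypothetical platform driver. my_platform_ops is made up; the
 * dsp_arch_ops member of struct snd_sof_dsp_ops is an assumption here.
 */
#include "../sof-priv.h"

static struct snd_sof_dsp_ops my_platform_ops = {
	/* ...platform-specific probe/IPC/stream callbacks... */
	.dsp_arch_ops = &sof_xtensa_arch_ops,
};

/* consumers of the namespaced export must import the namespace */
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);

The platform's Kconfig entry also needs a "select SND_SOC_SOF_XTENSA" so that this module is built alongside the driver.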