Diffstat (limited to 'sound/soc/sof/intel')
-rw-r--r--  sound/soc/sof/intel/Kconfig             342
-rw-r--r--  sound/soc/sof/intel/Makefile             38
-rw-r--r--  sound/soc/sof/intel/apl.c               116
-rw-r--r--  sound/soc/sof/intel/atom.c              420
-rw-r--r--  sound/soc/sof/intel/atom.h               74
-rw-r--r--  sound/soc/sof/intel/bdw.c               703
-rw-r--r--  sound/soc/sof/intel/byt.c               491
-rw-r--r--  sound/soc/sof/intel/cnl.c               477
-rw-r--r--  sound/soc/sof/intel/ext_manifest.h       35
-rw-r--r--  sound/soc/sof/intel/hda-bus.c            93
-rw-r--r--  sound/soc/sof/intel/hda-codec.c         287
-rw-r--r--  sound/soc/sof/intel/hda-common-ops.c    106
-rw-r--r--  sound/soc/sof/intel/hda-ctrl.c          364
-rw-r--r--  sound/soc/sof/intel/hda-dai.c          1001
-rw-r--r--  sound/soc/sof/intel/hda-dsp.c          1068
-rw-r--r--  sound/soc/sof/intel/hda-ipc.c           389
-rw-r--r--  sound/soc/sof/intel/hda-ipc.h            56
-rw-r--r--  sound/soc/sof/intel/hda-loader-skl.c    580
-rw-r--r--  sound/soc/sof/intel/hda-loader.c        589
-rw-r--r--  sound/soc/sof/intel/hda-pcm.c           275
-rw-r--r--  sound/soc/sof/intel/hda-probes.c        148
-rw-r--r--  sound/soc/sof/intel/hda-stream.c       1099
-rw-r--r--  sound/soc/sof/intel/hda-trace.c          95
-rw-r--r--  sound/soc/sof/intel/hda.c              1706
-rw-r--r--  sound/soc/sof/intel/hda.h               860
-rw-r--r--  sound/soc/sof/intel/icl.c               187
-rw-r--r--  sound/soc/sof/intel/mtl.c               673
-rw-r--r--  sound/soc/sof/intel/mtl.h                75
-rw-r--r--  sound/soc/sof/intel/pci-apl.c           105
-rw-r--r--  sound/soc/sof/intel/pci-cnl.c           140
-rw-r--r--  sound/soc/sof/intel/pci-icl.c           108
-rw-r--r--  sound/soc/sof/intel/pci-mtl.c            72
-rw-r--r--  sound/soc/sof/intel/pci-skl.c            93
-rw-r--r--  sound/soc/sof/intel/pci-tgl.c           304
-rw-r--r--  sound/soc/sof/intel/pci-tng.c           255
-rw-r--r--  sound/soc/sof/intel/shim.h              209
-rw-r--r--  sound/soc/sof/intel/skl.c               118
-rw-r--r--  sound/soc/sof/intel/tgl.c               217
38 files changed, 13968 insertions, 0 deletions
diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig
new file mode 100644
index 000000000..7af495fb6
--- /dev/null
+++ b/sound/soc/sof/intel/Kconfig
@@ -0,0 +1,342 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SND_SOC_SOF_INTEL_TOPLEVEL
+ bool "SOF support for Intel audio DSPs"
+ depends on X86 || COMPILE_TEST
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+if SND_SOC_SOF_INTEL_TOPLEVEL
+
+config SND_SOC_SOF_INTEL_HIFI_EP_IPC
+ tristate
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_INTEL_ATOM_HIFI_EP
+ tristate
+ select SND_SOC_SOF_INTEL_COMMON
+ select SND_SOC_SOF_INTEL_HIFI_EP_IPC
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_INTEL_COMMON
+ tristate
+ select SND_SOC_SOF
+ select SND_SOC_ACPI_INTEL_MATCH
+ select SND_SOC_SOF_XTENSA
+ select SND_SOC_INTEL_MACH
+ select SND_SOC_ACPI if ACPI
+ select SND_INTEL_DSP_CONFIG
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+if SND_SOC_SOF_ACPI
+
+config SND_SOC_SOF_BAYTRAIL
+ tristate "SOF support for Baytrail, Braswell and Cherrytrail"
+ default SND_SOC_SOF_ACPI
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_COMMON
+ select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
+ select SND_SOC_SOF_ACPI_DEV
+ select IOSF_MBI if X86 && PCI
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Baytrail, Braswell or Cherrytrail processors.
+ This option can coexist in the same build with the Atom legacy
+ drivers, which are currently the default but will be deprecated
+ at some point.
+ Existing firmware/topology binaries and UCM configurations
+ typically located in the root file system are already
+ compatible with both the SOF and Atom/SST legacy drivers.
+ This is a recommended option for distributions.
+ Say Y if you want to enable SOF on Baytrail/Cherrytrail.
+ If unsure select "N".
+
+config SND_SOC_SOF_BROADWELL
+ tristate "SOF support for Broadwell"
+ default SND_SOC_SOF_ACPI
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_COMMON
+ select SND_SOC_SOF_INTEL_HIFI_EP_IPC
+ select SND_SOC_SOF_ACPI_DEV
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Broadwell processors.
+ This option can coexist in the same build with the default 'catpt'
+ driver.
+ Existing firmware/topology binaries and UCM configurations typically
+ located in the root file system are already compatible with both the
+ SOF and catpt drivers.
+ SOF does not fully support Broadwell and has limitations related to
+ DMA and suspend-resume, so this is not a recommended option for
+ distributions.
+ Say Y if you want to enable SOF on Broadwell.
+ If unsure select "N".
+
+endif ## SND_SOC_SOF_ACPI
+
+if SND_SOC_SOF_PCI
+
+config SND_SOC_SOF_MERRIFIELD
+ tristate "SOF support for Tangier/Merrifield"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_PCI_DEV
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Tangier/Merrifield processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_INTEL_SKL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_SKYLAKE
+ tristate "SOF support for SkyLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_SKL
+ help
+ This adds support for the Intel(R) platforms using the SkyLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+ This is intended only for developers and not a recommended option for distros.
+
+config SND_SOC_SOF_KABYLAKE
+ tristate "SOF support for KabyLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_SKL
+ help
+ This adds support for the Intel(R) platforms using the KabyLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+ This is intended only for developers and not a recommended option for distros.
+
+config SND_SOC_SOF_INTEL_APL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_APOLLOLAKE
+ tristate "SOF support for Apollolake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_APL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Apollolake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_GEMINILAKE
+ tristate "SOF support for GeminiLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_APL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Geminilake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_INTEL_CNL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_CANNONLAKE
+ tristate "SOF support for Cannonlake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_CNL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Cannonlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_COFFEELAKE
+ tristate "SOF support for CoffeeLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_CNL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Coffeelake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_COMETLAKE
+ tristate "SOF support for CometLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_CNL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Cometlake processors.
+ If unsure select "N".
+
+config SND_SOC_SOF_INTEL_ICL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_ICELAKE
+ tristate "SOF support for Icelake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_ICL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Icelake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_JASPERLAKE
+ tristate "SOF support for JasperLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_ICL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the JasperLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_INTEL_TGL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_TIGERLAKE
+ tristate "SOF support for Tigerlake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_TGL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Tigerlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_ELKHARTLAKE
+ tristate "SOF support for ElkhartLake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_TGL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the ElkhartLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_ALDERLAKE
+ tristate "SOF support for Alderlake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_TGL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Alderlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_INTEL_MTL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_METEORLAKE
+ tristate "SOF support for Meteorlake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_MTL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Meteorlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_HDA_COMMON
+ tristate
+ select SND_SOC_SOF_INTEL_COMMON
+ select SND_SOC_SOF_PCI_DEV
+ select SND_INTEL_DSP_CONFIG
+ select SND_SOC_SOF_HDA_LINK_BASELINE
+ select SND_SOC_SOF_HDA_PROBES
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+if SND_SOC_SOF_HDA_COMMON
+
+config SND_SOC_SOF_HDA_LINK
+ bool "SOF support for HDA Links(HDA/HDMI)"
+ depends on SND_SOC_SOF_NOCODEC=n
+ select SND_SOC_SOF_PROBE_WORK_QUEUE
+ help
+ This adds support for HDA links (HDA/HDMI) with Sound Open Firmware
+ for Intel(R) platforms.
+ Say Y if you want to enable HDA links with SOF.
+ If unsure select "N".
+
+config SND_SOC_SOF_HDA_AUDIO_CODEC
+ bool "SOF support for HDAudio codecs"
+ depends on SND_SOC_SOF_HDA_LINK
+ help
+ This adds support for HDAudio codecs with Sound Open Firmware
+ for Intel(R) platforms.
+ Say Y if you want to enable HDAudio codecs with SOF.
+ If unsure select "N".
+
+endif ## SND_SOC_SOF_HDA_COMMON
+
+config SND_SOC_SOF_HDA_LINK_BASELINE
+ tristate
+ select SND_SOC_SOF_HDA if SND_SOC_SOF_HDA_LINK
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_HDA
+ tristate
+ select SND_HDA_EXT_CORE if SND_SOC_SOF_HDA_LINK
+ select SND_SOC_HDAC_HDA if SND_SOC_SOF_HDA_AUDIO_CODEC
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_HDA_PROBES
+ tristate
+ select SND_SOC_SOF_DEBUG_PROBES
+ help
+ This option enables data probing for Intel(R) Skylake and newer
+ (HDA) platforms.
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ tristate
+ select SOUNDWIRE_INTEL if SND_SOC_SOF_INTEL_SOUNDWIRE
+ select SND_INTEL_SOUNDWIRE_ACPI if SND_SOC_SOF_INTEL_SOUNDWIRE
+
+config SND_SOC_SOF_INTEL_SOUNDWIRE
+ tristate "SOF support for SoundWire"
+ default SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ depends on SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ depends on ACPI && SOUNDWIRE
+ depends on !(SOUNDWIRE=m && SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=y)
+ help
+ This adds support for SoundWire with Sound Open Firmware
+ for Intel(R) platforms.
+ Say Y if you want to enable SoundWire links with SOF.
+ If unsure select "N".
+
+endif ## SND_SOC_SOF_PCI
+
+endif ## SND_SOC_SOF_INTEL_TOPLEVEL
diff --git a/sound/soc/sof/intel/Makefile b/sound/soc/sof/intel/Makefile
new file mode 100644
index 000000000..8b8ea0361
--- /dev/null
+++ b/sound/soc/sof/intel/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+snd-sof-acpi-intel-byt-objs := byt.o
+snd-sof-acpi-intel-bdw-objs := bdw.o
+
+snd-sof-intel-hda-common-objs := hda.o hda-loader.o hda-stream.o hda-trace.o \
+ hda-dsp.o hda-ipc.o hda-ctrl.o hda-pcm.o \
+ hda-dai.o hda-bus.o \
+ skl.o hda-loader-skl.o \
+ apl.o cnl.o tgl.o icl.o mtl.o hda-common-ops.o
+
+snd-sof-intel-hda-common-$(CONFIG_SND_SOC_SOF_HDA_PROBES) += hda-probes.o
+
+snd-sof-intel-hda-objs := hda-codec.o
+
+snd-sof-intel-atom-objs := atom.o
+
+obj-$(CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP) += snd-sof-intel-atom.o
+obj-$(CONFIG_SND_SOC_SOF_BAYTRAIL) += snd-sof-acpi-intel-byt.o
+obj-$(CONFIG_SND_SOC_SOF_BROADWELL) += snd-sof-acpi-intel-bdw.o
+obj-$(CONFIG_SND_SOC_SOF_HDA_COMMON) += snd-sof-intel-hda-common.o
+obj-$(CONFIG_SND_SOC_SOF_HDA) += snd-sof-intel-hda.o
+
+snd-sof-pci-intel-tng-objs := pci-tng.o
+snd-sof-pci-intel-skl-objs := pci-skl.o
+snd-sof-pci-intel-apl-objs := pci-apl.o
+snd-sof-pci-intel-cnl-objs := pci-cnl.o
+snd-sof-pci-intel-icl-objs := pci-icl.o
+snd-sof-pci-intel-tgl-objs := pci-tgl.o
+snd-sof-pci-intel-mtl-objs := pci-mtl.o
+
+obj-$(CONFIG_SND_SOC_SOF_MERRIFIELD) += snd-sof-pci-intel-tng.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_SKL) += snd-sof-pci-intel-skl.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_APL) += snd-sof-pci-intel-apl.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_CNL) += snd-sof-pci-intel-cnl.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_ICL) += snd-sof-pci-intel-icl.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_TGL) += snd-sof-pci-intel-tgl.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_MTL) += snd-sof-pci-intel-mtl.o
diff --git a/sound/soc/sof/intel/apl.c b/sound/soc/sof/intel/apl.c
new file mode 100644
index 000000000..1549ca758
--- /dev/null
+++ b/sound/soc/sof/intel/apl.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Apollolake and GeminiLake
+ */
+
+#include <sound/sof/ext_manifest4.h>
+#include "../ipc4-priv.h"
+#include "../sof-priv.h"
+#include "hda.h"
+#include "../sof-audio.h"
+
+static const struct snd_sof_debugfs_map apl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+/* apollolake ops */
+struct snd_sof_dsp_ops sof_apl_ops;
+EXPORT_SYMBOL_NS(sof_apl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int sof_apl_ops_init(struct snd_sof_dev *sdev)
+{
+ /* common defaults */
+ memcpy(&sof_apl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ /* probe/remove/shutdown */
+ sof_apl_ops.shutdown = hda_dsp_shutdown;
+
+ if (sdev->pdata->ipc_type == SOF_IPC) {
+ /* doorbell */
+ sof_apl_ops.irq_thread = hda_dsp_ipc_irq_thread;
+
+ /* ipc */
+ sof_apl_ops.send_msg = hda_dsp_ipc_send_msg;
+
+ /* debug */
+ sof_apl_ops.ipc_dump = hda_ipc_dump;
+ }
+
+ if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
+
+ ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_1_5;
+
+ /* doorbell */
+ sof_apl_ops.irq_thread = hda_dsp_ipc4_irq_thread;
+
+ /* ipc */
+ sof_apl_ops.send_msg = hda_dsp_ipc4_send_msg;
+
+ /* debug */
+ sof_apl_ops.ipc_dump = hda_ipc4_dump;
+ }
+
+ /* set DAI driver ops */
+ hda_set_dai_drv_ops(sdev, &sof_apl_ops);
+
+ /* debug */
+ sof_apl_ops.debug_map = apl_dsp_debugfs;
+ sof_apl_ops.debug_map_count = ARRAY_SIZE(apl_dsp_debugfs);
+
+ /* firmware run */
+ sof_apl_ops.run = hda_dsp_cl_boot_firmware;
+
+ /* pre/post fw run */
+ sof_apl_ops.post_fw_run = hda_dsp_post_fw_run;
+
+ /* dsp core get/put */
+ sof_apl_ops.core_get = hda_dsp_core_get;
+
+ return 0;
+};
+EXPORT_SYMBOL_NS(sof_apl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc apl_chip_info = {
+ /* Apollolake */
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(1, 0),
+ .ipc_req = HDA_DSP_REG_HIPCI,
+ .ipc_req_mask = HDA_DSP_REG_HIPCI_BUSY,
+ .ipc_ack = HDA_DSP_REG_HIPCIE,
+ .ipc_ack_mask = HDA_DSP_REG_HIPCIE_DONE,
+ .ipc_ctl = HDA_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 150,
+ .ssp_count = APL_SSP_COUNT,
+ .ssp_base_offset = APL_SSP_BASE_OFFSET,
+ .quirks = SOF_INTEL_PROCEN_FMT_QUIRK,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_1_5_PLUS,
+};
+EXPORT_SYMBOL_NS(apl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
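sof_apl_ops_init() above follows the SOF convention of copying the shared HDA ops table and then overriding only the callbacks that differ per platform and per IPC version. A minimal, self-contained sketch of that copy-then-override pattern (generic types and names, illustrative only, not the SOF API):

	/* Copy common defaults, then patch in per-platform / per-IPC hooks. */
	#include <stddef.h>
	#include <string.h>

	struct dsp_ops {
		int (*send_msg)(void *ctx, const void *msg, size_t len);
		void (*ipc_dump)(void *ctx);
	};

	static const struct dsp_ops common_ops;	/* shared defaults */
	static struct dsp_ops apl_ops;

	static void apl_ops_init(int use_ipc4,
				 int (*ipc3_send)(void *, const void *, size_t),
				 int (*ipc4_send)(void *, const void *, size_t))
	{
		memcpy(&apl_ops, &common_ops, sizeof(apl_ops));	/* common defaults */
		apl_ops.send_msg = use_ipc4 ? ipc4_send : ipc3_send;	/* IPC-specific hook */
	}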
diff --git a/sound/soc/sof/intel/atom.c b/sound/soc/sof/intel/atom.c
new file mode 100644
index 000000000..bd9789b48
--- /dev/null
+++ b/sound/soc/sof/intel/atom.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2021 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Atom devices
+ */
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/intel-dsp-config.h>
+#include "../ops.h"
+#include "shim.h"
+#include "atom.h"
+#include "../sof-acpi-dev.h"
+#include "../sof-audio.h"
+#include "../../intel/common/soc-intel-quirks.h"
+
+static void atom_host_done(struct snd_sof_dev *sdev);
+static void atom_dsp_done(struct snd_sof_dev *sdev);
+
+/*
+ * Debug
+ */
+
+static void atom_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
+ /* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* note: variable AR register array is not read */
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+ offset += xoops->arch_hdr.totalsize;
+ sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
+}
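The three sof_mailbox_read() calls above walk regions laid out back to back in the mailbox; a summary of that layout as a descriptive comment (the header size shown is hypothetical):

	/*
	 * Mailbox layout consumed by atom_get_registers(), assuming a
	 * hypothetical arch_hdr.totalsize of 0x58:
	 *   dsp_oops_offset          -> struct sof_ipc_dsp_oops_xtensa
	 *   dsp_oops_offset + 0x58   -> struct sof_ipc_panic_info
	 *   ... + sizeof(panic_info) -> stack dump, STACK_DUMP_SIZE (32)
	 *                               u32 words = 128 bytes
	 */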
+
+void atom_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[STACK_DUMP_SIZE];
+ u64 status, panic, imrd, imrx;
+
+ /* now try generic SOF status messages */
+ status = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCD);
+ panic = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCX);
+ atom_get_registers(sdev, &xoops, &panic_info, stack,
+ STACK_DUMP_SIZE);
+ sof_print_oops_and_stack(sdev, KERN_ERR, status, panic, &xoops,
+ &panic_info, stack, STACK_DUMP_SIZE);
+
+ /* provide some context for firmware debug */
+ imrx = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IMRX);
+ imrd = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IMRD);
+ dev_err(sdev->dev,
+ "error: ipc host -> DSP: pending %s complete %s raw 0x%llx\n",
+ (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
+ (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ dev_err(sdev->dev,
+ "error: mask host: pending %s complete %s raw 0x%llx\n",
+ (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
+ (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ dev_err(sdev->dev,
+ "error: ipc DSP -> host: pending %s complete %s raw 0x%llx\n",
+ (status & SHIM_IPCD_BUSY) ? "yes" : "no",
+ (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ dev_err(sdev->dev,
+ "error: mask DSP: pending %s complete %s raw 0x%llx\n",
+ (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
+ (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
+}
+EXPORT_SYMBOL_NS(atom_dump, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+/*
+ * IPC Doorbell IRQ handler and thread.
+ */
+
+irqreturn_t atom_irq_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u64 ipcx, ipcd;
+ int ret = IRQ_NONE;
+
+ ipcx = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCX);
+ ipcd = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCD);
+
+ if (ipcx & SHIM_BYT_IPCX_DONE) {
+
+ /* reply message from DSP, Mask Done interrupt first */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR,
+ SHIM_IMRX,
+ SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (ipcd & SHIM_BYT_IPCD_BUSY) {
+
+ /* new message from DSP, Mask Busy interrupt first */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR,
+ SHIM_IMRX,
+ SHIM_IMRX_BUSY,
+ SHIM_IMRX_BUSY);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_NS(atom_irq_handler, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+irqreturn_t atom_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u64 ipcx, ipcd;
+
+ ipcx = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCX);
+ ipcd = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCD);
+
+ /* reply message from DSP */
+ if (ipcx & SHIM_BYT_IPCX_DONE) {
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+ /*
+ * handle immediate reply from DSP core. If the msg is
+ * found, set the done bit in cmd_done, which is called at
+ * the end of the message-processing function; else set it
+ * here, because the done bit can't be set in the cmd_done
+ * function, which is triggered by the msg
+ */
+ snd_sof_ipc_process_reply(sdev, ipcx);
+
+ atom_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ }
+
+ /* new message from DSP */
+ if (ipcd & SHIM_BYT_IPCD_BUSY) {
+
+ /* Handle messages from DSP Core */
+ if ((ipcd & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(sdev, PANIC_OFFSET(ipcd) + MBOX_OFFSET,
+ true);
+ } else {
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ atom_host_done(sdev);
+ }
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_NS(atom_irq_thread, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
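The handler/thread pair above implements a simple two-register doorbell handshake; a summary as a descriptive comment (register roles taken from the code above, bit names as in shim.h):

	/*
	 * Host -> DSP: atom_send_msg() writes the message to the mailbox and
	 *              sets IPCX.BUSY; the DSP sets IPCX.DONE when it has
	 *              consumed it, and atom_dsp_done() clears IPCX.DONE.
	 * DSP -> host: the DSP sets IPCD.BUSY; the thread handles the message
	 *              (or a panic), then atom_host_done() clears IPCD.BUSY,
	 *              sets IPCD.DONE and unmasks IMRX.BUSY for the next one.
	 */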
+
+int atom_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ /* unmask and prepare to receive Done interrupt */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_DONE, 0);
+
+ /* send the message */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write64(sdev, DSP_BAR, SHIM_IPCX, SHIM_BYT_IPCX_BUSY);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(atom_send_msg, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+int atom_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+EXPORT_SYMBOL_NS(atom_get_mailbox_offset, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+int atom_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+EXPORT_SYMBOL_NS(atom_get_window_offset, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+static void atom_host_done(struct snd_sof_dev *sdev)
+{
+ /* clear BUSY bit and set DONE bit - accept new messages */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR, SHIM_IPCD,
+ SHIM_BYT_IPCD_BUSY |
+ SHIM_BYT_IPCD_DONE,
+ SHIM_BYT_IPCD_DONE);
+
+ /* unmask and prepare to receive next new message */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY, 0);
+}
+
+static void atom_dsp_done(struct snd_sof_dev *sdev)
+{
+ /* clear DONE bit - tell DSP we have completed */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR, SHIM_IPCX,
+ SHIM_BYT_IPCX_DONE, 0);
+}
+
+/*
+ * DSP control.
+ */
+
+int atom_run(struct snd_sof_dev *sdev)
+{
+ int tries = 10;
+
+ /* release stall and wait to unstall */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_STALL, 0x0);
+ while (tries--) {
+ if (!(snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_CSR) &
+ SHIM_BYT_CSR_PWAITMODE))
+ break;
+ msleep(100);
+ }
+ if (tries < 0)
+ return -ENODEV;
+
+ /* return init core mask */
+ return 1;
+}
+EXPORT_SYMBOL_NS(atom_run, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+int atom_reset(struct snd_sof_dev *sdev)
+{
+ /* put DSP into reset, set reset vector and stall */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL |
+ SHIM_BYT_CSR_STALL,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL |
+ SHIM_BYT_CSR_STALL);
+
+ usleep_range(10, 15);
+
+ /* take DSP out of reset and keep stalled for FW loading */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(atom_reset, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
+ const char *sof_tplg_filename,
+ const char *ssp_str)
+{
+ const char *tplg_filename = NULL;
+ const char *split_ext;
+ char *filename, *tmp;
+
+ filename = kstrdup(sof_tplg_filename, GFP_KERNEL);
+ if (!filename)
+ return NULL;
+
+ /* this assumes a .tplg extension */
+ tmp = filename;
+ split_ext = strsep(&tmp, ".");
+ if (split_ext)
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s-%s.tplg",
+ split_ext, ssp_str);
+ kfree(filename);
+
+ return tplg_filename;
+}
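As a concrete example of the rename performed by fixup_tplg_name(), a standalone userspace equivalent (the topology filename below is hypothetical):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *tplg = "sof-byt-rt5640.tplg";	/* hypothetical name */
		int base_len = (int)(strchr(tplg, '.') - tplg);	/* assumes a '.' is present */
		char fixed[80];

		snprintf(fixed, sizeof(fixed), "%.*s-%s.tplg", base_len, tplg, "ssp0");
		printf("%s\n", fixed);	/* prints "sof-byt-rt5640-ssp0.tplg" */
		return 0;
	}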
+
+struct snd_soc_acpi_mach *atom_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_soc_acpi_mach *mach;
+ struct platform_device *pdev;
+ const char *tplg_filename;
+
+ mach = snd_soc_acpi_find_machine(desc->machines);
+ if (!mach) {
+ dev_warn(sdev->dev, "warning: No matching ASoC machine driver found\n");
+ return NULL;
+ }
+
+ pdev = to_platform_device(sdev->dev);
+ if (soc_intel_is_byt_cr(pdev)) {
+ dev_dbg(sdev->dev,
+ "BYT-CR detected, SSP0 used instead of SSP2\n");
+
+ tplg_filename = fixup_tplg_name(sdev,
+ mach->sof_tplg_filename,
+ "ssp0");
+ } else {
+ tplg_filename = mach->sof_tplg_filename;
+ }
+
+ if (!tplg_filename) {
+ dev_dbg(sdev->dev,
+ "error: no topology filename\n");
+ return NULL;
+ }
+
+ sof_pdata->tplg_filename = tplg_filename;
+ mach->mach_params.acpi_ipc_irq_index = desc->irqindex_host_ipc;
+
+ return mach;
+}
+EXPORT_SYMBOL_NS(atom_machine_select, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+/* Atom DAIs */
+struct snd_soc_dai_driver atom_dai[] = {
+{
+ .name = "ssp0-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp1-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp2-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ }
+},
+{
+ .name = "ssp3-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp4-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp5-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+};
+EXPORT_SYMBOL_NS(atom_dai, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+void atom_set_mach_params(struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct snd_soc_acpi_mach_params *mach_params;
+
+ mach_params = &mach->mach_params;
+ mach_params->platform = dev_name(sdev->dev);
+ mach_params->num_dai_drivers = desc->ops->num_drv;
+ mach_params->dai_drivers = desc->ops->drv;
+}
+EXPORT_SYMBOL_NS(atom_set_mach_params, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/atom.h b/sound/soc/sof/intel/atom.h
new file mode 100644
index 000000000..b965e5e08
--- /dev/null
+++ b/sound/soc/sof/intel/atom.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2017-2021 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_ATOM_H
+#define __SOF_INTEL_ATOM_H
+
+/* DSP memories */
+#define IRAM_OFFSET 0x0C0000
+#define IRAM_SIZE (80 * 1024)
+#define DRAM_OFFSET 0x100000
+#define DRAM_SIZE (160 * 1024)
+#define SHIM_OFFSET 0x140000
+#define SHIM_SIZE_BYT 0x100
+#define SHIM_SIZE_CHT 0x118
+#define MBOX_OFFSET 0x144000
+#define MBOX_SIZE 0x1000
+#define EXCEPT_OFFSET 0x800
+#define EXCEPT_MAX_HDR_SIZE 0x400
+
+/* DSP peripherals */
+#define DMAC0_OFFSET 0x098000
+#define DMAC1_OFFSET 0x09c000
+#define DMAC2_OFFSET 0x094000
+#define DMAC_SIZE 0x420
+#define SSP0_OFFSET 0x0a0000
+#define SSP1_OFFSET 0x0a1000
+#define SSP2_OFFSET 0x0a2000
+#define SSP3_OFFSET 0x0a4000
+#define SSP4_OFFSET 0x0a5000
+#define SSP5_OFFSET 0x0a6000
+#define SSP_SIZE 0x100
+
+#define STACK_DUMP_SIZE 32
+
+#define PCI_BAR_SIZE 0x200000
+
+#define PANIC_OFFSET(x) (((x) & GENMASK_ULL(47, 32)) >> 32)
+
+/*
+ * Debug
+ */
+
+#define MBOX_DUMP_SIZE 0x30
+
+/* BARs */
+#define DSP_BAR 0
+#define PCI_BAR 1
+#define IMR_BAR 2
+
+irqreturn_t atom_irq_handler(int irq, void *context);
+irqreturn_t atom_irq_thread(int irq, void *context);
+
+int atom_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
+int atom_get_mailbox_offset(struct snd_sof_dev *sdev);
+int atom_get_window_offset(struct snd_sof_dev *sdev, u32 id);
+
+int atom_run(struct snd_sof_dev *sdev);
+int atom_reset(struct snd_sof_dev *sdev);
+void atom_dump(struct snd_sof_dev *sdev, u32 flags);
+
+struct snd_soc_acpi_mach *atom_machine_select(struct snd_sof_dev *sdev);
+void atom_set_mach_params(struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev);
+
+extern struct snd_soc_dai_driver atom_dai[];
+
+#endif
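A worked example for PANIC_OFFSET(): bits 47:32 of the 64-bit IPCD doorbell value carry the panic message offset, which atom_irq_thread() adds to MBOX_OFFSET (0x144000). The doorbell value below is hypothetical:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t ipcd = 0x00001a2b5a5a5a5aULL;	/* hypothetical doorbell value */
		uint32_t off = (uint32_t)((ipcd >> 32) & 0xffff);	/* PANIC_OFFSET(ipcd) = 0x1a2b */

		printf("panic data at DSP offset 0x%x\n", 0x144000 + off);	/* 0x145a2b */
		return 0;
	}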
diff --git a/sound/soc/sof/intel/bdw.c b/sound/soc/sof/intel/bdw.c
new file mode 100644
index 000000000..a446154f2
--- /dev/null
+++ b/sound/soc/sof/intel/bdw.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Broadwell
+ */
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/intel-dsp-config.h>
+#include "../ops.h"
+#include "shim.h"
+#include "../sof-acpi-dev.h"
+#include "../sof-audio.h"
+
+/* BARs */
+#define BDW_DSP_BAR 0
+#define BDW_PCI_BAR 1
+
+/*
+ * Debug
+ */
+
+/* DSP memories for BDW */
+#define IRAM_OFFSET 0xA0000
+#define BDW_IRAM_SIZE (10 * 32 * 1024)
+#define DRAM_OFFSET 0x00000
+#define BDW_DRAM_SIZE (20 * 32 * 1024)
+#define SHIM_OFFSET 0xFB000
+#define SHIM_SIZE 0x100
+#define MBOX_OFFSET 0x9E000
+#define MBOX_SIZE 0x1000
+#define MBOX_DUMP_SIZE 0x30
+#define EXCEPT_OFFSET 0x800
+#define EXCEPT_MAX_HDR_SIZE 0x400
+
+/* DSP peripherals */
+#define DMAC0_OFFSET 0xFE000
+#define DMAC1_OFFSET 0xFF000
+#define DMAC_SIZE 0x420
+#define SSP0_OFFSET 0xFC000
+#define SSP1_OFFSET 0xFD000
+#define SSP_SIZE 0x100
+
+#define BDW_STACK_DUMP_SIZE 32
+
+#define BDW_PANIC_OFFSET(x) ((x) & 0xFFFF)
+
+static const struct snd_sof_debugfs_map bdw_debugfs[] = {
+ {"dmac0", BDW_DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", BDW_DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", BDW_DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", BDW_DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", BDW_DSP_BAR, IRAM_OFFSET, BDW_IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", BDW_DSP_BAR, DRAM_OFFSET, BDW_DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", BDW_DSP_BAR, SHIM_OFFSET, SHIM_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void bdw_host_done(struct snd_sof_dev *sdev);
+static void bdw_dsp_done(struct snd_sof_dev *sdev);
+
+/*
+ * DSP Control.
+ */
+
+static int bdw_run(struct snd_sof_dev *sdev)
+{
+ /* set opportunistic mode on engine 0,1 for all channels */
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_HMDC,
+ SHIM_HMDC_HDDA_E0_ALLCH |
+ SHIM_HMDC_HDDA_E1_ALLCH, 0);
+
+ /* set DSP to RUN */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_STALL, 0x0);
+
+ /* return init core mask */
+ return 1;
+}
+
+static int bdw_reset(struct snd_sof_dev *sdev)
+{
+ /* put DSP into reset and stall */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_RST | SHIM_CSR_STALL,
+ SHIM_CSR_RST | SHIM_CSR_STALL);
+
+ /* keep in reset for 10ms */
+ mdelay(10);
+
+ /* take DSP out of reset and keep stalled for FW loading */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_RST | SHIM_CSR_STALL,
+ SHIM_CSR_STALL);
+
+ return 0;
+}
+
+static int bdw_set_dsp_D0(struct snd_sof_dev *sdev)
+{
+ int tries = 10;
+ u32 reg;
+
+ /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
+ PCI_VDRTCL2_DCLCGE |
+ PCI_VDRTCL2_DTCGE, 0);
+
+ /* Disable D3PG (VDRTCTL0.D3PGD = 1) */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL0,
+ PCI_VDRTCL0_D3PGD, PCI_VDRTCL0_D3PGD);
+
+ /* Set D0 state */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_PMCS,
+ PCI_PMCS_PS_MASK, 0);
+
+ /* check that ADSP shim is enabled */
+ while (tries--) {
+ reg = readl(sdev->bar[BDW_PCI_BAR] + PCI_PMCS)
+ & PCI_PMCS_PS_MASK;
+ if (reg == 0)
+ goto finish;
+
+ msleep(20);
+ }
+
+ return -ENODEV;
+
+finish:
+ /*
+ * select SSP1 19.2MHz base clock, SSP clock 0,
+ * turn off Low Power Clock
+ */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_S1IOCS | SHIM_CSR_SBCS1 |
+ SHIM_CSR_LPCS, 0x0);
+
+ /* stall DSP core, set clk to 192/96Mhz */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
+ SHIM_CSR, SHIM_CSR_STALL |
+ SHIM_CSR_DCS_MASK,
+ SHIM_CSR_STALL |
+ SHIM_CSR_DCS(4));
+
+ /* Set 24MHz MCLK, prevent local clock gating, enable SSP0 clock */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CLKCTL,
+ SHIM_CLKCTL_MASK |
+ SHIM_CLKCTL_DCPLCG |
+ SHIM_CLKCTL_SCOE0,
+ SHIM_CLKCTL_MASK |
+ SHIM_CLKCTL_DCPLCG |
+ SHIM_CLKCTL_SCOE0);
+
+ /* Stall and reset core, set CSR */
+ bdw_reset(sdev);
+
+ /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
+ PCI_VDRTCL2_DCLCGE |
+ PCI_VDRTCL2_DTCGE,
+ PCI_VDRTCL2_DCLCGE |
+ PCI_VDRTCL2_DTCGE);
+
+ usleep_range(50, 55);
+
+ /* switch on audio PLL */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
+ PCI_VDRTCL2_APLLSE_MASK, 0);
+
+ /*
+ * set default power gating control: enable power gating control for
+ * all blocks, i.e. they can't be accessed; enable each block
+ * before accessing it.
+ */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL0,
+ 0xfffffffC, 0x0);
+
+ /* disable DMA finish function for SSP0 & SSP1 */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR2,
+ SHIM_CSR2_SDFD_SSP1,
+ SHIM_CSR2_SDFD_SSP1);
+
+ /* set on-demand mode on engine 0,1 for all channels */
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_HMDC,
+ SHIM_HMDC_HDDA_E0_ALLCH |
+ SHIM_HMDC_HDDA_E1_ALLCH,
+ SHIM_HMDC_HDDA_E0_ALLCH |
+ SHIM_HMDC_HDDA_E1_ALLCH);
+
+ /* Enable Interrupt from both sides */
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_IMRX,
+ (SHIM_IMRX_BUSY | SHIM_IMRX_DONE), 0x0);
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_IMRD,
+ (SHIM_IMRD_DONE | SHIM_IMRD_BUSY |
+ SHIM_IMRD_SSP0 | SHIM_IMRD_DMAC), 0x0);
+
+ /* clear IPC registers */
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCX, 0x0);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCD, 0x0);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, 0x80, 0x6);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, 0xe0, 0x300a);
+
+ return 0;
+}
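The power-state poll above (10 tries x 20 ms) could also be expressed with the kernel's readl_poll_timeout() helper from <linux/iopoll.h>; a hedged sketch only, assuming the same ~200 ms budget and the sdev/BDW_PCI_BAR mapping set up in bdw_probe() (it returns -ETIMEDOUT rather than -ENODEV on failure, and the helper name is illustrative, not part of the driver):

	#include <linux/iopoll.h>

	/* illustrative alternative to the open-coded loop in bdw_set_dsp_D0() */
	static int bdw_wait_for_d0(struct snd_sof_dev *sdev)
	{
		u32 reg;

		return readl_poll_timeout(sdev->bar[BDW_PCI_BAR] + PCI_PMCS, reg,
					  !(reg & PCI_PMCS_PS_MASK), 10000, 200000);
	}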
+
+static void bdw_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
+ /* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* note: variable AR register array is not read */
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+ offset += xoops->arch_hdr.totalsize;
+ sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
+}
+
+static void bdw_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[BDW_STACK_DUMP_SIZE];
+ u32 status, panic, imrx, imrd;
+
+ /* now try generic SOF status messages */
+ status = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);
+ panic = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCX);
+ bdw_get_registers(sdev, &xoops, &panic_info, stack,
+ BDW_STACK_DUMP_SIZE);
+ sof_print_oops_and_stack(sdev, KERN_ERR, status, panic, &xoops,
+ &panic_info, stack, BDW_STACK_DUMP_SIZE);
+
+ /* provide some context for firmware debug */
+ imrx = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IMRX);
+ imrd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IMRD);
+ dev_err(sdev->dev,
+ "error: ipc host -> DSP: pending %s complete %s raw 0x%8.8x\n",
+ (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
+ (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ dev_err(sdev->dev,
+ "error: mask host: pending %s complete %s raw 0x%8.8x\n",
+ (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
+ (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ dev_err(sdev->dev,
+ "error: ipc DSP -> host: pending %s complete %s raw 0x%8.8x\n",
+ (status & SHIM_IPCD_BUSY) ? "yes" : "no",
+ (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ dev_err(sdev->dev,
+ "error: mask DSP: pending %s complete %s raw 0x%8.8x\n",
+ (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
+ (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
+}
+
+/*
+ * IPC Doorbell IRQ handler and thread.
+ */
+
+static irqreturn_t bdw_irq_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 isr;
+ int ret = IRQ_NONE;
+
+ /* Interrupt arrived, check src */
+ isr = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_ISRX);
+ if (isr & (SHIM_ISRX_DONE | SHIM_ISRX_BUSY))
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
+}
+
+static irqreturn_t bdw_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 ipcx, ipcd, imrx;
+
+ imrx = snd_sof_dsp_read64(sdev, BDW_DSP_BAR, SHIM_IMRX);
+ ipcx = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCX);
+
+ /* reply message from DSP */
+ if (ipcx & SHIM_IPCX_DONE &&
+ !(imrx & SHIM_IMRX_DONE)) {
+ /* Mask Done interrupt before return */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
+ SHIM_IMRX, SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+ /*
+ * handle immediate reply from DSP core. If the msg is
+ * found, set the done bit in cmd_done, which is called at
+ * the end of the message-processing function; else set it
+ * here, because the done bit can't be set in the cmd_done
+ * function, which is triggered by the msg
+ */
+ snd_sof_ipc_process_reply(sdev, ipcx);
+
+ bdw_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ }
+
+ ipcd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);
+
+ /* new message from DSP */
+ if (ipcd & SHIM_IPCD_BUSY &&
+ !(imrx & SHIM_IMRX_BUSY)) {
+ /* Mask Busy interrupt before return */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
+ SHIM_IMRX, SHIM_IMRX_BUSY,
+ SHIM_IMRX_BUSY);
+
+ /* Handle messages from DSP Core */
+ if ((ipcd & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(sdev, BDW_PANIC_OFFSET(ipcx) + MBOX_OFFSET,
+ true);
+ } else {
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ bdw_host_done(sdev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * IPC Mailbox IO
+ */
+
+static int bdw_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ /* send the message */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCX, SHIM_IPCX_BUSY);
+
+ return 0;
+}
+
+static int bdw_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+
+static int bdw_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+
+static void bdw_host_done(struct snd_sof_dev *sdev)
+{
+ /* clear BUSY bit and set DONE bit - accept new messages */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IPCD,
+ SHIM_IPCD_BUSY | SHIM_IPCD_DONE,
+ SHIM_IPCD_DONE);
+
+ /* unmask busy interrupt */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY, 0);
+}
+
+static void bdw_dsp_done(struct snd_sof_dev *sdev)
+{
+ /* clear DONE bit - tell DSP we have completed */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IPCX,
+ SHIM_IPCX_DONE, 0);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_DONE, 0);
+}
+
+/*
+ * Probe and remove.
+ */
+static int bdw_probe(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ const struct sof_intel_dsp_desc *chip;
+ struct resource *mmio;
+ u32 base, size;
+ int ret;
+
+ chip = get_chip_info(sdev->pdata);
+ if (!chip) {
+ dev_err(sdev->dev, "error: no such device supported\n");
+ return -EIO;
+ }
+
+ sdev->num_cores = chip->cores_num;
+
+ /* LPE base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_lpe_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get LPE base at idx %d\n",
+ desc->resindex_lpe_base);
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
+ sdev->bar[BDW_DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BDW_DSP_BAR]) {
+ dev_err(sdev->dev,
+ "error: failed to ioremap LPE base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[BDW_DSP_BAR]);
+
+ /* TODO: add offsets */
+ sdev->mmio_bar = BDW_DSP_BAR;
+ sdev->mailbox_bar = BDW_DSP_BAR;
+ sdev->dsp_oops_offset = MBOX_OFFSET;
+
+ /* PCI base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_pcicfg_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get PCI base at idx %d\n",
+ desc->resindex_pcicfg_base);
+ return -ENODEV;
+ }
+
+ dev_dbg(sdev->dev, "PCI base at 0x%x size 0x%x", base, size);
+ sdev->bar[BDW_PCI_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BDW_PCI_BAR]) {
+ dev_err(sdev->dev,
+ "error: failed to ioremap PCI base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "PCI VADDR %p\n", sdev->bar[BDW_PCI_BAR]);
+
+ /* register our IRQ */
+ sdev->ipc_irq = platform_get_irq(pdev, desc->irqindex_host_ipc);
+ if (sdev->ipc_irq < 0)
+ return sdev->ipc_irq;
+
+ dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
+ ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
+ bdw_irq_handler, bdw_irq_thread,
+ IRQF_SHARED, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ return ret;
+ }
+
+ /* enable the DSP SHIM */
+ ret = bdw_set_dsp_D0(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DSP D0\n");
+ return ret;
+ }
+
+ /* DSP DMA can only access low 31 bits of host memory */
+ ret = dma_coerce_mask_and_coherent(sdev->dev, DMA_BIT_MASK(31));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
+ return ret;
+ }
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return ret;
+}
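A quick check of the 31-bit DMA mask used by bdw_probe() and byt_acpi_probe(): DMA_BIT_MASK(31) restricts DSP-visible buffers to the low 2 GiB of host memory. Standalone arithmetic only, not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t mask = (1ULL << 31) - 1;	/* DMA_BIT_MASK(31) */

		printf("0x%llx\n", (unsigned long long)mask);	/* 0x7fffffff = 2 GiB - 1 */
		return 0;
	}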
+
+static struct snd_soc_acpi_mach *bdw_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_soc_acpi_mach *mach;
+
+ mach = snd_soc_acpi_find_machine(desc->machines);
+ if (!mach) {
+ dev_warn(sdev->dev, "warning: No matching ASoC machine driver found\n");
+ return NULL;
+ }
+
+ sof_pdata->tplg_filename = mach->sof_tplg_filename;
+ mach->mach_params.acpi_ipc_irq_index = desc->irqindex_host_ipc;
+
+ return mach;
+}
+
+static void bdw_set_mach_params(struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct snd_soc_acpi_mach_params *mach_params;
+
+ mach_params = &mach->mach_params;
+ mach_params->platform = dev_name(sdev->dev);
+ mach_params->num_dai_drivers = desc->ops->num_drv;
+ mach_params->dai_drivers = desc->ops->drv;
+}
+
+/* Broadwell DAIs */
+static struct snd_soc_dai_driver bdw_dai[] = {
+{
+ .name = "ssp0-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp1-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+};
+
+/* broadwell ops */
+static struct snd_sof_dsp_ops sof_bdw_ops = {
+ /* Device init */
+ .probe = bdw_probe,
+
+ /* DSP Core Control */
+ .run = bdw_run,
+ .reset = bdw_reset,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* ipc */
+ .send_msg = bdw_send_msg,
+ .get_mailbox_offset = bdw_get_mailbox_offset,
+ .get_window_offset = bdw_get_window_offset,
+
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ /* machine driver */
+ .machine_select = bdw_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = bdw_set_mach_params,
+
+ /* debug */
+ .debug_map = bdw_debugfs,
+ .debug_map_count = ARRAY_SIZE(bdw_debugfs),
+ .dbg_dump = bdw_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_close = sof_stream_pcm_close,
+
+ /* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* DAI drivers */
+ .drv = bdw_dai,
+ .num_drv = ARRAY_SIZE(bdw_dai),
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
+
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+};
+
+static const struct sof_intel_dsp_desc bdw_chip_info = {
+ .cores_num = 1,
+ .host_managed_cores_mask = 1,
+ .hw_ip_version = SOF_INTEL_BROADWELL,
+};
+
+static const struct sof_dev_desc sof_acpi_broadwell_desc = {
+ .machines = snd_soc_acpi_intel_broadwell_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = 0,
+ .chip_info = &bdw_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-bdw.ri",
+ },
+ .nocodec_tplg_filename = "sof-bdw-nocodec.tplg",
+ .ops = &sof_bdw_ops,
+};
+
+static const struct acpi_device_id sof_broadwell_match[] = {
+ { "INT3438", (unsigned long)&sof_acpi_broadwell_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, sof_broadwell_match);
+
+static int sof_broadwell_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct acpi_device_id *id;
+ const struct sof_dev_desc *desc;
+ int ret;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return -ENODEV;
+
+ ret = snd_intel_acpi_dsp_driver_probe(dev, id->id);
+ if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+ dev_dbg(dev, "SOF ACPI driver not selected, aborting probe\n");
+ return -ENODEV;
+ }
+
+ desc = (const struct sof_dev_desc *)id->driver_data;
+ return sof_acpi_probe(pdev, desc);
+}
+
+/* acpi_driver definition */
+static struct platform_driver snd_sof_acpi_intel_bdw_driver = {
+ .probe = sof_broadwell_probe,
+ .remove = sof_acpi_remove,
+ .driver = {
+ .name = "sof-audio-acpi-intel-bdw",
+ .pm = &sof_acpi_pm,
+ .acpi_match_table = sof_broadwell_match,
+ },
+};
+module_platform_driver(snd_sof_acpi_intel_bdw_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_IMPORT_NS(SND_SOC_SOF_ACPI_DEV);
diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c
new file mode 100644
index 000000000..e6dc4ff53
--- /dev/null
+++ b/sound/soc/sof/intel/byt.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Baytrail, Braswell and Cherrytrail.
+ */
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/intel-dsp-config.h>
+#include "../ops.h"
+#include "atom.h"
+#include "shim.h"
+#include "../sof-acpi-dev.h"
+#include "../sof-audio.h"
+#include "../../intel/common/soc-intel-quirks.h"
+
+static const struct snd_sof_debugfs_map byt_debugfs[] = {
+ {"dmac0", DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp2", DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", DSP_BAR, SHIM_OFFSET, SHIM_SIZE_BYT,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static const struct snd_sof_debugfs_map cht_debugfs[] = {
+ {"dmac0", DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac2", DSP_BAR, DMAC2_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp2", DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp3", DSP_BAR, SSP3_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp4", DSP_BAR, SSP4_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp5", DSP_BAR, SSP5_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", DSP_BAR, SHIM_OFFSET, SHIM_SIZE_CHT,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void byt_reset_dsp_disable_int(struct snd_sof_dev *sdev)
+{
+ /* Disable Interrupt from both sides */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRX, 0x3, 0x3);
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRD, 0x3, 0x3);
+
+ /* Put DSP into reset, set reset vector */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL);
+}
+
+static int byt_suspend(struct snd_sof_dev *sdev, u32 target_state)
+{
+ byt_reset_dsp_disable_int(sdev);
+
+ return 0;
+}
+
+static int byt_resume(struct snd_sof_dev *sdev)
+{
+ /* enable BUSY and disable DONE Interrupt by default */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ return 0;
+}
+
+static int byt_remove(struct snd_sof_dev *sdev)
+{
+ byt_reset_dsp_disable_int(sdev);
+
+ return 0;
+}
+
+static int byt_acpi_probe(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ const struct sof_intel_dsp_desc *chip;
+ struct resource *mmio;
+ u32 base, size;
+ int ret;
+
+ chip = get_chip_info(sdev->pdata);
+ if (!chip) {
+ dev_err(sdev->dev, "error: no such device supported\n");
+ return -EIO;
+ }
+
+ sdev->num_cores = chip->cores_num;
+
+ /* DSP DMA can only access low 31 bits of host memory */
+ ret = dma_coerce_mask_and_coherent(sdev->dev, DMA_BIT_MASK(31));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
+ return ret;
+ }
+
+ /* LPE base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_lpe_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get LPE base at idx %d\n",
+ desc->resindex_lpe_base);
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
+ sdev->bar[DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[DSP_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap LPE base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[DSP_BAR]);
+
+ /* TODO: add offsets */
+ sdev->mmio_bar = DSP_BAR;
+ sdev->mailbox_bar = DSP_BAR;
+
+ /* IMR base - optional */
+ if (desc->resindex_imr_base == -1)
+ goto irq;
+
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_imr_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get IMR base at idx %d\n",
+ desc->resindex_imr_base);
+ return -ENODEV;
+ }
+
+ /* some BIOSes don't map IMR */
+ if (base == 0x55aa55aa || base == 0x0) {
+ dev_info(sdev->dev, "IMR not set by BIOS. Ignoring\n");
+ goto irq;
+ }
+
+ dev_dbg(sdev->dev, "IMR base at 0x%x size 0x%x", base, size);
+ sdev->bar[IMR_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[IMR_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap IMR base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "IMR VADDR %p\n", sdev->bar[IMR_BAR]);
+
+irq:
+ /* register our IRQ */
+ sdev->ipc_irq = platform_get_irq(pdev, desc->irqindex_host_ipc);
+ if (sdev->ipc_irq < 0)
+ return sdev->ipc_irq;
+
+ dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
+ ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
+ atom_irq_handler, atom_irq_thread,
+ IRQF_SHARED, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ return ret;
+ }
+
+ /* enable BUSY and disable DONE Interrupt by default */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return ret;
+}
+
+/* baytrail ops */
+static struct snd_sof_dsp_ops sof_byt_ops = {
+ /* device init */
+ .probe = byt_acpi_probe,
+ .remove = byt_remove,
+
+ /* DSP core boot / reset */
+ .run = atom_run,
+ .reset = atom_reset,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* doorbell */
+ .irq_handler = atom_irq_handler,
+ .irq_thread = atom_irq_thread,
+
+ /* ipc */
+ .send_msg = atom_send_msg,
+ .get_mailbox_offset = atom_get_mailbox_offset,
+ .get_window_offset = atom_get_window_offset,
+
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ /* machine driver */
+ .machine_select = atom_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = atom_set_mach_params,
+
+ /* debug */
+ .debug_map = byt_debugfs,
+ .debug_map_count = ARRAY_SIZE(byt_debugfs),
+ .dbg_dump = atom_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_close = sof_stream_pcm_close,
+
+ /* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* PM */
+ .suspend = byt_suspend,
+ .resume = byt_resume,
+
+ /* DAI drivers */
+ .drv = atom_dai,
+ .num_drv = 3, /* we have only 3 SSPs on byt */
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
+
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+};
+
+static const struct sof_intel_dsp_desc byt_chip_info = {
+ .cores_num = 1,
+ .host_managed_cores_mask = 1,
+ .hw_ip_version = SOF_INTEL_BAYTRAIL,
+};
+
+/* cherrytrail and braswell ops */
+static struct snd_sof_dsp_ops sof_cht_ops = {
+ /* device init */
+ .probe = byt_acpi_probe,
+ .remove = byt_remove,
+
+ /* DSP core boot / reset */
+ .run = atom_run,
+ .reset = atom_reset,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* doorbell */
+ .irq_handler = atom_irq_handler,
+ .irq_thread = atom_irq_thread,
+
+ /* ipc */
+ .send_msg = atom_send_msg,
+ .get_mailbox_offset = atom_get_mailbox_offset,
+ .get_window_offset = atom_get_window_offset,
+
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ /* machine driver */
+ .machine_select = atom_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = atom_set_mach_params,
+
+ /* debug */
+ .debug_map = cht_debugfs,
+ .debug_map_count = ARRAY_SIZE(cht_debugfs),
+ .dbg_dump = atom_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_close = sof_stream_pcm_close,
+
+ /* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* PM */
+ .suspend = byt_suspend,
+ .resume = byt_resume,
+
+ /* DAI drivers */
+ .drv = atom_dai,
+ /* all 6 SSPs may be available for cherrytrail */
+ .num_drv = 6,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
+
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+};
+
+static const struct sof_intel_dsp_desc cht_chip_info = {
+ .cores_num = 1,
+ .host_managed_cores_mask = 1,
+ .hw_ip_version = SOF_INTEL_BAYTRAIL,
+};
+
+/* BYTCR uses a different IRQ index */
+static const struct sof_dev_desc sof_acpi_baytrailcr_desc = {
+ .machines = snd_soc_acpi_intel_baytrail_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = 2,
+ .irqindex_host_ipc = 0,
+ .chip_info = &byt_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-byt.ri",
+ },
+ .nocodec_tplg_filename = "sof-byt-nocodec.tplg",
+ .ops = &sof_byt_ops,
+};
+
+static const struct sof_dev_desc sof_acpi_baytrail_desc = {
+ .machines = snd_soc_acpi_intel_baytrail_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = 2,
+ .irqindex_host_ipc = 5,
+ .chip_info = &byt_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-byt.ri",
+ },
+ .nocodec_tplg_filename = "sof-byt-nocodec.tplg",
+ .ops = &sof_byt_ops,
+};
+
+static const struct sof_dev_desc sof_acpi_cherrytrail_desc = {
+ .machines = snd_soc_acpi_intel_cherrytrail_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = 2,
+ .irqindex_host_ipc = 5,
+ .chip_info = &cht_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-cht.ri",
+ },
+ .nocodec_tplg_filename = "sof-cht-nocodec.tplg",
+ .ops = &sof_cht_ops,
+};
+
+static const struct acpi_device_id sof_baytrail_match[] = {
+ { "80860F28", (unsigned long)&sof_acpi_baytrail_desc },
+ { "808622A8", (unsigned long)&sof_acpi_cherrytrail_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, sof_baytrail_match);
+
+static int sof_baytrail_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct sof_dev_desc *desc;
+ const struct acpi_device_id *id;
+ int ret;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return -ENODEV;
+
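+ /* check the DSP driver selection; bail out if SOF is not the chosen driver */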
+ ret = snd_intel_acpi_dsp_driver_probe(dev, id->id);
+ if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+ dev_dbg(dev, "SOF ACPI driver not selected, aborting probe\n");
+ return -ENODEV;
+ }
+
+ desc = (const struct sof_dev_desc *)id->driver_data;
+ if (desc == &sof_acpi_baytrail_desc && soc_intel_is_byt_cr(pdev))
+ desc = &sof_acpi_baytrailcr_desc;
+
+ return sof_acpi_probe(pdev, desc);
+}
+
+/* acpi_driver definition */
+static struct platform_driver snd_sof_acpi_intel_byt_driver = {
+ .probe = sof_baytrail_probe,
+ .remove = sof_acpi_remove,
+ .driver = {
+ .name = "sof-audio-acpi-intel-byt",
+ .pm = &sof_acpi_pm,
+ .acpi_match_table = sof_baytrail_match,
+ },
+};
+module_platform_driver(snd_sof_acpi_intel_byt_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_IMPORT_NS(SND_SOC_SOF_ACPI_DEV);
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
new file mode 100644
index 000000000..19d0b1909
--- /dev/null
+++ b/sound/soc/sof/intel/cnl.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Cannonlake.
+ */
+
+#include <sound/sof/ext_manifest4.h>
+#include <sound/sof/ipc4/header.h>
+#include <trace/events/sof_intel.h>
+#include "../ipc4-priv.h"
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+#include "../sof-audio.h"
+
+static const struct snd_sof_debugfs_map cnl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void cnl_ipc_host_done(struct snd_sof_dev *sdev);
+static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev);
+
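+/*
+ * IPC4 IRQ thread: DONE on the initiator register means the DSP has
+ * received our message; BUSY on the target register means a new message
+ * (reply or notification) has arrived from the DSP.
+ */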
+irqreturn_t cnl_ipc4_irq_thread(int irq, void *context)
+{
+ struct sof_ipc4_msg notification_data = {{ 0 }};
+ struct snd_sof_dev *sdev = context;
+ bool ipc_irq = false;
+ u32 hipcida, hipctdr;
+
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
+ if (hipcida & CNL_DSP_REG_HIPCIDA_DONE) {
+ /* DSP received the message */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCCTL,
+ CNL_DSP_REG_HIPCCTL_DONE, 0);
+ cnl_ipc_dsp_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
+ if (hipctdr & CNL_DSP_REG_HIPCTDR_BUSY) {
+ /* Message from DSP (reply or notification) */
+ u32 hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCTDD);
+ u32 primary = hipctdr & CNL_DSP_REG_HIPCTDR_MSG_MASK;
+ u32 extension = hipctdd & CNL_DSP_REG_HIPCTDD_MSG_MASK;
+
+ if (primary & SOF_IPC4_MSG_DIR_MASK) {
+ /* Reply received */
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;
+
+ data->primary = primary;
+ data->extension = extension;
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+ snd_sof_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, data->primary);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev,
+ "IPC reply before FW_READY: %#x|%#x\n",
+ primary, extension);
+ }
+ } else {
+ /* Notification received */
+ notification_data.primary = primary;
+ notification_data.extension = extension;
+
+ sdev->ipc->msg.rx_data = &notification_data;
+ snd_sof_ipc_msgs_rx(sdev);
+ sdev->ipc->msg.rx_data = NULL;
+ }
+
+ /* Let DSP know that we have finished processing the message */
+ cnl_ipc_host_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ if (!ipc_irq)
+ /* This interrupt is not shared so no need to return IRQ_NONE. */
+ dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 hipci;
+ u32 hipcida;
+ u32 hipctdr;
+ u32 hipctdd;
+ u32 msg;
+ u32 msg_ext;
+ bool ipc_irq = false;
+
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
+ hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDD);
+ hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR);
+
+ /* reply message from DSP */
+ if (hipcida & CNL_DSP_REG_HIPCIDA_DONE) {
+ msg_ext = hipci & CNL_DSP_REG_HIPCIDR_MSG_MASK;
+ msg = hipcida & CNL_DSP_REG_HIPCIDA_MSG_MASK;
+
+ trace_sof_intel_ipc_firmware_response(sdev, msg, msg_ext);
+
+ /* mask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCCTL,
+ CNL_DSP_REG_HIPCCTL_DONE, 0);
+
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ spin_lock_irq(&sdev->ipc_lock);
+
+ /* handle immediate reply from DSP core */
+ hda_dsp_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, msg);
+
+ cnl_ipc_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev, "IPC reply before FW_READY: %#x\n",
+ msg);
+ }
+
+ ipc_irq = true;
+ }
+
+ /* new message from DSP */
+ if (hipctdr & CNL_DSP_REG_HIPCTDR_BUSY) {
+ msg = hipctdr & CNL_DSP_REG_HIPCTDR_MSG_MASK;
+ msg_ext = hipctdd & CNL_DSP_REG_HIPCTDD_MSG_MASK;
+
+ trace_sof_intel_ipc_firmware_initiated(sdev, msg, msg_ext);
+
+ /* handle messages from DSP */
+ if ((hipctdr & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ bool non_recoverable = true;
+
+ /*
+ * This is a PANIC message!
+ *
+ * If it arrives during firmware boot and this is not the last
+ * boot attempt, change non_recoverable to false as the DSP
+ * might still be able to boot in the next iteration(s)
+ */
+ if (sdev->fw_state == SOF_FW_BOOT_IN_PROGRESS &&
+ hda->boot_iteration < HDA_FW_BOOT_ATTEMPTS)
+ non_recoverable = false;
+
+ snd_sof_dsp_panic(sdev, HDA_DSP_PANIC_OFFSET(msg_ext),
+ non_recoverable);
+ } else {
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ cnl_ipc_host_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ if (!ipc_irq) {
+ /*
+ * This interrupt is not shared so no need to return IRQ_NONE.
+ */
+ dev_dbg_ratelimited(sdev->dev,
+ "nothing to do in IPC IRQ thread\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void cnl_ipc_host_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * clear the BUSY interrupt to tell the DSP controller that this
+ * interrupt has been accepted and should not be triggered again
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCTDR,
+ CNL_DSP_REG_HIPCTDR_BUSY,
+ CNL_DSP_REG_HIPCTDR_BUSY);
+ /*
+ * set the DONE bit to acknowledge to the DSP that the message has
+ * been processed, and send the reply message to the DSP
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCTDA,
+ CNL_DSP_REG_HIPCTDA_DONE,
+ CNL_DSP_REG_HIPCTDA_DONE);
+}
+
+static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * set the DONE bit - tell the DSP that we have received and
+ * processed its reply, so it does not re-send it to the host
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCIDA,
+ CNL_DSP_REG_HIPCIDA_DONE,
+ CNL_DSP_REG_HIPCIDA_DONE);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCCTL,
+ CNL_DSP_REG_HIPCCTL_DONE,
+ CNL_DSP_REG_HIPCCTL_DONE);
+}
+
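+/*
+ * A compact IPC carries its whole payload in the two doorbell registers
+ * (primary + extended data), so no mailbox copy is needed. Only the
+ * PM_GATE message is sent this way.
+ */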
+static bool cnl_compact_ipc_compress(struct snd_sof_ipc_msg *msg,
+ u32 *dr, u32 *dd)
+{
+ struct sof_ipc_pm_gate *pm_gate = msg->msg_data;
+
+ if (pm_gate->hdr.cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
+ /* send the compact message via the primary register */
+ *dr = HDA_IPC_MSG_COMPACT | HDA_IPC_PM_GATE;
+
+ /* send payload via the extended data register */
+ *dd = pm_gate->flags;
+
+ return true;
+ }
+
+ return false;
+}
+
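+/*
+ * IPC4: the optional payload is copied to the mailbox, while the message
+ * header (primary and extension) goes through the doorbell registers.
+ */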
+int cnl_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct sof_ipc4_msg *msg_data = msg->msg_data;
+
+ /* send the message via mailbox */
+ if (msg_data->data_size)
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
+ msg_data->data_size);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD, msg_data->extension);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ msg_data->primary | CNL_DSP_REG_HIPCIDR_BUSY);
+
+ return 0;
+}
+
+int cnl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ struct sof_ipc_cmd_hdr *hdr;
+ u32 dr = 0;
+ u32 dd = 0;
+
+ /*
+ * Currently the only compact IPC supported is the PM_GATE
+ * IPC, which is used for transitioning the DSP between the
+ * D0I0 and D0I3 states and is sent only from the
+ * set_power_state() op. Therefore, a compact IPC can never
+ * cause the DSP to exit D0I3 with the host and FW out of sync.
+ */
+ if (cnl_compact_ipc_compress(msg, &dr, &dd)) {
+ /* send the message via IPC registers */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD,
+ dd);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ CNL_DSP_REG_HIPCIDR_BUSY | dr);
+ return 0;
+ }
+
+ /* send the message via mailbox */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ CNL_DSP_REG_HIPCIDR_BUSY);
+
+ hdr = msg->msg_data;
+
+ /*
+ * Use mod_delayed_work() to schedule the delayed work
+ * to avoid scheduling multiple workqueue items when
+ * IPCs are sent at a high rate. mod_delayed_work()
+ * modifies the timer if the work is already pending.
+ * Also, no new delayed work should be queued after the
+ * CTX_SAVE IPC, which is sent just before the DSP enters D3.
+ */
+ if (hdr->cmd != (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE))
+ mod_delayed_work(system_wq, &hdev->d0i3_work,
+ msecs_to_jiffies(SOF_HDA_D0I3_WORK_DELAY_MS));
+
+ return 0;
+}
+
+void cnl_ipc_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipcctl;
+ u32 hipcida;
+ u32 hipctdr;
+
+ hda_ipc_irq_dump(sdev);
+
+ /* read IPC status */
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCCTL);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
+
+ /* dump the IPC regs */
+ /* TODO: parse the raw msg */
+ dev_err(sdev->dev,
+ "error: host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
+ hipcida, hipctdr, hipcctl);
+}
+
+void cnl_ipc4_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipcidr, hipcidd, hipcida, hipctdr, hipctdd, hipctda, hipcctl;
+
+ hda_ipc_irq_dump(sdev);
+
+ hipcidr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR);
+ hipcidd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD);
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
+ hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDD);
+ hipctda = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDA);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCCTL);
+
+ /* dump the IPC regs */
+ /* TODO: parse the raw msg */
+ dev_err(sdev->dev,
+ "Host IPC initiator: %#x|%#x|%#x, target: %#x|%#x|%#x, ctl: %#x\n",
+ hipcidr, hipcidd, hipcida, hipctdr, hipctdd, hipctda, hipcctl);
+}
+
+/* cannonlake ops */
+struct snd_sof_dsp_ops sof_cnl_ops;
+EXPORT_SYMBOL_NS(sof_cnl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int sof_cnl_ops_init(struct snd_sof_dev *sdev)
+{
+ /* common defaults */
+ memcpy(&sof_cnl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ /* probe/remove/shutdown */
+ sof_cnl_ops.shutdown = hda_dsp_shutdown;
+
+ /* ipc */
+ if (sdev->pdata->ipc_type == SOF_IPC) {
+ /* doorbell */
+ sof_cnl_ops.irq_thread = cnl_ipc_irq_thread;
+
+ /* ipc */
+ sof_cnl_ops.send_msg = cnl_ipc_send_msg;
+
+ /* debug */
+ sof_cnl_ops.ipc_dump = cnl_ipc_dump;
+ }
+
+ if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
+
+ ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_1_8;
+
+ /* doorbell */
+ sof_cnl_ops.irq_thread = cnl_ipc4_irq_thread;
+
+ /* ipc */
+ sof_cnl_ops.send_msg = cnl_ipc4_send_msg;
+
+ /* debug */
+ sof_cnl_ops.ipc_dump = cnl_ipc4_dump;
+ }
+
+ /* set DAI driver ops */
+ hda_set_dai_drv_ops(sdev, &sof_cnl_ops);
+
+ /* debug */
+ sof_cnl_ops.debug_map = cnl_dsp_debugfs;
+ sof_cnl_ops.debug_map_count = ARRAY_SIZE(cnl_dsp_debugfs);
+
+ /* pre/post fw run */
+ sof_cnl_ops.post_fw_run = hda_dsp_post_fw_run;
+
+ /* firmware run */
+ sof_cnl_ops.run = hda_dsp_cl_boot_firmware;
+
+ /* dsp core get/put */
+ sof_cnl_ops.core_get = hda_dsp_core_get;
+
+ return 0;
+};
+EXPORT_SYMBOL_NS(sof_cnl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc cnl_chip_info = {
+ /* Cannonlake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(3, 0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = CNL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_1_8,
+};
+EXPORT_SYMBOL_NS(cnl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+/*
+ * JasperLake is technically derived from IceLake and should be
+ * described in icl.c. However, since JasperLake was designed with only
+ * two cores, it cannot support the IceLake-specific power-up sequences
+ * that rely on core3. To simplify, JasperLake uses the CannonLake ops
+ * and is described here in cnl.c.
+ */
+const struct sof_intel_dsp_desc jsl_chip_info = {
+ /* Jasperlake */
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(1, 0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = ICL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_2_0,
+};
+EXPORT_SYMBOL_NS(jsl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/ext_manifest.h b/sound/soc/sof/intel/ext_manifest.h
new file mode 100644
index 000000000..2dfae9285
--- /dev/null
+++ b/sound/soc/sof/intel/ext_manifest.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2020 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * The Intel extended manifest is an extra place to store Intel cAVS
+ * specific metadata about the firmware; for example, the LPRO/HPRO
+ * configuration is Intel cAVS specific. This part of the output binary
+ * is not signed.
+ */
+
+#ifndef __INTEL_CAVS_EXT_MANIFEST_H__
+#define __INTEL_CAVS_EXT_MANIFEST_H__
+
+#include <sound/sof/ext_manifest.h>
+
+/* EXT_MAN_ELEM_PLATFORM_CONFIG_DATA element identifiers */
+enum sof_cavs_config_elem_type {
+ SOF_EXT_MAN_CAVS_CONFIG_EMPTY = 0,
+ SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO = 1,
+ SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE = 2,
+ SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE = 3,
+};
+
+/* EXT_MAN_ELEM_PLATFORM_CONFIG_DATA elements */
+struct sof_ext_man_cavs_config_data {
+ struct sof_ext_man_elem_header hdr;
+
+ struct sof_config_elem elems[];
+} __packed;
+
+#endif /* __INTEL_CAVS_EXT_MANIFEST_H__ */
diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c
new file mode 100644
index 000000000..0862ff8b6
--- /dev/null
+++ b/sound/soc/sof/intel/hda-bus.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Keyon Jie <yang.jie@linux.intel.com>
+
+#include <linux/io.h>
+#include <sound/hdaudio.h>
+#include <sound/hda_i915.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_register.h>
+#include "../sof-priv.h"
+#include "hda.h"
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+#include "../../codecs/hdac_hda.h"
+#define sof_hda_ext_ops snd_soc_hdac_hda_get_ops()
+#else
+#define sof_hda_ext_ops NULL
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
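+/* keep WAKEEN set for codecs whose link is powered down so they can still signal wake events */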
+static void update_codec_wake_enable(struct hdac_bus *bus, unsigned int addr, bool link_power)
+{
+ unsigned int mask = snd_hdac_chip_readw(bus, WAKEEN);
+
+ if (link_power)
+ mask &= ~BIT(addr);
+ else
+ mask |= BIT(addr);
+
+ snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
+}
+
+static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable)
+{
+ struct hdac_bus *bus = codec->bus;
+ bool oldstate = test_bit(codec->addr, &bus->codec_powered);
+
+ snd_hdac_ext_bus_link_power(codec, enable);
+
+ if (enable == oldstate)
+ return;
+
+ /*
+ * Both the codec driver and the controller can hold references to
+ * display power. To avoid unnecessary power-up/down cycles, the
+ * controller does not immediately release its reference.
+ *
+ * If the codec driver powers down the link, release
+ * the controller reference as well.
+ */
+ if (codec->addr == HDA_IDISP_ADDR && !enable)
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
+
+ /* WAKEEN needs to be set for disabled links */
+ update_codec_wake_enable(bus, codec->addr, enable);
+}
+
+static const struct hdac_bus_ops bus_core_ops = {
+ .command = snd_hdac_bus_send_cmd,
+ .get_response = snd_hdac_bus_get_response,
+ .link_power = sof_hda_bus_link_power,
+};
+#endif
+
+/*
+ * This can be used both with and without HDA link support.
+ */
+void sof_hda_bus_init(struct hdac_bus *bus, struct device *dev)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_ext_bus_init(bus, dev, &bus_core_ops, sof_hda_ext_ops);
+#else /* CONFIG_SND_SOC_SOF_HDA */
+ memset(bus, 0, sizeof(*bus));
+ bus->dev = dev;
+
+ INIT_LIST_HEAD(&bus->stream_list);
+
+ bus->irq = -1;
+
+ /*
+ * There is only one HDA bus at the moment, so keep the index at 0.
+ * This needs to be fixed when more than one HDA bus is supported.
+ */
+ bus->idx = 0;
+
+ spin_lock_init(&bus->reg_lock);
+#endif /* CONFIG_SND_SOC_SOF_HDA */
+}
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
new file mode 100644
index 000000000..a0dfd7de4
--- /dev/null
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Keyon Jie <yang.jie@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_i915.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "hda.h"
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+#include "../../codecs/hdac_hda.h"
+#endif /* CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC */
+
+#define CODEC_PROBE_RETRIES 3
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+#define IDISP_VID_INTEL 0x80860000
+
+/* load the legacy HDA codec driver */
+static int request_codec_module(struct hda_codec *codec)
+{
+#ifdef MODULE
+ char alias[MODULE_NAME_LEN];
+ const char *mod = NULL;
+
+ switch (codec->probe_id) {
+ case HDA_CODEC_ID_GENERIC:
+#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
+ mod = "snd-hda-codec-generic";
+#endif
+ break;
+ default:
+ snd_hdac_codec_modalias(&codec->core, alias, sizeof(alias));
+ mod = alias;
+ break;
+ }
+
+ if (mod) {
+ dev_dbg(&codec->core.dev, "loading codec module: %s\n", mod);
+ request_module(mod);
+ }
+#endif /* MODULE */
+ return device_attach(hda_codec_dev(codec));
+}
+
+static int hda_codec_load_module(struct hda_codec *codec)
+{
+ int ret;
+
+ ret = snd_hdac_device_register(&codec->core);
+ if (ret) {
+ dev_err(&codec->core.dev, "failed to register hdac device\n");
+ put_device(&codec->core.dev);
+ return ret;
+ }
+
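+ /* if no vendor-specific codec driver binds, fall back to the generic driver */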
+ ret = request_codec_module(codec);
+ if (ret <= 0) {
+ codec->probe_id = HDA_CODEC_ID_GENERIC;
+ ret = request_codec_module(codec);
+ }
+
+ return ret;
+}
+
+/* enable controller wake up event for all codecs with jack connectors */
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ struct hda_bus *hbus = sof_to_hbus(sdev);
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hda_codec *codec;
+ unsigned int mask = 0;
+
+ if (enable) {
+ list_for_each_codec(codec, hbus)
+ if (codec->jacktbl.used)
+ mask |= BIT(codec->core.addr);
+ }
+
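+ /* a zero mask (enable == false) clears WAKEEN for all codec addresses */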
+ snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
+}
+
+/* check jack status after resuming from suspend mode */
+void hda_codec_jack_check(struct snd_sof_dev *sdev)
+{
+ struct hda_bus *hbus = sof_to_hbus(sdev);
+ struct hda_codec *codec;
+
+ list_for_each_codec(codec, hbus)
+ /*
+ * Wake up all jack-detecting codecs regardless of whether an event
+ * has been recorded in STATESTS
+ */
+ if (codec->jacktbl.used)
+ pm_request_resume(&codec->core.dev);
+}
+#else
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable) {}
+void hda_codec_jack_check(struct snd_sof_dev *sdev) {}
+#endif /* CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC */
+EXPORT_SYMBOL_NS(hda_codec_jack_wake_enable, SND_SOC_SOF_HDA_AUDIO_CODEC);
+EXPORT_SYMBOL_NS(hda_codec_jack_check, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+#if IS_ENABLED(CONFIG_SND_HDA_GENERIC)
+#define is_generic_config(bus) \
+ ((bus)->modelname && !strcmp((bus)->modelname, "generic"))
+#else
+#define is_generic_config(x) 0
+#endif
+
+static struct hda_codec *hda_codec_device_init(struct hdac_bus *bus, int addr, int type)
+{
+ struct hda_codec *codec;
+
+ codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "ehdaudio%dD%d", bus->idx, addr);
+ if (IS_ERR(codec)) {
+ dev_err(bus->dev, "device init failed for hdac device\n");
+ return codec;
+ }
+
+ codec->core.type = type;
+
+ return codec;
+}
+
+/* probe individual codec */
+static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
+ bool hda_codec_use_common_hdmi)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+ struct hdac_hda_priv *hda_priv;
+ int type = HDA_DEV_LEGACY;
+#endif
+ struct hda_bus *hbus = sof_to_hbus(sdev);
+ struct hda_codec *codec;
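+ /*
+ * build a verb that reads the VENDOR_ID parameter from the codec's
+ * root node; a valid response confirms a codec is present at 'address'
+ */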
+ u32 hda_cmd = (address << 28) | (AC_NODE_ROOT << 20) |
+ (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
+ u32 resp = -1;
+ int ret, retry = 0;
+
+ do {
+ mutex_lock(&hbus->core.cmd_mutex);
+ snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
+ snd_hdac_bus_get_response(&hbus->core, address, &resp);
+ mutex_unlock(&hbus->core.cmd_mutex);
+ } while (resp == -1 && retry++ < CODEC_PROBE_RETRIES);
+
+ if (resp == -1)
+ return -EIO;
+ dev_dbg(sdev->dev, "HDA codec #%d probed OK: response: %x\n",
+ address, resp);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+ hda_priv = devm_kzalloc(sdev->dev, sizeof(*hda_priv), GFP_KERNEL);
+ if (!hda_priv)
+ return -ENOMEM;
+
+ /* only probe ASoC codec drivers for HDAC-HDMI */
+ if (!hda_codec_use_common_hdmi && (resp & 0xFFFF0000) == IDISP_VID_INTEL)
+ type = HDA_DEV_ASOC;
+
+ codec = hda_codec_device_init(&hbus->core, address, type);
+ ret = PTR_ERR_OR_ZERO(codec);
+ if (ret < 0)
+ return ret;
+
+ hda_priv->codec = codec;
+ dev_set_drvdata(&codec->core.dev, hda_priv);
+
+ if ((resp & 0xFFFF0000) == IDISP_VID_INTEL) {
+ if (!hbus->core.audio_component) {
+ dev_dbg(sdev->dev,
+ "iDisp hw present but no driver\n");
+ ret = -ENOENT;
+ goto out;
+ }
+ hda_priv->need_display_power = true;
+ }
+
+ if (is_generic_config(hbus))
+ codec->probe_id = HDA_CODEC_ID_GENERIC;
+ else
+ codec->probe_id = 0;
+
+ if (type == HDA_DEV_LEGACY) {
+ ret = hda_codec_load_module(codec);
+ /*
+ * handle ret==0 (no driver bound) as an error, but pass
+ * other return codes without modification
+ */
+ if (ret == 0)
+ ret = -ENOENT;
+ }
+
+out:
+ if (ret < 0) {
+ snd_hdac_device_unregister(&codec->core);
+ put_device(&codec->core.dev);
+ }
+#else
+ codec = hda_codec_device_init(&hbus->core, address, HDA_DEV_ASOC);
+ ret = PTR_ERR_OR_ZERO(codec);
+#endif
+
+ return ret;
+}
+
+/* Codec initialization */
+void hda_codec_probe_bus(struct snd_sof_dev *sdev,
+ bool hda_codec_use_common_hdmi)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int i, ret;
+
+ /* probe codecs in available slots */
+ for (i = 0; i < HDA_MAX_CODECS; i++) {
+
+ if (!(bus->codec_mask & (1 << i)))
+ continue;
+
+ ret = hda_codec_probe(sdev, i, hda_codec_use_common_hdmi);
+ if (ret < 0) {
+ dev_warn(bus->dev, "codec #%d probe error, ret: %d\n",
+ i, ret);
+ bus->codec_mask &= ~BIT(i);
+ }
+ }
+}
+EXPORT_SYMBOL_NS(hda_codec_probe_bus, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) || \
+ IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)
+
+void hda_codec_i915_display_power(struct snd_sof_dev *sdev, bool enable)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (HDA_IDISP_CODEC(bus->codec_mask)) {
+ dev_dbg(bus->dev, "Turning i915 HDAC power %d\n", enable);
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, enable);
+ }
+}
+EXPORT_SYMBOL_NS(hda_codec_i915_display_power, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+
+int hda_codec_i915_init(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int ret;
+
+ /* i915 exposes a HDA codec for HDMI audio */
+ ret = snd_hdac_i915_init(bus);
+ if (ret < 0)
+ return ret;
+
+ /* codec_mask not yet known, power up for probe */
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(hda_codec_i915_init, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+
+int hda_codec_i915_exit(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (!bus->audio_component)
+ return 0;
+
+ /* power down unconditionally */
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
+
+ return snd_hdac_i915_exit(bus);
+}
+EXPORT_SYMBOL_NS(hda_codec_i915_exit, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+
+#endif
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/hda-common-ops.c b/sound/soc/sof/intel/hda-common-ops.c
new file mode 100644
index 000000000..b2326396c
--- /dev/null
+++ b/sound/soc/sof/intel/hda-common-ops.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+
+/*
+ * common ops for SKL+ HDAudio platforms
+ */
+
+#include "../sof-priv.h"
+#include "hda.h"
+#include "../sof-audio.h"
+
+struct snd_sof_dsp_ops sof_hda_common_ops = {
+ /* probe/remove/shutdown */
+ .probe = hda_dsp_probe,
+ .remove = hda_dsp_remove,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* ipc */
+ .get_mailbox_offset = hda_dsp_ipc_get_mailbox_offset,
+ .get_window_offset = hda_dsp_ipc_get_window_offset,
+
+ .ipc_msg_data = hda_ipc_msg_data,
+ .set_stream_data_offset = hda_set_stream_data_offset,
+
+ /* machine driver */
+ .machine_select = hda_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = hda_set_mach_params,
+
+ /* debug */
+ .dbg_dump = hda_dsp_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* stream callbacks */
+ .pcm_open = hda_dsp_pcm_open,
+ .pcm_close = hda_dsp_pcm_close,
+ .pcm_hw_params = hda_dsp_pcm_hw_params,
+ .pcm_hw_free = hda_dsp_stream_hw_free,
+ .pcm_trigger = hda_dsp_pcm_trigger,
+ .pcm_pointer = hda_dsp_pcm_pointer,
+ .pcm_ack = hda_dsp_pcm_ack,
+
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_raw,
+
+ /* pre/post fw run */
+ .pre_fw_run = hda_dsp_pre_fw_run,
+
+ /* firmware run */
+ .run = hda_dsp_cl_boot_firmware,
+
+ /* parse platform specific extended manifest */
+ .parse_platform_ext_manifest = hda_dsp_ext_man_get_cavs_config_data,
+
+ /* dsp core get/put */
+
+ /* trace callback */
+ .trace_init = hda_dsp_trace_init,
+ .trace_release = hda_dsp_trace_release,
+ .trace_trigger = hda_dsp_trace_trigger,
+
+ /* client ops */
+ .register_ipc_clients = hda_register_clients,
+ .unregister_ipc_clients = hda_unregister_clients,
+
+ /* DAI drivers */
+ .drv = skl_dai,
+ .num_drv = SOF_SKL_NUM_DAIS,
+
+ /* PM */
+ .suspend = hda_dsp_suspend,
+ .resume = hda_dsp_resume,
+ .runtime_suspend = hda_dsp_runtime_suspend,
+ .runtime_resume = hda_dsp_runtime_resume,
+ .runtime_idle = hda_dsp_runtime_idle,
+ .set_hw_params_upon_resume = hda_dsp_set_hw_params_upon_resume,
+ .set_power_state = hda_dsp_set_power_state,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+};
diff --git a/sound/soc/sof/intel/hda-ctrl.c b/sound/soc/sof/intel/hda-ctrl.c
new file mode 100644
index 000000000..0c29bb196
--- /dev/null
+++ b/sound/soc/sof/intel/hda-ctrl.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/hda_component.h>
+#include "../ops.h"
+#include "hda.h"
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+static int hda_codec_mask = -1;
+module_param_named(codec_mask, hda_codec_mask, int, 0444);
+MODULE_PARM_DESC(codec_mask, "SOF HDA codec mask for probing");
+#endif
+
+/*
+ * HDA Operations.
+ */
+
+int hda_dsp_ctrl_link_reset(struct snd_sof_dev *sdev, bool reset)
+{
+ unsigned long timeout;
+ u32 gctl = 0;
+ u32 val;
+
+ /* 0 to enter reset and 1 to exit reset */
+ val = reset ? 0 : SOF_HDA_GCTL_RESET;
+
+ /* enter/exit HDA controller reset */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCTL,
+ SOF_HDA_GCTL_RESET, val);
+
+ /* wait to enter/exit reset */
+ timeout = jiffies + msecs_to_jiffies(HDA_DSP_CTRL_RESET_TIMEOUT);
+ while (time_before(jiffies, timeout)) {
+ gctl = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCTL);
+ if ((gctl & SOF_HDA_GCTL_RESET) == val)
+ return 0;
+ usleep_range(500, 1000);
+ }
+
+ /* enter/exit reset failed */
+ dev_err(sdev->dev, "error: failed to %s HDA controller gctl 0x%x\n",
+ reset ? "reset" : "ready", gctl);
+ return -EIO;
+}
+
+int hda_dsp_ctrl_get_caps(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ u32 cap, offset, feature;
+ int count = 0;
+ int ret;
+
+ /*
+ * On some devices, one reset cycle is necessary before reading
+ * capabilities
+ */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0)
+ return ret;
+ ret = hda_dsp_ctrl_link_reset(sdev, false);
+ if (ret < 0)
+ return ret;
+
+ offset = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_LLCH);
+
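+ /*
+ * walk the extended capability list: each header encodes a capability
+ * ID and the offset of the next entry, with 0 terminating the list;
+ * stop after SOF_HDA_MAX_CAPS entries to avoid an endless loop
+ */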
+ do {
+ dev_dbg(sdev->dev, "checking for capabilities at offset 0x%x\n",
+ offset & SOF_HDA_CAP_NEXT_MASK);
+
+ cap = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, offset);
+
+ if (cap == -1) {
+ dev_dbg(bus->dev, "Invalid capability reg read\n");
+ break;
+ }
+
+ feature = (cap & SOF_HDA_CAP_ID_MASK) >> SOF_HDA_CAP_ID_OFF;
+
+ switch (feature) {
+ case SOF_HDA_PP_CAP_ID:
+ dev_dbg(sdev->dev, "found DSP capability at 0x%x\n",
+ offset);
+ bus->ppcap = bus->remap_addr + offset;
+ sdev->bar[HDA_DSP_PP_BAR] = bus->ppcap;
+ break;
+ case SOF_HDA_SPIB_CAP_ID:
+ dev_dbg(sdev->dev, "found SPIB capability at 0x%x\n",
+ offset);
+ bus->spbcap = bus->remap_addr + offset;
+ sdev->bar[HDA_DSP_SPIB_BAR] = bus->spbcap;
+ break;
+ case SOF_HDA_DRSM_CAP_ID:
+ dev_dbg(sdev->dev, "found DRSM capability at 0x%x\n",
+ offset);
+ bus->drsmcap = bus->remap_addr + offset;
+ sdev->bar[HDA_DSP_DRSM_BAR] = bus->drsmcap;
+ break;
+ case SOF_HDA_GTS_CAP_ID:
+ dev_dbg(sdev->dev, "found GTS capability at 0x%x\n",
+ offset);
+ bus->gtscap = bus->remap_addr + offset;
+ break;
+ case SOF_HDA_ML_CAP_ID:
+ dev_dbg(sdev->dev, "found ML capability at 0x%x\n",
+ offset);
+ bus->mlcap = bus->remap_addr + offset;
+ break;
+ default:
+ dev_dbg(sdev->dev, "found capability %d at 0x%x\n",
+ feature, offset);
+ break;
+ }
+
+ offset = cap & SOF_HDA_CAP_NEXT_MASK;
+ } while (count++ <= SOF_HDA_MAX_CAPS && offset);
+
+ return 0;
+}
+
+void hda_dsp_ctrl_ppcap_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? SOF_HDA_PPCTL_GPROCEN : 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_GPROCEN, val);
+}
+
+void hda_dsp_ctrl_ppcap_int_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? SOF_HDA_PPCTL_PIE : 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_PIE, val);
+}
+
+void hda_dsp_ctrl_misc_clock_gating(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? PCI_CGCTL_MISCBDCGE_MASK : 0;
+
+ snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_MISCBDCGE_MASK, val);
+}
+
+/*
+ * Enable/disable the audio DSP clock-gating and power-gating bits.
+ * This allows the HW to opportunistically clock-gate and power-gate
+ * the audio DSP when it is idle.
+ */
+int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val;
+
+ /* enable/disable audio dsp clock gating */
+ val = enable ? PCI_CGCTL_ADSPDCGE : 0;
+ snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_ADSPDCGE, val);
+
+ /* enable/disable DMI Link L1 support */
+ val = enable ? HDA_VS_INTEL_EM2_L1SEN : 0;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, val);
+
+ /* enable/disable audio dsp power gating */
+ val = enable ? 0 : PCI_PGCTL_ADSPPGD;
+ snd_sof_pci_update_bits(sdev, PCI_PGCTL, PCI_PGCTL_ADSPPGD, val);
+
+ return 0;
+}
+
+int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev, bool full_reset)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_ext_link *hlink;
+#endif
+ struct hdac_stream *stream;
+ int sd_offset, ret = 0;
+
+ if (bus->chip_init)
+ return 0;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_set_codec_wakeup(bus, true);
+#endif
+ hda_dsp_ctrl_misc_clock_gating(sdev, false);
+
+ if (full_reset) {
+ /* reset HDA controller */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to reset HDA controller\n");
+ goto err;
+ }
+
+ usleep_range(500, 1000);
+
+ /* exit HDA controller reset */
+ ret = hda_dsp_ctrl_link_reset(sdev, false);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to exit HDA controller reset\n");
+ goto err;
+ }
+
+ usleep_range(1000, 1200);
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* check to see if controller is ready */
+ if (!snd_hdac_chip_readb(bus, GCTL)) {
+ dev_dbg(bus->dev, "controller not ready!\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* Accept unsolicited responses */
+ snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, AZX_GCTL_UNSOL);
+
+ /* detect codecs */
+ if (!bus->codec_mask) {
+ bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+ dev_dbg(bus->dev, "codec_mask = 0x%lx\n", bus->codec_mask);
+ }
+
+ if (hda_codec_mask != -1) {
+ bus->codec_mask &= hda_codec_mask;
+ dev_dbg(bus->dev, "filtered codec_mask = 0x%lx\n",
+ bus->codec_mask);
+ }
+#endif
+
+ /* clear stream status */
+ list_for_each_entry(stream, &bus->stream_list, list) {
+ sd_offset = SOF_STREAM_SD_OFFSET(stream);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+ }
+
+ /* clear WAKESTS */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
+ SOF_HDA_WAKESTS_INT_MASK);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* clear rirb status */
+ snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
+#endif
+
+ /* clear interrupt status register */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_ALL_STREAM);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* initialize the codec command I/O */
+ snd_hdac_bus_init_cmd_io(bus);
+#endif
+
+ /* enable CIE and GIE interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN);
+
+ /* program the position buffer */
+ if (bus->use_posbuf && bus->posbuf.addr) {
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE,
+ (u32)bus->posbuf.addr);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPUBASE,
+ upper_32_bits(bus->posbuf.addr));
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* Reset stream-to-link mapping */
+ list_for_each_entry(hlink, &bus->hlink_list, list)
+ writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);
+#endif
+
+ bus->chip_init = true;
+
+err:
+ hda_dsp_ctrl_misc_clock_gating(sdev, true);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_set_codec_wakeup(bus, false);
+#endif
+
+ return ret;
+}
+
+void hda_dsp_ctrl_stop_chip(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *stream;
+ int sd_offset;
+
+ if (!bus->chip_init)
+ return;
+
+ /* disable interrupts in stream descriptor */
+ list_for_each_entry(stream, &bus->stream_list, list) {
+ sd_offset = SOF_STREAM_SD_OFFSET(stream);
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset +
+ SOF_HDA_ADSP_REG_CL_SD_CTL,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ 0);
+ }
+
+ /* disable SIE for all streams */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_ALL_STREAM, 0);
+
+ /* disable controller CIE and GIE */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
+ 0);
+
+ /* clear stream status */
+ list_for_each_entry(stream, &bus->stream_list, list) {
+ sd_offset = SOF_STREAM_SD_OFFSET(stream);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+ }
+
+ /* clear WAKESTS */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
+ SOF_HDA_WAKESTS_INT_MASK);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* clear rirb status */
+ snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
+#endif
+
+ /* clear interrupt status register */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_ALL_STREAM);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* disable CORB/RIRB */
+ snd_hdac_bus_stop_cmd_io(bus);
+#endif
+ /* disable position buffer */
+ if (bus->use_posbuf && bus->posbuf.addr) {
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_ADSP_DPLBASE, 0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_ADSP_DPUBASE, 0);
+ }
+
+ bus->chip_init = false;
+}
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
new file mode 100644
index 000000000..5f03ee390
--- /dev/null
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -0,0 +1,1001 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Keyon Jie <yang.jie@linux.intel.com>
+//
+
+#include <sound/pcm_params.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/intel-nhlt.h>
+#include <sound/sof/ipc4/header.h>
+#include <uapi/sound/sof/header.h>
+#include "../ipc4-priv.h"
+#include "../ipc4-topology.h"
+#include "../sof-priv.h"
+#include "../sof-audio.h"
+#include "hda.h"
+
+/*
+ * The default method is to fetch the NHLT from the BIOS. With this
+ * parameter set, it is possible to override that with an NHLT from the
+ * SOF topology manifest.
+ */
+static bool hda_use_tplg_nhlt;
+module_param_named(sof_use_tplg_nhlt, hda_use_tplg_nhlt, bool, 0444);
+MODULE_PARM_DESC(sof_use_tplg_nhlt, "SOF topology nhlt override");
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+
+struct hda_pipe_params {
+ u32 ch;
+ u32 s_freq;
+ u32 s_fmt;
+ u8 linktype;
+ snd_pcm_format_t format;
+ int link_index;
+ int stream;
+ unsigned int link_bps;
+};
+
+/*
+ * This function checks if the host dma channel corresponding
+ * to the link DMA stream_tag argument is assigned to one
+ * of the FEs connected to the BE DAI.
+ */
+static bool hda_check_fes(struct snd_soc_pcm_runtime *rtd,
+ int dir, int stream_tag)
+{
+ struct snd_pcm_substream *fe_substream;
+ struct hdac_stream *fe_hstream;
+ struct snd_soc_dpcm *dpcm;
+
+ for_each_dpcm_fe(rtd, dir, dpcm) {
+ fe_substream = snd_soc_dpcm_get_substream(dpcm->fe, dir);
+ fe_hstream = fe_substream->runtime->private_data;
+ if (fe_hstream->stream_tag == stream_tag)
+ return true;
+ }
+
+ return false;
+}
+
+static struct hdac_ext_stream *
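+/*
+ * Assign a free link DMA stream to the BE substream. With the
+ * PROCEN_FMT_QUIRK set, an opened stream is only reused if its stream
+ * tag matches one of the connected FEs; an unopened stream is treated
+ * as hostless and its host DMA channel is reserved.
+ */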
+hda_link_stream_assign(struct hdac_bus *bus,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct sof_intel_hda_stream *hda_stream;
+ const struct sof_intel_dsp_desc *chip;
+ struct snd_sof_dev *sdev;
+ struct hdac_ext_stream *res = NULL;
+ struct hdac_stream *hstream = NULL;
+
+ int stream_dir = substream->stream;
+
+ if (!bus->ppcap) {
+ dev_err(bus->dev, "stream type not supported\n");
+ return NULL;
+ }
+
+ spin_lock_irq(&bus->reg_lock);
+ list_for_each_entry(hstream, &bus->stream_list, list) {
+ struct hdac_ext_stream *hext_stream =
+ stream_to_hdac_ext_stream(hstream);
+ if (hstream->direction != substream->stream)
+ continue;
+
+ hda_stream = hstream_to_sof_hda_stream(hext_stream);
+ sdev = hda_stream->sdev;
+ chip = get_chip_info(sdev->pdata);
+
+ /* check if link is available */
+ if (!hext_stream->link_locked) {
+ /*
+ * choose the first available link for platforms that do not have the
+ * PROCEN_FMT_QUIRK set.
+ */
+ if (!(chip->quirks & SOF_INTEL_PROCEN_FMT_QUIRK)) {
+ res = hext_stream;
+ break;
+ }
+
+ if (hstream->opened) {
+ /*
+ * check if the stream tag matches the stream
+ * tag of one of the connected FEs
+ */
+ if (hda_check_fes(rtd, stream_dir,
+ hstream->stream_tag)) {
+ res = hext_stream;
+ break;
+ }
+ } else {
+ res = hext_stream;
+
+ /*
+ * This must be a hostless stream.
+ * So reserve the host DMA channel.
+ */
+ hda_stream->host_reserved = 1;
+ break;
+ }
+ }
+ }
+
+ if (res) {
+ /* Make sure that host and link DMA are decoupled. */
+ snd_hdac_ext_stream_decouple_locked(bus, res, true);
+
+ res->link_locked = 1;
+ res->link_substream = substream;
+ }
+ spin_unlock_irq(&bus->reg_lock);
+
+ return res;
+}
+
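+/*
+ * Undo the link DMA setup: optionally clear the running stream, drop the
+ * codec stream-id mapping for playback, release the link stream and any
+ * host DMA channel reserved for hostless use.
+ */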
+static int hda_link_dma_cleanup(struct snd_pcm_substream *substream,
+ struct hdac_stream *hstream,
+ struct snd_soc_dai *cpu_dai,
+ struct snd_soc_dai *codec_dai,
+ bool trigger_suspend_stop)
+{
+ struct hdac_ext_stream *hext_stream = snd_soc_dai_get_dma_data(cpu_dai, substream);
+ struct hdac_bus *bus = hstream->bus;
+ struct sof_intel_hda_stream *hda_stream;
+ struct hdac_ext_link *link;
+ int stream_tag;
+
+ link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
+ if (!link)
+ return -EINVAL;
+
+ if (trigger_suspend_stop)
+ snd_hdac_ext_link_stream_clear(hext_stream);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ stream_tag = hdac_stream(hext_stream)->stream_tag;
+ snd_hdac_ext_link_clear_stream_id(link, stream_tag);
+ }
+ snd_soc_dai_set_dma_data(cpu_dai, substream, NULL);
+ snd_hdac_ext_stream_release(hext_stream, HDAC_EXT_STREAM_TYPE_LINK);
+ hext_stream->link_prepared = 0;
+
+ /* free the host DMA channel reserved by hostless streams */
+ hda_stream = hstream_to_sof_hda_stream(hext_stream);
+ hda_stream->host_reserved = 0;
+
+ return 0;
+}
+
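+/* program the link DMA stream format and, for playback, map its stream tag to the HDA link */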
+static int hda_link_dma_params(struct hdac_ext_stream *hext_stream,
+ struct hda_pipe_params *params)
+{
+ struct hdac_stream *hstream = &hext_stream->hstream;
+ unsigned char stream_tag = hstream->stream_tag;
+ struct hdac_bus *bus = hstream->bus;
+ struct hdac_ext_link *link;
+ unsigned int format_val;
+
+ snd_hdac_ext_link_stream_reset(hext_stream);
+
+ format_val = snd_hdac_calc_stream_format(params->s_freq, params->ch,
+ params->format,
+ params->link_bps, 0);
+
+ dev_dbg(bus->dev, "format_val=%d, rate=%d, ch=%d, format=%d\n",
+ format_val, params->s_freq, params->ch, params->format);
+
+ snd_hdac_ext_link_stream_setup(hext_stream, format_val);
+
+ if (hext_stream->hstream.direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ list_for_each_entry(link, &bus->hlink_list, list) {
+ if (link->index == params->link_index)
+ snd_hdac_ext_link_set_stream_id(link,
+ stream_tag);
+ }
+ }
+
+ hext_stream->link_prepared = 1;
+
+ return 0;
+}
+
+static int hda_link_dma_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *hext_stream;
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct hda_pipe_params p_params = {0};
+ struct hdac_bus *bus = hstream->bus;
+ struct hdac_ext_link *link;
+
+ link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
+ if (!link)
+ return -EINVAL;
+
+ hext_stream = snd_soc_dai_get_dma_data(cpu_dai, substream);
+ if (!hext_stream) {
+ hext_stream = hda_link_stream_assign(bus, substream);
+ if (!hext_stream)
+ return -EBUSY;
+
+ snd_soc_dai_set_dma_data(cpu_dai, substream, (void *)hext_stream);
+ }
+
+ /* set the hdac_stream in the codec dai */
+ snd_soc_dai_set_stream(codec_dai, hdac_stream(hext_stream), substream->stream);
+
+ p_params.s_fmt = snd_pcm_format_width(params_format(params));
+ p_params.ch = params_channels(params);
+ p_params.s_freq = params_rate(params);
+ p_params.stream = substream->stream;
+ p_params.link_index = link->index;
+ p_params.format = params_format(params);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ p_params.link_bps = codec_dai->driver->playback.sig_bits;
+ else
+ p_params.link_bps = codec_dai->driver->capture.sig_bits;
+
+ return hda_link_dma_params(hext_stream, &p_params);
+}
+
+static int hda_link_dma_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ int stream = substream->stream;
+
+ return hda_link_dma_hw_params(substream, &rtd->dpcm[stream].hw_params);
+}
+
+static int hda_link_dma_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct hdac_ext_stream *hext_stream = snd_soc_dai_get_dma_data(cpu_dai, substream);
+ int ret;
+
+ if (!hext_stream)
+ return 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ snd_hdac_ext_link_stream_start(hext_stream);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ ret = hda_link_dma_cleanup(substream, hstream, cpu_dai, codec_dai, true);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ snd_hdac_ext_link_stream_clear(hext_stream);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int hda_link_dma_hw_free(struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct hdac_ext_stream *hext_stream;
+
+ hext_stream = snd_soc_dai_get_dma_data(cpu_dai, substream);
+ if (!hext_stream)
+ return 0;
+
+ return hda_link_dma_cleanup(substream, hstream, cpu_dai, codec_dai, false);
+}
+
+static int hda_dai_widget_update(struct snd_soc_dapm_widget *w,
+ int channel, bool widget_setup)
+{
+ struct snd_sof_dai_config_data data;
+
+ data.dai_data = channel;
+
+ /* set up/free DAI widget and send DAI_CONFIG IPC */
+ if (widget_setup)
+ return hda_ctrl_dai_widget_setup(w, SOF_DAI_CONFIG_FLAGS_2_STEP_STOP, &data);
+
+ return hda_ctrl_dai_widget_free(w, SOF_DAI_CONFIG_FLAGS_NONE, &data);
+}
+
+static int hda_dai_hw_params_update(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *hext_stream;
+ struct snd_soc_dapm_widget *w;
+ int stream_tag;
+
+ hext_stream = snd_soc_dai_get_dma_data(dai, substream);
+ if (!hext_stream)
+ return -EINVAL;
+
+ stream_tag = hdac_stream(hext_stream)->stream_tag;
+
+ w = snd_soc_dai_get_widget(dai, substream->stream);
+
+ /* set up the DAI widget and send the DAI_CONFIG with the new tag */
+ return hda_dai_widget_update(w, stream_tag - 1, true);
+}
+
+static int hda_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *hext_stream =
+ snd_soc_dai_get_dma_data(dai, substream);
+ int ret;
+
+ if (hext_stream && hext_stream->link_prepared)
+ return 0;
+
+ ret = hda_link_dma_hw_params(substream, params);
+ if (ret < 0)
+ return ret;
+
+ return hda_dai_hw_params_update(substream, params, dai);
+}
+
+
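+/* let the firmware know about a PAUSE_PUSH trigger via a DAI_CONFIG IPC with the PAUSE flag */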
+static int hda_dai_config_pause_push_ipc(struct snd_soc_dapm_widget *w)
+{
+ struct snd_sof_widget *swidget = w->dobj.private;
+ struct snd_soc_component *component = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
+ int ret = 0;
+
+ if (tplg_ops->dai_config) {
+ ret = tplg_ops->dai_config(sdev, swidget, SOF_DAI_CONFIG_FLAGS_PAUSE, NULL);
+ if (ret < 0)
+ dev_err(sdev->dev, "%s: DAI config failed for widget %s\n", __func__,
+ w->name);
+ }
+
+ return ret;
+}
+
+static int hda_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *hext_stream =
+ snd_soc_dai_get_dma_data(dai, substream);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(dai->component);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ int stream = substream->stream;
+ int ret;
+
+ if (hext_stream && hext_stream->link_prepared)
+ return 0;
+
+ dev_dbg(sdev->dev, "prepare stream dir %d\n", substream->stream);
+
+ ret = hda_link_dma_prepare(substream);
+ if (ret < 0)
+ return ret;
+
+ return hda_dai_hw_params_update(substream, &rtd->dpcm[stream].hw_params, dai);
+}
+
+static int hda_dai_hw_free_ipc(int stream, /* direction */
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_dapm_widget *w;
+
+ w = snd_soc_dai_get_widget(dai, stream);
+
+ /* free the link DMA channel in the FW and the DAI widget */
+ return hda_dai_widget_update(w, DMA_CHAN_INVALID, false);
+}
+
+static int ipc3_hda_dai_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct snd_soc_dapm_widget *w;
+ int ret;
+
+ dev_dbg(dai->dev, "cmd=%d dai %s direction %d\n", cmd,
+ dai->name, substream->stream);
+
+ ret = hda_link_dma_trigger(substream, cmd);
+ if (ret < 0)
+ return ret;
+
+ w = snd_soc_dai_get_widget(dai, substream->stream);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ /*
+ * free the DAI widget during stop/suspend to keep the widget use_count balanced.
+ */
+ ret = hda_dai_hw_free_ipc(substream->stream, dai);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = hda_dai_config_pause_push_ipc(w);
+ if (ret < 0)
+ return ret;
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+/*
+ * In contrast to IPC3, the dai trigger in IPC4 mixes pipeline state changes
+ * (over the IPC channel) and DMA state changes (direct host register accesses).
+ */
+static int ipc4_hda_dai_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *hext_stream = snd_soc_dai_get_dma_data(dai, substream);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(dai->component);
+ struct snd_soc_pcm_runtime *rtd;
+ struct snd_sof_widget *swidget;
+ struct snd_soc_dapm_widget *w;
+ struct snd_soc_dai *codec_dai;
+ struct hdac_stream *hstream;
+ struct snd_soc_dai *cpu_dai;
+ int ret;
+
+ dev_dbg(dai->dev, "cmd=%d dai %s direction %d\n", cmd,
+ dai->name, substream->stream);
+
+ hstream = substream->runtime->private_data;
+ rtd = asoc_substream_to_rtd(substream);
+ cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
+
+ w = snd_soc_dai_get_widget(dai, substream->stream);
+ swidget = w->dobj.private;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ snd_hdac_ext_link_stream_start(hext_stream);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ {
+ struct snd_sof_widget *pipe_widget = swidget->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
+
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id,
+ SOF_IPC4_PIPE_PAUSED);
+ if (ret < 0)
+ return ret;
+
+ pipeline->state = SOF_IPC4_PIPE_PAUSED;
+
+ snd_hdac_ext_link_stream_clear(hext_stream);
+
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id,
+ SOF_IPC4_PIPE_RESET);
+ if (ret < 0)
+ return ret;
+
+ pipeline->state = SOF_IPC4_PIPE_RESET;
+
+ ret = hda_link_dma_cleanup(substream, hstream, cpu_dai, codec_dai, false);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: failed to clean up link DMA\n", __func__);
+ return ret;
+ }
+ break;
+ }
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ {
+ struct snd_sof_widget *pipe_widget = swidget->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
+
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id,
+ SOF_IPC4_PIPE_PAUSED);
+ if (ret < 0)
+ return ret;
+
+ pipeline->state = SOF_IPC4_PIPE_PAUSED;
+
+ snd_hdac_ext_link_stream_clear(hext_stream);
+ break;
+ }
+ default:
+ dev_err(sdev->dev, "%s: unknown trigger command %d\n", __func__, cmd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hda_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int ret;
+
+ ret = hda_link_dma_hw_free(substream);
+ if (ret < 0)
+ return ret;
+
+ return hda_dai_hw_free_ipc(substream->stream, dai);
+}
+
+static const struct snd_soc_dai_ops ipc3_hda_dai_ops = {
+ .hw_params = hda_dai_hw_params,
+ .hw_free = hda_dai_hw_free,
+ .trigger = ipc3_hda_dai_trigger,
+ .prepare = hda_dai_prepare,
+};
+
+static int hda_dai_suspend(struct hdac_bus *bus)
+{
+ struct snd_soc_pcm_runtime *rtd;
+ struct hdac_ext_stream *hext_stream;
+ struct hdac_stream *s;
+ int ret;
+
+ /* set internal flag for BE */
+ list_for_each_entry(s, &bus->stream_list, list) {
+ hext_stream = stream_to_hdac_ext_stream(s);
+
+ /*
+ * clear stream. This should already be taken care of for running
+ * streams when the SUSPEND trigger is called. But paused
+ * streams do not get suspended, so this needs to be done
+ * explicitly during suspend.
+ */
+ if (hext_stream->link_substream) {
+ struct snd_soc_dai *cpu_dai;
+ struct snd_soc_dai *codec_dai;
+
+ rtd = asoc_substream_to_rtd(hext_stream->link_substream);
+ cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
+
+ ret = hda_link_dma_cleanup(hext_stream->link_substream, s,
+ cpu_dai, codec_dai, false);
+ if (ret < 0)
+ return ret;
+
+ /* for consistency with TRIGGER_SUSPEND we free DAI resources */
+ ret = hda_dai_hw_free_ipc(hdac_stream(hext_stream)->direction, cpu_dai);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops ipc4_hda_dai_ops = {
+ .hw_params = hda_dai_hw_params,
+ .hw_free = hda_dai_hw_free,
+ .trigger = ipc4_hda_dai_trigger,
+ .prepare = hda_dai_prepare,
+};
+
+#endif
+
+/* only one flag used so far to harden hw_params/hw_free/trigger/prepare */
+struct ssp_dai_dma_data {
+ bool setup;
+};
+
+static int ssp_dai_setup_or_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai,
+ bool setup)
+{
+ struct snd_soc_dapm_widget *w;
+
+ w = snd_soc_dai_get_widget(dai, substream->stream);
+
+ if (setup)
+ return hda_ctrl_dai_widget_setup(w, SOF_DAI_CONFIG_FLAGS_NONE, NULL);
+
+ return hda_ctrl_dai_widget_free(w, SOF_DAI_CONFIG_FLAGS_NONE, NULL);
+}
+
+static int ssp_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct ssp_dai_dma_data *dma_data;
+
+ dma_data = kzalloc(sizeof(*dma_data), GFP_KERNEL);
+ if (!dma_data)
+ return -ENOMEM;
+
+ snd_soc_dai_set_dma_data(dai, substream, dma_data);
+
+ return 0;
+}
+
+static int ssp_dai_setup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai,
+ bool setup)
+{
+ struct ssp_dai_dma_data *dma_data;
+ int ret = 0;
+
+ dma_data = snd_soc_dai_get_dma_data(dai, substream);
+ if (!dma_data) {
+ dev_err(dai->dev, "%s: failed to get dma_data\n", __func__);
+ return -EIO;
+ }
+
+ if (dma_data->setup != setup) {
+ ret = ssp_dai_setup_or_free(substream, dai, setup);
+ if (!ret)
+ dma_data->setup = setup;
+ }
+ return ret;
+}
+
+static int ssp_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ /* params are ignored for now */
+ return ssp_dai_setup(substream, dai, true);
+}
+
+static int ssp_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ /*
+ * the SSP will only be reconfigured during resume operations and
+ * not in case of xruns
+ */
+ return ssp_dai_setup(substream, dai, true);
+}
+
+static int ipc3_ssp_dai_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ if (cmd != SNDRV_PCM_TRIGGER_SUSPEND)
+ return 0;
+
+ return ssp_dai_setup(substream, dai, false);
+}
+
+static int ssp_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ return ssp_dai_setup(substream, dai, false);
+}
+
+static void ssp_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct ssp_dai_dma_data *dma_data;
+
+ dma_data = snd_soc_dai_get_dma_data(dai, substream);
+ if (!dma_data) {
+ dev_err(dai->dev, "%s: failed to get dma_data\n", __func__);
+ return;
+ }
+ snd_soc_dai_set_dma_data(dai, substream, NULL);
+ kfree(dma_data);
+}
+
+static const struct snd_soc_dai_ops ipc3_ssp_dai_ops = {
+ .startup = ssp_dai_startup,
+ .hw_params = ssp_dai_hw_params,
+ .prepare = ssp_dai_prepare,
+ .trigger = ipc3_ssp_dai_trigger,
+ .hw_free = ssp_dai_hw_free,
+ .shutdown = ssp_dai_shutdown,
+};
+
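+/*
+ * Common IPC4 BE trigger handling: pause the pipeline on PAUSE_PUSH and
+ * pause then reset it on STOP/SUSPEND. Also used as the SoundWire trigger
+ * callback.
+ */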
+static int ipc4_be_dai_common_trigger(struct snd_soc_dai *dai, int cmd, int stream)
+{
+ struct snd_sof_widget *pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+ struct snd_sof_widget *swidget;
+ struct snd_soc_dapm_widget *w;
+ struct snd_sof_dev *sdev;
+ int ret;
+
+ w = snd_soc_dai_get_widget(dai, stream);
+ swidget = w->dobj.private;
+ pipe_widget = swidget->pipe_widget;
+ pipeline = pipe_widget->private;
+ sdev = snd_soc_component_get_drvdata(swidget->scomp);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id,
+ SOF_IPC4_PIPE_PAUSED);
+ if (ret < 0)
+ return ret;
+ pipeline->state = SOF_IPC4_PIPE_PAUSED;
+
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id,
+ SOF_IPC4_PIPE_RESET);
+ if (ret < 0)
+ return ret;
+ pipeline->state = SOF_IPC4_PIPE_RESET;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id,
+ SOF_IPC4_PIPE_PAUSED);
+ if (ret < 0)
+ return ret;
+ pipeline->state = SOF_IPC4_PIPE_PAUSED;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ipc4_be_dai_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ return ipc4_be_dai_common_trigger(dai, cmd, substream->stream);
+}
+
+static const struct snd_soc_dai_ops ipc4_dmic_dai_ops = {
+ .trigger = ipc4_be_dai_trigger,
+};
+
+static const struct snd_soc_dai_ops ipc4_ssp_dai_ops = {
+ .trigger = ipc4_be_dai_trigger,
+};
+
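+/* assign the DAI ops for each CPU DAI based on the IPC type and the DAI name */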
+void hda_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops)
+{
+ int i;
+
+ switch (sdev->pdata->ipc_type) {
+ case SOF_IPC:
+ for (i = 0; i < ops->num_drv; i++) {
+ if (strstr(ops->drv[i].name, "SSP")) {
+ ops->drv[i].ops = &ipc3_ssp_dai_ops;
+ continue;
+ }
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ if (strstr(ops->drv[i].name, "iDisp") ||
+ strstr(ops->drv[i].name, "Analog") ||
+ strstr(ops->drv[i].name, "Digital"))
+ ops->drv[i].ops = &ipc3_hda_dai_ops;
+#endif
+ }
+ break;
+ case SOF_INTEL_IPC4:
+ {
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+
+ for (i = 0; i < ops->num_drv; i++) {
+ if (strstr(ops->drv[i].name, "DMIC")) {
+ ops->drv[i].ops = &ipc4_dmic_dai_ops;
+ continue;
+ }
+ if (strstr(ops->drv[i].name, "SSP")) {
+ ops->drv[i].ops = &ipc4_ssp_dai_ops;
+ continue;
+ }
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ if (strstr(ops->drv[i].name, "iDisp") ||
+ strstr(ops->drv[i].name, "Analog") ||
+ strstr(ops->drv[i].name, "Digital"))
+ ops->drv[i].ops = &ipc4_hda_dai_ops;
+#endif
+ }
+
+ if (!hda_use_tplg_nhlt)
+ ipc4_data->nhlt = intel_nhlt_init(sdev->dev);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE))
+ sdw_callback.trigger = ipc4_be_dai_common_trigger;
+
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+void hda_ops_free(struct snd_sof_dev *sdev)
+{
+ if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+
+ if (!hda_use_tplg_nhlt)
+ intel_nhlt_free(ipc4_data->nhlt);
+ }
+}
+EXPORT_SYMBOL_NS(hda_ops_free, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+/*
+ * common dai driver for skl+ platforms.
+ * some products that use this DAI array only physically have a subset of
+ * the DAIs, but no harm is done here by adding the whole set.
+ */
+struct snd_soc_dai_driver skl_dai[] = {
+{
+ .name = "SSP0 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP1 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP2 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP3 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP4 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP5 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "DMIC01 Pin",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 4,
+ },
+},
+{
+ .name = "DMIC16k Pin",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 4,
+ },
+},
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+{
+ .name = "iDisp1 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "iDisp2 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "iDisp3 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "iDisp4 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "Analog CPU DAI",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+},
+{
+ .name = "Digital CPU DAI",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+},
+{
+ .name = "Alt Analog CPU DAI",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+},
+#endif
+};
+
+int hda_dsp_dais_suspend(struct snd_sof_dev *sdev)
+{
+ /*
+ * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
+ * does not issue the TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
+ * Since the component suspend is called last, we can trap this corner case
+ * and force the DAIs to release their resources.
+ */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ int ret;
+
+ ret = hda_dai_suspend(sof_to_bus(sdev));
+ if (ret < 0)
+ return ret;
+#endif
+
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
new file mode 100644
index 000000000..428aee8fd
--- /dev/null
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -0,0 +1,1068 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <trace/events/sof_intel.h>
+#include "../sof-audio.h"
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+
+static bool hda_enable_trace_D0I3_S0;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
+module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
+MODULE_PARM_DESC(enable_trace_D0I3_S0,
+ "SOF HDA enable trace when the DSP is in D0I3 in S0");
+#endif
+
+/*
+ * DSP Core control.
+ */
+
+static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ u32 adspcs;
+ u32 reset;
+ int ret;
+
+ /* set reset bits for cores */
+ reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ reset, reset);
+
+ /* poll with timeout to check if operation successful */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ ((adspcs & reset) == reset),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
+
+ /* has core entered reset ? */
+ adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS);
+ if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
+ HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
+ dev_err(sdev->dev,
+ "error: reset enter failed: core_mask %x adspcs 0x%x\n",
+ core_mask, adspcs);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ unsigned int crst;
+ u32 adspcs;
+ int ret;
+
+ /* clear reset bits for cores */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CRST_MASK(core_mask),
+ 0);
+
+ /* poll with timeout to check if operation successful */
+ crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ !(adspcs & crst),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
+
+ /* has core left reset ? */
+ adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS);
+ if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
+ dev_err(sdev->dev,
+ "error: reset leave failed: core_mask %x adspcs 0x%x\n",
+ core_mask, adspcs);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ /* stall core */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+
+ /* set reset state */
+ return hda_dsp_core_reset_enter(sdev, core_mask);
+}
+
+bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ int val;
+ bool is_enable;
+
+ val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
+
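+/* true only when every bit selected by field(m) is set in v */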
+#define MASK_IS_EQUAL(v, m, field) ({ \
+ u32 _m = field(m); \
+ ((v) & _m) == _m; \
+})
+
+ is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
+ MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
+ !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
+ !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+
+#undef MASK_IS_EQUAL
+
+ dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
+ is_enable, core_mask);
+
+ return is_enable;
+}
+
+int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ int ret;
+
+ /* leave reset state */
+ ret = hda_dsp_core_reset_leave(sdev, core_mask);
+ if (ret < 0)
+ return ret;
+
+ /* run core */
+ dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
+ 0);
+
+ /* is core now running ? */
+ if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
+ hda_dsp_core_stall_reset(sdev, core_mask);
+ dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
+ core_mask);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/*
+ * Power Management.
+ */
+
+int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ unsigned int cpa;
+ u32 adspcs;
+ int ret;
+
+ /* restrict core_mask to host managed cores mask */
+ core_mask &= chip->host_managed_cores_mask;
+ /* return if core_mask is not valid */
+ if (!core_mask)
+ return 0;
+
+ /* update bits */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_SPA_MASK(core_mask),
+ HDA_DSP_ADSPCS_SPA_MASK(core_mask));
+
+ /* poll with timeout to check if operation successful */
+ cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ (adspcs & cpa) == cpa,
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
+
+ /* did core power up ? */
+ adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS);
+ if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
+ HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
+ dev_err(sdev->dev,
+ "error: power up core failed: core_mask %x adspcs 0x%x\n",
+ core_mask, adspcs);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ u32 adspcs;
+ int ret;
+
+ /* update bits */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ !(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+
+ return ret;
+}
+
+int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ int ret;
+
+ /* restrict core_mask to host managed cores mask */
+ core_mask &= chip->host_managed_cores_mask;
+
+ /* return if core_mask is not valid or cores are already enabled */
+ if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
+ return 0;
+
+ /* power up */
+ ret = hda_dsp_core_power_up(sdev, core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
+ core_mask);
+ return ret;
+ }
+
+ return hda_dsp_core_run(sdev, core_mask);
+}
+
+int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
+ unsigned int core_mask)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ int ret;
+
+ /* restrict core_mask to host managed cores mask */
+ core_mask &= chip->host_managed_cores_mask;
+
+ /* return if core_mask is not valid */
+ if (!core_mask)
+ return 0;
+
+ /* place core in reset prior to power down */
+ ret = hda_dsp_core_stall_reset(sdev, core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
+ core_mask);
+ return ret;
+ }
+
+ /* power down core */
+ ret = hda_dsp_core_power_down(sdev, core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
+ core_mask, ret);
+ return ret;
+ }
+
+ /* make sure we are in OFF state */
+ if (hda_dsp_core_is_enabled(sdev, core_mask)) {
+ dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
+ core_mask, ret);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ /* enable IPC DONE and BUSY interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
+ HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);
+
+ /* enable IPC interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
+}
+
+void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ /* disable IPC interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_IPC, 0);
+
+ /* disable IPC BUSY and DONE interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
+}
+
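+/* poll the vendor-specific D0I3C register until the Command-In-Progress bit clears */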
+static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
+
+ while (snd_hdac_chip_readb(bus, VS_D0I3C) & SOF_HDA_VS_D0I3C_CIP) {
+ if (!retry--)
+ return -ETIMEDOUT;
+ usleep_range(10, 15);
+ }
+
+ return 0;
+}
+
+static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_pm_gate pm_gate;
+ struct sof_ipc_reply reply;
+
+ memset(&pm_gate, 0, sizeof(pm_gate));
+
+ /* configure pm_gate ipc message */
+ pm_gate.hdr.size = sizeof(pm_gate);
+ pm_gate.hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE;
+ pm_gate.flags = flags;
+
+ /* send pm_gate ipc to dsp */
+ return sof_ipc_tx_message_no_pm(sdev->ipc, &pm_gate, sizeof(pm_gate),
+ &reply, sizeof(reply));
+}
+
+static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int ret;
+
+ /* Write to D0I3C after Command-In-Progress bit is cleared */
+ ret = hda_dsp_wait_d0i3c_done(sdev);
+ if (ret < 0) {
+ dev_err(bus->dev, "CIP timeout before D0I3C update!\n");
+ return ret;
+ }
+
+ /* Update D0I3C register */
+ snd_hdac_chip_updateb(bus, VS_D0I3C, SOF_HDA_VS_D0I3C_I3, value);
+
+ /* Wait for cmd in progress to be cleared before exiting the function */
+ ret = hda_dsp_wait_d0i3c_done(sdev);
+ if (ret < 0) {
+ dev_err(bus->dev, "CIP timeout after D0I3C update!\n");
+ return ret;
+ }
+
+ trace_sof_intel_D0I3C_updated(sdev, snd_hdac_chip_readb(bus, VS_D0I3C));
+
+ return 0;
+}
+
+static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ u32 flags = 0;
+ int ret;
+ u8 value = 0;
+
+ /*
+ * Sanity check for illegal state transitions
+ * The only allowed transitions are:
+ * 1. D3 -> D0I0
+ * 2. D0I0 -> D0I3
+ * 3. D0I3 -> D0I0
+ */
+ switch (sdev->dsp_power_state.state) {
+ case SOF_DSP_PM_D0:
+ /* Follow the sequence below for D0 substate transitions */
+ break;
+ case SOF_DSP_PM_D3:
+ /* Follow regular flow for D3 -> D0 transition */
+ return 0;
+ default:
+ dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
+ sdev->dsp_power_state.state, target_state->state);
+ return -EINVAL;
+ }
+
+ /* Set flags and register value for D0 target substate */
+ if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
+ value = SOF_HDA_VS_D0I3C_I3;
+
+ /*
+ * Trace DMA needs to be disabled when the DSP enters
+ * D0I3 for S0Ix suspend, but it can be kept enabled
+ * when the DSP enters D0I3 while the system is in S0,
+ * for debug purposes.
+ */
+ if (!sdev->fw_trace_is_supported ||
+ !hda_enable_trace_D0I3_S0 ||
+ sdev->system_suspend_target != SOF_SUSPEND_NONE)
+ flags = HDA_PM_NO_DMA_TRACE;
+ } else {
+ /* prevent power gating in D0I0 */
+ flags = HDA_PM_PPG;
+ }
+
+ /* update D0I3C register */
+ ret = hda_dsp_update_d0i3c_register(sdev, value);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Notify the DSP of the state change.
+ * If this IPC fails, revert the D0I3C register update in order
+ * to prevent partial state change.
+ */
+ ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: PM_GATE ipc error %d\n", ret);
+ goto revert;
+ }
+
+ return ret;
+
+revert:
+ /* fallback to the previous register value */
+ value = value ? 0 : SOF_HDA_VS_D0I3C_I3;
+
+ /*
+ * This can fail but return the IPC error to signal that
+ * the state change failed.
+ */
+ hda_dsp_update_d0i3c_register(sdev, value);
+
+ return ret;
+}
+
+/* helper to log DSP state */
+static void hda_dsp_state_log(struct snd_sof_dev *sdev)
+{
+ switch (sdev->dsp_power_state.state) {
+ case SOF_DSP_PM_D0:
+ switch (sdev->dsp_power_state.substate) {
+ case SOF_HDA_DSP_PM_D0I0:
+ dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
+ break;
+ case SOF_HDA_DSP_PM_D0I3:
+ dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
+ break;
+ default:
+ dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
+ sdev->dsp_power_state.substate);
+ break;
+ }
+ break;
+ case SOF_DSP_PM_D1:
+ dev_dbg(sdev->dev, "Current DSP power state: D1\n");
+ break;
+ case SOF_DSP_PM_D2:
+ dev_dbg(sdev->dev, "Current DSP power state: D2\n");
+ break;
+ case SOF_DSP_PM_D3:
+ dev_dbg(sdev->dev, "Current DSP power state: D3\n");
+ break;
+ default:
+ dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
+ sdev->dsp_power_state.state);
+ break;
+ }
+}
+
+/*
+ * All DSP power state transitions are initiated by the driver.
+ * If the requested state change fails, the error is simply returned.
+ * Further state transitions are attempted only when the set_power_state() op
+ * is called again either because of a new IPC sent to the DSP or
+ * during system suspend/resume.
+ */
+int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ int ret = 0;
+
+ /*
+ * When the DSP is already in D0I3 and the target state is D0I3,
+ * it could be the case that the DSP is in D0I3 during S0
+ * and the system is suspending to S0Ix. Therefore,
+ * hda_dsp_set_D0_state() must be called to disable trace DMA
+ * by sending the PM_GATE IPC to the FW.
+ */
+ if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
+ sdev->system_suspend_target == SOF_SUSPEND_S0IX)
+ goto set_state;
+
+ /*
+ * For all other cases, return without doing anything if
+ * the DSP is already in the target state.
+ */
+ if (target_state->state == sdev->dsp_power_state.state &&
+ target_state->substate == sdev->dsp_power_state.substate)
+ return 0;
+
+set_state:
+ switch (target_state->state) {
+ case SOF_DSP_PM_D0:
+ ret = hda_dsp_set_D0_state(sdev, target_state);
+ break;
+ case SOF_DSP_PM_D3:
+ /* The only allowed transition is: D0I0 -> D3 */
+ if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
+ sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
+ break;
+
+ dev_err(sdev->dev,
+ "error: transition from %d to %d not allowed\n",
+ sdev->dsp_power_state.state, target_state->state);
+ return -EINVAL;
+ default:
+ dev_err(sdev->dev, "error: target state unsupported %d\n",
+ target_state->state);
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "failed to set requested target DSP state %d substate %d\n",
+ target_state->state, target_state->substate);
+ return ret;
+ }
+
+ sdev->dsp_power_state = *target_state;
+ hda_dsp_state_log(sdev);
+ return ret;
+}
+
+/*
+ * Audio DSP states may transform as below:-
+ *
+ * Opportunistic D0I3 in S0
+ * Runtime +---------------------+ Delayed D0i3 work timeout
+ * suspend | +--------------------+
+ * +------------+ D0I0(active) | |
+ * | | <---------------+ |
+ * | +--------> | New IPC | |
+ * | |Runtime +--^--+---------^--+--+ (via mailbox) | |
+ * | |resume | | | | | |
+ * | | | | | | | |
+ * | | System| | | | | |
+ * | | resume| | S3/S0IX | | | |
+ * | | | | suspend | | S0IX | |
+ * | | | | | |suspend | |
+ * | | | | | | | |
+ * | | | | | | | |
+ * +-v---+-----------+--v-------+ | | +------+----v----+
+ * | | | +-----------> |
+ * | D3 (suspended) | | | D0I3 |
+ * | | +--------------+ |
+ * | | System resume | |
+ * +----------------------------+ +----------------+
+ *
+ * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
+ * ignored the suspend trigger. Otherwise the DSP
+ * is in D3.
+ */
+
+static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+#endif
+ int ret, j;
+
+ /*
+ * The memory used for IMR boot loses its content in states deeper than S3,
+ * so we must not try IMR boot on the next power up (as it would fail).
+ *
+ * In case of firmware crash or boot failure, set skip_imr_boot to true as
+ * well in order to re-load the firmware and do a 'cold' boot.
+ */
+ if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
+ sdev->fw_state == SOF_FW_CRASHED ||
+ sdev->fw_state == SOF_FW_BOOT_FAILED)
+ hda->skip_imr_boot = true;
+
+ ret = chip->disable_interrupts(sdev);
+ if (ret < 0)
+ return ret;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ hda_codec_jack_wake_enable(sdev, runtime_suspend);
+
+ /* power down all hda links */
+ snd_hdac_ext_bus_link_power_down_all(bus);
+#endif
+
+ ret = chip->power_down_dsp(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to power down DSP during suspend\n");
+ return ret;
+ }
+
+ /* reset ref counts for all cores */
+ for (j = 0; j < chip->cores_num; j++)
+ sdev->dsp_core_ref_count[j] = 0;
+
+ /* disable ppcap interrupt */
+ hda_dsp_ctrl_ppcap_enable(sdev, false);
+ hda_dsp_ctrl_ppcap_int_enable(sdev, false);
+
+ /* disable hda bus irq and streams */
+ hda_dsp_ctrl_stop_chip(sdev);
+
+ /* disable LP retention mode */
+ snd_sof_pci_update_bits(sdev, PCI_PGCTL,
+ PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);
+
+ /* reset controller */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to reset controller during suspend\n");
+ return ret;
+ }
+
+ /* display codec can be powered off after link reset */
+ hda_codec_i915_display_power(sdev, false);
+
+ return 0;
+}
+
+static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_link *hlink = NULL;
+#endif
+ int ret;
+
+ /* display codec must be powered before link reset */
+ hda_codec_i915_display_power(sdev, true);
+
+ /*
+ * clear TCSEL to clear playback on some HD Audio
+ * codecs. PCI TCSEL is defined in the Intel manuals.
+ */
+ snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
+
+ /* reset and start hda controller */
+ ret = hda_dsp_ctrl_init_chip(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to start controller after resume\n");
+ goto cleanup;
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* check jack status */
+ if (runtime_resume) {
+ hda_codec_jack_wake_enable(sdev, false);
+ if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
+ hda_codec_jack_check(sdev);
+ }
+
+ /* turn off the links that were off before suspend */
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
+ if (!hlink->ref_count)
+ snd_hdac_ext_bus_link_power_down(hlink);
+ }
+
+ /* check dma status and clean up CORB/RIRB buffers */
+ if (!bus->cmd_dma_state)
+ snd_hdac_bus_stop_cmd_io(bus);
+#endif
+
+ /* enable ppcap interrupt */
+ hda_dsp_ctrl_ppcap_enable(sdev, true);
+ hda_dsp_ctrl_ppcap_int_enable(sdev, true);
+
+cleanup:
+ /* display codec can be powered off after controller init */
+ hda_codec_i915_display_power(sdev, false);
+
+ return 0;
+}
+
+int hda_dsp_resume(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ .substate = SOF_HDA_DSP_PM_D0I0,
+ };
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_link *hlink = NULL;
+#endif
+ int ret;
+
+ /* resume from D0I3 */
+ if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* power up links that were active before suspend */
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
+ if (hlink->ref_count) {
+ ret = snd_hdac_ext_bus_link_power_up(hlink);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error %d in %s: failed to power up links",
+ ret, __func__);
+ return ret;
+ }
+ }
+ }
+
+ /* set up CORB/RIRB buffers if they were on before suspend */
+ if (bus->cmd_dma_state)
+ snd_hdac_bus_init_cmd_io(bus);
+#endif
+
+ /* Set DSP power state */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_state);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
+ target_state.state, target_state.substate);
+ return ret;
+ }
+
+ /* restore L1SEN bit */
+ if (hda->l1_support_changed)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, 0);
+
+ /* restore and disable the system wakeup */
+ pci_restore_state(pci);
+ disable_irq_wake(pci->irq);
+ return 0;
+ }
+
+ /* init hda controller. DSP cores will be powered up during fw boot */
+ ret = hda_resume(sdev, false);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
+}
+
+int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
+{
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+ int ret;
+
+ /* init hda controller. DSP cores will be powered up during fw boot */
+ ret = hda_resume(sdev, true);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
+}
+
+int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *hbus = sof_to_bus(sdev);
+
+ if (hbus->codec_powered) {
+ dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
+ (unsigned int)hbus->codec_powered);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D3,
+ };
+ int ret;
+
+ /* cancel any attempt for DSP D0I3 */
+ cancel_delayed_work_sync(&hda->d0i3_work);
+
+ /* stop hda controller and power dsp off */
+ ret = hda_suspend(sdev, true);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
+}
+
+int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = target_state,
+ .substate = target_state == SOF_DSP_PM_D0 ?
+ SOF_HDA_DSP_PM_D0I3 : 0,
+ };
+ int ret;
+
+ /* cancel any attempt for DSP D0I3 */
+ cancel_delayed_work_sync(&hda->d0i3_work);
+
+ if (target_state == SOF_DSP_PM_D0) {
+ /* Set DSP power state */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
+ target_dsp_state.state,
+ target_dsp_state.substate);
+ return ret;
+ }
+
+ /* enable L1SEN to make sure the system can enter S0Ix */
+ hda->l1_support_changed =
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN,
+ HDA_VS_INTEL_EM2_L1SEN);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* stop the CORB/RIRB DMA if it is On */
+ if (bus->cmd_dma_state)
+ snd_hdac_bus_stop_cmd_io(bus);
+
+ /* no link can be powered in s0ix state */
+ ret = snd_hdac_ext_bus_link_power_down_all(bus);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error %d in %s: failed to power down links",
+ ret, __func__);
+ return ret;
+ }
+#endif
+
+ /* enable the system waking up via IPC IRQ */
+ enable_irq_wake(pci->irq);
+ pci_save_state(pci);
+ return 0;
+ }
+
+ /* stop hda controller and power dsp off */
+ ret = hda_suspend(sdev, false);
+ if (ret < 0) {
+ dev_err(bus->dev, "error: suspending dsp\n");
+ return ret;
+ }
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
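+/* return a bitmap of the streams whose DMA is still running */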
+static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *s;
+ unsigned int active_streams = 0;
+ int sd_offset;
+ u32 val;
+
+ list_for_each_entry(s, &bus->stream_list, list) {
+ sd_offset = SOF_STREAM_SD_OFFSET(s);
+ val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ sd_offset);
+ if (val & SOF_HDA_SD_CTL_DMA_START)
+ active_streams |= BIT(s->index);
+ }
+
+ return active_streams;
+}
+
+static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ /*
+ * Do not assume any particular timing between the prior
+ * suspend flow and the running of this quirk function.
+ * The delay is needed in case the controller was just put
+ * into reset before calling this function.
+ */
+ usleep_range(500, 1000);
+
+ /*
+ * Take controller out of reset to flush DMA
+ * transactions.
+ */
+ ret = hda_dsp_ctrl_link_reset(sdev, false);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(500, 1000);
+
+ /* Restore state for shutdown, back to reset */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev)
+{
+ unsigned int active_streams;
+ int ret, ret2;
+
+ /* check if DMA cleanup has been successful */
+ active_streams = hda_dsp_check_for_dma_streams(sdev);
+
+ sdev->system_suspend_target = SOF_SUSPEND_S3;
+ ret = snd_sof_suspend(sdev->dev);
+
+ if (active_streams) {
+ dev_warn(sdev->dev,
+ "There were active DSP streams (%#x) at shutdown, trying to recover\n",
+ active_streams);
+ ret2 = hda_dsp_s5_quirk(sdev);
+ if (ret2 < 0)
+ dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2);
+ }
+
+ return ret;
+}
+
+int hda_dsp_shutdown(struct snd_sof_dev *sdev)
+{
+ sdev->system_suspend_target = SOF_SUSPEND_S3;
+ return snd_sof_suspend(sdev->dev);
+}
+
+int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ /* make sure all DAI resources are freed */
+ ret = hda_dsp_dais_suspend(sdev);
+ if (ret < 0)
+ dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__);
+
+ return ret;
+}
+
+void hda_dsp_d0i3_work(struct work_struct *work)
+{
+ struct sof_intel_hda_dev *hdev = container_of(work,
+ struct sof_intel_hda_dev,
+ d0i3_work.work);
+ struct hdac_bus *bus = &hdev->hbus.core;
+ struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
+ struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ .substate = SOF_HDA_DSP_PM_D0I3,
+ };
+ int ret;
+
+ /* DSP can enter D0I3 only when all the active streams are D0I3-compatible */
+ if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
+ /* remain in D0I0 */
+ return;
+
+ /* This can fail but error cannot be propagated */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_state);
+ if (ret < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: failed to set DSP state %d substate %d\n",
+ target_state.state, target_state.substate);
+}
+
+int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
+{
+ const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
+ int ret, ret1;
+
+ /* power up core */
+ ret = hda_dsp_enable_core(sdev, BIT(core));
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to power up core %d with err: %d\n",
+ core, ret);
+ return ret;
+ }
+
+ /* No need to send IPC for primary core or if FW boot is not complete */
+ if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
+ return 0;
+
+ /* No need to continue if the set_core_state op is not available */
+ if (!pm_ops->set_core_state)
+ return 0;
+
+ /* Now notify DSP for secondary cores */
+ ret = pm_ops->set_core_state(sdev, core, true);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to enable secondary core '%d': %d\n",
+ core, ret);
+ goto power_down;
+ }
+
+ return ret;
+
+power_down:
+ /* power down core if it is host managed and return the original error if this fails too */
+ ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core));
+ if (ret1 < 0)
+ dev_err(sdev->dev, "failed to power down core: %d with err: %d\n", core, ret1);
+
+ return ret;
+}
+
+int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev)
+{
+ hda_sdw_int_enable(sdev, false);
+ hda_dsp_ipc_int_disable(sdev);
+
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c
new file mode 100644
index 000000000..9b3667c70
--- /dev/null
+++ b/sound/soc/sof/intel/hda-ipc.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/sof/ipc4/header.h>
+#include <trace/events/sof_intel.h>
+#include "../ops.h"
+#include "hda.h"
+
+static void hda_dsp_ipc_host_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * tell DSP cmd is done - clear busy
+ * interrupt and send reply msg to dsp
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCT,
+ HDA_DSP_REG_HIPCT_BUSY,
+ HDA_DSP_REG_HIPCT_BUSY);
+
+ /* unmask BUSY interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_BUSY,
+ HDA_DSP_REG_HIPCCTL_BUSY);
+}
+
+static void hda_dsp_ipc_dsp_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * set DONE bit - tell DSP we have received the reply msg
+ * from DSP, and processed it, don't send more reply to host
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCIE,
+ HDA_DSP_REG_HIPCIE_DONE,
+ HDA_DSP_REG_HIPCIE_DONE);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_DONE,
+ HDA_DSP_REG_HIPCCTL_DONE);
+}
+
+int hda_dsp_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ /* send IPC message to DSP */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI,
+ HDA_DSP_REG_HIPCI_BUSY);
+
+ return 0;
+}
+
+int hda_dsp_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct sof_ipc4_msg *msg_data = msg->msg_data;
+
+ /* send the message via mailbox */
+ if (msg_data->data_size)
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
+ msg_data->data_size);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE, msg_data->extension);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI,
+ msg_data->primary | HDA_DSP_REG_HIPCI_BUSY);
+
+ return 0;
+}
+
+void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply reply;
+ struct sof_ipc_cmd_hdr *hdr;
+
+ /*
+ * Sometimes an unexpected IPC reply arrives that does not belong to
+ * any of the IPCs sent by the driver. In this case, the driver must
+ * ignore the reply.
+ */
+ if (!msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
+ return;
+ }
+
+ hdr = msg->msg_data;
+ if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE) ||
+ hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
+ /*
+ * memory windows are powered off before sending IPC reply,
+ * so we can't read the mailbox for CTX_SAVE and PM_GATE
+ * replies.
+ */
+ reply.error = 0;
+ reply.hdr.cmd = SOF_IPC_GLB_REPLY;
+ reply.hdr.size = sizeof(reply);
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+
+ msg->reply_error = 0;
+ } else {
+ snd_sof_ipc_get_reply(sdev);
+ }
+}
+
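+/*
+ * IPC4 handler thread: acknowledge sent messages (DONE) and process
+ * replies and notifications from the DSP (BUSY)
+ */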
+irqreturn_t hda_dsp_ipc4_irq_thread(int irq, void *context)
+{
+ struct sof_ipc4_msg notification_data = {{ 0 }};
+ struct snd_sof_dev *sdev = context;
+ bool ipc_irq = false;
+ u32 hipcie, hipct;
+
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
+ if (hipcie & HDA_DSP_REG_HIPCIE_DONE) {
+ /* DSP received the message */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_DONE, 0);
+ hda_dsp_ipc_dsp_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
+ if (hipct & HDA_DSP_REG_HIPCT_BUSY) {
+ /* Message from DSP (reply or notification) */
+ u32 hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCTE);
+ u32 primary = hipct & HDA_DSP_REG_HIPCT_MSG_MASK;
+ u32 extension = hipcte & HDA_DSP_REG_HIPCTE_MSG_MASK;
+
+ /* mask BUSY interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_BUSY, 0);
+
+ if (primary & SOF_IPC4_MSG_DIR_MASK) {
+ /* Reply received */
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;
+
+ data->primary = primary;
+ data->extension = extension;
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+ snd_sof_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, data->primary);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev,
+ "IPC reply before FW_READY: %#x|%#x\n",
+ primary, extension);
+ }
+ } else {
+ /* Notification received */
+
+ notification_data.primary = primary;
+ notification_data.extension = extension;
+ sdev->ipc->msg.rx_data = &notification_data;
+ snd_sof_ipc_msgs_rx(sdev);
+ sdev->ipc->msg.rx_data = NULL;
+ }
+
+ /* Let DSP know that we have finished processing the message */
+ hda_dsp_ipc_host_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ if (!ipc_irq)
+ /* This interrupt is not shared so no need to return IRQ_NONE. */
+ dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");
+
+ return IRQ_HANDLED;
+}
+
+/* IPC handler thread */
+irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 hipci;
+ u32 hipcie;
+ u32 hipct;
+ u32 hipcte;
+ u32 msg;
+ u32 msg_ext;
+ bool ipc_irq = false;
+
+ /* read IPC status */
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCIE);
+ hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
+ hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI);
+ hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCTE);
+
+ /* is this a reply message from the DSP */
+ if (hipcie & HDA_DSP_REG_HIPCIE_DONE) {
+ msg = hipci & HDA_DSP_REG_HIPCI_MSG_MASK;
+ msg_ext = hipcie & HDA_DSP_REG_HIPCIE_MSG_MASK;
+
+ trace_sof_intel_ipc_firmware_response(sdev, msg, msg_ext);
+
+ /* mask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_DONE, 0);
+
+ /*
+ * Make sure the interrupt thread cannot be preempted between
+ * waking up the sender and re-enabling the interrupt. Also
+ * protect against a theoretical race with sof_ipc_tx_message():
+ * if the DSP is fast enough to receive an IPC message, reply to
+ * it, and the host interrupt processing calls this function on
+ * a different core from the one where the sending is taking
+ * place, the message might not yet be marked as expecting a
+ * reply.
+ */
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ spin_lock_irq(&sdev->ipc_lock);
+
+ /* handle immediate reply from DSP core */
+ hda_dsp_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, msg);
+
+ /* set the done bit */
+ hda_dsp_ipc_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev, "IPC reply before FW_READY: %#x\n",
+ msg);
+ }
+
+ ipc_irq = true;
+ }
+
+ /* is this a new message from DSP */
+ if (hipct & HDA_DSP_REG_HIPCT_BUSY) {
+ msg = hipct & HDA_DSP_REG_HIPCT_MSG_MASK;
+ msg_ext = hipcte & HDA_DSP_REG_HIPCTE_MSG_MASK;
+
+ trace_sof_intel_ipc_firmware_initiated(sdev, msg, msg_ext);
+
+ /* mask BUSY interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_BUSY, 0);
+
+ /* handle messages from DSP */
+ if ((hipct & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ bool non_recoverable = true;
+
+ /*
+ * This is a PANIC message!
+ *
+ * If it arrives during firmware boot and this is not
+ * the last boot attempt, then change non_recoverable
+ * to false as the DSP might be able to boot in the next
+ * iteration(s).
+ */
+ if (sdev->fw_state == SOF_FW_BOOT_IN_PROGRESS &&
+ hda->boot_iteration < HDA_FW_BOOT_ATTEMPTS)
+ non_recoverable = false;
+
+ snd_sof_dsp_panic(sdev, HDA_DSP_PANIC_OFFSET(msg_ext),
+ non_recoverable);
+ } else {
+ /* normal message - process normally */
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ hda_dsp_ipc_host_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ if (!ipc_irq) {
+ /*
+ * This interrupt is not shared so no need to return IRQ_NONE.
+ */
+ dev_dbg_ratelimited(sdev->dev,
+ "nothing to do in IPC IRQ thread\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Check if an IPC IRQ occurred */
+bool hda_dsp_check_ipc_irq(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ bool ret = false;
+ u32 irq_status;
+
+ /* store status */
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS);
+ trace_sof_intel_hda_irq_ipc_check(sdev, irq_status);
+
+ /* invalid message ? */
+ if (irq_status == 0xffffffff)
+ goto out;
+
+ /* IPC message ? */
+ if (irq_status & HDA_DSP_ADSPIS_IPC)
+ ret = true;
+
+ /* CLDMA message ? */
+ if (irq_status & HDA_DSP_ADSPIS_CL_DMA) {
+ hda->code_loading = 0;
+ wake_up(&hda->waitq);
+ ret = false;
+ }
+
+out:
+ return ret;
+}
+
+int hda_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return HDA_DSP_MBOX_UPLINK_OFFSET;
+}
+
+int hda_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return SRAM_WINDOW_OFFSET(id);
+}
+
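+/*
+ * read IPC message data either from the generic DSP mailbox or, for
+ * stream-specific messages, from the stream's position mailbox offset
+ */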
+int hda_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz)
+{
+ if (!substream || !sdev->stream_box.size) {
+ sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
+ } else {
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct sof_intel_hda_stream *hda_stream;
+
+ hda_stream = container_of(hstream,
+ struct sof_intel_hda_stream,
+ hext_stream.hstream);
+
+ /* The stream might already be closed */
+ if (!hstream)
+ return -ESTRPIPE;
+
+ sof_mailbox_read(sdev, hda_stream->sof_intel_stream.posn_offset, p, sz);
+ }
+
+ return 0;
+}
+
+int hda_set_stream_data_offset(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ size_t posn_offset)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct sof_intel_hda_stream *hda_stream;
+
+ hda_stream = container_of(hstream, struct sof_intel_hda_stream,
+ hext_stream.hstream);
+
+ /* check for unaligned offset or overflow */
+ if (posn_offset > sdev->stream_box.size ||
+ posn_offset % sizeof(struct sof_ipc_stream_posn) != 0)
+ return -EINVAL;
+
+ hda_stream->sof_intel_stream.posn_offset = sdev->stream_box.offset + posn_offset;
+
+ dev_dbg(sdev->dev, "pcm: stream dir %d, posn mailbox offset is %zu",
+ substream->stream, hda_stream->sof_intel_stream.posn_offset);
+
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-ipc.h b/sound/soc/sof/intel/hda-ipc.h
new file mode 100644
index 000000000..8ec5e9f6f
--- /dev/null
+++ b/sound/soc/sof/intel/hda-ipc.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2019 Intel Corporation. All rights reserved.
+ *
+ * Author: Keyon Jie <yang.jie@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_HDA_IPC_H
+#define __SOF_INTEL_HDA_IPC_H
+
+/*
+ * Primary register, mapped to
+ * - DIPCTDR (HIPCIDR) in sideband IPC (cAVS 1.8+)
+ * - DIPCT in cAVS 1.5 IPC
+ *
+ * Secondary register, mapped to:
+ * - DIPCTDD (HIPCIDD) in sideband IPC (cAVS 1.8+)
+ * - DIPCTE in cAVS 1.5 IPC
+ */
+
+/* Common bits in primary register */
+
+/* Reserved for doorbell */
+#define HDA_IPC_RSVD_31 BIT(31)
+/* Target, 0 - normal message, 1 - compact message(cAVS compatible) */
+#define HDA_IPC_MSG_COMPACT BIT(30)
+/* Direction, 0 - request, 1 - response */
+#define HDA_IPC_RSP BIT(29)
+
+#define HDA_IPC_TYPE_SHIFT 24
+#define HDA_IPC_TYPE_MASK GENMASK(28, 24)
+#define HDA_IPC_TYPE(x) ((x) << HDA_IPC_TYPE_SHIFT)
+
+#define HDA_IPC_PM_GATE HDA_IPC_TYPE(0x8U)
+
+/* Command specific payload bits in secondary register */
+
+/* Disable DMA tracing (0 - keep tracing, 1 - to disable DMA trace) */
+#define HDA_PM_NO_DMA_TRACE BIT(4)
+/* Prevent clock gating (0 - cg allowed, 1 - DSP clock always on) */
+#define HDA_PM_PCG BIT(3)
+/* Prevent power gating (0 - deep power state transitions allowed) */
+#define HDA_PM_PPG BIT(2)
+/* Indicates whether streaming is active */
+#define HDA_PM_PG_STREAMING BIT(1)
+#define HDA_PM_PG_RSVD BIT(0)
+
+irqreturn_t cnl_ipc_irq_thread(int irq, void *context);
+int cnl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
+void cnl_ipc_dump(struct snd_sof_dev *sdev);
+void cnl_ipc4_dump(struct snd_sof_dev *sdev);
+
+#endif
diff --git a/sound/soc/sof/intel/hda-loader-skl.c b/sound/soc/sof/intel/hda-loader-skl.c
new file mode 100644
index 000000000..0193fb396
--- /dev/null
+++ b/sound/soc/sof/intel/hda-loader-skl.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
+//
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/sof.h>
+#include <sound/pcm_params.h>
+
+#include "../sof-priv.h"
+#include "../ops.h"
+#include "hda.h"
+
+#define HDA_SKL_WAIT_TIMEOUT 500 /* 500 msec */
+#define HDA_SKL_CLDMA_MAX_BUFFER_SIZE (32 * PAGE_SIZE)
+
+/* Stream Reset */
+#define HDA_CL_SD_CTL_SRST_SHIFT 0
+#define HDA_CL_SD_CTL_SRST(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_SRST_SHIFT)
+
+/* Stream Run */
+#define HDA_CL_SD_CTL_RUN_SHIFT 1
+#define HDA_CL_SD_CTL_RUN(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_RUN_SHIFT)
+
+/* Interrupt On Completion Enable */
+#define HDA_CL_SD_CTL_IOCE_SHIFT 2
+#define HDA_CL_SD_CTL_IOCE(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_IOCE_SHIFT)
+
+/* FIFO Error Interrupt Enable */
+#define HDA_CL_SD_CTL_FEIE_SHIFT 3
+#define HDA_CL_SD_CTL_FEIE(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_FEIE_SHIFT)
+
+/* Descriptor Error Interrupt Enable */
+#define HDA_CL_SD_CTL_DEIE_SHIFT 4
+#define HDA_CL_SD_CTL_DEIE(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_DEIE_SHIFT)
+
+/* FIFO Limit Change */
+#define HDA_CL_SD_CTL_FIFOLC_SHIFT 5
+#define HDA_CL_SD_CTL_FIFOLC(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_FIFOLC_SHIFT)
+
+/* Stripe Control */
+#define HDA_CL_SD_CTL_STRIPE_SHIFT 16
+#define HDA_CL_SD_CTL_STRIPE(x) (((x) & 0x3) << \
+ HDA_CL_SD_CTL_STRIPE_SHIFT)
+
+/* Traffic Priority */
+#define HDA_CL_SD_CTL_TP_SHIFT 18
+#define HDA_CL_SD_CTL_TP(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_TP_SHIFT)
+
+/* Bidirectional Direction Control */
+#define HDA_CL_SD_CTL_DIR_SHIFT 19
+#define HDA_CL_SD_CTL_DIR(x) (((x) & 0x1) << \
+ HDA_CL_SD_CTL_DIR_SHIFT)
+
+/* Stream Number */
+#define HDA_CL_SD_CTL_STRM_SHIFT 20
+#define HDA_CL_SD_CTL_STRM(x) (((x) & 0xf) << \
+ HDA_CL_SD_CTL_STRM_SHIFT)
+
+#define HDA_CL_SD_CTL_INT(x) \
+ (HDA_CL_SD_CTL_IOCE(x) | \
+ HDA_CL_SD_CTL_FEIE(x) | \
+ HDA_CL_SD_CTL_DEIE(x))
+
+#define HDA_CL_SD_CTL_INT_MASK \
+ (HDA_CL_SD_CTL_IOCE(1) | \
+ HDA_CL_SD_CTL_FEIE(1) | \
+ HDA_CL_SD_CTL_DEIE(1))
+
+#define DMA_ADDRESS_128_BITS_ALIGNMENT 7
+#define BDL_ALIGN(x) ((x) >> DMA_ADDRESS_128_BITS_ALIGNMENT)
+
+/* Buffer Descriptor List Lower Base Address */
+#define HDA_CL_SD_BDLPLBA_SHIFT 7
+#define HDA_CL_SD_BDLPLBA_MASK GENMASK(31, 7)
+#define HDA_CL_SD_BDLPLBA(x) \
+ ((BDL_ALIGN(lower_32_bits(x)) << HDA_CL_SD_BDLPLBA_SHIFT) & \
+ HDA_CL_SD_BDLPLBA_MASK)
+
+/* Buffer Descriptor List Upper Base Address */
+#define HDA_CL_SD_BDLPUBA(x) \
+ (upper_32_bits(x))
+
+/* Software Position in Buffer Enable */
+#define HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT 0
+#define HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_MASK \
+ (1 << HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT)
+
+#define HDA_CL_SPBFIFO_SPBFCCTL_SPIBE(x) \
+ (((x) << HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT) & \
+ HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_MASK)
+
+#define HDA_CL_DMA_SD_INT_COMPLETE 0x4
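
A quick sanity check on the BDL base-address macros above (numbers are illustrative):
HDA_CL_SD_BDLPLBA() shifts the lower DWORD right by 7 and then back left by 7, i.e. it
simply masks off the low seven bits, so the BDL base must be 128-byte aligned. For a
DMA address of 0x1_2345_67ff, lower_32_bits() gives 0x234567ff, BDL_ALIGN() gives
0x468acf, and the programmed BDLPL value is 0x23456780; HDA_CL_SD_BDLPUBA() programs
the remaining upper DWORD, 0x1.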
+
+static int cl_skl_cldma_setup_bdle(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab_data,
+ __le32 **bdlp, int size, int with_ioc)
+{
+ phys_addr_t addr = virt_to_phys(dmab_data->area);
+ __le32 *bdl = *bdlp;
+
+ /*
+ * This code is simplified by using one fragment of physical memory and assuming
+ * all the code fits. This could be improved with scatter-gather but the firmware
+ * size is limited by DSP memory anyways
+	 * size is limited by DSP memory anyway.
+ bdl[0] = cpu_to_le32(lower_32_bits(addr));
+ bdl[1] = cpu_to_le32(upper_32_bits(addr));
+ bdl[2] = cpu_to_le32(size);
+ bdl[3] = (!with_ioc) ? 0 : cpu_to_le32(0x01);
+
+ return 1; /* one fragment */
+}
+
+static void cl_skl_cldma_stream_run(struct snd_sof_dev *sdev, bool enable)
+{
+ int sd_offset = SOF_HDA_ADSP_LOADER_BASE;
+ unsigned char val;
+ int retries;
+ u32 run = enable ? 0x1 : 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_CTL,
+ HDA_CL_SD_CTL_RUN(1), HDA_CL_SD_CTL_RUN(run));
+
+ retries = 300;
+ do {
+ udelay(3);
+
+		/* wait for the hardware Run bit to match the requested state */
+ val = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_CTL);
+ val &= HDA_CL_SD_CTL_RUN(1);
+ if (enable && val)
+ break;
+ else if (!enable && !val)
+ break;
+ } while (--retries);
+
+ if (retries == 0)
+ dev_err(sdev->dev, "%s: failed to set Run bit=%d enable=%d\n",
+ __func__, val, enable);
+}
+
+static void cl_skl_cldma_stream_clear(struct snd_sof_dev *sdev)
+{
+ int sd_offset = SOF_HDA_ADSP_LOADER_BASE;
+
+ /* make sure Run bit is cleared before setting stream register */
+ cl_skl_cldma_stream_run(sdev, 0);
+
+ /* Disable the Interrupt On Completion, FIFO Error Interrupt,
+ * Descriptor Error Interrupt and set the cldma stream number to 0.
+ */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_CTL,
+ HDA_CL_SD_CTL_INT_MASK, HDA_CL_SD_CTL_INT(0));
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_CTL,
+ HDA_CL_SD_CTL_STRM(0xf), HDA_CL_SD_CTL_STRM(0));
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL, HDA_CL_SD_BDLPLBA(0));
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU, 0);
+
+ /* Set the Cyclic Buffer Length to 0. */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_CBL, 0);
+ /* Set the Last Valid Index. */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_LVI, 0);
+}
+
+static void cl_skl_cldma_setup_spb(struct snd_sof_dev *sdev,
+ unsigned int size, bool enable)
+{
+ int sd_offset = SOF_DSP_REG_CL_SPBFIFO;
+
+ if (enable)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
+ HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
+ HDA_CL_SPBFIFO_SPBFCCTL_SPIBE(1));
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SPBFIFO_SPIB, size);
+}
+
+static void cl_skl_cldma_set_intr(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? HDA_DSP_ADSPIC_CL_DMA : 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_CL_DMA, val);
+}
+
+static void cl_skl_cldma_cleanup_spb(struct snd_sof_dev *sdev)
+{
+ int sd_offset = SOF_DSP_REG_CL_SPBFIFO;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
+ HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
+ HDA_CL_SPBFIFO_SPBFCCTL_SPIBE(0));
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SPBFIFO_SPIB, 0);
+}
+
+static void cl_skl_cldma_setup_controller(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab_bdl,
+ unsigned int max_size, u32 count)
+{
+ int sd_offset = SOF_HDA_ADSP_LOADER_BASE;
+
+ /* Clear the stream first and then set it. */
+ cl_skl_cldma_stream_clear(sdev);
+
+ /* setting the stream register */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
+ HDA_CL_SD_BDLPLBA(dmab_bdl->addr));
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
+ HDA_CL_SD_BDLPUBA(dmab_bdl->addr));
+
+ /* Set the Cyclic Buffer Length. */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_CBL, max_size);
+ /* Set the Last Valid Index. */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_LVI, count - 1);
+
+ /* Set the Interrupt On Completion, FIFO Error Interrupt,
+ * Descriptor Error Interrupt and the cldma stream number.
+ */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_CTL,
+ HDA_CL_SD_CTL_INT_MASK, HDA_CL_SD_CTL_INT(1));
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_CTL,
+ HDA_CL_SD_CTL_STRM(0xf),
+ HDA_CL_SD_CTL_STRM(1));
+}
+
+static int cl_stream_prepare_skl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct snd_dma_buffer *dmab_bdl)
+{
+ unsigned int bufsize = HDA_SKL_CLDMA_MAX_BUFFER_SIZE;
+ __le32 *bdl;
+ int frags;
+ int ret;
+
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev, bufsize, dmab);
+ if (ret < 0) {
+		dev_err(sdev->dev, "%s: failed to alloc fw buffer: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev, bufsize, dmab_bdl);
+ if (ret < 0) {
+		dev_err(sdev->dev, "%s: failed to alloc BDL buffer: %d\n", __func__, ret);
+ snd_dma_free_pages(dmab);
+ return ret;
+ }
+
+ bdl = (__le32 *)dmab_bdl->area;
+ frags = cl_skl_cldma_setup_bdle(sdev, dmab, &bdl, bufsize, 1);
+ cl_skl_cldma_setup_controller(sdev, dmab_bdl, bufsize, frags);
+
+ return ret;
+}
+
+static void cl_cleanup_skl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct snd_dma_buffer *dmab_bdl)
+{
+ cl_skl_cldma_cleanup_spb(sdev);
+ cl_skl_cldma_stream_clear(sdev);
+ snd_dma_free_pages(dmab);
+ snd_dma_free_pages(dmab_bdl);
+}
+
+static int cl_dsp_init_skl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct snd_dma_buffer *dmab_bdl)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ unsigned int status;
+ u32 flags;
+ int ret;
+
+	/* Check if the init_core is already enabled: if so, reset it and make it
+	 * run; if not, power it down and then power it up again.
+	 */
+ if (hda_dsp_core_is_enabled(sdev, chip->init_core_mask)) {
+ /* if enabled, reset it, and run the init_core. */
+ ret = hda_dsp_core_stall_reset(sdev, chip->init_core_mask);
+ if (ret < 0)
+ goto err;
+
+ ret = hda_dsp_core_run(sdev, chip->init_core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: dsp core start failed %d\n", __func__, ret);
+ goto err;
+ }
+ } else {
+		/* if not enabled, power it down first, then power it up and run
+		 * the init_core.
+		 */
+ ret = hda_dsp_core_reset_power_down(sdev, chip->init_core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: dsp core0 disable fail: %d\n", __func__, ret);
+ goto err;
+ }
+ ret = hda_dsp_enable_core(sdev, chip->init_core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: dsp core0 enable fail: %d\n", __func__, ret);
+ goto err;
+ }
+ }
+
+ /* prepare DMA for code loader stream */
+ ret = cl_stream_prepare_skl(sdev, dmab, dmab_bdl);
+ if (ret < 0) {
+		dev_err(sdev->dev, "%s: dma prepare for fw loading failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ /* enable the interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
+
+ /* enable IPC DONE interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ HDA_DSP_REG_HIPCCTL_DONE,
+ HDA_DSP_REG_HIPCCTL_DONE);
+
+ /* enable IPC BUSY interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ HDA_DSP_REG_HIPCCTL_BUSY,
+ HDA_DSP_REG_HIPCCTL_BUSY);
+
+ /* polling the ROM init status information. */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ chip->rom_status_reg, status,
+ (FSR_TO_STATE_CODE(status)
+ == FSR_STATE_INIT_DONE),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ chip->rom_init_timeout *
+ USEC_PER_MSEC);
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX;
+
+ snd_sof_dsp_dbg_dump(sdev, "Boot failed\n", flags);
+ cl_cleanup_skl(sdev, dmab, dmab_bdl);
+ hda_dsp_core_reset_power_down(sdev, chip->init_core_mask);
+ return ret;
+}
+
+static void cl_skl_cldma_fill_buffer(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ unsigned int bufsize,
+ unsigned int copysize,
+ const void *curr_pos,
+ bool intr_enable)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+
+	/* copy this chunk of the image into the DMA buffer */
+	unsigned int size = copysize;
+
+ memcpy(dmab->area, curr_pos, size);
+
+ /* Set the wait condition for every load. */
+ hda->code_loading = 1;
+
+ /* Set the interrupt. */
+ if (intr_enable)
+ cl_skl_cldma_set_intr(sdev, true);
+
+ /* Set the SPB. */
+ cl_skl_cldma_setup_spb(sdev, size, true);
+
+ /* Trigger the code loading stream. */
+ cl_skl_cldma_stream_run(sdev, true);
+}
+
+static int cl_skl_cldma_wait_interruptible(struct snd_sof_dev *sdev,
+ bool intr_wait)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ int sd_offset = SOF_HDA_ADSP_LOADER_BASE;
+ u8 cl_dma_intr_status;
+
+ /*
+ * Wait for CLDMA interrupt to inform the binary segment transfer is
+ * complete.
+ */
+ if (!wait_event_timeout(hda->waitq, !hda->code_loading,
+ msecs_to_jiffies(HDA_SKL_WAIT_TIMEOUT))) {
+ dev_err(sdev->dev, "cldma copy timeout\n");
+ dev_err(sdev->dev, "ROM code=%#x: FW status=%#x\n",
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_ROM_ERROR),
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg));
+ return -EIO;
+ }
+
+ /* now check DMA interrupt status */
+ cl_dma_intr_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS);
+
+ if (!(cl_dma_intr_status & HDA_CL_DMA_SD_INT_COMPLETE)) {
+ dev_err(sdev->dev, "cldma copy failed\n");
+ return -EIO;
+ }
+
+ dev_dbg(sdev->dev, "cldma buffer copy complete\n");
+ return 0;
+}
+
+static int
+cl_skl_cldma_copy_to_buf(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ const void *bin,
+ u32 total_size, u32 bufsize)
+{
+ unsigned int bytes_left = total_size;
+ const void *curr_pos = bin;
+ int ret;
+
+	if (!total_size)
+		return -EINVAL;
+
+ while (bytes_left > 0) {
+ if (bytes_left > bufsize) {
+ dev_dbg(sdev->dev, "cldma copy %#x bytes\n", bufsize);
+
+ cl_skl_cldma_fill_buffer(sdev, dmab, bufsize, bufsize, curr_pos, true);
+
+ ret = cl_skl_cldma_wait_interruptible(sdev, false);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: fw failed to load. %#x bytes remaining\n",
+ __func__, bytes_left);
+ return ret;
+ }
+
+ bytes_left -= bufsize;
+ curr_pos += bufsize;
+ } else {
+ dev_dbg(sdev->dev, "cldma copy %#x bytes\n", bytes_left);
+
+ cl_skl_cldma_set_intr(sdev, false);
+ cl_skl_cldma_fill_buffer(sdev, dmab, bufsize, bytes_left, curr_pos, false);
+ return 0;
+ }
+ }
+
+ return bytes_left;
+}
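
As a worked example of the copy loop above (sizes are illustrative and assume 4 KiB
pages, so HDA_SKL_CLDMA_MAX_BUFFER_SIZE is 128 KiB): a 300 KiB firmware image is
transferred as two full 128 KiB chunks, each followed by a wait for the CLDMA
completion interrupt, plus a final 44 KiB chunk queued with the CLDMA interrupt
disabled; for that last chunk the caller relies on polling the ROM status register
rather than waiting for another CLDMA interrupt.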
+
+static int cl_copy_fw_skl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const struct firmware *fw = plat_data->fw;
+ struct firmware stripped_firmware;
+ unsigned int bufsize = HDA_SKL_CLDMA_MAX_BUFFER_SIZE;
+ int ret;
+
+ stripped_firmware.data = plat_data->fw->data + plat_data->fw_offset;
+ stripped_firmware.size = plat_data->fw->size - plat_data->fw_offset;
+
+ dev_dbg(sdev->dev, "firmware size: %#zx buffer size %#x\n", fw->size, bufsize);
+
+ ret = cl_skl_cldma_copy_to_buf(sdev, dmab, stripped_firmware.data,
+ stripped_firmware.size, bufsize);
+ if (ret < 0)
+ dev_err(sdev->dev, "%s: fw copy failed %d\n", __func__, ret);
+
+ return ret;
+}
+
+int hda_dsp_cl_boot_firmware_skl(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ struct snd_dma_buffer dmab_bdl;
+ struct snd_dma_buffer dmab;
+ unsigned int reg;
+ u32 flags;
+ int ret;
+
+ ret = cl_dsp_init_skl(sdev, &dmab, &dmab_bdl);
+
+	/* retry core enable and ROM load; this has been seen to help */
+ if (ret < 0) {
+ ret = cl_dsp_init_skl(sdev, &dmab, &dmab_bdl);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Error code=%#x: FW status=%#x\n",
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_ROM_ERROR),
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg));
+ dev_err(sdev->dev, "Core En/ROM load fail:%d\n", ret);
+ return ret;
+ }
+ }
+
+ dev_dbg(sdev->dev, "ROM init successful\n");
+
+ /* at this point DSP ROM has been initialized and should be ready for
+ * code loading and firmware boot
+ */
+ ret = cl_copy_fw_skl(sdev, &dmab);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: load firmware failed : %d\n", __func__, ret);
+ goto err;
+ }
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ chip->rom_status_reg, reg,
+ (FSR_TO_STATE_CODE(reg)
+ == FSR_STATE_ROM_BASEFW_ENTERED),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_BASEFW_TIMEOUT_US);
+
+ dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
+
+ cl_skl_cldma_stream_run(sdev, false);
+ cl_cleanup_skl(sdev, &dmab, &dmab_bdl);
+
+ if (!ret)
+ return chip->init_core_mask;
+
+ return ret;
+
+err:
+ flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX;
+
+ snd_sof_dsp_dbg_dump(sdev, "Boot failed\n", flags);
+
+ /* power down DSP */
+ hda_dsp_core_reset_power_down(sdev, chip->init_core_mask);
+ cl_skl_cldma_stream_run(sdev, false);
+ cl_cleanup_skl(sdev, &dmab, &dmab_bdl);
+
+ dev_err(sdev->dev, "%s: load fw failed err: %d\n", __func__, ret);
+ return ret;
+}
diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
new file mode 100644
index 000000000..98812d51b
--- /dev/null
+++ b/sound/soc/sof/intel/hda-loader.c
@@ -0,0 +1,589 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for HDA DSP code loader
+ */
+
+#include <linux/firmware.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/sof.h>
+#include "ext_manifest.h"
+#include "../ops.h"
+#include "../sof-priv.h"
+#include "hda.h"
+
+static void hda_ssp_set_cbp_cfp(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ int i;
+
+ /* DSP is powered up, set all SSPs to clock consumer/codec provider mode */
+ for (i = 0; i < chip->ssp_count; i++) {
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ chip->ssp_base_offset
+ + i * SSP_DEV_MEM_SIZE
+ + SSP_SSC1_OFFSET,
+ SSP_SET_CBP_CFP,
+ SSP_SET_CBP_CFP);
+ }
+}
+
+struct hdac_ext_stream *hda_cl_stream_prepare(struct snd_sof_dev *sdev, unsigned int format,
+ unsigned int size, struct snd_dma_buffer *dmab,
+ int direction)
+{
+ struct hdac_ext_stream *hext_stream;
+ struct hdac_stream *hstream;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ int ret;
+
+ hext_stream = hda_dsp_stream_get(sdev, direction, 0);
+
+ if (!hext_stream) {
+ dev_err(sdev->dev, "error: no stream available\n");
+ return ERR_PTR(-ENODEV);
+ }
+ hstream = &hext_stream->hstream;
+ hstream->substream = NULL;
+
+ /* allocate DMA buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: memory alloc failed: %d\n", ret);
+ goto out_put;
+ }
+
+	hstream->period_bytes = 0; /* initialize period_bytes */
+ hstream->format_val = format;
+ hstream->bufsize = size;
+
+ if (direction == SNDRV_PCM_STREAM_CAPTURE) {
+ ret = hda_dsp_iccmax_stream_hw_params(sdev, hext_stream, dmab, NULL);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: iccmax stream prepare failed: %d\n", ret);
+ goto out_free;
+ }
+ } else {
+ ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
+ goto out_free;
+ }
+ hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_ENABLE, size);
+ }
+
+ return hext_stream;
+
+out_free:
+ snd_dma_free_pages(dmab);
+out_put:
+ hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
+ return ERR_PTR(ret);
+}
+
+/*
+ * The first boot sequence has some extra steps:
+ * power on all host-managed cores, unstall/run only the boot core to boot the
+ * DSP, then power off any non-boot cores that were powered on.
+ */
+int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ unsigned int status, target_status;
+ u32 flags, ipc_hdr, j;
+ unsigned long mask;
+ char *dump_msg;
+ int ret;
+
+	/* step 1: power up all host-managed cores */
+ ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
+ goto err;
+ }
+
+ hda_ssp_set_cbp_cfp(sdev);
+
+ /* step 2: Send ROM_CONTROL command (stream_tag is ignored for IMR boot) */
+ ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
+ if (!imr_boot)
+ ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);
+
+ /* step 3: unset core 0 reset state & unstall/run core 0 */
+ ret = hda_dsp_core_run(sdev, chip->init_core_mask);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev,
+ "error: dsp core start failed %d\n", ret);
+ ret = -EIO;
+ goto err;
+ }
+
+ /* step 4: wait for IPC DONE bit from ROM */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ chip->ipc_ack, status,
+ ((status & chip->ipc_ack_mask)
+ == chip->ipc_ack_mask),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_INIT_TIMEOUT_US);
+
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev,
+ "error: %s: timeout for HIPCIE done\n",
+ __func__);
+ goto err;
+ }
+
+ /* set DONE bit to clear the reply IPC message */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ chip->ipc_ack,
+ chip->ipc_ack_mask,
+ chip->ipc_ack_mask);
+
+ /* step 5: power down cores that are no longer needed */
+ ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask &
+ ~(chip->init_core_mask));
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev,
+ "error: dsp core x power down failed\n");
+ goto err;
+ }
+
+ /* step 6: enable IPC interrupts */
+ hda_dsp_ipc_int_enable(sdev);
+
+ /*
+ * step 7:
+ * - Cold/Full boot: wait for ROM init to proceed to download the firmware
+ * - IMR boot: wait for ROM firmware entered (firmware booted up from IMR)
+ */
+ if (imr_boot)
+ target_status = FSR_STATE_FW_ENTERED;
+ else
+ target_status = FSR_STATE_INIT_DONE;
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ chip->rom_status_reg, status,
+ (FSR_TO_STATE_CODE(status) == target_status),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ chip->rom_init_timeout *
+ USEC_PER_MSEC);
+ if (!ret) {
+ /* set enabled cores mask and increment ref count for cores in init_core_mask */
+ sdev->enabled_cores_mask |= chip->init_core_mask;
+ mask = sdev->enabled_cores_mask;
+ for_each_set_bit(j, &mask, SOF_MAX_DSP_NUM_CORES)
+ sdev->dsp_core_ref_count[j]++;
+ return 0;
+ }
+
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev,
+ "%s: timeout with rom_status_reg (%#x) read\n",
+ __func__, chip->rom_status_reg);
+
+err:
+ flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;
+
+ /* after max boot attempts make sure that the dump is printed */
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ flags &= ~SOF_DBG_DUMP_OPTIONAL;
+
+ dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
+ hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
+ snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
+ hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);
+
+ kfree(dump_msg);
+ return ret;
+}
+
+static int cl_trigger(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream, int cmd)
+{
+ struct hdac_stream *hstream = &hext_stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+
+	/* the code loader is a special case that reuses the stream ops */
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ 1 << hstream->index,
+ 1 << hstream->index);
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ hstream->running = true;
+ return 0;
+ default:
+ return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
+ }
+}
+
+int hda_cl_cleanup(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
+ struct hdac_ext_stream *hext_stream)
+{
+ struct hdac_stream *hstream = &hext_stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ int ret = 0;
+
+ if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
+ else
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_SD_CTL_DMA_START, 0);
+
+ hda_dsp_stream_put(sdev, hstream->direction, hstream->stream_tag);
+ hstream->running = 0;
+ hstream->substream = NULL;
+
+ /* reset BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL, 0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU, 0);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0);
+ snd_dma_free_pages(dmab);
+ dmab->area = NULL;
+ hstream->bufsize = 0;
+ hstream->format_val = 0;
+
+ return ret;
+}
+
+int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ unsigned int reg;
+ int ret, status;
+
+ ret = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_START);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: DMA trigger start failed\n");
+ return ret;
+ }
+
+ status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ chip->rom_status_reg, reg,
+ (FSR_TO_STATE_CODE(reg) == FSR_STATE_FW_ENTERED),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_BASEFW_TIMEOUT_US);
+
+ /*
+ * even in case of errors we still need to stop the DMAs,
+ * but we return the initial error should the DMA stop also fail
+ */
+
+ if (status < 0) {
+ dev_err(sdev->dev,
+ "%s: timeout with rom_status_reg (%#x) read\n",
+ __func__, chip->rom_status_reg);
+ }
+
+ ret = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: DMA trigger stop failed\n");
+ if (!status)
+ status = ret;
+ }
+
+ return status;
+}
+
+int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ struct hdac_ext_stream *iccmax_stream;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct firmware stripped_firmware;
+ struct snd_dma_buffer dmab_bdl;
+ int ret, ret1;
+ u8 original_gb;
+
+ /* save the original LTRP guardband value */
+ original_gb = snd_hdac_chip_readb(bus, VS_LTRP) & HDA_VS_INTEL_LTRP_GB_MASK;
+
+ if (plat_data->fw->size <= plat_data->fw_offset) {
+ dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
+ return -EINVAL;
+ }
+
+ stripped_firmware.size = plat_data->fw->size - plat_data->fw_offset;
+
+ /* prepare capture stream for ICCMAX */
+ iccmax_stream = hda_cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT, stripped_firmware.size,
+ &dmab_bdl, SNDRV_PCM_STREAM_CAPTURE);
+ if (IS_ERR(iccmax_stream)) {
+ dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n");
+ return PTR_ERR(iccmax_stream);
+ }
+
+ ret = hda_dsp_cl_boot_firmware(sdev);
+
+ /*
+ * Perform iccmax stream cleanup. This should be done even if firmware loading fails.
+ * If the cleanup also fails, we return the initial error
+ */
+ ret1 = hda_cl_cleanup(sdev, &dmab_bdl, iccmax_stream);
+ if (ret1 < 0) {
+ dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n");
+
+ /* set return value to indicate cleanup failure */
+ if (!ret)
+ ret = ret1;
+ }
+
+ /* restore the original guardband value after FW boot */
+ snd_hdac_chip_updateb(bus, VS_LTRP, HDA_VS_INTEL_LTRP_GB_MASK, original_gb);
+
+ return ret;
+}
+
+static int hda_dsp_boot_imr(struct snd_sof_dev *sdev)
+{
+ const struct sof_intel_dsp_desc *chip_info;
+ int ret;
+
+ chip_info = get_chip_info(sdev->pdata);
+ if (chip_info->cl_init)
+ ret = chip_info->cl_init(sdev, 0, true);
+ else
+ ret = -EINVAL;
+
+ if (!ret)
+ hda_sdw_process_wakeen(sdev);
+
+ return ret;
+}
+
+int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const struct sof_dev_desc *desc = plat_data->desc;
+ const struct sof_intel_dsp_desc *chip_info;
+ struct hdac_ext_stream *hext_stream;
+ struct firmware stripped_firmware;
+ struct snd_dma_buffer dmab;
+ int ret, ret1, i;
+
+ if (hda->imrboot_supported && !sdev->first_boot && !hda->skip_imr_boot) {
+ dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
+ hda->boot_iteration = 0;
+ ret = hda_dsp_boot_imr(sdev);
+ if (!ret)
+ return 0;
+
+ dev_warn(sdev->dev, "IMR restore failed, trying to cold boot\n");
+ }
+
+ chip_info = desc->chip_info;
+
+ if (plat_data->fw->size <= plat_data->fw_offset) {
+ dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
+ return -EINVAL;
+ }
+
+ stripped_firmware.data = plat_data->fw->data + plat_data->fw_offset;
+ stripped_firmware.size = plat_data->fw->size - plat_data->fw_offset;
+
+ /* init for booting wait */
+ init_waitqueue_head(&sdev->boot_wait);
+
+ /* prepare DMA for code loader stream */
+ hext_stream = hda_cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT,
+ stripped_firmware.size,
+ &dmab, SNDRV_PCM_STREAM_PLAYBACK);
+ if (IS_ERR(hext_stream)) {
+ dev_err(sdev->dev, "error: dma prepare for fw loading failed\n");
+ return PTR_ERR(hext_stream);
+ }
+
+ memcpy(dmab.area, stripped_firmware.data,
+ stripped_firmware.size);
+
+ /* try ROM init a few times before giving up */
+ for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
+ dev_dbg(sdev->dev,
+ "Attempting iteration %d of Core En/ROM load...\n", i);
+
+ hda->boot_iteration = i + 1;
+ if (chip_info->cl_init)
+ ret = chip_info->cl_init(sdev, hext_stream->hstream.stream_tag, false);
+ else
+ ret = -EINVAL;
+
+ /* don't retry anymore if successful */
+ if (!ret)
+ break;
+ }
+
+ if (i == HDA_FW_BOOT_ATTEMPTS) {
+ dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
+ i, ret);
+ goto cleanup;
+ }
+
+ /*
+	 * When a SoundWire link is in clock-stop state, a Slave
+	 * device may trigger in-band wakes for events such as jack
+	 * insertion or acoustic event detection. Such an event leads
+	 * to a WAKEEN interrupt, handled by the PCI device and routed
+	 * to PME if the PCI device is in D3. The audio PCI driver's
+	 * resume function is then invoked by ACPI for the PME event;
+	 * it initializes the device and processes the WAKEEN interrupt.
+	 *
+	 * The WAKEEN interrupt should be processed ASAP to prevent an
+	 * interrupt flood, otherwise other interrupts, such as IPC,
+	 * cannot work normally. The WAKEEN is handled after the ROM
+	 * is initialized successfully, which ensures the power rails are
+	 * enabled before the SoundWire SHIM registers are accessed.
+ */
+ if (!sdev->first_boot)
+ hda_sdw_process_wakeen(sdev);
+
+ /*
+ * Set the boot_iteration to the last attempt, indicating that the
+ * DSP ROM has been initialized and from this point there will be no
+ * retry done to boot.
+ *
+ * Continue with code loading and firmware boot
+ */
+ hda->boot_iteration = HDA_FW_BOOT_ATTEMPTS;
+ ret = hda_cl_copy_fw(sdev, hext_stream);
+ if (!ret) {
+ dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
+ hda->skip_imr_boot = false;
+ } else {
+ snd_sof_dsp_dbg_dump(sdev, "Firmware download failed",
+ SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX);
+ hda->skip_imr_boot = true;
+ }
+
+cleanup:
+ /*
+ * Perform codeloader stream cleanup.
+ * This should be done even if firmware loading fails.
+ * If the cleanup also fails, we return the initial error
+ */
+ ret1 = hda_cl_cleanup(sdev, &dmab, hext_stream);
+ if (ret1 < 0) {
+ dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");
+
+ /* set return value to indicate cleanup failure */
+ if (!ret)
+ ret = ret1;
+ }
+
+ /*
+	 * return the init core mask if both the fw copy
+	 * and the stream cleanup are successful
+ */
+ if (!ret)
+ return chip_info->init_core_mask;
+
+ /* disable DSP */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR,
+ SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_GPROCEN, 0);
+ return ret;
+}
+
+/* pre fw run operations */
+int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev)
+{
+ /* disable clock gating and power gating */
+ return hda_dsp_ctrl_clock_power_gating(sdev, false);
+}
+
+/* post fw run operations */
+int hda_dsp_post_fw_run(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ if (sdev->first_boot) {
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ ret = hda_sdw_startup(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: could not startup SoundWire links\n");
+ return ret;
+ }
+
+ /* Check if IMR boot is usable */
+ if (!sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT) &&
+ (sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT ||
+ sdev->pdata->ipc_type == SOF_INTEL_IPC4))
+ hdev->imrboot_supported = true;
+ }
+
+ hda_sdw_int_enable(sdev, true);
+
+ /* re-enable clock gating and power gating */
+ return hda_dsp_ctrl_clock_power_gating(sdev, true);
+}
+
+int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct sof_ext_man_cavs_config_data *config_data =
+ container_of(hdr, struct sof_ext_man_cavs_config_data, hdr);
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ int i, elem_num;
+
+ /* calculate total number of config data elements */
+ elem_num = (hdr->size - sizeof(struct sof_ext_man_elem_header))
+ / sizeof(struct sof_config_elem);
+ if (elem_num <= 0) {
+ dev_err(sdev->dev, "cavs config data is inconsistent: %d\n", elem_num);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < elem_num; i++)
+ switch (config_data->elems[i].token) {
+ case SOF_EXT_MAN_CAVS_CONFIG_EMPTY:
+ /* skip empty token */
+ break;
+ case SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO:
+ hda->clk_config_lpro = config_data->elems[i].value;
+ dev_dbg(sdev->dev, "FW clock config: %s\n",
+ hda->clk_config_lpro ? "LPRO" : "HPRO");
+ break;
+ case SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE:
+ case SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE:
+			/* These elements are defined but not used yet; no warning is required */
+ break;
+ default:
+ dev_info(sdev->dev, "unsupported token type: %d\n",
+ config_data->elems[i].token);
+ }
+
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
new file mode 100644
index 000000000..0a9c80216
--- /dev/null
+++ b/sound/soc/sof/intel/hda-pcm.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <linux/moduleparam.h>
+#include <sound/hda_register.h>
+#include <sound/pcm_params.h>
+#include <trace/events/sof_intel.h>
+#include "../sof-audio.h"
+#include "../ops.h"
+#include "hda.h"
+
+#define SDnFMT_BASE(x) ((x) << 14)
+#define SDnFMT_MULT(x) (((x) - 1) << 11)
+#define SDnFMT_DIV(x) (((x) - 1) << 8)
+#define SDnFMT_BITS(x) ((x) << 4)
+#define SDnFMT_CHAN(x) ((x) << 0)
+
+static bool hda_always_enable_dmi_l1;
+module_param_named(always_enable_dmi_l1, hda_always_enable_dmi_l1, bool, 0444);
+MODULE_PARM_DESC(always_enable_dmi_l1, "SOF HDA always enable DMI l1");
+
+static bool hda_disable_rewinds = IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_DISABLE_REWINDS);
+module_param_named(disable_rewinds, hda_disable_rewinds, bool, 0444);
+MODULE_PARM_DESC(disable_rewinds, "SOF HDA disable rewinds");
+
+u32 hda_dsp_get_mult_div(struct snd_sof_dev *sdev, int rate)
+{
+ switch (rate) {
+ case 8000:
+ return SDnFMT_DIV(6);
+ case 9600:
+ return SDnFMT_DIV(5);
+ case 11025:
+ return SDnFMT_BASE(1) | SDnFMT_DIV(4);
+ case 16000:
+ return SDnFMT_DIV(3);
+ case 22050:
+ return SDnFMT_BASE(1) | SDnFMT_DIV(2);
+ case 32000:
+ return SDnFMT_DIV(3) | SDnFMT_MULT(2);
+ case 44100:
+ return SDnFMT_BASE(1);
+ case 48000:
+ return 0;
+ case 88200:
+ return SDnFMT_BASE(1) | SDnFMT_MULT(2);
+ case 96000:
+ return SDnFMT_MULT(2);
+ case 176400:
+ return SDnFMT_BASE(1) | SDnFMT_MULT(4);
+ case 192000:
+ return SDnFMT_MULT(4);
+ default:
+ dev_warn(sdev->dev, "can't find div rate %d using 48kHz\n",
+ rate);
+ return 0; /* use 48KHz if not found */
+ }
+}
+
+u32 hda_dsp_get_bits(struct snd_sof_dev *sdev, int sample_bits)
+{
+ switch (sample_bits) {
+ case 8:
+ return SDnFMT_BITS(0);
+ case 16:
+ return SDnFMT_BITS(1);
+ case 20:
+ return SDnFMT_BITS(2);
+ case 24:
+ return SDnFMT_BITS(3);
+ case 32:
+ return SDnFMT_BITS(4);
+ default:
+ dev_warn(sdev->dev, "can't find %d bits using 16bit\n",
+ sample_bits);
+ return SDnFMT_BITS(1); /* use 16bits format if not found */
+ }
+}
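
The two helpers above provide the rate and bit-depth fields of the stream SDnFMT
register; callers OR in the channel count minus one, as hda_dsp_pcm_hw_params does
below. A quick check with concrete values (illustrative only, assuming sdev is in
scope):

	/* 48 kHz, 16-bit, stereo: rate bits are 0, SDnFMT_BITS(1) is 0x10 */
	u32 format_val = hda_dsp_get_mult_div(sdev, 48000) |
			 hda_dsp_get_bits(sdev, 16) | (2 - 1);	/* 0x11 */

	/* 44.1 kHz, 24-bit, stereo: SDnFMT_BASE(1) is 0x4000, SDnFMT_BITS(3) is 0x30 */
	format_val = hda_dsp_get_mult_div(sdev, 44100) |
		     hda_dsp_get_bits(sdev, 24) | (2 - 1);	/* 0x4031 */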
+
+int hda_dsp_pcm_hw_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream);
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct snd_dma_buffer *dmab;
+ int ret;
+ u32 size, rate, bits;
+
+ size = params_buffer_bytes(params);
+ rate = hda_dsp_get_mult_div(sdev, params_rate(params));
+ bits = hda_dsp_get_bits(sdev, params_width(params));
+
+ hstream->substream = substream;
+
+ dmab = substream->runtime->dma_buffer_p;
+
+ hstream->format_val = rate | bits | (params_channels(params) - 1);
+ hstream->bufsize = size;
+ hstream->period_bytes = params_period_bytes(params);
+ hstream->no_period_wakeup =
+ (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
+ (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
+
+ ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, params);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
+ return ret;
+ }
+
+ /* enable SPIB when rewinds are disabled */
+ if (hda_disable_rewinds)
+ hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_ENABLE, 0);
+ else
+ hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
+
+ if (hda)
+ platform_params->no_ipc_position = hda->no_ipc_position;
+
+ platform_params->stream_tag = hstream->stream_tag;
+
+ return 0;
+}
+
+/* update SPIB register with appl position */
+int hda_dsp_pcm_ack(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ ssize_t appl_pos, buf_size;
+ u32 spib;
+
+ appl_pos = frames_to_bytes(runtime, runtime->control->appl_ptr);
+ buf_size = frames_to_bytes(runtime, runtime->buffer_size);
+
+ spib = appl_pos % buf_size;
+
+ /* Allowable value for SPIB is 1 byte to max buffer size */
+ if (!spib)
+ spib = buf_size;
+
+ sof_io_write(sdev, hext_stream->spib_addr, spib);
+
+ return 0;
+}
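
The modulo above maps the application pointer into the ring buffer, with one special
case: SPIB must never be programmed to zero, so an application pointer sitting exactly
on a buffer boundary is reported as a full buffer. With an illustrative 64 KiB buffer:
appl_pos = 16384 gives spib = 16384; appl_pos = 65536 gives 65536 % 65536 = 0 and
therefore spib = 65536 (full buffer); appl_pos = 81920 wraps back to spib = 16384.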
+
+int hda_dsp_pcm_trigger(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream);
+
+ return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
+}
+
+snd_pcm_uframes_t hda_dsp_pcm_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_component *scomp = sdev->component;
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct snd_sof_pcm *spcm;
+ snd_pcm_uframes_t pos;
+
+ spcm = snd_sof_find_spcm_dai(scomp, rtd);
+ if (!spcm) {
+ dev_warn_ratelimited(sdev->dev, "warn: can't find PCM with DAI ID %d\n",
+ rtd->dai_link->id);
+ return 0;
+ }
+
+ if (hda && !hda->no_ipc_position) {
+ /* read position from IPC position */
+ pos = spcm->stream[substream->stream].posn.host_posn;
+ goto found;
+ }
+
+ pos = hda_dsp_stream_get_position(hstream, substream->stream, true);
+found:
+ pos = bytes_to_frames(substream->runtime, pos);
+
+ trace_sof_intel_hda_dsp_pcm(sdev, hstream, substream, pos);
+ return pos;
+}
+
+int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_component *scomp = sdev->component;
+ struct hdac_ext_stream *dsp_stream;
+ struct snd_sof_pcm *spcm;
+ int direction = substream->stream;
+ u32 flags = 0;
+
+ spcm = snd_sof_find_spcm_dai(scomp, rtd);
+ if (!spcm) {
+ dev_err(sdev->dev, "error: can't find PCM with DAI ID %d\n", rtd->dai_link->id);
+ return -EINVAL;
+ }
+
+ /*
+ * if we want the .ack to work, we need to prevent the control from being mapped.
+ * The status can still be mapped.
+ */
+ if (hda_disable_rewinds)
+ runtime->hw.info |= SNDRV_PCM_INFO_NO_REWINDS | SNDRV_PCM_INFO_SYNC_APPLPTR;
+
+ /*
+ * All playback streams are DMI L1 capable, capture streams need
+ * pause push/release to be disabled
+ */
+ if (hda_always_enable_dmi_l1 && direction == SNDRV_PCM_STREAM_CAPTURE)
+ runtime->hw.info &= ~SNDRV_PCM_INFO_PAUSE;
+
+ if (hda_always_enable_dmi_l1 ||
+ direction == SNDRV_PCM_STREAM_PLAYBACK ||
+ spcm->stream[substream->stream].d0i3_compatible)
+ flags |= SOF_HDA_STREAM_DMI_L1_COMPATIBLE;
+
+ dsp_stream = hda_dsp_stream_get(sdev, direction, flags);
+ if (!dsp_stream) {
+ dev_err(sdev->dev, "error: no stream available\n");
+ return -ENODEV;
+ }
+
+ /* minimum as per HDA spec */
+ snd_pcm_hw_constraint_step(substream->runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 4);
+
+ /* avoid circular buffer wrap in middle of period */
+ snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+
+	/* bind pcm substream to hda stream */
+ substream->runtime->private_data = &dsp_stream->hstream;
+ return 0;
+}
+
+int hda_dsp_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ int direction = substream->stream;
+ int ret;
+
+ ret = hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
+
+ if (ret) {
+ dev_dbg(sdev->dev, "stream %s not opened!\n", substream->name);
+ return -ENODEV;
+ }
+
+	/* unbind pcm substream from hda stream */
+ substream->runtime->private_data = NULL;
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-probes.c b/sound/soc/sof/intel/hda-probes.c
new file mode 100644
index 000000000..56a533c63
--- /dev/null
+++ b/sound/soc/sof/intel/hda-probes.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2019-2021 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+// Converted to SOF client:
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/soc.h>
+#include "../sof-priv.h"
+#include "../sof-client-probes.h"
+#include "../sof-client.h"
+#include "hda.h"
+
+static inline struct hdac_ext_stream *
+hda_compr_get_stream(struct snd_compr_stream *cstream)
+{
+ return cstream->runtime->private_data;
+}
+
+static int hda_probes_compr_startup(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai, u32 *stream_id)
+{
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+ struct hdac_ext_stream *hext_stream;
+
+ hext_stream = hda_dsp_stream_get(sdev, cstream->direction, 0);
+ if (!hext_stream)
+ return -EBUSY;
+
+ hdac_stream(hext_stream)->curr_pos = 0;
+ hdac_stream(hext_stream)->cstream = cstream;
+ cstream->runtime->private_data = hext_stream;
+
+ *stream_id = hdac_stream(hext_stream)->stream_tag;
+
+ return 0;
+}
+
+static int hda_probes_compr_shutdown(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *hext_stream = hda_compr_get_stream(cstream);
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+ int ret;
+
+ ret = hda_dsp_stream_put(sdev, cstream->direction,
+ hdac_stream(hext_stream)->stream_tag);
+ if (ret < 0) {
+ dev_dbg(sdev->dev, "stream put failed: %d\n", ret);
+ return ret;
+ }
+
+ hdac_stream(hext_stream)->cstream = NULL;
+ cstream->runtime->private_data = NULL;
+
+ return 0;
+}
+
+static int hda_probes_compr_set_params(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *hext_stream = hda_compr_get_stream(cstream);
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+ struct hdac_stream *hstream = hdac_stream(hext_stream);
+ struct snd_dma_buffer *dmab;
+ u32 bits, rate;
+ int bps, ret;
+
+ dmab = cstream->runtime->dma_buffer_p;
+ /* compr params do not store bit depth, default to S32_LE */
+ bps = snd_pcm_format_physical_width(SNDRV_PCM_FORMAT_S32_LE);
+ if (bps < 0)
+ return bps;
+ bits = hda_dsp_get_bits(sdev, bps);
+ rate = hda_dsp_get_mult_div(sdev, params->codec.sample_rate);
+
+ hstream->format_val = rate | bits | (params->codec.ch_out - 1);
+ hstream->bufsize = cstream->runtime->buffer_size;
+ hstream->period_bytes = cstream->runtime->fragment_size;
+ hstream->no_period_wakeup = 0;
+
+ ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hda_probes_compr_trigger(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *hext_stream = hda_compr_get_stream(cstream);
+ struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
+
+ return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
+}
+
+static int hda_probes_compr_pointer(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *hext_stream = hda_compr_get_stream(cstream);
+ struct snd_soc_pcm_stream *pstream;
+
+ pstream = &dai->driver->capture;
+ tstamp->copied_total = hdac_stream(hext_stream)->curr_pos;
+ tstamp->sampling_rate = snd_pcm_rate_bit_to_rate(pstream->rates);
+
+ return 0;
+}
+
+/* SOF client implementation */
+static const struct sof_probes_host_ops hda_probes_ops = {
+ .startup = hda_probes_compr_startup,
+ .shutdown = hda_probes_compr_shutdown,
+ .set_params = hda_probes_compr_set_params,
+ .trigger = hda_probes_compr_trigger,
+ .pointer = hda_probes_compr_pointer,
+};
+
+int hda_probes_register(struct snd_sof_dev *sdev)
+{
+ return sof_client_dev_register(sdev, "hda-probes", 0, &hda_probes_ops,
+ sizeof(hda_probes_ops));
+}
+
+void hda_probes_unregister(struct snd_sof_dev *sdev)
+{
+ sof_client_dev_unregister(sdev, "hda-probes", 0);
+}
+
+MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
new file mode 100644
index 000000000..be60e7785
--- /dev/null
+++ b/sound/soc/sof/intel/hda-stream.c
@@ -0,0 +1,1099 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <linux/pm_runtime.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/sof.h>
+#include <trace/events/sof_intel.h>
+#include "../ops.h"
+#include "../sof-audio.h"
+#include "hda.h"
+
+#define HDA_LTRP_GB_VALUE_US 95
+
+static inline const char *hda_hstream_direction_str(struct hdac_stream *hstream)
+{
+ if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
+ return "Playback";
+ else
+ return "Capture";
+}
+
+static char *hda_hstream_dbg_get_stream_info_str(struct hdac_stream *hstream)
+{
+ struct snd_soc_pcm_runtime *rtd;
+
+ if (hstream->substream)
+ rtd = asoc_substream_to_rtd(hstream->substream);
+ else if (hstream->cstream)
+ rtd = hstream->cstream->private_data;
+ else
+ /* Non audio DMA user, like dma-trace */
+ return kasprintf(GFP_KERNEL, "-- (%s, stream_tag: %u)",
+ hda_hstream_direction_str(hstream),
+ hstream->stream_tag);
+
+ return kasprintf(GFP_KERNEL, "dai_link \"%s\" (%s, stream_tag: %u)",
+ rtd->dai_link->name, hda_hstream_direction_str(hstream),
+ hstream->stream_tag);
+}
+
+/*
+ * set up the BDL entries for a chunk of a stream buffer
+ */
+static int hda_setup_bdle(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct hdac_stream *hstream,
+ struct sof_intel_dsp_bdl **bdlp,
+ int offset, int size, int ioc)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct sof_intel_dsp_bdl *bdl = *bdlp;
+
+ while (size > 0) {
+ dma_addr_t addr;
+ int chunk;
+
+ if (hstream->frags >= HDA_DSP_MAX_BDL_ENTRIES) {
+ dev_err(sdev->dev, "error: stream frags exceeded\n");
+ return -EINVAL;
+ }
+
+ addr = snd_sgbuf_get_addr(dmab, offset);
+ /* program BDL addr */
+ bdl->addr_l = cpu_to_le32(lower_32_bits(addr));
+ bdl->addr_h = cpu_to_le32(upper_32_bits(addr));
+ /* program BDL size */
+ chunk = snd_sgbuf_get_chunk_size(dmab, offset, size);
+ /* one BDLE should not cross 4K boundary */
+ if (bus->align_bdle_4k) {
+ u32 remain = 0x1000 - (offset & 0xfff);
+
+ if (chunk > remain)
+ chunk = remain;
+ }
+ bdl->size = cpu_to_le32(chunk);
+ /* only program IOC when the whole segment is processed */
+ size -= chunk;
+ bdl->ioc = (size || !ioc) ? 0 : cpu_to_le32(0x01);
+ bdl++;
+ hstream->frags++;
+ offset += chunk;
+ }
+
+ *bdlp = bdl;
+ return offset;
+}
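
When bus->align_bdle_4k is set, a chunk returned by snd_sgbuf_get_chunk_size() that
would straddle a 4 KiB boundary is split across entries. With illustrative numbers:
at offset = 0x0f00 with 0x400 bytes left in the segment, remain = 0x1000 - 0x0f00 =
0x100, so the first BDL entry covers 0x100 bytes and the next loop iteration programs
a second entry for the remaining 0x300 bytes starting at offset 0x1000.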
+
+/*
+ * set up Buffer Descriptor List (BDL) for host memory transfer
+ * BDL describes the location of the individual buffers and is little endian.
+ */
+int hda_dsp_stream_setup_bdl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct hdac_stream *hstream)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct sof_intel_dsp_bdl *bdl;
+ int i, offset, period_bytes, periods;
+ int remain, ioc;
+
+ period_bytes = hstream->period_bytes;
+ dev_dbg(sdev->dev, "period_bytes:0x%x\n", period_bytes);
+ if (!period_bytes)
+ period_bytes = hstream->bufsize;
+
+ periods = hstream->bufsize / period_bytes;
+
+ dev_dbg(sdev->dev, "periods:%d\n", periods);
+
+ remain = hstream->bufsize % period_bytes;
+ if (remain)
+ periods++;
+
+ /* program the initial BDL entries */
+ bdl = (struct sof_intel_dsp_bdl *)hstream->bdl.area;
+ offset = 0;
+ hstream->frags = 0;
+
+ /*
+	 * set IOC if we don't use position IPC
+	 * and period wakeup is needed.
+ */
+ ioc = hda->no_ipc_position ?
+ !hstream->no_period_wakeup : 0;
+
+ for (i = 0; i < periods; i++) {
+ if (i == (periods - 1) && remain)
+ /* set the last small entry */
+ offset = hda_setup_bdle(sdev, dmab,
+ hstream, &bdl, offset,
+ remain, 0);
+ else
+ offset = hda_setup_bdle(sdev, dmab,
+ hstream, &bdl, offset,
+ period_bytes, ioc);
+ }
+
+ return offset;
+}
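
The period arithmetic above rounds up: a buffer that is not an exact multiple of the
period size gets one extra, shorter trailing BDL entry, and IOC is requested only when
IPC position updates are not used and the stream wants period wakeups. For example
(illustrative sizes): bufsize = 0x12000 with period_bytes = 0x4000 gives periods = 4,
remain = 0x2000, so five entries are programmed, four of 0x4000 bytes and a final
entry of 0x2000 bytes with IOC forced off.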
+
+int hda_dsp_stream_spib_config(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream,
+ int enable, u32 size)
+{
+ struct hdac_stream *hstream = &hext_stream->hstream;
+ u32 mask;
+
+ if (!sdev->bar[HDA_DSP_SPIB_BAR]) {
+ dev_err(sdev->dev, "error: address of spib capability is NULL\n");
+ return -EINVAL;
+ }
+
+ mask = (1 << hstream->index);
+
+ /* enable/disable SPIB for the stream */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_SPIB_BAR,
+ SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL, mask,
+ enable << hstream->index);
+
+ /* set the SPIB value */
+ sof_io_write(sdev, hext_stream->spib_addr, size);
+
+ return 0;
+}
+
+/* get next unused stream */
+struct hdac_ext_stream *
+hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction, u32 flags)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct sof_intel_hda_stream *hda_stream;
+ struct hdac_ext_stream *hext_stream = NULL;
+ struct hdac_stream *s;
+
+ spin_lock_irq(&bus->reg_lock);
+
+ /* get an unused stream */
+ list_for_each_entry(s, &bus->stream_list, list) {
+ if (s->direction == direction && !s->opened) {
+ hext_stream = stream_to_hdac_ext_stream(s);
+ hda_stream = container_of(hext_stream,
+ struct sof_intel_hda_stream,
+ hext_stream);
+ /* check if the host DMA channel is reserved */
+ if (hda_stream->host_reserved)
+ continue;
+
+ s->opened = true;
+ break;
+ }
+ }
+
+ spin_unlock_irq(&bus->reg_lock);
+
+	/* stream found? */
+ if (!hext_stream) {
+ dev_err(sdev->dev, "error: no free %s streams\n",
+ direction == SNDRV_PCM_STREAM_PLAYBACK ?
+ "playback" : "capture");
+ return hext_stream;
+ }
+
+ hda_stream->flags = flags;
+
+ /*
+ * Prevent DMI Link L1 entry for streams that don't support it.
+ * Workaround to address a known issue with host DMA that results
+ * in xruns during pause/release in capture scenarios.
+ */
+ if (!(flags & SOF_HDA_STREAM_DMI_L1_COMPATIBLE))
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, 0);
+
+ return hext_stream;
+}
+
+/* free a stream */
+int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct sof_intel_hda_stream *hda_stream;
+ struct hdac_ext_stream *hext_stream;
+ struct hdac_stream *s;
+ bool dmi_l1_enable = true;
+ bool found = false;
+
+ spin_lock_irq(&bus->reg_lock);
+
+ /*
+ * close stream matching the stream tag and check if there are any open streams
+ * that are DMI L1 incompatible.
+ */
+ list_for_each_entry(s, &bus->stream_list, list) {
+ hext_stream = stream_to_hdac_ext_stream(s);
+ hda_stream = container_of(hext_stream, struct sof_intel_hda_stream, hext_stream);
+
+ if (!s->opened)
+ continue;
+
+ if (s->direction == direction && s->stream_tag == stream_tag) {
+ s->opened = false;
+ found = true;
+ } else if (!(hda_stream->flags & SOF_HDA_STREAM_DMI_L1_COMPATIBLE)) {
+ dmi_l1_enable = false;
+ }
+ }
+
+ spin_unlock_irq(&bus->reg_lock);
+
+ /* Enable DMI L1 if permitted */
+ if (dmi_l1_enable)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN);
+
+ if (!found) {
+ dev_err(sdev->dev, "%s: stream_tag %d not opened!\n",
+ __func__, stream_tag);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int hda_dsp_stream_reset(struct snd_sof_dev *sdev, struct hdac_stream *hstream)
+{
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ int timeout = HDA_DSP_STREAM_RESET_TIMEOUT;
+ u32 val;
+
+ /* enter stream reset */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset, SOF_STREAM_SD_OFFSET_CRST,
+ SOF_STREAM_SD_OFFSET_CRST);
+ do {
+ val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, sd_offset);
+ if (val & SOF_STREAM_SD_OFFSET_CRST)
+ break;
+ } while (--timeout);
+ if (timeout == 0) {
+ dev_err(sdev->dev, "timeout waiting for stream reset\n");
+ return -ETIMEDOUT;
+ }
+
+ timeout = HDA_DSP_STREAM_RESET_TIMEOUT;
+
+ /* exit stream reset and wait to read a zero before reading any other register */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset, SOF_STREAM_SD_OFFSET_CRST, 0x0);
+
+ /* wait for hardware to report that stream is out of reset */
+ udelay(3);
+ do {
+ val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, sd_offset);
+ if ((val & SOF_STREAM_SD_OFFSET_CRST) == 0)
+ break;
+ } while (--timeout);
+ if (timeout == 0) {
+ dev_err(sdev->dev, "timeout waiting for stream to exit reset\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream, int cmd)
+{
+ struct hdac_stream *hstream = &hext_stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ u32 dma_start = SOF_HDA_SD_CTL_DMA_START;
+ int ret = 0;
+ u32 run;
+
+ /* cmd must be for audio stream */
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_START:
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ 1 << hstream->index,
+ 1 << hstream->index);
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev,
+ HDA_DSP_HDA_BAR,
+ sd_offset, run,
+ ((run & dma_start) == dma_start),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_STREAM_RUN_TIMEOUT);
+
+ if (ret >= 0)
+ hstream->running = true;
+
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_STOP:
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK, 0x0);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
+ sd_offset, run,
+ !(run & dma_start),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_STREAM_RUN_TIMEOUT);
+
+ if (ret >= 0) {
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ hstream->running = false;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_INTCTL,
+ 1 << hstream->index, 0x0);
+ }
+ break;
+ default:
+ dev_err(sdev->dev, "error: unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ if (ret < 0) {
+ char *stream_name = hda_hstream_dbg_get_stream_info_str(hstream);
+
+ dev_err(sdev->dev,
+ "%s: cmd %d on %s: timeout on STREAM_SD_OFFSET read\n",
+ __func__, cmd, stream_name ? stream_name : "unknown stream");
+ kfree(stream_name);
+ }
+
+ return ret;
+}
+
+/* minimal recommended programming for ICCMAX stream */
+int hda_dsp_iccmax_stream_hw_params(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params)
+{
+	struct hdac_bus *bus = sof_to_bus(sdev);
+	struct hdac_stream *hstream;
+	int sd_offset;
+	u32 mask;
+	int ret;
+
+	if (!hext_stream) {
+		dev_err(sdev->dev, "error: no stream available\n");
+		return -ENODEV;
+	}
+
+	if (!dmab) {
+		dev_err(sdev->dev, "error: no dma buffer allocated!\n");
+		return -ENODEV;
+	}
+
+	/* only dereference the stream after the NULL checks */
+	hstream = &hext_stream->hstream;
+	sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+	mask = 0x1 << hstream->index;
+
+ if (hstream->posbuf)
+ *hstream->posbuf = 0;
+
+ /* reset BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
+ 0x0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
+ 0x0);
+
+ hstream->frags = 0;
+
+ ret = hda_dsp_stream_setup_bdl(sdev, dmab, hstream);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: set up of BDL failed\n");
+ return ret;
+ }
+
+ /* program BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
+ (u32)hstream->bdl.addr);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
+ upper_32_bits(hstream->bdl.addr));
+
+ /* program cyclic buffer length */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_CBL,
+ hstream->bufsize);
+
+ /* program last valid index */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_LVI,
+ 0xffff, (hstream->frags - 1));
+
+ /* decouple host and link DMA, enable DSP features */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, mask);
+
+ /* Follow HW recommendation to set the guardband value to 95us during FW boot */
+ snd_hdac_chip_updateb(bus, VS_LTRP, HDA_VS_INTEL_LTRP_GB_MASK, HDA_LTRP_GB_VALUE_US);
+
+ /* start DMA */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_SD_CTL_DMA_START, SOF_HDA_SD_CTL_DMA_START);
+
+ return 0;
+}
+
+/*
+ * Prepare the common HDA stream descriptor settings, used for both the
+ * code loader and normal audio streams.
+ */
+int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params)
+{
+	const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
+	struct hdac_bus *bus = sof_to_bus(sdev);
+	struct hdac_stream *hstream;
+	u32 dma_start = SOF_HDA_SD_CTL_DMA_START;
+	int sd_offset;
+	u32 mask;
+	u32 run;
+	int ret;
+
+	if (!hext_stream) {
+		dev_err(sdev->dev, "error: no stream available\n");
+		return -ENODEV;
+	}
+
+	if (!dmab) {
+		dev_err(sdev->dev, "error: no dma buffer allocated!\n");
+		return -ENODEV;
+	}
+
+	/* only dereference the stream after the NULL checks */
+	hstream = &hext_stream->hstream;
+	sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+
+ /* decouple host and link DMA */
+ mask = 0x1 << hstream->index;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, mask);
+
+ /* clear stream status */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_DMA_SD_INT_MASK |
+ SOF_HDA_SD_CTL_DMA_START, 0);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
+ sd_offset, run,
+ !(run & dma_start),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_STREAM_RUN_TIMEOUT);
+
+ if (ret < 0) {
+ char *stream_name = hda_hstream_dbg_get_stream_info_str(hstream);
+
+ dev_err(sdev->dev,
+ "%s: on %s: timeout on STREAM_SD_OFFSET read1\n",
+ __func__, stream_name ? stream_name : "unknown stream");
+ kfree(stream_name);
+ return ret;
+ }
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ /* stream reset */
+ ret = hda_dsp_stream_reset(sdev, hstream);
+ if (ret < 0)
+ return ret;
+
+ if (hstream->posbuf)
+ *hstream->posbuf = 0;
+
+ /* reset BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
+ 0x0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
+ 0x0);
+
+ /* clear stream status */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_DMA_SD_INT_MASK |
+ SOF_HDA_SD_CTL_DMA_START, 0);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
+ sd_offset, run,
+ !(run & dma_start),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_STREAM_RUN_TIMEOUT);
+
+ if (ret < 0) {
+ char *stream_name = hda_hstream_dbg_get_stream_info_str(hstream);
+
+ dev_err(sdev->dev,
+			"%s: on %s: timeout on STREAM_SD_OFFSET read2\n",
+ __func__, stream_name ? stream_name : "unknown stream");
+ kfree(stream_name);
+ return ret;
+ }
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ hstream->frags = 0;
+
+ ret = hda_dsp_stream_setup_bdl(sdev, dmab, hstream);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: set up of BDL failed\n");
+ return ret;
+ }
+
+ /* program stream tag to set up stream descriptor for DMA */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_SD_CTL_STREAM_TAG_MASK,
+ hstream->stream_tag <<
+ SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT);
+
+ /* program cyclic buffer length */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_CBL,
+ hstream->bufsize);
+
+ /*
+ * Recommended hardware programming sequence for HDAudio DMA format
+ * on earlier platforms - this is not needed on newer platforms
+ *
+ * 1. Put DMA into coupled mode by clearing PPCTL.PROCEN bit
+ * for corresponding stream index before the time of writing
+ * format to SDxFMT register.
+ * 2. Write SDxFMT
+ * 3. Set PPCTL.PROCEN bit for corresponding stream index to
+ * enable decoupled mode
+ */
+
+ if (chip->quirks & SOF_INTEL_PROCEN_FMT_QUIRK) {
+ /* couple host and link DMA, disable DSP features */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, 0);
+ }
+
+ /* program stream format */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset +
+ SOF_HDA_ADSP_REG_CL_SD_FORMAT,
+ 0xffff, hstream->format_val);
+
+ if (chip->quirks & SOF_INTEL_PROCEN_FMT_QUIRK) {
+ /* decouple host and link DMA, enable DSP features */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, mask);
+ }
+
+ /* program last valid index */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_LVI,
+ 0xffff, (hstream->frags - 1));
+
+ /* program BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
+ (u32)hstream->bdl.addr);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
+ upper_32_bits(hstream->bdl.addr));
+
+ /* enable position buffer, if needed */
+ if (bus->use_posbuf && bus->posbuf.addr &&
+ !(snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE)
+ & SOF_HDA_ADSP_DPLBASE_ENABLE)) {
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPUBASE,
+ upper_32_bits(bus->posbuf.addr));
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE,
+ (u32)bus->posbuf.addr |
+ SOF_HDA_ADSP_DPLBASE_ENABLE);
+ }
+
+ /* set interrupt enable bits */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ /* read FIFO size */
+ if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ hstream->fifo_size =
+ snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ sd_offset +
+ SOF_HDA_ADSP_REG_CL_SD_FIFOSIZE);
+ hstream->fifo_size &= 0xffff;
+ hstream->fifo_size += 1;
+ } else {
+ hstream->fifo_size = 0;
+ }
+
+ return ret;
+}
+
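+/*
+ * Undo hw_params for a host stream: reset the stream, re-couple host and
+ * link DMA when the link side is idle, and disable SPIB.
+ */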
+int hda_dsp_stream_hw_free(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *hext_stream = container_of(hstream,
+ struct hdac_ext_stream,
+ hstream);
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ u32 mask = 0x1 << hstream->index;
+ int ret;
+
+ ret = hda_dsp_stream_reset(sdev, hstream);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irq(&bus->reg_lock);
+ /* couple host and link DMA if link DMA channel is idle */
+ if (!hext_stream->link_locked)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR,
+ SOF_HDA_REG_PP_PPCTL, mask, 0);
+ spin_unlock_irq(&bus->reg_lock);
+
+ hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
+
+ hstream->substream = NULL;
+
+ return 0;
+}
+
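+/*
+ * Quick check from the interrupt handler: read INTSTS under the register
+ * lock; any value other than all-ones (register inaccessible) means the
+ * threaded handler should run and look at the streams.
+ */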
+bool hda_dsp_check_stream_irq(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ bool ret = false;
+ u32 status;
+
+ /* The function can be called at irq thread, so use spin_lock_irq */
+ spin_lock_irq(&bus->reg_lock);
+
+ status = snd_hdac_chip_readl(bus, INTSTS);
+ trace_sof_intel_hda_dsp_check_stream_irq(sdev, status);
+
+	/* if the register is inaccessible, ignore it */
+ if (status != 0xffffffff)
+ ret = true;
+
+ spin_unlock_irq(&bus->reg_lock);
+
+ return ret;
+}
+
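+/*
+ * Update the compressed stream byte counter: compute the number of bytes
+ * transferred since the last update from the current DMA position, taking
+ * wrap-around of the cyclic buffer into account.
+ */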
+static void
+hda_dsp_compr_bytes_transferred(struct hdac_stream *hstream, int direction)
+{
+ u64 buffer_size = hstream->bufsize;
+ u64 prev_pos, pos, num_bytes;
+
+ div64_u64_rem(hstream->curr_pos, buffer_size, &prev_pos);
+ pos = hda_dsp_stream_get_position(hstream, direction, false);
+
+ if (pos < prev_pos)
+ num_bytes = (buffer_size - prev_pos) + pos;
+ else
+ num_bytes = pos - prev_pos;
+
+ hstream->curr_pos += num_bytes;
+}
+
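+/*
+ * Handle per-stream interrupts: acknowledge SD_STS for every opened stream
+ * flagged in INTSTS and, on buffer completion, notify ALSA: period elapsed
+ * for PCM streams when positions are not reported over IPC, fragment
+ * elapsed for compressed streams.
+ */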
+static bool hda_dsp_stream_check(struct hdac_bus *bus, u32 status)
+{
+ struct sof_intel_hda_dev *sof_hda = bus_to_sof_hda(bus);
+ struct hdac_stream *s;
+ bool active = false;
+ u32 sd_status;
+
+ list_for_each_entry(s, &bus->stream_list, list) {
+ if (status & BIT(s->index) && s->opened) {
+ sd_status = snd_hdac_stream_readb(s, SD_STS);
+
+ trace_sof_intel_hda_dsp_stream_status(bus->dev, s, sd_status);
+
+ snd_hdac_stream_writeb(s, SD_STS, sd_status);
+
+ active = true;
+ if ((!s->substream && !s->cstream) ||
+ !s->running ||
+ (sd_status & SOF_HDA_CL_DMA_SD_INT_COMPLETE) == 0)
+ continue;
+
+			/* Inform ALSA only when the position is not reported via IPC */
+ if (s->substream && sof_hda->no_ipc_position) {
+ snd_sof_pcm_period_elapsed(s->substream);
+ } else if (s->cstream) {
+ hda_dsp_compr_bytes_transferred(s, s->cstream->direction);
+ snd_compr_fragment_elapsed(s->cstream);
+ }
+ }
+ }
+
+ return active;
+}
+
+irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ u32 rirb_status;
+#endif
+ bool active;
+ u32 status;
+ int i;
+
+ /*
+ * Loop 10 times to handle missed interrupts caused by
+ * unsolicited responses from the codec
+ */
+ for (i = 0, active = true; i < 10 && active; i++) {
+ spin_lock_irq(&bus->reg_lock);
+
+ status = snd_hdac_chip_readl(bus, INTSTS);
+
+ /* check streams */
+ active = hda_dsp_stream_check(bus, status);
+
+ /* check and clear RIRB interrupt */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ if (status & AZX_INT_CTRL_EN) {
+ rirb_status = snd_hdac_chip_readb(bus, RIRBSTS);
+ if (rirb_status & RIRB_INT_MASK) {
+ /*
+ * Clearing the interrupt status here ensures
+ * that no interrupt gets masked after the RIRB
+ * wp is read in snd_hdac_bus_update_rirb.
+ */
+ snd_hdac_chip_writeb(bus, RIRBSTS,
+ RIRB_INT_MASK);
+ active = true;
+ if (rirb_status & RIRB_INT_RESPONSE)
+ snd_hdac_bus_update_rirb(bus);
+ }
+ }
+#endif
+ spin_unlock_irq(&bus->reg_lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
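+/*
+ * Discover and allocate host streams: parse GCAP for the number of playback
+ * and capture streams, allocate the DMA position buffer (and the CORB/RIRB
+ * ring buffer when HDA codec support is enabled), then create one
+ * sof_intel_hda_stream with its own BDL buffer per hardware stream.
+ */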
+int hda_dsp_stream_init(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_stream *hext_stream;
+ struct hdac_stream *hstream;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ struct sof_intel_hda_dev *sof_hda = bus_to_sof_hda(bus);
+ int sd_offset;
+ int i, num_playback, num_capture, num_total, ret;
+ u32 gcap;
+
+ gcap = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCAP);
+ dev_dbg(sdev->dev, "hda global caps = 0x%x\n", gcap);
+
+ /* get stream count from GCAP */
+ num_capture = (gcap >> 8) & 0x0f;
+ num_playback = (gcap >> 12) & 0x0f;
+ num_total = num_playback + num_capture;
+
+ dev_dbg(sdev->dev, "detected %d playback and %d capture streams\n",
+ num_playback, num_capture);
+
+ if (num_playback >= SOF_HDA_PLAYBACK_STREAMS) {
+ dev_err(sdev->dev, "error: too many playback streams %d\n",
+ num_playback);
+ return -EINVAL;
+ }
+
+ if (num_capture >= SOF_HDA_CAPTURE_STREAMS) {
+ dev_err(sdev->dev, "error: too many capture streams %d\n",
+			num_capture);
+ return -EINVAL;
+ }
+
+ /*
+ * mem alloc for the position buffer
+ * TODO: check position buffer update
+ */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ SOF_HDA_DPIB_ENTRY_SIZE * num_total,
+ &bus->posbuf);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: posbuffer dma alloc failed\n");
+ return -ENOMEM;
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* mem alloc for the CORB/RIRB ringbuffers */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ PAGE_SIZE, &bus->rb);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: RB alloc failed\n");
+ return -ENOMEM;
+ }
+#endif
+
+ /* create capture streams */
+ for (i = 0; i < num_capture; i++) {
+ struct sof_intel_hda_stream *hda_stream;
+
+ hda_stream = devm_kzalloc(sdev->dev, sizeof(*hda_stream),
+ GFP_KERNEL);
+ if (!hda_stream)
+ return -ENOMEM;
+
+ hda_stream->sdev = sdev;
+
+ hext_stream = &hda_stream->hext_stream;
+
+ hext_stream->pphc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPHC_BASE + SOF_HDA_PPHC_INTERVAL * i;
+
+ hext_stream->pplc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPLC_BASE + SOF_HDA_PPLC_MULTI * num_total +
+ SOF_HDA_PPLC_INTERVAL * i;
+
+ /* do we support SPIB */
+ if (sdev->bar[HDA_DSP_SPIB_BAR]) {
+ hext_stream->spib_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_SPIB;
+
+ hext_stream->fifo_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_MAXFIFO;
+ }
+
+ hstream = &hext_stream->hstream;
+ hstream->bus = bus;
+ hstream->sd_int_sta_mask = 1 << i;
+ hstream->index = i;
+ sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ hstream->sd_addr = sdev->bar[HDA_DSP_HDA_BAR] + sd_offset;
+ hstream->stream_tag = i + 1;
+ hstream->opened = false;
+ hstream->running = false;
+ hstream->direction = SNDRV_PCM_STREAM_CAPTURE;
+
+ /* memory alloc for stream BDL */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ HDA_DSP_BDL_SIZE, &hstream->bdl);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: stream bdl dma alloc failed\n");
+ return -ENOMEM;
+ }
+ hstream->posbuf = (__le32 *)(bus->posbuf.area +
+ (hstream->index) * 8);
+
+ list_add_tail(&hstream->list, &bus->stream_list);
+ }
+
+ /* create playback streams */
+ for (i = num_capture; i < num_total; i++) {
+ struct sof_intel_hda_stream *hda_stream;
+
+ hda_stream = devm_kzalloc(sdev->dev, sizeof(*hda_stream),
+ GFP_KERNEL);
+ if (!hda_stream)
+ return -ENOMEM;
+
+ hda_stream->sdev = sdev;
+
+ hext_stream = &hda_stream->hext_stream;
+
+ /* we always have DSP support */
+ hext_stream->pphc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPHC_BASE + SOF_HDA_PPHC_INTERVAL * i;
+
+ hext_stream->pplc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPLC_BASE + SOF_HDA_PPLC_MULTI * num_total +
+ SOF_HDA_PPLC_INTERVAL * i;
+
+ /* do we support SPIB */
+ if (sdev->bar[HDA_DSP_SPIB_BAR]) {
+ hext_stream->spib_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_SPIB;
+
+ hext_stream->fifo_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_MAXFIFO;
+ }
+
+ hstream = &hext_stream->hstream;
+ hstream->bus = bus;
+ hstream->sd_int_sta_mask = 1 << i;
+ hstream->index = i;
+ sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ hstream->sd_addr = sdev->bar[HDA_DSP_HDA_BAR] + sd_offset;
+ hstream->stream_tag = i - num_capture + 1;
+ hstream->opened = false;
+ hstream->running = false;
+ hstream->direction = SNDRV_PCM_STREAM_PLAYBACK;
+
+ /* mem alloc for stream BDL */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ HDA_DSP_BDL_SIZE, &hstream->bdl);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: stream bdl dma alloc failed\n");
+ return -ENOMEM;
+ }
+
+ hstream->posbuf = (__le32 *)(bus->posbuf.area +
+ (hstream->index) * 8);
+
+ list_add_tail(&hstream->list, &bus->stream_list);
+ }
+
+ /* store total stream count (playback + capture) from GCAP */
+ sof_hda->stream_max = num_total;
+
+ return 0;
+}
+
+void hda_dsp_stream_free(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *s, *_s;
+ struct hdac_ext_stream *hext_stream;
+ struct sof_intel_hda_stream *hda_stream;
+
+ /* free position buffer */
+ if (bus->posbuf.area)
+ snd_dma_free_pages(&bus->posbuf);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+	/* free CORB/RIRB ring buffer */
+ if (bus->rb.area)
+ snd_dma_free_pages(&bus->rb);
+#endif
+
+ list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
+ /* TODO: decouple */
+
+ /* free bdl buffer */
+ if (s->bdl.area)
+ snd_dma_free_pages(&s->bdl);
+ list_del(&s->list);
+ hext_stream = stream_to_hdac_ext_stream(s);
+ hda_stream = container_of(hext_stream, struct sof_intel_hda_stream,
+ hext_stream);
+ devm_kfree(sdev->dev, hda_stream);
+ }
+}
+
+snd_pcm_uframes_t hda_dsp_stream_get_position(struct hdac_stream *hstream,
+ int direction, bool can_sleep)
+{
+ struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream);
+ struct sof_intel_hda_stream *hda_stream = hstream_to_sof_hda_stream(hext_stream);
+ struct snd_sof_dev *sdev = hda_stream->sdev;
+ snd_pcm_uframes_t pos;
+
+ switch (sof_hda_position_quirk) {
+ case SOF_HDA_POSITION_QUIRK_USE_SKYLAKE_LEGACY:
+		/*
+		 * This legacy code, inherited from the Skylake driver,
+		 * mixes DPIB registers and DPIB DDR updates and
+		 * does not seem to follow any known hardware recommendations.
+		 * It is not clear, for example, why capture and playback use
+		 * different flows; the only information that matters is which
+		 * traffic class is used, and since all SOF-enabled platforms
+		 * only support VC0, the workaround was likely unnecessary and
+		 * quite possibly wrong.
+		 */
+
+ /* DPIB/posbuf position mode:
+ * For Playback, Use DPIB register from HDA space which
+ * reflects the actual data transferred.
+ * For Capture, Use the position buffer for pointer, as DPIB
+ * is not accurate enough, its update may be completed
+ * earlier than the data written to DDR.
+ */
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ AZX_REG_VS_SDXDPIB_XBASE +
+ (AZX_REG_VS_SDXDPIB_XINTERVAL *
+ hstream->index));
+ } else {
+			/*
+			 * For a capture stream, additional workarounds are
+			 * needed to fix incorrect position reports:
+			 *
+			 * 1. Wait at least 20us after the interrupt (IOC) is
+			 * generated before reading the position buffer, to make
+			 * sure the position update happens on a frame boundary,
+			 * i.e. 20.833us for 48kHz.
+			 * 2. Perform a dummy read of the DPIB register to flush
+			 * the DMA position value.
+			 * 3. Read the DMA position from the position buffer. The
+			 * readback value should now be >= the period boundary.
+			 */
+ if (can_sleep)
+ usleep_range(20, 21);
+
+ snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ AZX_REG_VS_SDXDPIB_XBASE +
+ (AZX_REG_VS_SDXDPIB_XINTERVAL *
+ hstream->index));
+ pos = snd_hdac_stream_get_pos_posbuf(hstream);
+ }
+ break;
+ case SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS:
+ /*
+ * In case VC1 traffic is disabled this is the recommended option
+ */
+ pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ AZX_REG_VS_SDXDPIB_XBASE +
+ (AZX_REG_VS_SDXDPIB_XINTERVAL *
+ hstream->index));
+ break;
+ case SOF_HDA_POSITION_QUIRK_USE_DPIB_DDR_UPDATE:
+		/*
+		 * This is the recommended option when VC1 is enabled.
+		 * While it isn't strictly needed for SOF platforms, it's
+		 * added for consistency and debugging.
+		 */
+ pos = snd_hdac_stream_get_pos_posbuf(hstream);
+ break;
+ default:
+ dev_err_once(sdev->dev, "hda_position_quirk value %d not supported\n",
+ sof_hda_position_quirk);
+ pos = 0;
+ break;
+ }
+
+ if (pos >= hstream->bufsize)
+ pos = 0;
+
+ return pos;
+}
diff --git a/sound/soc/sof/intel/hda-trace.c b/sound/soc/sof/intel/hda-trace.c
new file mode 100644
index 000000000..cbb9bd777
--- /dev/null
+++ b/sound/soc/sof/intel/hda-trace.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/hdaudio_ext.h>
+#include "../ops.h"
+#include "hda.h"
+
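+/* program the capture stream reserved for DMA trace with the trace buffer */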
+static int hda_dsp_trace_prepare(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_ext_stream *hext_stream = hda->dtrace_stream;
+ struct hdac_stream *hstream = &hext_stream->hstream;
+ int ret;
+
+	hstream->period_bytes = 0; /* initialize period_bytes */
+ hstream->bufsize = dmab->bytes;
+
+ ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
+
+ return ret;
+}
+
+int hda_dsp_trace_init(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
+ struct sof_ipc_dma_trace_params_ext *dtrace_params)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ int ret;
+
+ hda->dtrace_stream = hda_dsp_stream_get(sdev, SNDRV_PCM_STREAM_CAPTURE,
+ SOF_HDA_STREAM_DMI_L1_COMPATIBLE);
+
+ if (!hda->dtrace_stream) {
+ dev_err(sdev->dev,
+ "error: no available capture stream for DMA trace\n");
+ return -ENODEV;
+ }
+
+ dtrace_params->stream_tag = hda->dtrace_stream->hstream.stream_tag;
+
+ /*
+ * initialize capture stream, set BDL address and return corresponding
+ * stream tag which will be sent to the firmware by IPC message.
+ */
+ ret = hda_dsp_trace_prepare(sdev, dmab);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac trace init failed: %d\n", ret);
+ hda_dsp_stream_put(sdev, SNDRV_PCM_STREAM_CAPTURE,
+ dtrace_params->stream_tag);
+ hda->dtrace_stream = NULL;
+ dtrace_params->stream_tag = 0;
+ }
+
+ return ret;
+}
+
+int hda_dsp_trace_release(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_stream *hstream;
+
+ if (hda->dtrace_stream) {
+ hstream = &hda->dtrace_stream->hstream;
+ hda_dsp_stream_put(sdev,
+ SNDRV_PCM_STREAM_CAPTURE,
+ hstream->stream_tag);
+ hda->dtrace_stream = NULL;
+ return 0;
+ }
+
+ dev_dbg(sdev->dev, "DMA trace stream is not opened!\n");
+ return -ENODEV;
+}
+
+int hda_dsp_trace_trigger(struct snd_sof_dev *sdev, int cmd)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+
+ return hda_dsp_stream_trigger(sdev, hda->dtrace_stream, cmd);
+}
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
new file mode 100644
index 000000000..63764afdc
--- /dev/null
+++ b/sound/soc/sof/intel/hda.c
@@ -0,0 +1,1706 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_intel.h>
+#include <sound/intel-dsp-config.h>
+#include <sound/intel-nhlt.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include "../sof-audio.h"
+#include "../sof-pci-dev.h"
+#include "../ops.h"
+#include "hda.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/sof_intel.h>
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+#include <sound/soc-acpi-intel-match.h>
+#endif
+
+/* platform specific devices */
+#include "shim.h"
+
+#define EXCEPT_MAX_HDR_SIZE 0x400
+#define HDA_EXT_ROM_STATUS_SIZE 8
+
+int hda_ctrl_dai_widget_setup(struct snd_soc_dapm_widget *w, unsigned int quirk_flags,
+ struct snd_sof_dai_config_data *data)
+{
+ struct snd_sof_widget *swidget = w->dobj.private;
+ struct snd_soc_component *component = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
+ struct snd_sof_dai *sof_dai = swidget->private;
+ int ret;
+
+ if (!sof_dai) {
+ dev_err(sdev->dev, "%s: No DAI for DAI widget %s\n", __func__, w->name);
+ return -EINVAL;
+ }
+
+ if (tplg_ops->dai_config) {
+ unsigned int flags;
+
+ /* set HW_PARAMS flag along with quirks */
+ flags = SOF_DAI_CONFIG_FLAGS_HW_PARAMS |
+ quirk_flags << SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT;
+
+ ret = tplg_ops->dai_config(sdev, swidget, flags, data);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: DAI config failed for widget %s\n", __func__,
+ w->name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int hda_ctrl_dai_widget_free(struct snd_soc_dapm_widget *w, unsigned int quirk_flags,
+ struct snd_sof_dai_config_data *data)
+{
+ struct snd_sof_widget *swidget = w->dobj.private;
+ struct snd_soc_component *component = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
+ struct snd_sof_dai *sof_dai = swidget->private;
+
+ if (!sof_dai) {
+ dev_err(sdev->dev, "%s: No DAI for BE DAI widget %s\n", __func__, w->name);
+ return -EINVAL;
+ }
+
+ if (tplg_ops->dai_config) {
+ unsigned int flags;
+ int ret;
+
+ /* set HW_FREE flag along with any quirks */
+ flags = SOF_DAI_CONFIG_FLAGS_HW_FREE |
+ quirk_flags << SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT;
+
+ ret = tplg_ops->dai_config(sdev, swidget, flags, data);
+ if (ret < 0)
+ dev_err(sdev->dev, "%s: DAI config failed for widget '%s'\n", __func__,
+ w->name);
+ }
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+
+/*
+ * The default for SoundWire clock stop quirks is to power gate the IP
+ * and do a Bus Reset. This will need to be modified when the DSP
+ * needs to remain in D0i3, so that the Master does not lose context
+ * and enumeration is not required on clock restart.
+ */
+static int sdw_clock_stop_quirks = SDW_INTEL_CLK_STOP_BUS_RESET;
+module_param(sdw_clock_stop_quirks, int, 0444);
+MODULE_PARM_DESC(sdw_clock_stop_quirks, "SOF SoundWire clock stop quirks");
+
+static int sdw_params_stream(struct device *dev,
+ struct sdw_intel_stream_params_data *params_data)
+{
+ struct snd_soc_dai *d = params_data->dai;
+ struct snd_sof_dai_config_data data;
+ struct snd_soc_dapm_widget *w;
+
+ w = snd_soc_dai_get_widget(d, params_data->stream);
+ data.dai_index = (params_data->link_id << 8) | d->id;
+ data.dai_data = params_data->alh_stream_id;
+
+ return hda_ctrl_dai_widget_setup(w, SOF_DAI_CONFIG_FLAGS_NONE, &data);
+}
+
+static int sdw_free_stream(struct device *dev,
+ struct sdw_intel_stream_free_data *free_data)
+{
+ struct snd_soc_dai *d = free_data->dai;
+ struct snd_sof_dai_config_data data;
+ struct snd_soc_dapm_widget *w;
+
+ w = snd_soc_dai_get_widget(d, free_data->stream);
+ data.dai_index = (free_data->link_id << 8) | d->id;
+
+ /* send invalid stream_id */
+ data.dai_data = 0xFFFF;
+
+ return hda_ctrl_dai_widget_free(w, SOF_DAI_CONFIG_FLAGS_NONE, &data);
+}
+
+struct sdw_intel_ops sdw_callback = {
+ .params_stream = sdw_params_stream,
+ .free_stream = sdw_free_stream,
+};
+
+void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ sdw_intel_enable_irq(sdev->bar[HDA_DSP_BAR], enable);
+}
+
+static int hda_sdw_acpi_scan(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ acpi_handle handle;
+ int ret;
+
+ handle = ACPI_HANDLE(sdev->dev);
+
+ /* save ACPI info for the probe step */
+ hdev = sdev->pdata->hw_pdata;
+
+ ret = sdw_intel_acpi_scan(handle, &hdev->info);
+ if (ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hda_sdw_probe(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ struct sdw_intel_res res;
+ void *sdw;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ memset(&res, 0, sizeof(res));
+
+ res.mmio_base = sdev->bar[HDA_DSP_BAR];
+ res.shim_base = hdev->desc->sdw_shim_base;
+ res.alh_base = hdev->desc->sdw_alh_base;
+ res.irq = sdev->ipc_irq;
+ res.handle = hdev->info.handle;
+ res.parent = sdev->dev;
+ res.ops = &sdw_callback;
+ res.dev = sdev->dev;
+ res.clock_stop_quirks = sdw_clock_stop_quirks;
+
+ /*
+ * ops and arg fields are not populated for now,
+ * they will be needed when the DAI callbacks are
+ * provided
+ */
+
+	/* we could filter links here if needed, e.g. for quirks */
+ res.count = hdev->info.count;
+ res.link_mask = hdev->info.link_mask;
+
+ sdw = sdw_intel_probe(&res);
+ if (!sdw) {
+ dev_err(sdev->dev, "error: SoundWire probe failed\n");
+ return -EINVAL;
+ }
+
+ /* save context */
+ hdev->sdw = sdw;
+
+ return 0;
+}
+
+int hda_sdw_startup(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ struct snd_sof_pdata *pdata = sdev->pdata;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ if (!hdev->sdw)
+ return 0;
+
+ if (pdata->machine && !pdata->machine->mach_params.link_mask)
+ return 0;
+
+ return sdw_intel_startup(hdev->sdw);
+}
+
+static int hda_sdw_exit(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ hda_sdw_int_enable(sdev, false);
+
+ if (hdev->sdw)
+ sdw_intel_exit(hdev->sdw);
+ hdev->sdw = NULL;
+
+ return 0;
+}
+
+bool hda_common_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ bool ret = false;
+ u32 irq_status;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ if (!hdev->sdw)
+ return ret;
+
+ /* store status */
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS2);
+
+	/* invalid message? */
+ if (irq_status == 0xffffffff)
+ goto out;
+
+	/* SDW message? */
+ if (irq_status & HDA_DSP_REG_ADSPIS2_SNDW)
+ ret = true;
+
+out:
+ return ret;
+}
+
+static bool hda_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ const struct sof_intel_dsp_desc *chip;
+
+ chip = get_chip_info(sdev->pdata);
+ if (chip && chip->check_sdw_irq)
+ return chip->check_sdw_irq(sdev);
+
+ return false;
+}
+
+static irqreturn_t hda_dsp_sdw_thread(int irq, void *context)
+{
+ return sdw_intel_thread(irq, context);
+}
+
+static bool hda_sdw_check_wakeen_irq(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+ if (hdev->sdw &&
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ hdev->desc->sdw_shim_base + SDW_SHIM_WAKESTS))
+ return true;
+
+ return false;
+}
+
+void hda_sdw_process_wakeen(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+ if (!hdev->sdw)
+ return;
+
+ sdw_intel_process_wakeen_event(hdev->sdw);
+}
+
+#else /* IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE) */
+static inline int hda_sdw_acpi_scan(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline int hda_sdw_probe(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline int hda_sdw_exit(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline bool hda_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ return false;
+}
+
+static inline irqreturn_t hda_dsp_sdw_thread(int irq, void *context)
+{
+ return IRQ_HANDLED;
+}
+
+static inline bool hda_sdw_check_wakeen_irq(struct snd_sof_dev *sdev)
+{
+ return false;
+}
+
+#endif /* IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE) */
+
+/*
+ * Debug
+ */
+
+struct hda_dsp_msg_code {
+ u32 code;
+ const char *text;
+};
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
+static bool hda_use_msi = true;
+module_param_named(use_msi, hda_use_msi, bool, 0444);
+MODULE_PARM_DESC(use_msi, "SOF HDA use PCI MSI mode");
+#else
+#define hda_use_msi (1)
+#endif
+
+int sof_hda_position_quirk = SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS;
+module_param_named(position_quirk, sof_hda_position_quirk, int, 0444);
+MODULE_PARM_DESC(position_quirk, "SOF HDaudio position quirk");
+
+static char *hda_model;
+module_param(hda_model, charp, 0444);
+MODULE_PARM_DESC(hda_model, "Use the given HDA board model.");
+
+static int dmic_num_override = -1;
+module_param_named(dmic_num, dmic_num_override, int, 0444);
+MODULE_PARM_DESC(dmic_num, "SOF HDA DMIC number");
+
+static int mclk_id_override = -1;
+module_param_named(mclk_id, mclk_id_override, int, 0444);
+MODULE_PARM_DESC(mclk_id, "SOF SSP mclk_id");
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+static bool hda_codec_use_common_hdmi = IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI);
+module_param_named(use_common_hdmi, hda_codec_use_common_hdmi, bool, 0444);
+MODULE_PARM_DESC(use_common_hdmi, "SOF HDA use common HDMI codec driver");
+#endif
+
+static const struct hda_dsp_msg_code hda_dsp_rom_fw_error_texts[] = {
+ {HDA_DSP_ROM_CSE_ERROR, "error: cse error"},
+ {HDA_DSP_ROM_CSE_WRONG_RESPONSE, "error: cse wrong response"},
+ {HDA_DSP_ROM_IMR_TO_SMALL, "error: IMR too small"},
+ {HDA_DSP_ROM_BASE_FW_NOT_FOUND, "error: base fw not found"},
+ {HDA_DSP_ROM_CSE_VALIDATION_FAILED, "error: signature verification failed"},
+ {HDA_DSP_ROM_IPC_FATAL_ERROR, "error: ipc fatal error"},
+ {HDA_DSP_ROM_L2_CACHE_ERROR, "error: L2 cache error"},
+ {HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL, "error: load offset too small"},
+ {HDA_DSP_ROM_API_PTR_INVALID, "error: API ptr invalid"},
+ {HDA_DSP_ROM_BASEFW_INCOMPAT, "error: base fw incompatible"},
+ {HDA_DSP_ROM_UNHANDLED_INTERRUPT, "error: unhandled interrupt"},
+ {HDA_DSP_ROM_MEMORY_HOLE_ECC, "error: ECC memory hole"},
+ {HDA_DSP_ROM_KERNEL_EXCEPTION, "error: kernel exception"},
+ {HDA_DSP_ROM_USER_EXCEPTION, "error: user exception"},
+ {HDA_DSP_ROM_UNEXPECTED_RESET, "error: unexpected reset"},
+ {HDA_DSP_ROM_NULL_FW_ENTRY, "error: null FW entry point"},
+};
+
+#define FSR_ROM_STATE_ENTRY(state) {FSR_STATE_ROM_##state, #state}
+static const struct hda_dsp_msg_code fsr_rom_state_names[] = {
+ FSR_ROM_STATE_ENTRY(INIT),
+ FSR_ROM_STATE_ENTRY(INIT_DONE),
+ FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED),
+ FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED),
+ FSR_ROM_STATE_ENTRY(FW_FW_LOADED),
+ FSR_ROM_STATE_ENTRY(FW_ENTERED),
+ FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK),
+ FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET),
+ FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT),
+ FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT_DONE),
+ /* CSE states */
+ FSR_ROM_STATE_ENTRY(CSE_IMR_REQUEST),
+ FSR_ROM_STATE_ENTRY(CSE_IMR_GRANTED),
+ FSR_ROM_STATE_ENTRY(CSE_VALIDATE_IMAGE_REQUEST),
+ FSR_ROM_STATE_ENTRY(CSE_IMAGE_VALIDATED),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_IFACE_INIT),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_RESET_PHASE_1),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL_ENTRY),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_DOWN),
+};
+
+#define FSR_BRINGUP_STATE_ENTRY(state) {FSR_STATE_BRINGUP_##state, #state}
+static const struct hda_dsp_msg_code fsr_bringup_state_names[] = {
+ FSR_BRINGUP_STATE_ENTRY(INIT),
+ FSR_BRINGUP_STATE_ENTRY(INIT_DONE),
+ FSR_BRINGUP_STATE_ENTRY(HPSRAM_LOAD),
+ FSR_BRINGUP_STATE_ENTRY(UNPACK_START),
+ FSR_BRINGUP_STATE_ENTRY(IMR_RESTORE),
+ FSR_BRINGUP_STATE_ENTRY(FW_ENTERED),
+};
+
+#define FSR_WAIT_STATE_ENTRY(state) {FSR_WAIT_FOR_##state, #state}
+static const struct hda_dsp_msg_code fsr_wait_state_names[] = {
+ FSR_WAIT_STATE_ENTRY(IPC_BUSY),
+ FSR_WAIT_STATE_ENTRY(IPC_DONE),
+ FSR_WAIT_STATE_ENTRY(CACHE_INVALIDATION),
+ FSR_WAIT_STATE_ENTRY(LP_SRAM_OFF),
+ FSR_WAIT_STATE_ENTRY(DMA_BUFFER_FULL),
+ FSR_WAIT_STATE_ENTRY(CSE_CSR),
+};
+
+#define FSR_MODULE_NAME_ENTRY(mod) [FSR_MOD_##mod] = #mod
+static const char * const fsr_module_names[] = {
+ FSR_MODULE_NAME_ENTRY(ROM),
+ FSR_MODULE_NAME_ENTRY(ROM_BYP),
+ FSR_MODULE_NAME_ENTRY(BASE_FW),
+ FSR_MODULE_NAME_ENTRY(LP_BOOT),
+ FSR_MODULE_NAME_ENTRY(BRNGUP),
+ FSR_MODULE_NAME_ENTRY(ROM_EXT),
+};
+
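+/*
+ * linear lookup of a status code in a code/text table,
+ * returns NULL when the code is not found
+ */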
+static const char *
+hda_dsp_get_state_text(u32 code, const struct hda_dsp_msg_code *msg_code,
+ size_t array_size)
+{
+ int i;
+
+ for (i = 0; i < array_size; i++) {
+ if (code == msg_code[i].code)
+ return msg_code[i].text;
+ }
+
+ return NULL;
+}
+
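+/*
+ * Decode and print the firmware status register (FSR): module, state and
+ * wait-state fields, followed by the error/status code read from the next
+ * register (rom_status_reg + 4).
+ */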
+static void hda_dsp_get_state(struct snd_sof_dev *sdev, const char *level)
+{
+ const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
+ const char *state_text, *error_text, *module_text;
+ u32 fsr, state, wait_state, module, error_code;
+
+ fsr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg);
+ state = FSR_TO_STATE_CODE(fsr);
+ wait_state = FSR_TO_WAIT_STATE_CODE(fsr);
+ module = FSR_TO_MODULE_CODE(fsr);
+
+ if (module > FSR_MOD_ROM_EXT)
+ module_text = "unknown";
+ else
+ module_text = fsr_module_names[module];
+
+ if (module == FSR_MOD_BRNGUP)
+ state_text = hda_dsp_get_state_text(state, fsr_bringup_state_names,
+ ARRAY_SIZE(fsr_bringup_state_names));
+ else
+ state_text = hda_dsp_get_state_text(state, fsr_rom_state_names,
+ ARRAY_SIZE(fsr_rom_state_names));
+
+	/* not a state we know about, must be a generic SOF status value */
+ if (!state_text) {
+ dev_printk(level, sdev->dev, "%#010x: unknown ROM status value\n", fsr);
+ return;
+ }
+
+ if (wait_state) {
+ const char *wait_state_text;
+
+ wait_state_text = hda_dsp_get_state_text(wait_state, fsr_wait_state_names,
+ ARRAY_SIZE(fsr_wait_state_names));
+ if (!wait_state_text)
+ wait_state_text = "unknown";
+
+ dev_printk(level, sdev->dev,
+ "%#010x: module: %s, state: %s, waiting for: %s, %s\n",
+ fsr, module_text, state_text, wait_state_text,
+ fsr & FSR_HALTED ? "not running" : "running");
+ } else {
+ dev_printk(level, sdev->dev, "%#010x: module: %s, state: %s, %s\n",
+ fsr, module_text, state_text,
+ fsr & FSR_HALTED ? "not running" : "running");
+ }
+
+ error_code = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + 4);
+ if (!error_code)
+ return;
+
+ error_text = hda_dsp_get_state_text(error_code, hda_dsp_rom_fw_error_texts,
+ ARRAY_SIZE(hda_dsp_rom_fw_error_texts));
+ if (!error_text)
+ error_text = "unknown";
+
+ if (state == FSR_STATE_FW_ENTERED)
+ dev_printk(level, sdev->dev, "status code: %#x (%s)\n", error_code,
+ error_text);
+ else
+ dev_printk(level, sdev->dev, "error code: %#x (%s)\n", error_code,
+ error_text);
+}
+
+static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
+ /* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* note: variable AR register array is not read */
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+ offset += xoops->arch_hdr.totalsize;
+ sof_block_read(sdev, sdev->mmio_bar, offset,
+ panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ sof_block_read(sdev, sdev->mmio_bar, offset, stack,
+ stack_words * sizeof(u32));
+}
+
+/* dump the first 8 dwords representing the extended ROM status */
+static void hda_dsp_dump_ext_rom_status(struct snd_sof_dev *sdev, const char *level,
+ u32 flags)
+{
+ const struct sof_intel_dsp_desc *chip;
+ char msg[128];
+ int len = 0;
+ u32 value;
+ int i;
+
+ chip = get_chip_info(sdev->pdata);
+ for (i = 0; i < HDA_EXT_ROM_STATUS_SIZE; i++) {
+ value = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + i * 0x4);
+ len += scnprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
+ }
+
+	dev_printk(level, sdev->dev, "extended rom status: %s", msg);
+}
+
+void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[HDA_DSP_STACK_DUMP_SIZE];
+
+ /* print ROM/FW status */
+ hda_dsp_get_state(sdev, level);
+
+	/* The firmware register dump is only available with IPC3 */
+ if (flags & SOF_DBG_DUMP_REGS && sdev->pdata->ipc_type == SOF_IPC) {
+ u32 status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_STATUS);
+ u32 panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_TRACEP);
+
+ hda_dsp_get_registers(sdev, &xoops, &panic_info, stack,
+ HDA_DSP_STACK_DUMP_SIZE);
+ sof_print_oops_and_stack(sdev, level, status, panic, &xoops,
+ &panic_info, stack, HDA_DSP_STACK_DUMP_SIZE);
+ } else {
+ hda_dsp_dump_ext_rom_status(sdev, level, flags);
+ }
+}
+
+static bool hda_check_ipc_irq(struct snd_sof_dev *sdev)
+{
+ const struct sof_intel_dsp_desc *chip;
+
+ chip = get_chip_info(sdev->pdata);
+ if (chip && chip->check_ipc_irq)
+ return chip->check_ipc_irq(sdev);
+
+ return false;
+}
+
+void hda_ipc_irq_dump(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ u32 adspis;
+ u32 intsts;
+ u32 intctl;
+ u32 ppsts;
+ u8 rirbsts;
+
+ /* read key IRQ stats and config registers */
+ adspis = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS);
+ intsts = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS);
+ intctl = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL);
+ ppsts = snd_sof_dsp_read(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPSTS);
+ rirbsts = snd_hdac_chip_readb(bus, RIRBSTS);
+
+	dev_err(sdev->dev, "hda irq intsts 0x%8.8x intctl 0x%8.8x rirb %2.2x\n",
+ intsts, intctl, rirbsts);
+ dev_err(sdev->dev, "dsp irq ppsts 0x%8.8x adspis 0x%8.8x\n", ppsts, adspis);
+}
+
+void hda_ipc_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipcie;
+ u32 hipct;
+ u32 hipcctl;
+
+ hda_ipc_irq_dump(sdev);
+
+ /* read IPC status */
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
+ hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL);
+
+ /* dump the IPC regs */
+ /* TODO: parse the raw msg */
+ dev_err(sdev->dev, "host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
+ hipcie, hipct, hipcctl);
+}
+
+void hda_ipc4_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipci, hipcie, hipct, hipcte, hipcctl;
+
+ hda_ipc_irq_dump(sdev);
+
+ hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI);
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
+ hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
+ hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCTE);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL);
+
+ /* dump the IPC regs */
+ /* TODO: parse the raw msg */
+ dev_err(sdev->dev, "Host IPC initiator: %#x|%#x, target: %#x|%#x, ctl: %#x\n",
+ hipci, hipcie, hipct, hipcte, hipcctl);
+}
+
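+/*
+ * Early controller setup: initialize the hdac bus, map BAR0 for the HDA
+ * registers, select position buffer vs DPIB reporting based on the position
+ * quirk, init i915/HDMI and read the controller capabilities.
+ */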
+static int hda_init(struct snd_sof_dev *sdev)
+{
+ struct hda_bus *hbus;
+ struct hdac_bus *bus;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ int ret;
+
+ hbus = sof_to_hbus(sdev);
+ bus = sof_to_bus(sdev);
+
+ /* HDA bus init */
+ sof_hda_bus_init(bus, &pci->dev);
+
+ if (sof_hda_position_quirk == SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS)
+ bus->use_posbuf = 0;
+ else
+ bus->use_posbuf = 1;
+ bus->bdl_pos_adj = 0;
+ bus->sync_write = 1;
+
+ mutex_init(&hbus->prepare_mutex);
+ hbus->pci = pci;
+ hbus->mixer_assigned = -1;
+ hbus->modelname = hda_model;
+
+ /* initialise hdac bus */
+ bus->addr = pci_resource_start(pci, 0);
+ bus->remap_addr = pci_ioremap_bar(pci, 0);
+ if (!bus->remap_addr) {
+ dev_err(bus->dev, "error: ioremap error\n");
+ return -ENXIO;
+ }
+
+ /* HDA base */
+ sdev->bar[HDA_DSP_HDA_BAR] = bus->remap_addr;
+
+ /* init i915 and HDMI codecs */
+ ret = hda_codec_i915_init(sdev);
+ if (ret < 0)
+ dev_warn(sdev->dev, "init of i915 and HDMI codec failed\n");
+
+ /* get controller capabilities */
+ ret = hda_dsp_ctrl_get_caps(sdev);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: get caps error\n");
+
+ return ret;
+}
+
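+/*
+ * Determine the number of DMICs from the NHLT table, with an optional
+ * module parameter override; invalid counts are clamped to 0.
+ */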
+static int check_dmic_num(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ struct nhlt_acpi_table *nhlt;
+ int dmic_num = 0;
+
+ nhlt = hdev->nhlt;
+ if (nhlt)
+ dmic_num = intel_nhlt_get_dmic_geo(sdev->dev, nhlt);
+
+ /* allow for module parameter override */
+ if (dmic_num_override != -1) {
+		dev_dbg(sdev->dev,
+			"overriding DMIC count detected in NHLT tables (%d) with kernel param %d\n",
+			dmic_num, dmic_num_override);
+ dmic_num = dmic_num_override;
+ }
+
+ if (dmic_num < 0 || dmic_num > 4) {
+ dev_dbg(sdev->dev, "invalid dmic_number %d\n", dmic_num);
+ dmic_num = 0;
+ }
+
+ return dmic_num;
+}
+
+static int check_nhlt_ssp_mask(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ struct nhlt_acpi_table *nhlt;
+ int ssp_mask = 0;
+
+ nhlt = hdev->nhlt;
+ if (!nhlt)
+ return ssp_mask;
+
+ if (intel_nhlt_has_endpoint_type(nhlt, NHLT_LINK_SSP)) {
+ ssp_mask = intel_nhlt_ssp_endpoint_mask(nhlt, NHLT_DEVICE_I2S);
+ if (ssp_mask)
+ dev_info(sdev->dev, "NHLT_DEVICE_I2S detected, ssp_mask %#x\n", ssp_mask);
+ }
+
+ return ssp_mask;
+}
+
+static int check_nhlt_ssp_mclk_mask(struct snd_sof_dev *sdev, int ssp_num)
+{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ struct nhlt_acpi_table *nhlt;
+
+ nhlt = hdev->nhlt;
+ if (!nhlt)
+ return 0;
+
+ return intel_nhlt_ssp_mclk_mask(nhlt, ssp_num);
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) || IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+
+static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
+ const char *sof_tplg_filename,
+ const char *idisp_str,
+ const char *dmic_str)
+{
+ const char *tplg_filename = NULL;
+ char *filename, *tmp;
+ const char *split_ext;
+
+ filename = kstrdup(sof_tplg_filename, GFP_KERNEL);
+ if (!filename)
+ return NULL;
+
+ /* this assumes a .tplg extension */
+ tmp = filename;
+ split_ext = strsep(&tmp, ".");
+ if (split_ext)
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s%s%s.tplg",
+ split_ext, idisp_str, dmic_str);
+ kfree(filename);
+
+ return tplg_filename;
+}
+
+static int dmic_detect_topology_fixup(struct snd_sof_dev *sdev,
+ const char **tplg_filename,
+ const char *idisp_str,
+ int *dmic_found,
+ bool tplg_fixup)
+{
+ const char *dmic_str;
+ int dmic_num;
+
+ /* first check for DMICs (using NHLT or module parameter) */
+ dmic_num = check_dmic_num(sdev);
+
+ switch (dmic_num) {
+ case 1:
+ dmic_str = "-1ch";
+ break;
+ case 2:
+ dmic_str = "-2ch";
+ break;
+ case 3:
+ dmic_str = "-3ch";
+ break;
+ case 4:
+ dmic_str = "-4ch";
+ break;
+ default:
+ dmic_num = 0;
+ dmic_str = "";
+ break;
+ }
+
+ if (tplg_fixup) {
+ const char *default_tplg_filename = *tplg_filename;
+ const char *fixed_tplg_filename;
+
+ fixed_tplg_filename = fixup_tplg_name(sdev, default_tplg_filename,
+ idisp_str, dmic_str);
+ if (!fixed_tplg_filename)
+ return -ENOMEM;
+ *tplg_filename = fixed_tplg_filename;
+ }
+
+ dev_info(sdev->dev, "DMICs detected in NHLT tables: %d\n", dmic_num);
+ *dmic_found = dmic_num;
+
+ return 0;
+}
+#endif
+
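+/*
+ * Capability init done after i915 is up: reset and init the HDA controller,
+ * scan ACPI for SoundWire links and probe them when present, then probe the
+ * HDA codecs and release the link references taken during probe.
+ */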
+static int hda_init_caps(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct snd_sof_pdata *pdata = sdev->pdata;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_ext_link *hlink;
+#endif
+ struct sof_intel_hda_dev *hdev = pdata->hw_pdata;
+ u32 link_mask;
+ int ret = 0;
+
+ /* check if dsp is there */
+ if (bus->ppcap)
+ dev_dbg(sdev->dev, "PP capability, will probe DSP later.\n");
+
+ /* Init HDA controller after i915 init */
+ ret = hda_dsp_ctrl_init_chip(sdev, true);
+ if (ret < 0) {
+ dev_err(bus->dev, "error: init chip failed with ret: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* scan SoundWire capabilities exposed by DSDT */
+ ret = hda_sdw_acpi_scan(sdev);
+ if (ret < 0) {
+ dev_dbg(sdev->dev, "skipping SoundWire, not detected with ACPI scan\n");
+ goto skip_soundwire;
+ }
+
+ link_mask = hdev->info.link_mask;
+ if (!link_mask) {
+ dev_dbg(sdev->dev, "skipping SoundWire, no links enabled\n");
+ goto skip_soundwire;
+ }
+
+ /*
+ * probe/allocate SoundWire resources.
+ * The hardware configuration takes place in hda_sdw_startup
+ * after power rails are enabled.
+ * It's entirely possible to have a mix of I2S/DMIC/SoundWire
+ * devices, so we allocate the resources in all cases.
+ */
+ ret = hda_sdw_probe(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: SoundWire probe error\n");
+ return ret;
+ }
+
+skip_soundwire:
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ if (bus->mlcap)
+ snd_hdac_ext_bus_get_ml_capabilities(bus);
+
+ /* create codec instances */
+ hda_codec_probe_bus(sdev, hda_codec_use_common_hdmi);
+
+ if (!HDA_IDISP_CODEC(bus->codec_mask))
+ hda_codec_i915_display_power(sdev, false);
+
+ /*
+ * we are done probing so decrement link counts
+ */
+ list_for_each_entry(hlink, &bus->hlink_list, list)
+ snd_hdac_ext_bus_link_put(bus, hlink);
+#endif
+ return 0;
+}
+
+static void hda_check_for_state_change(struct snd_sof_dev *sdev)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ unsigned int codec_mask;
+
+ codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+ if (codec_mask) {
+ hda_codec_jack_check(sdev);
+ snd_hdac_chip_writew(bus, STATESTS, codec_mask);
+ }
+#endif
+}
+
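+/*
+ * Top-half IRQ handler: check the global interrupt status (GIS); if set,
+ * mask the global interrupt and defer to the threaded handler.
+ */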
+static irqreturn_t hda_dsp_interrupt_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+
+ /*
+ * Get global interrupt status. It includes all hardware interrupt
+ * sources in the Intel HD Audio controller.
+ */
+ if (snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS) &
+ SOF_HDA_INTSTS_GIS) {
+
+ /* disable GIE interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_INTCTL,
+ SOF_HDA_INT_GLOBAL_EN,
+ 0);
+
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t hda_dsp_interrupt_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ /* deal with streams and controller first */
+ if (hda_dsp_check_stream_irq(sdev)) {
+ trace_sof_intel_hda_irq(sdev, "stream");
+ hda_dsp_stream_threaded_handler(irq, sdev);
+ }
+
+ if (hda_check_ipc_irq(sdev)) {
+ trace_sof_intel_hda_irq(sdev, "ipc");
+ sof_ops(sdev)->irq_thread(irq, sdev);
+ }
+
+ if (hda_dsp_check_sdw_irq(sdev)) {
+ trace_sof_intel_hda_irq(sdev, "sdw");
+ hda_dsp_sdw_thread(irq, hdev->sdw);
+ }
+
+ if (hda_sdw_check_wakeen_irq(sdev)) {
+ trace_sof_intel_hda_irq(sdev, "wakeen");
+ hda_sdw_process_wakeen(sdev);
+ }
+
+ hda_check_for_state_change(sdev);
+
+ /* enable GIE interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_INTCTL,
+ SOF_HDA_INT_GLOBAL_EN,
+ SOF_HDA_INT_GLOBAL_EN);
+
+ return IRQ_HANDLED;
+}
+
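+/*
+ * Main probe: confirm via the PCI class that a DSP is present, map the HDA
+ * and DSP BARs, set the DMA mask, init host streams, register the IPC
+ * interrupt (MSI when possible) and enable the PP capability interrupts.
+ */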
+int hda_dsp_probe(struct snd_sof_dev *sdev)
+{
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ struct sof_intel_hda_dev *hdev;
+ struct hdac_bus *bus;
+ const struct sof_intel_dsp_desc *chip;
+ int ret = 0;
+
+ /*
+ * detect DSP by checking class/subclass/prog-id information
+ * class=04 subclass 03 prog-if 00: no DSP, legacy driver is required
+ * class=04 subclass 01 prog-if 00: DSP is present
+ * (and may be required e.g. for DMIC or SSP support)
+ * class=04 subclass 03 prog-if 80: either of DSP or legacy mode works
+ */
+ if (pci->class == 0x040300) {
+ dev_err(sdev->dev, "error: the DSP is not enabled on this platform, aborting probe\n");
+ return -ENODEV;
+ } else if (pci->class != 0x040100 && pci->class != 0x040380) {
+ dev_err(sdev->dev, "error: unknown PCI class/subclass/prog-if 0x%06x found, aborting probe\n", pci->class);
+ return -ENODEV;
+ }
+ dev_info(sdev->dev, "DSP detected with PCI class/subclass/prog-if 0x%06x\n", pci->class);
+
+ chip = get_chip_info(sdev->pdata);
+ if (!chip) {
+ dev_err(sdev->dev, "error: no such device supported, chip id:%x\n",
+ pci->device);
+ ret = -EIO;
+ goto err;
+ }
+
+ sdev->num_cores = chip->cores_num;
+
+ hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+ sdev->pdata->hw_pdata = hdev;
+ hdev->desc = chip;
+
+ hdev->dmic_dev = platform_device_register_data(sdev->dev, "dmic-codec",
+ PLATFORM_DEVID_NONE,
+ NULL, 0);
+ if (IS_ERR(hdev->dmic_dev)) {
+ dev_err(sdev->dev, "error: failed to create DMIC device\n");
+ return PTR_ERR(hdev->dmic_dev);
+ }
+
+	/*
+	 * use position update IPC if it is either forced
+	 * or we have no other choice
+	 */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_FORCE_IPC_POSITION)
+ hdev->no_ipc_position = 0;
+#else
+ hdev->no_ipc_position = sof_ops(sdev)->pcm_pointer ? 1 : 0;
+#endif
+
+ /* set up HDA base */
+ bus = sof_to_bus(sdev);
+ ret = hda_init(sdev);
+ if (ret < 0)
+ goto hdac_bus_unmap;
+
+ /* DSP base */
+ sdev->bar[HDA_DSP_BAR] = pci_ioremap_bar(pci, HDA_DSP_BAR);
+ if (!sdev->bar[HDA_DSP_BAR]) {
+ dev_err(sdev->dev, "error: ioremap error\n");
+ ret = -ENXIO;
+ goto hdac_bus_unmap;
+ }
+
+ sdev->mmio_bar = HDA_DSP_BAR;
+ sdev->mailbox_bar = HDA_DSP_BAR;
+
+ /* allow 64bit DMA address if supported by H/W */
+ if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(64))) {
+ dev_dbg(sdev->dev, "DMA mask is 32 bit\n");
+ dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32));
+ }
+ dma_set_max_seg_size(&pci->dev, UINT_MAX);
+
+ /* init streams */
+ ret = hda_dsp_stream_init(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to init streams\n");
+		/*
+		 * not all errors are due to memory issues, but trying
+		 * to free everything does no harm
+		 */
+ goto free_streams;
+ }
+
+	/*
+	 * register our IRQ
+	 * try to enable MSI first; if that fails, fall back to the
+	 * legacy interrupt mode
+	 * TODO: support multiple MSI vectors
+	 */
+ if (hda_use_msi && pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI) > 0) {
+ dev_info(sdev->dev, "use msi interrupt mode\n");
+ sdev->ipc_irq = pci_irq_vector(pci, 0);
+ /* initialised to "false" by kzalloc() */
+ sdev->msi_enabled = true;
+ }
+
+ if (!sdev->msi_enabled) {
+ dev_info(sdev->dev, "use legacy interrupt mode\n");
+		/*
+		 * in IO-APIC mode, hda->irq and ipc_irq share the same
+		 * IRQ number as pci->irq
+		 */
+ sdev->ipc_irq = pci->irq;
+ }
+
+ dev_dbg(sdev->dev, "using IPC IRQ %d\n", sdev->ipc_irq);
+ ret = request_threaded_irq(sdev->ipc_irq, hda_dsp_interrupt_handler,
+ hda_dsp_interrupt_thread,
+ IRQF_SHARED, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IPC IRQ %d\n",
+ sdev->ipc_irq);
+ goto free_irq_vector;
+ }
+
+ pci_set_master(pci);
+ synchronize_irq(pci->irq);
+
+ /*
+ * clear TCSEL to clear playback on some HD Audio
+ * codecs. PCI TCSEL is defined in the Intel manuals.
+ */
+ snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
+
+ /* init HDA capabilities */
+ ret = hda_init_caps(sdev);
+ if (ret < 0)
+ goto free_ipc_irq;
+
+ /* enable ppcap interrupt */
+ hda_dsp_ctrl_ppcap_enable(sdev, true);
+ hda_dsp_ctrl_ppcap_int_enable(sdev, true);
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = HDA_DSP_MBOX_UPLINK_OFFSET;
+
+ INIT_DELAYED_WORK(&hdev->d0i3_work, hda_dsp_d0i3_work);
+
+ init_waitqueue_head(&hdev->waitq);
+
+ hdev->nhlt = intel_nhlt_init(sdev->dev);
+
+ return 0;
+
+free_ipc_irq:
+ free_irq(sdev->ipc_irq, sdev);
+free_irq_vector:
+ if (sdev->msi_enabled)
+ pci_free_irq_vectors(pci);
+free_streams:
+ hda_dsp_stream_free(sdev);
+/* dsp_unmap: not currently used */
+ iounmap(sdev->bar[HDA_DSP_BAR]);
+hdac_bus_unmap:
+ platform_device_unregister(hdev->dmic_dev);
+ iounmap(bus->remap_addr);
+ hda_codec_i915_exit(sdev);
+err:
+ return ret;
+}
+
+int hda_dsp_remove(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ struct nhlt_acpi_table *nhlt = hda->nhlt;
+
+ if (nhlt)
+ intel_nhlt_free(nhlt);
+
+ /* cancel any attempt for DSP D0I3 */
+ cancel_delayed_work_sync(&hda->d0i3_work);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* codec removal, invoke bus_device_remove */
+ snd_hdac_ext_bus_device_remove(bus);
+#endif
+
+ hda_sdw_exit(sdev);
+
+ if (!IS_ERR_OR_NULL(hda->dmic_dev))
+ platform_device_unregister(hda->dmic_dev);
+
+ /* disable DSP IRQ */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_PIE, 0);
+
+ /* disable CIE and GIE interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN, 0);
+
+ /* no need to check for error as the DSP will be disabled anyway */
+ if (chip && chip->power_down_dsp)
+ chip->power_down_dsp(sdev);
+
+ /* disable DSP */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_GPROCEN, 0);
+
+ free_irq(sdev->ipc_irq, sdev);
+ if (sdev->msi_enabled)
+ pci_free_irq_vectors(pci);
+
+ hda_dsp_stream_free(sdev);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_link_free_all(bus);
+#endif
+
+ iounmap(sdev->bar[HDA_DSP_BAR]);
+ iounmap(bus->remap_addr);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_ext_bus_exit(bus);
+#endif
+ hda_codec_i915_exit(sdev);
+
+ return 0;
+}
+
+int hda_power_down_dsp(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ return hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+static void hda_generic_machine_select(struct snd_sof_dev *sdev,
+ struct snd_soc_acpi_mach **mach)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct snd_soc_acpi_mach_params *mach_params;
+ struct snd_soc_acpi_mach *hda_mach;
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const char *tplg_filename;
+ const char *idisp_str;
+ int dmic_num = 0;
+ int codec_num = 0;
+ int ret;
+ int i;
+
+ /* codec detection */
+ if (!bus->codec_mask) {
+ dev_info(bus->dev, "no hda codecs found!\n");
+ } else {
+ dev_info(bus->dev, "hda codecs found, mask %lx\n",
+ bus->codec_mask);
+
+ for (i = 0; i < HDA_MAX_CODECS; i++) {
+ if (bus->codec_mask & (1 << i))
+ codec_num++;
+ }
+
+ /*
+ * If no machine driver was found, the generic HDA machine
+ * driver can handle:
+ *  - one HDMI codec, and/or
+ *  - one external HDAudio codec
+ */
+ if (!*mach && codec_num <= 2) {
+ bool tplg_fixup;
+
+ hda_mach = snd_soc_acpi_intel_hda_machines;
+
+ dev_info(bus->dev, "using HDA machine driver %s now\n",
+ hda_mach->drv_name);
+
+ if (codec_num == 1 && HDA_IDISP_CODEC(bus->codec_mask))
+ idisp_str = "-idisp";
+ else
+ idisp_str = "";
+
+ /* topology: use the info from hda_machines */
+ if (pdata->tplg_filename) {
+ tplg_fixup = false;
+ tplg_filename = pdata->tplg_filename;
+ } else {
+ tplg_fixup = true;
+ tplg_filename = hda_mach->sof_tplg_filename;
+ }
+ ret = dmic_detect_topology_fixup(sdev, &tplg_filename, idisp_str, &dmic_num,
+ tplg_fixup);
+ if (ret < 0)
+ return;
+
+ hda_mach->mach_params.dmic_num = dmic_num;
+ pdata->tplg_filename = tplg_filename;
+
+ if (codec_num == 2 ||
+ (codec_num == 1 && !HDA_IDISP_CODEC(bus->codec_mask))) {
+ /*
+ * Prevent SoundWire links from starting when an external
+ * HDaudio codec is used
+ */
+ hda_mach->mach_params.link_mask = 0;
+ } else {
+ /*
+ * Allow SoundWire links to start when no external HDaudio codec
+ * was detected. This will not create a SoundWire card but
+ * will help detect if any SoundWire codec reports as ATTACHED.
+ */
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ hda_mach->mach_params.link_mask = hdev->info.link_mask;
+ }
+
+ *mach = hda_mach;
+ }
+ }
+
+ /* used by hda machine driver to create dai links */
+ if (*mach) {
+ mach_params = &(*mach)->mach_params;
+ mach_params->codec_mask = bus->codec_mask;
+ mach_params->common_hdmi_codec_drv = hda_codec_use_common_hdmi;
+ }
+}
+#else
+static void hda_generic_machine_select(struct snd_sof_dev *sdev,
+ struct snd_soc_acpi_mach **mach)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+
+#define SDW_CODEC_ADR_MASK(_adr) ((_adr) & (SDW_DISCO_LINK_ID_MASK | SDW_VERSION_MASK | \
+ SDW_MFG_ID_MASK | SDW_PART_ID_MASK))
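+/*
+ * The mask keeps the link ID, SoundWire version, manufacturer ID and part ID
+ * fields of an _ADR value, so two instances of the same part on a link (for
+ * example two identical amplifiers) compare as equal regardless of unique ID.
+ */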
+
+/* Check if all Slaves defined on the link can be found */
+static bool link_slaves_found(struct snd_sof_dev *sdev,
+ const struct snd_soc_acpi_link_adr *link,
+ struct sdw_intel_ctx *sdw)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct sdw_intel_slave_id *ids = sdw->ids;
+ int num_slaves = sdw->num_slaves;
+ unsigned int part_id, link_id, unique_id, mfg_id, version;
+ int i, j, k;
+
+ for (i = 0; i < link->num_adr; i++) {
+ u64 adr = link->adr_d[i].adr;
+ int reported_part_count = 0;
+
+ mfg_id = SDW_MFG_ID(adr);
+ part_id = SDW_PART_ID(adr);
+ link_id = SDW_DISCO_LINK_ID(adr);
+ version = SDW_VERSION(adr);
+
+ for (j = 0; j < num_slaves; j++) {
+ /* find out how many identical parts were reported on that link */
+ if (ids[j].link_id == link_id &&
+ ids[j].id.part_id == part_id &&
+ ids[j].id.mfg_id == mfg_id &&
+ ids[j].id.sdw_version == version)
+ reported_part_count++;
+ }
+
+ for (j = 0; j < num_slaves; j++) {
+ int expected_part_count = 0;
+
+ if (ids[j].link_id != link_id ||
+ ids[j].id.part_id != part_id ||
+ ids[j].id.mfg_id != mfg_id ||
+ ids[j].id.sdw_version != version)
+ continue;
+
+ /* find out how many identical parts are expected */
+ for (k = 0; k < link->num_adr; k++) {
+ u64 adr2 = link->adr_d[k].adr;
+
+ if (SDW_CODEC_ADR_MASK(adr2) == SDW_CODEC_ADR_MASK(adr))
+ expected_part_count++;
+ }
+
+ if (reported_part_count == expected_part_count) {
+ /*
+ * we have to check unique id
+ * if there is more than one
+ * Slave on the link
+ */
+ unique_id = SDW_UNIQUE_ID(adr);
+ if (reported_part_count == 1 ||
+ ids[j].id.unique_id == unique_id) {
+ dev_dbg(bus->dev, "found %x at link %d\n",
+ part_id, link_id);
+ break;
+ }
+ } else {
+ dev_dbg(bus->dev, "part %x reported %d expected %d on link %d, skipping\n",
+ part_id, reported_part_count, expected_part_count, link_id);
+ }
+ }
+ if (j == num_slaves) {
+ dev_dbg(bus->dev,
+ "Slave %x not found\n",
+ part_id);
+ return false;
+ }
+ }
+ return true;
+}
+
+static struct snd_soc_acpi_mach *hda_sdw_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct snd_soc_acpi_link_adr *link;
+ struct snd_soc_acpi_mach *mach;
+ struct sof_intel_hda_dev *hdev;
+ u32 link_mask;
+ int i;
+
+ hdev = pdata->hw_pdata;
+ link_mask = hdev->info.link_mask;
+
+ /*
+ * Select SoundWire machine driver if needed using the
+ * alternate tables. This case deals with SoundWire-only
+ * machines, for mixed cases with I2C/I2S the detection relies
+ * on the HID list.
+ */
+ if (link_mask) {
+ for (mach = pdata->desc->alt_machines;
+ mach && mach->link_mask; mach++) {
+ /*
+ * On some platforms such as Up Extreme all links are
+ * enabled but only one link can be used by the external
+ * codec. Instead of requiring an exact match of the two
+ * masks, first check whether the machine's link_mask is a
+ * subset of the link_mask supported by the hardware, then
+ * go on to match link_adr
+ */
+ if (~link_mask & mach->link_mask)
+ continue;
+
+ /* No need to match adr if no links are defined */
+ if (!mach->links)
+ break;
+
+ link = mach->links;
+ for (i = 0; i < hdev->info.count && link->num_adr;
+ i++, link++) {
+ /*
+ * Try next machine if any expected Slaves
+ * are not found on this link.
+ */
+ if (!link_slaves_found(sdev, link, hdev->sdw))
+ break;
+ }
+ /* Found if all Slaves are checked */
+ if (i == hdev->info.count || !link->num_adr)
+ break;
+ }
+ if (mach && mach->link_mask) {
+ int dmic_num = 0;
+ bool tplg_fixup;
+ const char *tplg_filename;
+
+ mach->mach_params.links = mach->links;
+ mach->mach_params.link_mask = mach->link_mask;
+ mach->mach_params.platform = dev_name(sdev->dev);
+
+ if (pdata->tplg_filename) {
+ tplg_fixup = false;
+ } else {
+ tplg_fixup = true;
+ tplg_filename = mach->sof_tplg_filename;
+ }
+
+ /*
+ * DMICs use up to 4 pins and are typically pin-muxed with SoundWire
+ * link 2 and 3, or link 1 and 2, thus we only try to enable dmics
+ * if all conditions are true:
+ * a) 2 or fewer links are used by SoundWire
+ * b) the NHLT table reports the presence of microphones
+ */
+ if (hweight_long(mach->link_mask) <= 2) {
+ int ret;
+
+ ret = dmic_detect_topology_fixup(sdev, &tplg_filename, "",
+ &dmic_num, tplg_fixup);
+ if (ret < 0)
+ return NULL;
+ }
+ if (tplg_fixup)
+ pdata->tplg_filename = tplg_filename;
+ mach->mach_params.dmic_num = dmic_num;
+
+ dev_dbg(sdev->dev,
+ "SoundWire machine driver %s topology %s\n",
+ mach->drv_name,
+ pdata->tplg_filename);
+
+ return mach;
+ }
+
+ dev_info(sdev->dev, "No SoundWire machine driver found\n");
+ }
+
+ return NULL;
+}
+#else
+static struct snd_soc_acpi_mach *hda_sdw_machine_select(struct snd_sof_dev *sdev)
+{
+ return NULL;
+}
+#endif
+
+void hda_set_mach_params(struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct snd_soc_acpi_mach_params *mach_params;
+
+ mach_params = &mach->mach_params;
+ mach_params->platform = dev_name(sdev->dev);
+ mach_params->num_dai_drivers = desc->ops->num_drv;
+ mach_params->dai_drivers = desc->ops->drv;
+}
+
+struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_soc_acpi_mach *mach;
+ const char *tplg_filename;
+
+ mach = snd_soc_acpi_find_machine(desc->machines);
+ if (mach) {
+ bool add_extension = false;
+ bool tplg_fixup = false;
+
+ /*
+ * If tplg file name is overridden, use it instead of
+ * the one set in mach table
+ */
+ if (!sof_pdata->tplg_filename) {
+ sof_pdata->tplg_filename = mach->sof_tplg_filename;
+ tplg_fixup = true;
+ }
+
+ /* report to machine driver if any DMICs are found */
+ mach->mach_params.dmic_num = check_dmic_num(sdev);
+
+ if (tplg_fixup &&
+ mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER &&
+ mach->mach_params.dmic_num) {
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s%s%d%s",
+ sof_pdata->tplg_filename,
+ "-dmic",
+ mach->mach_params.dmic_num,
+ "ch");
+ if (!tplg_filename)
+ return NULL;
+
+ sof_pdata->tplg_filename = tplg_filename;
+ add_extension = true;
+ }
+
+ if (mach->link_mask) {
+ mach->mach_params.links = mach->links;
+ mach->mach_params.link_mask = mach->link_mask;
+ }
+
+ /* report SSP link mask to machine driver */
+ mach->mach_params.i2s_link_mask = check_nhlt_ssp_mask(sdev);
+
+ if (tplg_fixup &&
+ mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER &&
+ mach->mach_params.i2s_link_mask) {
+ const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
+ int ssp_num;
+ int mclk_mask;
+
+ if (hweight_long(mach->mach_params.i2s_link_mask) > 1 &&
+ !(mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_SSP_MSB))
+ dev_warn(sdev->dev, "More than one SSP exposed by NHLT, choosing MSB\n");
+
+ /* fls returns 1-based results, SSP indices are 0-based */
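+ /* e.g. an i2s_link_mask of 0x6 (SSP1 and SSP2 exposed): fls() returns 3, so ssp_num is 2 */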
+ ssp_num = fls(mach->mach_params.i2s_link_mask) - 1;
+
+ if (ssp_num >= chip->ssp_count) {
+ dev_err(sdev->dev, "Invalid SSP %d, max on this platform is %d\n",
+ ssp_num, chip->ssp_count);
+ return NULL;
+ }
+
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s%s%d",
+ sof_pdata->tplg_filename,
+ "-ssp",
+ ssp_num);
+ if (!tplg_filename)
+ return NULL;
+
+ sof_pdata->tplg_filename = tplg_filename;
+ add_extension = true;
+
+ mclk_mask = check_nhlt_ssp_mclk_mask(sdev, ssp_num);
+
+ if (mclk_mask < 0) {
+ dev_err(sdev->dev, "Invalid MCLK configuration\n");
+ return NULL;
+ }
+
+ dev_dbg(sdev->dev, "MCLK mask %#x found in NHLT\n", mclk_mask);
+
+ if (mclk_mask) {
+ dev_info(sdev->dev, "Overriding topology with MCLK mask %#x from NHLT\n", mclk_mask);
+ sdev->mclk_id_override = true;
+ sdev->mclk_id_quirk = (mclk_mask & BIT(0)) ? 0 : 1;
+ }
+ }
+
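+ /*
+  * Append the ".tplg" extension last; with both fixups applied, an
+  * illustrative base name such as "sof-abc" with 4 DMICs and SSP0
+  * selected would end up as "sof-abc-dmic4ch-ssp0.tplg".
+  */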
+ if (tplg_fixup && add_extension) {
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s%s",
+ sof_pdata->tplg_filename,
+ ".tplg");
+ if (!tplg_filename)
+ return NULL;
+
+ sof_pdata->tplg_filename = tplg_filename;
+ }
+
+ /* check if mclk_id should be modified from topology defaults */
+ if (mclk_id_override >= 0) {
+ dev_info(sdev->dev, "Overriding topology with MCLK %d from kernel_parameter\n", mclk_id_override);
+ sdev->mclk_id_override = true;
+ sdev->mclk_id_quirk = mclk_id_override;
+ }
+ }
+
+ /*
+ * If I2S fails, try SoundWire
+ */
+ if (!mach)
+ mach = hda_sdw_machine_select(sdev);
+
+ /*
+ * Choose HDA generic machine driver if mach is NULL.
+ * Otherwise, set certain mach params.
+ */
+ hda_generic_machine_select(sdev, &mach);
+ if (!mach)
+ dev_warn(sdev->dev, "warning: No matching ASoC machine driver found\n");
+
+ return mach;
+}
+
+int hda_pci_intel_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+ int ret;
+
+ ret = snd_intel_dsp_driver_probe(pci);
+ if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+ dev_dbg(&pci->dev, "SOF PCI driver not selected, aborting probe\n");
+ return -ENODEV;
+ }
+
+ return sof_pci_probe(pci, pci_id);
+}
+EXPORT_SYMBOL_NS(hda_pci_intel_probe, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int hda_register_clients(struct snd_sof_dev *sdev)
+{
+ return hda_probes_register(sdev);
+}
+
+void hda_unregister_clients(struct snd_sof_dev *sdev)
+{
+ hda_probes_unregister(sdev);
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
+MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC);
+MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_IMPORT_NS(SND_INTEL_SOUNDWIRE_ACPI);
+MODULE_IMPORT_NS(SOUNDWIRE_INTEL_INIT);
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
new file mode 100644
index 000000000..9acd21901
--- /dev/null
+++ b/sound/soc/sof/intel/hda.h
@@ -0,0 +1,860 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_HDA_H
+#define __SOF_INTEL_HDA_H
+
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_intel.h>
+#include <sound/compress_driver.h>
+#include <sound/hda_codec.h>
+#include <sound/hdaudio_ext.h>
+#include "../sof-client-probes.h"
+#include "../sof-audio.h"
+#include "shim.h"
+
+/* PCI registers */
+#define PCI_TCSEL 0x44
+#define PCI_PGCTL PCI_TCSEL
+#define PCI_CGCTL 0x48
+
+/* PCI_PGCTL bits */
+#define PCI_PGCTL_ADSPPGD BIT(2)
+#define PCI_PGCTL_LSRMD_MASK BIT(4)
+
+/* PCI_CGCTL bits */
+#define PCI_CGCTL_MISCBDCGE_MASK BIT(6)
+#define PCI_CGCTL_ADSPDCGE BIT(1)
+
+/* Legacy HDA registers and bits used - widths are variable */
+#define SOF_HDA_GCAP 0x0
+#define SOF_HDA_GCTL 0x8
+/* accept unsol. response enable */
+#define SOF_HDA_GCTL_UNSOL BIT(8)
+#define SOF_HDA_LLCH 0x14
+#define SOF_HDA_INTCTL 0x20
+#define SOF_HDA_INTSTS 0x24
+#define SOF_HDA_WAKESTS 0x0E
+#define SOF_HDA_WAKESTS_INT_MASK ((1 << 8) - 1)
+#define SOF_HDA_RIRBSTS 0x5d
+
+/* SOF_HDA_GCTL register bits */
+#define SOF_HDA_GCTL_RESET BIT(0)
+
+/* SOF_HDA_INTCTL regs */
+#define SOF_HDA_INT_GLOBAL_EN BIT(31)
+#define SOF_HDA_INT_CTRL_EN BIT(30)
+#define SOF_HDA_INT_ALL_STREAM 0xff
+
+/* SOF_HDA_INTSTS regs */
+#define SOF_HDA_INTSTS_GIS BIT(31)
+
+#define SOF_HDA_MAX_CAPS 10
+#define SOF_HDA_CAP_ID_OFF 16
+#define SOF_HDA_CAP_ID_MASK GENMASK(SOF_HDA_CAP_ID_OFF + 11,\
+ SOF_HDA_CAP_ID_OFF)
+#define SOF_HDA_CAP_NEXT_MASK 0xFFFF
+
+#define SOF_HDA_GTS_CAP_ID 0x1
+#define SOF_HDA_ML_CAP_ID 0x2
+
+#define SOF_HDA_PP_CAP_ID 0x3
+#define SOF_HDA_REG_PP_PPCH 0x10
+#define SOF_HDA_REG_PP_PPCTL 0x04
+#define SOF_HDA_REG_PP_PPSTS 0x08
+#define SOF_HDA_PPCTL_PIE BIT(31)
+#define SOF_HDA_PPCTL_GPROCEN BIT(30)
+
+/* Vendor Specific Registers */
+#define SOF_HDA_VS_D0I3C 0x104A
+
+/* D0I3C Register fields */
+#define SOF_HDA_VS_D0I3C_CIP BIT(0) /* Command-In-Progress */
+#define SOF_HDA_VS_D0I3C_I3 BIT(2) /* D0i3 enable bit */
+
+/* DPIB entry size: 8 Bytes = 2 DWords */
+#define SOF_HDA_DPIB_ENTRY_SIZE 0x8
+
+#define SOF_HDA_SPIB_CAP_ID 0x4
+#define SOF_HDA_DRSM_CAP_ID 0x5
+
+#define SOF_HDA_SPIB_BASE 0x08
+#define SOF_HDA_SPIB_INTERVAL 0x08
+#define SOF_HDA_SPIB_SPIB 0x00
+#define SOF_HDA_SPIB_MAXFIFO 0x04
+
+#define SOF_HDA_PPHC_BASE 0x10
+#define SOF_HDA_PPHC_INTERVAL 0x10
+
+#define SOF_HDA_PPLC_BASE 0x10
+#define SOF_HDA_PPLC_MULTI 0x10
+#define SOF_HDA_PPLC_INTERVAL 0x10
+
+#define SOF_HDA_DRSM_BASE 0x08
+#define SOF_HDA_DRSM_INTERVAL 0x08
+
+/* Descriptor error interrupt */
+#define SOF_HDA_CL_DMA_SD_INT_DESC_ERR 0x10
+
+/* FIFO error interrupt */
+#define SOF_HDA_CL_DMA_SD_INT_FIFO_ERR 0x08
+
+/* Buffer completion interrupt */
+#define SOF_HDA_CL_DMA_SD_INT_COMPLETE 0x04
+
+#define SOF_HDA_CL_DMA_SD_INT_MASK \
+ (SOF_HDA_CL_DMA_SD_INT_DESC_ERR | \
+ SOF_HDA_CL_DMA_SD_INT_FIFO_ERR | \
+ SOF_HDA_CL_DMA_SD_INT_COMPLETE)
+#define SOF_HDA_SD_CTL_DMA_START 0x02 /* Stream DMA start bit */
+
+/* Intel HD Audio Code Loader DMA Registers */
+#define SOF_HDA_ADSP_LOADER_BASE 0x80
+#define SOF_HDA_ADSP_DPLBASE 0x70
+#define SOF_HDA_ADSP_DPUBASE 0x74
+#define SOF_HDA_ADSP_DPLBASE_ENABLE 0x01
+
+/* Stream Registers */
+#define SOF_HDA_ADSP_REG_CL_SD_CTL 0x00
+#define SOF_HDA_ADSP_REG_CL_SD_STS 0x03
+#define SOF_HDA_ADSP_REG_CL_SD_LPIB 0x04
+#define SOF_HDA_ADSP_REG_CL_SD_CBL 0x08
+#define SOF_HDA_ADSP_REG_CL_SD_LVI 0x0C
+#define SOF_HDA_ADSP_REG_CL_SD_FIFOW 0x0E
+#define SOF_HDA_ADSP_REG_CL_SD_FIFOSIZE 0x10
+#define SOF_HDA_ADSP_REG_CL_SD_FORMAT 0x12
+#define SOF_HDA_ADSP_REG_CL_SD_FIFOL 0x14
+#define SOF_HDA_ADSP_REG_CL_SD_BDLPL 0x18
+#define SOF_HDA_ADSP_REG_CL_SD_BDLPU 0x1C
+#define SOF_HDA_ADSP_SD_ENTRY_SIZE 0x20
+
+/* CL: Software Position Based FIFO Capability Registers */
+#define SOF_DSP_REG_CL_SPBFIFO \
+ (SOF_HDA_ADSP_LOADER_BASE + 0x20)
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCH 0x0
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL 0x4
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_SPIB 0x8
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_MAXFIFOS 0xc
+
+/* Stream Number */
+#define SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT 20
+#define SOF_HDA_CL_SD_CTL_STREAM_TAG_MASK \
+ GENMASK(SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT + 3,\
+ SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT)
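+/*
+ * The stream tag occupies bits 23:20 of CL_SD_CTL; e.g. stream tag 5 is
+ * programmed as (5 << SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT) masked with
+ * SOF_HDA_CL_SD_CTL_STREAM_TAG_MASK.
+ */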
+
+#define HDA_DSP_HDA_BAR 0
+#define HDA_DSP_PP_BAR 1
+#define HDA_DSP_SPIB_BAR 2
+#define HDA_DSP_DRSM_BAR 3
+#define HDA_DSP_BAR 4
+
+#define SRAM_WINDOW_OFFSET(x) (0x80000 + (x) * 0x20000)
+
+#define HDA_DSP_MBOX_OFFSET SRAM_WINDOW_OFFSET(0)
+
+#define HDA_DSP_PANIC_OFFSET(x) \
+ (((x) & 0xFFFFFF) + HDA_DSP_MBOX_OFFSET)
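+/*
+ * SRAM_WINDOW_OFFSET(0) is 0x80000 and the windows are 0x20000 apart, so e.g.
+ * HDA_DSP_PANIC_OFFSET(0x3000) resolves to 0x83000 inside window 0.
+ */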
+
+/* SRAM window 0 FW "registers" */
+#define HDA_DSP_SRAM_REG_ROM_STATUS (HDA_DSP_MBOX_OFFSET + 0x0)
+#define HDA_DSP_SRAM_REG_ROM_ERROR (HDA_DSP_MBOX_OFFSET + 0x4)
+/* FW and ROM share offset 4 */
+#define HDA_DSP_SRAM_REG_FW_STATUS (HDA_DSP_MBOX_OFFSET + 0x4)
+#define HDA_DSP_SRAM_REG_FW_TRACEP (HDA_DSP_MBOX_OFFSET + 0x8)
+#define HDA_DSP_SRAM_REG_FW_END (HDA_DSP_MBOX_OFFSET + 0xc)
+
+#define HDA_DSP_MBOX_UPLINK_OFFSET 0x81000
+
+#define HDA_DSP_STREAM_RESET_TIMEOUT 300
+/*
+ * Timeout in us for setting the stream RUN bit during stream
+ * start/stop. The timeout expires if the new RUN bit value
+ * cannot be read back within the specified time.
+ */
+#define HDA_DSP_STREAM_RUN_TIMEOUT 300
+
+#define HDA_DSP_SPIB_ENABLE 1
+#define HDA_DSP_SPIB_DISABLE 0
+
+#define SOF_HDA_MAX_BUFFER_SIZE (32 * PAGE_SIZE)
+
+#define HDA_DSP_STACK_DUMP_SIZE 32
+
+/* ROM/FW status register */
+#define FSR_STATE_MASK GENMASK(23, 0)
+#define FSR_WAIT_STATE_MASK GENMASK(27, 24)
+#define FSR_MODULE_MASK GENMASK(30, 28)
+#define FSR_HALTED BIT(31)
+#define FSR_TO_STATE_CODE(x) ((x) & FSR_STATE_MASK)
+#define FSR_TO_WAIT_STATE_CODE(x) (((x) & FSR_WAIT_STATE_MASK) >> 24)
+#define FSR_TO_MODULE_CODE(x) (((x) & FSR_MODULE_MASK) >> 28)
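+/*
+ * Example decode: a status value of 0x12000005 splits into module code 0x1,
+ * wait state 0x2 and state code 0x5, with FSR_HALTED clear.
+ */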
+
+/* Wait states */
+#define FSR_WAIT_FOR_IPC_BUSY 0x1
+#define FSR_WAIT_FOR_IPC_DONE 0x2
+#define FSR_WAIT_FOR_CACHE_INVALIDATION 0x3
+#define FSR_WAIT_FOR_LP_SRAM_OFF 0x4
+#define FSR_WAIT_FOR_DMA_BUFFER_FULL 0x5
+#define FSR_WAIT_FOR_CSE_CSR 0x6
+
+/* Module codes */
+#define FSR_MOD_ROM 0x0
+#define FSR_MOD_ROM_BYP 0x1
+#define FSR_MOD_BASE_FW 0x2
+#define FSR_MOD_LP_BOOT 0x3
+#define FSR_MOD_BRNGUP 0x4
+#define FSR_MOD_ROM_EXT 0x5
+
+/* State codes (module dependent) */
+/* Module independent states */
+#define FSR_STATE_INIT 0x0
+#define FSR_STATE_INIT_DONE 0x1
+#define FSR_STATE_FW_ENTERED 0x5
+
+/* ROM states */
+#define FSR_STATE_ROM_INIT FSR_STATE_INIT
+#define FSR_STATE_ROM_INIT_DONE FSR_STATE_INIT_DONE
+#define FSR_STATE_ROM_CSE_MANIFEST_LOADED 0x2
+#define FSR_STATE_ROM_FW_MANIFEST_LOADED 0x3
+#define FSR_STATE_ROM_FW_FW_LOADED 0x4
+#define FSR_STATE_ROM_FW_ENTERED FSR_STATE_FW_ENTERED
+#define FSR_STATE_ROM_VERIFY_FEATURE_MASK 0x6
+#define FSR_STATE_ROM_GET_LOAD_OFFSET 0x7
+#define FSR_STATE_ROM_FETCH_ROM_EXT 0x8
+#define FSR_STATE_ROM_FETCH_ROM_EXT_DONE 0x9
+#define FSR_STATE_ROM_BASEFW_ENTERED 0xf /* SKL */
+
+/* (ROM) CSE states */
+#define FSR_STATE_ROM_CSE_IMR_REQUEST 0x10
+#define FSR_STATE_ROM_CSE_IMR_GRANTED 0x11
+#define FSR_STATE_ROM_CSE_VALIDATE_IMAGE_REQUEST 0x12
+#define FSR_STATE_ROM_CSE_IMAGE_VALIDATED 0x13
+
+#define FSR_STATE_ROM_CSE_IPC_IFACE_INIT 0x20
+#define FSR_STATE_ROM_CSE_IPC_RESET_PHASE_1 0x21
+#define FSR_STATE_ROM_CSE_IPC_OPERATIONAL_ENTRY 0x22
+#define FSR_STATE_ROM_CSE_IPC_OPERATIONAL 0x23
+#define FSR_STATE_ROM_CSE_IPC_DOWN 0x24
+
+/* BRINGUP (or BRNGUP) states */
+#define FSR_STATE_BRINGUP_INIT FSR_STATE_INIT
+#define FSR_STATE_BRINGUP_INIT_DONE FSR_STATE_INIT_DONE
+#define FSR_STATE_BRINGUP_HPSRAM_LOAD 0x2
+#define FSR_STATE_BRINGUP_UNPACK_START 0x3
+#define FSR_STATE_BRINGUP_IMR_RESTORE 0x4
+#define FSR_STATE_BRINGUP_FW_ENTERED FSR_STATE_FW_ENTERED
+
+/* ROM status/error values */
+#define HDA_DSP_ROM_CSE_ERROR 40
+#define HDA_DSP_ROM_CSE_WRONG_RESPONSE 41
+#define HDA_DSP_ROM_IMR_TO_SMALL 42
+#define HDA_DSP_ROM_BASE_FW_NOT_FOUND 43
+#define HDA_DSP_ROM_CSE_VALIDATION_FAILED 44
+#define HDA_DSP_ROM_IPC_FATAL_ERROR 45
+#define HDA_DSP_ROM_L2_CACHE_ERROR 46
+#define HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL 47
+#define HDA_DSP_ROM_API_PTR_INVALID 50
+#define HDA_DSP_ROM_BASEFW_INCOMPAT 51
+#define HDA_DSP_ROM_UNHANDLED_INTERRUPT 0xBEE00000
+#define HDA_DSP_ROM_MEMORY_HOLE_ECC 0xECC00000
+#define HDA_DSP_ROM_KERNEL_EXCEPTION 0xCAFE0000
+#define HDA_DSP_ROM_USER_EXCEPTION 0xBEEF0000
+#define HDA_DSP_ROM_UNEXPECTED_RESET 0xDECAF000
+#define HDA_DSP_ROM_NULL_FW_ENTRY 0x4c4c4e55
+
+#define HDA_DSP_ROM_IPC_CONTROL 0x01000000
+#define HDA_DSP_ROM_IPC_PURGE_FW 0x00004000
+
+/* various timeout values */
+#define HDA_DSP_PU_TIMEOUT 50
+#define HDA_DSP_PD_TIMEOUT 50
+#define HDA_DSP_RESET_TIMEOUT_US 50000
+#define HDA_DSP_BASEFW_TIMEOUT_US 3000000
+#define HDA_DSP_INIT_TIMEOUT_US 500000
+#define HDA_DSP_CTRL_RESET_TIMEOUT 100
+#define HDA_DSP_WAIT_TIMEOUT 500 /* 500 msec */
+#define HDA_DSP_REG_POLL_INTERVAL_US 500 /* 0.5 msec */
+#define HDA_DSP_REG_POLL_RETRY_COUNT 50
+
+#define HDA_DSP_ADSPIC_IPC BIT(0)
+#define HDA_DSP_ADSPIS_IPC BIT(0)
+
+/* Intel HD Audio General DSP Registers */
+#define HDA_DSP_GEN_BASE 0x0
+#define HDA_DSP_REG_ADSPCS (HDA_DSP_GEN_BASE + 0x04)
+#define HDA_DSP_REG_ADSPIC (HDA_DSP_GEN_BASE + 0x08)
+#define HDA_DSP_REG_ADSPIS (HDA_DSP_GEN_BASE + 0x0C)
+#define HDA_DSP_REG_ADSPIC2 (HDA_DSP_GEN_BASE + 0x10)
+#define HDA_DSP_REG_ADSPIS2 (HDA_DSP_GEN_BASE + 0x14)
+
+#define HDA_DSP_REG_ADSPIS2_SNDW BIT(5)
+
+/* Intel HD Audio Inter-Processor Communication Registers */
+#define HDA_DSP_IPC_BASE 0x40
+#define HDA_DSP_REG_HIPCT (HDA_DSP_IPC_BASE + 0x00)
+#define HDA_DSP_REG_HIPCTE (HDA_DSP_IPC_BASE + 0x04)
+#define HDA_DSP_REG_HIPCI (HDA_DSP_IPC_BASE + 0x08)
+#define HDA_DSP_REG_HIPCIE (HDA_DSP_IPC_BASE + 0x0C)
+#define HDA_DSP_REG_HIPCCTL (HDA_DSP_IPC_BASE + 0x10)
+
+/* Intel Vendor Specific Registers */
+#define HDA_VS_INTEL_EM2 0x1030
+#define HDA_VS_INTEL_EM2_L1SEN BIT(13)
+#define HDA_VS_INTEL_LTRP_GB_MASK 0x3F
+
+/* HIPCI */
+#define HDA_DSP_REG_HIPCI_BUSY BIT(31)
+#define HDA_DSP_REG_HIPCI_MSG_MASK 0x7FFFFFFF
+
+/* HIPCIE */
+#define HDA_DSP_REG_HIPCIE_DONE BIT(30)
+#define HDA_DSP_REG_HIPCIE_MSG_MASK 0x3FFFFFFF
+
+/* HIPCCTL */
+#define HDA_DSP_REG_HIPCCTL_DONE BIT(1)
+#define HDA_DSP_REG_HIPCCTL_BUSY BIT(0)
+
+/* HIPCT */
+#define HDA_DSP_REG_HIPCT_BUSY BIT(31)
+#define HDA_DSP_REG_HIPCT_MSG_MASK 0x7FFFFFFF
+
+/* HIPCTE */
+#define HDA_DSP_REG_HIPCTE_MSG_MASK 0x3FFFFFFF
+
+#define HDA_DSP_ADSPIC_CL_DMA BIT(1)
+#define HDA_DSP_ADSPIS_CL_DMA BIT(1)
+
+/* Delay before scheduling D0i3 entry */
+#define BXT_D0I3_DELAY 5000
+
+#define FW_CL_STREAM_NUMBER 0x1
+#define HDA_FW_BOOT_ATTEMPTS 3
+
+/* ADSPCS - Audio DSP Control & Status */
+
+/*
+ * Core Reset - asserted high
+ * CRST Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_CRST_SHIFT 0
+#define HDA_DSP_ADSPCS_CRST_MASK(cm) ((cm) << HDA_DSP_ADSPCS_CRST_SHIFT)
+
+/*
+ * Core run/stall - when set to '1' core is stalled
+ * CSTALL Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_CSTALL_SHIFT 8
+#define HDA_DSP_ADSPCS_CSTALL_MASK(cm) ((cm) << HDA_DSP_ADSPCS_CSTALL_SHIFT)
+
+/*
+ * Set Power Active - when set to '1' turn cores on
+ * SPA Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_SPA_SHIFT 16
+#define HDA_DSP_ADSPCS_SPA_MASK(cm) ((cm) << HDA_DSP_ADSPCS_SPA_SHIFT)
+
+/*
+ * Current Power Active - power status of cores, set by hardware
+ * CPA Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_CPA_SHIFT 24
+#define HDA_DSP_ADSPCS_CPA_MASK(cm) ((cm) << HDA_DSP_ADSPCS_CPA_SHIFT)
+
+/*
+ * Mask for a given number of cores
+ * nc = number of supported cores
+ */
+#define SOF_DSP_CORES_MASK(nc) GENMASK(((nc) - 1), 0)
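+/*
+ * Example: SOF_DSP_CORES_MASK(4) is 0xF; for core 0 alone (cm = BIT(0)) the
+ * SPA mask is BIT(16) and the matching CPA status bit is BIT(24).
+ */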
+
+/* Intel HD Audio Inter-Processor Communication Registers for Cannonlake */
+#define CNL_DSP_IPC_BASE 0xc0
+#define CNL_DSP_REG_HIPCTDR (CNL_DSP_IPC_BASE + 0x00)
+#define CNL_DSP_REG_HIPCTDA (CNL_DSP_IPC_BASE + 0x04)
+#define CNL_DSP_REG_HIPCTDD (CNL_DSP_IPC_BASE + 0x08)
+#define CNL_DSP_REG_HIPCIDR (CNL_DSP_IPC_BASE + 0x10)
+#define CNL_DSP_REG_HIPCIDA (CNL_DSP_IPC_BASE + 0x14)
+#define CNL_DSP_REG_HIPCIDD (CNL_DSP_IPC_BASE + 0x18)
+#define CNL_DSP_REG_HIPCCTL (CNL_DSP_IPC_BASE + 0x28)
+
+/* HIPCI */
+#define CNL_DSP_REG_HIPCIDR_BUSY BIT(31)
+#define CNL_DSP_REG_HIPCIDR_MSG_MASK 0x7FFFFFFF
+
+/* HIPCIE */
+#define CNL_DSP_REG_HIPCIDA_DONE BIT(31)
+#define CNL_DSP_REG_HIPCIDA_MSG_MASK 0x7FFFFFFF
+
+/* HIPCCTL */
+#define CNL_DSP_REG_HIPCCTL_DONE BIT(1)
+#define CNL_DSP_REG_HIPCCTL_BUSY BIT(0)
+
+/* HIPCT */
+#define CNL_DSP_REG_HIPCTDR_BUSY BIT(31)
+#define CNL_DSP_REG_HIPCTDR_MSG_MASK 0x7FFFFFFF
+
+/* HIPCTDA */
+#define CNL_DSP_REG_HIPCTDA_DONE BIT(31)
+#define CNL_DSP_REG_HIPCTDA_MSG_MASK 0x7FFFFFFF
+
+/* HIPCTDD */
+#define CNL_DSP_REG_HIPCTDD_MSG_MASK 0x7FFFFFFF
+
+/* BDL */
+#define HDA_DSP_BDL_SIZE 4096
+#define HDA_DSP_MAX_BDL_ENTRIES \
+ (HDA_DSP_BDL_SIZE / sizeof(struct sof_intel_dsp_bdl))
+
+/* Number of DAIs */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+#define SOF_SKL_NUM_DAIS 15
+#else
+#define SOF_SKL_NUM_DAIS 8
+#endif
+
+/* Intel HD Audio SRAM Window 0 */
+#define HDA_DSP_SRAM_REG_ROM_STATUS_SKL 0x8000
+#define HDA_ADSP_SRAM0_BASE_SKL 0x8000
+
+/* Firmware status window */
+#define HDA_ADSP_FW_STATUS_SKL HDA_ADSP_SRAM0_BASE_SKL
+#define HDA_ADSP_ERROR_CODE_SKL (HDA_ADSP_FW_STATUS_SKL + 0x4)
+
+/* Host Device Memory Space */
+#define APL_SSP_BASE_OFFSET 0x2000
+#define CNL_SSP_BASE_OFFSET 0x10000
+
+/* Host Device Memory Size of a Single SSP */
+#define SSP_DEV_MEM_SIZE 0x1000
+
+/* SSP Count of the Platform */
+#define APL_SSP_COUNT 6
+#define CNL_SSP_COUNT 3
+#define ICL_SSP_COUNT 6
+#define TGL_SSP_COUNT 3
+#define MTL_SSP_COUNT 3
+
+/* SSP Registers */
+#define SSP_SSC1_OFFSET 0x4
+#define SSP_SET_SCLK_CONSUMER BIT(25)
+#define SSP_SET_SFRM_CONSUMER BIT(24)
+#define SSP_SET_CBP_CFP (SSP_SET_SCLK_CONSUMER | SSP_SET_SFRM_CONSUMER)
+
+#define HDA_IDISP_ADDR 2
+#define HDA_IDISP_CODEC(x) ((x) & BIT(HDA_IDISP_ADDR))
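+/*
+ * The iDisp/HDMI codec enumerates at HDA address 2, so e.g. a codec_mask of
+ * 0x5 (addresses 0 and 2) includes an iDisp codec while 0x3 does not.
+ */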
+
+struct sof_intel_dsp_bdl {
+ __le32 addr_l;
+ __le32 addr_h;
+ __le32 size;
+ __le32 ioc;
+} __attribute((packed));
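+/*
+ * Each BDL entry is 16 bytes (four __le32 fields), so HDA_DSP_BDL_SIZE (4096)
+ * holds HDA_DSP_MAX_BDL_ENTRIES == 256 entries.
+ */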
+
+#define SOF_HDA_PLAYBACK_STREAMS 16
+#define SOF_HDA_CAPTURE_STREAMS 16
+#define SOF_HDA_PLAYBACK 0
+#define SOF_HDA_CAPTURE 1
+
+/* stream flags */
+#define SOF_HDA_STREAM_DMI_L1_COMPATIBLE 1
+
+/*
+ * Time in ms for opportunistic D0I3 entry delay.
+ * This has been deliberately chosen to be long to avoid race conditions.
+ * Could be optimized in future.
+ */
+#define SOF_HDA_D0I3_WORK_DELAY_MS 5000
+
+/* HDA DSP D0 substate */
+enum sof_hda_D0_substate {
+ SOF_HDA_DSP_PM_D0I0, /* default D0 substate */
+ SOF_HDA_DSP_PM_D0I3, /* low power D0 substate */
+};
+
+/* represents DSP HDA controller frontend - i.e. host facing control */
+struct sof_intel_hda_dev {
+ bool imrboot_supported;
+ bool skip_imr_boot;
+
+ int boot_iteration;
+
+ struct hda_bus hbus;
+
+ /* hw config */
+ const struct sof_intel_dsp_desc *desc;
+
+ /* trace */
+ struct hdac_ext_stream *dtrace_stream;
+
+ /* if position update IPC needed */
+ u32 no_ipc_position;
+
+ /* the maximum number of streams (playback + capture) supported */
+ u32 stream_max;
+
+ /* PM related */
+ bool l1_support_changed; /* whether L1SEN was changed during suspend */
+
+ /* DMIC device */
+ struct platform_device *dmic_dev;
+
+ /* delayed work to enter D0I3 opportunistically */
+ struct delayed_work d0i3_work;
+
+ /* ACPI information stored between scan and probe steps */
+ struct sdw_intel_acpi_info info;
+
+ /* sdw context allocated by SoundWire driver */
+ struct sdw_intel_ctx *sdw;
+
+ /* FW clock config, 0:HPRO, 1:LPRO */
+ bool clk_config_lpro;
+
+ wait_queue_head_t waitq;
+ bool code_loading;
+
+ /* Intel NHLT information */
+ struct nhlt_acpi_table *nhlt;
+};
+
+static inline struct hdac_bus *sof_to_bus(struct snd_sof_dev *s)
+{
+ struct sof_intel_hda_dev *hda = s->pdata->hw_pdata;
+
+ return &hda->hbus.core;
+}
+
+static inline struct hda_bus *sof_to_hbus(struct snd_sof_dev *s)
+{
+ struct sof_intel_hda_dev *hda = s->pdata->hw_pdata;
+
+ return &hda->hbus;
+}
+
+struct sof_intel_hda_stream {
+ struct snd_sof_dev *sdev;
+ struct hdac_ext_stream hext_stream;
+ struct sof_intel_stream sof_intel_stream;
+ int host_reserved; /* reserve host DMA channel */
+ u32 flags;
+};
+
+#define hstream_to_sof_hda_stream(hstream) \
+ container_of(hstream, struct sof_intel_hda_stream, hext_stream)
+
+#define bus_to_sof_hda(bus) \
+ container_of(bus, struct sof_intel_hda_dev, hbus.core)
+
+#define SOF_STREAM_SD_OFFSET(s) \
+ (SOF_HDA_ADSP_SD_ENTRY_SIZE * ((s)->index) \
+ + SOF_HDA_ADSP_LOADER_BASE)
+
+#define SOF_STREAM_SD_OFFSET_CRST 0x1
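+/*
+ * Example: SOF_STREAM_SD_OFFSET() for the stream with index 2 evaluates to
+ * 0x80 + 2 * 0x20 = 0xC0.
+ */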
+
+/*
+ * DSP Core services.
+ */
+int hda_dsp_probe(struct snd_sof_dev *sdev);
+int hda_dsp_remove(struct snd_sof_dev *sdev);
+int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
+ unsigned int core_mask);
+int hda_power_down_dsp(struct snd_sof_dev *sdev);
+int hda_dsp_core_get(struct snd_sof_dev *sdev, int core);
+void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev);
+void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev);
+bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask);
+
+int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state);
+
+int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state);
+int hda_dsp_resume(struct snd_sof_dev *sdev);
+int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev);
+int hda_dsp_runtime_resume(struct snd_sof_dev *sdev);
+int hda_dsp_runtime_idle(struct snd_sof_dev *sdev);
+int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev);
+int hda_dsp_shutdown(struct snd_sof_dev *sdev);
+int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev);
+void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags);
+void hda_ipc_dump(struct snd_sof_dev *sdev);
+void hda_ipc_irq_dump(struct snd_sof_dev *sdev);
+void hda_dsp_d0i3_work(struct work_struct *work);
+int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev);
+
+/*
+ * DSP PCM Operations.
+ */
+u32 hda_dsp_get_mult_div(struct snd_sof_dev *sdev, int rate);
+u32 hda_dsp_get_bits(struct snd_sof_dev *sdev, int sample_bits);
+int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int hda_dsp_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int hda_dsp_pcm_hw_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_sof_platform_stream_params *platform_params);
+int hda_dsp_stream_hw_free(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int hda_dsp_pcm_trigger(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t hda_dsp_pcm_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int hda_dsp_pcm_ack(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream);
+
+/*
+ * DSP Stream Operations.
+ */
+
+int hda_dsp_stream_init(struct snd_sof_dev *sdev);
+void hda_dsp_stream_free(struct snd_sof_dev *sdev);
+int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params);
+int hda_dsp_iccmax_stream_hw_params(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params);
+int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream, int cmd);
+irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context);
+int hda_dsp_stream_setup_bdl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct hdac_stream *hstream);
+bool hda_dsp_check_ipc_irq(struct snd_sof_dev *sdev);
+bool hda_dsp_check_stream_irq(struct snd_sof_dev *sdev);
+
+snd_pcm_uframes_t hda_dsp_stream_get_position(struct hdac_stream *hstream,
+ int direction, bool can_sleep);
+
+struct hdac_ext_stream *
+ hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction, u32 flags);
+int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag);
+int hda_dsp_stream_spib_config(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *hext_stream,
+ int enable, u32 size);
+
+int hda_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz);
+int hda_set_stream_data_offset(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ size_t posn_offset);
+
+/*
+ * DSP IPC Operations.
+ */
+int hda_dsp_ipc_send_msg(struct snd_sof_dev *sdev,
+ struct snd_sof_ipc_msg *msg);
+void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev);
+int hda_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev);
+int hda_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id);
+
+irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context);
+int hda_dsp_ipc_cmd_done(struct snd_sof_dev *sdev, int dir);
+
+/*
+ * DSP Code loader.
+ */
+int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev);
+int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev);
+int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream);
+struct hdac_ext_stream *hda_cl_stream_prepare(struct snd_sof_dev *sdev, unsigned int format,
+ unsigned int size, struct snd_dma_buffer *dmab,
+ int direction);
+int hda_cl_cleanup(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
+ struct hdac_ext_stream *hext_stream);
+int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot);
+#define HDA_CL_STREAM_FORMAT 0x40
+
+/* pre and post fw run ops */
+int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev);
+int hda_dsp_post_fw_run(struct snd_sof_dev *sdev);
+
+/* parse platform specific ext manifest ops */
+int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr);
+
+/*
+ * HDA Controller Operations.
+ */
+int hda_dsp_ctrl_get_caps(struct snd_sof_dev *sdev);
+void hda_dsp_ctrl_ppcap_enable(struct snd_sof_dev *sdev, bool enable);
+void hda_dsp_ctrl_ppcap_int_enable(struct snd_sof_dev *sdev, bool enable);
+int hda_dsp_ctrl_link_reset(struct snd_sof_dev *sdev, bool reset);
+void hda_dsp_ctrl_misc_clock_gating(struct snd_sof_dev *sdev, bool enable);
+int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable);
+int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev, bool full_reset);
+void hda_dsp_ctrl_stop_chip(struct snd_sof_dev *sdev);
+/*
+ * HDA bus operations.
+ */
+void sof_hda_bus_init(struct hdac_bus *bus, struct device *dev);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+/*
+ * HDA Codec operations.
+ */
+void hda_codec_probe_bus(struct snd_sof_dev *sdev,
+ bool hda_codec_use_common_hdmi);
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable);
+void hda_codec_jack_check(struct snd_sof_dev *sdev);
+
+#endif /* CONFIG_SND_SOC_SOF_HDA */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) && \
+ (IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) || \
+ IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+
+void hda_codec_i915_display_power(struct snd_sof_dev *sdev, bool enable);
+int hda_codec_i915_init(struct snd_sof_dev *sdev);
+int hda_codec_i915_exit(struct snd_sof_dev *sdev);
+
+#else
+
+static inline void hda_codec_i915_display_power(struct snd_sof_dev *sdev,
+ bool enable) { }
+static inline int hda_codec_i915_init(struct snd_sof_dev *sdev) { return 0; }
+static inline int hda_codec_i915_exit(struct snd_sof_dev *sdev) { return 0; }
+
+#endif
+
+/*
+ * Trace Control.
+ */
+int hda_dsp_trace_init(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
+ struct sof_ipc_dma_trace_params_ext *dtrace_params);
+int hda_dsp_trace_release(struct snd_sof_dev *sdev);
+int hda_dsp_trace_trigger(struct snd_sof_dev *sdev, int cmd);
+
+/*
+ * SoundWire support
+ */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+
+int hda_sdw_startup(struct snd_sof_dev *sdev);
+void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable);
+void hda_sdw_process_wakeen(struct snd_sof_dev *sdev);
+bool hda_common_check_sdw_irq(struct snd_sof_dev *sdev);
+
+#else
+
+static inline int hda_sdw_startup(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable)
+{
+}
+
+static inline void hda_sdw_process_wakeen(struct snd_sof_dev *sdev)
+{
+}
+
+static inline bool hda_common_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ return false;
+}
+
+#endif
+
+/* common dai driver */
+extern struct snd_soc_dai_driver skl_dai[];
+int hda_dsp_dais_suspend(struct snd_sof_dev *sdev);
+
+/*
+ * Platform Specific HW abstraction Ops.
+ */
+extern struct snd_sof_dsp_ops sof_hda_common_ops;
+
+extern struct snd_sof_dsp_ops sof_skl_ops;
+int sof_skl_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_apl_ops;
+int sof_apl_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_cnl_ops;
+int sof_cnl_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_tgl_ops;
+int sof_tgl_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_icl_ops;
+int sof_icl_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_mtl_ops;
+int sof_mtl_ops_init(struct snd_sof_dev *sdev);
+
+extern const struct sof_intel_dsp_desc skl_chip_info;
+extern const struct sof_intel_dsp_desc apl_chip_info;
+extern const struct sof_intel_dsp_desc cnl_chip_info;
+extern const struct sof_intel_dsp_desc icl_chip_info;
+extern const struct sof_intel_dsp_desc tgl_chip_info;
+extern const struct sof_intel_dsp_desc tglh_chip_info;
+extern const struct sof_intel_dsp_desc ehl_chip_info;
+extern const struct sof_intel_dsp_desc jsl_chip_info;
+extern const struct sof_intel_dsp_desc adls_chip_info;
+extern const struct sof_intel_dsp_desc mtl_chip_info;
+
+/* Probes support */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+int hda_probes_register(struct snd_sof_dev *sdev);
+void hda_probes_unregister(struct snd_sof_dev *sdev);
+#else
+static inline int hda_probes_register(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline void hda_probes_unregister(struct snd_sof_dev *sdev)
+{
+}
+#endif /* CONFIG_SND_SOC_SOF_HDA_PROBES */
+
+/* SOF client registration for HDA platforms */
+int hda_register_clients(struct snd_sof_dev *sdev);
+void hda_unregister_clients(struct snd_sof_dev *sdev);
+
+/* machine driver select */
+struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev);
+void hda_set_mach_params(struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev);
+
+/* PCI driver selection and probe */
+int hda_pci_intel_probe(struct pci_dev *pci, const struct pci_device_id *pci_id);
+
+struct snd_sof_dai;
+struct sof_ipc_dai_config;
+int hda_ctrl_dai_widget_setup(struct snd_soc_dapm_widget *w, unsigned int quirk_flags,
+ struct snd_sof_dai_config_data *data);
+int hda_ctrl_dai_widget_free(struct snd_soc_dapm_widget *w, unsigned int quirk_flags,
+ struct snd_sof_dai_config_data *data);
+
+#define SOF_HDA_POSITION_QUIRK_USE_SKYLAKE_LEGACY (0) /* previous implementation */
+#define SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS (1) /* recommended if VC0 only */
+#define SOF_HDA_POSITION_QUIRK_USE_DPIB_DDR_UPDATE (2) /* recommended with VC0 or VC1 */
+
+extern int sof_hda_position_quirk;
+
+void hda_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops);
+void hda_ops_free(struct snd_sof_dev *sdev);
+
+/* SKL/KBL */
+int hda_dsp_cl_boot_firmware_skl(struct snd_sof_dev *sdev);
+int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask);
+
+/* IPC4 */
+irqreturn_t cnl_ipc4_irq_thread(int irq, void *context);
+int cnl_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
+irqreturn_t hda_dsp_ipc4_irq_thread(int irq, void *context);
+int hda_dsp_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
+void hda_ipc4_dump(struct snd_sof_dev *sdev);
+extern struct sdw_intel_ops sdw_callback;
+
+#endif
diff --git a/sound/soc/sof/intel/icl.c b/sound/soc/sof/intel/icl.c
new file mode 100644
index 000000000..6d5877108
--- /dev/null
+++ b/sound/soc/sof/intel/icl.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright(c) 2020 Intel Corporation. All rights reserved.
+//
+// Author: Fred Oh <fred.oh@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on IceLake.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kconfig.h>
+#include <linux/export.h>
+#include <linux/bits.h>
+#include "../ipc4-priv.h"
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+#include "../sof-audio.h"
+
+#define ICL_DSP_HPRO_CORE_ID 3
+
+static const struct snd_sof_debugfs_map icl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static int icl_dsp_core_stall(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ /* make sure core_mask is within the host managed cores */
+ core_mask &= chip->host_managed_cores_mask;
+ if (!core_mask) {
+ dev_err(sdev->dev, "error: core_mask is not in host managed cores\n");
+ return -EINVAL;
+ }
+
+ /* stall core */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+
+ return 0;
+}
+
+/*
+ * post fw run operation for ICL.
+ * Core 3 will be powered up and kept in stall when HPRO is enabled
+ */
+static int icl_dsp_post_fw_run(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ int ret;
+
+ if (sdev->first_boot) {
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ ret = hda_sdw_startup(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: could not startup SoundWire links\n");
+ return ret;
+ }
+
+ /* Check if IMR boot is usable */
+ if (!sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT) &&
+ sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT)
+ hdev->imrboot_supported = true;
+ }
+
+ hda_sdw_int_enable(sdev, true);
+
+ /*
+ * The recommended HW programming sequence for ICL is to
+ * power up core 3 and keep it in stall if HPRO is enabled.
+ */
+ if (!hda->clk_config_lpro) {
+ ret = hda_dsp_enable_core(sdev, BIT(ICL_DSP_HPRO_CORE_ID));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core power up failed on core %d\n",
+ ICL_DSP_HPRO_CORE_ID);
+ return ret;
+ }
+
+ sdev->enabled_cores_mask |= BIT(ICL_DSP_HPRO_CORE_ID);
+ sdev->dsp_core_ref_count[ICL_DSP_HPRO_CORE_ID]++;
+
+ snd_sof_dsp_stall(sdev, BIT(ICL_DSP_HPRO_CORE_ID));
+ }
+
+ /* re-enable clock gating and power gating */
+ return hda_dsp_ctrl_clock_power_gating(sdev, true);
+}
+
+/* Icelake ops */
+struct snd_sof_dsp_ops sof_icl_ops;
+EXPORT_SYMBOL_NS(sof_icl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int sof_icl_ops_init(struct snd_sof_dev *sdev)
+{
+ /* common defaults */
+ memcpy(&sof_icl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ /* probe/remove/shutdown */
+ sof_icl_ops.shutdown = hda_dsp_shutdown;
+
+ if (sdev->pdata->ipc_type == SOF_IPC) {
+ /* doorbell */
+ sof_icl_ops.irq_thread = cnl_ipc_irq_thread;
+
+ /* ipc */
+ sof_icl_ops.send_msg = cnl_ipc_send_msg;
+
+ /* debug */
+ sof_icl_ops.ipc_dump = cnl_ipc_dump;
+ }
+
+ if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
+
+ ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_2;
+
+ /* doorbell */
+ sof_icl_ops.irq_thread = cnl_ipc4_irq_thread;
+
+ /* ipc */
+ sof_icl_ops.send_msg = cnl_ipc4_send_msg;
+
+ /* debug */
+ sof_icl_ops.ipc_dump = cnl_ipc4_dump;
+ }
+
+ /* debug */
+ sof_icl_ops.debug_map = icl_dsp_debugfs;
+ sof_icl_ops.debug_map_count = ARRAY_SIZE(icl_dsp_debugfs);
+
+ /* pre/post fw run */
+ sof_icl_ops.post_fw_run = icl_dsp_post_fw_run;
+
+ /* firmware run */
+ sof_icl_ops.run = hda_dsp_cl_boot_firmware_iccmax;
+ sof_icl_ops.stall = icl_dsp_core_stall;
+
+ /* dsp core get/put */
+ sof_icl_ops.core_get = hda_dsp_core_get;
+
+ /* set DAI driver ops */
+ hda_set_dai_drv_ops(sdev, &sof_icl_ops);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(sof_icl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc icl_chip_info = {
+ /* Icelake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(3, 0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = ICL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_2_0,
+};
+EXPORT_SYMBOL_NS(icl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/mtl.c b/sound/soc/sof/intel/mtl.c
new file mode 100644
index 000000000..d7048f1d6
--- /dev/null
+++ b/sound/soc/sof/intel/mtl.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+// Authors: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Meteorlake.
+ */
+
+#include <linux/firmware.h>
+#include <sound/sof/ipc4/header.h>
+#include <trace/events/sof_intel.h>
+#include "../ipc4-priv.h"
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+#include "../sof-audio.h"
+#include "mtl.h"
+
+static const struct snd_sof_debugfs_map mtl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void mtl_ipc_host_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * clear the busy interrupt to tell the DSP controller that this interrupt
+ * has been accepted and will not be triggered again
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDR,
+ MTL_DSP_REG_HFIPCXTDR_BUSY, MTL_DSP_REG_HFIPCXTDR_BUSY);
+ /*
+ * clear the busy bit to ack the DSP that the message has been processed and to send the reply to the DSP
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDA,
+ MTL_DSP_REG_HFIPCXTDA_BUSY, 0);
+}
+
+static void mtl_ipc_dsp_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * set the DONE bit - tell the DSP that we have received and processed its
+ * reply message, so it does not send more replies to the host
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDA,
+ MTL_DSP_REG_HFIPCXIDA_DONE, MTL_DSP_REG_HFIPCXIDA_DONE);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXCTL,
+ MTL_DSP_REG_HFIPCXCTL_DONE, MTL_DSP_REG_HFIPCXCTL_DONE);
+}
+
+/* Check if an IPC IRQ occurred */
+static bool mtl_dsp_check_ipc_irq(struct snd_sof_dev *sdev)
+{
+ u32 irq_status;
+ u32 hfintipptr;
+
+ /* read Interrupt IP Pointer */
+ hfintipptr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFINTIPPTR) & MTL_HFINTIPPTR_PTR_MASK;
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, hfintipptr + MTL_DSP_IRQSTS);
+
+ trace_sof_intel_hda_irq_ipc_check(sdev, irq_status);
+
+ if (irq_status != U32_MAX && (irq_status & MTL_DSP_IRQSTS_IPC))
+ return true;
+
+ return false;
+}
+
+/* Check if an SDW IRQ occurred */
+static bool mtl_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ u32 irq_status;
+ u32 hfintipptr;
+
+ /* read Interrupt IP Pointer */
+ hfintipptr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFINTIPPTR) & MTL_HFINTIPPTR_PTR_MASK;
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, hfintipptr + MTL_DSP_IRQSTS);
+
+ if (irq_status != U32_MAX && (irq_status & MTL_DSP_IRQSTS_SDW))
+ return true;
+
+ return false;
+}
+
+static int mtl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct sof_ipc4_msg *msg_data = msg->msg_data;
+
+ /* send the message via mailbox */
+ if (msg_data->data_size)
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
+ msg_data->data_size);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDDY,
+ msg_data->extension);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDR,
+ msg_data->primary | MTL_DSP_REG_HFIPCXIDR_BUSY);
+
+ return 0;
+}
+
+static void mtl_enable_ipc_interrupts(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ /* enable IPC DONE and BUSY interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ MTL_DSP_REG_HFIPCXCTL_BUSY | MTL_DSP_REG_HFIPCXCTL_DONE,
+ MTL_DSP_REG_HFIPCXCTL_BUSY | MTL_DSP_REG_HFIPCXCTL_DONE);
+}
+
+static void mtl_disable_ipc_interrupts(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ /* disable IPC DONE and BUSY interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ MTL_DSP_REG_HFIPCXCTL_BUSY | MTL_DSP_REG_HFIPCXCTL_DONE, 0);
+}
+
+static int mtl_enable_interrupts(struct snd_sof_dev *sdev)
+{
+ u32 hfintipptr;
+ u32 irqinten;
+ u32 host_ipc;
+ u32 hipcie;
+ int ret;
+
+ /* read Interrupt IP Pointer */
+ hfintipptr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFINTIPPTR) & MTL_HFINTIPPTR_PTR_MASK;
+
+ /* Enable Host IPC and SOUNDWIRE */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, hfintipptr,
+ MTL_IRQ_INTEN_L_HOST_IPC_MASK | MTL_IRQ_INTEN_L_SOUNDWIRE_MASK,
+ MTL_IRQ_INTEN_L_HOST_IPC_MASK | MTL_IRQ_INTEN_L_SOUNDWIRE_MASK);
+
+ /* check if operation was successful */
+ host_ipc = MTL_IRQ_INTEN_L_HOST_IPC_MASK | MTL_IRQ_INTEN_L_SOUNDWIRE_MASK;
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, hfintipptr, irqinten,
+ (irqinten & host_ipc) == host_ipc,
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to enable Host IPC and/or SOUNDWIRE\n");
+ return ret;
+ }
+
+ /* Set Host IPC interrupt enable */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfHIPCIE,
+ MTL_DSP_REG_HfHIPCIE_IE_MASK, MTL_DSP_REG_HfHIPCIE_IE_MASK);
+
+ /* check if operation was successful */
+ host_ipc = MTL_DSP_REG_HfHIPCIE_IE_MASK;
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfHIPCIE, hipcie,
+ (hipcie & host_ipc) == host_ipc,
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to set Host IPC interrupt enable\n");
+ return ret;
+ }
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfSNDWIE,
+ MTL_DSP_REG_HfSNDWIE_IE_MASK, MTL_DSP_REG_HfSNDWIE_IE_MASK);
+ host_ipc = MTL_DSP_REG_HfSNDWIE_IE_MASK;
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfSNDWIE, hipcie,
+ (hipcie & host_ipc) == host_ipc,
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to set SoundWire IPC interrupt enable\n");
+
+ return ret;
+}
+
+static int mtl_disable_interrupts(struct snd_sof_dev *sdev)
+{
+ u32 hfintipptr;
+ u32 irqinten;
+ u32 host_ipc;
+ u32 hipcie;
+ int ret1;
+ int ret;
+
+ /* read Interrupt IP Pointer */
+ hfintipptr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFINTIPPTR) & MTL_HFINTIPPTR_PTR_MASK;
+
+ /* Disable Host IPC and SOUNDWIRE */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, hfintipptr,
+ MTL_IRQ_INTEN_L_HOST_IPC_MASK | MTL_IRQ_INTEN_L_SOUNDWIRE_MASK, 0);
+
+ /* check if operation was successful */
+ host_ipc = MTL_IRQ_INTEN_L_HOST_IPC_MASK | MTL_IRQ_INTEN_L_SOUNDWIRE_MASK;
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, hfintipptr, irqinten,
+ (irqinten & host_ipc) == 0,
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_RESET_TIMEOUT_US);
+ /* Continue to disable the other interrupts even if an error occurs */
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to disable Host IPC and SoundWire\n");
+
+ /* Set Host IPC interrupt disable */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfHIPCIE,
+ MTL_DSP_REG_HfHIPCIE_IE_MASK, 0);
+
+ /* check if operation was successful */
+ host_ipc = MTL_DSP_REG_HfHIPCIE_IE_MASK;
+ ret1 = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfHIPCIE, hipcie,
+ (hipcie & host_ipc) == 0,
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret1 < 0) {
+ dev_err(sdev->dev, "failed to set Host IPC interrupt disable\n");
+ if (!ret)
+ ret = ret1;
+ }
+
+ /* Set SoundWire IPC interrupt disable */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfSNDWIE,
+ MTL_DSP_REG_HfSNDWIE_IE_MASK, 0);
+ host_ipc = MTL_DSP_REG_HfSNDWIE_IE_MASK;
+ ret1 = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfSNDWIE, hipcie,
+ (hipcie & host_ipc) == 0,
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret1 < 0) {
+ dev_err(sdev->dev, "failed to set SoundWire IPC interrupt disable\n");
+ if (!ret)
+ ret = ret1;
+ }
+
+ return ret;
+}
+
+/* pre fw run operations */
+static int mtl_dsp_pre_fw_run(struct snd_sof_dev *sdev)
+{
+ u32 dsphfpwrsts;
+ u32 dsphfdsscs;
+ u32 cpa;
+ u32 pgs;
+ int ret;
+
+ /* Set the DSP subsystem power on */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_HFDSSCS,
+ MTL_HFDSSCS_SPA_MASK, MTL_HFDSSCS_SPA_MASK);
+
+ /* Wait for unstable CPA read (1 then 0 then 1) just after setting SPA bit */
+ usleep_range(1000, 1010);
+
+ /* poll with timeout to check if operation successful */
+ cpa = MTL_HFDSSCS_CPA_MASK;
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_HFDSSCS, dsphfdsscs,
+ (dsphfdsscs & cpa) == cpa, HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to enable DSP subsystem\n");
+ return ret;
+ }
+
+ /* Power up gated-DSP-0 domain in order to access the DSP shim register block. */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_HFPWRCTL,
+ MTL_HFPWRCTL_WPDSPHPXPG, MTL_HFPWRCTL_WPDSPHPXPG);
+
+ usleep_range(1000, 1010);
+
+ /* poll with timeout to check if operation successful */
+ pgs = MTL_HFPWRSTS_DSPHPXPGS_MASK;
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_HFPWRSTS, dsphfpwrsts,
+ (dsphfpwrsts & pgs) == pgs,
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to power up gated DSP domain\n");
+
+ /* make sure SoundWire is not power-gated */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, MTL_HFPWRCTL,
+ MTL_HfPWRCTL_WPIOXPG(1), MTL_HfPWRCTL_WPIOXPG(1));
+ return ret;
+}
+
+static int mtl_dsp_post_fw_run(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ if (sdev->first_boot) {
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ ret = hda_sdw_startup(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "could not startup SoundWire links\n");
+ return ret;
+ }
+
+ /* Check if IMR boot is usable */
+ if (!sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT))
+ hdev->imrboot_supported = true;
+ }
+
+ hda_sdw_int_enable(sdev, true);
+ return 0;
+}
+
+static void mtl_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
+ u32 romdbgsts;
+ u32 romdbgerr;
+ u32 fwsts;
+ u32 fwlec;
+
+ fwsts = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_ROM_STS);
+ fwlec = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_ROM_ERROR);
+ romdbgsts = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFFLGPXQWY);
+ romdbgerr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFFLGPXQWY_ERROR);
+
+ dev_err(sdev->dev, "ROM status: %#x, ROM error: %#x\n", fwsts, fwlec);
+ dev_err(sdev->dev, "ROM debug status: %#x, ROM debug error: %#x\n", romdbgsts,
+ romdbgerr);
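+	/* re-read the debug status 0x18 bytes up, where the ROM feature flags are reported */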
+ romdbgsts = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFFLGPXQWY + 0x8 * 3);
+ dev_printk(level, sdev->dev, "ROM feature bit%s enabled\n",
+ romdbgsts & BIT(24) ? "" : " not");
+}
+
+static bool mtl_dsp_primary_core_is_enabled(struct snd_sof_dev *sdev)
+{
+ int val;
+
+ val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE);
+ if (val != U32_MAX && val & MTL_DSP2CXCTL_PRIMARY_CORE_CPA_MASK)
+ return true;
+
+ return false;
+}
+
+static int mtl_dsp_core_power_up(struct snd_sof_dev *sdev, int core)
+{
+ unsigned int cpa;
+ u32 dspcxctl;
+ int ret;
+
+ /* Only the primary core can be powered up by the host */
+ if (core != SOF_DSP_PRIMARY_CORE || mtl_dsp_primary_core_is_enabled(sdev))
+ return 0;
+
+ /* Program the owner of the IP & shim registers (10: Host CPU) */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE,
+ MTL_DSP2CXCTL_PRIMARY_CORE_OSEL,
+ 0x2 << MTL_DSP2CXCTL_PRIMARY_CORE_OSEL_SHIFT);
+
+ /* enable SPA bit */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE,
+ MTL_DSP2CXCTL_PRIMARY_CORE_SPA_MASK,
+ MTL_DSP2CXCTL_PRIMARY_CORE_SPA_MASK);
+
+ /* Wait for unstable CPA read (1 then 0 then 1) just after setting SPA bit */
+ usleep_range(1000, 1010);
+
+ /* poll with timeout to check if operation successful */
+ cpa = MTL_DSP2CXCTL_PRIMARY_CORE_CPA_MASK;
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE, dspcxctl,
+ (dspcxctl & cpa) == cpa, HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(sdev->dev, "%s: timeout on MTL_DSP2CXCTL_PRIMARY_CORE read\n",
+ __func__);
+
+ return ret;
+}
+
+static int mtl_dsp_core_power_down(struct snd_sof_dev *sdev, int core)
+{
+ u32 dspcxctl;
+ int ret;
+
+ /* Only the primary core can be powered down by the host */
+ if (core != SOF_DSP_PRIMARY_CORE || !mtl_dsp_primary_core_is_enabled(sdev))
+ return 0;
+
+ /* disable SPA bit */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE,
+ MTL_DSP2CXCTL_PRIMARY_CORE_SPA_MASK, 0);
+
+	/* Wait for the unstable CPA read to settle just after clearing the SPA bit */
+ usleep_range(1000, 1010);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE, dspcxctl,
+ !(dspcxctl & MTL_DSP2CXCTL_PRIMARY_CORE_CPA_MASK),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to power down primary core\n");
+
+ return ret;
+}
+
+static int mtl_power_down_dsp(struct snd_sof_dev *sdev)
+{
+ u32 dsphfdsscs, cpa;
+ int ret;
+
+ /* first power down core */
+ ret = mtl_dsp_core_power_down(sdev, SOF_DSP_PRIMARY_CORE);
+ if (ret) {
+ dev_err(sdev->dev, "mtl dsp power down error, %d\n", ret);
+ return ret;
+ }
+
+ /* Set the DSP subsystem power down */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_HFDSSCS,
+ MTL_HFDSSCS_SPA_MASK, 0);
+
+	/* Wait for the unstable CPA read to settle just after clearing the SPA bit */
+ usleep_range(1000, 1010);
+
+ /* poll with timeout to check if operation successful */
+ cpa = MTL_HFDSSCS_CPA_MASK;
+ dsphfdsscs = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFDSSCS);
+ return snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_HFDSSCS, dsphfdsscs,
+ (dsphfdsscs & cpa) == 0, HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+}
+
+static int mtl_dsp_cl_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ unsigned int status;
+ u32 ipc_hdr;
+ int ret;
+
+ /* step 1: purge FW request */
+ ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
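+	/* for a full (non-IMR) boot, also request a firmware purge and encode the code-loader stream tag */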
+ if (!imr_boot)
+ ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);
+
+ /* step 2: power up primary core */
+ ret = mtl_dsp_core_power_up(sdev, SOF_DSP_PRIMARY_CORE);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev, "dsp core 0/1 power up failed\n");
+ goto err;
+ }
+
+ dev_dbg(sdev->dev, "Primary core power up successful\n");
+
+ /* step 3: wait for IPC DONE bit from ROM */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, chip->ipc_ack, status,
+ ((status & chip->ipc_ack_mask) == chip->ipc_ack_mask),
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_INIT_TIMEOUT_US);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev, "timeout waiting for purge IPC done\n");
+ goto err;
+ }
+
+ /* set DONE bit to clear the reply IPC message */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR, chip->ipc_ack, chip->ipc_ack_mask,
+ chip->ipc_ack_mask);
+
+ /* step 4: enable interrupts */
+ ret = mtl_enable_interrupts(sdev);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev, "%s: failed to enable interrupts\n", __func__);
+ goto err;
+ }
+
+ mtl_enable_ipc_interrupts(sdev);
+
+ /*
+ * ACE workaround: don't wait for ROM INIT.
+ * The platform cannot catch ROM_INIT_DONE because of a very short
+ * timing window. Follow the recommendations and skip this part.
+ */
+
+ return 0;
+
+err:
+ snd_sof_dsp_dbg_dump(sdev, "MTL DSP init fail", 0);
+ mtl_dsp_core_power_down(sdev, SOF_DSP_PRIMARY_CORE);
+ return ret;
+}
+
+static irqreturn_t mtl_ipc_irq_thread(int irq, void *context)
+{
+ struct sof_ipc4_msg notification_data = {{ 0 }};
+ struct snd_sof_dev *sdev = context;
+ bool ipc_irq = false;
+ u32 hipcida;
+ u32 hipctdr;
+
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDA);
+
+ /* reply message from DSP */
+ if (hipcida & MTL_DSP_REG_HFIPCXIDA_DONE) {
+ /* DSP received the message */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXCTL,
+ MTL_DSP_REG_HFIPCXCTL_DONE, 0);
+
+ mtl_ipc_dsp_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDR);
+ if (hipctdr & MTL_DSP_REG_HFIPCXTDR_BUSY) {
+ /* Message from DSP (reply or notification) */
+ u32 extension = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDDY);
+ u32 primary = hipctdr & MTL_DSP_REG_HFIPCXTDR_MSG_MASK;
+
+		/*
+		 * The ACE firmware sends a new FW IPC message to the host to
+		 * report the status of the last host IPC message
+		 */
+ if (primary & SOF_IPC4_MSG_DIR_MASK) {
+ /* Reply received */
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;
+
+ data->primary = primary;
+ data->extension = extension;
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+ snd_sof_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, data->primary);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev,
+ "IPC reply before FW_READY: %#x|%#x\n",
+ primary, extension);
+ }
+ } else {
+ /* Notification received */
+ notification_data.primary = primary;
+ notification_data.extension = extension;
+
+ sdev->ipc->msg.rx_data = &notification_data;
+ snd_sof_ipc_msgs_rx(sdev);
+ sdev->ipc->msg.rx_data = NULL;
+ }
+
+ mtl_ipc_host_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ if (!ipc_irq) {
+ /* This interrupt is not shared so no need to return IRQ_NONE. */
+ dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
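+/* the IPC mailbox lives in SRAM window 0, see MTL_DSP_MBOX_UPLINK_OFFSET */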
+static int mtl_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MTL_DSP_MBOX_UPLINK_OFFSET;
+}
+
+static int mtl_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MTL_SRAM_WINDOW_OFFSET(id);
+}
+
+static void mtl_ipc_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipcidr, hipcidd, hipcida, hipctdr, hipctdd, hipctda, hipcctl;
+
+ hipcidr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDR);
+ hipcidd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDDY);
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDA);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDR);
+ hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDDY);
+ hipctda = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDA);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXCTL);
+
+ dev_err(sdev->dev,
+ "Host IPC initiator: %#x|%#x|%#x, target: %#x|%#x|%#x, ctl: %#x\n",
+ hipcidr, hipcidd, hipcida, hipctdr, hipctdd, hipctda, hipcctl);
+}
+
+static int mtl_dsp_disable_interrupts(struct snd_sof_dev *sdev)
+{
+ mtl_disable_ipc_interrupts(sdev);
+ return mtl_disable_interrupts(sdev);
+}
+
+/* Meteorlake ops */
+struct snd_sof_dsp_ops sof_mtl_ops;
+EXPORT_SYMBOL_NS(sof_mtl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int sof_mtl_ops_init(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ /* common defaults */
+ memcpy(&sof_mtl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ /* shutdown */
+ sof_mtl_ops.shutdown = hda_dsp_shutdown;
+
+ /* doorbell */
+ sof_mtl_ops.irq_thread = mtl_ipc_irq_thread;
+
+ /* ipc */
+ sof_mtl_ops.send_msg = mtl_ipc_send_msg;
+ sof_mtl_ops.get_mailbox_offset = mtl_dsp_ipc_get_mailbox_offset;
+ sof_mtl_ops.get_window_offset = mtl_dsp_ipc_get_window_offset;
+
+ /* debug */
+ sof_mtl_ops.debug_map = mtl_dsp_debugfs;
+ sof_mtl_ops.debug_map_count = ARRAY_SIZE(mtl_dsp_debugfs);
+ sof_mtl_ops.dbg_dump = mtl_dsp_dump;
+ sof_mtl_ops.ipc_dump = mtl_ipc_dump;
+
+ /* pre/post fw run */
+ sof_mtl_ops.pre_fw_run = mtl_dsp_pre_fw_run;
+ sof_mtl_ops.post_fw_run = mtl_dsp_post_fw_run;
+
+ /* parse platform specific extended manifest */
+ sof_mtl_ops.parse_platform_ext_manifest = NULL;
+
+ /* dsp core get/put */
+ /* TODO: add core_get and core_put */
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(struct sof_ipc4_fw_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
+
+ ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_2;
+
+ /* set DAI ops */
+ hda_set_dai_drv_ops(sdev, &sof_mtl_ops);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(sof_mtl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc mtl_chip_info = {
+ .cores_num = 3,
+ .init_core_mask = BIT(0),
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = MTL_DSP_REG_HFIPCXIDR,
+ .ipc_req_mask = MTL_DSP_REG_HFIPCXIDR_BUSY,
+ .ipc_ack = MTL_DSP_REG_HFIPCXIDA,
+ .ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
+ .ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
+ .rom_status_reg = MTL_DSP_ROM_STS,
+ .rom_init_timeout = 300,
+ .ssp_count = MTL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE_ACE,
+ .sdw_alh_base = SDW_ALH_BASE_ACE,
+ .check_sdw_irq = mtl_dsp_check_sdw_irq,
+ .check_ipc_irq = mtl_dsp_check_ipc_irq,
+ .cl_init = mtl_dsp_cl_init,
+ .power_down_dsp = mtl_power_down_dsp,
+ .disable_interrupts = mtl_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_ACE_1_0,
+};
+EXPORT_SYMBOL_NS(mtl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/mtl.h b/sound/soc/sof/intel/mtl.h
new file mode 100644
index 000000000..00e352688
--- /dev/null
+++ b/sound/soc/sof/intel/mtl.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2020-2022 Intel Corporation. All rights reserved.
+ */
+
+/* DSP Registers */
+#define MTL_HFDSSCS 0x1000
+#define MTL_HFDSSCS_SPA_MASK BIT(16)
+#define MTL_HFDSSCS_CPA_MASK BIT(24)
+#define MTL_HFSNDWIE 0x114C
+#define MTL_HFPWRCTL 0x1D18
+#define MTL_HfPWRCTL_WPIOXPG(x) BIT((x) + 8)
+#define MTL_HFPWRCTL_WPDSPHPXPG BIT(0)
+#define MTL_HFPWRSTS 0x1D1C
+#define MTL_HFPWRSTS_DSPHPXPGS_MASK BIT(0)
+#define MTL_HFINTIPPTR 0x1108
+#define MTL_IRQ_INTEN_L_HOST_IPC_MASK BIT(0)
+#define MTL_IRQ_INTEN_L_SOUNDWIRE_MASK BIT(6)
+#define MTL_HFINTIPPTR_PTR_MASK GENMASK(20, 0)
+
+#define MTL_DSP2CXCAP_PRIMARY_CORE 0x178D00
+#define MTL_DSP2CXCTL_PRIMARY_CORE 0x178D04
+#define MTL_DSP2CXCTL_PRIMARY_CORE_SPA_MASK BIT(0)
+#define MTL_DSP2CXCTL_PRIMARY_CORE_CPA_MASK BIT(8)
+#define MTL_DSP2CXCTL_PRIMARY_CORE_OSEL GENMASK(25, 24)
+#define MTL_DSP2CXCTL_PRIMARY_CORE_OSEL_SHIFT 24
+
+/* IPC Registers */
+#define MTL_DSP_REG_HFIPCXTDR 0x73200
+#define MTL_DSP_REG_HFIPCXTDR_BUSY BIT(31)
+#define MTL_DSP_REG_HFIPCXTDR_MSG_MASK GENMASK(30, 0)
+#define MTL_DSP_REG_HFIPCXTDA 0x73204
+#define MTL_DSP_REG_HFIPCXTDA_BUSY BIT(31)
+#define MTL_DSP_REG_HFIPCXIDR 0x73210
+#define MTL_DSP_REG_HFIPCXIDR_BUSY BIT(31)
+#define MTL_DSP_REG_HFIPCXIDR_MSG_MASK GENMASK(30, 0)
+#define MTL_DSP_REG_HFIPCXIDA 0x73214
+#define MTL_DSP_REG_HFIPCXIDA_DONE BIT(31)
+#define MTL_DSP_REG_HFIPCXIDA_MSG_MASK GENMASK(30, 0)
+#define MTL_DSP_REG_HFIPCXCTL 0x73228
+#define MTL_DSP_REG_HFIPCXCTL_BUSY BIT(0)
+#define MTL_DSP_REG_HFIPCXCTL_DONE BIT(1)
+#define MTL_DSP_REG_HFIPCXTDDY 0x73300
+#define MTL_DSP_REG_HFIPCXIDDY 0x73380
+#define MTL_DSP_REG_HfHIPCIE 0x1140
+#define MTL_DSP_REG_HfHIPCIE_IE_MASK BIT(0)
+#define MTL_DSP_REG_HfSNDWIE 0x114C
+#define MTL_DSP_REG_HfSNDWIE_IE_MASK GENMASK(3, 0)
+
+#define MTL_DSP_IRQSTS 0x20
+#define MTL_DSP_IRQSTS_IPC BIT(0)
+#define MTL_DSP_IRQSTS_SDW BIT(6)
+
+#define MTL_DSP_REG_POLL_INTERVAL_US 10 /* 10 us */
+
+/* Memory windows */
+#define MTL_SRAM_WINDOW_OFFSET(x) (0x180000 + 0x8000 * (x))
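+/* window 0 holds the ROM status and uplink mailbox, window 1 the downlink mailbox */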
+
+#define MTL_DSP_MBOX_UPLINK_OFFSET (MTL_SRAM_WINDOW_OFFSET(0) + 0x1000)
+#define MTL_DSP_MBOX_UPLINK_SIZE 0x1000
+#define MTL_DSP_MBOX_DOWNLINK_OFFSET MTL_SRAM_WINDOW_OFFSET(1)
+#define MTL_DSP_MBOX_DOWNLINK_SIZE 0x1000
+
+/* FW registers */
+#define MTL_DSP_ROM_STS MTL_SRAM_WINDOW_OFFSET(0) /* ROM status */
+#define MTL_DSP_ROM_ERROR (MTL_SRAM_WINDOW_OFFSET(0) + 0x4) /* ROM error code */
+
+#define MTL_DSP_REG_HFFLGPXQWY 0x163200 /* ROM debug status */
+#define MTL_DSP_REG_HFFLGPXQWY_ERROR 0x163204 /* ROM debug error code */
+#define MTL_DSP_REG_HfIMRIS1 0x162088
+#define MTL_DSP_REG_HfIMRIS1_IU_MASK BIT(0)
+
diff --git a/sound/soc/sof/intel/pci-apl.c b/sound/soc/sof/intel/pci-apl.c
new file mode 100644
index 000000000..ad8431b13
--- /dev/null
+++ b/sound/soc/sof/intel/pci-apl.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2021 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+
+static const struct sof_dev_desc bxt_desc = {
+ .machines = snd_soc_acpi_intel_bxt_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &apl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/apl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-apl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-apl-nocodec.tplg",
+ .ops = &sof_apl_ops,
+ .ops_init = sof_apl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
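+/* GeminiLake reuses the ApolloLake DSP (apl_chip_info) and ops, with its own default firmware */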
+static const struct sof_dev_desc glk_desc = {
+ .machines = snd_soc_acpi_intel_glk_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &apl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/glk",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-glk.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-glk-nocodec.tplg",
+ .ops = &sof_apl_ops,
+ .ops_init = sof_apl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE(0x8086, 0x5a98), /* BXT-P (ApolloLake) */
+ .driver_data = (unsigned long)&bxt_desc},
+ { PCI_DEVICE(0x8086, 0x1a98),/* BXT-T */
+ .driver_data = (unsigned long)&bxt_desc},
+ { PCI_DEVICE(0x8086, 0x3198), /* GeminiLake */
+ .driver_data = (unsigned long)&glk_desc},
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_apl_driver = {
+ .name = "sof-audio-pci-intel-apl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_apl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-cnl.c b/sound/soc/sof/intel/pci-cnl.c
new file mode 100644
index 000000000..33677ce8d
--- /dev/null
+++ b/sound/soc/sof/intel/pci-cnl.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+
+static const struct sof_dev_desc cnl_desc = {
+ .machines = snd_soc_acpi_intel_cnl_machines,
+ .alt_machines = snd_soc_acpi_intel_cnl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &cnl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/cnl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-cnl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .ops_init = sof_cnl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
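+/* CoffeeLake and CometLake reuse the CannonLake DSP (cnl_chip_info) and ops */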
+static const struct sof_dev_desc cfl_desc = {
+ .machines = snd_soc_acpi_intel_cfl_machines,
+ .alt_machines = snd_soc_acpi_intel_cfl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &cnl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/cnl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-cfl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .ops_init = sof_cnl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc cml_desc = {
+ .machines = snd_soc_acpi_intel_cml_machines,
+ .alt_machines = snd_soc_acpi_intel_cml_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &cnl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/cnl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-cml.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .ops_init = sof_cnl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE(0x8086, 0x9dc8), /* CNL-LP */
+ .driver_data = (unsigned long)&cnl_desc},
+ { PCI_DEVICE(0x8086, 0xa348), /* CNL-H */
+ .driver_data = (unsigned long)&cfl_desc},
+ { PCI_DEVICE(0x8086, 0x02c8), /* CML-LP */
+ .driver_data = (unsigned long)&cml_desc},
+ { PCI_DEVICE(0x8086, 0x06c8), /* CML-H */
+ .driver_data = (unsigned long)&cml_desc},
+ { PCI_DEVICE(0x8086, 0xa3f0), /* CML-S */
+ .driver_data = (unsigned long)&cml_desc},
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_cnl_driver = {
+ .name = "sof-audio-pci-intel-cnl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_cnl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-icl.c b/sound/soc/sof/intel/pci-icl.c
new file mode 100644
index 000000000..9a42a4ea1
--- /dev/null
+++ b/sound/soc/sof/intel/pci-icl.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2021 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+
+static const struct sof_dev_desc icl_desc = {
+ .machines = snd_soc_acpi_intel_icl_machines,
+ .alt_machines = snd_soc_acpi_intel_icl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &icl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/icl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-icl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-icl-nocodec.tplg",
+ .ops = &sof_icl_ops,
+ .ops_init = sof_icl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
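+/* JasperLake has its own chip descriptor but reuses the CannonLake ops */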
+static const struct sof_dev_desc jsl_desc = {
+ .machines = snd_soc_acpi_intel_jsl_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &jsl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/jsl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-jsl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-jsl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .ops_init = sof_cnl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE(0x8086, 0x34C8), /* ICL-LP */
+ .driver_data = (unsigned long)&icl_desc},
+ { PCI_DEVICE(0x8086, 0x3dc8), /* ICL-H */
+ .driver_data = (unsigned long)&icl_desc},
+ { PCI_DEVICE(0x8086, 0x38c8), /* ICL-N */
+ .driver_data = (unsigned long)&jsl_desc},
+ { PCI_DEVICE(0x8086, 0x4dc8), /* JSL-N */
+ .driver_data = (unsigned long)&jsl_desc},
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_icl_driver = {
+ .name = "sof-audio-pci-intel-icl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_icl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-mtl.c b/sound/soc/sof/intel/pci-mtl.c
new file mode 100644
index 000000000..4dae25653
--- /dev/null
+++ b/sound/soc/sof/intel/pci-mtl.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
+//
+// Author: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+#include "mtl.h"
+
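+/* MeteorLake is IPC4-only: no IPC3 firmware or topology defaults are provided */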
+static const struct sof_dev_desc mtl_desc = {
+ .use_acpi_target_states = true,
+ .machines = snd_soc_acpi_intel_mtl_machines,
+ .alt_machines = snd_soc_acpi_intel_mtl_sdw_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &mtl_chip_info,
+ .ipc_supported_mask = BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_INTEL_IPC4,
+ .default_fw_path = {
+ [SOF_INTEL_IPC4] = "intel/sof-ipc4/mtl",
+ },
+ .default_tplg_path = {
+ [SOF_INTEL_IPC4] = "intel/sof-ace-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_INTEL_IPC4] = "sof-mtl.ri",
+ },
+ .nocodec_tplg_filename = "sof-mtl-nocodec.tplg",
+ .ops = &sof_mtl_ops,
+ .ops_init = sof_mtl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE(0x8086, 0x7E28), /* MTL */
+ .driver_data = (unsigned long)&mtl_desc},
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_mtl_driver = {
+ .name = "sof-audio-pci-intel-mtl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_mtl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-skl.c b/sound/soc/sof/intel/pci-skl.c
new file mode 100644
index 000000000..5b4bccf81
--- /dev/null
+++ b/sound/soc/sof/intel/pci-skl.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+
+static struct sof_dev_desc skl_desc = {
+ .machines = snd_soc_acpi_intel_skl_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .chip_info = &skl_chip_info,
+ .irqindex_host_ipc = -1,
+ .ipc_supported_mask = BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_INTEL_IPC4,
+ .default_fw_path = {
+ [SOF_INTEL_IPC4] = "intel/avs/skl",
+ },
+ .default_tplg_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-skl-nocodec.tplg",
+ .ops = &sof_skl_ops,
+ .ops_init = sof_skl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
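+/* KabyLake reuses the SkyLake DSP (skl_chip_info) and ops */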
+static struct sof_dev_desc kbl_desc = {
+ .machines = snd_soc_acpi_intel_kbl_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .chip_info = &skl_chip_info,
+ .irqindex_host_ipc = -1,
+ .ipc_supported_mask = BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_INTEL_IPC4,
+ .default_fw_path = {
+ [SOF_INTEL_IPC4] = "intel/avs/kbl",
+ },
+ .default_tplg_path = {
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-kbl-nocodec.tplg",
+ .ops = &sof_skl_ops,
+ .ops_init = sof_skl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ /* Sunrise Point-LP */
+ { PCI_DEVICE(0x8086, 0x9d70), .driver_data = (unsigned long)&skl_desc},
+ /* KBL */
+ { PCI_DEVICE(0x8086, 0x9d71), .driver_data = (unsigned long)&kbl_desc},
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_skl_driver = {
+ .name = "sof-audio-pci-intel-skl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_skl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
new file mode 100644
index 000000000..ccaf0ff9e
--- /dev/null
+++ b/sound/soc/sof/intel/pci-tgl.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2021 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+
+static const struct sof_dev_desc tgl_desc = {
+ .machines = snd_soc_acpi_intel_tgl_machines,
+ .alt_machines = snd_soc_acpi_intel_tgl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &tgl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/tgl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-tgl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc tglh_desc = {
+ .machines = snd_soc_acpi_intel_tgl_machines,
+ .alt_machines = snd_soc_acpi_intel_tgl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &tglh_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/tgl-h",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-tgl-h.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
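+/* the EHL, ADL and RPL descriptors below all reuse the TigerLake ops */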
+static const struct sof_dev_desc ehl_desc = {
+ .machines = snd_soc_acpi_intel_ehl_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &ehl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/ehl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-ehl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-ehl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc adls_desc = {
+ .machines = snd_soc_acpi_intel_adl_machines,
+ .alt_machines = snd_soc_acpi_intel_adl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &adls_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/adl-s",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-adl-s.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc adl_desc = {
+ .machines = snd_soc_acpi_intel_adl_machines,
+ .alt_machines = snd_soc_acpi_intel_adl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &tgl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/adl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-adl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc adl_n_desc = {
+ .machines = snd_soc_acpi_intel_adl_machines,
+ .alt_machines = snd_soc_acpi_intel_adl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &tgl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/adl-n",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-adl-n.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc rpls_desc = {
+ .machines = snd_soc_acpi_intel_rpl_machines,
+ .alt_machines = snd_soc_acpi_intel_rpl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &adls_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/rpl-s",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-rpl-s.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+static const struct sof_dev_desc rpl_desc = {
+ .machines = snd_soc_acpi_intel_rpl_machines,
+ .alt_machines = snd_soc_acpi_intel_rpl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &tgl_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ [SOF_INTEL_IPC4] = "intel/avs/rpl",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ [SOF_INTEL_IPC4] = "intel/avs-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-rpl.ri",
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+ .ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE(0x8086, 0xa0c8), /* TGL-LP */
+ .driver_data = (unsigned long)&tgl_desc},
+ { PCI_DEVICE(0x8086, 0x43c8), /* TGL-H */
+ .driver_data = (unsigned long)&tglh_desc},
+ { PCI_DEVICE(0x8086, 0x4b55), /* EHL */
+ .driver_data = (unsigned long)&ehl_desc},
+ { PCI_DEVICE(0x8086, 0x4b58), /* EHL */
+ .driver_data = (unsigned long)&ehl_desc},
+ { PCI_DEVICE(0x8086, 0x7ad0), /* ADL-S */
+ .driver_data = (unsigned long)&adls_desc},
+ { PCI_DEVICE(0x8086, 0x7a50), /* RPL-S */
+ .driver_data = (unsigned long)&rpls_desc},
+ { PCI_DEVICE(0x8086, 0x51c8), /* ADL-P */
+ .driver_data = (unsigned long)&adl_desc},
+ { PCI_DEVICE(0x8086, 0x51c9), /* ADL-PS */
+ .driver_data = (unsigned long)&adl_desc},
+ { PCI_DEVICE(0x8086, 0x51ca), /* RPL-P */
+ .driver_data = (unsigned long)&rpl_desc},
+ { PCI_DEVICE(0x8086, 0x51cb), /* RPL-P */
+ .driver_data = (unsigned long)&rpl_desc},
+ { PCI_DEVICE(0x8086, 0x51cc), /* ADL-M */
+ .driver_data = (unsigned long)&adl_desc},
+ { PCI_DEVICE(0x8086, 0x51cd), /* ADL-P */
+ .driver_data = (unsigned long)&adl_desc},
+ { PCI_DEVICE(0x8086, 0x51ce), /* RPL-M */
+ .driver_data = (unsigned long)&rpl_desc},
+ { PCI_DEVICE(0x8086, 0x51cf), /* RPL-PX */
+ .driver_data = (unsigned long)&rpl_desc},
+ { PCI_DEVICE(0x8086, 0x54c8), /* ADL-N */
+ .driver_data = (unsigned long)&adl_n_desc},
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_tgl_driver = {
+ .name = "sof-audio-pci-intel-tgl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_tgl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-tng.c b/sound/soc/sof/intel/pci-tng.c
new file mode 100644
index 000000000..0b17d1bb2
--- /dev/null
+++ b/sound/soc/sof/intel/pci-tng.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2021 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "atom.h"
+#include "../sof-pci-dev.h"
+#include "../sof-audio.h"
+
+/* platform specific devices */
+#include "shim.h"
+
+static struct snd_soc_acpi_mach sof_tng_machines[] = {
+ {
+ .id = "INT343A",
+ .drv_name = "edison",
+ .sof_tplg_filename = "sof-byt.tplg",
+ },
+ {}
+};
+
+static const struct snd_sof_debugfs_map tng_debugfs[] = {
+ {"dmac0", DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp2", DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", DSP_BAR, SHIM_OFFSET, SHIM_SIZE_BYT,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static int tangier_pci_probe(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_intel_dsp_desc *chip;
+ u32 base, size;
+ int ret;
+
+ chip = get_chip_info(sdev->pdata);
+ if (!chip) {
+ dev_err(sdev->dev, "error: no such device supported\n");
+ return -EIO;
+ }
+
+ sdev->num_cores = chip->cores_num;
+
+ /* DSP DMA can only access low 31 bits of host memory */
+ ret = dma_coerce_mask_and_coherent(&pci->dev, DMA_BIT_MASK(31));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
+ return ret;
+ }
+
+ /* LPE base */
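+	/* the LPE BAR exposes IRAM; subtract IRAM_OFFSET to map from the start of the LPE space */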
+ base = pci_resource_start(pci, desc->resindex_lpe_base) - IRAM_OFFSET;
+ size = PCI_BAR_SIZE;
+
+ dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
+ sdev->bar[DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[DSP_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap LPE base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[DSP_BAR]);
+
+ /* IMR base - optional */
+ if (desc->resindex_imr_base == -1)
+ goto irq;
+
+ base = pci_resource_start(pci, desc->resindex_imr_base);
+ size = pci_resource_len(pci, desc->resindex_imr_base);
+
+ /* some BIOSes don't map IMR */
+ if (base == 0x55aa55aa || base == 0x0) {
+ dev_info(sdev->dev, "IMR not set by BIOS. Ignoring\n");
+ goto irq;
+ }
+
+ dev_dbg(sdev->dev, "IMR base at 0x%x size 0x%x", base, size);
+ sdev->bar[IMR_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[IMR_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap IMR base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "IMR VADDR %p\n", sdev->bar[IMR_BAR]);
+
+irq:
+ /* register our IRQ */
+ sdev->ipc_irq = pci->irq;
+ dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
+ ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
+ atom_irq_handler, atom_irq_thread,
+ 0, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ return ret;
+ }
+
+ /* enable BUSY and disable DONE Interrupt by default */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return ret;
+}
+
+struct snd_sof_dsp_ops sof_tng_ops = {
+ /* device init */
+ .probe = tangier_pci_probe,
+
+ /* DSP core boot / reset */
+ .run = atom_run,
+ .reset = atom_reset,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Mailbox IO */
+ .mailbox_read = sof_mailbox_read,
+ .mailbox_write = sof_mailbox_write,
+
+ /* doorbell */
+ .irq_handler = atom_irq_handler,
+ .irq_thread = atom_irq_thread,
+
+ /* ipc */
+ .send_msg = atom_send_msg,
+ .get_mailbox_offset = atom_get_mailbox_offset,
+ .get_window_offset = atom_get_window_offset,
+
+ .ipc_msg_data = sof_ipc_msg_data,
+ .set_stream_data_offset = sof_set_stream_data_offset,
+
+ /* machine driver */
+ .machine_select = atom_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = atom_set_mach_params,
+
+ /* debug */
+ .debug_map = tng_debugfs,
+ .debug_map_count = ARRAY_SIZE(tng_debugfs),
+ .dbg_dump = atom_dump,
+ .debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
+
+ /* stream callbacks */
+ .pcm_open = sof_stream_pcm_open,
+ .pcm_close = sof_stream_pcm_close,
+
+	/* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* DAI drivers */
+ .drv = atom_dai,
+	.num_drv = 3, /* we have only 3 SSPs on byt */
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
+
+ .dsp_arch_ops = &sof_xtensa_arch_ops,
+};
+
+const struct sof_intel_dsp_desc tng_chip_info = {
+ .cores_num = 1,
+ .host_managed_cores_mask = 1,
+ .hw_ip_version = SOF_INTEL_TANGIER,
+};
+
+static const struct sof_dev_desc tng_desc = {
+ .machines = sof_tng_machines,
+ .resindex_lpe_base = 3, /* IRAM, but subtract IRAM offset */
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = 0,
+ .irqindex_host_ipc = -1,
+ .chip_info = &tng_chip_info,
+ .ipc_supported_mask = BIT(SOF_IPC),
+ .ipc_default = SOF_IPC,
+ .default_fw_path = {
+ [SOF_IPC] = "intel/sof",
+ },
+ .default_tplg_path = {
+ [SOF_IPC] = "intel/sof-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_IPC] = "sof-byt.ri",
+ },
+ .nocodec_tplg_filename = "sof-byt.tplg",
+ .ops = &sof_tng_ops,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE(0x8086, 0x119a),
+ .driver_data = (unsigned long)&tng_desc},
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_tng_driver = {
+ .name = "sof-audio-pci-intel-tng",
+ .id_table = sof_pci_ids,
+ .probe = sof_pci_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_tng_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
diff --git a/sound/soc/sof/intel/shim.h b/sound/soc/sof/intel/shim.h
new file mode 100644
index 000000000..3ceba5c39
--- /dev/null
+++ b/sound/soc/sof/intel/shim.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_SHIM_H
+#define __SOF_INTEL_SHIM_H
+
+enum sof_intel_hw_ip_version {
+ SOF_INTEL_TANGIER,
+ SOF_INTEL_BAYTRAIL,
+ SOF_INTEL_BROADWELL,
+ SOF_INTEL_CAVS_1_5, /* SkyLake, KabyLake, AmberLake */
+ SOF_INTEL_CAVS_1_5_PLUS,/* ApolloLake, GeminiLake */
+ SOF_INTEL_CAVS_1_8, /* CannonLake, CometLake, CoffeeLake */
+ SOF_INTEL_CAVS_2_0, /* IceLake, JasperLake */
+ SOF_INTEL_CAVS_2_5, /* TigerLake, AlderLake */
+ SOF_INTEL_ACE_1_0, /* MeteorLake */
+};
+
+/*
+ * SHIM registers for BYT, BSW, CHT, BDW
+ */
+
+#define SHIM_CSR (SHIM_OFFSET + 0x00)
+#define SHIM_PISR (SHIM_OFFSET + 0x08)
+#define SHIM_PIMR (SHIM_OFFSET + 0x10)
+#define SHIM_ISRX (SHIM_OFFSET + 0x18)
+#define SHIM_ISRD (SHIM_OFFSET + 0x20)
+#define SHIM_IMRX (SHIM_OFFSET + 0x28)
+#define SHIM_IMRD (SHIM_OFFSET + 0x30)
+#define SHIM_IPCX (SHIM_OFFSET + 0x38)
+#define SHIM_IPCD (SHIM_OFFSET + 0x40)
+#define SHIM_ISRSC (SHIM_OFFSET + 0x48)
+#define SHIM_ISRLPESC (SHIM_OFFSET + 0x50)
+#define SHIM_IMRSC (SHIM_OFFSET + 0x58)
+#define SHIM_IMRLPESC (SHIM_OFFSET + 0x60)
+#define SHIM_IPCSC (SHIM_OFFSET + 0x68)
+#define SHIM_IPCLPESC (SHIM_OFFSET + 0x70)
+#define SHIM_CLKCTL (SHIM_OFFSET + 0x78)
+#define SHIM_CSR2 (SHIM_OFFSET + 0x80)
+#define SHIM_LTRC (SHIM_OFFSET + 0xE0)
+#define SHIM_HMDC (SHIM_OFFSET + 0xE8)
+
+#define SHIM_PWMCTRL 0x1000
+
+/*
+ * SST SHIM register bits for BYT, BSW, CHT, BDW
+ * Register bit naming and functionality can differ between devices.
+ */
+
+/* CSR / CS */
+#define SHIM_CSR_RST BIT(1)
+#define SHIM_CSR_SBCS0 BIT(2)
+#define SHIM_CSR_SBCS1 BIT(3)
+#define SHIM_CSR_DCS(x) ((x) << 4)
+#define SHIM_CSR_DCS_MASK (0x7 << 4)
+#define SHIM_CSR_STALL BIT(10)
+#define SHIM_CSR_S0IOCS BIT(21)
+#define SHIM_CSR_S1IOCS BIT(23)
+#define SHIM_CSR_LPCS BIT(31)
+#define SHIM_CSR_24MHZ_LPCS \
+ (SHIM_CSR_SBCS0 | SHIM_CSR_SBCS1 | SHIM_CSR_LPCS)
+#define SHIM_CSR_24MHZ_NO_LPCS (SHIM_CSR_SBCS0 | SHIM_CSR_SBCS1)
+#define SHIM_BYT_CSR_RST BIT(0)
+#define SHIM_BYT_CSR_VECTOR_SEL BIT(1)
+#define SHIM_BYT_CSR_STALL BIT(2)
+#define SHIM_BYT_CSR_PWAITMODE BIT(3)
+
+/* ISRX / ISC */
+#define SHIM_ISRX_BUSY BIT(1)
+#define SHIM_ISRX_DONE BIT(0)
+#define SHIM_BYT_ISRX_REQUEST BIT(1)
+
+/* ISRD / ISD */
+#define SHIM_ISRD_BUSY BIT(1)
+#define SHIM_ISRD_DONE BIT(0)
+
+/* IMRX / IMC */
+#define SHIM_IMRX_BUSY BIT(1)
+#define SHIM_IMRX_DONE BIT(0)
+#define SHIM_BYT_IMRX_REQUEST BIT(1)
+
+/* IMRD / IMD */
+#define SHIM_IMRD_DONE BIT(0)
+#define SHIM_IMRD_BUSY BIT(1)
+#define SHIM_IMRD_SSP0 BIT(16)
+#define SHIM_IMRD_DMAC0 BIT(21)
+#define SHIM_IMRD_DMAC1 BIT(22)
+#define SHIM_IMRD_DMAC (SHIM_IMRD_DMAC0 | SHIM_IMRD_DMAC1)
+
+/* IPCX / IPCC */
+#define SHIM_IPCX_DONE BIT(30)
+#define SHIM_IPCX_BUSY BIT(31)
+#define SHIM_BYT_IPCX_DONE BIT_ULL(62)
+#define SHIM_BYT_IPCX_BUSY BIT_ULL(63)
+
+/* IPCD */
+#define SHIM_IPCD_DONE BIT(30)
+#define SHIM_IPCD_BUSY BIT(31)
+#define SHIM_BYT_IPCD_DONE BIT_ULL(62)
+#define SHIM_BYT_IPCD_BUSY BIT_ULL(63)
+
+/* CLKCTL */
+#define SHIM_CLKCTL_SMOS(x) ((x) << 24)
+#define SHIM_CLKCTL_MASK (3 << 24)
+#define SHIM_CLKCTL_DCPLCG BIT(18)
+#define SHIM_CLKCTL_SCOE1 BIT(17)
+#define SHIM_CLKCTL_SCOE0 BIT(16)
+
+/* CSR2 / CS2 */
+#define SHIM_CSR2_SDFD_SSP0 BIT(1)
+#define SHIM_CSR2_SDFD_SSP1 BIT(2)
+
+/* LTRC */
+#define SHIM_LTRC_VAL(x) ((x) << 0)
+
+/* HMDC */
+#define SHIM_HMDC_HDDA0(x) ((x) << 0)
+#define SHIM_HMDC_HDDA1(x) ((x) << 7)
+#define SHIM_HMDC_HDDA_E0_CH0 1
+#define SHIM_HMDC_HDDA_E0_CH1 2
+#define SHIM_HMDC_HDDA_E0_CH2 4
+#define SHIM_HMDC_HDDA_E0_CH3 8
+#define SHIM_HMDC_HDDA_E1_CH0 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH0)
+#define SHIM_HMDC_HDDA_E1_CH1 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH1)
+#define SHIM_HMDC_HDDA_E1_CH2 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH2)
+#define SHIM_HMDC_HDDA_E1_CH3 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH3)
+#define SHIM_HMDC_HDDA_E0_ALLCH \
+ (SHIM_HMDC_HDDA_E0_CH0 | SHIM_HMDC_HDDA_E0_CH1 | \
+ SHIM_HMDC_HDDA_E0_CH2 | SHIM_HMDC_HDDA_E0_CH3)
+#define SHIM_HMDC_HDDA_E1_ALLCH \
+ (SHIM_HMDC_HDDA_E1_CH0 | SHIM_HMDC_HDDA_E1_CH1 | \
+ SHIM_HMDC_HDDA_E1_CH2 | SHIM_HMDC_HDDA_E1_CH3)
+
+/* Audio DSP PCI registers */
+#define PCI_VDRTCTL0 0xa0
+#define PCI_VDRTCTL1 0xa4
+#define PCI_VDRTCTL2 0xa8
+#define PCI_VDRTCTL3 0xaC
+
+/* VDRTCTL0 */
+#define PCI_VDRTCL0_D3PGD BIT(0)
+#define PCI_VDRTCL0_D3SRAMPGD BIT(1)
+#define PCI_VDRTCL0_DSRAMPGE_SHIFT 12
+#define PCI_VDRTCL0_DSRAMPGE_MASK GENMASK(PCI_VDRTCL0_DSRAMPGE_SHIFT + 19,\
+ PCI_VDRTCL0_DSRAMPGE_SHIFT)
+#define PCI_VDRTCL0_ISRAMPGE_SHIFT 2
+#define PCI_VDRTCL0_ISRAMPGE_MASK GENMASK(PCI_VDRTCL0_ISRAMPGE_SHIFT + 9,\
+ PCI_VDRTCL0_ISRAMPGE_SHIFT)
+
+/* VDRTCTL2 */
+#define PCI_VDRTCL2_DCLCGE BIT(1)
+#define PCI_VDRTCL2_DTCGE BIT(10)
+#define PCI_VDRTCL2_APLLSE_MASK BIT(31)
+
+/* PMCS */
+#define PCI_PMCS 0x84
+#define PCI_PMCS_PS_MASK 0x3
+
+/* Intel quirks */
+#define SOF_INTEL_PROCEN_FMT_QUIRK BIT(0)
+
+/* DSP hardware descriptor */
+struct sof_intel_dsp_desc {
+ int cores_num;
+ int host_managed_cores_mask;
+ int init_core_mask; /* cores available after fw boot */
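+	/* IPC doorbell/acknowledge registers and their busy/done masks */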
+ int ipc_req;
+ int ipc_req_mask;
+ int ipc_ack;
+ int ipc_ack_mask;
+ int ipc_ctl;
+ int rom_status_reg;
+ int rom_init_timeout;
+ int ssp_count; /* ssp count of the platform */
+ int ssp_base_offset; /* base address of the SSPs */
+ u32 sdw_shim_base;
+ u32 sdw_alh_base;
+ u32 quirks;
+ enum sof_intel_hw_ip_version hw_ip_version;
+ bool (*check_sdw_irq)(struct snd_sof_dev *sdev);
+ bool (*check_ipc_irq)(struct snd_sof_dev *sdev);
+ int (*power_down_dsp)(struct snd_sof_dev *sdev);
+ int (*disable_interrupts)(struct snd_sof_dev *sdev);
+ int (*cl_init)(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot);
+};
+
+extern struct snd_sof_dsp_ops sof_tng_ops;
+
+extern const struct sof_intel_dsp_desc tng_chip_info;
+
+struct sof_intel_stream {
+ size_t posn_offset;
+};
+
+static inline const struct sof_intel_dsp_desc *get_chip_info(struct snd_sof_pdata *pdata)
+{
+ const struct sof_dev_desc *desc = pdata->desc;
+
+ return desc->chip_info;
+}
+
+#endif
diff --git a/sound/soc/sof/intel/skl.c b/sound/soc/sof/intel/skl.c
new file mode 100644
index 000000000..13efdb94d
--- /dev/null
+++ b/sound/soc/sof/intel/skl.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
+//
+
+/*
+ * Hardware interface for audio DSP on Skylake and Kabylake.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/pcm_params.h>
+#include <sound/sof.h>
+#include <sound/sof/ext_manifest4.h>
+
+#include "../sof-priv.h"
+#include "../ipc4-priv.h"
+#include "../ops.h"
+#include "hda.h"
+#include "../sof-audio.h"
+
+#define SRAM_MEMORY_WINDOW_BASE 0x8000
+
+static const __maybe_unused struct snd_sof_debugfs_map skl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000},
+};
+
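+/* memory windows are spaced 0x2000 bytes apart; the mailbox sits 0x1000 into window 0 */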
+static int skl_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return SRAM_MEMORY_WINDOW_BASE + (0x2000 * id);
+}
+
+static int skl_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return SRAM_MEMORY_WINDOW_BASE + 0x1000;
+}
+
+/* skylake ops */
+struct snd_sof_dsp_ops sof_skl_ops;
+EXPORT_SYMBOL_NS(sof_skl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int sof_skl_ops_init(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ /* common defaults */
+ memcpy(&sof_skl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ /* probe/remove/shutdown */
+ sof_skl_ops.shutdown = hda_dsp_shutdown;
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET_CAVS_1_5;
+
+ ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_1_5;
+
+ sof_skl_ops.get_window_offset = skl_dsp_ipc_get_window_offset;
+ sof_skl_ops.get_mailbox_offset = skl_dsp_ipc_get_mailbox_offset;
+
+ /* doorbell */
+ sof_skl_ops.irq_thread = hda_dsp_ipc4_irq_thread;
+
+ /* ipc */
+ sof_skl_ops.send_msg = hda_dsp_ipc4_send_msg;
+
+ /* set DAI driver ops */
+ hda_set_dai_drv_ops(sdev, &sof_skl_ops);
+
+ /* debug */
+ sof_skl_ops.debug_map = skl_dsp_debugfs;
+ sof_skl_ops.debug_map_count = ARRAY_SIZE(skl_dsp_debugfs);
+ sof_skl_ops.ipc_dump = hda_ipc4_dump;
+
+ /* firmware run: SKL/KBL load firmware through their dedicated code-loader path */
+ sof_skl_ops.run = hda_dsp_cl_boot_firmware_skl;
+
+ /* pre/post fw run */
+ sof_skl_ops.post_fw_run = hda_dsp_post_fw_run;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(sof_skl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
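+/* SKL/KBL expose two DSP cores, both of which are host managed */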
+const struct sof_intel_dsp_desc skl_chip_info = {
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(1, 0),
+ .ipc_req = HDA_DSP_REG_HIPCI,
+ .ipc_req_mask = HDA_DSP_REG_HIPCI_BUSY,
+ .ipc_ack = HDA_DSP_REG_HIPCIE,
+ .ipc_ack_mask = HDA_DSP_REG_HIPCIE_DONE,
+ .ipc_ctl = HDA_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS_SKL,
+ .rom_init_timeout = 300,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_1_5,
+};
+EXPORT_SYMBOL_NS(skl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/tgl.c b/sound/soc/sof/intel/tgl.c
new file mode 100644
index 000000000..8637fe102
--- /dev/null
+++ b/sound/soc/sof/intel/tgl.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright(c) 2020 Intel Corporation. All rights reserved.
+//
+// Authors: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Tigerlake.
+ */
+
+#include <sound/sof/ext_manifest4.h>
+#include "../ipc4-priv.h"
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+#include "../sof-audio.h"
+
+static const struct snd_sof_debugfs_map tgl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static int tgl_dsp_core_get(struct snd_sof_dev *sdev, int core)
+{
+ const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
+
+ /* power up primary core if not already powered up and return */
+ if (core == SOF_DSP_PRIMARY_CORE)
+ return hda_dsp_enable_core(sdev, BIT(core));
+
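+ /*
+ * Secondary cores are powered up by the firmware; the driver only
+ * notifies it through the set_core_state PM IPC when available.
+ */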
+ if (pm_ops->set_core_state)
+ return pm_ops->set_core_state(sdev, core, true);
+
+ return 0;
+}
+
+static int tgl_dsp_core_put(struct snd_sof_dev *sdev, int core)
+{
+ const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
+
+ /* power down primary core and return */
+ if (core == SOF_DSP_PRIMARY_CORE)
+ return hda_dsp_core_reset_power_down(sdev, BIT(core));
+
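+ /* secondary cores are powered down by the firmware via the same IPC */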
+ if (pm_ops->set_core_state)
+ return pm_ops->set_core_state(sdev, core, false);
+
+ return 0;
+}
+
+/* Tigerlake ops */
+struct snd_sof_dsp_ops sof_tgl_ops;
+EXPORT_SYMBOL_NS(sof_tgl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int sof_tgl_ops_init(struct snd_sof_dev *sdev)
+{
+ /* common defaults */
+ memcpy(&sof_tgl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ /* probe/remove/shutdown */
+ sof_tgl_ops.shutdown = hda_dsp_shutdown_dma_flush;
+
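+ /*
+ * TGL reuses the CNL doorbell/IPC register layout, so the IPC3 path
+ * borrows the cnl_* handlers; the IPC4 branch below swaps in their
+ * IPC4 counterparts and allocates the IPC4 private data.
+ */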
+ if (sdev->pdata->ipc_type == SOF_IPC) {
+ /* doorbell */
+ sof_tgl_ops.irq_thread = cnl_ipc_irq_thread;
+
+ /* ipc */
+ sof_tgl_ops.send_msg = cnl_ipc_send_msg;
+
+ /* debug */
+ sof_tgl_ops.ipc_dump = cnl_ipc_dump;
+ }
+
+ if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
+
+ ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_2;
+
+ /* doorbell */
+ sof_tgl_ops.irq_thread = cnl_ipc4_irq_thread;
+
+ /* ipc */
+ sof_tgl_ops.send_msg = cnl_ipc4_send_msg;
+
+ /* debug */
+ sof_tgl_ops.ipc_dump = cnl_ipc4_dump;
+ }
+
+ /* set DAI driver ops */
+ hda_set_dai_drv_ops(sdev, &sof_tgl_ops);
+
+ /* debug */
+ sof_tgl_ops.debug_map = tgl_dsp_debugfs;
+ sof_tgl_ops.debug_map_count = ARRAY_SIZE(tgl_dsp_debugfs);
+
+ /* pre/post fw run */
+ sof_tgl_ops.post_fw_run = hda_dsp_post_fw_run;
+
+ /* firmware run: TGL-family DSPs boot through the ICCMAX code-loader flow */
+ sof_tgl_ops.run = hda_dsp_cl_boot_firmware_iccmax;
+
+ /* dsp core get/put */
+ sof_tgl_ops.core_get = tgl_dsp_core_get;
+ sof_tgl_ops.core_put = tgl_dsp_core_put;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(sof_tgl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
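+/*
+ * The descriptors below all share the CNL IPC register set and the
+ * cAVS 2.5 IP version; they differ mainly in DSP core count.
+ */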
+const struct sof_intel_dsp_desc tgl_chip_info = {
+ /* Tigerlake, Alderlake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = TGL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_2_5,
+};
+EXPORT_SYMBOL_NS(tgl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc tglh_chip_info = {
+ /* Tigerlake-H */
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = TGL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_2_5,
+};
+EXPORT_SYMBOL_NS(tglh_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc ehl_chip_info = {
+ /* Elkhartlake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = TGL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_2_5,
+};
+EXPORT_SYMBOL_NS(ehl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc adls_chip_info = {
+ /* Alderlake-S */
+ .cores_num = 2,
+ .init_core_mask = BIT(0),
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
+ .rom_init_timeout = 300,
+ .ssp_count = TGL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE,
+ .sdw_alh_base = SDW_ALH_BASE,
+ .check_sdw_irq = hda_common_check_sdw_irq,
+ .check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
+ .power_down_dsp = hda_power_down_dsp,
+ .disable_interrupts = hda_dsp_disable_interrupts,
+ .hw_ip_version = SOF_INTEL_CAVS_2_5,
+};
+EXPORT_SYMBOL_NS(adls_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);