Diffstat (limited to 'sound/soc/sof')
-rw-r--r--  sound/soc/sof/Kconfig | 210
-rw-r--r--  sound/soc/sof/Makefile | 23
-rw-r--r--  sound/soc/sof/compress.c | 147
-rw-r--r--  sound/soc/sof/compress.h | 32
-rw-r--r--  sound/soc/sof/control.c | 491
-rw-r--r--  sound/soc/sof/core.c | 388
-rw-r--r--  sound/soc/sof/debug.c | 707
-rw-r--r--  sound/soc/sof/imx/Kconfig | 61
-rw-r--r--  sound/soc/sof/imx/Makefile | 9
-rw-r--r--  sound/soc/sof/imx/imx-common.c | 75
-rw-r--r--  sound/soc/sof/imx/imx-common.h | 16
-rw-r--r--  sound/soc/sof/imx/imx8.c | 508
-rw-r--r--  sound/soc/sof/imx/imx8m.c | 312
-rw-r--r--  sound/soc/sof/intel/Kconfig | 364
-rw-r--r--  sound/soc/sof/intel/Makefile | 20
-rw-r--r--  sound/soc/sof/intel/apl.c | 142
-rw-r--r--  sound/soc/sof/intel/bdw.c | 664
-rw-r--r--  sound/soc/sof/intel/byt.c | 987
-rw-r--r--  sound/soc/sof/intel/cnl.c | 395
-rw-r--r--  sound/soc/sof/intel/hda-bus.c | 45
-rw-r--r--  sound/soc/sof/intel/hda-codec.c | 265
-rw-r--r--  sound/soc/sof/intel/hda-compress.c | 114
-rw-r--r--  sound/soc/sof/intel/hda-ctrl.c | 364
-rw-r--r--  sound/soc/sof/intel/hda-dai.c | 586
-rw-r--r--  sound/soc/sof/intel/hda-dsp.c | 964
-rw-r--r--  sound/soc/sof/intel/hda-ipc.c | 300
-rw-r--r--  sound/soc/sof/intel/hda-ipc.h | 55
-rw-r--r--  sound/soc/sof/intel/hda-loader.c | 473
-rw-r--r--  sound/soc/sof/intel/hda-pcm.c | 250
-rw-r--r--  sound/soc/sof/intel/hda-stream.c | 947
-rw-r--r--  sound/soc/sof/intel/hda-trace.c | 94
-rw-r--r--  sound/soc/sof/intel/hda.c | 1247
-rw-r--r--  sound/soc/sof/intel/hda.h | 751
-rw-r--r--  sound/soc/sof/intel/intel-ipc.c | 92
-rw-r--r--  sound/soc/sof/intel/shim.h | 183
-rw-r--r--  sound/soc/sof/intel/tgl.c | 153
-rw-r--r--  sound/soc/sof/ipc.c | 866
-rw-r--r--  sound/soc/sof/loader.c | 836
-rw-r--r--  sound/soc/sof/nocodec.c | 111
-rw-r--r--  sound/soc/sof/ops.c | 163
-rw-r--r--  sound/soc/sof/ops.h | 556
-rw-r--r--  sound/soc/sof/pcm.c | 821
-rw-r--r--  sound/soc/sof/pm.c | 331
-rw-r--r--  sound/soc/sof/probe.c | 290
-rw-r--r--  sound/soc/sof/probe.h | 85
-rw-r--r--  sound/soc/sof/sof-acpi-dev.c | 224
-rw-r--r--  sound/soc/sof/sof-audio.c | 518
-rw-r--r--  sound/soc/sof/sof-audio.h | 225
-rw-r--r--  sound/soc/sof/sof-of-dev.c | 153
-rw-r--r--  sound/soc/sof/sof-pci-dev.c | 555
-rw-r--r--  sound/soc/sof/sof-priv.h | 589
-rw-r--r--  sound/soc/sof/topology.c | 3758
-rw-r--r--  sound/soc/sof/trace.c | 352
-rw-r--r--  sound/soc/sof/utils.c | 172
-rw-r--r--  sound/soc/sof/xtensa/Kconfig | 3
-rw-r--r--  sound/soc/sof/xtensa/Makefile | 5
-rw-r--r--  sound/soc/sof/xtensa/core.c | 138
57 files changed, 23185 insertions, 0 deletions
diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig
new file mode 100644
index 000000000..8c1f0829d
--- /dev/null
+++ b/sound/soc/sof/Kconfig
@@ -0,0 +1,210 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SND_SOC_SOF_TOPLEVEL
+ bool "Sound Open Firmware Support"
+ help
+ This adds support for Sound Open Firmware (SOF). SOF is a free and
+ generic open source audio DSP firmware for multiple devices.
+ Say Y if you have such a device that is supported by SOF.
+ If unsure select "N".
+
+if SND_SOC_SOF_TOPLEVEL
+
+config SND_SOC_SOF_PCI
+ tristate "SOF PCI enumeration support"
+ depends on PCI
+ select SND_SOC_SOF
+ select SND_SOC_ACPI if ACPI
+ help
+ This adds support for PCI enumeration. This option is
+ required to enable Intel Skylake+ devices.
+ Say Y if you need this option.
+ If unsure select "N".
+
+config SND_SOC_SOF_ACPI
+ tristate "SOF ACPI enumeration support"
+ depends on ACPI || COMPILE_TEST
+ select SND_SOC_SOF
+ select SND_SOC_ACPI if ACPI
+ select IOSF_MBI if X86 && PCI
+ help
+ This adds support for ACPI enumeration. This option is required
+ to enable Intel Broadwell/Baytrail/Cherrytrail devices.
+ Say Y if you need this option.
+ If unsure select "N".
+
+config SND_SOC_SOF_OF
+ tristate "SOF OF enumeration support"
+ depends on OF || COMPILE_TEST
+ select SND_SOC_SOF
+ help
+ This adds support for Device Tree enumeration. This option is
+ required to enable i.MX8 devices.
+ Say Y if you need this option. If unsure select "N".
+
+config SND_SOC_SOF_DEBUG_PROBES
+ bool "SOF enable data probing"
+ select SND_SOC_COMPRESS
+ help
+ This option enables the data probing feature that can be used to
+ gather data directly from specific points of the audio pipeline.
+ Say Y if you want to enable probes.
+ If unsure, select "N".
+
+config SND_SOC_SOF_DEVELOPER_SUPPORT
+ bool "SOF developer options support"
+ depends on EXPERT
+ help
+ This option unlocks SOF developer options for debug/performance/
+ code hardening.
+ Distributions should not select this option, only SOF development
+ teams should select it.
+ Say Y if you are involved in SOF development and need this option.
+ If not, select "N".
+
+if SND_SOC_SOF_DEVELOPER_SUPPORT
+
+config SND_SOC_SOF_NOCODEC
+ tristate
+
+config SND_SOC_SOF_NOCODEC_SUPPORT
+ bool "SOF nocodec mode support"
+ help
+ This adds support for a dummy/nocodec machine driver fallback
+ option if no known codec is detected. This is typically only
+ enabled for developers or devices where the sound card is
+ controlled externally.
+ This option is mutually exclusive with the Intel HDAudio support,
+ selecting it may have negative impacts and prevent e.g. microphone
+ functionality from being enabled on Intel CoffeeLake and later
+ platforms.
+ Distributions should not select this option!
+ Say Y if you need this nocodec fallback option.
+ If unsure select "N".
+
+config SND_SOC_SOF_STRICT_ABI_CHECKS
+ bool "SOF strict ABI checks"
+ help
+ This option enables strict ABI checks for firmware and topology
+ files.
+ When these files are more recent than the kernel, the kernel
+ will handle the functionality it supports and may report errors
+ during topology creation or run-time usage if new functionality
+ is invoked.
+ This option will stop topology creation and firmware load upfront.
+ It is intended for SOF CI/releases and not for users or distros.
+ Say Y if you want strict ABI checks for an SOF release.
+ If you are not involved in SOF releases and CI development
+ select "N".
+
+config SND_SOC_SOF_DEBUG
+ bool "SOF debugging features"
+ help
+ This option can be used to enable or disable individual SOF firmware
+ and driver debugging options.
+ Say Y if you are debugging SOF FW or drivers.
+ If unsure select "N".
+
+if SND_SOC_SOF_DEBUG
+
+config SND_SOC_SOF_FORCE_NOCODEC_MODE
+ bool "SOF force nocodec Mode"
+ depends on SND_SOC_SOF_NOCODEC_SUPPORT
+ help
+ This forces SOF to use the dummy/nocodec machine driver, even
+ though a codec is detected on the real platform. This is
+ typically only enabled by developers for debug purposes, before
+ the codec/machine driver is ready, or to exclude the impact of those
+ drivers.
+ Say Y if you need this force nocodec mode option.
+ If unsure select "N".
+
+config SND_SOC_SOF_DEBUG_XRUN_STOP
+ bool "SOF stop on XRUN"
+ help
+ This option forces PCMs to stop on any XRUN event. This is useful to
+ preserve any trace data and pipeline status prior to the XRUN.
+ Say Y if you are debugging SOF FW pipeline XRUNs.
+ If unsure select "N".
+
+config SND_SOC_SOF_DEBUG_VERBOSE_IPC
+ bool "SOF verbose IPC logs"
+ help
+ This option enables more verbose IPC logs, with command types in
+ human-readable form instead of just 32-bit hex dumps. This is useful
+ if you are trying to debug IPC with the DSP firmware.
+ If unsure select "N".
+
+config SND_SOC_SOF_DEBUG_FORCE_IPC_POSITION
+ bool "SOF force to use IPC for position update on SKL+"
+ help
+ This option forces the driver to handle stream position update IPCs
+ and to run PCM period elapsed work to inform ALSA, even on platforms
+ (e.g. Intel SKL+) that have another mechanism (e.g. HDA DPIB/posbuf)
+ to report the PCM position.
+ On platforms (e.g. Intel pre-SKL) where the position update IPC is
+ the only choice, this setting has no impact.
+ If you are trying to debug pointer updates with position IPCs, or if DPIB/posbuf is not ready, select "Y".
+ If unsure select "N".
+
+config SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE
+ bool "SOF enable debugfs caching"
+ help
+ This option enables caching of the debugfs views of DSP resources
+ (memory, registers, etc.) before the audio DSP is suspended. This
+ will increase the suspend
+ latency and therefore should be used for debug purposes only.
+ Say Y if you want to enable caching the memory windows.
+ If unsure, select "N".
+
+config SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE
+ bool "SOF enable firmware trace"
+ help
+ The firmware trace can be enabled either at build-time with
+ this option, or dynamically by setting flags in the SOF core
+ module parameter (similar to dynamic debug).
+ If unsure, select "N".
+
+config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST
+ bool "SOF enable IPC flood test"
+ help
+ This option enables the IPC flood test which can be used to flood
+ the DSP with test IPCs and gather stats about response times.
+ Say Y if you want to enable IPC flood test.
+ If unsure, select "N".
+
+config SND_SOC_SOF_DEBUG_RETAIN_DSP_CONTEXT
+ bool "SOF retain DSP context on any FW exceptions"
+ help
+ This option keeps the DSP in D0 state so that firmware debug
+ information can be retained and dumped to userspace.
+ Say Y if you want to retain DSP context for FW exceptions.
+ If unsure, select "N".
+
+endif ## SND_SOC_SOF_DEBUG
+
+endif ## SND_SOC_SOF_DEVELOPER_SUPPORT
+
+config SND_SOC_SOF
+ tristate
+ select SND_SOC_TOPOLOGY
+ select SND_SOC_SOF_NOCODEC if SND_SOC_SOF_NOCODEC_SUPPORT
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+ The selection is made at the top level and does not exactly follow
+ module dependencies, but since the module or built-in type is decided
+ at the top level it doesn't matter.
+
+config SND_SOC_SOF_PROBE_WORK_QUEUE
+ bool
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+ When selected, the probe is handled in two steps, for example to
+ avoid lockdep issues if request_module is used in the probe.
+
+source "sound/soc/sof/imx/Kconfig"
+source "sound/soc/sof/intel/Kconfig"
+source "sound/soc/sof/xtensa/Kconfig"
+
+endif
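
The debug options above are consumed in the driver sources through IS_ENABLED() checks (see core.c and debug.c later in this patch), often combined with the sof_debug module parameter so a feature can be switched on either at build time or at module load. A minimal sketch of that pattern follows; the SOF_DBG_ENABLE_TRACE value is an assumption for illustration, the real flag definitions live in sof-priv.h, which is not among the files shown here.

#include <linux/bits.h>
#include <linux/kconfig.h>
#include <linux/types.h>

extern int sof_core_debug;		/* module parameter defined in core.c */
#define SOF_DBG_ENABLE_TRACE	BIT(0)	/* assumed value, for illustration only */

static bool sof_example_trace_wanted(void)
{
	/* true if enabled either by the Kconfig option or by the runtime flag */
	return IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE) ||
	       (sof_core_debug & SOF_DBG_ENABLE_TRACE);
}
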
diff --git a/sound/soc/sof/Makefile b/sound/soc/sof/Makefile
new file mode 100644
index 000000000..05718dfe6
--- /dev/null
+++ b/sound/soc/sof/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+snd-sof-objs := core.o ops.o loader.o ipc.o pcm.o pm.o debug.o topology.o\
+ control.o trace.o utils.o sof-audio.o
+snd-sof-$(CONFIG_SND_SOC_SOF_DEBUG_PROBES) += probe.o compress.o
+
+snd-sof-pci-objs := sof-pci-dev.o
+snd-sof-acpi-objs := sof-acpi-dev.o
+snd-sof-of-objs := sof-of-dev.o
+
+snd-sof-nocodec-objs := nocodec.o
+
+obj-$(CONFIG_SND_SOC_SOF) += snd-sof.o
+obj-$(CONFIG_SND_SOC_SOF_NOCODEC) += snd-sof-nocodec.o
+
+
+obj-$(CONFIG_SND_SOC_SOF_ACPI) += snd-sof-acpi.o
+obj-$(CONFIG_SND_SOC_SOF_OF) += snd-sof-of.o
+obj-$(CONFIG_SND_SOC_SOF_PCI) += snd-sof-pci.o
+
+obj-$(CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL) += intel/
+obj-$(CONFIG_SND_SOC_SOF_IMX_TOPLEVEL) += imx/
+obj-$(CONFIG_SND_SOC_SOF_XTENSA) += xtensa/
diff --git a/sound/soc/sof/compress.c b/sound/soc/sof/compress.c
new file mode 100644
index 000000000..2d4969c70
--- /dev/null
+++ b/sound/soc/sof/compress.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2019-2020 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+
+#include <sound/soc.h>
+#include "compress.h"
+#include "ops.h"
+#include "probe.h"
+
+struct snd_compress_ops sof_probe_compressed_ops = {
+ .copy = sof_probe_compr_copy,
+};
+EXPORT_SYMBOL(sof_probe_compressed_ops);
+
+int sof_probe_compr_open(struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_sof_dev *sdev =
+ snd_soc_component_get_drvdata(dai->component);
+ int ret;
+
+ ret = snd_sof_probe_compr_assign(sdev, cstream, dai);
+ if (ret < 0) {
+ dev_err(dai->dev, "Failed to assign probe stream: %d\n", ret);
+ return ret;
+ }
+
+ sdev->extractor_stream_tag = ret;
+ return 0;
+}
+EXPORT_SYMBOL(sof_probe_compr_open);
+
+int sof_probe_compr_free(struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_sof_dev *sdev =
+ snd_soc_component_get_drvdata(dai->component);
+ struct sof_probe_point_desc *desc;
+ size_t num_desc;
+ int i, ret;
+
+ /* disconnect all probe points */
+ ret = sof_ipc_probe_points_info(sdev, &desc, &num_desc);
+ if (ret < 0) {
+ dev_err(dai->dev, "Failed to get probe points: %d\n", ret);
+ goto exit;
+ }
+
+ for (i = 0; i < num_desc; i++)
+ sof_ipc_probe_points_remove(sdev, &desc[i].buffer_id, 1);
+ kfree(desc);
+
+exit:
+ ret = sof_ipc_probe_deinit(sdev);
+ if (ret < 0)
+ dev_err(dai->dev, "Failed to deinit probe: %d\n", ret);
+
+ sdev->extractor_stream_tag = SOF_PROBE_INVALID_NODE_ID;
+ snd_compr_free_pages(cstream);
+
+ return snd_sof_probe_compr_free(sdev, cstream, dai);
+}
+EXPORT_SYMBOL(sof_probe_compr_free);
+
+int sof_probe_compr_set_params(struct snd_compr_stream *cstream,
+ struct snd_compr_params *params, struct snd_soc_dai *dai)
+{
+ struct snd_compr_runtime *rtd = cstream->runtime;
+ struct snd_sof_dev *sdev =
+ snd_soc_component_get_drvdata(dai->component);
+ int ret;
+
+ cstream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV_SG;
+ cstream->dma_buffer.dev.dev = sdev->dev;
+ ret = snd_compr_malloc_pages(cstream, rtd->buffer_size);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_sof_probe_compr_set_params(sdev, cstream, params, dai);
+ if (ret < 0)
+ return ret;
+
+ ret = sof_ipc_probe_init(sdev, sdev->extractor_stream_tag,
+ rtd->dma_bytes);
+ if (ret < 0) {
+ dev_err(dai->dev, "Failed to init probe: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_probe_compr_set_params);
+
+int sof_probe_compr_trigger(struct snd_compr_stream *cstream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_sof_dev *sdev =
+ snd_soc_component_get_drvdata(dai->component);
+
+ return snd_sof_probe_compr_trigger(sdev, cstream, cmd, dai);
+}
+EXPORT_SYMBOL(sof_probe_compr_trigger);
+
+int sof_probe_compr_pointer(struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp, struct snd_soc_dai *dai)
+{
+ struct snd_sof_dev *sdev =
+ snd_soc_component_get_drvdata(dai->component);
+
+ return snd_sof_probe_compr_pointer(sdev, cstream, tstamp, dai);
+}
+EXPORT_SYMBOL(sof_probe_compr_pointer);
+
+int sof_probe_compr_copy(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ char __user *buf, size_t count)
+{
+ struct snd_compr_runtime *rtd = cstream->runtime;
+ unsigned int offset, n;
+ void *ptr;
+ int ret;
+
+ if (count > rtd->buffer_size)
+ count = rtd->buffer_size;
+
+ div_u64_rem(rtd->total_bytes_transferred, rtd->buffer_size, &offset);
+ ptr = rtd->dma_area + offset;
+ n = rtd->buffer_size - offset;
+
+ if (count < n) {
+ ret = copy_to_user(buf, ptr, count);
+ } else {
+ ret = copy_to_user(buf, ptr, n);
+ ret += copy_to_user(buf + n, rtd->dma_area, count - n);
+ }
+
+ if (ret)
+ return count - ret;
+ return count;
+}
+EXPORT_SYMBOL(sof_probe_compr_copy);
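
sof_probe_compr_copy() above treats the compress DMA area as a ring buffer: the running transfer counter modulo the buffer size gives the read offset, and a copy that crosses the end of the buffer is split in two. A standalone sketch of that wrap-around logic, using plain memcpy() in place of copy_to_user() (illustration only, not part of the patch):

#include <stddef.h>
#include <string.h>

/* Read 'count' bytes from a circular buffer, starting at the position
 * given by the running transfer counter.
 */
static size_t ring_read(char *dst, const char *ring, size_t ring_size,
			unsigned long long total_transferred, size_t count)
{
	size_t offset = (size_t)(total_transferred % ring_size);
	size_t n = ring_size - offset;		/* bytes left before the wrap point */

	if (count > ring_size)
		count = ring_size;

	if (count < n) {
		memcpy(dst, ring + offset, count);	/* no wrap needed */
	} else {
		memcpy(dst, ring + offset, n);		/* tail of the ring... */
		memcpy(dst + n, ring, count - n);	/* ...then the wrapped head */
	}
	return count;
}

The kernel version differs only in that copy_to_user() may copy fewer bytes, in which case the number of bytes actually transferred (count minus the leftover) is returned.
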
diff --git a/sound/soc/sof/compress.h b/sound/soc/sof/compress.h
new file mode 100644
index 000000000..ca8790bd4
--- /dev/null
+++ b/sound/soc/sof/compress.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2019-2020 Intel Corporation. All rights reserved.
+ *
+ * Author: Cezary Rojewski <cezary.rojewski@intel.com>
+ */
+
+#ifndef __SOF_COMPRESS_H
+#define __SOF_COMPRESS_H
+
+#include <sound/compress_driver.h>
+
+extern struct snd_compress_ops sof_probe_compressed_ops;
+
+int sof_probe_compr_open(struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai);
+int sof_probe_compr_free(struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai);
+int sof_probe_compr_set_params(struct snd_compr_stream *cstream,
+ struct snd_compr_params *params, struct snd_soc_dai *dai);
+int sof_probe_compr_trigger(struct snd_compr_stream *cstream, int cmd,
+ struct snd_soc_dai *dai);
+int sof_probe_compr_pointer(struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp, struct snd_soc_dai *dai);
+int sof_probe_compr_copy(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ char __user *buf, size_t count);
+
+#endif
diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
new file mode 100644
index 000000000..0352d2b61
--- /dev/null
+++ b/sound/soc/sof/control.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/* Mixer Controls */
+
+#include <linux/pm_runtime.h>
+#include <linux/leds.h>
+#include "sof-priv.h"
+#include "sof-audio.h"
+
+static void update_mute_led(struct snd_sof_control *scontrol,
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int temp = 0;
+ int mask;
+ int i;
+
+ mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+
+ for (i = 0; i < scontrol->num_channels; i++) {
+ if (ucontrol->value.integer.value[i]) {
+ temp |= mask;
+ break;
+ }
+ }
+
+ if (temp == scontrol->led_ctl.led_value)
+ return;
+
+ scontrol->led_ctl.led_value = temp;
+
+#if IS_REACHABLE(CONFIG_LEDS_TRIGGER_AUDIO)
+ if (!scontrol->led_ctl.direction)
+ ledtrig_audio_set(LED_AUDIO_MUTE, temp ? LED_OFF : LED_ON);
+ else
+ ledtrig_audio_set(LED_AUDIO_MICMUTE, temp ? LED_OFF : LED_ON);
+#endif
+}
+
+static inline u32 mixer_to_ipc(unsigned int value, u32 *volume_map, int size)
+{
+ if (value >= size)
+ return volume_map[size - 1];
+
+ return volume_map[value];
+}
+
+static inline u32 ipc_to_mixer(u32 value, u32 *volume_map, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (volume_map[i] >= value)
+ return i;
+ }
+
+ return i - 1;
+}
+
+int snd_sof_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *sm =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = sm->dobj.private;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ unsigned int i, channels = scontrol->num_channels;
+
+ /* read back each channel */
+ for (i = 0; i < channels; i++)
+ ucontrol->value.integer.value[i] =
+ ipc_to_mixer(cdata->chanv[i].value,
+ scontrol->volume_table, sm->max + 1);
+
+ return 0;
+}
+
+int snd_sof_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *sm =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = sm->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ unsigned int i, channels = scontrol->num_channels;
+ bool change = false;
+ u32 value;
+
+ /* update each channel */
+ for (i = 0; i < channels; i++) {
+ value = mixer_to_ipc(ucontrol->value.integer.value[i],
+ scontrol->volume_table, sm->max + 1);
+ change = change || (value != cdata->chanv[i].value);
+ cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
+ }
+
+ /* notify DSP of mixer updates */
+ if (pm_runtime_active(scomp->dev))
+ snd_sof_ipc_set_get_comp_data(scontrol,
+ SOF_IPC_COMP_SET_VALUE,
+ SOF_CTRL_TYPE_VALUE_CHAN_GET,
+ SOF_CTRL_CMD_VOLUME,
+ true);
+ return change;
+}
+
+int snd_sof_switch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *sm =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = sm->dobj.private;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ unsigned int i, channels = scontrol->num_channels;
+
+ /* read back each channel */
+ for (i = 0; i < channels; i++)
+ ucontrol->value.integer.value[i] = cdata->chanv[i].value;
+
+ return 0;
+}
+
+int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *sm =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = sm->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ unsigned int i, channels = scontrol->num_channels;
+ bool change = false;
+ u32 value;
+
+ /* update each channel */
+ for (i = 0; i < channels; i++) {
+ value = ucontrol->value.integer.value[i];
+ change = change || (value != cdata->chanv[i].value);
+ cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
+ }
+
+ if (scontrol->led_ctl.use_led)
+ update_mute_led(scontrol, kcontrol, ucontrol);
+
+ /* notify DSP of mixer updates */
+ if (pm_runtime_active(scomp->dev))
+ snd_sof_ipc_set_get_comp_data(scontrol,
+ SOF_IPC_COMP_SET_VALUE,
+ SOF_CTRL_TYPE_VALUE_CHAN_GET,
+ SOF_CTRL_CMD_SWITCH,
+ true);
+
+ return change;
+}
+
+int snd_sof_enum_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_enum *se =
+ (struct soc_enum *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = se->dobj.private;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ unsigned int i, channels = scontrol->num_channels;
+
+ /* read back each channel */
+ for (i = 0; i < channels; i++)
+ ucontrol->value.enumerated.item[i] = cdata->chanv[i].value;
+
+ return 0;
+}
+
+int snd_sof_enum_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_enum *se =
+ (struct soc_enum *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = se->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ unsigned int i, channels = scontrol->num_channels;
+ bool change = false;
+ u32 value;
+
+ /* update each channel */
+ for (i = 0; i < channels; i++) {
+ value = ucontrol->value.enumerated.item[i];
+ change = change || (value != cdata->chanv[i].value);
+ cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
+ }
+
+ /* notify DSP of enum updates */
+ if (pm_runtime_active(scomp->dev))
+ snd_sof_ipc_set_get_comp_data(scontrol,
+ SOF_IPC_COMP_SET_VALUE,
+ SOF_CTRL_TYPE_VALUE_CHAN_GET,
+ SOF_CTRL_CMD_ENUM,
+ true);
+
+ return change;
+}
+
+int snd_sof_bytes_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_bytes_ext *be =
+ (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ struct sof_abi_hdr *data = cdata->data;
+ size_t size;
+
+ if (be->max > sizeof(ucontrol->value.bytes.data)) {
+ dev_err_ratelimited(scomp->dev,
+ "error: data max %d exceeds ucontrol data array size\n",
+ be->max);
+ return -EINVAL;
+ }
+
+ /* be->max has been verified to be >= sizeof(struct sof_abi_hdr) */
+ if (data->size > be->max - sizeof(*data)) {
+ dev_err_ratelimited(scomp->dev,
+ "error: %u bytes of control data is invalid, max is %zu\n",
+ data->size, be->max - sizeof(*data));
+ return -EINVAL;
+ }
+
+ size = data->size + sizeof(*data);
+
+ /* copy back to kcontrol */
+ memcpy(ucontrol->value.bytes.data, data, size);
+
+ return 0;
+}
+
+int snd_sof_bytes_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_bytes_ext *be =
+ (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ struct sof_abi_hdr *data = cdata->data;
+ size_t size;
+
+ if (be->max > sizeof(ucontrol->value.bytes.data)) {
+ dev_err_ratelimited(scomp->dev,
+ "error: data max %d exceeds ucontrol data array size\n",
+ be->max);
+ return -EINVAL;
+ }
+
+ /* be->max has been verified to be >= sizeof(struct sof_abi_hdr) */
+ if (data->size > be->max - sizeof(*data)) {
+ dev_err_ratelimited(scomp->dev,
+ "error: data size too big %u bytes max is %zu\n",
+ data->size, be->max - sizeof(*data));
+ return -EINVAL;
+ }
+
+ size = data->size + sizeof(*data);
+
+ /* copy from kcontrol */
+ memcpy(data, ucontrol->value.bytes.data, size);
+
+ /* notify DSP of byte control updates */
+ if (pm_runtime_active(scomp->dev))
+ snd_sof_ipc_set_get_comp_data(scontrol,
+ SOF_IPC_COMP_SET_DATA,
+ SOF_CTRL_TYPE_DATA_SET,
+ scontrol->cmd,
+ true);
+
+ return 0;
+}
+
+int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size)
+{
+ struct soc_bytes_ext *be =
+ (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ struct snd_ctl_tlv header;
+ const struct snd_ctl_tlv __user *tlvd =
+ (const struct snd_ctl_tlv __user *)binary_data;
+
+ /* make sure we have at least a header */
+ if (size < sizeof(struct snd_ctl_tlv))
+ return -EINVAL;
+
+ /*
+ * The beginning of bytes data contains a header from where
+ * the length (as bytes) is needed to know the correct copy
+ * length of data from tlvd->tlv.
+ */
+ if (copy_from_user(&header, tlvd, sizeof(const struct snd_ctl_tlv)))
+ return -EFAULT;
+
+ /* make sure TLV info is consistent */
+ if (header.length + sizeof(struct snd_ctl_tlv) > size) {
+ dev_err_ratelimited(scomp->dev, "error: inconsistent TLV, data %d + header %zu > %d\n",
+ header.length, sizeof(struct snd_ctl_tlv), size);
+ return -EINVAL;
+ }
+
+ /* be->max is coming from topology */
+ if (header.length > be->max) {
+ dev_err_ratelimited(scomp->dev, "error: Bytes data size %d exceeds max %d.\n",
+ header.length, be->max);
+ return -EINVAL;
+ }
+
+ /* Check that header id matches the command */
+ if (header.numid != scontrol->cmd) {
+ dev_err_ratelimited(scomp->dev,
+ "error: incorrect numid %d\n",
+ header.numid);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(cdata->data, tlvd->tlv, header.length))
+ return -EFAULT;
+
+ if (cdata->data->magic != SOF_ABI_MAGIC) {
+ dev_err_ratelimited(scomp->dev,
+ "error: Wrong ABI magic 0x%08x.\n",
+ cdata->data->magic);
+ return -EINVAL;
+ }
+
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION, cdata->data->abi)) {
+ dev_err_ratelimited(scomp->dev, "error: Incompatible ABI version 0x%08x.\n",
+ cdata->data->abi);
+ return -EINVAL;
+ }
+
+ /* be->max has been verified to be >= sizeof(struct sof_abi_hdr) */
+ if (cdata->data->size > be->max - sizeof(const struct sof_abi_hdr)) {
+ dev_err_ratelimited(scomp->dev, "error: Mismatch in ABI data size (truncated?).\n");
+ return -EINVAL;
+ }
+
+ /* notify DSP of byte control updates */
+ if (pm_runtime_active(scomp->dev))
+ snd_sof_ipc_set_get_comp_data(scontrol,
+ SOF_IPC_COMP_SET_DATA,
+ SOF_CTRL_TYPE_DATA_SET,
+ scontrol->cmd,
+ true);
+
+ return 0;
+}
+
+int snd_sof_bytes_ext_volatile_get(struct snd_kcontrol *kcontrol, unsigned int __user *binary_data,
+ unsigned int size)
+{
+ struct soc_bytes_ext *be = (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ struct snd_ctl_tlv header;
+ struct snd_ctl_tlv __user *tlvd = (struct snd_ctl_tlv __user *)binary_data;
+ size_t data_size;
+ int ret;
+ int err;
+
+ /*
+ * Decrement the limit by ext bytes header size to
+ * ensure the user space buffer is not exceeded.
+ */
+ if (size < sizeof(struct snd_ctl_tlv))
+ return -ENOSPC;
+ size -= sizeof(struct snd_ctl_tlv);
+
+ ret = pm_runtime_get_sync(scomp->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err_ratelimited(scomp->dev, "error: bytes_ext get failed to resume %d\n", ret);
+ pm_runtime_put_noidle(scomp->dev);
+ return ret;
+ }
+
+ /* set the ABI header values */
+ cdata->data->magic = SOF_ABI_MAGIC;
+ cdata->data->abi = SOF_ABI_VERSION;
+ /* get all the component data from DSP */
+ ret = snd_sof_ipc_set_get_comp_data(scontrol, SOF_IPC_COMP_GET_DATA, SOF_CTRL_TYPE_DATA_GET,
+ scontrol->cmd, false);
+ if (ret < 0)
+ goto out;
+
+ /* check data size doesn't exceed max coming from topology */
+ if (cdata->data->size > be->max - sizeof(const struct sof_abi_hdr)) {
+ dev_err_ratelimited(scomp->dev, "error: user data size %d exceeds max size %zu.\n",
+ cdata->data->size,
+ be->max - sizeof(const struct sof_abi_hdr));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ data_size = cdata->data->size + sizeof(const struct sof_abi_hdr);
+
+ /* make sure we don't exceed size provided by user space for data */
+ if (data_size > size) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ header.numid = scontrol->cmd;
+ header.length = data_size;
+ if (copy_to_user(tlvd, &header, sizeof(const struct snd_ctl_tlv))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (copy_to_user(tlvd->tlv, cdata->data, data_size))
+ ret = -EFAULT;
+out:
+ pm_runtime_mark_last_busy(scomp->dev);
+ err = pm_runtime_put_autosuspend(scomp->dev);
+ if (err < 0)
+ dev_err_ratelimited(scomp->dev, "error: bytes_ext get failed to idle %d\n", err);
+
+ return ret;
+}
+
+int snd_sof_bytes_ext_get(struct snd_kcontrol *kcontrol,
+ unsigned int __user *binary_data,
+ unsigned int size)
+{
+ struct soc_bytes_ext *be =
+ (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ struct snd_ctl_tlv header;
+ struct snd_ctl_tlv __user *tlvd =
+ (struct snd_ctl_tlv __user *)binary_data;
+ size_t data_size;
+
+ /*
+ * Decrement the limit by ext bytes header size to
+ * ensure the user space buffer is not exceeded.
+ */
+ if (size < sizeof(struct snd_ctl_tlv))
+ return -ENOSPC;
+ size -= sizeof(struct snd_ctl_tlv);
+
+ /* set the ABI header values */
+ cdata->data->magic = SOF_ABI_MAGIC;
+ cdata->data->abi = SOF_ABI_VERSION;
+
+ /* check data size doesn't exceed max coming from topology */
+ if (cdata->data->size > be->max - sizeof(const struct sof_abi_hdr)) {
+ dev_err_ratelimited(scomp->dev, "error: user data size %d exceeds max size %zu.\n",
+ cdata->data->size,
+ be->max - sizeof(const struct sof_abi_hdr));
+ return -EINVAL;
+ }
+
+ data_size = cdata->data->size + sizeof(const struct sof_abi_hdr);
+
+ /* make sure we don't exceed size provided by user space for data */
+ if (data_size > size)
+ return -ENOSPC;
+
+ header.numid = scontrol->cmd;
+ header.length = data_size;
+ if (copy_to_user(tlvd, &header, sizeof(const struct snd_ctl_tlv)))
+ return -EFAULT;
+
+ if (copy_to_user(tlvd->tlv, cdata->data, data_size))
+ return -EFAULT;
+
+ return 0;
+}
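
Volume controls are translated through a per-control volume_table generated from topology: mixer_to_ipc() indexes the table directly, while ipc_to_mixer() recovers the slider position as the first table index whose entry is >= the IPC value. A small userspace sketch of that round trip with a hypothetical four-entry table (the real table contents and size come from the topology file):

#include <stdio.h>

static unsigned int mixer_to_ipc(unsigned int value, const unsigned int *map, int size)
{
	/* clamp to the last table entry, otherwise index directly */
	return map[value >= (unsigned int)size ? size - 1 : value];
}

static int ipc_to_mixer(unsigned int value, const unsigned int *map, int size)
{
	int i;

	/* first table entry that is >= the IPC value gives the slider step */
	for (i = 0; i < size; i++)
		if (map[i] >= value)
			return i;
	return size - 1;
}

int main(void)
{
	/* hypothetical 4-step table of linear gains (index 0 = mute) */
	const unsigned int vol[] = { 0, 16384, 32768, 65536 };
	unsigned int ipc = mixer_to_ipc(2, vol, 4);

	printf("kcontrol 2 -> ipc 0x%x -> kcontrol %d\n", ipc, ipc_to_mixer(ipc, vol, 4));
	return 0;
}

Compiled standalone, this prints "kcontrol 2 -> ipc 0x8000 -> kcontrol 2".
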
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
new file mode 100644
index 000000000..feced9077
--- /dev/null
+++ b/sound/soc/sof/core.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <sound/sof.h>
+#include "sof-priv.h"
+#include "ops.h"
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+#include "probe.h"
+#endif
+
+/* see SOF_DBG_ flags */
+int sof_core_debug;
+module_param_named(sof_debug, sof_core_debug, int, 0444);
+MODULE_PARM_DESC(sof_debug, "SOF core debug options (0x0 all off)");
+
+/* SOF defaults if not provided by the platform in ms */
+#define TIMEOUT_DEFAULT_IPC_MS 500
+#define TIMEOUT_DEFAULT_BOOT_MS 2000
+
+/*
+ * FW Panic/fault handling.
+ */
+
+struct sof_panic_msg {
+ u32 id;
+ const char *msg;
+};
+
+/* standard FW panic types */
+static const struct sof_panic_msg panic_msg[] = {
+ {SOF_IPC_PANIC_MEM, "out of memory"},
+ {SOF_IPC_PANIC_WORK, "work subsystem init failed"},
+ {SOF_IPC_PANIC_IPC, "IPC subsystem init failed"},
+ {SOF_IPC_PANIC_ARCH, "arch init failed"},
+ {SOF_IPC_PANIC_PLATFORM, "platform init failed"},
+ {SOF_IPC_PANIC_TASK, "scheduler init failed"},
+ {SOF_IPC_PANIC_EXCEPTION, "runtime exception"},
+ {SOF_IPC_PANIC_DEADLOCK, "deadlock"},
+ {SOF_IPC_PANIC_STACK, "stack overflow"},
+ {SOF_IPC_PANIC_IDLE, "can't enter idle"},
+ {SOF_IPC_PANIC_WFI, "invalid wait state"},
+ {SOF_IPC_PANIC_ASSERT, "assertion failed"},
+};
+
+/*
+ * helper to be called from .dbg_dump callbacks. No error code is
+ * provided, it's left as an exercise for the caller of .dbg_dump
+ * (typically IPC or loader)
+ */
+void snd_sof_get_status(struct snd_sof_dev *sdev, u32 panic_code,
+ u32 tracep_code, void *oops,
+ struct sof_ipc_panic_info *panic_info,
+ void *stack, size_t stack_words)
+{
+ u32 code;
+ int i;
+
+ /* is firmware dead ? */
+ if ((panic_code & SOF_IPC_PANIC_MAGIC_MASK) != SOF_IPC_PANIC_MAGIC) {
+ dev_err(sdev->dev, "error: unexpected fault 0x%8.8x trace 0x%8.8x\n",
+ panic_code, tracep_code);
+ return; /* no fault ? */
+ }
+
+ code = panic_code & (SOF_IPC_PANIC_MAGIC_MASK | SOF_IPC_PANIC_CODE_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(panic_msg); i++) {
+ if (panic_msg[i].id == code) {
+ dev_err(sdev->dev, "error: %s\n", panic_msg[i].msg);
+ dev_err(sdev->dev, "error: trace point %8.8x\n",
+ tracep_code);
+ goto out;
+ }
+ }
+
+ /* unknown error */
+ dev_err(sdev->dev, "error: unknown reason %8.8x\n", panic_code);
+ dev_err(sdev->dev, "error: trace point %8.8x\n", tracep_code);
+
+out:
+ dev_err(sdev->dev, "error: panic at %s:%d\n",
+ panic_info->filename, panic_info->linenum);
+ sof_oops(sdev, oops);
+ sof_stack(sdev, oops, stack, stack_words);
+}
+EXPORT_SYMBOL(snd_sof_get_status);
+
+/*
+ * FW Boot State Transition Diagram
+ *
+ * +-----------------------------------------------------------------------+
+ * | |
+ * ------------------ ------------------ |
+ * | | | | |
+ * | BOOT_FAILED | | READY_FAILED |-------------------------+ |
+ * | | | | | |
+ * ------------------ ------------------ | |
+ * ^ ^ | |
+ * | | | |
+ * (FW Boot Timeout) (FW_READY FAIL) | |
+ * | | | |
+ * | | | |
+ * ------------------ | ------------------ | |
+ * | | | | | | |
+ * | IN_PROGRESS |---------------+------------->| COMPLETE | | |
+ * | | (FW Boot OK) (FW_READY OK) | | | |
+ * ------------------ ------------------ | |
+ * ^ | | |
+ * | | | |
+ * (FW Loading OK) (System Suspend/Runtime Suspend)
+ * | | | |
+ * | | | |
+ * ------------------ ------------------ | | |
+ * | | | |<-----+ | |
+ * | PREPARE | | NOT_STARTED |<---------------------+ |
+ * | | | |<---------------------------+
+ * ------------------ ------------------
+ * | ^ | ^
+ * | | | |
+ * | +-----------------------+ |
+ * | (DSP Probe OK) |
+ * | |
+ * | |
+ * +------------------------------------+
+ * (System Suspend/Runtime Suspend)
+ */
+
+static int sof_probe_continue(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ int ret;
+
+ /* probe the DSP hardware */
+ ret = snd_sof_probe(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to probe DSP %d\n", ret);
+ return ret;
+ }
+
+ sdev->fw_state = SOF_FW_BOOT_PREPARE;
+
+ /* check machine info */
+ ret = sof_machine_check(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to get machine info %d\n",
+ ret);
+ goto dbg_err;
+ }
+
+ /* set up platform component driver */
+ snd_sof_new_platform_drv(sdev);
+
+ /* register any debug/trace capabilities */
+ ret = snd_sof_dbg_init(sdev);
+ if (ret < 0) {
+ /*
+ * debugfs issues are suppressed in snd_sof_dbg_init() since
+ * we cannot rely on debugfs
+ * here we trap errors due to memory allocation only.
+ */
+ dev_err(sdev->dev, "error: failed to init DSP trace/debug %d\n",
+ ret);
+ goto dbg_err;
+ }
+
+ /* init the IPC */
+ sdev->ipc = snd_sof_ipc_init(sdev);
+ if (!sdev->ipc) {
+ ret = -ENOMEM;
+ dev_err(sdev->dev, "error: failed to init DSP IPC %d\n", ret);
+ goto ipc_err;
+ }
+
+ /* load the firmware */
+ ret = snd_sof_load_firmware(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to load DSP firmware %d\n",
+ ret);
+ goto fw_load_err;
+ }
+
+ sdev->fw_state = SOF_FW_BOOT_IN_PROGRESS;
+
+ /*
+ * Boot the firmware. The FW boot status will be modified
+ * in snd_sof_run_firmware() depending on the outcome.
+ */
+ ret = snd_sof_run_firmware(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to boot DSP firmware %d\n",
+ ret);
+ goto fw_run_err;
+ }
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE) ||
+ (sof_core_debug & SOF_DBG_ENABLE_TRACE)) {
+ sdev->dtrace_is_supported = true;
+
+ /* init DMA trace */
+ ret = snd_sof_init_trace(sdev);
+ if (ret < 0) {
+ /* non fatal */
+ dev_warn(sdev->dev,
+ "warning: failed to initialize trace %d\n",
+ ret);
+ }
+ } else {
+ dev_dbg(sdev->dev, "SOF firmware trace disabled\n");
+ }
+
+ /* hereafter all FW boot flows are for PM reasons */
+ sdev->first_boot = false;
+
+ /* now register audio DSP platform driver and dai */
+ ret = devm_snd_soc_register_component(sdev->dev, &sdev->plat_drv,
+ sof_ops(sdev)->drv,
+ sof_ops(sdev)->num_drv);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to register DSP DAI driver %d\n", ret);
+ goto fw_trace_err;
+ }
+
+ ret = snd_sof_machine_register(sdev, plat_data);
+ if (ret < 0)
+ goto fw_trace_err;
+
+ /*
+ * Some platforms in SOF, ex: BYT, may not have their platform PM
+ * callbacks set. Increment the usage count so as to
+ * prevent the device from entering runtime suspend.
+ */
+ if (!sof_ops(sdev)->runtime_suspend || !sof_ops(sdev)->runtime_resume)
+ pm_runtime_get_noresume(sdev->dev);
+
+ if (plat_data->sof_probe_complete)
+ plat_data->sof_probe_complete(sdev->dev);
+
+ return 0;
+
+fw_trace_err:
+ snd_sof_free_trace(sdev);
+fw_run_err:
+ snd_sof_fw_unload(sdev);
+fw_load_err:
+ snd_sof_ipc_free(sdev);
+ipc_err:
+ snd_sof_free_debug(sdev);
+dbg_err:
+ snd_sof_remove(sdev);
+
+ /* all resources freed, update state to match */
+ sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
+ sdev->first_boot = true;
+
+ return ret;
+}
+
+static void sof_probe_work(struct work_struct *work)
+{
+ struct snd_sof_dev *sdev =
+ container_of(work, struct snd_sof_dev, probe_work);
+ int ret;
+
+ ret = sof_probe_continue(sdev);
+ if (ret < 0) {
+ /* errors cannot be propagated, log */
+ dev_err(sdev->dev, "error: %s failed err: %d\n", __func__, ret);
+ }
+}
+
+int snd_sof_device_probe(struct device *dev, struct snd_sof_pdata *plat_data)
+{
+ struct snd_sof_dev *sdev;
+
+ sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ /* initialize sof device */
+ sdev->dev = dev;
+
+ /* initialize default DSP power state */
+ sdev->dsp_power_state.state = SOF_DSP_PM_D0;
+
+ sdev->pdata = plat_data;
+ sdev->first_boot = true;
+ sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+ sdev->extractor_stream_tag = SOF_PROBE_INVALID_NODE_ID;
+#endif
+ dev_set_drvdata(dev, sdev);
+
+ /* check all mandatory ops */
+ if (!sof_ops(sdev) || !sof_ops(sdev)->probe || !sof_ops(sdev)->run ||
+ !sof_ops(sdev)->block_read || !sof_ops(sdev)->block_write ||
+ !sof_ops(sdev)->send_msg || !sof_ops(sdev)->load_firmware ||
+ !sof_ops(sdev)->ipc_msg_data || !sof_ops(sdev)->ipc_pcm_params ||
+ !sof_ops(sdev)->fw_ready)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&sdev->pcm_list);
+ INIT_LIST_HEAD(&sdev->kcontrol_list);
+ INIT_LIST_HEAD(&sdev->widget_list);
+ INIT_LIST_HEAD(&sdev->dai_list);
+ INIT_LIST_HEAD(&sdev->route_list);
+ spin_lock_init(&sdev->ipc_lock);
+ spin_lock_init(&sdev->hw_lock);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+ INIT_WORK(&sdev->probe_work, sof_probe_work);
+
+ /* set default timeouts if none provided */
+ if (plat_data->desc->ipc_timeout == 0)
+ sdev->ipc_timeout = TIMEOUT_DEFAULT_IPC_MS;
+ else
+ sdev->ipc_timeout = plat_data->desc->ipc_timeout;
+ if (plat_data->desc->boot_timeout == 0)
+ sdev->boot_timeout = TIMEOUT_DEFAULT_BOOT_MS;
+ else
+ sdev->boot_timeout = plat_data->desc->boot_timeout;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)) {
+ schedule_work(&sdev->probe_work);
+ return 0;
+ }
+
+ return sof_probe_continue(sdev);
+}
+EXPORT_SYMBOL(snd_sof_device_probe);
+
+int snd_sof_device_remove(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+ cancel_work_sync(&sdev->probe_work);
+
+ if (sdev->fw_state > SOF_FW_BOOT_NOT_STARTED) {
+ ret = snd_sof_dsp_power_down_notify(sdev);
+ if (ret < 0)
+ dev_warn(dev, "error: %d failed to prepare DSP for device removal",
+ ret);
+
+ snd_sof_ipc_free(sdev);
+ snd_sof_free_debug(sdev);
+ snd_sof_free_trace(sdev);
+ }
+
+ /*
+ * Unregister machine driver. This will unbind the snd_card which
+ * will remove the component driver and unload the topology
+ * before freeing the snd_card.
+ */
+ snd_sof_machine_unregister(sdev, pdata);
+
+ /*
+ * Unregistering the machine driver results in unloading the topology.
+ * Some widgets, ex: scheduler, attempt to power down the core they are
+ * scheduled on, when they are unloaded. Therefore, the DSP must be
+ * removed only after the topology has been unloaded.
+ */
+ if (sdev->fw_state > SOF_FW_BOOT_NOT_STARTED)
+ snd_sof_remove(sdev);
+
+ /* release firmware */
+ snd_sof_fw_unload(sdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_device_remove);
+
+MODULE_AUTHOR("Liam Girdwood");
+MODULE_DESCRIPTION("Sound Open Firmware (SOF) Core");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:sof-audio");
diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
new file mode 100644
index 000000000..3ef51b221
--- /dev/null
+++ b/sound/soc/sof/debug.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+// Generic debug routines used to export DSP MMIO and memories to userspace
+// for firmware debugging.
+//
+
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include "sof-priv.h"
+#include "ops.h"
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+#include "probe.h"
+
+/**
+ * strsplit_u32 - Split string into sequence of u32 tokens
+ * @buf: String to split into tokens.
+ * @delim: String containing delimiter characters.
+ * @tkns: Returned u32 sequence pointer.
+ * @num_tkns: Returned number of tokens obtained.
+ */
+static int
+strsplit_u32(char **buf, const char *delim, u32 **tkns, size_t *num_tkns)
+{
+ char *s;
+ u32 *data, *tmp;
+ size_t count = 0;
+ size_t cap = 32;
+ int ret = 0;
+
+ *tkns = NULL;
+ *num_tkns = 0;
+ data = kcalloc(cap, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ while ((s = strsep(buf, delim)) != NULL) {
+ ret = kstrtouint(s, 0, data + count);
+ if (ret)
+ goto exit;
+ if (++count >= cap) {
+ cap *= 2;
+ tmp = krealloc(data, cap * sizeof(*data), GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ data = tmp;
+ }
+ }
+
+ if (!count)
+ goto exit;
+ *tkns = kmemdup(data, count * sizeof(*data), GFP_KERNEL);
+ if (*tkns == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ *num_tkns = count;
+
+exit:
+ kfree(data);
+ return ret;
+}
+
+static int tokenize_input(const char __user *from, size_t count,
+ loff_t *ppos, u32 **tkns, size_t *num_tkns)
+{
+ char *buf;
+ int ret;
+
+ buf = kmalloc(count + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = simple_write_to_buffer(buf, count, ppos, from, count);
+ if (ret != count) {
+ ret = ret >= 0 ? -EIO : ret;
+ goto exit;
+ }
+
+ buf[count] = '\0';
+ ret = strsplit_u32((char **)&buf, ",", tkns, num_tkns);
+exit:
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t probe_points_read(struct file *file,
+ char __user *to, size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ struct sof_probe_point_desc *desc;
+ size_t num_desc, len = 0;
+ char *buf;
+ int i, ret;
+
+ if (sdev->extractor_stream_tag == SOF_PROBE_INVALID_NODE_ID) {
+ dev_warn(sdev->dev, "no extractor stream running\n");
+ return -ENOENT;
+ }
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = sof_ipc_probe_points_info(sdev, &desc, &num_desc);
+ if (ret < 0)
+ goto exit;
+
+ for (i = 0; i < num_desc; i++) {
+ ret = snprintf(buf + len, PAGE_SIZE - len,
+ "Id: %#010x Purpose: %d Node id: %#x\n",
+ desc[i].buffer_id, desc[i].purpose, desc[i].stream_tag);
+ if (ret < 0)
+ goto free_desc;
+ len += ret;
+ }
+
+ ret = simple_read_from_buffer(to, count, ppos, buf, len);
+free_desc:
+ kfree(desc);
+exit:
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t probe_points_write(struct file *file,
+ const char __user *from, size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ struct sof_probe_point_desc *desc;
+ size_t num_tkns, bytes;
+ u32 *tkns;
+ int ret;
+
+ if (sdev->extractor_stream_tag == SOF_PROBE_INVALID_NODE_ID) {
+ dev_warn(sdev->dev, "no extractor stream running\n");
+ return -ENOENT;
+ }
+
+ ret = tokenize_input(from, count, ppos, &tkns, &num_tkns);
+ if (ret < 0)
+ return ret;
+ bytes = sizeof(*tkns) * num_tkns;
+ if (!num_tkns || (bytes % sizeof(*desc))) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ desc = (struct sof_probe_point_desc *)tkns;
+ ret = sof_ipc_probe_points_add(sdev,
+ desc, bytes / sizeof(*desc));
+ if (!ret)
+ ret = count;
+exit:
+ kfree(tkns);
+ return ret;
+}
+
+static const struct file_operations probe_points_fops = {
+ .open = simple_open,
+ .read = probe_points_read,
+ .write = probe_points_write,
+ .llseek = default_llseek,
+};
+
+static ssize_t probe_points_remove_write(struct file *file,
+ const char __user *from, size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ size_t num_tkns;
+ u32 *tkns;
+ int ret;
+
+ if (sdev->extractor_stream_tag == SOF_PROBE_INVALID_NODE_ID) {
+ dev_warn(sdev->dev, "no extractor stream running\n");
+ return -ENOENT;
+ }
+
+ ret = tokenize_input(from, count, ppos, &tkns, &num_tkns);
+ if (ret < 0)
+ return ret;
+ if (!num_tkns) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = sof_ipc_probe_points_remove(sdev, tkns, num_tkns);
+ if (!ret)
+ ret = count;
+exit:
+ kfree(tkns);
+ return ret;
+}
+
+static const struct file_operations probe_points_remove_fops = {
+ .open = simple_open,
+ .write = probe_points_remove_write,
+ .llseek = default_llseek,
+};
+
+static int snd_sof_debugfs_probe_item(struct snd_sof_dev *sdev,
+ const char *name, mode_t mode,
+ const struct file_operations *fops)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
+ if (!dfse)
+ return -ENOMEM;
+
+ dfse->type = SOF_DFSENTRY_TYPE_BUF;
+ dfse->sdev = sdev;
+
+ debugfs_create_file(name, mode, sdev->debugfs_root, dfse, fops);
+ /* add to dfsentry list */
+ list_add(&dfse->list, &sdev->dfsentry_list);
+
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
+#define MAX_IPC_FLOOD_DURATION_MS 1000
+#define MAX_IPC_FLOOD_COUNT 10000
+#define IPC_FLOOD_TEST_RESULT_LEN 512
+
+static int sof_debug_ipc_flood_test(struct snd_sof_dev *sdev,
+ struct snd_sof_dfsentry *dfse,
+ bool flood_duration_test,
+ unsigned long ipc_duration_ms,
+ unsigned long ipc_count)
+{
+ struct sof_ipc_cmd_hdr hdr;
+ struct sof_ipc_reply reply;
+ u64 min_response_time = U64_MAX;
+ ktime_t start, end, test_end;
+ u64 avg_response_time = 0;
+ u64 max_response_time = 0;
+ u64 ipc_response_time;
+ int i = 0;
+ int ret;
+
+ /* configure test IPC */
+ hdr.cmd = SOF_IPC_GLB_TEST_MSG | SOF_IPC_TEST_IPC_FLOOD;
+ hdr.size = sizeof(hdr);
+
+ /* set test end time for duration flood test */
+ if (flood_duration_test)
+ test_end = ktime_get_ns() + ipc_duration_ms * NSEC_PER_MSEC;
+
+ /* send test IPC's */
+ while (1) {
+ start = ktime_get();
+ ret = sof_ipc_tx_message(sdev->ipc, hdr.cmd, &hdr, hdr.size,
+ &reply, sizeof(reply));
+ end = ktime_get();
+
+ if (ret < 0)
+ break;
+
+ /* compute min and max response times */
+ ipc_response_time = ktime_to_ns(ktime_sub(end, start));
+ min_response_time = min(min_response_time, ipc_response_time);
+ max_response_time = max(max_response_time, ipc_response_time);
+
+ /* sum up response times */
+ avg_response_time += ipc_response_time;
+ i++;
+
+ /* test complete? */
+ if (flood_duration_test) {
+ if (ktime_to_ns(end) >= test_end)
+ break;
+ } else {
+ if (i == ipc_count)
+ break;
+ }
+ }
+
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: ipc flood test failed at %d iterations\n", i);
+
+ /* return if the first IPC fails */
+ if (!i)
+ return ret;
+
+ /* compute average response time */
+ do_div(avg_response_time, i);
+
+ /* clear previous test output */
+ memset(dfse->cache_buf, 0, IPC_FLOOD_TEST_RESULT_LEN);
+
+ if (flood_duration_test) {
+ dev_dbg(sdev->dev, "IPC Flood test duration: %lums\n",
+ ipc_duration_ms);
+ snprintf(dfse->cache_buf, IPC_FLOOD_TEST_RESULT_LEN,
+ "IPC Flood test duration: %lums\n", ipc_duration_ms);
+ }
+
+ dev_dbg(sdev->dev,
+ "IPC Flood count: %d, Avg response time: %lluns\n",
+ i, avg_response_time);
+ dev_dbg(sdev->dev, "Max response time: %lluns\n",
+ max_response_time);
+ dev_dbg(sdev->dev, "Min response time: %lluns\n",
+ min_response_time);
+
+ /* format output string */
+ snprintf(dfse->cache_buf + strlen(dfse->cache_buf),
+ IPC_FLOOD_TEST_RESULT_LEN - strlen(dfse->cache_buf),
+ "IPC Flood count: %d\nAvg response time: %lluns\n",
+ i, avg_response_time);
+
+ snprintf(dfse->cache_buf + strlen(dfse->cache_buf),
+ IPC_FLOOD_TEST_RESULT_LEN - strlen(dfse->cache_buf),
+ "Max response time: %lluns\nMin response time: %lluns\n",
+ max_response_time, min_response_time);
+
+ return ret;
+}
+#endif
+
+static ssize_t sof_dfsentry_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ unsigned long ipc_duration_ms = 0;
+ bool flood_duration_test = false;
+ unsigned long ipc_count = 0;
+ struct dentry *dentry;
+ int err;
+#endif
+ size_t size;
+ char *string;
+ int ret;
+
+ string = kzalloc(count+1, GFP_KERNEL);
+ if (!string)
+ return -ENOMEM;
+
+ size = simple_write_to_buffer(string, count, ppos, buffer, count);
+ ret = size;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
+ /*
+ * write op is only supported for ipc_flood_count or
+ * ipc_flood_duration_ms debugfs entries atm.
+ * ipc_flood_count floods the DSP with the number of IPC's specified.
+ * ipc_duration_ms test floods the DSP for the time specified
+ * in the debugfs entry.
+ */
+ dentry = file->f_path.dentry;
+ if (strcmp(dentry->d_name.name, "ipc_flood_count") &&
+ strcmp(dentry->d_name.name, "ipc_flood_duration_ms")) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!strcmp(dentry->d_name.name, "ipc_flood_duration_ms"))
+ flood_duration_test = true;
+
+ /* test completion criterion */
+ if (flood_duration_test)
+ ret = kstrtoul(string, 0, &ipc_duration_ms);
+ else
+ ret = kstrtoul(string, 0, &ipc_count);
+ if (ret < 0)
+ goto out;
+
+ /* limit max duration/ipc count for flood test */
+ if (flood_duration_test) {
+ if (!ipc_duration_ms) {
+ ret = size;
+ goto out;
+ }
+
+ /* find the minimum. min() is not used to avoid warnings */
+ if (ipc_duration_ms > MAX_IPC_FLOOD_DURATION_MS)
+ ipc_duration_ms = MAX_IPC_FLOOD_DURATION_MS;
+ } else {
+ if (!ipc_count) {
+ ret = size;
+ goto out;
+ }
+
+ /* find the minimum. min() is not used to avoid warnings */
+ if (ipc_count > MAX_IPC_FLOOD_COUNT)
+ ipc_count = MAX_IPC_FLOOD_COUNT;
+ }
+
+ ret = pm_runtime_get_sync(sdev->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err_ratelimited(sdev->dev,
+ "error: debugfs write failed to resume %d\n",
+ ret);
+ pm_runtime_put_noidle(sdev->dev);
+ goto out;
+ }
+
+ /* flood test */
+ ret = sof_debug_ipc_flood_test(sdev, dfse, flood_duration_test,
+ ipc_duration_ms, ipc_count);
+
+ pm_runtime_mark_last_busy(sdev->dev);
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: debugfs write failed to idle %d\n",
+ err);
+
+ /* return size if test is successful */
+ if (ret >= 0)
+ ret = size;
+out:
+#endif
+ kfree(string);
+ return ret;
+}
+
+static ssize_t sof_dfsentry_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ loff_t pos = *ppos;
+ size_t size_ret;
+ int skip = 0;
+ int size;
+ u8 *buf;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
+ struct dentry *dentry;
+
+ dentry = file->f_path.dentry;
+ if ((!strcmp(dentry->d_name.name, "ipc_flood_count") ||
+ !strcmp(dentry->d_name.name, "ipc_flood_duration_ms")) &&
+ dfse->cache_buf) {
+ if (*ppos)
+ return 0;
+
+ count = strlen(dfse->cache_buf);
+ size_ret = copy_to_user(buffer, dfse->cache_buf, count);
+ if (size_ret)
+ return -EFAULT;
+
+ *ppos += count;
+ return count;
+ }
+#endif
+ size = dfse->size;
+
+ /* validate position & count */
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= size || !count)
+ return 0;
+ /* find the minimum. min() is not used since it adds sparse warnings */
+ if (count > size - pos)
+ count = size - pos;
+
+ /* align io read start to u32 multiple */
+ pos = ALIGN_DOWN(pos, 4);
+
+ /* intermediate buffer size must be u32 multiple */
+ size = ALIGN(count, 4);
+
+ /* if start position is unaligned, read extra u32 */
+ if (unlikely(pos != *ppos)) {
+ skip = *ppos - pos;
+ if (pos + size + 4 < dfse->size)
+ size += 4;
+ }
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (dfse->type == SOF_DFSENTRY_TYPE_IOMEM) {
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+ /*
+ * If the DSP is active: copy from IO.
+ * If the DSP is suspended:
+ * - Copy from IO if the memory is always accessible.
+ * - Otherwise, copy from cached buffer.
+ */
+ if (pm_runtime_active(sdev->dev) ||
+ dfse->access_type == SOF_DEBUGFS_ACCESS_ALWAYS) {
+ memcpy_fromio(buf, dfse->io_mem + pos, size);
+ } else {
+ dev_info(sdev->dev,
+ "Copying cached debugfs data\n");
+ memcpy(buf, dfse->cache_buf + pos, size);
+ }
+#else
+ /* if the DSP is in D3 */
+ if (!pm_runtime_active(sdev->dev) &&
+ dfse->access_type == SOF_DEBUGFS_ACCESS_D0_ONLY) {
+ dev_err(sdev->dev,
+ "error: debugfs entry cannot be read in DSP D3\n");
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ memcpy_fromio(buf, dfse->io_mem + pos, size);
+#endif
+ } else {
+ memcpy(buf, ((u8 *)(dfse->buf) + pos), size);
+ }
+
+ /* copy to userspace */
+ size_ret = copy_to_user(buffer, buf + skip, count);
+
+ kfree(buf);
+
+ /* update count & position if copy succeeded */
+ if (size_ret)
+ return -EFAULT;
+
+ *ppos = pos + count;
+
+ return count;
+}
+
+static const struct file_operations sof_dfs_fops = {
+ .open = simple_open,
+ .read = sof_dfsentry_read,
+ .llseek = default_llseek,
+ .write = sof_dfsentry_write,
+};
+
+/* create FS entry for debug files that can expose DSP memories, registers */
+int snd_sof_debugfs_io_item(struct snd_sof_dev *sdev,
+ void __iomem *base, size_t size,
+ const char *name,
+ enum sof_debugfs_access_type access_type)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ if (!sdev)
+ return -EINVAL;
+
+ dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
+ if (!dfse)
+ return -ENOMEM;
+
+ dfse->type = SOF_DFSENTRY_TYPE_IOMEM;
+ dfse->io_mem = base;
+ dfse->size = size;
+ dfse->sdev = sdev;
+ dfse->access_type = access_type;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+ /*
+ * allocate cache buffer that will be used to save the mem window
+ * contents prior to suspend
+ */
+ if (access_type == SOF_DEBUGFS_ACCESS_D0_ONLY) {
+ dfse->cache_buf = devm_kzalloc(sdev->dev, size, GFP_KERNEL);
+ if (!dfse->cache_buf)
+ return -ENOMEM;
+ }
+#endif
+
+ debugfs_create_file(name, 0444, sdev->debugfs_root, dfse,
+ &sof_dfs_fops);
+
+ /* add to dfsentry list */
+ list_add(&dfse->list, &sdev->dfsentry_list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_sof_debugfs_io_item);
+
+/* create FS entry for debug files to expose kernel memory */
+int snd_sof_debugfs_buf_item(struct snd_sof_dev *sdev,
+ void *base, size_t size,
+ const char *name, mode_t mode)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ if (!sdev)
+ return -EINVAL;
+
+ dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
+ if (!dfse)
+ return -ENOMEM;
+
+ dfse->type = SOF_DFSENTRY_TYPE_BUF;
+ dfse->buf = base;
+ dfse->size = size;
+ dfse->sdev = sdev;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
+ /*
+ * cache_buf is unused for SOF_DFSENTRY_TYPE_BUF debugfs entries.
+ * So, use it to save the results of the last IPC flood test.
+ */
+ dfse->cache_buf = devm_kzalloc(sdev->dev, IPC_FLOOD_TEST_RESULT_LEN,
+ GFP_KERNEL);
+ if (!dfse->cache_buf)
+ return -ENOMEM;
+#endif
+
+ debugfs_create_file(name, mode, sdev->debugfs_root, dfse,
+ &sof_dfs_fops);
+ /* add to dfsentry list */
+ list_add(&dfse->list, &sdev->dfsentry_list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_sof_debugfs_buf_item);
+
+int snd_sof_dbg_init(struct snd_sof_dev *sdev)
+{
+ const struct snd_sof_dsp_ops *ops = sof_ops(sdev);
+ const struct snd_sof_debugfs_map *map;
+ int i;
+ int err;
+
+ /* use "sof" as top level debugFS dir */
+ sdev->debugfs_root = debugfs_create_dir("sof", NULL);
+
+ /* init dfsentry list */
+ INIT_LIST_HEAD(&sdev->dfsentry_list);
+
+ /* create debugFS files for platform specific MMIO/DSP memories */
+ for (i = 0; i < ops->debug_map_count; i++) {
+ map = &ops->debug_map[i];
+
+ err = snd_sof_debugfs_io_item(sdev, sdev->bar[map->bar] +
+ map->offset, map->size,
+ map->name, map->access_type);
+ /* errors are only due to memory allocation, not debugfs */
+ if (err < 0)
+ return err;
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+ err = snd_sof_debugfs_probe_item(sdev, "probe_points",
+ 0644, &probe_points_fops);
+ if (err < 0)
+ return err;
+ err = snd_sof_debugfs_probe_item(sdev, "probe_points_remove",
+ 0200, &probe_points_remove_fops);
+ if (err < 0)
+ return err;
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
+ /* create read-write ipc_flood_count debugfs entry */
+ err = snd_sof_debugfs_buf_item(sdev, NULL, 0,
+ "ipc_flood_count", 0666);
+
+ /* errors are only due to memory allocation, not debugfs */
+ if (err < 0)
+ return err;
+
+ /* create read-write ipc_flood_duration_ms debugfs entry */
+ err = snd_sof_debugfs_buf_item(sdev, NULL, 0,
+ "ipc_flood_duration_ms", 0666);
+
+ /* errors are only due to memory allocation, not debugfs */
+ if (err < 0)
+ return err;
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_sof_dbg_init);
+
+void snd_sof_free_debug(struct snd_sof_dev *sdev)
+{
+ debugfs_remove_recursive(sdev->debugfs_root);
+}
+EXPORT_SYMBOL_GPL(snd_sof_free_debug);
+
+void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev)
+{
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_RETAIN_DSP_CONTEXT) ||
+ (sof_core_debug & SOF_DBG_RETAIN_CTX)) {
+ /* should we prevent the DSP from entering D3? */
+ dev_info(sdev->dev, "info: preventing DSP entering D3 state to preserve context\n");
+ pm_runtime_get_noresume(sdev->dev);
+ }
+
+ /* dump vital information to the logs */
+ snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX);
+ snd_sof_ipc_dump(sdev);
+ snd_sof_trace_notify_for_error(sdev);
+}
+EXPORT_SYMBOL(snd_sof_handle_fw_exception);
diff --git a/sound/soc/sof/imx/Kconfig b/sound/soc/sof/imx/Kconfig
new file mode 100644
index 000000000..48f998a19
--- /dev/null
+++ b/sound/soc/sof/imx/Kconfig
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+config SND_SOC_SOF_IMX_TOPLEVEL
+ bool "SOF support for NXP i.MX audio DSPs"
+ depends on ARM64 || COMPILE_TEST
+ depends on SND_SOC_SOF_OF
+ help
+ This adds support for Sound Open Firmware for NXP i.MX platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+if SND_SOC_SOF_IMX_TOPLEVEL
+
+config SND_SOC_SOF_IMX_OF
+ def_tristate SND_SOC_SOF_OF
+ select SND_SOC_SOF_IMX8 if SND_SOC_SOF_IMX8_SUPPORT
+ select SND_SOC_SOF_IMX8M if SND_SOC_SOF_IMX8M_SUPPORT
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_IMX_COMMON
+ tristate
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
+config SND_SOC_SOF_IMX8_SUPPORT
+ bool "SOF support for i.MX8"
+ depends on IMX_SCU=y || IMX_SCU=SND_SOC_SOF_IMX_OF
+ depends on IMX_DSP=y || IMX_DSP=SND_SOC_SOF_IMX_OF
+ help
+ This adds support for Sound Open Firmware for NXP i.MX8 platforms
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_IMX8
+ tristate
+ select SND_SOC_SOF_IMX_COMMON
+ select SND_SOC_SOF_XTENSA
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_IMX8M_SUPPORT
+ bool "SOF support for i.MX8M"
+ depends on IMX_DSP=y || IMX_DSP=SND_SOC_SOF_OF
+ help
+ This adds support for Sound Open Firmware for NXP i.MX8M platforms
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_IMX8M
+ tristate
+ select SND_SOC_SOF_IMX_COMMON
+ select SND_SOC_SOF_XTENSA
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+endif ## SND_SOC_SOF_IMX_TOPLEVEL
diff --git a/sound/soc/sof/imx/Makefile b/sound/soc/sof/imx/Makefile
new file mode 100644
index 000000000..dba93c346
--- /dev/null
+++ b/sound/soc/sof/imx/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+snd-sof-imx8-objs := imx8.o
+snd-sof-imx8m-objs := imx8m.o
+
+snd-sof-imx-common-objs := imx-common.o
+
+obj-$(CONFIG_SND_SOC_SOF_IMX8) += snd-sof-imx8.o
+obj-$(CONFIG_SND_SOC_SOF_IMX8M) += snd-sof-imx8m.o
+obj-$(CONFIG_SND_SOC_SOF_IMX_COMMON) += snd-sof-imx-common.o
diff --git a/sound/soc/sof/imx/imx-common.c b/sound/soc/sof/imx/imx-common.c
new file mode 100644
index 000000000..5fee63783
--- /dev/null
+++ b/sound/soc/sof/imx/imx-common.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright 2020 NXP
+//
+// Common helpers for the audio DSP on i.MX8
+
+#include <linux/module.h>
+#include <sound/sof/xtensa.h>
+#include "../ops.h"
+
+#include "imx-common.h"
+
+/**
+ * imx8_get_registers() - Called on a DSP oops to gather information
+ * about the registers, the filename and line number, and the stack.
+ * @sdev: SOF device
+ * @xoops: Stores information about registers.
+ * @panic_info: Stores information about filename and line number.
+ * @stack: Stores the stack dump.
+ * @stack_words: Number of 32-bit words in the stack dump.
+ */
+void imx8_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
+ /* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+ offset += xoops->arch_hdr.totalsize;
+ sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
+}
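The oops data read above is laid out back to back in the DSP mailbox: the Xtensa register dump first, then the panic info, then the stack, with each offset derived from the size of the previous block. A small sketch of that bookkeeping with hypothetical sizes (the real ones come from struct sof_ipc_dsp_oops_xtensa and struct sof_ipc_panic_info):

#include <stdio.h>

int main(void)
{
	unsigned int dsp_oops_offset = 0x800; /* hypothetical mailbox offset */
	unsigned int xoops_totalsize = 0x100; /* xoops->arch_hdr.totalsize */
	unsigned int panic_info_size = 0x60;  /* sizeof(*panic_info) */

	unsigned int regs_off  = dsp_oops_offset;
	unsigned int panic_off = regs_off + xoops_totalsize;
	unsigned int stack_off = panic_off + panic_info_size;

	/* the driver bails out if totalsize exceeds EXCEPT_MAX_HDR_SIZE */
	printf("regs@0x%x panic@0x%x stack@0x%x\n",
	       regs_off, panic_off, stack_off);
	return 0;
}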
+
+/**
+ * imx8_dump() - Called when a panic message is received from the firmware.
+ * @sdev: SOF device
+ * @flags: unused, present to match the dbg_dump op prototype
+ */
+void imx8_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[IMX8_STACK_DUMP_SIZE];
+ u32 status;
+
+ /* Get information about the panic status from the debug box area.
+ * Compute the trace point based on the status.
+ */
+ sof_mailbox_read(sdev, sdev->debug_box.offset + 0x4, &status, 4);
+
+ /* Get information about the registers, the filename and line
+ * number and the stack.
+ */
+ imx8_get_registers(sdev, &xoops, &panic_info, stack,
+ IMX8_STACK_DUMP_SIZE);
+
+ /* Print the information to the console */
+ snd_sof_get_status(sdev, status, status, &xoops, &panic_info, stack,
+ IMX8_STACK_DUMP_SIZE);
+}
+EXPORT_SYMBOL(imx8_dump);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/imx/imx-common.h b/sound/soc/sof/imx/imx-common.h
new file mode 100644
index 000000000..1cc7d6704
--- /dev/null
+++ b/sound/soc/sof/imx/imx-common.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef __IMX_COMMON_H__
+#define __IMX_COMMON_H__
+
+#define EXCEPT_MAX_HDR_SIZE 0x400
+#define IMX8_STACK_DUMP_SIZE 32
+
+void imx8_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words);
+
+void imx8_dump(struct snd_sof_dev *sdev, u32 flags);
+
+#endif
diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
new file mode 100644
index 000000000..4e7dccadd
--- /dev/null
+++ b/sound/soc/sof/imx/imx8.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright 2019 NXP
+//
+// Author: Daniel Baluta <daniel.baluta@nxp.com>
+//
+// Hardware interface for audio DSP on i.MX8
+
+#include <linux/firmware.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pm_domain.h>
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/dsp.h>
+
+#include <linux/firmware/imx/svc/misc.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include "../ops.h"
+#include "imx-common.h"
+
+/* DSP memories */
+#define IRAM_OFFSET 0x10000
+#define IRAM_SIZE (2 * 1024)
+#define DRAM0_OFFSET 0x0
+#define DRAM0_SIZE (32 * 1024)
+#define DRAM1_OFFSET 0x8000
+#define DRAM1_SIZE (32 * 1024)
+#define SYSRAM_OFFSET 0x18000
+#define SYSRAM_SIZE (256 * 1024)
+#define SYSROM_OFFSET 0x58000
+#define SYSROM_SIZE (192 * 1024)
+
+#define RESET_VECTOR_VADDR 0x596f8000
+
+#define MBOX_OFFSET 0x800000
+#define MBOX_SIZE 0x1000
+
+struct imx8_priv {
+ struct device *dev;
+ struct snd_sof_dev *sdev;
+
+ /* DSP IPC handler */
+ struct imx_dsp_ipc *dsp_ipc;
+ struct platform_device *ipc_dev;
+
+ /* System Controller IPC handler */
+ struct imx_sc_ipc *sc_ipc;
+
+ /* Power domain handling */
+ int num_domains;
+ struct device **pd_dev;
+ struct device_link **link;
+
+};
+
+static void imx8_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply reply;
+ int ret = 0;
+
+ if (!msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt\n");
+ return;
+ }
+
+ /* get reply */
+ sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));
+
+ if (reply.error < 0) {
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ ret = reply.error;
+ } else {
+ /* reply has correct size? */
+ if (reply.hdr.size != msg->reply_size) {
+ dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
+ msg->reply_size, reply.hdr.size);
+ ret = -EINVAL;
+ }
+
+ /* read the message */
+ if (msg->reply_size > 0)
+ sof_mailbox_read(sdev, sdev->host_box.offset,
+ msg->reply_data, msg->reply_size);
+ }
+
+ msg->reply_error = ret;
+}
+
+static int imx8_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+
+static int imx8_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+
+static void imx8_dsp_handle_reply(struct imx_dsp_ipc *ipc)
+{
+ struct imx8_priv *priv = imx_dsp_get_data(ipc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sdev->ipc_lock, flags);
+ imx8_get_reply(priv->sdev);
+ snd_sof_ipc_reply(priv->sdev, 0);
+ spin_unlock_irqrestore(&priv->sdev->ipc_lock, flags);
+}
+
+static void imx8_dsp_handle_request(struct imx_dsp_ipc *ipc)
+{
+ struct imx8_priv *priv = imx_dsp_get_data(ipc);
+ u32 p; /* panic code */
+
+ /* Read the message from the debug box. */
+ sof_mailbox_read(priv->sdev, priv->sdev->debug_box.offset + 4, &p, sizeof(p));
+
+ /* Check to see if the message is a panic code (0x0dead***) */
+ if ((p & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC)
+ snd_sof_dsp_panic(priv->sdev, p);
+ else
+ snd_sof_ipc_msgs_rx(priv->sdev);
+}
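The request handler above distinguishes a firmware panic from a normal IPC by masking the value read from the debug box and comparing it against the panic magic. A standalone sketch of that check, assuming the conventional SOF values 0x0dead000 / 0x0ffff000 for the magic and mask (the driver itself takes them from the SOF IPC headers):

#include <stdio.h>

#define PANIC_MAGIC      0x0dead000u  /* assumed SOF_IPC_PANIC_MAGIC */
#define PANIC_MAGIC_MASK 0x0ffff000u  /* assumed SOF_IPC_PANIC_MAGIC_MASK */

/* returns non-zero when the word carries a panic code in its low bits */
static int is_panic(unsigned int p)
{
	return (p & PANIC_MAGIC_MASK) == PANIC_MAGIC;
}

int main(void)
{
	unsigned int msgs[] = { 0x0dead006u, 0x60010000u };
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("0x%08x -> %s\n", msgs[i],
		       is_panic(msgs[i]) ? "panic" : "normal IPC");
	return 0;
}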
+
+static struct imx_dsp_ops dsp_ops = {
+ .handle_reply = imx8_dsp_handle_reply,
+ .handle_request = imx8_dsp_handle_request,
+};
+
+static int imx8_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct imx8_priv *priv = sdev->pdata->hw_pdata;
+
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ imx_dsp_ring_doorbell(priv->dsp_ipc, 0);
+
+ return 0;
+}
+
+/*
+ * DSP control.
+ */
+static int imx8x_run(struct snd_sof_dev *sdev)
+{
+ struct imx8_priv *dsp_priv = sdev->pdata->hw_pdata;
+ int ret;
+
+ ret = imx_sc_misc_set_control(dsp_priv->sc_ipc, IMX_SC_R_DSP,
+ IMX_SC_C_OFS_SEL, 1);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Error system address offset source select\n");
+ return ret;
+ }
+
+ ret = imx_sc_misc_set_control(dsp_priv->sc_ipc, IMX_SC_R_DSP,
+ IMX_SC_C_OFS_AUDIO, 0x80);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Error system address offset of AUDIO\n");
+ return ret;
+ }
+
+ ret = imx_sc_misc_set_control(dsp_priv->sc_ipc, IMX_SC_R_DSP,
+ IMX_SC_C_OFS_PERIPH, 0x5A);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Error system address offset of PERIPH %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = imx_sc_misc_set_control(dsp_priv->sc_ipc, IMX_SC_R_DSP,
+ IMX_SC_C_OFS_IRQ, 0x51);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Error system address offset of IRQ\n");
+ return ret;
+ }
+
+ imx_sc_pm_cpu_start(dsp_priv->sc_ipc, IMX_SC_R_DSP, true,
+ RESET_VECTOR_VADDR);
+
+ return 0;
+}
+
+static int imx8_run(struct snd_sof_dev *sdev)
+{
+ struct imx8_priv *dsp_priv = sdev->pdata->hw_pdata;
+ int ret;
+
+ ret = imx_sc_misc_set_control(dsp_priv->sc_ipc, IMX_SC_R_DSP,
+ IMX_SC_C_OFS_SEL, 0);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Error system address offset source select\n");
+ return ret;
+ }
+
+ imx_sc_pm_cpu_start(dsp_priv->sc_ipc, IMX_SC_R_DSP, true,
+ RESET_VECTOR_VADDR);
+
+ return 0;
+}
+
+static int imx8_probe(struct snd_sof_dev *sdev)
+{
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *res_node;
+ struct resource *mmio;
+ struct imx8_priv *priv;
+ struct resource res;
+ u32 base, size;
+ int ret = 0;
+ int i;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ sdev->pdata->hw_pdata = priv;
+ priv->dev = sdev->dev;
+ priv->sdev = sdev;
+
+ /* power up device associated power domains */
+ priv->num_domains = of_count_phandle_with_args(np, "power-domains",
+ "#power-domain-cells");
+ if (priv->num_domains < 0) {
+ dev_err(sdev->dev, "no power-domains property in %pOF\n", np);
+ return priv->num_domains;
+ }
+
+ priv->pd_dev = devm_kmalloc_array(&pdev->dev, priv->num_domains,
+ sizeof(*priv->pd_dev), GFP_KERNEL);
+ if (!priv->pd_dev)
+ return -ENOMEM;
+
+ priv->link = devm_kmalloc_array(&pdev->dev, priv->num_domains,
+ sizeof(*priv->link), GFP_KERNEL);
+ if (!priv->link)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->num_domains; i++) {
+ priv->pd_dev[i] = dev_pm_domain_attach_by_id(&pdev->dev, i);
+ if (IS_ERR(priv->pd_dev[i])) {
+ ret = PTR_ERR(priv->pd_dev[i]);
+ goto exit_unroll_pm;
+ }
+ priv->link[i] = device_link_add(&pdev->dev, priv->pd_dev[i],
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!priv->link[i]) {
+ ret = -ENOMEM;
+ dev_pm_domain_detach(priv->pd_dev[i], false);
+ goto exit_unroll_pm;
+ }
+ }
+
+ ret = imx_scu_get_handle(&priv->sc_ipc);
+ if (ret) {
+ dev_err(sdev->dev, "Cannot obtain SCU handle (err = %d)\n",
+ ret);
+ goto exit_unroll_pm;
+ }
+
+ priv->ipc_dev = platform_device_register_data(sdev->dev, "imx-dsp",
+ PLATFORM_DEVID_NONE,
+ pdev, sizeof(*pdev));
+ if (IS_ERR(priv->ipc_dev)) {
+ ret = PTR_ERR(priv->ipc_dev);
+ goto exit_unroll_pm;
+ }
+
+ priv->dsp_ipc = dev_get_drvdata(&priv->ipc_dev->dev);
+ if (!priv->dsp_ipc) {
+ /* DSP IPC driver not probed yet, try later */
+ ret = -EPROBE_DEFER;
+ dev_err(sdev->dev, "Failed to get drvdata\n");
+ goto exit_pdev_unregister;
+ }
+
+ imx_dsp_set_data(priv->dsp_ipc, priv);
+ priv->dsp_ipc->ops = &dsp_ops;
+
+ /* DSP base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get DSP base at idx 0\n");
+ ret = -EINVAL;
+ goto exit_pdev_unregister;
+ }
+
+ sdev->bar[SOF_FW_BLK_TYPE_IRAM] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[SOF_FW_BLK_TYPE_IRAM]) {
+ dev_err(sdev->dev, "failed to ioremap base 0x%x size 0x%x\n",
+ base, size);
+ ret = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+ sdev->mmio_bar = SOF_FW_BLK_TYPE_IRAM;
+
+ res_node = of_parse_phandle(np, "memory-region", 0);
+ if (!res_node) {
+ dev_err(&pdev->dev, "failed to get memory region node\n");
+ ret = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+
+ ret = of_address_to_resource(res_node, 0, &res);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get reserved region address\n");
+ goto exit_pdev_unregister;
+ }
+
+ sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap_wc(sdev->dev, res.start,
+ resource_size(&res));
+ if (!sdev->bar[SOF_FW_BLK_TYPE_SRAM]) {
+ dev_err(sdev->dev, "failed to ioremap mem 0x%x size 0x%x\n",
+ base, size);
+ ret = -ENOMEM;
+ goto exit_pdev_unregister;
+ }
+ sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return 0;
+
+exit_pdev_unregister:
+ platform_device_unregister(priv->ipc_dev);
+exit_unroll_pm:
+ while (--i >= 0) {
+ device_link_del(priv->link[i]);
+ dev_pm_domain_detach(priv->pd_dev[i], false);
+ }
+
+ return ret;
+}
+
+static int imx8_remove(struct snd_sof_dev *sdev)
+{
+ struct imx8_priv *priv = sdev->pdata->hw_pdata;
+ int i;
+
+ platform_device_unregister(priv->ipc_dev);
+
+ for (i = 0; i < priv->num_domains; i++) {
+ device_link_del(priv->link[i]);
+ dev_pm_domain_detach(priv->pd_dev[i], false);
+ }
+
+ return 0;
+}
+
+/* on i.MX8 there is 1 to 1 match between type and BAR idx */
+static int imx8_get_bar_index(struct snd_sof_dev *sdev, u32 type)
+{
+ return type;
+}
+
+static void imx8_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz)
+{
+ sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
+}
+
+static int imx8_ipc_pcm_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply)
+{
+ return 0;
+}
+
+static struct snd_soc_dai_driver imx8_dai[] = {
+{
+ .name = "esai0",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "sai1",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+},
+};
+
+/* i.MX8 ops */
+struct snd_sof_dsp_ops sof_imx8_ops = {
+ /* probe and remove */
+ .probe = imx8_probe,
+ .remove = imx8_remove,
+ /* DSP core boot */
+ .run = imx8_run,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Module IO */
+ .read64 = sof_io_read64,
+
+ /* ipc */
+ .send_msg = imx8_send_msg,
+ .fw_ready = sof_fw_ready,
+ .get_mailbox_offset = imx8_get_mailbox_offset,
+ .get_window_offset = imx8_get_window_offset,
+
+ .ipc_msg_data = imx8_ipc_msg_data,
+ .ipc_pcm_params = imx8_ipc_pcm_params,
+
+ /* module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+ .get_bar_index = imx8_get_bar_index,
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* Debug information */
+ .dbg_dump = imx8_dump,
+
+ /* Firmware ops */
+ .arch_ops = &sof_xtensa_arch_ops,
+
+ /* DAI drivers */
+ .drv = imx8_dai,
+ .num_drv = ARRAY_SIZE(imx8_dai),
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+};
+EXPORT_SYMBOL(sof_imx8_ops);
+
+/* i.MX8X ops */
+struct snd_sof_dsp_ops sof_imx8x_ops = {
+ /* probe and remove */
+ .probe = imx8_probe,
+ .remove = imx8_remove,
+ /* DSP core boot */
+ .run = imx8x_run,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Module IO */
+ .read64 = sof_io_read64,
+
+ /* ipc */
+ .send_msg = imx8_send_msg,
+ .fw_ready = sof_fw_ready,
+ .get_mailbox_offset = imx8_get_mailbox_offset,
+ .get_window_offset = imx8_get_window_offset,
+
+ .ipc_msg_data = imx8_ipc_msg_data,
+ .ipc_pcm_params = imx8_ipc_pcm_params,
+
+ /* module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+ .get_bar_index = imx8_get_bar_index,
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* Debug information */
+ .dbg_dump = imx8_dump,
+
+ /* Firmware ops */
+ .arch_ops = &sof_xtensa_arch_ops,
+
+ /* DAI drivers */
+ .drv = imx8_dai,
+ .num_drv = ARRAY_SIZE(imx8_dai),
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP
+};
+EXPORT_SYMBOL(sof_imx8x_ops);
+
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c
new file mode 100644
index 000000000..6943c0527
--- /dev/null
+++ b/sound/soc/sof/imx/imx8m.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright 2020 NXP
+//
+// Author: Daniel Baluta <daniel.baluta@nxp.com>
+//
+// Hardware interface for audio DSP on i.MX8M
+
+#include <linux/firmware.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include <linux/firmware/imx/dsp.h>
+
+#include "../ops.h"
+#include "imx-common.h"
+
+#define MBOX_OFFSET 0x800000
+#define MBOX_SIZE 0x1000
+
+struct imx8m_priv {
+ struct device *dev;
+ struct snd_sof_dev *sdev;
+
+ /* DSP IPC handler */
+ struct imx_dsp_ipc *dsp_ipc;
+ struct platform_device *ipc_dev;
+};
+
+static void imx8m_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply reply;
+ int ret = 0;
+
+ if (!msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt\n");
+ return;
+ }
+
+ /* get reply */
+ sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));
+
+ if (reply.error < 0) {
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ ret = reply.error;
+ } else {
+ /* reply has correct size? */
+ if (reply.hdr.size != msg->reply_size) {
+ dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
+ msg->reply_size, reply.hdr.size);
+ ret = -EINVAL;
+ }
+
+ /* read the message */
+ if (msg->reply_size > 0)
+ sof_mailbox_read(sdev, sdev->host_box.offset,
+ msg->reply_data, msg->reply_size);
+ }
+
+ msg->reply_error = ret;
+}
+
+static int imx8m_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+
+static int imx8m_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+
+static void imx8m_dsp_handle_reply(struct imx_dsp_ipc *ipc)
+{
+ struct imx8m_priv *priv = imx_dsp_get_data(ipc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sdev->ipc_lock, flags);
+ imx8m_get_reply(priv->sdev);
+ snd_sof_ipc_reply(priv->sdev, 0);
+ spin_unlock_irqrestore(&priv->sdev->ipc_lock, flags);
+}
+
+static void imx8m_dsp_handle_request(struct imx_dsp_ipc *ipc)
+{
+ struct imx8m_priv *priv = imx_dsp_get_data(ipc);
+ u32 p; /* Panic code */
+
+ /* Read the message from the debug box. */
+ sof_mailbox_read(priv->sdev, priv->sdev->debug_box.offset + 4, &p, sizeof(p));
+
+ /* Check to see if the message is a panic code (0x0dead***) */
+ if ((p & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC)
+ snd_sof_dsp_panic(priv->sdev, p);
+ else
+ snd_sof_ipc_msgs_rx(priv->sdev);
+}
+
+static struct imx_dsp_ops imx8m_dsp_ops = {
+ .handle_reply = imx8m_dsp_handle_reply,
+ .handle_request = imx8m_dsp_handle_request,
+};
+
+static int imx8m_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct imx8m_priv *priv = sdev->pdata->hw_pdata;
+
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ imx_dsp_ring_doorbell(priv->dsp_ipc, 0);
+
+ return 0;
+}
+
+/*
+ * DSP control.
+ */
+static int imx8m_run(struct snd_sof_dev *sdev)
+{
+ /* TODO: start DSP using Audio MIX bits */
+ return 0;
+}
+
+static int imx8m_probe(struct snd_sof_dev *sdev)
+{
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *res_node;
+ struct resource *mmio;
+ struct imx8m_priv *priv;
+ struct resource res;
+ u32 base, size;
+ int ret = 0;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ sdev->pdata->hw_pdata = priv;
+ priv->dev = sdev->dev;
+ priv->sdev = sdev;
+
+ priv->ipc_dev = platform_device_register_data(sdev->dev, "imx-dsp",
+ PLATFORM_DEVID_NONE,
+ pdev, sizeof(*pdev));
+ if (IS_ERR(priv->ipc_dev))
+ return PTR_ERR(priv->ipc_dev);
+
+ priv->dsp_ipc = dev_get_drvdata(&priv->ipc_dev->dev);
+ if (!priv->dsp_ipc) {
+ /* DSP IPC driver not probed yet, try later */
+ ret = -EPROBE_DEFER;
+ dev_err(sdev->dev, "Failed to get drvdata\n");
+ goto exit_pdev_unregister;
+ }
+
+ imx_dsp_set_data(priv->dsp_ipc, priv);
+ priv->dsp_ipc->ops = &imx8m_dsp_ops;
+
+ /* DSP base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get DSP base at idx 0\n");
+ ret = -EINVAL;
+ goto exit_pdev_unregister;
+ }
+
+ sdev->bar[SOF_FW_BLK_TYPE_IRAM] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[SOF_FW_BLK_TYPE_IRAM]) {
+ dev_err(sdev->dev, "failed to ioremap base 0x%x size 0x%x\n",
+ base, size);
+ ret = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+ sdev->mmio_bar = SOF_FW_BLK_TYPE_IRAM;
+
+ res_node = of_parse_phandle(np, "memory-region", 0);
+ if (!res_node) {
+ dev_err(&pdev->dev, "failed to get memory region node\n");
+ ret = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+
+ ret = of_address_to_resource(res_node, 0, &res);
+ of_node_put(res_node);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get reserved region address\n");
+ goto exit_pdev_unregister;
+ }
+
+ sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap_wc(sdev->dev, res.start,
+ resource_size(&res));
+ if (!sdev->bar[SOF_FW_BLK_TYPE_SRAM]) {
+ dev_err(sdev->dev, "failed to ioremap mem 0x%x size 0x%x\n",
+ base, size);
+ ret = -ENOMEM;
+ goto exit_pdev_unregister;
+ }
+ sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return 0;
+
+exit_pdev_unregister:
+ platform_device_unregister(priv->ipc_dev);
+ return ret;
+}
+
+static int imx8m_remove(struct snd_sof_dev *sdev)
+{
+ struct imx8m_priv *priv = sdev->pdata->hw_pdata;
+
+ platform_device_unregister(priv->ipc_dev);
+
+ return 0;
+}
+
+/* on i.MX8M there is 1 to 1 match between type and BAR idx */
+static int imx8m_get_bar_index(struct snd_sof_dev *sdev, u32 type)
+{
+ return type;
+}
+
+static void imx8m_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz)
+{
+ sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
+}
+
+static int imx8m_ipc_pcm_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply)
+{
+ return 0;
+}
+
+static struct snd_soc_dai_driver imx8m_dai[] = {
+{
+ .name = "sai3",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+},
+};
+
+/* i.MX8M ops */
+struct snd_sof_dsp_ops sof_imx8m_ops = {
+ /* probe and remove */
+ .probe = imx8m_probe,
+ .remove = imx8m_remove,
+ /* DSP core boot */
+ .run = imx8m_run,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* Module IO */
+ .read64 = sof_io_read64,
+
+ /* ipc */
+ .send_msg = imx8m_send_msg,
+ .fw_ready = sof_fw_ready,
+ .get_mailbox_offset = imx8m_get_mailbox_offset,
+ .get_window_offset = imx8m_get_window_offset,
+
+ .ipc_msg_data = imx8m_ipc_msg_data,
+ .ipc_pcm_params = imx8m_ipc_pcm_params,
+
+ /* module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+ .get_bar_index = imx8m_get_bar_index,
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* Debug information */
+ .dbg_dump = imx8_dump,
+
+ /* Firmware ops */
+ .arch_ops = &sof_xtensa_arch_ops,
+
+ /* DAI drivers */
+ .drv = imx8m_dai,
+ .num_drv = ARRAY_SIZE(imx8m_dai),
+
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+};
+EXPORT_SYMBOL(sof_imx8m_ops);
+
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig
new file mode 100644
index 000000000..6708a2c5a
--- /dev/null
+++ b/sound/soc/sof/intel/Kconfig
@@ -0,0 +1,364 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SND_SOC_SOF_INTEL_TOPLEVEL
+ bool "SOF support for Intel audio DSPs"
+ depends on X86 || COMPILE_TEST
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+if SND_SOC_SOF_INTEL_TOPLEVEL
+
+config SND_SOC_SOF_INTEL_ACPI
+ def_tristate SND_SOC_SOF_ACPI
+ select SND_SOC_SOF_BAYTRAIL if SND_SOC_SOF_BAYTRAIL_SUPPORT
+ select SND_SOC_SOF_BROADWELL if SND_SOC_SOF_BROADWELL_SUPPORT
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_INTEL_PCI
+ def_tristate SND_SOC_SOF_PCI
+ select SND_SOC_SOF_MERRIFIELD if SND_SOC_SOF_MERRIFIELD_SUPPORT
+ select SND_SOC_SOF_APOLLOLAKE if SND_SOC_SOF_APOLLOLAKE_SUPPORT
+ select SND_SOC_SOF_GEMINILAKE if SND_SOC_SOF_GEMINILAKE_SUPPORT
+ select SND_SOC_SOF_CANNONLAKE if SND_SOC_SOF_CANNONLAKE_SUPPORT
+ select SND_SOC_SOF_COFFEELAKE if SND_SOC_SOF_COFFEELAKE_SUPPORT
+ select SND_SOC_SOF_ICELAKE if SND_SOC_SOF_ICELAKE_SUPPORT
+ select SND_SOC_SOF_COMETLAKE if SND_SOC_SOF_COMETLAKE_SUPPORT
+ select SND_SOC_SOF_TIGERLAKE if SND_SOC_SOF_TIGERLAKE_SUPPORT
+ select SND_SOC_SOF_ELKHARTLAKE if SND_SOC_SOF_ELKHARTLAKE_SUPPORT
+ select SND_SOC_SOF_JASPERLAKE if SND_SOC_SOF_JASPERLAKE_SUPPORT
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_INTEL_HIFI_EP_IPC
+ tristate
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_INTEL_ATOM_HIFI_EP
+ tristate
+ select SND_SOC_SOF_INTEL_COMMON
+ select SND_SOC_SOF_INTEL_HIFI_EP_IPC
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_INTEL_COMMON
+ tristate
+ select SND_SOC_ACPI_INTEL_MATCH
+ select SND_SOC_SOF_XTENSA
+ select SND_SOC_INTEL_MACH
+ select SND_SOC_ACPI if ACPI
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+if SND_SOC_SOF_INTEL_ACPI
+
+config SND_SOC_SOF_BAYTRAIL_SUPPORT
+ bool "SOF support for Baytrail, Braswell and Cherrytrail"
+ depends on SND_SST_ATOM_HIFI2_PLATFORM_ACPI=n
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Baytrail, Braswell or Cherrytrail processors.
+ This option is mutually exclusive with the Atom/SST and Baytrail
+ legacy drivers. If you want to enable SOF on Baytrail/Cherrytrail,
+ you need to deselect those options first.
+ SOF does not support Baytrail-CR for now, so this option is not
+ recommended for distros. At some point all legacy drivers will be
+ deprecated but not before all userspace firmware/topology/UCM files
+ are made available to downstream distros.
+ Say Y if you want to enable SOF on Baytrail/Cherrytrail
+ If unsure select "N".
+
+config SND_SOC_SOF_BAYTRAIL
+ tristate
+ select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_BROADWELL_SUPPORT
+ bool "SOF support for Broadwell"
+ depends on SND_SOC_INTEL_CATPT=n
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Broadwell processors.
+ This option is mutually exclusive with the Haswell/Broadwell legacy
+ driver. If you want to enable SOF on Broadwell you need to deselect
+ the legacy driver first.
+ SOF does not fully support Broadwell yet, so this option is not
+ recommended for distros. At some point all legacy drivers will be
+ deprecated but not before all userspace firmware/topology/UCM files
+ are made available to downstream distros.
+ Say Y if you want to enable SOF on Broadwell
+ If unsure select "N".
+
+config SND_SOC_SOF_BROADWELL
+ tristate
+ select SND_SOC_SOF_INTEL_COMMON
+ select SND_SOC_SOF_INTEL_HIFI_EP_IPC
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+endif ## SND_SOC_SOF_INTEL_ACPI
+
+if SND_SOC_SOF_INTEL_PCI
+
+config SND_SOC_SOF_MERRIFIELD_SUPPORT
+ bool "SOF support for Tangier/Merrifield"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Tangier/Merrifield processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_MERRIFIELD
+ tristate
+ select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_APOLLOLAKE_SUPPORT
+ bool "SOF support for Apollolake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Apollolake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_APOLLOLAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_GEMINILAKE_SUPPORT
+ bool "SOF support for GeminiLake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Geminilake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_GEMINILAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_CANNONLAKE_SUPPORT
+ bool "SOF support for Cannonlake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Cannonlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_CANNONLAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_COFFEELAKE_SUPPORT
+ bool "SOF support for CoffeeLake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Coffeelake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_COFFEELAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_ICELAKE_SUPPORT
+ bool "SOF support for Icelake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Icelake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_ICELAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_COMETLAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_COMETLAKE_SUPPORT
+ bool
+
+config SND_SOC_SOF_COMETLAKE_LP_SUPPORT
+ bool "SOF support for CometLake"
+ select SND_SOC_SOF_COMETLAKE_SUPPORT
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Cometlake processors.
+ If unsure select "N".
+
+config SND_SOC_SOF_TIGERLAKE_SUPPORT
+ bool "SOF support for Tigerlake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Tigerlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_TIGERLAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_ELKHARTLAKE_SUPPORT
+ bool "SOF support for ElkhartLake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the ElkhartLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_ELKHARTLAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_JASPERLAKE_SUPPORT
+ bool "SOF support for JasperLake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the JasperLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_JASPERLAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_HDA_COMMON
+ tristate
+ select SND_INTEL_DSP_CONFIG
+ select SND_SOC_SOF_INTEL_COMMON
+ select SND_SOC_SOF_HDA_LINK_BASELINE
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+if SND_SOC_SOF_HDA_COMMON
+
+config SND_SOC_SOF_HDA_LINK
+ bool "SOF support for HDA Links(HDA/HDMI)"
+ depends on SND_SOC_SOF_NOCODEC=n
+ select SND_SOC_SOF_PROBE_WORK_QUEUE
+ help
+ This adds support for HDA links (HDA/HDMI) with Sound Open Firmware
+ for Intel(R) platforms.
+ Say Y if you want to enable HDA links with SOF.
+ If unsure select "N".
+
+config SND_SOC_SOF_HDA_AUDIO_CODEC
+ bool "SOF support for HDAudio codecs"
+ depends on SND_SOC_SOF_HDA_LINK
+ help
+ This adds support for HDAudio codecs with Sound Open Firmware
+ for Intel(R) platforms.
+ Say Y if you want to enable HDAudio codecs with SOF.
+ If unsure select "N".
+
+config SND_SOC_SOF_HDA_PROBES
+ bool "SOF enable probes over HDA"
+ depends on SND_SOC_SOF_DEBUG_PROBES
+ help
+ This option enables data probing for Intel(R) Skylake and newer platforms.
+ Say Y if you want to enable probes.
+ If unsure, select "N".
+
+config SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1
+ bool "SOF enable DMI Link L1"
+ help
+ This option enables DMI L1 for both playback and capture
+ and disables known workarounds for specific HDAudio platforms.
+ Only use to look into power optimizations on platforms not
+ affected by DMI L1 issues. This option is not recommended.
+ Say Y if you want to enable DMI Link L1
+ If unsure, select "N".
+
+endif ## SND_SOC_SOF_HDA_COMMON
+
+config SND_SOC_SOF_HDA_LINK_BASELINE
+ tristate
+ select SND_SOC_SOF_HDA if SND_SOC_SOF_HDA_LINK
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_HDA
+ tristate
+ select SND_HDA_EXT_CORE if SND_SOC_SOF_HDA_LINK
+ select SND_SOC_HDAC_HDA if SND_SOC_SOF_HDA_AUDIO_CODEC
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK
+ bool "SOF support for SoundWire"
+ depends on ACPI
+ help
+ This adds support for SoundWire with Sound Open Firmware
+ for Intel(R) platforms.
+ Say Y if you want to enable SoundWire links with SOF.
+ If unsure select "N".
+
+config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ tristate
+ select SND_SOC_SOF_INTEL_SOUNDWIRE if SND_SOC_SOF_INTEL_SOUNDWIRE_LINK
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_INTEL_SOUNDWIRE
+ tristate
+ select SOUNDWIRE
+ select SOUNDWIRE_INTEL
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+endif ## SND_SOC_SOF_INTEL_PCI
+
+endif ## SND_SOC_SOF_INTEL_TOPLEVEL
diff --git a/sound/soc/sof/intel/Makefile b/sound/soc/sof/intel/Makefile
new file mode 100644
index 000000000..72d85b25d
--- /dev/null
+++ b/sound/soc/sof/intel/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+snd-sof-intel-byt-objs := byt.o
+snd-sof-intel-bdw-objs := bdw.o
+
+snd-sof-intel-ipc-objs := intel-ipc.o
+
+snd-sof-intel-hda-common-objs := hda.o hda-loader.o hda-stream.o hda-trace.o \
+ hda-dsp.o hda-ipc.o hda-ctrl.o hda-pcm.o \
+ hda-dai.o hda-bus.o \
+ apl.o cnl.o tgl.o
+snd-sof-intel-hda-common-$(CONFIG_SND_SOC_SOF_HDA_PROBES) += hda-compress.o
+
+snd-sof-intel-hda-objs := hda-codec.o
+
+obj-$(CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP) += snd-sof-intel-byt.o
+obj-$(CONFIG_SND_SOC_SOF_BROADWELL) += snd-sof-intel-bdw.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_HIFI_EP_IPC) += snd-sof-intel-ipc.o
+obj-$(CONFIG_SND_SOC_SOF_HDA_COMMON) += snd-sof-intel-hda-common.o
+obj-$(CONFIG_SND_SOC_SOF_HDA) += snd-sof-intel-hda.o
diff --git a/sound/soc/sof/intel/apl.c b/sound/soc/sof/intel/apl.c
new file mode 100644
index 000000000..4eeade2e7
--- /dev/null
+++ b/sound/soc/sof/intel/apl.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Apollolake and GeminiLake
+ */
+
+#include "../sof-priv.h"
+#include "hda.h"
+#include "../sof-audio.h"
+
+static const struct snd_sof_debugfs_map apl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+/* apollolake ops */
+const struct snd_sof_dsp_ops sof_apl_ops = {
+ /* probe and remove */
+ .probe = hda_dsp_probe,
+ .remove = hda_dsp_remove,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* doorbell */
+ .irq_thread = hda_dsp_ipc_irq_thread,
+
+ /* ipc */
+ .send_msg = hda_dsp_ipc_send_msg,
+ .fw_ready = sof_fw_ready,
+ .get_mailbox_offset = hda_dsp_ipc_get_mailbox_offset,
+ .get_window_offset = hda_dsp_ipc_get_window_offset,
+
+ .ipc_msg_data = hda_ipc_msg_data,
+ .ipc_pcm_params = hda_ipc_pcm_params,
+
+ /* machine driver */
+ .machine_select = hda_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = hda_set_mach_params,
+
+ /* debug */
+ .debug_map = apl_dsp_debugfs,
+ .debug_map_count = ARRAY_SIZE(apl_dsp_debugfs),
+ .dbg_dump = hda_dsp_dump,
+ .ipc_dump = hda_ipc_dump,
+
+ /* stream callbacks */
+ .pcm_open = hda_dsp_pcm_open,
+ .pcm_close = hda_dsp_pcm_close,
+ .pcm_hw_params = hda_dsp_pcm_hw_params,
+ .pcm_hw_free = hda_dsp_stream_hw_free,
+ .pcm_trigger = hda_dsp_pcm_trigger,
+ .pcm_pointer = hda_dsp_pcm_pointer,
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+ /* probe callbacks */
+ .probe_assign = hda_probe_compr_assign,
+ .probe_free = hda_probe_compr_free,
+ .probe_set_params = hda_probe_compr_set_params,
+ .probe_trigger = hda_probe_compr_trigger,
+ .probe_pointer = hda_probe_compr_pointer,
+#endif
+
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_raw,
+
+ /* firmware run */
+ .run = hda_dsp_cl_boot_firmware,
+
+ /* pre/post fw run */
+ .pre_fw_run = hda_dsp_pre_fw_run,
+ .post_fw_run = hda_dsp_post_fw_run,
+
+ /* dsp core power up/down */
+ .core_power_up = hda_dsp_enable_core,
+ .core_power_down = hda_dsp_core_reset_power_down,
+
+ /* trace callback */
+ .trace_init = hda_dsp_trace_init,
+ .trace_release = hda_dsp_trace_release,
+ .trace_trigger = hda_dsp_trace_trigger,
+
+ /* DAI drivers */
+ .drv = skl_dai,
+ .num_drv = SOF_SKL_NUM_DAIS,
+
+ /* PM */
+ .suspend = hda_dsp_suspend,
+ .resume = hda_dsp_resume,
+ .runtime_suspend = hda_dsp_runtime_suspend,
+ .runtime_resume = hda_dsp_runtime_resume,
+ .runtime_idle = hda_dsp_runtime_idle,
+ .set_hw_params_upon_resume = hda_dsp_set_hw_params_upon_resume,
+ .set_power_state = hda_dsp_set_power_state,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+
+ .arch_ops = &sof_xtensa_arch_ops,
+};
+EXPORT_SYMBOL_NS(sof_apl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc apl_chip_info = {
+ /* Apollolake */
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(1, 0),
+ .ipc_req = HDA_DSP_REG_HIPCI,
+ .ipc_req_mask = HDA_DSP_REG_HIPCI_BUSY,
+ .ipc_ack = HDA_DSP_REG_HIPCIE,
+ .ipc_ack_mask = HDA_DSP_REG_HIPCIE_DONE,
+ .ipc_ctl = HDA_DSP_REG_HIPCCTL,
+ .rom_init_timeout = 150,
+ .ssp_count = APL_SSP_COUNT,
+ .ssp_base_offset = APL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL_NS(apl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/bdw.c b/sound/soc/sof/intel/bdw.c
new file mode 100644
index 000000000..50a4a73e6
--- /dev/null
+++ b/sound/soc/sof/intel/bdw.c
@@ -0,0 +1,664 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Broadwell
+ */
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include "../ops.h"
+#include "shim.h"
+#include "../sof-audio.h"
+
+/* BARs */
+#define BDW_DSP_BAR 0
+#define BDW_PCI_BAR 1
+
+/*
+ * Debug
+ */
+
+/* DSP memories for BDW */
+#define IRAM_OFFSET 0xA0000
+#define BDW_IRAM_SIZE (10 * 32 * 1024)
+#define DRAM_OFFSET 0x00000
+#define BDW_DRAM_SIZE (20 * 32 * 1024)
+#define SHIM_OFFSET 0xFB000
+#define SHIM_SIZE 0x100
+#define MBOX_OFFSET 0x9E000
+#define MBOX_SIZE 0x1000
+#define MBOX_DUMP_SIZE 0x30
+#define EXCEPT_OFFSET 0x800
+#define EXCEPT_MAX_HDR_SIZE 0x400
+
+/* DSP peripherals */
+#define DMAC0_OFFSET 0xFE000
+#define DMAC1_OFFSET 0xFF000
+#define DMAC_SIZE 0x420
+#define SSP0_OFFSET 0xFC000
+#define SSP1_OFFSET 0xFD000
+#define SSP_SIZE 0x100
+
+#define BDW_STACK_DUMP_SIZE 32
+
+#define BDW_PANIC_OFFSET(x) ((x) & 0xFFFF)
+
+static const struct snd_sof_debugfs_map bdw_debugfs[] = {
+ {"dmac0", BDW_DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", BDW_DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", BDW_DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", BDW_DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", BDW_DSP_BAR, IRAM_OFFSET, BDW_IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", BDW_DSP_BAR, DRAM_OFFSET, BDW_DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", BDW_DSP_BAR, SHIM_OFFSET, SHIM_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void bdw_host_done(struct snd_sof_dev *sdev);
+static void bdw_dsp_done(struct snd_sof_dev *sdev);
+static void bdw_get_reply(struct snd_sof_dev *sdev);
+
+/*
+ * DSP Control.
+ */
+
+static int bdw_run(struct snd_sof_dev *sdev)
+{
+ /* set opportunistic mode on engine 0,1 for all channels */
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_HMDC,
+ SHIM_HMDC_HDDA_E0_ALLCH |
+ SHIM_HMDC_HDDA_E1_ALLCH, 0);
+
+ /* set DSP to RUN */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_STALL, 0x0);
+
+ /* return init core mask */
+ return 1;
+}
+
+static int bdw_reset(struct snd_sof_dev *sdev)
+{
+ /* put DSP into reset and stall */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_RST | SHIM_CSR_STALL,
+ SHIM_CSR_RST | SHIM_CSR_STALL);
+
+ /* keep in reset for 10ms */
+ mdelay(10);
+
+ /* take DSP out of reset and keep stalled for FW loading */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_RST | SHIM_CSR_STALL,
+ SHIM_CSR_STALL);
+
+ return 0;
+}
+
+static int bdw_set_dsp_D0(struct snd_sof_dev *sdev)
+{
+ int tries = 10;
+ u32 reg;
+
+ /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
+ PCI_VDRTCL2_DCLCGE |
+ PCI_VDRTCL2_DTCGE, 0);
+
+ /* Disable D3PG (VDRTCTL0.D3PGD = 1) */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL0,
+ PCI_VDRTCL0_D3PGD, PCI_VDRTCL0_D3PGD);
+
+ /* Set D0 state */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_PMCS,
+ PCI_PMCS_PS_MASK, 0);
+
+ /* check that ADSP shim is enabled */
+ while (tries--) {
+ reg = readl(sdev->bar[BDW_PCI_BAR] + PCI_PMCS)
+ & PCI_PMCS_PS_MASK;
+ if (reg == 0)
+ goto finish;
+
+ msleep(20);
+ }
+
+ return -ENODEV;
+
+finish:
+ /*
+ * select SSP1 19.2MHz base clock, SSP clock 0,
+ * turn off Low Power Clock
+ */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_S1IOCS | SHIM_CSR_SBCS1 |
+ SHIM_CSR_LPCS, 0x0);
+
+ /* stall DSP core, set clk to 192/96Mhz */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
+ SHIM_CSR, SHIM_CSR_STALL |
+ SHIM_CSR_DCS_MASK,
+ SHIM_CSR_STALL |
+ SHIM_CSR_DCS(4));
+
+ /* Set 24MHz MCLK, prevent local clock gating, enable SSP0 clock */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CLKCTL,
+ SHIM_CLKCTL_MASK |
+ SHIM_CLKCTL_DCPLCG |
+ SHIM_CLKCTL_SCOE0,
+ SHIM_CLKCTL_MASK |
+ SHIM_CLKCTL_DCPLCG |
+ SHIM_CLKCTL_SCOE0);
+
+ /* Stall and reset core, set CSR */
+ bdw_reset(sdev);
+
+ /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
+ PCI_VDRTCL2_DCLCGE |
+ PCI_VDRTCL2_DTCGE,
+ PCI_VDRTCL2_DCLCGE |
+ PCI_VDRTCL2_DTCGE);
+
+ usleep_range(50, 55);
+
+ /* switch on audio PLL */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
+ PCI_VDRTCL2_APLLSE_MASK, 0);
+
+ /*
+ * set default power gating control: enable power gating for all
+ * blocks, i.e. they cannot be accessed until each block is
+ * explicitly enabled.
+ */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL0,
+ 0xfffffffC, 0x0);
+
+ /* disable DMA finish function for SSP0 & SSP1 */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR2,
+ SHIM_CSR2_SDFD_SSP1,
+ SHIM_CSR2_SDFD_SSP1);
+
+ /* set on-demand mode on engine 0,1 for all channels */
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_HMDC,
+ SHIM_HMDC_HDDA_E0_ALLCH |
+ SHIM_HMDC_HDDA_E1_ALLCH,
+ SHIM_HMDC_HDDA_E0_ALLCH |
+ SHIM_HMDC_HDDA_E1_ALLCH);
+
+ /* Enable Interrupt from both sides */
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_IMRX,
+ (SHIM_IMRX_BUSY | SHIM_IMRX_DONE), 0x0);
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_IMRD,
+ (SHIM_IMRD_DONE | SHIM_IMRD_BUSY |
+ SHIM_IMRD_SSP0 | SHIM_IMRD_DMAC), 0x0);
+
+ /* clear IPC registers */
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCX, 0x0);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCD, 0x0);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, 0x80, 0x6);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, 0xe0, 0x300a);
+
+ return 0;
+}
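bdw_set_dsp_D0() above uses a classic bounded-poll pattern when waiting for the power-state field of PCI_PMCS to read zero: up to ten reads with a 20 ms sleep between them, returning -ENODEV if the device never reports D0. A generic, runnable sketch of that idiom in userspace C; read_status() is a hypothetical stand-in for the register read:

#include <stdio.h>
#include <unistd.h>

/* stand-in for the hardware read; pretends D0 is reached on the 3rd poll */
static unsigned int read_status(void)
{
	static int calls;
	return ++calls < 3 ? 0x3 : 0x0;
}

/* poll until the status reads zero or the retry budget is exhausted */
static int wait_for_d0(int tries, unsigned int delay_ms)
{
	while (tries--) {
		if (read_status() == 0)
			return 0;               /* device reached D0 */
		usleep(delay_ms * 1000);        /* the driver uses msleep() */
	}
	return -1;                              /* treated as -ENODEV */
}

int main(void)
{
	return wait_for_d0(10, 20) ? 1 : 0;
}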
+
+static void bdw_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
+ /* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* note: variable AR register array is not read */
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+ offset += xoops->arch_hdr.totalsize;
+ sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
+}
+
+static void bdw_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[BDW_STACK_DUMP_SIZE];
+ u32 status, panic, imrx, imrd;
+
+ /* now try generic SOF status messages */
+ status = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);
+ panic = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCX);
+ bdw_get_registers(sdev, &xoops, &panic_info, stack,
+ BDW_STACK_DUMP_SIZE);
+ snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, stack,
+ BDW_STACK_DUMP_SIZE);
+
+ /* provide some context for firmware debug */
+ imrx = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IMRX);
+ imrd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IMRD);
+ dev_err(sdev->dev,
+ "error: ipc host -> DSP: pending %s complete %s raw 0x%8.8x\n",
+ (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
+ (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ dev_err(sdev->dev,
+ "error: mask host: pending %s complete %s raw 0x%8.8x\n",
+ (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
+ (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ dev_err(sdev->dev,
+ "error: ipc DSP -> host: pending %s complete %s raw 0x%8.8x\n",
+ (status & SHIM_IPCD_BUSY) ? "yes" : "no",
+ (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ dev_err(sdev->dev,
+ "error: mask DSP: pending %s complete %s raw 0x%8.8x\n",
+ (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
+ (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
+}
+
+/*
+ * IPC Doorbell IRQ handler and thread.
+ */
+
+static irqreturn_t bdw_irq_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 isr;
+ int ret = IRQ_NONE;
+
+ /* Interrupt arrived, check src */
+ isr = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_ISRX);
+ if (isr & (SHIM_ISRX_DONE | SHIM_ISRX_BUSY))
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
+}
+
+static irqreturn_t bdw_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 ipcx, ipcd, imrx;
+
+ imrx = snd_sof_dsp_read64(sdev, BDW_DSP_BAR, SHIM_IMRX);
+ ipcx = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCX);
+
+ /* reply message from DSP */
+ if (ipcx & SHIM_IPCX_DONE &&
+ !(imrx & SHIM_IMRX_DONE)) {
+ /* Mask Done interrupt before return */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
+ SHIM_IMRX, SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+ /*
+ * Handle the immediate reply from the DSP core. If the
+ * message is found, the done bit is set in cmd_done(),
+ * which is called at the end of message processing;
+ * otherwise set it here, because the done bit cannot be
+ * set in cmd_done(), which is only triggered by a message.
+ */
+ bdw_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, ipcx);
+
+ bdw_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ }
+
+ ipcd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);
+
+ /* new message from DSP */
+ if (ipcd & SHIM_IPCD_BUSY &&
+ !(imrx & SHIM_IMRX_BUSY)) {
+ /* Mask Busy interrupt before return */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
+ SHIM_IMRX, SHIM_IMRX_BUSY,
+ SHIM_IMRX_BUSY);
+
+ /* Handle messages from DSP Core */
+ if ((ipcd & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(sdev, BDW_PANIC_OFFSET(ipcx) +
+ MBOX_OFFSET);
+ } else {
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ bdw_host_done(sdev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * IPC Mailbox IO
+ */
+
+static int bdw_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ /* send the message */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCX, SHIM_IPCX_BUSY);
+
+ return 0;
+}
+
+static void bdw_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply reply;
+ int ret = 0;
+
+ /*
+ * Occasionally an unexpected reply IPC arrives that does not
+ * correspond to any IPC sent by the driver. In that case the
+ * driver must ignore it.
+ */
+ if (!msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
+ return;
+ }
+
+ /* get reply */
+ sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));
+
+ if (reply.error < 0) {
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ ret = reply.error;
+ } else {
+ /* reply correct size ? */
+ if (reply.hdr.size != msg->reply_size) {
+ dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
+ msg->reply_size, reply.hdr.size);
+ ret = -EINVAL;
+ }
+
+ /* read the message */
+ if (msg->reply_size > 0)
+ sof_mailbox_read(sdev, sdev->host_box.offset,
+ msg->reply_data, msg->reply_size);
+ }
+
+ msg->reply_error = ret;
+}
+
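+/* BDW uses a single fixed offset for both the mailbox and the FW-ready window */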
+static int bdw_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+
+static int bdw_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+
+static void bdw_host_done(struct snd_sof_dev *sdev)
+{
+ /* clear BUSY bit and set DONE bit - accept new messages */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IPCD,
+ SHIM_IPCD_BUSY | SHIM_IPCD_DONE,
+ SHIM_IPCD_DONE);
+
+ /* unmask busy interrupt */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY, 0);
+}
+
+static void bdw_dsp_done(struct snd_sof_dev *sdev)
+{
+ /* clear DONE bit - tell DSP we have completed */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IPCX,
+ SHIM_IPCX_DONE, 0);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_DONE, 0);
+}
+
+/*
+ * Probe and remove.
+ */
+static int bdw_probe(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ struct resource *mmio;
+ u32 base, size;
+ int ret;
+
+ /* LPE base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_lpe_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get LPE base at idx %d\n",
+ desc->resindex_lpe_base);
+ return -EINVAL;
+ }
+
+	dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x\n", base, size);
+ sdev->bar[BDW_DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BDW_DSP_BAR]) {
+ dev_err(sdev->dev,
+ "error: failed to ioremap LPE base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[BDW_DSP_BAR]);
+
+ /* TODO: add offsets */
+ sdev->mmio_bar = BDW_DSP_BAR;
+ sdev->mailbox_bar = BDW_DSP_BAR;
+ sdev->dsp_oops_offset = MBOX_OFFSET;
+
+ /* PCI base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_pcicfg_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get PCI base at idx %d\n",
+ desc->resindex_pcicfg_base);
+ return -ENODEV;
+ }
+
+	dev_dbg(sdev->dev, "PCI base at 0x%x size 0x%x\n", base, size);
+ sdev->bar[BDW_PCI_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BDW_PCI_BAR]) {
+ dev_err(sdev->dev,
+ "error: failed to ioremap PCI base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "PCI VADDR %p\n", sdev->bar[BDW_PCI_BAR]);
+
+ /* register our IRQ */
+ sdev->ipc_irq = platform_get_irq(pdev, desc->irqindex_host_ipc);
+ if (sdev->ipc_irq < 0)
+ return sdev->ipc_irq;
+
+ dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
+ ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
+ bdw_irq_handler, bdw_irq_thread,
+ IRQF_SHARED, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ return ret;
+ }
+
+ /* enable the DSP SHIM */
+ ret = bdw_set_dsp_D0(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DSP D0\n");
+ return ret;
+ }
+
+ /* DSP DMA can only access low 31 bits of host memory */
+ ret = dma_coerce_mask_and_coherent(sdev->dev, DMA_BIT_MASK(31));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
+ return ret;
+ }
+
+ /* set default mailbox */
+ snd_sof_dsp_mailbox_init(sdev, MBOX_OFFSET, MBOX_SIZE, 0, 0);
+
+ return ret;
+}
+
+static void bdw_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_soc_acpi_mach *mach;
+
+ mach = snd_soc_acpi_find_machine(desc->machines);
+ if (!mach) {
+ dev_warn(sdev->dev, "warning: No matching ASoC machine driver found\n");
+ return;
+ }
+
+ sof_pdata->tplg_filename = mach->sof_tplg_filename;
+ mach->mach_params.acpi_ipc_irq_index = desc->irqindex_host_ipc;
+ sof_pdata->machine = mach;
+}
+
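+/* record the SOF device name in mach_params for the machine driver */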
+static void bdw_set_mach_params(const struct snd_soc_acpi_mach *mach,
+ struct device *dev)
+{
+ struct snd_soc_acpi_mach_params *mach_params;
+
+ mach_params = (struct snd_soc_acpi_mach_params *)&mach->mach_params;
+ mach_params->platform = dev_name(dev);
+}
+
+/* Broadwell DAIs */
+static struct snd_soc_dai_driver bdw_dai[] = {
+{
+ .name = "ssp0-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp1-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+};
+
+/* broadwell ops */
+const struct snd_sof_dsp_ops sof_bdw_ops = {
+	/* Device init */
+ .probe = bdw_probe,
+
+ /* DSP Core Control */
+ .run = bdw_run,
+ .reset = bdw_reset,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* ipc */
+ .send_msg = bdw_send_msg,
+ .fw_ready = sof_fw_ready,
+ .get_mailbox_offset = bdw_get_mailbox_offset,
+ .get_window_offset = bdw_get_window_offset,
+
+ .ipc_msg_data = intel_ipc_msg_data,
+ .ipc_pcm_params = intel_ipc_pcm_params,
+
+ /* machine driver */
+ .machine_select = bdw_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = bdw_set_mach_params,
+
+ /* debug */
+ .debug_map = bdw_debugfs,
+ .debug_map_count = ARRAY_SIZE(bdw_debugfs),
+ .dbg_dump = bdw_dump,
+
+ /* stream callbacks */
+ .pcm_open = intel_pcm_open,
+ .pcm_close = intel_pcm_close,
+
+ /* Module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+
+	/* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* DAI drivers */
+ .drv = bdw_dai,
+ .num_drv = ARRAY_SIZE(bdw_dai),
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
+
+ .arch_ops = &sof_xtensa_arch_ops,
+};
+EXPORT_SYMBOL_NS(sof_bdw_ops, SND_SOC_SOF_BROADWELL);
+
+const struct sof_intel_dsp_desc bdw_chip_info = {
+ .cores_num = 1,
+ .host_managed_cores_mask = 1,
+};
+EXPORT_SYMBOL_NS(bdw_chip_info, SND_SOC_SOF_BROADWELL);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c
new file mode 100644
index 000000000..186736ee5
--- /dev/null
+++ b/sound/soc/sof/intel/byt.c
@@ -0,0 +1,987 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Baytrail, Braswell and Cherrytrail.
+ */
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include "../ops.h"
+#include "shim.h"
+#include "../sof-audio.h"
+#include "../../intel/common/soc-intel-quirks.h"
+
+/* DSP memories */
+#define IRAM_OFFSET 0x0C0000
+#define IRAM_SIZE (80 * 1024)
+#define DRAM_OFFSET 0x100000
+#define DRAM_SIZE (160 * 1024)
+#define SHIM_OFFSET 0x140000
+#define SHIM_SIZE_BYT 0x100
+#define SHIM_SIZE_CHT 0x118
+#define MBOX_OFFSET 0x144000
+#define MBOX_SIZE 0x1000
+#define EXCEPT_OFFSET 0x800
+#define EXCEPT_MAX_HDR_SIZE 0x400
+
+/* DSP peripherals */
+#define DMAC0_OFFSET 0x098000
+#define DMAC1_OFFSET 0x09c000
+#define DMAC2_OFFSET 0x094000
+#define DMAC_SIZE 0x420
+#define SSP0_OFFSET 0x0a0000
+#define SSP1_OFFSET 0x0a1000
+#define SSP2_OFFSET 0x0a2000
+#define SSP3_OFFSET 0x0a4000
+#define SSP4_OFFSET 0x0a5000
+#define SSP5_OFFSET 0x0a6000
+#define SSP_SIZE 0x100
+
+#define BYT_STACK_DUMP_SIZE 32
+
+#define BYT_PCI_BAR_SIZE 0x200000
+
+#define BYT_PANIC_OFFSET(x) (((x) & GENMASK_ULL(47, 32)) >> 32)
+
+/*
+ * Debug
+ */
+
+#define MBOX_DUMP_SIZE 0x30
+
+/* BARs */
+#define BYT_DSP_BAR 0
+#define BYT_PCI_BAR 1
+#define BYT_IMR_BAR 2
+
+static const struct snd_sof_debugfs_map byt_debugfs[] = {
+ {"dmac0", BYT_DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", BYT_DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", BYT_DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", BYT_DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp2", BYT_DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", BYT_DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE_BYT,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void byt_host_done(struct snd_sof_dev *sdev);
+static void byt_dsp_done(struct snd_sof_dev *sdev);
+static void byt_get_reply(struct snd_sof_dev *sdev);
+
+/*
+ * Debug
+ */
+
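+/* read the xtensa oops registers, panic info and stack from the DSP mailbox */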
+static void byt_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
+	/* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* note: variable AR register array is not read */
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+ offset += xoops->arch_hdr.totalsize;
+ sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
+}
+
+static void byt_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[BYT_STACK_DUMP_SIZE];
+ u64 status, panic, imrd, imrx;
+
+ /* now try generic SOF status messages */
+ status = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCD);
+ panic = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCX);
+ byt_get_registers(sdev, &xoops, &panic_info, stack,
+ BYT_STACK_DUMP_SIZE);
+ snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, stack,
+ BYT_STACK_DUMP_SIZE);
+
+ /* provide some context for firmware debug */
+ imrx = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IMRX);
+ imrd = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IMRD);
+ dev_err(sdev->dev,
+ "error: ipc host -> DSP: pending %s complete %s raw 0x%llx\n",
+ (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
+ (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ dev_err(sdev->dev,
+ "error: mask host: pending %s complete %s raw 0x%llx\n",
+ (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
+ (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ dev_err(sdev->dev,
+ "error: ipc DSP -> host: pending %s complete %s raw 0x%llx\n",
+ (status & SHIM_IPCD_BUSY) ? "yes" : "no",
+ (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ dev_err(sdev->dev,
+ "error: mask DSP: pending %s complete %s raw 0x%llx\n",
+ (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
+ (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
+}
+
+/*
+ * IPC Doorbell IRQ handler and thread.
+ */
+
+static irqreturn_t byt_irq_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u64 ipcx, ipcd;
+ int ret = IRQ_NONE;
+
+ ipcx = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCX);
+ ipcd = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCD);
+
+ if (ipcx & SHIM_BYT_IPCX_DONE) {
+
+ /* reply message from DSP, Mask Done interrupt first */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR,
+ SHIM_IMRX,
+ SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (ipcd & SHIM_BYT_IPCD_BUSY) {
+
+ /* new message from DSP, Mask Busy interrupt first */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR,
+ SHIM_IMRX,
+ SHIM_IMRX_BUSY,
+ SHIM_IMRX_BUSY);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+
+static irqreturn_t byt_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u64 ipcx, ipcd;
+
+ ipcx = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCX);
+ ipcd = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCD);
+
+ /* reply message from DSP */
+ if (ipcx & SHIM_BYT_IPCX_DONE) {
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+		/*
+		 * Handle the immediate reply from the DSP core. If the
+		 * message is found, the DONE bit is set in cmd_done at the
+		 * end of message processing; otherwise set it here, because
+		 * cmd_done, which is triggered by the message, cannot set it.
+		 */
+ byt_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, ipcx);
+
+ byt_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ }
+
+ /* new message from DSP */
+ if (ipcd & SHIM_BYT_IPCD_BUSY) {
+
+ /* Handle messages from DSP Core */
+ if ((ipcd & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(sdev, BYT_PANIC_OFFSET(ipcd) +
+ MBOX_OFFSET);
+ } else {
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ byt_host_done(sdev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int byt_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ /* unmask and prepare to receive Done interrupt */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_DONE, 0);
+
+ /* send the message */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write64(sdev, BYT_DSP_BAR, SHIM_IPCX, SHIM_BYT_IPCX_BUSY);
+
+ return 0;
+}
+
+static void byt_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply reply;
+ int ret = 0;
+
+	/*
+	 * An unexpected reply IPC can arrive that does not correspond to
+	 * any of the IPCs sent by the driver. In that case the driver
+	 * must ignore it.
+	 */
+ if (!msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
+ return;
+ }
+
+ /* get reply */
+ sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));
+
+ if (reply.error < 0) {
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ ret = reply.error;
+ } else {
+ /* reply correct size ? */
+ if (reply.hdr.size != msg->reply_size) {
+ dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
+ msg->reply_size, reply.hdr.size);
+ ret = -EINVAL;
+ }
+
+ /* read the message */
+ if (msg->reply_size > 0)
+ sof_mailbox_read(sdev, sdev->host_box.offset,
+ msg->reply_data, msg->reply_size);
+ }
+
+ msg->reply_error = ret;
+}
+
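+/* both the mailbox and the FW-ready window sit at the fixed MBOX_OFFSET */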
+static int byt_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+
+static int byt_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+
+static void byt_host_done(struct snd_sof_dev *sdev)
+{
+ /* clear BUSY bit and set DONE bit - accept new messages */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IPCD,
+ SHIM_BYT_IPCD_BUSY |
+ SHIM_BYT_IPCD_DONE,
+ SHIM_BYT_IPCD_DONE);
+
+ /* unmask and prepare to receive next new message */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY, 0);
+}
+
+static void byt_dsp_done(struct snd_sof_dev *sdev)
+{
+ /* clear DONE bit - tell DSP we have completed */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IPCX,
+ SHIM_BYT_IPCX_DONE, 0);
+}
+
+/*
+ * DSP control.
+ */
+
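+/* release the DSP core from stall and wait for it to leave power-wait mode */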
+static int byt_run(struct snd_sof_dev *sdev)
+{
+ int tries = 10;
+
+ /* release stall and wait to unstall */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_STALL, 0x0);
+ while (tries--) {
+ if (!(snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_CSR) &
+ SHIM_BYT_CSR_PWAITMODE))
+ break;
+ msleep(100);
+ }
+ if (tries < 0) {
+ dev_err(sdev->dev, "error: unable to run DSP firmware\n");
+ byt_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX);
+ return -ENODEV;
+ }
+
+ /* return init core mask */
+ return 1;
+}
+
+static int byt_reset(struct snd_sof_dev *sdev)
+{
+ /* put DSP into reset, set reset vector and stall */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL |
+ SHIM_BYT_CSR_STALL,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL |
+ SHIM_BYT_CSR_STALL);
+
+ usleep_range(10, 15);
+
+ /* take DSP out of reset and keep stalled for FW loading */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST, 0);
+
+ return 0;
+}
+
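+/* append an SSP suffix to the default topology filename, keeping the .tplg extension */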
+static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
+ const char *sof_tplg_filename,
+ const char *ssp_str)
+{
+ const char *tplg_filename = NULL;
+ char *filename;
+ char *split_ext;
+
+ filename = devm_kstrdup(sdev->dev, sof_tplg_filename, GFP_KERNEL);
+ if (!filename)
+ return NULL;
+
+ /* this assumes a .tplg extension */
+ split_ext = strsep(&filename, ".");
+ if (split_ext) {
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s-%s.tplg",
+ split_ext, ssp_str);
+ if (!tplg_filename)
+ return NULL;
+ }
+ return tplg_filename;
+}
+
+static void byt_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_soc_acpi_mach *mach;
+ struct platform_device *pdev;
+ const char *tplg_filename;
+
+ mach = snd_soc_acpi_find_machine(desc->machines);
+ if (!mach) {
+ dev_warn(sdev->dev, "warning: No matching ASoC machine driver found\n");
+ return;
+ }
+
+ pdev = to_platform_device(sdev->dev);
+ if (soc_intel_is_byt_cr(pdev)) {
+ dev_dbg(sdev->dev,
+ "BYT-CR detected, SSP0 used instead of SSP2\n");
+
+ tplg_filename = fixup_tplg_name(sdev,
+ mach->sof_tplg_filename,
+ "ssp0");
+ } else {
+ tplg_filename = mach->sof_tplg_filename;
+ }
+
+ if (!tplg_filename) {
+ dev_dbg(sdev->dev,
+ "error: no topology filename\n");
+ return;
+ }
+
+ sof_pdata->tplg_filename = tplg_filename;
+ mach->mach_params.acpi_ipc_irq_index = desc->irqindex_host_ipc;
+ sof_pdata->machine = mach;
+}
+
+static void byt_set_mach_params(const struct snd_soc_acpi_mach *mach,
+ struct device *dev)
+{
+ struct snd_soc_acpi_mach_params *mach_params;
+
+ mach_params = (struct snd_soc_acpi_mach_params *)&mach->mach_params;
+ mach_params->platform = dev_name(dev);
+}
+
+/* Baytrail DAIs */
+static struct snd_soc_dai_driver byt_dai[] = {
+{
+ .name = "ssp0-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp1-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp2-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ }
+},
+{
+ .name = "ssp3-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp4-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp5-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+};
+
+/*
+ * Probe and remove.
+ */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_MERRIFIELD)
+
+static int tangier_pci_probe(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ u32 base, size;
+ int ret;
+
+ /* DSP DMA can only access low 31 bits of host memory */
+ ret = dma_coerce_mask_and_coherent(&pci->dev, DMA_BIT_MASK(31));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
+ return ret;
+ }
+
+ /* LPE base */
+ base = pci_resource_start(pci, desc->resindex_lpe_base) - IRAM_OFFSET;
+ size = BYT_PCI_BAR_SIZE;
+
+	dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x\n", base, size);
+ sdev->bar[BYT_DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BYT_DSP_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap LPE base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[BYT_DSP_BAR]);
+
+ /* IMR base - optional */
+ if (desc->resindex_imr_base == -1)
+ goto irq;
+
+ base = pci_resource_start(pci, desc->resindex_imr_base);
+ size = pci_resource_len(pci, desc->resindex_imr_base);
+
+ /* some BIOSes don't map IMR */
+ if (base == 0x55aa55aa || base == 0x0) {
+ dev_info(sdev->dev, "IMR not set by BIOS. Ignoring\n");
+ goto irq;
+ }
+
+	dev_dbg(sdev->dev, "IMR base at 0x%x size 0x%x\n", base, size);
+ sdev->bar[BYT_IMR_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BYT_IMR_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap IMR base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "IMR VADDR %p\n", sdev->bar[BYT_IMR_BAR]);
+
+irq:
+ /* register our IRQ */
+ sdev->ipc_irq = pci->irq;
+ dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
+ ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
+ byt_irq_handler, byt_irq_thread,
+ 0, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ return ret;
+ }
+
+ /* enable BUSY and disable DONE Interrupt by default */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return ret;
+}
+
+const struct snd_sof_dsp_ops sof_tng_ops = {
+ /* device init */
+ .probe = tangier_pci_probe,
+
+ /* DSP core boot / reset */
+ .run = byt_run,
+ .reset = byt_reset,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* doorbell */
+ .irq_handler = byt_irq_handler,
+ .irq_thread = byt_irq_thread,
+
+ /* ipc */
+ .send_msg = byt_send_msg,
+ .fw_ready = sof_fw_ready,
+ .get_mailbox_offset = byt_get_mailbox_offset,
+ .get_window_offset = byt_get_window_offset,
+
+ .ipc_msg_data = intel_ipc_msg_data,
+ .ipc_pcm_params = intel_ipc_pcm_params,
+
+ /* machine driver */
+ .machine_select = byt_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = byt_set_mach_params,
+
+ /* debug */
+ .debug_map = byt_debugfs,
+ .debug_map_count = ARRAY_SIZE(byt_debugfs),
+ .dbg_dump = byt_dump,
+
+ /* stream callbacks */
+ .pcm_open = intel_pcm_open,
+ .pcm_close = intel_pcm_close,
+
+ /* module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+
+	/* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* DAI drivers */
+ .drv = byt_dai,
+	.num_drv = 3, /* only the first 3 SSPs are used */
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
+
+ .arch_ops = &sof_xtensa_arch_ops,
+};
+EXPORT_SYMBOL_NS(sof_tng_ops, SND_SOC_SOF_MERRIFIELD);
+
+const struct sof_intel_dsp_desc tng_chip_info = {
+ .cores_num = 1,
+ .host_managed_cores_mask = 1,
+};
+EXPORT_SYMBOL_NS(tng_chip_info, SND_SOC_SOF_MERRIFIELD);
+
+#endif /* CONFIG_SND_SOC_SOF_MERRIFIELD */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+
+static void byt_reset_dsp_disable_int(struct snd_sof_dev *sdev)
+{
+ /* Disable Interrupt from both sides */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX, 0x3, 0x3);
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRD, 0x3, 0x3);
+
+ /* Put DSP into reset, set reset vector */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL);
+}
+
+static int byt_suspend(struct snd_sof_dev *sdev, u32 target_state)
+{
+ byt_reset_dsp_disable_int(sdev);
+
+ return 0;
+}
+
+static int byt_resume(struct snd_sof_dev *sdev)
+{
+ /* enable BUSY and disable DONE Interrupt by default */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ return 0;
+}
+
+static int byt_remove(struct snd_sof_dev *sdev)
+{
+ byt_reset_dsp_disable_int(sdev);
+
+ return 0;
+}
+
+static const struct snd_sof_debugfs_map cht_debugfs[] = {
+ {"dmac0", BYT_DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", BYT_DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac2", BYT_DSP_BAR, DMAC2_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", BYT_DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", BYT_DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp2", BYT_DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp3", BYT_DSP_BAR, SSP3_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp4", BYT_DSP_BAR, SSP4_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp5", BYT_DSP_BAR, SSP5_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", BYT_DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE_CHT,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static int byt_acpi_probe(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ struct resource *mmio;
+ u32 base, size;
+ int ret;
+
+ /* DSP DMA can only access low 31 bits of host memory */
+ ret = dma_coerce_mask_and_coherent(sdev->dev, DMA_BIT_MASK(31));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
+ return ret;
+ }
+
+ /* LPE base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_lpe_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get LPE base at idx %d\n",
+ desc->resindex_lpe_base);
+ return -EINVAL;
+ }
+
+	dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x\n", base, size);
+ sdev->bar[BYT_DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BYT_DSP_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap LPE base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[BYT_DSP_BAR]);
+
+ /* TODO: add offsets */
+ sdev->mmio_bar = BYT_DSP_BAR;
+ sdev->mailbox_bar = BYT_DSP_BAR;
+
+ /* IMR base - optional */
+ if (desc->resindex_imr_base == -1)
+ goto irq;
+
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_imr_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get IMR base at idx %d\n",
+ desc->resindex_imr_base);
+ return -ENODEV;
+ }
+
+ /* some BIOSes don't map IMR */
+ if (base == 0x55aa55aa || base == 0x0) {
+ dev_info(sdev->dev, "IMR not set by BIOS. Ignoring\n");
+ goto irq;
+ }
+
+	dev_dbg(sdev->dev, "IMR base at 0x%x size 0x%x\n", base, size);
+ sdev->bar[BYT_IMR_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BYT_IMR_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap IMR base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "IMR VADDR %p\n", sdev->bar[BYT_IMR_BAR]);
+
+irq:
+ /* register our IRQ */
+ sdev->ipc_irq = platform_get_irq(pdev, desc->irqindex_host_ipc);
+ if (sdev->ipc_irq < 0)
+ return sdev->ipc_irq;
+
+ dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
+ ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
+ byt_irq_handler, byt_irq_thread,
+ IRQF_SHARED, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ return ret;
+ }
+
+ /* enable BUSY and disable DONE Interrupt by default */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return ret;
+}
+
+/* baytrail ops */
+const struct snd_sof_dsp_ops sof_byt_ops = {
+ /* device init */
+ .probe = byt_acpi_probe,
+ .remove = byt_remove,
+
+ /* DSP core boot / reset */
+ .run = byt_run,
+ .reset = byt_reset,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* doorbell */
+ .irq_handler = byt_irq_handler,
+ .irq_thread = byt_irq_thread,
+
+ /* ipc */
+ .send_msg = byt_send_msg,
+ .fw_ready = sof_fw_ready,
+ .get_mailbox_offset = byt_get_mailbox_offset,
+ .get_window_offset = byt_get_window_offset,
+
+ .ipc_msg_data = intel_ipc_msg_data,
+ .ipc_pcm_params = intel_ipc_pcm_params,
+
+ /* machine driver */
+ .machine_select = byt_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = byt_set_mach_params,
+
+ /* debug */
+ .debug_map = byt_debugfs,
+ .debug_map_count = ARRAY_SIZE(byt_debugfs),
+ .dbg_dump = byt_dump,
+
+ /* stream callbacks */
+ .pcm_open = intel_pcm_open,
+ .pcm_close = intel_pcm_close,
+
+ /* module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+
+	/* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* PM */
+ .suspend = byt_suspend,
+ .resume = byt_resume,
+
+ /* DAI drivers */
+ .drv = byt_dai,
+	.num_drv = 3, /* we have only 3 SSPs on byt */
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
+
+ .arch_ops = &sof_xtensa_arch_ops,
+};
+EXPORT_SYMBOL_NS(sof_byt_ops, SND_SOC_SOF_BAYTRAIL);
+
+const struct sof_intel_dsp_desc byt_chip_info = {
+ .cores_num = 1,
+ .host_managed_cores_mask = 1,
+};
+EXPORT_SYMBOL_NS(byt_chip_info, SND_SOC_SOF_BAYTRAIL);
+
+/* cherrytrail and braswell ops */
+const struct snd_sof_dsp_ops sof_cht_ops = {
+ /* device init */
+ .probe = byt_acpi_probe,
+ .remove = byt_remove,
+
+ /* DSP core boot / reset */
+ .run = byt_run,
+ .reset = byt_reset,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* doorbell */
+ .irq_handler = byt_irq_handler,
+ .irq_thread = byt_irq_thread,
+
+ /* ipc */
+ .send_msg = byt_send_msg,
+ .fw_ready = sof_fw_ready,
+ .get_mailbox_offset = byt_get_mailbox_offset,
+ .get_window_offset = byt_get_window_offset,
+
+ .ipc_msg_data = intel_ipc_msg_data,
+ .ipc_pcm_params = intel_ipc_pcm_params,
+
+ /* machine driver */
+ .machine_select = byt_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = byt_set_mach_params,
+
+ /* debug */
+ .debug_map = cht_debugfs,
+ .debug_map_count = ARRAY_SIZE(cht_debugfs),
+ .dbg_dump = byt_dump,
+
+ /* stream callbacks */
+ .pcm_open = intel_pcm_open,
+ .pcm_close = intel_pcm_close,
+
+ /* module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+
+	/* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* PM */
+ .suspend = byt_suspend,
+ .resume = byt_resume,
+
+ /* DAI drivers */
+ .drv = byt_dai,
+ /* all 6 SSPs may be available for cherrytrail */
+ .num_drv = ARRAY_SIZE(byt_dai),
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
+
+ .arch_ops = &sof_xtensa_arch_ops,
+};
+EXPORT_SYMBOL_NS(sof_cht_ops, SND_SOC_SOF_BAYTRAIL);
+
+const struct sof_intel_dsp_desc cht_chip_info = {
+ .cores_num = 1,
+ .host_managed_cores_mask = 1,
+};
+EXPORT_SYMBOL_NS(cht_chip_info, SND_SOC_SOF_BAYTRAIL);
+
+#endif /* CONFIG_SND_SOC_SOF_BAYTRAIL */
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
new file mode 100644
index 000000000..a5d325810
--- /dev/null
+++ b/sound/soc/sof/intel/cnl.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Cannonlake.
+ */
+
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+#include "../sof-audio.h"
+
+static const struct snd_sof_debugfs_map cnl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void cnl_ipc_host_done(struct snd_sof_dev *sdev);
+static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev);
+
+irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 hipci;
+ u32 hipcida;
+ u32 hipctdr;
+ u32 hipctdd;
+ u32 msg;
+ u32 msg_ext;
+ bool ipc_irq = false;
+
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
+ hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDD);
+ hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR);
+
+ /* reply message from DSP */
+ if (hipcida & CNL_DSP_REG_HIPCIDA_DONE) {
+ msg_ext = hipci & CNL_DSP_REG_HIPCIDR_MSG_MASK;
+ msg = hipcida & CNL_DSP_REG_HIPCIDA_MSG_MASK;
+
+ dev_vdbg(sdev->dev,
+ "ipc: firmware response, msg:0x%x, msg_ext:0x%x\n",
+ msg, msg_ext);
+
+ /* mask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCCTL,
+ CNL_DSP_REG_HIPCCTL_DONE, 0);
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+ /* handle immediate reply from DSP core */
+ hda_dsp_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, msg);
+
+ cnl_ipc_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+
+ ipc_irq = true;
+ }
+
+ /* new message from DSP */
+ if (hipctdr & CNL_DSP_REG_HIPCTDR_BUSY) {
+ msg = hipctdr & CNL_DSP_REG_HIPCTDR_MSG_MASK;
+ msg_ext = hipctdd & CNL_DSP_REG_HIPCTDD_MSG_MASK;
+
+ dev_vdbg(sdev->dev,
+ "ipc: firmware initiated, msg:0x%x, msg_ext:0x%x\n",
+ msg, msg_ext);
+
+ /* handle messages from DSP */
+ if ((hipctdr & SOF_IPC_PANIC_MAGIC_MASK) ==
+ SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(sdev, HDA_DSP_PANIC_OFFSET(msg_ext));
+ } else {
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ cnl_ipc_host_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ if (!ipc_irq) {
+ /*
+ * This interrupt is not shared so no need to return IRQ_NONE.
+ */
+ dev_dbg_ratelimited(sdev->dev,
+ "nothing to do in IPC IRQ thread\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void cnl_ipc_host_done(struct snd_sof_dev *sdev)
+{
+	/*
+	 * clear the BUSY interrupt to tell the DSP controller that this
+	 * interrupt has been accepted and should not be triggered again
+	 */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCTDR,
+ CNL_DSP_REG_HIPCTDR_BUSY,
+ CNL_DSP_REG_HIPCTDR_BUSY);
+	/*
+	 * set the DONE bit to ack the DSP that the message has been
+	 * processed, and to send the reply message back to the DSP
+	 */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCTDA,
+ CNL_DSP_REG_HIPCTDA_DONE,
+ CNL_DSP_REG_HIPCTDA_DONE);
+}
+
+static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev)
+{
+	/*
+	 * set the DONE bit - tell the DSP we have received and processed
+	 * its reply, so it does not need to resend it to the host
+	 */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCIDA,
+ CNL_DSP_REG_HIPCIDA_DONE,
+ CNL_DSP_REG_HIPCIDA_DONE);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCCTL,
+ CNL_DSP_REG_HIPCCTL_DONE,
+ CNL_DSP_REG_HIPCCTL_DONE);
+}
+
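+/*
+ * Pack an IPC message into the primary/extended doorbell registers when
+ * possible; only the PM_GATE message is small enough to be sent this way.
+ */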
+static bool cnl_compact_ipc_compress(struct snd_sof_ipc_msg *msg,
+ u32 *dr, u32 *dd)
+{
+ struct sof_ipc_pm_gate *pm_gate;
+
+ if (msg->header == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
+ pm_gate = msg->msg_data;
+
+ /* send the compact message via the primary register */
+ *dr = HDA_IPC_MSG_COMPACT | HDA_IPC_PM_GATE;
+
+ /* send payload via the extended data register */
+ *dd = pm_gate->flags;
+
+ return true;
+ }
+
+ return false;
+}
+
+int cnl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ struct sof_ipc_cmd_hdr *hdr;
+ u32 dr = 0;
+ u32 dd = 0;
+
+ /*
+ * Currently the only compact IPC supported is the PM_GATE
+ * IPC which is used for transitioning the DSP between the
+ * D0I0 and D0I3 states. And these are sent only during the
+ * set_power_state() op. Therefore, there will never be a case
+ * that a compact IPC results in the DSP exiting D0I3 without
+ * the host and FW being in sync.
+ */
+ if (cnl_compact_ipc_compress(msg, &dr, &dd)) {
+ /* send the message via IPC registers */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD,
+ dd);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ CNL_DSP_REG_HIPCIDR_BUSY | dr);
+ return 0;
+ }
+
+ /* send the message via mailbox */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ CNL_DSP_REG_HIPCIDR_BUSY);
+
+ hdr = msg->msg_data;
+
+ /*
+ * Use mod_delayed_work() to schedule the delayed work
+ * to avoid scheduling multiple workqueue items when
+	 * IPCs are sent at a high rate. mod_delayed_work()
+ * modifies the timer if the work is pending.
+ * Also, a new delayed work should not be queued after the
+ * CTX_SAVE IPC, which is sent before the DSP enters D3.
+ */
+ if (hdr->cmd != (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE))
+ mod_delayed_work(system_wq, &hdev->d0i3_work,
+ msecs_to_jiffies(SOF_HDA_D0I3_WORK_DELAY_MS));
+
+ return 0;
+}
+
+void cnl_ipc_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipcctl;
+ u32 hipcida;
+ u32 hipctdr;
+
+ hda_ipc_irq_dump(sdev);
+
+ /* read IPC status */
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCCTL);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
+
+ /* dump the IPC regs */
+ /* TODO: parse the raw msg */
+ dev_err(sdev->dev,
+ "error: host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
+ hipcida, hipctdr, hipcctl);
+}
+
+/* cannonlake ops */
+const struct snd_sof_dsp_ops sof_cnl_ops = {
+ /* probe and remove */
+ .probe = hda_dsp_probe,
+ .remove = hda_dsp_remove,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* doorbell */
+ .irq_thread = cnl_ipc_irq_thread,
+
+ /* ipc */
+ .send_msg = cnl_ipc_send_msg,
+ .fw_ready = sof_fw_ready,
+ .get_mailbox_offset = hda_dsp_ipc_get_mailbox_offset,
+ .get_window_offset = hda_dsp_ipc_get_window_offset,
+
+ .ipc_msg_data = hda_ipc_msg_data,
+ .ipc_pcm_params = hda_ipc_pcm_params,
+
+ /* machine driver */
+ .machine_select = hda_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = hda_set_mach_params,
+
+ /* debug */
+ .debug_map = cnl_dsp_debugfs,
+ .debug_map_count = ARRAY_SIZE(cnl_dsp_debugfs),
+ .dbg_dump = hda_dsp_dump,
+ .ipc_dump = cnl_ipc_dump,
+
+ /* stream callbacks */
+ .pcm_open = hda_dsp_pcm_open,
+ .pcm_close = hda_dsp_pcm_close,
+ .pcm_hw_params = hda_dsp_pcm_hw_params,
+ .pcm_hw_free = hda_dsp_stream_hw_free,
+ .pcm_trigger = hda_dsp_pcm_trigger,
+ .pcm_pointer = hda_dsp_pcm_pointer,
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+ /* probe callbacks */
+ .probe_assign = hda_probe_compr_assign,
+ .probe_free = hda_probe_compr_free,
+ .probe_set_params = hda_probe_compr_set_params,
+ .probe_trigger = hda_probe_compr_trigger,
+ .probe_pointer = hda_probe_compr_pointer,
+#endif
+
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_raw,
+
+ /* pre/post fw run */
+ .pre_fw_run = hda_dsp_pre_fw_run,
+ .post_fw_run = hda_dsp_post_fw_run,
+
+ /* dsp core power up/down */
+ .core_power_up = hda_dsp_enable_core,
+ .core_power_down = hda_dsp_core_reset_power_down,
+
+ /* firmware run */
+ .run = hda_dsp_cl_boot_firmware,
+
+ /* trace callback */
+ .trace_init = hda_dsp_trace_init,
+ .trace_release = hda_dsp_trace_release,
+ .trace_trigger = hda_dsp_trace_trigger,
+
+ /* DAI drivers */
+ .drv = skl_dai,
+ .num_drv = SOF_SKL_NUM_DAIS,
+
+ /* PM */
+ .suspend = hda_dsp_suspend,
+ .resume = hda_dsp_resume,
+ .runtime_suspend = hda_dsp_runtime_suspend,
+ .runtime_resume = hda_dsp_runtime_resume,
+ .runtime_idle = hda_dsp_runtime_idle,
+ .set_hw_params_upon_resume = hda_dsp_set_hw_params_upon_resume,
+ .set_power_state = hda_dsp_set_power_state,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+
+ .arch_ops = &sof_xtensa_arch_ops,
+};
+EXPORT_SYMBOL_NS(sof_cnl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc cnl_chip_info = {
+ /* Cannonlake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(3, 0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_init_timeout = 300,
+ .ssp_count = CNL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL_NS(cnl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc icl_chip_info = {
+ /* Icelake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(3, 0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_init_timeout = 300,
+ .ssp_count = ICL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL_NS(icl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc ehl_chip_info = {
+ /* Elkhartlake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_init_timeout = 300,
+ .ssp_count = ICL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL_NS(ehl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc jsl_chip_info = {
+ /* Jasperlake */
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = GENMASK(1, 0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_init_timeout = 300,
+ .ssp_count = ICL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL_NS(jsl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c
new file mode 100644
index 000000000..789148e55
--- /dev/null
+++ b/sound/soc/sof/intel/hda-bus.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Keyon Jie <yang.jie@linux.intel.com>
+
+#include <linux/io.h>
+#include <sound/hdaudio.h>
+#include "../sof-priv.h"
+#include "hda.h"
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+#include "../../codecs/hdac_hda.h"
+#define sof_hda_ext_ops snd_soc_hdac_hda_get_ops()
+#else
+#define sof_hda_ext_ops NULL
+#endif
+
+/*
+ * This can be used both with and without HDA link support.
+ */
+void sof_hda_bus_init(struct hdac_bus *bus, struct device *dev)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_ext_bus_init(bus, dev, NULL, sof_hda_ext_ops);
+#else /* CONFIG_SND_SOC_SOF_HDA */
+ memset(bus, 0, sizeof(*bus));
+ bus->dev = dev;
+
+ INIT_LIST_HEAD(&bus->stream_list);
+
+ bus->irq = -1;
+
+	/*
+	 * There is only one HDA bus at the moment; keep the index at 0.
+	 * This needs to be fixed when more than one HDA bus is supported.
+	 */
+ bus->idx = 0;
+
+ spin_lock_init(&bus->reg_lock);
+#endif /* CONFIG_SND_SOC_SOF_HDA */
+}
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
new file mode 100644
index 000000000..8d65004c9
--- /dev/null
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Keyon Jie <yang.jie@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_i915.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "hda.h"
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+#include "../../codecs/hdac_hda.h"
+#endif /* CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+#define IDISP_VID_INTEL 0x80860000
+
+/* load the legacy HDA codec driver */
+static int request_codec_module(struct hda_codec *codec)
+{
+#ifdef MODULE
+ char alias[MODULE_NAME_LEN];
+ const char *mod = NULL;
+
+ switch (codec->probe_id) {
+ case HDA_CODEC_ID_GENERIC:
+#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
+ mod = "snd-hda-codec-generic";
+#endif
+ break;
+ default:
+ snd_hdac_codec_modalias(&codec->core, alias, sizeof(alias));
+ mod = alias;
+ break;
+ }
+
+ if (mod) {
+ dev_dbg(&codec->core.dev, "loading codec module: %s\n", mod);
+ request_module(mod);
+ }
+#endif /* MODULE */
+ return device_attach(hda_codec_dev(codec));
+}
+
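+/* try the matching codec module first, then fall back to the generic driver */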
+static int hda_codec_load_module(struct hda_codec *codec)
+{
+ int ret = request_codec_module(codec);
+
+ if (ret <= 0) {
+ codec->probe_id = HDA_CODEC_ID_GENERIC;
+ ret = request_codec_module(codec);
+ }
+
+ return ret;
+}
+
+/* enable controller wake up event for all codecs with jack connectors */
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
+{
+ struct hda_bus *hbus = sof_to_hbus(sdev);
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hda_codec *codec;
+ unsigned int mask = 0;
+
+ list_for_each_codec(codec, hbus)
+ if (codec->jacktbl.used)
+ mask |= BIT(codec->core.addr);
+
+ snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
+}
+
+/* check jack status after resuming from suspend mode */
+void hda_codec_jack_check(struct snd_sof_dev *sdev)
+{
+ struct hda_bus *hbus = sof_to_hbus(sdev);
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hda_codec *codec;
+
+	/* disable controller wake-up event */
+ snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, 0);
+
+ list_for_each_codec(codec, hbus)
+ /*
+		 * Wake up all jack-detecting codecs regardless of whether an event
+ * has been recorded in STATESTS
+ */
+ if (codec->jacktbl.used)
+ pm_request_resume(&codec->core.dev);
+}
+#else
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) {}
+void hda_codec_jack_check(struct snd_sof_dev *sdev) {}
+#endif /* CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC */
+EXPORT_SYMBOL_NS(hda_codec_jack_wake_enable, SND_SOC_SOF_HDA_AUDIO_CODEC);
+EXPORT_SYMBOL_NS(hda_codec_jack_check, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+#if IS_ENABLED(CONFIG_SND_HDA_GENERIC)
+#define is_generic_config(bus) \
+ ((bus)->modelname && !strcmp((bus)->modelname, "generic"))
+#else
+#define is_generic_config(x) 0
+#endif
+
+/* probe individual codec */
+static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
+ bool hda_codec_use_common_hdmi)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+ struct hdac_hda_priv *hda_priv;
+ struct hda_codec *codec;
+ int type = HDA_DEV_LEGACY;
+#endif
+ struct hda_bus *hbus = sof_to_hbus(sdev);
+ struct hdac_device *hdev;
+ u32 hda_cmd = (address << 28) | (AC_NODE_ROOT << 20) |
+ (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
+ u32 resp = -1;
+ int ret;
+
+ mutex_lock(&hbus->core.cmd_mutex);
+ snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
+ snd_hdac_bus_get_response(&hbus->core, address, &resp);
+ mutex_unlock(&hbus->core.cmd_mutex);
+ if (resp == -1)
+ return -EIO;
+ dev_dbg(sdev->dev, "HDA codec #%d probed OK: response: %x\n",
+ address, resp);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+ hda_priv = devm_kzalloc(sdev->dev, sizeof(*hda_priv), GFP_KERNEL);
+ if (!hda_priv)
+ return -ENOMEM;
+
+ hda_priv->codec.bus = hbus;
+ hdev = &hda_priv->codec.core;
+ codec = &hda_priv->codec;
+
+ /* only probe ASoC codec drivers for HDAC-HDMI */
+ if (!hda_codec_use_common_hdmi && (resp & 0xFFFF0000) == IDISP_VID_INTEL)
+ type = HDA_DEV_ASOC;
+
+ ret = snd_hdac_ext_bus_device_init(&hbus->core, address, hdev, type);
+ if (ret < 0)
+ return ret;
+
+ if ((resp & 0xFFFF0000) == IDISP_VID_INTEL) {
+ if (!hdev->bus->audio_component) {
+ dev_dbg(sdev->dev,
+ "iDisp hw present but no driver\n");
+ ret = -ENOENT;
+ goto out;
+ }
+ hda_priv->need_display_power = true;
+ }
+
+ if (is_generic_config(hbus))
+ codec->probe_id = HDA_CODEC_ID_GENERIC;
+ else
+ codec->probe_id = 0;
+
+ if (type == HDA_DEV_LEGACY) {
+ ret = hda_codec_load_module(codec);
+ /*
+ * handle ret==0 (no driver bound) as an error, but pass
+ * other return codes without modification
+ */
+ if (ret == 0)
+ ret = -ENOENT;
+ }
+
+out:
+ if (ret < 0) {
+ snd_hdac_device_unregister(hdev);
+ put_device(&hdev->dev);
+ }
+#else
+ hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ ret = snd_hdac_ext_bus_device_init(&hbus->core, address, hdev, HDA_DEV_ASOC);
+#endif
+
+ return ret;
+}
+
+/* Codec initialization */
+void hda_codec_probe_bus(struct snd_sof_dev *sdev,
+ bool hda_codec_use_common_hdmi)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int i, ret;
+
+	/* probe codecs in available slots */
+ for (i = 0; i < HDA_MAX_CODECS; i++) {
+
+ if (!(bus->codec_mask & (1 << i)))
+ continue;
+
+ ret = hda_codec_probe(sdev, i, hda_codec_use_common_hdmi);
+ if (ret < 0) {
+ dev_warn(bus->dev, "codec #%d probe error, ret: %d\n",
+ i, ret);
+ bus->codec_mask &= ~BIT(i);
+ }
+ }
+}
+EXPORT_SYMBOL_NS(hda_codec_probe_bus, SND_SOC_SOF_HDA_AUDIO_CODEC);
+
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) || \
+ IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)
+
+void hda_codec_i915_display_power(struct snd_sof_dev *sdev, bool enable)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (HDA_IDISP_CODEC(bus->codec_mask)) {
+ dev_dbg(bus->dev, "Turning i915 HDAC power %d\n", enable);
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, enable);
+ }
+}
+EXPORT_SYMBOL_NS(hda_codec_i915_display_power, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+
+int hda_codec_i915_init(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int ret;
+
+	/* i915 exposes an HDA codec for HDMI audio */
+ ret = snd_hdac_i915_init(bus);
+ if (ret < 0)
+ return ret;
+
+ /* codec_mask not yet known, power up for probe */
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(hda_codec_i915_init, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+
+int hda_codec_i915_exit(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ if (!bus->audio_component)
+ return 0;
+
+ /* power down unconditionally */
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
+
+ return snd_hdac_i915_exit(bus);
+}
+EXPORT_SYMBOL_NS(hda_codec_i915_exit, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+
+#endif
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/hda-compress.c b/sound/soc/sof/intel/hda-compress.c
new file mode 100644
index 000000000..53c08034f
--- /dev/null
+++ b/sound/soc/sof/intel/hda-compress.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2019-2020 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+
+#include <sound/hdaudio_ext.h>
+#include <sound/soc.h>
+#include "../sof-priv.h"
+#include "hda.h"
+
+static inline struct hdac_ext_stream *
+hda_compr_get_stream(struct snd_compr_stream *cstream)
+{
+ return cstream->runtime->private_data;
+}
+
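+/* reserve a host HDA stream for the probe compress stream and return its tag */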
+int hda_probe_compr_assign(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *stream;
+
+ stream = hda_dsp_stream_get(sdev, cstream->direction);
+ if (!stream)
+ return -EBUSY;
+
+ hdac_stream(stream)->curr_pos = 0;
+ hdac_stream(stream)->cstream = cstream;
+ cstream->runtime->private_data = stream;
+
+ return hdac_stream(stream)->stream_tag;
+}
+
+int hda_probe_compr_free(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *stream = hda_compr_get_stream(cstream);
+ int ret;
+
+ ret = hda_dsp_stream_put(sdev, cstream->direction,
+ hdac_stream(stream)->stream_tag);
+ if (ret < 0) {
+ dev_dbg(sdev->dev, "stream put failed: %d\n", ret);
+ return ret;
+ }
+
+ hdac_stream(stream)->cstream = NULL;
+ cstream->runtime->private_data = NULL;
+
+ return 0;
+}
+
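+/* program the reserved HDA stream with the compress buffer and stream format */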
+int hda_probe_compr_set_params(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *stream = hda_compr_get_stream(cstream);
+ struct hdac_stream *hstream = hdac_stream(stream);
+ struct snd_dma_buffer *dmab;
+ u32 bits, rate;
+ int bps, ret;
+
+ dmab = cstream->runtime->dma_buffer_p;
+ /* compr params do not store bit depth, default to S32_LE */
+ bps = snd_pcm_format_physical_width(SNDRV_PCM_FORMAT_S32_LE);
+ if (bps < 0)
+ return bps;
+ bits = hda_dsp_get_bits(sdev, bps);
+ rate = hda_dsp_get_mult_div(sdev, params->codec.sample_rate);
+
+ hstream->format_val = rate | bits | (params->codec.ch_out - 1);
+ hstream->bufsize = cstream->runtime->buffer_size;
+ hstream->period_bytes = cstream->runtime->fragment_size;
+ hstream->no_period_wakeup = 0;
+
+ ret = hda_dsp_stream_hw_params(sdev, stream, dmab, NULL);
+ if (ret < 0) {
+		dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int hda_probe_compr_trigger(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *stream = hda_compr_get_stream(cstream);
+
+ return hda_dsp_stream_trigger(sdev, stream, cmd);
+}
+
+int hda_probe_compr_pointer(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *stream = hda_compr_get_stream(cstream);
+ struct snd_soc_pcm_stream *pstream;
+
+ pstream = &dai->driver->capture;
+ tstamp->copied_total = hdac_stream(stream)->curr_pos;
+ tstamp->sampling_rate = snd_pcm_rate_bit_to_rate(pstream->rates);
+
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-ctrl.c b/sound/soc/sof/intel/hda-ctrl.c
new file mode 100644
index 000000000..fa5f0a718
--- /dev/null
+++ b/sound/soc/sof/intel/hda-ctrl.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/hda_component.h>
+#include "../ops.h"
+#include "hda.h"
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+static int hda_codec_mask = -1;
+module_param_named(codec_mask, hda_codec_mask, int, 0444);
+MODULE_PARM_DESC(codec_mask, "SOF HDA codec mask for probing");
+#endif
+
+/*
+ * HDA Operations.
+ */
+
+int hda_dsp_ctrl_link_reset(struct snd_sof_dev *sdev, bool reset)
+{
+ unsigned long timeout;
+ u32 gctl = 0;
+ u32 val;
+
+ /* 0 to enter reset and 1 to exit reset */
+ val = reset ? 0 : SOF_HDA_GCTL_RESET;
+
+ /* enter/exit HDA controller reset */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCTL,
+ SOF_HDA_GCTL_RESET, val);
+
+ /* wait to enter/exit reset */
+ timeout = jiffies + msecs_to_jiffies(HDA_DSP_CTRL_RESET_TIMEOUT);
+ while (time_before(jiffies, timeout)) {
+ gctl = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCTL);
+ if ((gctl & SOF_HDA_GCTL_RESET) == val)
+ return 0;
+ usleep_range(500, 1000);
+ }
+
+ /* enter/exit reset failed */
+ dev_err(sdev->dev, "error: failed to %s HDA controller gctl 0x%x\n",
+ reset ? "reset" : "ready", gctl);
+ return -EIO;
+}
+
+int hda_dsp_ctrl_get_caps(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ u32 cap, offset, feature;
+ int count = 0;
+ int ret;
+
+ /*
+ * On some devices, one reset cycle is necessary before reading
+ * capabilities
+ */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0)
+ return ret;
+ ret = hda_dsp_ctrl_link_reset(sdev, false);
+ if (ret < 0)
+ return ret;
+
+ offset = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_LLCH);
+
+ do {
+ dev_dbg(sdev->dev, "checking for capabilities at offset 0x%x\n",
+ offset & SOF_HDA_CAP_NEXT_MASK);
+
+ cap = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, offset);
+
+ if (cap == -1) {
+ dev_dbg(bus->dev, "Invalid capability reg read\n");
+ break;
+ }
+
+ feature = (cap & SOF_HDA_CAP_ID_MASK) >> SOF_HDA_CAP_ID_OFF;
+
+ switch (feature) {
+ case SOF_HDA_PP_CAP_ID:
+ dev_dbg(sdev->dev, "found DSP capability at 0x%x\n",
+ offset);
+ bus->ppcap = bus->remap_addr + offset;
+ sdev->bar[HDA_DSP_PP_BAR] = bus->ppcap;
+ break;
+ case SOF_HDA_SPIB_CAP_ID:
+ dev_dbg(sdev->dev, "found SPIB capability at 0x%x\n",
+ offset);
+ bus->spbcap = bus->remap_addr + offset;
+ sdev->bar[HDA_DSP_SPIB_BAR] = bus->spbcap;
+ break;
+ case SOF_HDA_DRSM_CAP_ID:
+ dev_dbg(sdev->dev, "found DRSM capability at 0x%x\n",
+ offset);
+ bus->drsmcap = bus->remap_addr + offset;
+ sdev->bar[HDA_DSP_DRSM_BAR] = bus->drsmcap;
+ break;
+ case SOF_HDA_GTS_CAP_ID:
+ dev_dbg(sdev->dev, "found GTS capability at 0x%x\n",
+ offset);
+ bus->gtscap = bus->remap_addr + offset;
+ break;
+ case SOF_HDA_ML_CAP_ID:
+ dev_dbg(sdev->dev, "found ML capability at 0x%x\n",
+ offset);
+ bus->mlcap = bus->remap_addr + offset;
+ break;
+ default:
+ dev_dbg(sdev->dev, "found capability %d at 0x%x\n",
+ feature, offset);
+ break;
+ }
+
+ offset = cap & SOF_HDA_CAP_NEXT_MASK;
+ } while (count++ <= SOF_HDA_MAX_CAPS && offset);
+
+ return 0;
+}
+
+void hda_dsp_ctrl_ppcap_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? SOF_HDA_PPCTL_GPROCEN : 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_GPROCEN, val);
+}
+
+void hda_dsp_ctrl_ppcap_int_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? SOF_HDA_PPCTL_PIE : 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_PIE, val);
+}
+
+void hda_dsp_ctrl_misc_clock_gating(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? PCI_CGCTL_MISCBDCGE_MASK : 0;
+
+ snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_MISCBDCGE_MASK, val);
+}
+
+/*
+ * enable/disable audio dsp clock gating and power gating bits.
+ * This allows the HW to opportunistically power and clock gate
+ * the audio dsp when it is idle
+ */
+int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val;
+
+ /* enable/disable audio dsp clock gating */
+ val = enable ? PCI_CGCTL_ADSPDCGE : 0;
+ snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_ADSPDCGE, val);
+
+ /* enable/disable DMI Link L1 support */
+ val = enable ? HDA_VS_INTEL_EM2_L1SEN : 0;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, val);
+
+ /* enable/disable audio dsp power gating */
+ val = enable ? 0 : PCI_PGCTL_ADSPPGD;
+ snd_sof_pci_update_bits(sdev, PCI_PGCTL, PCI_PGCTL_ADSPPGD, val);
+
+ return 0;
+}
+
+int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev, bool full_reset)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_ext_link *hlink;
+#endif
+ struct hdac_stream *stream;
+ int sd_offset, ret = 0;
+
+ if (bus->chip_init)
+ return 0;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_set_codec_wakeup(bus, true);
+#endif
+ hda_dsp_ctrl_misc_clock_gating(sdev, false);
+
+ if (full_reset) {
+ /* reset HDA controller */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to reset HDA controller\n");
+ goto err;
+ }
+
+ usleep_range(500, 1000);
+
+ /* exit HDA controller reset */
+ ret = hda_dsp_ctrl_link_reset(sdev, false);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to exit HDA controller reset\n");
+ goto err;
+ }
+
+ usleep_range(1000, 1200);
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* check to see if controller is ready */
+ if (!snd_hdac_chip_readb(bus, GCTL)) {
+ dev_dbg(bus->dev, "controller not ready!\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* Accept unsolicited responses */
+ snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, AZX_GCTL_UNSOL);
+
+ /* detect codecs */
+ if (!bus->codec_mask) {
+ bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+ dev_dbg(bus->dev, "codec_mask = 0x%lx\n", bus->codec_mask);
+ }
+
+ if (hda_codec_mask != -1) {
+ bus->codec_mask &= hda_codec_mask;
+ dev_dbg(bus->dev, "filtered codec_mask = 0x%lx\n",
+ bus->codec_mask);
+ }
+#endif
+
+ /* clear stream status */
+ list_for_each_entry(stream, &bus->stream_list, list) {
+ sd_offset = SOF_STREAM_SD_OFFSET(stream);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+ }
+
+ /* clear WAKESTS */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
+ SOF_HDA_WAKESTS_INT_MASK);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* clear rirb status */
+ snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
+#endif
+
+ /* clear interrupt status register */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_ALL_STREAM);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* initialize the codec command I/O */
+ snd_hdac_bus_init_cmd_io(bus);
+#endif
+
+ /* enable CIE and GIE interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN);
+
+ /* program the position buffer */
+ if (bus->use_posbuf && bus->posbuf.addr) {
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE,
+ (u32)bus->posbuf.addr);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPUBASE,
+ upper_32_bits(bus->posbuf.addr));
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* Reset stream-to-link mapping */
+ list_for_each_entry(hlink, &bus->hlink_list, list)
+ writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);
+#endif
+
+ bus->chip_init = true;
+
+err:
+ hda_dsp_ctrl_misc_clock_gating(sdev, true);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_set_codec_wakeup(bus, false);
+#endif
+
+ return ret;
+}
+
+void hda_dsp_ctrl_stop_chip(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *stream;
+ int sd_offset;
+
+ if (!bus->chip_init)
+ return;
+
+ /* disable interrupts in stream descriptor */
+ list_for_each_entry(stream, &bus->stream_list, list) {
+ sd_offset = SOF_STREAM_SD_OFFSET(stream);
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset +
+ SOF_HDA_ADSP_REG_CL_SD_CTL,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ 0);
+ }
+
+ /* disable SIE for all streams */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_ALL_STREAM, 0);
+
+ /* disable controller CIE and GIE */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
+ 0);
+
+ /* clear stream status */
+ list_for_each_entry(stream, &bus->stream_list, list) {
+ sd_offset = SOF_STREAM_SD_OFFSET(stream);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+ }
+
+ /* clear WAKESTS */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
+ SOF_HDA_WAKESTS_INT_MASK);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* clear rirb status */
+ snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
+#endif
+
+ /* clear interrupt status register */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_ALL_STREAM);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* disable CORB/RIRB */
+ snd_hdac_bus_stop_cmd_io(bus);
+#endif
+ /* disable position buffer */
+ if (bus->posbuf.addr) {
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_ADSP_DPLBASE, 0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_ADSP_DPUBASE, 0);
+ }
+
+ bus->chip_init = false;
+}
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
new file mode 100644
index 000000000..a6275cc92
--- /dev/null
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -0,0 +1,586 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Keyon Jie <yang.jie@linux.intel.com>
+//
+
+#include <sound/pcm_params.h>
+#include <sound/hdaudio_ext.h>
+#include "../sof-priv.h"
+#include "../sof-audio.h"
+#include "hda.h"
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+
+struct hda_pipe_params {
+ u8 host_dma_id;
+ u8 link_dma_id;
+ u32 ch;
+ u32 s_freq;
+ u32 s_fmt;
+ u8 linktype;
+ snd_pcm_format_t format;
+ int link_index;
+ int stream;
+ unsigned int host_bps;
+ unsigned int link_bps;
+};
+
+/*
+ * This function checks if the host dma channel corresponding
+ * to the link DMA stream_tag argument is assigned to one
+ * of the FEs connected to the BE DAI.
+ */
+static bool hda_check_fes(struct snd_soc_pcm_runtime *rtd,
+ int dir, int stream_tag)
+{
+ struct snd_pcm_substream *fe_substream;
+ struct hdac_stream *fe_hstream;
+ struct snd_soc_dpcm *dpcm;
+
+ for_each_dpcm_fe(rtd, dir, dpcm) {
+ fe_substream = snd_soc_dpcm_get_substream(dpcm->fe, dir);
+ fe_hstream = fe_substream->runtime->private_data;
+ if (fe_hstream->stream_tag == stream_tag)
+ return true;
+ }
+
+ return false;
+}
+
+static struct hdac_ext_stream *
+ hda_link_stream_assign(struct hdac_bus *bus,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct sof_intel_hda_stream *hda_stream;
+ struct hdac_ext_stream *res = NULL;
+ struct hdac_stream *stream = NULL;
+
+ int stream_dir = substream->stream;
+
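+ /* link DMA needs the processing pipe (PP) capability for host/link decoupling */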
+ if (!bus->ppcap) {
+ dev_err(bus->dev, "stream type not supported\n");
+ return NULL;
+ }
+
+ spin_lock_irq(&bus->reg_lock);
+ list_for_each_entry(stream, &bus->stream_list, list) {
+ struct hdac_ext_stream *hstream =
+ stream_to_hdac_ext_stream(stream);
+ if (stream->direction != substream->stream)
+ continue;
+
+ hda_stream = hstream_to_sof_hda_stream(hstream);
+
+ /* check if link is available */
+ if (!hstream->link_locked) {
+ if (stream->opened) {
+ /*
+ * check if the stream tag matches the stream
+ * tag of one of the connected FEs
+ */
+ if (hda_check_fes(rtd, stream_dir,
+ stream->stream_tag)) {
+ res = hstream;
+ break;
+ }
+ } else {
+ res = hstream;
+
+ /*
+ * This must be a hostless stream.
+ * So reserve the host DMA channel.
+ */
+ hda_stream->host_reserved = 1;
+ break;
+ }
+ }
+ }
+
+ if (res) {
+ /*
+ * Decouple host and link DMA. The decoupled flag
+ * is updated in snd_hdac_ext_stream_decouple().
+ */
+ if (!res->decoupled)
+ snd_hdac_ext_stream_decouple_locked(bus, res, true);
+
+ res->link_locked = 1;
+ res->link_substream = substream;
+ }
+ spin_unlock_irq(&bus->reg_lock);
+
+ return res;
+}
+
+static int hda_link_dma_params(struct hdac_ext_stream *stream,
+ struct hda_pipe_params *params)
+{
+ struct hdac_stream *hstream = &stream->hstream;
+ unsigned char stream_tag = hstream->stream_tag;
+ struct hdac_bus *bus = hstream->bus;
+ struct hdac_ext_link *link;
+ unsigned int format_val;
+
+ snd_hdac_ext_stream_decouple(bus, stream, true);
+ snd_hdac_ext_link_stream_reset(stream);
+
+ format_val = snd_hdac_calc_stream_format(params->s_freq, params->ch,
+ params->format,
+ params->link_bps, 0);
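+ /* the resulting SDxFMT value is programmed into the link stream below */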
+
+ dev_dbg(bus->dev, "format_val=%d, rate=%d, ch=%d, format=%d\n",
+ format_val, params->s_freq, params->ch, params->format);
+
+ snd_hdac_ext_link_stream_setup(stream, format_val);
+
+ if (stream->hstream.direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ list_for_each_entry(link, &bus->hlink_list, list) {
+ if (link->index == params->link_index)
+ snd_hdac_ext_link_set_stream_id(link,
+ stream_tag);
+ }
+ }
+
+ stream->link_prepared = 1;
+
+ return 0;
+}
+
+/* Send DAI_CONFIG IPC to the DAI that matches the dai_name and direction */
+static int hda_link_config_ipc(struct sof_intel_hda_stream *hda_stream,
+ const char *dai_name, int channel, int dir)
+{
+ struct sof_ipc_dai_config *config;
+ struct snd_sof_dai *sof_dai;
+ struct sof_ipc_reply reply;
+ int ret = 0;
+
+ list_for_each_entry(sof_dai, &hda_stream->sdev->dai_list, list) {
+ if (!sof_dai->cpu_dai_name)
+ continue;
+
+ if (!strcmp(dai_name, sof_dai->cpu_dai_name) &&
+ dir == sof_dai->comp_dai.direction) {
+ config = sof_dai->dai_config;
+
+ if (!config) {
+ dev_err(hda_stream->sdev->dev,
+ "error: no config for DAI %s\n",
+ sof_dai->name);
+ return -EINVAL;
+ }
+
+ /* update config with stream tag */
+ config->hda.link_dma_ch = channel;
+
+ /* send IPC */
+ ret = sof_ipc_tx_message(hda_stream->sdev->ipc,
+ config->hdr.cmd,
+ config,
+ config->hdr.size,
+ &reply, sizeof(reply));
+
+ if (ret < 0)
+ dev_err(hda_stream->sdev->dev,
+ "error: failed to set dai config for %s\n",
+ sof_dai->name);
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int hda_link_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_bus *bus = hstream->bus;
+ struct hdac_ext_stream *link_dev;
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct sof_intel_hda_stream *hda_stream;
+ struct hda_pipe_params p_params = {0};
+ struct hdac_ext_link *link;
+ int stream_tag;
+ int ret;
+
+ link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
+ if (!link)
+ return -EINVAL;
+
+ /* get stored dma data if resuming from system suspend */
+ link_dev = snd_soc_dai_get_dma_data(dai, substream);
+ if (!link_dev) {
+ link_dev = hda_link_stream_assign(bus, substream);
+ if (!link_dev)
+ return -EBUSY;
+
+ snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
+ }
+
+ stream_tag = hdac_stream(link_dev)->stream_tag;
+
+ hda_stream = hstream_to_sof_hda_stream(link_dev);
+
+ /* update the DSP with the new tag */
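+ /* the firmware expects a 0-based link DMA channel, hence stream_tag - 1 */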
+ ret = hda_link_config_ipc(hda_stream, dai->name, stream_tag - 1,
+ substream->stream);
+ if (ret < 0)
+ return ret;
+
+ /* set the hdac_stream in the codec dai */
+ snd_soc_dai_set_stream(codec_dai, hdac_stream(link_dev), substream->stream);
+
+ p_params.s_fmt = snd_pcm_format_width(params_format(params));
+ p_params.ch = params_channels(params);
+ p_params.s_freq = params_rate(params);
+ p_params.stream = substream->stream;
+ p_params.link_dma_id = stream_tag - 1;
+ p_params.link_index = link->index;
+ p_params.format = params_format(params);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ p_params.link_bps = codec_dai->driver->playback.sig_bits;
+ else
+ p_params.link_bps = codec_dai->driver->capture.sig_bits;
+
+ return hda_link_dma_params(link_dev, &p_params);
+}
+
+static int hda_link_pcm_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *link_dev =
+ snd_soc_dai_get_dma_data(dai, substream);
+ struct snd_sof_dev *sdev =
+ snd_soc_component_get_drvdata(dai->component);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ int stream = substream->stream;
+
+ if (link_dev->link_prepared)
+ return 0;
+
+ dev_dbg(sdev->dev, "hda: prepare stream dir %d\n", substream->stream);
+
+ return hda_link_hw_params(substream, &rtd->dpcm[stream].hw_params,
+ dai);
+}
+
+static int hda_link_pcm_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *link_dev =
+ snd_soc_dai_get_dma_data(dai, substream);
+ struct sof_intel_hda_stream *hda_stream;
+ struct snd_soc_pcm_runtime *rtd;
+ struct hdac_ext_link *link;
+ struct hdac_stream *hstream;
+ struct hdac_bus *bus;
+ int stream_tag;
+ int ret;
+
+ hstream = substream->runtime->private_data;
+ bus = hstream->bus;
+ rtd = asoc_substream_to_rtd(substream);
+
+ link = snd_hdac_ext_bus_get_link(bus, asoc_rtd_to_codec(rtd, 0)->component->name);
+ if (!link)
+ return -EINVAL;
+
+ hda_stream = hstream_to_sof_hda_stream(link_dev);
+
+ dev_dbg(dai->dev, "In %s cmd=%d\n", __func__, cmd);
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_RESUME:
+ /* set up hw_params */
+ ret = hda_link_pcm_prepare(substream, dai);
+ if (ret < 0) {
+ dev_err(dai->dev,
+ "error: setting up hw_params during resume\n");
+ return ret;
+ }
+
+ fallthrough;
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ snd_hdac_ext_link_stream_start(link_dev);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ /*
+ * clear link DMA channel. It will be assigned when
+ * hw_params is set up again after resume.
+ */
+ ret = hda_link_config_ipc(hda_stream, dai->name,
+ DMA_CHAN_INVALID, substream->stream);
+ if (ret < 0)
+ return ret;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ stream_tag = hdac_stream(link_dev)->stream_tag;
+ snd_hdac_ext_link_clear_stream_id(link, stream_tag);
+ }
+
+ link_dev->link_prepared = 0;
+
+ fallthrough;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ snd_hdac_ext_link_stream_clear(link_dev);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int hda_link_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ unsigned int stream_tag;
+ struct sof_intel_hda_stream *hda_stream;
+ struct hdac_bus *bus;
+ struct hdac_ext_link *link;
+ struct hdac_stream *hstream;
+ struct snd_soc_pcm_runtime *rtd;
+ struct hdac_ext_stream *link_dev;
+ int ret;
+
+ hstream = substream->runtime->private_data;
+ bus = hstream->bus;
+ rtd = asoc_substream_to_rtd(substream);
+ link_dev = snd_soc_dai_get_dma_data(dai, substream);
+
+ if (!link_dev) {
+ dev_dbg(dai->dev,
+ "%s: link_dev is not assigned\n", __func__);
+ return -EINVAL;
+ }
+
+ hda_stream = hstream_to_sof_hda_stream(link_dev);
+
+ /* free the link DMA channel in the FW */
+ ret = hda_link_config_ipc(hda_stream, dai->name, DMA_CHAN_INVALID,
+ substream->stream);
+ if (ret < 0)
+ return ret;
+
+ link = snd_hdac_ext_bus_get_link(bus, asoc_rtd_to_codec(rtd, 0)->component->name);
+ if (!link)
+ return -EINVAL;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ stream_tag = hdac_stream(link_dev)->stream_tag;
+ snd_hdac_ext_link_clear_stream_id(link, stream_tag);
+ }
+
+ snd_soc_dai_set_dma_data(dai, substream, NULL);
+ snd_hdac_ext_stream_release(link_dev, HDAC_EXT_STREAM_TYPE_LINK);
+ link_dev->link_prepared = 0;
+
+ /* free the host DMA channel reserved by hostless streams */
+ hda_stream->host_reserved = 0;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops hda_link_dai_ops = {
+ .hw_params = hda_link_hw_params,
+ .hw_free = hda_link_hw_free,
+ .trigger = hda_link_pcm_trigger,
+ .prepare = hda_link_pcm_prepare,
+};
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+#include "../compress.h"
+
+static struct snd_soc_cdai_ops sof_probe_compr_ops = {
+ .startup = sof_probe_compr_open,
+ .shutdown = sof_probe_compr_free,
+ .set_params = sof_probe_compr_set_params,
+ .trigger = sof_probe_compr_trigger,
+ .pointer = sof_probe_compr_pointer,
+};
+
+#endif
+#endif
+
+/*
+ * common dai driver for skl+ platforms.
+ * some products that use this DAI array only physically have a subset of
+ * the DAIs, but no harm is done here by adding the whole set.
+ */
+struct snd_soc_dai_driver skl_dai[] = {
+{
+ .name = "SSP0 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP1 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP2 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP3 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP4 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "SSP5 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "DMIC01 Pin",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 4,
+ },
+},
+{
+ .name = "DMIC16k Pin",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 4,
+ },
+},
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+{
+ .name = "iDisp1 Pin",
+ .ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "iDisp2 Pin",
+ .ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "iDisp3 Pin",
+ .ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "iDisp4 Pin",
+ .ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "Analog CPU DAI",
+ .ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+},
+{
+ .name = "Digital CPU DAI",
+ .ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+},
+{
+ .name = "Alt Analog CPU DAI",
+ .ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+},
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+{
+ .name = "Probe Extraction CPU DAI",
+ .compress_new = snd_soc_new_compress,
+ .cops = &sof_probe_compr_ops,
+ .capture = {
+ .stream_name = "Probe Extraction",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ },
+},
+#endif
+#endif
+};
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
new file mode 100644
index 000000000..85ec4361c
--- /dev/null
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -0,0 +1,964 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include "../sof-audio.h"
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+
+static bool hda_enable_trace_D0I3_S0;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
+module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
+MODULE_PARM_DESC(enable_trace_D0I3_S0,
+ "SOF HDA enable trace when the DSP is in D0I3 in S0");
+#endif
+
+/*
+ * DSP Core control.
+ */
+
+int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ u32 adspcs;
+ u32 reset;
+ int ret;
+
+ /* set reset bits for cores */
+ reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ reset, reset);
+
+ /* poll with timeout to check if operation successful */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ ((adspcs & reset) == reset),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
+
+ /* has core entered reset ? */
+ adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS);
+ if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
+ HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
+ dev_err(sdev->dev,
+ "error: reset enter failed: core_mask %x adspcs 0x%x\n",
+ core_mask, adspcs);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ unsigned int crst;
+ u32 adspcs;
+ int ret;
+
+ /* clear reset bits for cores */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CRST_MASK(core_mask),
+ 0);
+
+ /* poll with timeout to check if operation successful */
+ crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ !(adspcs & crst),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
+
+ /* has core left reset ? */
+ adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS);
+ if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
+ dev_err(sdev->dev,
+ "error: reset leave failed: core_mask %x adspcs 0x%x\n",
+ core_mask, adspcs);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ /* stall core */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+
+ /* set reset state */
+ return hda_dsp_core_reset_enter(sdev, core_mask);
+}
+
+int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ int ret;
+
+ /* leave reset state */
+ ret = hda_dsp_core_reset_leave(sdev, core_mask);
+ if (ret < 0)
+ return ret;
+
+ /* run core */
+ dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
+ 0);
+
+ /* is core now running ? */
+ if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
+ hda_dsp_core_stall_reset(sdev, core_mask);
+ dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
+ core_mask);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/*
+ * Power Management.
+ */
+
+int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ unsigned int cpa;
+ u32 adspcs;
+ int ret;
+
+ /* update bits */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_SPA_MASK(core_mask),
+ HDA_DSP_ADSPCS_SPA_MASK(core_mask));
+
+ /* poll with timeout to check if operation successful */
+ cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ (adspcs & cpa) == cpa,
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
+
+ /* did core power up ? */
+ adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS);
+ if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
+ HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
+ dev_err(sdev->dev,
+ "error: power up core failed core_mask %xadspcs 0x%x\n",
+ core_mask, adspcs);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ u32 adspcs;
+ int ret;
+
+ /* update bits */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ !(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+
+ return ret;
+}
+
+bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
+ unsigned int core_mask)
+{
+ int val;
+ bool is_enable;
+
+ val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
+
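+/* true only when every bit selected by field(m) is set in v */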
+#define MASK_IS_EQUAL(v, m, field) ({ \
+ u32 _m = field(m); \
+ ((v) & _m) == _m; \
+})
+
+ is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
+ MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
+ !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
+ !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+
+#undef MASK_IS_EQUAL
+
+ dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
+ is_enable, core_mask);
+
+ return is_enable;
+}
+
+int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ int ret;
+
+ /* restrict core_mask to host managed cores mask */
+ core_mask &= chip->host_managed_cores_mask;
+
+ /* return if core_mask is not valid or cores are already enabled */
+ if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
+ return 0;
+
+ /* power up */
+ ret = hda_dsp_core_power_up(sdev, core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
+ core_mask);
+ return ret;
+ }
+
+ return hda_dsp_core_run(sdev, core_mask);
+}
+
+int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
+ unsigned int core_mask)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ int ret;
+
+ /* restrict core_mask to host managed cores mask */
+ core_mask &= chip->host_managed_cores_mask;
+
+ /* return if core_mask is not valid */
+ if (!core_mask)
+ return 0;
+
+ /* place core in reset prior to power down */
+ ret = hda_dsp_core_stall_reset(sdev, core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
+ core_mask);
+ return ret;
+ }
+
+ /* power down core */
+ ret = hda_dsp_core_power_down(sdev, core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
+ core_mask, ret);
+ return ret;
+ }
+
+ /* make sure we are in OFF state */
+ if (hda_dsp_core_is_enabled(sdev, core_mask)) {
+ dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
+ core_mask, ret);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ /* enable IPC DONE and BUSY interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
+ HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);
+
+ /* enable IPC interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
+}
+
+void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ /* disable IPC interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_IPC, 0);
+
+ /* disable IPC BUSY and DONE interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
+}
+
+static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
+
+ while (snd_hdac_chip_readb(bus, VS_D0I3C) & SOF_HDA_VS_D0I3C_CIP) {
+ if (!retry--)
+ return -ETIMEDOUT;
+ usleep_range(10, 15);
+ }
+
+ return 0;
+}
+
+static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_pm_gate pm_gate;
+ struct sof_ipc_reply reply;
+
+ memset(&pm_gate, 0, sizeof(pm_gate));
+
+ /* configure pm_gate ipc message */
+ pm_gate.hdr.size = sizeof(pm_gate);
+ pm_gate.hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE;
+ pm_gate.flags = flags;
+
+ /* send pm_gate ipc to dsp */
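+ /* the no_pm variant is used because this runs while a power transition is already in progress */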
+ return sof_ipc_tx_message_no_pm(sdev->ipc, pm_gate.hdr.cmd,
+ &pm_gate, sizeof(pm_gate), &reply,
+ sizeof(reply));
+}
+
+static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int ret;
+
+ /* Write to D0I3C after Command-In-Progress bit is cleared */
+ ret = hda_dsp_wait_d0i3c_done(sdev);
+ if (ret < 0) {
+ dev_err(bus->dev, "CIP timeout before D0I3C update!\n");
+ return ret;
+ }
+
+ /* Update D0I3C register */
+ snd_hdac_chip_updateb(bus, VS_D0I3C, SOF_HDA_VS_D0I3C_I3, value);
+
+ /* Wait for cmd in progress to be cleared before exiting the function */
+ ret = hda_dsp_wait_d0i3c_done(sdev);
+ if (ret < 0) {
+ dev_err(bus->dev, "CIP timeout after D0I3C update!\n");
+ return ret;
+ }
+
+ dev_vdbg(bus->dev, "D0I3C updated, register = 0x%x\n",
+ snd_hdac_chip_readb(bus, VS_D0I3C));
+
+ return 0;
+}
+
+static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ u32 flags = 0;
+ int ret;
+ u8 value = 0;
+
+ /*
+ * Sanity check for illegal state transitions
+ * The only allowed transitions are:
+ * 1. D3 -> D0I0
+ * 2. D0I0 -> D0I3
+ * 3. D0I3 -> D0I0
+ */
+ switch (sdev->dsp_power_state.state) {
+ case SOF_DSP_PM_D0:
+ /* Follow the sequence below for D0 substate transitions */
+ break;
+ case SOF_DSP_PM_D3:
+ /* Follow regular flow for D3 -> D0 transition */
+ return 0;
+ default:
+ dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
+ sdev->dsp_power_state.state, target_state->state);
+ return -EINVAL;
+ }
+
+ /* Set flags and register value for D0 target substate */
+ if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
+ value = SOF_HDA_VS_D0I3C_I3;
+
+ /*
+ * Trace DMA needs to be disabled when the DSP enters
+ * D0I3 for S0Ix suspend, but it can be kept enabled
+ * when the DSP enters D0I3 while the system is in S0
+ * for debug purposes.
+ */
+ if (!sdev->dtrace_is_supported ||
+ !hda_enable_trace_D0I3_S0 ||
+ sdev->system_suspend_target != SOF_SUSPEND_NONE)
+ flags = HDA_PM_NO_DMA_TRACE;
+ } else {
+ /* prevent power gating in D0I0 */
+ flags = HDA_PM_PPG;
+ }
+
+ /* update D0I3C register */
+ ret = hda_dsp_update_d0i3c_register(sdev, value);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Notify the DSP of the state change.
+ * If this IPC fails, revert the D0I3C register update in order
+ * to prevent partial state change.
+ */
+ ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: PM_GATE ipc error %d\n", ret);
+ goto revert;
+ }
+
+ return ret;
+
+revert:
+ /* fallback to the previous register value */
+ value = value ? 0 : SOF_HDA_VS_D0I3C_I3;
+
+ /*
+ * Even if this fails, return the original IPC error to signal
+ * that the state change failed.
+ */
+ hda_dsp_update_d0i3c_register(sdev, value);
+
+ return ret;
+}
+
+/* helper to log DSP state */
+static void hda_dsp_state_log(struct snd_sof_dev *sdev)
+{
+ switch (sdev->dsp_power_state.state) {
+ case SOF_DSP_PM_D0:
+ switch (sdev->dsp_power_state.substate) {
+ case SOF_HDA_DSP_PM_D0I0:
+ dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
+ break;
+ case SOF_HDA_DSP_PM_D0I3:
+ dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
+ break;
+ default:
+ dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
+ sdev->dsp_power_state.substate);
+ break;
+ }
+ break;
+ case SOF_DSP_PM_D1:
+ dev_dbg(sdev->dev, "Current DSP power state: D1\n");
+ break;
+ case SOF_DSP_PM_D2:
+ dev_dbg(sdev->dev, "Current DSP power state: D2\n");
+ break;
+ case SOF_DSP_PM_D3_HOT:
+ dev_dbg(sdev->dev, "Current DSP power state: D3_HOT\n");
+ break;
+ case SOF_DSP_PM_D3:
+ dev_dbg(sdev->dev, "Current DSP power state: D3\n");
+ break;
+ case SOF_DSP_PM_D3_COLD:
+ dev_dbg(sdev->dev, "Current DSP power state: D3_COLD\n");
+ break;
+ default:
+ dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
+ sdev->dsp_power_state.state);
+ break;
+ }
+}
+
+/*
+ * All DSP power state transitions are initiated by the driver.
+ * If the requested state change fails, the error is simply returned.
+ * Further state transitions are attempted only when the set_power_state() op
+ * is called again either because of a new IPC sent to the DSP or
+ * during system suspend/resume.
+ */
+int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ int ret = 0;
+
+ /*
+ * When the DSP is already in D0I3 and the target state is D0I3,
+ * it could be the case that the DSP is in D0I3 during S0
+ * and the system is suspending to S0Ix. Therefore,
+ * hda_dsp_set_D0_state() must be called to disable trace DMA
+ * by sending the PM_GATE IPC to the FW.
+ */
+ if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
+ sdev->system_suspend_target == SOF_SUSPEND_S0IX)
+ goto set_state;
+
+ /*
+ * For all other cases, return without doing anything if
+ * the DSP is already in the target state.
+ */
+ if (target_state->state == sdev->dsp_power_state.state &&
+ target_state->substate == sdev->dsp_power_state.substate)
+ return 0;
+
+set_state:
+ switch (target_state->state) {
+ case SOF_DSP_PM_D0:
+ ret = hda_dsp_set_D0_state(sdev, target_state);
+ break;
+ case SOF_DSP_PM_D3:
+ /* The only allowed transition is: D0I0 -> D3 */
+ if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
+ sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
+ break;
+
+ dev_err(sdev->dev,
+ "error: transition from %d to %d not allowed\n",
+ sdev->dsp_power_state.state, target_state->state);
+ return -EINVAL;
+ default:
+ dev_err(sdev->dev, "error: target state unsupported %d\n",
+ target_state->state);
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "failed to set requested target DSP state %d substate %d\n",
+ target_state->state, target_state->substate);
+ return ret;
+ }
+
+ sdev->dsp_power_state = *target_state;
+ hda_dsp_state_log(sdev);
+ return ret;
+}
+
+/*
+ * Audio DSP states may transition as below:
+ *
+ * Opportunistic D0I3 in S0
+ * Runtime +---------------------+ Delayed D0i3 work timeout
+ * suspend | +--------------------+
+ * +------------+ D0I0(active) | |
+ * | | <---------------+ |
+ * | +--------> | New IPC | |
+ * | |Runtime +--^--+---------^--+--+ (via mailbox) | |
+ * | |resume | | | | | |
+ * | | | | | | | |
+ * | | System| | | | | |
+ * | | resume| | S3/S0IX | | | |
+ * | | | | suspend | | S0IX | |
+ * | | | | | |suspend | |
+ * | | | | | | | |
+ * | | | | | | | |
+ * +-v---+-----------+--v-------+ | | +------+----v----+
+ * | | | +-----------> |
+ * | D3 (suspended) | | | D0I3 |
+ * | | +--------------+ |
+ * | | System resume | |
+ * +----------------------------+ +----------------+
+ *
+ * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
+ * ignored the suspend trigger. Otherwise the DSP
+ * is in D3.
+ */
+
+static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+#endif
+ int ret;
+
+ hda_sdw_int_enable(sdev, false);
+
+ /* disable IPC interrupts */
+ hda_dsp_ipc_int_disable(sdev);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ if (runtime_suspend)
+ hda_codec_jack_wake_enable(sdev);
+
+ /* power down all hda link */
+ snd_hdac_ext_bus_link_power_down_all(bus);
+#endif
+
+ /* power down DSP */
+ ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to power down core during suspend\n");
+ return ret;
+ }
+
+ /* disable ppcap interrupt */
+ hda_dsp_ctrl_ppcap_enable(sdev, false);
+ hda_dsp_ctrl_ppcap_int_enable(sdev, false);
+
+ /* disable hda bus irq and streams */
+ hda_dsp_ctrl_stop_chip(sdev);
+
+ /* disable LP retention mode */
+ snd_sof_pci_update_bits(sdev, PCI_PGCTL,
+ PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);
+
+ /* reset controller */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to reset controller during suspend\n");
+ return ret;
+ }
+
+ /* display codec can be powered off after link reset */
+ hda_codec_i915_display_power(sdev, false);
+
+ return 0;
+}
+
+static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_link *hlink = NULL;
+#endif
+ int ret;
+
+ /* display codec must be powered before link reset */
+ hda_codec_i915_display_power(sdev, true);
+
+ /*
+ * clear TCSEL to clear playback on some HD Audio
+ * codecs. PCI TCSEL is defined in the Intel manuals.
+ */
+ snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
+
+ /* reset and start hda controller */
+ ret = hda_dsp_ctrl_init_chip(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to start controller after resume\n");
+ return ret;
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* check jack status */
+ if (runtime_resume) {
+ if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
+ hda_codec_jack_check(sdev);
+ }
+
+ /* turn off the links that were off before suspend */
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
+ if (!hlink->ref_count)
+ snd_hdac_ext_bus_link_power_down(hlink);
+ }
+
+ /* check dma status and clean up CORB/RIRB buffers */
+ if (!bus->cmd_dma_state)
+ snd_hdac_bus_stop_cmd_io(bus);
+#endif
+
+ /* enable ppcap interrupt */
+ hda_dsp_ctrl_ppcap_enable(sdev, true);
+ hda_dsp_ctrl_ppcap_int_enable(sdev, true);
+
+ return 0;
+}
+
+int hda_dsp_resume(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ .substate = SOF_HDA_DSP_PM_D0I0,
+ };
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_link *hlink = NULL;
+#endif
+ int ret;
+
+ /* resume from D0I3 */
+ if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
+ hda_codec_i915_display_power(sdev, true);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* power up links that were active before suspend */
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
+ if (hlink->ref_count) {
+ ret = snd_hdac_ext_bus_link_power_up(hlink);
+ if (ret < 0) {
+ dev_dbg(sdev->dev,
+ "error %x in %s: failed to power up links",
+ ret, __func__);
+ return ret;
+ }
+ }
+ }
+
+ /* set up CORB/RIRB buffers if they were on before suspend */
+ if (bus->cmd_dma_state)
+ snd_hdac_bus_init_cmd_io(bus);
+#endif
+
+ /* Set DSP power state */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_state);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
+ target_state.state, target_state.substate);
+ return ret;
+ }
+
+ /* restore L1SEN bit */
+ if (hda->l1_support_changed)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, 0);
+
+ /* restore and disable the system wakeup */
+ pci_restore_state(pci);
+ disable_irq_wake(pci->irq);
+ return 0;
+ }
+
+ /* init hda controller. DSP cores will be powered up during fw boot */
+ ret = hda_resume(sdev, false);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
+}
+
+int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
+{
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+ int ret;
+
+ /* init hda controller. DSP cores will be powered up during fw boot */
+ ret = hda_resume(sdev, true);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
+}
+
+int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *hbus = sof_to_bus(sdev);
+
+ if (hbus->codec_powered) {
+ dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
+ (unsigned int)hbus->codec_powered);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D3,
+ };
+ int ret;
+
+ /* cancel any attempt for DSP D0I3 */
+ cancel_delayed_work_sync(&hda->d0i3_work);
+
+ /* stop hda controller and power dsp off */
+ ret = hda_suspend(sdev, true);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
+}
+
+int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = target_state,
+ .substate = target_state == SOF_DSP_PM_D0 ?
+ SOF_HDA_DSP_PM_D0I3 : 0,
+ };
+ int ret;
+
+ /* cancel any attempt for DSP D0I3 */
+ cancel_delayed_work_sync(&hda->d0i3_work);
+
+ if (target_state == SOF_DSP_PM_D0) {
+ /* we can't keep a wakeref to display driver at suspend */
+ hda_codec_i915_display_power(sdev, false);
+
+ /* Set DSP power state */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
+ target_dsp_state.state,
+ target_dsp_state.substate);
+ return ret;
+ }
+
+ /* enable L1SEN to make sure the system can enter S0Ix */
+ hda->l1_support_changed =
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN,
+ HDA_VS_INTEL_EM2_L1SEN);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* stop the CORB/RIRB DMA if it is On */
+ if (bus->cmd_dma_state)
+ snd_hdac_bus_stop_cmd_io(bus);
+
+ /* no link can be powered in s0ix state */
+ ret = snd_hdac_ext_bus_link_power_down_all(bus);
+ if (ret < 0) {
+ dev_dbg(sdev->dev,
+ "error %d in %s: failed to power down links",
+ ret, __func__);
+ return ret;
+ }
+#endif
+
+ /* enable the system waking up via IPC IRQ */
+ enable_irq_wake(pci->irq);
+ pci_save_state(pci);
+ return 0;
+ }
+
+ /* stop hda controller and power dsp off */
+ ret = hda_suspend(sdev, false);
+ if (ret < 0) {
+ dev_err(bus->dev, "error: suspending dsp\n");
+ return ret;
+ }
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct snd_soc_pcm_runtime *rtd;
+ struct hdac_ext_stream *stream;
+ struct hdac_ext_link *link;
+ struct hdac_stream *s;
+ const char *name;
+ int stream_tag;
+
+ /* set internal flag for BE */
+ list_for_each_entry(s, &bus->stream_list, list) {
+ stream = stream_to_hdac_ext_stream(s);
+
+ /*
+ * clear stream. This should already be taken care of for running
+ * streams when the SUSPEND trigger is called. But paused
+ * streams do not get suspended, so this needs to be done
+ * explicitly during suspend.
+ */
+ if (stream->link_substream) {
+ rtd = asoc_substream_to_rtd(stream->link_substream);
+ name = asoc_rtd_to_codec(rtd, 0)->component->name;
+ link = snd_hdac_ext_bus_get_link(bus, name);
+ if (!link)
+ return -EINVAL;
+
+ stream->link_prepared = 0;
+
+ if (hdac_stream(stream)->direction ==
+ SNDRV_PCM_STREAM_CAPTURE)
+ continue;
+
+ stream_tag = hdac_stream(stream)->stream_tag;
+ snd_hdac_ext_link_clear_stream_id(link, stream_tag);
+ }
+ }
+#endif
+ return 0;
+}
+
+void hda_dsp_d0i3_work(struct work_struct *work)
+{
+ struct sof_intel_hda_dev *hdev = container_of(work,
+ struct sof_intel_hda_dev,
+ d0i3_work.work);
+ struct hdac_bus *bus = &hdev->hbus.core;
+ struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
+ struct sof_dsp_power_state target_state;
+ int ret;
+
+ target_state.state = SOF_DSP_PM_D0;
+
+ /* DSP can enter D0I3 iff only D0I3-compatible streams are active */
+ if (snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
+ target_state.substate = SOF_HDA_DSP_PM_D0I3;
+ else
+ target_state.substate = SOF_HDA_DSP_PM_D0I0;
+
+ /* remain in D0I0 */
+ if (target_state.substate == SOF_HDA_DSP_PM_D0I0)
+ return;
+
+ /* This can fail, but the error cannot be propagated */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_state);
+ if (ret < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: failed to set DSP state %d substate %d\n",
+ target_state.state, target_state.substate);
+}
diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c
new file mode 100644
index 000000000..acfeca426
--- /dev/null
+++ b/sound/soc/sof/intel/hda-ipc.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include "../ops.h"
+#include "hda.h"
+
+static void hda_dsp_ipc_host_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * tell DSP cmd is done - clear busy
+ * interrupt and send reply msg to dsp
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCT,
+ HDA_DSP_REG_HIPCT_BUSY,
+ HDA_DSP_REG_HIPCT_BUSY);
+
+ /* unmask BUSY interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_BUSY,
+ HDA_DSP_REG_HIPCCTL_BUSY);
+}
+
+static void hda_dsp_ipc_dsp_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * set DONE bit - tell the DSP we have received and processed
+ * its reply msg, so it should not resend the reply to the host
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCIE,
+ HDA_DSP_REG_HIPCIE_DONE,
+ HDA_DSP_REG_HIPCIE_DONE);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_DONE,
+ HDA_DSP_REG_HIPCCTL_DONE);
+}
+
+int hda_dsp_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ /* send IPC message to DSP */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
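+ /* ring the doorbell: setting BUSY in HIPCI tells the DSP a new message is in the mailbox */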
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI,
+ HDA_DSP_REG_HIPCI_BUSY);
+
+ return 0;
+}
+
+void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply reply;
+ struct sof_ipc_cmd_hdr *hdr;
+ int ret = 0;
+
+ /*
+ * Sometimes an unexpected reply IPC arrives that does not belong
+ * to any of the IPCs sent by the driver. In this case, the driver
+ * must ignore the IPC.
+ */
+ if (!msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
+ return;
+ }
+
+ hdr = msg->msg_data;
+ if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE) ||
+ hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
+ /*
+ * memory windows are powered off before sending IPC reply,
+ * so we can't read the mailbox for CTX_SAVE and PM_GATE
+ * replies.
+ */
+ reply.error = 0;
+ reply.hdr.cmd = SOF_IPC_GLB_REPLY;
+ reply.hdr.size = sizeof(reply);
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ goto out;
+ }
+
+ /* get IPC reply from DSP in the mailbox */
+ sof_mailbox_read(sdev, sdev->host_box.offset, &reply,
+ sizeof(reply));
+
+ if (reply.error < 0) {
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ ret = reply.error;
+ } else {
+ /* reply correct size ? */
+ if (reply.hdr.size != msg->reply_size &&
+ /* getter payload is never known upfront */
+ ((reply.hdr.cmd & SOF_GLB_TYPE_MASK) != SOF_IPC_GLB_PROBE)) {
+ dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
+ msg->reply_size, reply.hdr.size);
+ ret = -EINVAL;
+ }
+
+ /* read the message */
+ if (msg->reply_size > 0)
+ sof_mailbox_read(sdev, sdev->host_box.offset,
+ msg->reply_data, msg->reply_size);
+ }
+
+out:
+ msg->reply_error = ret;
+}
+
+/* IPC handler thread */
+irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 hipci;
+ u32 hipcie;
+ u32 hipct;
+ u32 hipcte;
+ u32 msg;
+ u32 msg_ext;
+ bool ipc_irq = false;
+
+ /* read IPC status */
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCIE);
+ hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
+ hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI);
+ hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCTE);
+
+ /* is this a reply message from the DSP */
+ if (hipcie & HDA_DSP_REG_HIPCIE_DONE) {
+ msg = hipci & HDA_DSP_REG_HIPCI_MSG_MASK;
+ msg_ext = hipcie & HDA_DSP_REG_HIPCIE_MSG_MASK;
+
+ dev_vdbg(sdev->dev,
+ "ipc: firmware response, msg:0x%x, msg_ext:0x%x\n",
+ msg, msg_ext);
+
+ /* mask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_DONE, 0);
+
+ /*
+ * Make sure the interrupt thread cannot be preempted between
+ * waking up the sender and re-enabling the interrupt. Also
+ * protect against a theoretical race with sof_ipc_tx_message():
+ * if the DSP is fast enough to receive an IPC message, reply to
+ * it, and the host interrupt processing calls this function on
+ * a different core from the one where the sending is taking
+ * place, the message might not yet be marked as expecting a
+ * reply.
+ */
+ spin_lock_irq(&sdev->ipc_lock);
+
+ /* handle immediate reply from DSP core */
+ hda_dsp_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, msg);
+
+ /* set the done bit */
+ hda_dsp_ipc_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+
+ ipc_irq = true;
+ }
+
+ /* is this a new message from DSP */
+ if (hipct & HDA_DSP_REG_HIPCT_BUSY) {
+ msg = hipct & HDA_DSP_REG_HIPCT_MSG_MASK;
+ msg_ext = hipcte & HDA_DSP_REG_HIPCTE_MSG_MASK;
+
+ dev_vdbg(sdev->dev,
+ "ipc: firmware initiated, msg:0x%x, msg_ext:0x%x\n",
+ msg, msg_ext);
+
+ /* mask BUSY interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_BUSY, 0);
+
+ /* handle messages from DSP */
+ if ((hipct & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ /* this is a PANIC message !! */
+ snd_sof_dsp_panic(sdev, HDA_DSP_PANIC_OFFSET(msg_ext));
+ } else {
+ /* normal message - process normally */
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ hda_dsp_ipc_host_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ if (!ipc_irq) {
+ /*
+ * This interrupt is not shared so no need to return IRQ_NONE.
+ */
+ dev_dbg_ratelimited(sdev->dev,
+ "nothing to do in IPC IRQ thread\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Check if an IPC IRQ occurred */
+bool hda_dsp_check_ipc_irq(struct snd_sof_dev *sdev)
+{
+ bool ret = false;
+ u32 irq_status;
+
+ /* store status */
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS);
+ dev_vdbg(sdev->dev, "irq handler: irq_status:0x%x\n", irq_status);
+
+ /* register read failed (all 1s) - controller not accessible, so not an IPC IRQ */
+ if (irq_status == 0xffffffff)
+ goto out;
+
+ /* IPC message ? */
+ if (irq_status & HDA_DSP_ADSPIS_IPC)
+ ret = true;
+
+out:
+ return ret;
+}
+
+int hda_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return HDA_DSP_MBOX_UPLINK_OFFSET;
+}
+
+int hda_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return SRAM_WINDOW_OFFSET(id);
+}
+
+void hda_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz)
+{
+ if (!substream || !sdev->stream_box.size) {
+ sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
+ } else {
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct sof_intel_hda_stream *hda_stream;
+
+ hda_stream = container_of(hstream,
+ struct sof_intel_hda_stream,
+ hda_stream.hstream);
+
+ /* The stream might already be closed */
+ if (hstream)
+ sof_mailbox_read(sdev, hda_stream->stream.posn_offset,
+ p, sz);
+ }
+}
+
+int hda_ipc_pcm_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct sof_intel_hda_stream *hda_stream;
+ /* validate offset */
+ size_t posn_offset = reply->posn_offset;
+
+ hda_stream = container_of(hstream, struct sof_intel_hda_stream,
+ hda_stream.hstream);
+
+ /* check for unaligned offset or overflow */
+ if (posn_offset > sdev->stream_box.size ||
+ posn_offset % sizeof(struct sof_ipc_stream_posn) != 0)
+ return -EINVAL;
+
+ hda_stream->stream.posn_offset = sdev->stream_box.offset + posn_offset;
+
+ dev_dbg(sdev->dev, "pcm: stream dir %d, posn mailbox offset is %zu",
+ substream->stream, hda_stream->stream.posn_offset);
+
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-ipc.h b/sound/soc/sof/intel/hda-ipc.h
new file mode 100644
index 000000000..10fbca593
--- /dev/null
+++ b/sound/soc/sof/intel/hda-ipc.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2019 Intel Corporation. All rights reserved.
+ *
+ * Author: Keyon Jie <yang.jie@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_HDA_IPC_H
+#define __SOF_INTEL_HDA_IPC_H
+
+/*
+ * Primary register, mapped to:
+ * - DIPCTDR (HIPCIDR) in sideband IPC (cAVS 1.8+)
+ * - DIPCT in cAVS 1.5 IPC
+ *
+ * Secondary register, mapped to:
+ * - DIPCTDD (HIPCIDD) in sideband IPC (cAVS 1.8+)
+ * - DIPCTE in cAVS 1.5 IPC
+ */
+
+/* Common bits in primary register */
+
+/* Reserved for doorbell */
+#define HDA_IPC_RSVD_31 BIT(31)
+/* Target, 0 - normal message, 1 - compact message (cAVS compatible) */
+#define HDA_IPC_MSG_COMPACT BIT(30)
+/* Direction, 0 - request, 1 - response */
+#define HDA_IPC_RSP BIT(29)
+
+#define HDA_IPC_TYPE_SHIFT 24
+#define HDA_IPC_TYPE_MASK GENMASK(28, 24)
+#define HDA_IPC_TYPE(x) ((x) << HDA_IPC_TYPE_SHIFT)
+
+#define HDA_IPC_PM_GATE HDA_IPC_TYPE(0x8U)
+
+/* Command specific payload bits in secondary register */
+
+/* Disable DMA tracing (0 - keep tracing, 1 - disable DMA trace) */
+#define HDA_PM_NO_DMA_TRACE BIT(4)
+/* Prevent clock gating (0 - cg allowed, 1 - DSP clock always on) */
+#define HDA_PM_PCG BIT(3)
+/* Prevent power gating (0 - deep power state transitions allowed, 1 - prevented) */
+#define HDA_PM_PPG BIT(2)
+/* Indicates whether streaming is active */
+#define HDA_PM_PG_STREAMING BIT(1)
+#define HDA_PM_PG_RSVD BIT(0)
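+
+/*
+ * Illustrative example (not part of this patch): a compact PM_GATE request
+ * can be composed as HDA_IPC_MSG_COMPACT | HDA_IPC_PM_GATE in the primary
+ * register, with payload flags such as HDA_PM_NO_DMA_TRACE or HDA_PM_PPG in
+ * the secondary register; exact flag usage is platform-code specific.
+ */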
+
+irqreturn_t cnl_ipc_irq_thread(int irq, void *context);
+int cnl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
+void cnl_ipc_dump(struct snd_sof_dev *sdev);
+
+#endif
diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
new file mode 100644
index 000000000..4012097a9
--- /dev/null
+++ b/sound/soc/sof/intel/hda-loader.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for HDA DSP code loader
+ */
+
+#include <linux/firmware.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "hda.h"
+
+#define HDA_FW_BOOT_ATTEMPTS 3
+#define HDA_CL_STREAM_FORMAT 0x40
+
+static struct hdac_ext_stream *cl_stream_prepare(struct snd_sof_dev *sdev, unsigned int format,
+ unsigned int size, struct snd_dma_buffer *dmab,
+ int direction)
+{
+ struct hdac_ext_stream *dsp_stream;
+ struct hdac_stream *hstream;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ int ret;
+
+ dsp_stream = hda_dsp_stream_get(sdev, direction);
+
+ if (!dsp_stream) {
+ dev_err(sdev->dev, "error: no stream available\n");
+ return ERR_PTR(-ENODEV);
+ }
+ hstream = &dsp_stream->hstream;
+ hstream->substream = NULL;
+
+ /* allocate DMA buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: memory alloc failed: %x\n", ret);
+ goto out_put;
+ }
+
+ hstream->period_bytes = 0; /* initialize period_bytes */
+ hstream->format_val = format;
+ hstream->bufsize = size;
+
+ if (direction == SNDRV_PCM_STREAM_CAPTURE) {
+ ret = hda_dsp_iccmax_stream_hw_params(sdev, dsp_stream, dmab, NULL);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: iccmax stream prepare failed: %x\n", ret);
+ goto out_free;
+ }
+ } else {
+ ret = hda_dsp_stream_hw_params(sdev, dsp_stream, dmab, NULL);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac prepare failed: %x\n", ret);
+ goto out_free;
+ }
+ hda_dsp_stream_spib_config(sdev, dsp_stream, HDA_DSP_SPIB_ENABLE, size);
+ }
+
+ return dsp_stream;
+
+out_free:
+ snd_dma_free_pages(dmab);
+out_put:
+ hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
+ return ERR_PTR(ret);
+}
+
+/*
+ * The first boot sequence has some extra steps:
+ * power on all host-managed cores, unstall/run only the boot core to boot the
+ * DSP, then power down any non-boot cores that were powered on.
+ */
+static int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ unsigned int status;
+ int ret;
+ int i;
+
+ /* step 1: power up corex */
+ ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
+ goto err;
+ }
+
+ /* DSP is powered up, set all SSPs to slave mode */
+ for (i = 0; i < chip->ssp_count; i++) {
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ chip->ssp_base_offset
+ + i * SSP_DEV_MEM_SIZE
+ + SSP_SSC1_OFFSET,
+ SSP_SET_SLAVE,
+ SSP_SET_SLAVE);
+ }
+
+ /* step 2: purge FW request */
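+ /*
+ * the purge request also encodes the zero-based code loader DMA
+ * stream tag, shifted into the bits starting at bit 9
+ */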
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req,
+ chip->ipc_req_mask | (HDA_DSP_IPC_PURGE_FW |
+ ((stream_tag - 1) << 9)));
+
+ /* step 3: unset core 0 reset state & unstall/run core 0 */
+ ret = hda_dsp_core_run(sdev, chip->init_core_mask);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev,
+ "error: dsp core start failed %d\n", ret);
+ ret = -EIO;
+ goto err;
+ }
+
+ /* step 4: wait for IPC DONE bit from ROM */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ chip->ipc_ack, status,
+ ((status & chip->ipc_ack_mask)
+ == chip->ipc_ack_mask),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_INIT_TIMEOUT_US);
+
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev,
+ "error: %s: timeout for HIPCIE done\n",
+ __func__);
+ goto err;
+ }
+
+ /* set DONE bit to clear the reply IPC message */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ chip->ipc_ack,
+ chip->ipc_ack_mask,
+ chip->ipc_ack_mask);
+
+ /* step 5: power down corex */
+ ret = hda_dsp_core_power_down(sdev, chip->host_managed_cores_mask & ~(BIT(0)));
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev,
+ "error: dsp core x power down failed\n");
+ goto err;
+ }
+
+ /* step 6: enable IPC interrupts */
+ hda_dsp_ipc_int_enable(sdev);
+
+ /* step 7: wait for ROM init */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_SRAM_REG_ROM_STATUS, status,
+ ((status & HDA_DSP_ROM_STS_MASK)
+ == HDA_DSP_ROM_INIT),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ chip->rom_init_timeout *
+ USEC_PER_MSEC);
+ if (!ret)
+ return 0;
+
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev,
+ "error: %s: timeout HDA_DSP_SRAM_REG_ROM_STATUS read\n",
+ __func__);
+
+err:
+ hda_dsp_dump(sdev, SOF_DBG_REGS | SOF_DBG_PCI | SOF_DBG_MBOX);
+ hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);
+
+ return ret;
+}
+
+static int cl_trigger(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream, int cmd)
+{
+ struct hdac_stream *hstream = &stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+
+ /* code loader is special case that reuses stream ops */
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ 1 << hstream->index,
+ 1 << hstream->index);
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ hstream->running = true;
+ return 0;
+ default:
+ return hda_dsp_stream_trigger(sdev, stream, cmd);
+ }
+}
+
+static int cl_cleanup(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
+ struct hdac_ext_stream *stream)
+{
+ struct hdac_stream *hstream = &stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ int ret = 0;
+
+ if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = hda_dsp_stream_spib_config(sdev, stream, HDA_DSP_SPIB_DISABLE, 0);
+ else
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_SD_CTL_DMA_START, 0);
+
+ hda_dsp_stream_put(sdev, hstream->direction, hstream->stream_tag);
+ hstream->running = 0;
+ hstream->substream = NULL;
+
+ /* reset BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL, 0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU, 0);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0);
+ snd_dma_free_pages(dmab);
+ dmab->area = NULL;
+ hstream->bufsize = 0;
+ hstream->format_val = 0;
+
+ return ret;
+}
+
+static int cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *stream)
+{
+ unsigned int reg;
+ int ret, status;
+
+ ret = cl_trigger(sdev, stream, SNDRV_PCM_TRIGGER_START);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: DMA trigger start failed\n");
+ return ret;
+ }
+
+ status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_SRAM_REG_ROM_STATUS, reg,
+ ((reg & HDA_DSP_ROM_STS_MASK)
+ == HDA_DSP_ROM_FW_ENTERED),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_BASEFW_TIMEOUT_US);
+
+ /*
+ * even in case of errors we still need to stop the DMAs,
+ * but we return the initial error should the DMA stop also fail
+ */
+
+ if (status < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout HDA_DSP_SRAM_REG_ROM_STATUS read\n",
+ __func__);
+ }
+
+ ret = cl_trigger(sdev, stream, SNDRV_PCM_TRIGGER_STOP);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: DMA trigger stop failed\n");
+ if (!status)
+ status = ret;
+ }
+
+ return status;
+}
+
+int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ struct hdac_ext_stream *iccmax_stream;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct firmware stripped_firmware;
+ int ret, ret1;
+ u8 original_gb;
+
+ /* save the original LTRP guardband value */
+ original_gb = snd_hdac_chip_readb(bus, VS_LTRP) & HDA_VS_INTEL_LTRP_GB_MASK;
+
+ if (plat_data->fw->size <= plat_data->fw_offset) {
+ dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
+ return -EINVAL;
+ }
+
+ stripped_firmware.size = plat_data->fw->size - plat_data->fw_offset;
+
+ /* prepare capture stream for ICCMAX */
+ iccmax_stream = cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT, stripped_firmware.size,
+ &sdev->dmab_bdl, SNDRV_PCM_STREAM_CAPTURE);
+ if (IS_ERR(iccmax_stream)) {
+ dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n");
+ return PTR_ERR(iccmax_stream);
+ }
+
+ ret = hda_dsp_cl_boot_firmware(sdev);
+
+ /*
+ * Perform iccmax stream cleanup. This should be done even if firmware loading fails.
+ * If the cleanup also fails, we return the initial error
+ */
+ ret1 = cl_cleanup(sdev, &sdev->dmab_bdl, iccmax_stream);
+ if (ret1 < 0) {
+ dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n");
+
+ /* set return value to indicate cleanup failure */
+ if (!ret)
+ ret = ret1;
+ }
+
+ /* restore the original guardband value after FW boot */
+ snd_hdac_chip_updateb(bus, VS_LTRP, HDA_VS_INTEL_LTRP_GB_MASK, original_gb);
+
+ return ret;
+}
+
+int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const struct sof_dev_desc *desc = plat_data->desc;
+ const struct sof_intel_dsp_desc *chip_info;
+ struct hdac_ext_stream *stream;
+ struct firmware stripped_firmware;
+ int ret, ret1, i;
+
+ chip_info = desc->chip_info;
+
+ if (plat_data->fw->size <= plat_data->fw_offset) {
+ dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
+ return -EINVAL;
+ }
+
+ stripped_firmware.data = plat_data->fw->data + plat_data->fw_offset;
+ stripped_firmware.size = plat_data->fw->size - plat_data->fw_offset;
+
+ /* init for booting wait */
+ init_waitqueue_head(&sdev->boot_wait);
+
+ /* prepare DMA for code loader stream */
+ stream = cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT, stripped_firmware.size,
+ &sdev->dmab, SNDRV_PCM_STREAM_PLAYBACK);
+ if (IS_ERR(stream)) {
+ dev_err(sdev->dev, "error: dma prepare for fw loading failed\n");
+ return PTR_ERR(stream);
+ }
+
+ memcpy(sdev->dmab.area, stripped_firmware.data,
+ stripped_firmware.size);
+
+ /* try ROM init a few times before giving up */
+ for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
+ dev_dbg(sdev->dev,
+ "Attempting iteration %d of Core En/ROM load...\n", i);
+
+ hda->boot_iteration = i + 1;
+ ret = cl_dsp_init(sdev, stream->hstream.stream_tag);
+
+ /* don't retry anymore if successful */
+ if (!ret)
+ break;
+ }
+
+ if (i == HDA_FW_BOOT_ATTEMPTS) {
+ dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
+ i, ret);
+ dev_err(sdev->dev, "ROM error=0x%x: FW status=0x%x\n",
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_SRAM_REG_ROM_ERROR),
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_SRAM_REG_ROM_STATUS));
+ goto cleanup;
+ }
+
+ /*
+ * When a SoundWire link is in clock stop state, a Slave
+ * device may trigger in-band wakes for events such as jack
+ * insertion or acoustic event detection. This event will lead
+ * to a WAKEEN interrupt, handled by the PCI device and routed
+ * to PME if the PCI device is in D3. The resume function in
+ * audio PCI driver will be invoked by ACPI for PME event and
+ * initialize the device and process WAKEEN interrupt.
+ *
+ * The WAKEEN interrupt should be processed ASAP to prevent an
+ * interrupt flood, otherwise other interrupts, such IPC,
+ * cannot work normally. The WAKEEN is handled after the ROM
+ * is initialized successfully, which ensures power rails are
+ * enabled before accessing the SoundWire SHIM registers
+ */
+ if (!sdev->first_boot)
+ hda_sdw_process_wakeen(sdev);
+
+ /*
+ * at this point DSP ROM has been initialized and
+ * should be ready for code loading and firmware boot
+ */
+ ret = cl_copy_fw(sdev, stream);
+ if (!ret)
+ dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
+ else
+ dev_err(sdev->dev, "error: load fw failed ret: %d\n", ret);
+
+cleanup:
+ /*
+ * Perform codeloader stream cleanup.
+ * This should be done even if firmware loading fails.
+ * If the cleanup also fails, we return the initial error
+ */
+ ret1 = cl_cleanup(sdev, &sdev->dmab, stream);
+ if (ret1 < 0) {
+ dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");
+
+ /* set return value to indicate cleanup failure */
+ if (!ret)
+ ret = ret1;
+ }
+
+ /*
+ * return primary core id if both fw copy
+ * and stream clean up are successful
+ */
+ if (!ret)
+ return chip_info->init_core_mask;
+
+ /* dump dsp registers and disable DSP upon error */
+ hda_dsp_dump(sdev, SOF_DBG_REGS | SOF_DBG_PCI | SOF_DBG_MBOX);
+
+ /* disable DSP */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR,
+ SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_GPROCEN, 0);
+ return ret;
+}
+
+/* pre fw run operations */
+int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev)
+{
+ /* disable clock gating and power gating */
+ return hda_dsp_ctrl_clock_power_gating(sdev, false);
+}
+
+/* post fw run operations */
+int hda_dsp_post_fw_run(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ if (sdev->first_boot) {
+ ret = hda_sdw_startup(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: could not startup SoundWire links\n");
+ return ret;
+ }
+ }
+
+ hda_sdw_int_enable(sdev, true);
+
+ /* re-enable clock gating and power gating */
+ return hda_dsp_ctrl_clock_power_gating(sdev, true);
+}
diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
new file mode 100644
index 000000000..b527d5958
--- /dev/null
+++ b/sound/soc/sof/intel/hda-pcm.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/hda_register.h>
+#include <sound/pcm_params.h>
+#include "../sof-audio.h"
+#include "../ops.h"
+#include "hda.h"
+
+#define SDnFMT_BASE(x) ((x) << 14)
+#define SDnFMT_MULT(x) (((x) - 1) << 11)
+#define SDnFMT_DIV(x) (((x) - 1) << 8)
+#define SDnFMT_BITS(x) ((x) << 4)
+#define SDnFMT_CHAN(x) ((x) << 0)
+
+u32 hda_dsp_get_mult_div(struct snd_sof_dev *sdev, int rate)
+{
+ switch (rate) {
+ case 8000:
+ return SDnFMT_DIV(6);
+ case 9600:
+ return SDnFMT_DIV(5);
+ case 11025:
+ return SDnFMT_BASE(1) | SDnFMT_DIV(4);
+ case 16000:
+ return SDnFMT_DIV(3);
+ case 22050:
+ return SDnFMT_BASE(1) | SDnFMT_DIV(2);
+ case 32000:
+ return SDnFMT_DIV(3) | SDnFMT_MULT(2);
+ case 44100:
+ return SDnFMT_BASE(1);
+ case 48000:
+ return 0;
+ case 88200:
+ return SDnFMT_BASE(1) | SDnFMT_MULT(2);
+ case 96000:
+ return SDnFMT_MULT(2);
+ case 176400:
+ return SDnFMT_BASE(1) | SDnFMT_MULT(4);
+ case 192000:
+ return SDnFMT_MULT(4);
+ default:
+ dev_warn(sdev->dev, "can't find div rate %d using 48kHz\n",
+ rate);
+ return 0; /* use 48KHz if not found */
+ }
+};
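+
+/*
+ * For example, 32 kHz is derived from the 48 kHz base clock as
+ * 48000 * 2 / 3, hence SDnFMT_MULT(2) | SDnFMT_DIV(3) above.
+ */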
+
+u32 hda_dsp_get_bits(struct snd_sof_dev *sdev, int sample_bits)
+{
+ switch (sample_bits) {
+ case 8:
+ return SDnFMT_BITS(0);
+ case 16:
+ return SDnFMT_BITS(1);
+ case 20:
+ return SDnFMT_BITS(2);
+ case 24:
+ return SDnFMT_BITS(3);
+ case 32:
+ return SDnFMT_BITS(4);
+ default:
+ dev_warn(sdev->dev, "can't find %d bits using 16bit\n",
+ sample_bits);
+ return SDnFMT_BITS(1); /* use 16bits format if not found */
+ }
+};
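+
+/*
+ * Worked example: a 44.1 kHz, 16-bit, stereo stream is described by
+ * SDnFMT_BASE(1) | SDnFMT_BITS(1) | (2 - 1), which is how
+ * hda_dsp_pcm_hw_params() below combines rate | bits | (channels - 1).
+ */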
+
+int hda_dsp_pcm_hw_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct sof_ipc_stream_params *ipc_params)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *stream = stream_to_hdac_ext_stream(hstream);
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct snd_dma_buffer *dmab;
+ struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
+ int ret;
+ u32 size, rate, bits;
+
+ size = params_buffer_bytes(params);
+ rate = hda_dsp_get_mult_div(sdev, params_rate(params));
+ bits = hda_dsp_get_bits(sdev, params_width(params));
+
+ hstream->substream = substream;
+
+ dmab = substream->runtime->dma_buffer_p;
+
+ hstream->format_val = rate | bits | (params_channels(params) - 1);
+ hstream->bufsize = size;
+ hstream->period_bytes = params_period_bytes(params);
+ hstream->no_period_wakeup =
+ (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
+ (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
+
+ ret = hda_dsp_stream_hw_params(sdev, stream, dmab, params);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac prepare failed: %x\n", ret);
+ return ret;
+ }
+
+ /* disable SPIB, to enable buffer wrap for stream */
+ hda_dsp_stream_spib_config(sdev, stream, HDA_DSP_SPIB_DISABLE, 0);
+
+ /* update no_stream_position flag for ipc params */
+ if (hda && hda->no_ipc_position) {
+ /* For older ABIs set host_period_bytes to zero to inform
+ * FW we don't want position updates. Newer versions use
+ * no_stream_position for this purpose.
+ */
+ if (v->abi_version < SOF_ABI_VER(3, 10, 0))
+ ipc_params->host_period_bytes = 0;
+ else
+ ipc_params->no_stream_position = 1;
+ }
+
+ ipc_params->stream_tag = hstream->stream_tag;
+
+ return 0;
+}
+
+int hda_dsp_pcm_trigger(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *stream = stream_to_hdac_ext_stream(hstream);
+
+ return hda_dsp_stream_trigger(sdev, stream, cmd);
+}
+
+snd_pcm_uframes_t hda_dsp_pcm_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_component *scomp = sdev->component;
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct snd_sof_pcm *spcm;
+ snd_pcm_uframes_t pos;
+
+ spcm = snd_sof_find_spcm_dai(scomp, rtd);
+ if (!spcm) {
+ dev_warn_ratelimited(sdev->dev, "warn: can't find PCM with DAI ID %d\n",
+ rtd->dai_link->id);
+ return 0;
+ }
+
+ if (hda && !hda->no_ipc_position) {
+ /* read position from IPC position */
+ pos = spcm->stream[substream->stream].posn.host_posn;
+ goto found;
+ }
+
+ /*
+ * DPIB/posbuf position mode:
+ * For playback, use the DPIB register from HDA space, which
+ * reflects the actual data transferred.
+ * For capture, use the position buffer as the pointer, since DPIB
+ * is not accurate enough: its update may complete earlier than the
+ * data is written to DDR.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ AZX_REG_VS_SDXDPIB_XBASE +
+ (AZX_REG_VS_SDXDPIB_XINTERVAL *
+ hstream->index));
+ } else {
+ /*
+ * For capture streams, additional workarounds are needed to fix
+ * incorrect position reporting:
+ *
+ * 1. Wait at least 20us after the interrupt (IOC) is generated
+ * before reading the position buffer, to make sure the position
+ * update happens on a frame boundary, i.e. 20.833us for 48kHz.
+ * 2. Perform a dummy read of the DPIB register to flush the DMA
+ * position value.
+ * 3. Read the DMA position from posbuf; the readback value should
+ * now be >= the period boundary.
+ */
+ usleep_range(20, 21);
+ snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ AZX_REG_VS_SDXDPIB_XBASE +
+ (AZX_REG_VS_SDXDPIB_XINTERVAL *
+ hstream->index));
+ pos = snd_hdac_stream_get_pos_posbuf(hstream);
+ }
+
+ if (pos >= hstream->bufsize)
+ pos = 0;
+
+found:
+ pos = bytes_to_frames(substream->runtime, pos);
+
+ dev_vdbg(sdev->dev, "PCM: stream %d dir %d position %lu\n",
+ hstream->index, substream->stream, pos);
+ return pos;
+}
+
+int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_ext_stream *dsp_stream;
+ int direction = substream->stream;
+
+ dsp_stream = hda_dsp_stream_get(sdev, direction);
+
+ if (!dsp_stream) {
+ dev_err(sdev->dev, "error: no stream available\n");
+ return -ENODEV;
+ }
+
+ /* binding pcm substream to hda stream */
+ substream->runtime->private_data = &dsp_stream->hstream;
+ return 0;
+}
+
+int hda_dsp_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ int direction = substream->stream;
+ int ret;
+
+ ret = hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
+
+ if (ret) {
+ dev_dbg(sdev->dev, "stream %s not opened!\n", substream->name);
+ return -ENODEV;
+ }
+
+ /* unbinding pcm substream to hda stream */
+ substream->runtime->private_data = NULL;
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
new file mode 100644
index 000000000..0e09ede92
--- /dev/null
+++ b/sound/soc/sof/intel/hda-stream.c
@@ -0,0 +1,947 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <linux/pm_runtime.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-audio.h"
+#include "hda.h"
+
+#define HDA_LTRP_GB_VALUE_US 95
+
+/*
+ * set up one of the BDL entries for a stream
+ */
+static int hda_setup_bdle(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct hdac_stream *stream,
+ struct sof_intel_dsp_bdl **bdlp,
+ int offset, int size, int ioc)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct sof_intel_dsp_bdl *bdl = *bdlp;
+
+ while (size > 0) {
+ dma_addr_t addr;
+ int chunk;
+
+ if (stream->frags >= HDA_DSP_MAX_BDL_ENTRIES) {
+ dev_err(sdev->dev, "error: stream frags exceeded\n");
+ return -EINVAL;
+ }
+
+ addr = snd_sgbuf_get_addr(dmab, offset);
+ /* program BDL addr */
+ bdl->addr_l = cpu_to_le32(lower_32_bits(addr));
+ bdl->addr_h = cpu_to_le32(upper_32_bits(addr));
+ /* program BDL size */
+ chunk = snd_sgbuf_get_chunk_size(dmab, offset, size);
+ /* one BDLE should not cross a 4K boundary */
+ if (bus->align_bdle_4k) {
+ u32 remain = 0x1000 - (offset & 0xfff);
+
+ if (chunk > remain)
+ chunk = remain;
+ }
+ bdl->size = cpu_to_le32(chunk);
+ /* only program IOC when the whole segment is processed */
+ size -= chunk;
+ bdl->ioc = (size || !ioc) ? 0 : cpu_to_le32(0x01);
+ bdl++;
+ stream->frags++;
+ offset += chunk;
+
+ dev_vdbg(sdev->dev, "bdl, frags:%d, chunk size:0x%x;\n",
+ stream->frags, chunk);
+ }
+
+ *bdlp = bdl;
+ return offset;
+}
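+
+/*
+ * Example: with align_bdle_4k set, a segment that starts 3 KiB into a page
+ * is first limited to a 1 KiB entry so that no single BDLE crosses a 4 KiB
+ * boundary; IOC is set only on the entry that completes the segment, and
+ * only when ioc is requested.
+ */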
+
+/*
+ * set up Buffer Descriptor List (BDL) for host memory transfer
+ * BDL describes the location of the individual buffers and is little endian.
+ */
+int hda_dsp_stream_setup_bdl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct hdac_stream *stream)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct sof_intel_dsp_bdl *bdl;
+ int i, offset, period_bytes, periods;
+ int remain, ioc;
+
+ period_bytes = stream->period_bytes;
+ dev_dbg(sdev->dev, "period_bytes:0x%x\n", period_bytes);
+ if (!period_bytes)
+ period_bytes = stream->bufsize;
+
+ periods = stream->bufsize / period_bytes;
+
+ dev_dbg(sdev->dev, "periods:%d\n", periods);
+
+ remain = stream->bufsize % period_bytes;
+ if (remain)
+ periods++;
+
+ /* program the initial BDL entries */
+ bdl = (struct sof_intel_dsp_bdl *)stream->bdl.area;
+ offset = 0;
+ stream->frags = 0;
+
+ /*
+ * set IOC if we don't use position IPC
+ * and period wakeup is needed.
+ */
+ ioc = hda->no_ipc_position ?
+ !stream->no_period_wakeup : 0;
+
+ for (i = 0; i < periods; i++) {
+ if (i == (periods - 1) && remain)
+ /* set the last small entry */
+ offset = hda_setup_bdle(sdev, dmab,
+ stream, &bdl, offset,
+ remain, 0);
+ else
+ offset = hda_setup_bdle(sdev, dmab,
+ stream, &bdl, offset,
+ period_bytes, ioc);
+ }
+
+ return offset;
+}
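+
+/*
+ * For instance, a 64 KiB buffer with 16 KiB periods is described by four
+ * periods, each mapped to one or more BDL entries depending on how the SG
+ * buffer is chunked; any remainder is appended as a final short entry
+ * without IOC.
+ */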
+
+int hda_dsp_stream_spib_config(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream,
+ int enable, u32 size)
+{
+ struct hdac_stream *hstream = &stream->hstream;
+ u32 mask;
+
+ if (!sdev->bar[HDA_DSP_SPIB_BAR]) {
+ dev_err(sdev->dev, "error: address of spib capability is NULL\n");
+ return -EINVAL;
+ }
+
+ mask = (1 << hstream->index);
+
+ /* enable/disable SPIB for the stream */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_SPIB_BAR,
+ SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL, mask,
+ enable << hstream->index);
+
+ /* set the SPIB value */
+ sof_io_write(sdev, stream->spib_addr, size);
+
+ return 0;
+}
+
+/* get next unused stream */
+struct hdac_ext_stream *
+hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct sof_intel_hda_stream *hda_stream;
+ struct hdac_ext_stream *stream = NULL;
+ struct hdac_stream *s;
+
+ spin_lock_irq(&bus->reg_lock);
+
+ /* get an unused stream */
+ list_for_each_entry(s, &bus->stream_list, list) {
+ if (s->direction == direction && !s->opened) {
+ stream = stream_to_hdac_ext_stream(s);
+ hda_stream = container_of(stream,
+ struct sof_intel_hda_stream,
+ hda_stream);
+ /* check if the host DMA channel is reserved */
+ if (hda_stream->host_reserved)
+ continue;
+
+ s->opened = true;
+ break;
+ }
+ }
+
+ spin_unlock_irq(&bus->reg_lock);
+
+ /* stream found ? */
+ if (!stream)
+ dev_err(sdev->dev, "error: no free %s streams\n",
+ direction == SNDRV_PCM_STREAM_PLAYBACK ?
+ "playback" : "capture");
+
+ /*
+ * Disable DMI Link L1 entry when capture stream is opened.
+ * Workaround to address a known issue with host DMA that results
+ * in xruns during pause/release in capture scenarios.
+ */
+ if (!IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+ if (stream && direction == SNDRV_PCM_STREAM_CAPTURE)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, 0);
+
+ return stream;
+}
+
+/* free a stream */
+int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *s;
+ bool active_capture_stream = false;
+ bool found = false;
+
+ spin_lock_irq(&bus->reg_lock);
+
+ /*
+ * close stream matching the stream tag
+ * and check if there are any open capture streams.
+ */
+ list_for_each_entry(s, &bus->stream_list, list) {
+ if (!s->opened)
+ continue;
+
+ if (s->direction == direction && s->stream_tag == stream_tag) {
+ s->opened = false;
+ found = true;
+ } else if (s->direction == SNDRV_PCM_STREAM_CAPTURE) {
+ active_capture_stream = true;
+ }
+ }
+
+ spin_unlock_irq(&bus->reg_lock);
+
+ /* Enable DMI L1 entry if there are no capture streams open */
+ if (!IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+ if (!active_capture_stream)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN,
+ HDA_VS_INTEL_EM2_L1SEN);
+
+ if (!found) {
+ dev_dbg(sdev->dev, "stream_tag %d not opened!\n", stream_tag);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream, int cmd)
+{
+ struct hdac_stream *hstream = &stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ u32 dma_start = SOF_HDA_SD_CTL_DMA_START;
+ int ret;
+ u32 run;
+
+ /* cmd must be for audio stream */
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_START:
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ 1 << hstream->index,
+ 1 << hstream->index);
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev,
+ HDA_DSP_HDA_BAR,
+ sd_offset, run,
+ ((run & dma_start) == dma_start),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_STREAM_RUN_TIMEOUT);
+
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: cmd %d: timeout on STREAM_SD_OFFSET read\n",
+ __func__, cmd);
+ return ret;
+ }
+
+ hstream->running = true;
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_STOP:
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK, 0x0);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
+ sd_offset, run,
+ !(run & dma_start),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_STREAM_RUN_TIMEOUT);
+
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: cmd %d: timeout on STREAM_SD_OFFSET read\n",
+ __func__, cmd);
+ return ret;
+ }
+
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset +
+ SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ hstream->running = false;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ 1 << hstream->index, 0x0);
+ break;
+ default:
+ dev_err(sdev->dev, "error: unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* minimal recommended programming for ICCMAX stream */
+int hda_dsp_iccmax_stream_hw_params(struct snd_sof_dev *sdev, struct hdac_ext_stream *stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *hstream = &stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ int ret;
+ u32 mask = 0x1 << hstream->index;
+
+ if (!stream) {
+ dev_err(sdev->dev, "error: no stream available\n");
+ return -ENODEV;
+ }
+
+ if (hstream->posbuf)
+ *hstream->posbuf = 0;
+
+ /* reset BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
+ 0x0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
+ 0x0);
+
+ hstream->frags = 0;
+
+ ret = hda_dsp_stream_setup_bdl(sdev, dmab, hstream);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: set up of BDL failed\n");
+ return ret;
+ }
+
+ /* program BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
+ (u32)hstream->bdl.addr);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
+ upper_32_bits(hstream->bdl.addr));
+
+ /* program cyclic buffer length */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_CBL,
+ hstream->bufsize);
+
+ /* program last valid index */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_LVI,
+ 0xffff, (hstream->frags - 1));
+
+ /* decouple host and link DMA, enable DSP features */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, mask);
+
+ /* Follow HW recommendation to set the guardband value to 95us during FW boot */
+ snd_hdac_chip_updateb(bus, VS_LTRP, HDA_VS_INTEL_LTRP_GB_MASK, HDA_LTRP_GB_VALUE_US);
+
+ /* start DMA */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_SD_CTL_DMA_START, SOF_HDA_SD_CTL_DMA_START);
+
+ return 0;
+}
+
+/*
+ * prepare common hdac register settings, for both the code loader
+ * and normal streams.
+ */
+int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *hstream = &stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ int ret, timeout = HDA_DSP_STREAM_RESET_TIMEOUT;
+ u32 dma_start = SOF_HDA_SD_CTL_DMA_START;
+ u32 val, mask;
+ u32 run;
+
+ if (!stream) {
+ dev_err(sdev->dev, "error: no stream available\n");
+ return -ENODEV;
+ }
+
+ /* decouple host and link DMA */
+ mask = 0x1 << hstream->index;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, mask);
+
+ if (!dmab) {
+ dev_err(sdev->dev, "error: no dma buffer allocated!\n");
+ return -ENODEV;
+ }
+
+ /* clear stream status */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_DMA_SD_INT_MASK |
+ SOF_HDA_SD_CTL_DMA_START, 0);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
+ sd_offset, run,
+ !(run & dma_start),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_STREAM_RUN_TIMEOUT);
+
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on STREAM_SD_OFFSET read1\n",
+ __func__);
+ return ret;
+ }
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ /* stream reset */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset, 0x1,
+ 0x1);
+ udelay(3);
+ do {
+ val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ sd_offset);
+ if (val & 0x1)
+ break;
+ } while (--timeout);
+ if (timeout == 0) {
+ dev_err(sdev->dev, "error: stream reset failed\n");
+ return -ETIMEDOUT;
+ }
+
+ timeout = HDA_DSP_STREAM_RESET_TIMEOUT;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset, 0x1,
+ 0x0);
+
+ /* wait for hardware to report that stream is out of reset */
+ udelay(3);
+ do {
+ val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ sd_offset);
+ if ((val & 0x1) == 0)
+ break;
+ } while (--timeout);
+ if (timeout == 0) {
+ dev_err(sdev->dev, "error: timeout waiting for stream reset\n");
+ return -ETIMEDOUT;
+ }
+
+ if (hstream->posbuf)
+ *hstream->posbuf = 0;
+
+ /* reset BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
+ 0x0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
+ 0x0);
+
+ /* clear stream status */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_DMA_SD_INT_MASK |
+ SOF_HDA_SD_CTL_DMA_START, 0);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
+ sd_offset, run,
+ !(run & dma_start),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_STREAM_RUN_TIMEOUT);
+
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on STREAM_SD_OFFSET read2\n",
+ __func__);
+ return ret;
+ }
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ hstream->frags = 0;
+
+ ret = hda_dsp_stream_setup_bdl(sdev, dmab, hstream);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: set up of BDL failed\n");
+ return ret;
+ }
+
+ /* program stream tag to set up stream descriptor for DMA */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_SD_CTL_STREAM_TAG_MASK,
+ hstream->stream_tag <<
+ SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT);
+
+ /* program cyclic buffer length */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_CBL,
+ hstream->bufsize);
+
+ /*
+ * Recommended hardware programming sequence for HDAudio DMA format
+ *
+ * 1. Put DMA into coupled mode by clearing PPCTL.PROCEN bit
+ * for corresponding stream index before the time of writing
+ * format to SDxFMT register.
+ * 2. Write SDxFMT
+ * 3. Set PPCTL.PROCEN bit for corresponding stream index to
+ * enable decoupled mode
+ */
+
+ /* couple host and link DMA, disable DSP features */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, 0);
+
+ /* program stream format */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset +
+ SOF_HDA_ADSP_REG_CL_SD_FORMAT,
+ 0xffff, hstream->format_val);
+
+ /* decouple host and link DMA, enable DSP features */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, mask);
+
+ /* program last valid index */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_LVI,
+ 0xffff, (hstream->frags - 1));
+
+ /* program BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
+ (u32)hstream->bdl.addr);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
+ upper_32_bits(hstream->bdl.addr));
+
+ /* enable position buffer */
+ if (!(snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE)
+ & SOF_HDA_ADSP_DPLBASE_ENABLE)) {
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPUBASE,
+ upper_32_bits(bus->posbuf.addr));
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE,
+ (u32)bus->posbuf.addr |
+ SOF_HDA_ADSP_DPLBASE_ENABLE);
+ }
+
+ /* set interrupt enable bits */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ /* read FIFO size */
+ if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ hstream->fifo_size =
+ snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ sd_offset +
+ SOF_HDA_ADSP_REG_CL_SD_FIFOSIZE);
+ hstream->fifo_size &= 0xffff;
+ hstream->fifo_size += 1;
+ } else {
+ hstream->fifo_size = 0;
+ }
+
+ return ret;
+}
+
+int hda_dsp_stream_hw_free(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *stream = substream->runtime->private_data;
+ struct hdac_ext_stream *link_dev = container_of(stream,
+ struct hdac_ext_stream,
+ hstream);
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ u32 mask = 0x1 << stream->index;
+
+ spin_lock_irq(&bus->reg_lock);
+ /* couple host and link DMA if link DMA channel is idle */
+ if (!link_dev->link_locked)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR,
+ SOF_HDA_REG_PP_PPCTL, mask, 0);
+ spin_unlock_irq(&bus->reg_lock);
+
+ stream->substream = NULL;
+
+ return 0;
+}
+
+bool hda_dsp_check_stream_irq(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ bool ret = false;
+ u32 status;
+
+ /* This function can be called from the IRQ thread, so use spin_lock_irq */
+ spin_lock_irq(&bus->reg_lock);
+
+ status = snd_hdac_chip_readl(bus, INTSTS);
+ dev_vdbg(bus->dev, "stream irq, INTSTS status: 0x%x\n", status);
+
+ /* if the register is inaccessible, ignore it */
+ if (status != 0xffffffff)
+ ret = true;
+
+ spin_unlock_irq(&bus->reg_lock);
+
+ return ret;
+}
+
+static void
+hda_dsp_set_bytes_transferred(struct hdac_stream *hstream, u64 buffer_size)
+{
+ u64 prev_pos, pos, num_bytes;
+
+ div64_u64_rem(hstream->curr_pos, buffer_size, &prev_pos);
+ pos = snd_hdac_stream_get_pos_posbuf(hstream);
+
+ if (pos < prev_pos)
+ num_bytes = (buffer_size - prev_pos) + pos;
+ else
+ num_bytes = pos - prev_pos;
+
+ hstream->curr_pos += num_bytes;
+}
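+
+/*
+ * Example: with a 4096-byte ring buffer, prev_pos 3840 and a new posbuf
+ * position of 256, the DMA has wrapped, so (4096 - 3840) + 256 = 512 newly
+ * transferred bytes are added to curr_pos.
+ */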
+
+static bool hda_dsp_stream_check(struct hdac_bus *bus, u32 status)
+{
+ struct sof_intel_hda_dev *sof_hda = bus_to_sof_hda(bus);
+ struct hdac_stream *s;
+ bool active = false;
+ u32 sd_status;
+
+ list_for_each_entry(s, &bus->stream_list, list) {
+ if (status & BIT(s->index) && s->opened) {
+ sd_status = snd_hdac_stream_readb(s, SD_STS);
+
+ dev_vdbg(bus->dev, "stream %d status 0x%x\n",
+ s->index, sd_status);
+
+ snd_hdac_stream_writeb(s, SD_STS, sd_status);
+
+ active = true;
+ if ((!s->substream && !s->cstream) ||
+ !s->running ||
+ (sd_status & SOF_HDA_CL_DMA_SD_INT_COMPLETE) == 0)
+ continue;
+
+ /* Inform ALSA only if the position is not reported via IPC */
+ if (s->substream && sof_hda->no_ipc_position) {
+ snd_sof_pcm_period_elapsed(s->substream);
+ } else if (s->cstream) {
+ hda_dsp_set_bytes_transferred(s,
+ s->cstream->runtime->buffer_size);
+ snd_compr_fragment_elapsed(s->cstream);
+ }
+ }
+ }
+
+ return active;
+}
+
+irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ u32 rirb_status;
+#endif
+ bool active;
+ u32 status;
+ int i;
+
+ /*
+ * Loop 10 times to handle missed interrupts caused by
+ * unsolicited responses from the codec
+ */
+ for (i = 0, active = true; i < 10 && active; i++) {
+ spin_lock_irq(&bus->reg_lock);
+
+ status = snd_hdac_chip_readl(bus, INTSTS);
+
+ /* check streams */
+ active = hda_dsp_stream_check(bus, status);
+
+ /* check and clear RIRB interrupt */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ if (status & AZX_INT_CTRL_EN) {
+ rirb_status = snd_hdac_chip_readb(bus, RIRBSTS);
+ if (rirb_status & RIRB_INT_MASK) {
+ /*
+ * Clearing the interrupt status here ensures
+ * that no interrupt gets masked after the RIRB
+ * wp is read in snd_hdac_bus_update_rirb.
+ */
+ snd_hdac_chip_writeb(bus, RIRBSTS,
+ RIRB_INT_MASK);
+ active = true;
+ if (rirb_status & RIRB_INT_RESPONSE)
+ snd_hdac_bus_update_rirb(bus);
+ }
+ }
+#endif
+ spin_unlock_irq(&bus->reg_lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int hda_dsp_stream_init(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_stream *stream;
+ struct hdac_stream *hstream;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ struct sof_intel_hda_dev *sof_hda = bus_to_sof_hda(bus);
+ int sd_offset;
+ int i, num_playback, num_capture, num_total, ret;
+ u32 gcap;
+
+ gcap = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCAP);
+ dev_dbg(sdev->dev, "hda global caps = 0x%x\n", gcap);
+
+ /* get stream count from GCAP */
+ num_capture = (gcap >> 8) & 0x0f;
+ num_playback = (gcap >> 12) & 0x0f;
+ num_total = num_playback + num_capture;
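+ /* e.g. a GCAP value of 0x9701 reports 7 capture and 9 playback streams */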
+
+ dev_dbg(sdev->dev, "detected %d playback and %d capture streams\n",
+ num_playback, num_capture);
+
+ if (num_playback >= SOF_HDA_PLAYBACK_STREAMS) {
+ dev_err(sdev->dev, "error: too many playback streams %d\n",
+ num_playback);
+ return -EINVAL;
+ }
+
+ if (num_capture >= SOF_HDA_CAPTURE_STREAMS) {
+ dev_err(sdev->dev, "error: too many capture streams %d\n",
+ num_capture);
+ return -EINVAL;
+ }
+
+ /*
+ * mem alloc for the position buffer
+ * TODO: check position buffer update
+ */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ SOF_HDA_DPIB_ENTRY_SIZE * num_total,
+ &bus->posbuf);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: posbuffer dma alloc failed\n");
+ return -ENOMEM;
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* mem alloc for the CORB/RIRB ringbuffers */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ PAGE_SIZE, &bus->rb);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: RB alloc failed\n");
+ return -ENOMEM;
+ }
+#endif
+
+ /* create capture streams */
+ for (i = 0; i < num_capture; i++) {
+ struct sof_intel_hda_stream *hda_stream;
+
+ hda_stream = devm_kzalloc(sdev->dev, sizeof(*hda_stream),
+ GFP_KERNEL);
+ if (!hda_stream)
+ return -ENOMEM;
+
+ hda_stream->sdev = sdev;
+
+ stream = &hda_stream->hda_stream;
+
+ stream->pphc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPHC_BASE + SOF_HDA_PPHC_INTERVAL * i;
+
+ stream->pplc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPLC_BASE + SOF_HDA_PPLC_MULTI * num_total +
+ SOF_HDA_PPLC_INTERVAL * i;
+
+ /* do we support SPIB */
+ if (sdev->bar[HDA_DSP_SPIB_BAR]) {
+ stream->spib_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_SPIB;
+
+ stream->fifo_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_MAXFIFO;
+ }
+
+ hstream = &stream->hstream;
+ hstream->bus = bus;
+ hstream->sd_int_sta_mask = 1 << i;
+ hstream->index = i;
+ sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ hstream->sd_addr = sdev->bar[HDA_DSP_HDA_BAR] + sd_offset;
+ hstream->stream_tag = i + 1;
+ hstream->opened = false;
+ hstream->running = false;
+ hstream->direction = SNDRV_PCM_STREAM_CAPTURE;
+
+ /* memory alloc for stream BDL */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ HDA_DSP_BDL_SIZE, &hstream->bdl);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: stream bdl dma alloc failed\n");
+ return -ENOMEM;
+ }
+ hstream->posbuf = (__le32 *)(bus->posbuf.area +
+ (hstream->index) * 8);
+
+ list_add_tail(&hstream->list, &bus->stream_list);
+ }
+
+ /* create playback streams */
+ for (i = num_capture; i < num_total; i++) {
+ struct sof_intel_hda_stream *hda_stream;
+
+ hda_stream = devm_kzalloc(sdev->dev, sizeof(*hda_stream),
+ GFP_KERNEL);
+ if (!hda_stream)
+ return -ENOMEM;
+
+ hda_stream->sdev = sdev;
+
+ stream = &hda_stream->hda_stream;
+
+ /* we always have DSP support */
+ stream->pphc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPHC_BASE + SOF_HDA_PPHC_INTERVAL * i;
+
+ stream->pplc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPLC_BASE + SOF_HDA_PPLC_MULTI * num_total +
+ SOF_HDA_PPLC_INTERVAL * i;
+
+ /* do we support SPIB */
+ if (sdev->bar[HDA_DSP_SPIB_BAR]) {
+ stream->spib_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_SPIB;
+
+ stream->fifo_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_MAXFIFO;
+ }
+
+ hstream = &stream->hstream;
+ hstream->bus = bus;
+ hstream->sd_int_sta_mask = 1 << i;
+ hstream->index = i;
+ sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ hstream->sd_addr = sdev->bar[HDA_DSP_HDA_BAR] + sd_offset;
+ hstream->stream_tag = i - num_capture + 1;
+ hstream->opened = false;
+ hstream->running = false;
+ hstream->direction = SNDRV_PCM_STREAM_PLAYBACK;
+
+ /* mem alloc for stream BDL */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ HDA_DSP_BDL_SIZE, &hstream->bdl);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: stream bdl dma alloc failed\n");
+ return -ENOMEM;
+ }
+
+ hstream->posbuf = (__le32 *)(bus->posbuf.area +
+ (hstream->index) * 8);
+
+ list_add_tail(&hstream->list, &bus->stream_list);
+ }
+
+ /* store total stream count (playback + capture) from GCAP */
+ sof_hda->stream_max = num_total;
+
+ return 0;
+}
+
+void hda_dsp_stream_free(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *s, *_s;
+ struct hdac_ext_stream *stream;
+ struct sof_intel_hda_stream *hda_stream;
+
+ /* free position buffer */
+ if (bus->posbuf.area)
+ snd_dma_free_pages(&bus->posbuf);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* free CORB/RIRB ring buffers */
+ if (bus->rb.area)
+ snd_dma_free_pages(&bus->rb);
+#endif
+
+ list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
+ /* TODO: decouple */
+
+ /* free bdl buffer */
+ if (s->bdl.area)
+ snd_dma_free_pages(&s->bdl);
+ list_del(&s->list);
+ stream = stream_to_hdac_ext_stream(s);
+ hda_stream = container_of(stream, struct sof_intel_hda_stream,
+ hda_stream);
+ devm_kfree(sdev->dev, hda_stream);
+ }
+}
diff --git a/sound/soc/sof/intel/hda-trace.c b/sound/soc/sof/intel/hda-trace.c
new file mode 100644
index 000000000..1eb746d5a
--- /dev/null
+++ b/sound/soc/sof/intel/hda-trace.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/hdaudio_ext.h>
+#include "../ops.h"
+#include "hda.h"
+
+static int hda_dsp_trace_prepare(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_ext_stream *stream = hda->dtrace_stream;
+ struct hdac_stream *hstream = &stream->hstream;
+ struct snd_dma_buffer *dmab = &sdev->dmatb;
+ int ret;
+
+ hstream->period_bytes = 0; /* initialize period_bytes */
+ hstream->bufsize = sdev->dmatb.bytes;
+
+ ret = hda_dsp_stream_hw_params(sdev, stream, dmab, NULL);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: hdac prepare failed: %x\n", ret);
+
+ return ret;
+}
+
+int hda_dsp_trace_init(struct snd_sof_dev *sdev, u32 *stream_tag)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ int ret;
+
+ hda->dtrace_stream = hda_dsp_stream_get(sdev,
+ SNDRV_PCM_STREAM_CAPTURE);
+
+ if (!hda->dtrace_stream) {
+ dev_err(sdev->dev,
+ "error: no available capture stream for DMA trace\n");
+ return -ENODEV;
+ }
+
+ *stream_tag = hda->dtrace_stream->hstream.stream_tag;
+
+ /*
+ * initialize capture stream, set BDL address and return corresponding
+ * stream tag which will be sent to the firmware by IPC message.
+ */
+ ret = hda_dsp_trace_prepare(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac trace init failed: %x\n", ret);
+ hda_dsp_stream_put(sdev, SNDRV_PCM_STREAM_CAPTURE, *stream_tag);
+ hda->dtrace_stream = NULL;
+ *stream_tag = 0;
+ }
+
+ return ret;
+}
+
+int hda_dsp_trace_release(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_stream *hstream;
+
+ if (hda->dtrace_stream) {
+ hstream = &hda->dtrace_stream->hstream;
+ hda_dsp_stream_put(sdev,
+ SNDRV_PCM_STREAM_CAPTURE,
+ hstream->stream_tag);
+ hda->dtrace_stream = NULL;
+ return 0;
+ }
+
+ dev_dbg(sdev->dev, "DMA trace stream is not opened!\n");
+ return -ENODEV;
+}
+
+int hda_dsp_trace_trigger(struct snd_sof_dev *sdev, int cmd)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+
+ return hda_dsp_stream_trigger(sdev, hda->dtrace_stream, cmd);
+}
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
new file mode 100644
index 000000000..b4cc72483
--- /dev/null
+++ b/sound/soc/sof/intel/hda.c
@@ -0,0 +1,1247 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_intel.h>
+#include <sound/intel-nhlt.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include "../sof-audio.h"
+#include "../ops.h"
+#include "hda.h"
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+#include <sound/soc-acpi-intel-match.h>
+#endif
+
+/* platform specific devices */
+#include "shim.h"
+
+#define EXCEPT_MAX_HDR_SIZE 0x400
+#define HDA_EXT_ROM_STATUS_SIZE 8
+
+static const struct sof_intel_dsp_desc
+ *get_chip_info(struct snd_sof_pdata *pdata)
+{
+ const struct sof_dev_desc *desc = pdata->desc;
+ const struct sof_intel_dsp_desc *chip_info;
+
+ chip_info = desc->chip_info;
+
+ return chip_info;
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+
+/*
+ * The default for SoundWire clock stop quirks is to power gate the IP
+ * and do a Bus Reset; this will need to be modified when the DSP
+ * needs to remain in D0i3, so that the Master does not lose context
+ * and enumeration is not required on clock restart.
+ */
+static int sdw_clock_stop_quirks = SDW_INTEL_CLK_STOP_BUS_RESET;
+module_param(sdw_clock_stop_quirks, int, 0444);
+MODULE_PARM_DESC(sdw_clock_stop_quirks, "SOF SoundWire clock stop quirks");
+
+static int sdw_params_stream(struct device *dev,
+ struct sdw_intel_stream_params_data *params_data)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct snd_soc_dai *d = params_data->dai;
+ struct sof_ipc_dai_config config;
+ struct sof_ipc_reply reply;
+ int link_id = params_data->link_id;
+ int alh_stream_id = params_data->alh_stream_id;
+ int ret;
+ u32 size = sizeof(config);
+
+ memset(&config, 0, size);
+ config.hdr.size = size;
+ config.hdr.cmd = SOF_IPC_GLB_DAI_MSG | SOF_IPC_DAI_CONFIG;
+ config.type = SOF_DAI_INTEL_ALH;
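+ /* dai_index packs the link ID in the upper byte, the DAI ID in the lower */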
+ config.dai_index = (link_id << 8) | (d->id);
+ config.alh.stream_id = alh_stream_id;
+
+ /* send message to DSP */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ config.hdr.cmd, &config, size, &reply,
+ sizeof(reply));
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to set DAI hw_params for link %d dai->id %d ALH %d\n",
+ link_id, d->id, alh_stream_id);
+ }
+
+ return ret;
+}
+
+static int sdw_free_stream(struct device *dev,
+ struct sdw_intel_stream_free_data *free_data)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct snd_soc_dai *d = free_data->dai;
+ struct sof_ipc_dai_config config;
+ struct sof_ipc_reply reply;
+ int link_id = free_data->link_id;
+ int ret;
+ u32 size = sizeof(config);
+
+ memset(&config, 0, size);
+ config.hdr.size = size;
+ config.hdr.cmd = SOF_IPC_GLB_DAI_MSG | SOF_IPC_DAI_CONFIG;
+ config.type = SOF_DAI_INTEL_ALH;
+ config.dai_index = (link_id << 8) | d->id;
+ config.alh.stream_id = 0xFFFF; /* invalid value on purpose */
+
+ /* send message to DSP */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ config.hdr.cmd, &config, size, &reply,
+ sizeof(reply));
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to free stream for link %d dai->id %d\n",
+ link_id, d->id);
+ }
+
+ return ret;
+}
+
+static const struct sdw_intel_ops sdw_callback = {
+ .params_stream = sdw_params_stream,
+ .free_stream = sdw_free_stream,
+};
+
+void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ sdw_intel_enable_irq(sdev->bar[HDA_DSP_BAR], enable);
+}
+
+static int hda_sdw_acpi_scan(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ acpi_handle handle;
+ int ret;
+
+ handle = ACPI_HANDLE(sdev->dev);
+
+ /* save ACPI info for the probe step */
+ hdev = sdev->pdata->hw_pdata;
+
+ ret = sdw_intel_acpi_scan(handle, &hdev->info);
+ if (ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hda_sdw_probe(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ struct sdw_intel_res res;
+ void *sdw;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ memset(&res, 0, sizeof(res));
+
+ res.mmio_base = sdev->bar[HDA_DSP_BAR];
+ res.irq = sdev->ipc_irq;
+ res.handle = hdev->info.handle;
+ res.parent = sdev->dev;
+ res.ops = &sdw_callback;
+ res.dev = sdev->dev;
+ res.clock_stop_quirks = sdw_clock_stop_quirks;
+
+ /*
+ * ops and arg fields are not populated for now,
+ * they will be needed when the DAI callbacks are
+ * provided
+ */
+
+	/* we could filter links here if needed, e.g. for quirks */
+ res.count = hdev->info.count;
+ res.link_mask = hdev->info.link_mask;
+
+ sdw = sdw_intel_probe(&res);
+ if (!sdw) {
+ dev_err(sdev->dev, "error: SoundWire probe failed\n");
+ return -EINVAL;
+ }
+
+ /* save context */
+ hdev->sdw = sdw;
+
+ return 0;
+}
+
+int hda_sdw_startup(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ if (!hdev->sdw)
+ return 0;
+
+ return sdw_intel_startup(hdev->sdw);
+}
+
+static int hda_sdw_exit(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ hda_sdw_int_enable(sdev, false);
+
+ if (hdev->sdw)
+ sdw_intel_exit(hdev->sdw);
+ hdev->sdw = NULL;
+
+ return 0;
+}
+
+static bool hda_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ bool ret = false;
+ u32 irq_status;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ if (!hdev->sdw)
+ return ret;
+
+ /* store status */
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS2);
+
+ /* invalid message ? */
+ if (irq_status == 0xffffffff)
+ goto out;
+
+ /* SDW message ? */
+ if (irq_status & HDA_DSP_REG_ADSPIS2_SNDW)
+ ret = true;
+
+out:
+ return ret;
+}
+
+static irqreturn_t hda_dsp_sdw_thread(int irq, void *context)
+{
+ return sdw_intel_thread(irq, context);
+}
+
+static bool hda_sdw_check_wakeen_irq(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+ if (hdev->sdw &&
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_SNDW_WAKE_STS))
+ return true;
+
+ return false;
+}
+
+void hda_sdw_process_wakeen(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+ if (!hdev->sdw)
+ return;
+
+ sdw_intel_process_wakeen_event(hdev->sdw);
+}
+
+#endif
+
+/*
+ * Debug
+ */
+
+struct hda_dsp_msg_code {
+ u32 code;
+ const char *msg;
+};
+
+static bool hda_use_msi = IS_ENABLED(CONFIG_PCI);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
+module_param_named(use_msi, hda_use_msi, bool, 0444);
+MODULE_PARM_DESC(use_msi, "SOF HDA use PCI MSI mode");
+#endif
+
+static char *hda_model;
+module_param(hda_model, charp, 0444);
+MODULE_PARM_DESC(hda_model, "Use the given HDA board model.");
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+static int hda_dmic_num = -1;
+module_param_named(dmic_num, hda_dmic_num, int, 0444);
+MODULE_PARM_DESC(dmic_num, "SOF HDA DMIC number");
+
+static bool hda_codec_use_common_hdmi = IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI);
+module_param_named(use_common_hdmi, hda_codec_use_common_hdmi, bool, 0444);
+MODULE_PARM_DESC(use_common_hdmi, "SOF HDA use common HDMI codec driver");
+#endif
+
+static const struct hda_dsp_msg_code hda_dsp_rom_msg[] = {
+ {HDA_DSP_ROM_FW_MANIFEST_LOADED, "status: manifest loaded"},
+ {HDA_DSP_ROM_FW_FW_LOADED, "status: fw loaded"},
+ {HDA_DSP_ROM_FW_ENTERED, "status: fw entered"},
+ {HDA_DSP_ROM_CSE_ERROR, "error: cse error"},
+ {HDA_DSP_ROM_CSE_WRONG_RESPONSE, "error: cse wrong response"},
+ {HDA_DSP_ROM_IMR_TO_SMALL, "error: IMR too small"},
+ {HDA_DSP_ROM_BASE_FW_NOT_FOUND, "error: base fw not found"},
+ {HDA_DSP_ROM_CSE_VALIDATION_FAILED, "error: signature verification failed"},
+ {HDA_DSP_ROM_IPC_FATAL_ERROR, "error: ipc fatal error"},
+ {HDA_DSP_ROM_L2_CACHE_ERROR, "error: L2 cache error"},
+ {HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL, "error: load offset too small"},
+ {HDA_DSP_ROM_API_PTR_INVALID, "error: API ptr invalid"},
+ {HDA_DSP_ROM_BASEFW_INCOMPAT, "error: base fw incompatible"},
+ {HDA_DSP_ROM_UNHANDLED_INTERRUPT, "error: unhandled interrupt"},
+ {HDA_DSP_ROM_MEMORY_HOLE_ECC, "error: ECC memory hole"},
+ {HDA_DSP_ROM_KERNEL_EXCEPTION, "error: kernel exception"},
+ {HDA_DSP_ROM_USER_EXCEPTION, "error: user exception"},
+ {HDA_DSP_ROM_UNEXPECTED_RESET, "error: unexpected reset"},
+ {HDA_DSP_ROM_NULL_FW_ENTRY, "error: null FW entry point"},
+};
+
+static void hda_dsp_get_status_skl(struct snd_sof_dev *sdev)
+{
+ u32 status;
+ int i;
+
+ status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_ADSP_FW_STATUS_SKL);
+
+ for (i = 0; i < ARRAY_SIZE(hda_dsp_rom_msg); i++) {
+ if (status == hda_dsp_rom_msg[i].code) {
+ dev_err(sdev->dev, "%s - code %8.8x\n",
+ hda_dsp_rom_msg[i].msg, status);
+ return;
+ }
+ }
+
+ /* not for us, must be generic sof message */
+ dev_dbg(sdev->dev, "unknown ROM status value %8.8x\n", status);
+}
+
+static void hda_dsp_get_status(struct snd_sof_dev *sdev)
+{
+ u32 status;
+ int i;
+
+ status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_SRAM_REG_ROM_STATUS);
+
+ for (i = 0; i < ARRAY_SIZE(hda_dsp_rom_msg); i++) {
+ if (status == hda_dsp_rom_msg[i].code) {
+ dev_err(sdev->dev, "%s - code %8.8x\n",
+ hda_dsp_rom_msg[i].msg, status);
+ return;
+ }
+ }
+
+ /* not for us, must be generic sof message */
+ dev_dbg(sdev->dev, "unknown ROM status value %8.8x\n", status);
+}
+
+static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
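+	/*
+	 * the exception region is laid out as the xtensa oops registers,
+	 * followed by the panic info, then the stack dump
+	 */
+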
+ /* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* note: variable AR register array is not read */
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+ offset += xoops->arch_hdr.totalsize;
+ sof_block_read(sdev, sdev->mmio_bar, offset,
+ panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ sof_block_read(sdev, sdev->mmio_bar, offset, stack,
+ stack_words * sizeof(u32));
+}
+
+void hda_dsp_dump_skl(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[HDA_DSP_STACK_DUMP_SIZE];
+ u32 status, panic;
+
+	/* try SKL specific status message types first */
+ hda_dsp_get_status_skl(sdev);
+
+ /* now try generic SOF status messages */
+ status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_ADSP_ERROR_CODE_SKL);
+
+	/*
+	 * TODO: there is no define for this offset in the spec, but it is
+	 * used in the code
+	 */
+ panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_ADSP_ERROR_CODE_SKL + 0x4);
+
+ if (sdev->fw_state == SOF_FW_BOOT_COMPLETE) {
+ hda_dsp_get_registers(sdev, &xoops, &panic_info, stack,
+ HDA_DSP_STACK_DUMP_SIZE);
+ snd_sof_get_status(sdev, status, panic, &xoops, &panic_info,
+ stack, HDA_DSP_STACK_DUMP_SIZE);
+ } else {
+ dev_err(sdev->dev, "error: status = 0x%8.8x panic = 0x%8.8x\n",
+ status, panic);
+ hda_dsp_get_status_skl(sdev);
+ }
+}
+
+/* dump the first 8 dwords representing the extended ROM status */
+static void hda_dsp_dump_ext_rom_status(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ char msg[128];
+ int len = 0;
+ u32 value;
+ int i;
+
+ for (i = 0; i < HDA_EXT_ROM_STATUS_SIZE; i++) {
+ value = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_ROM_STATUS + i * 0x4);
+ len += snprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
+ }
+
+ sof_dev_dbg_or_err(sdev->dev, hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS,
+ "extended rom status: %s", msg);
+}
+
+void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[HDA_DSP_STACK_DUMP_SIZE];
+ u32 status, panic;
+
+ /* try APL specific status message types first */
+ hda_dsp_get_status(sdev);
+
+ /* now try generic SOF status messages */
+ status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_SRAM_REG_FW_STATUS);
+ panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_TRACEP);
+
+ if (sdev->fw_state == SOF_FW_BOOT_COMPLETE) {
+ hda_dsp_get_registers(sdev, &xoops, &panic_info, stack,
+ HDA_DSP_STACK_DUMP_SIZE);
+ snd_sof_get_status(sdev, status, panic, &xoops, &panic_info,
+ stack, HDA_DSP_STACK_DUMP_SIZE);
+ } else {
+ sof_dev_dbg_or_err(sdev->dev, hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS,
+ "status = 0x%8.8x panic = 0x%8.8x\n",
+ status, panic);
+
+ hda_dsp_dump_ext_rom_status(sdev);
+ hda_dsp_get_status(sdev);
+ }
+}
+
+void hda_ipc_irq_dump(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ u32 adspis;
+ u32 intsts;
+ u32 intctl;
+ u32 ppsts;
+ u8 rirbsts;
+
+ /* read key IRQ stats and config registers */
+ adspis = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS);
+ intsts = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS);
+ intctl = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL);
+ ppsts = snd_sof_dsp_read(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPSTS);
+ rirbsts = snd_hdac_chip_readb(bus, RIRBSTS);
+
+ dev_err(sdev->dev,
+ "error: hda irq intsts 0x%8.8x intlctl 0x%8.8x rirb %2.2x\n",
+ intsts, intctl, rirbsts);
+ dev_err(sdev->dev,
+ "error: dsp irq ppsts 0x%8.8x adspis 0x%8.8x\n",
+ ppsts, adspis);
+}
+
+void hda_ipc_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipcie;
+ u32 hipct;
+ u32 hipcctl;
+
+ hda_ipc_irq_dump(sdev);
+
+ /* read IPC status */
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
+ hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL);
+
+ /* dump the IPC regs */
+ /* TODO: parse the raw msg */
+ dev_err(sdev->dev,
+ "error: host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
+ hipcie, hipct, hipcctl);
+}
+
+static int hda_init(struct snd_sof_dev *sdev)
+{
+ struct hda_bus *hbus;
+ struct hdac_bus *bus;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ int ret;
+
+ hbus = sof_to_hbus(sdev);
+ bus = sof_to_bus(sdev);
+
+ /* HDA bus init */
+ sof_hda_bus_init(bus, &pci->dev);
+
+ bus->use_posbuf = 1;
+ bus->bdl_pos_adj = 0;
+ bus->sync_write = 1;
+
+ mutex_init(&hbus->prepare_mutex);
+ hbus->pci = pci;
+ hbus->mixer_assigned = -1;
+ hbus->modelname = hda_model;
+
+ /* initialise hdac bus */
+ bus->addr = pci_resource_start(pci, 0);
+#if IS_ENABLED(CONFIG_PCI)
+ bus->remap_addr = pci_ioremap_bar(pci, 0);
+#endif
+ if (!bus->remap_addr) {
+ dev_err(bus->dev, "error: ioremap error\n");
+ return -ENXIO;
+ }
+
+ /* HDA base */
+ sdev->bar[HDA_DSP_HDA_BAR] = bus->remap_addr;
+
+ /* init i915 and HDMI codecs */
+ ret = hda_codec_i915_init(sdev);
+ if (ret < 0)
+ dev_warn(sdev->dev, "init of i915 and HDMI codec failed\n");
+
+ /* get controller capabilities */
+ ret = hda_dsp_ctrl_get_caps(sdev);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: get caps error\n");
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+
+static int check_nhlt_dmic(struct snd_sof_dev *sdev)
+{
+ struct nhlt_acpi_table *nhlt;
+ int dmic_num;
+
+ nhlt = intel_nhlt_init(sdev->dev);
+ if (nhlt) {
+ dmic_num = intel_nhlt_get_dmic_geo(sdev->dev, nhlt);
+ intel_nhlt_free(nhlt);
+ if (dmic_num >= 1 && dmic_num <= 4)
+ return dmic_num;
+ }
+
+ return 0;
+}
+
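+/*
+ * Build the topology file name by appending the idisp and DMIC suffixes to
+ * the default name, e.g. a (hypothetical) "sof-hda-generic.tplg" with one
+ * iDisp codec and 2 DMICs becomes "sof-hda-generic-idisp-2ch.tplg".
+ */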
+static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
+ const char *sof_tplg_filename,
+ const char *idisp_str,
+ const char *dmic_str)
+{
+ const char *tplg_filename = NULL;
+ char *filename;
+ char *split_ext;
+
+ filename = devm_kstrdup(sdev->dev, sof_tplg_filename, GFP_KERNEL);
+ if (!filename)
+ return NULL;
+
+ /* this assumes a .tplg extension */
+ split_ext = strsep(&filename, ".");
+ if (split_ext) {
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s%s%s.tplg",
+ split_ext, idisp_str, dmic_str);
+ if (!tplg_filename)
+ return NULL;
+ }
+ return tplg_filename;
+}
+
+#endif
+
+static int hda_init_caps(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct snd_sof_pdata *pdata = sdev->pdata;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_ext_link *hlink;
+#endif
+ struct sof_intel_hda_dev *hdev = pdata->hw_pdata;
+ u32 link_mask;
+ int ret = 0;
+
+ device_disable_async_suspend(bus->dev);
+
+ /* check if dsp is there */
+ if (bus->ppcap)
+ dev_dbg(sdev->dev, "PP capability, will probe DSP later.\n");
+
+ /* Init HDA controller after i915 init */
+ ret = hda_dsp_ctrl_init_chip(sdev, true);
+ if (ret < 0) {
+ dev_err(bus->dev, "error: init chip failed with ret: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* scan SoundWire capabilities exposed by DSDT */
+ ret = hda_sdw_acpi_scan(sdev);
+ if (ret < 0) {
+ dev_dbg(sdev->dev, "skipping SoundWire, not detected with ACPI scan\n");
+ goto skip_soundwire;
+ }
+
+ link_mask = hdev->info.link_mask;
+ if (!link_mask) {
+ dev_dbg(sdev->dev, "skipping SoundWire, no links enabled\n");
+ goto skip_soundwire;
+ }
+
+ /*
+ * probe/allocate SoundWire resources.
+ * The hardware configuration takes place in hda_sdw_startup
+ * after power rails are enabled.
+ * It's entirely possible to have a mix of I2S/DMIC/SoundWire
+ * devices, so we allocate the resources in all cases.
+ */
+ ret = hda_sdw_probe(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: SoundWire probe error\n");
+ return ret;
+ }
+
+skip_soundwire:
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ if (bus->mlcap)
+ snd_hdac_ext_bus_get_ml_capabilities(bus);
+
+ /* create codec instances */
+ hda_codec_probe_bus(sdev, hda_codec_use_common_hdmi);
+
+ if (!HDA_IDISP_CODEC(bus->codec_mask))
+ hda_codec_i915_display_power(sdev, false);
+
+ /*
+ * we are done probing so decrement link counts
+ */
+ list_for_each_entry(hlink, &bus->hlink_list, list)
+ snd_hdac_ext_bus_link_put(bus, hlink);
+#endif
+ return 0;
+}
+
+static irqreturn_t hda_dsp_interrupt_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+
+ /*
+ * Get global interrupt status. It includes all hardware interrupt
+ * sources in the Intel HD Audio controller.
+ */
+ if (snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS) &
+ SOF_HDA_INTSTS_GIS) {
+
+ /* disable GIE interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_INTCTL,
+ SOF_HDA_INT_GLOBAL_EN,
+ 0);
+
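+		/* the threaded handler re-enables GIE once all sources are handled */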
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t hda_dsp_interrupt_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ /* deal with streams and controller first */
+ if (hda_dsp_check_stream_irq(sdev))
+ hda_dsp_stream_threaded_handler(irq, sdev);
+
+ if (hda_dsp_check_ipc_irq(sdev))
+ sof_ops(sdev)->irq_thread(irq, sdev);
+
+ if (hda_dsp_check_sdw_irq(sdev))
+ hda_dsp_sdw_thread(irq, hdev->sdw);
+
+ if (hda_sdw_check_wakeen_irq(sdev))
+ hda_sdw_process_wakeen(sdev);
+
+ /* enable GIE interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ SOF_HDA_INTCTL,
+ SOF_HDA_INT_GLOBAL_EN,
+ SOF_HDA_INT_GLOBAL_EN);
+
+ return IRQ_HANDLED;
+}
+
+int hda_dsp_probe(struct snd_sof_dev *sdev)
+{
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ struct sof_intel_hda_dev *hdev;
+ struct hdac_bus *bus;
+ const struct sof_intel_dsp_desc *chip;
+ int ret = 0;
+
+ /*
+ * detect DSP by checking class/subclass/prog-id information
+ * class=04 subclass 03 prog-if 00: no DSP, legacy driver is required
+ * class=04 subclass 01 prog-if 00: DSP is present
+ * (and may be required e.g. for DMIC or SSP support)
+ * class=04 subclass 03 prog-if 80: either of DSP or legacy mode works
+ */
+ if (pci->class == 0x040300) {
+ dev_err(sdev->dev, "error: the DSP is not enabled on this platform, aborting probe\n");
+ return -ENODEV;
+ } else if (pci->class != 0x040100 && pci->class != 0x040380) {
+ dev_err(sdev->dev, "error: unknown PCI class/subclass/prog-if 0x%06x found, aborting probe\n", pci->class);
+ return -ENODEV;
+ }
+ dev_info(sdev->dev, "DSP detected with PCI class/subclass/prog-if 0x%06x\n", pci->class);
+
+ chip = get_chip_info(sdev->pdata);
+ if (!chip) {
+ dev_err(sdev->dev, "error: no such device supported, chip id:%x\n",
+ pci->device);
+ ret = -EIO;
+ goto err;
+ }
+
+ hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+ sdev->pdata->hw_pdata = hdev;
+ hdev->desc = chip;
+
+ hdev->dmic_dev = platform_device_register_data(sdev->dev, "dmic-codec",
+ PLATFORM_DEVID_NONE,
+ NULL, 0);
+ if (IS_ERR(hdev->dmic_dev)) {
+ dev_err(sdev->dev, "error: failed to create DMIC device\n");
+ return PTR_ERR(hdev->dmic_dev);
+ }
+
+	/*
+	 * use the position update IPC if it is forced via Kconfig
+	 * or if there is no other way to read the stream position
+	 */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_FORCE_IPC_POSITION)
+ hdev->no_ipc_position = 0;
+#else
+ hdev->no_ipc_position = sof_ops(sdev)->pcm_pointer ? 1 : 0;
+#endif
+
+ /* set up HDA base */
+ bus = sof_to_bus(sdev);
+ ret = hda_init(sdev);
+ if (ret < 0)
+ goto hdac_bus_unmap;
+
+ /* DSP base */
+#if IS_ENABLED(CONFIG_PCI)
+ sdev->bar[HDA_DSP_BAR] = pci_ioremap_bar(pci, HDA_DSP_BAR);
+#endif
+ if (!sdev->bar[HDA_DSP_BAR]) {
+ dev_err(sdev->dev, "error: ioremap error\n");
+ ret = -ENXIO;
+ goto hdac_bus_unmap;
+ }
+
+ sdev->mmio_bar = HDA_DSP_BAR;
+ sdev->mailbox_bar = HDA_DSP_BAR;
+
+ /* allow 64bit DMA address if supported by H/W */
+ if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(64))) {
+ dev_dbg(sdev->dev, "DMA mask is 64 bit\n");
+ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(64));
+ } else {
+ dev_dbg(sdev->dev, "DMA mask is 32 bit\n");
+ dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
+ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32));
+ }
+
+ /* init streams */
+ ret = hda_dsp_stream_init(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to init streams\n");
+		/*
+		 * not all errors are due to memory issues, but trying
+		 * to free everything does no harm
+		 */
+ goto free_streams;
+ }
+
+	/*
+	 * register our IRQ: try to enable MSI first and fall back to
+	 * the legacy interrupt mode if that fails
+	 * TODO: support multiple MSI vectors
+	 */
+ if (hda_use_msi && pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI) > 0) {
+ dev_info(sdev->dev, "use msi interrupt mode\n");
+ sdev->ipc_irq = pci_irq_vector(pci, 0);
+		/* msi_enabled was zero-initialised by kzalloc(), set it now */
+ sdev->msi_enabled = true;
+ }
+
+ if (!sdev->msi_enabled) {
+ dev_info(sdev->dev, "use legacy interrupt mode\n");
+		/*
+		 * in IO-APIC mode, hda->irq and ipc_irq share the same
+		 * IRQ number as pci->irq
+		 */
+ sdev->ipc_irq = pci->irq;
+ }
+
+ dev_dbg(sdev->dev, "using IPC IRQ %d\n", sdev->ipc_irq);
+ ret = request_threaded_irq(sdev->ipc_irq, hda_dsp_interrupt_handler,
+ hda_dsp_interrupt_thread,
+ IRQF_SHARED, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IPC IRQ %d\n",
+ sdev->ipc_irq);
+ goto free_irq_vector;
+ }
+
+ pci_set_master(pci);
+ synchronize_irq(pci->irq);
+
+	/*
+	 * clear TCSEL to clear playback static on some HD Audio
+	 * codecs. PCI TCSEL is defined in the Intel manuals.
+	 */
+ snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
+
+ /* init HDA capabilities */
+ ret = hda_init_caps(sdev);
+ if (ret < 0)
+ goto free_ipc_irq;
+
+ /* enable ppcap interrupt */
+ hda_dsp_ctrl_ppcap_enable(sdev, true);
+ hda_dsp_ctrl_ppcap_int_enable(sdev, true);
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = HDA_DSP_MBOX_UPLINK_OFFSET;
+
+ INIT_DELAYED_WORK(&hdev->d0i3_work, hda_dsp_d0i3_work);
+
+ return 0;
+
+free_ipc_irq:
+ free_irq(sdev->ipc_irq, sdev);
+free_irq_vector:
+ if (sdev->msi_enabled)
+ pci_free_irq_vectors(pci);
+free_streams:
+ hda_dsp_stream_free(sdev);
+/* dsp_unmap: not currently used */
+ iounmap(sdev->bar[HDA_DSP_BAR]);
+hdac_bus_unmap:
+ platform_device_unregister(hdev->dmic_dev);
+ iounmap(bus->remap_addr);
+ hda_codec_i915_exit(sdev);
+err:
+ return ret;
+}
+
+int hda_dsp_remove(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ /* cancel any attempt for DSP D0I3 */
+ cancel_delayed_work_sync(&hda->d0i3_work);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* codec removal, invoke bus_device_remove */
+ snd_hdac_ext_bus_device_remove(bus);
+#endif
+
+ hda_sdw_exit(sdev);
+
+ if (!IS_ERR_OR_NULL(hda->dmic_dev))
+ platform_device_unregister(hda->dmic_dev);
+
+ /* disable DSP IRQ */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_PIE, 0);
+
+ /* disable CIE and GIE interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN, 0);
+
+ /* disable cores */
+ if (chip)
+ hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);
+
+ /* disable DSP */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_GPROCEN, 0);
+
+ free_irq(sdev->ipc_irq, sdev);
+ if (sdev->msi_enabled)
+ pci_free_irq_vectors(pci);
+
+ hda_dsp_stream_free(sdev);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_link_free_all(bus);
+#endif
+
+ iounmap(sdev->bar[HDA_DSP_BAR]);
+ iounmap(bus->remap_addr);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_ext_bus_exit(bus);
+#endif
+ hda_codec_i915_exit(sdev);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+static int hda_generic_machine_select(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct snd_soc_acpi_mach_params *mach_params;
+ struct snd_soc_acpi_mach *hda_mach;
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const char *tplg_filename;
+ const char *idisp_str;
+ const char *dmic_str;
+ int dmic_num = 0;
+ int codec_num = 0;
+ int i;
+
+ /* codec detection */
+ if (!bus->codec_mask) {
+ dev_info(bus->dev, "no hda codecs found!\n");
+ } else {
+ dev_info(bus->dev, "hda codecs found, mask %lx\n",
+ bus->codec_mask);
+
+ for (i = 0; i < HDA_MAX_CODECS; i++) {
+ if (bus->codec_mask & (1 << i))
+ codec_num++;
+ }
+
+		/*
+		 * If no machine driver was found, the generic HDA machine
+		 * driver can handle:
+		 * - one HDMI codec, and/or
+		 * - one external HDAudio codec
+		 */
+ if (!pdata->machine && codec_num <= 2) {
+ hda_mach = snd_soc_acpi_intel_hda_machines;
+
+ /* topology: use the info from hda_machines */
+ pdata->tplg_filename =
+ hda_mach->sof_tplg_filename;
+
+ dev_info(bus->dev, "using HDA machine driver %s now\n",
+ hda_mach->drv_name);
+
+ if (codec_num == 1 && HDA_IDISP_CODEC(bus->codec_mask))
+ idisp_str = "-idisp";
+ else
+ idisp_str = "";
+
+ /* first check NHLT for DMICs */
+ dmic_num = check_nhlt_dmic(sdev);
+
+ /* allow for module parameter override */
+ if (hda_dmic_num != -1)
+ dmic_num = hda_dmic_num;
+
+ switch (dmic_num) {
+ case 1:
+ dmic_str = "-1ch";
+ break;
+ case 2:
+ dmic_str = "-2ch";
+ break;
+ case 3:
+ dmic_str = "-3ch";
+ break;
+ case 4:
+ dmic_str = "-4ch";
+ break;
+ default:
+ dmic_num = 0;
+ dmic_str = "";
+ break;
+ }
+
+ tplg_filename = pdata->tplg_filename;
+ tplg_filename = fixup_tplg_name(sdev, tplg_filename,
+ idisp_str, dmic_str);
+ if (!tplg_filename)
+ return -EINVAL;
+
+ dev_info(bus->dev,
+ "DMICs detected in NHLT tables: %d\n",
+ dmic_num);
+
+ pdata->machine = hda_mach;
+ pdata->tplg_filename = tplg_filename;
+ }
+ }
+
+ /* used by hda machine driver to create dai links */
+ if (pdata->machine) {
+ mach_params = (struct snd_soc_acpi_mach_params *)
+ &pdata->machine->mach_params;
+ mach_params->codec_mask = bus->codec_mask;
+ mach_params->common_hdmi_codec_drv = hda_codec_use_common_hdmi;
+ mach_params->dmic_num = dmic_num;
+ }
+
+ return 0;
+}
+#else
+static int hda_generic_machine_select(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+/* Check if all Slaves defined on the link can be found */
+static bool link_slaves_found(struct snd_sof_dev *sdev,
+ const struct snd_soc_acpi_link_adr *link,
+ struct sdw_intel_ctx *sdw)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct sdw_intel_slave_id *ids = sdw->ids;
+ int num_slaves = sdw->num_slaves;
+ unsigned int part_id, link_id, unique_id, mfg_id;
+ int i, j;
+
+ for (i = 0; i < link->num_adr; i++) {
+ u64 adr = link->adr_d[i].adr;
+
+ mfg_id = SDW_MFG_ID(adr);
+ part_id = SDW_PART_ID(adr);
+ link_id = SDW_DISCO_LINK_ID(adr);
+ for (j = 0; j < num_slaves; j++) {
+ if (ids[j].link_id != link_id ||
+ ids[j].id.part_id != part_id ||
+ ids[j].id.mfg_id != mfg_id)
+ continue;
+ /*
+ * we have to check unique id
+ * if there is more than one
+ * Slave on the link
+ */
+ unique_id = SDW_UNIQUE_ID(adr);
+ if (link->num_adr == 1 ||
+ ids[j].id.unique_id == SDW_IGNORED_UNIQUE_ID ||
+ ids[j].id.unique_id == unique_id) {
+ dev_dbg(bus->dev,
+ "found %x at link %d\n",
+ part_id, link_id);
+ break;
+ }
+ }
+ if (j == num_slaves) {
+ dev_dbg(bus->dev,
+ "Slave %x not found\n",
+ part_id);
+ return false;
+ }
+ }
+ return true;
+}
+
+static int hda_sdw_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct snd_soc_acpi_link_adr *link;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct snd_soc_acpi_mach *mach;
+ struct sof_intel_hda_dev *hdev;
+ u32 link_mask;
+ int i;
+
+ hdev = pdata->hw_pdata;
+ link_mask = hdev->info.link_mask;
+
+ /*
+	 * Select the SoundWire machine driver if needed, using the
+	 * alternate tables. This case deals with SoundWire-only
+	 * machines; for mixed cases with I2C/I2S the detection relies
+	 * on the HID list.
+ */
+ if (link_mask && !pdata->machine) {
+ for (mach = pdata->desc->alt_machines;
+ mach && mach->link_mask; mach++) {
+			/*
+			 * On some platforms such as Up Extreme all links
+			 * are enabled but only one link can be used by the
+			 * external codec. Instead of requiring an exact match
+			 * of the two masks, first check whether the machine's
+			 * link_mask is a subset of the link_mask supported by
+			 * the hardware, then go on to check link_adr
+			 */
+ if (~link_mask & mach->link_mask)
+ continue;
+
+			/* No need to match the adr if no links are defined */
+ if (!mach->links)
+ break;
+
+ link = mach->links;
+ for (i = 0; i < hdev->info.count && link->num_adr;
+ i++, link++) {
+ /*
+ * Try next machine if any expected Slaves
+ * are not found on this link.
+ */
+ if (!link_slaves_found(sdev, link, hdev->sdw))
+ break;
+ }
+ /* Found if all Slaves are checked */
+ if (i == hdev->info.count || !link->num_adr)
+ break;
+ }
+ if (mach && mach->link_mask) {
+ dev_dbg(bus->dev,
+ "SoundWire machine driver %s topology %s\n",
+ mach->drv_name,
+ mach->sof_tplg_filename);
+ pdata->machine = mach;
+ mach->mach_params.links = mach->links;
+ mach->mach_params.link_mask = mach->link_mask;
+ mach->mach_params.platform = dev_name(sdev->dev);
+ pdata->fw_filename = mach->sof_fw_filename;
+ pdata->tplg_filename = mach->sof_tplg_filename;
+ } else {
+ dev_info(sdev->dev,
+ "No SoundWire machine driver found\n");
+ }
+ }
+
+ return 0;
+}
+#else
+static int hda_sdw_machine_select(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+#endif
+
+void hda_set_mach_params(const struct snd_soc_acpi_mach *mach,
+ struct device *dev)
+{
+ struct snd_soc_acpi_mach_params *mach_params;
+
+ mach_params = (struct snd_soc_acpi_mach_params *)&mach->mach_params;
+ mach_params->platform = dev_name(dev);
+}
+
+void hda_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_soc_acpi_mach *mach;
+
+ mach = snd_soc_acpi_find_machine(desc->machines);
+ if (mach) {
+ /*
+ * If tplg file name is overridden, use it instead of
+ * the one set in mach table
+ */
+ if (!sof_pdata->tplg_filename)
+ sof_pdata->tplg_filename = mach->sof_tplg_filename;
+
+ sof_pdata->machine = mach;
+
+ if (mach->link_mask) {
+ mach->mach_params.links = mach->links;
+ mach->mach_params.link_mask = mach->link_mask;
+ }
+ }
+
+ /*
+ * If I2S fails, try SoundWire
+ */
+ hda_sdw_machine_select(sdev);
+
+ /*
+ * Choose HDA generic machine driver if mach is NULL.
+ * Otherwise, set certain mach params.
+ */
+ hda_generic_machine_select(sdev);
+
+ if (!sof_pdata->machine)
+ dev_warn(sdev->dev, "warning: No matching ASoC machine driver found\n");
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC);
+MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
+MODULE_IMPORT_NS(SOUNDWIRE_INTEL_INIT);
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
new file mode 100644
index 000000000..1bc4dabdd
--- /dev/null
+++ b/sound/soc/sof/intel/hda.h
@@ -0,0 +1,751 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_HDA_H
+#define __SOF_INTEL_HDA_H
+
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_intel.h>
+#include <sound/compress_driver.h>
+#include <sound/hda_codec.h>
+#include <sound/hdaudio_ext.h>
+#include "shim.h"
+
+/* PCI registers */
+#define PCI_TCSEL 0x44
+#define PCI_PGCTL PCI_TCSEL
+#define PCI_CGCTL 0x48
+
+/* PCI_PGCTL bits */
+#define PCI_PGCTL_ADSPPGD BIT(2)
+#define PCI_PGCTL_LSRMD_MASK BIT(4)
+
+/* PCI_CGCTL bits */
+#define PCI_CGCTL_MISCBDCGE_MASK BIT(6)
+#define PCI_CGCTL_ADSPDCGE BIT(1)
+
+/* Legacy HDA registers and bits used - widths are variable */
+#define SOF_HDA_GCAP 0x0
+#define SOF_HDA_GCTL 0x8
+/* accept unsol. response enable */
+#define SOF_HDA_GCTL_UNSOL BIT(8)
+#define SOF_HDA_LLCH 0x14
+#define SOF_HDA_INTCTL 0x20
+#define SOF_HDA_INTSTS 0x24
+#define SOF_HDA_WAKESTS 0x0E
+#define SOF_HDA_WAKESTS_INT_MASK ((1 << 8) - 1)
+#define SOF_HDA_RIRBSTS 0x5d
+
+/* SOF_HDA_GCTL register bits */
+#define SOF_HDA_GCTL_RESET BIT(0)
+
+/* SOF_HDA_INTCTL regs */
+#define SOF_HDA_INT_GLOBAL_EN BIT(31)
+#define SOF_HDA_INT_CTRL_EN BIT(30)
+#define SOF_HDA_INT_ALL_STREAM 0xff
+
+/* SOF_HDA_INTSTS regs */
+#define SOF_HDA_INTSTS_GIS BIT(31)
+
+#define SOF_HDA_MAX_CAPS 10
+#define SOF_HDA_CAP_ID_OFF 16
+#define SOF_HDA_CAP_ID_MASK GENMASK(SOF_HDA_CAP_ID_OFF + 11,\
+ SOF_HDA_CAP_ID_OFF)
+#define SOF_HDA_CAP_NEXT_MASK 0xFFFF
+
+#define SOF_HDA_GTS_CAP_ID 0x1
+#define SOF_HDA_ML_CAP_ID 0x2
+
+#define SOF_HDA_PP_CAP_ID 0x3
+#define SOF_HDA_REG_PP_PPCH 0x10
+#define SOF_HDA_REG_PP_PPCTL 0x04
+#define SOF_HDA_REG_PP_PPSTS 0x08
+#define SOF_HDA_PPCTL_PIE BIT(31)
+#define SOF_HDA_PPCTL_GPROCEN BIT(30)
+
+/* Vendor Specific Registers */
+#define SOF_HDA_VS_D0I3C 0x104A
+
+/* D0I3C Register fields */
+#define SOF_HDA_VS_D0I3C_CIP BIT(0) /* Command-In-Progress */
+#define SOF_HDA_VS_D0I3C_I3 BIT(2) /* D0i3 enable bit */
+
+/* DPIB entry size: 8 Bytes = 2 DWords */
+#define SOF_HDA_DPIB_ENTRY_SIZE 0x8
+
+#define SOF_HDA_SPIB_CAP_ID 0x4
+#define SOF_HDA_DRSM_CAP_ID 0x5
+
+#define SOF_HDA_SPIB_BASE 0x08
+#define SOF_HDA_SPIB_INTERVAL 0x08
+#define SOF_HDA_SPIB_SPIB 0x00
+#define SOF_HDA_SPIB_MAXFIFO 0x04
+
+#define SOF_HDA_PPHC_BASE 0x10
+#define SOF_HDA_PPHC_INTERVAL 0x10
+
+#define SOF_HDA_PPLC_BASE 0x10
+#define SOF_HDA_PPLC_MULTI 0x10
+#define SOF_HDA_PPLC_INTERVAL 0x10
+
+#define SOF_HDA_DRSM_BASE 0x08
+#define SOF_HDA_DRSM_INTERVAL 0x08
+
+/* Descriptor error interrupt */
+#define SOF_HDA_CL_DMA_SD_INT_DESC_ERR 0x10
+
+/* FIFO error interrupt */
+#define SOF_HDA_CL_DMA_SD_INT_FIFO_ERR 0x08
+
+/* Buffer completion interrupt */
+#define SOF_HDA_CL_DMA_SD_INT_COMPLETE 0x04
+
+#define SOF_HDA_CL_DMA_SD_INT_MASK \
+ (SOF_HDA_CL_DMA_SD_INT_DESC_ERR | \
+ SOF_HDA_CL_DMA_SD_INT_FIFO_ERR | \
+ SOF_HDA_CL_DMA_SD_INT_COMPLETE)
+#define SOF_HDA_SD_CTL_DMA_START 0x02 /* Stream DMA start bit */
+
+/* Intel HD Audio Code Loader DMA Registers */
+#define SOF_HDA_ADSP_LOADER_BASE 0x80
+#define SOF_HDA_ADSP_DPLBASE 0x70
+#define SOF_HDA_ADSP_DPUBASE 0x74
+#define SOF_HDA_ADSP_DPLBASE_ENABLE 0x01
+
+/* Stream Registers */
+#define SOF_HDA_ADSP_REG_CL_SD_CTL 0x00
+#define SOF_HDA_ADSP_REG_CL_SD_STS 0x03
+#define SOF_HDA_ADSP_REG_CL_SD_LPIB 0x04
+#define SOF_HDA_ADSP_REG_CL_SD_CBL 0x08
+#define SOF_HDA_ADSP_REG_CL_SD_LVI 0x0C
+#define SOF_HDA_ADSP_REG_CL_SD_FIFOW 0x0E
+#define SOF_HDA_ADSP_REG_CL_SD_FIFOSIZE 0x10
+#define SOF_HDA_ADSP_REG_CL_SD_FORMAT 0x12
+#define SOF_HDA_ADSP_REG_CL_SD_FIFOL 0x14
+#define SOF_HDA_ADSP_REG_CL_SD_BDLPL 0x18
+#define SOF_HDA_ADSP_REG_CL_SD_BDLPU 0x1C
+#define SOF_HDA_ADSP_SD_ENTRY_SIZE 0x20
+
+/* CL: Software Position Based FIFO Capability Registers */
+#define SOF_DSP_REG_CL_SPBFIFO \
+ (SOF_HDA_ADSP_LOADER_BASE + 0x20)
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCH 0x0
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL 0x4
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_SPIB 0x8
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_MAXFIFOS 0xc
+
+/* Stream Number */
+#define SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT 20
+#define SOF_HDA_CL_SD_CTL_STREAM_TAG_MASK \
+ GENMASK(SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT + 3,\
+ SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT)
+
+#define HDA_DSP_HDA_BAR 0
+#define HDA_DSP_PP_BAR 1
+#define HDA_DSP_SPIB_BAR 2
+#define HDA_DSP_DRSM_BAR 3
+#define HDA_DSP_BAR 4
+
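+/*
+ * SRAM windows are spaced 0x20000 (128 KB) apart in the DSP BAR,
+ * starting at offset 0x80000
+ */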
+#define SRAM_WINDOW_OFFSET(x) (0x80000 + (x) * 0x20000)
+
+#define HDA_DSP_MBOX_OFFSET SRAM_WINDOW_OFFSET(0)
+
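+/*
+ * the lower 24 bits of the panic message are treated as an offset
+ * relative to the SRAM window 0 mailbox (assumption based on the
+ * macro below; the exact encoding is defined by the firmware ABI)
+ */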
+#define HDA_DSP_PANIC_OFFSET(x) \
+ (((x) & 0xFFFFFF) + HDA_DSP_MBOX_OFFSET)
+
+/* SRAM window 0 FW "registers" */
+#define HDA_DSP_SRAM_REG_ROM_STATUS (HDA_DSP_MBOX_OFFSET + 0x0)
+#define HDA_DSP_SRAM_REG_ROM_ERROR (HDA_DSP_MBOX_OFFSET + 0x4)
+/* FW and ROM share offset 4 */
+#define HDA_DSP_SRAM_REG_FW_STATUS (HDA_DSP_MBOX_OFFSET + 0x4)
+#define HDA_DSP_SRAM_REG_FW_TRACEP (HDA_DSP_MBOX_OFFSET + 0x8)
+#define HDA_DSP_SRAM_REG_FW_END (HDA_DSP_MBOX_OFFSET + 0xc)
+
+#define HDA_DSP_MBOX_UPLINK_OFFSET 0x81000
+
+#define HDA_DSP_STREAM_RESET_TIMEOUT 300
+/*
+ * Timeout in us for setting the stream RUN bit when starting or
+ * stopping the stream. The timeout expires if the new RUN bit
+ * value cannot be read back within the specified time.
+ */
+#define HDA_DSP_STREAM_RUN_TIMEOUT 300
+
+#define HDA_DSP_SPIB_ENABLE 1
+#define HDA_DSP_SPIB_DISABLE 0
+
+#define SOF_HDA_MAX_BUFFER_SIZE (32 * PAGE_SIZE)
+
+#define HDA_DSP_STACK_DUMP_SIZE 32
+
+/* ROM status/error values */
+#define HDA_DSP_ROM_STS_MASK GENMASK(23, 0)
+#define HDA_DSP_ROM_INIT 0x1
+#define HDA_DSP_ROM_FW_MANIFEST_LOADED 0x3
+#define HDA_DSP_ROM_FW_FW_LOADED 0x4
+#define HDA_DSP_ROM_FW_ENTERED 0x5
+#define HDA_DSP_ROM_RFW_START 0xf
+#define HDA_DSP_ROM_CSE_ERROR 40
+#define HDA_DSP_ROM_CSE_WRONG_RESPONSE 41
+#define HDA_DSP_ROM_IMR_TO_SMALL 42
+#define HDA_DSP_ROM_BASE_FW_NOT_FOUND 43
+#define HDA_DSP_ROM_CSE_VALIDATION_FAILED 44
+#define HDA_DSP_ROM_IPC_FATAL_ERROR 45
+#define HDA_DSP_ROM_L2_CACHE_ERROR 46
+#define HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL 47
+#define HDA_DSP_ROM_API_PTR_INVALID 50
+#define HDA_DSP_ROM_BASEFW_INCOMPAT 51
+#define HDA_DSP_ROM_UNHANDLED_INTERRUPT 0xBEE00000
+#define HDA_DSP_ROM_MEMORY_HOLE_ECC 0xECC00000
+#define HDA_DSP_ROM_KERNEL_EXCEPTION 0xCAFE0000
+#define HDA_DSP_ROM_USER_EXCEPTION 0xBEEF0000
+#define HDA_DSP_ROM_UNEXPECTED_RESET 0xDECAF000
+#define HDA_DSP_ROM_NULL_FW_ENTRY 0x4c4c4e55
+#define HDA_DSP_IPC_PURGE_FW 0x01004000
+
+/* various timeout values */
+#define HDA_DSP_PU_TIMEOUT 50
+#define HDA_DSP_PD_TIMEOUT 50
+#define HDA_DSP_RESET_TIMEOUT_US 50000
+#define HDA_DSP_BASEFW_TIMEOUT_US 3000000
+#define HDA_DSP_INIT_TIMEOUT_US 500000
+#define HDA_DSP_CTRL_RESET_TIMEOUT 100
+#define HDA_DSP_WAIT_TIMEOUT 500 /* 500 msec */
+#define HDA_DSP_REG_POLL_INTERVAL_US 500 /* 0.5 msec */
+#define HDA_DSP_REG_POLL_RETRY_COUNT 50
+
+#define HDA_DSP_ADSPIC_IPC 1
+#define HDA_DSP_ADSPIS_IPC 1
+
+/* Intel HD Audio General DSP Registers */
+#define HDA_DSP_GEN_BASE 0x0
+#define HDA_DSP_REG_ADSPCS (HDA_DSP_GEN_BASE + 0x04)
+#define HDA_DSP_REG_ADSPIC (HDA_DSP_GEN_BASE + 0x08)
+#define HDA_DSP_REG_ADSPIS (HDA_DSP_GEN_BASE + 0x0C)
+#define HDA_DSP_REG_ADSPIC2 (HDA_DSP_GEN_BASE + 0x10)
+#define HDA_DSP_REG_ADSPIS2 (HDA_DSP_GEN_BASE + 0x14)
+
+#define HDA_DSP_REG_ADSPIS2_SNDW BIT(5)
+#define HDA_DSP_REG_SNDW_WAKE_STS 0x2C192
+
+/* Intel HD Audio Inter-Processor Communication Registers */
+#define HDA_DSP_IPC_BASE 0x40
+#define HDA_DSP_REG_HIPCT (HDA_DSP_IPC_BASE + 0x00)
+#define HDA_DSP_REG_HIPCTE (HDA_DSP_IPC_BASE + 0x04)
+#define HDA_DSP_REG_HIPCI (HDA_DSP_IPC_BASE + 0x08)
+#define HDA_DSP_REG_HIPCIE (HDA_DSP_IPC_BASE + 0x0C)
+#define HDA_DSP_REG_HIPCCTL (HDA_DSP_IPC_BASE + 0x10)
+
+/* Intel Vendor Specific Registers */
+#define HDA_VS_INTEL_EM2 0x1030
+#define HDA_VS_INTEL_EM2_L1SEN BIT(13)
+#define HDA_VS_INTEL_LTRP_GB_MASK 0x3F
+
+/* HIPCI */
+#define HDA_DSP_REG_HIPCI_BUSY BIT(31)
+#define HDA_DSP_REG_HIPCI_MSG_MASK 0x7FFFFFFF
+
+/* HIPCIE */
+#define HDA_DSP_REG_HIPCIE_DONE BIT(30)
+#define HDA_DSP_REG_HIPCIE_MSG_MASK 0x3FFFFFFF
+
+/* HIPCCTL */
+#define HDA_DSP_REG_HIPCCTL_DONE BIT(1)
+#define HDA_DSP_REG_HIPCCTL_BUSY BIT(0)
+
+/* HIPCT */
+#define HDA_DSP_REG_HIPCT_BUSY BIT(31)
+#define HDA_DSP_REG_HIPCT_MSG_MASK 0x7FFFFFFF
+
+/* HIPCTE */
+#define HDA_DSP_REG_HIPCTE_MSG_MASK 0x3FFFFFFF
+
+#define HDA_DSP_ADSPIC_CL_DMA 0x2
+#define HDA_DSP_ADSPIS_CL_DMA 0x2
+
+/* Delay before scheduling D0i3 entry */
+#define BXT_D0I3_DELAY 5000
+
+#define FW_CL_STREAM_NUMBER 0x1
+#define HDA_FW_BOOT_ATTEMPTS 3
+
+/* ADSPCS - Audio DSP Control & Status */
+
+/*
+ * Core Reset - asserted high
+ * CRST Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_CRST_SHIFT 0
+#define HDA_DSP_ADSPCS_CRST_MASK(cm) ((cm) << HDA_DSP_ADSPCS_CRST_SHIFT)
+
+/*
+ * Core run/stall - when set to '1' core is stalled
+ * CSTALL Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_CSTALL_SHIFT 8
+#define HDA_DSP_ADSPCS_CSTALL_MASK(cm) ((cm) << HDA_DSP_ADSPCS_CSTALL_SHIFT)
+
+/*
+ * Set Power Active - when set to '1' turn cores on
+ * SPA Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_SPA_SHIFT 16
+#define HDA_DSP_ADSPCS_SPA_MASK(cm) ((cm) << HDA_DSP_ADSPCS_SPA_SHIFT)
+
+/*
+ * Current Power Active - power status of cores, set by hardware
+ * CPA Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_CPA_SHIFT 24
+#define HDA_DSP_ADSPCS_CPA_MASK(cm) ((cm) << HDA_DSP_ADSPCS_CPA_SHIFT)
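+
+/*
+ * Example: for a core mask of 0x3 (cores 0 and 1), the SPA mask is
+ * 0x3 << 16 = 0x30000 and the CPA mask is 0x3 << 24 = 0x3000000
+ */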
+
+/*
+ * Mask for a given number of cores
+ * nc = number of supported cores
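+ * e.g. SOF_DSP_CORES_MASK(4) == GENMASK(3, 0) == 0xf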
+ */
+#define SOF_DSP_CORES_MASK(nc) GENMASK(((nc) - 1), 0)
+
+/* Intel HD Audio Inter-Processor Communication Registers for Cannonlake */
+#define CNL_DSP_IPC_BASE 0xc0
+#define CNL_DSP_REG_HIPCTDR (CNL_DSP_IPC_BASE + 0x00)
+#define CNL_DSP_REG_HIPCTDA (CNL_DSP_IPC_BASE + 0x04)
+#define CNL_DSP_REG_HIPCTDD (CNL_DSP_IPC_BASE + 0x08)
+#define CNL_DSP_REG_HIPCIDR (CNL_DSP_IPC_BASE + 0x10)
+#define CNL_DSP_REG_HIPCIDA (CNL_DSP_IPC_BASE + 0x14)
+#define CNL_DSP_REG_HIPCIDD (CNL_DSP_IPC_BASE + 0x18)
+#define CNL_DSP_REG_HIPCCTL (CNL_DSP_IPC_BASE + 0x28)
+
+/* HIPCI */
+#define CNL_DSP_REG_HIPCIDR_BUSY BIT(31)
+#define CNL_DSP_REG_HIPCIDR_MSG_MASK 0x7FFFFFFF
+
+/* HIPCIE */
+#define CNL_DSP_REG_HIPCIDA_DONE BIT(31)
+#define CNL_DSP_REG_HIPCIDA_MSG_MASK 0x7FFFFFFF
+
+/* HIPCCTL */
+#define CNL_DSP_REG_HIPCCTL_DONE BIT(1)
+#define CNL_DSP_REG_HIPCCTL_BUSY BIT(0)
+
+/* HIPCT */
+#define CNL_DSP_REG_HIPCTDR_BUSY BIT(31)
+#define CNL_DSP_REG_HIPCTDR_MSG_MASK 0x7FFFFFFF
+
+/* HIPCTDA */
+#define CNL_DSP_REG_HIPCTDA_DONE BIT(31)
+#define CNL_DSP_REG_HIPCTDA_MSG_MASK 0x7FFFFFFF
+
+/* HIPCTDD */
+#define CNL_DSP_REG_HIPCTDD_MSG_MASK 0x7FFFFFFF
+
+/* BDL */
+#define HDA_DSP_BDL_SIZE 4096
+#define HDA_DSP_MAX_BDL_ENTRIES \
+ (HDA_DSP_BDL_SIZE / sizeof(struct sof_intel_dsp_bdl))
+
+/* Number of DAIs */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+#define SOF_SKL_NUM_DAIS 16
+#else
+#define SOF_SKL_NUM_DAIS 15
+#endif
+
+#else
+#define SOF_SKL_NUM_DAIS 8
+#endif
+
+/* Intel HD Audio SRAM Window 0 */
+#define HDA_ADSP_SRAM0_BASE_SKL 0x8000
+
+/* Firmware status window */
+#define HDA_ADSP_FW_STATUS_SKL HDA_ADSP_SRAM0_BASE_SKL
+#define HDA_ADSP_ERROR_CODE_SKL (HDA_ADSP_FW_STATUS_SKL + 0x4)
+
+/* Host Device Memory Space */
+#define APL_SSP_BASE_OFFSET 0x2000
+#define CNL_SSP_BASE_OFFSET 0x10000
+
+/* Host Device Memory Size of a Single SSP */
+#define SSP_DEV_MEM_SIZE 0x1000
+
+/* SSP Count of the Platform */
+#define APL_SSP_COUNT 6
+#define CNL_SSP_COUNT 3
+#define ICL_SSP_COUNT 6
+
+/* SSP Registers */
+#define SSP_SSC1_OFFSET 0x4
+#define SSP_SET_SCLK_SLAVE BIT(25)
+#define SSP_SET_SFRM_SLAVE BIT(24)
+#define SSP_SET_SLAVE (SSP_SET_SCLK_SLAVE | SSP_SET_SFRM_SLAVE)
+
+#define HDA_IDISP_CODEC(x) ((x) & BIT(2))
+
+struct sof_intel_dsp_bdl {
+ __le32 addr_l;
+ __le32 addr_h;
+ __le32 size;
+ __le32 ioc;
+} __packed;
+
+#define SOF_HDA_PLAYBACK_STREAMS 16
+#define SOF_HDA_CAPTURE_STREAMS 16
+#define SOF_HDA_PLAYBACK 0
+#define SOF_HDA_CAPTURE 1
+
+/*
+ * Time in ms for opportunistic D0I3 entry delay.
+ * This has been deliberately chosen to be long to avoid race conditions.
+ * Could be optimized in future.
+ */
+#define SOF_HDA_D0I3_WORK_DELAY_MS 5000
+
+/* HDA DSP D0 substate */
+enum sof_hda_D0_substate {
+ SOF_HDA_DSP_PM_D0I0, /* default D0 substate */
+ SOF_HDA_DSP_PM_D0I3, /* low power D0 substate */
+};
+
+/* represents DSP HDA controller frontend - i.e. host facing control */
+struct sof_intel_hda_dev {
+ int boot_iteration;
+
+ struct hda_bus hbus;
+
+ /* hw config */
+ const struct sof_intel_dsp_desc *desc;
+
+ /* trace */
+ struct hdac_ext_stream *dtrace_stream;
+
+	/* set when the position update IPC is not needed */
+ u32 no_ipc_position;
+
+ /* the maximum number of streams (playback + capture) supported */
+ u32 stream_max;
+
+ /* PM related */
+	bool l1_support_changed;	/* set when L1SEN is changed during suspend */
+
+ /* DMIC device */
+ struct platform_device *dmic_dev;
+
+ /* delayed work to enter D0I3 opportunistically */
+ struct delayed_work d0i3_work;
+
+ /* ACPI information stored between scan and probe steps */
+ struct sdw_intel_acpi_info info;
+
+ /* sdw context allocated by SoundWire driver */
+ struct sdw_intel_ctx *sdw;
+};
+
+static inline struct hdac_bus *sof_to_bus(struct snd_sof_dev *s)
+{
+ struct sof_intel_hda_dev *hda = s->pdata->hw_pdata;
+
+ return &hda->hbus.core;
+}
+
+static inline struct hda_bus *sof_to_hbus(struct snd_sof_dev *s)
+{
+ struct sof_intel_hda_dev *hda = s->pdata->hw_pdata;
+
+ return &hda->hbus;
+}
+
+struct sof_intel_hda_stream {
+ struct snd_sof_dev *sdev;
+ struct hdac_ext_stream hda_stream;
+ struct sof_intel_stream stream;
+ int host_reserved; /* reserve host DMA channel */
+};
+
+#define hstream_to_sof_hda_stream(hstream) \
+ container_of(hstream, struct sof_intel_hda_stream, hda_stream)
+
+#define bus_to_sof_hda(bus) \
+ container_of(bus, struct sof_intel_hda_dev, hbus.core)
+
+#define SOF_STREAM_SD_OFFSET(s) \
+ (SOF_HDA_ADSP_SD_ENTRY_SIZE * ((s)->index) \
+ + SOF_HDA_ADSP_LOADER_BASE)
+
+/*
+ * DSP Core services.
+ */
+int hda_dsp_probe(struct snd_sof_dev *sdev);
+int hda_dsp_remove(struct snd_sof_dev *sdev);
+int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev,
+ unsigned int core_mask);
+int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev,
+ unsigned int core_mask);
+int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask);
+bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
+ unsigned int core_mask);
+int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
+ unsigned int core_mask);
+void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev);
+void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev);
+
+int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state);
+
+int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state);
+int hda_dsp_resume(struct snd_sof_dev *sdev);
+int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev);
+int hda_dsp_runtime_resume(struct snd_sof_dev *sdev);
+int hda_dsp_runtime_idle(struct snd_sof_dev *sdev);
+int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev);
+void hda_dsp_dump_skl(struct snd_sof_dev *sdev, u32 flags);
+void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags);
+void hda_ipc_dump(struct snd_sof_dev *sdev);
+void hda_ipc_irq_dump(struct snd_sof_dev *sdev);
+void hda_dsp_d0i3_work(struct work_struct *work);
+
+/*
+ * DSP PCM Operations.
+ */
+u32 hda_dsp_get_mult_div(struct snd_sof_dev *sdev, int rate);
+u32 hda_dsp_get_bits(struct snd_sof_dev *sdev, int sample_bits);
+int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int hda_dsp_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int hda_dsp_pcm_hw_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct sof_ipc_stream_params *ipc_params);
+int hda_dsp_stream_hw_free(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int hda_dsp_pcm_trigger(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t hda_dsp_pcm_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+
+/*
+ * DSP Stream Operations.
+ */
+
+int hda_dsp_stream_init(struct snd_sof_dev *sdev);
+void hda_dsp_stream_free(struct snd_sof_dev *sdev);
+int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params);
+int hda_dsp_iccmax_stream_hw_params(struct snd_sof_dev *sdev, struct hdac_ext_stream *stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params);
+int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream, int cmd);
+irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context);
+int hda_dsp_stream_setup_bdl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct hdac_stream *stream);
+bool hda_dsp_check_ipc_irq(struct snd_sof_dev *sdev);
+bool hda_dsp_check_stream_irq(struct snd_sof_dev *sdev);
+
+struct hdac_ext_stream *
+ hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction);
+int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag);
+int hda_dsp_stream_spib_config(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream,
+ int enable, u32 size);
+
+void hda_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz);
+int hda_ipc_pcm_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+/*
+ * Probe Compress Operations.
+ */
+int hda_probe_compr_assign(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai);
+int hda_probe_compr_free(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai);
+int hda_probe_compr_set_params(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params,
+ struct snd_soc_dai *dai);
+int hda_probe_compr_trigger(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream, int cmd,
+ struct snd_soc_dai *dai);
+int hda_probe_compr_pointer(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp,
+ struct snd_soc_dai *dai);
+#endif
+
+/*
+ * DSP IPC Operations.
+ */
+int hda_dsp_ipc_send_msg(struct snd_sof_dev *sdev,
+ struct snd_sof_ipc_msg *msg);
+void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev);
+int hda_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev);
+int hda_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id);
+
+irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context);
+int hda_dsp_ipc_cmd_done(struct snd_sof_dev *sdev, int dir);
+
+/*
+ * DSP Code loader.
+ */
+int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev);
+int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev);
+int hda_dsp_cl_boot_firmware_skl(struct snd_sof_dev *sdev);
+
+/* pre and post fw run ops */
+int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev);
+int hda_dsp_post_fw_run(struct snd_sof_dev *sdev);
+
+/*
+ * HDA Controller Operations.
+ */
+int hda_dsp_ctrl_get_caps(struct snd_sof_dev *sdev);
+void hda_dsp_ctrl_ppcap_enable(struct snd_sof_dev *sdev, bool enable);
+void hda_dsp_ctrl_ppcap_int_enable(struct snd_sof_dev *sdev, bool enable);
+int hda_dsp_ctrl_link_reset(struct snd_sof_dev *sdev, bool reset);
+void hda_dsp_ctrl_misc_clock_gating(struct snd_sof_dev *sdev, bool enable);
+int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable);
+int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev, bool full_reset);
+void hda_dsp_ctrl_stop_chip(struct snd_sof_dev *sdev);
+/*
+ * HDA bus operations.
+ */
+void sof_hda_bus_init(struct hdac_bus *bus, struct device *dev);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+/*
+ * HDA Codec operations.
+ */
+void hda_codec_probe_bus(struct snd_sof_dev *sdev,
+ bool hda_codec_use_common_hdmi);
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev);
+void hda_codec_jack_check(struct snd_sof_dev *sdev);
+
+#endif /* CONFIG_SND_SOC_SOF_HDA */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) && \
+ (IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) || \
+ IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+
+void hda_codec_i915_display_power(struct snd_sof_dev *sdev, bool enable);
+int hda_codec_i915_init(struct snd_sof_dev *sdev);
+int hda_codec_i915_exit(struct snd_sof_dev *sdev);
+
+#else
+
+static inline void hda_codec_i915_display_power(struct snd_sof_dev *sdev,
+ bool enable) { }
+static inline int hda_codec_i915_init(struct snd_sof_dev *sdev) { return 0; }
+static inline int hda_codec_i915_exit(struct snd_sof_dev *sdev) { return 0; }
+
+#endif
+
+/*
+ * Trace Control.
+ */
+int hda_dsp_trace_init(struct snd_sof_dev *sdev, u32 *stream_tag);
+int hda_dsp_trace_release(struct snd_sof_dev *sdev);
+int hda_dsp_trace_trigger(struct snd_sof_dev *sdev, int cmd);
+
+/*
+ * SoundWire support
+ */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+
+int hda_sdw_startup(struct snd_sof_dev *sdev);
+void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable);
+void hda_sdw_process_wakeen(struct snd_sof_dev *sdev);
+
+#else
+
+static inline int hda_sdw_acpi_scan(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline int hda_sdw_probe(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline int hda_sdw_startup(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline int hda_sdw_exit(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable)
+{
+}
+
+static inline bool hda_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ return false;
+}
+
+static inline irqreturn_t hda_dsp_sdw_thread(int irq, void *context)
+{
+ return IRQ_HANDLED;
+}
+
+static inline bool hda_sdw_check_wakeen_irq(struct snd_sof_dev *sdev)
+{
+ return false;
+}
+
+static inline void hda_sdw_process_wakeen(struct snd_sof_dev *sdev)
+{
+}
+#endif
+
+/* common dai driver */
+extern struct snd_soc_dai_driver skl_dai[];
+
+/*
+ * Platform Specific HW abstraction Ops.
+ */
+extern const struct snd_sof_dsp_ops sof_apl_ops;
+extern const struct snd_sof_dsp_ops sof_cnl_ops;
+extern const struct snd_sof_dsp_ops sof_tgl_ops;
+
+extern const struct sof_intel_dsp_desc apl_chip_info;
+extern const struct sof_intel_dsp_desc cnl_chip_info;
+extern const struct sof_intel_dsp_desc skl_chip_info;
+extern const struct sof_intel_dsp_desc icl_chip_info;
+extern const struct sof_intel_dsp_desc tgl_chip_info;
+extern const struct sof_intel_dsp_desc tglh_chip_info;
+extern const struct sof_intel_dsp_desc ehl_chip_info;
+extern const struct sof_intel_dsp_desc jsl_chip_info;
+
+/* machine driver select */
+void hda_machine_select(struct snd_sof_dev *sdev);
+void hda_set_mach_params(const struct snd_soc_acpi_mach *mach,
+ struct device *dev);
+
+#endif
diff --git a/sound/soc/sof/intel/intel-ipc.c b/sound/soc/sof/intel/intel-ipc.c
new file mode 100644
index 000000000..310f9168c
--- /dev/null
+++ b/sound/soc/sof/intel/intel-ipc.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2019 Intel Corporation. All rights reserved.
+//
+// Authors: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
+
+/* Intel-specific SOF IPC code */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <sound/pcm.h>
+#include <sound/sof/stream.h>
+
+#include "../ops.h"
+#include "../sof-priv.h"
+
+struct intel_stream {
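+	/* position offset in the mailbox, set by intel_ipc_pcm_params() */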
+ size_t posn_offset;
+};
+
+/* Mailbox-based Intel IPC implementation */
+void intel_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz)
+{
+ if (!substream || !sdev->stream_box.size) {
+ sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
+ } else {
+ struct intel_stream *stream = substream->runtime->private_data;
+
+ /* The stream might already be closed */
+ if (stream)
+ sof_mailbox_read(sdev, stream->posn_offset, p, sz);
+ }
+}
+EXPORT_SYMBOL_NS(intel_ipc_msg_data, SND_SOC_SOF_INTEL_HIFI_EP_IPC);
+
+int intel_ipc_pcm_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply)
+{
+ struct intel_stream *stream = substream->runtime->private_data;
+ size_t posn_offset = reply->posn_offset;
+
+	/* check that the offset does not overflow the stream box and is aligned */
+ if (posn_offset > sdev->stream_box.size ||
+ posn_offset % sizeof(struct sof_ipc_stream_posn) != 0)
+ return -EINVAL;
+
+ stream->posn_offset = sdev->stream_box.offset + posn_offset;
+
+ dev_dbg(sdev->dev, "pcm: stream dir %d, posn mailbox offset is %zu",
+ substream->stream, stream->posn_offset);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(intel_ipc_pcm_params, SND_SOC_SOF_INTEL_HIFI_EP_IPC);
+
+int intel_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct intel_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+
+ if (!stream)
+ return -ENOMEM;
+
+	/* bind the pcm substream to the intel_stream context */
+ substream->runtime->private_data = stream;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(intel_pcm_open, SND_SOC_SOF_INTEL_HIFI_EP_IPC);
+
+int intel_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct intel_stream *stream = substream->runtime->private_data;
+
+ substream->runtime->private_data = NULL;
+ kfree(stream);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(intel_pcm_close, SND_SOC_SOF_INTEL_HIFI_EP_IPC);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/shim.h b/sound/soc/sof/intel/shim.h
new file mode 100644
index 000000000..1e0afb5c8
--- /dev/null
+++ b/sound/soc/sof/intel/shim.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_SHIM_H
+#define __SOF_INTEL_SHIM_H
+
+/*
+ * SHIM registers for BYT, BSW, CHT, BDW
+ */
+
+#define SHIM_CSR (SHIM_OFFSET + 0x00)
+#define SHIM_PISR (SHIM_OFFSET + 0x08)
+#define SHIM_PIMR (SHIM_OFFSET + 0x10)
+#define SHIM_ISRX (SHIM_OFFSET + 0x18)
+#define SHIM_ISRD (SHIM_OFFSET + 0x20)
+#define SHIM_IMRX (SHIM_OFFSET + 0x28)
+#define SHIM_IMRD (SHIM_OFFSET + 0x30)
+#define SHIM_IPCX (SHIM_OFFSET + 0x38)
+#define SHIM_IPCD (SHIM_OFFSET + 0x40)
+#define SHIM_ISRSC (SHIM_OFFSET + 0x48)
+#define SHIM_ISRLPESC (SHIM_OFFSET + 0x50)
+#define SHIM_IMRSC (SHIM_OFFSET + 0x58)
+#define SHIM_IMRLPESC (SHIM_OFFSET + 0x60)
+#define SHIM_IPCSC (SHIM_OFFSET + 0x68)
+#define SHIM_IPCLPESC (SHIM_OFFSET + 0x70)
+#define SHIM_CLKCTL (SHIM_OFFSET + 0x78)
+#define SHIM_CSR2 (SHIM_OFFSET + 0x80)
+#define SHIM_LTRC (SHIM_OFFSET + 0xE0)
+#define SHIM_HMDC (SHIM_OFFSET + 0xE8)
+
+#define SHIM_PWMCTRL 0x1000
+
+/*
+ * SST SHIM register bits for BYT, BSW, CHT, BDW
+ * Register bit naming and functionality can differ between devices.
+ */
+
+/* CSR / CS */
+#define SHIM_CSR_RST BIT(1)
+#define SHIM_CSR_SBCS0 BIT(2)
+#define SHIM_CSR_SBCS1 BIT(3)
+#define SHIM_CSR_DCS(x) ((x) << 4)
+#define SHIM_CSR_DCS_MASK (0x7 << 4)
+#define SHIM_CSR_STALL BIT(10)
+#define SHIM_CSR_S0IOCS BIT(21)
+#define SHIM_CSR_S1IOCS BIT(23)
+#define SHIM_CSR_LPCS BIT(31)
+#define SHIM_CSR_24MHZ_LPCS \
+ (SHIM_CSR_SBCS0 | SHIM_CSR_SBCS1 | SHIM_CSR_LPCS)
+#define SHIM_CSR_24MHZ_NO_LPCS (SHIM_CSR_SBCS0 | SHIM_CSR_SBCS1)
+#define SHIM_BYT_CSR_RST BIT(0)
+#define SHIM_BYT_CSR_VECTOR_SEL BIT(1)
+#define SHIM_BYT_CSR_STALL BIT(2)
+#define SHIM_BYT_CSR_PWAITMODE BIT(3)
+
+/* ISRX / ISC */
+#define SHIM_ISRX_BUSY BIT(1)
+#define SHIM_ISRX_DONE BIT(0)
+#define SHIM_BYT_ISRX_REQUEST BIT(1)
+
+/* ISRD / ISD */
+#define SHIM_ISRD_BUSY BIT(1)
+#define SHIM_ISRD_DONE BIT(0)
+
+/* IMRX / IMC */
+#define SHIM_IMRX_BUSY BIT(1)
+#define SHIM_IMRX_DONE BIT(0)
+#define SHIM_BYT_IMRX_REQUEST BIT(1)
+
+/* IMRD / IMD */
+#define SHIM_IMRD_DONE BIT(0)
+#define SHIM_IMRD_BUSY BIT(1)
+#define SHIM_IMRD_SSP0 BIT(16)
+#define SHIM_IMRD_DMAC0 BIT(21)
+#define SHIM_IMRD_DMAC1 BIT(22)
+#define SHIM_IMRD_DMAC (SHIM_IMRD_DMAC0 | SHIM_IMRD_DMAC1)
+
+/* IPCX / IPCC */
+#define SHIM_IPCX_DONE BIT(30)
+#define SHIM_IPCX_BUSY BIT(31)
+#define SHIM_BYT_IPCX_DONE BIT_ULL(62)
+#define SHIM_BYT_IPCX_BUSY BIT_ULL(63)
+
+/* IPCD */
+#define SHIM_IPCD_DONE BIT(30)
+#define SHIM_IPCD_BUSY BIT(31)
+#define SHIM_BYT_IPCD_DONE BIT_ULL(62)
+#define SHIM_BYT_IPCD_BUSY BIT_ULL(63)
+
+/* CLKCTL */
+#define SHIM_CLKCTL_SMOS(x) ((x) << 24)
+#define SHIM_CLKCTL_MASK (3 << 24)
+#define SHIM_CLKCTL_DCPLCG BIT(18)
+#define SHIM_CLKCTL_SCOE1 BIT(17)
+#define SHIM_CLKCTL_SCOE0 BIT(16)
+
+/* CSR2 / CS2 */
+#define SHIM_CSR2_SDFD_SSP0 BIT(1)
+#define SHIM_CSR2_SDFD_SSP1 BIT(2)
+
+/* LTRC */
+#define SHIM_LTRC_VAL(x) ((x) << 0)
+
+/* HMDC */
+#define SHIM_HMDC_HDDA0(x) ((x) << 0)
+#define SHIM_HMDC_HDDA1(x) ((x) << 7)
+#define SHIM_HMDC_HDDA_E0_CH0 1
+#define SHIM_HMDC_HDDA_E0_CH1 2
+#define SHIM_HMDC_HDDA_E0_CH2 4
+#define SHIM_HMDC_HDDA_E0_CH3 8
+#define SHIM_HMDC_HDDA_E1_CH0 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH0)
+#define SHIM_HMDC_HDDA_E1_CH1 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH1)
+#define SHIM_HMDC_HDDA_E1_CH2 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH2)
+#define SHIM_HMDC_HDDA_E1_CH3 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH3)
+#define SHIM_HMDC_HDDA_E0_ALLCH \
+ (SHIM_HMDC_HDDA_E0_CH0 | SHIM_HMDC_HDDA_E0_CH1 | \
+ SHIM_HMDC_HDDA_E0_CH2 | SHIM_HMDC_HDDA_E0_CH3)
+#define SHIM_HMDC_HDDA_E1_ALLCH \
+ (SHIM_HMDC_HDDA_E1_CH0 | SHIM_HMDC_HDDA_E1_CH1 | \
+ SHIM_HMDC_HDDA_E1_CH2 | SHIM_HMDC_HDDA_E1_CH3)
+
+/* Audio DSP PCI registers */
+#define PCI_VDRTCTL0 0xa0
+#define PCI_VDRTCTL1 0xa4
+#define PCI_VDRTCTL2 0xa8
+#define PCI_VDRTCTL3		0xac
+
+/* VDRTCTL0 */
+#define PCI_VDRTCL0_D3PGD BIT(0)
+#define PCI_VDRTCL0_D3SRAMPGD BIT(1)
+#define PCI_VDRTCL0_DSRAMPGE_SHIFT 12
+#define PCI_VDRTCL0_DSRAMPGE_MASK GENMASK(PCI_VDRTCL0_DSRAMPGE_SHIFT + 19,\
+ PCI_VDRTCL0_DSRAMPGE_SHIFT)
+#define PCI_VDRTCL0_ISRAMPGE_SHIFT 2
+#define PCI_VDRTCL0_ISRAMPGE_MASK GENMASK(PCI_VDRTCL0_ISRAMPGE_SHIFT + 9,\
+ PCI_VDRTCL0_ISRAMPGE_SHIFT)
+
+/* VDRTCTL2 */
+#define PCI_VDRTCL2_DCLCGE BIT(1)
+#define PCI_VDRTCL2_DTCGE BIT(10)
+#define PCI_VDRTCL2_APLLSE_MASK BIT(31)
+
+/* PMCS */
+#define PCI_PMCS 0x84
+#define PCI_PMCS_PS_MASK 0x3
+
+/* DSP hardware descriptor */
+struct sof_intel_dsp_desc {
+ int cores_num;
+ int host_managed_cores_mask;
+ int init_core_mask; /* cores available after fw boot */
+ int ipc_req;
+ int ipc_req_mask;
+ int ipc_ack;
+ int ipc_ack_mask;
+ int ipc_ctl;
+ int rom_init_timeout;
+ int ssp_count; /* ssp count of the platform */
+ int ssp_base_offset; /* base address of the SSPs */
+};
+
+extern const struct snd_sof_dsp_ops sof_tng_ops;
+extern const struct snd_sof_dsp_ops sof_byt_ops;
+extern const struct snd_sof_dsp_ops sof_cht_ops;
+extern const struct snd_sof_dsp_ops sof_bdw_ops;
+
+extern const struct sof_intel_dsp_desc byt_chip_info;
+extern const struct sof_intel_dsp_desc cht_chip_info;
+extern const struct sof_intel_dsp_desc bdw_chip_info;
+extern const struct sof_intel_dsp_desc tng_chip_info;
+
+struct sof_intel_stream {
+ size_t posn_offset;
+};
+
+#endif
diff --git a/sound/soc/sof/intel/tgl.c b/sound/soc/sof/intel/tgl.c
new file mode 100644
index 000000000..0278b67de
--- /dev/null
+++ b/sound/soc/sof/intel/tgl.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright(c) 2020 Intel Corporation. All rights reserved.
+//
+// Authors: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Tigerlake.
+ */
+
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+#include "../sof-audio.h"
+
+static const struct snd_sof_debugfs_map tgl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+/* Tigerlake ops */
+const struct snd_sof_dsp_ops sof_tgl_ops = {
+ /* probe and remove */
+ .probe = hda_dsp_probe,
+ .remove = hda_dsp_remove,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* doorbell */
+ .irq_thread = cnl_ipc_irq_thread,
+
+ /* ipc */
+ .send_msg = cnl_ipc_send_msg,
+ .fw_ready = sof_fw_ready,
+ .get_mailbox_offset = hda_dsp_ipc_get_mailbox_offset,
+ .get_window_offset = hda_dsp_ipc_get_window_offset,
+
+ .ipc_msg_data = hda_ipc_msg_data,
+ .ipc_pcm_params = hda_ipc_pcm_params,
+
+ /* machine driver */
+ .machine_select = hda_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = hda_set_mach_params,
+
+ /* debug */
+ .debug_map = tgl_dsp_debugfs,
+ .debug_map_count = ARRAY_SIZE(tgl_dsp_debugfs),
+ .dbg_dump = hda_dsp_dump,
+ .ipc_dump = cnl_ipc_dump,
+
+ /* stream callbacks */
+ .pcm_open = hda_dsp_pcm_open,
+ .pcm_close = hda_dsp_pcm_close,
+ .pcm_hw_params = hda_dsp_pcm_hw_params,
+ .pcm_hw_free = hda_dsp_stream_hw_free,
+ .pcm_trigger = hda_dsp_pcm_trigger,
+ .pcm_pointer = hda_dsp_pcm_pointer,
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+ /* probe callbacks */
+ .probe_assign = hda_probe_compr_assign,
+ .probe_free = hda_probe_compr_free,
+ .probe_set_params = hda_probe_compr_set_params,
+ .probe_trigger = hda_probe_compr_trigger,
+ .probe_pointer = hda_probe_compr_pointer,
+#endif
+
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_raw,
+
+ /* pre/post fw run */
+ .pre_fw_run = hda_dsp_pre_fw_run,
+ .post_fw_run = hda_dsp_post_fw_run,
+
+ /* dsp core power up/down */
+ .core_power_up = hda_dsp_enable_core,
+ .core_power_down = hda_dsp_core_reset_power_down,
+
+ /* firmware run */
+ .run = hda_dsp_cl_boot_firmware_iccmax,
+
+ /* trace callback */
+ .trace_init = hda_dsp_trace_init,
+ .trace_release = hda_dsp_trace_release,
+ .trace_trigger = hda_dsp_trace_trigger,
+
+ /* DAI drivers */
+ .drv = skl_dai,
+ .num_drv = SOF_SKL_NUM_DAIS,
+
+ /* PM */
+ .suspend = hda_dsp_suspend,
+ .resume = hda_dsp_resume,
+ .runtime_suspend = hda_dsp_runtime_suspend,
+ .runtime_resume = hda_dsp_runtime_resume,
+ .runtime_idle = hda_dsp_runtime_idle,
+ .set_hw_params_upon_resume = hda_dsp_set_hw_params_upon_resume,
+ .set_power_state = hda_dsp_set_power_state,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+
+ .arch_ops = &sof_xtensa_arch_ops,
+};
+EXPORT_SYMBOL_NS(sof_tgl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc tgl_chip_info = {
+ /* Tigerlake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_init_timeout = 300,
+ .ssp_count = ICL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL_NS(tgl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc tglh_chip_info = {
+ /* Tigerlake-H */
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_init_timeout = 300,
+ .ssp_count = ICL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL_NS(tglh_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
new file mode 100644
index 000000000..fd2b96ae4
--- /dev/null
+++ b/sound/soc/sof/ipc.c
@@ -0,0 +1,866 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+// Generic IPC layer that can work over MMIO and SPI/I2C. PHY layer provided
+// by platform driver code.
+//
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ops.h"
+
+static void ipc_trace_message(struct snd_sof_dev *sdev, u32 msg_id);
+static void ipc_stream_message(struct snd_sof_dev *sdev, u32 msg_cmd);
+
+/*
+ * IPC message Tx/Rx message handling.
+ */
+
+/* SOF generic IPC data */
+struct snd_sof_ipc {
+ struct snd_sof_dev *sdev;
+
+ /* protects messages and the disable flag */
+ struct mutex tx_mutex;
+	/* disables further sending of IPCs */
+ bool disable_ipc_tx;
+
+ struct snd_sof_ipc_msg msg;
+};
+
+struct sof_ipc_ctrl_data_params {
+ size_t msg_bytes;
+ size_t hdr_bytes;
+ size_t pl_size;
+ size_t elems;
+ u32 num_msg;
+ u8 *src;
+ u8 *dst;
+};
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_VERBOSE_IPC)
+static void ipc_log_header(struct device *dev, u8 *text, u32 cmd)
+{
+ u8 *str;
+ u8 *str2 = NULL;
+ u32 glb;
+ u32 type;
+ bool vdbg = false;
+
+ glb = cmd & SOF_GLB_TYPE_MASK;
+ type = cmd & SOF_CMD_TYPE_MASK;
+
+ switch (glb) {
+ case SOF_IPC_GLB_REPLY:
+ str = "GLB_REPLY"; break;
+ case SOF_IPC_GLB_COMPOUND:
+ str = "GLB_COMPOUND"; break;
+ case SOF_IPC_GLB_TPLG_MSG:
+ str = "GLB_TPLG_MSG";
+ switch (type) {
+ case SOF_IPC_TPLG_COMP_NEW:
+ str2 = "COMP_NEW"; break;
+ case SOF_IPC_TPLG_COMP_FREE:
+ str2 = "COMP_FREE"; break;
+ case SOF_IPC_TPLG_COMP_CONNECT:
+ str2 = "COMP_CONNECT"; break;
+ case SOF_IPC_TPLG_PIPE_NEW:
+ str2 = "PIPE_NEW"; break;
+ case SOF_IPC_TPLG_PIPE_FREE:
+ str2 = "PIPE_FREE"; break;
+ case SOF_IPC_TPLG_PIPE_CONNECT:
+ str2 = "PIPE_CONNECT"; break;
+ case SOF_IPC_TPLG_PIPE_COMPLETE:
+ str2 = "PIPE_COMPLETE"; break;
+ case SOF_IPC_TPLG_BUFFER_NEW:
+ str2 = "BUFFER_NEW"; break;
+ case SOF_IPC_TPLG_BUFFER_FREE:
+ str2 = "BUFFER_FREE"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_PM_MSG:
+ str = "GLB_PM_MSG";
+ switch (type) {
+ case SOF_IPC_PM_CTX_SAVE:
+ str2 = "CTX_SAVE"; break;
+ case SOF_IPC_PM_CTX_RESTORE:
+ str2 = "CTX_RESTORE"; break;
+ case SOF_IPC_PM_CTX_SIZE:
+ str2 = "CTX_SIZE"; break;
+ case SOF_IPC_PM_CLK_SET:
+ str2 = "CLK_SET"; break;
+ case SOF_IPC_PM_CLK_GET:
+ str2 = "CLK_GET"; break;
+ case SOF_IPC_PM_CLK_REQ:
+ str2 = "CLK_REQ"; break;
+ case SOF_IPC_PM_CORE_ENABLE:
+ str2 = "CORE_ENABLE"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_COMP_MSG:
+ str = "GLB_COMP_MSG";
+ switch (type) {
+ case SOF_IPC_COMP_SET_VALUE:
+ str2 = "SET_VALUE"; break;
+ case SOF_IPC_COMP_GET_VALUE:
+ str2 = "GET_VALUE"; break;
+ case SOF_IPC_COMP_SET_DATA:
+ str2 = "SET_DATA"; break;
+ case SOF_IPC_COMP_GET_DATA:
+ str2 = "GET_DATA"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_STREAM_MSG:
+ str = "GLB_STREAM_MSG";
+ switch (type) {
+ case SOF_IPC_STREAM_PCM_PARAMS:
+ str2 = "PCM_PARAMS"; break;
+ case SOF_IPC_STREAM_PCM_PARAMS_REPLY:
+ str2 = "PCM_REPLY"; break;
+ case SOF_IPC_STREAM_PCM_FREE:
+ str2 = "PCM_FREE"; break;
+ case SOF_IPC_STREAM_TRIG_START:
+ str2 = "TRIG_START"; break;
+ case SOF_IPC_STREAM_TRIG_STOP:
+ str2 = "TRIG_STOP"; break;
+ case SOF_IPC_STREAM_TRIG_PAUSE:
+ str2 = "TRIG_PAUSE"; break;
+ case SOF_IPC_STREAM_TRIG_RELEASE:
+ str2 = "TRIG_RELEASE"; break;
+ case SOF_IPC_STREAM_TRIG_DRAIN:
+ str2 = "TRIG_DRAIN"; break;
+ case SOF_IPC_STREAM_TRIG_XRUN:
+ str2 = "TRIG_XRUN"; break;
+ case SOF_IPC_STREAM_POSITION:
+ vdbg = true;
+ str2 = "POSITION"; break;
+ case SOF_IPC_STREAM_VORBIS_PARAMS:
+ str2 = "VORBIS_PARAMS"; break;
+ case SOF_IPC_STREAM_VORBIS_FREE:
+ str2 = "VORBIS_FREE"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_FW_READY:
+ str = "FW_READY"; break;
+ case SOF_IPC_GLB_DAI_MSG:
+ str = "GLB_DAI_MSG";
+ switch (type) {
+ case SOF_IPC_DAI_CONFIG:
+ str2 = "CONFIG"; break;
+ case SOF_IPC_DAI_LOOPBACK:
+ str2 = "LOOPBACK"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_TRACE_MSG:
+ str = "GLB_TRACE_MSG"; break;
+ case SOF_IPC_GLB_TEST_MSG:
+ str = "GLB_TEST_MSG";
+ switch (type) {
+ case SOF_IPC_TEST_IPC_FLOOD:
+ str2 = "IPC_FLOOD"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ default:
+ str = "unknown GLB command"; break;
+ }
+
+ if (str2) {
+ if (vdbg)
+ dev_vdbg(dev, "%s: 0x%x: %s: %s\n", text, cmd, str, str2);
+ else
+ dev_dbg(dev, "%s: 0x%x: %s: %s\n", text, cmd, str, str2);
+ } else {
+ dev_dbg(dev, "%s: 0x%x: %s\n", text, cmd, str);
+ }
+}
+#else
+static inline void ipc_log_header(struct device *dev, u8 *text, u32 cmd)
+{
+ if ((cmd & SOF_GLB_TYPE_MASK) != SOF_IPC_GLB_TRACE_MSG)
+ dev_dbg(dev, "%s: 0x%x\n", text, cmd);
+}
+#endif
+
+/* wait for IPC message reply */
+static int tx_wait_done(struct snd_sof_ipc *ipc, struct snd_sof_ipc_msg *msg,
+ void *reply_data)
+{
+ struct snd_sof_dev *sdev = ipc->sdev;
+ struct sof_ipc_cmd_hdr *hdr = msg->msg_data;
+ int ret;
+
+ /* wait for DSP IPC completion */
+ ret = wait_event_timeout(msg->waitq, msg->ipc_complete,
+ msecs_to_jiffies(sdev->ipc_timeout));
+
+ if (ret == 0) {
+ dev_err(sdev->dev, "error: ipc timed out for 0x%x size %d\n",
+ hdr->cmd, hdr->size);
+ snd_sof_handle_fw_exception(ipc->sdev);
+ ret = -ETIMEDOUT;
+ } else {
+ ret = msg->reply_error;
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: ipc error for 0x%x size %zu\n",
+ hdr->cmd, msg->reply_size);
+ } else {
+ ipc_log_header(sdev->dev, "ipc tx succeeded", hdr->cmd);
+ if (msg->reply_size)
+ /* copy the data returned from DSP */
+ memcpy(reply_data, msg->reply_data,
+ msg->reply_size);
+ }
+ }
+
+ return ret;
+}
+
+/* send IPC message from host to DSP */
+static int sof_ipc_tx_message_unlocked(struct snd_sof_ipc *ipc, u32 header,
+ void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes)
+{
+ struct snd_sof_dev *sdev = ipc->sdev;
+ struct snd_sof_ipc_msg *msg;
+ int ret;
+
+ if (ipc->disable_ipc_tx)
+ return -ENODEV;
+
+ /*
+	 * The spin-lock is needed to protect the message object against
+	 * concurrent access from atomic contexts such as the IPC IRQ handler.
+ */
+ spin_lock_irq(&sdev->ipc_lock);
+
+ /* initialise the message */
+ msg = &ipc->msg;
+
+ msg->header = header;
+ msg->msg_size = msg_bytes;
+ msg->reply_size = reply_bytes;
+ msg->reply_error = 0;
+
+ /* attach any data */
+ if (msg_bytes)
+ memcpy(msg->msg_data, msg_data, msg_bytes);
+
+ sdev->msg = msg;
+
+ ret = snd_sof_dsp_send_msg(sdev, msg);
+ /* Next reply that we receive will be related to this message */
+ if (!ret)
+ msg->ipc_complete = false;
+
+ spin_unlock_irq(&sdev->ipc_lock);
+
+ if (ret < 0) {
+ dev_err_ratelimited(sdev->dev,
+ "error: ipc tx failed with error %d\n",
+ ret);
+ return ret;
+ }
+
+ ipc_log_header(sdev->dev, "ipc tx", msg->header);
+
+ /* now wait for completion */
+ if (!ret)
+ ret = tx_wait_done(ipc, msg, reply_data);
+
+ return ret;
+}
+
+/* send IPC message from host to DSP */
+int sof_ipc_tx_message(struct snd_sof_ipc *ipc, u32 header,
+ void *msg_data, size_t msg_bytes, void *reply_data,
+ size_t reply_bytes)
+{
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+ int ret;
+
+ /* ensure the DSP is in D0 before sending a new IPC */
+ ret = snd_sof_dsp_set_power_state(ipc->sdev, &target_state);
+ if (ret < 0) {
+ dev_err(ipc->sdev->dev, "error: resuming DSP %d\n", ret);
+ return ret;
+ }
+
+ return sof_ipc_tx_message_no_pm(ipc, header, msg_data, msg_bytes,
+ reply_data, reply_bytes);
+}
+EXPORT_SYMBOL(sof_ipc_tx_message);
+
+/*
+ * send IPC message from host to DSP without modifying the DSP state.
+ * This is used for IPCs that can be handled by the DSP
+ * even in a low-power D0 substate.
+ */
+int sof_ipc_tx_message_no_pm(struct snd_sof_ipc *ipc, u32 header,
+ void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes)
+{
+ int ret;
+
+ if (msg_bytes > SOF_IPC_MSG_MAX_SIZE ||
+ reply_bytes > SOF_IPC_MSG_MAX_SIZE)
+ return -ENOBUFS;
+
+ /* Serialise IPC TX */
+ mutex_lock(&ipc->tx_mutex);
+
+ ret = sof_ipc_tx_message_unlocked(ipc, header, msg_data, msg_bytes,
+ reply_data, reply_bytes);
+
+ mutex_unlock(&ipc->tx_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(sof_ipc_tx_message_no_pm);
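As a usage illustration only (a sketch that assumes the standard SOF PM context-save message from the IPC headers; it is not part of this patch), a caller that manages the DSP power state itself could send a header-only IPC without forcing a D0 transition:

static int example_send_ctx_save(struct snd_sof_dev *sdev)
{
	struct sof_ipc_pm_ctx pm_ctx;
	struct sof_ipc_reply reply;

	/* header-only PM message; no payload beyond the command header */
	memset(&pm_ctx, 0, sizeof(pm_ctx));
	pm_ctx.hdr.size = sizeof(pm_ctx);
	pm_ctx.hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE;

	return sof_ipc_tx_message_no_pm(sdev->ipc, pm_ctx.hdr.cmd,
					&pm_ctx, sizeof(pm_ctx),
					&reply, sizeof(reply));
}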
+
+/* handle reply message from DSP */
+void snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct snd_sof_ipc_msg *msg = &sdev->ipc->msg;
+
+ if (msg->ipc_complete) {
+ dev_dbg(sdev->dev,
+ "no reply expected, received 0x%x, will be ignored",
+ msg_id);
+ return;
+ }
+
+	/* wake up the sender waiting on this message */
+ msg->ipc_complete = true;
+ wake_up(&msg->waitq);
+}
+EXPORT_SYMBOL(snd_sof_ipc_reply);
+
+/* DSP firmware has sent host a message */
+void snd_sof_ipc_msgs_rx(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_cmd_hdr hdr;
+ u32 cmd, type;
+ int err = 0;
+
+ /* read back header */
+ snd_sof_ipc_msg_data(sdev, NULL, &hdr, sizeof(hdr));
+ ipc_log_header(sdev->dev, "ipc rx", hdr.cmd);
+
+ cmd = hdr.cmd & SOF_GLB_TYPE_MASK;
+ type = hdr.cmd & SOF_CMD_TYPE_MASK;
+
+ /* check message type */
+ switch (cmd) {
+ case SOF_IPC_GLB_REPLY:
+ dev_err(sdev->dev, "error: ipc reply unknown\n");
+ break;
+ case SOF_IPC_FW_READY:
+ /* check for FW boot completion */
+ if (sdev->fw_state == SOF_FW_BOOT_IN_PROGRESS) {
+ err = sof_ops(sdev)->fw_ready(sdev, cmd);
+ if (err < 0)
+ sdev->fw_state = SOF_FW_BOOT_READY_FAILED;
+ else
+ sdev->fw_state = SOF_FW_BOOT_COMPLETE;
+
+ /* wake up firmware loader */
+ wake_up(&sdev->boot_wait);
+ }
+ break;
+ case SOF_IPC_GLB_COMPOUND:
+ case SOF_IPC_GLB_TPLG_MSG:
+ case SOF_IPC_GLB_PM_MSG:
+ case SOF_IPC_GLB_COMP_MSG:
+ break;
+ case SOF_IPC_GLB_STREAM_MSG:
+ /* need to pass msg id into the function */
+ ipc_stream_message(sdev, hdr.cmd);
+ break;
+ case SOF_IPC_GLB_TRACE_MSG:
+ ipc_trace_message(sdev, type);
+ break;
+ default:
+ dev_err(sdev->dev, "error: unknown DSP message 0x%x\n", cmd);
+ break;
+ }
+
+ ipc_log_header(sdev->dev, "ipc rx done", hdr.cmd);
+}
+EXPORT_SYMBOL(snd_sof_ipc_msgs_rx);
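Editorial sketch (function and names are illustrative, not from this patch): platform drivers typically call snd_sof_ipc_msgs_rx() from their IPC interrupt thread once the doorbell registers indicate a DSP-initiated message, along these lines:

static irqreturn_t example_ipc_irq_thread(int irq, void *context)
{
	struct snd_sof_dev *sdev = context;

	/* DSP -> host message pending: let the core decode and dispatch it */
	snd_sof_ipc_msgs_rx(sdev);

	/* a real handler would also clear/ack the doorbell register here */

	return IRQ_HANDLED;
}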
+
+/*
+ * IPC trace mechanism.
+ */
+
+static void ipc_trace_message(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct sof_ipc_dma_trace_posn posn;
+
+ switch (msg_id) {
+ case SOF_IPC_TRACE_DMA_POSITION:
+ /* read back full message */
+ snd_sof_ipc_msg_data(sdev, NULL, &posn, sizeof(posn));
+ snd_sof_trace_update_pos(sdev, &posn);
+ break;
+ default:
+ dev_err(sdev->dev, "error: unhandled trace message %x\n",
+ msg_id);
+ break;
+ }
+}
+
+/*
+ * IPC stream position.
+ */
+
+static void ipc_period_elapsed(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct snd_soc_component *scomp = sdev->component;
+ struct snd_sof_pcm_stream *stream;
+ struct sof_ipc_stream_posn posn;
+ struct snd_sof_pcm *spcm;
+ int direction;
+
+ spcm = snd_sof_find_spcm_comp(scomp, msg_id, &direction);
+ if (!spcm) {
+ dev_err(sdev->dev,
+ "error: period elapsed for unknown stream, msg_id %d\n",
+ msg_id);
+ return;
+ }
+
+ stream = &spcm->stream[direction];
+ snd_sof_ipc_msg_data(sdev, stream->substream, &posn, sizeof(posn));
+
+ dev_vdbg(sdev->dev, "posn : host 0x%llx dai 0x%llx wall 0x%llx\n",
+ posn.host_posn, posn.dai_posn, posn.wallclock);
+
+ memcpy(&stream->posn, &posn, sizeof(posn));
+
+ /* only inform ALSA for period_wakeup mode */
+ if (!stream->substream->runtime->no_period_wakeup)
+ snd_sof_pcm_period_elapsed(stream->substream);
+}
+
+/* DSP notifies host of an XRUN within FW */
+static void ipc_xrun(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct snd_soc_component *scomp = sdev->component;
+ struct snd_sof_pcm_stream *stream;
+ struct sof_ipc_stream_posn posn;
+ struct snd_sof_pcm *spcm;
+ int direction;
+
+ spcm = snd_sof_find_spcm_comp(scomp, msg_id, &direction);
+ if (!spcm) {
+ dev_err(sdev->dev, "error: XRUN for unknown stream, msg_id %d\n",
+ msg_id);
+ return;
+ }
+
+ stream = &spcm->stream[direction];
+ snd_sof_ipc_msg_data(sdev, stream->substream, &posn, sizeof(posn));
+
+ dev_dbg(sdev->dev, "posn XRUN: host %llx comp %d size %d\n",
+ posn.host_posn, posn.xrun_comp_id, posn.xrun_size);
+
+#if defined(CONFIG_SND_SOC_SOF_DEBUG_XRUN_STOP)
+ /* stop PCM on XRUN - used for pipeline debug */
+ memcpy(&stream->posn, &posn, sizeof(posn));
+ snd_pcm_stop_xrun(stream->substream);
+#endif
+}
+
+/* stream notifications from DSP FW */
+static void ipc_stream_message(struct snd_sof_dev *sdev, u32 msg_cmd)
+{
+	/* get msg cmd type and msg id */
+ u32 msg_type = msg_cmd & SOF_CMD_TYPE_MASK;
+ u32 msg_id = SOF_IPC_MESSAGE_ID(msg_cmd);
+
+ switch (msg_type) {
+ case SOF_IPC_STREAM_POSITION:
+ ipc_period_elapsed(sdev, msg_id);
+ break;
+ case SOF_IPC_STREAM_TRIG_XRUN:
+ ipc_xrun(sdev, msg_id);
+ break;
+ default:
+ dev_err(sdev->dev, "error: unhandled stream message %x\n",
+ msg_id);
+ break;
+ }
+}
+
+/* get stream position IPC - use faster MMIO method if available on platform */
+int snd_sof_ipc_stream_posn(struct snd_soc_component *scomp,
+ struct snd_sof_pcm *spcm, int direction,
+ struct sof_ipc_stream_posn *posn)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_stream stream;
+ int err;
+
+ /* read position via slower IPC */
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_POSITION;
+ stream.comp_id = spcm->stream[direction].comp_id;
+
+ /* send IPC to the DSP */
+ err = sof_ipc_tx_message(sdev->ipc,
+ stream.hdr.cmd, &stream, sizeof(stream), posn,
+ sizeof(*posn));
+ if (err < 0) {
+ dev_err(sdev->dev, "error: failed to get stream %d position\n",
+ stream.comp_id);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_ipc_stream_posn);
+
+static int sof_get_ctrl_copy_params(enum sof_ipc_ctrl_type ctrl_type,
+ struct sof_ipc_ctrl_data *src,
+ struct sof_ipc_ctrl_data *dst,
+ struct sof_ipc_ctrl_data_params *sparams)
+{
+ switch (ctrl_type) {
+ case SOF_CTRL_TYPE_VALUE_CHAN_GET:
+ case SOF_CTRL_TYPE_VALUE_CHAN_SET:
+ sparams->src = (u8 *)src->chanv;
+ sparams->dst = (u8 *)dst->chanv;
+ break;
+ case SOF_CTRL_TYPE_VALUE_COMP_GET:
+ case SOF_CTRL_TYPE_VALUE_COMP_SET:
+ sparams->src = (u8 *)src->compv;
+ sparams->dst = (u8 *)dst->compv;
+ break;
+ case SOF_CTRL_TYPE_DATA_GET:
+ case SOF_CTRL_TYPE_DATA_SET:
+ sparams->src = (u8 *)src->data->data;
+ sparams->dst = (u8 *)dst->data->data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* calculate payload size and number of messages */
+ sparams->pl_size = SOF_IPC_MSG_MAX_SIZE - sparams->hdr_bytes;
+ sparams->num_msg = DIV_ROUND_UP(sparams->msg_bytes, sparams->pl_size);
+
+ return 0;
+}
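A worked example of the split computed above (illustrative numbers only; the real values come from the IPC ABI headers):

/*
 * With SOF_IPC_MSG_MAX_SIZE = 384 and hdr_bytes = 128 (illustrative),
 * pl_size = 384 - 128 = 256 payload bytes per partial message, so a
 * 1000-byte binary control blob is transferred in
 * DIV_ROUND_UP(1000, 256) = 4 messages, the last one carrying 232 bytes.
 */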
+
+static int sof_set_get_large_ctrl_data(struct snd_sof_dev *sdev,
+ struct sof_ipc_ctrl_data *cdata,
+ struct sof_ipc_ctrl_data_params *sparams,
+ bool send)
+{
+ struct sof_ipc_ctrl_data *partdata;
+ size_t send_bytes;
+ size_t offset = 0;
+ size_t msg_bytes;
+ size_t pl_size;
+ int err;
+ int i;
+
+	/* allocate the max IPC size; at least one message is always sent */
+ partdata = kzalloc(SOF_IPC_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!partdata)
+ return -ENOMEM;
+
+ if (send)
+ err = sof_get_ctrl_copy_params(cdata->type, cdata, partdata,
+ sparams);
+ else
+ err = sof_get_ctrl_copy_params(cdata->type, partdata, cdata,
+ sparams);
+ if (err < 0) {
+ kfree(partdata);
+ return err;
+ }
+
+ msg_bytes = sparams->msg_bytes;
+ pl_size = sparams->pl_size;
+
+ /* copy the header data */
+ memcpy(partdata, cdata, sparams->hdr_bytes);
+
+ /* Serialise IPC TX */
+ mutex_lock(&sdev->ipc->tx_mutex);
+
+ /* copy the payload data in a loop */
+ for (i = 0; i < sparams->num_msg; i++) {
+ send_bytes = min(msg_bytes, pl_size);
+ partdata->num_elems = send_bytes;
+ partdata->rhdr.hdr.size = sparams->hdr_bytes + send_bytes;
+ partdata->msg_index = i;
+ msg_bytes -= send_bytes;
+ partdata->elems_remaining = msg_bytes;
+
+ if (send)
+ memcpy(sparams->dst, sparams->src + offset, send_bytes);
+
+ err = sof_ipc_tx_message_unlocked(sdev->ipc,
+ partdata->rhdr.hdr.cmd,
+ partdata,
+ partdata->rhdr.hdr.size,
+ partdata,
+ partdata->rhdr.hdr.size);
+ if (err < 0)
+ break;
+
+ if (!send)
+ memcpy(sparams->dst + offset, sparams->src, send_bytes);
+
+ offset += pl_size;
+ }
+
+ mutex_unlock(&sdev->ipc->tx_mutex);
+
+ kfree(partdata);
+ return err;
+}
+
+/*
+ * IPC get()/set() for kcontrols.
+ */
+int snd_sof_ipc_set_get_comp_data(struct snd_sof_control *scontrol,
+ u32 ipc_cmd,
+ enum sof_ipc_ctrl_type ctrl_type,
+ enum sof_ipc_ctrl_cmd ctrl_cmd,
+ bool send)
+{
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
+ struct sof_ipc_fw_version *v = &ready->version;
+ struct sof_ipc_ctrl_data_params sparams;
+ size_t send_bytes;
+ int err;
+
+	/* read or write controls exposed via a firmware readback region */
+ if (scontrol->readback_offset != 0) {
+ /* write/read value header via mmaped region */
+ send_bytes = sizeof(struct sof_ipc_ctrl_value_chan) *
+ cdata->num_elems;
+ if (send)
+ snd_sof_dsp_block_write(sdev, sdev->mmio_bar,
+ scontrol->readback_offset,
+ cdata->chanv, send_bytes);
+
+ else
+ snd_sof_dsp_block_read(sdev, sdev->mmio_bar,
+ scontrol->readback_offset,
+ cdata->chanv, send_bytes);
+ return 0;
+ }
+
+ cdata->rhdr.hdr.cmd = SOF_IPC_GLB_COMP_MSG | ipc_cmd;
+ cdata->cmd = ctrl_cmd;
+ cdata->type = ctrl_type;
+ cdata->comp_id = scontrol->comp_id;
+ cdata->msg_index = 0;
+
+ /* calculate header and data size */
+ switch (cdata->type) {
+ case SOF_CTRL_TYPE_VALUE_CHAN_GET:
+ case SOF_CTRL_TYPE_VALUE_CHAN_SET:
+ sparams.msg_bytes = scontrol->num_channels *
+ sizeof(struct sof_ipc_ctrl_value_chan);
+ sparams.hdr_bytes = sizeof(struct sof_ipc_ctrl_data);
+ sparams.elems = scontrol->num_channels;
+ break;
+ case SOF_CTRL_TYPE_VALUE_COMP_GET:
+ case SOF_CTRL_TYPE_VALUE_COMP_SET:
+ sparams.msg_bytes = scontrol->num_channels *
+ sizeof(struct sof_ipc_ctrl_value_comp);
+ sparams.hdr_bytes = sizeof(struct sof_ipc_ctrl_data);
+ sparams.elems = scontrol->num_channels;
+ break;
+ case SOF_CTRL_TYPE_DATA_GET:
+ case SOF_CTRL_TYPE_DATA_SET:
+ sparams.msg_bytes = cdata->data->size;
+ sparams.hdr_bytes = sizeof(struct sof_ipc_ctrl_data) +
+ sizeof(struct sof_abi_hdr);
+ sparams.elems = cdata->data->size;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cdata->rhdr.hdr.size = sparams.msg_bytes + sparams.hdr_bytes;
+ cdata->num_elems = sparams.elems;
+ cdata->elems_remaining = 0;
+
+ /* send normal size ipc in one part */
+ if (cdata->rhdr.hdr.size <= SOF_IPC_MSG_MAX_SIZE) {
+ err = sof_ipc_tx_message(sdev->ipc, cdata->rhdr.hdr.cmd, cdata,
+ cdata->rhdr.hdr.size, cdata,
+ cdata->rhdr.hdr.size);
+
+ if (err < 0)
+ dev_err(sdev->dev, "error: set/get ctrl ipc comp %d\n",
+ cdata->comp_id);
+
+ return err;
+ }
+
+ /* data is bigger than max ipc size, chop into smaller pieces */
+ dev_dbg(sdev->dev, "large ipc size %u, control size %u\n",
+ cdata->rhdr.hdr.size, scontrol->size);
+
+	/* large messages are only supported from ABI 3.3.0 onwards */
+ if (v->abi_version < SOF_ABI_VER(3, 3, 0)) {
+ dev_err(sdev->dev, "error: incompatible FW ABI version\n");
+ return -EINVAL;
+ }
+
+ err = sof_set_get_large_ctrl_data(sdev, cdata, &sparams, send);
+
+ if (err < 0)
+ dev_err(sdev->dev, "error: set/get large ctrl ipc comp %d\n",
+ cdata->comp_id);
+
+ return err;
+}
+EXPORT_SYMBOL(snd_sof_ipc_set_get_comp_data);
+
+/*
+ * IPC layer enumeration.
+ */
+
+int snd_sof_dsp_mailbox_init(struct snd_sof_dev *sdev, u32 dspbox,
+ size_t dspbox_size, u32 hostbox,
+ size_t hostbox_size)
+{
+ sdev->dsp_box.offset = dspbox;
+ sdev->dsp_box.size = dspbox_size;
+ sdev->host_box.offset = hostbox;
+ sdev->host_box.size = hostbox_size;
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_dsp_mailbox_init);
+
+int snd_sof_ipc_valid(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
+ struct sof_ipc_fw_version *v = &ready->version;
+
+ dev_info(sdev->dev,
+ "Firmware info: version %d:%d:%d-%s\n", v->major, v->minor,
+ v->micro, v->tag);
+ dev_info(sdev->dev,
+ "Firmware: ABI %d:%d:%d Kernel ABI %d:%d:%d\n",
+ SOF_ABI_VERSION_MAJOR(v->abi_version),
+ SOF_ABI_VERSION_MINOR(v->abi_version),
+ SOF_ABI_VERSION_PATCH(v->abi_version),
+ SOF_ABI_MAJOR, SOF_ABI_MINOR, SOF_ABI_PATCH);
+
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION, v->abi_version)) {
+ dev_err(sdev->dev, "error: incompatible FW ABI version\n");
+ return -EINVAL;
+ }
+
+ if (v->abi_version > SOF_ABI_VERSION) {
+ if (!IS_ENABLED(CONFIG_SND_SOC_SOF_STRICT_ABI_CHECKS)) {
+ dev_warn(sdev->dev, "warn: FW ABI is more recent than kernel\n");
+ } else {
+ dev_err(sdev->dev, "error: FW ABI is more recent than kernel\n");
+ return -EINVAL;
+ }
+ }
+
+ if (ready->flags & SOF_IPC_INFO_BUILD) {
+ dev_info(sdev->dev,
+ "Firmware debug build %d on %s-%s - options:\n"
+ " GDB: %s\n"
+ " lock debug: %s\n"
+ " lock vdebug: %s\n",
+ v->build, v->date, v->time,
+ (ready->flags & SOF_IPC_INFO_GDB) ?
+ "enabled" : "disabled",
+ (ready->flags & SOF_IPC_INFO_LOCKS) ?
+ "enabled" : "disabled",
+ (ready->flags & SOF_IPC_INFO_LOCKSV) ?
+ "enabled" : "disabled");
+ }
+
+ /* copy the fw_version into debugfs at first boot */
+ memcpy(&sdev->fw_version, v, sizeof(*v));
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_ipc_valid);
+
+struct snd_sof_ipc *snd_sof_ipc_init(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc *ipc;
+ struct snd_sof_ipc_msg *msg;
+
+ ipc = devm_kzalloc(sdev->dev, sizeof(*ipc), GFP_KERNEL);
+ if (!ipc)
+ return NULL;
+
+ mutex_init(&ipc->tx_mutex);
+ ipc->sdev = sdev;
+ msg = &ipc->msg;
+
+ /* indicate that we aren't sending a message ATM */
+ msg->ipc_complete = true;
+
+ /* pre-allocate message data */
+ msg->msg_data = devm_kzalloc(sdev->dev, SOF_IPC_MSG_MAX_SIZE,
+ GFP_KERNEL);
+ if (!msg->msg_data)
+ return NULL;
+
+ msg->reply_data = devm_kzalloc(sdev->dev, SOF_IPC_MSG_MAX_SIZE,
+ GFP_KERNEL);
+ if (!msg->reply_data)
+ return NULL;
+
+ init_waitqueue_head(&msg->waitq);
+
+ return ipc;
+}
+EXPORT_SYMBOL(snd_sof_ipc_init);
+
+void snd_sof_ipc_free(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc *ipc = sdev->ipc;
+
+ if (!ipc)
+ return;
+
+ /* disable sending of ipc's */
+ mutex_lock(&ipc->tx_mutex);
+ ipc->disable_ipc_tx = true;
+ mutex_unlock(&ipc->tx_mutex);
+}
+EXPORT_SYMBOL(snd_sof_ipc_free);
diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
new file mode 100644
index 000000000..2d5c3fc93
--- /dev/null
+++ b/sound/soc/sof/loader.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+// Generic firmware loader.
+//
+
+#include <linux/firmware.h>
+#include <sound/sof.h>
+#include <sound/sof/ext_manifest.h>
+#include "ops.h"
+
+static int get_ext_windows(struct snd_sof_dev *sdev,
+ const struct sof_ipc_ext_data_hdr *ext_hdr)
+{
+ const struct sof_ipc_window *w =
+ container_of(ext_hdr, struct sof_ipc_window, ext_hdr);
+
+ if (w->num_windows == 0 || w->num_windows > SOF_IPC_MAX_ELEMS)
+ return -EINVAL;
+
+ if (sdev->info_window) {
+ if (memcmp(sdev->info_window, w, ext_hdr->hdr.size)) {
+ dev_err(sdev->dev, "error: mismatch between window descriptor from extended manifest and mailbox");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ /* keep a local copy of the data */
+ sdev->info_window = devm_kmemdup(sdev->dev, w, ext_hdr->hdr.size,
+ GFP_KERNEL);
+ if (!sdev->info_window)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int get_cc_info(struct snd_sof_dev *sdev,
+ const struct sof_ipc_ext_data_hdr *ext_hdr)
+{
+ int ret;
+
+ const struct sof_ipc_cc_version *cc =
+ container_of(ext_hdr, struct sof_ipc_cc_version, ext_hdr);
+
+ if (sdev->cc_version) {
+ if (memcmp(sdev->cc_version, cc, cc->ext_hdr.hdr.size)) {
+			dev_err(sdev->dev, "error: received diverged cc_version descriptions");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ dev_dbg(sdev->dev, "Firmware info: used compiler %s %d:%d:%d%s used optimization flags %s\n",
+ cc->name, cc->major, cc->minor, cc->micro, cc->desc,
+ cc->optim);
+
+ /* create read-only cc_version debugfs to store compiler version info */
+ /* use local copy of the cc_version to prevent data corruption */
+ if (sdev->first_boot) {
+ sdev->cc_version = devm_kmalloc(sdev->dev, cc->ext_hdr.hdr.size,
+ GFP_KERNEL);
+
+ if (!sdev->cc_version)
+ return -ENOMEM;
+
+ memcpy(sdev->cc_version, cc, cc->ext_hdr.hdr.size);
+ ret = snd_sof_debugfs_buf_item(sdev, sdev->cc_version,
+ cc->ext_hdr.hdr.size,
+ "cc_version", 0444);
+
+ /* errors are only due to memory allocation, not debugfs */
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: snd_sof_debugfs_buf_item failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* parse the extended FW boot data structures from FW boot message */
+int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset)
+{
+ struct sof_ipc_ext_data_hdr *ext_hdr;
+ void *ext_data;
+ int ret = 0;
+
+ ext_data = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!ext_data)
+ return -ENOMEM;
+
+ /* get first header */
+ snd_sof_dsp_block_read(sdev, bar, offset, ext_data,
+ sizeof(*ext_hdr));
+ ext_hdr = ext_data;
+
+ while (ext_hdr->hdr.cmd == SOF_IPC_FW_READY) {
+ /* read in ext structure */
+ snd_sof_dsp_block_read(sdev, bar, offset + sizeof(*ext_hdr),
+ (void *)((u8 *)ext_data + sizeof(*ext_hdr)),
+ ext_hdr->hdr.size - sizeof(*ext_hdr));
+
+ dev_dbg(sdev->dev, "found ext header type %d size 0x%x\n",
+ ext_hdr->type, ext_hdr->hdr.size);
+
+ /* process structure data */
+ switch (ext_hdr->type) {
+ case SOF_IPC_EXT_WINDOW:
+ ret = get_ext_windows(sdev, ext_hdr);
+ break;
+ case SOF_IPC_EXT_CC_INFO:
+ ret = get_cc_info(sdev, ext_hdr);
+ break;
+ case SOF_IPC_EXT_UNUSED:
+ case SOF_IPC_EXT_PROBE_INFO:
+ case SOF_IPC_EXT_USER_ABI_INFO:
+ /* They are supported but we don't do anything here */
+ break;
+ default:
+ dev_warn(sdev->dev, "warning: unknown ext header type %d size 0x%x\n",
+ ext_hdr->type, ext_hdr->hdr.size);
+ ret = 0;
+ break;
+ }
+
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to parse ext data type %d\n",
+ ext_hdr->type);
+ break;
+ }
+
+ /* move to next header */
+ offset += ext_hdr->hdr.size;
+ snd_sof_dsp_block_read(sdev, bar, offset, ext_data,
+ sizeof(*ext_hdr));
+ ext_hdr = ext_data;
+ }
+
+ kfree(ext_data);
+ return ret;
+}
+EXPORT_SYMBOL(snd_sof_fw_parse_ext_data);
+
+static int ext_man_get_fw_version(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct sof_ext_man_fw_version *v =
+ container_of(hdr, struct sof_ext_man_fw_version, hdr);
+
+ memcpy(&sdev->fw_ready.version, &v->version, sizeof(v->version));
+ sdev->fw_ready.flags = v->flags;
+
+ /* log ABI versions and check FW compatibility */
+ return snd_sof_ipc_valid(sdev);
+}
+
+static int ext_man_get_windows(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct sof_ext_man_window *w;
+
+ w = container_of(hdr, struct sof_ext_man_window, hdr);
+
+ return get_ext_windows(sdev, &w->ipc_window.ext_hdr);
+}
+
+static int ext_man_get_cc_info(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct sof_ext_man_cc_version *cc;
+
+ cc = container_of(hdr, struct sof_ext_man_cc_version, hdr);
+
+ return get_cc_info(sdev, &cc->cc_version.ext_hdr);
+}
+
+static int ext_man_get_dbg_abi_info(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct ext_man_dbg_abi *dbg_abi =
+ container_of(hdr, struct ext_man_dbg_abi, hdr);
+
+ if (sdev->first_boot)
+ dev_dbg(sdev->dev,
+ "Firmware: DBG_ABI %d:%d:%d\n",
+ SOF_ABI_VERSION_MAJOR(dbg_abi->dbg_abi.abi_dbg_version),
+ SOF_ABI_VERSION_MINOR(dbg_abi->dbg_abi.abi_dbg_version),
+ SOF_ABI_VERSION_PATCH(dbg_abi->dbg_abi.abi_dbg_version));
+
+ return 0;
+}
+
+static ssize_t snd_sof_ext_man_size(const struct firmware *fw)
+{
+ const struct sof_ext_man_header *head;
+
+ head = (struct sof_ext_man_header *)fw->data;
+
+ /*
+	 * assert the fw size is big enough to contain the extended manifest
+	 * header; this prevents reading unallocated memory via `head` in the
+	 * following step.
+ */
+ if (fw->size < sizeof(*head))
+ return -EINVAL;
+
+ /*
+	 * When the fw image starts with an extended manifest, the first u32
+	 * must equal SOF_EXT_MAN_MAGIC_NUMBER.
+ */
+ if (head->magic == SOF_EXT_MAN_MAGIC_NUMBER)
+ return head->full_size;
+
+	/* otherwise the given fw does not have an extended manifest */
+ return 0;
+}
+
+/* parse extended FW manifest data structures */
+static int snd_sof_fw_ext_man_parse(struct snd_sof_dev *sdev,
+ const struct firmware *fw)
+{
+ const struct sof_ext_man_elem_header *elem_hdr;
+ const struct sof_ext_man_header *head;
+ ssize_t ext_man_size;
+ ssize_t remaining;
+ uintptr_t iptr;
+ int ret = 0;
+
+ head = (struct sof_ext_man_header *)fw->data;
+ remaining = head->full_size - head->header_size;
+ ext_man_size = snd_sof_ext_man_size(fw);
+
+ /* Assert firmware starts with extended manifest */
+ if (ext_man_size <= 0)
+ return ext_man_size;
+
+ /* incompatible version */
+ if (SOF_EXT_MAN_VERSION_INCOMPATIBLE(SOF_EXT_MAN_VERSION,
+ head->header_version)) {
+		dev_err(sdev->dev, "error: extended manifest version 0x%X differs from used 0x%X\n",
+ head->header_version, SOF_EXT_MAN_VERSION);
+ return -EINVAL;
+ }
+
+ /* get first extended manifest element header */
+ iptr = (uintptr_t)fw->data + head->header_size;
+
+ while (remaining > sizeof(*elem_hdr)) {
+ elem_hdr = (struct sof_ext_man_elem_header *)iptr;
+
+ dev_dbg(sdev->dev, "found sof_ext_man header type %d size 0x%X\n",
+ elem_hdr->type, elem_hdr->size);
+
+ if (elem_hdr->size < sizeof(*elem_hdr) ||
+ elem_hdr->size > remaining) {
+ dev_err(sdev->dev, "error: invalid sof_ext_man header size, type %d size 0x%X\n",
+ elem_hdr->type, elem_hdr->size);
+ return -EINVAL;
+ }
+
+ /* process structure data */
+ switch (elem_hdr->type) {
+ case SOF_EXT_MAN_ELEM_FW_VERSION:
+ ret = ext_man_get_fw_version(sdev, elem_hdr);
+ break;
+ case SOF_EXT_MAN_ELEM_WINDOW:
+ ret = ext_man_get_windows(sdev, elem_hdr);
+ break;
+ case SOF_EXT_MAN_ELEM_CC_VERSION:
+ ret = ext_man_get_cc_info(sdev, elem_hdr);
+ break;
+ case SOF_EXT_MAN_ELEM_DBG_ABI:
+ ret = ext_man_get_dbg_abi_info(sdev, elem_hdr);
+ break;
+ default:
+ dev_warn(sdev->dev, "warning: unknown sof_ext_man header type %d size 0x%X\n",
+ elem_hdr->type, elem_hdr->size);
+ break;
+ }
+
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to parse sof_ext_man header type %d size 0x%X\n",
+ elem_hdr->type, elem_hdr->size);
+ return ret;
+ }
+
+ remaining -= elem_hdr->size;
+ iptr += elem_hdr->size;
+ }
+
+ if (remaining) {
+ dev_err(sdev->dev, "error: sof_ext_man header is inconsistent\n");
+ return -EINVAL;
+ }
+
+ return ext_man_size;
+}
+
+/*
+ * IPC Firmware ready.
+ */
+static void sof_get_windows(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_window_elem *elem;
+ u32 outbox_offset = 0;
+ u32 stream_offset = 0;
+ u32 inbox_offset = 0;
+ u32 outbox_size = 0;
+ u32 stream_size = 0;
+ u32 inbox_size = 0;
+ u32 debug_size = 0;
+ u32 debug_offset = 0;
+ int window_offset;
+ int bar;
+ int i;
+
+ if (!sdev->info_window) {
+ dev_err(sdev->dev, "error: have no window info\n");
+ return;
+ }
+
+ bar = snd_sof_dsp_get_bar_index(sdev, SOF_FW_BLK_TYPE_SRAM);
+ if (bar < 0) {
+ dev_err(sdev->dev, "error: have no bar mapping\n");
+ return;
+ }
+
+ for (i = 0; i < sdev->info_window->num_windows; i++) {
+ elem = &sdev->info_window->window[i];
+
+ window_offset = snd_sof_dsp_get_window_offset(sdev, elem->id);
+ if (window_offset < 0) {
+ dev_warn(sdev->dev, "warn: no offset for window %d\n",
+ elem->id);
+ continue;
+ }
+
+ switch (elem->type) {
+ case SOF_IPC_REGION_UPBOX:
+ inbox_offset = window_offset + elem->offset;
+ inbox_size = elem->size;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[bar] +
+ inbox_offset,
+ elem->size, "inbox",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_DOWNBOX:
+ outbox_offset = window_offset + elem->offset;
+ outbox_size = elem->size;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[bar] +
+ outbox_offset,
+ elem->size, "outbox",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_TRACE:
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[bar] +
+ window_offset +
+ elem->offset,
+ elem->size, "etrace",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_DEBUG:
+ debug_offset = window_offset + elem->offset;
+ debug_size = elem->size;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[bar] +
+ window_offset +
+ elem->offset,
+ elem->size, "debug",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_STREAM:
+ stream_offset = window_offset + elem->offset;
+ stream_size = elem->size;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[bar] +
+ stream_offset,
+ elem->size, "stream",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_REGS:
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[bar] +
+ window_offset +
+ elem->offset,
+ elem->size, "regs",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_EXCEPTION:
+ sdev->dsp_oops_offset = window_offset + elem->offset;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[bar] +
+ window_offset +
+ elem->offset,
+ elem->size, "exception",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ default:
+			dev_err(sdev->dev, "error: invalid window info received\n");
+ return;
+ }
+ }
+
+ if (outbox_size == 0 || inbox_size == 0) {
+		dev_err(sdev->dev, "error: invalid mailbox window\n");
+ return;
+ }
+
+ snd_sof_dsp_mailbox_init(sdev, inbox_offset, inbox_size,
+ outbox_offset, outbox_size);
+ sdev->stream_box.offset = stream_offset;
+ sdev->stream_box.size = stream_size;
+
+ sdev->debug_box.offset = debug_offset;
+ sdev->debug_box.size = debug_size;
+
+ dev_dbg(sdev->dev, " mailbox upstream 0x%x - size 0x%x\n",
+ inbox_offset, inbox_size);
+ dev_dbg(sdev->dev, " mailbox downstream 0x%x - size 0x%x\n",
+ outbox_offset, outbox_size);
+ dev_dbg(sdev->dev, " stream region 0x%x - size 0x%x\n",
+ stream_offset, stream_size);
+ dev_dbg(sdev->dev, " debug region 0x%x - size 0x%x\n",
+ debug_offset, debug_size);
+}
+
+/* check for ABI compatibility and create memory windows on first boot */
+int sof_fw_ready(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct sof_ipc_fw_ready *fw_ready = &sdev->fw_ready;
+ int offset;
+ int bar;
+ int ret;
+
+ /* mailbox must be on 4k boundary */
+ offset = snd_sof_dsp_get_mailbox_offset(sdev);
+ if (offset < 0) {
+ dev_err(sdev->dev, "error: have no mailbox offset\n");
+ return offset;
+ }
+
+ bar = snd_sof_dsp_get_bar_index(sdev, SOF_FW_BLK_TYPE_SRAM);
+ if (bar < 0) {
+ dev_err(sdev->dev, "error: have no bar mapping\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev, "ipc: DSP is ready 0x%8.8x offset 0x%x\n",
+ msg_id, offset);
+
+ /* no need to re-check version/ABI for subsequent boots */
+ if (!sdev->first_boot)
+ return 0;
+
+ /* copy data from the DSP FW ready offset */
+ sof_block_read(sdev, bar, offset, fw_ready, sizeof(*fw_ready));
+
+ /* make sure ABI version is compatible */
+ ret = snd_sof_ipc_valid(sdev);
+ if (ret < 0)
+ return ret;
+
+ /* now check for extended data */
+ snd_sof_fw_parse_ext_data(sdev, bar, offset +
+ sizeof(struct sof_ipc_fw_ready));
+
+ sof_get_windows(sdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_fw_ready);
+
+/* generic module parser for mmaped DSPs */
+int snd_sof_parse_module_memcpy(struct snd_sof_dev *sdev,
+ struct snd_sof_mod_hdr *module)
+{
+ struct snd_sof_blk_hdr *block;
+ int count, bar;
+ u32 offset;
+ size_t remaining;
+
+ dev_dbg(sdev->dev, "new module size 0x%x blocks 0x%x type 0x%x\n",
+ module->size, module->num_blocks, module->type);
+
+ block = (struct snd_sof_blk_hdr *)((u8 *)module + sizeof(*module));
+
+ /* module->size doesn't include header size */
+ remaining = module->size;
+ for (count = 0; count < module->num_blocks; count++) {
+ /* check for wrap */
+ if (remaining < sizeof(*block)) {
+ dev_err(sdev->dev, "error: not enough data remaining\n");
+ return -EINVAL;
+ }
+
+ /* minus header size of block */
+ remaining -= sizeof(*block);
+
+ if (block->size == 0) {
+ dev_warn(sdev->dev,
+ "warning: block %d size zero\n", count);
+ dev_warn(sdev->dev, " type 0x%x offset 0x%x\n",
+ block->type, block->offset);
+ continue;
+ }
+
+ switch (block->type) {
+ case SOF_FW_BLK_TYPE_RSRVD0:
+ case SOF_FW_BLK_TYPE_ROM...SOF_FW_BLK_TYPE_RSRVD14:
+ continue; /* not handled atm */
+ case SOF_FW_BLK_TYPE_IRAM:
+ case SOF_FW_BLK_TYPE_DRAM:
+ case SOF_FW_BLK_TYPE_SRAM:
+ offset = block->offset;
+ bar = snd_sof_dsp_get_bar_index(sdev, block->type);
+ if (bar < 0) {
+ dev_err(sdev->dev,
+ "error: no BAR mapping for block type 0x%x\n",
+ block->type);
+ return bar;
+ }
+ break;
+ default:
+ dev_err(sdev->dev, "error: bad type 0x%x for block 0x%x\n",
+ block->type, count);
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev,
+ "block %d type 0x%x size 0x%x ==> offset 0x%x\n",
+ count, block->type, block->size, offset);
+
+ /* checking block->size to avoid unaligned access */
+ if (block->size % sizeof(u32)) {
+ dev_err(sdev->dev, "error: invalid block size 0x%x\n",
+ block->size);
+ return -EINVAL;
+ }
+ snd_sof_dsp_block_write(sdev, bar, offset,
+ block + 1, block->size);
+
+ if (remaining < block->size) {
+ dev_err(sdev->dev, "error: not enough data remaining\n");
+ return -EINVAL;
+ }
+
+ /* minus body size of block */
+ remaining -= block->size;
+ /* next block */
+ block = (struct snd_sof_blk_hdr *)((u8 *)block + sizeof(*block)
+ + block->size);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_parse_module_memcpy);
+
+static int check_header(struct snd_sof_dev *sdev, const struct firmware *fw,
+ size_t fw_offset)
+{
+ struct snd_sof_fw_header *header;
+ size_t fw_size = fw->size - fw_offset;
+
+ if (fw->size <= fw_offset) {
+ dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
+ return -EINVAL;
+ }
+
+ /* Read the header information from the data pointer */
+ header = (struct snd_sof_fw_header *)(fw->data + fw_offset);
+
+ /* verify FW sig */
+ if (strncmp(header->sig, SND_SOF_FW_SIG, SND_SOF_FW_SIG_SIZE) != 0) {
+ dev_err(sdev->dev, "error: invalid firmware signature\n");
+ return -EINVAL;
+ }
+
+ /* check size is valid */
+ if (fw_size != header->file_size + sizeof(*header)) {
+		dev_err(sdev->dev, "error: file size mismatch, got 0x%zx expected 0x%zx\n",
+ fw_size, header->file_size + sizeof(*header));
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev, "header size=0x%x modules=0x%x abi=0x%x size=%zu\n",
+ header->file_size, header->num_modules,
+ header->abi, sizeof(*header));
+
+ return 0;
+}
+
+static int load_modules(struct snd_sof_dev *sdev, const struct firmware *fw,
+ size_t fw_offset)
+{
+ struct snd_sof_fw_header *header;
+ struct snd_sof_mod_hdr *module;
+ int (*load_module)(struct snd_sof_dev *sof_dev,
+ struct snd_sof_mod_hdr *hdr);
+ int ret, count;
+ size_t remaining;
+
+ header = (struct snd_sof_fw_header *)(fw->data + fw_offset);
+ load_module = sof_ops(sdev)->load_module;
+ if (!load_module)
+ return -EINVAL;
+
+ /* parse each module */
+ module = (struct snd_sof_mod_hdr *)(fw->data + fw_offset +
+ sizeof(*header));
+ remaining = fw->size - sizeof(*header) - fw_offset;
+ /* check for wrap */
+ if (remaining > fw->size) {
+ dev_err(sdev->dev, "error: fw size smaller than header size\n");
+ return -EINVAL;
+ }
+
+ for (count = 0; count < header->num_modules; count++) {
+ /* check for wrap */
+ if (remaining < sizeof(*module)) {
+ dev_err(sdev->dev, "error: not enough data remaining\n");
+ return -EINVAL;
+ }
+
+ /* minus header size of module */
+ remaining -= sizeof(*module);
+
+ /* module */
+ ret = load_module(sdev, module);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: invalid module %d\n", count);
+ return ret;
+ }
+
+ if (remaining < module->size) {
+ dev_err(sdev->dev, "error: not enough data remaining\n");
+ return -EINVAL;
+ }
+
+ /* minus body size of module */
+ remaining -= module->size;
+ module = (struct snd_sof_mod_hdr *)((u8 *)module
+ + sizeof(*module) + module->size);
+ }
+
+ return 0;
+}
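For orientation, the image layout implied by check_header(), load_modules() and snd_sof_parse_module_memcpy() above is roughly the following (an editorial sketch; offsets and padding omitted):

/*
 *   [struct sof_ext_man_header + elements]     optional, skipped via fw_offset
 *   [struct snd_sof_fw_header]                 signature, file_size, num_modules
 *     [struct snd_sof_mod_hdr #0]              size, num_blocks, type
 *       [struct snd_sof_blk_hdr #0][payload]   copied to IRAM/DRAM/SRAM
 *       [struct snd_sof_blk_hdr #1][payload]
 *     [struct snd_sof_mod_hdr #1]
 *       ...
 */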
+
+int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const char *fw_filename;
+ ssize_t ext_man_size;
+ int ret;
+
+ /* Don't request firmware again if firmware is already requested */
+ if (plat_data->fw)
+ return 0;
+
+ fw_filename = kasprintf(GFP_KERNEL, "%s/%s",
+ plat_data->fw_filename_prefix,
+ plat_data->fw_filename);
+ if (!fw_filename)
+ return -ENOMEM;
+
+ ret = request_firmware(&plat_data->fw, fw_filename, sdev->dev);
+
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: request firmware %s failed err: %d\n",
+ fw_filename, ret);
+ goto err;
+ } else {
+ dev_dbg(sdev->dev, "request_firmware %s successful\n",
+ fw_filename);
+ }
+
+ /* check for extended manifest */
+ ext_man_size = snd_sof_fw_ext_man_parse(sdev, plat_data->fw);
+ if (ext_man_size > 0) {
+		/* extended manifest parsed OK: skip it when loading the image */
+ plat_data->fw_offset = ext_man_size;
+ } else if (!ext_man_size) {
+ /* No extended manifest, so nothing to skip during FW load */
+ dev_dbg(sdev->dev, "firmware doesn't contain extended manifest\n");
+ } else {
+ ret = ext_man_size;
+ dev_err(sdev->dev, "error: firmware %s contains unsupported or invalid extended manifest: %d\n",
+ fw_filename, ret);
+ }
+
+err:
+ kfree(fw_filename);
+
+ return ret;
+}
+EXPORT_SYMBOL(snd_sof_load_firmware_raw);
+
+int snd_sof_load_firmware_memcpy(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ int ret;
+
+ ret = snd_sof_load_firmware_raw(sdev);
+ if (ret < 0)
+ return ret;
+
+ /* make sure the FW header and file is valid */
+ ret = check_header(sdev, plat_data->fw, plat_data->fw_offset);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: invalid FW header\n");
+ goto error;
+ }
+
+ /* prepare the DSP for FW loading */
+ ret = snd_sof_dsp_reset(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to reset DSP\n");
+ goto error;
+ }
+
+ /* parse and load firmware modules to DSP */
+ ret = load_modules(sdev, plat_data->fw, plat_data->fw_offset);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: invalid FW modules\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ release_firmware(plat_data->fw);
+ plat_data->fw = NULL;
+ return ret;
+
+}
+EXPORT_SYMBOL(snd_sof_load_firmware_memcpy);
+
+int snd_sof_load_firmware(struct snd_sof_dev *sdev)
+{
+ dev_dbg(sdev->dev, "loading firmware\n");
+
+ if (sof_ops(sdev)->load_firmware)
+ return sof_ops(sdev)->load_firmware(sdev);
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_load_firmware);
+
+int snd_sof_run_firmware(struct snd_sof_dev *sdev)
+{
+ int ret;
+ int init_core_mask;
+
+ init_waitqueue_head(&sdev->boot_wait);
+
+ /* create read-only fw_version debugfs to store boot version info */
+ if (sdev->first_boot) {
+ ret = snd_sof_debugfs_buf_item(sdev, &sdev->fw_version,
+ sizeof(sdev->fw_version),
+ "fw_version", 0444);
+ /* errors are only due to memory allocation, not debugfs */
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: snd_sof_debugfs_buf_item failed\n");
+ return ret;
+ }
+ }
+
+ /* perform pre fw run operations */
+ ret = snd_sof_dsp_pre_fw_run(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed pre fw run op\n");
+ return ret;
+ }
+
+ dev_dbg(sdev->dev, "booting DSP firmware\n");
+
+ /* boot the firmware on the DSP */
+ ret = snd_sof_dsp_run(sdev);
+ if (ret < 0) {
+		dev_err(sdev->dev, "error: failed to start DSP firmware\n");
+ return ret;
+ }
+
+ init_core_mask = ret;
+
+ /*
+ * now wait for the DSP to boot. There are 3 possible outcomes:
+ * 1. Boot wait times out indicating FW boot failure.
+ * 2. FW boots successfully and fw_ready op succeeds.
+ * 3. FW boots but fw_ready op fails.
+ */
+ ret = wait_event_timeout(sdev->boot_wait,
+ sdev->fw_state > SOF_FW_BOOT_IN_PROGRESS,
+ msecs_to_jiffies(sdev->boot_timeout));
+ if (ret == 0) {
+ dev_err(sdev->dev, "error: firmware boot failure\n");
+ snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX |
+ SOF_DBG_TEXT | SOF_DBG_PCI);
+ sdev->fw_state = SOF_FW_BOOT_FAILED;
+ return -EIO;
+ }
+
+ if (sdev->fw_state == SOF_FW_BOOT_COMPLETE)
+ dev_dbg(sdev->dev, "firmware boot complete\n");
+ else
+ return -EIO; /* FW boots but fw_ready op failed */
+
+ /* perform post fw run operations */
+ ret = snd_sof_dsp_post_fw_run(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed post fw run op\n");
+ return ret;
+ }
+
+ /* fw boot is complete. Update the active cores mask */
+ sdev->enabled_cores_mask = init_core_mask;
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_run_firmware);
+
+void snd_sof_fw_unload(struct snd_sof_dev *sdev)
+{
+ /* TODO: support module unloading at runtime */
+ release_firmware(sdev->pdata->fw);
+ sdev->pdata->fw = NULL;
+}
+EXPORT_SYMBOL(snd_sof_fw_unload);
diff --git a/sound/soc/sof/nocodec.c b/sound/soc/sof/nocodec.c
new file mode 100644
index 000000000..9e922df6a
--- /dev/null
+++ b/sound/soc/sof/nocodec.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include "sof-priv.h"
+
+static struct snd_soc_card sof_nocodec_card = {
+ .name = "nocodec", /* the sof- prefix is added by the core */
+ .owner = THIS_MODULE
+};
+
+static int sof_nocodec_bes_setup(struct device *dev,
+ const struct snd_sof_dsp_ops *ops,
+ struct snd_soc_dai_link *links,
+ int link_num, struct snd_soc_card *card)
+{
+ struct snd_soc_dai_link_component *dlc;
+ int i;
+
+ if (!ops || !links || !card)
+ return -EINVAL;
+
+ /* set up BE dai_links */
+ for (i = 0; i < link_num; i++) {
+ dlc = devm_kzalloc(dev, 3 * sizeof(*dlc), GFP_KERNEL);
+ if (!dlc)
+ return -ENOMEM;
+
+ links[i].name = devm_kasprintf(dev, GFP_KERNEL,
+ "NoCodec-%d", i);
+ if (!links[i].name)
+ return -ENOMEM;
+
+ links[i].cpus = &dlc[0];
+ links[i].codecs = &dlc[1];
+ links[i].platforms = &dlc[2];
+
+ links[i].num_cpus = 1;
+ links[i].num_codecs = 1;
+ links[i].num_platforms = 1;
+
+ links[i].id = i;
+ links[i].no_pcm = 1;
+ links[i].cpus->dai_name = ops->drv[i].name;
+ links[i].platforms->name = dev_name(dev);
+ links[i].codecs->dai_name = "snd-soc-dummy-dai";
+ links[i].codecs->name = "snd-soc-dummy";
+ if (ops->drv[i].playback.channels_min)
+ links[i].dpcm_playback = 1;
+ if (ops->drv[i].capture.channels_min)
+ links[i].dpcm_capture = 1;
+ }
+
+ card->dai_link = links;
+ card->num_links = link_num;
+
+ return 0;
+}
+
+int sof_nocodec_setup(struct device *dev,
+ const struct snd_sof_dsp_ops *ops)
+{
+ struct snd_soc_dai_link *links;
+
+ /* create dummy BE dai_links */
+ links = devm_kzalloc(dev, sizeof(struct snd_soc_dai_link) *
+ ops->num_drv, GFP_KERNEL);
+ if (!links)
+ return -ENOMEM;
+
+ return sof_nocodec_bes_setup(dev, ops, links, ops->num_drv,
+ &sof_nocodec_card);
+}
+EXPORT_SYMBOL(sof_nocodec_setup);
+
+static int sof_nocodec_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &sof_nocodec_card;
+
+ card->dev = &pdev->dev;
+
+ return devm_snd_soc_register_card(&pdev->dev, card);
+}
+
+static int sof_nocodec_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver sof_nocodec_audio = {
+ .probe = sof_nocodec_probe,
+ .remove = sof_nocodec_remove,
+ .driver = {
+ .name = "sof-nocodec",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+module_platform_driver(sof_nocodec_audio)
+
+MODULE_DESCRIPTION("ASoC sof nocodec");
+MODULE_AUTHOR("Liam Girdwood");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:sof-nocodec");
diff --git a/sound/soc/sof/ops.c b/sound/soc/sof/ops.c
new file mode 100644
index 000000000..1a394b4c6
--- /dev/null
+++ b/sound/soc/sof/ops.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/pci.h>
+#include "ops.h"
+
+static
+bool snd_sof_pci_update_bits_unlocked(struct snd_sof_dev *sdev, u32 offset,
+ u32 mask, u32 value)
+{
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ unsigned int old, new;
+ u32 ret = 0;
+
+ pci_read_config_dword(pci, offset, &ret);
+ old = ret;
+ dev_dbg(sdev->dev, "Debug PCIR: %8.8x at %8.8x\n", old & mask, offset);
+
+ new = (old & ~mask) | (value & mask);
+
+ if (old == new)
+ return false;
+
+ pci_write_config_dword(pci, offset, new);
+ dev_dbg(sdev->dev, "Debug PCIW: %8.8x at %8.8x\n", value,
+ offset);
+
+ return true;
+}
+
+bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
+ u32 mask, u32 value)
+{
+ unsigned long flags;
+ bool change;
+
+ spin_lock_irqsave(&sdev->hw_lock, flags);
+ change = snd_sof_pci_update_bits_unlocked(sdev, offset, mask, value);
+ spin_unlock_irqrestore(&sdev->hw_lock, flags);
+ return change;
+}
+EXPORT_SYMBOL(snd_sof_pci_update_bits);
+
+bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value)
+{
+ unsigned int old, new;
+ u32 ret;
+
+ ret = snd_sof_dsp_read(sdev, bar, offset);
+
+ old = ret;
+ new = (old & ~mask) | (value & mask);
+
+ if (old == new)
+ return false;
+
+ snd_sof_dsp_write(sdev, bar, offset, new);
+
+ return true;
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits_unlocked);
+
+bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u64 mask, u64 value)
+{
+ u64 old, new;
+
+ old = snd_sof_dsp_read64(sdev, bar, offset);
+
+ new = (old & ~mask) | (value & mask);
+
+ if (old == new)
+ return false;
+
+ snd_sof_dsp_write64(sdev, bar, offset, new);
+
+ return true;
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits64_unlocked);
+
+/* This is for register bits with attribute RWC */
+bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
+ u32 mask, u32 value)
+{
+ unsigned long flags;
+ bool change;
+
+ spin_lock_irqsave(&sdev->hw_lock, flags);
+ change = snd_sof_dsp_update_bits_unlocked(sdev, bar, offset, mask,
+ value);
+ spin_unlock_irqrestore(&sdev->hw_lock, flags);
+ return change;
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits);
+
+bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar, u32 offset,
+ u64 mask, u64 value)
+{
+ unsigned long flags;
+ bool change;
+
+ spin_lock_irqsave(&sdev->hw_lock, flags);
+ change = snd_sof_dsp_update_bits64_unlocked(sdev, bar, offset, mask,
+ value);
+ spin_unlock_irqrestore(&sdev->hw_lock, flags);
+ return change;
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits64);
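
As a usage illustration (not part of the patch), the locked helpers above follow the usual read-modify-write pattern; a minimal sketch, assuming a hypothetical BAR index, register offset and control bit:

/* Illustrative sketch only -- BAR, offset and bit are hypothetical. */
static void example_toggle_ctrl_bit(struct snd_sof_dev *sdev)
{
	const u32 bar = 0;	/* assumed BAR index */
	const u32 reg = 0x48;	/* assumed register offset */
	const u32 bit = BIT(3);	/* assumed control bit */

	/* returns true only if the register value actually changed */
	if (snd_sof_dsp_update_bits(sdev, bar, reg, bit, bit))
		dev_dbg(sdev->dev, "control bit was set\n");

	/* clear it again */
	snd_sof_dsp_update_bits(sdev, bar, reg, bit, 0);
}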
+
+static
+void snd_sof_dsp_update_bits_forced_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value)
+{
+ unsigned int old, new;
+ u32 ret;
+
+ ret = snd_sof_dsp_read(sdev, bar, offset);
+
+ old = ret;
+ new = (old & ~mask) | (value & mask);
+
+ snd_sof_dsp_write(sdev, bar, offset, new);
+}
+
+/* This is for register bits with attribute RWC */
+void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdev->hw_lock, flags);
+ snd_sof_dsp_update_bits_forced_unlocked(sdev, bar, offset, mask, value);
+ spin_unlock_irqrestore(&sdev->hw_lock, flags);
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits_forced);
+
+void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset)
+{
+ dev_err(sdev->dev, "error : DSP panic!\n");
+
+ /*
+ * If the DSP has not yet set dsp_oops_offset, take it from the panic
+ * message. Otherwise log both values so they can be compared against
+ * the memory window setting.
+ */
+ if (!sdev->dsp_oops_offset)
+ sdev->dsp_oops_offset = offset;
+ else
+ dev_dbg(sdev->dev, "panic: dsp_oops_offset %zu offset %d\n",
+ sdev->dsp_oops_offset, offset);
+
+ snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX);
+ snd_sof_trace_notify_for_error(sdev);
+}
+EXPORT_SYMBOL(snd_sof_dsp_panic);
diff --git a/sound/soc/sof/ops.h b/sound/soc/sof/ops.h
new file mode 100644
index 000000000..b21632f55
--- /dev/null
+++ b/sound/soc/sof/ops.h
@@ -0,0 +1,556 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOUND_SOC_SOF_IO_H
+#define __SOUND_SOC_SOF_IO_H
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <sound/pcm.h>
+#include "sof-priv.h"
+
+#define sof_ops(sdev) \
+ ((sdev)->pdata->desc->ops)
+
+/* Mandatory operations are verified during probing */
+
+/* init */
+static inline int snd_sof_probe(struct snd_sof_dev *sdev)
+{
+ return sof_ops(sdev)->probe(sdev);
+}
+
+static inline int snd_sof_remove(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->remove)
+ return sof_ops(sdev)->remove(sdev);
+
+ return 0;
+}
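
A minimal sketch (not part of the patch) of what a platform fills in for these ops; the my_* stubs and their bodies are assumptions, only the callback names and signatures come from the wrappers in this header.

/* Illustrative sketch only -- the my_* stubs are assumed placeholders. */
static int my_probe(struct snd_sof_dev *sdev)
{
	return 0;	/* map BARs, request IRQs, ... */
}

static int my_run(struct snd_sof_dev *sdev)
{
	return BIT(0);	/* mask of cores available after FW boot */
}

static int my_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
	return 0;	/* write @msg to the DSP mailbox and ring the doorbell */
}

static const struct snd_sof_dsp_ops my_platform_ops = {
	.probe    = my_probe,
	.run      = my_run,
	.send_msg = my_send_msg,
	/* optional ops (.stall, .reset, .suspend, ...) may stay NULL */
};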
+
+/* control */
+
+/*
+ * snd_sof_dsp_run returns the core mask of the cores that are available
+ * after successful fw boot
+ */
+static inline int snd_sof_dsp_run(struct snd_sof_dev *sdev)
+{
+ return sof_ops(sdev)->run(sdev);
+}
+
+static inline int snd_sof_dsp_stall(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->stall)
+ return sof_ops(sdev)->stall(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_reset(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->reset)
+ return sof_ops(sdev)->reset(sdev);
+
+ return 0;
+}
+
+/* dsp core power up/power down */
+static inline int snd_sof_dsp_core_power_up(struct snd_sof_dev *sdev,
+ unsigned int core_mask)
+{
+ if (sof_ops(sdev)->core_power_up)
+ return sof_ops(sdev)->core_power_up(sdev, core_mask);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_core_power_down(struct snd_sof_dev *sdev,
+ unsigned int core_mask)
+{
+ if (sof_ops(sdev)->core_power_down)
+ return sof_ops(sdev)->core_power_down(sdev, core_mask);
+
+ return 0;
+}
+
+/* pre/post fw load */
+static inline int snd_sof_dsp_pre_fw_run(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->pre_fw_run)
+ return sof_ops(sdev)->pre_fw_run(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_post_fw_run(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->post_fw_run)
+ return sof_ops(sdev)->post_fw_run(sdev);
+
+ return 0;
+}
+
+/* misc */
+
+/**
+ * snd_sof_dsp_get_bar_index - Maps a section type with a BAR index
+ *
+ * @sdev: sof device
+ * @type: section type as described by snd_sof_fw_blk_type
+ *
+ * Returns the corresponding BAR index (a positive integer), or -EINVAL
+ * if the platform has no mapping for @type. When the callback is not
+ * implemented, the default mmio_bar is returned.
+ */
+static inline int snd_sof_dsp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
+{
+ if (sof_ops(sdev)->get_bar_index)
+ return sof_ops(sdev)->get_bar_index(sdev, type);
+
+ return sdev->mmio_bar;
+}
+
+static inline int snd_sof_dsp_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->get_mailbox_offset)
+ return sof_ops(sdev)->get_mailbox_offset(sdev);
+
+ dev_err(sdev->dev, "error: %s not defined\n", __func__);
+ return -ENOTSUPP;
+}
+
+static inline int snd_sof_dsp_get_window_offset(struct snd_sof_dev *sdev,
+ u32 id)
+{
+ if (sof_ops(sdev)->get_window_offset)
+ return sof_ops(sdev)->get_window_offset(sdev, id);
+
+ dev_err(sdev->dev, "error: %s not defined\n", __func__);
+ return -ENOTSUPP;
+}
+/* power management */
+static inline int snd_sof_dsp_resume(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->resume)
+ return sof_ops(sdev)->resume(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev,
+ u32 target_state)
+{
+ if (sof_ops(sdev)->suspend)
+ return sof_ops(sdev)->suspend(sdev, target_state);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_runtime_resume(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->runtime_resume)
+ return sof_ops(sdev)->runtime_resume(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_runtime_suspend(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->runtime_suspend)
+ return sof_ops(sdev)->runtime_suspend(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_runtime_idle(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->runtime_idle)
+ return sof_ops(sdev)->runtime_idle(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_hw_params_upon_resume(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->set_hw_params_upon_resume)
+ return sof_ops(sdev)->set_hw_params_upon_resume(sdev);
+ return 0;
+}
+
+static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
+{
+ if (sof_ops(sdev)->set_clk)
+ return sof_ops(sdev)->set_clk(sdev, freq);
+
+ return 0;
+}
+
+static inline int
+snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ if (sof_ops(sdev)->set_power_state)
+ return sof_ops(sdev)->set_power_state(sdev, target_state);
+
+ /* D0 substate is not supported, do nothing here. */
+ return 0;
+}
+
+/* debug */
+static inline void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ if (sof_ops(sdev)->dbg_dump)
+ return sof_ops(sdev)->dbg_dump(sdev, flags);
+}
+
+static inline void snd_sof_ipc_dump(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->ipc_dump)
+ return sof_ops(sdev)->ipc_dump(sdev);
+}
+
+/* register IO */
+static inline void snd_sof_dsp_write(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 value)
+{
+ if (sof_ops(sdev)->write) {
+ sof_ops(sdev)->write(sdev, sdev->bar[bar] + offset, value);
+ return;
+ }
+
+ dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
+}
+
+static inline void snd_sof_dsp_write64(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u64 value)
+{
+ if (sof_ops(sdev)->write64) {
+ sof_ops(sdev)->write64(sdev, sdev->bar[bar] + offset, value);
+ return;
+ }
+
+ dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
+}
+
+static inline u32 snd_sof_dsp_read(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset)
+{
+ if (sof_ops(sdev)->read)
+ return sof_ops(sdev)->read(sdev, sdev->bar[bar] + offset);
+
+ dev_err(sdev->dev, "error: %s not defined\n", __func__);
+ return -ENOTSUPP;
+}
+
+static inline u64 snd_sof_dsp_read64(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset)
+{
+ if (sof_ops(sdev)->read64)
+ return sof_ops(sdev)->read64(sdev, sdev->bar[bar] + offset);
+
+ dev_err(sdev->dev, "error: %s not defined\n", __func__);
+ return -ENOTSUPP;
+}
+
+/* block IO */
+static inline void snd_sof_dsp_block_read(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, void *dest, size_t bytes)
+{
+ sof_ops(sdev)->block_read(sdev, bar, offset, dest, bytes);
+}
+
+static inline void snd_sof_dsp_block_write(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, void *src, size_t bytes)
+{
+ sof_ops(sdev)->block_write(sdev, bar, offset, src, bytes);
+}
+
+/* ipc */
+static inline int snd_sof_dsp_send_msg(struct snd_sof_dev *sdev,
+ struct snd_sof_ipc_msg *msg)
+{
+ return sof_ops(sdev)->send_msg(sdev, msg);
+}
+
+/* host DMA trace */
+static inline int snd_sof_dma_trace_init(struct snd_sof_dev *sdev,
+ u32 *stream_tag)
+{
+ if (sof_ops(sdev)->trace_init)
+ return sof_ops(sdev)->trace_init(sdev, stream_tag);
+
+ return 0;
+}
+
+static inline int snd_sof_dma_trace_release(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->trace_release)
+ return sof_ops(sdev)->trace_release(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dma_trace_trigger(struct snd_sof_dev *sdev, int cmd)
+{
+ if (sof_ops(sdev)->trace_trigger)
+ return sof_ops(sdev)->trace_trigger(sdev, cmd);
+
+ return 0;
+}
+
+/* host PCM ops */
+static inline int
+snd_sof_pcm_platform_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_open)
+ return sof_ops(sdev)->pcm_open(sdev, substream);
+
+ return 0;
+}
+
+/* disconnect pcm substream to a host stream */
+static inline int
+snd_sof_pcm_platform_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_close)
+ return sof_ops(sdev)->pcm_close(sdev, substream);
+
+ return 0;
+}
+
+/* host stream hw params */
+static inline int
+snd_sof_pcm_platform_hw_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct sof_ipc_stream_params *ipc_params)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_params)
+ return sof_ops(sdev)->pcm_hw_params(sdev, substream,
+ params, ipc_params);
+
+ return 0;
+}
+
+/* host stream hw free */
+static inline int
+snd_sof_pcm_platform_hw_free(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_free)
+ return sof_ops(sdev)->pcm_hw_free(sdev, substream);
+
+ return 0;
+}
+
+/* host stream trigger */
+static inline int
+snd_sof_pcm_platform_trigger(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_trigger)
+ return sof_ops(sdev)->pcm_trigger(sdev, substream, cmd);
+
+ return 0;
+}
+
+/* host DSP message data */
+static inline void snd_sof_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz)
+{
+ sof_ops(sdev)->ipc_msg_data(sdev, substream, p, sz);
+}
+
+/* host configure DSP HW parameters */
+static inline int
+snd_sof_ipc_pcm_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply)
+{
+ return sof_ops(sdev)->ipc_pcm_params(sdev, substream, reply);
+}
+
+/* host stream pointer */
+static inline snd_pcm_uframes_t
+snd_sof_pcm_platform_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_pointer)
+ return sof_ops(sdev)->pcm_pointer(sdev, substream);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+static inline int
+snd_sof_probe_compr_assign(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream, struct snd_soc_dai *dai)
+{
+ return sof_ops(sdev)->probe_assign(sdev, cstream, dai);
+}
+
+static inline int
+snd_sof_probe_compr_free(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream, struct snd_soc_dai *dai)
+{
+ return sof_ops(sdev)->probe_free(sdev, cstream, dai);
+}
+
+static inline int
+snd_sof_probe_compr_set_params(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params, struct snd_soc_dai *dai)
+{
+ return sof_ops(sdev)->probe_set_params(sdev, cstream, params, dai);
+}
+
+static inline int
+snd_sof_probe_compr_trigger(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ return sof_ops(sdev)->probe_trigger(sdev, cstream, cmd, dai);
+}
+
+static inline int
+snd_sof_probe_compr_pointer(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp, struct snd_soc_dai *dai)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->probe_pointer)
+ return sof_ops(sdev)->probe_pointer(sdev, cstream, tstamp, dai);
+
+ return 0;
+}
+#endif
+
+/* machine driver */
+static inline int
+snd_sof_machine_register(struct snd_sof_dev *sdev, void *pdata)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->machine_register)
+ return sof_ops(sdev)->machine_register(sdev, pdata);
+
+ return 0;
+}
+
+static inline void
+snd_sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->machine_unregister)
+ sof_ops(sdev)->machine_unregister(sdev, pdata);
+}
+
+static inline void
+snd_sof_machine_select(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->machine_select)
+ sof_ops(sdev)->machine_select(sdev);
+}
+
+static inline void
+snd_sof_set_mach_params(const struct snd_soc_acpi_mach *mach,
+ struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+
+ if (sof_ops(sdev) && sof_ops(sdev)->set_mach_params)
+ sof_ops(sdev)->set_mach_params(mach, dev);
+}
+
+static inline const struct snd_sof_dsp_ops
+*sof_get_ops(const struct sof_dev_desc *d,
+ const struct sof_ops_table mach_ops[], int asize)
+{
+ int i;
+
+ for (i = 0; i < asize; i++) {
+ if (d == mach_ops[i].desc)
+ return mach_ops[i].ops;
+ }
+
+ /* not found */
+ return NULL;
+}
+
+/**
+ * snd_sof_dsp_read_poll_timeout - Periodically poll a DSP register
+ * until a condition is met or a timeout occurs
+ * @sdev: SOF device
+ * @bar: BAR index of the register to poll
+ * @offset: Register offset within @bar
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last value read from the register is stored in @val. Must
+ * not be called from atomic context if sleep_us or timeout_us are used.
+ *
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ */
+#define snd_sof_dsp_read_poll_timeout(sdev, bar, offset, val, cond, sleep_us, timeout_us) \
+({ \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __sleep_us = (sleep_us); \
+ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ might_sleep_if((__sleep_us) != 0); \
+ for (;;) { \
+ (val) = snd_sof_dsp_read(sdev, bar, offset); \
+ if (cond) { \
+ dev_dbg(sdev->dev, \
+ "FW Poll Status: reg=%#x successful\n", (val)); \
+ break; \
+ } \
+ if (__timeout_us && \
+ ktime_compare(ktime_get(), __timeout) > 0) { \
+ (val) = snd_sof_dsp_read(sdev, bar, offset); \
+ dev_dbg(sdev->dev, \
+ "FW Poll Status: reg=%#x timedout\n", (val)); \
+ break; \
+ } \
+ if (__sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
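
A quick usage sketch (not part of the patch): waiting for an assumed "ready" bit with the macro above; the BAR, offset, bit and timings are hypothetical placeholders.

/* Illustrative sketch only -- register location and bit are assumed. */
static int example_wait_for_ready(struct snd_sof_dev *sdev)
{
	u32 status;

	/* sleep ~500 us between reads, give up after 10 ms */
	return snd_sof_dsp_read_poll_timeout(sdev, 0 /* bar */, 0x160 /* offset */,
					     status, status & BIT(0),
					     500, 10000);
}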
+
+/* This is for register bits with attribute RWC */
+bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
+ u32 mask, u32 value);
+
+bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value);
+
+bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u64 mask, u64 value);
+
+bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
+ u32 mask, u32 value);
+
+bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u64 mask, u64 value);
+
+void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value);
+
+int snd_sof_dsp_register_poll(struct snd_sof_dev *sdev, u32 bar, u32 offset,
+ u32 mask, u32 target, u32 timeout_ms,
+ u32 interval_us);
+
+void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset);
+#endif
diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
new file mode 100644
index 000000000..cbac6f17c
--- /dev/null
+++ b/sound/soc/sof/pcm.c
@@ -0,0 +1,821 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+// PCM Layer, interface between ALSA and IPC.
+//
+
+#include <linux/pm_runtime.h>
+#include <sound/pcm_params.h>
+#include <sound/sof.h>
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ops.h"
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+#include "compress.h"
+#endif
+
+/* Create DMA buffer page table for DSP */
+static int create_page_table(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ unsigned char *dma_area, size_t size)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_pcm *spcm;
+ struct snd_dma_buffer *dmab = snd_pcm_get_dma_buf(substream);
+ int stream = substream->stream;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ return snd_sof_create_page_table(component->dev, dmab,
+ spcm->stream[stream].page_table.area, size);
+}
+
+static int sof_pcm_dsp_params(struct snd_sof_pcm *spcm, struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply)
+{
+ struct snd_soc_component *scomp = spcm->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+
+ /* validate offset */
+ int ret = snd_sof_ipc_pcm_params(sdev, substream, reply);
+
+ if (ret < 0)
+ dev_err(scomp->dev, "error: got wrong reply for PCM %d\n",
+ spcm->pcm.pcm_id);
+
+ return ret;
+}
+
+/*
+ * SOF PCM period elapsed work
+ */
+void snd_sof_pcm_period_elapsed_work(struct work_struct *work)
+{
+ struct snd_sof_pcm_stream *sps =
+ container_of(work, struct snd_sof_pcm_stream,
+ period_elapsed_work);
+
+ snd_pcm_period_elapsed(sps->substream);
+}
+
+/*
+ * SOF PCM period elapsed; this can be called from IRQ thread context.
+ */
+void snd_sof_pcm_period_elapsed(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
+ struct snd_sof_pcm *spcm;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm) {
+ dev_err(component->dev,
+ "error: period elapsed for unknown stream!\n");
+ return;
+ }
+
+ /*
+ * snd_pcm_period_elapsed() can be called in interrupt context
+ * before IRQ_HANDLED is returned. Inside snd_pcm_period_elapsed(),
+ * when the PCM is done draining or xrun happened, a STOP IPC will
+ * then be sent and this IPC will hit IPC timeout.
+ * To avoid sending IPC before the previous IPC is handled, we
+ * schedule delayed work here to call the snd_pcm_period_elapsed().
+ */
+ schedule_work(&spcm->stream[substream->stream].period_elapsed_work);
+}
+EXPORT_SYMBOL(snd_sof_pcm_period_elapsed);
+
+static int sof_pcm_dsp_pcm_free(struct snd_pcm_substream *substream,
+ struct snd_sof_dev *sdev,
+ struct snd_sof_pcm *spcm)
+{
+ struct sof_ipc_stream stream;
+ struct sof_ipc_reply reply;
+ int ret;
+
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_FREE;
+ stream.comp_id = spcm->stream[substream->stream].comp_id;
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
+ sizeof(stream), &reply, sizeof(reply));
+ if (!ret)
+ spcm->prepared[substream->stream] = false;
+
+ return ret;
+}
+
+static int sof_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ struct sof_ipc_pcm_params pcm;
+ struct sof_ipc_pcm_params_reply ipc_params_reply;
+ int ret;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ /*
+ * Handle repeated calls to hw_params() without free_pcm() in
+ * between. At least ALSA OSS emulation depends on this.
+ */
+ if (spcm->prepared[substream->stream]) {
+ ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
+ if (ret < 0)
+ return ret;
+ }
+
+ dev_dbg(component->dev, "pcm: hw params stream %d dir %d\n",
+ spcm->pcm.pcm_id, substream->stream);
+
+ memset(&pcm, 0, sizeof(pcm));
+
+ /* create compressed page table for audio firmware */
+ if (runtime->buffer_changed) {
+ ret = create_page_table(component, substream, runtime->dma_area,
+ runtime->dma_bytes);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* number of pages should be rounded up */
+ pcm.params.buffer.pages = PFN_UP(runtime->dma_bytes);
+
+ /* set IPC PCM parameters */
+ pcm.hdr.size = sizeof(pcm);
+ pcm.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_PARAMS;
+ pcm.comp_id = spcm->stream[substream->stream].comp_id;
+ pcm.params.hdr.size = sizeof(pcm.params);
+ pcm.params.buffer.phy_addr =
+ spcm->stream[substream->stream].page_table.addr;
+ pcm.params.buffer.size = runtime->dma_bytes;
+ pcm.params.direction = substream->stream;
+ pcm.params.sample_valid_bytes = params_width(params) >> 3;
+ pcm.params.buffer_fmt = SOF_IPC_BUFFER_INTERLEAVED;
+ pcm.params.rate = params_rate(params);
+ pcm.params.channels = params_channels(params);
+ pcm.params.host_period_bytes = params_period_bytes(params);
+
+ /* container size */
+ ret = snd_pcm_format_physical_width(params_format(params));
+ if (ret < 0)
+ return ret;
+ pcm.params.sample_container_bytes = ret >> 3;
+
+ /* format */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S16_LE;
+ break;
+ case SNDRV_PCM_FORMAT_S24:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S24_4LE;
+ break;
+ case SNDRV_PCM_FORMAT_S32:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S32_LE;
+ break;
+ case SNDRV_PCM_FORMAT_FLOAT:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_FLOAT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* firmware already configured host stream */
+ ret = snd_sof_pcm_platform_hw_params(sdev,
+ substream,
+ params,
+ &pcm.params);
+ if (ret < 0) {
+ dev_err(component->dev, "error: platform hw params failed\n");
+ return ret;
+ }
+
+ dev_dbg(component->dev, "stream_tag %d", pcm.params.stream_tag);
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message(sdev->ipc, pcm.hdr.cmd, &pcm, sizeof(pcm),
+ &ipc_params_reply, sizeof(ipc_params_reply));
+ if (ret < 0) {
+ dev_err(component->dev, "error: hw params ipc failed for stream %d\n",
+ pcm.params.stream_tag);
+ return ret;
+ }
+
+ ret = sof_pcm_dsp_params(spcm, substream, &ipc_params_reply);
+ if (ret < 0)
+ return ret;
+
+ spcm->prepared[substream->stream] = true;
+
+ /* save pcm hw_params */
+ memcpy(&spcm->params[substream->stream], params, sizeof(*params));
+
+ return ret;
+}
+
+static int sof_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ int ret, err = 0;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ dev_dbg(component->dev, "pcm: free stream %d dir %d\n",
+ spcm->pcm.pcm_id, substream->stream);
+
+ if (spcm->prepared[substream->stream]) {
+ ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
+ if (ret < 0)
+ err = ret;
+ }
+
+ cancel_work_sync(&spcm->stream[substream->stream].period_elapsed_work);
+
+ ret = snd_sof_pcm_platform_hw_free(sdev, substream);
+ if (ret < 0) {
+ dev_err(component->dev, "error: platform hw free failed\n");
+ err = ret;
+ }
+
+ return err;
+}
+
+static int sof_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_pcm *spcm;
+ int ret;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ if (spcm->prepared[substream->stream])
+ return 0;
+
+ dev_dbg(component->dev, "pcm: prepare stream %d dir %d\n",
+ spcm->pcm.pcm_id, substream->stream);
+
+ /* set hw_params */
+ ret = sof_pcm_hw_params(component,
+ substream, &spcm->params[substream->stream]);
+ if (ret < 0) {
+ dev_err(component->dev,
+ "error: set pcm hw_params after resume\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * FE dai link trigger actions are always executed in non-atomic context because
+ * they involve IPC's.
+ */
+static int sof_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ struct sof_ipc_stream stream;
+ struct sof_ipc_reply reply;
+ bool reset_hw_params = false;
+ bool ipc_first = false;
+ int ret;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ dev_dbg(component->dev, "pcm: trigger stream %d dir %d cmd %d\n",
+ spcm->pcm.pcm_id, substream->stream, cmd);
+
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG;
+ stream.comp_id = spcm->stream[substream->stream].comp_id;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_PAUSE;
+ ipc_first = true;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_RELEASE;
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ if (spcm->stream[substream->stream].suspend_ignored) {
+ /*
+ * this case will be triggered when INFO_RESUME is
+ * supported, no need to resume streams that remained
+ * enabled in D0ix.
+ */
+ spcm->stream[substream->stream].suspend_ignored = false;
+ return 0;
+ }
+
+ /* set up hw_params */
+ ret = sof_pcm_prepare(component, substream);
+ if (ret < 0) {
+ dev_err(component->dev,
+ "error: failed to set up hw_params upon resume\n");
+ return ret;
+ }
+
+ fallthrough;
+ case SNDRV_PCM_TRIGGER_START:
+ if (spcm->stream[substream->stream].suspend_ignored) {
+ /*
+ * This case will be triggered when INFO_RESUME is
+ * not supported, no need to re-start streams that
+ * remained enabled in D0ix.
+ */
+ spcm->stream[substream->stream].suspend_ignored = false;
+ return 0;
+ }
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_START;
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (sdev->system_suspend_target == SOF_SUSPEND_S0IX &&
+ spcm->stream[substream->stream].d0i3_compatible) {
+ /*
+ * trap the event, not sending trigger stop to
+ * prevent the FW pipelines from being stopped,
+ * and mark the flag to ignore the upcoming DAPM
+ * PM events.
+ */
+ spcm->stream[substream->stream].suspend_ignored = true;
+ return 0;
+ }
+ fallthrough;
+ case SNDRV_PCM_TRIGGER_STOP:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
+ ipc_first = true;
+ reset_hw_params = true;
+ break;
+ default:
+ dev_err(component->dev, "error: unhandled trigger cmd %d\n",
+ cmd);
+ return -EINVAL;
+ }
+
+ /*
+ * The DMA and IPC sequence differs for start and stop: the STOP IPC
+ * must be sent before stopping the DMA.
+ */
+ if (!ipc_first)
+ snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
+ sizeof(stream), &reply, sizeof(reply));
+
+ /* need to STOP DMA even if STOP IPC failed */
+ if (ipc_first)
+ snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+
+ /* free PCM if reset_hw_params is set and the STOP IPC is successful */
+ if (!ret && reset_hw_params)
+ ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
+
+ return ret;
+}
+
+static snd_pcm_uframes_t sof_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ snd_pcm_uframes_t host, dai;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ /* use dsp ops pointer callback directly if set */
+ if (sof_ops(sdev)->pcm_pointer)
+ return sof_ops(sdev)->pcm_pointer(sdev, substream);
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ /* read position from DSP */
+ host = bytes_to_frames(substream->runtime,
+ spcm->stream[substream->stream].posn.host_posn);
+ dai = bytes_to_frames(substream->runtime,
+ spcm->stream[substream->stream].posn.dai_posn);
+
+ dev_vdbg(component->dev,
+ "PCM: stream %d dir %d DMA position %lu DAI position %lu\n",
+ spcm->pcm.pcm_id, substream->stream, host, dai);
+
+ return host;
+}
+
+static int sof_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ const struct snd_sof_dsp_ops *ops = sof_ops(sdev);
+ struct snd_sof_pcm *spcm;
+ struct snd_soc_tplg_stream_caps *caps;
+ int ret;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ dev_dbg(component->dev, "pcm: open stream %d dir %d\n",
+ spcm->pcm.pcm_id, substream->stream);
+
+ caps = &spcm->pcm.caps[substream->stream];
+
+ /* set any runtime constraints based on topology */
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ le32_to_cpu(caps->period_size_min));
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ le32_to_cpu(caps->period_size_min));
+
+ /* set runtime config */
+ runtime->hw.info = ops->hw_info; /* platform-specific */
+
+ runtime->hw.formats = le64_to_cpu(caps->formats);
+ runtime->hw.period_bytes_min = le32_to_cpu(caps->period_size_min);
+ runtime->hw.period_bytes_max = le32_to_cpu(caps->period_size_max);
+ runtime->hw.periods_min = le32_to_cpu(caps->periods_min);
+ runtime->hw.periods_max = le32_to_cpu(caps->periods_max);
+
+ /*
+ * caps->buffer_size_min is not used since the
+ * snd_pcm_hardware structure only defines buffer_bytes_max
+ */
+ runtime->hw.buffer_bytes_max = le32_to_cpu(caps->buffer_size_max);
+
+ dev_dbg(component->dev, "period min %zd max %zd bytes\n",
+ runtime->hw.period_bytes_min,
+ runtime->hw.period_bytes_max);
+ dev_dbg(component->dev, "period count %d max %d\n",
+ runtime->hw.periods_min,
+ runtime->hw.periods_max);
+ dev_dbg(component->dev, "buffer max %zd bytes\n",
+ runtime->hw.buffer_bytes_max);
+
+ /* set wait time - TODO: come from topology */
+ substream->wait_time = 500;
+
+ spcm->stream[substream->stream].posn.host_posn = 0;
+ spcm->stream[substream->stream].posn.dai_posn = 0;
+ spcm->stream[substream->stream].substream = substream;
+ spcm->prepared[substream->stream] = false;
+
+ ret = snd_sof_pcm_platform_open(sdev, substream);
+ if (ret < 0)
+ dev_err(component->dev, "error: pcm open failed %d\n", ret);
+
+ return ret;
+}
+
+static int sof_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ int err;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ dev_dbg(component->dev, "pcm: close stream %d dir %d\n",
+ spcm->pcm.pcm_id, substream->stream);
+
+ err = snd_sof_pcm_platform_close(sdev, substream);
+ if (err < 0) {
+ dev_err(component->dev, "error: pcm close failed %d\n",
+ err);
+ /*
+ * keep going, no point in preventing the close
+ * from happening
+ */
+ }
+
+ return 0;
+}
+
+/*
+ * Pre-allocate playback/capture audio buffer pages.
+ * There is no need to explicitly release the memory preallocated by
+ * sof_pcm_new() in pcm_free: snd_pcm_lib_preallocate_free_for_all() is
+ * called by the core.
+ */
+static int sof_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ struct snd_pcm *pcm = rtd->pcm;
+ struct snd_soc_tplg_stream_caps *caps;
+ int stream = SNDRV_PCM_STREAM_PLAYBACK;
+
+ /* find SOF PCM for this RTD */
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm) {
+ dev_warn(component->dev, "warn: can't find PCM with DAI ID %d\n",
+ rtd->dai_link->id);
+ return 0;
+ }
+
+ dev_dbg(component->dev, "creating new PCM %s\n", spcm->pcm.pcm_name);
+
+ /* do we need to pre-allocate playback audio buffer pages */
+ if (!spcm->pcm.playback)
+ goto capture;
+
+ caps = &spcm->pcm.caps[stream];
+
+ /* pre-allocate playback audio buffer pages */
+ dev_dbg(component->dev,
+ "spcm: allocate %s playback DMA buffer size 0x%x max 0x%x\n",
+ caps->name, caps->buffer_size_min, caps->buffer_size_max);
+
+ if (!pcm->streams[stream].substream) {
+ dev_err(component->dev, "error: NULL playback substream!\n");
+ return -EINVAL;
+ }
+
+ snd_pcm_set_managed_buffer(pcm->streams[stream].substream,
+ SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
+ 0, le32_to_cpu(caps->buffer_size_max));
+capture:
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+
+ /* do we need to pre-allocate capture audio buffer pages */
+ if (!spcm->pcm.capture)
+ return 0;
+
+ caps = &spcm->pcm.caps[stream];
+
+ /* pre-allocate capture audio buffer pages */
+ dev_dbg(component->dev,
+ "spcm: allocate %s capture DMA buffer size 0x%x max 0x%x\n",
+ caps->name, caps->buffer_size_min, caps->buffer_size_max);
+
+ if (!pcm->streams[stream].substream) {
+ dev_err(component->dev, "error: NULL capture substream!\n");
+ return -EINVAL;
+ }
+
+ snd_pcm_set_managed_buffer(pcm->streams[stream].substream,
+ SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
+ 0, le32_to_cpu(caps->buffer_size_max));
+
+ return 0;
+}
+
+/* fixup the BE DAI link to match any values from topology */
+static int sof_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
+ struct snd_sof_dai *dai =
+ snd_sof_find_dai(component, (char *)rtd->dai_link->name);
+ struct snd_soc_dpcm *dpcm;
+
+ /* no topology exists for this BE, try a common configuration */
+ if (!dai) {
+ dev_warn(component->dev,
+ "warning: no topology found for BE DAI %s config\n",
+ rtd->dai_link->name);
+
+ /* set 48k, stereo, 16bits by default */
+ rate->min = 48000;
+ rate->max = 48000;
+
+ channels->min = 2;
+ channels->max = 2;
+
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+
+ return 0;
+ }
+
+ /* read format from topology */
+ snd_mask_none(fmt);
+
+ switch (dai->comp_dai.config.frame_fmt) {
+ case SOF_IPC_FRAME_S16_LE:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ break;
+ case SOF_IPC_FRAME_S24_4LE:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ break;
+ case SOF_IPC_FRAME_S32_LE:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S32_LE);
+ break;
+ default:
+ dev_err(component->dev, "error: No available DAI format!\n");
+ return -EINVAL;
+ }
+
+ /* read rate and channels from topology */
+ switch (dai->dai_config->type) {
+ case SOF_DAI_INTEL_SSP:
+ rate->min = dai->dai_config->ssp.fsync_rate;
+ rate->max = dai->dai_config->ssp.fsync_rate;
+ channels->min = dai->dai_config->ssp.tdm_slots;
+ channels->max = dai->dai_config->ssp.tdm_slots;
+
+ dev_dbg(component->dev,
+ "rate_min: %d rate_max: %d\n", rate->min, rate->max);
+ dev_dbg(component->dev,
+ "channels_min: %d channels_max: %d\n",
+ channels->min, channels->max);
+
+ break;
+ case SOF_DAI_INTEL_DMIC:
+ /* DMIC only supports 16 or 32 bit formats */
+ if (dai->comp_dai.config.frame_fmt == SOF_IPC_FRAME_S24_4LE) {
+ dev_err(component->dev,
+ "error: invalid fmt %d for DAI type %d\n",
+ dai->comp_dai.config.frame_fmt,
+ dai->dai_config->type);
+ }
+ break;
+ case SOF_DAI_INTEL_HDA:
+ /*
+ * HDAudio does not follow the default trigger
+ * sequence due to firmware implementation
+ */
+ for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
+ struct snd_soc_pcm_runtime *fe = dpcm->fe;
+
+ fe->dai_link->trigger[SNDRV_PCM_STREAM_PLAYBACK] =
+ SND_SOC_DPCM_TRIGGER_POST;
+ }
+ break;
+ case SOF_DAI_INTEL_ALH:
+ /* do nothing for ALH dai_link */
+ break;
+ case SOF_DAI_IMX_ESAI:
+ rate->min = dai->dai_config->esai.fsync_rate;
+ rate->max = dai->dai_config->esai.fsync_rate;
+ channels->min = dai->dai_config->esai.tdm_slots;
+ channels->max = dai->dai_config->esai.tdm_slots;
+
+ dev_dbg(component->dev,
+ "rate_min: %d rate_max: %d\n", rate->min, rate->max);
+ dev_dbg(component->dev,
+ "channels_min: %d channels_max: %d\n",
+ channels->min, channels->max);
+ break;
+ case SOF_DAI_IMX_SAI:
+ rate->min = dai->dai_config->sai.fsync_rate;
+ rate->max = dai->dai_config->sai.fsync_rate;
+ channels->min = dai->dai_config->sai.tdm_slots;
+ channels->max = dai->dai_config->sai.tdm_slots;
+
+ dev_dbg(component->dev,
+ "rate_min: %d rate_max: %d\n", rate->min, rate->max);
+ dev_dbg(component->dev,
+ "channels_min: %d channels_max: %d\n",
+ channels->min, channels->max);
+ break;
+ default:
+ dev_err(component->dev, "error: invalid DAI type %d\n",
+ dai->dai_config->type);
+ break;
+ }
+
+ return 0;
+}
+
+static int sof_pcm_probe(struct snd_soc_component *component)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const char *tplg_filename;
+ int ret;
+
+ /* load the default topology */
+ sdev->component = component;
+
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s/%s",
+ plat_data->tplg_filename_prefix,
+ plat_data->tplg_filename);
+ if (!tplg_filename)
+ return -ENOMEM;
+
+ ret = snd_sof_load_topology(component, tplg_filename);
+ if (ret < 0) {
+ dev_err(component->dev, "error: failed to load DSP topology %d\n",
+ ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void sof_pcm_remove(struct snd_soc_component *component)
+{
+ /* remove topology */
+ snd_soc_tplg_component_remove(component, SND_SOC_TPLG_INDEX_ALL);
+}
+
+void snd_sof_new_platform_drv(struct snd_sof_dev *sdev)
+{
+ struct snd_soc_component_driver *pd = &sdev->plat_drv;
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const char *drv_name;
+
+ drv_name = plat_data->machine->drv_name;
+
+ pd->name = "sof-audio-component";
+ pd->probe = sof_pcm_probe;
+ pd->remove = sof_pcm_remove;
+ pd->open = sof_pcm_open;
+ pd->close = sof_pcm_close;
+ pd->hw_params = sof_pcm_hw_params;
+ pd->prepare = sof_pcm_prepare;
+ pd->hw_free = sof_pcm_hw_free;
+ pd->trigger = sof_pcm_trigger;
+ pd->pointer = sof_pcm_pointer;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMPRESS)
+ pd->compress_ops = &sof_compressed_ops;
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+ /* override cops when probe support is enabled */
+ pd->compress_ops = &sof_probe_compressed_ops;
+#endif
+ pd->pcm_construct = sof_pcm_new;
+ pd->ignore_machine = drv_name;
+ pd->be_hw_params_fixup = sof_pcm_dai_link_fixup;
+ pd->be_pcm_base = SOF_BE_PCM_BASE;
+ pd->use_dai_pcm_id = true;
+ pd->topology_name_prefix = "sof";
+
+ /* increment module refcount when a pcm is opened */
+ pd->module_get_upon_open = 1;
+}
diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
new file mode 100644
index 000000000..c83fb6255
--- /dev/null
+++ b/sound/soc/sof/pm.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include "ops.h"
+#include "sof-priv.h"
+#include "sof-audio.h"
+
+/*
+ * Helper function to determine the target DSP state during
+ * system suspend. This function only cares about the device
+ * D-states. Platform-specific substates, if any, should be
+ * handled by the platform-specific parts.
+ */
+static u32 snd_sof_dsp_power_target(struct snd_sof_dev *sdev)
+{
+ u32 target_dsp_state;
+
+ switch (sdev->system_suspend_target) {
+ case SOF_SUSPEND_S3:
+ /* DSP should be in D3 if the system is suspending to S3 */
+ target_dsp_state = SOF_DSP_PM_D3;
+ break;
+ case SOF_SUSPEND_S0IX:
+ /*
+ * Currently, the only criterion for retaining the DSP in D0
+ * is that there are streams that ignored the suspend trigger.
+ * Additional criteria such as SoundWire clock-stop mode and
+ * device suspend latency considerations will be added later.
+ */
+ if (snd_sof_stream_suspend_ignored(sdev))
+ target_dsp_state = SOF_DSP_PM_D0;
+ else
+ target_dsp_state = SOF_DSP_PM_D3;
+ break;
+ default:
+ /* This case would be during runtime suspend */
+ target_dsp_state = SOF_DSP_PM_D3;
+ break;
+ }
+
+ return target_dsp_state;
+}
+
+static int sof_send_pm_ctx_ipc(struct snd_sof_dev *sdev, int cmd)
+{
+ struct sof_ipc_pm_ctx pm_ctx;
+ struct sof_ipc_reply reply;
+
+ memset(&pm_ctx, 0, sizeof(pm_ctx));
+
+ /* configure ctx save ipc message */
+ pm_ctx.hdr.size = sizeof(pm_ctx);
+ pm_ctx.hdr.cmd = SOF_IPC_GLB_PM_MSG | cmd;
+
+ /* send ctx save ipc to dsp */
+ return sof_ipc_tx_message(sdev->ipc, pm_ctx.hdr.cmd, &pm_ctx,
+ sizeof(pm_ctx), &reply, sizeof(reply));
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+static void sof_cache_debugfs(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ list_for_each_entry(dfse, &sdev->dfsentry_list, list) {
+
+ /* nothing to do if debugfs buffer is not IO mem */
+ if (dfse->type == SOF_DFSENTRY_TYPE_BUF)
+ continue;
+
+ /* cache memory that is only accessible in D0 */
+ if (dfse->access_type == SOF_DEBUGFS_ACCESS_D0_ONLY)
+ memcpy_fromio(dfse->cache_buf, dfse->io_mem,
+ dfse->size);
+ }
+}
+#endif
+
+static int sof_resume(struct device *dev, bool runtime_resume)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ u32 old_state = sdev->dsp_power_state.state;
+ int ret;
+
+ /* do nothing if dsp resume callbacks are not set */
+ if (!runtime_resume && !sof_ops(sdev)->resume)
+ return 0;
+
+ if (runtime_resume && !sof_ops(sdev)->runtime_resume)
+ return 0;
+
+ /* DSP was never successfully started, nothing to resume */
+ if (sdev->first_boot)
+ return 0;
+
+ /*
+ * if the runtime_resume flag is set, call the runtime_resume routine
+ * or else call the system resume routine
+ */
+ if (runtime_resume)
+ ret = snd_sof_dsp_runtime_resume(sdev);
+ else
+ ret = snd_sof_dsp_resume(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to power up DSP after resume\n");
+ return ret;
+ }
+
+ /*
+ * Nothing further to be done for platforms that support the low power
+ * D0 substate.
+ */
+ if (!runtime_resume && sof_ops(sdev)->set_power_state &&
+ old_state == SOF_DSP_PM_D0)
+ return 0;
+
+ sdev->fw_state = SOF_FW_BOOT_PREPARE;
+
+ /* load the firmware */
+ ret = snd_sof_load_firmware(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to load DSP firmware after resume %d\n",
+ ret);
+ return ret;
+ }
+
+ sdev->fw_state = SOF_FW_BOOT_IN_PROGRESS;
+
+ /*
+ * Boot the firmware. The FW boot status will be modified
+ * in snd_sof_run_firmware() depending on the outcome.
+ */
+ ret = snd_sof_run_firmware(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to boot DSP firmware after resume %d\n",
+ ret);
+ return ret;
+ }
+
+ /* resume DMA trace; only the IPC needs to be sent */
+ ret = snd_sof_init_trace_ipc(sdev);
+ if (ret < 0) {
+ /* non fatal */
+ dev_warn(sdev->dev,
+ "warning: failed to init trace after resume %d\n",
+ ret);
+ }
+
+ /* restore pipelines */
+ ret = sof_restore_pipelines(sdev->dev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to restore pipeline after resume %d\n",
+ ret);
+ return ret;
+ }
+
+ /* notify DSP of system resume */
+ ret = sof_send_pm_ctx_ipc(sdev, SOF_IPC_PM_CTX_RESTORE);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: ctx_restore ipc error during resume %d\n",
+ ret);
+
+ return ret;
+}
+
+static int sof_suspend(struct device *dev, bool runtime_suspend)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ u32 target_state = 0;
+ int ret;
+
+ /* do nothing if dsp suspend callback is not set */
+ if (!runtime_suspend && !sof_ops(sdev)->suspend)
+ return 0;
+
+ if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
+ return 0;
+
+ if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
+ goto suspend;
+
+ /* set restore_stream for all streams during system suspend */
+ if (!runtime_suspend) {
+ ret = sof_set_hw_params_upon_resume(sdev->dev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: setting hw_params flag during suspend %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ target_state = snd_sof_dsp_power_target(sdev);
+
+ /* Skip to platform-specific suspend if DSP is entering D0 */
+ if (target_state == SOF_DSP_PM_D0)
+ goto suspend;
+
+ /* release trace */
+ snd_sof_release_trace(sdev);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+ /* cache debugfs contents during runtime suspend */
+ if (runtime_suspend)
+ sof_cache_debugfs(sdev);
+#endif
+ /* notify DSP of upcoming power down */
+ ret = sof_send_pm_ctx_ipc(sdev, SOF_IPC_PM_CTX_SAVE);
+ if (ret == -EBUSY || ret == -EAGAIN) {
+ /*
+ * runtime PM has logic to handle -EBUSY/-EAGAIN so
+ * pass these errors up
+ */
+ dev_err(sdev->dev,
+ "error: ctx_save ipc error during suspend %d\n",
+ ret);
+ return ret;
+ } else if (ret < 0) {
+ /* FW in unexpected state, continue to power down */
+ dev_warn(sdev->dev,
+ "ctx_save ipc error %d, proceeding with suspend\n",
+ ret);
+ }
+
+suspend:
+
+ /* return if the DSP was not probed successfully */
+ if (sdev->fw_state == SOF_FW_BOOT_NOT_STARTED)
+ return 0;
+
+ /* platform-specific suspend */
+ if (runtime_suspend)
+ ret = snd_sof_dsp_runtime_suspend(sdev);
+ else
+ ret = snd_sof_dsp_suspend(sdev, target_state);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: failed to power down DSP during suspend %d\n",
+ ret);
+
+ /* Do not reset FW state if DSP is in D0 */
+ if (target_state == SOF_DSP_PM_D0)
+ return ret;
+
+ /* reset FW state */
+ sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
+ sdev->enabled_cores_mask = 0;
+
+ return ret;
+}
+
+int snd_sof_dsp_power_down_notify(struct snd_sof_dev *sdev)
+{
+ /* Notify DSP of upcoming power down */
+ if (sof_ops(sdev)->remove)
+ return sof_send_pm_ctx_ipc(sdev, SOF_IPC_PM_CTX_SAVE);
+
+ return 0;
+}
+
+int snd_sof_runtime_suspend(struct device *dev)
+{
+ return sof_suspend(dev, true);
+}
+EXPORT_SYMBOL(snd_sof_runtime_suspend);
+
+int snd_sof_runtime_idle(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+
+ return snd_sof_dsp_runtime_idle(sdev);
+}
+EXPORT_SYMBOL(snd_sof_runtime_idle);
+
+int snd_sof_runtime_resume(struct device *dev)
+{
+ return sof_resume(dev, true);
+}
+EXPORT_SYMBOL(snd_sof_runtime_resume);
+
+int snd_sof_resume(struct device *dev)
+{
+ return sof_resume(dev, false);
+}
+EXPORT_SYMBOL(snd_sof_resume);
+
+int snd_sof_suspend(struct device *dev)
+{
+ return sof_suspend(dev, false);
+}
+EXPORT_SYMBOL(snd_sof_suspend);
+
+int snd_sof_prepare(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ const struct sof_dev_desc *desc = sdev->pdata->desc;
+
+ /* will suspend to S3 by default */
+ sdev->system_suspend_target = SOF_SUSPEND_S3;
+
+ if (!desc->use_acpi_target_states)
+ return 0;
+
+#if defined(CONFIG_ACPI)
+ if (acpi_target_system_state() == ACPI_STATE_S0)
+ sdev->system_suspend_target = SOF_SUSPEND_S0IX;
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_prepare);
+
+void snd_sof_complete(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+
+ sdev->system_suspend_target = SOF_SUSPEND_NONE;
+}
+EXPORT_SYMBOL(snd_sof_complete);
diff --git a/sound/soc/sof/probe.c b/sound/soc/sof/probe.c
new file mode 100644
index 000000000..14509f4d3
--- /dev/null
+++ b/sound/soc/sof/probe.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2019-2020 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+
+#include "sof-priv.h"
+#include "probe.h"
+
+/**
+ * sof_ipc_probe_init - initialize data probing
+ * @sdev: SOF sound device
+ * @stream_tag: Extractor stream tag
+ * @buffer_size: DMA buffer size to set for extractor
+ *
+ * The host chooses whether extraction is supported by providing a
+ * valid stream tag to the DSP. Once specified, the stream described by
+ * that tag is tied to the DSP for extraction for the entire lifetime
+ * of the probe.
+ *
+ * Probing is initialized only once, and each INIT request must be
+ * matched by a DEINIT call.
+ */
+int sof_ipc_probe_init(struct snd_sof_dev *sdev,
+ u32 stream_tag, size_t buffer_size)
+{
+ struct sof_ipc_probe_dma_add_params *msg;
+ struct sof_ipc_reply reply;
+ size_t size = struct_size(msg, dma, 1);
+ int ret;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->hdr.size = size;
+ msg->hdr.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_INIT;
+ msg->num_elems = 1;
+ msg->dma[0].stream_tag = stream_tag;
+ msg->dma[0].dma_buffer_size = buffer_size;
+
+ ret = sof_ipc_tx_message(sdev->ipc, msg->hdr.cmd, msg, msg->hdr.size,
+ &reply, sizeof(reply));
+ kfree(msg);
+ return ret;
+}
+EXPORT_SYMBOL(sof_ipc_probe_init);
+
+/**
+ * sof_ipc_probe_deinit - cleanup after data probing
+ * @sdev: SOF sound device
+ *
+ * The host sends a DEINIT request to free the previously initialized
+ * probe on the DSP side once it is no longer needed. Send DEINIT only
+ * when no probes are connected and all injectors are detached.
+ */
+int sof_ipc_probe_deinit(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_cmd_hdr msg;
+ struct sof_ipc_reply reply;
+
+ msg.size = sizeof(msg);
+ msg.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_DEINIT;
+
+ return sof_ipc_tx_message(sdev->ipc, msg.cmd, &msg, msg.size,
+ &reply, sizeof(reply));
+}
+EXPORT_SYMBOL(sof_ipc_probe_deinit);
+
+static int sof_ipc_probe_info(struct snd_sof_dev *sdev, unsigned int cmd,
+ void **params, size_t *num_params)
+{
+ struct sof_ipc_probe_info_params msg = {{{0}}};
+ struct sof_ipc_probe_info_params *reply;
+ size_t bytes;
+ int ret;
+
+ *params = NULL;
+ *num_params = 0;
+
+ reply = kzalloc(SOF_IPC_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!reply)
+ return -ENOMEM;
+ msg.rhdr.hdr.size = sizeof(msg);
+ msg.rhdr.hdr.cmd = SOF_IPC_GLB_PROBE | cmd;
+
+ ret = sof_ipc_tx_message(sdev->ipc, msg.rhdr.hdr.cmd, &msg,
+ msg.rhdr.hdr.size, reply, SOF_IPC_MSG_MAX_SIZE);
+ if (ret < 0 || reply->rhdr.error < 0)
+ goto exit;
+
+ if (!reply->num_elems)
+ goto exit;
+
+ if (cmd == SOF_IPC_PROBE_DMA_INFO)
+ bytes = sizeof(reply->dma[0]);
+ else
+ bytes = sizeof(reply->desc[0]);
+ bytes *= reply->num_elems;
+ *params = kmemdup(&reply->dma[0], bytes, GFP_KERNEL);
+ if (!*params) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ *num_params = reply->num_elems;
+
+exit:
+ kfree(reply);
+ return ret;
+}
+
+/**
+ * sof_ipc_probe_dma_info - retrieve list of active injection dmas
+ * @sdev: SOF sound device
+ * @dma: Returned list of active dmas
+ * @num_dma: Returned count of active dmas
+ *
+ * The host sends a DMA_INFO request to obtain the list of injection
+ * DMAs it can use to transfer data.
+ *
+ * Note that the list contains only injection DMAs, as there is only
+ * one extractor (DMA) and it is always assigned at probing init.
+ * The DSP knows exactly where data from extraction probes is going,
+ * which is not the case for injection, where multiple streams could
+ * be engaged.
+ */
+int sof_ipc_probe_dma_info(struct snd_sof_dev *sdev,
+ struct sof_probe_dma **dma, size_t *num_dma)
+{
+ return sof_ipc_probe_info(sdev, SOF_IPC_PROBE_DMA_INFO,
+ (void **)dma, num_dma);
+}
+EXPORT_SYMBOL(sof_ipc_probe_dma_info);
+
+/**
+ * sof_ipc_probe_dma_add - attach to specified dmas
+ * @sdev: SOF sound device
+ * @dma: List of streams (dmas) to attach to
+ * @num_dma: Number of elements in @dma
+ *
+ * Contrary to extraction, injection streams are never assigned at
+ * init. Before attempting any data injection, the host is responsible
+ * for specifying the streams that will later be used to transfer data
+ * to connected probe points.
+ */
+int sof_ipc_probe_dma_add(struct snd_sof_dev *sdev,
+ struct sof_probe_dma *dma, size_t num_dma)
+{
+ struct sof_ipc_probe_dma_add_params *msg;
+ struct sof_ipc_reply reply;
+ size_t size = struct_size(msg, dma, num_dma);
+ int ret;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->hdr.size = size;
+ msg->num_elems = num_dma;
+ msg->hdr.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_DMA_ADD;
+ memcpy(&msg->dma[0], dma, size - sizeof(*msg));
+
+ ret = sof_ipc_tx_message(sdev->ipc, msg->hdr.cmd, msg, msg->hdr.size,
+ &reply, sizeof(reply));
+ kfree(msg);
+ return ret;
+}
+EXPORT_SYMBOL(sof_ipc_probe_dma_add);
+
+/**
+ * sof_ipc_probe_dma_remove - detach from specified dmas
+ * @sdev: SOF sound device
+ * @stream_tag: List of stream tags to detach from
+ * @num_stream_tag: Number of elements in @stream_tag
+ *
+ * The host sends a DMA_REMOVE request to release a previously attached
+ * stream from injection duty. Each detach operation should match an
+ * equivalent DMA_ADD. Detach only when all probes tied to the given
+ * stream have been disconnected.
+ */
+int sof_ipc_probe_dma_remove(struct snd_sof_dev *sdev,
+ unsigned int *stream_tag, size_t num_stream_tag)
+{
+ struct sof_ipc_probe_dma_remove_params *msg;
+ struct sof_ipc_reply reply;
+ size_t size = struct_size(msg, stream_tag, num_stream_tag);
+ int ret;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->hdr.size = size;
+ msg->num_elems = num_stream_tag;
+ msg->hdr.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_DMA_REMOVE;
+ memcpy(&msg->stream_tag[0], stream_tag, size - sizeof(*msg));
+
+ ret = sof_ipc_tx_message(sdev->ipc, msg->hdr.cmd, msg, msg->hdr.size,
+ &reply, sizeof(reply));
+ kfree(msg);
+ return ret;
+}
+EXPORT_SYMBOL(sof_ipc_probe_dma_remove);
+
+/**
+ * sof_ipc_probe_points_info - retrieve list of active probe points
+ * @sdev: SOF sound device
+ * @desc: Returned list of active probes
+ * @num_desc: Returned count of active probes
+ *
+ * Host sends a PROBE_POINT_INFO request to obtain the list of active
+ * probe points, valid for disconnection when a given probe is no
+ * longer required.
+ */
+int sof_ipc_probe_points_info(struct snd_sof_dev *sdev,
+ struct sof_probe_point_desc **desc, size_t *num_desc)
+{
+ return sof_ipc_probe_info(sdev, SOF_IPC_PROBE_POINT_INFO,
+ (void **)desc, num_desc);
+}
+EXPORT_SYMBOL(sof_ipc_probe_points_info);
+
+/**
+ * sof_ipc_probe_points_add - connect specified probes
+ * @sdev: SOF sound device
+ * @desc: List of probe points to connect
+ * @num_desc: Number of elements in @desc
+ *
+ * Dynamically connects to the provided set of endpoints. Immediately
+ * after the connection is established, the host must be prepared to
+ * transfer data from or to the target stream, depending on the
+ * probing purpose.
+ *
+ * Each probe point should be removed using PROBE_POINT_REMOVE
+ * request when no longer needed.
+ */
+int sof_ipc_probe_points_add(struct snd_sof_dev *sdev,
+ struct sof_probe_point_desc *desc, size_t num_desc)
+{
+ struct sof_ipc_probe_point_add_params *msg;
+ struct sof_ipc_reply reply;
+ size_t size = struct_size(msg, desc, num_desc);
+ int ret;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->hdr.size = size;
+ msg->num_elems = num_desc;
+ msg->hdr.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_POINT_ADD;
+ memcpy(&msg->desc[0], desc, size - sizeof(*msg));
+
+ ret = sof_ipc_tx_message(sdev->ipc, msg->hdr.cmd, msg, msg->hdr.size,
+ &reply, sizeof(reply));
+ kfree(msg);
+ return ret;
+}
+EXPORT_SYMBOL(sof_ipc_probe_points_add);
+
+/**
+ * sof_ipc_probe_points_remove - disconnect specified probes
+ * @sdev: SOF sound device
+ * @buffer_id: List of probe points to disconnect
+ * @num_buffer_id: Number of elements in @buffer_id
+ *
+ * Removes previously connected probes from the list of active probe
+ * points and frees all resources on the DSP side.
+ */
+int sof_ipc_probe_points_remove(struct snd_sof_dev *sdev,
+ unsigned int *buffer_id, size_t num_buffer_id)
+{
+ struct sof_ipc_probe_point_remove_params *msg;
+ struct sof_ipc_reply reply;
+ size_t size = struct_size(msg, buffer_id, num_buffer_id);
+ int ret;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->hdr.size = size;
+ msg->num_elems = num_buffer_id;
+ msg->hdr.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_POINT_REMOVE;
+ memcpy(&msg->buffer_id[0], buffer_id, size - sizeof(*msg));
+
+ ret = sof_ipc_tx_message(sdev->ipc, msg->hdr.cmd, msg, msg->hdr.size,
+ &reply, sizeof(reply));
+ kfree(msg);
+ return ret;
+}
+EXPORT_SYMBOL(sof_ipc_probe_points_remove);
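+
+/*
+ * Illustrative sketch only: a typical extraction probe lifecycle built
+ * from the helpers above. The stream tag, buffer size, descriptors and
+ * buffer ids are hypothetical and would normally come from the compress
+ * stream and the topology.
+ *
+ *	ret = sof_ipc_probe_init(sdev, extract_stream_tag, buffer_size);
+ *	if (!ret)
+ *		ret = sof_ipc_probe_points_add(sdev, desc, num_desc);
+ *	...
+ *	sof_ipc_probe_points_remove(sdev, buffer_ids, num_ids);
+ *	sof_ipc_probe_deinit(sdev);
+ */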
diff --git a/sound/soc/sof/probe.h b/sound/soc/sof/probe.h
new file mode 100644
index 000000000..5e159ab23
--- /dev/null
+++ b/sound/soc/sof/probe.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2019-2020 Intel Corporation. All rights reserved.
+ *
+ * Author: Cezary Rojewski <cezary.rojewski@intel.com>
+ */
+
+#ifndef __SOF_PROBE_H
+#define __SOF_PROBE_H
+
+#include <sound/sof/header.h>
+
+struct snd_sof_dev;
+
+#define SOF_PROBE_INVALID_NODE_ID UINT_MAX
+
+struct sof_probe_dma {
+ unsigned int stream_tag;
+ unsigned int dma_buffer_size;
+} __packed;
+
+enum sof_connection_purpose {
+ SOF_CONNECTION_PURPOSE_EXTRACT = 1,
+ SOF_CONNECTION_PURPOSE_INJECT,
+};
+
+struct sof_probe_point_desc {
+ unsigned int buffer_id;
+ unsigned int purpose;
+ unsigned int stream_tag;
+} __packed;
+
+struct sof_ipc_probe_dma_add_params {
+ struct sof_ipc_cmd_hdr hdr;
+ unsigned int num_elems;
+ struct sof_probe_dma dma[];
+} __packed;
+
+struct sof_ipc_probe_info_params {
+ struct sof_ipc_reply rhdr;
+ unsigned int num_elems;
+ union {
+ struct sof_probe_dma dma[0];
+ struct sof_probe_point_desc desc[0];
+ };
+} __packed;
+
+struct sof_ipc_probe_dma_remove_params {
+ struct sof_ipc_cmd_hdr hdr;
+ unsigned int num_elems;
+ unsigned int stream_tag[];
+} __packed;
+
+struct sof_ipc_probe_point_add_params {
+ struct sof_ipc_cmd_hdr hdr;
+ unsigned int num_elems;
+ struct sof_probe_point_desc desc[];
+} __packed;
+
+struct sof_ipc_probe_point_remove_params {
+ struct sof_ipc_cmd_hdr hdr;
+ unsigned int num_elems;
+ unsigned int buffer_id[];
+} __packed;
+
+int sof_ipc_probe_init(struct snd_sof_dev *sdev,
+ u32 stream_tag, size_t buffer_size);
+int sof_ipc_probe_deinit(struct snd_sof_dev *sdev);
+int sof_ipc_probe_dma_info(struct snd_sof_dev *sdev,
+ struct sof_probe_dma **dma, size_t *num_dma);
+int sof_ipc_probe_dma_add(struct snd_sof_dev *sdev,
+ struct sof_probe_dma *dma, size_t num_dma);
+int sof_ipc_probe_dma_remove(struct snd_sof_dev *sdev,
+ unsigned int *stream_tag, size_t num_stream_tag);
+int sof_ipc_probe_points_info(struct snd_sof_dev *sdev,
+ struct sof_probe_point_desc **desc, size_t *num_desc);
+int sof_ipc_probe_points_add(struct snd_sof_dev *sdev,
+ struct sof_probe_point_desc *desc, size_t num_desc);
+int sof_ipc_probe_points_remove(struct snd_sof_dev *sdev,
+ unsigned int *buffer_id, size_t num_buffer_id);
+
+#endif
diff --git a/sound/soc/sof/sof-acpi-dev.c b/sound/soc/sof/sof-acpi-dev.c
new file mode 100644
index 000000000..a78b76ef3
--- /dev/null
+++ b/sound/soc/sof/sof-acpi-dev.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/acpi.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../intel/common/soc-intel-quirks.h"
+#include "ops.h"
+
+/* platform specific devices */
+#include "intel/shim.h"
+
+static char *fw_path;
+module_param(fw_path, charp, 0444);
+MODULE_PARM_DESC(fw_path, "alternate path for SOF firmware.");
+
+static char *tplg_path;
+module_param(tplg_path, charp, 0444);
+MODULE_PARM_DESC(tplg_path, "alternate path for SOF topology.");
+
+static int sof_acpi_debug;
+module_param_named(sof_acpi_debug, sof_acpi_debug, int, 0444);
+MODULE_PARM_DESC(sof_acpi_debug, "SOF ACPI debug options (0x0 all off)");
+
+#define SOF_ACPI_DISABLE_PM_RUNTIME BIT(0)
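+
+/*
+ * Example only: the module parameters above can be set at load time,
+ * e.g. (assuming the usual snd-sof-acpi module name; paths are
+ * placeholders, not shipped defaults):
+ *
+ *	modprobe snd-sof-acpi fw_path=intel/sof-test \
+ *		tplg_path=intel/sof-tplg-test sof_acpi_debug=0x1
+ */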
+
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL)
+static const struct sof_dev_desc sof_acpi_broadwell_desc = {
+ .machines = snd_soc_acpi_intel_broadwell_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = 0,
+ .chip_info = &bdw_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-bdw.ri",
+ .nocodec_tplg_filename = "sof-bdw-nocodec.tplg",
+ .ops = &sof_bdw_ops,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+
+/* BYTCR uses different IRQ index */
+static const struct sof_dev_desc sof_acpi_baytrailcr_desc = {
+ .machines = snd_soc_acpi_intel_baytrail_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = 2,
+ .irqindex_host_ipc = 0,
+ .chip_info = &byt_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-byt.ri",
+ .nocodec_tplg_filename = "sof-byt-nocodec.tplg",
+ .ops = &sof_byt_ops,
+};
+
+static const struct sof_dev_desc sof_acpi_baytrail_desc = {
+ .machines = snd_soc_acpi_intel_baytrail_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = 2,
+ .irqindex_host_ipc = 5,
+ .chip_info = &byt_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-byt.ri",
+ .nocodec_tplg_filename = "sof-byt-nocodec.tplg",
+ .ops = &sof_byt_ops,
+};
+
+static const struct sof_dev_desc sof_acpi_cherrytrail_desc = {
+ .machines = snd_soc_acpi_intel_cherrytrail_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = 2,
+ .irqindex_host_ipc = 5,
+ .chip_info = &cht_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-cht.ri",
+ .nocodec_tplg_filename = "sof-cht-nocodec.tplg",
+ .ops = &sof_cht_ops,
+};
+
+#endif
+
+static const struct dev_pm_ops sof_acpi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(snd_sof_suspend, snd_sof_resume)
+ SET_RUNTIME_PM_OPS(snd_sof_runtime_suspend, snd_sof_runtime_resume,
+ snd_sof_runtime_idle)
+};
+
+static void sof_acpi_probe_complete(struct device *dev)
+{
+ dev_dbg(dev, "Completing SOF ACPI probe");
+
+ if (sof_acpi_debug & SOF_ACPI_DISABLE_PM_RUNTIME)
+ return;
+
+ /* allow runtime_pm */
+ pm_runtime_set_autosuspend_delay(dev, SND_SOF_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+}
+
+static int sof_acpi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct sof_dev_desc *desc;
+ struct snd_sof_pdata *sof_pdata;
+ const struct snd_sof_dsp_ops *ops;
+ int ret;
+
+ dev_dbg(&pdev->dev, "ACPI DSP detected");
+
+ sof_pdata = devm_kzalloc(dev, sizeof(*sof_pdata), GFP_KERNEL);
+ if (!sof_pdata)
+ return -ENOMEM;
+
+ desc = device_get_match_data(dev);
+ if (!desc)
+ return -ENODEV;
+
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+ if (desc == &sof_acpi_baytrail_desc && soc_intel_is_byt_cr(pdev))
+ desc = &sof_acpi_baytrailcr_desc;
+#endif
+
+ /* get ops for platform */
+ ops = desc->ops;
+ if (!ops) {
+ dev_err(dev, "error: no matching ACPI descriptor ops\n");
+ return -ENODEV;
+ }
+
+ sof_pdata->desc = desc;
+ sof_pdata->dev = &pdev->dev;
+ sof_pdata->fw_filename = desc->default_fw_filename;
+
+	/* alternate fw and tplg filenames? */
+ if (fw_path)
+ sof_pdata->fw_filename_prefix = fw_path;
+ else
+ sof_pdata->fw_filename_prefix =
+ sof_pdata->desc->default_fw_path;
+
+ if (tplg_path)
+ sof_pdata->tplg_filename_prefix = tplg_path;
+ else
+ sof_pdata->tplg_filename_prefix =
+ sof_pdata->desc->default_tplg_path;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+ /* set callback to enable runtime_pm */
+ sof_pdata->sof_probe_complete = sof_acpi_probe_complete;
+#endif
+ /* call sof helper for DSP hardware probe */
+ ret = snd_sof_device_probe(dev, sof_pdata);
+ if (ret) {
+ dev_err(dev, "error: failed to probe DSP hardware!\n");
+ return ret;
+ }
+
+#if !IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+ sof_acpi_probe_complete(dev);
+#endif
+
+ return ret;
+}
+
+static int sof_acpi_remove(struct platform_device *pdev)
+{
+ if (!(sof_acpi_debug & SOF_ACPI_DISABLE_PM_RUNTIME))
+ pm_runtime_disable(&pdev->dev);
+
+ /* call sof helper for DSP hardware remove */
+ snd_sof_device_remove(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id sof_acpi_match[] = {
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL)
+ { "INT3438", (unsigned long)&sof_acpi_broadwell_desc },
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+ { "80860F28", (unsigned long)&sof_acpi_baytrail_desc },
+ { "808622A8", (unsigned long)&sof_acpi_cherrytrail_desc },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, sof_acpi_match);
+#endif
+
+/* acpi_driver definition */
+static struct platform_driver snd_sof_acpi_driver = {
+ .probe = sof_acpi_probe,
+ .remove = sof_acpi_remove,
+ .driver = {
+ .name = "sof-audio-acpi",
+ .pm = &sof_acpi_pm,
+ .acpi_match_table = ACPI_PTR(sof_acpi_match),
+ },
+};
+module_platform_driver(snd_sof_acpi_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_BAYTRAIL);
+MODULE_IMPORT_NS(SND_SOC_SOF_BROADWELL);
diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
new file mode 100644
index 000000000..afe7e503b
--- /dev/null
+++ b/sound/soc/sof/sof-audio.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2019 Intel Corporation. All rights reserved.
+//
+// Author: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+//
+
+#include "sof-audio.h"
+#include "ops.h"
+
+/*
+ * helper to determine whether only D0i3-compatible
+ * streams are active
+ */
+bool snd_sof_dsp_only_d0i3_compatible_stream_active(struct snd_sof_dev *sdev)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_sof_pcm *spcm;
+ bool d0i3_compatible_active = false;
+ int dir;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ for_each_pcm_streams(dir) {
+ substream = spcm->stream[dir].substream;
+ if (!substream || !substream->runtime)
+ continue;
+
+ /*
+			 * A non-NULL substream->runtime indicates
+ * that the stream is open. No need to check the
+ * stream state.
+ */
+ if (!spcm->stream[dir].d0i3_compatible)
+ return false;
+
+ d0i3_compatible_active = true;
+ }
+ }
+
+ return d0i3_compatible_active;
+}
+EXPORT_SYMBOL(snd_sof_dsp_only_d0i3_compatible_stream_active);
+
+bool snd_sof_stream_suspend_ignored(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pcm *spcm;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ if (spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].suspend_ignored ||
+ spcm->stream[SNDRV_PCM_STREAM_CAPTURE].suspend_ignored)
+ return true;
+ }
+
+ return false;
+}
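+
+/*
+ * Illustrative sketch only: a suspend path might combine the two helpers
+ * above to decide on a low-power target (variable names are hypothetical):
+ *
+ *	if (snd_sof_dsp_only_d0i3_compatible_stream_active(sdev) ||
+ *	    snd_sof_stream_suspend_ignored(sdev))
+ *		target_state = low_power_d0_substate;
+ *	else
+ *		target_state = deep_suspend_state;
+ */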
+
+int sof_set_hw_params_upon_resume(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct snd_pcm_substream *substream;
+ struct snd_sof_pcm *spcm;
+ snd_pcm_state_t state;
+ int dir;
+
+ /*
+	 * SOF requires hw_params to be set up internally upon resume.
+ * So, set the flag to indicate this for those streams that
+ * have been suspended.
+ */
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ for_each_pcm_streams(dir) {
+ /*
+ * do not reset hw_params upon resume for streams that
+ * were kept running during suspend
+ */
+ if (spcm->stream[dir].suspend_ignored)
+ continue;
+
+ substream = spcm->stream[dir].substream;
+ if (!substream || !substream->runtime)
+ continue;
+
+ state = substream->runtime->status->state;
+ if (state == SNDRV_PCM_STATE_SUSPENDED)
+ spcm->prepared[dir] = false;
+ }
+ }
+
+ /* set internal flag for BE */
+ return snd_sof_dsp_hw_params_upon_resume(sdev);
+}
+
+static int sof_restore_kcontrols(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct snd_sof_control *scontrol;
+ int ipc_cmd, ctrl_type;
+ int ret = 0;
+
+ /* restore kcontrol values */
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list) {
+ /* reset readback offset for scontrol after resuming */
+ scontrol->readback_offset = 0;
+
+ /* notify DSP of kcontrol values */
+ switch (scontrol->cmd) {
+ case SOF_CTRL_CMD_VOLUME:
+ case SOF_CTRL_CMD_ENUM:
+ case SOF_CTRL_CMD_SWITCH:
+ ipc_cmd = SOF_IPC_COMP_SET_VALUE;
+ ctrl_type = SOF_CTRL_TYPE_VALUE_CHAN_SET;
+ ret = snd_sof_ipc_set_get_comp_data(scontrol,
+ ipc_cmd, ctrl_type,
+ scontrol->cmd,
+ true);
+ break;
+ case SOF_CTRL_CMD_BINARY:
+ ipc_cmd = SOF_IPC_COMP_SET_DATA;
+ ctrl_type = SOF_CTRL_TYPE_DATA_SET;
+ ret = snd_sof_ipc_set_get_comp_data(scontrol,
+ ipc_cmd, ctrl_type,
+ scontrol->cmd,
+ true);
+ break;
+
+ default:
+ break;
+ }
+
+ if (ret < 0) {
+ dev_err(dev,
+ "error: failed kcontrol value set for widget: %d\n",
+ scontrol->comp_id);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+const struct sof_ipc_pipe_new *snd_sof_pipeline_find(struct snd_sof_dev *sdev,
+ int pipeline_id)
+{
+ const struct snd_sof_widget *swidget;
+
+ list_for_each_entry(swidget, &sdev->widget_list, list)
+ if (swidget->id == snd_soc_dapm_scheduler) {
+ const struct sof_ipc_pipe_new *pipeline =
+ swidget->private;
+ if (pipeline->pipeline_id == pipeline_id)
+ return pipeline;
+ }
+
+ return NULL;
+}
+
+int sof_restore_pipelines(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct snd_sof_widget *swidget;
+ struct snd_sof_route *sroute;
+ struct sof_ipc_pipe_new *pipeline;
+ struct snd_sof_dai *dai;
+ struct sof_ipc_cmd_hdr *hdr;
+ struct sof_ipc_comp *comp;
+ size_t ipc_size;
+ int ret;
+
+ /* restore pipeline components */
+ list_for_each_entry_reverse(swidget, &sdev->widget_list, list) {
+ struct sof_ipc_comp_reply r;
+
+ /* skip if there is no private data */
+ if (!swidget->private)
+ continue;
+
+ ret = sof_pipeline_core_enable(sdev, swidget);
+ if (ret < 0) {
+ dev_err(dev,
+ "error: failed to enable target core: %d\n",
+ ret);
+
+ return ret;
+ }
+
+ switch (swidget->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ ipc_size = sizeof(struct sof_ipc_comp_dai) +
+ sizeof(struct sof_ipc_comp_ext);
+ comp = kzalloc(ipc_size, GFP_KERNEL);
+ if (!comp)
+ return -ENOMEM;
+
+ dai = swidget->private;
+ memcpy(comp, &dai->comp_dai,
+ sizeof(struct sof_ipc_comp_dai));
+
+ /* append extended data to the end of the component */
+ memcpy((u8 *)comp + sizeof(struct sof_ipc_comp_dai),
+ &swidget->comp_ext, sizeof(swidget->comp_ext));
+
+ ret = sof_ipc_tx_message(sdev->ipc, comp->hdr.cmd,
+ comp, ipc_size,
+ &r, sizeof(r));
+ kfree(comp);
+ break;
+ case snd_soc_dapm_scheduler:
+
+ /*
+ * During suspend, all DSP cores are powered off.
+ * Therefore upon resume, create the pipeline comp
+ * and power up the core that the pipeline is
+ * scheduled on.
+ */
+ pipeline = swidget->private;
+ ret = sof_load_pipeline_ipc(dev, pipeline, &r);
+ break;
+ default:
+ hdr = swidget->private;
+ ret = sof_ipc_tx_message(sdev->ipc, hdr->cmd,
+ swidget->private, hdr->size,
+ &r, sizeof(r));
+ break;
+ }
+ if (ret < 0) {
+ dev_err(dev,
+ "error: failed to load widget type %d with ID: %d\n",
+ swidget->widget->id, swidget->comp_id);
+
+ return ret;
+ }
+ }
+
+ /* restore pipeline connections */
+ list_for_each_entry_reverse(sroute, &sdev->route_list, list) {
+ struct sof_ipc_pipe_comp_connect *connect;
+ struct sof_ipc_reply reply;
+
+ /* skip if there's no private data */
+ if (!sroute->private)
+ continue;
+
+ connect = sroute->private;
+
+ /* send ipc */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ connect->hdr.cmd,
+ connect, sizeof(*connect),
+ &reply, sizeof(reply));
+ if (ret < 0) {
+ dev_err(dev,
+ "error: failed to load route sink %s control %s source %s\n",
+ sroute->route->sink,
+ sroute->route->control ? sroute->route->control
+ : "none",
+ sroute->route->source);
+
+ return ret;
+ }
+ }
+
+ /* restore dai links */
+ list_for_each_entry_reverse(dai, &sdev->dai_list, list) {
+ struct sof_ipc_reply reply;
+ struct sof_ipc_dai_config *config = dai->dai_config;
+
+ if (!config) {
+ dev_err(dev, "error: no config for DAI %s\n",
+ dai->name);
+ continue;
+ }
+
+ /*
+ * The link DMA channel would be invalidated for running
+ * streams but not for streams that were in the PAUSED
+ * state during suspend. So invalidate it here before setting
+ * the dai config in the DSP.
+ */
+ if (config->type == SOF_DAI_INTEL_HDA)
+ config->hda.link_dma_ch = DMA_CHAN_INVALID;
+
+ ret = sof_ipc_tx_message(sdev->ipc,
+ config->hdr.cmd, config,
+ config->hdr.size,
+ &reply, sizeof(reply));
+
+ if (ret < 0) {
+ dev_err(dev,
+ "error: failed to set dai config for %s\n",
+ dai->name);
+
+ return ret;
+ }
+ }
+
+ /* complete pipeline */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ switch (swidget->id) {
+ case snd_soc_dapm_scheduler:
+ swidget->complete =
+ snd_sof_complete_pipeline(dev, swidget);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* restore pipeline kcontrols */
+ ret = sof_restore_kcontrols(dev);
+ if (ret < 0)
+ dev_err(dev,
+				"error: failed to restore kcontrols after resume\n");
+
+ return ret;
+}
+
+/*
+ * Generic object lookup APIs.
+ */
+
+struct snd_sof_pcm *snd_sof_find_spcm_name(struct snd_soc_component *scomp,
+ const char *name)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_pcm *spcm;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ /* match with PCM dai name */
+ if (strcmp(spcm->pcm.dai_name, name) == 0)
+ return spcm;
+
+ /* match with playback caps name if set */
+ if (*spcm->pcm.caps[0].name &&
+ !strcmp(spcm->pcm.caps[0].name, name))
+ return spcm;
+
+ /* match with capture caps name if set */
+ if (*spcm->pcm.caps[1].name &&
+ !strcmp(spcm->pcm.caps[1].name, name))
+ return spcm;
+ }
+
+ return NULL;
+}
+
+struct snd_sof_pcm *snd_sof_find_spcm_comp(struct snd_soc_component *scomp,
+ unsigned int comp_id,
+ int *direction)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_pcm *spcm;
+ int dir;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ for_each_pcm_streams(dir) {
+ if (spcm->stream[dir].comp_id == comp_id) {
+ *direction = dir;
+ return spcm;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+struct snd_sof_pcm *snd_sof_find_spcm_pcm_id(struct snd_soc_component *scomp,
+ unsigned int pcm_id)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_pcm *spcm;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ if (le32_to_cpu(spcm->pcm.pcm_id) == pcm_id)
+ return spcm;
+ }
+
+ return NULL;
+}
+
+struct snd_sof_widget *snd_sof_find_swidget(struct snd_soc_component *scomp,
+ const char *name)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_widget *swidget;
+
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (strcmp(name, swidget->widget->name) == 0)
+ return swidget;
+ }
+
+ return NULL;
+}
+
+/* find widget by stream name and direction */
+struct snd_sof_widget *
+snd_sof_find_swidget_sname(struct snd_soc_component *scomp,
+ const char *pcm_name, int dir)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_widget *swidget;
+ enum snd_soc_dapm_type type;
+
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+ type = snd_soc_dapm_aif_in;
+ else
+ type = snd_soc_dapm_aif_out;
+
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (!strcmp(pcm_name, swidget->widget->sname) &&
+ swidget->id == type)
+ return swidget;
+ }
+
+ return NULL;
+}
+
+struct snd_sof_dai *snd_sof_find_dai(struct snd_soc_component *scomp,
+ const char *name)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_dai *dai;
+
+ list_for_each_entry(dai, &sdev->dai_list, list) {
+ if (dai->name && (strcmp(name, dai->name) == 0))
+ return dai;
+ }
+
+ return NULL;
+}
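+
+/*
+ * Illustrative sketch only: typical use of the lookup helpers above from
+ * topology or PCM code (the names being looked up are hypothetical):
+ *
+ *	struct snd_sof_pcm *spcm = snd_sof_find_spcm_name(scomp, "Port0");
+ *	struct snd_sof_dai *dai = snd_sof_find_dai(scomp, "SSP0 Pin");
+ *
+ *	if (!spcm || !dai)
+ *		return -EINVAL;
+ */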
+
+/*
+ * SOF Driver enumeration.
+ */
+int sof_machine_check(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_soc_acpi_mach *mach;
+ int ret;
+
+ /* force nocodec mode */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_FORCE_NOCODEC_MODE)
+ dev_warn(sdev->dev, "Force to use nocodec mode\n");
+ goto nocodec;
+#endif
+
+ /* find machine */
+ snd_sof_machine_select(sdev);
+ if (sof_pdata->machine) {
+ snd_sof_set_mach_params(sof_pdata->machine, sdev->dev);
+ return 0;
+ }
+
+#if !IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC)
+ dev_err(sdev->dev, "error: no matching ASoC machine driver found - aborting probe\n");
+ return -ENODEV;
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_FORCE_NOCODEC_MODE)
+nocodec:
+#endif
+ /* select nocodec mode */
+ dev_warn(sdev->dev, "Using nocodec machine driver\n");
+ mach = devm_kzalloc(sdev->dev, sizeof(*mach), GFP_KERNEL);
+ if (!mach)
+ return -ENOMEM;
+
+ mach->drv_name = "sof-nocodec";
+ sof_pdata->tplg_filename = desc->nocodec_tplg_filename;
+
+ ret = sof_nocodec_setup(sdev->dev, desc->ops);
+ if (ret < 0)
+ return ret;
+
+ sof_pdata->machine = mach;
+ snd_sof_set_mach_params(sof_pdata->machine, sdev->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_machine_check);
+
+int sof_machine_register(struct snd_sof_dev *sdev, void *pdata)
+{
+ struct snd_sof_pdata *plat_data = pdata;
+ const char *drv_name;
+ const void *mach;
+ int size;
+
+ drv_name = plat_data->machine->drv_name;
+ mach = plat_data->machine;
+ size = sizeof(*plat_data->machine);
+
+ /* register machine driver, pass machine info as pdata */
+ plat_data->pdev_mach =
+ platform_device_register_data(sdev->dev, drv_name,
+ PLATFORM_DEVID_NONE, mach, size);
+ if (IS_ERR(plat_data->pdev_mach))
+ return PTR_ERR(plat_data->pdev_mach);
+
+ dev_dbg(sdev->dev, "created machine %s\n",
+ dev_name(&plat_data->pdev_mach->dev));
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_machine_register);
+
+void sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
+{
+ struct snd_sof_pdata *plat_data = pdata;
+
+ if (!IS_ERR_OR_NULL(plat_data->pdev_mach))
+ platform_device_unregister(plat_data->pdev_mach);
+}
+EXPORT_SYMBOL(sof_machine_unregister);
diff --git a/sound/soc/sof/sof-audio.h b/sound/soc/sof/sof-audio.h
new file mode 100644
index 000000000..9f645a2e5
--- /dev/null
+++ b/sound/soc/sof/sof-audio.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2019 Intel Corporation. All rights reserved.
+ *
+ * Author: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+ */
+
+#ifndef __SOUND_SOC_SOF_AUDIO_H
+#define __SOUND_SOC_SOF_AUDIO_H
+
+#include <linux/workqueue.h>
+
+#include <sound/soc.h>
+#include <sound/control.h>
+#include <sound/sof/stream.h> /* needs to be included before control.h */
+#include <sound/sof/control.h>
+#include <sound/sof/dai.h>
+#include <sound/sof/topology.h>
+#include "sof-priv.h"
+
+#define SOF_AUDIO_PCM_DRV_NAME "sof-audio-component"
+
+/* max number of FE PCMs before BEs */
+#define SOF_BE_PCM_BASE 16
+
+#define DMA_CHAN_INVALID 0xFFFFFFFF
+
+/* PCM stream, mapped to FW component */
+struct snd_sof_pcm_stream {
+ u32 comp_id;
+ struct snd_dma_buffer page_table;
+ struct sof_ipc_stream_posn posn;
+ struct snd_pcm_substream *substream;
+ struct work_struct period_elapsed_work;
+ bool d0i3_compatible; /* DSP can be in D0I3 when this pcm is opened */
+ /*
+	 * flag to indicate whether the DSP pipelines should be kept
+	 * active while suspending the stream
+ */
+ bool suspend_ignored;
+};
+
+/* ALSA SOF PCM device */
+struct snd_sof_pcm {
+ struct snd_soc_component *scomp;
+ struct snd_soc_tplg_pcm pcm;
+ struct snd_sof_pcm_stream stream[2];
+ struct list_head list; /* list in sdev pcm list */
+ struct snd_pcm_hw_params params[2];
+ bool prepared[2]; /* PCM_PARAMS set successfully */
+};
+
+struct snd_sof_led_control {
+ unsigned int use_led;
+ unsigned int direction;
+ int led_value;
+};
+
+/* ALSA SOF Kcontrol device */
+struct snd_sof_control {
+ struct snd_soc_component *scomp;
+ int comp_id;
+ int min_volume_step; /* min volume step for volume_table */
+ int max_volume_step; /* max volume step for volume_table */
+ int num_channels;
+ u32 readback_offset; /* offset to mmapped data if used */
+ struct sof_ipc_ctrl_data *control_data;
+ u32 size; /* cdata size */
+ enum sof_ipc_ctrl_cmd cmd;
+	u32 *volume_table; /* volume table computed from tlv data */
+
+ struct list_head list; /* list in sdev control list */
+
+ struct snd_sof_led_control led_ctl;
+};
+
+/* ASoC SOF DAPM widget */
+struct snd_sof_widget {
+ struct snd_soc_component *scomp;
+ int comp_id;
+ int pipeline_id;
+ int complete;
+ int core;
+ int id;
+
+ struct snd_soc_dapm_widget *widget;
+ struct list_head list; /* list in sdev widget list */
+
+ /* extended data for UUID components */
+ struct sof_ipc_comp_ext comp_ext;
+
+ void *private; /* core does not touch this */
+};
+
+/* ASoC SOF DAPM route */
+struct snd_sof_route {
+ struct snd_soc_component *scomp;
+
+ struct snd_soc_dapm_route *route;
+ struct list_head list; /* list in sdev route list */
+
+ void *private;
+};
+
+/* ASoC DAI device */
+struct snd_sof_dai {
+ struct snd_soc_component *scomp;
+ const char *name;
+ const char *cpu_dai_name;
+
+ struct sof_ipc_comp_dai comp_dai;
+ struct sof_ipc_dai_config *dai_config;
+ struct list_head list; /* list in sdev dai list */
+};
+
+/*
+ * Kcontrols.
+ */
+
+int snd_sof_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_switch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_enum_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_enum_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_bytes_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_bytes_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size);
+int snd_sof_bytes_ext_get(struct snd_kcontrol *kcontrol,
+ unsigned int __user *binary_data,
+ unsigned int size);
+int snd_sof_bytes_ext_volatile_get(struct snd_kcontrol *kcontrol, unsigned int __user *binary_data,
+ unsigned int size);
+
+/*
+ * Topology.
+ * There is no snd_sof_free_topology since topology components will
+ * be freed by snd_soc_unregister_component,
+ */
+int snd_sof_load_topology(struct snd_soc_component *scomp, const char *file);
+int snd_sof_complete_pipeline(struct device *dev,
+ struct snd_sof_widget *swidget);
+
+int sof_load_pipeline_ipc(struct device *dev,
+ struct sof_ipc_pipe_new *pipeline,
+ struct sof_ipc_comp_reply *r);
+int sof_pipeline_core_enable(struct snd_sof_dev *sdev,
+ const struct snd_sof_widget *swidget);
+
+/*
+ * Stream IPC
+ */
+int snd_sof_ipc_stream_posn(struct snd_soc_component *scomp,
+ struct snd_sof_pcm *spcm, int direction,
+ struct sof_ipc_stream_posn *posn);
+
+struct snd_sof_widget *snd_sof_find_swidget(struct snd_soc_component *scomp,
+ const char *name);
+struct snd_sof_widget *
+snd_sof_find_swidget_sname(struct snd_soc_component *scomp,
+ const char *pcm_name, int dir);
+struct snd_sof_dai *snd_sof_find_dai(struct snd_soc_component *scomp,
+ const char *name);
+
+static inline
+struct snd_sof_pcm *snd_sof_find_spcm_dai(struct snd_soc_component *scomp,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+
+ struct snd_sof_pcm *spcm = NULL;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ if (le32_to_cpu(spcm->pcm.dai_id) == rtd->dai_link->id)
+ return spcm;
+ }
+
+ return NULL;
+}
+
+struct snd_sof_pcm *snd_sof_find_spcm_name(struct snd_soc_component *scomp,
+ const char *name);
+struct snd_sof_pcm *snd_sof_find_spcm_comp(struct snd_soc_component *scomp,
+ unsigned int comp_id,
+ int *direction);
+struct snd_sof_pcm *snd_sof_find_spcm_pcm_id(struct snd_soc_component *scomp,
+ unsigned int pcm_id);
+const struct sof_ipc_pipe_new *snd_sof_pipeline_find(struct snd_sof_dev *sdev,
+ int pipeline_id);
+void snd_sof_pcm_period_elapsed(struct snd_pcm_substream *substream);
+void snd_sof_pcm_period_elapsed_work(struct work_struct *work);
+
+/*
+ * Mixer IPC
+ */
+int snd_sof_ipc_set_get_comp_data(struct snd_sof_control *scontrol,
+ u32 ipc_cmd,
+ enum sof_ipc_ctrl_type ctrl_type,
+ enum sof_ipc_ctrl_cmd ctrl_cmd,
+ bool send);
+
+/* PM */
+int sof_restore_pipelines(struct device *dev);
+int sof_set_hw_params_upon_resume(struct device *dev);
+bool snd_sof_stream_suspend_ignored(struct snd_sof_dev *sdev);
+bool snd_sof_dsp_only_d0i3_compatible_stream_active(struct snd_sof_dev *sdev);
+
+/* Machine driver enumeration */
+int sof_machine_register(struct snd_sof_dev *sdev, void *pdata);
+void sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata);
+
+#endif
diff --git a/sound/soc/sof/sof-of-dev.c b/sound/soc/sof/sof-of-dev.c
new file mode 100644
index 000000000..85ff0db88
--- /dev/null
+++ b/sound/soc/sof/sof-of-dev.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright 2019 NXP
+//
+// Author: Daniel Baluta <daniel.baluta@nxp.com>
+//
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <sound/sof.h>
+
+#include "ops.h"
+
+extern struct snd_sof_dsp_ops sof_imx8_ops;
+extern struct snd_sof_dsp_ops sof_imx8x_ops;
+extern struct snd_sof_dsp_ops sof_imx8m_ops;
+
+/* platform specific devices */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_IMX8)
+static struct sof_dev_desc sof_of_imx8qxp_desc = {
+ .default_fw_path = "imx/sof",
+ .default_tplg_path = "imx/sof-tplg",
+ .default_fw_filename = "sof-imx8x.ri",
+ .nocodec_tplg_filename = "sof-imx8-nocodec.tplg",
+ .ops = &sof_imx8x_ops,
+};
+
+static struct sof_dev_desc sof_of_imx8qm_desc = {
+ .default_fw_path = "imx/sof",
+ .default_tplg_path = "imx/sof-tplg",
+ .default_fw_filename = "sof-imx8.ri",
+ .nocodec_tplg_filename = "sof-imx8-nocodec.tplg",
+ .ops = &sof_imx8_ops,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_IMX8M)
+static struct sof_dev_desc sof_of_imx8mp_desc = {
+ .default_fw_path = "imx/sof",
+ .default_tplg_path = "imx/sof-tplg",
+ .default_fw_filename = "sof-imx8m.ri",
+ .nocodec_tplg_filename = "sof-imx8-nocodec.tplg",
+ .ops = &sof_imx8m_ops,
+};
+#endif
+
+static const struct dev_pm_ops sof_of_pm = {
+ .prepare = snd_sof_prepare,
+ .complete = snd_sof_complete,
+ SET_SYSTEM_SLEEP_PM_OPS(snd_sof_suspend, snd_sof_resume)
+ SET_RUNTIME_PM_OPS(snd_sof_runtime_suspend, snd_sof_runtime_resume,
+ NULL)
+};
+
+static void sof_of_probe_complete(struct device *dev)
+{
+ /* allow runtime_pm */
+ pm_runtime_set_autosuspend_delay(dev, SND_SOF_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+static int sof_of_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct sof_dev_desc *desc;
+ struct snd_sof_pdata *sof_pdata;
+ const struct snd_sof_dsp_ops *ops;
+ int ret;
+
+ dev_info(&pdev->dev, "DT DSP detected");
+
+ sof_pdata = devm_kzalloc(dev, sizeof(*sof_pdata), GFP_KERNEL);
+ if (!sof_pdata)
+ return -ENOMEM;
+
+ desc = device_get_match_data(dev);
+ if (!desc)
+ return -ENODEV;
+
+ /* get ops for platform */
+ ops = desc->ops;
+ if (!ops) {
+ dev_err(dev, "error: no matching DT descriptor ops\n");
+ return -ENODEV;
+ }
+
+ sof_pdata->desc = desc;
+ sof_pdata->dev = &pdev->dev;
+ sof_pdata->fw_filename = desc->default_fw_filename;
+
+ /* TODO: read alternate fw and tplg filenames from DT */
+ sof_pdata->fw_filename_prefix = sof_pdata->desc->default_fw_path;
+ sof_pdata->tplg_filename_prefix = sof_pdata->desc->default_tplg_path;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+ /* set callback to enable runtime_pm */
+ sof_pdata->sof_probe_complete = sof_of_probe_complete;
+#endif
+ /* call sof helper for DSP hardware probe */
+ ret = snd_sof_device_probe(dev, sof_pdata);
+ if (ret) {
+ dev_err(dev, "error: failed to probe DSP hardware\n");
+ return ret;
+ }
+
+#if !IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+ sof_of_probe_complete(dev);
+#endif
+
+ return ret;
+}
+
+static int sof_of_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ /* call sof helper for DSP hardware remove */
+ snd_sof_device_remove(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id sof_of_ids[] = {
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_IMX8)
+ { .compatible = "fsl,imx8qxp-dsp", .data = &sof_of_imx8qxp_desc},
+ { .compatible = "fsl,imx8qm-dsp", .data = &sof_of_imx8qm_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_IMX8M)
+ { .compatible = "fsl,imx8mp-dsp", .data = &sof_of_imx8mp_desc},
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, sof_of_ids);
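+
+/*
+ * Illustrative sketch only: a board device tree selects one of the
+ * descriptors above through its compatible string, e.g. (node name,
+ * unit address and remaining properties are placeholders):
+ *
+ *	dsp: dsp@... {
+ *		compatible = "fsl,imx8qxp-dsp";
+ *		...
+ *	};
+ */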
+
+/* DT driver definition */
+static struct platform_driver snd_sof_of_driver = {
+ .probe = sof_of_probe,
+ .remove = sof_of_remove,
+ .driver = {
+ .name = "sof-audio-of",
+ .pm = &sof_of_pm,
+ .of_match_table = sof_of_ids,
+ },
+};
+module_platform_driver(snd_sof_of_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
new file mode 100644
index 000000000..ade292a61
--- /dev/null
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/firmware.h>
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_data/x86/soc.h>
+#include <linux/pm_runtime.h>
+#include <sound/intel-dsp-config.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "ops.h"
+
+/* platform specific devices */
+#include "intel/shim.h"
+#include "intel/hda.h"
+
+static char *fw_path;
+module_param(fw_path, charp, 0444);
+MODULE_PARM_DESC(fw_path, "alternate path for SOF firmware.");
+
+static char *tplg_path;
+module_param(tplg_path, charp, 0444);
+MODULE_PARM_DESC(tplg_path, "alternate path for SOF topology.");
+
+static char *tplg_filename;
+module_param(tplg_filename, charp, 0444);
+MODULE_PARM_DESC(tplg_filename, "alternate filename for SOF topology.");
+
+static int sof_pci_debug;
+module_param_named(sof_pci_debug, sof_pci_debug, int, 0444);
+MODULE_PARM_DESC(sof_pci_debug, "SOF PCI debug options (0x0 all off)");
+
+static const char *sof_dmi_override_tplg_name;
+static bool sof_dmi_use_community_key;
+
+#define SOF_PCI_DISABLE_PM_RUNTIME BIT(0)
+
+static int sof_tplg_cb(const struct dmi_system_id *id)
+{
+ sof_dmi_override_tplg_name = id->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id sof_tplg_table[] = {
+ {
+ .callback = sof_tplg_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Volteer"),
+ DMI_MATCH(DMI_OEM_STRING, "AUDIO-MAX98373_ALC5682I_I2S_UP4"),
+ },
+ .driver_data = "sof-tgl-rt5682-ssp0-max98373-ssp2.tplg",
+ },
+ {}
+};
+
+/* all Up boards use the community key */
+static int up_use_community_key(const struct dmi_system_id *id)
+{
+ sof_dmi_use_community_key = true;
+ return 1;
+}
+
+/*
+ * For ApolloLake Chromebooks we want to force the use of the Intel production key.
+ * All newer platforms use the community key
+ */
+static int chromebook_use_community_key(const struct dmi_system_id *id)
+{
+ if (!soc_intel_is_apl())
+ sof_dmi_use_community_key = true;
+ return 1;
+}
+
+static const struct dmi_system_id community_key_platforms[] = {
+ {
+ .ident = "Up boards",
+ .callback = up_use_community_key,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
+ }
+ },
+ {
+ .ident = "Google Chromebooks",
+ .callback = chromebook_use_community_key,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google"),
+ }
+ },
+ {
+ .ident = "Google firmware",
+ .callback = chromebook_use_community_key,
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VERSION, "Google"),
+ }
+ },
+ {},
+};
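+
+/*
+ * Illustrative sketch only: a platform could opt into the community key
+ * by adding an entry of this form before the terminator (the vendor
+ * string is hypothetical):
+ *
+ *	{
+ *		.ident = "Example boards",
+ *		.callback = up_use_community_key,
+ *		.matches = {
+ *			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
+ *		}
+ *	},
+ */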
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE)
+static const struct sof_dev_desc bxt_desc = {
+ .machines = snd_soc_acpi_intel_bxt_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &apl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-apl.ri",
+ .nocodec_tplg_filename = "sof-apl-nocodec.tplg",
+ .ops = &sof_apl_ops,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_GEMINILAKE)
+static const struct sof_dev_desc glk_desc = {
+ .machines = snd_soc_acpi_intel_glk_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &apl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-glk.ri",
+ .nocodec_tplg_filename = "sof-glk-nocodec.tplg",
+ .ops = &sof_apl_ops,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_MERRIFIELD)
+static struct snd_soc_acpi_mach sof_tng_machines[] = {
+ {
+ .id = "INT343A",
+ .drv_name = "edison",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt.tplg",
+ },
+ {}
+};
+
+static const struct sof_dev_desc tng_desc = {
+ .machines = sof_tng_machines,
+ .resindex_lpe_base = 3, /* IRAM, but subtract IRAM offset */
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = 0,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &tng_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-byt.ri",
+ .nocodec_tplg_filename = "sof-byt.tplg",
+ .ops = &sof_tng_ops,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_CANNONLAKE)
+static const struct sof_dev_desc cnl_desc = {
+ .machines = snd_soc_acpi_intel_cnl_machines,
+ .alt_machines = snd_soc_acpi_intel_cnl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &cnl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-cnl.ri",
+ .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COFFEELAKE)
+static const struct sof_dev_desc cfl_desc = {
+ .machines = snd_soc_acpi_intel_cfl_machines,
+ .alt_machines = snd_soc_acpi_intel_cfl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &cnl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-cfl.ri",
+ .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
+static const struct sof_dev_desc cml_desc = {
+ .machines = snd_soc_acpi_intel_cml_machines,
+ .alt_machines = snd_soc_acpi_intel_cml_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &cnl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-cml.ri",
+ .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE)
+static const struct sof_dev_desc icl_desc = {
+ .machines = snd_soc_acpi_intel_icl_machines,
+ .alt_machines = snd_soc_acpi_intel_icl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &icl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-icl.ri",
+ .nocodec_tplg_filename = "sof-icl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
+static const struct sof_dev_desc tgl_desc = {
+ .machines = snd_soc_acpi_intel_tgl_machines,
+ .alt_machines = snd_soc_acpi_intel_tgl_sdw_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &tgl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-tgl.ri",
+ .nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+};
+
+static const struct sof_dev_desc tglh_desc = {
+ .machines = snd_soc_acpi_intel_tgl_machines,
+ .alt_machines = snd_soc_acpi_intel_tgl_sdw_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &tglh_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-tgl-h.ri",
+ .nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
+ .ops = &sof_tgl_ops,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_ELKHARTLAKE)
+static const struct sof_dev_desc ehl_desc = {
+ .machines = snd_soc_acpi_intel_ehl_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &ehl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-ehl.ri",
+ .nocodec_tplg_filename = "sof-ehl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
+static const struct sof_dev_desc jsl_desc = {
+ .machines = snd_soc_acpi_intel_jsl_machines,
+ .use_acpi_target_states = true,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &jsl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .default_fw_filename = "sof-jsl.ri",
+ .nocodec_tplg_filename = "sof-jsl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+};
+#endif
+
+static const struct dev_pm_ops sof_pci_pm = {
+ .prepare = snd_sof_prepare,
+ .complete = snd_sof_complete,
+ SET_SYSTEM_SLEEP_PM_OPS(snd_sof_suspend, snd_sof_resume)
+ SET_RUNTIME_PM_OPS(snd_sof_runtime_suspend, snd_sof_runtime_resume,
+ snd_sof_runtime_idle)
+};
+
+static void sof_pci_probe_complete(struct device *dev)
+{
+ dev_dbg(dev, "Completing SOF PCI probe");
+
+ if (sof_pci_debug & SOF_PCI_DISABLE_PM_RUNTIME)
+ return;
+
+ /* allow runtime_pm */
+ pm_runtime_set_autosuspend_delay(dev, SND_SOF_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+
+ /*
+	 * runtime PM for PCI devices is "forbidden" by default,
+	 * so call pm_runtime_allow() to enable it.
+ */
+ pm_runtime_allow(dev);
+
+	/* mark last_busy for pm_runtime to make sure it does not suspend immediately */
+ pm_runtime_mark_last_busy(dev);
+
+ /* follow recommendation in pci-driver.c to decrement usage counter */
+ pm_runtime_put_noidle(dev);
+}
+
+static int sof_pci_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ struct device *dev = &pci->dev;
+ const struct sof_dev_desc *desc =
+ (const struct sof_dev_desc *)pci_id->driver_data;
+ struct snd_sof_pdata *sof_pdata;
+ const struct snd_sof_dsp_ops *ops;
+ int ret;
+
+ ret = snd_intel_dsp_driver_probe(pci);
+ if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+ dev_dbg(&pci->dev, "SOF PCI driver not selected, aborting probe\n");
+ return -ENODEV;
+ }
+ dev_dbg(&pci->dev, "PCI DSP detected");
+
+ /* get ops for platform */
+ ops = desc->ops;
+ if (!ops) {
+ dev_err(dev, "error: no matching PCI descriptor ops\n");
+ return -ENODEV;
+ }
+
+ sof_pdata = devm_kzalloc(dev, sizeof(*sof_pdata), GFP_KERNEL);
+ if (!sof_pdata)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pci);
+ if (ret < 0)
+ return ret;
+
+ ret = pci_request_regions(pci, "Audio DSP");
+ if (ret < 0)
+ return ret;
+
+ sof_pdata->name = pci_name(pci);
+ sof_pdata->desc = (struct sof_dev_desc *)pci_id->driver_data;
+ sof_pdata->dev = dev;
+ sof_pdata->fw_filename = desc->default_fw_filename;
+
+ /*
+ * for platforms using the SOF community key, change the
+ * default path automatically to pick the right files from the
+ * linux-firmware tree. This can be overridden with the
+ * fw_path kernel parameter, e.g. for developers.
+ */
+
+	/* alternate fw and tplg filenames? */
+ if (fw_path) {
+ sof_pdata->fw_filename_prefix = fw_path;
+
+ dev_dbg(dev,
+ "Module parameter used, changed fw path to %s\n",
+ sof_pdata->fw_filename_prefix);
+
+ } else if (dmi_check_system(community_key_platforms) && sof_dmi_use_community_key) {
+ sof_pdata->fw_filename_prefix =
+ devm_kasprintf(dev, GFP_KERNEL, "%s/%s",
+ sof_pdata->desc->default_fw_path,
+ "community");
+
+ dev_dbg(dev,
+ "Platform uses community key, changed fw path to %s\n",
+ sof_pdata->fw_filename_prefix);
+ } else {
+ sof_pdata->fw_filename_prefix =
+ sof_pdata->desc->default_fw_path;
+ }
+
+ if (tplg_path)
+ sof_pdata->tplg_filename_prefix = tplg_path;
+ else
+ sof_pdata->tplg_filename_prefix =
+ sof_pdata->desc->default_tplg_path;
+
+ /*
+ * the topology filename will be provided in the machine descriptor, unless
+ * it is overridden by a module parameter or DMI quirk.
+ */
+ if (tplg_filename) {
+ sof_pdata->tplg_filename = tplg_filename;
+
+ dev_dbg(dev, "Module parameter used, changed tplg filename to %s\n",
+ sof_pdata->tplg_filename);
+ } else {
+ dmi_check_system(sof_tplg_table);
+ if (sof_dmi_override_tplg_name)
+ sof_pdata->tplg_filename = sof_dmi_override_tplg_name;
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+ /* set callback to enable runtime_pm */
+ sof_pdata->sof_probe_complete = sof_pci_probe_complete;
+#endif
+ /* call sof helper for DSP hardware probe */
+ ret = snd_sof_device_probe(dev, sof_pdata);
+ if (ret) {
+ dev_err(dev, "error: failed to probe DSP hardware!\n");
+ goto release_regions;
+ }
+
+#if !IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+ sof_pci_probe_complete(dev);
+#endif
+
+ return ret;
+
+release_regions:
+ pci_release_regions(pci);
+
+ return ret;
+}
+
+static void sof_pci_remove(struct pci_dev *pci)
+{
+ /* call sof helper for DSP hardware remove */
+ snd_sof_device_remove(&pci->dev);
+
+ /* follow recommendation in pci-driver.c to increment usage counter */
+ if (!(sof_pci_debug & SOF_PCI_DISABLE_PM_RUNTIME))
+ pm_runtime_get_noresume(&pci->dev);
+
+ /* release pci regions and disable device */
+ pci_release_regions(pci);
+}
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_MERRIFIELD)
+ { PCI_DEVICE(0x8086, 0x119a),
+ .driver_data = (unsigned long)&tng_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE)
+ /* BXT-P & Apollolake */
+ { PCI_DEVICE(0x8086, 0x5a98),
+ .driver_data = (unsigned long)&bxt_desc},
+ { PCI_DEVICE(0x8086, 0x1a98),
+ .driver_data = (unsigned long)&bxt_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_GEMINILAKE)
+ { PCI_DEVICE(0x8086, 0x3198),
+ .driver_data = (unsigned long)&glk_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_CANNONLAKE)
+ { PCI_DEVICE(0x8086, 0x9dc8),
+ .driver_data = (unsigned long)&cnl_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COFFEELAKE)
+ { PCI_DEVICE(0x8086, 0xa348),
+ .driver_data = (unsigned long)&cfl_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE)
+ { PCI_DEVICE(0x8086, 0x34C8), /* ICL-LP */
+ .driver_data = (unsigned long)&icl_desc},
+ { PCI_DEVICE(0x8086, 0x3dc8), /* ICL-H */
+ .driver_data = (unsigned long)&icl_desc},
+
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
+ { PCI_DEVICE(0x8086, 0x38c8),
+ .driver_data = (unsigned long)&jsl_desc},
+ { PCI_DEVICE(0x8086, 0x4dc8),
+ .driver_data = (unsigned long)&jsl_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
+ { PCI_DEVICE(0x8086, 0x02c8), /* CML-LP */
+ .driver_data = (unsigned long)&cml_desc},
+ { PCI_DEVICE(0x8086, 0x06c8), /* CML-H */
+ .driver_data = (unsigned long)&cml_desc},
+ { PCI_DEVICE(0x8086, 0xa3f0), /* CML-S */
+ .driver_data = (unsigned long)&cml_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
+ { PCI_DEVICE(0x8086, 0xa0c8), /* TGL-LP */
+ .driver_data = (unsigned long)&tgl_desc},
+ { PCI_DEVICE(0x8086, 0x43c8), /* TGL-H */
+ .driver_data = (unsigned long)&tglh_desc},
+
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_ELKHARTLAKE)
+ { PCI_DEVICE(0x8086, 0x4b55),
+ .driver_data = (unsigned long)&ehl_desc},
+ { PCI_DEVICE(0x8086, 0x4b58),
+ .driver_data = (unsigned long)&ehl_desc},
+#endif
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_driver = {
+ .name = "sof-audio-pci",
+ .id_table = sof_pci_ids,
+ .probe = sof_pci_probe,
+ .remove = sof_pci_remove,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_MERRIFIELD);
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
new file mode 100644
index 000000000..0aed2a7ab
--- /dev/null
+++ b/sound/soc/sof/sof-priv.h
@@ -0,0 +1,589 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOUND_SOC_SOF_PRIV_H
+#define __SOUND_SOC_SOF_PRIV_H
+
+#include <linux/device.h>
+#include <sound/hdaudio.h>
+#include <sound/sof.h>
+#include <sound/sof/info.h>
+#include <sound/sof/pm.h>
+#include <sound/sof/trace.h>
+#include <uapi/sound/sof/fw.h>
+
+/* debug flags */
+#define SOF_DBG_ENABLE_TRACE BIT(0)
+#define SOF_DBG_REGS BIT(1)
+#define SOF_DBG_MBOX BIT(2)
+#define SOF_DBG_TEXT BIT(3)
+#define SOF_DBG_PCI BIT(4)
+#define SOF_DBG_RETAIN_CTX BIT(5) /* prevent DSP D3 on FW exception */
+
+/* global debug state set by SOF_DBG_ flags */
+extern int sof_core_debug;
+
+/* max BARs mmaped devices can use */
+#define SND_SOF_BARS 8
+
+/* time in ms for runtime suspend delay */
+#define SND_SOF_SUSPEND_DELAY_MS 2000
+
+/* DMA buffer size for trace */
+#define DMA_BUF_SIZE_FOR_TRACE (PAGE_SIZE * 16)
+
+#define SOF_IPC_DSP_REPLY 0
+#define SOF_IPC_HOST_REPLY 1
+
+/* convenience constructor for DAI driver streams */
+#define SOF_DAI_STREAM(sname, scmin, scmax, srates, sfmt) \
+ {.stream_name = sname, .channels_min = scmin, .channels_max = scmax, \
+ .rates = srates, .formats = sfmt}
+
+#define SOF_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_FLOAT)
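+
+/*
+ * Illustrative sketch only: SOF_DAI_STREAM() initializes a
+ * struct snd_soc_pcm_stream, e.g. in a DAI driver definition (the
+ * stream name and rate mask are hypothetical):
+ *
+ *	.playback = SOF_DAI_STREAM("Example Playback", 1, 2,
+ *				   SNDRV_PCM_RATE_48000, SOF_FORMATS),
+ */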
+
+#define ENABLE_DEBUGFS_CACHEBUF \
+ (IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE) || \
+ IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST))
+
+/* So far the primary core on all DSPs has ID 0 */
+#define SOF_DSP_PRIMARY_CORE 0
+
+/* DSP power state */
+enum sof_dsp_power_states {
+ SOF_DSP_PM_D0,
+ SOF_DSP_PM_D1,
+ SOF_DSP_PM_D2,
+ SOF_DSP_PM_D3_HOT,
+ SOF_DSP_PM_D3,
+ SOF_DSP_PM_D3_COLD,
+};
+
+struct sof_dsp_power_state {
+ u32 state;
+ u32 substate; /* platform-specific */
+};
+
+/* System suspend target state */
+enum sof_system_suspend_state {
+ SOF_SUSPEND_NONE = 0,
+ SOF_SUSPEND_S0IX,
+ SOF_SUSPEND_S3,
+};
+
+struct snd_sof_dev;
+struct snd_sof_ipc_msg;
+struct snd_sof_ipc;
+struct snd_sof_debugfs_map;
+struct snd_soc_tplg_ops;
+struct snd_soc_component;
+struct snd_sof_pdata;
+
+/*
+ * SOF DSP HW abstraction operations.
+ * Used to abstract DSP HW architecture and any IO busses between host CPU
+ * and DSP device(s).
+ */
+struct snd_sof_dsp_ops {
+
+ /* probe and remove */
+ int (*probe)(struct snd_sof_dev *sof_dev); /* mandatory */
+ int (*remove)(struct snd_sof_dev *sof_dev); /* optional */
+
+ /* DSP core boot / reset */
+ int (*run)(struct snd_sof_dev *sof_dev); /* mandatory */
+ int (*stall)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*reset)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*core_power_up)(struct snd_sof_dev *sof_dev,
+ unsigned int core_mask); /* optional */
+ int (*core_power_down)(struct snd_sof_dev *sof_dev,
+ unsigned int core_mask); /* optional */
+
+ /*
+ * Register IO: only used by respective drivers themselves,
+ * TODO: consider removing these operations and calling respective
+ * implementations directly
+ */
+ void (*write)(struct snd_sof_dev *sof_dev, void __iomem *addr,
+ u32 value); /* optional */
+ u32 (*read)(struct snd_sof_dev *sof_dev,
+ void __iomem *addr); /* optional */
+ void (*write64)(struct snd_sof_dev *sof_dev, void __iomem *addr,
+ u64 value); /* optional */
+ u64 (*read64)(struct snd_sof_dev *sof_dev,
+ void __iomem *addr); /* optional */
+
+ /* memcpy IO */
+ void (*block_read)(struct snd_sof_dev *sof_dev, u32 bar,
+ u32 offset, void *dest,
+ size_t size); /* mandatory */
+ void (*block_write)(struct snd_sof_dev *sof_dev, u32 bar,
+ u32 offset, void *src,
+ size_t size); /* mandatory */
+
+ /* doorbell */
+ irqreturn_t (*irq_handler)(int irq, void *context); /* optional */
+ irqreturn_t (*irq_thread)(int irq, void *context); /* optional */
+
+ /* ipc */
+ int (*send_msg)(struct snd_sof_dev *sof_dev,
+ struct snd_sof_ipc_msg *msg); /* mandatory */
+
+ /* FW loading */
+ int (*load_firmware)(struct snd_sof_dev *sof_dev); /* mandatory */
+ int (*load_module)(struct snd_sof_dev *sof_dev,
+ struct snd_sof_mod_hdr *hdr); /* optional */
+ /*
+ * FW ready checks for ABI compatibility and creates
+ * memory windows at first boot
+ */
+ int (*fw_ready)(struct snd_sof_dev *sdev, u32 msg_id); /* mandatory */
+
+ /* connect pcm substream to a host stream */
+ int (*pcm_open)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream); /* optional */
+ /* disconnect pcm substream from a host stream */
+ int (*pcm_close)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream); /* optional */
+
+ /* host stream hw params */
+ int (*pcm_hw_params)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct sof_ipc_stream_params *ipc_params); /* optional */
+
+ /* host stream hw_free */
+ int (*pcm_hw_free)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream); /* optional */
+
+ /* host stream trigger */
+ int (*pcm_trigger)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ int cmd); /* optional */
+
+ /* host stream pointer */
+ snd_pcm_uframes_t (*pcm_pointer)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream); /* optional */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+ /* Except for probe_pointer, all probe ops are mandatory */
+ int (*probe_assign)(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai); /* mandatory */
+ int (*probe_free)(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai); /* mandatory */
+ int (*probe_set_params)(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params,
+ struct snd_soc_dai *dai); /* mandatory */
+ int (*probe_trigger)(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream, int cmd,
+ struct snd_soc_dai *dai); /* mandatory */
+ int (*probe_pointer)(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp,
+ struct snd_soc_dai *dai); /* optional */
+#endif
+
+ /* host read DSP stream data */
+ void (*ipc_msg_data)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz); /* mandatory */
+
+ /* host configure DSP HW parameters */
+ int (*ipc_pcm_params)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply); /* mandatory */
+
+ /* pre/post firmware run */
+ int (*pre_fw_run)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*post_fw_run)(struct snd_sof_dev *sof_dev); /* optional */
+
+ /* DSP PM */
+ int (*suspend)(struct snd_sof_dev *sof_dev,
+ u32 target_state); /* optional */
+ int (*resume)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*runtime_suspend)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*runtime_resume)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*runtime_idle)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*set_hw_params_upon_resume)(struct snd_sof_dev *sdev); /* optional */
+ int (*set_power_state)(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state); /* optional */
+
+ /* DSP clocking */
+ int (*set_clk)(struct snd_sof_dev *sof_dev, u32 freq); /* optional */
+
+ /* debug */
+ const struct snd_sof_debugfs_map *debug_map; /* optional */
+ int debug_map_count; /* optional */
+ void (*dbg_dump)(struct snd_sof_dev *sof_dev,
+ u32 flags); /* optional */
+ void (*ipc_dump)(struct snd_sof_dev *sof_dev); /* optional */
+
+ /* host DMA trace initialization */
+ int (*trace_init)(struct snd_sof_dev *sdev,
+ u32 *stream_tag); /* optional */
+ int (*trace_release)(struct snd_sof_dev *sdev); /* optional */
+ int (*trace_trigger)(struct snd_sof_dev *sdev,
+ int cmd); /* optional */
+
+ /* misc */
+ int (*get_bar_index)(struct snd_sof_dev *sdev,
+ u32 type); /* optional */
+ int (*get_mailbox_offset)(struct snd_sof_dev *sdev); /* mandatory for common loader code */
+ int (*get_window_offset)(struct snd_sof_dev *sdev,
+ u32 id); /* mandatory for common loader code */
+
+ /* machine driver ops */
+ int (*machine_register)(struct snd_sof_dev *sdev,
+ void *pdata); /* optional */
+ void (*machine_unregister)(struct snd_sof_dev *sdev,
+ void *pdata); /* optional */
+ void (*machine_select)(struct snd_sof_dev *sdev); /* optional */
+ void (*set_mach_params)(const struct snd_soc_acpi_mach *mach,
+ struct device *dev); /* optional */
+
+ /* DAI ops */
+ struct snd_soc_dai_driver *drv;
+ int num_drv;
+
+ /* ALSA HW info flags, will be stored in snd_pcm_runtime.hw.info */
+ u32 hw_info;
+
+ const struct sof_arch_ops *arch_ops;
+};
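+
+/*
+ * Illustrative sketch, not a real platform: a minimal set of snd_sof_dsp_ops
+ * only needs the mandatory callbacks above, e.g.
+ *
+ *	static const struct snd_sof_dsp_ops my_dsp_ops = {
+ *		.probe		= my_probe,
+ *		.run		= my_run,
+ *		.block_read	= sof_block_read,
+ *		.block_write	= sof_block_write,
+ *		.send_msg	= my_send_msg,
+ *		.load_firmware	= snd_sof_load_firmware_memcpy,
+ *		.fw_ready	= sof_fw_ready,
+ *		.ipc_msg_data	= my_ipc_msg_data,
+ *		.ipc_pcm_params	= my_ipc_pcm_params,
+ *	};
+ *
+ * where the my_* handlers are hypothetical platform callbacks and the sof_*
+ * and snd_sof_* helpers are the generic implementations declared below.
+ */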
+
+/* DSP architecture specific callbacks for oops and stack dumps */
+struct sof_arch_ops {
+ void (*dsp_oops)(struct snd_sof_dev *sdev, void *oops);
+ void (*dsp_stack)(struct snd_sof_dev *sdev, void *oops,
+ u32 *stack, u32 stack_words);
+};
+
+#define sof_arch_ops(sdev) ((sdev)->pdata->desc->ops->arch_ops)
+
+/* DSP device HW descriptor mapping between bus ID and ops */
+struct sof_ops_table {
+ const struct sof_dev_desc *desc;
+ const struct snd_sof_dsp_ops *ops;
+};
+
+enum sof_dfsentry_type {
+ SOF_DFSENTRY_TYPE_IOMEM = 0,
+ SOF_DFSENTRY_TYPE_BUF,
+};
+
+enum sof_debugfs_access_type {
+ SOF_DEBUGFS_ACCESS_ALWAYS = 0,
+ SOF_DEBUGFS_ACCESS_D0_ONLY,
+};
+
+/* FS entry for debug files that can expose DSP memories, registers */
+struct snd_sof_dfsentry {
+ size_t size;
+ enum sof_dfsentry_type type;
+ /*
+ * access_type specifies if the DSP resource (memory, register, etc.)
+ * is always accessible or only accessible when the DSP is in D0.
+ */
+ enum sof_debugfs_access_type access_type;
+#if ENABLE_DEBUGFS_CACHEBUF
+ char *cache_buf; /* buffer to cache the contents of debugfs memory */
+#endif
+ struct snd_sof_dev *sdev;
+ struct list_head list; /* list in sdev dfsentry list */
+ union {
+ void __iomem *io_mem;
+ void *buf;
+ };
+};
+
+/* Debug mapping for any DSP memory or registers that can be used for debug */
+struct snd_sof_debugfs_map {
+ const char *name;
+ u32 bar;
+ u32 offset;
+ u32 size;
+ /*
+ * access_type specifies if the memory is always accessible
+ * or if it is accessible only when the DSP is in D0.
+ */
+ enum sof_debugfs_access_type access_type;
+};
+
+/* mailbox descriptor, used for host <-> DSP IPC */
+struct snd_sof_mailbox {
+ u32 offset;
+ size_t size;
+};
+
+/* IPC message descriptor for host <-> DSP IO */
+struct snd_sof_ipc_msg {
+ /* message data */
+ u32 header;
+ void *msg_data;
+ void *reply_data;
+ size_t msg_size;
+ size_t reply_size;
+ int reply_error;
+
+ wait_queue_head_t waitq;
+ bool ipc_complete;
+};
+
+enum snd_sof_fw_state {
+ SOF_FW_BOOT_NOT_STARTED = 0,
+ SOF_FW_BOOT_PREPARE,
+ SOF_FW_BOOT_IN_PROGRESS,
+ SOF_FW_BOOT_FAILED,
+ SOF_FW_BOOT_READY_FAILED, /* firmware booted but fw_ready op failed */
+ SOF_FW_BOOT_COMPLETE,
+};
+
+/*
+ * SOF Device Level.
+ */
+struct snd_sof_dev {
+ struct device *dev;
+ spinlock_t ipc_lock; /* lock for IPC users */
+ spinlock_t hw_lock; /* lock for HW IO access */
+
+ /*
+ * ASoC components. plat_drv fields are set dynamically so
+ * can't use const
+ */
+ struct snd_soc_component_driver plat_drv;
+
+ /* current DSP power state */
+ struct sof_dsp_power_state dsp_power_state;
+
+ /* Intended power target of system suspend */
+ enum sof_system_suspend_state system_suspend_target;
+
+ /* DSP firmware boot */
+ wait_queue_head_t boot_wait;
+ enum snd_sof_fw_state fw_state;
+ bool first_boot;
+
+ /* work queue in case the probe is implemented in two steps */
+ struct work_struct probe_work;
+
+ /* DSP HW differentiation */
+ struct snd_sof_pdata *pdata;
+
+ /* IPC */
+ struct snd_sof_ipc *ipc;
+ struct snd_sof_mailbox dsp_box; /* DSP initiated IPC */
+ struct snd_sof_mailbox host_box; /* Host initiated IPC */
+ struct snd_sof_mailbox stream_box; /* Stream position update */
+ struct snd_sof_mailbox debug_box; /* Debug info updates */
+ struct snd_sof_ipc_msg *msg;
+ int ipc_irq;
+ u32 next_comp_id; /* monotonic - reset during S3 */
+
+ /* memory bases for mmaped DSPs - set by dsp_init() */
+ void __iomem *bar[SND_SOF_BARS]; /* DSP base address */
+ int mmio_bar;
+ int mailbox_bar;
+ size_t dsp_oops_offset;
+
+ /* debug */
+ struct dentry *debugfs_root;
+ struct list_head dfsentry_list;
+
+ /* firmware loader */
+ struct snd_dma_buffer dmab;
+ struct snd_dma_buffer dmab_bdl;
+ struct sof_ipc_fw_ready fw_ready;
+ struct sof_ipc_fw_version fw_version;
+ struct sof_ipc_cc_version *cc_version;
+
+ /* topology */
+ struct snd_soc_tplg_ops *tplg_ops;
+ struct list_head pcm_list;
+ struct list_head kcontrol_list;
+ struct list_head widget_list;
+ struct list_head dai_list;
+ struct list_head route_list;
+ struct snd_soc_component *component;
+ u32 enabled_cores_mask; /* keep track of enabled cores */
+
+ /* FW configuration */
+ struct sof_ipc_window *info_window;
+
+ /* IPC timeouts in ms */
+ int ipc_timeout;
+ int boot_timeout;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+ unsigned int extractor_stream_tag;
+#endif
+
+ /* DMA for Trace */
+ struct snd_dma_buffer dmatb;
+ struct snd_dma_buffer dmatp;
+ int dma_trace_pages;
+ wait_queue_head_t trace_sleep;
+ u32 host_offset;
+ bool dtrace_is_supported; /* set with Kconfig or module parameter */
+ bool dtrace_is_enabled;
+ bool dtrace_error;
+ bool dtrace_draining;
+
+ bool msi_enabled;
+
+ void *private; /* core does not touch this */
+};
+
+/*
+ * Device Level.
+ */
+
+int snd_sof_device_probe(struct device *dev, struct snd_sof_pdata *plat_data);
+int snd_sof_device_remove(struct device *dev);
+
+int snd_sof_runtime_suspend(struct device *dev);
+int snd_sof_runtime_resume(struct device *dev);
+int snd_sof_runtime_idle(struct device *dev);
+int snd_sof_resume(struct device *dev);
+int snd_sof_suspend(struct device *dev);
+int snd_sof_dsp_power_down_notify(struct snd_sof_dev *sdev);
+int snd_sof_prepare(struct device *dev);
+void snd_sof_complete(struct device *dev);
+
+void snd_sof_new_platform_drv(struct snd_sof_dev *sdev);
+
+int snd_sof_create_page_table(struct device *dev,
+ struct snd_dma_buffer *dmab,
+ unsigned char *page_table, size_t size);
+
+/*
+ * Firmware loading.
+ */
+int snd_sof_load_firmware(struct snd_sof_dev *sdev);
+int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev);
+int snd_sof_load_firmware_memcpy(struct snd_sof_dev *sdev);
+int snd_sof_run_firmware(struct snd_sof_dev *sdev);
+int snd_sof_parse_module_memcpy(struct snd_sof_dev *sdev,
+ struct snd_sof_mod_hdr *module);
+void snd_sof_fw_unload(struct snd_sof_dev *sdev);
+int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset);
+
+/*
+ * IPC low level APIs.
+ */
+struct snd_sof_ipc *snd_sof_ipc_init(struct snd_sof_dev *sdev);
+void snd_sof_ipc_free(struct snd_sof_dev *sdev);
+void snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id);
+void snd_sof_ipc_msgs_rx(struct snd_sof_dev *sdev);
+int snd_sof_ipc_stream_pcm_params(struct snd_sof_dev *sdev,
+ struct sof_ipc_pcm_params *params);
+int snd_sof_dsp_mailbox_init(struct snd_sof_dev *sdev, u32 dspbox,
+ size_t dspbox_size, u32 hostbox,
+ size_t hostbox_size);
+int snd_sof_ipc_valid(struct snd_sof_dev *sdev);
+int sof_ipc_tx_message(struct snd_sof_ipc *ipc, u32 header,
+ void *msg_data, size_t msg_bytes, void *reply_data,
+ size_t reply_bytes);
+int sof_ipc_tx_message_no_pm(struct snd_sof_ipc *ipc, u32 header,
+ void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes);
+
+/*
+ * Trace/debug
+ */
+int snd_sof_init_trace(struct snd_sof_dev *sdev);
+void snd_sof_release_trace(struct snd_sof_dev *sdev);
+void snd_sof_free_trace(struct snd_sof_dev *sdev);
+int snd_sof_dbg_init(struct snd_sof_dev *sdev);
+void snd_sof_free_debug(struct snd_sof_dev *sdev);
+int snd_sof_debugfs_io_item(struct snd_sof_dev *sdev,
+ void __iomem *base, size_t size,
+ const char *name,
+ enum sof_debugfs_access_type access_type);
+int snd_sof_debugfs_buf_item(struct snd_sof_dev *sdev,
+ void *base, size_t size,
+ const char *name, mode_t mode);
+int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
+ struct sof_ipc_dma_trace_posn *posn);
+void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev);
+void snd_sof_get_status(struct snd_sof_dev *sdev, u32 panic_code,
+ u32 tracep_code, void *oops,
+ struct sof_ipc_panic_info *panic_info,
+ void *stack, size_t stack_words);
+int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev);
+void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev);
+
+/*
+ * Platform specific ops.
+ */
+extern struct snd_compress_ops sof_compressed_ops;
+
+/*
+ * DSP Architectures.
+ */
+static inline void sof_stack(struct snd_sof_dev *sdev, void *oops, u32 *stack,
+ u32 stack_words)
+{
+ sof_arch_ops(sdev)->dsp_stack(sdev, oops, stack, stack_words);
+}
+
+static inline void sof_oops(struct snd_sof_dev *sdev, void *oops)
+{
+ if (sof_arch_ops(sdev)->dsp_oops)
+ sof_arch_ops(sdev)->dsp_oops(sdev, oops);
+}
+
+extern const struct sof_arch_ops sof_xtensa_arch_ops;
+
+/*
+ * Utilities
+ */
+void sof_io_write(struct snd_sof_dev *sdev, void __iomem *addr, u32 value);
+void sof_io_write64(struct snd_sof_dev *sdev, void __iomem *addr, u64 value);
+u32 sof_io_read(struct snd_sof_dev *sdev, void __iomem *addr);
+u64 sof_io_read64(struct snd_sof_dev *sdev, void __iomem *addr);
+void sof_mailbox_write(struct snd_sof_dev *sdev, u32 offset,
+ void *message, size_t bytes);
+void sof_mailbox_read(struct snd_sof_dev *sdev, u32 offset,
+ void *message, size_t bytes);
+void sof_block_write(struct snd_sof_dev *sdev, u32 bar, u32 offset, void *src,
+ size_t size);
+void sof_block_read(struct snd_sof_dev *sdev, u32 bar, u32 offset, void *dest,
+ size_t size);
+
+int sof_fw_ready(struct snd_sof_dev *sdev, u32 msg_id);
+
+void intel_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz);
+int intel_ipc_pcm_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply);
+
+int intel_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int intel_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+
+int sof_machine_check(struct snd_sof_dev *sdev);
+
+#define sof_dev_dbg_or_err(dev, is_err, fmt, ...) \
+ do { \
+ if (is_err) \
+ dev_err(dev, "error: " fmt, __VA_ARGS__); \
+ else \
+ dev_dbg(dev, fmt, __VA_ARGS__); \
+ } while (0)
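+
+/*
+ * Illustrative use (hypothetical call site):
+ *	sof_dev_dbg_or_err(sdev->dev, ret < 0, "boot took %d ms\n", ms);
+ * logs with dev_err() and an "error: " prefix when the condition is true,
+ * and with dev_dbg() otherwise.
+ */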
+
+#endif
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
new file mode 100644
index 000000000..b6327c30c
--- /dev/null
+++ b/sound/soc/sof/topology.c
@@ -0,0 +1,3758 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/workqueue.h>
+#include <sound/tlv.h>
+#include <sound/pcm_params.h>
+#include <uapi/sound/sof/tokens.h>
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ops.h"
+
+#define COMP_ID_UNASSIGNED 0xffffffff
+/*
+ * Constants used in the computation of linear volume gain from dB gain.
+ * 20th root of 10 in Q1.16 fixed-point notation
+ */
+#define VOL_TWENTIETH_ROOT_OF_TEN 73533
+/* 40th root of 10 in Q1.16 fixed-point notation */
+#define VOL_FORTIETH_ROOT_OF_TEN 69419
+/*
+ * Volume fractional word length; defining it as 16 sets
+ * the volume linear gain value to use Qx.16 format
+ */
+#define VOLUME_FWL 16
+/* 0.5 dB step value in topology TLV */
+#define VOL_HALF_DB_STEP 50
+/* Full volume for default values */
+#define VOL_ZERO_DB BIT(VOLUME_FWL)
+
+/* TLV data items */
+#define TLV_ITEMS 3
+#define TLV_MIN 0
+#define TLV_STEP 1
+#define TLV_MUTE 2
+
+/* size of tplg ABI in bytes */
+#define SOF_TPLG_ABI_SIZE 3
+
+struct sof_widget_data {
+ int ctrl_type;
+ int ipc_cmd;
+ struct sof_abi_hdr *pdata;
+ struct snd_sof_control *control;
+};
+
+/* send pcm params ipc */
+static int ipc_pcm_params(struct snd_sof_widget *swidget, int dir)
+{
+ struct sof_ipc_pcm_params_reply ipc_params_reply;
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_pcm_params pcm;
+ struct snd_pcm_hw_params *params;
+ struct snd_sof_pcm *spcm;
+ int ret;
+
+ memset(&pcm, 0, sizeof(pcm));
+
+ /* get runtime PCM params using widget's stream name */
+ spcm = snd_sof_find_spcm_name(scomp, swidget->widget->sname);
+ if (!spcm) {
+ dev_err(scomp->dev, "error: cannot find PCM for %s\n",
+ swidget->widget->name);
+ return -EINVAL;
+ }
+
+ params = &spcm->params[dir];
+
+ /* set IPC PCM params */
+ pcm.hdr.size = sizeof(pcm);
+ pcm.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_PARAMS;
+ pcm.comp_id = swidget->comp_id;
+ pcm.params.hdr.size = sizeof(pcm.params);
+ pcm.params.direction = dir;
+ pcm.params.sample_valid_bytes = params_width(params) >> 3;
+ pcm.params.buffer_fmt = SOF_IPC_BUFFER_INTERLEAVED;
+ pcm.params.rate = params_rate(params);
+ pcm.params.channels = params_channels(params);
+ pcm.params.host_period_bytes = params_period_bytes(params);
+
+ /* set format */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S16_LE;
+ break;
+ case SNDRV_PCM_FORMAT_S24:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S24_4LE;
+ break;
+ case SNDRV_PCM_FORMAT_S32:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S32_LE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message(sdev->ipc, pcm.hdr.cmd, &pcm, sizeof(pcm),
+ &ipc_params_reply, sizeof(ipc_params_reply));
+ if (ret < 0)
+ dev_err(scomp->dev, "error: pcm params failed for %s\n",
+ swidget->widget->name);
+
+ return ret;
+}
+
+/* send stream trigger ipc */
+static int ipc_trigger(struct snd_sof_widget *swidget, int cmd)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_stream stream;
+ struct sof_ipc_reply reply;
+ int ret;
+
+ /* set IPC stream params */
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | cmd;
+ stream.comp_id = swidget->comp_id;
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
+ sizeof(stream), &reply, sizeof(reply));
+ if (ret < 0)
+ dev_err(scomp->dev, "error: failed to trigger %s\n",
+ swidget->widget->name);
+
+ return ret;
+}
+
+static int sof_keyword_dapm_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_sof_widget *swidget = w->dobj.private;
+ struct snd_soc_component *scomp;
+ int stream = SNDRV_PCM_STREAM_CAPTURE;
+ struct snd_sof_pcm *spcm;
+ int ret = 0;
+
+ if (!swidget)
+ return 0;
+
+ scomp = swidget->scomp;
+
+ dev_dbg(scomp->dev, "received event %d for widget %s\n",
+ event, w->name);
+
+ /* get runtime PCM params using widget's stream name */
+ spcm = snd_sof_find_spcm_name(scomp, swidget->widget->sname);
+ if (!spcm) {
+ dev_err(scomp->dev, "error: cannot find PCM for %s\n",
+ swidget->widget->name);
+ return -EINVAL;
+ }
+
+ /* process events */
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (spcm->stream[stream].suspend_ignored) {
+ dev_dbg(scomp->dev, "PRE_PMU event ignored, KWD pipeline is already RUNNING\n");
+ return 0;
+ }
+
+ /* set pcm params */
+ ret = ipc_pcm_params(swidget, stream);
+ if (ret < 0) {
+ dev_err(scomp->dev,
+ "error: failed to set pcm params for widget %s\n",
+ swidget->widget->name);
+ break;
+ }
+
+ /* start trigger */
+ ret = ipc_trigger(swidget, SOF_IPC_STREAM_TRIG_START);
+ if (ret < 0)
+ dev_err(scomp->dev,
+ "error: failed to trigger widget %s\n",
+ swidget->widget->name);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (spcm->stream[stream].suspend_ignored) {
+ dev_dbg(scomp->dev, "POST_PMD event ignored, KWD pipeline will remain RUNNING\n");
+ return 0;
+ }
+
+ /* stop trigger */
+ ret = ipc_trigger(swidget, SOF_IPC_STREAM_TRIG_STOP);
+ if (ret < 0)
+ dev_err(scomp->dev,
+ "error: failed to trigger widget %s\n",
+ swidget->widget->name);
+
+ /* pcm free */
+ ret = ipc_trigger(swidget, SOF_IPC_STREAM_PCM_FREE);
+ if (ret < 0)
+ dev_err(scomp->dev,
+ "error: failed to trigger widget %s\n",
+ swidget->widget->name);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* event handlers for keyword detect component */
+static const struct snd_soc_tplg_widget_events sof_kwd_events[] = {
+ {SOF_KEYWORD_DETECT_DAPM_EVENT, sof_keyword_dapm_event},
+};
+
+static inline int get_tlv_data(const int *p, int tlv[TLV_ITEMS])
+{
+ /* we only support dB scale TLV type at the moment */
+ if ((int)p[SNDRV_CTL_TLVO_TYPE] != SNDRV_CTL_TLVT_DB_SCALE)
+ return -EINVAL;
+
+ /* min value in topology tlv data is multiplied by 100 */
+ tlv[TLV_MIN] = (int)p[SNDRV_CTL_TLVO_DB_SCALE_MIN] / 100;
+
+ /* volume steps */
+ tlv[TLV_STEP] = (int)(p[SNDRV_CTL_TLVO_DB_SCALE_MUTE_AND_STEP] &
+ TLV_DB_SCALE_MASK);
+
+ /* mute ON/OFF */
+ if ((p[SNDRV_CTL_TLVO_DB_SCALE_MUTE_AND_STEP] &
+ TLV_DB_SCALE_MUTE) == 0)
+ tlv[TLV_MUTE] = 0;
+ else
+ tlv[TLV_MUTE] = 1;
+
+ return 0;
+}
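+
+/*
+ * Worked example (hypothetical TLV): a dB-scale TLV declaring min = -5000
+ * (-50 dB * 100), a step of 100 (1 dB * 100) and the mute flag set yields
+ * tlv[TLV_MIN] = -50, tlv[TLV_STEP] = 100 and tlv[TLV_MUTE] = 1.
+ */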
+
+/*
+ * Function to truncate an unsigned 64-bit number
+ * by x bits and return a 32-bit unsigned number. This
+ * function also takes care of rounding while truncating.
+ */
+static inline u32 vol_shift_64(u64 i, u32 x)
+{
+ /* do not truncate more than 32 bits */
+ if (x > 32)
+ x = 32;
+
+ if (x == 0)
+ return (u32)i;
+
+ return (u32)(((i >> (x - 1)) + 1) >> 1);
+}
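+
+/*
+ * Worked example: vol_shift_64(384, 8) returns ((384 >> 7) + 1) >> 1 = 2,
+ * i.e. 384 / 256 = 1.5 rounded to the nearest integer.
+ */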
+
+/*
+ * Function to compute a ^ exp where:
+ * a is a fractional number represented by a fixed-point
+ * integer with a fractional word length of "fwl"
+ * exp is an integer
+ * fwl is the fractional word length
+ * Return value is a fractional number represented by a
+ * fixed-point integer with a fractional word length of "fwl"
+ */
+static u32 vol_pow32(u32 a, int exp, u32 fwl)
+{
+ int i, iter;
+ u32 power = 1 << fwl;
+ u64 numerator;
+
+ /* if exponent is 0, return 1 */
+ if (exp == 0)
+ return power;
+
+ /* determine the number of iterations based on the exponent */
+ if (exp < 0)
+ iter = exp * -1;
+ else
+ iter = exp;
+
+ /* multiply by a "iter" times to compute the power */
+ for (i = 0; i < iter; i++) {
+ /*
+ * Product of 2 Qx.fwl fixed-point numbers yields a Q2*x.2*fwl
+ * Truncate product back to fwl fractional bits with rounding
+ */
+ power = vol_shift_64((u64)power * a, fwl);
+ }
+
+ if (exp > 0) {
+ /* if exp is positive, return the result */
+ return power;
+ }
+
+ /* if exp is negative, return the multiplicative inverse */
+ numerator = (u64)1 << (fwl << 1);
+ do_div(numerator, power);
+
+ return (u32)numerator;
+}
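+
+/*
+ * Worked example: vol_pow32(VOL_TWENTIETH_ROOT_OF_TEN, 40, VOLUME_FWL)
+ * multiplies the Q1.16 representation of 10^(1/20) by itself 40 times and
+ * returns approximately 100 << 16, i.e. the linear gain for +40 dB. A
+ * negative exponent returns the reciprocal instead.
+ */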
+
+/*
+ * Function to calculate volume gain from TLV data.
+ * This function can only handle gain steps that are multiples of 0.5 dB
+ */
+static u32 vol_compute_gain(u32 value, int *tlv)
+{
+ int dB_gain;
+ u32 linear_gain;
+ int f_step;
+
+ /* mute volume */
+ if (value == 0 && tlv[TLV_MUTE])
+ return 0;
+
+ /*
+ * compute dB gain from tlv. tlv_step
+ * in topology is multiplied by 100
+ */
+ dB_gain = tlv[TLV_MIN] + (value * tlv[TLV_STEP]) / 100;
+
+ /*
+ * compute linear gain represented by fixed-point
+ * int with VOLUME_FWL fractional bits
+ */
+ linear_gain = vol_pow32(VOL_TWENTIETH_ROOT_OF_TEN, dB_gain, VOLUME_FWL);
+
+ /* extract the fractional part of volume step */
+ f_step = tlv[TLV_STEP] - (tlv[TLV_STEP] / 100);
+
+ /* if volume step is an odd multiple of 0.5 dB */
+ if (f_step == VOL_HALF_DB_STEP && (value & 1))
+ linear_gain = vol_shift_64((u64)linear_gain *
+ VOL_FORTIETH_ROOT_OF_TEN,
+ VOLUME_FWL);
+
+ return linear_gain;
+}
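+
+/*
+ * Worked example (hypothetical TLV): with tlv[TLV_MIN] = -50, a 1 dB step
+ * (tlv[TLV_STEP] = 100) and value = 20, dB_gain is -50 + 20 = -30 dB and the
+ * returned Q1.16 linear gain is roughly 0.0316 * 65536, i.e. about 2072.
+ */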
+
+/*
+ * Set up volume table for kcontrols from tlv data
+ * "size" specifies the number of entries in the table
+ */
+static int set_up_volume_table(struct snd_sof_control *scontrol,
+ int tlv[TLV_ITEMS], int size)
+{
+ int j;
+
+ /* init the volume table */
+ scontrol->volume_table = kcalloc(size, sizeof(u32), GFP_KERNEL);
+ if (!scontrol->volume_table)
+ return -ENOMEM;
+
+ /* populate the volume table */
+ for (j = 0; j < size ; j++)
+ scontrol->volume_table[j] = vol_compute_gain(j, tlv);
+
+ return 0;
+}
+
+struct sof_dai_types {
+ const char *name;
+ enum sof_ipc_dai_type type;
+};
+
+static const struct sof_dai_types sof_dais[] = {
+ {"SSP", SOF_DAI_INTEL_SSP},
+ {"HDA", SOF_DAI_INTEL_HDA},
+ {"DMIC", SOF_DAI_INTEL_DMIC},
+ {"ALH", SOF_DAI_INTEL_ALH},
+ {"SAI", SOF_DAI_IMX_SAI},
+ {"ESAI", SOF_DAI_IMX_ESAI},
+};
+
+static enum sof_ipc_dai_type find_dai(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sof_dais); i++) {
+ if (strcmp(name, sof_dais[i].name) == 0)
+ return sof_dais[i].type;
+ }
+
+ return SOF_DAI_INTEL_NONE;
+}
+
+/*
+ * Supported Frame format types and lookup, add new ones to end of list.
+ */
+
+struct sof_frame_types {
+ const char *name;
+ enum sof_ipc_frame frame;
+};
+
+static const struct sof_frame_types sof_frames[] = {
+ {"s16le", SOF_IPC_FRAME_S16_LE},
+ {"s24le", SOF_IPC_FRAME_S24_4LE},
+ {"s32le", SOF_IPC_FRAME_S32_LE},
+ {"float", SOF_IPC_FRAME_FLOAT},
+};
+
+static enum sof_ipc_frame find_format(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sof_frames); i++) {
+ if (strcmp(name, sof_frames[i].name) == 0)
+ return sof_frames[i].frame;
+ }
+
+ /* use s32le if nothing is specified */
+ return SOF_IPC_FRAME_S32_LE;
+}
+
+struct sof_process_types {
+ const char *name;
+ enum sof_ipc_process_type type;
+ enum sof_comp_type comp_type;
+};
+
+static const struct sof_process_types sof_process[] = {
+ {"EQFIR", SOF_PROCESS_EQFIR, SOF_COMP_EQ_FIR},
+ {"EQIIR", SOF_PROCESS_EQIIR, SOF_COMP_EQ_IIR},
+ {"KEYWORD_DETECT", SOF_PROCESS_KEYWORD_DETECT, SOF_COMP_KEYWORD_DETECT},
+ {"KPB", SOF_PROCESS_KPB, SOF_COMP_KPB},
+ {"CHAN_SELECTOR", SOF_PROCESS_CHAN_SELECTOR, SOF_COMP_SELECTOR},
+ {"MUX", SOF_PROCESS_MUX, SOF_COMP_MUX},
+ {"DEMUX", SOF_PROCESS_DEMUX, SOF_COMP_DEMUX},
+ {"DCBLOCK", SOF_PROCESS_DCBLOCK, SOF_COMP_DCBLOCK},
+ {"SMART_AMP", SOF_PROCESS_SMART_AMP, SOF_COMP_SMART_AMP},
+};
+
+static enum sof_ipc_process_type find_process(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sof_process); i++) {
+ if (strcmp(name, sof_process[i].name) == 0)
+ return sof_process[i].type;
+ }
+
+ return SOF_PROCESS_NONE;
+}
+
+static enum sof_comp_type find_process_comp_type(enum sof_ipc_process_type type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sof_process); i++) {
+ if (sof_process[i].type == type)
+ return sof_process[i].comp_type;
+ }
+
+ return SOF_COMP_NONE;
+}
+
+/*
+ * Topology Token Parsing.
+ * New tokens should be added to headers and parsing tables below.
+ */
+
+struct sof_topology_token {
+ u32 token;
+ u32 type;
+ int (*get_token)(void *elem, void *object, u32 offset, u32 size);
+ u32 offset;
+ u32 size;
+};
+
+static int get_token_u32(void *elem, void *object, u32 offset, u32 size)
+{
+ struct snd_soc_tplg_vendor_value_elem *velem = elem;
+ u32 *val = (u32 *)((u8 *)object + offset);
+
+ *val = le32_to_cpu(velem->value);
+ return 0;
+}
+
+static int get_token_u16(void *elem, void *object, u32 offset, u32 size)
+{
+ struct snd_soc_tplg_vendor_value_elem *velem = elem;
+ u16 *val = (u16 *)((u8 *)object + offset);
+
+ *val = (u16)le32_to_cpu(velem->value);
+ return 0;
+}
+
+static int get_token_uuid(void *elem, void *object, u32 offset, u32 size)
+{
+ struct snd_soc_tplg_vendor_uuid_elem *velem = elem;
+ u8 *dst = (u8 *)object + offset;
+
+ memcpy(dst, velem->uuid, UUID_SIZE);
+
+ return 0;
+}
+
+static int get_token_comp_format(void *elem, void *object, u32 offset, u32 size)
+{
+ struct snd_soc_tplg_vendor_string_elem *velem = elem;
+ u32 *val = (u32 *)((u8 *)object + offset);
+
+ *val = find_format(velem->string);
+ return 0;
+}
+
+static int get_token_dai_type(void *elem, void *object, u32 offset, u32 size)
+{
+ struct snd_soc_tplg_vendor_string_elem *velem = elem;
+ u32 *val = (u32 *)((u8 *)object + offset);
+
+ *val = find_dai(velem->string);
+ return 0;
+}
+
+static int get_token_process_type(void *elem, void *object, u32 offset,
+ u32 size)
+{
+ struct snd_soc_tplg_vendor_string_elem *velem = elem;
+ u32 *val = (u32 *)((u8 *)object + offset);
+
+ *val = find_process(velem->string);
+ return 0;
+}
+
+/* Buffers */
+static const struct sof_topology_token buffer_tokens[] = {
+ {SOF_TKN_BUF_SIZE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_buffer, size), 0},
+ {SOF_TKN_BUF_CAPS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_buffer, caps), 0},
+};
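+
+/*
+ * Illustrative example (hypothetical tuple values): a WORD vendor tuple with
+ * token SOF_TKN_BUF_SIZE and value 65536 matches buffer_tokens[] above and is
+ * copied by get_token_u32() into the size field of struct sof_ipc_buffer.
+ */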
+
+/* DAI */
+static const struct sof_topology_token dai_tokens[] = {
+ {SOF_TKN_DAI_TYPE, SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_dai_type,
+ offsetof(struct sof_ipc_comp_dai, type), 0},
+ {SOF_TKN_DAI_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_dai, dai_index), 0},
+ {SOF_TKN_DAI_DIRECTION, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_dai, direction), 0},
+};
+
+/* BE DAI link */
+static const struct sof_topology_token dai_link_tokens[] = {
+ {SOF_TKN_DAI_TYPE, SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_dai_type,
+ offsetof(struct sof_ipc_dai_config, type), 0},
+ {SOF_TKN_DAI_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_config, dai_index), 0},
+};
+
+/* scheduling */
+static const struct sof_topology_token sched_tokens[] = {
+ {SOF_TKN_SCHED_PERIOD, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, period), 0},
+ {SOF_TKN_SCHED_PRIORITY, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, priority), 0},
+ {SOF_TKN_SCHED_MIPS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, period_mips), 0},
+ {SOF_TKN_SCHED_CORE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, core), 0},
+ {SOF_TKN_SCHED_FRAMES, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, frames_per_sched), 0},
+ {SOF_TKN_SCHED_TIME_DOMAIN, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, time_domain), 0},
+};
+
+/* volume */
+static const struct sof_topology_token volume_tokens[] = {
+ {SOF_TKN_VOLUME_RAMP_STEP_TYPE, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32, offsetof(struct sof_ipc_comp_volume, ramp), 0},
+ {SOF_TKN_VOLUME_RAMP_STEP_MS,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_volume, initial_ramp), 0},
+};
+
+/* SRC */
+static const struct sof_topology_token src_tokens[] = {
+ {SOF_TKN_SRC_RATE_IN, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_src, source_rate), 0},
+ {SOF_TKN_SRC_RATE_OUT, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_src, sink_rate), 0},
+};
+
+/* ASRC */
+static const struct sof_topology_token asrc_tokens[] = {
+ {SOF_TKN_ASRC_RATE_IN, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_asrc, source_rate), 0},
+ {SOF_TKN_ASRC_RATE_OUT, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_asrc, sink_rate), 0},
+ {SOF_TKN_ASRC_ASYNCHRONOUS_MODE, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32,
+ offsetof(struct sof_ipc_comp_asrc, asynchronous_mode), 0},
+ {SOF_TKN_ASRC_OPERATION_MODE, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32,
+ offsetof(struct sof_ipc_comp_asrc, operation_mode), 0},
+};
+
+/* Tone */
+static const struct sof_topology_token tone_tokens[] = {
+};
+
+/* EFFECT */
+static const struct sof_topology_token process_tokens[] = {
+ {SOF_TKN_PROCESS_TYPE, SND_SOC_TPLG_TUPLE_TYPE_STRING,
+ get_token_process_type,
+ offsetof(struct sof_ipc_comp_process, type), 0},
+};
+
+/* PCM */
+static const struct sof_topology_token pcm_tokens[] = {
+ {SOF_TKN_PCM_DMAC_CONFIG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_host, dmac_config), 0},
+};
+
+/* PCM */
+static const struct sof_topology_token stream_tokens[] = {
+ {SOF_TKN_STREAM_PLAYBACK_COMPATIBLE_D0I3,
+ SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_pcm, stream[0].d0i3_compatible), 0},
+ {SOF_TKN_STREAM_CAPTURE_COMPATIBLE_D0I3,
+ SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_pcm, stream[1].d0i3_compatible), 0},
+};
+
+/* Generic components */
+static const struct sof_topology_token comp_tokens[] = {
+ {SOF_TKN_COMP_PERIOD_SINK_COUNT,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_config, periods_sink), 0},
+ {SOF_TKN_COMP_PERIOD_SOURCE_COUNT,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_config, periods_source), 0},
+ {SOF_TKN_COMP_FORMAT,
+ SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_comp_format,
+ offsetof(struct sof_ipc_comp_config, frame_fmt), 0},
+};
+
+/* SSP */
+static const struct sof_topology_token ssp_tokens[] = {
+ {SOF_TKN_INTEL_SSP_CLKS_CONTROL,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_ssp_params, clks_control), 0},
+ {SOF_TKN_INTEL_SSP_MCLK_ID,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_ssp_params, mclk_id), 0},
+ {SOF_TKN_INTEL_SSP_SAMPLE_BITS, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32,
+ offsetof(struct sof_ipc_dai_ssp_params, sample_valid_bits), 0},
+ {SOF_TKN_INTEL_SSP_FRAME_PULSE_WIDTH, SND_SOC_TPLG_TUPLE_TYPE_SHORT,
+ get_token_u16,
+ offsetof(struct sof_ipc_dai_ssp_params, frame_pulse_width), 0},
+ {SOF_TKN_INTEL_SSP_QUIRKS, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32,
+ offsetof(struct sof_ipc_dai_ssp_params, quirks), 0},
+ {SOF_TKN_INTEL_SSP_TDM_PADDING_PER_SLOT, SND_SOC_TPLG_TUPLE_TYPE_BOOL,
+ get_token_u16,
+ offsetof(struct sof_ipc_dai_ssp_params,
+ tdm_per_slot_padding_flag), 0},
+ {SOF_TKN_INTEL_SSP_BCLK_DELAY, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32,
+ offsetof(struct sof_ipc_dai_ssp_params, bclk_delay), 0},
+
+};
+
+/* ALH */
+static const struct sof_topology_token alh_tokens[] = {
+ {SOF_TKN_INTEL_ALH_RATE,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_alh_params, rate), 0},
+ {SOF_TKN_INTEL_ALH_CH,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_alh_params, channels), 0},
+};
+
+/* DMIC */
+static const struct sof_topology_token dmic_tokens[] = {
+ {SOF_TKN_INTEL_DMIC_DRIVER_VERSION,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, driver_ipc_version),
+ 0},
+ {SOF_TKN_INTEL_DMIC_CLK_MIN,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, pdmclk_min), 0},
+ {SOF_TKN_INTEL_DMIC_CLK_MAX,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, pdmclk_max), 0},
+ {SOF_TKN_INTEL_DMIC_SAMPLE_RATE,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, fifo_fs), 0},
+ {SOF_TKN_INTEL_DMIC_DUTY_MIN,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_params, duty_min), 0},
+ {SOF_TKN_INTEL_DMIC_DUTY_MAX,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_params, duty_max), 0},
+ {SOF_TKN_INTEL_DMIC_NUM_PDM_ACTIVE,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params,
+ num_pdm_active), 0},
+ {SOF_TKN_INTEL_DMIC_FIFO_WORD_LENGTH,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_params, fifo_bits), 0},
+ {SOF_TKN_INTEL_DMIC_UNMUTE_RAMP_TIME_MS,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, unmute_ramp_time), 0},
+
+};
+
+/* ESAI */
+static const struct sof_topology_token esai_tokens[] = {
+ {SOF_TKN_IMX_ESAI_MCLK_ID,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_esai_params, mclk_id), 0},
+};
+
+/* SAI */
+static const struct sof_topology_token sai_tokens[] = {
+ {SOF_TKN_IMX_SAI_MCLK_ID,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_sai_params, mclk_id), 0},
+};
+
+/* Core tokens */
+static const struct sof_topology_token core_tokens[] = {
+ {SOF_TKN_COMP_CORE_ID,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp, core), 0},
+};
+
+/* Component extended tokens */
+static const struct sof_topology_token comp_ext_tokens[] = {
+ {SOF_TKN_COMP_UUID,
+ SND_SOC_TPLG_TUPLE_TYPE_UUID, get_token_uuid,
+ offsetof(struct sof_ipc_comp_ext, uuid), 0},
+};
+
+/*
+ * DMIC PDM Tokens
+ * SOF_TKN_INTEL_DMIC_PDM_CTRL_ID should be the first token
+ * as it increments the index while parsing the array of pdm tokens
+ * and determines the correct offset
+ */
+static const struct sof_topology_token dmic_pdm_tokens[] = {
+ {SOF_TKN_INTEL_DMIC_PDM_CTRL_ID,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, id),
+ 0},
+ {SOF_TKN_INTEL_DMIC_PDM_MIC_A_Enable,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, enable_mic_a),
+ 0},
+ {SOF_TKN_INTEL_DMIC_PDM_MIC_B_Enable,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, enable_mic_b),
+ 0},
+ {SOF_TKN_INTEL_DMIC_PDM_POLARITY_A,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, polarity_mic_a),
+ 0},
+ {SOF_TKN_INTEL_DMIC_PDM_POLARITY_B,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, polarity_mic_b),
+ 0},
+ {SOF_TKN_INTEL_DMIC_PDM_CLK_EDGE,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, clk_edge),
+ 0},
+ {SOF_TKN_INTEL_DMIC_PDM_SKEW,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, skew),
+ 0},
+};
+
+/* HDA */
+static const struct sof_topology_token hda_tokens[] = {
+ {SOF_TKN_INTEL_HDA_RATE,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_hda_params, rate), 0},
+ {SOF_TKN_INTEL_HDA_CH,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_hda_params, channels), 0},
+};
+
+/* Leds */
+static const struct sof_topology_token led_tokens[] = {
+ {SOF_TKN_MUTE_LED_USE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct snd_sof_led_control, use_led), 0},
+ {SOF_TKN_MUTE_LED_DIRECTION, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32, offsetof(struct snd_sof_led_control, direction), 0},
+};
+
+static int sof_parse_uuid_tokens(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array,
+ size_t offset)
+{
+ struct snd_soc_tplg_vendor_uuid_elem *elem;
+ int found = 0;
+ int i, j;
+
+ /* parse element by element */
+ for (i = 0; i < le32_to_cpu(array->num_elems); i++) {
+ elem = &array->uuid[i];
+
+ /* search for token */
+ for (j = 0; j < count; j++) {
+ /* match token type */
+ if (tokens[j].type != SND_SOC_TPLG_TUPLE_TYPE_UUID)
+ continue;
+
+ /* match token id */
+ if (tokens[j].token != le32_to_cpu(elem->token))
+ continue;
+
+ /* matched - now load token */
+ tokens[j].get_token(elem, object,
+ offset + tokens[j].offset,
+ tokens[j].size);
+
+ found++;
+ }
+ }
+
+ return found;
+}
+
+static int sof_parse_string_tokens(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array,
+ size_t offset)
+{
+ struct snd_soc_tplg_vendor_string_elem *elem;
+ int found = 0;
+ int i, j;
+
+ /* parse element by element */
+ for (i = 0; i < le32_to_cpu(array->num_elems); i++) {
+ elem = &array->string[i];
+
+ /* search for token */
+ for (j = 0; j < count; j++) {
+ /* match token type */
+ if (tokens[j].type != SND_SOC_TPLG_TUPLE_TYPE_STRING)
+ continue;
+
+ /* match token id */
+ if (tokens[j].token != le32_to_cpu(elem->token))
+ continue;
+
+ /* matched - now load token */
+ tokens[j].get_token(elem, object,
+ offset + tokens[j].offset,
+ tokens[j].size);
+
+ found++;
+ }
+ }
+
+ return found;
+}
+
+static int sof_parse_word_tokens(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array,
+ size_t offset)
+{
+ struct snd_soc_tplg_vendor_value_elem *elem;
+ int found = 0;
+ int i, j;
+
+ /* parse element by element */
+ for (i = 0; i < le32_to_cpu(array->num_elems); i++) {
+ elem = &array->value[i];
+
+ /* search for token */
+ for (j = 0; j < count; j++) {
+ /* match token type */
+ if (!(tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_WORD ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BYTE ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BOOL))
+ continue;
+
+ /* match token id */
+ if (tokens[j].token != le32_to_cpu(elem->token))
+ continue;
+
+ /* load token */
+ tokens[j].get_token(elem, object,
+ offset + tokens[j].offset,
+ tokens[j].size);
+
+ found++;
+ }
+ }
+
+ return found;
+}
+
+/**
+ * sof_parse_token_sets - Parse multiple sets of tokens
+ * @scomp: pointer to soc component
+ * @object: target ipc struct for parsed values
+ * @tokens: token definition array describing what tokens to parse
+ * @count: number of tokens in definition array
+ * @array: source pointer to consecutive vendor arrays to be parsed
+ * @priv_size: total size of the consecutive source arrays
+ * @sets: number of similar token sets to be parsed, 1 set has count elements
+ * @object_size: offset to next target ipc struct with multiple sets
+ *
+ * This function parses multiple sets of tokens in vendor arrays into
+ * consecutive ipc structs.
+ */
+static int sof_parse_token_sets(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array,
+ int priv_size, int sets, size_t object_size)
+{
+ size_t offset = 0;
+ int found = 0;
+ int total = 0;
+ int asize;
+
+ while (priv_size > 0 && total < count * sets) {
+ asize = le32_to_cpu(array->size);
+
+ /* validate asize: a zero-size array would make no progress */
+ if (asize <= 0) {
+ dev_err(scomp->dev, "error: invalid array size 0x%x\n",
+ asize);
+ return -EINVAL;
+ }
+
+ /* make sure there is enough data before parsing */
+ priv_size -= asize;
+ if (priv_size < 0) {
+ dev_err(scomp->dev, "error: invalid array size 0x%x\n",
+ asize);
+ return -EINVAL;
+ }
+
+ /* call correct parser depending on type */
+ switch (le32_to_cpu(array->type)) {
+ case SND_SOC_TPLG_TUPLE_TYPE_UUID:
+ found += sof_parse_uuid_tokens(scomp, object, tokens,
+ count, array, offset);
+ break;
+ case SND_SOC_TPLG_TUPLE_TYPE_STRING:
+ found += sof_parse_string_tokens(scomp, object, tokens,
+ count, array, offset);
+ break;
+ case SND_SOC_TPLG_TUPLE_TYPE_BOOL:
+ case SND_SOC_TPLG_TUPLE_TYPE_BYTE:
+ case SND_SOC_TPLG_TUPLE_TYPE_WORD:
+ case SND_SOC_TPLG_TUPLE_TYPE_SHORT:
+ found += sof_parse_word_tokens(scomp, object, tokens,
+ count, array, offset);
+ break;
+ default:
+ dev_err(scomp->dev, "error: unknown token type %d\n",
+ le32_to_cpu(array->type));
+ return -EINVAL;
+ }
+
+ /* next array */
+ array = (struct snd_soc_tplg_vendor_array *)((u8 *)array
+ + asize);
+
+ /* move to next target struct */
+ if (found >= count) {
+ offset += object_size;
+ total += found;
+ found = 0;
+ }
+ }
+
+ return 0;
+}
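+
+/*
+ * Illustrative use (hypothetical caller): parsing N identical token sets with
+ * sof_parse_token_sets() into an array of N IPC structs would pass sets = N
+ * and object_size = sizeof(struct foo), so the destination offset advances by
+ * one array element after each complete set has been found.
+ */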
+
+static int sof_parse_tokens(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array,
+ int priv_size)
+{
+ /*
+ * sof_parse_tokens is used when topology contains only a single set of
+ * identical tuples arrays. So additional parameters to
+ * sof_parse_token_sets are sets = 1 (only 1 set) and
+ * object_size = 0 (irrelevant).
+ */
+ return sof_parse_token_sets(scomp, object, tokens, count, array,
+ priv_size, 1, 0);
+}
+
+static void sof_dbg_comp_config(struct snd_soc_component *scomp,
+ struct sof_ipc_comp_config *config)
+{
+ dev_dbg(scomp->dev, " config: periods snk %d src %d fmt %d\n",
+ config->periods_sink, config->periods_source,
+ config->frame_fmt);
+}
+
+/*
+ * Standard Kcontrols.
+ */
+
+static int sof_control_load_volume(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_mixer_control *mc =
+ container_of(hdr, struct snd_soc_tplg_mixer_control, hdr);
+ struct sof_ipc_ctrl_data *cdata;
+ int tlv[TLV_ITEMS];
+ unsigned int i;
+ int ret;
+
+ /* validate topology data */
+ if (le32_to_cpu(mc->num_channels) > SND_SOC_TPLG_MAX_CHAN) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* init the volume get/put data */
+ scontrol->size = struct_size(scontrol->control_data, chanv,
+ le32_to_cpu(mc->num_channels));
+ scontrol->control_data = kzalloc(scontrol->size, GFP_KERNEL);
+ if (!scontrol->control_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->min_volume_step = le32_to_cpu(mc->min);
+ scontrol->max_volume_step = le32_to_cpu(mc->max);
+ scontrol->num_channels = le32_to_cpu(mc->num_channels);
+
+ /* set cmd for mixer control */
+ if (le32_to_cpu(mc->max) == 1) {
+ scontrol->cmd = SOF_CTRL_CMD_SWITCH;
+ goto skip;
+ }
+
+ scontrol->cmd = SOF_CTRL_CMD_VOLUME;
+
+ /* extract tlv data */
+ if (get_tlv_data(kc->tlv.p, tlv) < 0) {
+ dev_err(scomp->dev, "error: invalid TLV data\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ /* set up volume table */
+ ret = set_up_volume_table(scontrol, tlv, le32_to_cpu(mc->max) + 1);
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: setting up volume table\n");
+ goto out_free;
+ }
+
+ /* set default volume values to 0dB in control */
+ cdata = scontrol->control_data;
+ for (i = 0; i < scontrol->num_channels; i++) {
+ cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = VOL_ZERO_DB;
+ }
+
+skip:
+ /* set up possible led control from mixer private data */
+ ret = sof_parse_tokens(scomp, &scontrol->led_ctl, led_tokens,
+ ARRAY_SIZE(led_tokens), mc->priv.array,
+ le32_to_cpu(mc->priv.size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse led tokens failed %d\n",
+ le32_to_cpu(mc->priv.size));
+ goto out_free_table;
+ }
+
+ dev_dbg(scomp->dev, "tplg: load kcontrol index %d chans %d\n",
+ scontrol->comp_id, scontrol->num_channels);
+
+ return 0;
+
+out_free_table:
+ if (le32_to_cpu(mc->max) > 1)
+ kfree(scontrol->volume_table);
+out_free:
+ kfree(scontrol->control_data);
+out:
+ return ret;
+}
+
+static int sof_control_load_enum(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_enum_control *ec =
+ container_of(hdr, struct snd_soc_tplg_enum_control, hdr);
+
+ /* validate topology data */
+ if (le32_to_cpu(ec->num_channels) > SND_SOC_TPLG_MAX_CHAN)
+ return -EINVAL;
+
+ /* init the enum get/put data */
+ scontrol->size = struct_size(scontrol->control_data, chanv,
+ le32_to_cpu(ec->num_channels));
+ scontrol->control_data = kzalloc(scontrol->size, GFP_KERNEL);
+ if (!scontrol->control_data)
+ return -ENOMEM;
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->num_channels = le32_to_cpu(ec->num_channels);
+
+ scontrol->cmd = SOF_CTRL_CMD_ENUM;
+
+ dev_dbg(scomp->dev, "tplg: load kcontrol index %d chans %d comp_id %d\n",
+ scontrol->comp_id, scontrol->num_channels, scontrol->comp_id);
+
+ return 0;
+}
+
+static int sof_control_load_bytes(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_ctrl_data *cdata;
+ struct snd_soc_tplg_bytes_control *control =
+ container_of(hdr, struct snd_soc_tplg_bytes_control, hdr);
+ struct soc_bytes_ext *sbe = (struct soc_bytes_ext *)kc->private_value;
+ size_t max_size = sbe->max;
+ size_t priv_size = le32_to_cpu(control->priv.size);
+ int ret;
+
+ if (max_size < sizeof(struct sof_ipc_ctrl_data) ||
+ max_size < sizeof(struct sof_abi_hdr)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* init the get/put bytes data */
+ if (priv_size > max_size - sizeof(struct sof_ipc_ctrl_data)) {
+ dev_err(scomp->dev, "err: bytes data size %zu exceeds max %zu.\n",
+ priv_size, max_size - sizeof(struct sof_ipc_ctrl_data));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ scontrol->size = sizeof(struct sof_ipc_ctrl_data) + priv_size;
+
+ scontrol->control_data = kzalloc(max_size, GFP_KERNEL);
+ cdata = scontrol->control_data;
+ if (!scontrol->control_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->cmd = SOF_CTRL_CMD_BINARY;
+
+ dev_dbg(scomp->dev, "tplg: load kcontrol index %d chans %d\n",
+ scontrol->comp_id, scontrol->num_channels);
+
+ if (le32_to_cpu(control->priv.size) > 0) {
+ memcpy(cdata->data, control->priv.data,
+ le32_to_cpu(control->priv.size));
+
+ if (cdata->data->magic != SOF_ABI_MAGIC) {
+ dev_err(scomp->dev, "error: Wrong ABI magic 0x%08x.\n",
+ cdata->data->magic);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION,
+ cdata->data->abi)) {
+ dev_err(scomp->dev,
+ "error: Incompatible ABI version 0x%08x.\n",
+ cdata->data->abi);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ if (cdata->data->size + sizeof(const struct sof_abi_hdr) !=
+ le32_to_cpu(control->priv.size)) {
+ dev_err(scomp->dev,
+ "error: Conflict in bytes vs. priv size.\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ return 0;
+
+out_free:
+ kfree(scontrol->control_data);
+out:
+ return ret;
+}
+
+/* external kcontrol init - used for any driver specific init */
+static int sof_control_load(struct snd_soc_component *scomp, int index,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct soc_mixer_control *sm;
+ struct soc_bytes_ext *sbe;
+ struct soc_enum *se;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_dobj *dobj;
+ struct snd_sof_control *scontrol;
+ int ret;
+
+ dev_dbg(scomp->dev, "tplg: load control type %d name : %s\n",
+ hdr->type, hdr->name);
+
+ scontrol = kzalloc(sizeof(*scontrol), GFP_KERNEL);
+ if (!scontrol)
+ return -ENOMEM;
+
+ scontrol->scomp = scomp;
+
+ switch (le32_to_cpu(hdr->ops.info)) {
+ case SND_SOC_TPLG_CTL_VOLSW:
+ case SND_SOC_TPLG_CTL_VOLSW_SX:
+ case SND_SOC_TPLG_CTL_VOLSW_XR_SX:
+ sm = (struct soc_mixer_control *)kc->private_value;
+ dobj = &sm->dobj;
+ ret = sof_control_load_volume(scomp, scontrol, kc, hdr);
+ break;
+ case SND_SOC_TPLG_CTL_BYTES:
+ sbe = (struct soc_bytes_ext *)kc->private_value;
+ dobj = &sbe->dobj;
+ ret = sof_control_load_bytes(scomp, scontrol, kc, hdr);
+ break;
+ case SND_SOC_TPLG_CTL_ENUM:
+ case SND_SOC_TPLG_CTL_ENUM_VALUE:
+ se = (struct soc_enum *)kc->private_value;
+ dobj = &se->dobj;
+ ret = sof_control_load_enum(scomp, scontrol, kc, hdr);
+ break;
+ case SND_SOC_TPLG_CTL_RANGE:
+ case SND_SOC_TPLG_CTL_STROBE:
+ case SND_SOC_TPLG_DAPM_CTL_VOLSW:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
+ case SND_SOC_TPLG_DAPM_CTL_PIN:
+ default:
+ dev_warn(scomp->dev, "control type not supported %d:%d:%d\n",
+ hdr->ops.get, hdr->ops.put, hdr->ops.info);
+ kfree(scontrol);
+ return 0;
+ }
+
+ if (ret < 0) {
+ kfree(scontrol);
+ return ret;
+ }
+
+ scontrol->led_ctl.led_value = -1;
+
+ dobj->private = scontrol;
+ list_add(&scontrol->list, &sdev->kcontrol_list);
+ return 0;
+}
+
+static int sof_control_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_free fcomp;
+ struct snd_sof_control *scontrol = dobj->private;
+
+ dev_dbg(scomp->dev, "tplg: unload control name : %s\n", scomp->name);
+
+ fcomp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_FREE;
+ fcomp.hdr.size = sizeof(fcomp);
+ fcomp.id = scontrol->comp_id;
+
+ kfree(scontrol->control_data);
+ list_del(&scontrol->list);
+ kfree(scontrol);
+ /* send IPC to the DSP */
+ return sof_ipc_tx_message(sdev->ipc,
+ fcomp.hdr.cmd, &fcomp, sizeof(fcomp),
+ NULL, 0);
+}
+
+/*
+ * DAI Topology
+ */
+
+/* Static DSP core power management so far, should be extended in the future */
+static int sof_core_enable(struct snd_sof_dev *sdev, int core)
+{
+ struct sof_ipc_pm_core_config pm_core_config = {
+ .hdr = {
+ .cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CORE_ENABLE,
+ .size = sizeof(pm_core_config),
+ },
+ .enable_mask = sdev->enabled_cores_mask | BIT(core),
+ };
+ int ret;
+
+ if (sdev->enabled_cores_mask & BIT(core))
+ return 0;
+
+ /* power up the core if it is host managed */
+ ret = snd_sof_dsp_core_power_up(sdev, BIT(core));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: %d powering up core %d\n",
+ ret, core);
+ return ret;
+ }
+
+ /* Now notify DSP */
+ ret = sof_ipc_tx_message(sdev->ipc, pm_core_config.hdr.cmd,
+ &pm_core_config, sizeof(pm_core_config),
+ &pm_core_config, sizeof(pm_core_config));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: core %d enable ipc failure %d\n",
+ core, ret);
+ goto err;
+ }
+
+ /* update enabled cores mask */
+ sdev->enabled_cores_mask |= BIT(core);
+
+ return ret;
+err:
+ /* power down core if it is host managed and return the original error if this fails too */
+ if (snd_sof_dsp_core_power_down(sdev, BIT(core)) < 0)
+ dev_err(sdev->dev, "error: powering down core %d\n", core);
+
+ return ret;
+}
+
+int sof_pipeline_core_enable(struct snd_sof_dev *sdev,
+ const struct snd_sof_widget *swidget)
+{
+ const struct sof_ipc_pipe_new *pipeline;
+ int ret;
+
+ if (swidget->id == snd_soc_dapm_scheduler) {
+ pipeline = swidget->private;
+ } else {
+ pipeline = snd_sof_pipeline_find(sdev, swidget->pipeline_id);
+ if (!pipeline)
+ return -ENOENT;
+ }
+
+ /* First enable the pipeline core */
+ ret = sof_core_enable(sdev, pipeline->core);
+ if (ret < 0)
+ return ret;
+
+ return sof_core_enable(sdev, swidget->core);
+}
+
+static int sof_connect_dai_widget(struct snd_soc_component *scomp,
+ struct snd_soc_dapm_widget *w,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct snd_sof_dai *dai)
+{
+ struct snd_soc_card *card = scomp->card;
+ struct snd_soc_pcm_runtime *rtd;
+ struct snd_soc_dai *cpu_dai;
+ int i;
+
+ list_for_each_entry(rtd, &card->rtd_list, list) {
+ dev_vdbg(scomp->dev, "tplg: check widget: %s stream: %s dai stream: %s\n",
+ w->name, w->sname, rtd->dai_link->stream_name);
+
+ if (!w->sname || !rtd->dai_link->stream_name)
+ continue;
+
+ /* does stream match DAI link ? */
+ if (strcmp(w->sname, rtd->dai_link->stream_name))
+ continue;
+
+ switch (w->id) {
+ case snd_soc_dapm_dai_out:
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ /*
+ * Please create DAI widgets in the right order
+ * to ensure the BE will connect to the right DAI
+ * widget.
+ */
+ if (!cpu_dai->capture_widget) {
+ cpu_dai->capture_widget = w;
+ break;
+ }
+ }
+ if (i == rtd->num_cpus) {
+ dev_err(scomp->dev, "error: can't find BE for DAI %s\n",
+ w->name);
+
+ return -EINVAL;
+ }
+ dai->name = rtd->dai_link->name;
+ dev_dbg(scomp->dev, "tplg: connected widget %s -> DAI link %s\n",
+ w->name, rtd->dai_link->name);
+ break;
+ case snd_soc_dapm_dai_in:
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ /*
+ * Please create DAI widgets in the right order
+ * to ensure the BE will connect to the right DAI
+ * widget.
+ */
+ if (!cpu_dai->playback_widget) {
+ cpu_dai->playback_widget = w;
+ break;
+ }
+ }
+ if (i == rtd->num_cpus) {
+ dev_err(scomp->dev, "error: can't find BE for DAI %s\n",
+ w->name);
+
+ return -EINVAL;
+ }
+ dai->name = rtd->dai_link->name;
+ dev_dbg(scomp->dev, "tplg: connected widget %s -> DAI link %s\n",
+ w->name, rtd->dai_link->name);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* check we have a connection */
+ if (!dai->name) {
+ dev_err(scomp->dev, "error: can't connect DAI %s stream %s\n",
+ w->name, w->sname);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * sof_comp_alloc - allocate and initialize buffer for a new component
+ * @swidget: pointer to struct snd_sof_widget containing extended data
+ * @ipc_size: IPC payload size that will be updated depending on valid
+ * extended data.
+ * @index: ID of the pipeline the component belongs to
+ *
+ * Return: The pointer to the new allocated component, NULL if failed.
+ */
+static struct sof_ipc_comp *sof_comp_alloc(struct snd_sof_widget *swidget,
+ size_t *ipc_size, int index)
+{
+ u8 nil_uuid[SOF_UUID_SIZE] = {0};
+ struct sof_ipc_comp *comp;
+ size_t total_size = *ipc_size;
+
+ /* only non-zero UUID is valid */
+ if (memcmp(&swidget->comp_ext, nil_uuid, SOF_UUID_SIZE))
+ total_size += sizeof(swidget->comp_ext);
+
+ comp = kzalloc(total_size, GFP_KERNEL);
+ if (!comp)
+ return NULL;
+
+ /* configure comp new IPC message */
+ comp->hdr.size = total_size;
+ comp->hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_NEW;
+ comp->id = swidget->comp_id;
+ comp->pipeline_id = index;
+ comp->core = swidget->core;
+
+ /* handle the extended data if needed */
+ if (total_size > *ipc_size) {
+ /* append extended data to the end of the component */
+ memcpy((u8 *)comp + *ipc_size, &swidget->comp_ext, sizeof(swidget->comp_ext));
+ comp->ext_data_length = sizeof(swidget->comp_ext);
+ }
+
+ /* update ipc_size and return */
+ *ipc_size = total_size;
+ return comp;
+}
+
+static int sof_widget_load_dai(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r,
+ struct snd_sof_dai *dai)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_dai *comp_dai;
+ size_t ipc_size = sizeof(*comp_dai);
+ int ret;
+
+ comp_dai = (struct sof_ipc_comp_dai *)
+ sof_comp_alloc(swidget, &ipc_size, index);
+ if (!comp_dai)
+ return -ENOMEM;
+
+ /* configure dai IPC message */
+ comp_dai->comp.type = SOF_COMP_DAI;
+ comp_dai->config.hdr.size = sizeof(comp_dai->config);
+
+ ret = sof_parse_tokens(scomp, comp_dai, dai_tokens,
+ ARRAY_SIZE(dai_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse dai tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto finish;
+ }
+
+ ret = sof_parse_tokens(scomp, &comp_dai->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse dai.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto finish;
+ }
+
+ dev_dbg(scomp->dev, "dai %s: type %d index %d\n",
+ swidget->widget->name, comp_dai->type, comp_dai->dai_index);
+ sof_dbg_comp_config(scomp, &comp_dai->config);
+
+ ret = sof_ipc_tx_message(sdev->ipc, comp_dai->comp.hdr.cmd,
+ comp_dai, ipc_size, r, sizeof(*r));
+
+ if (ret == 0 && dai) {
+ dai->scomp = scomp;
+
+ /*
+ * copy only the sof_ipc_comp_dai so that the rest of
+ * snd_sof_dai is not overwritten; the extended data is
+ * kept in the snd_sof_widget.
+ */
+ memcpy(&dai->comp_dai, comp_dai, sizeof(*comp_dai));
+ }
+
+finish:
+ kfree(comp_dai);
+ return ret;
+}
+
+/*
+ * Buffer topology
+ */
+
+static int sof_widget_load_buffer(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_buffer *buffer;
+ int ret;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* configure buffer IPC message */
+ buffer->comp.hdr.size = sizeof(*buffer);
+ buffer->comp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_BUFFER_NEW;
+ buffer->comp.id = swidget->comp_id;
+ buffer->comp.type = SOF_COMP_BUFFER;
+ buffer->comp.pipeline_id = index;
+ buffer->comp.core = swidget->core;
+
+ ret = sof_parse_tokens(scomp, buffer, buffer_tokens,
+ ARRAY_SIZE(buffer_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse buffer tokens failed %d\n",
+ le32_to_cpu(private->size));
+ kfree(buffer);
+ return ret;
+ }
+
+ dev_dbg(scomp->dev, "buffer %s: size %d caps 0x%x\n",
+ swidget->widget->name, buffer->size, buffer->caps);
+
+ swidget->private = buffer;
+
+ ret = sof_ipc_tx_message(sdev->ipc, buffer->comp.hdr.cmd, buffer,
+ sizeof(*buffer), r, sizeof(*r));
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: buffer %s load failed\n",
+ swidget->widget->name);
+ kfree(buffer);
+ }
+
+ return ret;
+}
+
+/* bind PCM ID to host component ID */
+static int spcm_bind(struct snd_soc_component *scomp, struct snd_sof_pcm *spcm,
+ int dir)
+{
+ struct snd_sof_widget *host_widget;
+
+ host_widget = snd_sof_find_swidget_sname(scomp,
+ spcm->pcm.caps[dir].name,
+ dir);
+ if (!host_widget) {
+ dev_err(scomp->dev, "can't find host comp to bind pcm\n");
+ return -EINVAL;
+ }
+
+ spcm->stream[dir].comp_id = host_widget->comp_id;
+
+ return 0;
+}
+
+/*
+ * PCM Topology
+ */
+
+static int sof_widget_load_pcm(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ enum sof_ipc_stream_direction dir,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_host *host;
+ size_t ipc_size = sizeof(*host);
+ int ret;
+
+ host = (struct sof_ipc_comp_host *)
+ sof_comp_alloc(swidget, &ipc_size, index);
+ if (!host)
+ return -ENOMEM;
+
+ /* configure host comp IPC message */
+ host->comp.type = SOF_COMP_HOST;
+ host->direction = dir;
+ host->config.hdr.size = sizeof(host->config);
+
+ ret = sof_parse_tokens(scomp, host, pcm_tokens,
+ ARRAY_SIZE(pcm_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse host tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ ret = sof_parse_tokens(scomp, &host->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse host.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ dev_dbg(scomp->dev, "loaded host %s\n", swidget->widget->name);
+ sof_dbg_comp_config(scomp, &host->config);
+
+ swidget->private = host;
+
+ ret = sof_ipc_tx_message(sdev->ipc, host->comp.hdr.cmd, host,
+ ipc_size, r, sizeof(*r));
+ if (ret >= 0)
+ return ret;
+err:
+ kfree(host);
+ return ret;
+}
+
+/*
+ * Pipeline Topology
+ */
+int sof_load_pipeline_ipc(struct device *dev,
+ struct sof_ipc_pipe_new *pipeline,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ int ret = sof_core_enable(sdev, pipeline->core);
+
+ if (ret < 0)
+ return ret;
+
+ ret = sof_ipc_tx_message(sdev->ipc, pipeline->hdr.cmd, pipeline,
+ sizeof(*pipeline), r, sizeof(*r));
+ if (ret < 0)
+ dev_err(dev, "error: load pipeline ipc failure\n");
+
+ return ret;
+}
+
+static int sof_widget_load_pipeline(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_pipe_new *pipeline;
+ struct snd_sof_widget *comp_swidget;
+ int ret;
+
+ pipeline = kzalloc(sizeof(*pipeline), GFP_KERNEL);
+ if (!pipeline)
+ return -ENOMEM;
+
+ /* configure pipeline IPC message */
+ pipeline->hdr.size = sizeof(*pipeline);
+ pipeline->hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_PIPE_NEW;
+ pipeline->pipeline_id = index;
+ pipeline->comp_id = swidget->comp_id;
+
+ /* component at start of pipeline is our stream id */
+ comp_swidget = snd_sof_find_swidget(scomp, tw->sname);
+ if (!comp_swidget) {
+ dev_err(scomp->dev, "error: widget %s refers to non existent widget %s\n",
+ tw->name, tw->sname);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ pipeline->sched_id = comp_swidget->comp_id;
+
+ dev_dbg(scomp->dev, "tplg: pipeline id %d comp %d scheduling comp id %d\n",
+ pipeline->pipeline_id, pipeline->comp_id, pipeline->sched_id);
+
+ ret = sof_parse_tokens(scomp, pipeline, sched_tokens,
+ ARRAY_SIZE(sched_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse pipeline tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ dev_dbg(scomp->dev, "pipeline %s: period %d pri %d mips %d core %d frames %d\n",
+ swidget->widget->name, pipeline->period, pipeline->priority,
+ pipeline->period_mips, pipeline->core, pipeline->frames_per_sched);
+
+ swidget->private = pipeline;
+
+ /* send ipc's to create pipeline comp and power up schedule core */
+ ret = sof_load_pipeline_ipc(scomp->dev, pipeline, r);
+ if (ret >= 0)
+ return ret;
+err:
+ kfree(pipeline);
+ return ret;
+}
+
+/*
+ * Mixer topology
+ */
+
+static int sof_widget_load_mixer(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_mixer *mixer;
+ size_t ipc_size = sizeof(*mixer);
+ int ret;
+
+ mixer = (struct sof_ipc_comp_mixer *)
+ sof_comp_alloc(swidget, &ipc_size, index);
+ if (!mixer)
+ return -ENOMEM;
+
+ /* configure mixer IPC message */
+ mixer->comp.type = SOF_COMP_MIXER;
+ mixer->config.hdr.size = sizeof(mixer->config);
+
+ ret = sof_parse_tokens(scomp, &mixer->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse mixer.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ kfree(mixer);
+ return ret;
+ }
+
+ sof_dbg_comp_config(scomp, &mixer->config);
+
+ swidget->private = mixer;
+
+ ret = sof_ipc_tx_message(sdev->ipc, mixer->comp.hdr.cmd, mixer,
+ ipc_size, r, sizeof(*r));
+ if (ret < 0)
+ kfree(mixer);
+
+ return ret;
+}
+
+/*
+ * Mux topology
+ */
+static int sof_widget_load_mux(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_mux *mux;
+ size_t ipc_size = sizeof(*mux);
+ int ret;
+
+ mux = (struct sof_ipc_comp_mux *)
+ sof_comp_alloc(swidget, &ipc_size, index);
+ if (!mux)
+ return -ENOMEM;
+
+ /* configure mux IPC message */
+ mux->comp.type = SOF_COMP_MUX;
+ mux->config.hdr.size = sizeof(mux->config);
+
+ ret = sof_parse_tokens(scomp, &mux->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse mux.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ kfree(mux);
+ return ret;
+ }
+
+ sof_dbg_comp_config(scomp, &mux->config);
+
+ swidget->private = mux;
+
+ ret = sof_ipc_tx_message(sdev->ipc, mux->comp.hdr.cmd, mux,
+ ipc_size, r, sizeof(*r));
+ if (ret < 0)
+ kfree(mux);
+
+ return ret;
+}
+
+/*
+ * PGA Topology
+ */
+
+static int sof_widget_load_pga(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_volume *volume;
+ struct snd_sof_control *scontrol;
+ size_t ipc_size = sizeof(*volume);
+ int min_step;
+ int max_step;
+ int ret;
+
+ volume = (struct sof_ipc_comp_volume *)
+ sof_comp_alloc(swidget, &ipc_size, index);
+ if (!volume)
+ return -ENOMEM;
+
+ if (!le32_to_cpu(tw->num_kcontrols)) {
+ dev_err(scomp->dev, "error: invalid kcontrol count %d for volume\n",
+ le32_to_cpu(tw->num_kcontrols));
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* configure volume IPC message */
+ volume->comp.type = SOF_COMP_VOLUME;
+ volume->config.hdr.size = sizeof(volume->config);
+
+ ret = sof_parse_tokens(scomp, volume, volume_tokens,
+ ARRAY_SIZE(volume_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse volume tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+ ret = sof_parse_tokens(scomp, &volume->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse volume.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ sof_dbg_comp_config(scomp, &volume->config);
+
+ swidget->private = volume;
+
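+ /*
+ * Set the volume range from the matching volume kcontrol: the min/max
+ * values come from its volume table and the channel count from the
+ * control itself.
+ */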
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list) {
+ if (scontrol->comp_id == swidget->comp_id &&
+ scontrol->volume_table) {
+ min_step = scontrol->min_volume_step;
+ max_step = scontrol->max_volume_step;
+ volume->min_value = scontrol->volume_table[min_step];
+ volume->max_value = scontrol->volume_table[max_step];
+ volume->channels = scontrol->num_channels;
+ break;
+ }
+ }
+
+ ret = sof_ipc_tx_message(sdev->ipc, volume->comp.hdr.cmd, volume,
+ ipc_size, r, sizeof(*r));
+ if (ret >= 0)
+ return ret;
+err:
+ kfree(volume);
+ return ret;
+}
+
+/*
+ * SRC Topology
+ */
+
+static int sof_widget_load_src(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_src *src;
+ size_t ipc_size = sizeof(*src);
+ int ret;
+
+ src = (struct sof_ipc_comp_src *)
+ sof_comp_alloc(swidget, &ipc_size, index);
+ if (!src)
+ return -ENOMEM;
+
+ /* configure src IPC message */
+ src->comp.type = SOF_COMP_SRC;
+ src->config.hdr.size = sizeof(src->config);
+
+ ret = sof_parse_tokens(scomp, src, src_tokens,
+ ARRAY_SIZE(src_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse src tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ ret = sof_parse_tokens(scomp, &src->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse src.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ dev_dbg(scomp->dev, "src %s: source rate %d sink rate %d\n",
+ swidget->widget->name, src->source_rate, src->sink_rate);
+ sof_dbg_comp_config(scomp, &src->config);
+
+ swidget->private = src;
+
+ ret = sof_ipc_tx_message(sdev->ipc, src->comp.hdr.cmd, src,
+ ipc_size, r, sizeof(*r));
+ if (ret >= 0)
+ return ret;
+err:
+ kfree(src);
+ return ret;
+}
+
+/*
+ * ASRC Topology
+ */
+
+static int sof_widget_load_asrc(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_asrc *asrc;
+ size_t ipc_size = sizeof(*asrc);
+ int ret;
+
+ asrc = (struct sof_ipc_comp_asrc *)
+ sof_comp_alloc(swidget, &ipc_size, index);
+ if (!asrc)
+ return -ENOMEM;
+
+ /* configure ASRC IPC message */
+ asrc->comp.type = SOF_COMP_ASRC;
+ asrc->config.hdr.size = sizeof(asrc->config);
+
+ ret = sof_parse_tokens(scomp, asrc, asrc_tokens,
+ ARRAY_SIZE(asrc_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse asrc tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ ret = sof_parse_tokens(scomp, &asrc->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse asrc.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ dev_dbg(scomp->dev, "asrc %s: source rate %d sink rate %d "
+ "asynch %d operation %d\n",
+ swidget->widget->name, asrc->source_rate, asrc->sink_rate,
+ asrc->asynchronous_mode, asrc->operation_mode);
+ sof_dbg_comp_config(scomp, &asrc->config);
+
+ swidget->private = asrc;
+
+ ret = sof_ipc_tx_message(sdev->ipc, asrc->comp.hdr.cmd, asrc,
+ ipc_size, r, sizeof(*r));
+ if (ret >= 0)
+ return ret;
+err:
+ kfree(asrc);
+ return ret;
+}
+
+/*
+ * Signal Generator Topology
+ */
+
+static int sof_widget_load_siggen(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_tone *tone;
+ size_t ipc_size = sizeof(*tone);
+ int ret;
+
+ tone = (struct sof_ipc_comp_tone *)
+ sof_comp_alloc(swidget, &ipc_size, index);
+ if (!tone)
+ return -ENOMEM;
+
+ /* configure siggen IPC message */
+ tone->comp.type = SOF_COMP_TONE;
+ tone->config.hdr.size = sizeof(tone->config);
+
+ ret = sof_parse_tokens(scomp, tone, tone_tokens,
+ ARRAY_SIZE(tone_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse tone tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ ret = sof_parse_tokens(scomp, &tone->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse tone.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ dev_dbg(scomp->dev, "tone %s: frequency %d amplitude %d\n",
+ swidget->widget->name, tone->frequency, tone->amplitude);
+ sof_dbg_comp_config(scomp, &tone->config);
+
+ swidget->private = tone;
+
+ ret = sof_ipc_tx_message(sdev->ipc, tone->comp.hdr.cmd, tone,
+ ipc_size, r, sizeof(*r));
+ if (ret >= 0)
+ return ret;
+err:
+ kfree(tone);
+ return ret;
+}
+
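+/*
+ * Collect the private data attached to each of the widget's kcontrols and
+ * accumulate the total payload size, recording for each control the IPC
+ * command and control type needed to send its data to the DSP.
+ */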
+static int sof_get_control_data(struct snd_soc_component *scomp,
+ struct snd_soc_dapm_widget *widget,
+ struct sof_widget_data *wdata,
+ size_t *size)
+{
+ const struct snd_kcontrol_new *kc;
+ struct soc_mixer_control *sm;
+ struct soc_bytes_ext *sbe;
+ struct soc_enum *se;
+ int i;
+
+ *size = 0;
+
+ for (i = 0; i < widget->num_kcontrols; i++) {
+ kc = &widget->kcontrol_news[i];
+
+ switch (widget->dobj.widget.kcontrol_type) {
+ case SND_SOC_TPLG_TYPE_MIXER:
+ sm = (struct soc_mixer_control *)kc->private_value;
+ wdata[i].control = sm->dobj.private;
+ break;
+ case SND_SOC_TPLG_TYPE_BYTES:
+ sbe = (struct soc_bytes_ext *)kc->private_value;
+ wdata[i].control = sbe->dobj.private;
+ break;
+ case SND_SOC_TPLG_TYPE_ENUM:
+ se = (struct soc_enum *)kc->private_value;
+ wdata[i].control = se->dobj.private;
+ break;
+ default:
+ dev_err(scomp->dev, "error: unknown kcontrol type %d in widget %s\n",
+ widget->dobj.widget.kcontrol_type,
+ widget->name);
+ return -EINVAL;
+ }
+
+ if (!wdata[i].control) {
+ dev_err(scomp->dev, "error: no scontrol for widget %s\n",
+ widget->name);
+ return -EINVAL;
+ }
+
+ wdata[i].pdata = wdata[i].control->control_data->data;
+ if (!wdata[i].pdata)
+ return -EINVAL;
+
+ /* make sure data is valid - data can be updated at runtime */
+ if (wdata[i].pdata->magic != SOF_ABI_MAGIC)
+ return -EINVAL;
+
+ *size += wdata[i].pdata->size;
+
+ /* get data type */
+ switch (wdata[i].control->cmd) {
+ case SOF_CTRL_CMD_VOLUME:
+ case SOF_CTRL_CMD_ENUM:
+ case SOF_CTRL_CMD_SWITCH:
+ wdata[i].ipc_cmd = SOF_IPC_COMP_SET_VALUE;
+ wdata[i].ctrl_type = SOF_CTRL_TYPE_VALUE_CHAN_SET;
+ break;
+ case SOF_CTRL_CMD_BINARY:
+ wdata[i].ipc_cmd = SOF_IPC_COMP_SET_DATA;
+ wdata[i].ctrl_type = SOF_CTRL_TYPE_DATA_SET;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
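+/*
+ * Build and send the comp_new IPC for a processing component. Control data
+ * is embedded in the message when it fits within SOF_IPC_MSG_MAX_SIZE;
+ * otherwise it is sent separately once the component has been created.
+ */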
+static int sof_process_load(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r,
+ int type)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_dapm_widget *widget = swidget->widget;
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_process *process;
+ struct sof_widget_data *wdata = NULL;
+ size_t ipc_data_size = 0;
+ size_t ipc_size;
+ int offset = 0;
+ int ret;
+ int i;
+
+ /* allocate struct for widget control data sizes and types */
+ if (widget->num_kcontrols) {
+ wdata = kcalloc(widget->num_kcontrols,
+ sizeof(*wdata),
+ GFP_KERNEL);
+
+ if (!wdata)
+ return -ENOMEM;
+
+ /* get possible component controls and get size of all pdata */
+ ret = sof_get_control_data(scomp, widget, wdata,
+ &ipc_data_size);
+
+ if (ret < 0)
+ goto out;
+ }
+
+ ipc_size = sizeof(struct sof_ipc_comp_process) + ipc_data_size;
+
+ /* if the message exceeds the max IPC size, send the control data separately */
+ if (ipc_size > SOF_IPC_MSG_MAX_SIZE) {
+ ipc_size -= ipc_data_size;
+ ipc_data_size = 0;
+ }
+
+ process = (struct sof_ipc_comp_process *)
+ sof_comp_alloc(swidget, &ipc_size, index);
+ if (!process) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* configure process IPC message */
+ process->comp.type = type;
+ process->config.hdr.size = sizeof(process->config);
+
+ ret = sof_parse_tokens(scomp, &process->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse process.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ sof_dbg_comp_config(scomp, &process->config);
+
+ /*
+ * the control data fits in this message, so copy the
+ * private data of each kcontrol into the process
+ * component payload
+ */
+ if (ipc_data_size) {
+ for (i = 0; i < widget->num_kcontrols; i++) {
+ memcpy(&process->data + offset,
+ wdata[i].pdata->data,
+ wdata[i].pdata->size);
+ offset += wdata[i].pdata->size;
+ }
+ }
+
+ process->size = ipc_data_size;
+ swidget->private = process;
+
+ ret = sof_ipc_tx_message(sdev->ipc, process->comp.hdr.cmd, process,
+ ipc_size, r, sizeof(*r));
+
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: create process failed\n");
+ goto err;
+ }
+
+ /* the control data was sent in the single message above, so we are done */
+ if (ipc_data_size)
+ goto out;
+
+ /* send control data with large message supported method */
+ for (i = 0; i < widget->num_kcontrols; i++) {
+ wdata[i].control->readback_offset = 0;
+ ret = snd_sof_ipc_set_get_comp_data(wdata[i].control,
+ wdata[i].ipc_cmd,
+ wdata[i].ctrl_type,
+ wdata[i].control->cmd,
+ true);
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: send control failed\n");
+ break;
+ }
+ }
+
+err:
+ if (ret < 0)
+ kfree(process);
+out:
+ kfree(wdata);
+ return ret;
+}
+
+/*
+ * Processing Component Topology - can be "effect", "codec", or general
+ * "processing".
+ */
+
+static int sof_widget_load_process(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_process config;
+ int ret;
+
+ /* check we have some tokens - we need at least process type */
+ if (le32_to_cpu(private->size) == 0) {
+ dev_err(scomp->dev, "error: process tokens not found\n");
+ return -EINVAL;
+ }
+
+ memset(&config, 0, sizeof(config));
+ config.comp.core = swidget->core;
+
+ /* get the process token */
+ ret = sof_parse_tokens(scomp, &config, process_tokens,
+ ARRAY_SIZE(process_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse process tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ /* now load process specific data and send IPC */
+ ret = sof_process_load(scomp, index, swidget, tw, r,
+ find_process_comp_type(config.type));
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: process loading failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
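+/*
+ * Bind a DAPM event to a widget. Only the keyword detect event is supported,
+ * and only for process widgets of type SOF_COMP_KEYWORD_DETECT.
+ */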
+static int sof_widget_bind_event(struct snd_soc_component *scomp,
+ struct snd_sof_widget *swidget,
+ u16 event_type)
+{
+ struct sof_ipc_comp *ipc_comp;
+
+ /* validate widget event type */
+ switch (event_type) {
+ case SOF_KEYWORD_DETECT_DAPM_EVENT:
+ /* only KEYWORD_DETECT comps should handle this */
+ if (swidget->id != snd_soc_dapm_effect)
+ break;
+
+ ipc_comp = swidget->private;
+ if (ipc_comp && ipc_comp->type != SOF_COMP_KEYWORD_DETECT)
+ break;
+
+ /* bind event to keyword detect comp */
+ return snd_soc_tplg_widget_bind_event(swidget->widget,
+ sof_kwd_events,
+ ARRAY_SIZE(sof_kwd_events),
+ event_type);
+ default:
+ break;
+ }
+
+ dev_err(scomp->dev,
+ "error: invalid event type %d for widget %s\n",
+ event_type, swidget->widget->name);
+ return -EINVAL;
+}
+
+/* external widget init - used for any driver specific init */
+static int sof_widget_ready(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dapm_widget *w,
+ struct snd_soc_tplg_dapm_widget *tw)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_widget *swidget;
+ struct snd_sof_dai *dai;
+ struct sof_ipc_comp_reply reply;
+ struct snd_sof_control *scontrol;
+ struct sof_ipc_comp comp = {
+ .core = SOF_DSP_PRIMARY_CORE,
+ };
+ int ret = 0;
+
+ swidget = kzalloc(sizeof(*swidget), GFP_KERNEL);
+ if (!swidget)
+ return -ENOMEM;
+
+ swidget->scomp = scomp;
+ swidget->widget = w;
+ swidget->comp_id = sdev->next_comp_id++;
+ swidget->complete = 0;
+ swidget->id = w->id;
+ swidget->pipeline_id = index;
+ swidget->private = NULL;
+ memset(&reply, 0, sizeof(reply));
+
+ dev_dbg(scomp->dev, "tplg: ready widget id %d pipe %d type %d name : %s stream %s\n",
+ swidget->comp_id, index, swidget->id, tw->name,
+ strnlen(tw->sname, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) > 0
+ ? tw->sname : "none");
+
+ ret = sof_parse_tokens(scomp, &comp, core_tokens,
+ ARRAY_SIZE(core_tokens), tw->priv.array,
+ le32_to_cpu(tw->priv.size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parsing core tokens failed %d\n",
+ ret);
+ kfree(swidget);
+ return ret;
+ }
+
+ swidget->core = comp.core;
+
+ /* default is primary core, safe to call for already enabled cores */
+ ret = sof_core_enable(sdev, comp.core);
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: enable core: %d\n", ret);
+ kfree(swidget);
+ return ret;
+ }
+
+ ret = sof_parse_tokens(scomp, &swidget->comp_ext, comp_ext_tokens,
+ ARRAY_SIZE(comp_ext_tokens), tw->priv.array,
+ le32_to_cpu(tw->priv.size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parsing comp_ext_tokens failed %d\n",
+ ret);
+ kfree(swidget);
+ return ret;
+ }
+
+ /* handle any special case widgets */
+ switch (w->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ dai = kzalloc(sizeof(*dai), GFP_KERNEL);
+ if (!dai) {
+ kfree(swidget);
+ return -ENOMEM;
+ }
+
+ ret = sof_widget_load_dai(scomp, index, swidget, tw, &reply, dai);
+ if (ret == 0) {
+ sof_connect_dai_widget(scomp, w, tw, dai);
+ list_add(&dai->list, &sdev->dai_list);
+ swidget->private = dai;
+ } else {
+ kfree(dai);
+ }
+ break;
+ case snd_soc_dapm_mixer:
+ ret = sof_widget_load_mixer(scomp, index, swidget, tw, &reply);
+ break;
+ case snd_soc_dapm_pga:
+ ret = sof_widget_load_pga(scomp, index, swidget, tw, &reply);
+ /* find scontrol for this PGA and set the readback offset */
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list) {
+ if (scontrol->comp_id == swidget->comp_id) {
+ scontrol->readback_offset = reply.offset;
+ break;
+ }
+ }
+ break;
+ case snd_soc_dapm_buffer:
+ ret = sof_widget_load_buffer(scomp, index, swidget, tw, &reply);
+ break;
+ case snd_soc_dapm_scheduler:
+ ret = sof_widget_load_pipeline(scomp, index, swidget, tw, &reply);
+ break;
+ case snd_soc_dapm_aif_out:
+ ret = sof_widget_load_pcm(scomp, index, swidget,
+ SOF_IPC_STREAM_CAPTURE, tw, &reply);
+ break;
+ case snd_soc_dapm_aif_in:
+ ret = sof_widget_load_pcm(scomp, index, swidget,
+ SOF_IPC_STREAM_PLAYBACK, tw, &reply);
+ break;
+ case snd_soc_dapm_src:
+ ret = sof_widget_load_src(scomp, index, swidget, tw, &reply);
+ break;
+ case snd_soc_dapm_asrc:
+ ret = sof_widget_load_asrc(scomp, index, swidget, tw, &reply);
+ break;
+ case snd_soc_dapm_siggen:
+ ret = sof_widget_load_siggen(scomp, index, swidget, tw, &reply);
+ break;
+ case snd_soc_dapm_effect:
+ ret = sof_widget_load_process(scomp, index, swidget, tw, &reply);
+ break;
+ case snd_soc_dapm_mux:
+ case snd_soc_dapm_demux:
+ ret = sof_widget_load_mux(scomp, index, swidget, tw, &reply);
+ break;
+ case snd_soc_dapm_switch:
+ case snd_soc_dapm_dai_link:
+ case snd_soc_dapm_kcontrol:
+ default:
+ dev_dbg(scomp->dev, "widget type %d name %s not handled\n", swidget->id, tw->name);
+ break;
+ }
+
+ /* check IPC reply */
+ if (ret < 0 || reply.rhdr.error < 0) {
+ dev_err(scomp->dev,
+ "error: DSP failed to add widget id %d type %d name : %s stream %s reply %d\n",
+ tw->shift, swidget->id, tw->name,
+ strnlen(tw->sname, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) > 0
+ ? tw->sname : "none", reply.rhdr.error);
+ kfree(swidget);
+ return ret;
+ }
+
+ /* bind widget to external event */
+ if (tw->event_type) {
+ ret = sof_widget_bind_event(scomp, swidget,
+ le16_to_cpu(tw->event_type));
+ if (ret) {
+ dev_err(scomp->dev, "error: widget event binding failed\n");
+ kfree(swidget->private);
+ kfree(swidget);
+ return ret;
+ }
+ }
+
+ w->dobj.private = swidget;
+ list_add(&swidget->list, &sdev->widget_list);
+ return ret;
+}
+
+static int sof_route_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_route *sroute;
+
+ sroute = dobj->private;
+ if (!sroute)
+ return 0;
+
+ /* free sroute and its private data */
+ kfree(sroute->private);
+ list_del(&sroute->list);
+ kfree(sroute);
+
+ return 0;
+}
+
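+/*
+ * Unload a widget: release its DAI config or power down its pipeline
+ * scheduler core as appropriate, free the data attached to its kcontrols and
+ * finally free the widget itself.
+ */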
+static int sof_widget_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct snd_kcontrol_new *kc;
+ struct snd_soc_dapm_widget *widget;
+ struct sof_ipc_pipe_new *pipeline;
+ struct snd_sof_control *scontrol;
+ struct snd_sof_widget *swidget;
+ struct soc_mixer_control *sm;
+ struct soc_bytes_ext *sbe;
+ struct snd_sof_dai *dai;
+ struct soc_enum *se;
+ int ret = 0;
+ int i;
+
+ swidget = dobj->private;
+ if (!swidget)
+ return 0;
+
+ widget = swidget->widget;
+
+ switch (swidget->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ dai = swidget->private;
+
+ if (dai) {
+ /* free dai config */
+ kfree(dai->dai_config);
+ list_del(&dai->list);
+ }
+ break;
+ case snd_soc_dapm_scheduler:
+
+ /* power down the pipeline schedule core */
+ pipeline = swidget->private;
+
+ /*
+ * Runtime PM should still function normally if topology loading fails and
+ * its components are unloaded. Do not power down the primary core so that the
+ * CTX_SAVE IPC can succeed during runtime suspend.
+ */
+ if (pipeline->core == SOF_DSP_PRIMARY_CORE)
+ break;
+
+ ret = snd_sof_dsp_core_power_down(sdev, 1 << pipeline->core);
+ if (ret < 0)
+ dev_err(scomp->dev, "error: powering down pipeline schedule core %d\n",
+ pipeline->core);
+
+ /* update enabled cores mask */
+ sdev->enabled_cores_mask &= ~(1 << pipeline->core);
+
+ break;
+ default:
+ break;
+ }
+ for (i = 0; i < widget->num_kcontrols; i++) {
+ kc = &widget->kcontrol_news[i];
+ switch (dobj->widget.kcontrol_type) {
+ case SND_SOC_TPLG_TYPE_MIXER:
+ sm = (struct soc_mixer_control *)kc->private_value;
+ scontrol = sm->dobj.private;
+ if (sm->max > 1)
+ kfree(scontrol->volume_table);
+ break;
+ case SND_SOC_TPLG_TYPE_ENUM:
+ se = (struct soc_enum *)kc->private_value;
+ scontrol = se->dobj.private;
+ break;
+ case SND_SOC_TPLG_TYPE_BYTES:
+ sbe = (struct soc_bytes_ext *)kc->private_value;
+ scontrol = sbe->dobj.private;
+ break;
+ default:
+ dev_warn(scomp->dev, "unsupported kcontrol_type\n");
+ goto out;
+ }
+ kfree(scontrol->control_data);
+ list_del(&scontrol->list);
+ kfree(scontrol);
+ }
+
+out:
+ /* free private value */
+ kfree(swidget->private);
+
+ /* remove and free swidget object */
+ list_del(&swidget->list);
+ kfree(swidget);
+
+ return ret;
+}
+
+/*
+ * DAI HW configuration.
+ */
+
+/* FE DAI - used for any driver specific init */
+static int sof_dai_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_driver *dai_drv,
+ struct snd_soc_tplg_pcm *pcm, struct snd_soc_dai *dai)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_stream_caps *caps;
+ struct snd_soc_tplg_private *private = &pcm->priv;
+ struct snd_sof_pcm *spcm;
+ int stream;
+ int ret;
+
+ /* nothing to do for BEs atm */
+ if (!pcm)
+ return 0;
+
+ spcm = kzalloc(sizeof(*spcm), GFP_KERNEL);
+ if (!spcm)
+ return -ENOMEM;
+
+ spcm->scomp = scomp;
+
+ for_each_pcm_streams(stream) {
+ spcm->stream[stream].comp_id = COMP_ID_UNASSIGNED;
+ INIT_WORK(&spcm->stream[stream].period_elapsed_work,
+ snd_sof_pcm_period_elapsed_work);
+ }
+
+ spcm->pcm = *pcm;
+ dev_dbg(scomp->dev, "tplg: load pcm %s\n", pcm->dai_name);
+
+ dai_drv->dobj.private = spcm;
+ list_add(&spcm->list, &sdev->pcm_list);
+
+ ret = sof_parse_tokens(scomp, spcm, stream_tokens,
+ ARRAY_SIZE(stream_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret) {
+ dev_err(scomp->dev, "error: parse stream tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ /* do we need to allocate playback PCM DMA pages */
+ if (!spcm->pcm.playback)
+ goto capture;
+
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+
+ dev_vdbg(scomp->dev, "tplg: pcm %s stream tokens: playback d0i3:%d\n",
+ spcm->pcm.pcm_name, spcm->stream[stream].d0i3_compatible);
+
+ caps = &spcm->pcm.caps[stream];
+
+ /* allocate playback page table buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
+ PAGE_SIZE, &spcm->stream[stream].page_table);
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: can't alloc page table for %s %d\n",
+ caps->name, ret);
+
+ return ret;
+ }
+
+ /* bind pcm to host comp */
+ ret = spcm_bind(scomp, spcm, stream);
+ if (ret) {
+ dev_err(scomp->dev,
+ "error: can't bind pcm to host\n");
+ goto free_playback_tables;
+ }
+
+capture:
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+
+ /* do we need to allocate capture PCM DMA pages */
+ if (!spcm->pcm.capture)
+ return ret;
+
+ dev_vdbg(scomp->dev, "tplg: pcm %s stream tokens: capture d0i3:%d\n",
+ spcm->pcm.pcm_name, spcm->stream[stream].d0i3_compatible);
+
+ caps = &spcm->pcm.caps[stream];
+
+ /* allocate capture page table buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
+ PAGE_SIZE, &spcm->stream[stream].page_table);
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: can't alloc page table for %s %d\n",
+ caps->name, ret);
+ goto free_playback_tables;
+ }
+
+ /* bind pcm to host comp */
+ ret = spcm_bind(scomp, spcm, stream);
+ if (ret) {
+ dev_err(scomp->dev,
+ "error: can't bind pcm to host\n");
+ snd_dma_free_pages(&spcm->stream[stream].page_table);
+ goto free_playback_tables;
+ }
+
+ return ret;
+
+free_playback_tables:
+ if (spcm->pcm.playback)
+ snd_dma_free_pages(&spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].page_table);
+
+ return ret;
+}
+
+static int sof_dai_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_pcm *spcm = dobj->private;
+
+ /* free PCM DMA pages */
+ if (spcm->pcm.playback)
+ snd_dma_free_pages(&spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].page_table);
+
+ if (spcm->pcm.capture)
+ snd_dma_free_pages(&spcm->stream[SNDRV_PCM_STREAM_CAPTURE].page_table);
+
+ /* remove from list and free spcm */
+ list_del(&spcm->list);
+ kfree(spcm);
+
+ return 0;
+}
+
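+/*
+ * Translate the topology hw_config clock master/slave and inversion settings
+ * into the corresponding SOF_DAI_FMT_* bits of the DAI config format field.
+ */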
+static void sof_dai_set_format(struct snd_soc_tplg_hw_config *hw_config,
+ struct sof_ipc_dai_config *config)
+{
+ /* clock directions wrt codec */
+ if (hw_config->bclk_master == SND_SOC_TPLG_BCLK_CM) {
+ /* codec is bclk master */
+ if (hw_config->fsync_master == SND_SOC_TPLG_FSYNC_CM)
+ config->format |= SOF_DAI_FMT_CBM_CFM;
+ else
+ config->format |= SOF_DAI_FMT_CBM_CFS;
+ } else {
+ /* codec is bclk slave */
+ if (hw_config->fsync_master == SND_SOC_TPLG_FSYNC_CM)
+ config->format |= SOF_DAI_FMT_CBS_CFM;
+ else
+ config->format |= SOF_DAI_FMT_CBS_CFS;
+ }
+
+ /* inverted clocks ? */
+ if (hw_config->invert_bclk) {
+ if (hw_config->invert_fsync)
+ config->format |= SOF_DAI_FMT_IB_IF;
+ else
+ config->format |= SOF_DAI_FMT_IB_NF;
+ } else {
+ if (hw_config->invert_fsync)
+ config->format |= SOF_DAI_FMT_NB_IF;
+ else
+ config->format |= SOF_DAI_FMT_NB_NF;
+ }
+}
+
+/*
+ * Send the IPC and set the same config for all DAIs whose name matches the
+ * link name. Note that for now the function can only be used when all DAIs
+ * share a common DAI config.
+ */
+static int sof_set_dai_config(struct snd_sof_dev *sdev, u32 size,
+ struct snd_soc_dai_link *link,
+ struct sof_ipc_dai_config *config)
+{
+ struct snd_sof_dai *dai;
+ int found = 0;
+
+ list_for_each_entry(dai, &sdev->dai_list, list) {
+ if (!dai->name)
+ continue;
+
+ if (strcmp(link->name, dai->name) == 0) {
+ struct sof_ipc_reply reply;
+ int ret;
+
+ /*
+ * the same dai config will be applied to all DAIs in
+ * the same dai link. We have to ensure that the ipc
+ * dai config's dai_index match to the component's
+ * dai_index.
+ */
+ config->dai_index = dai->comp_dai.dai_index;
+
+ /* send message to DSP */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ config->hdr.cmd, config, size,
+ &reply, sizeof(reply));
+
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DAI config for %s index %d\n",
+ dai->name, config->dai_index);
+ return ret;
+ }
+ dai->dai_config = kmemdup(config, size, GFP_KERNEL);
+ if (!dai->dai_config)
+ return -ENOMEM;
+
+ /* set cpu_dai_name */
+ dai->cpu_dai_name = link->cpus->dai_name;
+
+ found = 1;
+ }
+ }
+
+ /*
+ * The machine driver may define a DAI link with both playback and capture
+ * enabled, while the DAI link in the topology may support both, only one,
+ * or neither of them. Print a warning to notify the user if no matching
+ * DAI is found.
+ */
+ if (!found) {
+ dev_warn(sdev->dev, "warning: failed to find dai for dai link %s\n",
+ link->name);
+ }
+
+ return 0;
+}
+
+static int sof_link_ssp_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg,
+ struct snd_soc_tplg_hw_config *hw_config,
+ struct sof_ipc_dai_config *config)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* handle master/slave and inverted clocks */
+ sof_dai_set_format(hw_config, config);
+
+ /* init IPC */
+ memset(&config->ssp, 0, sizeof(struct sof_ipc_dai_ssp_params));
+ config->hdr.size = size;
+
+ ret = sof_parse_tokens(scomp, &config->ssp, ssp_tokens,
+ ARRAY_SIZE(ssp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse ssp tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ config->ssp.mclk_rate = le32_to_cpu(hw_config->mclk_rate);
+ config->ssp.bclk_rate = le32_to_cpu(hw_config->bclk_rate);
+ config->ssp.fsync_rate = le32_to_cpu(hw_config->fsync_rate);
+ config->ssp.tdm_slots = le32_to_cpu(hw_config->tdm_slots);
+ config->ssp.tdm_slot_width = le32_to_cpu(hw_config->tdm_slot_width);
+ config->ssp.mclk_direction = hw_config->mclk_direction;
+ config->ssp.rx_slots = le32_to_cpu(hw_config->rx_slots);
+ config->ssp.tx_slots = le32_to_cpu(hw_config->tx_slots);
+
+ dev_dbg(scomp->dev, "tplg: config SSP%d fmt 0x%x mclk %d bclk %d fclk %d width (%d)%d slots %d mclk id %d quirks %d\n",
+ config->dai_index, config->format,
+ config->ssp.mclk_rate, config->ssp.bclk_rate,
+ config->ssp.fsync_rate, config->ssp.sample_valid_bits,
+ config->ssp.tdm_slot_width, config->ssp.tdm_slots,
+ config->ssp.mclk_id, config->ssp.quirks);
+
+ /* validate SSP fsync rate and channel count */
+ if (config->ssp.fsync_rate < 8000 || config->ssp.fsync_rate > 192000) {
+ dev_err(scomp->dev, "error: invalid fsync rate for SSP%d\n",
+ config->dai_index);
+ return -EINVAL;
+ }
+
+ if (config->ssp.tdm_slots < 1 || config->ssp.tdm_slots > 8) {
+ dev_err(scomp->dev, "error: invalid channel count for SSP%d\n",
+ config->dai_index);
+ return -EINVAL;
+ }
+
+ /* set config for all DAI's with name matching the link name */
+ ret = sof_set_dai_config(sdev, size, link, config);
+ if (ret < 0)
+ dev_err(scomp->dev, "error: failed to save DAI config for SSP%d\n",
+ config->dai_index);
+
+ return ret;
+}
+
+static int sof_link_sai_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg,
+ struct snd_soc_tplg_hw_config *hw_config,
+ struct sof_ipc_dai_config *config)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* handle master/slave and inverted clocks */
+ sof_dai_set_format(hw_config, config);
+
+ /* init IPC */
+ memset(&config->sai, 0, sizeof(struct sof_ipc_dai_sai_params));
+ config->hdr.size = size;
+
+ ret = sof_parse_tokens(scomp, &config->sai, sai_tokens,
+ ARRAY_SIZE(sai_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse sai tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ config->sai.mclk_rate = le32_to_cpu(hw_config->mclk_rate);
+ config->sai.bclk_rate = le32_to_cpu(hw_config->bclk_rate);
+ config->sai.fsync_rate = le32_to_cpu(hw_config->fsync_rate);
+ config->sai.mclk_direction = hw_config->mclk_direction;
+
+ config->sai.tdm_slots = le32_to_cpu(hw_config->tdm_slots);
+ config->sai.tdm_slot_width = le32_to_cpu(hw_config->tdm_slot_width);
+ config->sai.rx_slots = le32_to_cpu(hw_config->rx_slots);
+ config->sai.tx_slots = le32_to_cpu(hw_config->tx_slots);
+
+ dev_info(scomp->dev,
+ "tplg: config SAI%d fmt 0x%x mclk %d width %d slots %d mclk id %d\n",
+ config->dai_index, config->format,
+ config->sai.mclk_rate, config->sai.tdm_slot_width,
+ config->sai.tdm_slots, config->sai.mclk_id);
+
+ if (config->sai.tdm_slots < 1 || config->sai.tdm_slots > 8) {
+ dev_err(scomp->dev, "error: invalid channel count for SAI%d\n",
+ config->dai_index);
+ return -EINVAL;
+ }
+
+ /* set config for all DAI's with name matching the link name */
+ ret = sof_set_dai_config(sdev, size, link, config);
+ if (ret < 0)
+ dev_err(scomp->dev, "error: failed to save DAI config for SAI%d\n",
+ config->dai_index);
+
+ return ret;
+}
+
+static int sof_link_esai_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg,
+ struct snd_soc_tplg_hw_config *hw_config,
+ struct sof_ipc_dai_config *config)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* handle master/slave and inverted clocks */
+ sof_dai_set_format(hw_config, config);
+
+ /* init IPC */
+ memset(&config->esai, 0, sizeof(struct sof_ipc_dai_esai_params));
+ config->hdr.size = size;
+
+ ret = sof_parse_tokens(scomp, &config->esai, esai_tokens,
+ ARRAY_SIZE(esai_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse esai tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ config->esai.mclk_rate = le32_to_cpu(hw_config->mclk_rate);
+ config->esai.bclk_rate = le32_to_cpu(hw_config->bclk_rate);
+ config->esai.fsync_rate = le32_to_cpu(hw_config->fsync_rate);
+ config->esai.mclk_direction = hw_config->mclk_direction;
+ config->esai.tdm_slots = le32_to_cpu(hw_config->tdm_slots);
+ config->esai.tdm_slot_width = le32_to_cpu(hw_config->tdm_slot_width);
+ config->esai.rx_slots = le32_to_cpu(hw_config->rx_slots);
+ config->esai.tx_slots = le32_to_cpu(hw_config->tx_slots);
+
+ dev_info(scomp->dev,
+ "tplg: config ESAI%d fmt 0x%x mclk %d width %d slots %d mclk id %d\n",
+ config->dai_index, config->format,
+ config->esai.mclk_rate, config->esai.tdm_slot_width,
+ config->esai.tdm_slots, config->esai.mclk_id);
+
+ if (config->esai.tdm_slots < 1 || config->esai.tdm_slots > 8) {
+ dev_err(scomp->dev, "error: invalid channel count for ESAI%d\n",
+ config->dai_index);
+ return -EINVAL;
+ }
+
+ /* set config for all DAI's with name matching the link name */
+ ret = sof_set_dai_config(sdev, size, link, config);
+ if (ret < 0)
+ dev_err(scomp->dev, "error: failed to save DAI config for ESAI%d\n",
+ config->dai_index);
+
+ return ret;
+}
+
+static int sof_link_dmic_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg,
+ struct snd_soc_tplg_hw_config *hw_config,
+ struct sof_ipc_dai_config *config)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
+ struct sof_ipc_fw_version *v = &ready->version;
+ size_t size = sizeof(*config);
+ int ret, j;
+
+ /* Ensure the entire DMIC config struct is zeros */
+ memset(&config->dmic, 0, sizeof(struct sof_ipc_dai_dmic_params));
+
+ /* get DMIC tokens */
+ ret = sof_parse_tokens(scomp, &config->dmic, dmic_tokens,
+ ARRAY_SIZE(dmic_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse dmic tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ /* get DMIC PDM tokens */
+ ret = sof_parse_token_sets(scomp, &config->dmic.pdm[0], dmic_pdm_tokens,
+ ARRAY_SIZE(dmic_pdm_tokens), private->array,
+ le32_to_cpu(private->size),
+ config->dmic.num_pdm_active,
+ sizeof(struct sof_ipc_dai_dmic_pdm_ctrl));
+
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse dmic pdm tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ /* set IPC header size */
+ config->hdr.size = size;
+
+ /* debug messages */
+ dev_dbg(scomp->dev, "tplg: config DMIC%d driver version %d\n",
+ config->dai_index, config->dmic.driver_ipc_version);
+ dev_dbg(scomp->dev, "pdmclk_min %d pdm_clkmax %d duty_min %hd\n",
+ config->dmic.pdmclk_min, config->dmic.pdmclk_max,
+ config->dmic.duty_min);
+ dev_dbg(scomp->dev, "duty_max %hd fifo_fs %d num_pdms active %d\n",
+ config->dmic.duty_max, config->dmic.fifo_fs,
+ config->dmic.num_pdm_active);
+ dev_dbg(scomp->dev, "fifo word length %hd\n", config->dmic.fifo_bits);
+
+ for (j = 0; j < config->dmic.num_pdm_active; j++) {
+ dev_dbg(scomp->dev, "pdm %hd mic a %hd mic b %hd\n",
+ config->dmic.pdm[j].id,
+ config->dmic.pdm[j].enable_mic_a,
+ config->dmic.pdm[j].enable_mic_b);
+ dev_dbg(scomp->dev, "pdm %hd polarity a %hd polarity b %hd\n",
+ config->dmic.pdm[j].id,
+ config->dmic.pdm[j].polarity_mic_a,
+ config->dmic.pdm[j].polarity_mic_b);
+ dev_dbg(scomp->dev, "pdm %hd clk_edge %hd skew %hd\n",
+ config->dmic.pdm[j].id,
+ config->dmic.pdm[j].clk_edge,
+ config->dmic.pdm[j].skew);
+ }
+
+ /*
+ * Handle fifo_bits_b for backwards compatibility: it is deprecated
+ * since firmware ABI version 3.0.1 but older firmware still needs it.
+ */
+ if (SOF_ABI_VER(v->major, v->minor, v->micro) < SOF_ABI_VER(3, 0, 1))
+ config->dmic.fifo_bits_b = config->dmic.fifo_bits;
+
+ /* set config for all DAI's with name matching the link name */
+ ret = sof_set_dai_config(sdev, size, link, config);
+ if (ret < 0)
+ dev_err(scomp->dev, "error: failed to save DAI config for DMIC%d\n",
+ config->dai_index);
+
+ return ret;
+}
+
+static int sof_link_hda_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg,
+ struct snd_soc_tplg_hw_config *hw_config,
+ struct sof_ipc_dai_config *config)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ struct snd_soc_dai *dai;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* init IPC */
+ memset(&config->hda, 0, sizeof(struct sof_ipc_dai_hda_params));
+ config->hdr.size = size;
+
+ /* get any bespoke DAI tokens */
+ ret = sof_parse_tokens(scomp, &config->hda, hda_tokens,
+ ARRAY_SIZE(hda_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse hda tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ dev_dbg(scomp->dev, "HDA config rate %d channels %d\n",
+ config->hda.rate, config->hda.channels);
+
+ dai = snd_soc_find_dai(link->cpus);
+ if (!dai) {
+ dev_err(scomp->dev, "error: failed to find dai %s in %s",
+ link->cpus->dai_name, __func__);
+ return -EINVAL;
+ }
+
+ config->hda.link_dma_ch = DMA_CHAN_INVALID;
+
+ ret = sof_set_dai_config(sdev, size, link, config);
+ if (ret < 0)
+ dev_err(scomp->dev, "error: failed to process hda dai link %s",
+ link->name);
+
+ return ret;
+}
+
+static int sof_link_alh_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg,
+ struct snd_soc_tplg_hw_config *hw_config,
+ struct sof_ipc_dai_config *config)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ u32 size = sizeof(*config);
+ int ret;
+
+ ret = sof_parse_tokens(scomp, &config->alh, alh_tokens,
+ ARRAY_SIZE(alh_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse alh tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ /* init IPC */
+ config->hdr.size = size;
+
+ /* set config for all DAI's with name matching the link name */
+ ret = sof_set_dai_config(sdev, size, link, config);
+ if (ret < 0)
+ dev_err(scomp->dev, "error: failed to save DAI config for ALH %d\n",
+ config->dai_index);
+
+ return ret;
+}
+
+/* DAI link - used for any driver specific init */
+static int sof_link_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg)
+{
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ struct sof_ipc_dai_config config;
+ struct snd_soc_tplg_hw_config *hw_config;
+ int num_hw_configs;
+ int ret;
+ int i = 0;
+
+ if (!link->platforms) {
+ dev_err(scomp->dev, "error: no platforms\n");
+ return -EINVAL;
+ }
+ link->platforms->name = dev_name(scomp->dev);
+
+ /*
+ * Set nonatomic property for FE dai links as their trigger action
+ * involves IPCs.
+ */
+ if (!link->no_pcm) {
+ link->nonatomic = true;
+
+ /*
+ * Set the default trigger order for all links; exceptions to
+ * the rule are handled in sof_pcm_dai_link_fixup().
+ * For playback the sequence is: start FE, start BE, stop BE,
+ * stop FE; for capture the sequence is inverted: start BE,
+ * start FE, stop FE, stop BE.
+ */
+ link->trigger[SNDRV_PCM_STREAM_PLAYBACK] =
+ SND_SOC_DPCM_TRIGGER_PRE;
+ link->trigger[SNDRV_PCM_STREAM_CAPTURE] =
+ SND_SOC_DPCM_TRIGGER_POST;
+
+ /* nothing more to do for FE dai links */
+ return 0;
+ }
+
+ /* check we have some tokens - we need at least DAI type */
+ if (le32_to_cpu(private->size) == 0) {
+ dev_err(scomp->dev, "error: expected tokens for DAI, none found\n");
+ return -EINVAL;
+ }
+
+ /* Send BE DAI link configurations to DSP */
+ memset(&config, 0, sizeof(config));
+
+ /* get any common DAI tokens */
+ ret = sof_parse_tokens(scomp, &config, dai_link_tokens,
+ ARRAY_SIZE(dai_link_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse link tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ /*
+ * DAI links are expected to have at least 1 hw_config.
+ * But some older topologies might have no hw_config for HDA dai links.
+ */
+ num_hw_configs = le32_to_cpu(cfg->num_hw_configs);
+ if (!num_hw_configs) {
+ if (config.type != SOF_DAI_INTEL_HDA) {
+ dev_err(scomp->dev, "error: unexpected DAI config count %d!\n",
+ le32_to_cpu(cfg->num_hw_configs));
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(scomp->dev, "tplg: %d hw_configs found, default id: %d!\n",
+ num_hw_configs, le32_to_cpu(cfg->default_hw_config_id));
+
+ for (i = 0; i < num_hw_configs; i++) {
+ if (cfg->hw_config[i].id == cfg->default_hw_config_id)
+ break;
+ }
+
+ if (i == num_hw_configs) {
+ dev_err(scomp->dev, "error: default hw_config id: %d not found!\n",
+ le32_to_cpu(cfg->default_hw_config_id));
+ return -EINVAL;
+ }
+ }
+
+ /* configure dai IPC message */
+ hw_config = &cfg->hw_config[i];
+
+ config.hdr.cmd = SOF_IPC_GLB_DAI_MSG | SOF_IPC_DAI_CONFIG;
+ config.format = le32_to_cpu(hw_config->fmt);
+
+ /* now load DAI specific data and send IPC - type comes from token */
+ switch (config.type) {
+ case SOF_DAI_INTEL_SSP:
+ ret = sof_link_ssp_load(scomp, index, link, cfg, hw_config,
+ &config);
+ break;
+ case SOF_DAI_INTEL_DMIC:
+ ret = sof_link_dmic_load(scomp, index, link, cfg, hw_config,
+ &config);
+ break;
+ case SOF_DAI_INTEL_HDA:
+ ret = sof_link_hda_load(scomp, index, link, cfg, hw_config,
+ &config);
+ break;
+ case SOF_DAI_INTEL_ALH:
+ ret = sof_link_alh_load(scomp, index, link, cfg, hw_config,
+ &config);
+ break;
+ case SOF_DAI_IMX_SAI:
+ ret = sof_link_sai_load(scomp, index, link, cfg, hw_config,
+ &config);
+ break;
+ case SOF_DAI_IMX_ESAI:
+ ret = sof_link_esai_load(scomp, index, link, cfg, hw_config,
+ &config);
+ break;
+ default:
+ dev_err(scomp->dev, "error: invalid DAI type %d\n",
+ config.type);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int sof_link_hda_unload(struct snd_sof_dev *sdev,
+ struct snd_soc_dai_link *link)
+{
+ struct snd_soc_dai *dai;
+
+ dai = snd_soc_find_dai(link->cpus);
+ if (!dai) {
+ dev_err(sdev->dev, "error: failed to find dai %s in %s",
+ link->cpus->dai_name, __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sof_link_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_dai_link *link =
+ container_of(dobj, struct snd_soc_dai_link, dobj);
+
+ struct snd_sof_dai *sof_dai;
+ int ret = 0;
+
+ /* only BE link is loaded by sof */
+ if (!link->no_pcm)
+ return 0;
+
+ list_for_each_entry(sof_dai, &sdev->dai_list, list) {
+ if (!sof_dai->name)
+ continue;
+
+ if (strcmp(link->name, sof_dai->name) == 0)
+ goto found;
+ }
+
+ dev_err(scomp->dev, "error: failed to find dai %s in %s",
+ link->name, __func__);
+ return -EINVAL;
+found:
+
+ switch (sof_dai->dai_config->type) {
+ case SOF_DAI_INTEL_SSP:
+ case SOF_DAI_INTEL_DMIC:
+ case SOF_DAI_INTEL_ALH:
+ case SOF_DAI_IMX_SAI:
+ case SOF_DAI_IMX_ESAI:
+ /* no resources need to be released for the cases above */
+ break;
+ case SOF_DAI_INTEL_HDA:
+ ret = sof_link_hda_unload(sdev, link);
+ break;
+ default:
+ dev_err(scomp->dev, "error: invalid DAI type %d\n",
+ sof_dai->dai_config->type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* DAPM route - used for any driver specific init */
+static int sof_route_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dapm_route *route)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_pipe_comp_connect *connect;
+ struct snd_sof_widget *source_swidget, *sink_swidget;
+ struct snd_soc_dobj *dobj = &route->dobj;
+ struct snd_sof_route *sroute;
+ struct sof_ipc_reply reply;
+ int ret = 0;
+
+ /* allocate memory for sroute and connect */
+ sroute = kzalloc(sizeof(*sroute), GFP_KERNEL);
+ if (!sroute)
+ return -ENOMEM;
+
+ sroute->scomp = scomp;
+
+ connect = kzalloc(sizeof(*connect), GFP_KERNEL);
+ if (!connect) {
+ kfree(sroute);
+ return -ENOMEM;
+ }
+
+ connect->hdr.size = sizeof(*connect);
+ connect->hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_CONNECT;
+
+ dev_dbg(scomp->dev, "sink %s control %s source %s\n",
+ route->sink, route->control ? route->control : "none",
+ route->source);
+
+ /* source component */
+ source_swidget = snd_sof_find_swidget(scomp, (char *)route->source);
+ if (!source_swidget) {
+ dev_err(scomp->dev, "error: source %s not found\n",
+ route->source);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Virtual widgets of type output/out_drv may be added in topology
+ * for compatibility. These are not handled by the FW.
+ * So, don't send routes whose source/sink widget is of such types
+ * to the DSP.
+ */
+ if (source_swidget->id == snd_soc_dapm_out_drv ||
+ source_swidget->id == snd_soc_dapm_output)
+ goto err;
+
+ connect->source_id = source_swidget->comp_id;
+
+ /* sink component */
+ sink_swidget = snd_sof_find_swidget(scomp, (char *)route->sink);
+ if (!sink_swidget) {
+ dev_err(scomp->dev, "error: sink %s not found\n",
+ route->sink);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Don't send routes whose sink widget is of type
+ * output or out_drv to the DSP
+ */
+ if (sink_swidget->id == snd_soc_dapm_out_drv ||
+ sink_swidget->id == snd_soc_dapm_output)
+ goto err;
+
+ connect->sink_id = sink_swidget->comp_id;
+
+ /*
+ * For virtual routes, neither the sink nor the source is a
+ * buffer. The FW only supports routes where a buffer is linked
+ * to a component, so do not send the connection to the FW when
+ * neither end is a buffer.
+ */
+ if (source_swidget->id != snd_soc_dapm_buffer &&
+ sink_swidget->id != snd_soc_dapm_buffer) {
+ dev_dbg(scomp->dev, "warning: neither source component %s nor sink component %s is of buffer type, ignoring link\n",
+ route->source, route->sink);
+ goto err;
+ } else {
+ ret = sof_ipc_tx_message(sdev->ipc,
+ connect->hdr.cmd,
+ connect, sizeof(*connect),
+ &reply, sizeof(reply));
+
+ /* check IPC return value */
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: failed to add route sink %s control %s source %s\n",
+ route->sink,
+ route->control ? route->control : "none",
+ route->source);
+ goto err;
+ }
+
+ /* check IPC reply */
+ if (reply.error < 0) {
+ dev_err(scomp->dev, "error: DSP failed to add route sink %s control %s source %s result %d\n",
+ route->sink,
+ route->control ? route->control : "none",
+ route->source, reply.error);
+ ret = reply.error;
+ goto err;
+ }
+
+ sroute->route = route;
+ dobj->private = sroute;
+ sroute->private = connect;
+
+ /* add route to route list */
+ list_add(&sroute->list, &sdev->route_list);
+
+ return 0;
+ }
+
+err:
+ kfree(connect);
+ kfree(sroute);
+ return ret;
+}
+
+/*
+ * Read the initial values of SOF kcontrols from the DSP and cache them
+ * in scontrol->control_data.
+ */
+static int snd_sof_cache_kcontrol_val(struct snd_soc_component *scomp)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_control *scontrol = NULL;
+ int ipc_cmd, ctrl_type;
+ int ret = 0;
+
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list) {
+
+ /* notify DSP of kcontrol values */
+ switch (scontrol->cmd) {
+ case SOF_CTRL_CMD_VOLUME:
+ case SOF_CTRL_CMD_ENUM:
+ case SOF_CTRL_CMD_SWITCH:
+ ipc_cmd = SOF_IPC_COMP_GET_VALUE;
+ ctrl_type = SOF_CTRL_TYPE_VALUE_CHAN_GET;
+ break;
+ case SOF_CTRL_CMD_BINARY:
+ ipc_cmd = SOF_IPC_COMP_GET_DATA;
+ ctrl_type = SOF_CTRL_TYPE_DATA_GET;
+ break;
+ default:
+ dev_err(scomp->dev,
+ "error: Invalid scontrol->cmd: %d\n",
+ scontrol->cmd);
+ return -EINVAL;
+ }
+ ret = snd_sof_ipc_set_get_comp_data(scontrol,
+ ipc_cmd, ctrl_type,
+ scontrol->cmd,
+ false);
+ if (ret < 0) {
+ dev_warn(scomp->dev,
+ "error: kcontrol value get for widget: %d\n",
+ scontrol->comp_id);
+ }
+ }
+
+ return ret;
+}
+
+int snd_sof_complete_pipeline(struct device *dev,
+ struct snd_sof_widget *swidget)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct sof_ipc_pipe_ready ready;
+ struct sof_ipc_reply reply;
+ int ret;
+
+ dev_dbg(dev, "tplg: complete pipeline %s id %d\n",
+ swidget->widget->name, swidget->comp_id);
+
+ memset(&ready, 0, sizeof(ready));
+ ready.hdr.size = sizeof(ready);
+ ready.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_PIPE_COMPLETE;
+ ready.comp_id = swidget->comp_id;
+
+ ret = sof_ipc_tx_message(sdev->ipc,
+ ready.hdr.cmd, &ready, sizeof(ready), &reply,
+ sizeof(reply));
+ if (ret < 0)
+ return ret;
+ return 1;
+}
+
+/* completion - called at completion of firmware loading */
+static void sof_complete(struct snd_soc_component *scomp)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_widget *swidget;
+
+ /* some widget types require completion notification */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (swidget->complete)
+ continue;
+
+ switch (swidget->id) {
+ case snd_soc_dapm_scheduler:
+ swidget->complete =
+ snd_sof_complete_pipeline(scomp->dev, swidget);
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * cache the initial values of SOF kcontrols by reading DSP values over
+ * IPC. They may be overwritten by alsa-mixer after boot.
+ */
+ snd_sof_cache_kcontrol_val(scomp);
+}
+
+/* manifest - optional to inform component of manifest */
+static int sof_manifest(struct snd_soc_component *scomp, int index,
+ struct snd_soc_tplg_manifest *man)
+{
+ u32 size;
+ u32 abi_version;
+
+ size = le32_to_cpu(man->priv.size);
+
+ /* backward compatible with tplg without ABI info */
+ if (!size) {
+ dev_dbg(scomp->dev, "No topology ABI info\n");
+ return 0;
+ }
+
+ if (size != SOF_TPLG_ABI_SIZE) {
+ dev_err(scomp->dev, "error: invalid topology ABI size\n");
+ return -EINVAL;
+ }
+
+ dev_info(scomp->dev,
+ "Topology: ABI %d:%d:%d Kernel ABI %d:%d:%d\n",
+ man->priv.data[0], man->priv.data[1],
+ man->priv.data[2], SOF_ABI_MAJOR, SOF_ABI_MINOR,
+ SOF_ABI_PATCH);
+
+ abi_version = SOF_ABI_VER(man->priv.data[0],
+ man->priv.data[1],
+ man->priv.data[2]);
+
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION, abi_version)) {
+ dev_err(scomp->dev, "error: incompatible topology ABI version\n");
+ return -EINVAL;
+ }
+
+ if (abi_version > SOF_ABI_VERSION) {
+ if (!IS_ENABLED(CONFIG_SND_SOC_SOF_STRICT_ABI_CHECKS)) {
+ dev_warn(scomp->dev, "warn: topology ABI is more recent than kernel\n");
+ } else {
+ dev_err(scomp->dev, "error: topology ABI is more recent than kernel\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
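The check above treats two packed version words as incompatible when their major fields differ. The standalone sketch below uses illustrative field widths (8-bit major, 12-bit minor, 12-bit patch), which are an assumption and not necessarily the exact SOF_ABI_VER() layout, to show how such a packed compare works:

#include <stdint.h>
#include <stdio.h>

/* Illustrative packing: 8-bit major, 12-bit minor, 12-bit patch. */
#define ABI_VER(maj, min, pat)	(((uint32_t)(maj) << 24) | \
				 ((uint32_t)(min) << 12) | (uint32_t)(pat))
#define ABI_MAJOR(ver)		(((ver) >> 24) & 0xff)

/* Two versions are incompatible when their major numbers differ. */
static int abi_incompatible(uint32_t a, uint32_t b)
{
	return ABI_MAJOR(a) != ABI_MAJOR(b);
}

int main(void)
{
	uint32_t kernel = ABI_VER(3, 17, 0);

	printf("3.16.0: %s\n", abi_incompatible(kernel, ABI_VER(3, 16, 0)) ?
	       "incompatible" : "compatible");
	printf("4.0.0: %s\n", abi_incompatible(kernel, ABI_VER(4, 0, 0)) ?
	       "incompatible" : "compatible");
	return 0;
}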
+
+/* vendor specific kcontrol handlers available for binding */
+static const struct snd_soc_tplg_kcontrol_ops sof_io_ops[] = {
+ {SOF_TPLG_KCTL_VOL_ID, snd_sof_volume_get, snd_sof_volume_put},
+ {SOF_TPLG_KCTL_BYTES_ID, snd_sof_bytes_get, snd_sof_bytes_put},
+ {SOF_TPLG_KCTL_ENUM_ID, snd_sof_enum_get, snd_sof_enum_put},
+ {SOF_TPLG_KCTL_SWITCH_ID, snd_sof_switch_get, snd_sof_switch_put},
+};
+
+/* vendor specific bytes ext handlers available for binding */
+static const struct snd_soc_tplg_bytes_ext_ops sof_bytes_ext_ops[] = {
+ {SOF_TPLG_KCTL_BYTES_ID, snd_sof_bytes_ext_get, snd_sof_bytes_ext_put},
+ {SOF_TPLG_KCTL_BYTES_VOLATILE_RO, snd_sof_bytes_ext_volatile_get},
+};
+
+static struct snd_soc_tplg_ops sof_tplg_ops = {
+ /* external kcontrol init - used for any driver specific init */
+ .control_load = sof_control_load,
+ .control_unload = sof_control_unload,
+
+ /* DAPM route - used for any driver specific init */
+ .dapm_route_load = sof_route_load,
+ .dapm_route_unload = sof_route_unload,
+
+ /* external widget init - used for any driver specific init */
+ /* .widget_load is not currently used */
+ .widget_ready = sof_widget_ready,
+ .widget_unload = sof_widget_unload,
+
+ /* FE DAI - used for any driver specific init */
+ .dai_load = sof_dai_load,
+ .dai_unload = sof_dai_unload,
+
+ /* DAI link - used for any driver specific init */
+ .link_load = sof_link_load,
+ .link_unload = sof_link_unload,
+
+ /* completion - called at completion of firmware loading */
+ .complete = sof_complete,
+
+ /* manifest - optional to inform component of manifest */
+ .manifest = sof_manifest,
+
+ /* vendor specific kcontrol handlers available for binding */
+ .io_ops = sof_io_ops,
+ .io_ops_count = ARRAY_SIZE(sof_io_ops),
+
+ /* vendor specific bytes ext handlers available for binding */
+ .bytes_ext_ops = sof_bytes_ext_ops,
+ .bytes_ext_ops_count = ARRAY_SIZE(sof_bytes_ext_ops),
+};
+
+int snd_sof_load_topology(struct snd_soc_component *scomp, const char *file)
+{
+ const struct firmware *fw;
+ int ret;
+
+ dev_dbg(scomp->dev, "loading topology:%s\n", file);
+
+ ret = request_firmware(&fw, file, scomp->dev);
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: tplg request firmware %s failed err: %d\n",
+ file, ret);
+ return ret;
+ }
+
+ ret = snd_soc_tplg_component_load(scomp,
+ &sof_tplg_ops, fw,
+ SND_SOC_TPLG_INDEX_ALL);
+ if (ret < 0) {
+ dev_err(scomp->dev, "error: tplg component load failed %d\n",
+ ret);
+ ret = -EINVAL;
+ }
+
+ release_firmware(fw);
+ return ret;
+}
+EXPORT_SYMBOL(snd_sof_load_topology);
diff --git a/sound/soc/sof/trace.c b/sound/soc/sof/trace.c
new file mode 100644
index 000000000..69889241a
--- /dev/null
+++ b/sound/soc/sof/trace.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/debugfs.h>
+#include <linux/sched/signal.h>
+#include "sof-priv.h"
+#include "ops.h"
+
+static size_t sof_trace_avail(struct snd_sof_dev *sdev,
+ loff_t pos, size_t buffer_size)
+{
+ loff_t host_offset = READ_ONCE(sdev->host_offset);
+
+ /*
+ * If the host offset is less than the local read position, the write
+ * pointer of the host DMA buffer has wrapped. Return the trace data
+ * from the read position up to the end of the buffer first.
+ */
+ if (host_offset < pos)
+ return buffer_size - pos;
+
+ /* If trace data is already available, there is no need to wait. */
+ if (host_offset > pos)
+ return host_offset - pos;
+
+ return 0;
+}
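A minimal standalone sketch of the availability rule documented above, with plain size_t arithmetic standing in for the driver's loff_t and READ_ONCE() handling:

#include <stddef.h>
#include <stdio.h>

/*
 * Availability in a ring buffer written by the DSP (host_offset) and
 * read by debugfs (pos): if the writer has wrapped past the reader,
 * drain up to the end of the buffer first; otherwise read up to the
 * writer.
 */
static size_t trace_avail(size_t host_offset, size_t pos, size_t buffer_size)
{
	if (host_offset < pos)
		return buffer_size - pos;	/* writer wrapped: read the tail */
	if (host_offset > pos)
		return host_offset - pos;	/* read up to the writer */
	return 0;				/* caught up */
}

int main(void)
{
	printf("%zu\n", trace_avail(512, 3072, 4096));	/* 1024: tail of the buffer */
	printf("%zu\n", trace_avail(3072, 512, 4096));	/* 2560: up to the writer */
	return 0;
}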
+
+static size_t sof_wait_trace_avail(struct snd_sof_dev *sdev,
+ loff_t pos, size_t buffer_size)
+{
+ wait_queue_entry_t wait;
+ size_t ret = sof_trace_avail(sdev, pos, buffer_size);
+
+ /* data immediately available */
+ if (ret)
+ return ret;
+
+ if (!sdev->dtrace_is_enabled && sdev->dtrace_draining) {
+ /*
+ * tracing has ended and all traces have been
+ * read by client, return EOF
+ */
+ sdev->dtrace_draining = false;
+ return 0;
+ }
+
+ /* wait for available trace data from FW */
+ init_waitqueue_entry(&wait, current);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&sdev->trace_sleep, &wait);
+
+ if (!signal_pending(current)) {
+ /* set timeout to max value, no error code */
+ schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+ }
+ remove_wait_queue(&sdev->trace_sleep, &wait);
+
+ return sof_trace_avail(sdev, pos, buffer_size);
+}
+
+static ssize_t sof_dfsentry_trace_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ unsigned long rem;
+ loff_t lpos = *ppos;
+ size_t avail, buffer_size = dfse->size;
+ u64 lpos_64;
+
+ /* make sure we know about any failures on the DSP side */
+ sdev->dtrace_error = false;
+
+ /* check pos and count */
+ if (lpos < 0)
+ return -EINVAL;
+ if (!count)
+ return 0;
+
+ /* check for buffer wrap and count overflow */
+ lpos_64 = lpos;
+ lpos = do_div(lpos_64, buffer_size);
+
+ if (count > buffer_size - lpos) /* min() not used to avoid sparse warnings */
+ count = buffer_size - lpos;
+
+ /* get available count based on current host offset */
+ avail = sof_wait_trace_avail(sdev, lpos, buffer_size);
+ if (sdev->dtrace_error) {
+ dev_err(sdev->dev, "error: trace IO error\n");
+ return -EIO;
+ }
+
+ /* make sure count is <= avail */
+ count = avail > count ? count : avail;
+
+ /* copy available trace data to debugfs */
+ rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
+ if (rem)
+ return -EFAULT;
+
+ *ppos += count;
+
+ /* move debugfs reading position */
+ return count;
+}
+
+static int sof_dfsentry_trace_release(struct inode *inode, struct file *file)
+{
+ struct snd_sof_dfsentry *dfse = inode->i_private;
+ struct snd_sof_dev *sdev = dfse->sdev;
+
+ /* avoid duplicate traces at next open */
+ if (!sdev->dtrace_is_enabled)
+ sdev->host_offset = 0;
+
+ return 0;
+}
+
+static const struct file_operations sof_dfs_trace_fops = {
+ .open = simple_open,
+ .read = sof_dfsentry_trace_read,
+ .llseek = default_llseek,
+ .release = sof_dfsentry_trace_release,
+};
+
+static int trace_debugfs_create(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ if (!sdev)
+ return -EINVAL;
+
+ dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
+ if (!dfse)
+ return -ENOMEM;
+
+ dfse->type = SOF_DFSENTRY_TYPE_BUF;
+ dfse->buf = sdev->dmatb.area;
+ dfse->size = sdev->dmatb.bytes;
+ dfse->sdev = sdev;
+
+ debugfs_create_file("trace", 0444, sdev->debugfs_root, dfse,
+ &sof_dfs_trace_fops);
+
+ return 0;
+}
+
+int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
+ struct sof_ipc_fw_version *v = &ready->version;
+ struct sof_ipc_dma_trace_params_ext params;
+ struct sof_ipc_reply ipc_reply;
+ int ret;
+
+ if (!sdev->dtrace_is_supported)
+ return 0;
+
+ if (sdev->dtrace_is_enabled || !sdev->dma_trace_pages)
+ return -EINVAL;
+
+ /* set IPC parameters */
+ params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG;
+ /* PARAMS_EXT is only supported from ABI 3.7.0 onwards */
+ if (v->abi_version >= SOF_ABI_VER(3, 7, 0)) {
+ params.hdr.size = sizeof(struct sof_ipc_dma_trace_params_ext);
+ params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS_EXT;
+ params.timestamp_ns = ktime_get(); /* in nanoseconds */
+ } else {
+ params.hdr.size = sizeof(struct sof_ipc_dma_trace_params);
+ params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS;
+ }
+ params.buffer.phy_addr = sdev->dmatp.addr;
+ params.buffer.size = sdev->dmatb.bytes;
+ params.buffer.pages = sdev->dma_trace_pages;
+ params.stream_tag = 0;
+
+ sdev->host_offset = 0;
+ sdev->dtrace_draining = false;
+
+ ret = snd_sof_dma_trace_init(sdev, &params.stream_tag);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: fail in snd_sof_dma_trace_init %d\n", ret);
+ return ret;
+ }
+ dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ params.hdr.cmd, &params, sizeof(params),
+ &ipc_reply, sizeof(ipc_reply));
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: can't set params for DMA for trace %d\n", ret);
+ goto trace_release;
+ }
+
+ ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_START);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: snd_sof_dma_trace_trigger: start: %d\n", ret);
+ goto trace_release;
+ }
+
+ sdev->dtrace_is_enabled = true;
+
+ return 0;
+
+trace_release:
+ snd_sof_dma_trace_release(sdev);
+ return ret;
+}
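The function above selects a legacy or extended parameter layout from the firmware ABI version and encodes the choice in the header size and command bits, so the firmware knows which struct it received. The hedged sketch below shows that general pattern with made-up struct and command names, not the real SOF IPC definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative IPC header: every message carries its own size and command. */
struct ipc_hdr {
	uint32_t size;
	uint32_t cmd;
};

struct trace_params {		/* legacy layout */
	struct ipc_hdr hdr;
	uint32_t stream_tag;
};

struct trace_params_ext {	/* extended layout: legacy fields plus timestamp */
	struct ipc_hdr hdr;
	uint32_t stream_tag;
	uint64_t timestamp_ns;
};

#define CMD_TRACE_PARAMS	0x01
#define CMD_TRACE_PARAMS_EXT	0x02

/* Build the message; only hdr.size bytes would be sent to the firmware. */
static size_t build_params(uint32_t fw_abi, uint32_t abi_with_ext,
			   struct trace_params_ext *p)
{
	memset(p, 0, sizeof(*p));
	if (fw_abi >= abi_with_ext) {
		p->hdr.size = sizeof(struct trace_params_ext);
		p->hdr.cmd = CMD_TRACE_PARAMS_EXT;
		p->timestamp_ns = 123456789;	/* stand-in for ktime_get() */
	} else {
		p->hdr.size = sizeof(struct trace_params);
		p->hdr.cmd = CMD_TRACE_PARAMS;
	}
	return p->hdr.size;
}

int main(void)
{
	struct trace_params_ext p;

	printf("old FW: %zu bytes\n", build_params(1, 2, &p));
	printf("new FW: %zu bytes\n", build_params(3, 2, &p));
	return 0;
}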
+
+int snd_sof_init_trace(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ if (!sdev->dtrace_is_supported)
+ return 0;
+
+ /* set false before starting initialization */
+ sdev->dtrace_is_enabled = false;
+
+ /* allocate trace page table buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
+ PAGE_SIZE, &sdev->dmatp);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: can't alloc page table for trace %d\n", ret);
+ return ret;
+ }
+
+ /* allocate trace data buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
+ DMA_BUF_SIZE_FOR_TRACE, &sdev->dmatb);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: can't alloc buffer for trace %d\n", ret);
+ goto page_err;
+ }
+
+ /* create compressed page table for audio firmware */
+ ret = snd_sof_create_page_table(sdev->dev, &sdev->dmatb,
+ sdev->dmatp.area, sdev->dmatb.bytes);
+ if (ret < 0)
+ goto table_err;
+
+ sdev->dma_trace_pages = ret;
+ dev_dbg(sdev->dev, "dma_trace_pages: %d\n", sdev->dma_trace_pages);
+
+ if (sdev->first_boot) {
+ ret = trace_debugfs_create(sdev);
+ if (ret < 0)
+ goto table_err;
+ }
+
+ init_waitqueue_head(&sdev->trace_sleep);
+
+ ret = snd_sof_init_trace_ipc(sdev);
+ if (ret < 0)
+ goto table_err;
+
+ return 0;
+table_err:
+ sdev->dma_trace_pages = 0;
+ snd_dma_free_pages(&sdev->dmatb);
+page_err:
+ snd_dma_free_pages(&sdev->dmatp);
+ return ret;
+}
+EXPORT_SYMBOL(snd_sof_init_trace);
+
+int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
+ struct sof_ipc_dma_trace_posn *posn)
+{
+ if (!sdev->dtrace_is_supported)
+ return 0;
+
+ if (sdev->dtrace_is_enabled && sdev->host_offset != posn->host_offset) {
+ sdev->host_offset = posn->host_offset;
+ wake_up(&sdev->trace_sleep);
+ }
+
+ if (posn->overflow != 0)
+ dev_err(sdev->dev,
+ "error: DSP trace buffer overflow %u bytes. Total messages %d\n",
+ posn->overflow, posn->messages);
+
+ return 0;
+}
+
+/* an error has occurred within the DSP that prevents further trace */
+void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
+{
+ if (!sdev->dtrace_is_supported)
+ return;
+
+ if (sdev->dtrace_is_enabled) {
+ dev_err(sdev->dev, "error: waking up any trace sleepers\n");
+ sdev->dtrace_error = true;
+ wake_up(&sdev->trace_sleep);
+ }
+}
+EXPORT_SYMBOL(snd_sof_trace_notify_for_error);
+
+void snd_sof_release_trace(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ if (!sdev->dtrace_is_supported || !sdev->dtrace_is_enabled)
+ return;
+
+ ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: snd_sof_dma_trace_trigger: stop: %d\n", ret);
+
+ ret = snd_sof_dma_trace_release(sdev);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: fail in snd_sof_dma_trace_release %d\n", ret);
+
+ sdev->dtrace_is_enabled = false;
+ sdev->dtrace_draining = true;
+ wake_up(&sdev->trace_sleep);
+}
+EXPORT_SYMBOL(snd_sof_release_trace);
+
+void snd_sof_free_trace(struct snd_sof_dev *sdev)
+{
+ if (!sdev->dtrace_is_supported)
+ return;
+
+ snd_sof_release_trace(sdev);
+
+ if (sdev->dma_trace_pages) {
+ snd_dma_free_pages(&sdev->dmatb);
+ snd_dma_free_pages(&sdev->dmatp);
+ sdev->dma_trace_pages = 0;
+ }
+}
+EXPORT_SYMBOL(snd_sof_free_trace);
diff --git a/sound/soc/sof/utils.c b/sound/soc/sof/utils.c
new file mode 100644
index 000000000..5539d3afb
--- /dev/null
+++ b/sound/soc/sof/utils.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Keyon Jie <yang.jie@linux.intel.com>
+//
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/platform_device.h>
+#include <asm/unaligned.h>
+#include <sound/soc.h>
+#include <sound/sof.h>
+#include "sof-priv.h"
+
+/*
+ * Register IO
+ *
+ * The sof_io_xyz() wrappers are typically referenced in snd_sof_dsp_ops
+ * structures and cannot be inlined.
+ */
+
+void sof_io_write(struct snd_sof_dev *sdev, void __iomem *addr, u32 value)
+{
+ writel(value, addr);
+}
+EXPORT_SYMBOL(sof_io_write);
+
+u32 sof_io_read(struct snd_sof_dev *sdev, void __iomem *addr)
+{
+ return readl(addr);
+}
+EXPORT_SYMBOL(sof_io_read);
+
+void sof_io_write64(struct snd_sof_dev *sdev, void __iomem *addr, u64 value)
+{
+ writeq(value, addr);
+}
+EXPORT_SYMBOL(sof_io_write64);
+
+u64 sof_io_read64(struct snd_sof_dev *sdev, void __iomem *addr)
+{
+ return readq(addr);
+}
+EXPORT_SYMBOL(sof_io_read64);
+
+/*
+ * IPC Mailbox IO
+ */
+
+void sof_mailbox_write(struct snd_sof_dev *sdev, u32 offset,
+ void *message, size_t bytes)
+{
+ void __iomem *dest = sdev->bar[sdev->mailbox_bar] + offset;
+
+ memcpy_toio(dest, message, bytes);
+}
+EXPORT_SYMBOL(sof_mailbox_write);
+
+void sof_mailbox_read(struct snd_sof_dev *sdev, u32 offset,
+ void *message, size_t bytes)
+{
+ void __iomem *src = sdev->bar[sdev->mailbox_bar] + offset;
+
+ memcpy_fromio(message, src, bytes);
+}
+EXPORT_SYMBOL(sof_mailbox_read);
+
+/*
+ * Memory copy.
+ */
+
+void sof_block_write(struct snd_sof_dev *sdev, u32 bar, u32 offset, void *src,
+ size_t size)
+{
+ void __iomem *dest = sdev->bar[bar] + offset;
+ const u8 *src_byte = src;
+ u32 affected_mask;
+ u32 tmp;
+ int m, n;
+
+ m = size / 4;
+ n = size % 4;
+
+ /* __iowrite32_copy() copies 32-bit words, so the byte size is divided by 4 */
+ __iowrite32_copy(dest, src, m);
+
+ if (n) {
+ affected_mask = (1 << (8 * n)) - 1;
+
+ /* read the full 32-bit word at dest first, modify only the
+ * affected bytes and write the word back, so that the
+ * unaffected bytes are left unchanged
+ */
+ tmp = ioread32(dest + m * 4);
+ tmp &= ~affected_mask;
+
+ tmp |= *(u32 *)(src_byte + m * 4) & affected_mask;
+ iowrite32(tmp, dest + m * 4);
+ }
+}
+EXPORT_SYMBOL(sof_block_write);
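A userspace sketch of the trailing-byte handling above, assuming little-endian byte order as the driver does; ordinary memory stands in for the __iomem window, and only the remaining source bytes are read instead of a full word:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Copy whole 32-bit words, then merge the remaining 1-3 bytes into the
 * last destination word with a read-modify-write so its other bytes
 * are preserved (little-endian host assumed).
 */
static void block_write(uint8_t *dest, const uint8_t *src, size_t size)
{
	size_t m = size / 4, n = size % 4;
	uint32_t tmp, last = 0, affected_mask;

	memcpy(dest, src, m * 4);			/* whole words */

	if (n) {
		affected_mask = (1u << (8 * n)) - 1;	/* low n bytes */

		memcpy(&tmp, dest + m * 4, 4);		/* read current word */
		memcpy(&last, src + m * 4, n);		/* only n source bytes */
		tmp = (tmp & ~affected_mask) | (last & affected_mask);
		memcpy(dest + m * 4, &tmp, 4);		/* write back */
	}
}

int main(void)
{
	uint8_t dest[8] = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa };
	const uint8_t src[6] = { 1, 2, 3, 4, 5, 6 };

	block_write(dest, src, sizeof(src));
	for (int i = 0; i < 8; i++)
		printf("%02x ", dest[i]);	/* 01 02 03 04 05 06 aa aa */
	printf("\n");
	return 0;
}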
+
+void sof_block_read(struct snd_sof_dev *sdev, u32 bar, u32 offset, void *dest,
+ size_t size)
+{
+ void __iomem *src = sdev->bar[bar] + offset;
+
+ memcpy_fromio(dest, src, size);
+}
+EXPORT_SYMBOL(sof_block_read);
+
+/*
+ * Generic buffer page table creation.
+ * Take each physical page address and drop the least significant unused
+ * bits (based on PAGE_SIZE). Then pack the valid page address bits into
+ * the compressed page table.
+ */
+
+int snd_sof_create_page_table(struct device *dev,
+ struct snd_dma_buffer *dmab,
+ unsigned char *page_table, size_t size)
+{
+ int i, pages;
+
+ pages = snd_sgbuf_aligned_pages(size);
+
+ dev_dbg(dev, "generating page table for %p size 0x%zx pages %d\n",
+ dmab->area, size, pages);
+
+ for (i = 0; i < pages; i++) {
+ /*
+ * The number of valid address bits for each page is 20.
+ * idx determines the byte position within page_table
+ * where the current page's address is stored
+ * in the compressed page_table.
+ * This can be calculated by multiplying the page number by 2.5.
+ */
+ u32 idx = (5 * i) >> 1;
+ u32 pfn = snd_sgbuf_get_addr(dmab, i * PAGE_SIZE) >> PAGE_SHIFT;
+ u8 *pg_table;
+
+ dev_vdbg(dev, "pfn i %i idx %d pfn %x\n", i, idx, pfn);
+
+ pg_table = (u8 *)(page_table + idx);
+
+ /*
+ * pagetable compression:
+ * byte 0 byte 1 byte 2 byte 3 byte 4 byte 5
+ * ___________pfn 0__________ __________pfn 1___________ _pfn 2...
+ * .... .... .... .... .... .... .... .... .... .... ....
+ * It is created by:
+ * 1. set current location to 0, PFN index i to 0
+ * 2. put pfn[i] at current location in Little Endian byte order
+ * 3. calculate an intermediate value as
+ * x = (pfn[i+1] << 4) | (pfn[i] & 0xf)
+ * 4. put x at offset (current location + 2) in LE byte order
+ * 5. increment current location by 5 bytes, increment i by 2
+ * 6. continue to (2)
+ */
+ if (i & 1)
+ put_unaligned_le32((pg_table[0] & 0xf) | pfn << 4,
+ pg_table);
+ else
+ put_unaligned_le32(pfn, pg_table);
+ }
+
+ return pages;
+}
+EXPORT_SYMBOL(snd_sof_create_page_table);
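The packing rule described in the comment above can be exercised in isolation. In the sketch below, put_le32() is a stand-in for the kernel's put_unaligned_le32(); it packs two 20-bit PFNs into five bytes:

#include <stdint.h>
#include <stdio.h>

/* Little-endian 32-bit store at a possibly unaligned address. */
static void put_le32(uint32_t val, uint8_t *p)
{
	p[0] = val & 0xff;
	p[1] = (val >> 8) & 0xff;
	p[2] = (val >> 16) & 0xff;
	p[3] = (val >> 24) & 0xff;
}

/*
 * Pack 20-bit page frame numbers into the compressed layout: each pair
 * of PFNs shares 5 bytes, with PFN i+1 starting in the high nibble of
 * the third byte.
 */
static void pack_pfns(const uint32_t *pfn, int pages, uint8_t *table)
{
	for (int i = 0; i < pages; i++) {
		uint8_t *p = table + (5 * i) / 2;

		if (i & 1)
			put_le32((p[0] & 0xf) | (pfn[i] << 4), p);
		else
			put_le32(pfn[i], p);
	}
}

int main(void)
{
	uint32_t pfn[2] = { 0x12345, 0xabcde };
	uint8_t table[8] = { 0 };

	pack_pfns(pfn, 2, table);
	for (int i = 0; i < 5; i++)
		printf("%02x ", table[i]);	/* 45 23 e1 cd ab */
	printf("\n");
	return 0;
}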
diff --git a/sound/soc/sof/xtensa/Kconfig b/sound/soc/sof/xtensa/Kconfig
new file mode 100644
index 000000000..defd6d3dc
--- /dev/null
+++ b/sound/soc/sof/xtensa/Kconfig
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SND_SOC_SOF_XTENSA
+ tristate
diff --git a/sound/soc/sof/xtensa/Makefile b/sound/soc/sof/xtensa/Makefile
new file mode 100644
index 000000000..b8376ea04
--- /dev/null
+++ b/sound/soc/sof/xtensa/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+snd-sof-xtensa-dsp-objs := core.o
+
+obj-$(CONFIG_SND_SOC_SOF_XTENSA) += snd-sof-xtensa-dsp.o
diff --git a/sound/soc/sof/xtensa/core.c b/sound/soc/sof/xtensa/core.c
new file mode 100644
index 000000000..bbb9a2282
--- /dev/null
+++ b/sound/soc/sof/xtensa/core.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Pan Xiuli <xiuli.pan@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include "../sof-priv.h"
+
+struct xtensa_exception_cause {
+ u32 id;
+ const char *msg;
+ const char *description;
+};
+
+/*
+ * From 4.4.1.5 table 4-64 Exception Causes of Xtensa
+ * Instruction Set Architecture (ISA) Reference Manual
+ */
+static const struct xtensa_exception_cause xtensa_exception_causes[] = {
+ {0, "IllegalInstructionCause", "Illegal instruction"},
+ {1, "SyscallCause", "SYSCALL instruction"},
+ {2, "InstructionFetchErrorCause",
+ "Processor internal physical address or data error during instruction fetch"},
+ {3, "LoadStoreErrorCause",
+ "Processor internal physical address or data error during load or store"},
+ {4, "Level1InterruptCause",
+ "Level-1 interrupt as indicated by set level-1 bits in the INTERRUPT register"},
+ {5, "AllocaCause",
+ "MOVSP instruction, if caller’s registers are not in the register file"},
+ {6, "IntegerDivideByZeroCause",
+ "QUOS, QUOU, REMS, or REMU divisor operand is zero"},
+ {8, "PrivilegedCause",
+ "Attempt to execute a privileged operation when CRING ? 0"},
+ {9, "LoadStoreAlignmentCause", "Load or store to an unaligned address"},
+ {12, "InstrPIFDataErrorCause",
+ "PIF data error during instruction fetch"},
+ {13, "LoadStorePIFDataErrorCause",
+ "Synchronous PIF data error during LoadStore access"},
+ {14, "InstrPIFAddrErrorCause",
+ "PIF address error during instruction fetch"},
+ {15, "LoadStorePIFAddrErrorCause",
+ "Synchronous PIF address error during LoadStore access"},
+ {16, "InstTLBMissCause", "Error during Instruction TLB refill"},
+ {17, "InstTLBMultiHitCause",
+ "Multiple instruction TLB entries matched"},
+ {18, "InstFetchPrivilegeCause",
+ "An instruction fetch referenced a virtual address at a ring level less than CRING"},
+ {20, "InstFetchProhibitedCause",
+ "An instruction fetch referenced a page mapped with an attribute that does not permit instruction fetch"},
+ {24, "LoadStoreTLBMissCause",
+ "Error during TLB refill for a load or store"},
+ {25, "LoadStoreTLBMultiHitCause",
+ "Multiple TLB entries matched for a load or store"},
+ {26, "LoadStorePrivilegeCause",
+ "A load or store referenced a virtual address at a ring level less than CRING"},
+ {28, "LoadProhibitedCause",
+ "A load referenced a page mapped with an attribute that does not permit loads"},
+ {32, "Coprocessor0Disabled",
+ "Coprocessor 0 instruction when cp0 disabled"},
+ {33, "Coprocessor1Disabled",
+ "Coprocessor 1 instruction when cp1 disabled"},
+ {34, "Coprocessor2Disabled",
+ "Coprocessor 2 instruction when cp2 disabled"},
+ {35, "Coprocessor3Disabled",
+ "Coprocessor 3 instruction when cp3 disabled"},
+ {36, "Coprocessor4Disabled",
+ "Coprocessor 4 instruction when cp4 disabled"},
+ {37, "Coprocessor5Disabled",
+ "Coprocessor 5 instruction when cp5 disabled"},
+ {38, "Coprocessor6Disabled",
+ "Coprocessor 6 instruction when cp6 disabled"},
+ {39, "Coprocessor7Disabled",
+ "Coprocessor 7 instruction when cp7 disabled"},
+};
+
+/* only need xtensa atm */
+static void xtensa_dsp_oops(struct snd_sof_dev *sdev, void *oops)
+{
+ struct sof_ipc_dsp_oops_xtensa *xoops = oops;
+ int i;
+
+ dev_err(sdev->dev, "error: DSP Firmware Oops\n");
+ for (i = 0; i < ARRAY_SIZE(xtensa_exception_causes); i++) {
+ if (xtensa_exception_causes[i].id == xoops->exccause) {
+ dev_err(sdev->dev, "error: Exception Cause: %s, %s\n",
+ xtensa_exception_causes[i].msg,
+ xtensa_exception_causes[i].description);
+ }
+ }
+ dev_err(sdev->dev, "EXCCAUSE 0x%8.8x EXCVADDR 0x%8.8x PS 0x%8.8x SAR 0x%8.8x\n",
+ xoops->exccause, xoops->excvaddr, xoops->ps, xoops->sar);
+ dev_err(sdev->dev, "EPC1 0x%8.8x EPC2 0x%8.8x EPC3 0x%8.8x EPC4 0x%8.8x",
+ xoops->epc1, xoops->epc2, xoops->epc3, xoops->epc4);
+ dev_err(sdev->dev, "EPC5 0x%8.8x EPC6 0x%8.8x EPC7 0x%8.8x DEPC 0x%8.8x",
+ xoops->epc5, xoops->epc6, xoops->epc7, xoops->depc);
+ dev_err(sdev->dev, "EPS2 0x%8.8x EPS3 0x%8.8x EPS4 0x%8.8x EPS5 0x%8.8x",
+ xoops->eps2, xoops->eps3, xoops->eps4, xoops->eps5);
+ dev_err(sdev->dev, "EPS6 0x%8.8x EPS7 0x%8.8x INTENABL 0x%8.8x INTERRU 0x%8.8x",
+ xoops->eps6, xoops->eps7, xoops->intenable, xoops->interrupt);
+}
+
+static void xtensa_stack(struct snd_sof_dev *sdev, void *oops, u32 *stack,
+ u32 stack_words)
+{
+ struct sof_ipc_dsp_oops_xtensa *xoops = oops;
+ u32 stack_ptr = xoops->plat_hdr.stackptr;
+ /* 4 words of 8 hex chars + 3 spaces + 1 terminating NUL */
+ unsigned char buf[4 * 8 + 3 + 1];
+ int i;
+
+ dev_err(sdev->dev, "stack dump from 0x%8.8x\n", stack_ptr);
+
+ /*
+ * example output:
+ * 0x0049fbb0: 8000f2d0 0049fc00 6f6c6c61 00632e63
+ */
+ for (i = 0; i < stack_words; i += 4) {
+ hex_dump_to_buffer(stack + i, 16, 16, 4,
+ buf, sizeof(buf), false);
+ dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i * 4, buf);
+ }
+}
+
+const struct sof_arch_ops sof_xtensa_arch_ops = {
+ .dsp_oops = xtensa_dsp_oops,
+ .dsp_stack = xtensa_stack,
+};
+EXPORT_SYMBOL_NS(sof_xtensa_arch_ops, SND_SOC_SOF_XTENSA);
+
+MODULE_DESCRIPTION("SOF Xtensa DSP support");
+MODULE_LICENSE("Dual BSD/GPL");