From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- sound/soc/intel/avs/Makefile | 15 + sound/soc/intel/avs/apl.c | 254 +++++ sound/soc/intel/avs/avs.h | 349 +++++++ sound/soc/intel/avs/board_selection.c | 502 ++++++++++ sound/soc/intel/avs/boards/Kconfig | 121 +++ sound/soc/intel/avs/boards/Makefile | 27 + sound/soc/intel/avs/boards/da7219.c | 303 ++++++ sound/soc/intel/avs/boards/dmic.c | 93 ++ sound/soc/intel/avs/boards/hdaudio.c | 298 ++++++ sound/soc/intel/avs/boards/i2s_test.c | 180 ++++ sound/soc/intel/avs/boards/max98357a.c | 176 ++++ sound/soc/intel/avs/boards/max98373.c | 239 +++++ sound/soc/intel/avs/boards/nau8825.c | 353 +++++++ sound/soc/intel/avs/boards/rt274.c | 310 ++++++ sound/soc/intel/avs/boards/rt286.c | 281 ++++++ sound/soc/intel/avs/boards/rt298.c | 301 ++++++ sound/soc/intel/avs/boards/rt5682.c | 340 +++++++ sound/soc/intel/avs/boards/ssm4567.c | 240 +++++ sound/soc/intel/avs/cldma.c | 316 +++++++ sound/soc/intel/avs/cldma.h | 29 + sound/soc/intel/avs/core.c | 715 ++++++++++++++ sound/soc/intel/avs/dsp.c | 330 +++++++ sound/soc/intel/avs/ipc.c | 628 ++++++++++++ sound/soc/intel/avs/loader.c | 692 ++++++++++++++ sound/soc/intel/avs/messages.c | 734 +++++++++++++++ sound/soc/intel/avs/messages.h | 803 ++++++++++++++++ sound/soc/intel/avs/path.c | 1009 ++++++++++++++++++++ sound/soc/intel/avs/path.h | 72 ++ sound/soc/intel/avs/pcm.c | 1193 +++++++++++++++++++++++ sound/soc/intel/avs/registers.h | 83 ++ sound/soc/intel/avs/skl.c | 125 +++ sound/soc/intel/avs/topology.c | 1625 ++++++++++++++++++++++++++++++++ sound/soc/intel/avs/topology.h | 194 ++++ sound/soc/intel/avs/trace.c | 33 + sound/soc/intel/avs/trace.h | 154 +++ sound/soc/intel/avs/utils.c | 324 +++++++ 36 files changed, 13441 insertions(+) create mode 100644 sound/soc/intel/avs/Makefile create mode 100644 sound/soc/intel/avs/apl.c create mode 100644 sound/soc/intel/avs/avs.h create mode 100644 sound/soc/intel/avs/board_selection.c create mode 100644 sound/soc/intel/avs/boards/Kconfig create mode 100644 sound/soc/intel/avs/boards/Makefile create mode 100644 sound/soc/intel/avs/boards/da7219.c create mode 100644 sound/soc/intel/avs/boards/dmic.c create mode 100644 sound/soc/intel/avs/boards/hdaudio.c create mode 100644 sound/soc/intel/avs/boards/i2s_test.c create mode 100644 sound/soc/intel/avs/boards/max98357a.c create mode 100644 sound/soc/intel/avs/boards/max98373.c create mode 100644 sound/soc/intel/avs/boards/nau8825.c create mode 100644 sound/soc/intel/avs/boards/rt274.c create mode 100644 sound/soc/intel/avs/boards/rt286.c create mode 100644 sound/soc/intel/avs/boards/rt298.c create mode 100644 sound/soc/intel/avs/boards/rt5682.c create mode 100644 sound/soc/intel/avs/boards/ssm4567.c create mode 100644 sound/soc/intel/avs/cldma.c create mode 100644 sound/soc/intel/avs/cldma.h create mode 100644 sound/soc/intel/avs/core.c create mode 100644 sound/soc/intel/avs/dsp.c create mode 100644 sound/soc/intel/avs/ipc.c create mode 100644 sound/soc/intel/avs/loader.c create mode 100644 sound/soc/intel/avs/messages.c create mode 100644 sound/soc/intel/avs/messages.h create mode 100644 sound/soc/intel/avs/path.c create mode 100644 sound/soc/intel/avs/path.h create mode 100644 sound/soc/intel/avs/pcm.c create mode 100644 sound/soc/intel/avs/registers.h create mode 100644 sound/soc/intel/avs/skl.c create mode 100644 sound/soc/intel/avs/topology.c create mode 100644 
sound/soc/intel/avs/topology.h create mode 100644 sound/soc/intel/avs/trace.c create mode 100644 sound/soc/intel/avs/trace.h create mode 100644 sound/soc/intel/avs/utils.c (limited to 'sound/soc/intel/avs') diff --git a/sound/soc/intel/avs/Makefile b/sound/soc/intel/avs/Makefile new file mode 100644 index 000000000..919212825 --- /dev/null +++ b/sound/soc/intel/avs/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-only + +snd-soc-avs-objs := dsp.o ipc.o messages.o utils.o core.o loader.o \ + topology.o path.o pcm.o board_selection.o +snd-soc-avs-objs += cldma.o +snd-soc-avs-objs += skl.o apl.o + +snd-soc-avs-objs += trace.o +# tell define_trace.h where to find the trace header +CFLAGS_trace.o := -I$(src) + +obj-$(CONFIG_SND_SOC_INTEL_AVS) += snd-soc-avs.o + +# Machine support +obj-$(CONFIG_SND_SOC) += boards/ diff --git a/sound/soc/intel/avs/apl.c b/sound/soc/intel/avs/apl.c new file mode 100644 index 000000000..f366478a8 --- /dev/null +++ b/sound/soc/intel/avs/apl.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include "avs.h" +#include "messages.h" +#include "path.h" +#include "topology.h" + +static int apl_enable_logs(struct avs_dev *adev, enum avs_log_enable enable, u32 aging_period, + u32 fifo_full_period, unsigned long resource_mask, u32 *priorities) +{ + struct apl_log_state_info *info; + u32 size, num_cores = adev->hw_cfg.dsp_cores; + int ret, i; + + if (fls_long(resource_mask) > num_cores) + return -EINVAL; + size = struct_size(info, logs_core, num_cores); + info = kzalloc(size, GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->aging_timer_period = aging_period; + info->fifo_full_timer_period = fifo_full_period; + info->core_mask = resource_mask; + if (enable) + for_each_set_bit(i, &resource_mask, num_cores) { + info->logs_core[i].enable = enable; + info->logs_core[i].min_priority = *priorities++; + } + else + for_each_set_bit(i, &resource_mask, num_cores) + info->logs_core[i].enable = enable; + + ret = avs_ipc_set_enable_logs(adev, (u8 *)info, size); + kfree(info); + if (ret) + return AVS_IPC_RET(ret); + + return 0; +} + +static int apl_log_buffer_status(struct avs_dev *adev, union avs_notify_msg *msg) +{ + struct apl_log_buffer_layout layout; + unsigned long flags; + void __iomem *addr, *buf; + + addr = avs_log_buffer_addr(adev, msg->log.core); + if (!addr) + return -ENXIO; + + memcpy_fromio(&layout, addr, sizeof(layout)); + + spin_lock_irqsave(&adev->dbg.trace_lock, flags); + if (!kfifo_initialized(&adev->dbg.trace_fifo)) + /* consume the logs regardless of consumer presence */ + goto update_read_ptr; + + buf = apl_log_payload_addr(addr); + + if (layout.read_ptr > layout.write_ptr) { + __kfifo_fromio_locked(&adev->dbg.trace_fifo, buf + layout.read_ptr, + apl_log_payload_size(adev) - layout.read_ptr, + &adev->dbg.fifo_lock); + layout.read_ptr = 0; + } + __kfifo_fromio_locked(&adev->dbg.trace_fifo, buf + layout.read_ptr, + layout.write_ptr - layout.read_ptr, &adev->dbg.fifo_lock); + + wake_up(&adev->dbg.trace_waitq); + +update_read_ptr: + spin_unlock_irqrestore(&adev->dbg.trace_lock, flags); + writel(layout.write_ptr, addr); + return 0; +} + +static int apl_wait_log_entry(struct avs_dev *adev, u32 core, struct apl_log_buffer_layout *layout) +{ + unsigned long timeout; + void __iomem *addr; + + addr = avs_log_buffer_addr(adev, core); + if (!addr) + return -ENXIO; + + timeout = jiffies + 
msecs_to_jiffies(10); + + do { + memcpy_fromio(layout, addr, sizeof(*layout)); + if (layout->read_ptr != layout->write_ptr) + return 0; + usleep_range(500, 1000); + } while (!time_after(jiffies, timeout)); + + return -ETIMEDOUT; +} + +/* reads log header and tests its type */ +#define apl_is_entry_stackdump(addr) ((readl(addr) >> 30) & 0x1) + +static int apl_coredump(struct avs_dev *adev, union avs_notify_msg *msg) +{ + struct apl_log_buffer_layout layout; + void __iomem *addr, *buf; + size_t dump_size; + u16 offset = 0; + u8 *dump, *pos; + + dump_size = AVS_FW_REGS_SIZE + msg->ext.coredump.stack_dump_size; + dump = vzalloc(dump_size); + if (!dump) + return -ENOMEM; + + memcpy_fromio(dump, avs_sram_addr(adev, AVS_FW_REGS_WINDOW), AVS_FW_REGS_SIZE); + + if (!msg->ext.coredump.stack_dump_size) + goto exit; + + /* Dump the registers even if an external error prevents gathering the stack. */ + addr = avs_log_buffer_addr(adev, msg->ext.coredump.core_id); + if (!addr) + goto exit; + + buf = apl_log_payload_addr(addr); + memcpy_fromio(&layout, addr, sizeof(layout)); + if (!apl_is_entry_stackdump(buf + layout.read_ptr)) { + /* + * DSP awaits the remaining logs to be + * gathered before dumping stack + */ + msg->log.core = msg->ext.coredump.core_id; + avs_dsp_op(adev, log_buffer_status, msg); + } + + pos = dump + AVS_FW_REGS_SIZE; + /* gather the stack */ + do { + u32 count; + + if (apl_wait_log_entry(adev, msg->ext.coredump.core_id, &layout)) + break; + + if (layout.read_ptr > layout.write_ptr) { + count = apl_log_payload_size(adev) - layout.read_ptr; + memcpy_fromio(pos + offset, buf + layout.read_ptr, count); + layout.read_ptr = 0; + offset += count; + } + count = layout.write_ptr - layout.read_ptr; + memcpy_fromio(pos + offset, buf + layout.read_ptr, count); + offset += count; + + /* update read pointer */ + writel(layout.write_ptr, addr); + } while (offset < msg->ext.coredump.stack_dump_size); + +exit: + dev_coredumpv(adev->dev, dump, dump_size, GFP_KERNEL); + + return 0; +} + +static bool apl_lp_streaming(struct avs_dev *adev) +{ + struct avs_path *path; + + spin_lock(&adev->path_list_lock); + /* Any gateway without buffer allocated in LP area disqualifies D0IX. */ + list_for_each_entry(path, &adev->path_list, node) { + struct avs_path_pipeline *ppl; + + list_for_each_entry(ppl, &path->ppl_list, node) { + struct avs_path_module *mod; + + list_for_each_entry(mod, &ppl->mod_list, node) { + struct avs_tplg_modcfg_ext *cfg; + + cfg = mod->template->cfg_ext; + + /* only copiers have gateway attributes */ + if (!guid_equal(&cfg->type, &AVS_COPIER_MOD_UUID)) + continue; + /* non-gateway copiers do not prevent PG */ + if (cfg->copier.dma_type == INVALID_OBJECT_ID) + continue; + + if (!mod->gtw_attrs.lp_buffer_alloc) { + spin_unlock(&adev->path_list_lock); + return false; + } + } + } + } + spin_unlock(&adev->path_list_lock); + + return true; +} + +static bool apl_d0ix_toggle(struct avs_dev *adev, struct avs_ipc_msg *tx, bool wake) +{ + /* wake in all cases */ + if (wake) + return true; + + /* + * If no pipelines are running, allow for d0ix schedule. + * If all gateways have lp=1, allow for d0ix schedule. + * If any gateway with lp=0 is allocated, abort scheduling d0ix. + * + * Note: for cAVS 1.5+ and 1.8, D0IX is LP-firmware transition, + * not the power-gating mechanism known from cAVS 2.0. + */ + return apl_lp_streaming(adev); +} + +static int apl_set_d0ix(struct avs_dev *adev, bool enable) +{ + bool streaming = false; + int ret; + + if (enable) + /* Either idle or all gateways with lp=1. 
*/ + streaming = !list_empty(&adev->path_list); + + ret = avs_ipc_set_d0ix(adev, enable, streaming); + return AVS_IPC_RET(ret); +} + +const struct avs_dsp_ops apl_dsp_ops = { + .power = avs_dsp_core_power, + .reset = avs_dsp_core_reset, + .stall = avs_dsp_core_stall, + .irq_handler = avs_dsp_irq_handler, + .irq_thread = avs_dsp_irq_thread, + .int_control = avs_dsp_interrupt_control, + .load_basefw = avs_hda_load_basefw, + .load_lib = avs_hda_load_library, + .transfer_mods = avs_hda_transfer_modules, + .enable_logs = apl_enable_logs, + .log_buffer_offset = skl_log_buffer_offset, + .log_buffer_status = apl_log_buffer_status, + .coredump = apl_coredump, + .d0ix_toggle = apl_d0ix_toggle, + .set_d0ix = apl_set_d0ix, +}; diff --git a/sound/soc/intel/avs/avs.h b/sound/soc/intel/avs/avs.h new file mode 100644 index 000000000..92e37722d --- /dev/null +++ b/sound/soc/intel/avs/avs.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright(c) 2021-2022 Intel Corporation. All rights reserved. + * + * Authors: Cezary Rojewski + * Amadeusz Slawinski + */ + +#ifndef __SOUND_SOC_INTEL_AVS_H +#define __SOUND_SOC_INTEL_AVS_H + +#include +#include +#include +#include +#include +#include +#include "messages.h" +#include "registers.h" + +struct avs_dev; +struct avs_tplg; +struct avs_tplg_library; +struct avs_soc_component; +struct avs_ipc_msg; + +/* + * struct avs_dsp_ops - Platform-specific DSP operations + * + * @power: Power on or off DSP cores + * @reset: Enter or exit reset state on DSP cores + * @stall: Stall or run DSP cores + * @irq_handler: Top half of IPC servicing + * @irq_thread: Bottom half of IPC servicing + * @int_control: Enable or disable IPC interrupts + */ +struct avs_dsp_ops { + int (* const power)(struct avs_dev *, u32, bool); + int (* const reset)(struct avs_dev *, u32, bool); + int (* const stall)(struct avs_dev *, u32, bool); + irqreturn_t (* const irq_handler)(int, void *); + irqreturn_t (* const irq_thread)(int, void *); + void (* const int_control)(struct avs_dev *, bool); + int (* const load_basefw)(struct avs_dev *, struct firmware *); + int (* const load_lib)(struct avs_dev *, struct firmware *, u32); + int (* const transfer_mods)(struct avs_dev *, bool, struct avs_module_entry *, u32); + int (* const enable_logs)(struct avs_dev *, enum avs_log_enable, u32, u32, unsigned long, + u32 *); + int (* const log_buffer_offset)(struct avs_dev *, u32); + int (* const log_buffer_status)(struct avs_dev *, union avs_notify_msg *); + int (* const coredump)(struct avs_dev *, union avs_notify_msg *); + bool (* const d0ix_toggle)(struct avs_dev *, struct avs_ipc_msg *, bool); + int (* const set_d0ix)(struct avs_dev *, bool); +}; + +#define avs_dsp_op(adev, op, ...) 
\ + ((adev)->spec->dsp_ops->op(adev, ## __VA_ARGS__)) + +extern const struct avs_dsp_ops skl_dsp_ops; +extern const struct avs_dsp_ops apl_dsp_ops; + +#define AVS_PLATATTR_CLDMA BIT_ULL(0) +#define AVS_PLATATTR_IMR BIT_ULL(1) + +#define avs_platattr_test(adev, attr) \ + ((adev)->spec->attributes & AVS_PLATATTR_##attr) + +/* Platform specific descriptor */ +struct avs_spec { + const char *name; + + const struct avs_dsp_ops *const dsp_ops; + struct avs_fw_version min_fw_version; /* anything below is rejected */ + + const u32 core_init_mask; /* used during DSP boot */ + const u64 attributes; /* bitmask of AVS_PLATATTR_* */ + const u32 sram_base_offset; + const u32 sram_window_size; + const u32 rom_status; +}; + +struct avs_fw_entry { + char *name; + const struct firmware *fw; + + struct list_head node; +}; + +struct avs_debug { + struct kfifo trace_fifo; + spinlock_t fifo_lock; /* serialize I/O for trace_fifo */ + spinlock_t trace_lock; /* serialize debug window I/O between each LOG_BUFFER_STATUS */ + wait_queue_head_t trace_waitq; + u32 aging_timer_period; + u32 fifo_full_timer_period; + u32 logged_resources; /* context dependent: core or library */ +}; + +/* + * struct avs_dev - Intel HD-Audio driver data + * + * @dev: PCI device + * @dsp_ba: DSP bar address + * @spec: platform-specific descriptor + * @fw_cfg: Firmware configuration, obtained through FW_CONFIG message + * @hw_cfg: Hardware configuration, obtained through HW_CONFIG message + * @mods_info: Available module-types, obtained through MODULES_INFO message + * @mod_idas: Module instance ID pool, one per module-type + * @modres_mutex: For synchronizing any @mods_info updates + * @ppl_ida: Pipeline instance ID pool + * @fw_list: List of libraries loaded, including base firmware + */ +struct avs_dev { + struct hda_bus base; + struct device *dev; + + void __iomem *dsp_ba; + const struct avs_spec *spec; + struct avs_ipc *ipc; + + struct avs_fw_cfg fw_cfg; + struct avs_hw_cfg hw_cfg; + struct avs_mods_info *mods_info; + struct ida **mod_idas; + struct mutex modres_mutex; + struct ida ppl_ida; + struct list_head fw_list; + int *core_refs; /* reference count per core */ + char **lib_names; + + struct completion fw_ready; + struct work_struct probe_work; + + struct nhlt_acpi_table *nhlt; + struct list_head comp_list; + struct mutex comp_list_mutex; + struct list_head path_list; + spinlock_t path_list_lock; + struct mutex path_mutex; + + struct avs_debug dbg; +}; + +/* from hda_bus to avs_dev */ +#define hda_to_avs(hda) container_of(hda, struct avs_dev, base) +/* from hdac_bus to avs_dev */ +#define hdac_to_avs(hdac) hda_to_avs(to_hda_bus(hdac)) +/* from device to avs_dev */ +#define to_avs_dev(dev) \ +({ \ + struct hdac_bus *__bus = dev_get_drvdata(dev); \ + hdac_to_avs(__bus); \ +}) + +int avs_dsp_core_power(struct avs_dev *adev, u32 core_mask, bool power); +int avs_dsp_core_reset(struct avs_dev *adev, u32 core_mask, bool reset); +int avs_dsp_core_stall(struct avs_dev *adev, u32 core_mask, bool stall); +int avs_dsp_core_enable(struct avs_dev *adev, u32 core_mask); +int avs_dsp_core_disable(struct avs_dev *adev, u32 core_mask); + +/* Inter Process Communication */ + +struct avs_ipc_msg { + union { + u64 header; + union avs_global_msg glb; + union avs_reply_msg rsp; + }; + void *data; + size_t size; +}; + +/* + * struct avs_ipc - DSP IPC context + * + * @dev: PCI device + * @rx: Reply message cache + * @default_timeout_ms: default message timeout in MS + * @ready: whether firmware is ready and communication is open + * @rx_completed: whether 
RX for previously sent TX has been received + * @rx_lock: for serializing manipulation of rx_* fields + * @msg_lock: for synchronizing request handling + * @done_completion: DONE-part of IPC i.e. ROM and ACKs from FW + * @busy_completion: BUSY-part of IPC i.e. receiving responses from FW + */ +struct avs_ipc { + struct device *dev; + + struct avs_ipc_msg rx; + u32 default_timeout_ms; + bool ready; + atomic_t recovering; + + bool rx_completed; + spinlock_t rx_lock; + struct mutex msg_mutex; + struct completion done_completion; + struct completion busy_completion; + + struct work_struct recovery_work; + struct delayed_work d0ix_work; + atomic_t d0ix_disable_depth; + bool in_d0ix; +}; + +#define AVS_EIPC EREMOTEIO +/* + * IPC handlers may return positive value (firmware error code) what denotes + * successful HOST <-> DSP communication yet failure to process specific request. + * + * Below macro converts returned value to linux kernel error code. + * All IPC callers MUST use it as soon as firmware error code is consumed. + */ +#define AVS_IPC_RET(ret) \ + (((ret) <= 0) ? (ret) : -AVS_EIPC) + +static inline void avs_ipc_err(struct avs_dev *adev, struct avs_ipc_msg *tx, + const char *name, int error) +{ + /* + * If IPC channel is blocked e.g.: due to ongoing recovery, + * -EPERM error code is expected and thus it's not an actual error. + */ + if (error == -EPERM) + dev_dbg(adev->dev, "%s 0x%08x 0x%08x failed: %d\n", name, + tx->glb.primary, tx->glb.ext.val, error); + else + dev_err(adev->dev, "%s 0x%08x 0x%08x failed: %d\n", name, + tx->glb.primary, tx->glb.ext.val, error); +} + +irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id); +irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id); +void avs_dsp_process_response(struct avs_dev *adev, u64 header); +int avs_dsp_send_msg_timeout(struct avs_dev *adev, + struct avs_ipc_msg *request, + struct avs_ipc_msg *reply, int timeout); +int avs_dsp_send_msg(struct avs_dev *adev, + struct avs_ipc_msg *request, struct avs_ipc_msg *reply); +/* Two variants below are for messages that control DSP power states. 
*/ +int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, + struct avs_ipc_msg *reply, int timeout, bool wake_d0i0); +int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request, + struct avs_ipc_msg *reply, bool wake_d0i0); +int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev, + struct avs_ipc_msg *request, int timeout); +int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request); +void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable); +int avs_ipc_init(struct avs_ipc *ipc, struct device *dev); +void avs_ipc_block(struct avs_ipc *ipc); + +int avs_dsp_disable_d0ix(struct avs_dev *adev); +int avs_dsp_enable_d0ix(struct avs_dev *adev); + +int skl_log_buffer_offset(struct avs_dev *adev, u32 core); + +/* Firmware resources management */ + +int avs_get_module_entry(struct avs_dev *adev, const guid_t *uuid, struct avs_module_entry *entry); +int avs_get_module_id_entry(struct avs_dev *adev, u32 module_id, struct avs_module_entry *entry); +int avs_get_module_id(struct avs_dev *adev, const guid_t *uuid); +bool avs_is_module_ida_empty(struct avs_dev *adev, u32 module_id); + +int avs_module_info_init(struct avs_dev *adev, bool purge); +void avs_module_info_free(struct avs_dev *adev); +int avs_module_id_alloc(struct avs_dev *adev, u16 module_id); +void avs_module_id_free(struct avs_dev *adev, u16 module_id, u8 instance_id); +int avs_request_firmware(struct avs_dev *adev, const struct firmware **fw_p, const char *name); +void avs_release_last_firmware(struct avs_dev *adev); +void avs_release_firmwares(struct avs_dev *adev); + +int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id, + u8 core_id, u8 domain, void *param, u32 param_size, + u16 *instance_id); +void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u16 instance_id, + u8 ppl_instance_id, u8 core_id); +int avs_dsp_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority, + bool lp, u16 attributes, u8 *instance_id); +int avs_dsp_delete_pipeline(struct avs_dev *adev, u8 instance_id); + +/* Firmware loading */ + +void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable); +void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable); +void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable); + +int avs_dsp_load_libraries(struct avs_dev *adev, struct avs_tplg_library *libs, u32 num_libs); +int avs_dsp_boot_firmware(struct avs_dev *adev, bool purge); +int avs_dsp_first_boot_firmware(struct avs_dev *adev); + +int avs_cldma_load_basefw(struct avs_dev *adev, struct firmware *fw); +int avs_cldma_load_library(struct avs_dev *adev, struct firmware *lib, u32 id); +int avs_cldma_transfer_modules(struct avs_dev *adev, bool load, + struct avs_module_entry *mods, u32 num_mods); +int avs_hda_load_basefw(struct avs_dev *adev, struct firmware *fw); +int avs_hda_load_library(struct avs_dev *adev, struct firmware *lib, u32 id); +int avs_hda_transfer_modules(struct avs_dev *adev, bool load, + struct avs_module_entry *mods, u32 num_mods); + +/* Soc component members */ + +struct avs_soc_component { + struct snd_soc_component base; + struct avs_tplg *tplg; + + struct list_head node; +}; + +#define to_avs_soc_component(comp) \ + container_of(comp, struct avs_soc_component, base) + +extern const struct snd_soc_dai_ops avs_dai_fe_ops; + +int avs_dmic_platform_register(struct avs_dev *adev, const char *name); +int avs_i2s_platform_register(struct avs_dev *adev, const char *name, unsigned long port_mask, + unsigned long *tdms); +int 
avs_hda_platform_register(struct avs_dev *adev, const char *name); + +int avs_register_all_boards(struct avs_dev *adev); +void avs_unregister_all_boards(struct avs_dev *adev); + +/* Firmware tracing helpers */ + +unsigned int __kfifo_fromio_locked(struct kfifo *fifo, const void __iomem *src, unsigned int len, + spinlock_t *lock); + +#define avs_log_buffer_size(adev) \ + ((adev)->fw_cfg.trace_log_bytes / (adev)->hw_cfg.dsp_cores) + +#define avs_log_buffer_addr(adev, core) \ +({ \ + s32 __offset = avs_dsp_op(adev, log_buffer_offset, core); \ + (__offset < 0) ? NULL : \ + (avs_sram_addr(adev, AVS_DEBUG_WINDOW) + __offset); \ +}) + +struct apl_log_buffer_layout { + u32 read_ptr; + u32 write_ptr; + u8 buffer[]; +} __packed; + +#define apl_log_payload_size(adev) \ + (avs_log_buffer_size(adev) - sizeof(struct apl_log_buffer_layout)) + +#define apl_log_payload_addr(addr) \ + (addr + sizeof(struct apl_log_buffer_layout)) + +#endif /* __SOUND_SOC_INTEL_AVS_H */ diff --git a/sound/soc/intel/avs/board_selection.c b/sound/soc/intel/avs/board_selection.c new file mode 100644 index 000000000..87353b4b0 --- /dev/null +++ b/sound/soc/intel/avs/board_selection.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "avs.h" + +static bool i2s_test; +module_param(i2s_test, bool, 0444); +MODULE_PARM_DESC(i2s_test, "Probe I2S test-board and skip all other I2S boards"); + +static const struct dmi_system_id kbl_dmi_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Skylake Y LPDDR3 RVP3"), + }, + }, + {} +}; + +static const struct dmi_system_id kblr_dmi_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Kabylake R DDR4 RVP"), + }, + }, + {} +}; + +static struct snd_soc_acpi_mach *dmi_match_quirk(void *arg) +{ + struct snd_soc_acpi_mach *mach = arg; + const struct dmi_system_id *dmi_id; + struct dmi_system_id *dmi_table; + + if (mach->quirk_data == NULL) + return mach; + + dmi_table = (struct dmi_system_id *)mach->quirk_data; + + dmi_id = dmi_first_match(dmi_table); + if (!dmi_id) + return NULL; + + return mach; +} + +#define AVS_SSP(x) (BIT(x)) +#define AVS_SSP_RANGE(a, b) (GENMASK(b, a)) + +/* supported I2S board codec configurations */ +static struct snd_soc_acpi_mach avs_skl_i2s_machines[] = { + { + .id = "INT343A", + .drv_name = "avs_rt286", + .mach_params = { + .i2s_link_mask = AVS_SSP(0), + }, + .tplg_filename = "rt286-tplg.bin", + }, + { + .id = "10508825", + .drv_name = "avs_nau8825", + .mach_params = { + .i2s_link_mask = AVS_SSP(1), + }, + .tplg_filename = "nau8825-tplg.bin", + }, + { + .id = "INT343B", + .drv_name = "avs_ssm4567", + .mach_params = { + .i2s_link_mask = AVS_SSP(0), + }, + .tplg_filename = "ssm4567-tplg.bin", + }, + { + .id = "MX98357A", + .drv_name = "avs_max98357a", + .mach_params = { + .i2s_link_mask = AVS_SSP(0), + }, + .tplg_filename = "max98357a-tplg.bin", + }, + {}, +}; + +static struct snd_soc_acpi_mach avs_kbl_i2s_machines[] = { + { + .id = "INT343A", + .drv_name = "avs_rt286", + .mach_params = { + .i2s_link_mask = AVS_SSP(0), + }, + .quirk_data = &kbl_dmi_table, + .machine_quirk = dmi_match_quirk, + .tplg_filename = "rt286-tplg.bin", + }, + { + .id = "INT343A", + .drv_name = "avs_rt298", + .mach_params = { + 
.i2s_link_mask = AVS_SSP(0), + }, + .quirk_data = &kblr_dmi_table, + .machine_quirk = dmi_match_quirk, + .tplg_filename = "rt298-tplg.bin", + }, + { + .id = "MX98373", + .drv_name = "avs_max98373", + .mach_params = { + .i2s_link_mask = AVS_SSP(0), + }, + .tplg_filename = "max98373-tplg.bin", + }, + { + .id = "DLGS7219", + .drv_name = "avs_da7219", + .mach_params = { + .i2s_link_mask = AVS_SSP(1), + }, + .tplg_filename = "da7219-tplg.bin", + }, + {}, +}; + +static struct snd_soc_acpi_mach avs_apl_i2s_machines[] = { + { + .id = "INT343A", + .drv_name = "avs_rt298", + .mach_params = { + .i2s_link_mask = AVS_SSP(5), + }, + .tplg_filename = "rt298-tplg.bin", + }, + { + .id = "INT34C3", + .drv_name = "avs_tdf8532", + .mach_params = { + .i2s_link_mask = AVS_SSP_RANGE(0, 5), + }, + .pdata = (unsigned long[]){ 0, 0, 0x14, 0, 0, 0 }, /* SSP2 TDMs */ + .tplg_filename = "tdf8532-tplg.bin", + }, + { + .id = "MX98357A", + .drv_name = "avs_max98357a", + .mach_params = { + .i2s_link_mask = AVS_SSP(5), + }, + .tplg_filename = "max98357a-tplg.bin", + }, + { + .id = "DLGS7219", + .drv_name = "avs_da7219", + .mach_params = { + .i2s_link_mask = AVS_SSP(1), + }, + .tplg_filename = "da7219-tplg.bin", + }, + {}, +}; + +static struct snd_soc_acpi_mach avs_gml_i2s_machines[] = { + { + .id = "INT343A", + .drv_name = "avs_rt298", + .mach_params = { + .i2s_link_mask = AVS_SSP(2), + }, + .tplg_filename = "rt298-tplg.bin", + }, + {}, +}; + +static struct snd_soc_acpi_mach avs_test_i2s_machines[] = { + { + .drv_name = "avs_i2s_test", + .mach_params = { + .i2s_link_mask = AVS_SSP(0), + }, + .tplg_filename = "i2s-test-tplg.bin", + }, + { + .drv_name = "avs_i2s_test", + .mach_params = { + .i2s_link_mask = AVS_SSP(1), + }, + .tplg_filename = "i2s-test-tplg.bin", + }, + { + .drv_name = "avs_i2s_test", + .mach_params = { + .i2s_link_mask = AVS_SSP(2), + }, + .tplg_filename = "i2s-test-tplg.bin", + }, + { + .drv_name = "avs_i2s_test", + .mach_params = { + .i2s_link_mask = AVS_SSP(3), + }, + .tplg_filename = "i2s-test-tplg.bin", + }, + { + .drv_name = "avs_i2s_test", + .mach_params = { + .i2s_link_mask = AVS_SSP(4), + }, + .tplg_filename = "i2s-test-tplg.bin", + }, + { + .drv_name = "avs_i2s_test", + .mach_params = { + .i2s_link_mask = AVS_SSP(5), + }, + .tplg_filename = "i2s-test-tplg.bin", + }, + /* no NULL terminator, as we depend on ARRAY SIZE due to .id == NULL */ +}; + +struct avs_acpi_boards { + int id; + struct snd_soc_acpi_mach *machs; +}; + +#define AVS_MACH_ENTRY(_id, _mach) \ + { .id = (_id), .machs = (_mach), } + +/* supported I2S boards per platform */ +static const struct avs_acpi_boards i2s_boards[] = { + AVS_MACH_ENTRY(0x9d70, avs_skl_i2s_machines), /* SKL */ + AVS_MACH_ENTRY(0x9d71, avs_kbl_i2s_machines), /* KBL */ + AVS_MACH_ENTRY(0x5a98, avs_apl_i2s_machines), /* APL */ + AVS_MACH_ENTRY(0x3198, avs_gml_i2s_machines), /* GML */ + {}, +}; + +static const struct avs_acpi_boards *avs_get_i2s_boards(struct avs_dev *adev) +{ + int id, i; + + id = adev->base.pci->device; + for (i = 0; i < ARRAY_SIZE(i2s_boards); i++) + if (i2s_boards[i].id == id) + return &i2s_boards[i]; + return NULL; +} + +/* platform devices owned by AVS audio are removed with this hook */ +static void board_pdev_unregister(void *data) +{ + platform_device_unregister(data); +} + +static int avs_register_dmic_board(struct avs_dev *adev) +{ + struct platform_device *codec, *board; + struct snd_soc_acpi_mach mach = {{0}}; + int ret; + + if (!adev->nhlt || + !intel_nhlt_has_endpoint_type(adev->nhlt, NHLT_LINK_DMIC)) { + dev_dbg(adev->dev, "no DMIC 
endpoints present\n"); + return 0; + } + + codec = platform_device_register_simple("dmic-codec", PLATFORM_DEVID_NONE, NULL, 0); + if (IS_ERR(codec)) { + dev_err(adev->dev, "dmic codec register failed\n"); + return PTR_ERR(codec); + } + + ret = devm_add_action(adev->dev, board_pdev_unregister, codec); + if (ret < 0) { + platform_device_unregister(codec); + return ret; + } + + ret = avs_dmic_platform_register(adev, "dmic-platform"); + if (ret < 0) + return ret; + + mach.tplg_filename = "dmic-tplg.bin"; + mach.mach_params.platform = "dmic-platform"; + + board = platform_device_register_data(NULL, "avs_dmic", PLATFORM_DEVID_NONE, + (const void *)&mach, sizeof(mach)); + if (IS_ERR(board)) { + dev_err(adev->dev, "dmic board register failed\n"); + return PTR_ERR(board); + } + + ret = devm_add_action(adev->dev, board_pdev_unregister, board); + if (ret < 0) { + platform_device_unregister(board); + return ret; + } + + return 0; +} + +static int avs_register_i2s_board(struct avs_dev *adev, struct snd_soc_acpi_mach *mach) +{ + struct platform_device *board; + int num_ssps; + char *name; + int ret; + + num_ssps = adev->hw_cfg.i2s_caps.ctrl_count; + if (fls(mach->mach_params.i2s_link_mask) > num_ssps) { + dev_err(adev->dev, "Platform supports %d SSPs but board %s requires SSP%ld\n", + num_ssps, mach->drv_name, + (unsigned long)__fls(mach->mach_params.i2s_link_mask)); + return -ENODEV; + } + + name = devm_kasprintf(adev->dev, GFP_KERNEL, "%s.%d-platform", mach->drv_name, + mach->mach_params.i2s_link_mask); + if (!name) + return -ENOMEM; + + ret = avs_i2s_platform_register(adev, name, mach->mach_params.i2s_link_mask, mach->pdata); + if (ret < 0) + return ret; + + mach->mach_params.platform = name; + + board = platform_device_register_data(NULL, mach->drv_name, mach->mach_params.i2s_link_mask, + (const void *)mach, sizeof(*mach)); + if (IS_ERR(board)) { + dev_err(adev->dev, "ssp board register failed\n"); + return PTR_ERR(board); + } + + ret = devm_add_action(adev->dev, board_pdev_unregister, board); + if (ret < 0) { + platform_device_unregister(board); + return ret; + } + + return 0; +} + +static int avs_register_i2s_boards(struct avs_dev *adev) +{ + const struct avs_acpi_boards *boards; + struct snd_soc_acpi_mach *mach; + int ret; + + if (!adev->nhlt || !intel_nhlt_has_endpoint_type(adev->nhlt, NHLT_LINK_SSP)) { + dev_dbg(adev->dev, "no I2S endpoints present\n"); + return 0; + } + + if (i2s_test) { + int i, num_ssps; + + num_ssps = adev->hw_cfg.i2s_caps.ctrl_count; + /* constrain just in case FW says there can be more SSPs than possible */ + num_ssps = min_t(int, ARRAY_SIZE(avs_test_i2s_machines), num_ssps); + + mach = avs_test_i2s_machines; + + for (i = 0; i < num_ssps; i++) { + ret = avs_register_i2s_board(adev, &mach[i]); + if (ret < 0) + dev_warn(adev->dev, "register i2s %s failed: %d\n", mach->drv_name, + ret); + } + return 0; + } + + boards = avs_get_i2s_boards(adev); + if (!boards) { + dev_dbg(adev->dev, "no I2S endpoints supported\n"); + return 0; + } + + for (mach = boards->machs; mach->id[0]; mach++) { + if (!acpi_dev_present(mach->id, mach->uid, -1)) + continue; + + if (mach->machine_quirk) + if (!mach->machine_quirk(mach)) + continue; + + ret = avs_register_i2s_board(adev, mach); + if (ret < 0) + dev_warn(adev->dev, "register i2s %s failed: %d\n", mach->drv_name, ret); + } + + return 0; +} + +static int avs_register_hda_board(struct avs_dev *adev, struct hda_codec *codec) +{ + struct snd_soc_acpi_mach mach = {{0}}; + struct platform_device *board; + struct hdac_device *hdev = &codec->core; + 
char *pname; + int ret, id; + + pname = devm_kasprintf(adev->dev, GFP_KERNEL, "%s-platform", dev_name(&hdev->dev)); + if (!pname) + return -ENOMEM; + + ret = avs_hda_platform_register(adev, pname); + if (ret < 0) + return ret; + + mach.pdata = codec; + mach.mach_params.platform = pname; + mach.tplg_filename = devm_kasprintf(adev->dev, GFP_KERNEL, "hda-%08x-tplg.bin", + hdev->vendor_id); + if (!mach.tplg_filename) + return -ENOMEM; + + id = adev->base.core.idx * HDA_MAX_CODECS + hdev->addr; + board = platform_device_register_data(NULL, "avs_hdaudio", id, (const void *)&mach, + sizeof(mach)); + if (IS_ERR(board)) { + dev_err(adev->dev, "hda board register failed\n"); + return PTR_ERR(board); + } + + ret = devm_add_action(adev->dev, board_pdev_unregister, board); + if (ret < 0) { + platform_device_unregister(board); + return ret; + } + + return 0; +} + +static int avs_register_hda_boards(struct avs_dev *adev) +{ + struct hdac_bus *bus = &adev->base.core; + struct hdac_device *hdev; + int ret; + + if (!bus->num_codecs) { + dev_dbg(adev->dev, "no HDA endpoints present\n"); + return 0; + } + + list_for_each_entry(hdev, &bus->codec_list, list) { + struct hda_codec *codec; + + codec = dev_to_hda_codec(&hdev->dev); + + ret = avs_register_hda_board(adev, codec); + if (ret < 0) + dev_warn(adev->dev, "register hda-%08x failed: %d\n", + codec->core.vendor_id, ret); + } + + return 0; +} + +int avs_register_all_boards(struct avs_dev *adev) +{ + int ret; + + ret = avs_register_dmic_board(adev); + if (ret < 0) + dev_warn(adev->dev, "enumerate DMIC endpoints failed: %d\n", + ret); + + ret = avs_register_i2s_boards(adev); + if (ret < 0) + dev_warn(adev->dev, "enumerate I2S endpoints failed: %d\n", + ret); + + ret = avs_register_hda_boards(adev); + if (ret < 0) + dev_warn(adev->dev, "enumerate HDA endpoints failed: %d\n", + ret); + + return 0; +} + +void avs_unregister_all_boards(struct avs_dev *adev) +{ + snd_soc_unregister_component(adev->dev); +} diff --git a/sound/soc/intel/avs/boards/Kconfig b/sound/soc/intel/avs/boards/Kconfig new file mode 100644 index 000000000..4d68e3ef9 --- /dev/null +++ b/sound/soc/intel/avs/boards/Kconfig @@ -0,0 +1,121 @@ +# SPDX-License-Identifier: GPL-2.0-only +menu "Intel AVS Machine drivers" + depends on SND_SOC_INTEL_AVS + +comment "Available DSP configurations" + +config SND_SOC_INTEL_AVS_MACH_DA7219 + tristate "da7219 I2S board" + depends on I2C + depends on MFD_INTEL_LPSS || COMPILE_TEST + select SND_SOC_DA7219 + help + This adds support for AVS with DA7219 I2S codec configuration. + Say Y or m if you have such a device. This is a recommended option. + If unsure select "N". + +config SND_SOC_INTEL_AVS_MACH_DMIC + tristate "DMIC generic board" + select SND_SOC_DMIC + help + This adds support for AVS with Digital Mic array configuration. + Say Y or m if you have such a device. This is a recommended option. + If unsure select "N". + +config SND_SOC_INTEL_AVS_MACH_HDAUDIO + tristate "HD-Audio generic board" + select SND_SOC_HDA + help + This adds support for AVS with HDAudio codec configuration. + Say Y or m if you have such a device. This is a recommended option. + If unsure select "N". + +config SND_SOC_INTEL_AVS_MACH_I2S_TEST + tristate "I2S test board" + help + This adds support for I2S test-board which can be used to verify + transfer over I2S interface with SSP loopback scenarios. 
+ +config SND_SOC_INTEL_AVS_MACH_MAX98357A + tristate "max98357A I2S board" + depends on I2C + depends on MFD_INTEL_LPSS || COMPILE_TEST + select SND_SOC_MAX98357A + help + This adds support for AVS with MAX98357A I2S codec configuration. + Say Y or m if you have such a device. This is a recommended option. + If unsure select "N". + +config SND_SOC_INTEL_AVS_MACH_MAX98373 + tristate "max98373 I2S board" + depends on I2C + depends on MFD_INTEL_LPSS || COMPILE_TEST + select SND_SOC_MAX98373 + help + This adds support for AVS with MAX98373 I2S codec configuration. + Say Y or m if you have such a device. This is a recommended option. + If unsure select "N". + +config SND_SOC_INTEL_AVS_MACH_NAU8825 + tristate "nau8825 I2S board" + depends on I2C + depends on MFD_INTEL_LPSS || COMPILE_TEST + select SND_SOC_NAU8825 + help + This adds support for ASoC machine driver with NAU8825 I2S audio codec. + It is meant to be used with AVS driver. + Say Y or m if you have such a device. This is a recommended option. + If unsure select "N". + +config SND_SOC_INTEL_AVS_MACH_RT274 + tristate "rt274 in I2S mode" + depends on I2C + depends on MFD_INTEL_LPSS || COMPILE_TEST + select SND_SOC_RT274 + help + This adds support for ASoC machine driver with RT274 I2S audio codec. + Say Y or m if you have such a device. This is a recommended option. + If unsure select "N". + +config SND_SOC_INTEL_AVS_MACH_RT286 + tristate "rt286 in I2S mode" + depends on I2C + depends on MFD_INTEL_LPSS || COMPILE_TEST + select SND_SOC_RT286 + help + This adds support for ASoC machine driver with RT286 I2S audio codec. + Say Y or m if you have such a device. This is a recommended option. + If unsure select "N". + +config SND_SOC_INTEL_AVS_MACH_RT298 + tristate "rt298 in I2S mode" + depends on I2C + depends on MFD_INTEL_LPSS || COMPILE_TEST + select SND_SOC_RT298 + help + This adds support for ASoC machine driver with RT298 I2S audio codec. + Say Y or m if you have such a device. This is a recommended option. + If unsure select "N". + +config SND_SOC_INTEL_AVS_MACH_RT5682 + tristate "rt5682 in I2S mode" + depends on I2C + depends on MFD_INTEL_LPSS || COMPILE_TEST + select SND_SOC_RT5682_I2C + help + This adds support for ASoC machine driver with RT5682 I2S audio codec. + Say Y or m if you have such a device. This is a recommended option. + If unsure select "N". + +config SND_SOC_INTEL_AVS_MACH_SSM4567 + tristate "ssm4567 I2S board" + depends on I2C + depends on MFD_INTEL_LPSS || COMPILE_TEST + select SND_SOC_SSM4567 + help + This adds support for ASoC machine driver with SSM4567 I2S audio codec. + It is meant to be used with AVS driver. + Say Y or m if you have such a device. This is a recommended option. + If unsure select "N". 
+ +endmenu diff --git a/sound/soc/intel/avs/boards/Makefile b/sound/soc/intel/avs/boards/Makefile new file mode 100644 index 000000000..bc75376d5 --- /dev/null +++ b/sound/soc/intel/avs/boards/Makefile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0-only + +snd-soc-avs-da7219-objs := da7219.o +snd-soc-avs-dmic-objs := dmic.o +snd-soc-avs-hdaudio-objs := hdaudio.o +snd-soc-avs-i2s-test-objs := i2s_test.o +snd-soc-avs-max98357a-objs := max98357a.o +snd-soc-avs-max98373-objs := max98373.o +snd-soc-avs-nau8825-objs := nau8825.o +snd-soc-avs-rt274-objs := rt274.o +snd-soc-avs-rt286-objs := rt286.o +snd-soc-avs-rt298-objs := rt298.o +snd-soc-avs-rt5682-objs := rt5682.o +snd-soc-avs-ssm4567-objs := ssm4567.o + +obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_DA7219) += snd-soc-avs-da7219.o +obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_DMIC) += snd-soc-avs-dmic.o +obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_HDAUDIO) += snd-soc-avs-hdaudio.o +obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_I2S_TEST) += snd-soc-avs-i2s-test.o +obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98357A) += snd-soc-avs-max98357a.o +obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98373) += snd-soc-avs-max98373.o +obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_NAU8825) += snd-soc-avs-nau8825.o +obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_RT274) += snd-soc-avs-rt274.o +obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_RT286) += snd-soc-avs-rt286.o +obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_RT298) += snd-soc-avs-rt298.o +obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_RT5682) += snd-soc-avs-rt5682.o +obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_SSM4567) += snd-soc-avs-ssm4567.o diff --git a/sound/soc/intel/avs/boards/da7219.c b/sound/soc/intel/avs/boards/da7219.c new file mode 100644 index 000000000..a63563594 --- /dev/null +++ b/sound/soc/intel/avs/boards/da7219.c @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Author: Cezary Rojewski +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../../../codecs/da7219.h" +#include "../../../codecs/da7219-aad.h" + +#define DA7219_DAI_NAME "da7219-hifi" + +static const struct snd_kcontrol_new card_controls[] = { + SOC_DAPM_PIN_SWITCH("Headphone Jack"), + SOC_DAPM_PIN_SWITCH("Headset Mic"), +}; + +static int platform_clock_control(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *k, int event) +{ + struct snd_soc_dapm_context *dapm = w->dapm; + struct snd_soc_card *card = dapm->card; + struct snd_soc_dai *codec_dai; + int ret = 0; + + codec_dai = snd_soc_card_get_codec_dai(card, DA7219_DAI_NAME); + if (!codec_dai) { + dev_err(card->dev, "Codec dai not found. 
Unable to set/unset codec pll\n"); + return -EIO; + } + + if (SND_SOC_DAPM_EVENT_OFF(event)) { + ret = snd_soc_dai_set_pll(codec_dai, 0, DA7219_SYSCLK_MCLK, 0, 0); + if (ret) + dev_err(card->dev, "failed to stop PLL: %d\n", ret); + } else if (SND_SOC_DAPM_EVENT_ON(event)) { + ret = snd_soc_dai_set_pll(codec_dai, 0, DA7219_SYSCLK_PLL_SRM, + 0, DA7219_PLL_FREQ_OUT_98304); + if (ret) + dev_err(card->dev, "failed to start PLL: %d\n", ret); + } + + return ret; +} + +static const struct snd_soc_dapm_widget card_widgets[] = { + SND_SOC_DAPM_HP("Headphone Jack", NULL), + SND_SOC_DAPM_MIC("Headset Mic", NULL), + SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, platform_clock_control, + SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU), +}; + +static const struct snd_soc_dapm_route card_base_routes[] = { + /* HP jack connectors - unknown if we have jack detection */ + {"Headphone Jack", NULL, "HPL"}, + {"Headphone Jack", NULL, "HPR"}, + + {"MIC", NULL, "Headset Mic"}, + + { "Headphone Jack", NULL, "Platform Clock" }, + { "Headset Mic", NULL, "Platform Clock" }, +}; + +static int avs_da7219_codec_init(struct snd_soc_pcm_runtime *runtime) +{ + struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component; + struct snd_soc_card *card = runtime->card; + struct snd_soc_jack *jack; + struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0); + int clk_freq; + int ret; + + jack = snd_soc_card_get_drvdata(card); + clk_freq = 19200000; + + ret = snd_soc_dai_set_sysclk(codec_dai, DA7219_CLKSRC_MCLK, clk_freq, SND_SOC_CLOCK_IN); + if (ret) { + dev_err(card->dev, "can't set codec sysclk configuration\n"); + return ret; + } + + /* + * Headset buttons map to the google Reference headset. + * These can be configured by userspace. + */ + ret = snd_soc_card_jack_new(card, "Headset Jack", + SND_JACK_HEADSET | SND_JACK_BTN_0 | + SND_JACK_BTN_1 | SND_JACK_BTN_2 | + SND_JACK_BTN_3 | SND_JACK_LINEOUT, jack); + if (ret) { + dev_err(card->dev, "Headset Jack creation failed: %d\n", ret); + return ret; + } + + snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE); + snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP); + snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN); + snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND); + + da7219_aad_jack_det(component, jack); + + return 0; +} + +static int +avs_da7219_be_fixup(struct snd_soc_pcm_runtime *runrime, struct snd_pcm_hw_params *params) +{ + struct snd_interval *rate, *channels; + struct snd_mask *fmt; + + rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); + channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); + fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + + /* The ADSP will convert the FE rate to 48k, stereo */ + rate->min = rate->max = 48000; + channels->min = channels->max = 2; + + /* set SSP0 to 24 bit */ + snd_mask_none(fmt); + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); + return 0; +} + +static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, + struct snd_soc_dai_link **dai_link) +{ + struct snd_soc_dai_link_component *platform; + struct snd_soc_dai_link *dl; + + dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); + platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); + if (!dl || !platform) + return -ENOMEM; + + platform->name = platform_name; + + dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); + dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); + dl->codecs = devm_kzalloc(dev, 
sizeof(*dl->codecs), GFP_KERNEL); + if (!dl->name || !dl->cpus || !dl->codecs) + return -ENOMEM; + + dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); + dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-DLGS7219:00"); + dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, DA7219_DAI_NAME); + if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) + return -ENOMEM; + + dl->num_cpus = 1; + dl->num_codecs = 1; + dl->platforms = platform; + dl->num_platforms = 1; + dl->id = 0; + dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; + dl->be_hw_params_fixup = avs_da7219_be_fixup; + dl->init = avs_da7219_codec_init; + dl->nonatomic = 1; + dl->no_pcm = 1; + dl->dpcm_capture = 1; + dl->dpcm_playback = 1; + + *dai_link = dl; + + return 0; +} + +static int avs_create_dapm_routes(struct device *dev, int ssp_port, + struct snd_soc_dapm_route **routes, int *num_routes) +{ + struct snd_soc_dapm_route *dr; + const int num_base = ARRAY_SIZE(card_base_routes); + const int num_dr = num_base + 2; + int idx; + + dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + memcpy(dr, card_base_routes, num_base * sizeof(*dr)); + + idx = num_base; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "Playback"); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + idx++; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "Capture"); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + *routes = dr; + *num_routes = num_dr; + + return 0; +} + +static int avs_card_set_jack(struct snd_soc_card *card, struct snd_soc_jack *jack) +{ + struct snd_soc_component *component; + + for_each_card_components(card, component) + snd_soc_component_set_jack(component, jack, NULL); + return 0; +} + +static int avs_card_remove(struct snd_soc_card *card) +{ + return avs_card_set_jack(card, NULL); +} + +static int avs_card_suspend_pre(struct snd_soc_card *card) +{ + return avs_card_set_jack(card, NULL); +} + +static int avs_card_resume_post(struct snd_soc_card *card) +{ + struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card); + + return avs_card_set_jack(card, jack); +} + +static int avs_da7219_probe(struct platform_device *pdev) +{ + struct snd_soc_dapm_route *routes; + struct snd_soc_dai_link *dai_link; + struct snd_soc_acpi_mach *mach; + struct snd_soc_card *card; + struct snd_soc_jack *jack; + struct device *dev = &pdev->dev; + const char *pname; + int num_routes, ssp_port, ret; + + mach = dev_get_platdata(dev); + pname = mach->mach_params.platform; + ssp_port = __ffs(mach->mach_params.i2s_link_mask); + + ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); + if (ret) { + dev_err(dev, "Failed to create dai link: %d", ret); + return ret; + } + + ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes); + if (ret) { + dev_err(dev, "Failed to create dapm routes: %d", ret); + return ret; + } + + jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL); + card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!jack || !card) + return -ENOMEM; + + card->name = "avs_da7219"; + card->dev = dev; + card->owner = THIS_MODULE; + card->remove = avs_card_remove; + card->suspend_pre = avs_card_suspend_pre; + card->resume_post = avs_card_resume_post; + card->dai_link = dai_link; + card->num_links = 1; + card->controls = card_controls; + card->num_controls = 
ARRAY_SIZE(card_controls); + card->dapm_widgets = card_widgets; + card->num_dapm_widgets = ARRAY_SIZE(card_widgets); + card->dapm_routes = routes; + card->num_dapm_routes = num_routes; + card->fully_routed = true; + snd_soc_card_set_drvdata(card, jack); + + ret = snd_soc_fixup_dai_links_platform_name(card, pname); + if (ret) + return ret; + + return devm_snd_soc_register_card(dev, card); +} + +static struct platform_driver avs_da7219_driver = { + .probe = avs_da7219_probe, + .driver = { + .name = "avs_da7219", + .pm = &snd_soc_pm_ops, + }, +}; + +module_platform_driver(avs_da7219_driver); + +MODULE_AUTHOR("Cezary Rojewski "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:avs_da7219"); diff --git a/sound/soc/intel/avs/boards/dmic.c b/sound/soc/intel/avs/boards/dmic.c new file mode 100644 index 000000000..90a921638 --- /dev/null +++ b/sound/soc/intel/avs/boards/dmic.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include + +SND_SOC_DAILINK_DEF(dmic_pin, DAILINK_COMP_ARRAY(COMP_CPU("DMIC Pin"))); +SND_SOC_DAILINK_DEF(dmic_wov_pin, DAILINK_COMP_ARRAY(COMP_CPU("DMIC WoV Pin"))); +SND_SOC_DAILINK_DEF(dmic_codec, DAILINK_COMP_ARRAY(COMP_CODEC("dmic-codec", "dmic-hifi"))); +/* Name overridden on probe */ +SND_SOC_DAILINK_DEF(platform, DAILINK_COMP_ARRAY(COMP_PLATFORM(""))); + +static struct snd_soc_dai_link card_dai_links[] = { + /* Back ends */ + { + .name = "DMIC", + .id = 0, + .dpcm_capture = 1, + .nonatomic = 1, + .no_pcm = 1, + SND_SOC_DAILINK_REG(dmic_pin, dmic_codec, platform), + }, + { + .name = "DMIC WoV", + .id = 1, + .dpcm_capture = 1, + .nonatomic = 1, + .no_pcm = 1, + .ignore_suspend = 1, + SND_SOC_DAILINK_REG(dmic_wov_pin, dmic_codec, platform), + }, +}; + +static const struct snd_soc_dapm_widget card_widgets[] = { + SND_SOC_DAPM_MIC("SoC DMIC", NULL), +}; + +static const struct snd_soc_dapm_route card_routes[] = { + {"DMic", NULL, "SoC DMIC"}, + {"DMIC Rx", NULL, "Capture"}, + {"DMIC WoV Rx", NULL, "Capture"}, +}; + +static int avs_dmic_probe(struct platform_device *pdev) +{ + struct snd_soc_acpi_mach *mach; + struct snd_soc_card *card; + struct device *dev = &pdev->dev; + int ret; + + mach = dev_get_platdata(dev); + + card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!card) + return -ENOMEM; + + card->name = "avs_dmic"; + card->dev = dev; + card->owner = THIS_MODULE; + card->dai_link = card_dai_links; + card->num_links = ARRAY_SIZE(card_dai_links); + card->dapm_widgets = card_widgets; + card->num_dapm_widgets = ARRAY_SIZE(card_widgets); + card->dapm_routes = card_routes; + card->num_dapm_routes = ARRAY_SIZE(card_routes); + card->fully_routed = true; + + ret = snd_soc_fixup_dai_links_platform_name(card, mach->mach_params.platform); + if (ret) + return ret; + + return devm_snd_soc_register_card(dev, card); +} + +static struct platform_driver avs_dmic_driver = { + .probe = avs_dmic_probe, + .driver = { + .name = "avs_dmic", + .pm = &snd_soc_pm_ops, + }, +}; + +module_platform_driver(avs_dmic_driver); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:avs_dmic"); diff --git a/sound/soc/intel/avs/boards/hdaudio.c b/sound/soc/intel/avs/boards/hdaudio.c new file mode 100644 index 000000000..a65939f30 --- /dev/null +++ b/sound/soc/intel/avs/boards/hdaudio.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. 
+// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include "../../../codecs/hda.h" + +static int avs_create_dai_links(struct device *dev, struct hda_codec *codec, int pcm_count, + const char *platform_name, struct snd_soc_dai_link **links) +{ + struct snd_soc_dai_link_component *platform; + struct snd_soc_dai_link *dl; + struct hda_pcm *pcm; + const char *cname = dev_name(&codec->core.dev); + int i; + + dl = devm_kcalloc(dev, pcm_count, sizeof(*dl), GFP_KERNEL); + platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); + if (!dl || !platform) + return -ENOMEM; + + platform->name = platform_name; + pcm = list_first_entry(&codec->pcm_list_head, struct hda_pcm, list); + + for (i = 0; i < pcm_count; i++, pcm = list_next_entry(pcm, list)) { + dl[i].name = devm_kasprintf(dev, GFP_KERNEL, "%s link%d", cname, i); + if (!dl[i].name) + return -ENOMEM; + + dl[i].id = i; + dl[i].nonatomic = 1; + dl[i].no_pcm = 1; + dl[i].dpcm_playback = 1; + dl[i].dpcm_capture = 1; + dl[i].platforms = platform; + dl[i].num_platforms = 1; + dl[i].ignore_pmdown_time = 1; + + dl[i].codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); + dl[i].cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); + if (!dl[i].codecs || !dl[i].cpus) + return -ENOMEM; + + dl[i].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "%s-cpu%d", cname, i); + if (!dl[i].cpus->dai_name) + return -ENOMEM; + + dl[i].codecs->name = devm_kstrdup(dev, cname, GFP_KERNEL); + if (!dl[i].codecs->name) + return -ENOMEM; + + dl[i].codecs->dai_name = pcm->name; + dl[i].num_codecs = 1; + dl[i].num_cpus = 1; + } + + *links = dl; + return 0; +} + +static int avs_create_dapm_routes(struct device *dev, struct hda_codec *codec, int pcm_count, + struct snd_soc_dapm_route **routes, int *num_routes) +{ + struct snd_soc_dapm_route *dr; + struct hda_pcm *pcm; + const char *cname = dev_name(&codec->core.dev); + int i, n = 0; + + /* at max twice the number of pcms */ + dr = devm_kcalloc(dev, pcm_count * 2, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + pcm = list_first_entry(&codec->pcm_list_head, struct hda_pcm, list); + + for (i = 0; i < pcm_count; i++, pcm = list_next_entry(pcm, list)) { + struct hda_pcm_stream *stream; + int dir; + + dir = SNDRV_PCM_STREAM_PLAYBACK; + stream = &pcm->stream[dir]; + if (!stream->substreams) + goto capture_routes; + + dr[n].sink = devm_kasprintf(dev, GFP_KERNEL, "%s %s", pcm->name, + snd_pcm_direction_name(dir)); + dr[n].source = devm_kasprintf(dev, GFP_KERNEL, "%s-cpu%d Tx", cname, i); + if (!dr[n].sink || !dr[n].source) + return -ENOMEM; + n++; + +capture_routes: + dir = SNDRV_PCM_STREAM_CAPTURE; + stream = &pcm->stream[dir]; + if (!stream->substreams) + continue; + + dr[n].sink = devm_kasprintf(dev, GFP_KERNEL, "%s-cpu%d Rx", cname, i); + dr[n].source = devm_kasprintf(dev, GFP_KERNEL, "%s %s", pcm->name, + snd_pcm_direction_name(dir)); + if (!dr[n].sink || !dr[n].source) + return -ENOMEM; + n++; + } + + *routes = dr; + *num_routes = n; + return 0; +} + +/* Should be aligned with SectionPCM's name from topology */ +#define FEDAI_NAME_PREFIX "HDMI" + +static struct snd_pcm * +avs_card_hdmi_pcm_at(struct snd_soc_card *card, int hdmi_idx) +{ + struct snd_soc_pcm_runtime *rtd; + int dir = SNDRV_PCM_STREAM_PLAYBACK; + + for_each_card_rtds(card, rtd) { + struct snd_pcm *spcm; + int ret, n; + + spcm = rtd->pcm ? 
rtd->pcm->streams[dir].pcm : NULL; + if (!spcm || !strstr(spcm->id, FEDAI_NAME_PREFIX)) + continue; + + ret = sscanf(spcm->id, FEDAI_NAME_PREFIX "%d", &n); + if (ret != 1) + continue; + if (n == hdmi_idx) + return rtd->pcm; + } + + return NULL; +} + +static int avs_card_late_probe(struct snd_soc_card *card) +{ + struct snd_soc_acpi_mach *mach = dev_get_platdata(card->dev); + struct hda_codec *codec = mach->pdata; + struct hda_pcm *hpcm; + /* Topology pcm indexing is 1-based */ + int i = 1; + + list_for_each_entry(hpcm, &codec->pcm_list_head, list) { + struct snd_pcm *spcm; + + spcm = avs_card_hdmi_pcm_at(card, i); + if (spcm) { + hpcm->pcm = spcm; + hpcm->device = spcm->device; + dev_info(card->dev, "%s: mapping HDMI converter %d to PCM %d (%p)\n", + __func__, i, hpcm->device, spcm); + } else { + hpcm->pcm = NULL; + hpcm->device = SNDRV_PCM_INVALID_DEVICE; + dev_warn(card->dev, "%s: no PCM in topology for HDMI converter %d\n", + __func__, i); + } + i++; + } + + return hda_codec_probe_complete(codec); +} + +static int avs_probing_link_init(struct snd_soc_pcm_runtime *rtm) +{ + struct snd_soc_dapm_route *routes; + struct snd_soc_acpi_mach *mach; + struct snd_soc_dai_link *links = NULL; + struct snd_soc_card *card = rtm->card; + struct hda_codec *codec; + struct hda_pcm *pcm; + int ret, n, pcm_count = 0; + + mach = dev_get_platdata(card->dev); + codec = mach->pdata; + + if (list_empty(&codec->pcm_list_head)) + return -EINVAL; + list_for_each_entry(pcm, &codec->pcm_list_head, list) + pcm_count++; + + ret = avs_create_dai_links(card->dev, codec, pcm_count, mach->mach_params.platform, &links); + if (ret < 0) { + dev_err(card->dev, "create links failed: %d\n", ret); + return ret; + } + + for (n = 0; n < pcm_count; n++) { + ret = snd_soc_add_pcm_runtime(card, &links[n]); + if (ret < 0) { + dev_err(card->dev, "add links failed: %d\n", ret); + return ret; + } + } + + ret = avs_create_dapm_routes(card->dev, codec, pcm_count, &routes, &n); + if (ret < 0) { + dev_err(card->dev, "create routes failed: %d\n", ret); + return ret; + } + + ret = snd_soc_dapm_add_routes(&card->dapm, routes, n); + if (ret < 0) { + dev_err(card->dev, "add routes failed: %d\n", ret); + return ret; + } + + return 0; +} + +SND_SOC_DAILINK_DEF(dummy, DAILINK_COMP_ARRAY(COMP_DUMMY())); + +static struct snd_soc_dai_link probing_link = { + .name = "probing-LINK", + .id = -1, + .nonatomic = 1, + .no_pcm = 1, + .dpcm_playback = 1, + .dpcm_capture = 1, + .cpus = dummy, + .num_cpus = ARRAY_SIZE(dummy), + .init = avs_probing_link_init, +}; + +static int avs_hdaudio_probe(struct platform_device *pdev) +{ + struct snd_soc_dai_link *binder; + struct snd_soc_acpi_mach *mach; + struct snd_soc_card *card; + struct device *dev = &pdev->dev; + struct hda_codec *codec; + + mach = dev_get_platdata(dev); + codec = mach->pdata; + + /* codec may be unloaded before card's probe() fires */ + if (!device_is_registered(&codec->core.dev)) + return -ENODEV; + + binder = devm_kmemdup(dev, &probing_link, sizeof(probing_link), GFP_KERNEL); + if (!binder) + return -ENOMEM; + + binder->platforms = devm_kzalloc(dev, sizeof(*binder->platforms), GFP_KERNEL); + binder->codecs = devm_kzalloc(dev, sizeof(*binder->codecs), GFP_KERNEL); + if (!binder->platforms || !binder->codecs) + return -ENOMEM; + + binder->codecs->name = devm_kstrdup(dev, dev_name(&codec->core.dev), GFP_KERNEL); + if (!binder->codecs->name) + return -ENOMEM; + + binder->platforms->name = mach->mach_params.platform; + binder->num_platforms = 1; + binder->codecs->dai_name = "codec-probing-DAI"; + 
binder->num_codecs = 1; + + card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!card) + return -ENOMEM; + + card->name = binder->codecs->name; + card->dev = dev; + card->owner = THIS_MODULE; + card->dai_link = binder; + card->num_links = 1; + card->fully_routed = true; + if (hda_codec_is_display(codec)) + card->late_probe = avs_card_late_probe; + + return devm_snd_soc_register_card(dev, card); +} + +static struct platform_driver avs_hdaudio_driver = { + .probe = avs_hdaudio_probe, + .driver = { + .name = "avs_hdaudio", + .pm = &snd_soc_pm_ops, + }, +}; + +module_platform_driver(avs_hdaudio_driver) + +MODULE_DESCRIPTION("Intel HD-Audio machine driver"); +MODULE_AUTHOR("Cezary Rojewski "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:avs_hdaudio"); diff --git a/sound/soc/intel/avs/boards/i2s_test.c b/sound/soc/intel/avs/boards/i2s_test.c new file mode 100644 index 000000000..8f0fd87bc --- /dev/null +++ b/sound/soc/intel/avs/boards/i2s_test.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include + +static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, + struct snd_soc_dai_link **dai_link) +{ + struct snd_soc_dai_link_component *platform; + struct snd_soc_dai_link *dl; + + dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); + platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); + if (!dl || !platform) + return -ENOMEM; + + platform->name = platform_name; + + dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); + dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); + dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); + if (!dl->name || !dl->cpus || !dl->codecs) + return -ENOMEM; + + dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); + dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "snd-soc-dummy"); + dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, "snd-soc-dummy-dai"); + if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) + return -ENOMEM; + + dl->num_cpus = 1; + dl->num_codecs = 1; + dl->platforms = platform; + dl->num_platforms = 1; + dl->id = 0; + dl->nonatomic = 1; + dl->no_pcm = 1; + dl->dpcm_capture = 1; + dl->dpcm_playback = 1; + + *dai_link = dl; + + return 0; +} + +static int avs_create_dapm_routes(struct device *dev, int ssp_port, + struct snd_soc_dapm_route **routes, int *num_routes) +{ + struct snd_soc_dapm_route *dr; + const int num_dr = 2; + + dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + dr[0].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%dpb", ssp_port); + dr[0].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port); + if (!dr[0].sink || !dr[0].source) + return -ENOMEM; + + dr[1].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port); + dr[1].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%dcp", ssp_port); + if (!dr[1].sink || !dr[1].source) + return -ENOMEM; + + *routes = dr; + *num_routes = num_dr; + + return 0; +} + +static int avs_create_dapm_widgets(struct device *dev, int ssp_port, + struct snd_soc_dapm_widget **widgets, int *num_widgets) +{ + struct snd_soc_dapm_widget *dw; + const int num_dw = 2; + + dw = devm_kcalloc(dev, num_dw, sizeof(*dw), GFP_KERNEL); + if (!dw) + return -ENOMEM; + + dw[0].id = snd_soc_dapm_hp; + dw[0].reg = SND_SOC_NOPM; + dw[0].name = devm_kasprintf(dev, 
GFP_KERNEL, "ssp%dpb", ssp_port); + if (!dw[0].name) + return -ENOMEM; + + dw[1].id = snd_soc_dapm_mic; + dw[1].reg = SND_SOC_NOPM; + dw[1].name = devm_kasprintf(dev, GFP_KERNEL, "ssp%dcp", ssp_port); + if (!dw[1].name) + return -ENOMEM; + + *widgets = dw; + *num_widgets = num_dw; + + return 0; +} + +static int avs_i2s_test_probe(struct platform_device *pdev) +{ + struct snd_soc_dapm_widget *widgets; + struct snd_soc_dapm_route *routes; + struct snd_soc_dai_link *dai_link; + struct snd_soc_acpi_mach *mach; + struct snd_soc_card *card; + struct device *dev = &pdev->dev; + const char *pname; + int num_routes, num_widgets; + int ssp_port, ret; + + mach = dev_get_platdata(dev); + pname = mach->mach_params.platform; + ssp_port = __ffs(mach->mach_params.i2s_link_mask); + + card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!card) + return -ENOMEM; + + card->name = devm_kasprintf(dev, GFP_KERNEL, "ssp%d-loopback", ssp_port); + if (!card->name) + return -ENOMEM; + + ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); + if (ret) { + dev_err(dev, "Failed to create dai link: %d\n", ret); + return ret; + } + + ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes); + if (ret) { + dev_err(dev, "Failed to create dapm routes: %d\n", ret); + return ret; + } + + ret = avs_create_dapm_widgets(dev, ssp_port, &widgets, &num_widgets); + if (ret) { + dev_err(dev, "Failed to create dapm widgets: %d\n", ret); + return ret; + } + + card->dev = dev; + card->owner = THIS_MODULE; + card->dai_link = dai_link; + card->num_links = 1; + card->dapm_routes = routes; + card->num_dapm_routes = num_routes; + card->dapm_widgets = widgets; + card->num_dapm_widgets = num_widgets; + card->fully_routed = true; + + ret = snd_soc_fixup_dai_links_platform_name(card, pname); + if (ret) + return ret; + + return devm_snd_soc_register_card(dev, card); +} + +static struct platform_driver avs_i2s_test_driver = { + .probe = avs_i2s_test_probe, + .driver = { + .name = "avs_i2s_test", + .pm = &snd_soc_pm_ops, + }, +}; + +module_platform_driver(avs_i2s_test_driver); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:avs_i2s_test"); diff --git a/sound/soc/intel/avs/boards/max98357a.c b/sound/soc/intel/avs/boards/max98357a.c new file mode 100644 index 000000000..183123d08 --- /dev/null +++ b/sound/soc/intel/avs/boards/max98357a.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. 
+// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include + +static const struct snd_kcontrol_new card_controls[] = { + SOC_DAPM_PIN_SWITCH("Spk"), +}; + +static const struct snd_soc_dapm_widget card_widgets[] = { + SND_SOC_DAPM_SPK("Spk", NULL), +}; + +static const struct snd_soc_dapm_route card_base_routes[] = { + { "Spk", NULL, "Speaker" }, +}; + +static int +avs_max98357a_be_fixup(struct snd_soc_pcm_runtime *runrime, struct snd_pcm_hw_params *params) +{ + struct snd_interval *rate, *channels; + struct snd_mask *fmt; + + rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); + channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); + fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + + /* The ADSP will convert the FE rate to 48k, stereo */ + rate->min = rate->max = 48000; + channels->min = channels->max = 2; + + /* set SSP0 to 16 bit */ + snd_mask_none(fmt); + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE); + return 0; +} + +static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, + struct snd_soc_dai_link **dai_link) +{ + struct snd_soc_dai_link_component *platform; + struct snd_soc_dai_link *dl; + + dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); + platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); + if (!dl || !platform) + return -ENOMEM; + + platform->name = platform_name; + + dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); + dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); + dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); + if (!dl->name || !dl->cpus || !dl->codecs) + return -ENOMEM; + + dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); + dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "MX98357A:00"); + dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, "HiFi"); + if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) + return -ENOMEM; + + dl->num_cpus = 1; + dl->num_codecs = 1; + dl->platforms = platform; + dl->num_platforms = 1; + dl->id = 0; + dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; + dl->be_hw_params_fixup = avs_max98357a_be_fixup; + dl->nonatomic = 1; + dl->no_pcm = 1; + dl->dpcm_playback = 1; + + *dai_link = dl; + + return 0; +} + +static int avs_create_dapm_routes(struct device *dev, int ssp_port, + struct snd_soc_dapm_route **routes, int *num_routes) +{ + struct snd_soc_dapm_route *dr; + const int num_base = ARRAY_SIZE(card_base_routes); + const int num_dr = num_base + 1; + int idx; + + dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + memcpy(dr, card_base_routes, num_base * sizeof(*dr)); + + idx = num_base; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "HiFi Playback"); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + *routes = dr; + *num_routes = num_dr; + + return 0; +} + +static int avs_max98357a_probe(struct platform_device *pdev) +{ + struct snd_soc_dapm_route *routes; + struct snd_soc_dai_link *dai_link; + struct snd_soc_acpi_mach *mach; + struct snd_soc_card *card; + struct device *dev = &pdev->dev; + const char *pname; + int num_routes, ssp_port, ret; + + mach = dev_get_platdata(dev); + pname = mach->mach_params.platform; + ssp_port = __ffs(mach->mach_params.i2s_link_mask); + + ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); + if (ret) { + dev_err(dev, 
"Failed to create dai link: %d", ret); + return ret; + } + + ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes); + if (ret) { + dev_err(dev, "Failed to create dapm routes: %d", ret); + return ret; + } + + card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!card) + return -ENOMEM; + + card->name = "avs_max98357a"; + card->dev = dev; + card->owner = THIS_MODULE; + card->dai_link = dai_link; + card->num_links = 1; + card->controls = card_controls; + card->num_controls = ARRAY_SIZE(card_controls); + card->dapm_widgets = card_widgets; + card->num_dapm_widgets = ARRAY_SIZE(card_widgets); + card->dapm_routes = routes; + card->num_dapm_routes = num_routes; + card->fully_routed = true; + + ret = snd_soc_fixup_dai_links_platform_name(card, pname); + if (ret) + return ret; + + return devm_snd_soc_register_card(dev, card); +} + +static struct platform_driver avs_max98357a_driver = { + .probe = avs_max98357a_probe, + .driver = { + .name = "avs_max98357a", + .pm = &snd_soc_pm_ops, + }, +}; + +module_platform_driver(avs_max98357a_driver) + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:avs_max98357a"); diff --git a/sound/soc/intel/avs/boards/max98373.c b/sound/soc/intel/avs/boards/max98373.c new file mode 100644 index 000000000..0fa8f5606 --- /dev/null +++ b/sound/soc/intel/avs/boards/max98373.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include + +#define MAX98373_DEV0_NAME "i2c-MX98373:00" +#define MAX98373_DEV1_NAME "i2c-MX98373:01" +#define MAX98373_CODEC_NAME "max98373-aif1" + +static struct snd_soc_codec_conf card_codec_conf[] = { + { + .dlc = COMP_CODEC_CONF(MAX98373_DEV0_NAME), + .name_prefix = "Right", + }, + { + .dlc = COMP_CODEC_CONF(MAX98373_DEV1_NAME), + .name_prefix = "Left", + }, +}; + +static const struct snd_kcontrol_new card_controls[] = { + SOC_DAPM_PIN_SWITCH("Left Spk"), + SOC_DAPM_PIN_SWITCH("Right Spk"), +}; + +static const struct snd_soc_dapm_widget card_widgets[] = { + SND_SOC_DAPM_SPK("Left Spk", NULL), + SND_SOC_DAPM_SPK("Right Spk", NULL), +}; + +static const struct snd_soc_dapm_route card_base_routes[] = { + { "Left Spk", NULL, "Left BE_OUT" }, + { "Right Spk", NULL, "Right BE_OUT" }, +}; + +static int +avs_max98373_be_fixup(struct snd_soc_pcm_runtime *runrime, struct snd_pcm_hw_params *params) +{ + struct snd_interval *rate, *channels; + struct snd_mask *fmt; + + rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); + channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); + fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + + /* The ADSP will covert the FE rate to 48k, stereo */ + rate->min = rate->max = 48000; + channels->min = channels->max = 2; + + /* set SSP0 to 16 bit */ + snd_mask_none(fmt); + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE); + return 0; +} + +static int avs_max98373_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *runtime = asoc_substream_to_rtd(substream); + struct snd_soc_dai *codec_dai; + int ret, i; + + for_each_rtd_codec_dais(runtime, i, codec_dai) { + if (!strcmp(codec_dai->component->name, MAX98373_DEV0_NAME)) { + ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x30, 3, 8, 16); + if (ret < 0) { + dev_err(runtime->dev, "DEV0 TDM slot err:%d\n", ret); + return ret; + } + } + if (!strcmp(codec_dai->component->name, MAX98373_DEV1_NAME)) { + ret = 
snd_soc_dai_set_tdm_slot(codec_dai, 0xC0, 3, 8, 16); + if (ret < 0) { + dev_err(runtime->dev, "DEV1 TDM slot err:%d\n", ret); + return ret; + } + } + } + + return 0; +} + +static const struct snd_soc_ops avs_max98373_ops = { + .hw_params = avs_max98373_hw_params, +}; + +static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, + struct snd_soc_dai_link **dai_link) +{ + struct snd_soc_dai_link_component *platform; + struct snd_soc_dai_link *dl; + + dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); + platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); + if (!dl || !platform) + return -ENOMEM; + + platform->name = platform_name; + + dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); + dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); + dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs) * 2, GFP_KERNEL); + if (!dl->name || !dl->cpus || !dl->codecs) + return -ENOMEM; + + dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); + dl->codecs[0].name = devm_kasprintf(dev, GFP_KERNEL, MAX98373_DEV0_NAME); + dl->codecs[0].dai_name = devm_kasprintf(dev, GFP_KERNEL, MAX98373_CODEC_NAME); + dl->codecs[1].name = devm_kasprintf(dev, GFP_KERNEL, MAX98373_DEV1_NAME); + dl->codecs[1].dai_name = devm_kasprintf(dev, GFP_KERNEL, MAX98373_CODEC_NAME); + if (!dl->cpus->dai_name || !dl->codecs[0].name || !dl->codecs[0].dai_name || + !dl->codecs[1].name || !dl->codecs[1].dai_name) + return -ENOMEM; + + dl->num_cpus = 1; + dl->num_codecs = 2; + dl->platforms = platform; + dl->num_platforms = 1; + dl->id = 0; + dl->dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBC_CFC; + dl->be_hw_params_fixup = avs_max98373_be_fixup; + dl->nonatomic = 1; + dl->no_pcm = 1; + dl->dpcm_capture = 1; + dl->dpcm_playback = 1; + dl->ignore_pmdown_time = 1; + dl->ops = &avs_max98373_ops; + + *dai_link = dl; + + return 0; +} + +static int avs_create_dapm_routes(struct device *dev, int ssp_port, + struct snd_soc_dapm_route **routes, int *num_routes) +{ + struct snd_soc_dapm_route *dr; + const int num_base = ARRAY_SIZE(card_base_routes); + const int num_dr = num_base + 2; + int idx; + + dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + memcpy(dr, card_base_routes, num_base * sizeof(*dr)); + + idx = num_base; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "Left HiFi Playback"); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + idx++; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "Right HiFi Playback"); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + *routes = dr; + *num_routes = num_dr; + + return 0; +} + +static int avs_max98373_probe(struct platform_device *pdev) +{ + struct snd_soc_dapm_route *routes; + struct snd_soc_dai_link *dai_link; + struct snd_soc_acpi_mach *mach; + struct snd_soc_card *card; + struct device *dev = &pdev->dev; + const char *pname; + int num_routes, ssp_port, ret; + + mach = dev_get_platdata(dev); + pname = mach->mach_params.platform; + ssp_port = __ffs(mach->mach_params.i2s_link_mask); + + ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); + if (ret) { + dev_err(dev, "Failed to create dai link: %d", ret); + return ret; + } + + ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes); + if (ret) { + dev_err(dev, "Failed to create dapm routes: %d", ret); + return ret; + } 
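+	/* Assemble the card: a single SSP link carrying both MAX98373 amplifiers with Left/Right codec_conf prefixes */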
+ + card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!card) + return -ENOMEM; + + card->name = "avs_max98373"; + card->dev = dev; + card->owner = THIS_MODULE; + card->dai_link = dai_link; + card->num_links = 1; + card->codec_conf = card_codec_conf; + card->num_configs = ARRAY_SIZE(card_codec_conf); + card->controls = card_controls; + card->num_controls = ARRAY_SIZE(card_controls); + card->dapm_widgets = card_widgets; + card->num_dapm_widgets = ARRAY_SIZE(card_widgets); + card->dapm_routes = routes; + card->num_dapm_routes = num_routes; + card->fully_routed = true; + + ret = snd_soc_fixup_dai_links_platform_name(card, pname); + if (ret) + return ret; + + return devm_snd_soc_register_card(dev, card); +} + +static struct platform_driver avs_max98373_driver = { + .probe = avs_max98373_probe, + .driver = { + .name = "avs_max98373", + .pm = &snd_soc_pm_ops, + }, +}; + +module_platform_driver(avs_max98373_driver) + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:avs_max98373"); diff --git a/sound/soc/intel/avs/boards/nau8825.c b/sound/soc/intel/avs/boards/nau8825.c new file mode 100644 index 000000000..8392d8fac --- /dev/null +++ b/sound/soc/intel/avs/boards/nau8825.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../../../codecs/nau8825.h" + +#define SKL_NUVOTON_CODEC_DAI "nau8825-hifi" + +static int +avs_nau8825_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *control, int event) +{ + struct snd_soc_dapm_context *dapm = w->dapm; + struct snd_soc_card *card = dapm->card; + struct snd_soc_dai *codec_dai; + int ret; + + codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI); + if (!codec_dai) { + dev_err(card->dev, "Codec dai not found\n"); + return -EINVAL; + } + + if (SND_SOC_DAPM_EVENT_ON(event)) + ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_MCLK, 24000000, + SND_SOC_CLOCK_IN); + else + ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN); + if (ret < 0) + dev_err(card->dev, "Set sysclk failed: %d\n", ret); + + return ret; +} + +static const struct snd_kcontrol_new card_controls[] = { + SOC_DAPM_PIN_SWITCH("Headphone Jack"), + SOC_DAPM_PIN_SWITCH("Headset Mic"), +}; + +static const struct snd_soc_dapm_widget card_widgets[] = { + SND_SOC_DAPM_HP("Headphone Jack", NULL), + SND_SOC_DAPM_MIC("Headset Mic", NULL), + SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, avs_nau8825_clock_control, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), +}; + +static const struct snd_soc_dapm_route card_base_routes[] = { + { "Headphone Jack", NULL, "HPOL" }, + { "Headphone Jack", NULL, "HPOR" }, + + { "MIC", NULL, "Headset Mic" }, + + { "Headphone Jack", NULL, "Platform Clock" }, + { "Headset Mic", NULL, "Platform Clock" }, +}; + +static struct snd_soc_jack_pin card_headset_pins[] = { + { + .pin = "Headphone Jack", + .mask = SND_JACK_HEADPHONE, + }, + { + .pin = "Headset Mic", + .mask = SND_JACK_MICROPHONE, + }, +}; + +static int avs_nau8825_codec_init(struct snd_soc_pcm_runtime *runtime) +{ + struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0); + struct snd_soc_component *component = codec_dai->component; + struct snd_soc_jack_pin *pins; + struct snd_soc_jack *jack; + struct snd_soc_card *card = runtime->card; + int num_pins, ret; + + jack = snd_soc_card_get_drvdata(card); + 
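+	/* The jack object is allocated in avs_nau8825_probe() and stashed as card drvdata */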
num_pins = ARRAY_SIZE(card_headset_pins); + + pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL); + if (!pins) + return -ENOMEM; + + /* + * 4 buttons here map to the google Reference headset. + * The use of these buttons can be decided by the user space. + */ + ret = snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0 | + SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3, + jack, pins, num_pins); + if (ret) + return ret; + + snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE); + snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND); + snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP); + snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN); + + //snd_soc_component_set_jack(component, jack, NULL); + // TODO: Fix nau8825 codec to use .set_jack, like everyone else + nau8825_enable_jack_detect(component, jack); + + return 0; +} + +static int +avs_nau8825_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params) +{ + struct snd_interval *rate, *channels; + struct snd_mask *fmt; + + rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); + channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); + fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + + /* The ADSP will convert the FE rate to 48k, stereo */ + rate->min = rate->max = 48000; + channels->min = channels->max = 2; + + /* set SSP to 24 bit */ + snd_mask_none(fmt); + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); + + return 0; +} + +static int avs_nau8825_trigger(struct snd_pcm_substream *substream, int cmd) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct snd_soc_pcm_runtime *rtm = asoc_substream_to_rtd(substream); + struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtm, 0); + int ret = 0; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_FLL_FS, 0, SND_SOC_CLOCK_IN); + if (ret < 0) { + dev_err(codec_dai->dev, "can't set FS clock %d\n", ret); + break; + } + + ret = snd_soc_dai_set_pll(codec_dai, 0, 0, runtime->rate, runtime->rate * 256); + if (ret < 0) + dev_err(codec_dai->dev, "can't set FLL: %d\n", ret); + break; + + case SNDRV_PCM_TRIGGER_RESUME: + ret = snd_soc_dai_set_pll(codec_dai, 0, 0, runtime->rate, runtime->rate * 256); + if (ret < 0) + dev_err(codec_dai->dev, "can't set FLL: %d\n", ret); + break; + } + + return ret; +} + + +static const struct snd_soc_ops avs_nau8825_ops = { + .trigger = avs_nau8825_trigger, +}; + +static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, + struct snd_soc_dai_link **dai_link) +{ + struct snd_soc_dai_link_component *platform; + struct snd_soc_dai_link *dl; + + dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); + platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); + if (!dl || !platform) + return -ENOMEM; + + platform->name = platform_name; + + dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); + dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); + dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); + if (!dl->name || !dl->cpus || !dl->codecs) + return -ENOMEM; + + dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); + dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-10508825:00"); + dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, SKL_NUVOTON_CODEC_DAI); + if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) + return -ENOMEM; + + dl->num_cpus = 1; + 
dl->num_codecs = 1; + dl->platforms = platform; + dl->num_platforms = 1; + dl->id = 0; + dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; + dl->init = avs_nau8825_codec_init; + dl->be_hw_params_fixup = avs_nau8825_be_fixup; + dl->ops = &avs_nau8825_ops; + dl->nonatomic = 1; + dl->no_pcm = 1; + dl->dpcm_capture = 1; + dl->dpcm_playback = 1; + + *dai_link = dl; + + return 0; +} + +static int avs_create_dapm_routes(struct device *dev, int ssp_port, + struct snd_soc_dapm_route **routes, int *num_routes) +{ + struct snd_soc_dapm_route *dr; + const int num_base = ARRAY_SIZE(card_base_routes); + const int num_dr = num_base + 2; + int idx; + + dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + memcpy(dr, card_base_routes, num_base * sizeof(*dr)); + + idx = num_base; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "Playback"); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + idx++; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "Capture"); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + *routes = dr; + *num_routes = num_dr; + + return 0; +} + +static int avs_card_set_jack(struct snd_soc_card *card, struct snd_soc_jack *jack) +{ + struct snd_soc_component *component; + + for_each_card_components(card, component) + snd_soc_component_set_jack(component, jack, NULL); + return 0; +} + +static int avs_card_remove(struct snd_soc_card *card) +{ + return avs_card_set_jack(card, NULL); +} + +static int avs_card_suspend_pre(struct snd_soc_card *card) +{ + return avs_card_set_jack(card, NULL); +} + +static int avs_card_resume_post(struct snd_soc_card *card) +{ + struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI); + struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card); + + if (!codec_dai) { + dev_err(card->dev, "Codec dai not found\n"); + return -EINVAL; + } + + if (codec_dai->stream_active[SNDRV_PCM_STREAM_PLAYBACK] && + codec_dai->playback_widget->active) + snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_FLL_FS, 0, SND_SOC_CLOCK_IN); + + return avs_card_set_jack(card, jack); +} + +static int avs_nau8825_probe(struct platform_device *pdev) +{ + struct snd_soc_dapm_route *routes; + struct snd_soc_dai_link *dai_link; + struct snd_soc_acpi_mach *mach; + struct snd_soc_card *card; + struct snd_soc_jack *jack; + struct device *dev = &pdev->dev; + const char *pname; + int num_routes, ssp_port, ret; + + mach = dev_get_platdata(dev); + pname = mach->mach_params.platform; + ssp_port = __ffs(mach->mach_params.i2s_link_mask); + + ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); + if (ret) { + dev_err(dev, "Failed to create dai link: %d", ret); + return ret; + } + + ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes); + if (ret) { + dev_err(dev, "Failed to create dapm routes: %d", ret); + return ret; + } + + jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL); + card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!jack || !card) + return -ENOMEM; + + card->name = "avs_nau8825"; + card->dev = dev; + card->owner = THIS_MODULE; + card->remove = avs_card_remove; + card->suspend_pre = avs_card_suspend_pre; + card->resume_post = avs_card_resume_post; + card->dai_link = dai_link; + card->num_links = 1; + card->controls = card_controls; + card->num_controls = ARRAY_SIZE(card_controls); + card->dapm_widgets = 
card_widgets; + card->num_dapm_widgets = ARRAY_SIZE(card_widgets); + card->dapm_routes = routes; + card->num_dapm_routes = num_routes; + card->fully_routed = true; + snd_soc_card_set_drvdata(card, jack); + + ret = snd_soc_fixup_dai_links_platform_name(card, pname); + if (ret) + return ret; + + return devm_snd_soc_register_card(dev, card); +} + +static struct platform_driver avs_nau8825_driver = { + .probe = avs_nau8825_probe, + .driver = { + .name = "avs_nau8825", + .pm = &snd_soc_pm_ops, + }, +}; + +module_platform_driver(avs_nau8825_driver) + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:avs_nau8825"); diff --git a/sound/soc/intel/avs/boards/rt274.c b/sound/soc/intel/avs/boards/rt274.c new file mode 100644 index 000000000..afef5a3ca --- /dev/null +++ b/sound/soc/intel/avs/boards/rt274.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include +#include "../../../codecs/rt274.h" + +#define AVS_RT274_FREQ_OUT 24000000 +#define AVS_RT274_BE_FIXUP_RATE 48000 +#define RT274_CODEC_DAI "rt274-aif1" + +static const struct snd_kcontrol_new card_controls[] = { + SOC_DAPM_PIN_SWITCH("Headphone Jack"), + SOC_DAPM_PIN_SWITCH("Mic Jack"), +}; + +static int +avs_rt274_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *control, int event) +{ + struct snd_soc_dapm_context *dapm = w->dapm; + struct snd_soc_card *card = dapm->card; + struct snd_soc_dai *codec_dai; + int ret; + + codec_dai = snd_soc_card_get_codec_dai(card, RT274_CODEC_DAI); + if (!codec_dai) + return -EINVAL; + + /* Codec needs clock for Jack detection and button press */ + ret = snd_soc_dai_set_sysclk(codec_dai, RT274_SCLK_S_PLL2, AVS_RT274_FREQ_OUT, + SND_SOC_CLOCK_IN); + if (ret < 0) { + dev_err(codec_dai->dev, "set codec sysclk failed: %d\n", ret); + return ret; + } + + if (SND_SOC_DAPM_EVENT_ON(event)) { + int ratio = 100; + + snd_soc_dai_set_bclk_ratio(codec_dai, ratio); + + ret = snd_soc_dai_set_pll(codec_dai, 0, RT274_PLL2_S_BCLK, + AVS_RT274_BE_FIXUP_RATE * ratio, AVS_RT274_FREQ_OUT); + if (ret) { + dev_err(codec_dai->dev, "failed to enable PLL2: %d\n", ret); + return ret; + } + } + + return 0; +} + +static const struct snd_soc_dapm_widget card_widgets[] = { + SND_SOC_DAPM_HP("Headphone Jack", NULL), + SND_SOC_DAPM_MIC("Mic Jack", NULL), + SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, avs_rt274_clock_control, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), +}; + +static const struct snd_soc_dapm_route card_base_routes[] = { + {"Headphone Jack", NULL, "HPO Pin"}, + {"MIC", NULL, "Mic Jack"}, + + {"Headphone Jack", NULL, "Platform Clock"}, + {"MIC", NULL, "Platform Clock"}, +}; + +static struct snd_soc_jack_pin card_headset_pins[] = { + { + .pin = "Headphone Jack", + .mask = SND_JACK_HEADPHONE, + }, + { + .pin = "Mic Jack", + .mask = SND_JACK_MICROPHONE, + }, +}; + +static int avs_rt274_codec_init(struct snd_soc_pcm_runtime *runtime) +{ + struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0); + struct snd_soc_component *component = codec_dai->component; + struct snd_soc_jack_pin *pins; + struct snd_soc_jack *jack; + struct snd_soc_card *card = runtime->card; + int num_pins, ret; + + jack = snd_soc_card_get_drvdata(card); + num_pins = ARRAY_SIZE(card_headset_pins); + + pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL); + if (!pins) + return -ENOMEM; + + ret = 
snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET, jack, pins, num_pins); + if (ret) + return ret; + + snd_soc_component_set_jack(component, jack, NULL); + + /* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */ + ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0xF, 4, 24); + if (ret < 0) { + dev_err(card->dev, "can't set codec pcm format %d\n", ret); + return ret; + } + + card->dapm.idle_bias_off = true; + + return 0; +} + +static int avs_rt274_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params) +{ + struct snd_interval *rate, *channels; + struct snd_mask *fmt; + + rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); + channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); + fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + + /* The ADSP will convert the FE rate to 48k, stereo */ + rate->min = rate->max = AVS_RT274_BE_FIXUP_RATE; + channels->min = channels->max = 2; + + /* set SSPN to 24 bit */ + snd_mask_none(fmt); + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); + + return 0; +} + +static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, + struct snd_soc_dai_link **dai_link) +{ + struct snd_soc_dai_link_component *platform; + struct snd_soc_dai_link *dl; + + dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); + platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); + if (!dl || !platform) + return -ENOMEM; + + platform->name = platform_name; + + dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); + dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); + dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); + if (!dl->name || !dl->cpus || !dl->codecs) + return -ENOMEM; + + dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); + dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT34C2:00"); + dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, "rt274-aif1"); + if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) + return -ENOMEM; + + dl->num_cpus = 1; + dl->num_codecs = 1; + dl->platforms = platform; + dl->num_platforms = 1; + dl->id = 0; + dl->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; + dl->init = avs_rt274_codec_init; + dl->be_hw_params_fixup = avs_rt274_be_fixup; + dl->nonatomic = 1; + dl->no_pcm = 1; + dl->dpcm_capture = 1; + dl->dpcm_playback = 1; + + *dai_link = dl; + + return 0; +} + +static int avs_create_dapm_routes(struct device *dev, int ssp_port, + struct snd_soc_dapm_route **routes, int *num_routes) +{ + struct snd_soc_dapm_route *dr; + const int num_base = ARRAY_SIZE(card_base_routes); + const int num_dr = num_base + 2; + int idx; + + dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + memcpy(dr, card_base_routes, num_base * sizeof(*dr)); + + idx = num_base; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Playback"); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + idx++; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Capture"); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + *routes = dr; + *num_routes = num_dr; + + return 0; +} + +static int avs_card_set_jack(struct snd_soc_card *card, struct snd_soc_jack *jack) +{ + struct snd_soc_component *component; + + for_each_card_components(card, component) + 
snd_soc_component_set_jack(component, jack, NULL); + return 0; +} + +static int avs_card_remove(struct snd_soc_card *card) +{ + return avs_card_set_jack(card, NULL); +} + +static int avs_card_suspend_pre(struct snd_soc_card *card) +{ + return avs_card_set_jack(card, NULL); +} + +static int avs_card_resume_post(struct snd_soc_card *card) +{ + struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card); + + return avs_card_set_jack(card, jack); +} + +static int avs_rt274_probe(struct platform_device *pdev) +{ + struct snd_soc_dapm_route *routes; + struct snd_soc_dai_link *dai_link; + struct snd_soc_acpi_mach *mach; + struct snd_soc_card *card; + struct snd_soc_jack *jack; + struct device *dev = &pdev->dev; + const char *pname; + int num_routes, ssp_port, ret; + + mach = dev_get_platdata(dev); + pname = mach->mach_params.platform; + ssp_port = __ffs(mach->mach_params.i2s_link_mask); + + ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); + if (ret) { + dev_err(dev, "Failed to create dai link: %d", ret); + return ret; + } + + ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes); + if (ret) { + dev_err(dev, "Failed to create dapm routes: %d", ret); + return ret; + } + + jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL); + card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!jack || !card) + return -ENOMEM; + + card->name = "avs_rt274"; + card->dev = dev; + card->owner = THIS_MODULE; + card->remove = avs_card_remove; + card->suspend_pre = avs_card_suspend_pre; + card->resume_post = avs_card_resume_post; + card->dai_link = dai_link; + card->num_links = 1; + card->controls = card_controls; + card->num_controls = ARRAY_SIZE(card_controls); + card->dapm_widgets = card_widgets; + card->num_dapm_widgets = ARRAY_SIZE(card_widgets); + card->dapm_routes = routes; + card->num_dapm_routes = num_routes; + card->fully_routed = true; + snd_soc_card_set_drvdata(card, jack); + + ret = snd_soc_fixup_dai_links_platform_name(card, pname); + if (ret) + return ret; + + return devm_snd_soc_register_card(dev, card); +} + +static struct platform_driver avs_rt274_driver = { + .probe = avs_rt274_probe, + .driver = { + .name = "avs_rt274", + .pm = &snd_soc_pm_ops, + }, +}; + +module_platform_driver(avs_rt274_driver); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:avs_rt274"); diff --git a/sound/soc/intel/avs/boards/rt286.c b/sound/soc/intel/avs/boards/rt286.c new file mode 100644 index 000000000..e51d4e181 --- /dev/null +++ b/sound/soc/intel/avs/boards/rt286.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. 
+// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include +#include "../../../codecs/rt286.h" + +static const struct snd_kcontrol_new card_controls[] = { + SOC_DAPM_PIN_SWITCH("Headphone Jack"), + SOC_DAPM_PIN_SWITCH("Mic Jack"), + SOC_DAPM_PIN_SWITCH("Speaker"), +}; + +static const struct snd_soc_dapm_widget card_widgets[] = { + SND_SOC_DAPM_HP("Headphone Jack", NULL), + SND_SOC_DAPM_MIC("Mic Jack", NULL), + SND_SOC_DAPM_SPK("Speaker", NULL), +}; + +static const struct snd_soc_dapm_route card_base_routes[] = { + /* HP jack connectors - unknown if we have jack detect */ + {"Headphone Jack", NULL, "HPO Pin"}, + {"MIC1", NULL, "Mic Jack"}, + + {"Speaker", NULL, "SPOR"}, + {"Speaker", NULL, "SPOL"}, +}; + +static struct snd_soc_jack_pin card_headset_pins[] = { + { + .pin = "Headphone Jack", + .mask = SND_JACK_HEADPHONE, + }, + { + .pin = "Mic Jack", + .mask = SND_JACK_MICROPHONE, + }, +}; + +static int avs_rt286_codec_init(struct snd_soc_pcm_runtime *runtime) +{ + struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component; + struct snd_soc_jack_pin *pins; + struct snd_soc_jack *jack; + struct snd_soc_card *card = runtime->card; + int num_pins, ret; + + jack = snd_soc_card_get_drvdata(card); + num_pins = ARRAY_SIZE(card_headset_pins); + + pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL); + if (!pins) + return -ENOMEM; + + ret = snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0, jack, + pins, num_pins); + if (ret) + return ret; + + snd_soc_component_set_jack(component, jack, NULL); + + return 0; +} + +static int avs_rt286_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params) +{ + struct snd_interval *rate, *channels; + struct snd_mask *fmt; + + rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); + channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); + fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + + /* The ADSP will convert the FE rate to 48k, stereo */ + rate->min = rate->max = 48000; + channels->min = channels->max = 2; + + /* set SSP0 to 24 bit */ + snd_mask_none(fmt); + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); + + return 0; +} + +static int +avs_rt286_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *runtime = substream->private_data; + struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0); + int ret; + + ret = snd_soc_dai_set_sysclk(codec_dai, RT286_SCLK_S_PLL, 24000000, SND_SOC_CLOCK_IN); + if (ret < 0) + dev_err(runtime->dev, "Set codec sysclk failed: %d\n", ret); + + return ret; +} + +static const struct snd_soc_ops avs_rt286_ops = { + .hw_params = avs_rt286_hw_params, +}; + +static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, + struct snd_soc_dai_link **dai_link) +{ + struct snd_soc_dai_link_component *platform; + struct snd_soc_dai_link *dl; + + dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); + platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); + if (!dl || !platform) + return -ENOMEM; + + platform->name = platform_name; + + dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); + dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); + dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); + if (!dl->name || !dl->cpus || !dl->codecs) + return -ENOMEM; + + dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d 
Pin", ssp_port); + dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT343A:00"); + dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, "rt286-aif1"); + if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) + return -ENOMEM; + + dl->num_cpus = 1; + dl->num_codecs = 1; + dl->platforms = platform; + dl->num_platforms = 1; + dl->id = 0; + dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; + dl->init = avs_rt286_codec_init; + dl->be_hw_params_fixup = avs_rt286_be_fixup; + dl->ops = &avs_rt286_ops; + dl->nonatomic = 1; + dl->no_pcm = 1; + dl->dpcm_capture = 1; + dl->dpcm_playback = 1; + + *dai_link = dl; + + return 0; +} + +static int avs_create_dapm_routes(struct device *dev, int ssp_port, + struct snd_soc_dapm_route **routes, int *num_routes) +{ + struct snd_soc_dapm_route *dr; + const int num_base = ARRAY_SIZE(card_base_routes); + const int num_dr = num_base + 2; + int idx; + + dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + memcpy(dr, card_base_routes, num_base * sizeof(*dr)); + + idx = num_base; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Playback"); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + idx++; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Capture"); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + *routes = dr; + *num_routes = num_dr; + + return 0; +} + +static int avs_card_set_jack(struct snd_soc_card *card, struct snd_soc_jack *jack) +{ + struct snd_soc_component *component; + + for_each_card_components(card, component) + snd_soc_component_set_jack(component, jack, NULL); + return 0; +} + +static int avs_card_remove(struct snd_soc_card *card) +{ + return avs_card_set_jack(card, NULL); +} + +static int avs_card_suspend_pre(struct snd_soc_card *card) +{ + return avs_card_set_jack(card, NULL); +} + +static int avs_card_resume_post(struct snd_soc_card *card) +{ + struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card); + + return avs_card_set_jack(card, jack); +} + +static int avs_rt286_probe(struct platform_device *pdev) +{ + struct snd_soc_dapm_route *routes; + struct snd_soc_dai_link *dai_link; + struct snd_soc_acpi_mach *mach; + struct snd_soc_card *card; + struct snd_soc_jack *jack; + struct device *dev = &pdev->dev; + const char *pname; + int num_routes, ssp_port, ret; + + mach = dev_get_platdata(dev); + pname = mach->mach_params.platform; + ssp_port = __ffs(mach->mach_params.i2s_link_mask); + + ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); + if (ret) { + dev_err(dev, "Failed to create dai link: %d", ret); + return ret; + } + + ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes); + if (ret) { + dev_err(dev, "Failed to create dapm routes: %d", ret); + return ret; + } + + jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL); + card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!jack || !card) + return -ENOMEM; + + card->name = "avs_rt286"; + card->dev = dev; + card->owner = THIS_MODULE; + card->remove = avs_card_remove; + card->suspend_pre = avs_card_suspend_pre; + card->resume_post = avs_card_resume_post; + card->dai_link = dai_link; + card->num_links = 1; + card->controls = card_controls; + card->num_controls = ARRAY_SIZE(card_controls); + card->dapm_widgets = card_widgets; + card->num_dapm_widgets = ARRAY_SIZE(card_widgets); + card->dapm_routes = 
routes; + card->num_dapm_routes = num_routes; + card->fully_routed = true; + snd_soc_card_set_drvdata(card, jack); + + ret = snd_soc_fixup_dai_links_platform_name(card, pname); + if (ret) + return ret; + + return devm_snd_soc_register_card(dev, card); +} + +static struct platform_driver avs_rt286_driver = { + .probe = avs_rt286_probe, + .driver = { + .name = "avs_rt286", + .pm = &snd_soc_pm_ops, + }, +}; + +module_platform_driver(avs_rt286_driver); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:avs_rt286"); diff --git a/sound/soc/intel/avs/boards/rt298.c b/sound/soc/intel/avs/boards/rt298.c new file mode 100644 index 000000000..58c9d9ede --- /dev/null +++ b/sound/soc/intel/avs/boards/rt298.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include +#include +#include "../../../codecs/rt298.h" + +static const struct dmi_system_id kblr_dmi_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Kabylake R DDR4 RVP"), + }, + }, + {} +}; + +static const struct snd_kcontrol_new card_controls[] = { + SOC_DAPM_PIN_SWITCH("Headphone Jack"), + SOC_DAPM_PIN_SWITCH("Mic Jack"), + SOC_DAPM_PIN_SWITCH("Speaker"), +}; + +static const struct snd_soc_dapm_widget card_widgets[] = { + SND_SOC_DAPM_HP("Headphone Jack", NULL), + SND_SOC_DAPM_MIC("Mic Jack", NULL), + SND_SOC_DAPM_SPK("Speaker", NULL), +}; + +static const struct snd_soc_dapm_route card_base_routes[] = { + /* HP jack connectors - unknown if we have jack detect */ + {"Headphone Jack", NULL, "HPO Pin"}, + {"MIC1", NULL, "Mic Jack"}, + + {"Speaker", NULL, "SPOR"}, + {"Speaker", NULL, "SPOL"}, +}; + +static struct snd_soc_jack_pin card_headset_pins[] = { + { + .pin = "Headphone Jack", + .mask = SND_JACK_HEADPHONE, + }, + { + .pin = "Mic Jack", + .mask = SND_JACK_MICROPHONE, + }, +}; + +static int avs_rt298_codec_init(struct snd_soc_pcm_runtime *runtime) +{ + struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component; + struct snd_soc_jack_pin *pins; + struct snd_soc_jack *jack; + struct snd_soc_card *card = runtime->card; + int num_pins, ret; + + jack = snd_soc_card_get_drvdata(card); + num_pins = ARRAY_SIZE(card_headset_pins); + + pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL); + if (!pins) + return -ENOMEM; + + ret = snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0, jack, + pins, num_pins); + if (ret) + return ret; + + snd_soc_component_set_jack(component, jack, NULL); + + return 0; +} + +static int avs_rt298_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params) +{ + struct snd_interval *rate, *channels; + struct snd_mask *fmt; + + rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); + channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); + fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + + /* The ADSP will convert the FE rate to 48k, stereo */ + rate->min = rate->max = 48000; + channels->min = channels->max = 2; + + /* set SSP0 to 24 bit */ + snd_mask_none(fmt); + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); + + return 0; +} + +static int +avs_rt298_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0); 
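+	/* Kabylake-R RVP feeds the codec a 24 MHz sysclk; other boards use 19.2 MHz */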
+ unsigned int clk_freq; + int ret; + + if (dmi_first_match(kblr_dmi_table)) + clk_freq = 24000000; + else + clk_freq = 19200000; + + ret = snd_soc_dai_set_sysclk(codec_dai, RT298_SCLK_S_PLL, clk_freq, SND_SOC_CLOCK_IN); + if (ret < 0) + dev_err(rtd->dev, "Set codec sysclk failed: %d\n", ret); + + return ret; +} + +static const struct snd_soc_ops avs_rt298_ops = { + .hw_params = avs_rt298_hw_params, +}; + +static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, + struct snd_soc_dai_link **dai_link) +{ + struct snd_soc_dai_link_component *platform; + struct snd_soc_dai_link *dl; + + dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); + platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); + if (!dl || !platform) + return -ENOMEM; + + platform->name = platform_name; + + dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); + dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); + dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); + if (!dl->name || !dl->cpus || !dl->codecs) + return -ENOMEM; + + dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); + dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT343A:00"); + dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, "rt298-aif1"); + if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) + return -ENOMEM; + + dl->num_cpus = 1; + dl->num_codecs = 1; + dl->platforms = platform; + dl->num_platforms = 1; + dl->id = 0; + if (dmi_first_match(kblr_dmi_table)) + dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; + else + dl->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; + dl->init = avs_rt298_codec_init; + dl->be_hw_params_fixup = avs_rt298_be_fixup; + dl->ops = &avs_rt298_ops; + dl->nonatomic = 1; + dl->no_pcm = 1; + dl->dpcm_capture = 1; + dl->dpcm_playback = 1; + + *dai_link = dl; + + return 0; +} + +static int avs_create_dapm_routes(struct device *dev, int ssp_port, + struct snd_soc_dapm_route **routes, int *num_routes) +{ + struct snd_soc_dapm_route *dr; + const int num_base = ARRAY_SIZE(card_base_routes); + const int num_dr = num_base + 2; + int idx; + + dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + memcpy(dr, card_base_routes, num_base * sizeof(*dr)); + + idx = num_base; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Playback"); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + idx++; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Capture"); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + *routes = dr; + *num_routes = num_dr; + + return 0; +} + +static int avs_card_set_jack(struct snd_soc_card *card, struct snd_soc_jack *jack) +{ + struct snd_soc_component *component; + + for_each_card_components(card, component) + snd_soc_component_set_jack(component, jack, NULL); + return 0; +} + +static int avs_card_remove(struct snd_soc_card *card) +{ + return avs_card_set_jack(card, NULL); +} + +static int avs_card_suspend_pre(struct snd_soc_card *card) +{ + return avs_card_set_jack(card, NULL); +} + +static int avs_card_resume_post(struct snd_soc_card *card) +{ + struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card); + + return avs_card_set_jack(card, jack); +} + +static int avs_rt298_probe(struct platform_device *pdev) +{ + struct 
snd_soc_dapm_route *routes; + struct snd_soc_dai_link *dai_link; + struct snd_soc_acpi_mach *mach; + struct snd_soc_card *card; + struct snd_soc_jack *jack; + struct device *dev = &pdev->dev; + const char *pname; + int num_routes, ssp_port, ret; + + mach = dev_get_platdata(dev); + pname = mach->mach_params.platform; + ssp_port = __ffs(mach->mach_params.i2s_link_mask); + + ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); + if (ret) { + dev_err(dev, "Failed to create dai link: %d", ret); + return ret; + } + + ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes); + if (ret) { + dev_err(dev, "Failed to create dapm routes: %d", ret); + return ret; + } + + jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL); + card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!jack || !card) + return -ENOMEM; + + card->name = "avs_rt298"; + card->dev = dev; + card->owner = THIS_MODULE; + card->remove = avs_card_remove; + card->suspend_pre = avs_card_suspend_pre; + card->resume_post = avs_card_resume_post; + card->dai_link = dai_link; + card->num_links = 1; + card->controls = card_controls; + card->num_controls = ARRAY_SIZE(card_controls); + card->dapm_widgets = card_widgets; + card->num_dapm_widgets = ARRAY_SIZE(card_widgets); + card->dapm_routes = routes; + card->num_dapm_routes = num_routes; + card->fully_routed = true; + snd_soc_card_set_drvdata(card, jack); + + ret = snd_soc_fixup_dai_links_platform_name(card, pname); + if (ret) + return ret; + + return devm_snd_soc_register_card(dev, card); +} + +static struct platform_driver avs_rt298_driver = { + .probe = avs_rt298_probe, + .driver = { + .name = "avs_rt298", + .pm = &snd_soc_pm_ops, + }, +}; + +module_platform_driver(avs_rt298_driver); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:avs_rt298"); diff --git a/sound/soc/intel/avs/boards/rt5682.c b/sound/soc/intel/avs/boards/rt5682.c new file mode 100644 index 000000000..01f9b9f0c --- /dev/null +++ b/sound/soc/intel/avs/boards/rt5682.c @@ -0,0 +1,340 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. 
+// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../../common/soc-intel-quirks.h" +#include "../../../codecs/rt5682.h" + +#define AVS_RT5682_SSP_CODEC(quirk) ((quirk) & GENMASK(2, 0)) +#define AVS_RT5682_SSP_CODEC_MASK (GENMASK(2, 0)) +#define AVS_RT5682_MCLK_EN BIT(3) +#define AVS_RT5682_MCLK_24MHZ BIT(4) + +/* Default: MCLK on, MCLK 19.2M, SSP0 */ +static unsigned long avs_rt5682_quirk = AVS_RT5682_MCLK_EN | AVS_RT5682_SSP_CODEC(0); + +static int avs_rt5682_quirk_cb(const struct dmi_system_id *id) +{ + avs_rt5682_quirk = (unsigned long)id->driver_data; + return 1; +} + +static const struct dmi_system_id avs_rt5682_quirk_table[] = { + { + .callback = avs_rt5682_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "WhiskeyLake Client"), + }, + .driver_data = (void *)(AVS_RT5682_MCLK_EN | + AVS_RT5682_MCLK_24MHZ | + AVS_RT5682_SSP_CODEC(1)), + }, + { + .callback = avs_rt5682_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Ice Lake Client"), + }, + .driver_data = (void *)(AVS_RT5682_MCLK_EN | + AVS_RT5682_SSP_CODEC(0)), + }, + {} +}; + +static const struct snd_kcontrol_new card_controls[] = { + SOC_DAPM_PIN_SWITCH("Headphone Jack"), + SOC_DAPM_PIN_SWITCH("Headset Mic"), +}; + +static const struct snd_soc_dapm_widget card_widgets[] = { + SND_SOC_DAPM_HP("Headphone Jack", NULL), + SND_SOC_DAPM_MIC("Headset Mic", NULL), +}; + +static const struct snd_soc_dapm_route card_base_routes[] = { + /* HP jack connectors - unknown if we have jack detect */ + { "Headphone Jack", NULL, "HPOL" }, + { "Headphone Jack", NULL, "HPOR" }, + + /* other jacks */ + { "IN1P", NULL, "Headset Mic" }, +}; + +static int avs_rt5682_codec_init(struct snd_soc_pcm_runtime *runtime) +{ + struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component; + struct snd_soc_jack *jack; + struct snd_soc_card *card = runtime->card; + int ret; + + jack = snd_soc_card_get_drvdata(card); + + /* Need to enable ASRC function for 24MHz mclk rate */ + if ((avs_rt5682_quirk & AVS_RT5682_MCLK_EN) && + (avs_rt5682_quirk & AVS_RT5682_MCLK_24MHZ)) { + rt5682_sel_asrc_clk_src(component, RT5682_DA_STEREO1_FILTER | + RT5682_AD_STEREO1_FILTER, RT5682_CLK_SEL_I2S1_ASRC); + } + + /* + * Headset buttons map to the google Reference headset. + * These can be configured by userspace. 
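+	 * The keymap programmed below (play/pause, voice command, volume up/down) follows that layout.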
+ */ + ret = snd_soc_card_jack_new(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0 | + SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3, jack); + if (ret) { + dev_err(card->dev, "Headset Jack creation failed: %d\n", ret); + return ret; + } + + snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE); + snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND); + snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP); + snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN); + + ret = snd_soc_component_set_jack(component, jack, NULL); + if (ret) { + dev_err(card->dev, "Headset Jack call-back failed: %d\n", ret); + return ret; + } + + return 0; +}; + +static int +avs_rt5682_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *runtime = asoc_substream_to_rtd(substream); + struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0); + int clk_id, clk_freq; + int pll_out, ret; + + if (avs_rt5682_quirk & AVS_RT5682_MCLK_EN) { + clk_id = RT5682_PLL1_S_MCLK; + if (avs_rt5682_quirk & AVS_RT5682_MCLK_24MHZ) + clk_freq = 24000000; + else + clk_freq = 19200000; + } else { + clk_id = RT5682_PLL1_S_BCLK1; + clk_freq = params_rate(params) * 50; + } + + pll_out = params_rate(params) * 512; + + ret = snd_soc_dai_set_pll(codec_dai, 0, clk_id, clk_freq, pll_out); + if (ret < 0) + dev_err(runtime->dev, "snd_soc_dai_set_pll err = %d\n", ret); + + /* Configure sysclk for codec */ + ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1, pll_out, SND_SOC_CLOCK_IN); + if (ret < 0) + dev_err(runtime->dev, "snd_soc_dai_set_sysclk err = %d\n", ret); + + /* slot_width should equal or large than data length, set them be the same */ + ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x0, 0x0, 2, params_width(params)); + if (ret < 0) { + dev_err(runtime->dev, "set TDM slot err:%d\n", ret); + return ret; + } + + return 0; +} + +static const struct snd_soc_ops avs_rt5682_ops = { + .hw_params = avs_rt5682_hw_params, +}; + +static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, + struct snd_soc_dai_link **dai_link) +{ + struct snd_soc_dai_link_component *platform; + struct snd_soc_dai_link *dl; + + dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); + platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); + if (!dl || !platform) + return -ENOMEM; + + platform->name = platform_name; + + dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); + dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); + dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); + if (!dl->name || !dl->cpus || !dl->codecs) + return -ENOMEM; + + dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); + dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-10EC5682:00"); + dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, "rt5682-aif1"); + if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) + return -ENOMEM; + + dl->num_cpus = 1; + dl->num_codecs = 1; + dl->platforms = platform; + dl->num_platforms = 1; + dl->id = 0; + dl->init = avs_rt5682_codec_init; + dl->ops = &avs_rt5682_ops; + dl->nonatomic = 1; + dl->no_pcm = 1; + dl->dpcm_capture = 1; + dl->dpcm_playback = 1; + + *dai_link = dl; + + return 0; +} + +static int avs_create_dapm_routes(struct device *dev, int ssp_port, + struct snd_soc_dapm_route **routes, int *num_routes) +{ + struct snd_soc_dapm_route *dr; + const int num_base = ARRAY_SIZE(card_base_routes); + const int num_dr = num_base + 2; + int idx; 
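+	/* Static jack routes first, then one playback and one capture route bound to the selected SSP port */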
+ + dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + memcpy(dr, card_base_routes, num_base * sizeof(*dr)); + + idx = num_base; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Playback"); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + idx++; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Capture"); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + *routes = dr; + *num_routes = num_dr; + + return 0; +} + +static int avs_card_set_jack(struct snd_soc_card *card, struct snd_soc_jack *jack) +{ + struct snd_soc_component *component; + + for_each_card_components(card, component) + snd_soc_component_set_jack(component, jack, NULL); + return 0; +} + +static int avs_card_remove(struct snd_soc_card *card) +{ + return avs_card_set_jack(card, NULL); +} + +static int avs_card_suspend_pre(struct snd_soc_card *card) +{ + return avs_card_set_jack(card, NULL); +} + +static int avs_card_resume_post(struct snd_soc_card *card) +{ + struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card); + + return avs_card_set_jack(card, jack); +} + +static int avs_rt5682_probe(struct platform_device *pdev) +{ + struct snd_soc_dapm_route *routes; + struct snd_soc_dai_link *dai_link; + struct snd_soc_acpi_mach *mach; + struct snd_soc_card *card; + struct snd_soc_jack *jack; + struct device *dev = &pdev->dev; + const char *pname; + int num_routes, ssp_port, ret; + + if (pdev->id_entry && pdev->id_entry->driver_data) + avs_rt5682_quirk = (unsigned long)pdev->id_entry->driver_data; + + dmi_check_system(avs_rt5682_quirk_table); + dev_dbg(dev, "avs_rt5682_quirk = %lx\n", avs_rt5682_quirk); + + mach = dev_get_platdata(dev); + pname = mach->mach_params.platform; + ssp_port = __ffs(mach->mach_params.i2s_link_mask); + + ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); + if (ret) { + dev_err(dev, "Failed to create dai link: %d", ret); + return ret; + } + + ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes); + if (ret) { + dev_err(dev, "Failed to create dapm routes: %d", ret); + return ret; + } + + jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL); + card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!jack || !card) + return -ENOMEM; + + card->name = "avs_rt5682"; + card->dev = dev; + card->owner = THIS_MODULE; + card->remove = avs_card_remove; + card->suspend_pre = avs_card_suspend_pre; + card->resume_post = avs_card_resume_post; + card->dai_link = dai_link; + card->num_links = 1; + card->controls = card_controls; + card->num_controls = ARRAY_SIZE(card_controls); + card->dapm_widgets = card_widgets; + card->num_dapm_widgets = ARRAY_SIZE(card_widgets); + card->dapm_routes = routes; + card->num_dapm_routes = num_routes; + card->fully_routed = true; + snd_soc_card_set_drvdata(card, jack); + + ret = snd_soc_fixup_dai_links_platform_name(card, pname); + if (ret) + return ret; + + return devm_snd_soc_register_card(dev, card); +} + +static struct platform_driver avs_rt5682_driver = { + .probe = avs_rt5682_probe, + .driver = { + .name = "avs_rt5682", + .pm = &snd_soc_pm_ops, + }, +}; + +module_platform_driver(avs_rt5682_driver) + +MODULE_AUTHOR("Cezary Rojewski "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:avs_rt5682"); diff --git a/sound/soc/intel/avs/boards/ssm4567.c b/sound/soc/intel/avs/boards/ssm4567.c new file mode 100644 index 000000000..51a886732 --- /dev/null +++ 
b/sound/soc/intel/avs/boards/ssm4567.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include +#include +#include "../../../codecs/nau8825.h" + +#define SKL_SSM_CODEC_DAI "ssm4567-hifi" + +static struct snd_soc_codec_conf card_codec_conf[] = { + { + .dlc = COMP_CODEC_CONF("i2c-INT343B:00"), + .name_prefix = "Left", + }, + { + .dlc = COMP_CODEC_CONF("i2c-INT343B:01"), + .name_prefix = "Right", + }, +}; + +static const struct snd_kcontrol_new card_controls[] = { + SOC_DAPM_PIN_SWITCH("Left Speaker"), + SOC_DAPM_PIN_SWITCH("Right Speaker"), +}; + +static const struct snd_soc_dapm_widget card_widgets[] = { + SND_SOC_DAPM_SPK("Left Speaker", NULL), + SND_SOC_DAPM_SPK("Right Speaker", NULL), + SND_SOC_DAPM_SPK("DP1", NULL), + SND_SOC_DAPM_SPK("DP2", NULL), +}; + +static const struct snd_soc_dapm_route card_base_routes[] = { + {"Left Speaker", NULL, "Left OUT"}, + {"Right Speaker", NULL, "Right OUT"}, +}; + +static int avs_ssm4567_codec_init(struct snd_soc_pcm_runtime *runtime) +{ + int ret; + + /* Slot 1 for left */ + ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(runtime, 0), 0x01, 0x01, 2, 48); + if (ret < 0) + return ret; + + /* Slot 2 for right */ + ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(runtime, 1), 0x02, 0x02, 2, 48); + if (ret < 0) + return ret; + + return 0; +} + +static int +avs_ssm4567_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params) +{ + struct snd_interval *rate, *channels; + struct snd_mask *fmt; + + rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); + channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); + fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + + /* The ADSP will convert the FE rate to 48k, stereo */ + rate->min = rate->max = 48000; + channels->min = channels->max = 2; + + /* set SSP0 to 24 bit */ + snd_mask_none(fmt); + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); + return 0; +} + +static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, + struct snd_soc_dai_link **dai_link) +{ + struct snd_soc_dai_link_component *platform; + struct snd_soc_dai_link *dl; + + dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); + platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); + if (!dl || !platform) + return -ENOMEM; + + platform->name = platform_name; + + dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); + dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); + dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs) * 2, GFP_KERNEL); + if (!dl->name || !dl->cpus || !dl->codecs) + return -ENOMEM; + + dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); + dl->codecs[0].name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT343B:00"); + dl->codecs[0].dai_name = devm_kasprintf(dev, GFP_KERNEL, "ssm4567-hifi"); + dl->codecs[1].name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT343B:01"); + dl->codecs[1].dai_name = devm_kasprintf(dev, GFP_KERNEL, "ssm4567-hifi"); + if (!dl->cpus->dai_name || !dl->codecs[0].name || !dl->codecs[0].dai_name || + !dl->codecs[1].name || !dl->codecs[1].dai_name) + return -ENOMEM; + + dl->num_cpus = 1; + dl->num_codecs = 2; + dl->platforms = platform; + dl->num_platforms = 1; + dl->id = 0; + dl->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF | SND_SOC_DAIFMT_CBS_CFS; + dl->init = avs_ssm4567_codec_init; +
dl->be_hw_params_fixup = avs_ssm4567_be_fixup; + dl->nonatomic = 1; + dl->no_pcm = 1; + dl->dpcm_capture = 1; + dl->dpcm_playback = 1; + dl->ignore_pmdown_time = 1; + + *dai_link = dl; + + return 0; +} + +static int avs_create_dapm_routes(struct device *dev, int ssp_port, + struct snd_soc_dapm_route **routes, int *num_routes) +{ + struct snd_soc_dapm_route *dr; + const int num_base = ARRAY_SIZE(card_base_routes); + const int num_dr = num_base + 4; + int idx; + + dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + memcpy(dr, card_base_routes, num_base * sizeof(*dr)); + + idx = num_base; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "Left Playback"); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + idx++; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "Right Playback"); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + idx++; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "Left Capture Sense"); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + idx++; + dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port); + dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "Right Capture Sense"); + if (!dr[idx].sink || !dr[idx].source) + return -ENOMEM; + + *routes = dr; + *num_routes = num_dr; + + return 0; +} + +static int avs_ssm4567_probe(struct platform_device *pdev) +{ + struct snd_soc_dapm_route *routes; + struct snd_soc_dai_link *dai_link; + struct snd_soc_acpi_mach *mach; + struct snd_soc_card *card; + struct device *dev = &pdev->dev; + const char *pname; + int num_routes, ssp_port, ret; + + mach = dev_get_platdata(dev); + pname = mach->mach_params.platform; + ssp_port = __ffs(mach->mach_params.i2s_link_mask); + + ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); + if (ret) { + dev_err(dev, "Failed to create dai link: %d", ret); + return ret; + } + + ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes); + if (ret) { + dev_err(dev, "Failed to create dapm routes: %d", ret); + return ret; + } + + card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!card) + return -ENOMEM; + + card->name = "avs_ssm4567-adi"; + card->dev = dev; + card->owner = THIS_MODULE; + card->dai_link = dai_link; + card->num_links = 1; + card->codec_conf = card_codec_conf; + card->num_configs = ARRAY_SIZE(card_codec_conf); + card->controls = card_controls; + card->num_controls = ARRAY_SIZE(card_controls); + card->dapm_widgets = card_widgets; + card->num_dapm_widgets = ARRAY_SIZE(card_widgets); + card->dapm_routes = routes; + card->num_dapm_routes = num_routes; + card->fully_routed = true; + card->disable_route_checks = true; + + ret = snd_soc_fixup_dai_links_platform_name(card, pname); + if (ret) + return ret; + + return devm_snd_soc_register_card(dev, card); +} + +static struct platform_driver avs_ssm4567_driver = { + .probe = avs_ssm4567_probe, + .driver = { + .name = "avs_ssm4567", + .pm = &snd_soc_pm_ops, + }, +}; + +module_platform_driver(avs_ssm4567_driver) + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:avs_ssm4567"); diff --git a/sound/soc/intel/avs/cldma.c b/sound/soc/intel/avs/cldma.c new file mode 100644 index 000000000..d7a9390b5 --- /dev/null +++ b/sound/soc/intel/avs/cldma.c @@ -0,0 +1,316 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel 
Corporation. All rights reserved. +// +// Author: Cezary Rojewski +// + +#include +#include +#include +#include "cldma.h" +#include "registers.h" + +/* Stream Registers */ +#define AZX_CL_SD_BASE 0x80 +#define AZX_SD_CTL_STRM_MASK GENMASK(23, 20) +#define AZX_SD_CTL_STRM(s) (((s)->stream_tag << 20) & AZX_SD_CTL_STRM_MASK) +#define AZX_SD_BDLPL_BDLPLBA_MASK GENMASK(31, 7) +#define AZX_SD_BDLPL_BDLPLBA(lb) ((lb) & AZX_SD_BDLPL_BDLPLBA_MASK) + +/* Software Position Based FIFO Capability Registers */ +#define AZX_CL_SPBFCS 0x20 +#define AZX_REG_CL_SPBFCTL (AZX_CL_SPBFCS + 0x4) +#define AZX_REG_CL_SD_SPIB (AZX_CL_SPBFCS + 0x8) + +#define AVS_CL_OP_INTERVAL_US 3 +#define AVS_CL_OP_TIMEOUT_US 300 +#define AVS_CL_IOC_TIMEOUT_MS 300 +#define AVS_CL_STREAM_INDEX 0 + +struct hda_cldma { + struct device *dev; + struct hdac_bus *bus; + void __iomem *dsp_ba; + + unsigned int buffer_size; + unsigned int num_periods; + unsigned int stream_tag; + void __iomem *sd_addr; + + struct snd_dma_buffer dmab_data; + struct snd_dma_buffer dmab_bdl; + struct delayed_work memcpy_work; + struct completion completion; + + /* runtime */ + void *position; + unsigned int remaining; + unsigned int sd_status; +}; + +static void cldma_memcpy_work(struct work_struct *work); + +struct hda_cldma code_loader = { + .stream_tag = AVS_CL_STREAM_INDEX + 1, + .memcpy_work = __DELAYED_WORK_INITIALIZER(code_loader.memcpy_work, cldma_memcpy_work, 0), + .completion = COMPLETION_INITIALIZER(code_loader.completion), +}; + +void hda_cldma_fill(struct hda_cldma *cl) +{ + unsigned int size, offset; + + if (cl->remaining > cl->buffer_size) + size = cl->buffer_size; + else + size = cl->remaining; + + offset = snd_hdac_stream_readl(cl, CL_SD_SPIB); + if (offset + size > cl->buffer_size) { + unsigned int ss; + + ss = cl->buffer_size - offset; + memcpy(cl->dmab_data.area + offset, cl->position, ss); + offset = 0; + size -= ss; + cl->position += ss; + cl->remaining -= ss; + } + + memcpy(cl->dmab_data.area + offset, cl->position, size); + cl->position += size; + cl->remaining -= size; + + snd_hdac_stream_writel(cl, CL_SD_SPIB, offset + size); +} + +static void cldma_memcpy_work(struct work_struct *work) +{ + struct hda_cldma *cl = container_of(work, struct hda_cldma, memcpy_work.work); + int ret; + + ret = hda_cldma_start(cl); + if (ret < 0) { + dev_err(cl->dev, "cldma set RUN failed: %d\n", ret); + return; + } + + while (true) { + ret = wait_for_completion_timeout(&cl->completion, + msecs_to_jiffies(AVS_CL_IOC_TIMEOUT_MS)); + if (!ret) { + dev_err(cl->dev, "cldma IOC timeout\n"); + break; + } + + if (!(cl->sd_status & SD_INT_COMPLETE)) { + dev_err(cl->dev, "cldma transfer error, SD status: 0x%08x\n", + cl->sd_status); + break; + } + + if (!cl->remaining) + break; + + reinit_completion(&cl->completion); + hda_cldma_fill(cl); + /* enable CLDMA interrupt */ + snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA, + AVS_ADSP_ADSPIC_CLDMA); + } +} + +void hda_cldma_transfer(struct hda_cldma *cl, unsigned long start_delay) +{ + if (!cl->remaining) + return; + + reinit_completion(&cl->completion); + /* fill buffer with the first chunk before scheduling run */ + hda_cldma_fill(cl); + + schedule_delayed_work(&cl->memcpy_work, start_delay); +} + +int hda_cldma_start(struct hda_cldma *cl) +{ + unsigned int reg; + + /* enable interrupts */ + snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA, + AVS_ADSP_ADSPIC_CLDMA); + snd_hdac_stream_updateb(cl, SD_CTL, SD_INT_MASK | SD_CTL_DMA_START, + SD_INT_MASK | SD_CTL_DMA_START); + + /* 
await DMA engine start */ + return snd_hdac_stream_readb_poll(cl, SD_CTL, reg, reg & SD_CTL_DMA_START, + AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US); +} + +int hda_cldma_stop(struct hda_cldma *cl) +{ + unsigned int reg; + int ret; + + /* disable interrupts */ + snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA, 0); + snd_hdac_stream_updateb(cl, SD_CTL, SD_INT_MASK | SD_CTL_DMA_START, 0); + + /* await DMA engine stop */ + ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, !(reg & SD_CTL_DMA_START), + AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US); + cancel_delayed_work_sync(&cl->memcpy_work); + + return ret; +} + +int hda_cldma_reset(struct hda_cldma *cl) +{ + unsigned int reg; + int ret; + + ret = hda_cldma_stop(cl); + if (ret < 0) { + dev_err(cl->dev, "cldma stop failed: %d\n", ret); + return ret; + } + + snd_hdac_stream_updateb(cl, SD_CTL, SD_CTL_STREAM_RESET, SD_CTL_STREAM_RESET); + ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, (reg & SD_CTL_STREAM_RESET), + AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US); + if (ret < 0) { + dev_err(cl->dev, "cldma set SRST failed: %d\n", ret); + return ret; + } + + snd_hdac_stream_updateb(cl, SD_CTL, SD_CTL_STREAM_RESET, 0); + ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, !(reg & SD_CTL_STREAM_RESET), + AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US); + if (ret < 0) { + dev_err(cl->dev, "cldma unset SRST failed: %d\n", ret); + return ret; + } + + return 0; +} + +void hda_cldma_set_data(struct hda_cldma *cl, void *data, unsigned int size) +{ + /* setup runtime */ + cl->position = data; + cl->remaining = size; +} + +static void cldma_setup_bdle(struct hda_cldma *cl, u32 bdle_size) +{ + struct snd_dma_buffer *dmab = &cl->dmab_data; + __le32 *bdl = (__le32 *)cl->dmab_bdl.area; + int remaining = cl->buffer_size; + int offset = 0; + + cl->num_periods = 0; + + while (remaining > 0) { + phys_addr_t addr; + int chunk; + + addr = snd_sgbuf_get_addr(dmab, offset); + bdl[0] = cpu_to_le32(lower_32_bits(addr)); + bdl[1] = cpu_to_le32(upper_32_bits(addr)); + chunk = snd_sgbuf_get_chunk_size(dmab, offset, bdle_size); + bdl[2] = cpu_to_le32(chunk); + + remaining -= chunk; + /* set IOC only for the last entry */ + bdl[3] = (remaining > 0) ? 
0 : cpu_to_le32(0x01); + + bdl += 4; + offset += chunk; + cl->num_periods++; + } +} + +void hda_cldma_setup(struct hda_cldma *cl) +{ + dma_addr_t bdl_addr = cl->dmab_bdl.addr; + + cldma_setup_bdle(cl, cl->buffer_size / 2); + + snd_hdac_stream_writel(cl, SD_BDLPL, AZX_SD_BDLPL_BDLPLBA(lower_32_bits(bdl_addr))); + snd_hdac_stream_writel(cl, SD_BDLPU, upper_32_bits(bdl_addr)); + + snd_hdac_stream_writel(cl, SD_CBL, cl->buffer_size); + snd_hdac_stream_writeb(cl, SD_LVI, cl->num_periods - 1); + + snd_hdac_stream_updatel(cl, SD_CTL, AZX_SD_CTL_STRM_MASK, AZX_SD_CTL_STRM(cl)); + /* enable spib */ + snd_hdac_stream_writel(cl, CL_SPBFCTL, 1); +} + +static irqreturn_t cldma_irq_handler(int irq, void *dev_id) +{ + struct hda_cldma *cl = dev_id; + u32 adspis; + + adspis = snd_hdac_adsp_readl(cl, AVS_ADSP_REG_ADSPIS); + if (adspis == UINT_MAX) + return IRQ_NONE; + if (!(adspis & AVS_ADSP_ADSPIS_CLDMA)) + return IRQ_NONE; + + cl->sd_status = snd_hdac_stream_readb(cl, SD_STS); + dev_warn(cl->dev, "%s sd_status: 0x%08x\n", __func__, cl->sd_status); + + /* disable CLDMA interrupt */ + snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA, 0); + + complete(&cl->completion); + + return IRQ_HANDLED; +} + +int hda_cldma_init(struct hda_cldma *cl, struct hdac_bus *bus, void __iomem *dsp_ba, + unsigned int buffer_size) +{ + struct pci_dev *pci = to_pci_dev(bus->dev); + int ret; + + ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, bus->dev, buffer_size, &cl->dmab_data); + if (ret < 0) + return ret; + + ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, bus->dev, BDL_SIZE, &cl->dmab_bdl); + if (ret < 0) + goto alloc_err; + + cl->dev = bus->dev; + cl->bus = bus; + cl->dsp_ba = dsp_ba; + cl->buffer_size = buffer_size; + cl->sd_addr = dsp_ba + AZX_CL_SD_BASE; + + ret = pci_request_irq(pci, 0, cldma_irq_handler, NULL, cl, "CLDMA"); + if (ret < 0) { + dev_err(cl->dev, "Failed to request CLDMA IRQ handler: %d\n", ret); + goto req_err; + } + + return 0; + +req_err: + snd_dma_free_pages(&cl->dmab_bdl); +alloc_err: + snd_dma_free_pages(&cl->dmab_data); + + return ret; +} + +void hda_cldma_free(struct hda_cldma *cl) +{ + struct pci_dev *pci = to_pci_dev(cl->dev); + + pci_free_irq(pci, 0, cl); + snd_dma_free_pages(&cl->dmab_data); + snd_dma_free_pages(&cl->dmab_bdl); +} diff --git a/sound/soc/intel/avs/cldma.h b/sound/soc/intel/avs/cldma.h new file mode 100644 index 000000000..754fcf9ee --- /dev/null +++ b/sound/soc/intel/avs/cldma.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright(c) 2021-2022 Intel Corporation. All rights reserved. 
+ * + * Author: Cezary Rojewski + */ + +#ifndef __SOUND_SOC_INTEL_AVS_CLDMA_H +#define __SOUND_SOC_INTEL_AVS_CLDMA_H + +#define AVS_CL_DEFAULT_BUFFER_SIZE (32 * PAGE_SIZE) + +struct hda_cldma; +extern struct hda_cldma code_loader; + +void hda_cldma_fill(struct hda_cldma *cl); +void hda_cldma_transfer(struct hda_cldma *cl, unsigned long start_delay); + +int hda_cldma_start(struct hda_cldma *cl); +int hda_cldma_stop(struct hda_cldma *cl); +int hda_cldma_reset(struct hda_cldma *cl); + +void hda_cldma_set_data(struct hda_cldma *cl, void *data, unsigned int size); +void hda_cldma_setup(struct hda_cldma *cl); +int hda_cldma_init(struct hda_cldma *cl, struct hdac_bus *bus, void __iomem *dsp_ba, + unsigned int buffer_size); +void hda_cldma_free(struct hda_cldma *cl); + +#endif diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c new file mode 100644 index 000000000..5bb3eee2f --- /dev/null +++ b/sound/soc/intel/avs/core.c @@ -0,0 +1,715 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// +// Special thanks to: +// Krzysztof Hejmowski +// Michal Sienkiewicz +// Filip Proborszcz +// +// for sharing Intel AudioDSP expertise and helping shape the very +// foundation of this driver +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../../codecs/hda.h" +#include "avs.h" +#include "cldma.h" + +static void +avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value) +{ + struct pci_dev *pci = to_pci_dev(bus->dev); + u32 data; + + pci_read_config_dword(pci, reg, &data); + data &= ~mask; + data |= (value & mask); + pci_write_config_dword(pci, reg, data); +} + +void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable) +{ + u32 value; + + value = enable ? 0 : AZX_PGCTL_LSRMD_MASK; + avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL, + AZX_PGCTL_LSRMD_MASK, value); +} + +static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable) +{ + u32 value; + + value = enable ? AZX_CGCTL_MISCBDCGE_MASK : 0; + avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, AZX_CGCTL_MISCBDCGE_MASK, value); +} + +void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable) +{ + avs_hdac_clock_gating_enable(&adev->base.core, enable); +} + +void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable) +{ + u32 value; + + value = enable ? 
AZX_VS_EM2_L1SEN : 0; + snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, value); +} + +static int avs_hdac_bus_init_streams(struct hdac_bus *bus) +{ + unsigned int cp_streams, pb_streams; + unsigned int gcap; + + gcap = snd_hdac_chip_readw(bus, GCAP); + cp_streams = (gcap >> 8) & 0x0F; + pb_streams = (gcap >> 12) & 0x0F; + bus->num_streams = cp_streams + pb_streams; + + snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE); + snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK); + + return snd_hdac_bus_alloc_stream_pages(bus); +} + +static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset) +{ + struct hdac_ext_link *hlink; + bool ret; + + avs_hdac_clock_gating_enable(bus, false); + ret = snd_hdac_bus_init_chip(bus, full_reset); + + /* Reset stream-to-link mapping */ + list_for_each_entry(hlink, &bus->hlink_list, list) + writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV); + + avs_hdac_clock_gating_enable(bus, true); + + /* Set DUM bit to address incorrect position reporting for capture + * streams. In order to do so, CTRL needs to be out of reset state + */ + snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM); + + return ret; +} + +static int probe_codec(struct hdac_bus *bus, int addr) +{ + struct hda_codec *codec; + unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) | + (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID; + unsigned int res = -1; + int ret; + + mutex_lock(&bus->cmd_mutex); + snd_hdac_bus_send_cmd(bus, cmd); + snd_hdac_bus_get_response(bus, addr, &res); + mutex_unlock(&bus->cmd_mutex); + if (res == -1) + return -EIO; + + dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res); + + codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr); + if (IS_ERR(codec)) { + dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec)); + return PTR_ERR(codec); + } + /* + * Allow avs_core suspend by forcing suspended state on all + * of its codec child devices. Component interested in + * dealing with hda codecs directly takes pm responsibilities + */ + pm_runtime_set_suspended(hda_codec_dev(codec)); + + /* configure effectively creates new ASoC component */ + ret = snd_hda_codec_configure(codec); + if (ret < 0) { + dev_err(bus->dev, "failed to config codec %d\n", ret); + return ret; + } + + return 0; +} + +static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus) +{ + int c; + + /* First try to probe all given codec slots */ + for (c = 0; c < HDA_MAX_CODECS; c++) { + if (!(bus->codec_mask & BIT(c))) + continue; + + if (!probe_codec(bus, c)) + /* success, continue probing */ + continue; + + /* + * Some BIOSen give you wrong codec addresses + * that don't exist + */ + dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c); + bus->codec_mask &= ~BIT(c); + /* + * Worse, accessing a non-existent codec + * often screws up the controller bus + * and disturbs further communication. + * Thus, if an error occurs during probing, + * it is better to reset the controller bus + * to get back to a sane state.
+ */ + snd_hdac_bus_stop_chip(bus); + avs_hdac_bus_init_chip(bus, true); + } +} + +static void avs_hda_probe_work(struct work_struct *work) +{ + struct avs_dev *adev = container_of(work, struct avs_dev, probe_work); + struct hdac_bus *bus = &adev->base.core; + struct hdac_ext_link *hlink; + int ret; + + pm_runtime_set_active(bus->dev); /* clear runtime_error flag */ + + ret = snd_hdac_i915_init(bus); + if (ret < 0) + dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret); + + snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true); + avs_hdac_bus_init_chip(bus, true); + avs_hdac_bus_probe_codecs(bus); + snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); + + /* with all codecs probed, links can be powered down */ + list_for_each_entry(hlink, &bus->hlink_list, list) + snd_hdac_ext_bus_link_put(bus, hlink); + + snd_hdac_ext_bus_ppcap_enable(bus, true); + snd_hdac_ext_bus_ppcap_int_enable(bus, true); + + ret = avs_dsp_first_boot_firmware(adev); + if (ret < 0) + return; + + adev->nhlt = intel_nhlt_init(adev->dev); + if (!adev->nhlt) + dev_info(bus->dev, "platform has no NHLT\n"); + + avs_register_all_boards(adev); + + /* configure PM */ + pm_runtime_set_autosuspend_delay(bus->dev, 2000); + pm_runtime_use_autosuspend(bus->dev); + pm_runtime_mark_last_busy(bus->dev); + pm_runtime_put_autosuspend(bus->dev); + pm_runtime_allow(bus->dev); +} + +static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size) +{ + u64 prev_pos, pos, num_bytes; + + div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos); + pos = snd_hdac_stream_get_pos_posbuf(stream); + + if (pos < prev_pos) + num_bytes = (buffer_size - prev_pos) + pos; + else + num_bytes = pos - prev_pos; + + stream->curr_pos += num_bytes; +} + +/* called from IRQ */ +static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream) +{ + if (stream->substream) { + snd_pcm_period_elapsed(stream->substream); + } else if (stream->cstream) { + u64 buffer_size = stream->cstream->runtime->buffer_size; + + hdac_stream_update_pos(stream, buffer_size); + snd_compr_fragment_elapsed(stream->cstream); + } +} + +static irqreturn_t hdac_bus_irq_handler(int irq, void *context) +{ + struct hdac_bus *bus = context; + u32 mask, int_enable; + u32 status; + int ret = IRQ_NONE; + + if (!pm_runtime_active(bus->dev)) + return ret; + + spin_lock(&bus->reg_lock); + + status = snd_hdac_chip_readl(bus, INTSTS); + if (status == 0 || status == UINT_MAX) { + spin_unlock(&bus->reg_lock); + return ret; + } + + /* clear rirb int */ + status = snd_hdac_chip_readb(bus, RIRBSTS); + if (status & RIRB_INT_MASK) { + if (status & RIRB_INT_RESPONSE) + snd_hdac_bus_update_rirb(bus); + snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK); + } + + mask = (0x1 << bus->num_streams) - 1; + + status = snd_hdac_chip_readl(bus, INTSTS); + status &= mask; + if (status) { + /* Disable stream interrupts; Re-enable in bottom half */ + int_enable = snd_hdac_chip_readl(bus, INTCTL); + snd_hdac_chip_writel(bus, INTCTL, (int_enable & (~mask))); + ret = IRQ_WAKE_THREAD; + } else { + ret = IRQ_HANDLED; + } + + spin_unlock(&bus->reg_lock); + return ret; +} + +static irqreturn_t hdac_bus_irq_thread(int irq, void *context) +{ + struct hdac_bus *bus = context; + u32 status; + u32 int_enable; + u32 mask; + unsigned long flags; + + status = snd_hdac_chip_readl(bus, INTSTS); + + snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream); + + /* Re-enable stream interrupts */ + mask = (0x1 << bus->num_streams) - 1; + spin_lock_irqsave(&bus->reg_lock, flags); + 
int_enable = snd_hdac_chip_readl(bus, INTCTL); + snd_hdac_chip_writel(bus, INTCTL, (int_enable | mask)); + spin_unlock_irqrestore(&bus->reg_lock, flags); + + return IRQ_HANDLED; +} + +static int avs_hdac_acquire_irq(struct avs_dev *adev) +{ + struct hdac_bus *bus = &adev->base.core; + struct pci_dev *pci = to_pci_dev(bus->dev); + int ret; + + /* request one and check that we only got one interrupt */ + ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY); + if (ret != 1) { + dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret); + return ret; + } + + ret = pci_request_irq(pci, 0, hdac_bus_irq_handler, hdac_bus_irq_thread, bus, + KBUILD_MODNAME); + if (ret < 0) { + dev_err(adev->dev, "Failed to request stream IRQ handler: %d\n", ret); + goto free_vector; + } + + ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev, + KBUILD_MODNAME); + if (ret < 0) { + dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret); + goto free_stream_irq; + } + + return 0; + +free_stream_irq: + pci_free_irq(pci, 0, bus); +free_vector: + pci_free_irq_vectors(pci); + return ret; +} + +static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id) +{ + struct hda_bus *bus = &adev->base; + struct avs_ipc *ipc; + struct device *dev = &pci->dev; + int ret; + + ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops); + if (ret < 0) + return ret; + + bus->core.use_posbuf = 1; + bus->core.bdl_pos_adj = 0; + bus->core.sync_write = 1; + bus->pci = pci; + bus->mixer_assigned = -1; + mutex_init(&bus->prepare_mutex); + + ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL); + if (!ipc) + return -ENOMEM; + ret = avs_ipc_init(ipc, dev); + if (ret < 0) + return ret; + + adev->dev = dev; + adev->spec = (const struct avs_spec *)id->driver_data; + adev->ipc = ipc; + adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK); + INIT_WORK(&adev->probe_work, avs_hda_probe_work); + INIT_LIST_HEAD(&adev->comp_list); + INIT_LIST_HEAD(&adev->path_list); + INIT_LIST_HEAD(&adev->fw_list); + init_completion(&adev->fw_ready); + spin_lock_init(&adev->path_list_lock); + mutex_init(&adev->modres_mutex); + mutex_init(&adev->comp_list_mutex); + mutex_init(&adev->path_mutex); + + return 0; +} + +static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) +{ + struct hdac_bus *bus; + struct avs_dev *adev; + struct device *dev = &pci->dev; + int ret; + + ret = snd_intel_dsp_driver_probe(pci); + if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_AVS) + return -ENODEV; + + ret = pcim_enable_device(pci); + if (ret < 0) + return ret; + + adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL); + if (!adev) + return -ENOMEM; + ret = avs_bus_init(adev, pci, id); + if (ret < 0) { + dev_err(dev, "failed to init avs bus: %d\n", ret); + return ret; + } + + ret = pci_request_regions(pci, "AVS HDAudio"); + if (ret < 0) + return ret; + + bus = &adev->base.core; + bus->addr = pci_resource_start(pci, 0); + bus->remap_addr = pci_ioremap_bar(pci, 0); + if (!bus->remap_addr) { + dev_err(bus->dev, "ioremap error\n"); + ret = -ENXIO; + goto err_remap_bar0; + } + + adev->dsp_ba = pci_ioremap_bar(pci, 4); + if (!adev->dsp_ba) { + dev_err(bus->dev, "ioremap error\n"); + ret = -ENXIO; + goto err_remap_bar4; + } + + snd_hdac_bus_parse_capabilities(bus); + if (bus->mlcap) + snd_hdac_ext_bus_get_ml_capabilities(bus); + + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + 
dma_set_max_seg_size(dev, UINT_MAX); + + ret = avs_hdac_bus_init_streams(bus); + if (ret < 0) { + dev_err(dev, "failed to init streams: %d\n", ret); + goto err_init_streams; + } + + ret = avs_hdac_acquire_irq(adev); + if (ret < 0) { + dev_err(bus->dev, "failed to acquire irq: %d\n", ret); + goto err_acquire_irq; + } + + pci_set_master(pci); + pci_set_drvdata(pci, bus); + device_disable_async_suspend(dev); + + schedule_work(&adev->probe_work); + + return 0; + +err_acquire_irq: + snd_hdac_bus_free_stream_pages(bus); + snd_hdac_ext_stream_free_all(bus); +err_init_streams: + iounmap(adev->dsp_ba); +err_remap_bar4: + iounmap(bus->remap_addr); +err_remap_bar0: + pci_release_regions(pci); + return ret; +} + +static void avs_pci_shutdown(struct pci_dev *pci) +{ + struct hdac_bus *bus = pci_get_drvdata(pci); + struct avs_dev *adev = hdac_to_avs(bus); + + cancel_work_sync(&adev->probe_work); + avs_ipc_block(adev->ipc); + + snd_hdac_stop_streams(bus); + avs_dsp_op(adev, int_control, false); + snd_hdac_ext_bus_ppcap_int_enable(bus, false); + snd_hdac_ext_bus_link_power_down_all(bus); + + snd_hdac_bus_stop_chip(bus); + snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); + + if (avs_platattr_test(adev, CLDMA)) + pci_free_irq(pci, 0, &code_loader); + pci_free_irq(pci, 0, adev); + pci_free_irq(pci, 0, bus); + pci_free_irq_vectors(pci); +} + +static void avs_pci_remove(struct pci_dev *pci) +{ + struct hdac_device *hdev, *save; + struct hdac_bus *bus = pci_get_drvdata(pci); + struct avs_dev *adev = hdac_to_avs(bus); + + cancel_work_sync(&adev->probe_work); + avs_ipc_block(adev->ipc); + + avs_unregister_all_boards(adev); + + if (adev->nhlt) + intel_nhlt_free(adev->nhlt); + + if (avs_platattr_test(adev, CLDMA)) + hda_cldma_free(&code_loader); + + snd_hdac_stop_streams_and_chip(bus); + avs_dsp_op(adev, int_control, false); + snd_hdac_ext_bus_ppcap_int_enable(bus, false); + + /* it is safe to remove all codecs from the system now */ + list_for_each_entry_safe(hdev, save, &bus->codec_list, list) + snd_hda_codec_unregister(hdac_to_hda_codec(hdev)); + + snd_hdac_bus_free_stream_pages(bus); + snd_hdac_ext_stream_free_all(bus); + /* reverse ml_capabilities */ + snd_hdac_link_free_all(bus); + snd_hdac_ext_bus_exit(bus); + + avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0)); + snd_hdac_ext_bus_ppcap_enable(bus, false); + + /* snd_hdac_stop_streams_and_chip does that already? */ + snd_hdac_bus_stop_chip(bus); + snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); + if (bus->audio_component) + snd_hdac_i915_exit(bus); + + avs_module_info_free(adev); + pci_free_irq(pci, 0, adev); + pci_free_irq(pci, 0, bus); + pci_free_irq_vectors(pci); + iounmap(bus->remap_addr); + iounmap(adev->dsp_ba); + pci_release_regions(pci); + + /* Firmware is not needed anymore */ + avs_release_firmwares(adev); + + /* pm_runtime_forbid() can rpm_resume() which we do not want */ + pm_runtime_disable(&pci->dev); + pm_runtime_forbid(&pci->dev); + pm_runtime_enable(&pci->dev); + pm_runtime_get_noresume(&pci->dev); +} + +static int __maybe_unused avs_suspend_common(struct avs_dev *adev) +{ + struct hdac_bus *bus = &adev->base.core; + int ret; + + flush_work(&adev->probe_work); + + snd_hdac_ext_bus_link_power_down_all(bus); + + ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false); + /* + * pm_runtime is blocked on DSP failure but system-wide suspend is not. + * Do not block entire system from suspending if that's the case. 
+ */ + if (ret && ret != -EPERM) { + dev_err(adev->dev, "set dx failed: %d\n", ret); + return AVS_IPC_RET(ret); + } + + avs_ipc_block(adev->ipc); + avs_dsp_op(adev, int_control, false); + snd_hdac_ext_bus_ppcap_int_enable(bus, false); + + ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK); + if (ret < 0) { + dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret); + return ret; + } + + snd_hdac_ext_bus_ppcap_enable(bus, false); + /* disable LP SRAM retention */ + avs_hda_power_gating_enable(adev, false); + snd_hdac_bus_stop_chip(bus); + /* disable CG when putting controller to reset */ + avs_hdac_clock_gating_enable(bus, false); + snd_hdac_bus_enter_link_reset(bus); + avs_hdac_clock_gating_enable(bus, true); + + snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); + + return 0; +} + +static int __maybe_unused avs_resume_common(struct avs_dev *adev, bool purge) +{ + struct hdac_bus *bus = &adev->base.core; + struct hdac_ext_link *hlink; + int ret; + + snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true); + avs_hdac_bus_init_chip(bus, true); + + snd_hdac_ext_bus_ppcap_enable(bus, true); + snd_hdac_ext_bus_ppcap_int_enable(bus, true); + + ret = avs_dsp_boot_firmware(adev, purge); + if (ret < 0) { + dev_err(adev->dev, "firmware boot failed: %d\n", ret); + return ret; + } + + /* turn off the links that were off before suspend */ + list_for_each_entry(hlink, &bus->hlink_list, list) { + if (!hlink->ref_count) + snd_hdac_ext_bus_link_power_down(hlink); + } + + /* check dma status and clean up CORB/RIRB buffers */ + if (!bus->cmd_dma_state) + snd_hdac_bus_stop_cmd_io(bus); + + return 0; +} + +static int __maybe_unused avs_suspend(struct device *dev) +{ + return avs_suspend_common(to_avs_dev(dev)); +} + +static int __maybe_unused avs_resume(struct device *dev) +{ + return avs_resume_common(to_avs_dev(dev), true); +} + +static int __maybe_unused avs_runtime_suspend(struct device *dev) +{ + return avs_suspend_common(to_avs_dev(dev)); +} + +static int __maybe_unused avs_runtime_resume(struct device *dev) +{ + return avs_resume_common(to_avs_dev(dev), true); +} + +static const struct dev_pm_ops avs_dev_pm = { + SET_SYSTEM_SLEEP_PM_OPS(avs_suspend, avs_resume) + SET_RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL) +}; + +static const struct avs_spec skl_desc = { + .name = "skl", + .min_fw_version = { + .major = 9, + .minor = 21, + .hotfix = 0, + .build = 4732, + }, + .dsp_ops = &skl_dsp_ops, + .core_init_mask = 1, + .attributes = AVS_PLATATTR_CLDMA, + .sram_base_offset = SKL_ADSP_SRAM_BASE_OFFSET, + .sram_window_size = SKL_ADSP_SRAM_WINDOW_SIZE, + .rom_status = SKL_ADSP_SRAM_BASE_OFFSET, +}; + +static const struct avs_spec apl_desc = { + .name = "apl", + .min_fw_version = { + .major = 9, + .minor = 22, + .hotfix = 1, + .build = 4323, + }, + .dsp_ops = &apl_dsp_ops, + .core_init_mask = 3, + .attributes = AVS_PLATATTR_IMR, + .sram_base_offset = APL_ADSP_SRAM_BASE_OFFSET, + .sram_window_size = APL_ADSP_SRAM_WINDOW_SIZE, + .rom_status = APL_ADSP_SRAM_BASE_OFFSET, +}; + +static const struct pci_device_id avs_ids[] = { + { PCI_VDEVICE(INTEL, 0x9d70), (unsigned long)&skl_desc }, /* SKL */ + { PCI_VDEVICE(INTEL, 0x9d71), (unsigned long)&skl_desc }, /* KBL */ + { PCI_VDEVICE(INTEL, 0x5a98), (unsigned long)&apl_desc }, /* APL */ + { PCI_VDEVICE(INTEL, 0x3198), (unsigned long)&apl_desc }, /* GML */ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, avs_ids); + +static struct pci_driver avs_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = avs_ids, + .probe = 
avs_pci_probe, + .remove = avs_pci_remove, + .shutdown = avs_pci_shutdown, + .driver = { + .pm = &avs_dev_pm, + }, +}; +module_pci_driver(avs_pci_driver); + +MODULE_AUTHOR("Cezary Rojewski "); +MODULE_AUTHOR("Amadeusz Slawinski "); +MODULE_DESCRIPTION("Intel cAVS sound driver"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/intel/avs/dsp.c b/sound/soc/intel/avs/dsp.c new file mode 100644 index 000000000..b881100d3 --- /dev/null +++ b/sound/soc/intel/avs/dsp.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include "avs.h" +#include "registers.h" +#include "trace.h" + +#define AVS_ADSPCS_INTERVAL_US 500 +#define AVS_ADSPCS_TIMEOUT_US 50000 +#define AVS_ADSPCS_DELAY_US 1000 + +int avs_dsp_core_power(struct avs_dev *adev, u32 core_mask, bool power) +{ + u32 value, mask, reg; + int ret; + + value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS); + trace_avs_dsp_core_op(value, core_mask, "power", power); + + mask = AVS_ADSPCS_SPA_MASK(core_mask); + value = power ? mask : 0; + + snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value); + /* Delay the polling to avoid false positives. */ + usleep_range(AVS_ADSPCS_DELAY_US, 2 * AVS_ADSPCS_DELAY_US); + + mask = AVS_ADSPCS_CPA_MASK(core_mask); + value = power ? mask : 0; + + ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS, + reg, (reg & mask) == value, + AVS_ADSPCS_INTERVAL_US, + AVS_ADSPCS_TIMEOUT_US); + if (ret) + dev_err(adev->dev, "core_mask %d power %s failed: %d\n", + core_mask, power ? "on" : "off", ret); + + return ret; +} + +int avs_dsp_core_reset(struct avs_dev *adev, u32 core_mask, bool reset) +{ + u32 value, mask, reg; + int ret; + + value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS); + trace_avs_dsp_core_op(value, core_mask, "reset", reset); + + mask = AVS_ADSPCS_CRST_MASK(core_mask); + value = reset ? mask : 0; + + snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value); + + ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS, + reg, (reg & mask) == value, + AVS_ADSPCS_INTERVAL_US, + AVS_ADSPCS_TIMEOUT_US); + if (ret) + dev_err(adev->dev, "core_mask %d %s reset failed: %d\n", + core_mask, reset ? "enter" : "exit", ret); + + return ret; +} + +int avs_dsp_core_stall(struct avs_dev *adev, u32 core_mask, bool stall) +{ + u32 value, mask, reg; + int ret; + + value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS); + trace_avs_dsp_core_op(value, core_mask, "stall", stall); + + mask = AVS_ADSPCS_CSTALL_MASK(core_mask); + value = stall ? mask : 0; + + snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value); + + ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS, + reg, (reg & mask) == value, + AVS_ADSPCS_INTERVAL_US, + AVS_ADSPCS_TIMEOUT_US); + if (ret) { + dev_err(adev->dev, "core_mask %d %sstall failed: %d\n", + core_mask, stall ? "" : "un", ret); + return ret; + } + + /* Give HW time to propagate the change. */ + usleep_range(AVS_ADSPCS_DELAY_US, 2 * AVS_ADSPCS_DELAY_US); + return 0; +} + +int avs_dsp_core_enable(struct avs_dev *adev, u32 core_mask) +{ + int ret; + + ret = avs_dsp_op(adev, power, core_mask, true); + if (ret) + return ret; + + ret = avs_dsp_op(adev, reset, core_mask, false); + if (ret) + return ret; + + return avs_dsp_op(adev, stall, core_mask, false); +} + +int avs_dsp_core_disable(struct avs_dev *adev, u32 core_mask) +{ + /* No error checks to allow for complete DSP shutdown. 
*/ + avs_dsp_op(adev, stall, core_mask, true); + avs_dsp_op(adev, reset, core_mask, true); + + return avs_dsp_op(adev, power, core_mask, false); +} + +static int avs_dsp_enable(struct avs_dev *adev, u32 core_mask) +{ + u32 mask; + int ret; + + ret = avs_dsp_core_enable(adev, core_mask); + if (ret < 0) + return ret; + + mask = core_mask & ~AVS_MAIN_CORE_MASK; + if (!mask) + /* + * without main core, fw is dead anyway + * so setting D0 for it is futile. + */ + return 0; + + ret = avs_ipc_set_dx(adev, mask, true); + return AVS_IPC_RET(ret); +} + +static int avs_dsp_disable(struct avs_dev *adev, u32 core_mask) +{ + int ret; + + ret = avs_ipc_set_dx(adev, core_mask, false); + if (ret) + return AVS_IPC_RET(ret); + + return avs_dsp_core_disable(adev, core_mask); +} + +static int avs_dsp_get_core(struct avs_dev *adev, u32 core_id) +{ + u32 mask; + int ret; + + mask = BIT_MASK(core_id); + if (mask == AVS_MAIN_CORE_MASK) + /* nothing to do for main core */ + return 0; + if (core_id >= adev->hw_cfg.dsp_cores) { + ret = -EINVAL; + goto err; + } + + adev->core_refs[core_id]++; + if (adev->core_refs[core_id] == 1) { + /* + * No cores other than main-core can be running for DSP + * to achieve d0ix. Conscious SET_D0IX IPC failure is permitted, + * simply d0ix power state will no longer be attempted. + */ + ret = avs_dsp_disable_d0ix(adev); + if (ret && ret != -AVS_EIPC) + goto err_disable_d0ix; + + ret = avs_dsp_enable(adev, mask); + if (ret) + goto err_enable_dsp; + } + + return 0; + +err_enable_dsp: + avs_dsp_enable_d0ix(adev); +err_disable_d0ix: + adev->core_refs[core_id]--; +err: + dev_err(adev->dev, "get core %d failed: %d\n", core_id, ret); + return ret; +} + +static int avs_dsp_put_core(struct avs_dev *adev, u32 core_id) +{ + u32 mask; + int ret; + + mask = BIT_MASK(core_id); + if (mask == AVS_MAIN_CORE_MASK) + /* nothing to do for main core */ + return 0; + if (core_id >= adev->hw_cfg.dsp_cores) { + ret = -EINVAL; + goto err; + } + + adev->core_refs[core_id]--; + if (!adev->core_refs[core_id]) { + ret = avs_dsp_disable(adev, mask); + if (ret) + goto err; + + /* Match disable_d0ix in avs_dsp_get_core(). */ + avs_dsp_enable_d0ix(adev); + } + + return 0; +err: + dev_err(adev->dev, "put core %d failed: %d\n", core_id, ret); + return ret; +} + +int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id, + u8 core_id, u8 domain, void *param, u32 param_size, + u16 *instance_id) +{ + struct avs_module_entry mentry; + bool was_loaded = false; + int ret, id; + + id = avs_module_id_alloc(adev, module_id); + if (id < 0) + return id; + + ret = avs_get_module_id_entry(adev, module_id, &mentry); + if (ret) + goto err_mod_entry; + + ret = avs_dsp_get_core(adev, core_id); + if (ret) + goto err_mod_entry; + + /* Load code into memory if this is the first instance. 
*/ + if (!id && !avs_module_entry_is_loaded(&mentry)) { + ret = avs_dsp_op(adev, transfer_mods, true, &mentry, 1); + if (ret) { + dev_err(adev->dev, "load modules failed: %d\n", ret); + goto err_mod_entry; + } + was_loaded = true; + } + + ret = avs_ipc_init_instance(adev, module_id, id, ppl_instance_id, + core_id, domain, param, param_size); + if (ret) { + ret = AVS_IPC_RET(ret); + goto err_ipc; + } + + *instance_id = id; + return 0; + +err_ipc: + if (was_loaded) + avs_dsp_op(adev, transfer_mods, false, &mentry, 1); + avs_dsp_put_core(adev, core_id); +err_mod_entry: + avs_module_id_free(adev, module_id, id); + return ret; +} + +void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u16 instance_id, + u8 ppl_instance_id, u8 core_id) +{ + struct avs_module_entry mentry; + int ret; + + /* Modules not owned by any pipeline need to be freed explicitly. */ + if (ppl_instance_id == INVALID_PIPELINE_ID) + avs_ipc_delete_instance(adev, module_id, instance_id); + + avs_module_id_free(adev, module_id, instance_id); + + ret = avs_get_module_id_entry(adev, module_id, &mentry); + /* Unload occupied memory if this was the last instance. */ + if (!ret && mentry.type.load_type == AVS_MODULE_LOAD_TYPE_LOADABLE) { + if (avs_is_module_ida_empty(adev, module_id)) { + ret = avs_dsp_op(adev, transfer_mods, false, &mentry, 1); + if (ret) + dev_err(adev->dev, "unload modules failed: %d\n", ret); + } + } + + avs_dsp_put_core(adev, core_id); +} + +int avs_dsp_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority, + bool lp, u16 attributes, u8 *instance_id) +{ + struct avs_fw_cfg *fw_cfg = &adev->fw_cfg; + int ret, id; + + id = ida_alloc_max(&adev->ppl_ida, fw_cfg->max_ppl_count - 1, GFP_KERNEL); + if (id < 0) + return id; + + ret = avs_ipc_create_pipeline(adev, req_size, priority, id, lp, attributes); + if (ret) { + ida_free(&adev->ppl_ida, id); + return AVS_IPC_RET(ret); + } + + *instance_id = id; + return 0; +} + +int avs_dsp_delete_pipeline(struct avs_dev *adev, u8 instance_id) +{ + int ret; + + ret = avs_ipc_delete_pipeline(adev, instance_id); + if (ret) + ret = AVS_IPC_RET(ret); + + ida_free(&adev->ppl_ida, instance_id); + return ret; +} diff --git a/sound/soc/intel/avs/ipc.c b/sound/soc/intel/avs/ipc.c new file mode 100644 index 000000000..306f0dc4e --- /dev/null +++ b/sound/soc/intel/avs/ipc.c @@ -0,0 +1,628 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include "avs.h" +#include "messages.h" +#include "registers.h" +#include "trace.h" + +#define AVS_IPC_TIMEOUT_MS 300 +#define AVS_D0IX_DELAY_MS 300 + +static int +avs_dsp_set_d0ix(struct avs_dev *adev, bool enable) +{ + struct avs_ipc *ipc = adev->ipc; + int ret; + + /* Is transition required? */ + if (ipc->in_d0ix == enable) + return 0; + + ret = avs_dsp_op(adev, set_d0ix, enable); + if (ret) { + /* Prevent further d0ix attempts on conscious IPC failure. 
*/ + if (ret == -AVS_EIPC) + atomic_inc(&ipc->d0ix_disable_depth); + + ipc->in_d0ix = false; + return ret; + } + + ipc->in_d0ix = enable; + return 0; +} + +static void avs_dsp_schedule_d0ix(struct avs_dev *adev, struct avs_ipc_msg *tx) +{ + if (atomic_read(&adev->ipc->d0ix_disable_depth)) + return; + + mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work, + msecs_to_jiffies(AVS_D0IX_DELAY_MS)); +} + +static void avs_dsp_d0ix_work(struct work_struct *work) +{ + struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work); + + avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true); +} + +static int avs_dsp_wake_d0i0(struct avs_dev *adev, struct avs_ipc_msg *tx) +{ + struct avs_ipc *ipc = adev->ipc; + + if (!atomic_read(&ipc->d0ix_disable_depth)) { + cancel_delayed_work_sync(&ipc->d0ix_work); + return avs_dsp_set_d0ix(adev, false); + } + + return 0; +} + +int avs_dsp_disable_d0ix(struct avs_dev *adev) +{ + struct avs_ipc *ipc = adev->ipc; + + /* Prevent PG only on the first disable. */ + if (atomic_add_return(1, &ipc->d0ix_disable_depth) == 1) { + cancel_delayed_work_sync(&ipc->d0ix_work); + return avs_dsp_set_d0ix(adev, false); + } + + return 0; +} + +int avs_dsp_enable_d0ix(struct avs_dev *adev) +{ + struct avs_ipc *ipc = adev->ipc; + + if (atomic_dec_and_test(&ipc->d0ix_disable_depth)) + queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work, + msecs_to_jiffies(AVS_D0IX_DELAY_MS)); + return 0; +} + +static void avs_dsp_recovery(struct avs_dev *adev) +{ + struct avs_soc_component *acomp; + unsigned int core_mask; + int ret; + + mutex_lock(&adev->comp_list_mutex); + /* disconnect all running streams */ + list_for_each_entry(acomp, &adev->comp_list, node) { + struct snd_soc_pcm_runtime *rtd; + struct snd_soc_card *card; + + card = acomp->base.card; + if (!card) + continue; + + for_each_card_rtds(card, rtd) { + struct snd_pcm *pcm; + int dir; + + pcm = rtd->pcm; + if (!pcm || rtd->dai_link->no_pcm) + continue; + + for_each_pcm_streams(dir) { + struct snd_pcm_substream *substream; + + substream = pcm->streams[dir].substream; + if (!substream || !substream->runtime) + continue; + + /* No need for _irq() as we are in nonatomic context. */ + snd_pcm_stream_lock(substream); + snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED); + snd_pcm_stream_unlock(substream); + } + } + } + mutex_unlock(&adev->comp_list_mutex); + + /* forcibly shutdown all cores */ + core_mask = GENMASK(adev->hw_cfg.dsp_cores - 1, 0); + avs_dsp_core_disable(adev, core_mask); + + /* attempt dsp reboot */ + ret = avs_dsp_boot_firmware(adev, true); + if (ret < 0) + dev_err(adev->dev, "dsp reboot failed: %d\n", ret); + + pm_runtime_mark_last_busy(adev->dev); + pm_runtime_enable(adev->dev); + pm_request_autosuspend(adev->dev); + + atomic_set(&adev->ipc->recovering, 0); +} + +static void avs_dsp_recovery_work(struct work_struct *work) +{ + struct avs_ipc *ipc = container_of(work, struct avs_ipc, recovery_work); + + avs_dsp_recovery(to_avs_dev(ipc->dev)); +} + +static void avs_dsp_exception_caught(struct avs_dev *adev, union avs_notify_msg *msg) +{ + struct avs_ipc *ipc = adev->ipc; + + /* Account for the double-exception case. */ + ipc->ready = false; + + if (!atomic_add_unless(&ipc->recovering, 1, 1)) { + dev_err(adev->dev, "dsp recovery is already in progress\n"); + return; + } + + dev_crit(adev->dev, "communication severed, rebooting dsp..\n"); + + cancel_delayed_work_sync(&ipc->d0ix_work); + ipc->in_d0ix = false; + /* Re-enabled on recovery completion. 
*/ + pm_runtime_disable(adev->dev); + + /* Process received notification. */ + avs_dsp_op(adev, coredump, msg); + + schedule_work(&ipc->recovery_work); +} + +static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header) +{ + struct avs_ipc *ipc = adev->ipc; + union avs_reply_msg msg = AVS_MSG(header); + u64 reg; + + reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW)); + trace_avs_ipc_reply_msg(header, reg); + + ipc->rx.header = header; + /* Abort copying payload if request processing was unsuccessful. */ + if (!msg.status) { + /* update size in case of LARGE_CONFIG_GET */ + if (msg.msg_target == AVS_MOD_MSG && + msg.global_msg_type == AVS_MOD_LARGE_CONFIG_GET) + ipc->rx.size = min_t(u32, AVS_MAILBOX_SIZE, + msg.ext.large_config.data_off_size); + + memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size); + trace_avs_msg_payload(ipc->rx.data, ipc->rx.size); + } +} + +static void avs_dsp_process_notification(struct avs_dev *adev, u64 header) +{ + struct avs_notify_mod_data mod_data; + union avs_notify_msg msg = AVS_MSG(header); + size_t data_size = 0; + void *data = NULL; + u64 reg; + + reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW)); + trace_avs_ipc_notify_msg(header, reg); + + /* Ignore spurious notifications until handshake is established. */ + if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) { + dev_dbg(adev->dev, "FW not ready, skip notification: 0x%08x\n", msg.primary); + return; + } + + /* Calculate notification payload size. */ + switch (msg.notify_msg_type) { + case AVS_NOTIFY_FW_READY: + break; + + case AVS_NOTIFY_PHRASE_DETECTED: + data_size = sizeof(struct avs_notify_voice_data); + break; + + case AVS_NOTIFY_RESOURCE_EVENT: + data_size = sizeof(struct avs_notify_res_data); + break; + + case AVS_NOTIFY_LOG_BUFFER_STATUS: + case AVS_NOTIFY_EXCEPTION_CAUGHT: + break; + + case AVS_NOTIFY_MODULE_EVENT: + /* To know the total payload size, header needs to be read first. */ + memcpy_fromio(&mod_data, avs_uplink_addr(adev), sizeof(mod_data)); + data_size = sizeof(mod_data) + mod_data.data_size; + break; + + default: + dev_info(adev->dev, "unknown notification: 0x%08x\n", msg.primary); + break; + } + + if (data_size) { + data = kmalloc(data_size, GFP_KERNEL); + if (!data) + return; + + memcpy_fromio(data, avs_uplink_addr(adev), data_size); + trace_avs_msg_payload(data, data_size); + } + + /* Perform notification-specific operations. */ + switch (msg.notify_msg_type) { + case AVS_NOTIFY_FW_READY: + dev_dbg(adev->dev, "FW READY 0x%08x\n", msg.primary); + adev->ipc->ready = true; + complete(&adev->fw_ready); + break; + + case AVS_NOTIFY_LOG_BUFFER_STATUS: + avs_dsp_op(adev, log_buffer_status, &msg); + break; + + case AVS_NOTIFY_EXCEPTION_CAUGHT: + avs_dsp_exception_caught(adev, &msg); + break; + + default: + break; + } + + kfree(data); +} + +void avs_dsp_process_response(struct avs_dev *adev, u64 header) +{ + struct avs_ipc *ipc = adev->ipc; + + /* + * Response may either be solicited - a reply for a request that has + * been sent beforehand - or unsolicited (notification). + */ + if (avs_msg_is_reply(header)) { + /* Response processing is invoked from IRQ thread. 
*/ + spin_lock_irq(&ipc->rx_lock); + avs_dsp_receive_rx(adev, header); + ipc->rx_completed = true; + spin_unlock_irq(&ipc->rx_lock); + } else { + avs_dsp_process_notification(adev, header); + } + + complete(&ipc->busy_completion); +} + +irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id) +{ + struct avs_dev *adev = dev_id; + struct avs_ipc *ipc = adev->ipc; + u32 adspis, hipc_rsp, hipc_ack; + irqreturn_t ret = IRQ_NONE; + + adspis = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPIS); + if (adspis == UINT_MAX || !(adspis & AVS_ADSP_ADSPIS_IPC)) + return ret; + + hipc_ack = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCIE); + hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT); + + /* DSP acked host's request */ + if (hipc_ack & SKL_ADSP_HIPCIE_DONE) { + /* + * As an extra precaution, mask done interrupt. Code executed + * due to complete() found below does not assume any masking. + */ + snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, + AVS_ADSP_HIPCCTL_DONE, 0); + + complete(&ipc->done_completion); + + /* tell DSP it has our attention */ + snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCIE, + SKL_ADSP_HIPCIE_DONE, + SKL_ADSP_HIPCIE_DONE); + /* unmask done interrupt */ + snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, + AVS_ADSP_HIPCCTL_DONE, + AVS_ADSP_HIPCCTL_DONE); + ret = IRQ_HANDLED; + } + + /* DSP sent new response to process */ + if (hipc_rsp & SKL_ADSP_HIPCT_BUSY) { + /* mask busy interrupt */ + snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, + AVS_ADSP_HIPCCTL_BUSY, 0); + + ret = IRQ_WAKE_THREAD; + } + + return ret; +} + +irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id) +{ + struct avs_dev *adev = dev_id; + union avs_reply_msg msg; + u32 hipct, hipcte; + + hipct = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT); + hipcte = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCTE); + + /* ensure DSP sent new response to process */ + if (!(hipct & SKL_ADSP_HIPCT_BUSY)) + return IRQ_NONE; + + msg.primary = hipct; + msg.ext.val = hipcte; + avs_dsp_process_response(adev, msg.val); + + /* tell DSP we accepted its message */ + snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCT, + SKL_ADSP_HIPCT_BUSY, SKL_ADSP_HIPCT_BUSY); + /* unmask busy interrupt */ + snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, + AVS_ADSP_HIPCCTL_BUSY, AVS_ADSP_HIPCCTL_BUSY); + + return IRQ_HANDLED; +} + +static bool avs_ipc_is_busy(struct avs_ipc *ipc) +{ + struct avs_dev *adev = to_avs_dev(ipc->dev); + u32 hipc_rsp; + + hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT); + return hipc_rsp & SKL_ADSP_HIPCT_BUSY; +} + +static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout) +{ + u32 repeats_left = 128; /* to avoid infinite looping */ + int ret; + +again: + ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout)); + + /* DSP could be unresponsive at this point. */ + if (!ipc->ready) + return -EPERM; + + if (!ret) { + if (!avs_ipc_is_busy(ipc)) + return -ETIMEDOUT; + /* + * Firmware did its job, either notification or reply + * has been received - now wait until it's processed. + */ + wait_for_completion_killable(&ipc->busy_completion); + } + + /* Ongoing notification's bottom-half may cause early wakeup */ + spin_lock(&ipc->rx_lock); + if (!ipc->rx_completed) { + if (repeats_left) { + /* Reply delayed due to notification. 
*/ + repeats_left--; + reinit_completion(&ipc->busy_completion); + spin_unlock(&ipc->rx_lock); + goto again; + } + + spin_unlock(&ipc->rx_lock); + return -ETIMEDOUT; + } + + spin_unlock(&ipc->rx_lock); + return 0; +} + +static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply) +{ + lockdep_assert_held(&ipc->rx_lock); + + ipc->rx.header = 0; + ipc->rx.size = reply ? reply->size : 0; + ipc->rx_completed = false; + + reinit_completion(&ipc->done_completion); + reinit_completion(&ipc->busy_completion); +} + +static void avs_dsp_send_tx(struct avs_dev *adev, struct avs_ipc_msg *tx, bool read_fwregs) +{ + u64 reg = ULONG_MAX; + + tx->header |= SKL_ADSP_HIPCI_BUSY; + if (read_fwregs) + reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW)); + + trace_avs_request(tx, reg); + + if (tx->size) + memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size); + snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCIE, tx->header >> 32); + snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCI, tx->header & UINT_MAX); +} + +static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request, + struct avs_ipc_msg *reply, int timeout) +{ + struct avs_ipc *ipc = adev->ipc; + int ret; + + if (!ipc->ready) + return -EPERM; + + mutex_lock(&ipc->msg_mutex); + + spin_lock(&ipc->rx_lock); + avs_ipc_msg_init(ipc, reply); + avs_dsp_send_tx(adev, request, true); + spin_unlock(&ipc->rx_lock); + + ret = avs_ipc_wait_busy_completion(ipc, timeout); + if (ret) { + if (ret == -ETIMEDOUT) { + union avs_notify_msg msg = AVS_NOTIFICATION(EXCEPTION_CAUGHT); + + /* Same treatment as on exception, just stack_dump=0. */ + avs_dsp_exception_caught(adev, &msg); + } + goto exit; + } + + ret = ipc->rx.rsp.status; + if (reply) { + reply->header = ipc->rx.header; + reply->size = ipc->rx.size; + if (reply->data && ipc->rx.size) + memcpy(reply->data, ipc->rx.data, reply->size); + } + +exit: + mutex_unlock(&ipc->msg_mutex); + return ret; +} + +static int avs_dsp_send_msg_sequence(struct avs_dev *adev, struct avs_ipc_msg *request, + struct avs_ipc_msg *reply, int timeout, bool wake_d0i0, + bool schedule_d0ix) +{ + int ret; + + trace_avs_d0ix("wake", wake_d0i0, request->header); + if (wake_d0i0) { + ret = avs_dsp_wake_d0i0(adev, request); + if (ret) + return ret; + } + + ret = avs_dsp_do_send_msg(adev, request, reply, timeout); + if (ret) + return ret; + + trace_avs_d0ix("schedule", schedule_d0ix, request->header); + if (schedule_d0ix) + avs_dsp_schedule_d0ix(adev, request); + + return 0; +} + +int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, + struct avs_ipc_msg *reply, int timeout) +{ + bool wake_d0i0 = avs_dsp_op(adev, d0ix_toggle, request, true); + bool schedule_d0ix = avs_dsp_op(adev, d0ix_toggle, request, false); + + return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, schedule_d0ix); +} + +int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request, + struct avs_ipc_msg *reply) +{ + return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms); +} + +int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, + struct avs_ipc_msg *reply, int timeout, bool wake_d0i0) +{ + return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, false); +} + +int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request, + struct avs_ipc_msg *reply, bool wake_d0i0) +{ + return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms, + wake_d0i0); +} + +static int 
avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout) +{ + struct avs_ipc *ipc = adev->ipc; + int ret; + + mutex_lock(&ipc->msg_mutex); + + spin_lock(&ipc->rx_lock); + avs_ipc_msg_init(ipc, NULL); + /* + * with hw still stalled, memory windows may not be + * configured properly so avoid accessing SRAM + */ + avs_dsp_send_tx(adev, request, false); + spin_unlock(&ipc->rx_lock); + + /* ROM messages must be sent before main core is unstalled */ + ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false); + if (!ret) { + ret = wait_for_completion_timeout(&ipc->done_completion, msecs_to_jiffies(timeout)); + ret = ret ? 0 : -ETIMEDOUT; + } + + mutex_unlock(&ipc->msg_mutex); + + return ret; +} + +int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout) +{ + return avs_dsp_do_send_rom_msg(adev, request, timeout); +} + +int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request) +{ + return avs_dsp_send_rom_msg_timeout(adev, request, adev->ipc->default_timeout_ms); +} + +void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable) +{ + u32 value, mask; + + /* + * No particular bit setting order. All of these are required + * to have a functional SW <-> FW communication. + */ + value = enable ? AVS_ADSP_ADSPIC_IPC : 0; + snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_IPC, value); + + mask = AVS_ADSP_HIPCCTL_DONE | AVS_ADSP_HIPCCTL_BUSY; + value = enable ? mask : 0; + snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, mask, value); +} + +int avs_ipc_init(struct avs_ipc *ipc, struct device *dev) +{ + ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL); + if (!ipc->rx.data) + return -ENOMEM; + + ipc->dev = dev; + ipc->ready = false; + ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS; + INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work); + INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work); + init_completion(&ipc->done_completion); + init_completion(&ipc->busy_completion); + spin_lock_init(&ipc->rx_lock); + mutex_init(&ipc->msg_mutex); + + return 0; +} + +void avs_ipc_block(struct avs_ipc *ipc) +{ + ipc->ready = false; + cancel_work_sync(&ipc->recovery_work); + cancel_delayed_work_sync(&ipc->d0ix_work); + ipc->in_d0ix = false; +} diff --git a/sound/soc/intel/avs/loader.c b/sound/soc/intel/avs/loader.c new file mode 100644 index 000000000..9e3f8ff33 --- /dev/null +++ b/sound/soc/intel/avs/loader.c @@ -0,0 +1,692 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. 
+// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include "avs.h" +#include "cldma.h" +#include "messages.h" +#include "registers.h" +#include "topology.h" + +#define AVS_ROM_STS_MASK 0xFF +#define AVS_ROM_INIT_DONE 0x1 +#define SKL_ROM_BASEFW_ENTERED 0xF +#define APL_ROM_FW_ENTERED 0x5 +#define AVS_ROM_INIT_POLLING_US 5 +#define SKL_ROM_INIT_TIMEOUT_US 1000000 +#define APL_ROM_INIT_TIMEOUT_US 300000 +#define APL_ROM_INIT_RETRIES 3 + +#define AVS_FW_INIT_POLLING_US 500 +#define AVS_FW_INIT_TIMEOUT_MS 3000 +#define AVS_FW_INIT_TIMEOUT_US (AVS_FW_INIT_TIMEOUT_MS * 1000) + +#define AVS_CLDMA_START_DELAY_MS 100 + +#define AVS_ROOT_DIR "intel/avs" +#define AVS_BASEFW_FILENAME "dsp_basefw.bin" +#define AVS_EXT_MANIFEST_MAGIC 0x31454124 +#define SKL_MANIFEST_MAGIC 0x00000006 +#define SKL_ADSPFW_OFFSET 0x284 +#define APL_MANIFEST_MAGIC 0x44504324 +#define APL_ADSPFW_OFFSET 0x2000 + +/* Occasionally, engineering (release candidate) firmware is provided for testing. */ +static bool debug_ignore_fw_version; +module_param_named(ignore_fw_version, debug_ignore_fw_version, bool, 0444); +MODULE_PARM_DESC(ignore_fw_version, "Verify FW version 0=yes (default), 1=no"); + +#define AVS_LIB_NAME_SIZE 8 + +struct avs_fw_manifest { + u32 id; + u32 len; + char name[AVS_LIB_NAME_SIZE]; + u32 preload_page_count; + u32 img_flags; + u32 feature_mask; + struct avs_fw_version version; +} __packed; + +struct avs_fw_ext_manifest { + u32 id; + u32 len; + u16 version_major; + u16 version_minor; + u32 entries; +} __packed; + +static int avs_fw_ext_manifest_strip(struct firmware *fw) +{ + struct avs_fw_ext_manifest *man; + + if (fw->size < sizeof(*man)) + return -EINVAL; + + man = (struct avs_fw_ext_manifest *)fw->data; + if (man->id == AVS_EXT_MANIFEST_MAGIC) { + fw->data += man->len; + fw->size -= man->len; + } + + return 0; +} + +static int avs_fw_manifest_offset(struct firmware *fw) +{ + /* Header type found in first DWORD of fw binary. 
*/ + u32 magic = *(u32 *)fw->data; + + switch (magic) { + case SKL_MANIFEST_MAGIC: + return SKL_ADSPFW_OFFSET; + case APL_MANIFEST_MAGIC: + return APL_ADSPFW_OFFSET; + default: + return -EINVAL; + } +} + +static int avs_fw_manifest_strip_verify(struct avs_dev *adev, struct firmware *fw, + const struct avs_fw_version *min) +{ + struct avs_fw_manifest *man; + int offset, ret; + + ret = avs_fw_ext_manifest_strip(fw); + if (ret) + return ret; + + offset = avs_fw_manifest_offset(fw); + if (offset < 0) + return offset; + + if (fw->size < offset + sizeof(*man)) + return -EINVAL; + if (!min) + return 0; + + man = (struct avs_fw_manifest *)(fw->data + offset); + if (man->version.major != min->major || + man->version.minor != min->minor || + man->version.hotfix != min->hotfix || + man->version.build < min->build) { + dev_warn(adev->dev, "bad FW version %d.%d.%d.%d, expected %d.%d.%d.%d or newer\n", + man->version.major, man->version.minor, + man->version.hotfix, man->version.build, + min->major, min->minor, min->hotfix, min->build); + + if (!debug_ignore_fw_version) + return -EINVAL; + } + + return 0; +} + +int avs_cldma_load_basefw(struct avs_dev *adev, struct firmware *fw) +{ + struct hda_cldma *cl = &code_loader; + unsigned int reg; + int ret; + + ret = avs_dsp_op(adev, power, AVS_MAIN_CORE_MASK, true); + if (ret < 0) + return ret; + + ret = avs_dsp_op(adev, reset, AVS_MAIN_CORE_MASK, false); + if (ret < 0) + return ret; + + ret = hda_cldma_reset(cl); + if (ret < 0) { + dev_err(adev->dev, "cldma reset failed: %d\n", ret); + return ret; + } + hda_cldma_setup(cl); + + ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false); + if (ret < 0) + return ret; + + reinit_completion(&adev->fw_ready); + avs_dsp_op(adev, int_control, true); + + /* await ROM init */ + ret = snd_hdac_adsp_readl_poll(adev, AVS_FW_REG_STATUS(adev), reg, + (reg & AVS_ROM_INIT_DONE) == AVS_ROM_INIT_DONE, + AVS_ROM_INIT_POLLING_US, SKL_ROM_INIT_TIMEOUT_US); + if (ret < 0) { + dev_err(adev->dev, "rom init timeout: %d\n", ret); + avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK); + return ret; + } + + hda_cldma_set_data(cl, (void *)fw->data, fw->size); + /* transfer firmware */ + hda_cldma_transfer(cl, 0); + ret = snd_hdac_adsp_readl_poll(adev, AVS_FW_REG_STATUS(adev), reg, + (reg & AVS_ROM_STS_MASK) == SKL_ROM_BASEFW_ENTERED, + AVS_FW_INIT_POLLING_US, AVS_FW_INIT_TIMEOUT_US); + hda_cldma_stop(cl); + if (ret < 0) { + dev_err(adev->dev, "transfer fw failed: %d\n", ret); + avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK); + return ret; + } + + return 0; +} + +int avs_cldma_load_library(struct avs_dev *adev, struct firmware *lib, u32 id) +{ + struct hda_cldma *cl = &code_loader; + int ret; + + hda_cldma_set_data(cl, (void *)lib->data, lib->size); + /* transfer modules manifest */ + hda_cldma_transfer(cl, msecs_to_jiffies(AVS_CLDMA_START_DELAY_MS)); + + /* DMA id ignored as there is only ever one code-loader DMA */ + ret = avs_ipc_load_library(adev, 0, id); + hda_cldma_stop(cl); + + if (ret) { + ret = AVS_IPC_RET(ret); + dev_err(adev->dev, "transfer lib %d failed: %d\n", id, ret); + } + + return ret; +} + +static int avs_cldma_load_module(struct avs_dev *adev, struct avs_module_entry *mentry) +{ + struct hda_cldma *cl = &code_loader; + const struct firmware *mod; + char *mod_name; + int ret; + + mod_name = kasprintf(GFP_KERNEL, "%s/%s/dsp_mod_%pUL.bin", AVS_ROOT_DIR, + adev->spec->name, mentry->uuid.b); + if (!mod_name) + return -ENOMEM; + + ret = avs_request_firmware(adev, &mod, mod_name); + kfree(mod_name); + if (ret < 0) + return ret; + + 
hda_cldma_set_data(cl, (void *)mod->data, mod->size); + hda_cldma_transfer(cl, msecs_to_jiffies(AVS_CLDMA_START_DELAY_MS)); + ret = avs_ipc_load_modules(adev, &mentry->module_id, 1); + hda_cldma_stop(cl); + + if (ret) { + dev_err(adev->dev, "load module %d failed: %d\n", mentry->module_id, ret); + avs_release_last_firmware(adev); + return AVS_IPC_RET(ret); + } + + return 0; +} + +int avs_cldma_transfer_modules(struct avs_dev *adev, bool load, + struct avs_module_entry *mods, u32 num_mods) +{ + u16 *mod_ids; + int ret, i; + + /* Either load to DSP or unload them to free space. */ + if (load) { + for (i = 0; i < num_mods; i++) { + ret = avs_cldma_load_module(adev, &mods[i]); + if (ret) + return ret; + } + + return 0; + } + + mod_ids = kcalloc(num_mods, sizeof(u16), GFP_KERNEL); + if (!mod_ids) + return -ENOMEM; + + for (i = 0; i < num_mods; i++) + mod_ids[i] = mods[i].module_id; + + ret = avs_ipc_unload_modules(adev, mod_ids, num_mods); + kfree(mod_ids); + if (ret) + return AVS_IPC_RET(ret); + + return 0; +} + +static int +avs_hda_init_rom(struct avs_dev *adev, unsigned int dma_id, bool purge) +{ + const struct avs_spec *const spec = adev->spec; + unsigned int corex_mask, reg; + int ret; + + corex_mask = spec->core_init_mask & ~AVS_MAIN_CORE_MASK; + + ret = avs_dsp_op(adev, power, spec->core_init_mask, true); + if (ret < 0) + goto err; + + ret = avs_dsp_op(adev, reset, AVS_MAIN_CORE_MASK, false); + if (ret < 0) + goto err; + + reinit_completion(&adev->fw_ready); + avs_dsp_op(adev, int_control, true); + + /* set boot config */ + ret = avs_ipc_set_boot_config(adev, dma_id, purge); + if (ret) { + ret = AVS_IPC_RET(ret); + goto err; + } + + /* await ROM init */ + ret = snd_hdac_adsp_readq_poll(adev, spec->rom_status, reg, + (reg & 0xF) == AVS_ROM_INIT_DONE || + (reg & 0xF) == APL_ROM_FW_ENTERED, + AVS_ROM_INIT_POLLING_US, APL_ROM_INIT_TIMEOUT_US); + if (ret < 0) { + dev_err(adev->dev, "rom init timeout: %d\n", ret); + goto err; + } + + /* power down non-main cores */ + if (corex_mask) { + ret = avs_dsp_op(adev, power, corex_mask, false); + if (ret < 0) + goto err; + } + + return 0; + +err: + avs_dsp_core_disable(adev, spec->core_init_mask); + return ret; +} + +static int avs_imr_load_basefw(struct avs_dev *adev) +{ + int ret; + + /* DMA id ignored when flashing from IMR as no transfer occurs. 
*/ + ret = avs_hda_init_rom(adev, 0, false); + if (ret < 0) { + dev_err(adev->dev, "rom init failed: %d\n", ret); + return ret; + } + + ret = wait_for_completion_timeout(&adev->fw_ready, + msecs_to_jiffies(AVS_FW_INIT_TIMEOUT_MS)); + if (!ret) { + dev_err(adev->dev, "firmware ready timeout\n"); + avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK); + return -ETIMEDOUT; + } + + return 0; +} + +int avs_hda_load_basefw(struct avs_dev *adev, struct firmware *fw) +{ + struct snd_pcm_substream substream; + struct snd_dma_buffer dmab; + struct hdac_ext_stream *estream; + struct hdac_stream *hstream; + struct hdac_bus *bus = &adev->base.core; + unsigned int sdfmt, reg; + int ret, i; + + /* configure hda dma */ + memset(&substream, 0, sizeof(substream)); + substream.stream = SNDRV_PCM_STREAM_PLAYBACK; + estream = snd_hdac_ext_stream_assign(bus, &substream, + HDAC_EXT_STREAM_TYPE_HOST); + if (!estream) + return -ENODEV; + hstream = hdac_stream(estream); + + /* code loading performed with default format */ + sdfmt = snd_hdac_calc_stream_format(48000, 1, SNDRV_PCM_FORMAT_S32_LE, 32, 0); + ret = snd_hdac_dsp_prepare(hstream, sdfmt, fw->size, &dmab); + if (ret < 0) + goto release_stream; + + /* enable SPIB for hda stream */ + snd_hdac_ext_stream_spbcap_enable(bus, true, hstream->index); + ret = snd_hdac_ext_stream_set_spib(bus, estream, fw->size); + if (ret) + goto cleanup_resources; + + memcpy(dmab.area, fw->data, fw->size); + + for (i = 0; i < APL_ROM_INIT_RETRIES; i++) { + unsigned int dma_id = hstream->stream_tag - 1; + + ret = avs_hda_init_rom(adev, dma_id, true); + if (!ret) + break; + dev_info(adev->dev, "#%d rom init fail: %d\n", i + 1, ret); + } + if (ret < 0) + goto cleanup_resources; + + /* transfer firmware */ + snd_hdac_dsp_trigger(hstream, true); + ret = snd_hdac_adsp_readl_poll(adev, AVS_FW_REG_STATUS(adev), reg, + (reg & AVS_ROM_STS_MASK) == APL_ROM_FW_ENTERED, + AVS_FW_INIT_POLLING_US, AVS_FW_INIT_TIMEOUT_US); + snd_hdac_dsp_trigger(hstream, false); + if (ret < 0) { + dev_err(adev->dev, "transfer fw failed: %d\n", ret); + avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK); + } + +cleanup_resources: + /* disable SPIB for hda stream */ + snd_hdac_ext_stream_spbcap_enable(bus, false, hstream->index); + snd_hdac_ext_stream_set_spib(bus, estream, 0); + + snd_hdac_dsp_cleanup(hstream, &dmab); +release_stream: + snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST); + + return ret; +} + +int avs_hda_load_library(struct avs_dev *adev, struct firmware *lib, u32 id) +{ + struct snd_pcm_substream substream; + struct snd_dma_buffer dmab; + struct hdac_ext_stream *estream; + struct hdac_stream *stream; + struct hdac_bus *bus = &adev->base.core; + unsigned int sdfmt; + int ret; + + /* configure hda dma */ + memset(&substream, 0, sizeof(substream)); + substream.stream = SNDRV_PCM_STREAM_PLAYBACK; + estream = snd_hdac_ext_stream_assign(bus, &substream, + HDAC_EXT_STREAM_TYPE_HOST); + if (!estream) + return -ENODEV; + stream = hdac_stream(estream); + + /* code loading performed with default format */ + sdfmt = snd_hdac_calc_stream_format(48000, 1, SNDRV_PCM_FORMAT_S32_LE, 32, 0); + ret = snd_hdac_dsp_prepare(stream, sdfmt, lib->size, &dmab); + if (ret < 0) + goto release_stream; + + /* enable SPIB for hda stream */ + snd_hdac_ext_stream_spbcap_enable(bus, true, stream->index); + snd_hdac_ext_stream_set_spib(bus, estream, lib->size); + + memcpy(dmab.area, lib->data, lib->size); + + /* transfer firmware */ + snd_hdac_dsp_trigger(stream, true); + ret = avs_ipc_load_library(adev, stream->stream_tag - 1, 
id); + snd_hdac_dsp_trigger(stream, false); + if (ret) { + dev_err(adev->dev, "transfer lib %d failed: %d\n", id, ret); + ret = AVS_IPC_RET(ret); + } + + /* disable SPIB for hda stream */ + snd_hdac_ext_stream_spbcap_enable(bus, false, stream->index); + snd_hdac_ext_stream_set_spib(bus, estream, 0); + + snd_hdac_dsp_cleanup(stream, &dmab); +release_stream: + snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST); + + return ret; +} + +int avs_hda_transfer_modules(struct avs_dev *adev, bool load, + struct avs_module_entry *mods, u32 num_mods) +{ + /* + * All platforms without CLDMA are equipped with IMR, + * and thus the module transferring is offloaded to DSP. + */ + return 0; +} + +int avs_dsp_load_libraries(struct avs_dev *adev, struct avs_tplg_library *libs, u32 num_libs) +{ + int start, id, i = 0; + int ret; + + /* Calculate the id to assign for the next lib. */ + for (id = 0; id < adev->fw_cfg.max_libs_count; id++) + if (adev->lib_names[id][0] == '\0') + break; + if (id + num_libs >= adev->fw_cfg.max_libs_count) + return -EINVAL; + + start = id; + while (i < num_libs) { + struct avs_fw_manifest *man; + const struct firmware *fw; + struct firmware stripped_fw; + char *filename; + int j; + + filename = kasprintf(GFP_KERNEL, "%s/%s/%s", AVS_ROOT_DIR, adev->spec->name, + libs[i].name); + if (!filename) + return -ENOMEM; + + /* + * If any call after this one fails, requested firmware is not released with + * avs_release_last_firmware() as failing to load code results in need for reload + * of entire driver module. And then avs_release_firmwares() is in place already. + */ + ret = avs_request_firmware(adev, &fw, filename); + kfree(filename); + if (ret < 0) + return ret; + + stripped_fw = *fw; + ret = avs_fw_manifest_strip_verify(adev, &stripped_fw, NULL); + if (ret) { + dev_err(adev->dev, "invalid library data: %d\n", ret); + return ret; + } + + ret = avs_fw_manifest_offset(&stripped_fw); + if (ret < 0) + return ret; + man = (struct avs_fw_manifest *)(stripped_fw.data + ret); + + /* Don't load anything that's already in DSP memory. */ + for (j = 0; j < id; j++) + if (!strncmp(adev->lib_names[j], man->name, AVS_LIB_NAME_SIZE)) + goto next_lib; + + ret = avs_dsp_op(adev, load_lib, &stripped_fw, id); + if (ret) + return ret; + + strncpy(adev->lib_names[id], man->name, AVS_LIB_NAME_SIZE); + id++; +next_lib: + i++; + } + + return start == id ? 
1 : 0; +} + +static int avs_dsp_load_basefw(struct avs_dev *adev) +{ + const struct avs_fw_version *min_req; + const struct avs_spec *const spec = adev->spec; + const struct firmware *fw; + struct firmware stripped_fw; + char *filename; + int ret; + + filename = kasprintf(GFP_KERNEL, "%s/%s/%s", AVS_ROOT_DIR, spec->name, AVS_BASEFW_FILENAME); + if (!filename) + return -ENOMEM; + + ret = avs_request_firmware(adev, &fw, filename); + kfree(filename); + if (ret < 0) { + dev_err(adev->dev, "request firmware failed: %d\n", ret); + return ret; + } + + stripped_fw = *fw; + min_req = &adev->spec->min_fw_version; + + ret = avs_fw_manifest_strip_verify(adev, &stripped_fw, min_req); + if (ret < 0) { + dev_err(adev->dev, "invalid firmware data: %d\n", ret); + goto release_fw; + } + + ret = avs_dsp_op(adev, load_basefw, &stripped_fw); + if (ret < 0) { + dev_err(adev->dev, "basefw load failed: %d\n", ret); + goto release_fw; + } + + ret = wait_for_completion_timeout(&adev->fw_ready, + msecs_to_jiffies(AVS_FW_INIT_TIMEOUT_MS)); + if (!ret) { + dev_err(adev->dev, "firmware ready timeout\n"); + avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK); + ret = -ETIMEDOUT; + goto release_fw; + } + + return 0; + +release_fw: + avs_release_last_firmware(adev); + return ret; +} + +int avs_dsp_boot_firmware(struct avs_dev *adev, bool purge) +{ + struct avs_soc_component *acomp; + int ret, i; + + /* Forgo full boot if flash from IMR succeeds. */ + if (!purge && avs_platattr_test(adev, IMR)) { + ret = avs_imr_load_basefw(adev); + if (!ret) + return 0; + + dev_dbg(adev->dev, "firmware flash from imr failed: %d\n", ret); + } + + /* Full boot, clear cached data except for basefw (slot 0). */ + for (i = 1; i < adev->fw_cfg.max_libs_count; i++) + memset(adev->lib_names[i], 0, AVS_LIB_NAME_SIZE); + + avs_hda_clock_gating_enable(adev, false); + avs_hda_l1sen_enable(adev, false); + + ret = avs_dsp_load_basefw(adev); + if (ret) + goto reenable_gating; + + mutex_lock(&adev->comp_list_mutex); + list_for_each_entry(acomp, &adev->comp_list, node) { + struct avs_tplg *tplg = acomp->tplg; + + ret = avs_dsp_load_libraries(adev, tplg->libs, tplg->num_libs); + if (ret < 0) + break; + } + mutex_unlock(&adev->comp_list_mutex); + +reenable_gating: + avs_hda_l1sen_enable(adev, true); + avs_hda_clock_gating_enable(adev, true); + + if (ret < 0) + return ret; + + /* With all code loaded, refresh module information. 
*/ + ret = avs_module_info_init(adev, true); + if (ret) { + dev_err(adev->dev, "init module info failed: %d\n", ret); + return ret; + } + + return 0; +} + +int avs_dsp_first_boot_firmware(struct avs_dev *adev) +{ + int ret, i; + + if (avs_platattr_test(adev, CLDMA)) { + ret = hda_cldma_init(&code_loader, &adev->base.core, + adev->dsp_ba, AVS_CL_DEFAULT_BUFFER_SIZE); + if (ret < 0) { + dev_err(adev->dev, "cldma init failed: %d\n", ret); + return ret; + } + } + + ret = avs_dsp_boot_firmware(adev, true); + if (ret < 0) { + dev_err(adev->dev, "firmware boot failed: %d\n", ret); + return ret; + } + + ret = avs_ipc_get_hw_config(adev, &adev->hw_cfg); + if (ret) { + dev_err(adev->dev, "get hw cfg failed: %d\n", ret); + return AVS_IPC_RET(ret); + } + + ret = avs_ipc_get_fw_config(adev, &adev->fw_cfg); + if (ret) { + dev_err(adev->dev, "get fw cfg failed: %d\n", ret); + return AVS_IPC_RET(ret); + } + + adev->core_refs = devm_kcalloc(adev->dev, adev->hw_cfg.dsp_cores, + sizeof(*adev->core_refs), GFP_KERNEL); + adev->lib_names = devm_kcalloc(adev->dev, adev->fw_cfg.max_libs_count, + sizeof(*adev->lib_names), GFP_KERNEL); + if (!adev->core_refs || !adev->lib_names) + return -ENOMEM; + + for (i = 0; i < adev->fw_cfg.max_libs_count; i++) { + adev->lib_names[i] = devm_kzalloc(adev->dev, AVS_LIB_NAME_SIZE, GFP_KERNEL); + if (!adev->lib_names[i]) + return -ENOMEM; + } + + /* basefw always occupies slot 0 */ + strcpy(&adev->lib_names[0][0], "BASEFW"); + + ida_init(&adev->ppl_ida); + + return 0; +} diff --git a/sound/soc/intel/avs/messages.c b/sound/soc/intel/avs/messages.c new file mode 100644 index 000000000..d4bcee1aa --- /dev/null +++ b/sound/soc/intel/avs/messages.c @@ -0,0 +1,734 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. 
+// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include "avs.h" +#include "messages.h" + +#define AVS_CL_TIMEOUT_MS 5000 + +int avs_ipc_set_boot_config(struct avs_dev *adev, u32 dma_id, u32 purge) +{ + union avs_global_msg msg = AVS_GLOBAL_REQUEST(ROM_CONTROL); + struct avs_ipc_msg request = {{0}}; + int ret; + + msg.boot_cfg.rom_ctrl_msg_type = AVS_ROM_SET_BOOT_CONFIG; + msg.boot_cfg.dma_id = dma_id; + msg.boot_cfg.purge_request = purge; + request.header = msg.val; + + ret = avs_dsp_send_rom_msg(adev, &request); + if (ret) + avs_ipc_err(adev, &request, "set boot config", ret); + + return ret; +} + +int avs_ipc_load_modules(struct avs_dev *adev, u16 *mod_ids, u32 num_mod_ids) +{ + union avs_global_msg msg = AVS_GLOBAL_REQUEST(LOAD_MULTIPLE_MODULES); + struct avs_ipc_msg request; + int ret; + + msg.load_multi_mods.mod_cnt = num_mod_ids; + request.header = msg.val; + request.data = mod_ids; + request.size = sizeof(*mod_ids) * num_mod_ids; + + ret = avs_dsp_send_msg_timeout(adev, &request, NULL, AVS_CL_TIMEOUT_MS); + if (ret) + avs_ipc_err(adev, &request, "load multiple modules", ret); + + return ret; +} + +int avs_ipc_unload_modules(struct avs_dev *adev, u16 *mod_ids, u32 num_mod_ids) +{ + union avs_global_msg msg = AVS_GLOBAL_REQUEST(UNLOAD_MULTIPLE_MODULES); + struct avs_ipc_msg request; + int ret; + + msg.load_multi_mods.mod_cnt = num_mod_ids; + request.header = msg.val; + request.data = mod_ids; + request.size = sizeof(*mod_ids) * num_mod_ids; + + ret = avs_dsp_send_msg(adev, &request, NULL); + if (ret) + avs_ipc_err(adev, &request, "unload multiple modules", ret); + + return ret; +} + +int avs_ipc_load_library(struct avs_dev *adev, u32 dma_id, u32 lib_id) +{ + union avs_global_msg msg = AVS_GLOBAL_REQUEST(LOAD_LIBRARY); + struct avs_ipc_msg request = {{0}}; + int ret; + + msg.load_lib.dma_id = dma_id; + msg.load_lib.lib_id = lib_id; + request.header = msg.val; + + ret = avs_dsp_send_msg_timeout(adev, &request, NULL, AVS_CL_TIMEOUT_MS); + if (ret) + avs_ipc_err(adev, &request, "load library", ret); + + return ret; +} + +int avs_ipc_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority, + u8 instance_id, bool lp, u16 attributes) +{ + union avs_global_msg msg = AVS_GLOBAL_REQUEST(CREATE_PIPELINE); + struct avs_ipc_msg request = {{0}}; + int ret; + + msg.create_ppl.ppl_mem_size = req_size; + msg.create_ppl.ppl_priority = priority; + msg.create_ppl.instance_id = instance_id; + msg.ext.create_ppl.lp = lp; + msg.ext.create_ppl.attributes = attributes; + request.header = msg.val; + + ret = avs_dsp_send_msg(adev, &request, NULL); + if (ret) + avs_ipc_err(adev, &request, "create pipeline", ret); + + return ret; +} + +int avs_ipc_delete_pipeline(struct avs_dev *adev, u8 instance_id) +{ + union avs_global_msg msg = AVS_GLOBAL_REQUEST(DELETE_PIPELINE); + struct avs_ipc_msg request = {{0}}; + int ret; + + msg.ppl.instance_id = instance_id; + request.header = msg.val; + + ret = avs_dsp_send_msg(adev, &request, NULL); + if (ret) + avs_ipc_err(adev, &request, "delete pipeline", ret); + + return ret; +} + +int avs_ipc_set_pipeline_state(struct avs_dev *adev, u8 instance_id, + enum avs_pipeline_state state) +{ + union avs_global_msg msg = AVS_GLOBAL_REQUEST(SET_PIPELINE_STATE); + struct avs_ipc_msg request = {{0}}; + int ret; + + msg.set_ppl_state.ppl_id = instance_id; + msg.set_ppl_state.state = state; + request.header = msg.val; + + ret = avs_dsp_send_msg(adev, &request, NULL); + if (ret) + avs_ipc_err(adev, &request, "set pipeline state", ret); + + return ret; +} + 
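+/*
+ * Illustrative sketch only: shows how the pipeline helpers above are
+ * typically sequenced by a caller (create, run, reset, delete). The
+ * size, priority and instance id used here are made-up placeholders and
+ * the function name is hypothetical, not part of the driver's API.
+ */
+static int __maybe_unused avs_ipc_pipeline_usage_sketch(struct avs_dev *adev)
+{
+	int ret;
+
+	/* Placeholder arguments: req_size=256, priority=0, instance_id=0. */
+	ret = avs_ipc_create_pipeline(adev, 256, 0, 0, false, 0);
+	if (ret)
+		return ret;
+
+	ret = avs_ipc_set_pipeline_state(adev, 0, AVS_PPL_STATE_RUNNING);
+	if (!ret)
+		ret = avs_ipc_set_pipeline_state(adev, 0, AVS_PPL_STATE_RESET);
+
+	/* Module instances owned by the pipeline are cleaned up on delete. */
+	avs_ipc_delete_pipeline(adev, 0);
+	return ret;
+}
+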
+int avs_ipc_get_pipeline_state(struct avs_dev *adev, u8 instance_id, + enum avs_pipeline_state *state) +{ + union avs_global_msg msg = AVS_GLOBAL_REQUEST(GET_PIPELINE_STATE); + struct avs_ipc_msg request = {{0}}; + struct avs_ipc_msg reply = {{0}}; + int ret; + + msg.get_ppl_state.ppl_id = instance_id; + request.header = msg.val; + + ret = avs_dsp_send_msg(adev, &request, &reply); + if (ret) { + avs_ipc_err(adev, &request, "get pipeline state", ret); + return ret; + } + + *state = reply.rsp.ext.get_ppl_state.state; + return ret; +} + +/* + * avs_ipc_init_instance - Initialize module instance + * + * @adev: Driver context + * @module_id: Module-type id + * @instance_id: Unique module instance id + * @ppl_id: Parent pipeline id + * @core_id: DSP core to allocate module on + * @domain: Processing domain (low latency or data processing) + * @param: Module-type specific configuration + * @param_size: Size of @param in bytes + * + * Argument verification, as well as pipeline state checks are done by the + * firmware. + * + * Note: @ppl_id and @core_id are independent of each other as single pipeline + * can be composed of module instances located on different DSP cores. + */ +int avs_ipc_init_instance(struct avs_dev *adev, u16 module_id, u8 instance_id, + u8 ppl_id, u8 core_id, u8 domain, + void *param, u32 param_size) +{ + union avs_module_msg msg = AVS_MODULE_REQUEST(INIT_INSTANCE); + struct avs_ipc_msg request; + int ret; + + msg.module_id = module_id; + msg.instance_id = instance_id; + /* firmware expects size provided in dwords */ + msg.ext.init_instance.param_block_size = DIV_ROUND_UP(param_size, sizeof(u32)); + msg.ext.init_instance.ppl_instance_id = ppl_id; + msg.ext.init_instance.core_id = core_id; + msg.ext.init_instance.proc_domain = domain; + + request.header = msg.val; + request.data = param; + request.size = param_size; + + ret = avs_dsp_send_msg(adev, &request, NULL); + if (ret) + avs_ipc_err(adev, &request, "init instance", ret); + + return ret; +} + +/* + * avs_ipc_delete_instance - Delete module instance + * + * @adev: Driver context + * @module_id: Module-type id + * @instance_id: Unique module instance id + * + * Argument verification, as well as pipeline state checks are done by the + * firmware. + * + * Note: only standalone modules i.e. without a parent pipeline shall be + * deleted using this IPC message. In all other cases, pipeline owning the + * modules performs cleanup automatically when it is deleted. 
+ */ +int avs_ipc_delete_instance(struct avs_dev *adev, u16 module_id, u8 instance_id) +{ + union avs_module_msg msg = AVS_MODULE_REQUEST(DELETE_INSTANCE); + struct avs_ipc_msg request = {{0}}; + int ret; + + msg.module_id = module_id; + msg.instance_id = instance_id; + request.header = msg.val; + + ret = avs_dsp_send_msg(adev, &request, NULL); + if (ret) + avs_ipc_err(adev, &request, "delete instance", ret); + + return ret; +} + +/* + * avs_ipc_bind - Bind two module instances + * + * @adev: Driver context + * @module_id: Source module-type id + * @instance_id: Source module instance id + * @dst_module_id: Sink module-type id + * @dst_instance_id: Sink module instance id + * @dst_queue: Sink module pin to bind @src_queue with + * @src_queue: Source module pin to bind @dst_queue with + */ +int avs_ipc_bind(struct avs_dev *adev, u16 module_id, u8 instance_id, + u16 dst_module_id, u8 dst_instance_id, + u8 dst_queue, u8 src_queue) +{ + union avs_module_msg msg = AVS_MODULE_REQUEST(BIND); + struct avs_ipc_msg request = {{0}}; + int ret; + + msg.module_id = module_id; + msg.instance_id = instance_id; + msg.ext.bind_unbind.dst_module_id = dst_module_id; + msg.ext.bind_unbind.dst_instance_id = dst_instance_id; + msg.ext.bind_unbind.dst_queue = dst_queue; + msg.ext.bind_unbind.src_queue = src_queue; + request.header = msg.val; + + ret = avs_dsp_send_msg(adev, &request, NULL); + if (ret) + avs_ipc_err(adev, &request, "bind modules", ret); + + return ret; +} + +/* + * avs_ipc_unbind - Unbind two module instances + * + * @adev: Driver context + * @module_id: Source module-type id + * @instance_id: Source module instance id + * @dst_module_id: Sink module-type id + * @dst_instance_id: Sink module instance id + * @dst_queue: Sink module pin to unbind @src_queue from + * @src_queue: Source module pin to unbind @dst_queue from + */ +int avs_ipc_unbind(struct avs_dev *adev, u16 module_id, u8 instance_id, + u16 dst_module_id, u8 dst_instance_id, + u8 dst_queue, u8 src_queue) +{ + union avs_module_msg msg = AVS_MODULE_REQUEST(UNBIND); + struct avs_ipc_msg request = {{0}}; + int ret; + + msg.module_id = module_id; + msg.instance_id = instance_id; + msg.ext.bind_unbind.dst_module_id = dst_module_id; + msg.ext.bind_unbind.dst_instance_id = dst_instance_id; + msg.ext.bind_unbind.dst_queue = dst_queue; + msg.ext.bind_unbind.src_queue = src_queue; + request.header = msg.val; + + ret = avs_dsp_send_msg(adev, &request, NULL); + if (ret) + avs_ipc_err(adev, &request, "unbind modules", ret); + + return ret; +} + +static int __avs_ipc_set_large_config(struct avs_dev *adev, u16 module_id, u8 instance_id, + u8 param_id, bool init_block, bool final_block, + u8 *request_data, size_t request_size, size_t off_size) +{ + union avs_module_msg msg = AVS_MODULE_REQUEST(LARGE_CONFIG_SET); + struct avs_ipc_msg request; + int ret; + + msg.module_id = module_id; + msg.instance_id = instance_id; + msg.ext.large_config.data_off_size = off_size; + msg.ext.large_config.large_param_id = param_id; + msg.ext.large_config.final_block = final_block; + msg.ext.large_config.init_block = init_block; + + request.header = msg.val; + request.data = request_data; + request.size = request_size; + + ret = avs_dsp_send_msg(adev, &request, NULL); + if (ret) + avs_ipc_err(adev, &request, "large config set", ret); + + return ret; +} + +int avs_ipc_set_large_config(struct avs_dev *adev, u16 module_id, + u8 instance_id, u8 param_id, + u8 *request, size_t request_size) +{ + size_t remaining, tx_size; + bool final; + int ret; + + remaining = request_size; + 
tx_size = min_t(size_t, AVS_MAILBOX_SIZE, remaining); + final = (tx_size == remaining); + + /* Initial request states total payload size. */ + ret = __avs_ipc_set_large_config(adev, module_id, instance_id, + param_id, 1, final, request, tx_size, + request_size); + if (ret) + return ret; + + remaining -= tx_size; + + /* Loop the rest only when payload exceeds mailbox's size. */ + while (remaining) { + size_t offset; + + offset = request_size - remaining; + tx_size = min_t(size_t, AVS_MAILBOX_SIZE, remaining); + final = (tx_size == remaining); + + ret = __avs_ipc_set_large_config(adev, module_id, instance_id, + param_id, 0, final, + request + offset, tx_size, + offset); + if (ret) + return ret; + + remaining -= tx_size; + } + + return 0; +} + +int avs_ipc_get_large_config(struct avs_dev *adev, u16 module_id, u8 instance_id, + u8 param_id, u8 *request_data, size_t request_size, + u8 **reply_data, size_t *reply_size) +{ + union avs_module_msg msg = AVS_MODULE_REQUEST(LARGE_CONFIG_GET); + struct avs_ipc_msg request; + struct avs_ipc_msg reply = {{0}}; + void *buf; + int ret; + + reply.data = kzalloc(AVS_MAILBOX_SIZE, GFP_KERNEL); + if (!reply.data) + return -ENOMEM; + + msg.module_id = module_id; + msg.instance_id = instance_id; + msg.ext.large_config.data_off_size = request_size; + msg.ext.large_config.large_param_id = param_id; + /* final_block is always 0 on request. Updated by fw on reply. */ + msg.ext.large_config.final_block = 0; + msg.ext.large_config.init_block = 1; + + request.header = msg.val; + request.data = request_data; + request.size = request_size; + reply.size = AVS_MAILBOX_SIZE; + + ret = avs_dsp_send_msg(adev, &request, &reply); + if (ret) { + avs_ipc_err(adev, &request, "large config get", ret); + kfree(reply.data); + return ret; + } + + buf = krealloc(reply.data, reply.size, GFP_KERNEL); + if (!buf) { + kfree(reply.data); + return -ENOMEM; + } + + *reply_data = buf; + *reply_size = reply.size; + + return 0; +} + +int avs_ipc_set_dx(struct avs_dev *adev, u32 core_mask, bool powerup) +{ + union avs_module_msg msg = AVS_MODULE_REQUEST(SET_DX); + struct avs_ipc_msg request; + struct avs_dxstate_info dx; + int ret; + + dx.core_mask = core_mask; + dx.dx_mask = powerup ? core_mask : 0; + request.header = msg.val; + request.data = &dx; + request.size = sizeof(dx); + + ret = avs_dsp_send_pm_msg(adev, &request, NULL, true); + if (ret) + avs_ipc_err(adev, &request, "set dx", ret); + + return ret; +} + +/* + * avs_ipc_set_d0ix - Set power gating policy (entering D0IX substates) + * + * @enable_pg: Whether to enable or disable power gating + * @streaming: Whether a stream is running when transitioning + */ +int avs_ipc_set_d0ix(struct avs_dev *adev, bool enable_pg, bool streaming) +{ + union avs_module_msg msg = AVS_MODULE_REQUEST(SET_D0IX); + struct avs_ipc_msg request = {{0}}; + int ret; + + msg.ext.set_d0ix.wake = enable_pg; + msg.ext.set_d0ix.streaming = streaming; + + request.header = msg.val; + + ret = avs_dsp_send_pm_msg(adev, &request, NULL, false); + if (ret) + avs_ipc_err(adev, &request, "set d0ix", ret); + + return ret; +} + +int avs_ipc_get_fw_config(struct avs_dev *adev, struct avs_fw_cfg *cfg) +{ + struct avs_tlv *tlv; + size_t payload_size; + size_t offset = 0; + u8 *payload; + int ret; + + ret = avs_ipc_get_large_config(adev, AVS_BASEFW_MOD_ID, AVS_BASEFW_INST_ID, + AVS_BASEFW_FIRMWARE_CONFIG, NULL, 0, + &payload, &payload_size); + if (ret) + return ret; + /* Non-zero payload expected for FIRMWARE_CONFIG. 
*/ + if (!payload_size) + return -EREMOTEIO; + + while (offset < payload_size) { + tlv = (struct avs_tlv *)(payload + offset); + + switch (tlv->type) { + case AVS_FW_CFG_FW_VERSION: + memcpy(&cfg->fw_version, tlv->value, sizeof(cfg->fw_version)); + break; + + case AVS_FW_CFG_MEMORY_RECLAIMED: + cfg->memory_reclaimed = *tlv->value; + break; + + case AVS_FW_CFG_SLOW_CLOCK_FREQ_HZ: + cfg->slow_clock_freq_hz = *tlv->value; + break; + + case AVS_FW_CFG_FAST_CLOCK_FREQ_HZ: + cfg->fast_clock_freq_hz = *tlv->value; + break; + + case AVS_FW_CFG_ALH_SUPPORT_LEVEL: + cfg->alh_support = *tlv->value; + break; + + case AVS_FW_CFG_IPC_DL_MAILBOX_BYTES: + cfg->ipc_dl_mailbox_bytes = *tlv->value; + break; + + case AVS_FW_CFG_IPC_UL_MAILBOX_BYTES: + cfg->ipc_ul_mailbox_bytes = *tlv->value; + break; + + case AVS_FW_CFG_TRACE_LOG_BYTES: + cfg->trace_log_bytes = *tlv->value; + break; + + case AVS_FW_CFG_MAX_PPL_COUNT: + cfg->max_ppl_count = *tlv->value; + break; + + case AVS_FW_CFG_MAX_ASTATE_COUNT: + cfg->max_astate_count = *tlv->value; + break; + + case AVS_FW_CFG_MAX_MODULE_PIN_COUNT: + cfg->max_module_pin_count = *tlv->value; + break; + + case AVS_FW_CFG_MODULES_COUNT: + cfg->modules_count = *tlv->value; + break; + + case AVS_FW_CFG_MAX_MOD_INST_COUNT: + cfg->max_mod_inst_count = *tlv->value; + break; + + case AVS_FW_CFG_MAX_LL_TASKS_PER_PRI_COUNT: + cfg->max_ll_tasks_per_pri_count = *tlv->value; + break; + + case AVS_FW_CFG_LL_PRI_COUNT: + cfg->ll_pri_count = *tlv->value; + break; + + case AVS_FW_CFG_MAX_DP_TASKS_COUNT: + cfg->max_dp_tasks_count = *tlv->value; + break; + + case AVS_FW_CFG_MAX_LIBS_COUNT: + cfg->max_libs_count = *tlv->value; + break; + + case AVS_FW_CFG_XTAL_FREQ_HZ: + cfg->xtal_freq_hz = *tlv->value; + break; + + case AVS_FW_CFG_POWER_GATING_POLICY: + cfg->power_gating_policy = *tlv->value; + break; + + /* Known but not useful to us. */ + case AVS_FW_CFG_DMA_BUFFER_CONFIG: + case AVS_FW_CFG_SCHEDULER_CONFIG: + case AVS_FW_CFG_CLOCKS_CONFIG: + case AVS_FW_CFG_RESERVED: + break; + + default: + dev_info(adev->dev, "Unrecognized fw param: %d\n", tlv->type); + break; + } + + offset += sizeof(*tlv) + tlv->length; + } + + /* No longer needed, free it as it's owned by the get_large_config() caller. */ + kfree(payload); + return ret; +} + +int avs_ipc_get_hw_config(struct avs_dev *adev, struct avs_hw_cfg *cfg) +{ + struct avs_tlv *tlv; + size_t payload_size; + size_t size, offset = 0; + u8 *payload; + int ret; + + ret = avs_ipc_get_large_config(adev, AVS_BASEFW_MOD_ID, AVS_BASEFW_INST_ID, + AVS_BASEFW_HARDWARE_CONFIG, NULL, 0, + &payload, &payload_size); + if (ret) + return ret; + /* Non-zero payload expected for HARDWARE_CONFIG. */ + if (!payload_size) + return -EREMOTEIO; + + while (offset < payload_size) { + tlv = (struct avs_tlv *)(payload + offset); + + switch (tlv->type) { + case AVS_HW_CFG_AVS_VER: + cfg->avs_version = *tlv->value; + break; + + case AVS_HW_CFG_DSP_CORES: + cfg->dsp_cores = *tlv->value; + break; + + case AVS_HW_CFG_MEM_PAGE_BYTES: + cfg->mem_page_bytes = *tlv->value; + break; + + case AVS_HW_CFG_TOTAL_PHYS_MEM_PAGES: + cfg->total_phys_mem_pages = *tlv->value; + break; + + case AVS_HW_CFG_I2S_CAPS: + cfg->i2s_caps.i2s_version = tlv->value[0]; + size = tlv->value[1]; + cfg->i2s_caps.ctrl_count = size; + if (!size) + break; + + /* Multiply to get entire array size. 
*/ + size *= sizeof(*cfg->i2s_caps.ctrl_base_addr); + cfg->i2s_caps.ctrl_base_addr = devm_kmemdup(adev->dev, + &tlv->value[2], + size, GFP_KERNEL); + if (!cfg->i2s_caps.ctrl_base_addr) { + ret = -ENOMEM; + goto exit; + } + break; + + case AVS_HW_CFG_GATEWAY_COUNT: + cfg->gateway_count = *tlv->value; + break; + + case AVS_HW_CFG_HP_EBB_COUNT: + cfg->hp_ebb_count = *tlv->value; + break; + + case AVS_HW_CFG_LP_EBB_COUNT: + cfg->lp_ebb_count = *tlv->value; + break; + + case AVS_HW_CFG_EBB_SIZE_BYTES: + cfg->ebb_size_bytes = *tlv->value; + break; + + case AVS_HW_CFG_GPDMA_CAPS: + break; + + default: + dev_info(adev->dev, "Unrecognized hw config: %d\n", tlv->type); + break; + } + + offset += sizeof(*tlv) + tlv->length; + } + +exit: + /* No longer needed, free it as it's owned by the get_large_config() caller. */ + kfree(payload); + return ret; +} + +int avs_ipc_get_modules_info(struct avs_dev *adev, struct avs_mods_info **info) +{ + size_t payload_size; + u8 *payload; + int ret; + + ret = avs_ipc_get_large_config(adev, AVS_BASEFW_MOD_ID, AVS_BASEFW_INST_ID, + AVS_BASEFW_MODULES_INFO, NULL, 0, + &payload, &payload_size); + if (ret) + return ret; + /* Non-zero payload expected for MODULES_INFO. */ + if (!payload_size) + return -EREMOTEIO; + + *info = (struct avs_mods_info *)payload; + return 0; +} + +int avs_ipc_set_enable_logs(struct avs_dev *adev, u8 *log_info, size_t size) +{ + int ret; + + ret = avs_ipc_set_large_config(adev, AVS_BASEFW_MOD_ID, AVS_BASEFW_INST_ID, + AVS_BASEFW_ENABLE_LOGS, log_info, size); + if (ret) + dev_err(adev->dev, "enable logs failed: %d\n", ret); + + return ret; +} + +int avs_ipc_set_system_time(struct avs_dev *adev) +{ + struct avs_sys_time sys_time; + int ret; + u64 us; + + /* firmware expects UTC time in micro seconds */ + us = ktime_to_us(ktime_get()); + sys_time.val_l = us & UINT_MAX; + sys_time.val_u = us >> 32; + + ret = avs_ipc_set_large_config(adev, AVS_BASEFW_MOD_ID, AVS_BASEFW_INST_ID, + AVS_BASEFW_SYSTEM_TIME, (u8 *)&sys_time, sizeof(sys_time)); + if (ret) + dev_err(adev->dev, "set system time failed: %d\n", ret); + + return ret; +} + +int avs_ipc_copier_set_sink_format(struct avs_dev *adev, u16 module_id, + u8 instance_id, u32 sink_id, + const struct avs_audio_format *src_fmt, + const struct avs_audio_format *sink_fmt) +{ + struct avs_copier_sink_format cpr_fmt; + + cpr_fmt.sink_id = sink_id; + /* Firmware expects driver to resend copier's input format. */ + cpr_fmt.src_fmt = *src_fmt; + cpr_fmt.sink_fmt = *sink_fmt; + + return avs_ipc_set_large_config(adev, module_id, instance_id, + AVS_COPIER_SET_SINK_FORMAT, + (u8 *)&cpr_fmt, sizeof(cpr_fmt)); +} diff --git a/sound/soc/intel/avs/messages.h b/sound/soc/intel/avs/messages.h new file mode 100644 index 000000000..b82b2483e --- /dev/null +++ b/sound/soc/intel/avs/messages.h @@ -0,0 +1,803 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright(c) 2021-2022 Intel Corporation. All rights reserved. 
+ * + * Authors: Cezary Rojewski + * Amadeusz Slawinski + */ + +#ifndef __SOUND_SOC_INTEL_AVS_MSGS_H +#define __SOUND_SOC_INTEL_AVS_MSGS_H + +struct avs_dev; + +#define AVS_MAILBOX_SIZE 4096 + +enum avs_msg_target { + AVS_FW_GEN_MSG = 0, + AVS_MOD_MSG = 1 +}; + +enum avs_msg_direction { + AVS_MSG_REQUEST = 0, + AVS_MSG_REPLY = 1 +}; + +enum avs_global_msg_type { + AVS_GLB_ROM_CONTROL = 1, + AVS_GLB_LOAD_MULTIPLE_MODULES = 15, + AVS_GLB_UNLOAD_MULTIPLE_MODULES = 16, + AVS_GLB_CREATE_PIPELINE = 17, + AVS_GLB_DELETE_PIPELINE = 18, + AVS_GLB_SET_PIPELINE_STATE = 19, + AVS_GLB_GET_PIPELINE_STATE = 20, + AVS_GLB_LOAD_LIBRARY = 24, + AVS_GLB_NOTIFICATION = 27, +}; + +union avs_global_msg { + u64 val; + struct { + union { + u32 primary; + struct { + u32 rsvd:24; + u32 global_msg_type:5; + u32 msg_direction:1; + u32 msg_target:1; + }; + /* set boot config */ + struct { + u32 rom_ctrl_msg_type:9; + u32 dma_id:5; + u32 purge_request:1; + } boot_cfg; + /* module loading */ + struct { + u32 mod_cnt:8; + } load_multi_mods; + /* pipeline management */ + struct { + u32 ppl_mem_size:11; + u32 ppl_priority:5; + u32 instance_id:8; + } create_ppl; + struct { + u32 rsvd:16; + u32 instance_id:8; + } ppl; /* generic ppl request */ + struct { + u32 state:16; + u32 ppl_id:8; + } set_ppl_state; + struct { + u32 ppl_id:8; + } get_ppl_state; + /* library loading */ + struct { + u32 dma_id:5; + u32 rsvd:11; + u32 lib_id:4; + } load_lib; + }; + union { + u32 val; + /* pipeline management */ + struct { + u32 lp:1; /* low power flag */ + u32 rsvd:3; + u32 attributes:16; /* additional scheduling flags */ + } create_ppl; + } ext; + }; +} __packed; + +struct avs_tlv { + u32 type; + u32 length; + u32 value[]; +} __packed; + +enum avs_module_msg_type { + AVS_MOD_INIT_INSTANCE = 0, + AVS_MOD_LARGE_CONFIG_GET = 3, + AVS_MOD_LARGE_CONFIG_SET = 4, + AVS_MOD_BIND = 5, + AVS_MOD_UNBIND = 6, + AVS_MOD_SET_DX = 7, + AVS_MOD_SET_D0IX = 8, + AVS_MOD_DELETE_INSTANCE = 11, +}; + +union avs_module_msg { + u64 val; + struct { + union { + u32 primary; + struct { + u32 module_id:16; + u32 instance_id:8; + u32 module_msg_type:5; + u32 msg_direction:1; + u32 msg_target:1; + }; + }; + union { + u32 val; + struct { + u32 param_block_size:16; + u32 ppl_instance_id:8; + u32 core_id:4; + u32 proc_domain:1; + } init_instance; + struct { + u32 data_off_size:20; + u32 large_param_id:8; + u32 final_block:1; + u32 init_block:1; + } large_config; + struct { + u32 dst_module_id:16; + u32 dst_instance_id:8; + u32 dst_queue:3; + u32 src_queue:3; + } bind_unbind; + struct { + u32 wake:1; + u32 streaming:1; + } set_d0ix; + } ext; + }; +} __packed; + +union avs_reply_msg { + u64 val; + struct { + union { + u32 primary; + struct { + u32 status:24; + u32 global_msg_type:5; + u32 msg_direction:1; + u32 msg_target:1; + }; + }; + union { + u32 val; + /* module loading */ + struct { + u32 err_mod_id:16; + } load_multi_mods; + /* pipeline management */ + struct { + u32 state:5; + } get_ppl_state; + /* module management */ + struct { + u32 data_off_size:20; + u32 large_param_id:8; + u32 final_block:1; + u32 init_block:1; + } large_config; + } ext; + }; +} __packed; + +enum avs_notify_msg_type { + AVS_NOTIFY_PHRASE_DETECTED = 4, + AVS_NOTIFY_RESOURCE_EVENT = 5, + AVS_NOTIFY_LOG_BUFFER_STATUS = 6, + AVS_NOTIFY_FW_READY = 8, + AVS_NOTIFY_EXCEPTION_CAUGHT = 10, + AVS_NOTIFY_MODULE_EVENT = 12, +}; + +union avs_notify_msg { + u64 val; + struct { + union { + u32 primary; + struct { + u32 rsvd:16; + u32 notify_msg_type:8; + u32 global_msg_type:5; + u32 msg_direction:1; + u32 
msg_target:1; + }; + struct { + u16 rsvd:12; + u16 core:4; + } log; + }; + union { + u32 val; + struct { + u32 core_id:2; + u32 stack_dump_size:16; + } coredump; + } ext; + }; +} __packed; + +#define AVS_MSG(hdr) { .val = hdr } + +#define AVS_GLOBAL_REQUEST(msg_type) \ +{ \ + .global_msg_type = AVS_GLB_##msg_type, \ + .msg_direction = AVS_MSG_REQUEST, \ + .msg_target = AVS_FW_GEN_MSG, \ +} + +#define AVS_MODULE_REQUEST(msg_type) \ +{ \ + .module_msg_type = AVS_MOD_##msg_type, \ + .msg_direction = AVS_MSG_REQUEST, \ + .msg_target = AVS_MOD_MSG, \ +} + +#define AVS_NOTIFICATION(msg_type) \ +{ \ + .notify_msg_type = AVS_NOTIFY_##msg_type,\ + .global_msg_type = AVS_GLB_NOTIFICATION,\ + .msg_direction = AVS_MSG_REPLY, \ + .msg_target = AVS_FW_GEN_MSG, \ +} + +#define avs_msg_is_reply(hdr) \ +({ \ + union avs_reply_msg __msg = AVS_MSG(hdr); \ + __msg.msg_direction == AVS_MSG_REPLY && \ + __msg.global_msg_type != AVS_GLB_NOTIFICATION; \ +}) + +/* Notification types */ + +struct avs_notify_voice_data { + u16 kpd_score; + u16 reserved; +} __packed; + +struct avs_notify_res_data { + u32 resource_type; + u32 resource_id; + u32 event_type; + u32 reserved; + u32 data[6]; +} __packed; + +struct avs_notify_mod_data { + u32 module_instance_id; + u32 event_id; + u32 data_size; + u32 data[]; +} __packed; + +/* ROM messages */ +enum avs_rom_control_msg_type { + AVS_ROM_SET_BOOT_CONFIG = 0, +}; + +int avs_ipc_set_boot_config(struct avs_dev *adev, u32 dma_id, u32 purge); + +/* Code loading messages */ +int avs_ipc_load_modules(struct avs_dev *adev, u16 *mod_ids, u32 num_mod_ids); +int avs_ipc_unload_modules(struct avs_dev *adev, u16 *mod_ids, u32 num_mod_ids); +int avs_ipc_load_library(struct avs_dev *adev, u32 dma_id, u32 lib_id); + +/* Pipeline management messages */ +enum avs_pipeline_state { + AVS_PPL_STATE_INVALID, + AVS_PPL_STATE_UNINITIALIZED, + AVS_PPL_STATE_RESET, + AVS_PPL_STATE_PAUSED, + AVS_PPL_STATE_RUNNING, +}; + +int avs_ipc_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority, + u8 instance_id, bool lp, u16 attributes); +int avs_ipc_delete_pipeline(struct avs_dev *adev, u8 instance_id); +int avs_ipc_set_pipeline_state(struct avs_dev *adev, u8 instance_id, + enum avs_pipeline_state state); +int avs_ipc_get_pipeline_state(struct avs_dev *adev, u8 instance_id, + enum avs_pipeline_state *state); + +/* Module management messages */ +int avs_ipc_init_instance(struct avs_dev *adev, u16 module_id, u8 instance_id, + u8 ppl_id, u8 core_id, u8 domain, + void *param, u32 param_size); +int avs_ipc_delete_instance(struct avs_dev *adev, u16 module_id, u8 instance_id); +int avs_ipc_bind(struct avs_dev *adev, u16 module_id, u8 instance_id, + u16 dst_module_id, u8 dst_instance_id, + u8 dst_queue, u8 src_queue); +int avs_ipc_unbind(struct avs_dev *adev, u16 module_id, u8 instance_id, + u16 dst_module_id, u8 dst_instance_id, + u8 dst_queue, u8 src_queue); +int avs_ipc_set_large_config(struct avs_dev *adev, u16 module_id, + u8 instance_id, u8 param_id, + u8 *request, size_t request_size); +int avs_ipc_get_large_config(struct avs_dev *adev, u16 module_id, u8 instance_id, + u8 param_id, u8 *request_data, size_t request_size, + u8 **reply_data, size_t *reply_size); + +/* DSP cores and domains power management messages */ +struct avs_dxstate_info { + u32 core_mask; /* which cores are subject for power transition */ + u32 dx_mask; /* bit[n]=1 core n goes to D0, bit[n]=0 it goes to D3 */ +} __packed; + +int avs_ipc_set_dx(struct avs_dev *adev, u32 core_mask, bool powerup); +int avs_ipc_set_d0ix(struct avs_dev 
*adev, bool enable_pg, bool streaming); + +/* Base-firmware runtime parameters */ + +#define AVS_BASEFW_MOD_ID 0 +#define AVS_BASEFW_INST_ID 0 + +enum avs_basefw_runtime_param { + AVS_BASEFW_ENABLE_LOGS = 6, + AVS_BASEFW_FIRMWARE_CONFIG = 7, + AVS_BASEFW_HARDWARE_CONFIG = 8, + AVS_BASEFW_MODULES_INFO = 9, + AVS_BASEFW_LIBRARIES_INFO = 16, + AVS_BASEFW_SYSTEM_TIME = 20, +}; + +enum avs_log_enable { + AVS_LOG_DISABLE = 0, + AVS_LOG_ENABLE = 1 +}; + +enum avs_skl_log_priority { + AVS_SKL_LOG_CRITICAL = 1, + AVS_SKL_LOG_HIGH, + AVS_SKL_LOG_MEDIUM, + AVS_SKL_LOG_LOW, + AVS_SKL_LOG_VERBOSE, +}; + +struct skl_log_state { + u32 enable; + u32 min_priority; +} __packed; + +struct skl_log_state_info { + u32 core_mask; + struct skl_log_state logs_core[]; +} __packed; + +struct apl_log_state_info { + u32 aging_timer_period; + u32 fifo_full_timer_period; + u32 core_mask; + struct skl_log_state logs_core[]; +} __packed; + +int avs_ipc_set_enable_logs(struct avs_dev *adev, u8 *log_info, size_t size); + +struct avs_fw_version { + u16 major; + u16 minor; + u16 hotfix; + u16 build; +}; + +enum avs_fw_cfg_params { + AVS_FW_CFG_FW_VERSION = 0, + AVS_FW_CFG_MEMORY_RECLAIMED, + AVS_FW_CFG_SLOW_CLOCK_FREQ_HZ, + AVS_FW_CFG_FAST_CLOCK_FREQ_HZ, + AVS_FW_CFG_DMA_BUFFER_CONFIG, + AVS_FW_CFG_ALH_SUPPORT_LEVEL, + AVS_FW_CFG_IPC_DL_MAILBOX_BYTES, + AVS_FW_CFG_IPC_UL_MAILBOX_BYTES, + AVS_FW_CFG_TRACE_LOG_BYTES, + AVS_FW_CFG_MAX_PPL_COUNT, + AVS_FW_CFG_MAX_ASTATE_COUNT, + AVS_FW_CFG_MAX_MODULE_PIN_COUNT, + AVS_FW_CFG_MODULES_COUNT, + AVS_FW_CFG_MAX_MOD_INST_COUNT, + AVS_FW_CFG_MAX_LL_TASKS_PER_PRI_COUNT, + AVS_FW_CFG_LL_PRI_COUNT, + AVS_FW_CFG_MAX_DP_TASKS_COUNT, + AVS_FW_CFG_MAX_LIBS_COUNT, + AVS_FW_CFG_SCHEDULER_CONFIG, + AVS_FW_CFG_XTAL_FREQ_HZ, + AVS_FW_CFG_CLOCKS_CONFIG, + AVS_FW_CFG_RESERVED, + AVS_FW_CFG_POWER_GATING_POLICY, + AVS_FW_CFG_ASSERT_MODE, +}; + +struct avs_fw_cfg { + struct avs_fw_version fw_version; + u32 memory_reclaimed; + u32 slow_clock_freq_hz; + u32 fast_clock_freq_hz; + u32 alh_support; + u32 ipc_dl_mailbox_bytes; + u32 ipc_ul_mailbox_bytes; + u32 trace_log_bytes; + u32 max_ppl_count; + u32 max_astate_count; + u32 max_module_pin_count; + u32 modules_count; + u32 max_mod_inst_count; + u32 max_ll_tasks_per_pri_count; + u32 ll_pri_count; + u32 max_dp_tasks_count; + u32 max_libs_count; + u32 xtal_freq_hz; + u32 power_gating_policy; +}; + +int avs_ipc_get_fw_config(struct avs_dev *adev, struct avs_fw_cfg *cfg); + +enum avs_hw_cfg_params { + AVS_HW_CFG_AVS_VER, + AVS_HW_CFG_DSP_CORES, + AVS_HW_CFG_MEM_PAGE_BYTES, + AVS_HW_CFG_TOTAL_PHYS_MEM_PAGES, + AVS_HW_CFG_I2S_CAPS, + AVS_HW_CFG_GPDMA_CAPS, + AVS_HW_CFG_GATEWAY_COUNT, + AVS_HW_CFG_HP_EBB_COUNT, + AVS_HW_CFG_LP_EBB_COUNT, + AVS_HW_CFG_EBB_SIZE_BYTES, +}; + +enum avs_iface_version { + AVS_AVS_VER_1_5 = 0x10005, + AVS_AVS_VER_1_8 = 0x10008, +}; + +enum avs_i2s_version { + AVS_I2S_VER_15_SKYLAKE = 0x00000, + AVS_I2S_VER_15_BROXTON = 0x10000, + AVS_I2S_VER_15_BROXTON_P = 0x20000, + AVS_I2S_VER_18_KBL_CNL = 0x30000, +}; + +struct avs_i2s_caps { + u32 i2s_version; + u32 ctrl_count; + u32 *ctrl_base_addr; +}; + +struct avs_hw_cfg { + u32 avs_version; + u32 dsp_cores; + u32 mem_page_bytes; + u32 total_phys_mem_pages; + struct avs_i2s_caps i2s_caps; + u32 gateway_count; + u32 hp_ebb_count; + u32 lp_ebb_count; + u32 ebb_size_bytes; +}; + +int avs_ipc_get_hw_config(struct avs_dev *adev, struct avs_hw_cfg *cfg); + +#define AVS_MODULE_LOAD_TYPE_BUILTIN 0 +#define AVS_MODULE_LOAD_TYPE_LOADABLE 1 +#define AVS_MODULE_STATE_LOADED BIT(0) + +struct avs_module_type { + 
u32 load_type:4; + u32 auto_start:1; + u32 domain_ll:1; + u32 domain_dp:1; + u32 lib_code:1; + u32 rsvd:24; +} __packed; + +union avs_segment_flags { + u32 ul; + struct { + u32 contents:1; + u32 alloc:1; + u32 load:1; + u32 readonly:1; + u32 code:1; + u32 data:1; + u32 rsvd_1:2; + u32 type:4; + u32 rsvd_2:4; + u32 length:16; + }; +} __packed; + +struct avs_segment_desc { + union avs_segment_flags flags; + u32 v_base_addr; + u32 file_offset; +} __packed; + +struct avs_module_entry { + u16 module_id; + u16 state_flags; + u8 name[8]; + guid_t uuid; + struct avs_module_type type; + u8 hash[32]; + u32 entry_point; + u16 cfg_offset; + u16 cfg_count; + u32 affinity_mask; + u16 instance_max_count; + u16 instance_bss_size; + struct avs_segment_desc segments[3]; +} __packed; + +struct avs_mods_info { + u32 count; + struct avs_module_entry entries[]; +} __packed; + +static inline bool avs_module_entry_is_loaded(struct avs_module_entry *mentry) +{ + return mentry->type.load_type == AVS_MODULE_LOAD_TYPE_BUILTIN || + mentry->state_flags & AVS_MODULE_STATE_LOADED; +} + +int avs_ipc_get_modules_info(struct avs_dev *adev, struct avs_mods_info **info); + +struct avs_sys_time { + u32 val_l; + u32 val_u; +} __packed; + +int avs_ipc_set_system_time(struct avs_dev *adev); + +/* Module configuration */ + +#define AVS_MIXIN_MOD_UUID \ + GUID_INIT(0x39656EB2, 0x3B71, 0x4049, 0x8D, 0x3F, 0xF9, 0x2C, 0xD5, 0xC4, 0x3C, 0x09) + +#define AVS_MIXOUT_MOD_UUID \ + GUID_INIT(0x3C56505A, 0x24D7, 0x418F, 0xBD, 0xDC, 0xC1, 0xF5, 0xA3, 0xAC, 0x2A, 0xE0) + +#define AVS_COPIER_MOD_UUID \ + GUID_INIT(0x9BA00C83, 0xCA12, 0x4A83, 0x94, 0x3C, 0x1F, 0xA2, 0xE8, 0x2F, 0x9D, 0xDA) + +#define AVS_KPBUFF_MOD_UUID \ + GUID_INIT(0xA8A0CB32, 0x4A77, 0x4DB1, 0x85, 0xC7, 0x53, 0xD7, 0xEE, 0x07, 0xBC, 0xE6) + +#define AVS_MICSEL_MOD_UUID \ + GUID_INIT(0x32FE92C1, 0x1E17, 0x4FC2, 0x97, 0x58, 0xC7, 0xF3, 0x54, 0x2E, 0x98, 0x0A) + +#define AVS_MUX_MOD_UUID \ + GUID_INIT(0x64CE6E35, 0x857A, 0x4878, 0xAC, 0xE8, 0xE2, 0xA2, 0xF4, 0x2e, 0x30, 0x69) + +#define AVS_UPDWMIX_MOD_UUID \ + GUID_INIT(0x42F8060C, 0x832F, 0x4DBF, 0xB2, 0x47, 0x51, 0xE9, 0x61, 0x99, 0x7b, 0x35) + +#define AVS_SRCINTC_MOD_UUID \ + GUID_INIT(0xE61BB28D, 0x149A, 0x4C1F, 0xB7, 0x09, 0x46, 0x82, 0x3E, 0xF5, 0xF5, 0xAE) + +#define AVS_PROBE_MOD_UUID \ + GUID_INIT(0x7CAD0808, 0xAB10, 0xCD23, 0xEF, 0x45, 0x12, 0xAB, 0x34, 0xCD, 0x56, 0xEF) + +#define AVS_AEC_MOD_UUID \ + GUID_INIT(0x46CB87FB, 0xD2C9, 0x4970, 0x96, 0xD2, 0x6D, 0x7E, 0x61, 0x4B, 0xB6, 0x05) + +#define AVS_ASRC_MOD_UUID \ + GUID_INIT(0x66B4402D, 0xB468, 0x42F2, 0x81, 0xA7, 0xB3, 0x71, 0x21, 0x86, 0x3D, 0xD4) + +#define AVS_INTELWOV_MOD_UUID \ + GUID_INIT(0xEC774FA9, 0x28D3, 0x424A, 0x90, 0xE4, 0x69, 0xF9, 0x84, 0xF1, 0xEE, 0xB7) + +/* channel map */ +enum avs_channel_index { + AVS_CHANNEL_LEFT = 0, + AVS_CHANNEL_RIGHT = 1, + AVS_CHANNEL_CENTER = 2, + AVS_CHANNEL_LEFT_SURROUND = 3, + AVS_CHANNEL_CENTER_SURROUND = 3, + AVS_CHANNEL_RIGHT_SURROUND = 4, + AVS_CHANNEL_LFE = 7, + AVS_CHANNEL_INVALID = 0xF, +}; + +enum avs_channel_config { + AVS_CHANNEL_CONFIG_MONO = 0, + AVS_CHANNEL_CONFIG_STEREO = 1, + AVS_CHANNEL_CONFIG_2_1 = 2, + AVS_CHANNEL_CONFIG_3_0 = 3, + AVS_CHANNEL_CONFIG_3_1 = 4, + AVS_CHANNEL_CONFIG_QUATRO = 5, + AVS_CHANNEL_CONFIG_4_0 = 6, + AVS_CHANNEL_CONFIG_5_0 = 7, + AVS_CHANNEL_CONFIG_5_1 = 8, + AVS_CHANNEL_CONFIG_DUAL_MONO = 9, + AVS_CHANNEL_CONFIG_I2S_DUAL_STEREO_0 = 10, + AVS_CHANNEL_CONFIG_I2S_DUAL_STEREO_1 = 11, + AVS_CHANNEL_CONFIG_7_1 = 12, + AVS_CHANNEL_CONFIG_INVALID +}; + +enum avs_interleaving { + 
AVS_INTERLEAVING_PER_CHANNEL = 0, + AVS_INTERLEAVING_PER_SAMPLE = 1, +}; + +enum avs_sample_type { + AVS_SAMPLE_TYPE_INT_MSB = 0, + AVS_SAMPLE_TYPE_INT_LSB = 1, + AVS_SAMPLE_TYPE_INT_SIGNED = 2, + AVS_SAMPLE_TYPE_INT_UNSIGNED = 3, + AVS_SAMPLE_TYPE_FLOAT = 4, +}; + +#define AVS_CHANNELS_MAX 8 +#define AVS_ALL_CHANNELS_MASK UINT_MAX + +struct avs_audio_format { + u32 sampling_freq; + u32 bit_depth; + u32 channel_map; + u32 channel_config; + u32 interleaving; + u32 num_channels:8; + u32 valid_bit_depth:8; + u32 sample_type:8; + u32 reserved:8; +} __packed; + +struct avs_modcfg_base { + u32 cpc; + u32 ibs; + u32 obs; + u32 is_pages; + struct avs_audio_format audio_fmt; +} __packed; + +struct avs_pin_format { + u32 pin_index; + u32 iobs; + struct avs_audio_format audio_fmt; +} __packed; + +struct avs_modcfg_ext { + struct avs_modcfg_base base; + u16 num_input_pins; + u16 num_output_pins; + u8 reserved[12]; + /* input pin formats followed by output ones */ + struct avs_pin_format pin_fmts[]; +} __packed; + +enum avs_dma_type { + AVS_DMA_HDA_HOST_OUTPUT = 0, + AVS_DMA_HDA_HOST_INPUT = 1, + AVS_DMA_HDA_LINK_OUTPUT = 8, + AVS_DMA_HDA_LINK_INPUT = 9, + AVS_DMA_DMIC_LINK_INPUT = 11, + AVS_DMA_I2S_LINK_OUTPUT = 12, + AVS_DMA_I2S_LINK_INPUT = 13, +}; + +union avs_virtual_index { + u8 val; + struct { + u8 time_slot:4; + u8 instance:4; + } i2s; + struct { + u8 queue_id:3; + u8 time_slot:2; + u8 instance:3; + } dmic; +} __packed; + +union avs_connector_node_id { + u32 val; + struct { + u32 vindex:8; + u32 dma_type:5; + u32 rsvd:19; + }; +} __packed; + +#define INVALID_PIPELINE_ID 0xFF +#define INVALID_NODE_ID \ + ((union avs_connector_node_id) { UINT_MAX }) + +union avs_gtw_attributes { + u32 val; + struct { + u32 lp_buffer_alloc:1; + u32 rsvd:31; + }; +} __packed; + +struct avs_copier_gtw_cfg { + union avs_connector_node_id node_id; + u32 dma_buffer_size; + u32 config_length; + struct { + union avs_gtw_attributes attrs; + u32 blob[]; + } config; +} __packed; + +struct avs_copier_cfg { + struct avs_modcfg_base base; + struct avs_audio_format out_fmt; + u32 feature_mask; + struct avs_copier_gtw_cfg gtw_cfg; +} __packed; + +struct avs_micsel_cfg { + struct avs_modcfg_base base; + struct avs_audio_format out_fmt; +} __packed; + +struct avs_mux_cfg { + struct avs_modcfg_base base; + struct avs_audio_format ref_fmt; + struct avs_audio_format out_fmt; +} __packed; + +struct avs_updown_mixer_cfg { + struct avs_modcfg_base base; + u32 out_channel_config; + u32 coefficients_select; + s32 coefficients[AVS_CHANNELS_MAX]; + u32 channel_map; +} __packed; + +struct avs_src_cfg { + struct avs_modcfg_base base; + u32 out_freq; +} __packed; + +struct avs_probe_gtw_cfg { + union avs_connector_node_id node_id; + u32 dma_buffer_size; +} __packed; + +struct avs_probe_cfg { + struct avs_modcfg_base base; + struct avs_probe_gtw_cfg gtw_cfg; +} __packed; + +struct avs_aec_cfg { + struct avs_modcfg_base base; + struct avs_audio_format ref_fmt; + struct avs_audio_format out_fmt; + u32 cpc_lp_mode; +} __packed; + +struct avs_asrc_cfg { + struct avs_modcfg_base base; + u32 out_freq; + u32 rsvd0:1; + u32 mode:1; + u32 rsvd2:2; + u32 disable_jitter_buffer:1; + u32 rsvd3:27; +} __packed; + +struct avs_wov_cfg { + struct avs_modcfg_base base; + u32 cpc_lp_mode; +} __packed; + +/* Module runtime parameters */ + +enum avs_copier_runtime_param { + AVS_COPIER_SET_SINK_FORMAT = 2, +}; + +struct avs_copier_sink_format { + u32 sink_id; + struct avs_audio_format src_fmt; + struct avs_audio_format sink_fmt; +} __packed; + +int 
avs_ipc_copier_set_sink_format(struct avs_dev *adev, u16 module_id, + u8 instance_id, u32 sink_id, + const struct avs_audio_format *src_fmt, + const struct avs_audio_format *sink_fmt); + +#endif /* __SOUND_SOC_INTEL_AVS_MSGS_H */ diff --git a/sound/soc/intel/avs/path.c b/sound/soc/intel/avs/path.c new file mode 100644 index 000000000..ce157a8d6 --- /dev/null +++ b/sound/soc/intel/avs/path.c @@ -0,0 +1,1009 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include "avs.h" +#include "path.h" +#include "topology.h" + +/* Must be called with adev->comp_list_mutex held. */ +static struct avs_tplg * +avs_path_find_tplg(struct avs_dev *adev, const char *name) +{ + struct avs_soc_component *acomp; + + list_for_each_entry(acomp, &adev->comp_list, node) + if (!strcmp(acomp->tplg->name, name)) + return acomp->tplg; + return NULL; +} + +static struct avs_path_module * +avs_path_find_module(struct avs_path_pipeline *ppl, u32 template_id) +{ + struct avs_path_module *mod; + + list_for_each_entry(mod, &ppl->mod_list, node) + if (mod->template->id == template_id) + return mod; + return NULL; +} + +static struct avs_path_pipeline * +avs_path_find_pipeline(struct avs_path *path, u32 template_id) +{ + struct avs_path_pipeline *ppl; + + list_for_each_entry(ppl, &path->ppl_list, node) + if (ppl->template->id == template_id) + return ppl; + return NULL; +} + +static struct avs_path * +avs_path_find_path(struct avs_dev *adev, const char *name, u32 template_id) +{ + struct avs_tplg_path_template *pos, *template = NULL; + struct avs_tplg *tplg; + struct avs_path *path; + + tplg = avs_path_find_tplg(adev, name); + if (!tplg) + return NULL; + + list_for_each_entry(pos, &tplg->path_tmpl_list, node) { + if (pos->id == template_id) { + template = pos; + break; + } + } + if (!template) + return NULL; + + spin_lock(&adev->path_list_lock); + /* Only one variant of given path template may be instantiated at a time. 
*/ + list_for_each_entry(path, &adev->path_list, node) { + if (path->template->owner == template) { + spin_unlock(&adev->path_list_lock); + return path; + } + } + + spin_unlock(&adev->path_list_lock); + return NULL; +} + +static bool avs_test_hw_params(struct snd_pcm_hw_params *params, + struct avs_audio_format *fmt) +{ + return (params_rate(params) == fmt->sampling_freq && + params_channels(params) == fmt->num_channels && + params_physical_width(params) == fmt->bit_depth && + params_width(params) == fmt->valid_bit_depth); +} + +static struct avs_tplg_path * +avs_path_find_variant(struct avs_dev *adev, + struct avs_tplg_path_template *template, + struct snd_pcm_hw_params *fe_params, + struct snd_pcm_hw_params *be_params) +{ + struct avs_tplg_path *variant; + + list_for_each_entry(variant, &template->path_list, node) { + dev_dbg(adev->dev, "check FE rate %d chn %d vbd %d bd %d\n", + variant->fe_fmt->sampling_freq, variant->fe_fmt->num_channels, + variant->fe_fmt->valid_bit_depth, variant->fe_fmt->bit_depth); + dev_dbg(adev->dev, "check BE rate %d chn %d vbd %d bd %d\n", + variant->be_fmt->sampling_freq, variant->be_fmt->num_channels, + variant->be_fmt->valid_bit_depth, variant->be_fmt->bit_depth); + + if (variant->fe_fmt && avs_test_hw_params(fe_params, variant->fe_fmt) && + variant->be_fmt && avs_test_hw_params(be_params, variant->be_fmt)) + return variant; + } + + return NULL; +} + +__maybe_unused +static bool avs_dma_type_is_host(u32 dma_type) +{ + return dma_type == AVS_DMA_HDA_HOST_OUTPUT || + dma_type == AVS_DMA_HDA_HOST_INPUT; +} + +__maybe_unused +static bool avs_dma_type_is_link(u32 dma_type) +{ + return !avs_dma_type_is_host(dma_type); +} + +__maybe_unused +static bool avs_dma_type_is_output(u32 dma_type) +{ + return dma_type == AVS_DMA_HDA_HOST_OUTPUT || + dma_type == AVS_DMA_HDA_LINK_OUTPUT || + dma_type == AVS_DMA_I2S_LINK_OUTPUT; +} + +__maybe_unused +static bool avs_dma_type_is_input(u32 dma_type) +{ + return !avs_dma_type_is_output(dma_type); +} + +static int avs_copier_create(struct avs_dev *adev, struct avs_path_module *mod) +{ + struct nhlt_acpi_table *nhlt = adev->nhlt; + struct avs_tplg_module *t = mod->template; + struct avs_copier_cfg *cfg; + struct nhlt_specific_cfg *ep_blob; + union avs_connector_node_id node_id = {0}; + size_t cfg_size, data_size = 0; + void *data = NULL; + u32 dma_type; + int ret; + + dma_type = t->cfg_ext->copier.dma_type; + node_id.dma_type = dma_type; + + switch (dma_type) { + struct avs_audio_format *fmt; + int direction; + + case AVS_DMA_I2S_LINK_OUTPUT: + case AVS_DMA_I2S_LINK_INPUT: + if (avs_dma_type_is_input(dma_type)) + direction = SNDRV_PCM_STREAM_CAPTURE; + else + direction = SNDRV_PCM_STREAM_PLAYBACK; + + if (t->cfg_ext->copier.blob_fmt) + fmt = t->cfg_ext->copier.blob_fmt; + else if (direction == SNDRV_PCM_STREAM_CAPTURE) + fmt = t->in_fmt; + else + fmt = t->cfg_ext->copier.out_fmt; + + ep_blob = intel_nhlt_get_endpoint_blob(adev->dev, + nhlt, t->cfg_ext->copier.vindex.i2s.instance, + NHLT_LINK_SSP, fmt->valid_bit_depth, fmt->bit_depth, + fmt->num_channels, fmt->sampling_freq, direction, + NHLT_DEVICE_I2S); + if (!ep_blob) { + dev_err(adev->dev, "no I2S ep_blob found\n"); + return -ENOENT; + } + + data = ep_blob->caps; + data_size = ep_blob->size; + /* I2S gateway's vindex is statically assigned in topology */ + node_id.vindex = t->cfg_ext->copier.vindex.val; + + break; + + case AVS_DMA_DMIC_LINK_INPUT: + direction = SNDRV_PCM_STREAM_CAPTURE; + + if (t->cfg_ext->copier.blob_fmt) + fmt = t->cfg_ext->copier.blob_fmt; + else + fmt = 
t->in_fmt; + + ep_blob = intel_nhlt_get_endpoint_blob(adev->dev, nhlt, 0, + NHLT_LINK_DMIC, fmt->valid_bit_depth, + fmt->bit_depth, fmt->num_channels, + fmt->sampling_freq, direction, NHLT_DEVICE_DMIC); + if (!ep_blob) { + dev_err(adev->dev, "no DMIC ep_blob found\n"); + return -ENOENT; + } + + data = ep_blob->caps; + data_size = ep_blob->size; + /* DMIC gateway's vindex is statically assigned in topology */ + node_id.vindex = t->cfg_ext->copier.vindex.val; + + break; + + case AVS_DMA_HDA_HOST_OUTPUT: + case AVS_DMA_HDA_HOST_INPUT: + /* HOST gateway's vindex is dynamically assigned with DMA id */ + node_id.vindex = mod->owner->owner->dma_id; + break; + + case AVS_DMA_HDA_LINK_OUTPUT: + case AVS_DMA_HDA_LINK_INPUT: + node_id.vindex = t->cfg_ext->copier.vindex.val | + mod->owner->owner->dma_id; + break; + + case INVALID_OBJECT_ID: + default: + node_id = INVALID_NODE_ID; + break; + } + + cfg_size = sizeof(*cfg) + data_size; + /* Every config-BLOB contains gateway attributes. */ + if (data_size) + cfg_size -= sizeof(cfg->gtw_cfg.config.attrs); + + cfg = kzalloc(cfg_size, GFP_KERNEL); + if (!cfg) + return -ENOMEM; + + cfg->base.cpc = t->cfg_base->cpc; + cfg->base.ibs = t->cfg_base->ibs; + cfg->base.obs = t->cfg_base->obs; + cfg->base.is_pages = t->cfg_base->is_pages; + cfg->base.audio_fmt = *t->in_fmt; + cfg->out_fmt = *t->cfg_ext->copier.out_fmt; + cfg->feature_mask = t->cfg_ext->copier.feature_mask; + cfg->gtw_cfg.node_id = node_id; + cfg->gtw_cfg.dma_buffer_size = t->cfg_ext->copier.dma_buffer_size; + /* config_length in DWORDs */ + cfg->gtw_cfg.config_length = DIV_ROUND_UP(data_size, 4); + if (data) + memcpy(&cfg->gtw_cfg.config, data, data_size); + + mod->gtw_attrs = cfg->gtw_cfg.config.attrs; + + ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, + t->core_id, t->domain, cfg, cfg_size, + &mod->instance_id); + kfree(cfg); + return ret; +} + +static int avs_updown_mix_create(struct avs_dev *adev, struct avs_path_module *mod) +{ + struct avs_tplg_module *t = mod->template; + struct avs_updown_mixer_cfg cfg; + int i; + + cfg.base.cpc = t->cfg_base->cpc; + cfg.base.ibs = t->cfg_base->ibs; + cfg.base.obs = t->cfg_base->obs; + cfg.base.is_pages = t->cfg_base->is_pages; + cfg.base.audio_fmt = *t->in_fmt; + cfg.out_channel_config = t->cfg_ext->updown_mix.out_channel_config; + cfg.coefficients_select = t->cfg_ext->updown_mix.coefficients_select; + for (i = 0; i < AVS_CHANNELS_MAX; i++) + cfg.coefficients[i] = t->cfg_ext->updown_mix.coefficients[i]; + cfg.channel_map = t->cfg_ext->updown_mix.channel_map; + + return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, + t->core_id, t->domain, &cfg, sizeof(cfg), + &mod->instance_id); +} + +static int avs_src_create(struct avs_dev *adev, struct avs_path_module *mod) +{ + struct avs_tplg_module *t = mod->template; + struct avs_src_cfg cfg; + + cfg.base.cpc = t->cfg_base->cpc; + cfg.base.ibs = t->cfg_base->ibs; + cfg.base.obs = t->cfg_base->obs; + cfg.base.is_pages = t->cfg_base->is_pages; + cfg.base.audio_fmt = *t->in_fmt; + cfg.out_freq = t->cfg_ext->src.out_freq; + + return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, + t->core_id, t->domain, &cfg, sizeof(cfg), + &mod->instance_id); +} + +static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod) +{ + struct avs_tplg_module *t = mod->template; + struct avs_asrc_cfg cfg; + + cfg.base.cpc = t->cfg_base->cpc; + cfg.base.ibs = t->cfg_base->ibs; + cfg.base.obs = t->cfg_base->obs; + cfg.base.is_pages = t->cfg_base->is_pages; + 
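/* Remaining fields: input format and ASRC conversion parameters from topology. */ +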
cfg.base.audio_fmt = *t->in_fmt; + cfg.out_freq = t->cfg_ext->asrc.out_freq; + cfg.mode = t->cfg_ext->asrc.mode; + cfg.disable_jitter_buffer = t->cfg_ext->asrc.disable_jitter_buffer; + + return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, + t->core_id, t->domain, &cfg, sizeof(cfg), + &mod->instance_id); +} + +static int avs_aec_create(struct avs_dev *adev, struct avs_path_module *mod) +{ + struct avs_tplg_module *t = mod->template; + struct avs_aec_cfg cfg; + + cfg.base.cpc = t->cfg_base->cpc; + cfg.base.ibs = t->cfg_base->ibs; + cfg.base.obs = t->cfg_base->obs; + cfg.base.is_pages = t->cfg_base->is_pages; + cfg.base.audio_fmt = *t->in_fmt; + cfg.ref_fmt = *t->cfg_ext->aec.ref_fmt; + cfg.out_fmt = *t->cfg_ext->aec.out_fmt; + cfg.cpc_lp_mode = t->cfg_ext->aec.cpc_lp_mode; + + return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, + t->core_id, t->domain, &cfg, sizeof(cfg), + &mod->instance_id); +} + +static int avs_mux_create(struct avs_dev *adev, struct avs_path_module *mod) +{ + struct avs_tplg_module *t = mod->template; + struct avs_mux_cfg cfg; + + cfg.base.cpc = t->cfg_base->cpc; + cfg.base.ibs = t->cfg_base->ibs; + cfg.base.obs = t->cfg_base->obs; + cfg.base.is_pages = t->cfg_base->is_pages; + cfg.base.audio_fmt = *t->in_fmt; + cfg.ref_fmt = *t->cfg_ext->mux.ref_fmt; + cfg.out_fmt = *t->cfg_ext->mux.out_fmt; + + return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, + t->core_id, t->domain, &cfg, sizeof(cfg), + &mod->instance_id); +} + +static int avs_wov_create(struct avs_dev *adev, struct avs_path_module *mod) +{ + struct avs_tplg_module *t = mod->template; + struct avs_wov_cfg cfg; + + cfg.base.cpc = t->cfg_base->cpc; + cfg.base.ibs = t->cfg_base->ibs; + cfg.base.obs = t->cfg_base->obs; + cfg.base.is_pages = t->cfg_base->is_pages; + cfg.base.audio_fmt = *t->in_fmt; + cfg.cpc_lp_mode = t->cfg_ext->wov.cpc_lp_mode; + + return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, + t->core_id, t->domain, &cfg, sizeof(cfg), + &mod->instance_id); +} + +static int avs_micsel_create(struct avs_dev *adev, struct avs_path_module *mod) +{ + struct avs_tplg_module *t = mod->template; + struct avs_micsel_cfg cfg; + + cfg.base.cpc = t->cfg_base->cpc; + cfg.base.ibs = t->cfg_base->ibs; + cfg.base.obs = t->cfg_base->obs; + cfg.base.is_pages = t->cfg_base->is_pages; + cfg.base.audio_fmt = *t->in_fmt; + cfg.out_fmt = *t->cfg_ext->micsel.out_fmt; + + return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, + t->core_id, t->domain, &cfg, sizeof(cfg), + &mod->instance_id); +} + +static int avs_modbase_create(struct avs_dev *adev, struct avs_path_module *mod) +{ + struct avs_tplg_module *t = mod->template; + struct avs_modcfg_base cfg; + + cfg.cpc = t->cfg_base->cpc; + cfg.ibs = t->cfg_base->ibs; + cfg.obs = t->cfg_base->obs; + cfg.is_pages = t->cfg_base->is_pages; + cfg.audio_fmt = *t->in_fmt; + + return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, + t->core_id, t->domain, &cfg, sizeof(cfg), + &mod->instance_id); +} + +static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod) +{ + struct avs_tplg_module *t = mod->template; + struct avs_tplg_modcfg_ext *tcfg = t->cfg_ext; + struct avs_modcfg_ext *cfg; + size_t cfg_size, num_pins; + int ret, i; + + num_pins = tcfg->generic.num_input_pins + tcfg->generic.num_output_pins; + cfg_size = sizeof(*cfg) + sizeof(*cfg->pin_fmts) * num_pins; + + cfg = kzalloc(cfg_size, GFP_KERNEL); + if (!cfg) + return -ENOMEM; + + cfg->base.cpc = 
t->cfg_base->cpc; + cfg->base.ibs = t->cfg_base->ibs; + cfg->base.obs = t->cfg_base->obs; + cfg->base.is_pages = t->cfg_base->is_pages; + cfg->base.audio_fmt = *t->in_fmt; + cfg->num_input_pins = tcfg->generic.num_input_pins; + cfg->num_output_pins = tcfg->generic.num_output_pins; + + /* configure pin formats */ + for (i = 0; i < num_pins; i++) { + struct avs_tplg_pin_format *tpin = &tcfg->generic.pin_fmts[i]; + struct avs_pin_format *pin = &cfg->pin_fmts[i]; + + pin->pin_index = tpin->pin_index; + pin->iobs = tpin->iobs; + pin->audio_fmt = *tpin->fmt; + } + + ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, + t->core_id, t->domain, cfg, cfg_size, + &mod->instance_id); + kfree(cfg); + return ret; +} + +static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod) +{ + dev_err(adev->dev, "Probe module can't be instantiated by topology"); + return -EINVAL; +} + +struct avs_module_create { + guid_t *guid; + int (*create)(struct avs_dev *adev, struct avs_path_module *mod); +}; + +static struct avs_module_create avs_module_create[] = { + { &AVS_MIXIN_MOD_UUID, avs_modbase_create }, + { &AVS_MIXOUT_MOD_UUID, avs_modbase_create }, + { &AVS_KPBUFF_MOD_UUID, avs_modbase_create }, + { &AVS_COPIER_MOD_UUID, avs_copier_create }, + { &AVS_MICSEL_MOD_UUID, avs_micsel_create }, + { &AVS_MUX_MOD_UUID, avs_mux_create }, + { &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create }, + { &AVS_SRCINTC_MOD_UUID, avs_src_create }, + { &AVS_AEC_MOD_UUID, avs_aec_create }, + { &AVS_ASRC_MOD_UUID, avs_asrc_create }, + { &AVS_INTELWOV_MOD_UUID, avs_wov_create }, + { &AVS_PROBE_MOD_UUID, avs_probe_create }, +}; + +static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod) +{ + const guid_t *type = &mod->template->cfg_ext->type; + + for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++) + if (guid_equal(type, avs_module_create[i].guid)) + return avs_module_create[i].create(adev, mod); + + return avs_modext_create(adev, mod); +} + +static void avs_path_module_free(struct avs_dev *adev, struct avs_path_module *mod) +{ + kfree(mod); +} + +static struct avs_path_module * +avs_path_module_create(struct avs_dev *adev, + struct avs_path_pipeline *owner, + struct avs_tplg_module *template) +{ + struct avs_path_module *mod; + int module_id, ret; + + module_id = avs_get_module_id(adev, &template->cfg_ext->type); + if (module_id < 0) + return ERR_PTR(module_id); + + mod = kzalloc(sizeof(*mod), GFP_KERNEL); + if (!mod) + return ERR_PTR(-ENOMEM); + + mod->template = template; + mod->module_id = module_id; + mod->owner = owner; + INIT_LIST_HEAD(&mod->node); + + ret = avs_path_module_type_create(adev, mod); + if (ret) { + dev_err(adev->dev, "module-type create failed: %d\n", ret); + kfree(mod); + return ERR_PTR(ret); + } + + return mod; +} + +static int avs_path_binding_arm(struct avs_dev *adev, struct avs_path_binding *binding) +{ + struct avs_path_module *this_mod, *target_mod; + struct avs_path_pipeline *target_ppl; + struct avs_path *target_path; + struct avs_tplg_binding *t; + + t = binding->template; + this_mod = avs_path_find_module(binding->owner, + t->mod_id); + if (!this_mod) { + dev_err(adev->dev, "path mod %d not found\n", t->mod_id); + return -EINVAL; + } + + /* update with target_tplg_name too */ + target_path = avs_path_find_path(adev, t->target_tplg_name, + t->target_path_tmpl_id); + if (!target_path) { + dev_err(adev->dev, "target path %s:%d not found\n", + t->target_tplg_name, t->target_path_tmpl_id); + return -EINVAL; + } + + target_ppl = 
avs_path_find_pipeline(target_path, + t->target_ppl_id); + if (!target_ppl) { + dev_err(adev->dev, "target ppl %d not found\n", t->target_ppl_id); + return -EINVAL; + } + + target_mod = avs_path_find_module(target_ppl, t->target_mod_id); + if (!target_mod) { + dev_err(adev->dev, "target mod %d not found\n", t->target_mod_id); + return -EINVAL; + } + + if (t->is_sink) { + binding->sink = this_mod; + binding->sink_pin = t->mod_pin; + binding->source = target_mod; + binding->source_pin = t->target_mod_pin; + } else { + binding->sink = target_mod; + binding->sink_pin = t->target_mod_pin; + binding->source = this_mod; + binding->source_pin = t->mod_pin; + } + + return 0; +} + +static void avs_path_binding_free(struct avs_dev *adev, struct avs_path_binding *binding) +{ + kfree(binding); +} + +static struct avs_path_binding *avs_path_binding_create(struct avs_dev *adev, + struct avs_path_pipeline *owner, + struct avs_tplg_binding *t) +{ + struct avs_path_binding *binding; + + binding = kzalloc(sizeof(*binding), GFP_KERNEL); + if (!binding) + return ERR_PTR(-ENOMEM); + + binding->template = t; + binding->owner = owner; + INIT_LIST_HEAD(&binding->node); + + return binding; +} + +static int avs_path_pipeline_arm(struct avs_dev *adev, + struct avs_path_pipeline *ppl) +{ + struct avs_path_module *mod; + + list_for_each_entry(mod, &ppl->mod_list, node) { + struct avs_path_module *source, *sink; + int ret; + + /* + * Only one module (so it's implicitly last) or it is the last + * one, either way we don't have next module to bind it to. + */ + if (mod == list_last_entry(&ppl->mod_list, + struct avs_path_module, node)) + break; + + /* bind current module to next module on list */ + source = mod; + sink = list_next_entry(mod, node); + if (!source || !sink) + return -EINVAL; + + ret = avs_ipc_bind(adev, source->module_id, source->instance_id, + sink->module_id, sink->instance_id, 0, 0); + if (ret) + return AVS_IPC_RET(ret); + } + + return 0; +} + +static void avs_path_pipeline_free(struct avs_dev *adev, + struct avs_path_pipeline *ppl) +{ + struct avs_path_binding *binding, *bsave; + struct avs_path_module *mod, *save; + + list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) { + list_del(&binding->node); + avs_path_binding_free(adev, binding); + } + + avs_dsp_delete_pipeline(adev, ppl->instance_id); + + /* Unload resources occupied by owned modules */ + list_for_each_entry_safe(mod, save, &ppl->mod_list, node) { + avs_dsp_delete_module(adev, mod->module_id, mod->instance_id, + mod->owner->instance_id, + mod->template->core_id); + avs_path_module_free(adev, mod); + } + + list_del(&ppl->node); + kfree(ppl); +} + +static struct avs_path_pipeline * +avs_path_pipeline_create(struct avs_dev *adev, struct avs_path *owner, + struct avs_tplg_pipeline *template) +{ + struct avs_path_pipeline *ppl; + struct avs_tplg_pplcfg *cfg = template->cfg; + struct avs_tplg_module *tmod; + int ret, i; + + ppl = kzalloc(sizeof(*ppl), GFP_KERNEL); + if (!ppl) + return ERR_PTR(-ENOMEM); + + ppl->template = template; + ppl->owner = owner; + INIT_LIST_HEAD(&ppl->binding_list); + INIT_LIST_HEAD(&ppl->mod_list); + INIT_LIST_HEAD(&ppl->node); + + ret = avs_dsp_create_pipeline(adev, cfg->req_size, cfg->priority, + cfg->lp, cfg->attributes, + &ppl->instance_id); + if (ret) { + dev_err(adev->dev, "error creating pipeline %d\n", ret); + kfree(ppl); + return ERR_PTR(ret); + } + + list_for_each_entry(tmod, &template->mod_list, node) { + struct avs_path_module *mod; + + mod = avs_path_module_create(adev, ppl, tmod); + if 
(IS_ERR(mod)) { + ret = PTR_ERR(mod); + dev_err(adev->dev, "error creating module %d\n", ret); + goto init_err; + } + + list_add_tail(&mod->node, &ppl->mod_list); + } + + for (i = 0; i < template->num_bindings; i++) { + struct avs_path_binding *binding; + + binding = avs_path_binding_create(adev, ppl, template->bindings[i]); + if (IS_ERR(binding)) { + ret = PTR_ERR(binding); + dev_err(adev->dev, "error creating binding %d\n", ret); + goto init_err; + } + + list_add_tail(&binding->node, &ppl->binding_list); + } + + return ppl; + +init_err: + avs_path_pipeline_free(adev, ppl); + return ERR_PTR(ret); +} + +static int avs_path_init(struct avs_dev *adev, struct avs_path *path, + struct avs_tplg_path *template, u32 dma_id) +{ + struct avs_tplg_pipeline *tppl; + + path->owner = adev; + path->template = template; + path->dma_id = dma_id; + INIT_LIST_HEAD(&path->ppl_list); + INIT_LIST_HEAD(&path->node); + + /* create all the pipelines */ + list_for_each_entry(tppl, &template->ppl_list, node) { + struct avs_path_pipeline *ppl; + + ppl = avs_path_pipeline_create(adev, path, tppl); + if (IS_ERR(ppl)) + return PTR_ERR(ppl); + + list_add_tail(&ppl->node, &path->ppl_list); + } + + spin_lock(&adev->path_list_lock); + list_add_tail(&path->node, &adev->path_list); + spin_unlock(&adev->path_list_lock); + + return 0; +} + +static int avs_path_arm(struct avs_dev *adev, struct avs_path *path) +{ + struct avs_path_pipeline *ppl; + struct avs_path_binding *binding; + int ret; + + list_for_each_entry(ppl, &path->ppl_list, node) { + /* + * Arm all ppl bindings before binding internal modules + * as it costs no IPCs which isn't true for the latter. + */ + list_for_each_entry(binding, &ppl->binding_list, node) { + ret = avs_path_binding_arm(adev, binding); + if (ret < 0) + return ret; + } + + ret = avs_path_pipeline_arm(adev, ppl); + if (ret < 0) + return ret; + } + + return 0; +} + +static void avs_path_free_unlocked(struct avs_path *path) +{ + struct avs_path_pipeline *ppl, *save; + + spin_lock(&path->owner->path_list_lock); + list_del(&path->node); + spin_unlock(&path->owner->path_list_lock); + + list_for_each_entry_safe(ppl, save, &path->ppl_list, node) + avs_path_pipeline_free(path->owner, ppl); + + kfree(path); +} + +static struct avs_path *avs_path_create_unlocked(struct avs_dev *adev, u32 dma_id, + struct avs_tplg_path *template) +{ + struct avs_path *path; + int ret; + + path = kzalloc(sizeof(*path), GFP_KERNEL); + if (!path) + return ERR_PTR(-ENOMEM); + + ret = avs_path_init(adev, path, template, dma_id); + if (ret < 0) + goto err; + + ret = avs_path_arm(adev, path); + if (ret < 0) + goto err; + + path->state = AVS_PPL_STATE_INVALID; + return path; +err: + avs_path_free_unlocked(path); + return ERR_PTR(ret); +} + +void avs_path_free(struct avs_path *path) +{ + struct avs_dev *adev = path->owner; + + mutex_lock(&adev->path_mutex); + avs_path_free_unlocked(path); + mutex_unlock(&adev->path_mutex); +} + +struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id, + struct avs_tplg_path_template *template, + struct snd_pcm_hw_params *fe_params, + struct snd_pcm_hw_params *be_params) +{ + struct avs_tplg_path *variant; + struct avs_path *path; + + variant = avs_path_find_variant(adev, template, fe_params, be_params); + if (!variant) { + dev_err(adev->dev, "no matching variant found\n"); + return ERR_PTR(-ENOENT); + } + + /* Serialize path and its components creation. */ + mutex_lock(&adev->path_mutex); + /* Satisfy needs of avs_path_find_tplg(). 
*/ + mutex_lock(&adev->comp_list_mutex); + + path = avs_path_create_unlocked(adev, dma_id, variant); + + mutex_unlock(&adev->comp_list_mutex); + mutex_unlock(&adev->path_mutex); + + return path; +} + +static int avs_path_bind_prepare(struct avs_dev *adev, + struct avs_path_binding *binding) +{ + const struct avs_audio_format *src_fmt, *sink_fmt; + struct avs_tplg_module *tsource = binding->source->template; + struct avs_path_module *source = binding->source; + int ret; + + /* + * only copier modules about to be bound + * to output pin other than 0 need preparation + */ + if (!binding->source_pin) + return 0; + if (!guid_equal(&tsource->cfg_ext->type, &AVS_COPIER_MOD_UUID)) + return 0; + + src_fmt = tsource->in_fmt; + sink_fmt = binding->sink->template->in_fmt; + + ret = avs_ipc_copier_set_sink_format(adev, source->module_id, + source->instance_id, binding->source_pin, + src_fmt, sink_fmt); + if (ret) { + dev_err(adev->dev, "config copier failed: %d\n", ret); + return AVS_IPC_RET(ret); + } + + return 0; +} + +int avs_path_bind(struct avs_path *path) +{ + struct avs_path_pipeline *ppl; + struct avs_dev *adev = path->owner; + int ret; + + list_for_each_entry(ppl, &path->ppl_list, node) { + struct avs_path_binding *binding; + + list_for_each_entry(binding, &ppl->binding_list, node) { + struct avs_path_module *source, *sink; + + source = binding->source; + sink = binding->sink; + + ret = avs_path_bind_prepare(adev, binding); + if (ret < 0) + return ret; + + ret = avs_ipc_bind(adev, source->module_id, + source->instance_id, sink->module_id, + sink->instance_id, binding->sink_pin, + binding->source_pin); + if (ret) { + dev_err(adev->dev, "bind path failed: %d\n", ret); + return AVS_IPC_RET(ret); + } + } + } + + return 0; +} + +int avs_path_unbind(struct avs_path *path) +{ + struct avs_path_pipeline *ppl; + struct avs_dev *adev = path->owner; + int ret; + + list_for_each_entry(ppl, &path->ppl_list, node) { + struct avs_path_binding *binding; + + list_for_each_entry(binding, &ppl->binding_list, node) { + struct avs_path_module *source, *sink; + + source = binding->source; + sink = binding->sink; + + ret = avs_ipc_unbind(adev, source->module_id, + source->instance_id, sink->module_id, + sink->instance_id, binding->sink_pin, + binding->source_pin); + if (ret) { + dev_err(adev->dev, "unbind path failed: %d\n", ret); + return AVS_IPC_RET(ret); + } + } + } + + return 0; +} + +int avs_path_reset(struct avs_path *path) +{ + struct avs_path_pipeline *ppl; + struct avs_dev *adev = path->owner; + int ret; + + if (path->state == AVS_PPL_STATE_RESET) + return 0; + + list_for_each_entry(ppl, &path->ppl_list, node) { + ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id, + AVS_PPL_STATE_RESET); + if (ret) { + dev_err(adev->dev, "reset path failed: %d\n", ret); + path->state = AVS_PPL_STATE_INVALID; + return AVS_IPC_RET(ret); + } + } + + path->state = AVS_PPL_STATE_RESET; + return 0; +} + +int avs_path_pause(struct avs_path *path) +{ + struct avs_path_pipeline *ppl; + struct avs_dev *adev = path->owner; + int ret; + + if (path->state == AVS_PPL_STATE_PAUSED) + return 0; + + list_for_each_entry_reverse(ppl, &path->ppl_list, node) { + ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id, + AVS_PPL_STATE_PAUSED); + if (ret) { + dev_err(adev->dev, "pause path failed: %d\n", ret); + path->state = AVS_PPL_STATE_INVALID; + return AVS_IPC_RET(ret); + } + } + + path->state = AVS_PPL_STATE_PAUSED; + return 0; +} + +int avs_path_run(struct avs_path *path, int trigger) +{ + struct avs_path_pipeline *ppl; + struct 
avs_dev *adev = path->owner; + int ret; + + if (path->state == AVS_PPL_STATE_RUNNING && trigger == AVS_TPLG_TRIGGER_AUTO) + return 0; + + list_for_each_entry(ppl, &path->ppl_list, node) { + if (ppl->template->cfg->trigger != trigger) + continue; + + ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id, + AVS_PPL_STATE_RUNNING); + if (ret) { + dev_err(adev->dev, "run path failed: %d\n", ret); + path->state = AVS_PPL_STATE_INVALID; + return AVS_IPC_RET(ret); + } + } + + path->state = AVS_PPL_STATE_RUNNING; + return 0; +} diff --git a/sound/soc/intel/avs/path.h b/sound/soc/intel/avs/path.h new file mode 100644 index 000000000..197222c5e --- /dev/null +++ b/sound/soc/intel/avs/path.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright(c) 2021 Intel Corporation. All rights reserved. + * + * Authors: Cezary Rojewski + * Amadeusz Slawinski + */ + +#ifndef __SOUND_SOC_INTEL_AVS_PATH_H +#define __SOUND_SOC_INTEL_AVS_PATH_H + +#include +#include "avs.h" +#include "topology.h" + +struct avs_path { + u32 dma_id; + struct list_head ppl_list; + u32 state; + + struct avs_tplg_path *template; + struct avs_dev *owner; + /* device path management */ + struct list_head node; +}; + +struct avs_path_pipeline { + u8 instance_id; + struct list_head mod_list; + struct list_head binding_list; + + struct avs_tplg_pipeline *template; + struct avs_path *owner; + /* path pipelines management */ + struct list_head node; +}; + +struct avs_path_module { + u16 module_id; + u16 instance_id; + union avs_gtw_attributes gtw_attrs; + + struct avs_tplg_module *template; + struct avs_path_pipeline *owner; + /* pipeline modules management */ + struct list_head node; +}; + +struct avs_path_binding { + struct avs_path_module *source; + u8 source_pin; + struct avs_path_module *sink; + u8 sink_pin; + + struct avs_tplg_binding *template; + struct avs_path_pipeline *owner; + /* pipeline bindings management */ + struct list_head node; +}; + +void avs_path_free(struct avs_path *path); +struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id, + struct avs_tplg_path_template *template, + struct snd_pcm_hw_params *fe_params, + struct snd_pcm_hw_params *be_params); +int avs_path_bind(struct avs_path *path); +int avs_path_unbind(struct avs_path *path); +int avs_path_reset(struct avs_path *path); +int avs_path_pause(struct avs_path *path); +int avs_path_run(struct avs_path *path, int trigger); + +#endif diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c new file mode 100644 index 000000000..22f21f3f6 --- /dev/null +++ b/sound/soc/intel/avs/pcm.c @@ -0,0 +1,1193 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include "avs.h" +#include "path.h" +#include "topology.h" + +struct avs_dma_data { + struct avs_tplg_path_template *template; + struct avs_path *path; + /* + * link stream is stored within substream's runtime + * private_data to fulfill the needs of codec BE path + * + * host stream assigned + */ + struct hdac_ext_stream *host_stream; +}; + +static struct avs_tplg_path_template * +avs_dai_find_path_template(struct snd_soc_dai *dai, bool is_fe, int direction) +{ + struct snd_soc_dapm_widget *dw; + struct snd_soc_dapm_path *dp; + enum snd_soc_dapm_direction dir; + + if (direction == SNDRV_PCM_STREAM_CAPTURE) { + dw = dai->capture_widget; + dir = is_fe ? 
SND_SOC_DAPM_DIR_OUT : SND_SOC_DAPM_DIR_IN; + } else { + dw = dai->playback_widget; + dir = is_fe ? SND_SOC_DAPM_DIR_IN : SND_SOC_DAPM_DIR_OUT; + } + + dp = list_first_entry_or_null(&dw->edges[dir], typeof(*dp), list_node[dir]); + if (!dp) + return NULL; + + /* Get the other widget, with actual path template data */ + dw = (dp->source == dw) ? dp->sink : dp->source; + + return dw->priv; +} + +static int avs_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai, bool is_fe) +{ + struct avs_tplg_path_template *template; + struct avs_dma_data *data; + + template = avs_dai_find_path_template(dai, is_fe, substream->stream); + if (!template) { + dev_err(dai->dev, "no %s path for dai %s, invalid tplg?\n", + snd_pcm_stream_str(substream), dai->name); + return -EINVAL; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->template = template; + snd_soc_dai_set_dma_data(dai, substream, data); + + return 0; +} + +static int avs_dai_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *fe_hw_params, + struct snd_pcm_hw_params *be_hw_params, struct snd_soc_dai *dai, + int dma_id) +{ + struct avs_dma_data *data; + struct avs_path *path; + struct avs_dev *adev = to_avs_dev(dai->dev); + int ret; + + data = snd_soc_dai_get_dma_data(dai, substream); + + dev_dbg(dai->dev, "%s FE hw_params str %p rtd %p", + __func__, substream, substream->runtime); + dev_dbg(dai->dev, "rate %d chn %d vbd %d bd %d\n", + params_rate(fe_hw_params), params_channels(fe_hw_params), + params_width(fe_hw_params), params_physical_width(fe_hw_params)); + + dev_dbg(dai->dev, "%s BE hw_params str %p rtd %p", + __func__, substream, substream->runtime); + dev_dbg(dai->dev, "rate %d chn %d vbd %d bd %d\n", + params_rate(be_hw_params), params_channels(be_hw_params), + params_width(be_hw_params), params_physical_width(be_hw_params)); + + path = avs_path_create(adev, dma_id, data->template, fe_hw_params, be_hw_params); + if (IS_ERR(path)) { + ret = PTR_ERR(path); + dev_err(dai->dev, "create path failed: %d\n", ret); + return ret; + } + + data->path = path; + return 0; +} + +static int avs_dai_be_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *be_hw_params, struct snd_soc_dai *dai, + int dma_id) +{ + struct snd_pcm_hw_params *fe_hw_params = NULL; + struct snd_soc_pcm_runtime *fe, *be; + struct snd_soc_dpcm *dpcm; + + be = asoc_substream_to_rtd(substream); + for_each_dpcm_fe(be, substream->stream, dpcm) { + fe = dpcm->fe; + fe_hw_params = &fe->dpcm[substream->stream].hw_params; + } + + return avs_dai_hw_params(substream, fe_hw_params, be_hw_params, dai, dma_id); +} + +static int avs_dai_prepare(struct avs_dev *adev, struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct avs_dma_data *data; + int ret; + + data = snd_soc_dai_get_dma_data(dai, substream); + if (!data->path) + return 0; + + ret = avs_path_reset(data->path); + if (ret < 0) { + dev_err(dai->dev, "reset path failed: %d\n", ret); + return ret; + } + + ret = avs_path_pause(data->path); + if (ret < 0) + dev_err(dai->dev, "pause path failed: %d\n", ret); + return ret; +} + +static int avs_dai_nonhda_be_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + return avs_dai_startup(substream, dai, false); +} + +static void avs_dai_nonhda_be_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + struct avs_dma_data *data; + + data = snd_soc_dai_get_dma_data(dai, substream); + + snd_soc_dai_set_dma_data(dai, substream, NULL); + 
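/* Counterpart of the allocation done in avs_dai_startup(). */ +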
kfree(data); +} + +static int avs_dai_nonhda_be_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *dai) +{ + struct avs_dma_data *data; + + data = snd_soc_dai_get_dma_data(dai, substream); + if (data->path) + return 0; + + /* Actual port-id comes from topology. */ + return avs_dai_be_hw_params(substream, hw_params, dai, 0); +} + +static int avs_dai_nonhda_be_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + struct avs_dma_data *data; + + dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name); + + data = snd_soc_dai_get_dma_data(dai, substream); + if (data->path) { + avs_path_free(data->path); + data->path = NULL; + } + + return 0; +} + +static int avs_dai_nonhda_be_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + return avs_dai_prepare(to_avs_dev(dai->dev), substream, dai); +} + +static int avs_dai_nonhda_be_trigger(struct snd_pcm_substream *substream, int cmd, + struct snd_soc_dai *dai) +{ + struct avs_dma_data *data; + int ret = 0; + + data = snd_soc_dai_get_dma_data(dai, substream); + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + ret = avs_path_run(data->path, AVS_TPLG_TRIGGER_AUTO); + if (ret < 0) + dev_err(dai->dev, "run BE path failed: %d\n", ret); + break; + + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + case SNDRV_PCM_TRIGGER_STOP: + ret = avs_path_pause(data->path); + if (ret < 0) + dev_err(dai->dev, "pause BE path failed: %d\n", ret); + + if (cmd == SNDRV_PCM_TRIGGER_STOP) { + ret = avs_path_reset(data->path); + if (ret < 0) + dev_err(dai->dev, "reset BE path failed: %d\n", ret); + } + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static const struct snd_soc_dai_ops avs_dai_nonhda_be_ops = { + .startup = avs_dai_nonhda_be_startup, + .shutdown = avs_dai_nonhda_be_shutdown, + .hw_params = avs_dai_nonhda_be_hw_params, + .hw_free = avs_dai_nonhda_be_hw_free, + .prepare = avs_dai_nonhda_be_prepare, + .trigger = avs_dai_nonhda_be_trigger, +}; + +static int avs_dai_hda_be_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + return avs_dai_startup(substream, dai, false); +} + +static void avs_dai_hda_be_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + return avs_dai_nonhda_be_shutdown(substream, dai); +} + +static int avs_dai_hda_be_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *dai) +{ + struct avs_dma_data *data; + struct hdac_ext_stream *link_stream; + + data = snd_soc_dai_get_dma_data(dai, substream); + if (data->path) + return 0; + + link_stream = substream->runtime->private_data; + + return avs_dai_be_hw_params(substream, hw_params, dai, + hdac_stream(link_stream)->stream_tag - 1); +} + +static int avs_dai_hda_be_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + struct avs_dma_data *data; + struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream); + struct hdac_ext_stream *link_stream; + struct hdac_ext_link *link; + struct hda_codec *codec; + + dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name); + + data = snd_soc_dai_get_dma_data(dai, substream); + if (!data->path) + return 0; + + link_stream = substream->runtime->private_data; + link_stream->link_prepared = false; + avs_path_free(data->path); + data->path = NULL; + + /* clear link <-> stream mapping */ + codec = dev_to_hda_codec(asoc_rtd_to_codec(rtd, 0)->dev); + link = snd_hdac_ext_bus_link_at(&codec->bus->core, 
codec->core.addr); + if (!link) + return -EINVAL; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + snd_hdac_ext_link_clear_stream_id(link, hdac_stream(link_stream)->stream_tag); + + return 0; +} + +static int avs_dai_hda_be_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream); + struct snd_pcm_runtime *runtime = substream->runtime; + struct hdac_ext_stream *link_stream = runtime->private_data; + struct hdac_ext_link *link; + struct hda_codec *codec; + struct hdac_bus *bus; + unsigned int format_val; + int ret; + + if (link_stream->link_prepared) + return 0; + + codec = dev_to_hda_codec(asoc_rtd_to_codec(rtd, 0)->dev); + bus = &codec->bus->core; + format_val = snd_hdac_calc_stream_format(runtime->rate, runtime->channels, runtime->format, + runtime->sample_bits, 0); + + snd_hdac_ext_stream_decouple(bus, link_stream, true); + snd_hdac_ext_link_stream_reset(link_stream); + snd_hdac_ext_link_stream_setup(link_stream, format_val); + + link = snd_hdac_ext_bus_link_at(bus, codec->core.addr); + if (!link) + return -EINVAL; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + snd_hdac_ext_link_set_stream_id(link, hdac_stream(link_stream)->stream_tag); + + ret = avs_dai_prepare(to_avs_dev(dai->dev), substream, dai); + if (ret) + return ret; + + link_stream->link_prepared = true; + return 0; +} + +static int avs_dai_hda_be_trigger(struct snd_pcm_substream *substream, int cmd, + struct snd_soc_dai *dai) +{ + struct hdac_ext_stream *link_stream; + struct avs_dma_data *data; + int ret = 0; + + dev_dbg(dai->dev, "entry %s cmd=%d\n", __func__, cmd); + + data = snd_soc_dai_get_dma_data(dai, substream); + link_stream = substream->runtime->private_data; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + snd_hdac_ext_link_stream_start(link_stream); + + ret = avs_path_run(data->path, AVS_TPLG_TRIGGER_AUTO); + if (ret < 0) + dev_err(dai->dev, "run BE path failed: %d\n", ret); + break; + + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + case SNDRV_PCM_TRIGGER_STOP: + ret = avs_path_pause(data->path); + if (ret < 0) + dev_err(dai->dev, "pause BE path failed: %d\n", ret); + + snd_hdac_ext_link_stream_clear(link_stream); + + if (cmd == SNDRV_PCM_TRIGGER_STOP) { + ret = avs_path_reset(data->path); + if (ret < 0) + dev_err(dai->dev, "reset BE path failed: %d\n", ret); + } + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static const struct snd_soc_dai_ops avs_dai_hda_be_ops = { + .startup = avs_dai_hda_be_startup, + .shutdown = avs_dai_hda_be_shutdown, + .hw_params = avs_dai_hda_be_hw_params, + .hw_free = avs_dai_hda_be_hw_free, + .prepare = avs_dai_hda_be_prepare, + .trigger = avs_dai_hda_be_trigger, +}; + +static const unsigned int rates[] = { + 8000, 11025, 12000, 16000, + 22050, 24000, 32000, 44100, + 48000, 64000, 88200, 96000, + 128000, 176400, 192000, +}; + +static const struct snd_pcm_hw_constraint_list hw_rates = { + .count = ARRAY_SIZE(rates), + .list = rates, + .mask = 0, +}; + +static int avs_dai_fe_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct avs_dma_data *data; + struct avs_dev *adev = to_avs_dev(dai->dev); + struct hdac_bus *bus = &adev->base.core; + struct hdac_ext_stream *host_stream; + int ret; + + ret = avs_dai_startup(substream, dai, true); + if (ret) + return ret; + + data = snd_soc_dai_get_dma_data(dai, substream); + + host_stream = 
snd_hdac_ext_stream_assign(bus, substream, HDAC_EXT_STREAM_TYPE_HOST); + if (!host_stream) { + ret = -EBUSY; + goto err; + } + + data->host_stream = host_stream; + ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); + if (ret < 0) + goto err; + + /* avoid wrap-around with wall-clock */ + ret = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME, 20, 178000000); + if (ret < 0) + goto err; + + ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_rates); + if (ret < 0) + goto err; + + snd_pcm_set_sync(substream); + + dev_dbg(dai->dev, "%s fe STARTUP tag %d str %p", + __func__, hdac_stream(host_stream)->stream_tag, substream); + + return 0; + +err: + kfree(data); + return ret; +} + +static void avs_dai_fe_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + struct avs_dma_data *data; + + data = snd_soc_dai_get_dma_data(dai, substream); + + snd_soc_dai_set_dma_data(dai, substream, NULL); + snd_hdac_ext_stream_release(data->host_stream, HDAC_EXT_STREAM_TYPE_HOST); + kfree(data); +} + +static int avs_dai_fe_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *dai) +{ + struct snd_pcm_hw_params *be_hw_params = NULL; + struct snd_soc_pcm_runtime *fe, *be; + struct snd_soc_dpcm *dpcm; + struct avs_dma_data *data; + struct hdac_ext_stream *host_stream; + int ret; + + data = snd_soc_dai_get_dma_data(dai, substream); + if (data->path) + return 0; + + host_stream = data->host_stream; + + hdac_stream(host_stream)->bufsize = 0; + hdac_stream(host_stream)->period_bytes = 0; + hdac_stream(host_stream)->format_val = 0; + + fe = asoc_substream_to_rtd(substream); + for_each_dpcm_be(fe, substream->stream, dpcm) { + be = dpcm->be; + be_hw_params = &be->dpcm[substream->stream].hw_params; + } + + ret = avs_dai_hw_params(substream, hw_params, be_hw_params, dai, + hdac_stream(host_stream)->stream_tag - 1); + if (ret) + goto create_err; + + ret = avs_path_bind(data->path); + if (ret < 0) { + dev_err(dai->dev, "bind FE <-> BE failed: %d\n", ret); + goto bind_err; + } + + return 0; + +bind_err: + avs_path_free(data->path); + data->path = NULL; +create_err: + snd_pcm_lib_free_pages(substream); + return ret; +} + +static int avs_dai_fe_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + struct avs_dma_data *data; + struct hdac_ext_stream *host_stream; + int ret; + + dev_dbg(dai->dev, "%s fe HW_FREE str %p rtd %p", + __func__, substream, substream->runtime); + + data = snd_soc_dai_get_dma_data(dai, substream); + if (!data->path) + return 0; + + host_stream = data->host_stream; + + ret = avs_path_unbind(data->path); + if (ret < 0) + dev_err(dai->dev, "unbind FE <-> BE failed: %d\n", ret); + + avs_path_free(data->path); + data->path = NULL; + snd_hdac_stream_cleanup(hdac_stream(host_stream)); + hdac_stream(host_stream)->prepared = false; + + ret = snd_pcm_lib_free_pages(substream); + if (ret < 0) + dev_dbg(dai->dev, "Failed to free pages!\n"); + + return ret; +} + +static int avs_dai_fe_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct avs_dma_data *data; + struct avs_dev *adev = to_avs_dev(dai->dev); + struct hdac_ext_stream *host_stream; + struct hdac_bus *bus; + unsigned int format_val; + int ret; + + data = snd_soc_dai_get_dma_data(dai, substream); + host_stream = data->host_stream; + + if (hdac_stream(host_stream)->prepared) + return 0; + + bus = hdac_stream(host_stream)->bus; 
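+ /* Decouple host and link DMA for this stream before programming the host side. */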
+ snd_hdac_ext_stream_decouple(bus, data->host_stream, true); + snd_hdac_stream_reset(hdac_stream(host_stream)); + + format_val = snd_hdac_calc_stream_format(runtime->rate, runtime->channels, runtime->format, + runtime->sample_bits, 0); + + ret = snd_hdac_stream_set_params(hdac_stream(host_stream), format_val); + if (ret < 0) + return ret; + + ret = snd_hdac_stream_setup(hdac_stream(host_stream)); + if (ret < 0) + return ret; + + ret = avs_dai_prepare(adev, substream, dai); + if (ret) + return ret; + + hdac_stream(host_stream)->prepared = true; + return 0; +} + +static int avs_dai_fe_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) +{ + struct avs_dma_data *data; + struct hdac_ext_stream *host_stream; + struct hdac_bus *bus; + unsigned long flags; + int ret = 0; + + data = snd_soc_dai_get_dma_data(dai, substream); + host_stream = data->host_stream; + bus = hdac_stream(host_stream)->bus; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + spin_lock_irqsave(&bus->reg_lock, flags); + snd_hdac_stream_start(hdac_stream(host_stream), true); + spin_unlock_irqrestore(&bus->reg_lock, flags); + + ret = avs_path_run(data->path, AVS_TPLG_TRIGGER_AUTO); + if (ret < 0) + dev_err(dai->dev, "run FE path failed: %d\n", ret); + break; + + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + case SNDRV_PCM_TRIGGER_STOP: + ret = avs_path_pause(data->path); + if (ret < 0) + dev_err(dai->dev, "pause FE path failed: %d\n", ret); + + spin_lock_irqsave(&bus->reg_lock, flags); + snd_hdac_stream_stop(hdac_stream(host_stream)); + spin_unlock_irqrestore(&bus->reg_lock, flags); + + if (cmd == SNDRV_PCM_TRIGGER_STOP) { + ret = avs_path_reset(data->path); + if (ret < 0) + dev_err(dai->dev, "reset FE path failed: %d\n", ret); + } + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +const struct snd_soc_dai_ops avs_dai_fe_ops = { + .startup = avs_dai_fe_startup, + .shutdown = avs_dai_fe_shutdown, + .hw_params = avs_dai_fe_hw_params, + .hw_free = avs_dai_fe_hw_free, + .prepare = avs_dai_fe_prepare, + .trigger = avs_dai_fe_trigger, +}; + +static ssize_t topology_name_read(struct file *file, char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct snd_soc_component *component = file->private_data; + struct snd_soc_card *card = component->card; + struct snd_soc_acpi_mach *mach = dev_get_platdata(card->dev); + char buf[64]; + size_t len; + + len = scnprintf(buf, sizeof(buf), "%s/%s\n", component->driver->topology_name_prefix, + mach->tplg_filename); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations topology_name_fops = { + .open = simple_open, + .read = topology_name_read, + .llseek = default_llseek, +}; + +static int avs_component_load_libraries(struct avs_soc_component *acomp) +{ + struct avs_tplg *tplg = acomp->tplg; + struct avs_dev *adev = to_avs_dev(acomp->base.dev); + int ret; + + if (!tplg->num_libs) + return 0; + + /* Parent device may be asleep and library loading involves IPCs. 
*/ + ret = pm_runtime_resume_and_get(adev->dev); + if (ret < 0) + return ret; + + avs_hda_clock_gating_enable(adev, false); + avs_hda_l1sen_enable(adev, false); + + ret = avs_dsp_load_libraries(adev, tplg->libs, tplg->num_libs); + + avs_hda_l1sen_enable(adev, true); + avs_hda_clock_gating_enable(adev, true); + + if (!ret) + ret = avs_module_info_init(adev, false); + + pm_runtime_mark_last_busy(adev->dev); + pm_runtime_put_autosuspend(adev->dev); + + return ret; +} + +static int avs_component_probe(struct snd_soc_component *component) +{ + struct snd_soc_card *card = component->card; + struct snd_soc_acpi_mach *mach; + struct avs_soc_component *acomp; + struct avs_dev *adev; + char *filename; + int ret; + + dev_dbg(card->dev, "probing %s card %s\n", component->name, card->name); + mach = dev_get_platdata(card->dev); + acomp = to_avs_soc_component(component); + adev = to_avs_dev(component->dev); + + acomp->tplg = avs_tplg_new(component); + if (!acomp->tplg) + return -ENOMEM; + + if (!mach->tplg_filename) + goto finalize; + + /* Load specified topology and create debugfs for it. */ + filename = kasprintf(GFP_KERNEL, "%s/%s", component->driver->topology_name_prefix, + mach->tplg_filename); + if (!filename) + return -ENOMEM; + + ret = avs_load_topology(component, filename); + kfree(filename); + if (ret < 0) + return ret; + + ret = avs_component_load_libraries(acomp); + if (ret < 0) { + dev_err(card->dev, "libraries loading failed: %d\n", ret); + goto err_load_libs; + } + +finalize: + debugfs_create_file("topology_name", 0444, component->debugfs_root, component, + &topology_name_fops); + + mutex_lock(&adev->comp_list_mutex); + list_add_tail(&acomp->node, &adev->comp_list); + mutex_unlock(&adev->comp_list_mutex); + + return 0; + +err_load_libs: + avs_remove_topology(component); + return ret; +} + +static void avs_component_remove(struct snd_soc_component *component) +{ + struct avs_soc_component *acomp = to_avs_soc_component(component); + struct snd_soc_acpi_mach *mach; + struct avs_dev *adev = to_avs_dev(component->dev); + int ret; + + mach = dev_get_platdata(component->card->dev); + + mutex_lock(&adev->comp_list_mutex); + list_del(&acomp->node); + mutex_unlock(&adev->comp_list_mutex); + + if (mach->tplg_filename) { + ret = avs_remove_topology(component); + if (ret < 0) + dev_err(component->dev, "unload topology failed: %d\n", ret); + } +} + +static int avs_component_open(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream); + struct snd_pcm_hardware hwparams; + + /* only FE DAI links are handled here */ + if (rtd->dai_link->no_pcm) + return 0; + + hwparams.info = SNDRV_PCM_INFO_MMAP | + SNDRV_PCM_INFO_MMAP_VALID | + SNDRV_PCM_INFO_INTERLEAVED | + SNDRV_PCM_INFO_PAUSE | + SNDRV_PCM_INFO_NO_PERIOD_WAKEUP; + + hwparams.formats = SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE; + hwparams.period_bytes_min = 128; + hwparams.period_bytes_max = AZX_MAX_BUF_SIZE / 2; + hwparams.periods_min = 2; + hwparams.periods_max = AZX_MAX_FRAG; + hwparams.buffer_bytes_max = AZX_MAX_BUF_SIZE; + hwparams.fifo_size = 0; + + return snd_soc_set_runtime_hwparams(substream, &hwparams); +} + +static unsigned int avs_hda_stream_dpib_read(struct hdac_ext_stream *stream) +{ + return readl(hdac_stream(stream)->bus->remap_addr + AZX_REG_VS_SDXDPIB_XBASE + + (AZX_REG_VS_SDXDPIB_XINTERVAL * hdac_stream(stream)->index)); +} + +static snd_pcm_uframes_t +avs_component_pointer(struct snd_soc_component 
*component, struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream); + struct avs_dma_data *data; + struct hdac_ext_stream *host_stream; + unsigned int pos; + + data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream); + if (!data->host_stream) + return 0; + + host_stream = data->host_stream; + pos = avs_hda_stream_dpib_read(host_stream); + + if (pos >= hdac_stream(host_stream)->bufsize) + pos = 0; + + return bytes_to_frames(substream->runtime, pos); +} + +static int avs_component_mmap(struct snd_soc_component *component, + struct snd_pcm_substream *substream, + struct vm_area_struct *vma) +{ + return snd_pcm_lib_default_mmap(substream, vma); +} + +#define MAX_PREALLOC_SIZE (32 * 1024 * 1024) + +static int avs_component_construct(struct snd_soc_component *component, + struct snd_soc_pcm_runtime *rtd) +{ + struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0); + struct snd_pcm *pcm = rtd->pcm; + + if (dai->driver->playback.channels_min) + snd_pcm_set_managed_buffer(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream, + SNDRV_DMA_TYPE_DEV_SG, component->dev, 0, + MAX_PREALLOC_SIZE); + + if (dai->driver->capture.channels_min) + snd_pcm_set_managed_buffer(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream, + SNDRV_DMA_TYPE_DEV_SG, component->dev, 0, + MAX_PREALLOC_SIZE); + + return 0; +} + +static const struct snd_soc_component_driver avs_component_driver = { + .name = "avs-pcm", + .probe = avs_component_probe, + .remove = avs_component_remove, + .open = avs_component_open, + .pointer = avs_component_pointer, + .mmap = avs_component_mmap, + .pcm_construct = avs_component_construct, + .module_get_upon_open = 1, /* increment refcount when a pcm is opened */ + .topology_name_prefix = "intel/avs", +}; + +static int avs_soc_component_register(struct device *dev, const char *name, + const struct snd_soc_component_driver *drv, + struct snd_soc_dai_driver *cpu_dais, int num_cpu_dais) +{ + struct avs_soc_component *acomp; + int ret; + + acomp = devm_kzalloc(dev, sizeof(*acomp), GFP_KERNEL); + if (!acomp) + return -ENOMEM; + + ret = snd_soc_component_initialize(&acomp->base, drv, dev); + if (ret < 0) + return ret; + + /* force name change after ASoC is done with its init */ + acomp->base.name = name; + INIT_LIST_HEAD(&acomp->node); + + return snd_soc_add_component(&acomp->base, cpu_dais, num_cpu_dais); +} + +static struct snd_soc_dai_driver dmic_cpu_dais[] = { +{ + .name = "DMIC Pin", + .ops = &avs_dai_nonhda_be_ops, + .capture = { + .stream_name = "DMIC Rx", + .channels_min = 1, + .channels_max = 4, + .rates = SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000, + .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + }, +}, +{ + .name = "DMIC WoV Pin", + .ops = &avs_dai_nonhda_be_ops, + .capture = { + .stream_name = "DMIC WoV Rx", + .channels_min = 1, + .channels_max = 4, + .rates = SNDRV_PCM_RATE_16000, + .formats = SNDRV_PCM_FMTBIT_S16_LE, + }, +}, +}; + +int avs_dmic_platform_register(struct avs_dev *adev, const char *name) +{ + return avs_soc_component_register(adev->dev, name, &avs_component_driver, dmic_cpu_dais, + ARRAY_SIZE(dmic_cpu_dais)); +} + +static const struct snd_soc_dai_driver i2s_dai_template = { + .ops = &avs_dai_nonhda_be_ops, + .playback = { + .channels_min = 1, + .channels_max = 8, + .rates = SNDRV_PCM_RATE_8000_192000 | + SNDRV_PCM_RATE_KNOT, + .formats = SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE, + }, + .capture = { + .channels_min = 1, + .channels_max = 8, + .rates = 
SNDRV_PCM_RATE_8000_192000 | + SNDRV_PCM_RATE_KNOT, + .formats = SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE, + }, +}; + +int avs_i2s_platform_register(struct avs_dev *adev, const char *name, unsigned long port_mask, + unsigned long *tdms) +{ + struct snd_soc_dai_driver *cpus, *dai; + size_t ssp_count, cpu_count; + int i, j; + + ssp_count = adev->hw_cfg.i2s_caps.ctrl_count; + cpu_count = hweight_long(port_mask); + if (tdms) + for_each_set_bit(i, &port_mask, ssp_count) + cpu_count += hweight_long(tdms[i]); + + cpus = devm_kzalloc(adev->dev, sizeof(*cpus) * cpu_count, GFP_KERNEL); + if (!cpus) + return -ENOMEM; + + dai = cpus; + for_each_set_bit(i, &port_mask, ssp_count) { + memcpy(dai, &i2s_dai_template, sizeof(*dai)); + + dai->name = + devm_kasprintf(adev->dev, GFP_KERNEL, "SSP%d Pin", i); + dai->playback.stream_name = + devm_kasprintf(adev->dev, GFP_KERNEL, "ssp%d Tx", i); + dai->capture.stream_name = + devm_kasprintf(adev->dev, GFP_KERNEL, "ssp%d Rx", i); + + if (!dai->name || !dai->playback.stream_name || !dai->capture.stream_name) + return -ENOMEM; + dai++; + } + + if (!tdms) + goto plat_register; + + for_each_set_bit(i, &port_mask, ssp_count) { + for_each_set_bit(j, &tdms[i], ssp_count) { + memcpy(dai, &i2s_dai_template, sizeof(*dai)); + + dai->name = + devm_kasprintf(adev->dev, GFP_KERNEL, "SSP%d:%d Pin", i, j); + dai->playback.stream_name = + devm_kasprintf(adev->dev, GFP_KERNEL, "ssp%d:%d Tx", i, j); + dai->capture.stream_name = + devm_kasprintf(adev->dev, GFP_KERNEL, "ssp%d:%d Rx", i, j); + + if (!dai->name || !dai->playback.stream_name || !dai->capture.stream_name) + return -ENOMEM; + dai++; + } + } + +plat_register: + return avs_soc_component_register(adev->dev, name, &avs_component_driver, cpus, cpu_count); +} + +/* HD-Audio CPU DAI template */ +static const struct snd_soc_dai_driver hda_cpu_dai = { + .ops = &avs_dai_hda_be_ops, + .playback = { + .channels_min = 1, + .channels_max = 8, + .rates = SNDRV_PCM_RATE_8000_192000, + .formats = SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE, + }, + .capture = { + .channels_min = 1, + .channels_max = 8, + .rates = SNDRV_PCM_RATE_8000_192000, + .formats = SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE, + }, +}; + +static void avs_component_hda_unregister_dais(struct snd_soc_component *component) +{ + struct snd_soc_acpi_mach *mach; + struct snd_soc_dai *dai, *save; + struct hda_codec *codec; + char name[32]; + + mach = dev_get_platdata(component->card->dev); + codec = mach->pdata; + sprintf(name, "%s-cpu", dev_name(&codec->core.dev)); + + for_each_component_dais_safe(component, dai, save) { + if (!strstr(dai->driver->name, name)) + continue; + + if (dai->playback_widget) + snd_soc_dapm_free_widget(dai->playback_widget); + if (dai->capture_widget) + snd_soc_dapm_free_widget(dai->capture_widget); + snd_soc_unregister_dai(dai); + } +} + +static int avs_component_hda_probe(struct snd_soc_component *component) +{ + struct snd_soc_dapm_context *dapm; + struct snd_soc_dai_driver *dais; + struct snd_soc_acpi_mach *mach; + struct hda_codec *codec; + struct hda_pcm *pcm; + const char *cname; + int pcm_count = 0, ret, i; + + mach = dev_get_platdata(component->card->dev); + if (!mach) + return -EINVAL; + + codec = mach->pdata; + if (list_empty(&codec->pcm_list_head)) + return -EINVAL; + list_for_each_entry(pcm, &codec->pcm_list_head, list) + pcm_count++; + + dais = devm_kcalloc(component->dev, pcm_count, sizeof(*dais), + GFP_KERNEL); + if (!dais) + 
return -ENOMEM; + + cname = dev_name(&codec->core.dev); + dapm = snd_soc_component_get_dapm(component); + pcm = list_first_entry(&codec->pcm_list_head, struct hda_pcm, list); + + for (i = 0; i < pcm_count; i++, pcm = list_next_entry(pcm, list)) { + struct snd_soc_dai *dai; + + memcpy(&dais[i], &hda_cpu_dai, sizeof(*dais)); + dais[i].id = i; + dais[i].name = devm_kasprintf(component->dev, GFP_KERNEL, + "%s-cpu%d", cname, i); + if (!dais[i].name) { + ret = -ENOMEM; + goto exit; + } + + if (pcm->stream[0].substreams) { + dais[i].playback.stream_name = + devm_kasprintf(component->dev, GFP_KERNEL, + "%s-cpu%d Tx", cname, i); + if (!dais[i].playback.stream_name) { + ret = -ENOMEM; + goto exit; + } + } + + if (pcm->stream[1].substreams) { + dais[i].capture.stream_name = + devm_kasprintf(component->dev, GFP_KERNEL, + "%s-cpu%d Rx", cname, i); + if (!dais[i].capture.stream_name) { + ret = -ENOMEM; + goto exit; + } + } + + dai = snd_soc_register_dai(component, &dais[i], false); + if (!dai) { + dev_err(component->dev, "register dai for %s failed\n", + pcm->name); + ret = -EINVAL; + goto exit; + } + + ret = snd_soc_dapm_new_dai_widgets(dapm, dai); + if (ret < 0) { + dev_err(component->dev, "create widgets failed: %d\n", + ret); + goto exit; + } + } + + ret = avs_component_probe(component); +exit: + if (ret) + avs_component_hda_unregister_dais(component); + + return ret; +} + +static void avs_component_hda_remove(struct snd_soc_component *component) +{ + avs_component_hda_unregister_dais(component); + avs_component_remove(component); +} + +static int avs_component_hda_open(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream); + struct hdac_ext_stream *link_stream; + struct hda_codec *codec; + + /* only BE DAI links are handled here */ + if (!rtd->dai_link->no_pcm) + return avs_component_open(component, substream); + + codec = dev_to_hda_codec(asoc_rtd_to_codec(rtd, 0)->dev); + link_stream = snd_hdac_ext_stream_assign(&codec->bus->core, substream, + HDAC_EXT_STREAM_TYPE_LINK); + if (!link_stream) + return -EBUSY; + + substream->runtime->private_data = link_stream; + return 0; +} + +static int avs_component_hda_close(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream); + struct hdac_ext_stream *link_stream; + + /* only BE DAI links are handled here */ + if (!rtd->dai_link->no_pcm) + return 0; + + link_stream = substream->runtime->private_data; + snd_hdac_ext_stream_release(link_stream, HDAC_EXT_STREAM_TYPE_LINK); + substream->runtime->private_data = NULL; + + return 0; +} + +static const struct snd_soc_component_driver avs_hda_component_driver = { + .name = "avs-hda-pcm", + .probe = avs_component_hda_probe, + .remove = avs_component_hda_remove, + .open = avs_component_hda_open, + .close = avs_component_hda_close, + .pointer = avs_component_pointer, + .mmap = avs_component_mmap, + .pcm_construct = avs_component_construct, + /* + * hda platform component's probe() is dependent on + * codec->pcm_list_head, it needs to be initialized after codec + * component. 
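Hence probe_order is set to SND_SOC_COMP_ORDER_LATE.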
remove_order is here for completeness sake + */ + .probe_order = SND_SOC_COMP_ORDER_LATE, + .remove_order = SND_SOC_COMP_ORDER_EARLY, + .module_get_upon_open = 1, + .topology_name_prefix = "intel/avs", +}; + +int avs_hda_platform_register(struct avs_dev *adev, const char *name) +{ + return avs_soc_component_register(adev->dev, name, + &avs_hda_component_driver, NULL, 0); +} diff --git a/sound/soc/intel/avs/registers.h b/sound/soc/intel/avs/registers.h new file mode 100644 index 000000000..95be86148 --- /dev/null +++ b/sound/soc/intel/avs/registers.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright(c) 2021-2022 Intel Corporation. All rights reserved. + * + * Authors: Cezary Rojewski + * Amadeusz Slawinski + */ + +#ifndef __SOUND_SOC_INTEL_AVS_REGS_H +#define __SOUND_SOC_INTEL_AVS_REGS_H + +#define AZX_PCIREG_PGCTL 0x44 +#define AZX_PCIREG_CGCTL 0x48 +#define AZX_PGCTL_LSRMD_MASK BIT(4) +#define AZX_CGCTL_MISCBDCGE_MASK BIT(6) +#define AZX_VS_EM2_L1SEN BIT(13) +#define AZX_VS_EM2_DUM BIT(23) + +/* Intel HD Audio General DSP Registers */ +#define AVS_ADSP_GEN_BASE 0x0 +#define AVS_ADSP_REG_ADSPCS (AVS_ADSP_GEN_BASE + 0x04) +#define AVS_ADSP_REG_ADSPIC (AVS_ADSP_GEN_BASE + 0x08) +#define AVS_ADSP_REG_ADSPIS (AVS_ADSP_GEN_BASE + 0x0C) + +#define AVS_ADSP_ADSPIC_IPC BIT(0) +#define AVS_ADSP_ADSPIC_CLDMA BIT(1) +#define AVS_ADSP_ADSPIS_IPC BIT(0) +#define AVS_ADSP_ADSPIS_CLDMA BIT(1) + +#define AVS_ADSPCS_CRST_MASK(cm) (cm) +#define AVS_ADSPCS_CSTALL_MASK(cm) ((cm) << 8) +#define AVS_ADSPCS_SPA_MASK(cm) ((cm) << 16) +#define AVS_ADSPCS_CPA_MASK(cm) ((cm) << 24) +#define AVS_MAIN_CORE_MASK BIT(0) + +#define AVS_ADSP_HIPCCTL_BUSY BIT(0) +#define AVS_ADSP_HIPCCTL_DONE BIT(1) + +/* SKL Intel HD Audio Inter-Processor Communication Registers */ +#define SKL_ADSP_IPC_BASE 0x40 +#define SKL_ADSP_REG_HIPCT (SKL_ADSP_IPC_BASE + 0x00) +#define SKL_ADSP_REG_HIPCTE (SKL_ADSP_IPC_BASE + 0x04) +#define SKL_ADSP_REG_HIPCI (SKL_ADSP_IPC_BASE + 0x08) +#define SKL_ADSP_REG_HIPCIE (SKL_ADSP_IPC_BASE + 0x0C) +#define SKL_ADSP_REG_HIPCCTL (SKL_ADSP_IPC_BASE + 0x10) + +#define SKL_ADSP_HIPCI_BUSY BIT(31) +#define SKL_ADSP_HIPCIE_DONE BIT(30) +#define SKL_ADSP_HIPCT_BUSY BIT(31) + +/* Intel HD Audio SRAM windows base addresses */ +#define SKL_ADSP_SRAM_BASE_OFFSET 0x8000 +#define SKL_ADSP_SRAM_WINDOW_SIZE 0x2000 +#define APL_ADSP_SRAM_BASE_OFFSET 0x80000 +#define APL_ADSP_SRAM_WINDOW_SIZE 0x20000 + +/* Constants used when accessing SRAM, space shared with firmware */ +#define AVS_FW_REG_BASE(adev) ((adev)->spec->sram_base_offset) +#define AVS_FW_REG_STATUS(adev) (AVS_FW_REG_BASE(adev) + 0x0) +#define AVS_FW_REG_ERROR_CODE(adev) (AVS_FW_REG_BASE(adev) + 0x4) + +#define AVS_FW_REGS_SIZE PAGE_SIZE +#define AVS_FW_REGS_WINDOW 0 +/* DSP -> HOST communication window */ +#define AVS_UPLINK_WINDOW AVS_FW_REGS_WINDOW +/* HOST -> DSP communication window */ +#define AVS_DOWNLINK_WINDOW 1 +#define AVS_DEBUG_WINDOW 2 + +/* registry I/O helpers */ +#define avs_sram_offset(adev, window_idx) \ + ((adev)->spec->sram_base_offset + \ + (adev)->spec->sram_window_size * (window_idx)) + +#define avs_sram_addr(adev, window_idx) \ + ((adev)->dsp_ba + avs_sram_offset(adev, window_idx)) + +#define avs_uplink_addr(adev) \ + (avs_sram_addr(adev, AVS_UPLINK_WINDOW) + AVS_FW_REGS_SIZE) +#define avs_downlink_addr(adev) \ + avs_sram_addr(adev, AVS_DOWNLINK_WINDOW) + +#endif /* __SOUND_SOC_INTEL_AVS_REGS_H */ diff --git a/sound/soc/intel/avs/skl.c b/sound/soc/intel/avs/skl.c new file mode 100644 index 
000000000..bda5ec751 --- /dev/null +++ b/sound/soc/intel/avs/skl.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include "avs.h" +#include "messages.h" + +static int skl_enable_logs(struct avs_dev *adev, enum avs_log_enable enable, u32 aging_period, + u32 fifo_full_period, unsigned long resource_mask, u32 *priorities) +{ + struct skl_log_state_info *info; + u32 size, num_cores = adev->hw_cfg.dsp_cores; + int ret, i; + + if (fls_long(resource_mask) > num_cores) + return -EINVAL; + size = struct_size(info, logs_core, num_cores); + info = kzalloc(size, GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->core_mask = resource_mask; + if (enable) + for_each_set_bit(i, &resource_mask, GENMASK(num_cores, 0)) { + info->logs_core[i].enable = enable; + info->logs_core[i].min_priority = *priorities++; + } + else + for_each_set_bit(i, &resource_mask, GENMASK(num_cores, 0)) + info->logs_core[i].enable = enable; + + ret = avs_ipc_set_enable_logs(adev, (u8 *)info, size); + kfree(info); + if (ret) + return AVS_IPC_RET(ret); + + return 0; +} + +int skl_log_buffer_offset(struct avs_dev *adev, u32 core) +{ + return core * avs_log_buffer_size(adev); +} + +/* fw DbgLogWp registers */ +#define FW_REGS_DBG_LOG_WP(core) (0x30 + 0x4 * core) + +static int +skl_log_buffer_status(struct avs_dev *adev, union avs_notify_msg *msg) +{ + unsigned long flags; + void __iomem *buf; + u16 size, write, offset; + + spin_lock_irqsave(&adev->dbg.trace_lock, flags); + if (!kfifo_initialized(&adev->dbg.trace_fifo)) { + spin_unlock_irqrestore(&adev->dbg.trace_lock, flags); + return 0; + } + + size = avs_log_buffer_size(adev) / 2; + write = readl(avs_sram_addr(adev, AVS_FW_REGS_WINDOW) + FW_REGS_DBG_LOG_WP(msg->log.core)); + /* determine buffer half */ + offset = (write < size) ? size : 0; + + /* Address is guaranteed to exist in SRAM2. 
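The buffer is handled in two halves; the half that the firmware write pointer is not currently in gets copied into the trace fifo.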
*/ + buf = avs_log_buffer_addr(adev, msg->log.core) + offset; + __kfifo_fromio_locked(&adev->dbg.trace_fifo, buf, size, &adev->dbg.fifo_lock); + wake_up(&adev->dbg.trace_waitq); + spin_unlock_irqrestore(&adev->dbg.trace_lock, flags); + + return 0; +} + +static int skl_coredump(struct avs_dev *adev, union avs_notify_msg *msg) +{ + u8 *dump; + + dump = vzalloc(AVS_FW_REGS_SIZE); + if (!dump) + return -ENOMEM; + + memcpy_fromio(dump, avs_sram_addr(adev, AVS_FW_REGS_WINDOW), AVS_FW_REGS_SIZE); + dev_coredumpv(adev->dev, dump, AVS_FW_REGS_SIZE, GFP_KERNEL); + + return 0; +} + +static bool +skl_d0ix_toggle(struct avs_dev *adev, struct avs_ipc_msg *tx, bool wake) +{ + /* unsupported on cAVS 1.5 hw */ + return false; +} + +static int skl_set_d0ix(struct avs_dev *adev, bool enable) +{ + /* unsupported on cAVS 1.5 hw */ + return 0; +} + +const struct avs_dsp_ops skl_dsp_ops = { + .power = avs_dsp_core_power, + .reset = avs_dsp_core_reset, + .stall = avs_dsp_core_stall, + .irq_handler = avs_dsp_irq_handler, + .irq_thread = avs_dsp_irq_thread, + .int_control = avs_dsp_interrupt_control, + .load_basefw = avs_cldma_load_basefw, + .load_lib = avs_cldma_load_library, + .transfer_mods = avs_cldma_transfer_modules, + .enable_logs = skl_enable_logs, + .log_buffer_offset = skl_log_buffer_offset, + .log_buffer_status = skl_log_buffer_status, + .coredump = skl_coredump, + .d0ix_toggle = skl_d0ix_toggle, + .set_d0ix = skl_set_d0ix, +}; diff --git a/sound/soc/intel/avs/topology.c b/sound/soc/intel/avs/topology.c new file mode 100644 index 000000000..8a9f9fc48 --- /dev/null +++ b/sound/soc/intel/avs/topology.c @@ -0,0 +1,1625 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include +#include +#include +#include "avs.h" +#include "topology.h" + +/* Get pointer to vendor array at the specified offset. */ +#define avs_tplg_vendor_array_at(array, offset) \ + ((struct snd_soc_tplg_vendor_array *)((u8 *)array + offset)) + +/* Get pointer to vendor array that is next in line. */ +#define avs_tplg_vendor_array_next(array) \ + (avs_tplg_vendor_array_at(array, le32_to_cpu((array)->size))) + +/* + * Scan provided block of tuples for the specified token. If found, + * @offset is updated with position at which first matching token is + * located. + * + * Returns 0 on success, -ENOENT if not found and error code otherwise. + */ +static int +avs_tplg_vendor_array_lookup(struct snd_soc_tplg_vendor_array *tuples, + u32 block_size, u32 token, u32 *offset) +{ + u32 pos = 0; + + while (block_size > 0) { + struct snd_soc_tplg_vendor_value_elem *tuple; + u32 tuples_size = le32_to_cpu(tuples->size); + + if (tuples_size > block_size) + return -EINVAL; + + tuple = tuples->value; + if (le32_to_cpu(tuple->token) == token) { + *offset = pos; + return 0; + } + + block_size -= tuples_size; + pos += tuples_size; + tuples = avs_tplg_vendor_array_next(tuples); + } + + return -ENOENT; +} + +/* + * See avs_tplg_vendor_array_lookup() for description. + * + * Behaves exactly like avs_tplg_vendor_lookup() but starts from the + * next vendor array in line. Useful when searching for the finish line + * of an arbitrary entry in a list of entries where each is composed of + * several vendor tuples and a specific token marks the beginning of + * a new entry block. 
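On success, @offset is relative to the originally provided @tuples pointer.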
+ */ +static int +avs_tplg_vendor_array_lookup_next(struct snd_soc_tplg_vendor_array *tuples, + u32 block_size, u32 token, u32 *offset) +{ + u32 tuples_size = le32_to_cpu(tuples->size); + int ret; + + if (tuples_size > block_size) + return -EINVAL; + + tuples = avs_tplg_vendor_array_next(tuples); + block_size -= tuples_size; + + ret = avs_tplg_vendor_array_lookup(tuples, block_size, token, offset); + if (!ret) + *offset += tuples_size; + return ret; +} + +/* + * Scan provided block of tuples for the specified token which marks + * the border of an entry block. Behavior is similar to + * avs_tplg_vendor_array_lookup() except 0 is also returned if no + * matching token has been found. In such case, returned @size is + * assigned to @block_size as the entire block belongs to the current + * entry. + * + * Returns 0 on success, error code otherwise. + */ +static int +avs_tplg_vendor_entry_size(struct snd_soc_tplg_vendor_array *tuples, + u32 block_size, u32 entry_id_token, u32 *size) +{ + int ret; + + ret = avs_tplg_vendor_array_lookup_next(tuples, block_size, entry_id_token, size); + if (ret == -ENOENT) { + *size = block_size; + ret = 0; + } + + return ret; +} + +/* + * Vendor tuple parsing descriptor. + * + * @token: vendor specific token that identifies tuple + * @type: tuple type, one of SND_SOC_TPLG_TUPLE_TYPE_XXX + * @offset: offset of a struct's field to initialize + * @parse: parsing function, extracts and assigns value to object's field + */ +struct avs_tplg_token_parser { + enum avs_tplg_token token; + u32 type; + u32 offset; + int (*parse)(struct snd_soc_component *comp, void *elem, void *object, u32 offset); +}; + +static int +avs_parse_uuid_token(struct snd_soc_component *comp, void *elem, void *object, u32 offset) +{ + struct snd_soc_tplg_vendor_uuid_elem *tuple = elem; + guid_t *val = (guid_t *)((u8 *)object + offset); + + guid_copy((guid_t *)val, (const guid_t *)&tuple->uuid); + + return 0; +} + +static int +avs_parse_bool_token(struct snd_soc_component *comp, void *elem, void *object, u32 offset) +{ + struct snd_soc_tplg_vendor_value_elem *tuple = elem; + bool *val = (bool *)((u8 *)object + offset); + + *val = le32_to_cpu(tuple->value); + + return 0; +} + +static int +avs_parse_byte_token(struct snd_soc_component *comp, void *elem, void *object, u32 offset) +{ + struct snd_soc_tplg_vendor_value_elem *tuple = elem; + u8 *val = ((u8 *)object + offset); + + *val = le32_to_cpu(tuple->value); + + return 0; +} + +static int +avs_parse_short_token(struct snd_soc_component *comp, void *elem, void *object, u32 offset) +{ + struct snd_soc_tplg_vendor_value_elem *tuple = elem; + u16 *val = (u16 *)((u8 *)object + offset); + + *val = le32_to_cpu(tuple->value); + + return 0; +} + +static int +avs_parse_word_token(struct snd_soc_component *comp, void *elem, void *object, u32 offset) +{ + struct snd_soc_tplg_vendor_value_elem *tuple = elem; + u32 *val = (u32 *)((u8 *)object + offset); + + *val = le32_to_cpu(tuple->value); + + return 0; +} + +static int +avs_parse_string_token(struct snd_soc_component *comp, void *elem, void *object, u32 offset) +{ + struct snd_soc_tplg_vendor_string_elem *tuple = elem; + char *val = (char *)((u8 *)object + offset); + + snprintf(val, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s", tuple->string); + + return 0; +} + +static int avs_parse_uuid_tokens(struct snd_soc_component *comp, void *object, + const struct avs_tplg_token_parser *parsers, int count, + struct snd_soc_tplg_vendor_array *tuples) +{ + struct snd_soc_tplg_vendor_uuid_elem *tuple; + int ret, i, j; + + /* Parse 
element by element. */ + for (i = 0; i < le32_to_cpu(tuples->num_elems); i++) { + tuple = &tuples->uuid[i]; + + for (j = 0; j < count; j++) { + /* Ignore non-UUID tokens. */ + if (parsers[j].type != SND_SOC_TPLG_TUPLE_TYPE_UUID || + parsers[j].token != le32_to_cpu(tuple->token)) + continue; + + ret = parsers[j].parse(comp, tuple, object, parsers[j].offset); + if (ret) + return ret; + } + } + + return 0; +} + +static int avs_parse_string_tokens(struct snd_soc_component *comp, void *object, + const struct avs_tplg_token_parser *parsers, int count, + struct snd_soc_tplg_vendor_array *tuples) +{ + struct snd_soc_tplg_vendor_string_elem *tuple; + int ret, i, j; + + /* Parse element by element. */ + for (i = 0; i < le32_to_cpu(tuples->num_elems); i++) { + tuple = &tuples->string[i]; + + for (j = 0; j < count; j++) { + /* Ignore non-string tokens. */ + if (parsers[j].type != SND_SOC_TPLG_TUPLE_TYPE_STRING || + parsers[j].token != le32_to_cpu(tuple->token)) + continue; + + ret = parsers[j].parse(comp, tuple, object, parsers[j].offset); + if (ret) + return ret; + } + } + + return 0; +} + +static int avs_parse_word_tokens(struct snd_soc_component *comp, void *object, + const struct avs_tplg_token_parser *parsers, int count, + struct snd_soc_tplg_vendor_array *tuples) +{ + struct snd_soc_tplg_vendor_value_elem *tuple; + int ret, i, j; + + /* Parse element by element. */ + for (i = 0; i < le32_to_cpu(tuples->num_elems); i++) { + tuple = &tuples->value[i]; + + for (j = 0; j < count; j++) { + /* Ignore non-integer tokens. */ + if (!(parsers[j].type == SND_SOC_TPLG_TUPLE_TYPE_WORD || + parsers[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT || + parsers[j].type == SND_SOC_TPLG_TUPLE_TYPE_BYTE || + parsers[j].type == SND_SOC_TPLG_TUPLE_TYPE_BOOL)) + continue; + + if (parsers[j].token != le32_to_cpu(tuple->token)) + continue; + + ret = parsers[j].parse(comp, tuple, object, parsers[j].offset); + if (ret) + return ret; + } + } + + return 0; +} + +static int avs_parse_tokens(struct snd_soc_component *comp, void *object, + const struct avs_tplg_token_parser *parsers, size_t count, + struct snd_soc_tplg_vendor_array *tuples, int priv_size) +{ + int array_size, ret; + + while (priv_size > 0) { + array_size = le32_to_cpu(tuples->size); + + if (array_size <= 0) { + dev_err(comp->dev, "invalid array size 0x%x\n", array_size); + return -EINVAL; + } + + /* Make sure there is enough data before parsing. 
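Each array declares its own size and the running total must never exceed the remaining private data.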
*/ + priv_size -= array_size; + if (priv_size < 0) { + dev_err(comp->dev, "invalid array size 0x%x\n", array_size); + return -EINVAL; + } + + switch (le32_to_cpu(tuples->type)) { + case SND_SOC_TPLG_TUPLE_TYPE_UUID: + ret = avs_parse_uuid_tokens(comp, object, parsers, count, tuples); + break; + case SND_SOC_TPLG_TUPLE_TYPE_STRING: + ret = avs_parse_string_tokens(comp, object, parsers, count, tuples); + break; + case SND_SOC_TPLG_TUPLE_TYPE_BOOL: + case SND_SOC_TPLG_TUPLE_TYPE_BYTE: + case SND_SOC_TPLG_TUPLE_TYPE_SHORT: + case SND_SOC_TPLG_TUPLE_TYPE_WORD: + ret = avs_parse_word_tokens(comp, object, parsers, count, tuples); + break; + default: + dev_err(comp->dev, "unknown token type %d\n", tuples->type); + ret = -EINVAL; + } + + if (ret) { + dev_err(comp->dev, "parsing %zu tokens of %d type failed: %d\n", + count, tuples->type, ret); + return ret; + } + + tuples = avs_tplg_vendor_array_next(tuples); + } + + return 0; +} + +#define AVS_DEFINE_PTR_PARSER(name, type, member) \ +static int \ +avs_parse_##name##_ptr(struct snd_soc_component *comp, void *elem, void *object, u32 offset) \ +{ \ + struct snd_soc_tplg_vendor_value_elem *tuple = elem; \ + struct avs_soc_component *acomp = to_avs_soc_component(comp); \ + type **val = (type **)(object + offset); \ + u32 idx; \ + \ + idx = le32_to_cpu(tuple->value); \ + if (idx >= acomp->tplg->num_##member) \ + return -EINVAL; \ + \ + *val = &acomp->tplg->member[idx]; \ + \ + return 0; \ +} + +AVS_DEFINE_PTR_PARSER(audio_format, struct avs_audio_format, fmts); +AVS_DEFINE_PTR_PARSER(modcfg_base, struct avs_tplg_modcfg_base, modcfgs_base); +AVS_DEFINE_PTR_PARSER(modcfg_ext, struct avs_tplg_modcfg_ext, modcfgs_ext); +AVS_DEFINE_PTR_PARSER(pplcfg, struct avs_tplg_pplcfg, pplcfgs); +AVS_DEFINE_PTR_PARSER(binding, struct avs_tplg_binding, bindings); + +static int +parse_audio_format_bitfield(struct snd_soc_component *comp, void *elem, void *object, u32 offset) +{ + struct snd_soc_tplg_vendor_value_elem *velem = elem; + struct avs_audio_format *audio_format = object; + + switch (offset) { + case AVS_TKN_AFMT_NUM_CHANNELS_U32: + audio_format->num_channels = le32_to_cpu(velem->value); + break; + case AVS_TKN_AFMT_VALID_BIT_DEPTH_U32: + audio_format->valid_bit_depth = le32_to_cpu(velem->value); + break; + case AVS_TKN_AFMT_SAMPLE_TYPE_U32: + audio_format->sample_type = le32_to_cpu(velem->value); + break; + } + + return 0; +} + +static int parse_link_formatted_string(struct snd_soc_component *comp, void *elem, + void *object, u32 offset) +{ + struct snd_soc_tplg_vendor_string_elem *tuple = elem; + struct snd_soc_acpi_mach *mach = dev_get_platdata(comp->card->dev); + char *val = (char *)((u8 *)object + offset); + + /* + * Dynamic naming - string formats, e.g.: ssp%d - supported only for + * topologies describing single device e.g.: an I2S codec on SSP0. + */ + if (hweight_long(mach->mach_params.i2s_link_mask) != 1) + return avs_parse_string_token(comp, elem, object, offset); + + snprintf(val, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, tuple->string, + __ffs(mach->mach_params.i2s_link_mask)); + + return 0; +} + +static int +parse_dictionary_header(struct snd_soc_component *comp, + struct snd_soc_tplg_vendor_array *tuples, + void **dict, u32 *num_entries, size_t entry_size, + u32 num_entries_token) +{ + struct snd_soc_tplg_vendor_value_elem *tuple; + + /* Dictionary header consists of single tuple - entry count. 
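The entry table itself is allocated up front based on that count.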
*/ + tuple = tuples->value; + if (le32_to_cpu(tuple->token) != num_entries_token) { + dev_err(comp->dev, "invalid dictionary header, expected: %d\n", + num_entries_token); + return -EINVAL; + } + + *num_entries = le32_to_cpu(tuple->value); + *dict = devm_kcalloc(comp->card->dev, *num_entries, entry_size, GFP_KERNEL); + if (!*dict) + return -ENOMEM; + + return 0; +} + +static int +parse_dictionary_entries(struct snd_soc_component *comp, + struct snd_soc_tplg_vendor_array *tuples, u32 block_size, + void *dict, u32 num_entries, size_t entry_size, + u32 entry_id_token, + const struct avs_tplg_token_parser *parsers, size_t num_parsers) +{ + void *pos = dict; + int i; + + for (i = 0; i < num_entries; i++) { + u32 esize; + int ret; + + ret = avs_tplg_vendor_entry_size(tuples, block_size, + entry_id_token, &esize); + if (ret) + return ret; + + ret = avs_parse_tokens(comp, pos, parsers, num_parsers, tuples, esize); + if (ret < 0) { + dev_err(comp->dev, "parse entry: %d of type: %d failed: %d\n", + i, entry_id_token, ret); + return ret; + } + + pos += entry_size; + block_size -= esize; + tuples = avs_tplg_vendor_array_at(tuples, esize); + } + + return 0; +} + +static int parse_dictionary(struct snd_soc_component *comp, + struct snd_soc_tplg_vendor_array *tuples, u32 block_size, + void **dict, u32 *num_entries, size_t entry_size, + u32 num_entries_token, u32 entry_id_token, + const struct avs_tplg_token_parser *parsers, size_t num_parsers) +{ + int ret; + + ret = parse_dictionary_header(comp, tuples, dict, num_entries, + entry_size, num_entries_token); + if (ret) + return ret; + + block_size -= le32_to_cpu(tuples->size); + /* With header parsed, move on to parsing entries. */ + tuples = avs_tplg_vendor_array_next(tuples); + + return parse_dictionary_entries(comp, tuples, block_size, *dict, + *num_entries, entry_size, + entry_id_token, parsers, num_parsers); +} + +static const struct avs_tplg_token_parser library_parsers[] = { + { + .token = AVS_TKN_LIBRARY_NAME_STRING, + .type = SND_SOC_TPLG_TUPLE_TYPE_STRING, + .offset = offsetof(struct avs_tplg_library, name), + .parse = avs_parse_string_token, + }, +}; + +static int avs_tplg_parse_libraries(struct snd_soc_component *comp, + struct snd_soc_tplg_vendor_array *tuples, u32 block_size) +{ + struct avs_soc_component *acomp = to_avs_soc_component(comp); + struct avs_tplg *tplg = acomp->tplg; + + return parse_dictionary(comp, tuples, block_size, (void **)&tplg->libs, + &tplg->num_libs, sizeof(*tplg->libs), + AVS_TKN_MANIFEST_NUM_LIBRARIES_U32, + AVS_TKN_LIBRARY_ID_U32, + library_parsers, ARRAY_SIZE(library_parsers)); +} + +static const struct avs_tplg_token_parser audio_format_parsers[] = { + { + .token = AVS_TKN_AFMT_SAMPLE_RATE_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_audio_format, sampling_freq), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_AFMT_BIT_DEPTH_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_audio_format, bit_depth), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_AFMT_CHANNEL_MAP_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_audio_format, channel_map), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_AFMT_CHANNEL_CFG_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_audio_format, channel_config), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_AFMT_INTERLEAVING_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_audio_format, interleaving), + 
.parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_AFMT_NUM_CHANNELS_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = AVS_TKN_AFMT_NUM_CHANNELS_U32, + .parse = parse_audio_format_bitfield, + }, + { + .token = AVS_TKN_AFMT_VALID_BIT_DEPTH_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = AVS_TKN_AFMT_VALID_BIT_DEPTH_U32, + .parse = parse_audio_format_bitfield, + }, + { + .token = AVS_TKN_AFMT_SAMPLE_TYPE_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = AVS_TKN_AFMT_SAMPLE_TYPE_U32, + .parse = parse_audio_format_bitfield, + }, +}; + +static int avs_tplg_parse_audio_formats(struct snd_soc_component *comp, + struct snd_soc_tplg_vendor_array *tuples, + u32 block_size) +{ + struct avs_soc_component *acomp = to_avs_soc_component(comp); + struct avs_tplg *tplg = acomp->tplg; + + return parse_dictionary(comp, tuples, block_size, (void **)&tplg->fmts, + &tplg->num_fmts, sizeof(*tplg->fmts), + AVS_TKN_MANIFEST_NUM_AFMTS_U32, + AVS_TKN_AFMT_ID_U32, + audio_format_parsers, ARRAY_SIZE(audio_format_parsers)); +} + +static const struct avs_tplg_token_parser modcfg_base_parsers[] = { + { + .token = AVS_TKN_MODCFG_BASE_CPC_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_base, cpc), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_BASE_IBS_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_base, ibs), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_BASE_OBS_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_base, obs), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_BASE_PAGES_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_base, is_pages), + .parse = avs_parse_word_token, + }, +}; + +static int avs_tplg_parse_modcfgs_base(struct snd_soc_component *comp, + struct snd_soc_tplg_vendor_array *tuples, + u32 block_size) +{ + struct avs_soc_component *acomp = to_avs_soc_component(comp); + struct avs_tplg *tplg = acomp->tplg; + + return parse_dictionary(comp, tuples, block_size, (void **)&tplg->modcfgs_base, + &tplg->num_modcfgs_base, sizeof(*tplg->modcfgs_base), + AVS_TKN_MANIFEST_NUM_MODCFGS_BASE_U32, + AVS_TKN_MODCFG_BASE_ID_U32, + modcfg_base_parsers, ARRAY_SIZE(modcfg_base_parsers)); +} + +static const struct avs_tplg_token_parser modcfg_ext_parsers[] = { + { + .token = AVS_TKN_MODCFG_EXT_TYPE_UUID, + .type = SND_SOC_TPLG_TUPLE_TYPE_UUID, + .offset = offsetof(struct avs_tplg_modcfg_ext, type), + .parse = avs_parse_uuid_token, + }, + { + .token = AVS_TKN_MODCFG_CPR_OUT_AFMT_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, copier.out_fmt), + .parse = avs_parse_audio_format_ptr, + }, + { + .token = AVS_TKN_MODCFG_CPR_FEATURE_MASK_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, copier.feature_mask), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_CPR_VINDEX_U8, + .type = SND_SOC_TPLG_TUPLE_TYPE_BYTE, + .offset = offsetof(struct avs_tplg_modcfg_ext, copier.vindex), + .parse = avs_parse_byte_token, + }, + { + .token = AVS_TKN_MODCFG_CPR_DMA_TYPE_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, copier.dma_type), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_CPR_DMABUFF_SIZE_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, copier.dma_buffer_size), + 
.parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_CPR_BLOB_FMT_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, copier.blob_fmt), + .parse = avs_parse_audio_format_ptr, + }, + { + .token = AVS_TKN_MODCFG_MICSEL_OUT_AFMT_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, micsel.out_fmt), + .parse = avs_parse_audio_format_ptr, + }, + { + .token = AVS_TKN_MODCFG_INTELWOV_CPC_LP_MODE_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, wov.cpc_lp_mode), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_SRC_OUT_FREQ_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, src.out_freq), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_MUX_REF_AFMT_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, mux.ref_fmt), + .parse = avs_parse_audio_format_ptr, + }, + { + .token = AVS_TKN_MODCFG_MUX_OUT_AFMT_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, mux.out_fmt), + .parse = avs_parse_audio_format_ptr, + }, + { + .token = AVS_TKN_MODCFG_AEC_REF_AFMT_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, aec.ref_fmt), + .parse = avs_parse_audio_format_ptr, + }, + { + .token = AVS_TKN_MODCFG_AEC_OUT_AFMT_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, aec.out_fmt), + .parse = avs_parse_audio_format_ptr, + }, + { + .token = AVS_TKN_MODCFG_AEC_CPC_LP_MODE_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, aec.cpc_lp_mode), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_ASRC_OUT_FREQ_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, asrc.out_freq), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_ASRC_MODE_U8, + .type = SND_SOC_TPLG_TUPLE_TYPE_BYTE, + .offset = offsetof(struct avs_tplg_modcfg_ext, asrc.mode), + .parse = avs_parse_byte_token, + }, + { + .token = AVS_TKN_MODCFG_ASRC_DISABLE_JITTER_U8, + .type = SND_SOC_TPLG_TUPLE_TYPE_BYTE, + .offset = offsetof(struct avs_tplg_modcfg_ext, asrc.disable_jitter_buffer), + .parse = avs_parse_byte_token, + }, + { + .token = AVS_TKN_MODCFG_UPDOWN_MIX_OUT_CHAN_CFG_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.out_channel_config), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_SELECT_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients_select), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_0_S32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[0]), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_1_S32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[1]), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_2_S32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[2]), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_3_S32, + .type = 
SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[3]), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_4_S32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[4]), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_5_S32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[5]), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_6_S32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[6]), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_7_S32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[7]), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_UPDOWN_MIX_CHAN_MAP_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.channel_map), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MODCFG_EXT_NUM_INPUT_PINS_U16, + .type = SND_SOC_TPLG_TUPLE_TYPE_SHORT, + .offset = offsetof(struct avs_tplg_modcfg_ext, generic.num_input_pins), + .parse = avs_parse_short_token, + }, + { + .token = AVS_TKN_MODCFG_EXT_NUM_OUTPUT_PINS_U16, + .type = SND_SOC_TPLG_TUPLE_TYPE_SHORT, + .offset = offsetof(struct avs_tplg_modcfg_ext, generic.num_output_pins), + .parse = avs_parse_short_token, + }, +}; + +static const struct avs_tplg_token_parser pin_format_parsers[] = { + { + .token = AVS_TKN_PIN_FMT_INDEX_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_pin_format, pin_index), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_PIN_FMT_IOBS_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_pin_format, iobs), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_PIN_FMT_AFMT_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_pin_format, fmt), + .parse = avs_parse_audio_format_ptr, + }, +}; + +static void +assign_copier_gtw_instance(struct snd_soc_component *comp, struct avs_tplg_modcfg_ext *cfg) +{ + struct snd_soc_acpi_mach *mach; + + if (!guid_equal(&cfg->type, &AVS_COPIER_MOD_UUID)) + return; + + /* Only I2S boards assign port instance in ->i2s_link_mask. */ + switch (cfg->copier.dma_type) { + case AVS_DMA_I2S_LINK_OUTPUT: + case AVS_DMA_I2S_LINK_INPUT: + break; + default: + return; + } + + mach = dev_get_platdata(comp->card->dev); + + /* Automatic assignment only when board describes single SSP. */ + if (hweight_long(mach->mach_params.i2s_link_mask) == 1 && !cfg->copier.vindex.i2s.instance) + cfg->copier.vindex.i2s.instance = __ffs(mach->mach_params.i2s_link_mask); +} + +static int avs_tplg_parse_modcfg_ext(struct snd_soc_component *comp, + struct avs_tplg_modcfg_ext *cfg, + struct snd_soc_tplg_vendor_array *tuples, + u32 block_size) +{ + u32 esize; + int ret; + + /* See where pin block starts. */ + ret = avs_tplg_vendor_entry_size(tuples, block_size, + AVS_TKN_PIN_FMT_INDEX_U32, &esize); + if (ret) + return ret; + + ret = avs_parse_tokens(comp, cfg, modcfg_ext_parsers, + ARRAY_SIZE(modcfg_ext_parsers), tuples, esize); + if (ret) + return ret; + + /* Update copier gateway based on board's i2s_link_mask. 
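On boards describing a single SSP, the port instance is filled in automatically whenever topology leaves it at zero.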
*/ + assign_copier_gtw_instance(comp, cfg); + + block_size -= esize; + /* Parse trailing in/out pin formats if any. */ + if (block_size) { + struct avs_tplg_pin_format *pins; + u32 num_pins; + + num_pins = cfg->generic.num_input_pins + cfg->generic.num_output_pins; + if (!num_pins) + return -EINVAL; + + pins = devm_kcalloc(comp->card->dev, num_pins, sizeof(*pins), GFP_KERNEL); + if (!pins) + return -ENOMEM; + + tuples = avs_tplg_vendor_array_at(tuples, esize); + ret = parse_dictionary_entries(comp, tuples, block_size, + pins, num_pins, sizeof(*pins), + AVS_TKN_PIN_FMT_INDEX_U32, + pin_format_parsers, + ARRAY_SIZE(pin_format_parsers)); + if (ret) + return ret; + cfg->generic.pin_fmts = pins; + } + + return 0; +} + +static int avs_tplg_parse_modcfgs_ext(struct snd_soc_component *comp, + struct snd_soc_tplg_vendor_array *tuples, + u32 block_size) +{ + struct avs_soc_component *acomp = to_avs_soc_component(comp); + struct avs_tplg *tplg = acomp->tplg; + int ret, i; + + ret = parse_dictionary_header(comp, tuples, (void **)&tplg->modcfgs_ext, + &tplg->num_modcfgs_ext, + sizeof(*tplg->modcfgs_ext), + AVS_TKN_MANIFEST_NUM_MODCFGS_EXT_U32); + if (ret) + return ret; + + block_size -= le32_to_cpu(tuples->size); + /* With header parsed, move on to parsing entries. */ + tuples = avs_tplg_vendor_array_next(tuples); + + for (i = 0; i < tplg->num_modcfgs_ext; i++) { + struct avs_tplg_modcfg_ext *cfg = &tplg->modcfgs_ext[i]; + u32 esize; + + ret = avs_tplg_vendor_entry_size(tuples, block_size, + AVS_TKN_MODCFG_EXT_ID_U32, &esize); + if (ret) + return ret; + + ret = avs_tplg_parse_modcfg_ext(comp, cfg, tuples, esize); + if (ret) + return ret; + + block_size -= esize; + tuples = avs_tplg_vendor_array_at(tuples, esize); + } + + return 0; +} + +static const struct avs_tplg_token_parser pplcfg_parsers[] = { + { + .token = AVS_TKN_PPLCFG_REQ_SIZE_U16, + .type = SND_SOC_TPLG_TUPLE_TYPE_SHORT, + .offset = offsetof(struct avs_tplg_pplcfg, req_size), + .parse = avs_parse_short_token, + }, + { + .token = AVS_TKN_PPLCFG_PRIORITY_U8, + .type = SND_SOC_TPLG_TUPLE_TYPE_BYTE, + .offset = offsetof(struct avs_tplg_pplcfg, priority), + .parse = avs_parse_byte_token, + }, + { + .token = AVS_TKN_PPLCFG_LOW_POWER_BOOL, + .type = SND_SOC_TPLG_TUPLE_TYPE_BOOL, + .offset = offsetof(struct avs_tplg_pplcfg, lp), + .parse = avs_parse_bool_token, + }, + { + .token = AVS_TKN_PPLCFG_ATTRIBUTES_U16, + .type = SND_SOC_TPLG_TUPLE_TYPE_SHORT, + .offset = offsetof(struct avs_tplg_pplcfg, attributes), + .parse = avs_parse_short_token, + }, + { + .token = AVS_TKN_PPLCFG_TRIGGER_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_pplcfg, trigger), + .parse = avs_parse_word_token, + }, +}; + +static int avs_tplg_parse_pplcfgs(struct snd_soc_component *comp, + struct snd_soc_tplg_vendor_array *tuples, + u32 block_size) +{ + struct avs_soc_component *acomp = to_avs_soc_component(comp); + struct avs_tplg *tplg = acomp->tplg; + + return parse_dictionary(comp, tuples, block_size, (void **)&tplg->pplcfgs, + &tplg->num_pplcfgs, sizeof(*tplg->pplcfgs), + AVS_TKN_MANIFEST_NUM_PPLCFGS_U32, + AVS_TKN_PPLCFG_ID_U32, + pplcfg_parsers, ARRAY_SIZE(pplcfg_parsers)); +} + +static const struct avs_tplg_token_parser binding_parsers[] = { + { + .token = AVS_TKN_BINDING_TARGET_TPLG_NAME_STRING, + .type = SND_SOC_TPLG_TUPLE_TYPE_STRING, + .offset = offsetof(struct avs_tplg_binding, target_tplg_name), + .parse = parse_link_formatted_string, + }, + { + .token = AVS_TKN_BINDING_TARGET_PATH_TMPL_ID_U32, + .type = 
SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_binding, target_path_tmpl_id), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_BINDING_TARGET_PPL_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_binding, target_ppl_id), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_BINDING_TARGET_MOD_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_binding, target_mod_id), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_BINDING_TARGET_MOD_PIN_U8, + .type = SND_SOC_TPLG_TUPLE_TYPE_BYTE, + .offset = offsetof(struct avs_tplg_binding, target_mod_pin), + .parse = avs_parse_byte_token, + }, + { + .token = AVS_TKN_BINDING_MOD_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_binding, mod_id), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_BINDING_MOD_PIN_U8, + .type = SND_SOC_TPLG_TUPLE_TYPE_BYTE, + .offset = offsetof(struct avs_tplg_binding, mod_pin), + .parse = avs_parse_byte_token, + }, + { + .token = AVS_TKN_BINDING_IS_SINK_U8, + .type = SND_SOC_TPLG_TUPLE_TYPE_BYTE, + .offset = offsetof(struct avs_tplg_binding, is_sink), + .parse = avs_parse_byte_token, + }, +}; + +static int avs_tplg_parse_bindings(struct snd_soc_component *comp, + struct snd_soc_tplg_vendor_array *tuples, + u32 block_size) +{ + struct avs_soc_component *acomp = to_avs_soc_component(comp); + struct avs_tplg *tplg = acomp->tplg; + + return parse_dictionary(comp, tuples, block_size, (void **)&tplg->bindings, + &tplg->num_bindings, sizeof(*tplg->bindings), + AVS_TKN_MANIFEST_NUM_BINDINGS_U32, + AVS_TKN_BINDING_ID_U32, + binding_parsers, ARRAY_SIZE(binding_parsers)); +} + +static const struct avs_tplg_token_parser module_parsers[] = { + { + .token = AVS_TKN_MOD_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_module, id), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_MOD_MODCFG_BASE_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_module, cfg_base), + .parse = avs_parse_modcfg_base_ptr, + }, + { + .token = AVS_TKN_MOD_IN_AFMT_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_module, in_fmt), + .parse = avs_parse_audio_format_ptr, + }, + { + .token = AVS_TKN_MOD_CORE_ID_U8, + .type = SND_SOC_TPLG_TUPLE_TYPE_BYTE, + .offset = offsetof(struct avs_tplg_module, core_id), + .parse = avs_parse_byte_token, + }, + { + .token = AVS_TKN_MOD_PROC_DOMAIN_U8, + .type = SND_SOC_TPLG_TUPLE_TYPE_BYTE, + .offset = offsetof(struct avs_tplg_module, domain), + .parse = avs_parse_byte_token, + }, + { + .token = AVS_TKN_MOD_MODCFG_EXT_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_module, cfg_ext), + .parse = avs_parse_modcfg_ext_ptr, + }, +}; + +static struct avs_tplg_module * +avs_tplg_module_create(struct snd_soc_component *comp, struct avs_tplg_pipeline *owner, + struct snd_soc_tplg_vendor_array *tuples, u32 block_size) +{ + struct avs_tplg_module *module; + int ret; + + module = devm_kzalloc(comp->card->dev, sizeof(*module), GFP_KERNEL); + if (!module) + return ERR_PTR(-ENOMEM); + + ret = avs_parse_tokens(comp, module, module_parsers, + ARRAY_SIZE(module_parsers), tuples, block_size); + if (ret < 0) + return ERR_PTR(ret); + + module->owner = owner; + INIT_LIST_HEAD(&module->node); + + return module; +} + +static const struct avs_tplg_token_parser pipeline_parsers[] = { + { + .token = AVS_TKN_PPL_ID_U32, + .type = 
SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_pipeline, id), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_PPL_PPLCFG_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_pipeline, cfg), + .parse = avs_parse_pplcfg_ptr, + }, + { + .token = AVS_TKN_PPL_NUM_BINDING_IDS_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_pipeline, num_bindings), + .parse = avs_parse_word_token, + }, +}; + +static const struct avs_tplg_token_parser bindings_parsers[] = { + { + .token = AVS_TKN_PPL_BINDING_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = 0, /* to treat pipeline->bindings as dictionary */ + .parse = avs_parse_binding_ptr, + }, +}; + +static struct avs_tplg_pipeline * +avs_tplg_pipeline_create(struct snd_soc_component *comp, struct avs_tplg_path *owner, + struct snd_soc_tplg_vendor_array *tuples, u32 block_size) +{ + struct avs_tplg_pipeline *pipeline; + u32 modblk_size, offset; + int ret; + + pipeline = devm_kzalloc(comp->card->dev, sizeof(*pipeline), GFP_KERNEL); + if (!pipeline) + return ERR_PTR(-ENOMEM); + + pipeline->owner = owner; + INIT_LIST_HEAD(&pipeline->mod_list); + + /* Pipeline header MUST be followed by at least one module. */ + ret = avs_tplg_vendor_array_lookup(tuples, block_size, + AVS_TKN_MOD_ID_U32, &offset); + if (!ret && !offset) + ret = -EINVAL; + if (ret) + return ERR_PTR(ret); + + /* Process header which precedes module sections. */ + ret = avs_parse_tokens(comp, pipeline, pipeline_parsers, + ARRAY_SIZE(pipeline_parsers), tuples, offset); + if (ret < 0) + return ERR_PTR(ret); + + block_size -= offset; + tuples = avs_tplg_vendor_array_at(tuples, offset); + + /* Optionally, binding sections follow module ones. */ + ret = avs_tplg_vendor_array_lookup_next(tuples, block_size, + AVS_TKN_PPL_BINDING_ID_U32, &offset); + if (ret) { + if (ret != -ENOENT) + return ERR_PTR(ret); + + /* Does header information match actual block layout? */ + if (pipeline->num_bindings) + return ERR_PTR(-EINVAL); + + modblk_size = block_size; + } else { + pipeline->bindings = devm_kcalloc(comp->card->dev, pipeline->num_bindings, + sizeof(*pipeline->bindings), GFP_KERNEL); + if (!pipeline->bindings) + return ERR_PTR(-ENOMEM); + + modblk_size = offset; + } + + block_size -= modblk_size; + do { + struct avs_tplg_module *module; + u32 esize; + + ret = avs_tplg_vendor_entry_size(tuples, modblk_size, + AVS_TKN_MOD_ID_U32, &esize); + if (ret) + return ERR_PTR(ret); + + module = avs_tplg_module_create(comp, pipeline, tuples, esize); + if (IS_ERR(module)) { + dev_err(comp->dev, "parse module failed: %ld\n", + PTR_ERR(module)); + return ERR_CAST(module); + } + + list_add_tail(&module->node, &pipeline->mod_list); + modblk_size -= esize; + tuples = avs_tplg_vendor_array_at(tuples, esize); + } while (modblk_size > 0); + + /* What's left is optional range of bindings. 
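Their count is already known from the pipeline header, so they are parsed as a fixed-size dictionary.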
*/ + ret = parse_dictionary_entries(comp, tuples, block_size, pipeline->bindings, + pipeline->num_bindings, sizeof(*pipeline->bindings), + AVS_TKN_PPL_BINDING_ID_U32, + bindings_parsers, ARRAY_SIZE(bindings_parsers)); + if (ret) + return ERR_PTR(ret); + + return pipeline; +} + +static const struct avs_tplg_token_parser path_parsers[] = { + { + .token = AVS_TKN_PATH_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_path, id), + .parse = avs_parse_word_token, + }, + { + .token = AVS_TKN_PATH_FE_FMT_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_path, fe_fmt), + .parse = avs_parse_audio_format_ptr, + }, + { + .token = AVS_TKN_PATH_BE_FMT_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_path, be_fmt), + .parse = avs_parse_audio_format_ptr, + }, +}; + +static struct avs_tplg_path * +avs_tplg_path_create(struct snd_soc_component *comp, struct avs_tplg_path_template *owner, + struct snd_soc_tplg_vendor_array *tuples, u32 block_size, + const struct avs_tplg_token_parser *parsers, u32 num_parsers) +{ + struct avs_tplg_pipeline *pipeline; + struct avs_tplg_path *path; + u32 offset; + int ret; + + path = devm_kzalloc(comp->card->dev, sizeof(*path), GFP_KERNEL); + if (!path) + return ERR_PTR(-ENOMEM); + + path->owner = owner; + INIT_LIST_HEAD(&path->ppl_list); + INIT_LIST_HEAD(&path->node); + + /* Path header MAY be followed by one or more pipelines. */ + ret = avs_tplg_vendor_array_lookup(tuples, block_size, + AVS_TKN_PPL_ID_U32, &offset); + if (ret == -ENOENT) + offset = block_size; + else if (ret) + return ERR_PTR(ret); + else if (!offset) + return ERR_PTR(-EINVAL); + + /* Process header which precedes pipeline sections. */ + ret = avs_parse_tokens(comp, path, parsers, num_parsers, tuples, offset); + if (ret < 0) + return ERR_PTR(ret); + + block_size -= offset; + tuples = avs_tplg_vendor_array_at(tuples, offset); + while (block_size > 0) { + u32 esize; + + ret = avs_tplg_vendor_entry_size(tuples, block_size, + AVS_TKN_PPL_ID_U32, &esize); + if (ret) + return ERR_PTR(ret); + + pipeline = avs_tplg_pipeline_create(comp, path, tuples, esize); + if (IS_ERR(pipeline)) { + dev_err(comp->dev, "parse pipeline failed: %ld\n", + PTR_ERR(pipeline)); + return ERR_CAST(pipeline); + } + + list_add_tail(&pipeline->node, &path->ppl_list); + block_size -= esize; + tuples = avs_tplg_vendor_array_at(tuples, esize); + } + + return path; +} + +static const struct avs_tplg_token_parser path_tmpl_parsers[] = { + { + .token = AVS_TKN_PATH_TMPL_ID_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg_path_template, id), + .parse = avs_parse_word_token, + }, +}; + +static int parse_path_template(struct snd_soc_component *comp, + struct snd_soc_tplg_vendor_array *tuples, u32 block_size, + struct avs_tplg_path_template *template, + const struct avs_tplg_token_parser *tmpl_tokens, u32 num_tmpl_tokens, + const struct avs_tplg_token_parser *path_tokens, u32 num_path_tokens) +{ + struct avs_tplg_path *path; + u32 offset; + int ret; + + /* Path template header MUST be followed by at least one path variant. */ + ret = avs_tplg_vendor_array_lookup(tuples, block_size, + AVS_TKN_PATH_ID_U32, &offset); + if (ret) + return ret; + + /* Process header which precedes path variants sections. 
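The path variants that follow are then parsed one by one until the block is exhausted.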
*/ + ret = avs_parse_tokens(comp, template, tmpl_tokens, num_tmpl_tokens, tuples, offset); + if (ret < 0) + return ret; + + block_size -= offset; + tuples = avs_tplg_vendor_array_at(tuples, offset); + do { + u32 esize; + + ret = avs_tplg_vendor_entry_size(tuples, block_size, + AVS_TKN_PATH_ID_U32, &esize); + if (ret) + return ret; + + path = avs_tplg_path_create(comp, template, tuples, esize, path_tokens, + num_path_tokens); + if (IS_ERR(path)) { + dev_err(comp->dev, "parse path failed: %ld\n", PTR_ERR(path)); + return PTR_ERR(path); + } + + list_add_tail(&path->node, &template->path_list); + block_size -= esize; + tuples = avs_tplg_vendor_array_at(tuples, esize); + } while (block_size > 0); + + return 0; +} + +static struct avs_tplg_path_template * +avs_tplg_path_template_create(struct snd_soc_component *comp, struct avs_tplg *owner, + struct snd_soc_tplg_vendor_array *tuples, u32 block_size) +{ + struct avs_tplg_path_template *template; + int ret; + + template = devm_kzalloc(comp->card->dev, sizeof(*template), GFP_KERNEL); + if (!template) + return ERR_PTR(-ENOMEM); + + template->owner = owner; /* Used to access component tplg is assigned to. */ + INIT_LIST_HEAD(&template->path_list); + INIT_LIST_HEAD(&template->node); + + ret = parse_path_template(comp, tuples, block_size, template, path_tmpl_parsers, + ARRAY_SIZE(path_tmpl_parsers), path_parsers, + ARRAY_SIZE(path_parsers)); + if (ret) + return ERR_PTR(ret); + + return template; +} + +static int avs_route_load(struct snd_soc_component *comp, int index, + struct snd_soc_dapm_route *route) +{ + struct snd_soc_acpi_mach *mach = dev_get_platdata(comp->card->dev); + size_t len = SNDRV_CTL_ELEM_ID_NAME_MAXLEN; + char buf[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; + u32 port; + + /* See parse_link_formatted_string() for dynamic naming when(s). */ + if (hweight_long(mach->mach_params.i2s_link_mask) == 1) { + port = __ffs(mach->mach_params.i2s_link_mask); + + snprintf(buf, len, route->source, port); + strncpy((char *)route->source, buf, len); + snprintf(buf, len, route->sink, port); + strncpy((char *)route->sink, buf, len); + if (route->control) { + snprintf(buf, len, route->control, port); + strncpy((char *)route->control, buf, len); + } + } + + return 0; +} + +static int avs_widget_load(struct snd_soc_component *comp, int index, + struct snd_soc_dapm_widget *w, + struct snd_soc_tplg_dapm_widget *dw) +{ + struct snd_soc_acpi_mach *mach; + struct avs_tplg_path_template *template; + struct avs_soc_component *acomp = to_avs_soc_component(comp); + struct avs_tplg *tplg; + + if (!le32_to_cpu(dw->priv.size)) + return 0; + + tplg = acomp->tplg; + mach = dev_get_platdata(comp->card->dev); + + /* See parse_link_formatted_string() for dynamic naming when(s). 
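On single-SSP boards, any format specifier in route source, sink and control names is expanded with the SSP port number.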
*/ + if (hweight_long(mach->mach_params.i2s_link_mask) == 1) { + kfree(w->name); + /* w->name is freed later by soc_tplg_dapm_widget_create() */ + w->name = kasprintf(GFP_KERNEL, dw->name, __ffs(mach->mach_params.i2s_link_mask)); + if (!w->name) + return -ENOMEM; + } + + template = avs_tplg_path_template_create(comp, tplg, dw->priv.array, + le32_to_cpu(dw->priv.size)); + if (IS_ERR(template)) { + dev_err(comp->dev, "widget %s load failed: %ld\n", dw->name, + PTR_ERR(template)); + return PTR_ERR(template); + } + + w->priv = template; /* link path information to widget */ + list_add_tail(&template->node, &tplg->path_tmpl_list); + return 0; +} + +static int avs_dai_load(struct snd_soc_component *comp, int index, + struct snd_soc_dai_driver *dai_drv, struct snd_soc_tplg_pcm *pcm, + struct snd_soc_dai *dai) +{ + if (pcm) + dai_drv->ops = &avs_dai_fe_ops; + return 0; +} + +static int avs_link_load(struct snd_soc_component *comp, int index, struct snd_soc_dai_link *link, + struct snd_soc_tplg_link_config *cfg) +{ + if (!link->no_pcm) { + /* Stream control handled by IPCs. */ + link->nonatomic = true; + + /* Open LINK (BE) pipes last and close them first to prevent xruns. */ + link->trigger[0] = SND_SOC_DPCM_TRIGGER_PRE; + link->trigger[1] = SND_SOC_DPCM_TRIGGER_PRE; + } + + return 0; +} + +static const struct avs_tplg_token_parser manifest_parsers[] = { + { + .token = AVS_TKN_MANIFEST_NAME_STRING, + .type = SND_SOC_TPLG_TUPLE_TYPE_STRING, + .offset = offsetof(struct avs_tplg, name), + .parse = parse_link_formatted_string, + }, + { + .token = AVS_TKN_MANIFEST_VERSION_U32, + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, + .offset = offsetof(struct avs_tplg, version), + .parse = avs_parse_word_token, + }, +}; + +static int avs_manifest(struct snd_soc_component *comp, int index, + struct snd_soc_tplg_manifest *manifest) +{ + struct snd_soc_tplg_vendor_array *tuples = manifest->priv.array; + struct avs_soc_component *acomp = to_avs_soc_component(comp); + size_t remaining = le32_to_cpu(manifest->priv.size); + u32 offset; + int ret; + + ret = avs_tplg_vendor_array_lookup(tuples, remaining, + AVS_TKN_MANIFEST_NUM_LIBRARIES_U32, &offset); + /* Manifest MUST begin with a header. */ + if (!ret && !offset) + ret = -EINVAL; + if (ret) { + dev_err(comp->dev, "incorrect manifest format: %d\n", ret); + return ret; + } + + /* Process header which precedes any of the dictionaries. */ + ret = avs_parse_tokens(comp, acomp->tplg, manifest_parsers, + ARRAY_SIZE(manifest_parsers), tuples, offset); + if (ret < 0) + return ret; + + remaining -= offset; + tuples = avs_tplg_vendor_array_at(tuples, offset); + + ret = avs_tplg_vendor_array_lookup(tuples, remaining, + AVS_TKN_MANIFEST_NUM_AFMTS_U32, &offset); + if (ret) { + dev_err(comp->dev, "audio formats lookup failed: %d\n", ret); + return ret; + } + + /* Libraries dictionary. */ + ret = avs_tplg_parse_libraries(comp, tuples, offset); + if (ret < 0) + return ret; + + remaining -= offset; + tuples = avs_tplg_vendor_array_at(tuples, offset); + + ret = avs_tplg_vendor_array_lookup(tuples, remaining, + AVS_TKN_MANIFEST_NUM_MODCFGS_BASE_U32, &offset); + if (ret) { + dev_err(comp->dev, "modcfgs_base lookup failed: %d\n", ret); + return ret; + } + + /* Audio formats dictionary. 
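It spans up to the modcfgs_base marker located just above.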
*/ + ret = avs_tplg_parse_audio_formats(comp, tuples, offset); + if (ret < 0) + return ret; + + remaining -= offset; + tuples = avs_tplg_vendor_array_at(tuples, offset); + + ret = avs_tplg_vendor_array_lookup(tuples, remaining, + AVS_TKN_MANIFEST_NUM_MODCFGS_EXT_U32, &offset); + if (ret) { + dev_err(comp->dev, "modcfgs_ext lookup failed: %d\n", ret); + return ret; + } + + /* Module configs-base dictionary. */ + ret = avs_tplg_parse_modcfgs_base(comp, tuples, offset); + if (ret < 0) + return ret; + + remaining -= offset; + tuples = avs_tplg_vendor_array_at(tuples, offset); + + ret = avs_tplg_vendor_array_lookup(tuples, remaining, + AVS_TKN_MANIFEST_NUM_PPLCFGS_U32, &offset); + if (ret) { + dev_err(comp->dev, "pplcfgs lookup failed: %d\n", ret); + return ret; + } + + /* Module configs-ext dictionary. */ + ret = avs_tplg_parse_modcfgs_ext(comp, tuples, offset); + if (ret < 0) + return ret; + + remaining -= offset; + tuples = avs_tplg_vendor_array_at(tuples, offset); + + ret = avs_tplg_vendor_array_lookup(tuples, remaining, + AVS_TKN_MANIFEST_NUM_BINDINGS_U32, &offset); + if (ret) { + dev_err(comp->dev, "bindings lookup failed: %d\n", ret); + return ret; + } + + /* Pipeline configs dictionary. */ + ret = avs_tplg_parse_pplcfgs(comp, tuples, offset); + if (ret < 0) + return ret; + + remaining -= offset; + tuples = avs_tplg_vendor_array_at(tuples, offset); + + /* Bindings dictionary. */ + return avs_tplg_parse_bindings(comp, tuples, remaining); +} + +static struct snd_soc_tplg_ops avs_tplg_ops = { + .dapm_route_load = avs_route_load, + .widget_load = avs_widget_load, + .dai_load = avs_dai_load, + .link_load = avs_link_load, + .manifest = avs_manifest, +}; + +struct avs_tplg *avs_tplg_new(struct snd_soc_component *comp) +{ + struct avs_tplg *tplg; + + tplg = devm_kzalloc(comp->card->dev, sizeof(*tplg), GFP_KERNEL); + if (!tplg) + return NULL; + + tplg->comp = comp; + INIT_LIST_HEAD(&tplg->path_tmpl_list); + + return tplg; +} + +int avs_load_topology(struct snd_soc_component *comp, const char *filename) +{ + const struct firmware *fw; + int ret; + + ret = request_firmware(&fw, filename, comp->dev); + if (ret < 0) { + dev_err(comp->dev, "request topology \"%s\" failed: %d\n", filename, ret); + return ret; + } + + ret = snd_soc_tplg_component_load(comp, &avs_tplg_ops, fw); + if (ret < 0) + dev_err(comp->dev, "load topology \"%s\" failed: %d\n", filename, ret); + + release_firmware(fw); + return ret; +} + +int avs_remove_topology(struct snd_soc_component *comp) +{ + snd_soc_tplg_component_remove(comp); + + return 0; +} diff --git a/sound/soc/intel/avs/topology.h b/sound/soc/intel/avs/topology.h new file mode 100644 index 000000000..68e5f6312 --- /dev/null +++ b/sound/soc/intel/avs/topology.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright(c) 2021 Intel Corporation. All rights reserved. 
+ * + * Authors: Cezary Rojewski + * Amadeusz Slawinski + */ + +#ifndef __SOUND_SOC_INTEL_AVS_TPLG_H +#define __SOUND_SOC_INTEL_AVS_TPLG_H + +#include +#include "messages.h" + +#define INVALID_OBJECT_ID UINT_MAX + +struct snd_soc_component; + +struct avs_tplg { + char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; + u32 version; + struct snd_soc_component *comp; + + struct avs_tplg_library *libs; + u32 num_libs; + struct avs_audio_format *fmts; + u32 num_fmts; + struct avs_tplg_modcfg_base *modcfgs_base; + u32 num_modcfgs_base; + struct avs_tplg_modcfg_ext *modcfgs_ext; + u32 num_modcfgs_ext; + struct avs_tplg_pplcfg *pplcfgs; + u32 num_pplcfgs; + struct avs_tplg_binding *bindings; + u32 num_bindings; + + struct list_head path_tmpl_list; +}; + +struct avs_tplg_library { + char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; +}; + +/* Matches header of struct avs_mod_cfg_base. */ +struct avs_tplg_modcfg_base { + u32 cpc; + u32 ibs; + u32 obs; + u32 is_pages; +}; + +struct avs_tplg_pin_format { + u32 pin_index; + u32 iobs; + struct avs_audio_format *fmt; +}; + +struct avs_tplg_modcfg_ext { + guid_t type; + + union { + struct { + u16 num_input_pins; + u16 num_output_pins; + struct avs_tplg_pin_format *pin_fmts; + } generic; + struct { + struct avs_audio_format *out_fmt; + struct avs_audio_format *blob_fmt; /* optional override */ + u32 feature_mask; + union avs_virtual_index vindex; + u32 dma_type; + u32 dma_buffer_size; + u32 config_length; + /* config_data part of priv data */ + } copier; + struct { + u32 out_channel_config; + u32 coefficients_select; + s32 coefficients[AVS_CHANNELS_MAX]; + u32 channel_map; + } updown_mix; + struct { + u32 out_freq; + } src; + struct { + u32 out_freq; + u8 mode; + u8 disable_jitter_buffer; + } asrc; + struct { + u32 cpc_lp_mode; + } wov; + struct { + struct avs_audio_format *ref_fmt; + struct avs_audio_format *out_fmt; + u32 cpc_lp_mode; + } aec; + struct { + struct avs_audio_format *ref_fmt; + struct avs_audio_format *out_fmt; + } mux; + struct { + struct avs_audio_format *out_fmt; + } micsel; + }; +}; + +/* Specifies path behaviour during PCM ->trigger(START) command. */ +enum avs_tplg_trigger { + AVS_TPLG_TRIGGER_AUTO = 0, +}; + +struct avs_tplg_pplcfg { + u16 req_size; + u8 priority; + bool lp; + u16 attributes; + enum avs_tplg_trigger trigger; +}; + +struct avs_tplg_binding { + char target_tplg_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; + u32 target_path_tmpl_id; + u32 target_ppl_id; + u32 target_mod_id; + u8 target_mod_pin; + u32 mod_id; + u8 mod_pin; + u8 is_sink; +}; + +struct avs_tplg_path_template_id { + u32 id; + char tplg_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; +}; + +struct avs_tplg_path_template { + u32 id; + + struct list_head path_list; + + struct avs_tplg *owner; + /* Driver path templates management. */ + struct list_head node; +}; + +struct avs_tplg_path { + u32 id; + + /* Path format requirements. */ + struct avs_audio_format *fe_fmt; + struct avs_audio_format *be_fmt; + + struct list_head ppl_list; + + struct avs_tplg_path_template *owner; + /* Path template path-variants management. */ + struct list_head node; +}; + +struct avs_tplg_pipeline { + u32 id; + + struct avs_tplg_pplcfg *cfg; + struct avs_tplg_binding **bindings; + u32 num_bindings; + struct list_head mod_list; + + struct avs_tplg_path *owner; + /* Path pipelines management. 
*/ + struct list_head node; +}; + +struct avs_tplg_module { + u32 id; + + struct avs_tplg_modcfg_base *cfg_base; + struct avs_audio_format *in_fmt; + u8 core_id; + u8 domain; + struct avs_tplg_modcfg_ext *cfg_ext; + + struct avs_tplg_pipeline *owner; + /* Pipeline modules management. */ + struct list_head node; +}; + +struct avs_tplg *avs_tplg_new(struct snd_soc_component *comp); + +int avs_load_topology(struct snd_soc_component *comp, const char *filename); +int avs_remove_topology(struct snd_soc_component *comp); + +#endif diff --git a/sound/soc/intel/avs/trace.c b/sound/soc/intel/avs/trace.c new file mode 100644 index 000000000..fcb7cfc82 --- /dev/null +++ b/sound/soc/intel/avs/trace.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Author: Cezary Rojewski +// Amadeusz Slawinski +// + +#include + +#define CREATE_TRACE_POINTS +#include "trace.h" + +#define BYTES_PER_LINE 16 +#define MAX_CHUNK_SIZE ((PAGE_SIZE - 150) /* Place for trace header */ \ + / (2 * BYTES_PER_LINE + 4) /* chars per line */ \ + * BYTES_PER_LINE) + +void trace_avs_msg_payload(const void *data, size_t size) +{ + size_t remaining = size; + size_t offset = 0; + + while (remaining > 0) { + u32 chunk; + + chunk = min(remaining, (size_t)MAX_CHUNK_SIZE); + trace_avs_ipc_msg_payload(data, chunk, offset, size); + + remaining -= chunk; + offset += chunk; + } +} diff --git a/sound/soc/intel/avs/trace.h b/sound/soc/intel/avs/trace.h new file mode 100644 index 000000000..855b06bb1 --- /dev/null +++ b/sound/soc/intel/avs/trace.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM intel_avs + +#if !defined(_TRACE_INTEL_AVS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_INTEL_AVS_H + +#include +#include + +TRACE_EVENT(avs_dsp_core_op, + + TP_PROTO(unsigned int reg, unsigned int mask, const char *op, bool flag), + + TP_ARGS(reg, mask, op, flag), + + TP_STRUCT__entry( + __field(unsigned int, reg ) + __field(unsigned int, mask ) + __string(op, op ) + __field(bool, flag ) + ), + + TP_fast_assign( + __entry->reg = reg; + __entry->mask = mask; + __assign_str(op, op); + __entry->flag = flag; + ), + + TP_printk("%s: %d, core mask: 0x%X, prev state: 0x%08X", + __get_str(op), __entry->flag, __entry->mask, __entry->reg) +); + +#ifndef __TRACE_INTEL_AVS_TRACE_HELPER +#define __TRACE_INTEL_AVS_TRACE_HELPER + +void trace_avs_msg_payload(const void *data, size_t size); + +#define trace_avs_request(msg, fwregs) \ +({ \ + trace_avs_ipc_request_msg((msg)->header, fwregs); \ + trace_avs_msg_payload((msg)->data, (msg)->size); \ +}) + +#define trace_avs_reply(msg, fwregs) \ +({ \ + trace_avs_ipc_reply_msg((msg)->header, fwregs); \ + trace_avs_msg_payload((msg)->data, (msg)->size); \ +}) + +#define trace_avs_notify(msg, fwregs) \ +({ \ + trace_avs_ipc_notify_msg((msg)->header, fwregs); \ + trace_avs_msg_payload((msg)->data, (msg)->size); \ +}) +#endif + +DECLARE_EVENT_CLASS(avs_ipc_msg_hdr, + + TP_PROTO(u64 header, u64 fwregs), + + TP_ARGS(header, fwregs), + + TP_STRUCT__entry( + __field(u64, header) + __field(u64, fwregs) + ), + + TP_fast_assign( + __entry->header = header; + __entry->fwregs = fwregs; + ), + + TP_printk("primary: 0x%08X, extension: 0x%08X,\n" + "fwstatus: 0x%08X, fwerror: 0x%08X", + lower_32_bits(__entry->header), upper_32_bits(__entry->header), + lower_32_bits(__entry->fwregs), upper_32_bits(__entry->fwregs)) +); + +DEFINE_EVENT(avs_ipc_msg_hdr, avs_ipc_request_msg, + TP_PROTO(u64 header, 
u64 fwregs), + TP_ARGS(header, fwregs) +); + +DEFINE_EVENT(avs_ipc_msg_hdr, avs_ipc_reply_msg, + TP_PROTO(u64 header, u64 fwregs), + TP_ARGS(header, fwregs) +); + +DEFINE_EVENT(avs_ipc_msg_hdr, avs_ipc_notify_msg, + TP_PROTO(u64 header, u64 fwregs), + TP_ARGS(header, fwregs) +); + +TRACE_EVENT_CONDITION(avs_ipc_msg_payload, + + TP_PROTO(const u8 *data, size_t size, size_t offset, size_t total), + + TP_ARGS(data, size, offset, total), + + TP_CONDITION(data && size), + + TP_STRUCT__entry( + __dynamic_array(u8, buf, size ) + __field(size_t, offset ) + __field(size_t, pos ) + __field(size_t, total ) + ), + + TP_fast_assign( + memcpy(__get_dynamic_array(buf), data + offset, size); + __entry->offset = offset; + __entry->pos = offset + size; + __entry->total = total; + ), + + TP_printk("range %zu-%zu out of %zu bytes%s", + __entry->offset, __entry->pos, __entry->total, + __print_hex_dump("", DUMP_PREFIX_NONE, 16, 4, + __get_dynamic_array(buf), + __get_dynamic_array_len(buf), false)) +); + +TRACE_EVENT(avs_d0ix, + + TP_PROTO(const char *op, bool proceed, u64 header), + + TP_ARGS(op, proceed, header), + + TP_STRUCT__entry( + __string(op, op ) + __field(bool, proceed ) + __field(u64, header ) + ), + + TP_fast_assign( + __assign_str(op, op); + __entry->proceed = proceed; + __entry->header = header; + ), + + TP_printk("%s%s for request: 0x%08X 0x%08X", + __entry->proceed ? "" : "ignore ", __get_str(op), + lower_32_bits(__entry->header), upper_32_bits(__entry->header)) +); + +#endif /* _TRACE_INTEL_AVS_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE trace +#include diff --git a/sound/soc/intel/avs/utils.c b/sound/soc/intel/avs/utils.c new file mode 100644 index 000000000..13611dee9 --- /dev/null +++ b/sound/soc/intel/avs/utils.c @@ -0,0 +1,324 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright(c) 2021-2022 Intel Corporation. All rights reserved. +// +// Authors: Cezary Rojewski +// Amadeusz Slawinski +// + +#include +#include +#include +#include "avs.h" +#include "messages.h" + +/* Caller responsible for holding adev->modres_mutex. */ +static int avs_module_entry_index(struct avs_dev *adev, const guid_t *uuid) +{ + int i; + + for (i = 0; i < adev->mods_info->count; i++) { + struct avs_module_entry *module; + + module = &adev->mods_info->entries[i]; + if (guid_equal(&module->uuid, uuid)) + return i; + } + + return -ENOENT; +} + +/* Caller responsible for holding adev->modres_mutex. */ +static int avs_module_id_entry_index(struct avs_dev *adev, u32 module_id) +{ + int i; + + for (i = 0; i < adev->mods_info->count; i++) { + struct avs_module_entry *module; + + module = &adev->mods_info->entries[i]; + if (module->module_id == module_id) + return i; + } + + return -ENOENT; +} + +int avs_get_module_entry(struct avs_dev *adev, const guid_t *uuid, struct avs_module_entry *entry) +{ + int idx; + + mutex_lock(&adev->modres_mutex); + + idx = avs_module_entry_index(adev, uuid); + if (idx >= 0) + memcpy(entry, &adev->mods_info->entries[idx], sizeof(*entry)); + + mutex_unlock(&adev->modres_mutex); + return (idx < 0) ? idx : 0; +} + +int avs_get_module_id_entry(struct avs_dev *adev, u32 module_id, struct avs_module_entry *entry) +{ + int idx; + + mutex_lock(&adev->modres_mutex); + + idx = avs_module_id_entry_index(adev, module_id); + if (idx >= 0) + memcpy(entry, &adev->mods_info->entries[idx], sizeof(*entry)); + + mutex_unlock(&adev->modres_mutex); + return (idx < 0) ? 
idx : 0; +} + +int avs_get_module_id(struct avs_dev *adev, const guid_t *uuid) +{ + struct avs_module_entry module; + int ret; + + ret = avs_get_module_entry(adev, uuid, &module); + return !ret ? module.module_id : -ENOENT; +} + +bool avs_is_module_ida_empty(struct avs_dev *adev, u32 module_id) +{ + bool ret = false; + int idx; + + mutex_lock(&adev->modres_mutex); + + idx = avs_module_id_entry_index(adev, module_id); + if (idx >= 0) + ret = ida_is_empty(adev->mod_idas[idx]); + + mutex_unlock(&adev->modres_mutex); + return ret; +} + +/* Caller responsible for holding adev->modres_mutex. */ +static void avs_module_ida_destroy(struct avs_dev *adev) +{ + int i = adev->mods_info ? adev->mods_info->count : 0; + + while (i--) { + ida_destroy(adev->mod_idas[i]); + kfree(adev->mod_idas[i]); + } + kfree(adev->mod_idas); +} + +/* Caller responsible for holding adev->modres_mutex. */ +static int +avs_module_ida_alloc(struct avs_dev *adev, struct avs_mods_info *newinfo, bool purge) +{ + struct avs_mods_info *oldinfo = adev->mods_info; + struct ida **ida_ptrs; + u32 tocopy_count = 0; + int i; + + if (!purge && oldinfo) { + if (oldinfo->count >= newinfo->count) + dev_warn(adev->dev, "refreshing %d modules info with %d\n", + oldinfo->count, newinfo->count); + tocopy_count = oldinfo->count; + } + + ida_ptrs = kcalloc(newinfo->count, sizeof(*ida_ptrs), GFP_KERNEL); + if (!ida_ptrs) + return -ENOMEM; + + if (tocopy_count) + memcpy(ida_ptrs, adev->mod_idas, tocopy_count * sizeof(*ida_ptrs)); + + for (i = tocopy_count; i < newinfo->count; i++) { + ida_ptrs[i] = kzalloc(sizeof(**ida_ptrs), GFP_KERNEL); + if (!ida_ptrs[i]) { + while (i--) + kfree(ida_ptrs[i]); + + kfree(ida_ptrs); + return -ENOMEM; + } + + ida_init(ida_ptrs[i]); + } + + /* If old elements have been reused, don't wipe them. */ + if (tocopy_count) + kfree(adev->mod_idas); + else + avs_module_ida_destroy(adev); + + adev->mod_idas = ida_ptrs; + return 0; +} + +int avs_module_info_init(struct avs_dev *adev, bool purge) +{ + struct avs_mods_info *info; + int ret; + + ret = avs_ipc_get_modules_info(adev, &info); + if (ret) + return AVS_IPC_RET(ret); + + mutex_lock(&adev->modres_mutex); + + ret = avs_module_ida_alloc(adev, info, purge); + if (ret < 0) { + dev_err(adev->dev, "initialize module idas failed: %d\n", ret); + goto exit; + } + + /* Refresh current information with newly received table. 
*/ + kfree(adev->mods_info); + adev->mods_info = info; + +exit: + mutex_unlock(&adev->modres_mutex); + return ret; +} + +void avs_module_info_free(struct avs_dev *adev) +{ + mutex_lock(&adev->modres_mutex); + + avs_module_ida_destroy(adev); + kfree(adev->mods_info); + adev->mods_info = NULL; + + mutex_unlock(&adev->modres_mutex); +} + +int avs_module_id_alloc(struct avs_dev *adev, u16 module_id) +{ + int ret, idx, max_id; + + mutex_lock(&adev->modres_mutex); + + idx = avs_module_id_entry_index(adev, module_id); + if (idx == -ENOENT) { + dev_err(adev->dev, "invalid module id: %d", module_id); + ret = -EINVAL; + goto exit; + } + max_id = adev->mods_info->entries[idx].instance_max_count - 1; + ret = ida_alloc_max(adev->mod_idas[idx], max_id, GFP_KERNEL); +exit: + mutex_unlock(&adev->modres_mutex); + return ret; +} + +void avs_module_id_free(struct avs_dev *adev, u16 module_id, u8 instance_id) +{ + int idx; + + mutex_lock(&adev->modres_mutex); + + idx = avs_module_id_entry_index(adev, module_id); + if (idx == -ENOENT) { + dev_err(adev->dev, "invalid module id: %d", module_id); + goto exit; + } + + ida_free(adev->mod_idas[idx], instance_id); +exit: + mutex_unlock(&adev->modres_mutex); +} + +/* + * Once driver loads FW it should keep it in memory, so we are not affected + * by FW removal from filesystem or even worse by loading different FW at + * runtime suspend/resume. + */ +int avs_request_firmware(struct avs_dev *adev, const struct firmware **fw_p, const char *name) +{ + struct avs_fw_entry *entry; + int ret; + + /* first check in list if it is not already loaded */ + list_for_each_entry(entry, &adev->fw_list, node) { + if (!strcmp(name, entry->name)) { + *fw_p = entry->fw; + return 0; + } + } + + /* FW is not loaded, let's load it now and add to the list */ + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->name = kstrdup(name, GFP_KERNEL); + if (!entry->name) { + kfree(entry); + return -ENOMEM; + } + + ret = request_firmware(&entry->fw, name, adev->dev); + if (ret < 0) { + kfree(entry->name); + kfree(entry); + return ret; + } + + *fw_p = entry->fw; + + list_add_tail(&entry->node, &adev->fw_list); + + return 0; +} + +/* + * Release single FW entry, used to handle errors in functions calling + * avs_request_firmware() + */ +void avs_release_last_firmware(struct avs_dev *adev) +{ + struct avs_fw_entry *entry; + + entry = list_last_entry(&adev->fw_list, typeof(*entry), node); + + list_del(&entry->node); + release_firmware(entry->fw); + kfree(entry->name); + kfree(entry); +} + +/* + * Release all FW entries, used on driver removal + */ +void avs_release_firmwares(struct avs_dev *adev) +{ + struct avs_fw_entry *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &adev->fw_list, node) { + list_del(&entry->node); + release_firmware(entry->fw); + kfree(entry->name); + kfree(entry); + } +} + +unsigned int __kfifo_fromio_locked(struct kfifo *fifo, const void __iomem *src, unsigned int len, + spinlock_t *lock) +{ + struct __kfifo *__fifo = &fifo->kfifo; + unsigned long flags; + unsigned int l, off; + + spin_lock_irqsave(lock, flags); + len = min(len, kfifo_avail(fifo)); + off = __fifo->in & __fifo->mask; + l = min(len, kfifo_size(fifo) - off); + + memcpy_fromio(__fifo->data + off, src, l); + memcpy_fromio(__fifo->data, src + l, len - l); + /* Make sure data copied from SRAM is visible to all CPUs. */ + smp_mb(); + __fifo->in += len; + spin_unlock_irqrestore(lock, flags); + + return len; +} -- cgit v1.2.3
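
A minimal usage sketch, not part of the patch above: how avs_tplg_new(), avs_load_topology() and avs_remove_topology() are meant to pair up from a component's probe/remove callbacks. The callback names and the "my-tplg.bin" file name are placeholders, and acomp->tplg is set before loading only because avs_manifest() dereferences it; the driver's real probe path may differ.

#include <sound/soc.h>
#include "avs.h"	/* assumed to declare avs_soc_component/to_avs_soc_component */
#include "topology.h"

static int my_component_probe(struct snd_soc_component *comp)
{
	struct avs_soc_component *acomp = to_avs_soc_component(comp);

	acomp->tplg = avs_tplg_new(comp);
	if (!acomp->tplg)
		return -ENOMEM;

	/* avs_manifest() and avs_widget_load() read acomp->tplg during load. */
	return avs_load_topology(comp, "my-tplg.bin");
}

static void my_component_remove(struct snd_soc_component *comp)
{
	avs_remove_topology(comp);
}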
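
A sketch only, under stated assumptions: the expected lifetime of a module instance id built from avs_get_module_entry(), avs_module_id_alloc() and avs_module_id_free(). my_create_instance() is hypothetical and frees the id immediately for brevity; a real caller keeps it for as long as the firmware instance exists.

#include <linux/uuid.h>
#include "avs.h"
#include "messages.h"	/* assumed to declare struct avs_module_entry */

static int my_create_instance(struct avs_dev *adev, const guid_t *uuid)
{
	struct avs_module_entry mentry;
	int instance_id;
	int ret;

	ret = avs_get_module_entry(adev, uuid, &mentry);
	if (ret)
		return ret;

	/* Allocation is bounded by instance_max_count for this module id. */
	instance_id = avs_module_id_alloc(adev, mentry.module_id);
	if (instance_id < 0)
		return instance_id;

	/* ...the firmware instance would be created and used here... */

	avs_module_id_free(adev, mentry.module_id, instance_id);
	return 0;
}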
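
A sketch of how the firmware-caching helpers are intended to pair up, matching the comments in utils.c: images stay on adev->fw_list once requested, a failed caller drops only its own entry, and driver removal drops the whole cache. my_load_library(), my_parse_library() and my_unload_all() are made up for illustration.

#include <linux/firmware.h>
#include "avs.h"

/* Hypothetical consumer of the cached image. */
static int my_parse_library(struct avs_dev *adev, const struct firmware *fw)
{
	return fw->size ? 0 : -EINVAL;
}

static int my_load_library(struct avs_dev *adev, const char *name)
{
	const struct firmware *fw;
	int ret;

	/* Served from adev->fw_list if already cached, request_firmware() otherwise. */
	ret = avs_request_firmware(adev, &fw, name);
	if (ret < 0)
		return ret;

	ret = my_parse_library(adev, fw);
	if (ret < 0)
		/* Undo only the entry added above; earlier cache entries stay valid. */
		avs_release_last_firmware(adev);

	return ret;
}

/* On driver removal the whole cache is dropped in one call. */
static void my_unload_all(struct avs_dev *adev)
{
	avs_release_firmwares(adev);
}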
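
Finally, a sketch of one possible producer/consumer pairing around __kfifo_fromio_locked(): the helper fills a kfifo from an MMIO window under a spinlock (handling wrap-around with two memcpy_fromio() calls), and a reader drains it under the same lock. The my_trace_ctx container and both functions are hypothetical; the kfifo is assumed to have been set up elsewhere with kfifo_alloc().

#include <linux/io.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>

struct my_trace_ctx {			/* hypothetical container */
	struct kfifo fifo;		/* filled from an SRAM window */
	spinlock_t lock;		/* the lock handed to the helper */
};

/* Producer: capture 'len' bytes of firmware log from MMIO. */
static void my_capture(struct my_trace_ctx *ctx, void __iomem *sram, unsigned int len)
{
	__kfifo_fromio_locked(&ctx->fifo, sram, len, &ctx->lock);
}

/* Consumer: drain whatever has been captured so far into a local buffer. */
static unsigned int my_drain(struct my_trace_ctx *ctx, void *buf, unsigned int size)
{
	unsigned long flags;
	unsigned int copied;

	spin_lock_irqsave(&ctx->lock, flags);
	copied = kfifo_out(&ctx->fifo, buf, size);
	spin_unlock_irqrestore(&ctx->lock, flags);

	return copied;
}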