// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <linux/devcoredump.h>
#include <linux/slab.h>
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "messages.h"
static int skl_enable_logs(struct avs_dev *adev, enum avs_log_enable enable, u32 aging_period,
			   u32 fifo_full_period, unsigned long resource_mask, u32 *priorities)
{
	struct skl_log_state_info *info;
	u32 size, num_cores = adev->hw_cfg.dsp_cores;
	int ret, i;

	if (fls_long(resource_mask) > num_cores)
		return -EINVAL;

	size = struct_size(info, logs_core, num_cores);
	info = kzalloc(size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->core_mask = resource_mask;
	if (enable)
		for_each_set_bit(i, &resource_mask, num_cores) {
			info->logs_core[i].enable = enable;
			info->logs_core[i].min_priority = *priorities++;
		}
	else
		for_each_set_bit(i, &resource_mask, num_cores)
			info->logs_core[i].enable = enable;

	ret = avs_ipc_set_enable_logs(adev, (u8 *)info, size);
	kfree(info);
	if (ret)
		return AVS_IPC_RET(ret);

	return 0;
}
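
/*
 * Per-core log buffers are laid out back-to-back in SRAM, so a given
 * core's buffer starts at the core index times the per-core buffer size.
 */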
int skl_log_buffer_offset(struct avs_dev *adev, u32 core)
{
	return core * avs_log_buffer_size(adev);
}

/* fw DbgLogWp registers */
#define FW_REGS_DBG_LOG_WP(core)	(0x30 + 0x4 * (core))
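
/*
 * Each core's log buffer is split in two halves: the firmware fills one
 * half while the driver drains the other. The DbgLogWp write-pointer
 * register tells which half the firmware is currently writing to, so the
 * opposite half is copied into the trace kfifo and any readers are woken.
 */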
static int
skl_log_buffer_status(struct avs_dev *adev, union avs_notify_msg *msg)
{
	unsigned long flags;
	void __iomem *buf;
	u16 size, write, offset;

	spin_lock_irqsave(&adev->dbg.trace_lock, flags);
	if (!kfifo_initialized(&adev->dbg.trace_fifo)) {
		spin_unlock_irqrestore(&adev->dbg.trace_lock, flags);
		return 0;
	}

	size = avs_log_buffer_size(adev) / 2;
	write = readl(avs_sram_addr(adev, AVS_FW_REGS_WINDOW) + FW_REGS_DBG_LOG_WP(msg->log.core));
	/* determine buffer half */
	offset = (write < size) ? size : 0;

	/* Address is guaranteed to exist in SRAM2. */
	buf = avs_log_buffer_addr(adev, msg->log.core) + offset;
	__kfifo_fromio_locked(&adev->dbg.trace_fifo, buf, size, &adev->dbg.fifo_lock);
	wake_up(&adev->dbg.trace_waitq);

	spin_unlock_irqrestore(&adev->dbg.trace_lock, flags);

	return 0;
}
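
/*
 * Snapshot the firmware registers window from SRAM and feed it to the
 * devcoredump facility. dev_coredumpv() takes ownership of the vmalloc'ed
 * buffer and frees it once the dump has been read or has timed out.
 */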
static int skl_coredump(struct avs_dev *adev, union avs_notify_msg *msg)
{
	u8 *dump;

	dump = vzalloc(AVS_FW_REGS_SIZE);
	if (!dump)
		return -ENOMEM;

	memcpy_fromio(dump, avs_sram_addr(adev, AVS_FW_REGS_WINDOW), AVS_FW_REGS_SIZE);
	dev_coredumpv(adev->dev, dump, AVS_FW_REGS_SIZE, GFP_KERNEL);

	return 0;
}

static bool
skl_d0ix_toggle(struct avs_dev *adev, struct avs_ipc_msg *tx, bool wake)
{
	/* unsupported on cAVS 1.5 hw */
	return false;
}

static int skl_set_d0ix(struct avs_dev *adev, bool enable)
{
	/* unsupported on cAVS 1.5 hw */
	return 0;
}
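
/*
 * Platform descriptor for SKL-era (cAVS 1.5) DSPs: base firmware, libraries
 * and modules are transferred over CLDMA, while core power, reset, stall
 * and interrupt handling use the generic avs_dsp_*() helpers.
 */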
const struct avs_dsp_ops skl_dsp_ops = {
	.power = avs_dsp_core_power,
	.reset = avs_dsp_core_reset,
	.stall = avs_dsp_core_stall,
	.irq_handler = avs_dsp_irq_handler,
	.irq_thread = avs_dsp_irq_thread,
	.int_control = avs_dsp_interrupt_control,
	.load_basefw = avs_cldma_load_basefw,
	.load_lib = avs_cldma_load_library,
	.transfer_mods = avs_cldma_transfer_modules,
	.enable_logs = skl_enable_logs,
	.log_buffer_offset = skl_log_buffer_offset,
	.log_buffer_status = skl_log_buffer_status,
	.coredump = skl_coredump,
	.d0ix_toggle = skl_d0ix_toggle,
	.set_d0ix = skl_set_d0ix,
};