/*
* Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <denver.h>
#include <cpu_macros.S>
#include <plat_macros.S>
/* -------------------------------------------------
* CVE-2017-5715 mitigation
*
* Flush the indirect branch predictor and RSB on
* entry to EL3 by issuing a newly added instruction
* for Denver CPUs.
*
* To achieve this without performing any branch
* instruction, a per-cpu vbar is installed which
* executes the workaround and then branches off to
* the corresponding vector entry in the main vector
* table.
* -------------------------------------------------
*/
vector_base workaround_bpflush_runtime_exceptions
.macro apply_workaround
stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
/* Disable cycle counter when event counting is prohibited */
mrs x1, pmcr_el0
orr x0, x1, #PMCR_EL0_DP_BIT
msr pmcr_el0, x0
isb
/* -------------------------------------------------
* A new write-only system register where a write of
* 1 to bit 0 will cause the indirect branch predictor
* and RSB to be flushed.
*
* A write of 0 to bit 0 will be ignored. A write of
* 1 to any other bit will cause an MCA.
* -------------------------------------------------
*/
mov x0, #1
msr s3_0_c15_c0_6, x0
isb
ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
.endm
/* ---------------------------------------------------------------------
* Current EL with SP_EL0 : 0x0 - 0x200
* ---------------------------------------------------------------------
*/
vector_entry workaround_bpflush_sync_exception_sp_el0
b sync_exception_sp_el0
end_vector_entry workaround_bpflush_sync_exception_sp_el0
vector_entry workaround_bpflush_irq_sp_el0
b irq_sp_el0
end_vector_entry workaround_bpflush_irq_sp_el0
vector_entry workaround_bpflush_fiq_sp_el0
b fiq_sp_el0
end_vector_entry workaround_bpflush_fiq_sp_el0
vector_entry workaround_bpflush_serror_sp_el0
b serror_sp_el0
end_vector_entry workaround_bpflush_serror_sp_el0
/* ---------------------------------------------------------------------
* Current EL with SP_ELx: 0x200 - 0x400
* ---------------------------------------------------------------------
*/
vector_entry workaround_bpflush_sync_exception_sp_elx
b sync_exception_sp_elx
end_vector_entry workaround_bpflush_sync_exception_sp_elx
vector_entry workaround_bpflush_irq_sp_elx
b irq_sp_elx
end_vector_entry workaround_bpflush_irq_sp_elx
vector_entry workaround_bpflush_fiq_sp_elx
b fiq_sp_elx
end_vector_entry workaround_bpflush_fiq_sp_elx
vector_entry workaround_bpflush_serror_sp_elx
b serror_sp_elx
end_vector_entry workaround_bpflush_serror_sp_elx
/* ---------------------------------------------------------------------
* Lower EL using AArch64 : 0x400 - 0x600
* ---------------------------------------------------------------------
*/
vector_entry workaround_bpflush_sync_exception_aarch64
apply_workaround
b sync_exception_aarch64
end_vector_entry workaround_bpflush_sync_exception_aarch64
vector_entry workaround_bpflush_irq_aarch64
apply_workaround
b irq_aarch64
end_vector_entry workaround_bpflush_irq_aarch64
vector_entry workaround_bpflush_fiq_aarch64
apply_workaround
b fiq_aarch64
end_vector_entry workaround_bpflush_fiq_aarch64
vector_entry workaround_bpflush_serror_aarch64
apply_workaround
b serror_aarch64
end_vector_entry workaround_bpflush_serror_aarch64
/* ---------------------------------------------------------------------
* Lower EL using AArch32 : 0x600 - 0x800
* ---------------------------------------------------------------------
*/
vector_entry workaround_bpflush_sync_exception_aarch32
apply_workaround
b sync_exception_aarch32
end_vector_entry workaround_bpflush_sync_exception_aarch32
vector_entry workaround_bpflush_irq_aarch32
apply_workaround
b irq_aarch32
end_vector_entry workaround_bpflush_irq_aarch32
vector_entry workaround_bpflush_fiq_aarch32
apply_workaround
b fiq_aarch32
end_vector_entry workaround_bpflush_fiq_aarch32
vector_entry workaround_bpflush_serror_aarch32
apply_workaround
b serror_aarch32
end_vector_entry workaround_bpflush_serror_aarch32
.global denver_disable_dco
/* ---------------------------------------------
* Disable debug interfaces
* ---------------------------------------------
*/
func denver_disable_ext_debug
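	/* Set OSDLR_EL1.DLK (OS Double Lock) to lock out the external debug interfaces */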
mov x0, #1
msr osdlr_el1, x0
isb
dsb sy
ret
endfunc denver_disable_ext_debug
/* ----------------------------------------------------
* Enable dynamic code optimizer (DCO)
* ----------------------------------------------------
*/
func denver_enable_dco
/* DCO is not supported on PN5 and later */
mrs x1, midr_el1
mov_imm x2, DENVER_MIDR_PN4
cmp x1, x2
b.hi 1f
mov x18, x30
bl plat_my_core_pos
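	/* x0 now holds this core's position; set the matching DCO enable bit (1 << core_pos) */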
mov x1, #1
lsl x1, x1, x0
msr s3_0_c15_c0_2, x1
mov x30, x18
1: ret
endfunc denver_enable_dco
/* ----------------------------------------------------
* Disable dynamic code optimizer (DCO)
* ----------------------------------------------------
*/
func denver_disable_dco
/* DCO is not supported on PN5 and later */
mrs x1, midr_el1
mov_imm x2, DENVER_MIDR_PN4
cmp x1, x2
b.hi 2f
/* turn off background work */
mov x18, x30
bl plat_my_core_pos
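	/* Build this core's enable bit; the disable request is the same bit shifted into bits [31:16] */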
mov x1, #1
lsl x1, x1, x0
lsl x2, x1, #16
msr s3_0_c15_c0_2, x2
isb
/* wait till the background work turns off */
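	/* Per-core status is reported in bits [47:32]; spin until this core's bit clears */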
1: mrs x2, s3_0_c15_c0_2
lsr x2, x2, #32
and w2, w2, 0xFFFF
and x2, x2, x1
cbnz x2, 1b
mov x30, x18
2: ret
endfunc denver_disable_dco
workaround_reset_start denver, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
#if IMAGE_BL31
adr x1, workaround_bpflush_runtime_exceptions
msr vbar_el3, x1
#endif
workaround_reset_end denver, CVE(2017, 5715)
check_erratum_custom_start denver, CVE(2017, 5715)
mov x0, #ERRATA_MISSING
#if WORKAROUND_CVE_2017_5715
/*
* Check if the CPU supports the special instruction
* required to flush the indirect branch predictor and
* RSB. Support for this operation can be determined by
* comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
*/
mrs x1, id_afr0_el1
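	/* Isolate bit 16, the lowest bit of the ID_AFR0_EL1[19:16] field */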
mov x2, #0x10000
and x1, x1, x2
cbz x1, 1f
mov x0, #ERRATA_APPLIES
1:
#endif
ret
check_erratum_custom_end denver, CVE(2017, 5715)
workaround_reset_start denver, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
/*
 * Denver CPUs with DENVER_MIDR_PN3 or earlier use different
 * bits in the ACTLR_EL3 register to disable the speculative
 * store buffer and memory disambiguation.
*/
mrs x0, midr_el1
mov_imm x1, DENVER_MIDR_PN4
cmp x0, x1
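	/* The csel below selects the pre-PN4 bit positions when MIDR != DENVER_MIDR_PN4 */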
mrs x0, actlr_el3
mov x1, #(DENVER_CPU_DIS_MD_EL3 | DENVER_CPU_DIS_SSB_EL3)
mov x2, #(DENVER_PN4_CPU_DIS_MD_EL3 | DENVER_PN4_CPU_DIS_SSB_EL3)
csel x3, x1, x2, ne
orr x0, x0, x3
msr actlr_el3, x0
isb
dsb sy
workaround_reset_end denver, CVE(2018, 3639)
check_erratum_chosen denver, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
cpu_reset_func_start denver
/* ----------------------------------------------------
* Reset ACTLR.PMSTATE to C1 state
* ----------------------------------------------------
*/
mrs x0, actlr_el1
bic x0, x0, #DENVER_CPU_PMSTATE_MASK
orr x0, x0, #DENVER_CPU_PMSTATE_C1
msr actlr_el1, x0
/* ----------------------------------------------------
* Enable dynamic code optimizer (DCO)
* ----------------------------------------------------
*/
bl denver_enable_dco
cpu_reset_func_end denver
/* ----------------------------------------------------
* The CPU Ops core power down function for Denver.
* ----------------------------------------------------
*/
func denver_core_pwr_dwn
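	/* Preserve the link register across the call below */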
mov x19, x30
/* ---------------------------------------------
* Force the debug interfaces to be quiescent
* ---------------------------------------------
*/
bl denver_disable_ext_debug
ret x19
endfunc denver_core_pwr_dwn
/* -------------------------------------------------------
* The CPU Ops cluster power down function for Denver.
* -------------------------------------------------------
*/
func denver_cluster_pwr_dwn
ret
endfunc denver_cluster_pwr_dwn
errata_report_shim denver
/* ---------------------------------------------
 * This function provides Denver-specific
 * register information for crash reporting.
 * It needs to return with x6 pointing to
 * a list of register names in ASCII and
 * x8 - x15 holding the values of the
 * registers to be reported.
* ---------------------------------------------
*/
.section .rodata.denver_regs, "aS"
denver_regs: /* The ASCII list of register names to be reported */
.asciz "actlr_el1", ""
func denver_cpu_reg_dump
adr x6, denver_regs
mrs x8, ACTLR_EL1
ret
endfunc denver_cpu_reg_dump
/* macro to declare cpu_ops for Denver SKUs */
.macro denver_cpu_ops_wa midr
declare_cpu_ops_wa denver, \midr, \
denver_reset_func, \
check_erratum_denver_5715, \
CPU_NO_EXTRA2_FUNC, \
CPU_NO_EXTRA3_FUNC, \
denver_core_pwr_dwn, \
denver_cluster_pwr_dwn
.endm
denver_cpu_ops_wa DENVER_MIDR_PN0
denver_cpu_ops_wa DENVER_MIDR_PN1
denver_cpu_ops_wa DENVER_MIDR_PN2
denver_cpu_ops_wa DENVER_MIDR_PN3
denver_cpu_ops_wa DENVER_MIDR_PN4
denver_cpu_ops_wa DENVER_MIDR_PN5
denver_cpu_ops_wa DENVER_MIDR_PN6
denver_cpu_ops_wa DENVER_MIDR_PN7
denver_cpu_ops_wa DENVER_MIDR_PN8
denver_cpu_ops_wa DENVER_MIDR_PN9