author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:13:47 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:13:47 +0000
commit    102b0d2daa97dae68d3eed54d8fe37a9cc38a892 (patch)
tree      bcf648efac40ca6139842707f0eba5a4496a6dd2  /lib/cpus
parent    Initial commit. (diff)
Adding upstream version 2.8.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r--  lib/cpus/aarch32/aem_generic.S  55
-rw-r--r--  lib/cpus/aarch32/cortex_a12.S  84
-rw-r--r--  lib/cpus/aarch32/cortex_a15.S  191
-rw-r--r--  lib/cpus/aarch32/cortex_a17.S  185
-rw-r--r--  lib/cpus/aarch32/cortex_a32.S  132
-rw-r--r--  lib/cpus/aarch32/cortex_a5.S  84
-rw-r--r--  lib/cpus/aarch32/cortex_a53.S  316
-rw-r--r--  lib/cpus/aarch32/cortex_a57.S  618
-rw-r--r--  lib/cpus/aarch32/cortex_a7.S  84
-rw-r--r--  lib/cpus/aarch32/cortex_a72.S  278
-rw-r--r--  lib/cpus/aarch32/cortex_a9.S  121
-rw-r--r--  lib/cpus/aarch32/cpu_helpers.S  264
-rw-r--r--  lib/cpus/aarch64/a64fx.S  49
-rw-r--r--  lib/cpus/aarch64/aem_generic.S  113
-rw-r--r--  lib/cpus/aarch64/cortex_a35.S  196
-rw-r--r--  lib/cpus/aarch64/cortex_a510.S  500
-rw-r--r--  lib/cpus/aarch64/cortex_a53.S  409
-rw-r--r--  lib/cpus/aarch64/cortex_a55.S  353
-rw-r--r--  lib/cpus/aarch64/cortex_a57.S  686
-rw-r--r--  lib/cpus/aarch64/cortex_a65.S  81
-rw-r--r--  lib/cpus/aarch64/cortex_a65ae.S  81
-rw-r--r--  lib/cpus/aarch64/cortex_a710.S  668
-rw-r--r--  lib/cpus/aarch64/cortex_a715.S  113
-rw-r--r--  lib/cpus/aarch64/cortex_a72.S  374
-rw-r--r--  lib/cpus/aarch64/cortex_a73.S  305
-rw-r--r--  lib/cpus/aarch64/cortex_a75.S  261
-rw-r--r--  lib/cpus/aarch64/cortex_a75_pubsub.c  28
-rw-r--r--  lib/cpus/aarch64/cortex_a76.S  846
-rw-r--r--  lib/cpus/aarch64/cortex_a76ae.S  113
-rw-r--r--  lib/cpus/aarch64/cortex_a77.S  417
-rw-r--r--  lib/cpus/aarch64/cortex_a78.S  492
-rw-r--r--  lib/cpus/aarch64/cortex_a78_ae.S  312
-rw-r--r--  lib/cpus/aarch64/cortex_a78c.S  268
-rw-r--r--  lib/cpus/aarch64/cortex_hayes.S  77
-rw-r--r--  lib/cpus/aarch64/cortex_hunter.S  113
-rw-r--r--  lib/cpus/aarch64/cortex_hunter_elp_arm.S  113
-rw-r--r--  lib/cpus/aarch64/cortex_x1.S  217
-rw-r--r--  lib/cpus/aarch64/cortex_x2.S  432
-rw-r--r--  lib/cpus/aarch64/cortex_x3.S  149
-rw-r--r--  lib/cpus/aarch64/cpu_helpers.S  456
-rw-r--r--  lib/cpus/aarch64/cpuamu.c  70
-rw-r--r--  lib/cpus/aarch64/cpuamu_helpers.S  99
-rw-r--r--  lib/cpus/aarch64/denver.S  386
-rw-r--r--  lib/cpus/aarch64/dsu_helpers.S  195
-rw-r--r--  lib/cpus/aarch64/generic.S  89
-rw-r--r--  lib/cpus/aarch64/neoverse_e1.S  81
-rw-r--r--  lib/cpus/aarch64/neoverse_n1.S  745
-rw-r--r--  lib/cpus/aarch64/neoverse_n1_pubsub.c  28
-rw-r--r--  lib/cpus/aarch64/neoverse_n2.S  639
-rw-r--r--  lib/cpus/aarch64/neoverse_n_common.S  26
-rw-r--r--  lib/cpus/aarch64/neoverse_poseidon.S  113
-rw-r--r--  lib/cpus/aarch64/neoverse_v1.S  628
-rw-r--r--  lib/cpus/aarch64/neoverse_v2.S  112
-rw-r--r--  lib/cpus/aarch64/qemu_max.S  81
-rw-r--r--  lib/cpus/aarch64/rainier.S  175
-rw-r--r--  lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S  368
-rw-r--r--  lib/cpus/aarch64/wa_cve_2017_5715_mmu.S  152
-rw-r--r--  lib/cpus/aarch64/wa_cve_2022_23960_bhb.S  30
-rw-r--r--  lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S  108
-rw-r--r--  lib/cpus/cpu-ops.mk  1358
-rw-r--r--  lib/cpus/errata_report.c  101
61 files changed, 16218 insertions, 0 deletions
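
Every CPU driver in this patch follows the same shape: it provides reset,
core power-down and cluster power-down routines, then registers them with
the declare_cpu_ops macro, keyed on the core's MIDR. At boot, the common
helpers walk the table of registered descriptors and pick the entry whose
part number matches the running core. A minimal C sketch of that lookup
(illustrative only; the descriptor layout and the match mask here are
assumptions, not TF-A's actual structures):

	#include <stdint.h>
	#include <stddef.h>

	/* Assumed descriptor layout; TF-A builds the real one in assembly. */
	typedef struct cpu_ops {
		uint32_t midr;			/* implementer + part number */
		void (*reset_func)(void);	/* NULL == CPU_NO_RESET_FUNC */
		void (*core_pwr_dwn)(void);
		void (*cluster_pwr_dwn)(void);
	} cpu_ops_t;

	extern cpu_ops_t __CPU_OPS_START__[], __CPU_OPS_END__[];

	/* Assumed mask: keep implementer [31:24] and part number [15:4],
	   ignore variant/revision so one driver covers all steppings. */
	#define MIDR_MATCH_MASK	0xff00fff0u

	static cpu_ops_t *get_cpu_ops_ptr(uint32_t midr)
	{
		for (cpu_ops_t *o = __CPU_OPS_START__; o < __CPU_OPS_END__; o++)
			if (((midr ^ o->midr) & MIDR_MATCH_MASK) == 0u)
				return o;
		return NULL;	/* no matching driver; asserted fatal */
	}
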
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S
new file mode 100644
index 0000000..7bd586a
--- /dev/null
+++ b/lib/cpus/aarch32/aem_generic.S
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <aem_generic.h>
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_macros.S>
+
+func aem_generic_core_pwr_dwn
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ /* ---------------------------------------------
+ * Flush L1 cache to PoU.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ b dcsw_op_louis
+endfunc aem_generic_core_pwr_dwn
+
+
+func aem_generic_cluster_pwr_dwn
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ /* ---------------------------------------------
+ * Flush L1 and L2 caches to PoC.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ b dcsw_op_all
+endfunc aem_generic_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for AEM. Must follow AAPCS.
+ */
+func aem_generic_errata_report
+ bx lr
+endfunc aem_generic_errata_report
+#endif
+
+/* cpu_ops for Base AEM FVP */
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+ aem_generic_core_pwr_dwn, \
+ aem_generic_cluster_pwr_dwn
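
Note the split above: the per-core path cleans and invalidates only as far
as the rest of the cluster can see (dcsw_op_louis, Level Of Unification
Inner Shareable), while the cluster path pushes everything out to the Point
of Coherency (dcsw_op_all). Both walk the caches by set/way with DC CISW,
whose operand packs level, set and way as defined in the Arm ARM. A small
illustrative helper for building one such operand (the field widths would
come from CCSIDR/CLIDR; the function and parameter names are assumptions):

	#include <stdint.h>

	/*
	 * level is 0-based; log2_linelen = CCSIDR.LineSize + 4;
	 * way_bits = ceil(log2(associativity)).
	 */
	static uint32_t dc_cisw_operand(uint32_t level, uint32_t set,
					uint32_t way, uint32_t log2_linelen,
					uint32_t way_bits)
	{
		return (way << (32u - way_bits)) |  /* ways at the top     */
		       (set << log2_linelen) |      /* sets above the line */
		       (level << 1);                /* level in bits [3:1] */
	}
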
diff --git a/lib/cpus/aarch32/cortex_a12.S b/lib/cpus/aarch32/cortex_a12.S
new file mode 100644
index 0000000..5300fe0
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a12.S
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a12.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a12_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A12_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a12_disable_smp
+
+func cortex_a12_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A12_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a12_enable_smp
+
+func cortex_a12_reset_func
+ b cortex_a12_enable_smp
+endfunc cortex_a12_reset_func
+
+func cortex_a12_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a12_disable_smp
+endfunc cortex_a12_core_pwr_dwn
+
+func cortex_a12_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a12_disable_smp
+endfunc cortex_a12_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A12. Must follow AAPCS.
+ */
+func cortex_a12_errata_report
+ bx lr
+endfunc cortex_a12_errata_report
+#endif
+
+declare_cpu_ops cortex_a12, CORTEX_A12_MIDR, \
+ cortex_a12_reset_func, \
+ cortex_a12_core_pwr_dwn, \
+ cortex_a12_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a15.S b/lib/cpus/aarch32/cortex_a15.S
new file mode 100644
index 0000000..1143e9b
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a15.S
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2016-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a15.h>
+#include <cpu_macros.S>
+
+/*
+ * Cortex-A15 supports the LPAE and Virtualization Extensions. It does not
+ * matter whether a configuration uses LPAE/VE or not, so we do not check
+ * ARCH_IS_ARMV7_WITH_LPAE/VE here.
+ */
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a15_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A15_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+#if ERRATA_A15_816470
+ /*
+ * Invalidate any TLB address
+ */
+ mov r0, #0
+ stcopr r0, TLBIMVA
+#endif
+ dsb sy
+ bx lr
+endfunc cortex_a15_disable_smp
+
+func cortex_a15_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A15_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a15_enable_smp
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex A15 Errata #816470.
+ * This applies only to revision >= r3p0 of Cortex A15.
+ * ----------------------------------------------------
+ */
+func check_errata_816470
+ /*
+ * Even though this is only needed for revision >= r3p0, it is always
+ * applied because of the low cost of the workaround.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_816470
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex A15 Errata #827671.
+ * This applies only to revision >= r3p0 of Cortex A15.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ----------------------------------------------------
+ */
+func errata_a15_827671_wa
+ /*
+ * Compare r0 against revision r3p0
+ */
+ mov r2, lr
+ bl check_errata_827671
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr r0, CORTEX_A15_ACTLR2
+ orr r0, #CORTEX_A15_ACTLR2_INV_DCC_BIT
+ stcopr r0, CORTEX_A15_ACTLR2
+ isb
+1:
+ bx r2
+endfunc errata_a15_827671_wa
+
+func check_errata_827671
+ mov r1, #0x30
+ b cpu_rev_var_hs
+endfunc check_errata_827671
+
+func check_errata_cve_2017_5715
+#if WORKAROUND_CVE_2017_5715
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2017_5715
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2022_23960
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A15. Must follow AAPCS.
+ */
+func cortex_a15_errata_report
+ push {r12, lr}
+
+ bl cpu_get_rev_var
+ mov r4, r0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A15_816470, cortex_a15, 816470
+ report_errata ERRATA_A15_827671, cortex_a15, 827671
+ report_errata WORKAROUND_CVE_2017_5715, cortex_a15, cve_2017_5715
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a15, cve_2022_23960
+
+ pop {r12, lr}
+ bx lr
+endfunc cortex_a15_errata_report
+#endif
+
+func cortex_a15_reset_func
+ mov r5, lr
+ bl cpu_get_rev_var
+
+#if ERRATA_A15_827671
+ bl errata_a15_827671_wa
+#endif
+
+#if IMAGE_BL32 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960)
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A15_ACTLR_INV_BTB_BIT
+ stcopr r0, ACTLR
+ ldr r0, =wa_cve_2017_5715_icache_inv_vbar
+ stcopr r0, VBAR
+ stcopr r0, MVBAR
+ /* isb will be applied in the course of the reset func */
+#endif
+
+ mov lr, r5
+ b cortex_a15_enable_smp
+endfunc cortex_a15_reset_func
+
+func cortex_a15_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a15_disable_smp
+endfunc cortex_a15_core_pwr_dwn
+
+func cortex_a15_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a15_disable_smp
+endfunc cortex_a15_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a15, CORTEX_A15_MIDR, \
+ cortex_a15_reset_func, \
+ cortex_a15_core_pwr_dwn, \
+ cortex_a15_cluster_pwr_dwn
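
The errata plumbing seen here is shared by every driver in this patch:
cpu_get_rev_var packs the MIDR variant field into bits [7:4] and the
revision into bits [3:0] of r0, so a literal such as 0x30 means r3p0 and
0x12 means r1p2. The cpu_rev_var_ls/cpu_rev_var_hs helpers then turn a
comparison against that byte into an ERRATA_APPLIES/ERRATA_NOT_APPLIES
verdict. A behavioural C sketch (the constant values are illustrative; the
real ones live in cpu_macros.S):

	#define ERRATA_NOT_APPLIES	0	/* illustrative values */
	#define ERRATA_APPLIES		1
	#define ERRATA_MISSING		2

	/* rev_var = (variant << 4) | revision, e.g. 0x12 == r1p2 */
	static int cpu_rev_var_ls(unsigned int rev_var, unsigned int max)
	{
		return (rev_var <= max) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
	}

	static int cpu_rev_var_hs(unsigned int rev_var, unsigned int min)
	{
		return (rev_var >= min) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
	}

So check_errata_827671 above ("mov r1, #0x30; b cpu_rev_var_hs") reports
APPLIES exactly for r3p0 and later parts.
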
diff --git a/lib/cpus/aarch32/cortex_a17.S b/lib/cpus/aarch32/cortex_a17.S
new file mode 100644
index 0000000..b8abd33
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a17.S
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a17.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a17_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A17_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a17_disable_smp
+
+func cortex_a17_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A17_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a17_enable_smp
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex A17 Errata #852421.
+ * This applies only to revision <= r1p2 of Cortex A17.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ----------------------------------------------------
+ */
+func errata_a17_852421_wa
+ /*
+ * Compare r0 against revision r1p2
+ */
+ mov r2, lr
+ bl check_errata_852421
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr r0, CORTEX_A17_IMP_DEF_REG1
+ orr r0, r0, #(1<<24)
+ stcopr r0, CORTEX_A17_IMP_DEF_REG1
+1:
+ bx r2
+endfunc errata_a17_852421_wa
+
+func check_errata_852421
+ mov r1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_852421
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex A17 Errata #852423.
+ * This applies only to revision <= r1p2 of Cortex A17.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ----------------------------------------------------
+ */
+func errata_a17_852423_wa
+ /*
+ * Compare r0 against revision r1p2
+ */
+ mov r2, lr
+ bl check_errata_852423
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr r0, CORTEX_A17_IMP_DEF_REG1
+ orr r0, r0, #(1<<12)
+ stcopr r0, CORTEX_A17_IMP_DEF_REG1
+1:
+ bx r2
+endfunc errata_a17_852423_wa
+
+func check_errata_852423
+ mov r1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_852423
+
+func check_errata_cve_2017_5715
+#if WORKAROUND_CVE_2017_5715
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2017_5715
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A17. Must follow AAPCS.
+ */
+func cortex_a17_errata_report
+ push {r12, lr}
+
+ bl cpu_get_rev_var
+ mov r4, r0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A17_852421, cortex_a17, 852421
+ report_errata ERRATA_A17_852423, cortex_a17, 852423
+ report_errata WORKAROUND_CVE_2017_5715, cortex_a17, cve_2017_5715
+
+ pop {r12, lr}
+ bx lr
+endfunc cortex_a17_errata_report
+#endif
+
+func cortex_a17_reset_func
+ mov r5, lr
+ bl cpu_get_rev_var
+ mov r4, r0
+
+#if ERRATA_A17_852421
+ mov r0, r4
+ bl errata_a17_852421_wa
+#endif
+
+#if ERRATA_A17_852423
+ mov r0, r4
+ bl errata_a17_852423_wa
+#endif
+
+#if IMAGE_BL32 && WORKAROUND_CVE_2017_5715
+ ldr r0, =workaround_bpiall_runtime_exceptions
+ stcopr r0, VBAR
+ stcopr r0, MVBAR
+ /* isb will be applied in the course of the reset func */
+#endif
+
+ mov lr, r5
+ b cortex_a17_enable_smp
+endfunc cortex_a17_reset_func
+
+func cortex_a17_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a17_disable_smp
+endfunc cortex_a17_core_pwr_dwn
+
+func cortex_a17_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a17_disable_smp
+endfunc cortex_a17_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a17, CORTEX_A17_MIDR, \
+ cortex_a17_reset_func, \
+ cortex_a17_core_pwr_dwn, \
+ cortex_a17_cluster_pwr_dwn
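
Both Cortex-A17 workarounds above are instances of the same
check-then-set-chicken-bit pattern: run the revision check, and if it
applies, read-modify-write one bit in an implementation-defined register.
Rendered as C for clarity (the register accessors are assumed helpers; the
assembly uses ldcopr/stcopr on CORTEX_A17_IMP_DEF_REG1):

	#include <stdint.h>

	#define ERRATA_NOT_APPLIES	0	/* illustrative, as sketched earlier */

	extern int check_errata_852421(unsigned int rev_var);
	extern uint32_t read_a17_imp_def_reg1(void);	/* assumed helper */
	extern void write_a17_imp_def_reg1(uint32_t v);	/* assumed helper */

	static void errata_a17_852421_wa(unsigned int rev_var)
	{
		if (check_errata_852421(rev_var) == ERRATA_NOT_APPLIES)
			return;
		uint32_t reg = read_a17_imp_def_reg1();
		reg |= (1u << 24);	/* chicken bit for erratum 852421 */
		write_a17_imp_def_reg1(reg);
	}
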
diff --git a/lib/cpus/aarch32/cortex_a32.S b/lib/cpus/aarch32/cortex_a32.S
new file mode 100644
index 0000000..c262276
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a32.S
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a32.h>
+#include <cpu_macros.S>
+
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * Clobbers: r0-r1
+ * ---------------------------------------------
+ */
+func cortex_a32_disable_smp
+ ldcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1
+ bic r0, r0, #CORTEX_A32_CPUECTLR_SMPEN_BIT
+ stcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a32_disable_smp
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A32.
+ * Clobbers: r0-r1
+ * -------------------------------------------------
+ */
+func cortex_a32_reset_func
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ ldcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1
+ orr r0, r0, #CORTEX_A32_CPUECTLR_SMPEN_BIT
+ stcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1
+ isb
+ bx lr
+endfunc cortex_a32_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A32.
+ * Clobbers: r0-r3
+ * ----------------------------------------------------
+ */
+func cortex_a32_core_pwr_dwn
+ /* r12 is pushed to meet the 8 byte stack alignment requirement */
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a32_disable_smp
+endfunc cortex_a32_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A32.
+ * Clobbers: r0-r3
+ * -------------------------------------------------------
+ */
+func cortex_a32_cluster_pwr_dwn
+ /* r12 is pushed to meet the 8 byte stack alignment requirement */
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Flush L1 cache.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 cache.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a32_disable_smp
+endfunc cortex_a32_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A32. Must follow AAPCS.
+ */
+func cortex_a32_errata_report
+ bx lr
+endfunc cortex_a32_errata_report
+#endif
+
+declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \
+ cortex_a32_reset_func, \
+ cortex_a32_core_pwr_dwn, \
+ cortex_a32_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a5.S b/lib/cpus/aarch32/cortex_a5.S
new file mode 100644
index 0000000..8abb66f
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a5.S
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a5.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a5_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A5_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a5_disable_smp
+
+func cortex_a5_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A5_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a5_enable_smp
+
+func cortex_a5_reset_func
+ b cortex_a5_enable_smp
+endfunc cortex_a5_reset_func
+
+func cortex_a5_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a5_disable_smp
+endfunc cortex_a5_core_pwr_dwn
+
+func cortex_a5_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a5_disable_smp
+endfunc cortex_a5_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A5. Must follow AAPCS.
+ */
+func cortex_a5_errata_report
+ bx lr
+endfunc cortex_a5_errata_report
+#endif
+
+declare_cpu_ops cortex_a5, CORTEX_A5_MIDR, \
+ cortex_a5_reset_func, \
+ cortex_a5_core_pwr_dwn, \
+ cortex_a5_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a53.S b/lib/cpus/aarch32/cortex_a53.S
new file mode 100644
index 0000000..6e3ff81
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a53.S
@@ -0,0 +1,316 @@
+/*
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <common/debug.h>
+#include <cortex_a53.h>
+#include <cpu_macros.S>
+
+#if A53_DISABLE_NON_TEMPORAL_HINT
+#undef ERRATA_A53_836870
+#define ERRATA_A53_836870 1
+#endif
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a53_disable_smp
+ ldcopr16 r0, r1, CORTEX_A53_ECTLR
+ bic64_imm r0, r1, CORTEX_A53_ECTLR_SMP_BIT
+ stcopr16 r0, r1, CORTEX_A53_ECTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a53_disable_smp
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #819472.
+ * This applies only to revision <= r0p1 of Cortex A53.
+ * ---------------------------------------------------
+ */
+func check_errata_819472
+ /*
+ * Even though this is only needed for revision <= r0p1, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_819472
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #824069.
+ * This applies only to revision <= r0p2 of Cortex A53.
+ * ---------------------------------------------------
+ */
+func check_errata_824069
+ /*
+ * Even though this is only needed for revision <= r0p2, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_824069
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #826319.
+ * This applies only to revision <= r0p2 of Cortex A53.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * --------------------------------------------------
+ */
+func errata_a53_826319_wa
+ /*
+ * Compare r0 against revision r0p2
+ */
+ mov r2, lr
+ bl check_errata_826319
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr r0, CORTEX_A53_L2ACTLR
+ bic r0, #CORTEX_A53_L2ACTLR_ENABLE_UNIQUECLEAN
+ orr r0, #CORTEX_A53_L2ACTLR_DISABLE_CLEAN_PUSH
+ stcopr r0, CORTEX_A53_L2ACTLR
+1:
+ bx lr
+endfunc errata_a53_826319_wa
+
+func check_errata_826319
+ mov r1, #0x02
+ b cpu_rev_var_ls
+endfunc check_errata_826319
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #827319.
+ * This applies only to revision <= r0p2 of Cortex A53.
+ * ---------------------------------------------------
+ */
+func check_errata_827319
+ /*
+ * Even though this is only needed for revision <= r0p2, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_827319
+
+ /* ---------------------------------------------------------------------
+ * Disable the cache non-temporal hint.
+ *
+ * This ignores the Transient allocation hint in the MAIR and treats
+ * allocations the same as non-transient allocation types. As a result,
+ * the LDNP and STNP instructions in AArch64 behave the same as the
+ * equivalent LDP and STP instructions.
+ *
+ * This is relevant only for revisions <= r0p3 of Cortex-A53.
+ * From r0p4 and onwards, the bit to disable the hint is enabled by
+ * default at reset.
+ *
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------------------------
+ */
+func a53_disable_non_temporal_hint
+ /*
+ * Compare r0 against revision r0p3
+ */
+ mov r2, lr
+ bl check_errata_disable_non_temporal_hint
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A53_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A53_CPUACTLR_DTAH
+ stcopr16 r0, r1, CORTEX_A53_CPUACTLR
+1:
+ bx lr
+endfunc a53_disable_non_temporal_hint
+
+func check_errata_disable_non_temporal_hint
+ mov r1, #0x03
+ b cpu_rev_var_ls
+endfunc check_errata_disable_non_temporal_hint
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #855873.
+ *
+ * This applies only to revisions >= r0p3 of Cortex A53.
+ * Earlier revisions of the core are affected as well, but don't
+ * have the chicken bit in the CPUACTLR register. It is expected that
+ * the rich OS takes care of that, especially as the workaround is
+ * shared with other errata in those revisions of the CPU.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * --------------------------------------------------
+ */
+func errata_a53_855873_wa
+ /*
+ * Compare r0 against revision r0p3 and higher
+ */
+ mov r2, lr
+ bl check_errata_855873
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A53_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A53_CPUACTLR_ENDCCASCI
+ stcopr16 r0, r1, CORTEX_A53_CPUACTLR
+1:
+ bx lr
+endfunc errata_a53_855873_wa
+
+func check_errata_855873
+ mov r1, #0x03
+ b cpu_rev_var_hs
+endfunc check_errata_855873
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A53.
+ * Shall clobber: r0-r6
+ * -------------------------------------------------
+ */
+func cortex_a53_reset_func
+ mov r5, lr
+ bl cpu_get_rev_var
+ mov r4, r0
+
+#if ERRATA_A53_826319
+ mov r0, r4
+ bl errata_a53_826319_wa
+#endif
+
+#if ERRATA_A53_836870
+ mov r0, r4
+ bl a53_disable_non_temporal_hint
+#endif
+
+#if ERRATA_A53_855873
+ mov r0, r4
+ bl errata_a53_855873_wa
+#endif
+
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ ldcopr16 r0, r1, CORTEX_A53_ECTLR
+ orr64_imm r0, r1, CORTEX_A53_ECTLR_SMP_BIT
+ stcopr16 r0, r1, CORTEX_A53_ECTLR
+ isb
+ bx r5
+endfunc cortex_a53_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A53.
+ * ----------------------------------------------------
+ */
+func cortex_a53_core_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a53_disable_smp
+endfunc cortex_a53_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A53.
+ * Clobbers: r0-r3
+ * -------------------------------------------------------
+ */
+func cortex_a53_cluster_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a53_disable_smp
+endfunc cortex_a53_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A53. Must follow AAPCS.
+ */
+func cortex_a53_errata_report
+ push {r12, lr}
+
+ bl cpu_get_rev_var
+ mov r4, r0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A53_819472, cortex_a53, 819472
+ report_errata ERRATA_A53_824069, cortex_a53, 824069
+ report_errata ERRATA_A53_826319, cortex_a53, 826319
+ report_errata ERRATA_A53_827319, cortex_a53, 827319
+ report_errata ERRATA_A53_836870, cortex_a53, disable_non_temporal_hint
+ report_errata ERRATA_A53_855873, cortex_a53, 855873
+
+ pop {r12, lr}
+ bx lr
+endfunc cortex_a53_errata_report
+#endif
+
+declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
+ cortex_a53_reset_func, \
+ cortex_a53_core_pwr_dwn, \
+ cortex_a53_cluster_pwr_dwn
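
Unlike the ACTLR accesses in the older cores above, the A53's CPUECTLR and
CPUACTLR are 64-bit registers, so the AArch32 code reads and writes them as
a register pair: ldcopr16/stcopr16 wrap MRRC/MCRR, and orr64_imm/bic64_imm
edit whichever half of the pair the bit lands in. An illustrative GCC
inline-asm equivalent for CPUECTLR (the p15, opc1=1, c15 encoding follows
the A53 TRM, but treat the whole sketch as an assumption):

	#include <stdint.h>

	static inline uint64_t a53_read_cpuectlr(void)
	{
		uint32_t lo, hi;
		/* 64-bit CP15 read: Rt gets [31:0], Rt2 gets [63:32] */
		__asm__ volatile("mrrc p15, 1, %0, %1, c15"
				 : "=r"(lo), "=r"(hi));
		return ((uint64_t)hi << 32) | lo;
	}

	static inline void a53_write_cpuectlr(uint64_t v)
	{
		__asm__ volatile("mcrr p15, 1, %0, %1, c15" : :
				 "r"((uint32_t)v), "r"((uint32_t)(v >> 32)));
		__asm__ volatile("isb" ::: "memory");
	}
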
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
new file mode 100644
index 0000000..18ee1f9
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -0,0 +1,618 @@
+/*
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <common/debug.h>
+#include <cortex_a57.h>
+#include <cpu_macros.S>
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * Clobbers: r0-r1
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_smp
+ ldcopr16 r0, r1, CORTEX_A57_ECTLR
+ bic64_imm r0, r1, CORTEX_A57_ECTLR_SMP_BIT
+ stcopr16 r0, r1, CORTEX_A57_ECTLR
+ bx lr
+endfunc cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Disable all types of L2 prefetches.
+ * Clobbers: r0-r2
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_l2_prefetch
+ ldcopr16 r0, r1, CORTEX_A57_ECTLR
+ orr64_imm r0, r1, CORTEX_A57_ECTLR_DIS_TWD_ACC_PFTCH_BIT
+ bic64_imm r0, r1, (CORTEX_A57_ECTLR_L2_IPFTCH_DIST_MASK | \
+ CORTEX_A57_ECTLR_L2_DPFTCH_DIST_MASK)
+ stcopr16 r0, r1, CORTEX_A57_ECTLR
+ isb
+ dsb ish
+ bx lr
+endfunc cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable debug interfaces
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_ext_debug
+ mov r0, #1
+ stcopr r0, DBGOSDLR
+ isb
+#if ERRATA_A57_817169
+ /*
+ * Invalidate any TLB address
+ */
+ mov r0, #0
+ stcopr r0, TLBIMVA
+#endif
+ dsb sy
+ bx lr
+endfunc cortex_a57_disable_ext_debug
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #806969.
+ * This applies only to revision r0p0 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * --------------------------------------------------
+ */
+func errata_a57_806969_wa
+ /*
+ * Compare r0 against revision r0p0
+ */
+ mov r2, lr
+ bl check_errata_806969
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_NO_ALLOC_WBWA
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_806969_wa
+
+func check_errata_806969
+ mov r1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_806969
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #813419.
+ * This applies only to revision r0p0 of Cortex A57.
+ * ---------------------------------------------------
+ */
+func check_errata_813419
+ /*
+ * Even though this is only needed for revision r0p0, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_813419
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #813420.
+ * This applies only to revision r0p0 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_813420_wa
+ /*
+ * Compare r0 against revision r0p0
+ */
+ mov r2, lr
+ bl check_errata_813420
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DCC_AS_DCCI
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_813420_wa
+
+func check_errata_813420
+ mov r1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_813420
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #814670.
+ * This applies only to revision r0p0 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_814670_wa
+ /*
+ * Compare r0 against revision r0p0
+ */
+ mov r2, lr
+ bl check_errata_814670
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_DMB_NULLIFICATION
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ isb
+1:
+ bx r2
+endfunc errata_a57_814670_wa
+
+func check_errata_814670
+ mov r1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_814670
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #817169.
+ * This applies only to revision <= r0p1 of Cortex A57.
+ * ----------------------------------------------------
+ */
+func check_errata_817169
+ /*
+ * Even though this is only needed for revision <= r0p1, it
+ * is always applied because of the low cost of the workaround.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_817169
+
+ /* --------------------------------------------------------------------
+ * Disable the over-read from the LDNP instruction.
+ *
+ * This applies to all revisions <= r1p2. The performance degradation
+ * observed with LDNP/STNP has been fixed on r1p3 and onwards.
+ *
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------------------------
+ */
+func a57_disable_ldnp_overread
+ /*
+ * Compare r0 against revision r1p2
+ */
+ mov r2, lr
+ bl check_errata_disable_ldnp_overread
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_OVERREAD
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc a57_disable_ldnp_overread
+
+func check_errata_disable_ldnp_overread
+ mov r1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_disable_ldnp_overread
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #826974.
+ * This applies only to revision <= r1p1 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_826974_wa
+ /*
+ * Compare r0 against revision r1p1
+ */
+ mov r2, lr
+ bl check_errata_826974
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_DMB
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_826974_wa
+
+func check_errata_826974
+ mov r1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_826974
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #826977.
+ * This applies only to revision <= r1p1 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_826977_wa
+ /*
+ * Compare r0 against revision r1p1
+ */
+ mov r2, lr
+ bl check_errata_826977
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_GRE_NGRE_AS_NGNRE
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_826977_wa
+
+func check_errata_826977
+ mov r1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_826977
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #828024.
+ * This applies only to revision <= r1p1 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_828024_wa
+ /*
+ * Compare r0 against revision r1p1
+ */
+ mov r2, lr
+ bl check_errata_828024
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ /*
+ * Setting the relevant bits in CORTEX_A57_CPUACTLR has to be done in 2
+ * instructions here because the resulting bitmask doesn't fit in a
+ * 16-bit value so it cannot be encoded in a single instruction.
+ */
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_NO_ALLOC_WBWA
+ orr64_imm r0, r1, (CORTEX_A57_CPUACTLR_DIS_L1_STREAMING | CORTEX_A57_CPUACTLR_DIS_STREAMING)
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_828024_wa
+
+func check_errata_828024
+ mov r1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_828024
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #829520.
+ * This applies only to revision <= r1p2 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_829520_wa
+ /*
+ * Compare r0 against revision r1p2
+ */
+ mov r2, lr
+ bl check_errata_829520
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_INDIRECT_PREDICTOR
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_829520_wa
+
+func check_errata_829520
+ mov r1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_829520
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #833471.
+ * This applies only to revision <= r1p2 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_833471_wa
+ /*
+ * Compare r0 against revision r1p2
+ */
+ mov r2, lr
+ bl check_errata_833471
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r1, r1, CORTEX_A57_CPUACTLR_FORCE_FPSCR_FLUSH
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_833471_wa
+
+func check_errata_833471
+ mov r1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_833471
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #859972.
+ * This applies only to revision <= r1p3 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_859972_wa
+ mov r2, lr
+ bl check_errata_859972
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r1, r1, CORTEX_A57_CPUACTLR_DIS_INSTR_PREFETCH
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_859972_wa
+
+func check_errata_859972
+ mov r1, #0x13
+ b cpu_rev_var_ls
+endfunc check_errata_859972
+
+func check_errata_cve_2017_5715
+ mov r0, #ERRATA_MISSING
+ bx lr
+endfunc check_errata_cve_2017_5715
+
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2018_3639
+
+func check_errata_cve_2022_23960
+ mov r0, #ERRATA_MISSING
+ bx lr
+endfunc check_errata_cve_2022_23960
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A57.
+ * Shall clobber: r0-r6
+ * -------------------------------------------------
+ */
+func cortex_a57_reset_func
+ mov r5, lr
+ bl cpu_get_rev_var
+ mov r4, r0
+
+#if ERRATA_A57_806969
+ mov r0, r4
+ bl errata_a57_806969_wa
+#endif
+
+#if ERRATA_A57_813420
+ mov r0, r4
+ bl errata_a57_813420_wa
+#endif
+
+#if ERRATA_A57_814670
+ mov r0, r4
+ bl errata_a57_814670_wa
+#endif
+
+#if A57_DISABLE_NON_TEMPORAL_HINT
+ mov r0, r4
+ bl a57_disable_ldnp_overread
+#endif
+
+#if ERRATA_A57_826974
+ mov r0, r4
+ bl errata_a57_826974_wa
+#endif
+
+#if ERRATA_A57_826977
+ mov r0, r4
+ bl errata_a57_826977_wa
+#endif
+
+#if ERRATA_A57_828024
+ mov r0, r4
+ bl errata_a57_828024_wa
+#endif
+
+#if ERRATA_A57_829520
+ mov r0, r4
+ bl errata_a57_829520_wa
+#endif
+
+#if ERRATA_A57_833471
+ mov r0, r4
+ bl errata_a57_833471_wa
+#endif
+
+#if ERRATA_A57_859972
+ mov r0, r4
+ bl errata_a57_859972_wa
+#endif
+
+#if WORKAROUND_CVE_2018_3639
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_STORE
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ isb
+ dsb sy
+#endif
+
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ ldcopr16 r0, r1, CORTEX_A57_ECTLR
+ orr64_imm r0, r1, CORTEX_A57_ECTLR_SMP_BIT
+ stcopr16 r0, r1, CORTEX_A57_ECTLR
+ isb
+ bx r5
+endfunc cortex_a57_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A57.
+ * ----------------------------------------------------
+ */
+func cortex_a57_core_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a57_disable_ext_debug
+endfunc cortex_a57_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A57.
+ * Clobbers: r0-r3
+ * -------------------------------------------------------
+ */
+func cortex_a57_cluster_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a57_disable_ext_debug
+endfunc cortex_a57_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A57. Must follow AAPCS.
+ */
+func cortex_a57_errata_report
+ push {r12, lr}
+
+ bl cpu_get_rev_var
+ mov r4, r0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A57_806969, cortex_a57, 806969
+ report_errata ERRATA_A57_813419, cortex_a57, 813419
+ report_errata ERRATA_A57_813420, cortex_a57, 813420
+ report_errata ERRATA_A57_814670, cortex_a57, 814670
+ report_errata ERRATA_A57_817169, cortex_a57, 817169
+ report_errata A57_DISABLE_NON_TEMPORAL_HINT, cortex_a57, \
+ disable_ldnp_overread
+ report_errata ERRATA_A57_826974, cortex_a57, 826974
+ report_errata ERRATA_A57_826977, cortex_a57, 826977
+ report_errata ERRATA_A57_828024, cortex_a57, 828024
+ report_errata ERRATA_A57_829520, cortex_a57, 829520
+ report_errata ERRATA_A57_833471, cortex_a57, 833471
+ report_errata ERRATA_A57_859972, cortex_a57, 859972
+ report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a57, cve_2022_23960
+
+ pop {r12, lr}
+ bx lr
+endfunc cortex_a57_errata_report
+#endif
+
+declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
+ cortex_a57_reset_func, \
+ cortex_a57_core_pwr_dwn, \
+ cortex_a57_cluster_pwr_dwn
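
The A57 reset function shows the dispatch convention at its fullest: the
rev/var byte is fetched once (cpu_get_rev_var, kept in r4) and re-supplied
to every workaround compiled in via the ERRATA_A57_* flags from
cpu-ops.mk. In rough C terms (set_ectlr_smp_bit is a stand-in for the
inline SMP-bit sequence at the end of the assembly):

	extern unsigned int cpu_get_rev_var(void);
	extern void errata_a57_806969_wa(unsigned int rev_var);
	extern void errata_a57_859972_wa(unsigned int rev_var);
	extern void set_ectlr_smp_bit(void);	/* stand-in, done inline above */

	void cortex_a57_reset_sketch(void)
	{
		unsigned int rev_var = cpu_get_rev_var();

	#if ERRATA_A57_806969
		errata_a57_806969_wa(rev_var);	/* each wa re-checks rev_var */
	#endif
	#if ERRATA_A57_859972
		errata_a57_859972_wa(rev_var);
	#endif
		/* ...the remaining workarounds follow the same shape... */

		set_ectlr_smp_bit();	/* join coherency last, then isb */
	}
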
diff --git a/lib/cpus/aarch32/cortex_a7.S b/lib/cpus/aarch32/cortex_a7.S
new file mode 100644
index 0000000..4d4bb77
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a7.S
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a7.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a7_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A7_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a7_disable_smp
+
+func cortex_a7_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A7_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a7_enable_smp
+
+func cortex_a7_reset_func
+ b cortex_a7_enable_smp
+endfunc cortex_a7_reset_func
+
+func cortex_a7_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a7_disable_smp
+endfunc cortex_a7_core_pwr_dwn
+
+func cortex_a7_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a7_disable_smp
+endfunc cortex_a7_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A7. Must follow AAPCS.
+ */
+func cortex_a7_errata_report
+ bx lr
+endfunc cortex_a7_errata_report
+#endif
+
+declare_cpu_ops cortex_a7, CORTEX_A7_MIDR, \
+ cortex_a7_reset_func, \
+ cortex_a7_core_pwr_dwn, \
+ cortex_a7_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
new file mode 100644
index 0000000..03914b2
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <common/debug.h>
+#include <cortex_a72.h>
+#include <cpu_macros.S>
+
+ /* ---------------------------------------------
+ * Disable all types of L2 prefetches.
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_l2_prefetch
+ ldcopr16 r0, r1, CORTEX_A72_ECTLR
+ orr64_imm r0, r1, CORTEX_A72_ECTLR_DIS_TWD_ACC_PFTCH_BIT
+ bic64_imm r0, r1, (CORTEX_A72_ECTLR_L2_IPFTCH_DIST_MASK | \
+ CORTEX_A72_ECTLR_L2_DPFTCH_DIST_MASK)
+ stcopr16 r0, r1, CORTEX_A72_ECTLR
+ isb
+ bx lr
+endfunc cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_hw_prefetcher
+ ldcopr16 r0, r1, CORTEX_A72_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A72_CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH
+ stcopr16 r0, r1, CORTEX_A72_CPUACTLR
+ isb
+ dsb ish
+ bx lr
+endfunc cortex_a72_disable_hw_prefetcher
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * Clobbers: r0-r1
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_smp
+ ldcopr16 r0, r1, CORTEX_A72_ECTLR
+ bic64_imm r0, r1, CORTEX_A72_ECTLR_SMP_BIT
+ stcopr16 r0, r1, CORTEX_A72_ECTLR
+ bx lr
+endfunc cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Disable debug interfaces
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_ext_debug
+ mov r0, #1
+ stcopr r0, DBGOSDLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a72_disable_ext_debug
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A72 Errata #859971.
+ * This applies only to revision <= r0p3 of Cortex A72.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a72_859971_wa
+ mov r2, lr
+ bl check_errata_859971
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A72_CPUACTLR
+ orr64_imm r1, r1, CORTEX_A72_CPUACTLR_DIS_INSTR_PREFETCH
+ stcopr16 r0, r1, CORTEX_A72_CPUACTLR
+1:
+ bx lr
+endfunc errata_a72_859971_wa
+
+func check_errata_859971
+ mov r1, #0x03
+ b cpu_rev_var_ls
+endfunc check_errata_859971
+
+func check_errata_cve_2017_5715
+ mov r0, #ERRATA_MISSING
+ bx lr
+endfunc check_errata_cve_2017_5715
+
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2018_3639
+
+func check_errata_cve_2022_23960
+ mov r0, #ERRATA_MISSING
+ bx lr
+endfunc check_errata_cve_2022_23960
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A72.
+ * -------------------------------------------------
+ */
+func cortex_a72_reset_func
+ mov r5, lr
+ bl cpu_get_rev_var
+ mov r4, r0
+
+#if ERRATA_A72_859971
+ mov r0, r4
+ bl errata_a72_859971_wa
+#endif
+
+#if WORKAROUND_CVE_2018_3639
+ ldcopr16 r0, r1, CORTEX_A72_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A72_CPUACTLR_DIS_LOAD_PASS_STORE
+ stcopr16 r0, r1, CORTEX_A72_CPUACTLR
+ isb
+ dsb sy
+#endif
+
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ ldcopr16 r0, r1, CORTEX_A72_ECTLR
+ orr64_imm r0, r1, CORTEX_A72_ECTLR_SMP_BIT
+ stcopr16 r0, r1, CORTEX_A72_ECTLR
+ isb
+ bx r5
+endfunc cortex_a72_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A72.
+ * ----------------------------------------------------
+ */
+func cortex_a72_core_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_hw_prefetcher
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a72_disable_ext_debug
+endfunc cortex_a72_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A72.
+ * -------------------------------------------------------
+ */
+func cortex_a72_cluster_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_hw_prefetcher
+
+#if !SKIP_A72_L1_FLUSH_PWR_DWN
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+#endif
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* -------------------------------------------------
+ * Flush the L2 caches.
+ * -------------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a72_disable_ext_debug
+endfunc cortex_a72_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A72. Must follow AAPCS.
+ */
+func cortex_a72_errata_report
+ push {r12, lr}
+
+ bl cpu_get_rev_var
+ mov r4, r0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A72_859971, cortex_a72, 859971
+ report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a72, cve_2022_23960
+
+ pop {r12, lr}
+ bx lr
+endfunc cortex_a72_errata_report
+#endif
+
+declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
+ cortex_a72_reset_func, \
+ cortex_a72_core_pwr_dwn, \
+ cortex_a72_cluster_pwr_dwn
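
The ordering in the A72 cluster path above is deliberate: prefetchers are
silenced before the flush so they cannot re-allocate lines, the ACP is
quiesced before L2 is flushed, and coherency and external debug are the
last things to go. A C outline of the same sequence (the names mirror the
assembly helpers; DC_OP_CISW really comes from arch.h, and caches must
already be disabled, as the SCTLR.C assert checks):

	#define DC_OP_CISW	0	/* placeholder; real value in arch.h */

	extern void cortex_a72_disable_l2_prefetch(void);
	extern void cortex_a72_disable_hw_prefetcher(void);
	extern void dcsw_op_level1(unsigned int op);
	extern void dcsw_op_level2(unsigned int op);
	extern void plat_disable_acp(void);
	extern void cortex_a72_disable_smp(void);
	extern void cortex_a72_disable_ext_debug(void);

	void cortex_a72_cluster_pwr_dwn_outline(void)
	{
		cortex_a72_disable_l2_prefetch();
		cortex_a72_disable_hw_prefetcher();
		dcsw_op_level1(DC_OP_CISW);	/* clean+invalidate L1 */
		plat_disable_acp();		/* quiesce the optional ACP */
		dcsw_op_level2(DC_OP_CISW);	/* then L2 */
		cortex_a72_disable_smp();	/* leave coherency */
		cortex_a72_disable_ext_debug();	/* OS double lock set */
	}
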
diff --git a/lib/cpus/aarch32/cortex_a9.S b/lib/cpus/aarch32/cortex_a9.S
new file mode 100644
index 0000000..7200343
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a9.S
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a9.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a9_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A9_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a9_disable_smp
+
+func cortex_a9_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A9_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a9_enable_smp
+
+func check_errata_a9_794073
+#if ERRATA_A9_794073
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_a9_794073
+
+func check_errata_cve_2017_5715
+#if WORKAROUND_CVE_2017_5715
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2017_5715
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A9. Must follow AAPCS.
+ */
+func cortex_a9_errata_report
+ push {r12, lr}
+
+ bl cpu_get_rev_var
+ mov r4, r0
+
+ /*
+	 * Report all errata. The revision-variant information is passed
+	 * to each erratum's check function.
+ */
+ report_errata WORKAROUND_CVE_2017_5715, cortex_a9, cve_2017_5715
+	report_errata ERRATA_A9_794073, cortex_a9, a9_794073
+
+ pop {r12, lr}
+ bx lr
+endfunc cortex_a9_errata_report
+#endif
+
+func cortex_a9_reset_func
+#if IMAGE_BL32 && WORKAROUND_CVE_2017_5715
+ ldr r0, =workaround_bpiall_runtime_exceptions
+ stcopr r0, VBAR
+ stcopr r0, MVBAR
+ /* isb will be applied in the course of the reset func */
+#endif
+ b cortex_a9_enable_smp
+endfunc cortex_a9_reset_func
+
+func cortex_a9_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a9_disable_smp
+endfunc cortex_a9_core_pwr_dwn
+
+func cortex_a9_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a9_disable_smp
+endfunc cortex_a9_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a9, CORTEX_A9_MIDR, \
+ cortex_a9_reset_func, \
+ cortex_a9_core_pwr_dwn, \
+ cortex_a9_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
new file mode 100644
index 0000000..6ed800c
--- /dev/null
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_macros.S>
+#include <common/bl_common.h>
+#include <lib/el3_runtime/cpu_data.h>
+
+#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
+ /*
+ * The reset handler common to all platforms. After a matching
+	 * cpu_ops structure entry is found, the corresponding reset_handler
+ * in the cpu_ops is invoked. The reset handler is invoked very early
+ * in the boot sequence and it is assumed that we can clobber r0 - r10
+ * without the need to follow AAPCS.
+ * Clobbers: r0 - r10
+ */
+ .globl reset_handler
+func reset_handler
+ mov r8, lr
+
+ /* The plat_reset_handler can clobber r0 - r7 */
+ bl plat_reset_handler
+
+ /* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
+ bl get_cpu_ops_ptr
+
+#if ENABLE_ASSERTIONS
+ cmp r0, #0
+ ASM_ASSERT(ne)
+#endif
+
+ /* Get the cpu_ops reset handler */
+ ldr r1, [r0, #CPU_RESET_FUNC]
+ cmp r1, #0
+ mov lr, r8
+ bxne r1
+ bx lr
+endfunc reset_handler
+
+#endif
+
+#ifdef IMAGE_BL32 /* Core and cluster power-down helpers are needed only in BL32 */
+ /*
+ * void prepare_cpu_pwr_dwn(unsigned int power_level)
+ *
+ * Prepare CPU power down function for all platforms. The function takes
+ * a domain level to be powered down as its parameter. After the cpu_ops
+	 * pointer is retrieved from cpu_data, the handler for the requested
+	 * power level is called.
+ */
+ .globl prepare_cpu_pwr_dwn
+func prepare_cpu_pwr_dwn
+ /*
+ * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
+ * power down handler for the last power level
+ */
+ mov r2, #(CPU_MAX_PWR_DWN_OPS - 1)
+ cmp r0, r2
+ movhi r0, r2
+
+ push {r0, lr}
+ bl _cpu_data
+ pop {r2, lr}
+
+ ldr r0, [r0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp r0, #0
+ ASM_ASSERT(ne)
+#endif
+
+ /* Get the appropriate power down handler */
+ mov r1, #CPU_PWR_DWN_OPS
+ add r1, r1, r2, lsl #2
+ ldr r1, [r0, r1]
+#if ENABLE_ASSERTIONS
+ cmp r1, #0
+ ASM_ASSERT(ne)
+#endif
+ bx r1
+endfunc prepare_cpu_pwr_dwn
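+
+/*
+ * Usage sketch (illustrative only): with CPU_MAX_PWR_DWN_OPS == 2 the
+ * handlers are the core- and cluster-level ones, so a cluster power-down
+ * request is made as:
+ *
+ *	mov	r0, #1		@ power level 1: cluster
+ *	bl	prepare_cpu_pwr_dwn
+ *
+ * Any level above the maximum is clamped to the last handler by the
+ * movhi above.
+ */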
+
+ /*
+ * Initializes the cpu_ops_ptr if not already initialized
+ * in cpu_data. This must only be called after the data cache
+ * is enabled. AAPCS is followed.
+ */
+ .globl init_cpu_ops
+func init_cpu_ops
+ push {r4 - r6, lr}
+ bl _cpu_data
+ mov r6, r0
+ ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+ cmp r1, #0
+ bne 1f
+ bl get_cpu_ops_ptr
+#if ENABLE_ASSERTIONS
+ cmp r0, #0
+ ASM_ASSERT(ne)
+#endif
+ str r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
+1:
+ pop {r4 - r6, pc}
+endfunc init_cpu_ops
+
+#endif /* IMAGE_BL32 */
+
+ /*
+	 * The below function returns the cpu_ops structure matching the
+	 * MIDR of the core. It reads the MIDR and finds the matching
+	 * entry in the list of cpu_ops entries. Only the implementer and
+	 * part number fields are used to match the entries.
+	 * Return:
+	 *	r0 - The matching cpu_ops pointer on success
+ * r0 - 0 on failure.
+ * Clobbers: r0 - r5
+ */
+ .globl get_cpu_ops_ptr
+func get_cpu_ops_ptr
+ /* Get the cpu_ops start and end locations */
+ ldr r4, =(__CPU_OPS_START__ + CPU_MIDR)
+ ldr r5, =(__CPU_OPS_END__ + CPU_MIDR)
+
+ /* Initialize the return parameter */
+ mov r0, #0
+
+ /* Read the MIDR_EL1 */
+ ldcopr r2, MIDR
+ ldr r3, =CPU_IMPL_PN_MASK
+
+ /* Retain only the implementation and part number using mask */
+ and r2, r2, r3
+1:
+ /* Check if we have reached end of list */
+ cmp r4, r5
+ bhs error_exit
+
+ /* load the midr from the cpu_ops */
+ ldr r1, [r4], #CPU_OPS_SIZE
+ and r1, r1, r3
+
+	/* Check if the MIDR matches this core's MIDR */
+ cmp r1, r2
+ bne 1b
+
+ /* Subtract the increment and offset to get the cpu-ops pointer */
+ sub r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
+#if ENABLE_ASSERTIONS
+ cmp r0, #0
+ ASM_ASSERT(ne)
+#endif
+error_exit:
+ bx lr
+endfunc get_cpu_ops_ptr
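+
+/*
+ * Worked example: a Cortex-A72 r0p3 core reads MIDR = 0x410FD083. Masking
+ * with CPU_IMPL_PN_MASK keeps only the implementer and part number fields,
+ * giving 0x4100D080, so every revision and variant of the same part
+ * matches the MIDR word stored in its cpu_ops entry.
+ */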
+
+/*
+ * Extract CPU revision and variant, and combine them into a single numeric for
+ * easier comparison.
+ */
+ .globl cpu_get_rev_var
+func cpu_get_rev_var
+ ldcopr r1, MIDR
+
+ /*
+	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
+	 * into r0[7:0] as variant[7:4] and revision[3:0]:
+ *
+ * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
+ * extract r1[3:0] into r0[3:0] retaining other bits.
+ */
+ ubfx r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
+ bfi r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
+ bx lr
+endfunc cpu_get_rev_var
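+
+/*
+ * Worked example: for an r2p1 core, MIDR holds variant = 2 in bits
+ * [23:20] and revision = 1 in bits [3:0]. The ubfx copies MIDR[23:16]
+ * into r0 (0x2F, the low nibble being the architecture field), and the
+ * bfi then overwrites r0[3:0] with the revision, leaving r0 = 0x21, the
+ * same packing used by the constants in the check_errata_* functions.
+ */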
+
+/*
+ * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
+ * application purposes. If the revision-variant is less than or same as a given
+ * value, indicates that errata applies; otherwise not.
+ */
+ .globl cpu_rev_var_ls
+func cpu_rev_var_ls
+ cmp r0, r1
+ movls r0, #ERRATA_APPLIES
+ movhi r0, #ERRATA_NOT_APPLIES
+ bx lr
+endfunc cpu_rev_var_ls
+
+/*
+ * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
+ * application purposes. If the revision-variant is higher than or equal to
+ * the given value, the erratum applies; otherwise it does not.
+ */
+ .globl cpu_rev_var_hs
+func cpu_rev_var_hs
+ cmp r0, r1
+ movge r0, #ERRATA_APPLIES
+ movlt r0, #ERRATA_NOT_APPLIES
+ bx lr
+endfunc cpu_rev_var_hs
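+
+/*
+ * Usage sketch (hypothetical check function, for illustration only): an
+ * erratum affecting revisions up to and including r1p2 would be checked,
+ * with the packed revision-variant already in r0, as:
+ *
+ *	func check_errata_example
+ *		mov	r1, #0x12	@ upper bound: r1p2
+ *		b	cpu_rev_var_ls
+ *	endfunc check_errata_example
+ */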
+
+#if REPORT_ERRATA
+/*
+ * void print_errata_status(void);
+ *
+ * Function to print errata status for CPUs of its class. Must be called only:
+ *
+ * - with the MMU and data caches enabled;
+ * - after cpu_ops have been initialized in per-CPU data.
+ */
+ .globl print_errata_status
+func print_errata_status
+ /* r12 is pushed only for the sake of 8-byte stack alignment */
+ push {r4, r5, r12, lr}
+#ifdef IMAGE_BL1
+ /*
+ * BL1 doesn't have per-CPU data. So retrieve the CPU operations
+ * directly.
+ */
+ bl get_cpu_ops_ptr
+ ldr r0, [r0, #CPU_ERRATA_FUNC]
+ cmp r0, #0
+ blxne r0
+#else
+ /*
+ * Retrieve pointer to cpu_ops, and further, the errata printing
+ * function. If it's non-NULL, jump to the function in turn.
+ */
+ bl _cpu_data
+#if ENABLE_ASSERTIONS
+ cmp r0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp r1, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr r0, [r1, #CPU_ERRATA_FUNC]
+ cmp r0, #0
+ beq 1f
+
+ mov r4, r0
+
+ /*
+ * Load pointers to errata lock and printed flag. Call
+ * errata_needs_reporting to check whether this CPU needs to report
+ * errata status pertaining to its class.
+ */
+ ldr r0, [r1, #CPU_ERRATA_LOCK]
+ ldr r1, [r1, #CPU_ERRATA_PRINTED]
+ bl errata_needs_reporting
+ cmp r0, #0
+ blxne r4
+1:
+#endif
+ pop {r4, r5, r12, pc}
+endfunc print_errata_status
+#endif
diff --git a/lib/cpus/aarch64/a64fx.S b/lib/cpus/aarch64/a64fx.S
new file mode 100644
index 0000000..54c20c3
--- /dev/null
+++ b/lib/cpus/aarch64/a64fx.S
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2022, Fujitsu Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <a64fx.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/*
+ * No CPU-specific power-down maintenance is performed; the explicit ret
+ * prevents execution from falling through into the following code.
+ */
+func a64fx_core_pwr_dwn
+	ret
+endfunc a64fx_core_pwr_dwn
+
+func a64fx_cluster_pwr_dwn
+	ret
+endfunc a64fx_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for A64FX. Must follow AAPCS.
+ */
+func a64fx_errata_report
+ ret
+endfunc a64fx_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cpu specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.a64fx_regs, "aS"
+a64fx_regs: /* The ascii list of register names to be reported */
+ .asciz ""
+
+func a64fx_cpu_reg_dump
+ adr x6, a64fx_regs
+ ret
+endfunc a64fx_cpu_reg_dump
+
+declare_cpu_ops a64fx, A64FX_MIDR, CPU_NO_RESET_FUNC, \
+ a64fx_core_pwr_dwn, \
+ a64fx_cluster_pwr_dwn
+
diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S
new file mode 100644
index 0000000..6291e43
--- /dev/null
+++ b/lib/cpus/aarch64/aem_generic.S
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <aem_generic.h>
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+
+func aem_generic_core_pwr_dwn
+ /* ---------------------------------------------
+ * Disable the Data Cache.
+ * ---------------------------------------------
+ */
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+
+ /* ---------------------------------------------
+	 * The AEM model may implement an L3 cache, in
+	 * which case the L2 caches are private per-core
+	 * caches and a flush from L1 to L2 is not
+	 * sufficient.
+ * ---------------------------------------------
+ */
+ mrs x1, clidr_el1
+
+ /* ---------------------------------------------
+ * Check if L3 cache is implemented.
+ * ---------------------------------------------
+ */
+ tst x1, ((1 << CLIDR_FIELD_WIDTH) - 1) << CTYPE_SHIFT(3)
+
+ /* ---------------------------------------------
+	 * If there is no L3 cache, flush L1 to L2 only.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ b.eq dcsw_op_level1
+
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Flush L1 cache to L2.
+ * ---------------------------------------------
+ */
+ bl dcsw_op_level1
+ mov x30, x18
+
+ /* ---------------------------------------------
+ * Flush L2 cache to L3.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ b dcsw_op_level2
+endfunc aem_generic_core_pwr_dwn
+
+func aem_generic_cluster_pwr_dwn
+ /* ---------------------------------------------
+ * Disable the Data Cache.
+ * ---------------------------------------------
+ */
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+
+ /* ---------------------------------------------
+ * Flush all caches to PoC.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ b dcsw_op_all
+endfunc aem_generic_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for AEM. Must follow AAPCS.
+ */
+func aem_generic_errata_report
+ ret
+endfunc aem_generic_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cpu specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.aem_generic_regs, "aS"
+aem_generic_regs: /* The ascii list of register names to be reported */
+ .asciz "" /* no registers to report */
+
+func aem_generic_cpu_reg_dump
+ adr x6, aem_generic_regs
+ ret
+endfunc aem_generic_cpu_reg_dump
+
+
+/* cpu_ops for Base AEM FVP */
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+ aem_generic_core_pwr_dwn, \
+ aem_generic_cluster_pwr_dwn
+
+/* cpu_ops for Foundation FVP */
+declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, CPU_NO_RESET_FUNC, \
+ aem_generic_core_pwr_dwn, \
+ aem_generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a35.S b/lib/cpus/aarch64/cortex_a35.S
new file mode 100644
index 0000000..be3c652
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a35.S
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a35.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func cortex_a35_disable_dcache
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+ ret
+endfunc cortex_a35_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a35_disable_smp
+ mrs x0, CORTEX_A35_CPUECTLR_EL1
+ bic x0, x0, #CORTEX_A35_CPUECTLR_SMPEN_BIT
+ msr CORTEX_A35_CPUECTLR_EL1, x0
+ isb
+ dsb sy
+ ret
+endfunc cortex_a35_disable_smp
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A35 Errata #855472.
+	 * This applies only to revision r0p0 of Cortex A35.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------
+ */
+func errata_a35_855472_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_855472
+ cbz x0, 1f
+ mrs x1, CORTEX_A35_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A35_CPUACTLR_EL1_ENDCCASCI
+ msr CORTEX_A35_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a35_855472_wa
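+
+/*
+ * Note on the pattern above, which repeats across these workarounds: x17
+ * preserves the return address over the bl to the check function (the
+ * helpers avoid using the stack), and the workaround body is skipped when
+ * the check returns ERRATA_NOT_APPLIES (zero).
+ */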
+
+func check_errata_855472
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_855472
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A35.
+ * Clobbers: x0
+ * -------------------------------------------------
+ */
+func cortex_a35_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+
+#if ERRATA_A35_855472
+ bl errata_a35_855472_wa
+#endif
+
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A35_CPUECTLR_EL1
+ orr x0, x0, #CORTEX_A35_CPUECTLR_SMPEN_BIT
+ msr CORTEX_A35_CPUECTLR_EL1, x0
+ isb
+ ret x19
+endfunc cortex_a35_reset_func
+
+func cortex_a35_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a35_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a35_disable_smp
+endfunc cortex_a35_core_pwr_dwn
+
+func cortex_a35_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a35_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a35_disable_smp
+endfunc cortex_a35_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A35. Must follow AAPCS.
+ */
+func cortex_a35_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+	 * Report all errata. The revision-variant information is passed
+	 * to each erratum's check function.
+ */
+ report_errata ERRATA_A35_855472, cortex_a35, 855472
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a35_errata_report
+#endif
+
+
+ /* ---------------------------------------------
+ * This function provides cortex_a35 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a35_regs, "aS"
+cortex_a35_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a35_cpu_reg_dump
+ adr x6, cortex_a35_regs
+ mrs x8, CORTEX_A35_CPUECTLR_EL1
+ ret
+endfunc cortex_a35_cpu_reg_dump
+
+declare_cpu_ops cortex_a35, CORTEX_A35_MIDR, \
+ cortex_a35_reset_func, \
+ cortex_a35_core_pwr_dwn, \
+ cortex_a35_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a510.S b/lib/cpus/aarch64/cortex_a510.S
new file mode 100644
index 0000000..f7f8027
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a510.S
@@ -0,0 +1,500 @@
+/*
+ * Copyright (c) 2022, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a510.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-A510 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-A510 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex-A510 Errata #1922240.
+ * This applies only to revision r0p0 (fixed in r0p1)
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * --------------------------------------------------
+ */
+func errata_cortex_a510_1922240_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_1922240
+ cbz x0, 1f
+
+ /* Apply the workaround by setting IMP_CMPXACTLR_EL1[11:10] = 0b11. */
+ mrs x0, CORTEX_A510_CMPXACTLR_EL1
+ mov x1, #3
+ bfi x0, x1, #10, #2
+ msr CORTEX_A510_CMPXACTLR_EL1, x0
+
+1:
+ ret x17
+endfunc errata_cortex_a510_1922240_wa
+
+func check_errata_1922240
+ /* Applies to r0p0 only */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_1922240
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex-A510 Errata #2288014.
+ * This applies only to revisions r0p0, r0p1, r0p2,
+ * r0p3 and r1p0. (fixed in r1p1)
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * --------------------------------------------------
+ */
+func errata_cortex_a510_2288014_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2288014
+ cbz x0, 1f
+
+ /* Apply the workaround by setting IMP_CPUACTLR_EL1[18] = 0b1. */
+ mrs x0, CORTEX_A510_CPUACTLR_EL1
+ mov x1, #1
+ bfi x0, x1, #18, #1
+ msr CORTEX_A510_CPUACTLR_EL1, x0
+
+1:
+ ret x17
+endfunc errata_cortex_a510_2288014_wa
+
+func check_errata_2288014
+ /* Applies to r1p0 and below */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_2288014
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex-A510 Errata #2042739.
+ * This applies only to revisions r0p0, r0p1 and r0p2.
+ * (fixed in r0p3)
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * --------------------------------------------------
+ */
+func errata_cortex_a510_2042739_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2042739
+ cbz x0, 1f
+
+ /* Apply the workaround by disabling ReadPreferUnique. */
+ mrs x0, CORTEX_A510_CPUECTLR_EL1
+ mov x1, #CORTEX_A510_CPUECTLR_EL1_READPREFERUNIQUE_DISABLE
+ bfi x0, x1, #CORTEX_A510_CPUECTLR_EL1_READPREFERUNIQUE_SHIFT, #1
+ msr CORTEX_A510_CPUECTLR_EL1, x0
+
+1:
+ ret x17
+endfunc errata_cortex_a510_2042739_wa
+
+func check_errata_2042739
+ /* Applies to revisions r0p0 - r0p2 */
+ mov x1, #0x02
+ b cpu_rev_var_ls
+endfunc check_errata_2042739
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex-A510 Errata #2041909.
+ * This applies only to revision r0p2 and it is fixed in
+ * r0p3. The issue is also present in r0p0 and r0p1 but
+ * there is no workaround in those revisions.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x2, x17
+ * --------------------------------------------------
+ */
+func errata_cortex_a510_2041909_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2041909
+ cbz x0, 1f
+
+ /* Apply workaround */
+ mov x0, xzr
+ msr S3_6_C15_C4_0, x0
+ isb
+
+ mov x0, #0x8500000
+ msr S3_6_C15_C4_2, x0
+
+ mov x0, #0x1F700000
+ movk x0, #0x8, lsl #32
+ msr S3_6_C15_C4_3, x0
+
+ mov x0, #0x3F1
+ movk x0, #0x110, lsl #16
+ msr S3_6_C15_C4_1, x0
+ isb
+
+1:
+ ret x17
+endfunc errata_cortex_a510_2041909_wa
+
+func check_errata_2041909
+ /* Applies only to revision r0p2 */
+ mov x1, #0x02
+ mov x2, #0x02
+ b cpu_rev_var_range
+endfunc check_errata_2041909
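+
+/*
+ * Unlike cpu_rev_var_ls, cpu_rev_var_range takes a closed range: x1 is the
+ * lower bound and x2 the upper bound, and ERRATA_APPLIES is returned only
+ * when x1 <= rev_var <= x2. Here both bounds are 0x02, so only r0p2
+ * matches.
+ */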
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex-A510 Errata #2250311.
+ * This applies only to revisions r0p0, r0p1, r0p2,
+ * r0p3 and r1p0, and is fixed in r1p1.
+ * This workaround is not a typical errata fix. MPMM
+ * is disabled here, but this conflicts with the BL31
+ * MPMM support. So in addition to simply disabling
+ * the feature, a flag is set in the MPMM library
+ * indicating that it should not be enabled even if
+ * ENABLE_MPMM=1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * --------------------------------------------------
+ */
+func errata_cortex_a510_2250311_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2250311
+ cbz x0, 1f
+
+ /* Disable MPMM */
+ mrs x0, CPUMPMMCR_EL3
+ bfm x0, xzr, #0, #0 /* bfc instruction does not work in GCC */
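+	/*
+	 * The bfm above copies bit 0 of xzr into bit 0 of x0, clearing
+	 * the MPMM enable bit while leaving the rest of CPUMPMMCR_EL3
+	 * intact.
+	 */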
+ msr CPUMPMMCR_EL3, x0
+
+#if ENABLE_MPMM && IMAGE_BL31
+ /* If ENABLE_MPMM is set, tell the runtime lib to skip enabling it. */
+ bl mpmm_errata_disable
+#endif
+
+1:
+ ret x17
+endfunc errata_cortex_a510_2250311_wa
+
+func check_errata_2250311
+ /* Applies to r1p0 and lower */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_2250311
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex-A510 Errata #2218950.
+ * This applies only to revisions r0p0, r0p1, r0p2,
+ * r0p3 and r1p0, and is fixed in r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * --------------------------------------------------
+ */
+func errata_cortex_a510_2218950_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2218950
+ cbz x0, 1f
+
+ /* Source register for BFI */
+ mov x1, #1
+
+ /* Set bit 18 in CPUACTLR_EL1 */
+ mrs x0, CORTEX_A510_CPUACTLR_EL1
+ bfi x0, x1, #18, #1
+ msr CORTEX_A510_CPUACTLR_EL1, x0
+
+ /* Set bit 25 in CMPXACTLR_EL1 */
+ mrs x0, CORTEX_A510_CMPXACTLR_EL1
+ bfi x0, x1, #25, #1
+ msr CORTEX_A510_CMPXACTLR_EL1, x0
+
+1:
+ ret x17
+endfunc errata_cortex_a510_2218950_wa
+
+func check_errata_2218950
+ /* Applies to r1p0 and lower */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_2218950
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex-A510 Errata #2172148.
+ * This applies only to revisions r0p0, r0p1, r0p2,
+ * r0p3 and r1p0, and is fixed in r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * --------------------------------------------------
+ */
+func errata_cortex_a510_2172148_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2172148
+ cbz x0, 1f
+
+ /*
+ * Force L2 allocation of transient lines by setting
+ * CPUECTLR_EL1.RSCTL=0b01 and CPUECTLR_EL1.NTCTL=0b01.
+ */
+ mrs x0, CORTEX_A510_CPUECTLR_EL1
+ mov x1, #1
+ bfi x0, x1, #CORTEX_A510_CPUECTLR_EL1_RSCTL_SHIFT, #2
+ bfi x0, x1, #CORTEX_A510_CPUECTLR_EL1_NTCTL_SHIFT, #2
+ msr CORTEX_A510_CPUECTLR_EL1, x0
+
+1:
+ ret x17
+endfunc errata_cortex_a510_2172148_wa
+
+func check_errata_2172148
+ /* Applies to r1p0 and lower */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_2172148
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex-A510 Errata #2347730.
+ * This applies to revisions r0p0 - r0p3, r1p0, r1p1.
+ * It is fixed in r1p2.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * ----------------------------------------------------
+ */
+func errata_cortex_a510_2347730_wa
+ mov x17, x30
+ bl check_errata_2347730
+ cbz x0, 1f
+
+ /*
+ * Set CPUACTLR_EL1[17] to 1'b1, which disables
+ * specific microarchitectural clock gating
+ * behaviour.
+ */
+ mrs x1, CORTEX_A510_CPUACTLR_EL1
+ orr x1, x1, CORTEX_A510_CPUACTLR_EL1_BIT_17
+ msr CORTEX_A510_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_cortex_a510_2347730_wa
+
+func check_errata_2347730
+ /* Applies to revisions r1p1 and lower. */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_2347730
+
+ /*---------------------------------------------------
+ * Errata Workaround for Cortex-A510 Errata #2371937.
+ * This applies to revisions r1p1 and lower, and is
+ * fixed in r1p2.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ *---------------------------------------------------
+ */
+func errata_cortex_a510_2371937_wa
+ mov x17, x30
+ bl check_errata_2371937
+ cbz x0, 1f
+
+ /*
+	 * Cacheable atomic operations can be forced
+	 * to execute near (i.e. in the CPU rather
+	 * than at the interconnect) by setting
+	 * IMP_CPUECTLR_EL1.ATOM = 0b010. ATOM occupies
+	 * bits [40:38] of CPUECTLR_EL1.
+ */
+ mrs x0, CORTEX_A510_CPUECTLR_EL1
+ mov x1, CORTEX_A510_CPUECTLR_EL1_ATOM_EXECALLINSTRNEAR
+ bfi x0, x1, CORTEX_A510_CPUECTLR_EL1_ATOM, #3
+ msr CORTEX_A510_CPUECTLR_EL1, x0
+1:
+ ret x17
+endfunc errata_cortex_a510_2371937_wa
+
+func check_errata_2371937
+ /* Applies to r1p1 and lower */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_2371937
+
+ /* ------------------------------------------------------
+ * Errata Workaround for Cortex-A510 Errata #2666669
+ * This applies to revisions r1p1 and lower, and is fixed
+ * in r1p2.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * ------------------------------------------------------
+ */
+func errata_cortex_a510_2666669_wa
+ mov x17, x30
+ bl check_errata_2666669
+ cbz x0, 1f
+
+ /*
+ * Workaround will set IMP_CPUACTLR_EL1[38]
+ * to 0b1.
+ */
+ mrs x1, CORTEX_A510_CPUACTLR_EL1
+ orr x1, x1, CORTEX_A510_CPUACTLR_EL1_BIT_38
+ msr CORTEX_A510_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_cortex_a510_2666669_wa
+
+func check_errata_2666669
+ /* Applies to r1p1 and lower */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_2666669
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_a510_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_A510_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A510_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_A510_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a510_core_pwr_dwn
+
+ /*
+ * Errata printing function for Cortex-A510. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func cortex_a510_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+	 * Report all errata. The revision-variant information is passed
+	 * to each erratum's check function.
+ */
+ report_errata ERRATA_A510_1922240, cortex_a510, 1922240
+ report_errata ERRATA_A510_2041909, cortex_a510, 2041909
+ report_errata ERRATA_A510_2042739, cortex_a510, 2042739
+ report_errata ERRATA_A510_2172148, cortex_a510, 2172148
+ report_errata ERRATA_A510_2218950, cortex_a510, 2218950
+ report_errata ERRATA_A510_2250311, cortex_a510, 2250311
+ report_errata ERRATA_A510_2288014, cortex_a510, 2288014
+ report_errata ERRATA_A510_2347730, cortex_a510, 2347730
+ report_errata ERRATA_A510_2371937, cortex_a510, 2371937
+ report_errata ERRATA_A510_2666669, cortex_a510, 2666669
+ report_errata ERRATA_DSU_2313941, cortex_a510, dsu_2313941
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a510_errata_report
+#endif
+
+func cortex_a510_reset_func
+ mov x19, x30
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+ /* Get the CPU revision and stash it in x18. */
+ bl cpu_get_rev_var
+ mov x18, x0
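+
+	/*
+	 * x18 survives the workaround calls below (their clobber lists do
+	 * not include it), so the stashed revision is re-seeded into x0
+	 * before each call.
+	 */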
+
+#if ERRATA_DSU_2313941
+ bl errata_dsu_2313941_wa
+#endif
+
+#if ERRATA_A510_1922240
+ mov x0, x18
+ bl errata_cortex_a510_1922240_wa
+#endif
+
+#if ERRATA_A510_2288014
+ mov x0, x18
+ bl errata_cortex_a510_2288014_wa
+#endif
+
+#if ERRATA_A510_2042739
+ mov x0, x18
+ bl errata_cortex_a510_2042739_wa
+#endif
+
+#if ERRATA_A510_2041909
+ mov x0, x18
+ bl errata_cortex_a510_2041909_wa
+#endif
+
+#if ERRATA_A510_2250311
+ mov x0, x18
+ bl errata_cortex_a510_2250311_wa
+#endif
+
+#if ERRATA_A510_2218950
+ mov x0, x18
+ bl errata_cortex_a510_2218950_wa
+#endif
+
+#if ERRATA_A510_2371937
+ mov x0, x18
+ bl errata_cortex_a510_2371937_wa
+#endif
+
+#if ERRATA_A510_2172148
+ mov x0, x18
+ bl errata_cortex_a510_2172148_wa
+#endif
+
+#if ERRATA_A510_2347730
+ mov x0, x18
+ bl errata_cortex_a510_2347730_wa
+#endif
+
+#if ERRATA_A510_2666669
+ mov x0, x18
+ bl errata_cortex_a510_2666669_wa
+#endif
+
+ isb
+ ret x19
+endfunc cortex_a510_reset_func
+
+ /* ---------------------------------------------
+ * This function provides Cortex-A510 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a510_regs, "aS"
+cortex_a510_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a510_cpu_reg_dump
+ adr x6, cortex_a510_regs
+ mrs x8, CORTEX_A510_CPUECTLR_EL1
+ ret
+endfunc cortex_a510_cpu_reg_dump
+
+declare_cpu_ops cortex_a510, CORTEX_A510_MIDR, \
+ cortex_a510_reset_func, \
+ cortex_a510_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
new file mode 100644
index 0000000..df11d86
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -0,0 +1,409 @@
+/*
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cortex_a53.h>
+#include <cpu_macros.S>
+#include <lib/cpus/errata_report.h>
+#include <plat_macros.S>
+
+#if A53_DISABLE_NON_TEMPORAL_HINT
+#undef ERRATA_A53_836870
+#define ERRATA_A53_836870 1
+#endif
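+
+/*
+ * For example, building with A53_DISABLE_NON_TEMPORAL_HINT=1 force-enables
+ * the 836870 workaround here even if ERRATA_A53_836870 was not set for the
+ * platform.
+ */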
+
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func cortex_a53_disable_dcache
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+ ret
+endfunc cortex_a53_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a53_disable_smp
+ mrs x0, CORTEX_A53_ECTLR_EL1
+ bic x0, x0, #CORTEX_A53_ECTLR_SMP_BIT
+ msr CORTEX_A53_ECTLR_EL1, x0
+ isb
+ dsb sy
+ ret
+endfunc cortex_a53_disable_smp
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #819472.
+ * This applies only to revision <= r0p1 of Cortex A53.
+	 * Due to the nature of the erratum, the workaround is applied
+	 * unconditionally when built in, so it is reported as applicable.
+ * ---------------------------------------------------
+ */
+func check_errata_819472
+#if ERRATA_A53_819472
+ mov x0, #ERRATA_APPLIES
+ ret
+#else
+ mov x1, #0x01
+ b cpu_rev_var_ls
+#endif
+endfunc check_errata_819472
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #824069.
+ * This applies only to revision <= r0p2 of Cortex A53.
+	 * Due to the nature of the erratum, the workaround is applied
+	 * unconditionally when built in, so it is reported as applicable.
+ * ---------------------------------------------------
+ */
+func check_errata_824069
+#if ERRATA_A53_824069
+ mov x0, #ERRATA_APPLIES
+ ret
+#else
+ mov x1, #0x02
+ b cpu_rev_var_ls
+#endif
+endfunc check_errata_824069
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #826319.
+ * This applies only to revision <= r0p2 of Cortex A53.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a53_826319_wa
+ /*
+ * Compare x0 against revision r0p2
+ */
+ mov x17, x30
+ bl check_errata_826319
+ cbz x0, 1f
+ mrs x1, CORTEX_A53_L2ACTLR_EL1
+ bic x1, x1, #CORTEX_A53_L2ACTLR_ENABLE_UNIQUECLEAN
+ orr x1, x1, #CORTEX_A53_L2ACTLR_DISABLE_CLEAN_PUSH
+ msr CORTEX_A53_L2ACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a53_826319_wa
+
+func check_errata_826319
+ mov x1, #0x02
+ b cpu_rev_var_ls
+endfunc check_errata_826319
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #827319.
+ * This applies only to revision <= r0p2 of Cortex A53.
+	 * Due to the nature of the erratum, the workaround is applied
+	 * unconditionally when built in, so it is reported as applicable.
+ * ---------------------------------------------------
+ */
+func check_errata_827319
+#if ERRATA_A53_827319
+ mov x0, #ERRATA_APPLIES
+ ret
+#else
+ mov x1, #0x02
+ b cpu_rev_var_ls
+#endif
+endfunc check_errata_827319
+
+ /* ---------------------------------------------------------------------
+ * Disable the cache non-temporal hint.
+ *
+ * This ignores the Transient allocation hint in the MAIR and treats
+ * allocations the same as non-transient allocation types. As a result,
+ * the LDNP and STNP instructions in AArch64 behave the same as the
+ * equivalent LDP and STP instructions.
+ *
+ * This is relevant only for revisions <= r0p3 of Cortex-A53.
+ * From r0p4 and onwards, the bit to disable the hint is enabled by
+ * default at reset.
+ *
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------------
+ */
+func a53_disable_non_temporal_hint
+ /*
+ * Compare x0 against revision r0p3
+ */
+ mov x17, x30
+ bl check_errata_disable_non_temporal_hint
+ cbz x0, 1f
+ mrs x1, CORTEX_A53_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A53_CPUACTLR_EL1_DTAH
+ msr CORTEX_A53_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc a53_disable_non_temporal_hint
+
+func check_errata_disable_non_temporal_hint
+ mov x1, #0x03
+ b cpu_rev_var_ls
+endfunc check_errata_disable_non_temporal_hint
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #855873.
+ *
+ * This applies only to revisions >= r0p3 of Cortex A53.
+ * Earlier revisions of the core are affected as well, but don't
+ * have the chicken bit in the CPUACTLR register. It is expected that
+ * the rich OS takes care of that, especially as the workaround is
+	 * shared with other errata in those revisions of the CPU.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a53_855873_wa
+ /*
+ * Compare x0 against revision r0p3 and higher
+ */
+ mov x17, x30
+ bl check_errata_855873
+ cbz x0, 1f
+
+ mrs x1, CORTEX_A53_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A53_CPUACTLR_EL1_ENDCCASCI
+ msr CORTEX_A53_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a53_855873_wa
+
+func check_errata_855873
+ mov x1, #0x03
+ b cpu_rev_var_hs
+endfunc check_errata_855873
+
+/*
+ * Errata workaround for Cortex A53 Errata #835769.
+ * This applies to revisions <= r0p4 of Cortex A53.
+ * This workaround is statically enabled at build time.
+ */
+func check_errata_835769
+ cmp x0, #0x04
+ b.hi errata_not_applies
+ /*
+ * Fix potentially available for revisions r0p2, r0p3 and r0p4.
+	 * If r0p2, r0p3 or r0p4, check for the fix in REVIDR; else exit.
+ */
+ cmp x0, #0x01
+ mov x0, #ERRATA_APPLIES
+ b.ls exit_check_errata_835769
+ /* Load REVIDR. */
+ mrs x1, revidr_el1
+ /* If REVIDR[7] is set (fix exists) set ERRATA_NOT_APPLIES, else exit. */
+ tbz x1, #7, exit_check_errata_835769
+errata_not_applies:
+ mov x0, #ERRATA_NOT_APPLIES
+exit_check_errata_835769:
+ ret
+endfunc check_errata_835769
+
+/*
+ * Errata workaround for Cortex A53 Errata #843419.
+ * This applies to revisions <= r0p4 of Cortex A53.
+ * This workaround is statically enabled at build time.
+ */
+func check_errata_843419
+ mov x1, #ERRATA_APPLIES
+ mov x2, #ERRATA_NOT_APPLIES
+ cmp x0, #0x04
+ csel x0, x1, x2, ls
+ /*
+ * Fix potentially available for revision r0p4.
+	 * If r0p4, check for the fix in REVIDR; else exit.
+ */
+ b.ne exit_check_errata_843419
+ /* Load REVIDR. */
+ mrs x3, revidr_el1
+ /* If REVIDR[8] is set (fix exists) set ERRATA_NOT_APPLIES, else exit. */
+ tbz x3, #8, exit_check_errata_843419
+ mov x0, x2
+exit_check_errata_843419:
+ ret
+endfunc check_errata_843419
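+
+/*
+ * Worked example: for an r0p4 core, x0 = 0x04 on entry, so the csel
+ * selects ERRATA_APPLIES and the flags from the cmp indicate equality;
+ * REVIDR_EL1 is then consulted and, if bit 8 advertises the fix, the
+ * result is replaced with ERRATA_NOT_APPLIES. For r1p0 (x0 = 0x10), the
+ * csel selects ERRATA_NOT_APPLIES and the b.ne exits immediately.
+ */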
+
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A53 Errata #1530924.
+ * This applies to all revisions of Cortex A53.
+ * --------------------------------------------------
+ */
+func check_errata_1530924
+#if ERRATA_A53_1530924
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_1530924
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A53.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
+func cortex_a53_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+
+#if ERRATA_A53_826319
+ mov x0, x18
+ bl errata_a53_826319_wa
+#endif
+
+#if ERRATA_A53_836870
+ mov x0, x18
+ bl a53_disable_non_temporal_hint
+#endif
+
+#if ERRATA_A53_855873
+ mov x0, x18
+ bl errata_a53_855873_wa
+#endif
+
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A53_ECTLR_EL1
+ orr x0, x0, #CORTEX_A53_ECTLR_SMP_BIT
+ msr CORTEX_A53_ECTLR_EL1, x0
+ isb
+ ret x19
+endfunc cortex_a53_reset_func
+
+func cortex_a53_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a53_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a53_disable_smp
+endfunc cortex_a53_core_pwr_dwn
+
+func cortex_a53_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a53_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a53_disable_smp
+endfunc cortex_a53_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A53. Must follow AAPCS.
+ */
+func cortex_a53_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+	 * Report all errata. The revision-variant information is passed
+	 * to each erratum's check function.
+ */
+ report_errata ERRATA_A53_819472, cortex_a53, 819472
+ report_errata ERRATA_A53_824069, cortex_a53, 824069
+ report_errata ERRATA_A53_826319, cortex_a53, 826319
+ report_errata ERRATA_A53_827319, cortex_a53, 827319
+ report_errata ERRATA_A53_835769, cortex_a53, 835769
+ report_errata ERRATA_A53_836870, cortex_a53, disable_non_temporal_hint
+ report_errata ERRATA_A53_843419, cortex_a53, 843419
+ report_errata ERRATA_A53_855873, cortex_a53, 855873
+ report_errata ERRATA_A53_1530924, cortex_a53, 1530924
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a53_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_a53 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a53_regs, "aS"
+cortex_a53_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", "cpumerrsr_el1", "l2merrsr_el1", \
+ "cpuactlr_el1", ""
+
+func cortex_a53_cpu_reg_dump
+ adr x6, cortex_a53_regs
+ mrs x8, CORTEX_A53_ECTLR_EL1
+ mrs x9, CORTEX_A53_MERRSR_EL1
+ mrs x10, CORTEX_A53_L2MERRSR_EL1
+ mrs x11, CORTEX_A53_CPUACTLR_EL1
+ ret
+endfunc cortex_a53_cpu_reg_dump
+
+declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
+ cortex_a53_reset_func, \
+ cortex_a53_core_pwr_dwn, \
+ cortex_a53_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S
new file mode 100644
index 0000000..0e0388b
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a55.S
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a55.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-A55 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+ .globl cortex_a55_reset_func
+ .globl cortex_a55_core_pwr_dwn
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A55 Errata #768277.
+ * This applies only to revision r0p0 of Cortex A55.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a55_768277_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_768277
+ cbz x0, 1f
+ mrs x1, CORTEX_A55_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A55_CPUACTLR_EL1_DISABLE_DUAL_ISSUE
+ msr CORTEX_A55_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a55_768277_wa
+
+func check_errata_768277
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_768277
+
+ /* ------------------------------------------------------------------
+ * Errata Workaround for Cortex A55 Errata #778703.
+ * This applies only to revision r0p0 of Cortex A55 where the L2 cache
+ * is not configured.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ------------------------------------------------------------------
+ */
+func errata_a55_778703_wa
+ /*
+ * Compare x0 against revision r0p0 and check that no private L2 cache
+ * is configured
+ */
+ mov x17, x30
+ bl check_errata_778703
+ cbz x0, 1f
+ mrs x1, CORTEX_A55_CPUECTLR_EL1
+ orr x1, x1, #CORTEX_A55_CPUECTLR_EL1_L1WSCTL
+ msr CORTEX_A55_CPUECTLR_EL1, x1
+ mrs x1, CORTEX_A55_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A55_CPUACTLR_EL1_DISABLE_WRITE_STREAMING
+ msr CORTEX_A55_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a55_778703_wa
+
+func check_errata_778703
+ mov x16, x30
+ mov x1, #0x00
+ bl cpu_rev_var_ls
+ /*
+ * Check that no private L2 cache is configured
+ */
+ mrs x1, CORTEX_A55_CLIDR_EL1
+ and x1, x1, CORTEX_A55_CLIDR_EL1_CTYPE3
+ cmp x1, #0
+ mov x2, #ERRATA_NOT_APPLIES
+ csel x0, x0, x2, eq
+ ret x16
+endfunc check_errata_778703
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A55 Errata #798797.
+ * This applies only to revision r0p0 of Cortex A55.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a55_798797_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_798797
+ cbz x0, 1f
+ mrs x1, CORTEX_A55_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A55_CPUACTLR_EL1_DISABLE_L1_PAGEWALKS
+ msr CORTEX_A55_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a55_798797_wa
+
+func check_errata_798797
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_798797
+
+ /* --------------------------------------------------------------------
+ * Errata Workaround for Cortex A55 Errata #846532.
+ * This applies only to revisions <= r0p1 of Cortex A55.
+ * Disabling dual-issue has a small impact on performance. Disabling a
+ * power optimization feature is an alternate workaround with no impact
+ * on performance but with an increase in power consumption (see errata
+ * notice).
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------------------------
+ */
+func errata_a55_846532_wa
+ /*
+ * Compare x0 against revision r0p1
+ */
+ mov x17, x30
+ bl check_errata_846532
+ cbz x0, 1f
+ mrs x1, CORTEX_A55_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A55_CPUACTLR_EL1_DISABLE_DUAL_ISSUE
+ msr CORTEX_A55_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a55_846532_wa
+
+func check_errata_846532
+ mov x1, #0x01
+ b cpu_rev_var_ls
+endfunc check_errata_846532
+
+ /* -----------------------------------------------------
+ * Errata Workaround for Cortex A55 Errata #903758.
+ * This applies only to revisions <= r0p1 of Cortex A55.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * -----------------------------------------------------
+ */
+func errata_a55_903758_wa
+ /*
+ * Compare x0 against revision r0p1
+ */
+ mov x17, x30
+ bl check_errata_903758
+ cbz x0, 1f
+ mrs x1, CORTEX_A55_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A55_CPUACTLR_EL1_DISABLE_L1_PAGEWALKS
+ msr CORTEX_A55_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a55_903758_wa
+
+func check_errata_903758
+ mov x1, #0x01
+ b cpu_rev_var_ls
+endfunc check_errata_903758
+
+ /* -----------------------------------------------------
+ * Errata Workaround for Cortex A55 Errata #1221012.
+ * This applies only to revisions <= r1p0 of Cortex A55.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * -----------------------------------------------------
+ */
+func errata_a55_1221012_wa
+ /*
+ * Compare x0 against revision r1p0
+ */
+ mov x17, x30
+ bl check_errata_1221012
+ cbz x0, 1f
+ mov x0, #0x0020
+ movk x0, #0x0850, lsl #16
+ msr CPUPOR_EL3, x0
+ mov x0, #0x0000
+ movk x0, #0x1FF0, lsl #16
+ movk x0, #0x2, lsl #32
+ msr CPUPMR_EL3, x0
+ mov x0, #0x03fd
+ movk x0, #0x0110, lsl #16
+ msr CPUPCR_EL3, x0
+ mov x0, #0x1
+ msr CPUPSELR_EL3, x0
+ mov x0, #0x0040
+ movk x0, #0x08D0, lsl #16
+ msr CPUPOR_EL3, x0
+ mov x0, #0x0040
+ movk x0, #0x1FF0, lsl #16
+ movk x0, #0x2, lsl #32
+ msr CPUPMR_EL3, x0
+ mov x0, #0x03fd
+ movk x0, #0x0110, lsl #16
+ msr CPUPCR_EL3, x0
+ isb
+1:
+ ret x17
+endfunc errata_a55_1221012_wa
+
+func check_errata_1221012
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1221012
+
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A55 Errata #1530923.
+ * This applies to all revisions of Cortex A55.
+ * --------------------------------------------------
+ */
+func check_errata_1530923
+#if ERRATA_A55_1530923
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_1530923
+
+func cortex_a55_reset_func
+ mov x19, x30
+
+#if ERRATA_DSU_798953
+ bl errata_dsu_798953_wa
+#endif
+
+#if ERRATA_DSU_936184
+ bl errata_dsu_936184_wa
+#endif
+
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A55_768277
+ mov x0, x18
+ bl errata_a55_768277_wa
+#endif
+
+#if ERRATA_A55_778703
+ mov x0, x18
+ bl errata_a55_778703_wa
+#endif
+
+#if ERRATA_A55_798797
+ mov x0, x18
+ bl errata_a55_798797_wa
+#endif
+
+#if ERRATA_A55_846532
+ mov x0, x18
+ bl errata_a55_846532_wa
+#endif
+
+#if ERRATA_A55_903758
+ mov x0, x18
+ bl errata_a55_903758_wa
+#endif
+
+#if ERRATA_A55_1221012
+ mov x0, x18
+ bl errata_a55_1221012_wa
+#endif
+
+ ret x19
+endfunc cortex_a55_reset_func
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_a55_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A55_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A55_CORE_PWRDN_EN_MASK
+ msr CORTEX_A55_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a55_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A55. Must follow AAPCS and can use the stack.
+ */
+func cortex_a55_errata_report
+ stp x8, x30, [sp, #-16]!
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+	 * Report all errata. The revision-variant information is kept in x8,
+	 * where "report_errata" expects it and does not corrupt it.
+ */
+ report_errata ERRATA_DSU_798953, cortex_a55, dsu_798953
+ report_errata ERRATA_DSU_936184, cortex_a55, dsu_936184
+ report_errata ERRATA_A55_768277, cortex_a55, 768277
+ report_errata ERRATA_A55_778703, cortex_a55, 778703
+ report_errata ERRATA_A55_798797, cortex_a55, 798797
+ report_errata ERRATA_A55_846532, cortex_a55, 846532
+ report_errata ERRATA_A55_903758, cortex_a55, 903758
+ report_errata ERRATA_A55_1221012, cortex_a55, 1221012
+ report_errata ERRATA_A55_1530923, cortex_a55, 1530923
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a55_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_a55 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a55_regs, "aS"
+cortex_a55_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a55_cpu_reg_dump
+ adr x6, cortex_a55_regs
+ mrs x8, CORTEX_A55_CPUECTLR_EL1
+ ret
+endfunc cortex_a55_cpu_reg_dump
+
+declare_cpu_ops cortex_a55, CORTEX_A55_MIDR, \
+ cortex_a55_reset_func, \
+ cortex_a55_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
new file mode 100644
index 0000000..3766ec7
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -0,0 +1,686 @@
+/*
+ * Copyright (c) 2014-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cortex_a57.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_dcache
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+ ret
+endfunc cortex_a57_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable all types of L2 prefetches.
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_l2_prefetch
+ mrs x0, CORTEX_A57_ECTLR_EL1
+ orr x0, x0, #CORTEX_A57_ECTLR_DIS_TWD_ACC_PFTCH_BIT
+ mov x1, #CORTEX_A57_ECTLR_L2_IPFTCH_DIST_MASK
+ orr x1, x1, #CORTEX_A57_ECTLR_L2_DPFTCH_DIST_MASK
+ bic x0, x0, x1
+ msr CORTEX_A57_ECTLR_EL1, x0
+ isb
+ dsb ish
+ ret
+endfunc cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_smp
+ mrs x0, CORTEX_A57_ECTLR_EL1
+ bic x0, x0, #CORTEX_A57_ECTLR_SMP_BIT
+ msr CORTEX_A57_ECTLR_EL1, x0
+ ret
+endfunc cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Disable debug interfaces
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_ext_debug
+ mov x0, #1
+ msr osdlr_el1, x0
+ isb
+#if ERRATA_A57_817169
+ /*
+ * Invalidate any TLB address
+ */
+ mov x0, #0
+ tlbi vae3, x0
+#endif
+ dsb sy
+ ret
+endfunc cortex_a57_disable_ext_debug
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #806969.
+ * This applies only to revision r0p0 of Cortex A57.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a57_806969_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_806969
+ cbz x0, 1f
+ mrs x1, CORTEX_A57_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A57_CPUACTLR_EL1_NO_ALLOC_WBWA
+ msr CORTEX_A57_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a57_806969_wa
+
+func check_errata_806969
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_806969
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #813419.
+ * This applies only to revision r0p0 of Cortex A57.
+ * ---------------------------------------------------
+ */
+func check_errata_813419
+ /*
+ * Even though this is only needed for revision r0p0, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov x0, #ERRATA_APPLIES
+ ret
+endfunc check_errata_813419
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #813420.
+ * This applies only to revision r0p0 of Cortex A57.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------
+ */
+func errata_a57_813420_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_813420
+ cbz x0, 1f
+ mrs x1, CORTEX_A57_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A57_CPUACTLR_EL1_DCC_AS_DCCI
+ msr CORTEX_A57_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a57_813420_wa
+
+func check_errata_813420
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_813420
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #814670.
+ * This applies only to revision r0p0 of Cortex A57.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------
+ */
+func errata_a57_814670_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_814670
+ cbz x0, 1f
+ mrs x1, CORTEX_A57_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A57_CPUACTLR_EL1_DIS_DMB_NULLIFICATION
+ msr CORTEX_A57_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a57_814670_wa
+
+func check_errata_814670
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_814670
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex A57 Erratum #817169.
+ * This applies only to revision <= r0p1 of Cortex A57.
+ * ----------------------------------------------------
+ */
+func check_errata_817169
+ /*
+ * Even though this is only needed for revision <= r0p1, it
+ * is always applied because of the low cost of the workaround.
+ */
+ mov x0, #ERRATA_APPLIES
+ ret
+endfunc check_errata_817169
+
+ /* --------------------------------------------------------------------
+ * Disable the over-read from the LDNP instruction.
+ *
+ * This applies to all revisions <= r1p2. The performance degradation
+ * observed with LDNP/STNP is fixed in r1p3 and later revisions.
+ *
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------------
+ */
+func a57_disable_ldnp_overread
+ /*
+ * Compare x0 against revision r1p2
+ */
+ mov x17, x30
+ bl check_errata_disable_ldnp_overread
+ cbz x0, 1f
+ mrs x1, CORTEX_A57_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A57_CPUACTLR_EL1_DIS_OVERREAD
+ msr CORTEX_A57_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc a57_disable_ldnp_overread
+
+func check_errata_disable_ldnp_overread
+ mov x1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_disable_ldnp_overread
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Erratum #826974.
+ * This applies only to revision <= r1p1 of Cortex A57.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------
+ */
+func errata_a57_826974_wa
+ /*
+ * Compare x0 against revision r1p1
+ */
+ mov x17, x30
+ bl check_errata_826974
+ cbz x0, 1f
+ mrs x1, CORTEX_A57_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_DMB
+ msr CORTEX_A57_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a57_826974_wa
+
+func check_errata_826974
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_826974
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Erratum #826977.
+ * This applies only to revision <= r1p1 of Cortex A57.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------
+ */
+func errata_a57_826977_wa
+ /*
+ * Compare x0 against revision r1p1
+ */
+ mov x17, x30
+ bl check_errata_826977
+ cbz x0, 1f
+ mrs x1, CORTEX_A57_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A57_CPUACTLR_EL1_GRE_NGRE_AS_NGNRE
+ msr CORTEX_A57_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a57_826977_wa
+
+func check_errata_826977
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_826977
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Erratum #828024.
+ * This applies only to revision <= r1p1 of Cortex A57.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------
+ */
+func errata_a57_828024_wa
+ /*
+ * Compare x0 against revision r1p1
+ */
+ mov x17, x30
+ bl check_errata_828024
+ cbz x0, 1f
+ mrs x1, CORTEX_A57_CPUACTLR_EL1
+ /*
+ * Setting the relevant bits in CPUACTLR_EL1 has to be done in 2
+ * instructions here because the resulting bitmask doesn't fit in a
+ * 16-bit value so it cannot be encoded in a single instruction.
+ */
+ orr x1, x1, #CORTEX_A57_CPUACTLR_EL1_NO_ALLOC_WBWA
+ orr x1, x1, #(CORTEX_A57_CPUACTLR_EL1_DIS_L1_STREAMING | \
+ CORTEX_A57_CPUACTLR_EL1_DIS_STREAMING)
+ msr CORTEX_A57_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a57_828024_wa
+
+func check_errata_828024
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_828024
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Erratum #829520.
+ * This applies only to revision <= r1p2 of Cortex A57.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------
+ */
+func errata_a57_829520_wa
+ /*
+ * Compare x0 against revision r1p2
+ */
+ mov x17, x30
+ bl check_errata_829520
+ cbz x0, 1f
+ mrs x1, CORTEX_A57_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A57_CPUACTLR_EL1_DIS_INDIRECT_PREDICTOR
+ msr CORTEX_A57_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a57_829520_wa
+
+func check_errata_829520
+ mov x1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_829520
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Erratum #833471.
+ * This applies only to revision <= r1p2 of Cortex A57.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------
+ */
+func errata_a57_833471_wa
+ /*
+ * Compare x0 against revision r1p2
+ */
+ mov x17, x30
+ bl check_errata_833471
+ cbz x0, 1f
+ mrs x1, CORTEX_A57_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A57_CPUACTLR_EL1_FORCE_FPSCR_FLUSH
+ msr CORTEX_A57_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a57_833471_wa
+
+func check_errata_833471
+ mov x1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_833471
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A57 Erratum #859972.
+ * This applies only to revision <= r1p3 of Cortex A57.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a57_859972_wa
+ mov x17, x30
+ bl check_errata_859972
+ cbz x0, 1f
+ mrs x1, CORTEX_A57_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A57_CPUACTLR_EL1_DIS_INSTR_PREFETCH
+ msr CORTEX_A57_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a57_859972_wa
+
+func check_errata_859972
+ mov x1, #0x13
+ b cpu_rev_var_ls
+endfunc check_errata_859972
+
+func check_errata_cve_2017_5715
+#if WORKAROUND_CVE_2017_5715
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2017_5715
+
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2018_3639
+
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A57 Erratum #1319537.
+ * This applies to all revisions of Cortex A57.
+ * --------------------------------------------------
+ */
+func check_errata_1319537
+#if ERRATA_A57_1319537
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_1319537
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A57.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
+func cortex_a57_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A57_806969
+ mov x0, x18
+ bl errata_a57_806969_wa
+#endif
+
+#if ERRATA_A57_813420
+ mov x0, x18
+ bl errata_a57_813420_wa
+#endif
+
+#if ERRATA_A57_814670
+ mov x0, x18
+ bl errata_a57_814670_wa
+#endif
+
+#if A57_DISABLE_NON_TEMPORAL_HINT
+ mov x0, x18
+ bl a57_disable_ldnp_overread
+#endif
+
+#if ERRATA_A57_826974
+ mov x0, x18
+ bl errata_a57_826974_wa
+#endif
+
+#if ERRATA_A57_826977
+ mov x0, x18
+ bl errata_a57_826977_wa
+#endif
+
+#if ERRATA_A57_828024
+ mov x0, x18
+ bl errata_a57_828024_wa
+#endif
+
+#if ERRATA_A57_829520
+ mov x0, x18
+ bl errata_a57_829520_wa
+#endif
+
+#if ERRATA_A57_833471
+ mov x0, x18
+ bl errata_a57_833471_wa
+#endif
+
+#if ERRATA_A57_859972
+ mov x0, x18
+ bl errata_a57_859972_wa
+#endif
+
+#if IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960)
+ /* ---------------------------------------------------------------
+ * Override vector table & enable existing workaround if either of
+ * the build flags is enabled
+ * ---------------------------------------------------------------
+ */
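+	/*
+	 * The wa_cve_2017_5715_mmu_vbar vectors briefly toggle the MMU
+	 * enable bit on entry from lower ELs, which on this core also
+	 * invalidates the branch predictor.
+	 */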
+ adr x0, wa_cve_2017_5715_mmu_vbar
+ msr vbar_el3, x0
+ /* isb will be performed before returning from this function */
+#endif
+
+#if WORKAROUND_CVE_2018_3639
+ mrs x0, CORTEX_A57_CPUACTLR_EL1
+ orr x0, x0, #CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_STORE
+ msr CORTEX_A57_CPUACTLR_EL1, x0
+ isb
+ dsb sy
+#endif
+
+#if A57_ENABLE_NONCACHEABLE_LOAD_FWD
+ /* ---------------------------------------------
+ * Enable higher performance non-cacheable load
+ * forwarding
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A57_CPUACTLR_EL1
+ orr x0, x0, #CORTEX_A57_CPUACTLR_EL1_EN_NC_LOAD_FWD
+ msr CORTEX_A57_CPUACTLR_EL1, x0
+#endif
+
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A57_ECTLR_EL1
+ orr x0, x0, #CORTEX_A57_ECTLR_SMP_BIT
+ msr CORTEX_A57_ECTLR_EL1, x0
+ isb
+ ret x19
+endfunc cortex_a57_reset_func
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+func check_smccc_arch_workaround_3
+ mov x0, #ERRATA_APPLIES
+ ret
+endfunc check_smccc_arch_workaround_3
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A57.
+ * ----------------------------------------------------
+ */
+func cortex_a57_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a57_disable_ext_debug
+endfunc cortex_a57_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A57.
+ * -------------------------------------------------------
+ */
+func cortex_a57_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_l2_prefetch
+
+#if !SKIP_A57_L1_FLUSH_PWR_DWN
+ /* -------------------------------------------------
+ * Flush the L1 caches.
+ * -------------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+#endif
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* -------------------------------------------------
+ * Flush the L2 caches.
+ * -------------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a57_disable_ext_debug
+endfunc cortex_a57_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A57. Must follow AAPCS.
+ */
+func cortex_a57_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * the checking function of each erratum.
+ */
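+	/*
+	 * Each report_errata line below roughly does the following
+	 * (a sketch of the cpu_macros.S helper; by convention the
+	 * saved revision-variant is expected in x8):
+	 *
+	 *	mov	x0, x8
+	 *	bl	check_errata_<id>
+	 *	<log the applied/missing status via errata_print_msg>
+	 */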
+ report_errata ERRATA_A57_806969, cortex_a57, 806969
+ report_errata ERRATA_A57_813419, cortex_a57, 813419
+ report_errata ERRATA_A57_813420, cortex_a57, 813420
+ report_errata ERRATA_A57_814670, cortex_a57, 814670
+ report_errata ERRATA_A57_817169, cortex_a57, 817169
+ report_errata A57_DISABLE_NON_TEMPORAL_HINT, cortex_a57, \
+ disable_ldnp_overread
+ report_errata ERRATA_A57_826974, cortex_a57, 826974
+ report_errata ERRATA_A57_826977, cortex_a57, 826977
+ report_errata ERRATA_A57_828024, cortex_a57, 828024
+ report_errata ERRATA_A57_829520, cortex_a57, 829520
+ report_errata ERRATA_A57_833471, cortex_a57, 833471
+ report_errata ERRATA_A57_859972, cortex_a57, 859972
+ report_errata ERRATA_A57_1319537, cortex_a57, 1319537
+ report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a57, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a57_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_a57 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a57_regs, "aS"
+cortex_a57_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", "cpumerrsr_el1", "l2merrsr_el1", ""
+
+func cortex_a57_cpu_reg_dump
+ adr x6, cortex_a57_regs
+ mrs x8, CORTEX_A57_ECTLR_EL1
+ mrs x9, CORTEX_A57_MERRSR_EL1
+ mrs x10, CORTEX_A57_L2MERRSR_EL1
+ ret
+endfunc cortex_a57_cpu_reg_dump
+
+declare_cpu_ops_wa cortex_a57, CORTEX_A57_MIDR, \
+ cortex_a57_reset_func, \
+ check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
+ check_smccc_arch_workaround_3, \
+ cortex_a57_core_pwr_dwn, \
+ cortex_a57_cluster_pwr_dwn
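+
+/*
+ * declare_cpu_ops_wa emits a cpu_ops descriptor into the .cpu_ops
+ * section for this MIDR. Relative to plain declare_cpu_ops it carries
+ * extra hooks (here check_errata_cve_2017_5715 and
+ * check_smccc_arch_workaround_3) that let the runtime answer
+ * SMCCC_ARCH_WORKAROUND feature queries; the exact record layout is
+ * defined by cpu_macros.S.
+ */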
diff --git a/lib/cpus/aarch64/cortex_a65.S b/lib/cpus/aarch64/cortex_a65.S
new file mode 100644
index 0000000..666324c
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a65.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cortex_a65.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if !HW_ASSISTED_COHERENCY
+#error "Cortex-A65 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS
+#error "Cortex-A65 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+/* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A65.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
+func cortex_a65_reset_func
+ mov x19, x30
+
+#if ERRATA_DSU_936184
+ bl errata_dsu_936184_wa
+#endif
+
+ ret x19
+endfunc cortex_a65_reset_func
+
+func cortex_a65_cpu_pwr_dwn
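+	/* Setting CORE_PWRDN_BIT requests power down at the next WFI */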
+ mrs x0, CORTEX_A65_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A65_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_A65_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a65_cpu_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A65. Must follow AAPCS.
+ */
+func cortex_a65_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * the checking function of each erratum.
+ */
+ report_errata ERRATA_DSU_936184, cortex_a65, dsu_936184
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a65_errata_report
+#endif
+
+.section .rodata.cortex_a65_regs, "aS"
+cortex_a65_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a65_cpu_reg_dump
+ adr x6, cortex_a65_regs
+ mrs x8, CORTEX_A65_ECTLR_EL1
+ ret
+endfunc cortex_a65_cpu_reg_dump
+
+declare_cpu_ops cortex_a65, CORTEX_A65_MIDR, \
+ cortex_a65_reset_func, \
+ cortex_a65_cpu_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a65ae.S b/lib/cpus/aarch64/cortex_a65ae.S
new file mode 100644
index 0000000..ac6583e
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a65ae.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cortex_a65ae.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if !HW_ASSISTED_COHERENCY
+#error "Cortex-A65AE must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS
+#error "Cortex-A65AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+/* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A65AE.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
+func cortex_a65ae_reset_func
+ mov x19, x30
+
+#if ERRATA_DSU_936184
+ bl errata_dsu_936184_wa
+#endif
+
+ ret x19
+endfunc cortex_a65ae_reset_func
+
+func cortex_a65ae_cpu_pwr_dwn
+ mrs x0, CORTEX_A65AE_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A65AE_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_A65AE_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a65ae_cpu_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A65AE. Must follow AAPCS.
+ */
+func cortex_a65ae_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * the checking function of each erratum.
+ */
+ report_errata ERRATA_DSU_936184, cortex_a65ae, dsu_936184
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a65ae_errata_report
+#endif
+
+.section .rodata.cortex_a65ae_regs, "aS"
+cortex_a65ae_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a65ae_cpu_reg_dump
+ adr x6, cortex_a65ae_regs
+ mrs x8, CORTEX_A65AE_ECTLR_EL1
+ ret
+endfunc cortex_a65ae_cpu_reg_dump
+
+declare_cpu_ops cortex_a65ae, CORTEX_A65AE_MIDR, \
+ cortex_a65ae_reset_func, \
+ cortex_a65ae_cpu_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
new file mode 100644
index 0000000..fed3f33
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a710.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex A710 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex A710 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A710_BHB_LOOP_COUNT, cortex_a710
+#endif /* WORKAROUND_CVE_2022_23960 */
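+
+/*
+ * The macro above emits wa_cve_vbar_cortex_a710, an EL3 vector table
+ * whose entries run a branch-history-clearing loop of
+ * CORTEX_A710_BHB_LOOP_COUNT iterations before falling through to the
+ * generic handlers; the reset function installs it below.
+ */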
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 1987031.
+ * This applies to revisions r0p0, r1p0 and r2p0 of Cortex-A710. It is still
+ * open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a710_1987031_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_1987031
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
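+	/*
+	 * S3_6_c15_c8_* is the generic S<op0>_<op1>_<Cn>_<Cm>_<op2>
+	 * spelling for IMPLEMENTATION DEFINED system registers; the
+	 * constants written below come from the erratum notice.
+	 */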
+	ldr x0, =0x6
+	msr S3_6_c15_c8_0, x0
+	ldr x0, =0xF3A08002
+	msr S3_6_c15_c8_2, x0
+	ldr x0, =0xFFF0F7FE
+	msr S3_6_c15_c8_3, x0
+	ldr x0, =0x40000001003ff
+	msr S3_6_c15_c8_1, x0
+	ldr x0, =0x7
+	msr S3_6_c15_c8_0, x0
+	ldr x0, =0xBF200000
+	msr S3_6_c15_c8_2, x0
+	ldr x0, =0xFFEF0000
+	msr S3_6_c15_c8_3, x0
+	ldr x0, =0x40000001003f3
+	msr S3_6_c15_c8_1, x0
+ isb
+1:
+ ret x17
+endfunc errata_a710_1987031_wa
+
+func check_errata_1987031
+ /* Applies to r0p0, r1p0 and r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_1987031
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2081180.
+ * This applies to revisions r0p0, r1p0 and r2p0 of Cortex-A710.
+ * It is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a710_2081180_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2081180
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+	ldr x0, =0x3
+	msr S3_6_c15_c8_0, x0
+	ldr x0, =0xF3A08002
+	msr S3_6_c15_c8_2, x0
+	ldr x0, =0xFFF0F7FE
+	msr S3_6_c15_c8_3, x0
+	ldr x0, =0x10002001003FF
+	msr S3_6_c15_c8_1, x0
+	ldr x0, =0x4
+	msr S3_6_c15_c8_0, x0
+	ldr x0, =0xBF200000
+	msr S3_6_c15_c8_2, x0
+	ldr x0, =0xFFEF0000
+	msr S3_6_c15_c8_3, x0
+	ldr x0, =0x10002001003F3
+	msr S3_6_c15_c8_1, x0
+ isb
+1:
+ ret x17
+endfunc errata_a710_2081180_wa
+
+func check_errata_2081180
+ /* Applies to r0p0, r1p0 and r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2081180
+
+/* ---------------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2055002.
+ * This applies to revisions r1p0 and r2p0 of Cortex-A710 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------------
+ */
+func errata_a710_2055002_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_2055002
+ cbz x0, 1f
+ mrs x1, CORTEX_A710_CPUACTLR_EL1
+ orr x1, x1, CORTEX_A710_CPUACTLR_EL1_BIT_46
+ msr CORTEX_A710_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a710_2055002_wa
+
+func check_errata_2055002
+ /* Applies to r1p0, r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2055002
+
+/* -------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2017096.
+ * This applies to revisions r0p0, r1p0 and r2p0 of Cortex-A710.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * -------------------------------------------------------------
+ */
+func errata_a710_2017096_wa
+ /* Compare x0 against revision r0p0 to r2p0 */
+ mov x17, x30
+ bl check_errata_2017096
+ cbz x0, 1f
+ mrs x1, CORTEX_A710_CPUECTLR_EL1
+ orr x1, x1, CORTEX_A710_CPUECTLR_EL1_PFSTIDIS_BIT
+ msr CORTEX_A710_CPUECTLR_EL1, x1
+
+1:
+ ret x17
+endfunc errata_a710_2017096_wa
+
+func check_errata_2017096
+ /* Applies to r0p0, r1p0, r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2017096
+
+
+/* ---------------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2083908.
+ * This applies to revision r2p0 of Cortex-A710 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------------
+ */
+func errata_a710_2083908_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_2083908
+ cbz x0, 1f
+ mrs x1, CORTEX_A710_CPUACTLR5_EL1
+ orr x1, x1, CORTEX_A710_CPUACTLR5_EL1_BIT_13
+ msr CORTEX_A710_CPUACTLR5_EL1, x1
+1:
+ ret x17
+endfunc errata_a710_2083908_wa
+
+func check_errata_2083908
+ /* Applies to r2p0 */
+ mov x1, #CPU_REV(2, 0)
+ mov x2, #CPU_REV(2, 0)
+ b cpu_rev_var_range
+endfunc check_errata_2083908
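+
+/*
+ * Unlike cpu_rev_var_ls, cpu_rev_var_range bounds the revision on both
+ * sides: x1 carries the minimum and x2 the maximum rev-var, and
+ * ERRATA_APPLIES is returned only when x1 <= rev-var <= x2. With both
+ * bounds set to CPU_REV(2, 0) the check above matches exactly r2p0.
+ */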
+
+/* ---------------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2058056.
+ * This applies to revisions r0p0, r1p0 and r2p0 of Cortex-A710 and is still
+ * open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------------
+ */
+func errata_a710_2058056_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_2058056
+ cbz x0, 1f
+ mrs x1, CORTEX_A710_CPUECTLR2_EL1
+ mov x0, #CORTEX_A710_CPUECTLR2_EL1_PF_MODE_CNSRV
+ bfi x1, x0, #CPUECTLR2_EL1_PF_MODE_LSB, #CPUECTLR2_EL1_PF_MODE_WIDTH
+ msr CORTEX_A710_CPUECTLR2_EL1, x1
+1:
+ ret x17
+endfunc errata_a710_2058056_wa
+
+func check_errata_2058056
+ /* Applies to r0p0, r1p0 and r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2058056
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2267065.
+ * This applies to revisions r0p0, r1p0 and r2p0.
+ * It is fixed in r2p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_a710_2267065_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_2267065
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ mrs x1, CORTEX_A710_CPUACTLR_EL1
+ orr x1, x1, CORTEX_A710_CPUACTLR_EL1_BIT_22
+ msr CORTEX_A710_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a710_2267065_wa
+
+func check_errata_2267065
+ /* Applies to r0p0, r1p0 and r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2267065
+
+/* ---------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2136059.
+ * This applies to revisions r0p0, r1p0 and r2p0.
+ * It is fixed in r2p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------
+ */
+func errata_a710_2136059_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_2136059
+ cbz x0, 1f
+
+ /* Apply the workaround */
+ mrs x1, CORTEX_A710_CPUACTLR5_EL1
+ orr x1, x1, CORTEX_A710_CPUACTLR5_EL1_BIT_44
+ msr CORTEX_A710_CPUACTLR5_EL1, x1
+
+1:
+ ret x17
+endfunc errata_a710_2136059_wa
+
+func check_errata_2136059
+ /* Applies to r0p0, r1p0 and r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2136059
+
+/* ----------------------------------------------------------------
+ * Errata workaround for Cortex-A710 Erratum 2147715.
+ * This applies to revision r2p0, and is fixed in r2p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * ----------------------------------------------------------------
+ */
+func errata_a710_2147715_wa
+ mov x17, x30
+ bl check_errata_2147715
+ cbz x0, 1f
+
+ /* Apply workaround; set CPUACTLR_EL1[22]
+ * to 1, which will cause the CFP instruction
+ * to invalidate all branch predictor resources
+ * regardless of context.
+ */
+ mrs x1, CORTEX_A710_CPUACTLR_EL1
+ orr x1, x1, CORTEX_A710_CPUACTLR_EL1_BIT_22
+ msr CORTEX_A710_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a710_2147715_wa
+
+func check_errata_2147715
+ mov x1, #0x20
+ mov x2, #0x20
+ b cpu_rev_var_range
+endfunc check_errata_2147715
+
+/* ---------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2216384.
+ * This applies to revisions r0p0, r1p0 and r2p0.
+ * It is fixed in r2p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------
+ */
+func errata_a710_2216384_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_2216384
+ cbz x0, 1f
+
+	/* Apply workaround: set CPUACTLR5_EL1[17]
+	 * to 1, then apply the instruction
+	 * patching sequence below.
+	 */
+ mrs x1, CORTEX_A710_CPUACTLR5_EL1
+ orr x1, x1, CORTEX_A710_CPUACTLR5_EL1_BIT_17
+ msr CORTEX_A710_CPUACTLR5_EL1, x1
+
+	ldr x0, =0x5
+	msr CORTEX_A710_CPUPSELR_EL3, x0
+	ldr x0, =0x10F600E000
+	msr CORTEX_A710_CPUPOR_EL3, x0
+	ldr x0, =0x10FF80E000
+	msr CORTEX_A710_CPUPMR_EL3, x0
+	ldr x0, =0x80000000003FF
+	msr CORTEX_A710_CPUPCR_EL3, x0
+ isb
+1:
+ ret x17
+endfunc errata_a710_2216384_wa
+
+func check_errata_2216384
+ /* Applies to r0p0, r1p0 and r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2216384
+
+/* ---------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2282622.
+ * This applies to revisions r0p0, r1p0 and r2p0.
+ * It is fixed in r2p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * ---------------------------------------------------------------
+ */
+func errata_a710_2282622_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_2282622
+ cbz x0, 1f
+
+ /* Apply the workaround */
+ mrs x1, CORTEX_A710_CPUACTLR2_EL1
+ orr x1, x1, BIT(0)
+ msr CORTEX_A710_CPUACTLR2_EL1, x1
+
+1:
+ ret x17
+endfunc errata_a710_2282622_wa
+
+func check_errata_2282622
+ /* Applies to r0p0, r1p0 and r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2282622
+
+/* ------------------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2291219 on power down request.
+ * This applies to revision <= r2p0 and is fixed in r2p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * ------------------------------------------------------------------------
+ */
+func errata_a710_2291219_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2291219
+ cbz x0, 1f
+
+	/* Set bit 36 in CPUACTLR2_EL1 */
+ mrs x1, CORTEX_A710_CPUACTLR2_EL1
+ orr x1, x1, #CORTEX_A710_CPUACTLR2_EL1_BIT_36
+ msr CORTEX_A710_CPUACTLR2_EL1, x1
+1:
+ ret x17
+endfunc errata_a710_2291219_wa
+
+func check_errata_2291219
+ /* Applies to <= r2p0. */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2291219
+
+/* ---------------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2008768.
+ * This applies to revisions r0p0, r1p0 and r2p0.
+ * It is fixed in r2p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x2, x17
+ * ---------------------------------------------------------------
+ */
+func errata_a710_2008768_wa
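+	/*
+	 * The workaround clears ERXCTLR_EL1.ED on error records 0 and 1,
+	 * turning RAS error detection off before the core loses power;
+	 * it is therefore called from the power down path rather than
+	 * at reset.
+	 */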
+ mov x17, x30
+ bl check_errata_2008768
+ cbz x0, 1f
+
+ /* Stash ERRSELR_EL1 in x2 */
+ mrs x2, ERRSELR_EL1
+
+ /* Select error record 0 and clear ED bit */
+ msr ERRSELR_EL1, xzr
+ mrs x1, ERXCTLR_EL1
+ bfi x1, xzr, #ERXCTLR_ED_SHIFT, #1
+ msr ERXCTLR_EL1, x1
+
+ /* Select error record 1 and clear ED bit */
+ mov x0, #1
+ msr ERRSELR_EL1, x0
+ mrs x1, ERXCTLR_EL1
+ bfi x1, xzr, #ERXCTLR_ED_SHIFT, #1
+ msr ERXCTLR_EL1, x1
+
+ /* Restore ERRSELR_EL1 from x2 */
+ msr ERRSELR_EL1, x2
+
+1:
+ ret x17
+endfunc errata_a710_2008768_wa
+
+func check_errata_2008768
+ /* Applies to r0p0, r1p0 and r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2008768
+
+/* -------------------------------------------------------
+ * Errata Workaround for Cortex-A710 Erratum 2371105.
+ * This applies to revisions <= r2p0 and is fixed in r2p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * -------------------------------------------------------
+ */
+func errata_a710_2371105_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2371105
+ cbz x0, 1f
+
+ /* Set bit 40 in CPUACTLR2_EL1 */
+ mrs x1, CORTEX_A710_CPUACTLR2_EL1
+ orr x1, x1, #CORTEX_A710_CPUACTLR2_EL1_BIT_40
+ msr CORTEX_A710_CPUACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a710_2371105_wa
+
+func check_errata_2371105
+ /* Applies to <= r2p0. */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2371105
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_a710_core_pwr_dwn
+
+#if ERRATA_A710_2008768
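+	/* The rev-var is re-read here because, unlike the reset path,
+	 * the power down hook is not handed it in a saved register. */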
+ mov x4, x30
+ bl cpu_get_rev_var
+ bl errata_a710_2008768_wa
+ mov x30, x4
+#endif
+
+#if ERRATA_A710_2291219
+ mov x15, x30
+ bl cpu_get_rev_var
+ bl errata_a710_2291219_wa
+ mov x30, x15
+#endif /* ERRATA_A710_2291219 */
+
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_A710_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A710_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_A710_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a710_core_pwr_dwn
+
+#if REPORT_ERRATA
+ /*
+ * Errata printing function for Cortex-A710. Must follow AAPCS.
+ */
+func cortex_a710_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * the checking function of each erratum.
+ */
+ report_errata ERRATA_A710_1987031, cortex_a710, 1987031
+ report_errata ERRATA_A710_2081180, cortex_a710, 2081180
+ report_errata ERRATA_A710_2055002, cortex_a710, 2055002
+ report_errata ERRATA_A710_2017096, cortex_a710, 2017096
+ report_errata ERRATA_A710_2083908, cortex_a710, 2083908
+ report_errata ERRATA_A710_2058056, cortex_a710, 2058056
+ report_errata ERRATA_A710_2267065, cortex_a710, 2267065
+ report_errata ERRATA_A710_2136059, cortex_a710, 2136059
+ report_errata ERRATA_A710_2282622, cortex_a710, 2282622
+ report_errata ERRATA_A710_2008768, cortex_a710, 2008768
+ report_errata ERRATA_A710_2147715, cortex_a710, 2147715
+ report_errata ERRATA_A710_2216384, cortex_a710, 2216384
+ report_errata ERRATA_A710_2291219, cortex_a710, 2291219
+ report_errata ERRATA_A710_2371105, cortex_a710, 2371105
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a710, cve_2022_23960
+ report_errata ERRATA_DSU_2313941, cortex_a710, dsu_2313941
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a710_errata_report
+#endif
+
+func cortex_a710_reset_func
+ mov x19, x30
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_DSU_2313941
+ bl errata_dsu_2313941_wa
+#endif
+
+#if ERRATA_A710_1987031
+ mov x0, x18
+ bl errata_a710_1987031_wa
+#endif
+
+#if ERRATA_A710_2081180
+ mov x0, x18
+ bl errata_a710_2081180_wa
+#endif
+
+#if ERRATA_A710_2055002
+ mov x0, x18
+ bl errata_a710_2055002_wa
+#endif
+
+#if ERRATA_A710_2017096
+ mov x0, x18
+ bl errata_a710_2017096_wa
+#endif
+
+#if ERRATA_A710_2083908
+ mov x0, x18
+ bl errata_a710_2083908_wa
+#endif
+
+#if ERRATA_A710_2058056
+ mov x0, x18
+ bl errata_a710_2058056_wa
+#endif
+
+#if ERRATA_A710_2267065
+ mov x0, x18
+ bl errata_a710_2267065_wa
+#endif
+
+#if ERRATA_A710_2136059
+ mov x0, x18
+ bl errata_a710_2136059_wa
+#endif
+
+#if ERRATA_A710_2147715
+ mov x0, x18
+ bl errata_a710_2147715_wa
+#endif
+
+#if ERRATA_A710_2216384
+ mov x0, x18
+ bl errata_a710_2216384_wa
+#endif /* ERRATA_A710_2216384 */
+
+#if ERRATA_A710_2282622
+ mov x0, x18
+ bl errata_a710_2282622_wa
+#endif
+
+#if ERRATA_A710_2371105
+ mov x0, x18
+ bl errata_a710_2371105_wa
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-A710 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_a710
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret x19
+endfunc cortex_a710_reset_func
+
+ /* ---------------------------------------------
+ * This function provides Cortex-A710 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a710_regs, "aS"
+cortex_a710_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a710_cpu_reg_dump
+ adr x6, cortex_a710_regs
+ mrs x8, CORTEX_A710_CPUECTLR_EL1
+ ret
+endfunc cortex_a710_cpu_reg_dump
+
+declare_cpu_ops cortex_a710, CORTEX_A710_MIDR, \
+ cortex_a710_reset_func, \
+ cortex_a710_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a715.S b/lib/cpus/aarch64/cortex_a715.S
new file mode 100644
index 0000000..7603210
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a715.S
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_makalu.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Makalu must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Makalu supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_MAKALU_BHB_LOOP_COUNT, cortex_makalu
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+func cortex_makalu_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex Makalu generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_makalu
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret
+endfunc cortex_makalu_reset_func
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_makalu_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_MAKALU_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_MAKALU_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_MAKALU_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_makalu_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex Makalu. Must follow AAPCS.
+ */
+func cortex_makalu_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * the checking function of each erratum.
+ */
+ report_errata WORKAROUND_CVE_2022_23960, cortex_makalu, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_makalu_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Cortex Makalu-specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_makalu_regs, "aS"
+cortex_makalu_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_makalu_cpu_reg_dump
+ adr x6, cortex_makalu_regs
+ mrs x8, CORTEX_MAKALU_CPUECTLR_EL1
+ ret
+endfunc cortex_makalu_cpu_reg_dump
+
+declare_cpu_ops cortex_makalu, CORTEX_MAKALU_MIDR, \
+ cortex_makalu_reset_func, \
+ cortex_makalu_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
new file mode 100644
index 0000000..de2d36e
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2015-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a72.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A72_BHB_LOOP_COUNT, cortex_a72
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_dcache
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+ ret
+endfunc cortex_a72_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable all types of L2 prefetches.
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_l2_prefetch
+ mrs x0, CORTEX_A72_ECTLR_EL1
+ orr x0, x0, #CORTEX_A72_ECTLR_DIS_TWD_ACC_PFTCH_BIT
+ mov x1, #CORTEX_A72_ECTLR_L2_IPFTCH_DIST_MASK
+ orr x1, x1, #CORTEX_A72_ECTLR_L2_DPFTCH_DIST_MASK
+ bic x0, x0, x1
+ msr CORTEX_A72_ECTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_hw_prefetcher
+ mrs x0, CORTEX_A72_CPUACTLR_EL1
+ orr x0, x0, #CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH
+ msr CORTEX_A72_CPUACTLR_EL1, x0
+ isb
+ dsb ish
+ ret
+endfunc cortex_a72_disable_hw_prefetcher
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_smp
+ mrs x0, CORTEX_A72_ECTLR_EL1
+ bic x0, x0, #CORTEX_A72_ECTLR_SMP_BIT
+ msr CORTEX_A72_ECTLR_EL1, x0
+ ret
+endfunc cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Disable debug interfaces
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_ext_debug
+ mov x0, #1
+ msr osdlr_el1, x0
+ isb
+ dsb sy
+ ret
+endfunc cortex_a72_disable_ext_debug
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A72 Erratum #859971.
+ * This applies only to revision <= r0p3 of Cortex A72.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a72_859971_wa
+	mov x17, x30
+ bl check_errata_859971
+ cbz x0, 1f
+ mrs x1, CORTEX_A72_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A72_CPUACTLR_EL1_DIS_INSTR_PREFETCH
+ msr CORTEX_A72_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a72_859971_wa
+
+func check_errata_859971
+ mov x1, #0x03
+ b cpu_rev_var_ls
+endfunc check_errata_859971
+
+func check_errata_cve_2017_5715
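+	/*
+	 * cpu_check_csv2 branches to 1f when ID_AA64PFR0_EL1.CSV2 reads
+	 * as 1; those revisions (r1p0 and later) are not affected.
+	 */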
+ cpu_check_csv2 x0, 1f
+#if WORKAROUND_CVE_2017_5715
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+1:
+ mov x0, #ERRATA_NOT_APPLIES
+ ret
+endfunc check_errata_cve_2017_5715
+
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2018_3639
+
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A72 Erratum #1319367.
+ * This applies to all revisions of Cortex A72.
+ * --------------------------------------------------
+ */
+func check_errata_1319367
+#if ERRATA_A72_1319367
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_1319367
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+func check_smccc_arch_workaround_3
+ cpu_check_csv2 x0, 1f
+ mov x0, #ERRATA_APPLIES
+ ret
+1:
+ mov x0, #ERRATA_NOT_APPLIES
+ ret
+endfunc check_smccc_arch_workaround_3
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A72.
+ * -------------------------------------------------
+ */
+func cortex_a72_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A72_859971
+ mov x0, x18
+ bl errata_a72_859971_wa
+#endif
+
+#if IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960)
+ cpu_check_csv2 x0, 1f
+ adr x0, wa_cve_2017_5715_mmu_vbar
+ msr vbar_el3, x0
+ /* isb will be performed before returning from this function */
+
+	/* Skip CVE_2022_23960 mitigation if CVE_2017_5715 mitigation is applied */
+ b 2f
+1:
+#if WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-A72 generic vectors are overridden to apply the
+	 * mitigation on exception entry from lower ELs for revisions >= r1p0,
+	 * which have CSV2 implemented.
+ */
+ adr x0, wa_cve_vbar_cortex_a72
+ msr vbar_el3, x0
+
+ /* isb will be performed before returning from this function */
+#endif /* WORKAROUND_CVE_2022_23960 */
+2:
+#endif /* IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960) */
+
+#if WORKAROUND_CVE_2018_3639
+ mrs x0, CORTEX_A72_CPUACTLR_EL1
+ orr x0, x0, #CORTEX_A72_CPUACTLR_EL1_DIS_LOAD_PASS_STORE
+ msr CORTEX_A72_CPUACTLR_EL1, x0
+ isb
+ dsb sy
+#endif
+
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A72_ECTLR_EL1
+ orr x0, x0, #CORTEX_A72_ECTLR_SMP_BIT
+ msr CORTEX_A72_ECTLR_EL1, x0
+ isb
+ ret x19
+endfunc cortex_a72_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A72.
+ * ----------------------------------------------------
+ */
+func cortex_a72_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_hw_prefetcher
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a72_disable_ext_debug
+endfunc cortex_a72_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A72.
+ * -------------------------------------------------------
+ */
+func cortex_a72_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_hw_prefetcher
+
+#if !SKIP_A72_L1_FLUSH_PWR_DWN
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+#endif
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* -------------------------------------------------
+ * Flush the L2 caches.
+ * -------------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a72_disable_ext_debug
+endfunc cortex_a72_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A72. Must follow AAPCS.
+ */
+func cortex_a72_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * the checking function of each erratum.
+ */
+ report_errata ERRATA_A72_859971, cortex_a72, 859971
+ report_errata ERRATA_A72_1319367, cortex_a72, 1319367
+ report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a72, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a72_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_a72 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a72_regs, "aS"
+cortex_a72_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", "cpumerrsr_el1", "l2merrsr_el1", ""
+
+func cortex_a72_cpu_reg_dump
+ adr x6, cortex_a72_regs
+ mrs x8, CORTEX_A72_ECTLR_EL1
+ mrs x9, CORTEX_A72_MERRSR_EL1
+ mrs x10, CORTEX_A72_L2MERRSR_EL1
+ ret
+endfunc cortex_a72_cpu_reg_dump
+
+declare_cpu_ops_wa cortex_a72, CORTEX_A72_MIDR, \
+ cortex_a72_reset_func, \
+ check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
+ check_smccc_arch_workaround_3, \
+ cortex_a72_core_pwr_dwn, \
+ cortex_a72_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
new file mode 100644
index 0000000..edcd1f5
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a73.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+ /* ---------------------------------------------
+ * Disable L1 data cache
+ * ---------------------------------------------
+ */
+func cortex_a73_disable_dcache
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+ ret
+endfunc cortex_a73_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a73_disable_smp
+ mrs x0, CORTEX_A73_CPUECTLR_EL1
+ bic x0, x0, #CORTEX_A73_CPUECTLR_SMP_BIT
+ msr CORTEX_A73_CPUECTLR_EL1, x0
+ isb
+ dsb sy
+ ret
+endfunc cortex_a73_disable_smp
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A73 Erratum #852427.
+ * This applies only to revision r0p0 of Cortex A73.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------
+ */
+func errata_a73_852427_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_852427
+ cbz x0, 1f
+ mrs x1, CORTEX_A73_DIAGNOSTIC_REGISTER
+ orr x1, x1, #(1 << 12)
+ msr CORTEX_A73_DIAGNOSTIC_REGISTER, x1
+ isb
+1:
+ ret x17
+endfunc errata_a73_852427_wa
+
+func check_errata_852427
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_852427
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A73 Erratum #855423.
+ * This applies only to revision <= r0p1 of Cortex A73.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------
+ */
+func errata_a73_855423_wa
+ /*
+ * Compare x0 against revision r0p1
+ */
+ mov x17, x30
+ bl check_errata_855423
+ cbz x0, 1f
+ mrs x1, CORTEX_A73_IMP_DEF_REG2
+ orr x1, x1, #(1 << 7)
+ msr CORTEX_A73_IMP_DEF_REG2, x1
+ isb
+1:
+ ret x17
+endfunc errata_a73_855423_wa
+
+func check_errata_855423
+ mov x1, #0x01
+ b cpu_rev_var_ls
+endfunc check_errata_855423
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A73.
+ * -------------------------------------------------
+ */
+
+func cortex_a73_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A73_852427
+ mov x0, x18
+ bl errata_a73_852427_wa
+#endif
+
+#if ERRATA_A73_855423
+ mov x0, x18
+ bl errata_a73_855423_wa
+#endif
+
+#if IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960)
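+	/*
+	 * Cortex-A73 installs the BPIALL-based vectors (branch predictor
+	 * invalidate all) rather than the MMU-toggling variant used by
+	 * Cortex-A57/A72.
+	 */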
+ cpu_check_csv2 x0, 1f
+ adr x0, wa_cve_2017_5715_bpiall_vbar
+ msr vbar_el3, x0
+ isb
+ /* Skip installing vector table again for CVE_2022_23960 */
+ b 2f
+1:
+#if WORKAROUND_CVE_2022_23960
+ adr x0, wa_cve_2017_5715_bpiall_vbar
+ msr vbar_el3, x0
+ isb
+#endif
+2:
+#endif /* IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960) */
+
+#if WORKAROUND_CVE_2018_3639
+ mrs x0, CORTEX_A73_IMP_DEF_REG1
+ orr x0, x0, #CORTEX_A73_IMP_DEF_REG1_DISABLE_LOAD_PASS_STORE
+ msr CORTEX_A73_IMP_DEF_REG1, x0
+ isb
+#endif
+
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * Clobbers : x0
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A73_CPUECTLR_EL1
+ orr x0, x0, #CORTEX_A73_CPUECTLR_SMP_BIT
+ msr CORTEX_A73_CPUECTLR_EL1, x0
+ isb
+ ret x19
+endfunc cortex_a73_reset_func
+
+func cortex_a73_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a73_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a73_disable_smp
+endfunc cortex_a73_core_pwr_dwn
+
+func cortex_a73_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a73_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a73_disable_smp
+endfunc cortex_a73_cluster_pwr_dwn
+
+func check_errata_cve_2017_5715
+ cpu_check_csv2 x0, 1f
+#if WORKAROUND_CVE_2017_5715
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+1:
+ mov x0, #ERRATA_NOT_APPLIES
+ ret
+endfunc check_errata_cve_2017_5715
+
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2018_3639
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960
+ cpu_check_csv2 x0, 1f
+ mov x0, #ERRATA_APPLIES
+ ret
+1:
+# if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+# else
+ mov x0, #ERRATA_MISSING
+# endif /* WORKAROUND_CVE_2022_23960 */
+ ret
+#endif /* WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960 */
+ mov x0, #ERRATA_MISSING
+ ret
+endfunc check_errata_cve_2022_23960
+
+func check_smccc_arch_workaround_3
+ mov x0, #ERRATA_APPLIES
+ ret
+endfunc check_smccc_arch_workaround_3
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A73. Must follow AAPCS.
+ */
+func cortex_a73_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * the checking function of each erratum.
+ */
+ report_errata ERRATA_A73_852427, cortex_a73, 852427
+ report_errata ERRATA_A73_855423, cortex_a73, 855423
+ report_errata WORKAROUND_CVE_2017_5715, cortex_a73, cve_2017_5715
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a73, cve_2018_3639
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a73, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a73_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_a73 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a73_regs, "aS"
+cortex_a73_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", "l2merrsr_el1", ""
+
+func cortex_a73_cpu_reg_dump
+ adr x6, cortex_a73_regs
+ mrs x8, CORTEX_A73_CPUECTLR_EL1
+ mrs x9, CORTEX_A73_L2MERRSR_EL1
+ ret
+endfunc cortex_a73_cpu_reg_dump
+
+declare_cpu_ops_wa cortex_a73, CORTEX_A73_MIDR, \
+ cortex_a73_reset_func, \
+ check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
+ check_smccc_arch_workaround_3, \
+ cortex_a73_core_pwr_dwn, \
+ cortex_a73_cluster_pwr_dwn
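+
+/*
+ * For reference: declare_cpu_ops_wa extends declare_cpu_ops with three extra
+ * hooks that back the SMCCC_ARCH_WORKAROUND_1/2/3 queries, in that order:
+ * here check_errata_cve_2017_5715 answers WORKAROUND_1 discovery,
+ * CPU_NO_EXTRA2_FUNC means there is no dynamic CVE-2018-3639 disable hook
+ * for WORKAROUND_2, and check_smccc_arch_workaround_3 answers WORKAROUND_3.
+ */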
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
new file mode 100644
index 0000000..d561be4
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cortex_a75.h>
+#include <cpuamu.h>
+#include <cpu_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-A75 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
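+
+/*
+ * Note: with hardware-assisted coherency (typically DSU-based designs) the
+ * power-down hook below only needs to request core power-down; contrast the
+ * Cortex-A73 sequence above, which must disable the D-cache, flush L1/L2 by
+ * set/way and exit the SMP domain in software.
+ */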
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A75 Errata #764081.
+ * This applies only to revision r0p0 of Cortex A75.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a75_764081_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_764081
+ cbz x0, 1f
+ mrs x1, sctlr_el3
+	orr	x1, x1, #SCTLR_IESB_BIT
+ msr sctlr_el3, x1
+ isb
+1:
+ ret x17
+endfunc errata_a75_764081_wa
+
+func check_errata_764081
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_764081
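+
+/*
+ * For reference: cpu_get_rev_var packs the MIDR variant into bits [7:4] and
+ * the revision into bits [3:0], so r0p0 encodes as 0x00 and r1p0 as 0x10.
+ * cpu_rev_var_ls takes that value in x0 and an upper bound in x1, returning
+ * ERRATA_APPLIES in x0 when x0 <= x1 and ERRATA_NOT_APPLIES otherwise.
+ */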
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A75 Errata #790748.
+ * This applies only to revision r0p0 of Cortex A75.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a75_790748_wa
+ /*
+ * Compare x0 against revision r0p0
+ */
+ mov x17, x30
+ bl check_errata_790748
+ cbz x0, 1f
+ mrs x1, CORTEX_A75_CPUACTLR_EL1
+	orr	x1, x1, #(1 << 13)
+ msr CORTEX_A75_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a75_790748_wa
+
+func check_errata_790748
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_790748
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A75.
+ * -------------------------------------------------
+ */
+func cortex_a75_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A75_764081
+ mov x0, x18
+ bl errata_a75_764081_wa
+#endif
+
+#if ERRATA_A75_790748
+ mov x0, x18
+ bl errata_a75_790748_wa
+#endif
+
+#if IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960)
+ cpu_check_csv2 x0, 1f
+ adr x0, wa_cve_2017_5715_bpiall_vbar
+ msr vbar_el3, x0
+ isb
+ /* Skip installing vector table again for CVE_2022_23960 */
+ b 2f
+1:
+#if WORKAROUND_CVE_2022_23960
+ adr x0, wa_cve_2017_5715_bpiall_vbar
+ msr vbar_el3, x0
+ isb
+#endif
+2:
+#endif /* IMAGE_BL31 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960) */
+
+#if WORKAROUND_CVE_2018_3639
+ mrs x0, CORTEX_A75_CPUACTLR_EL1
+ orr x0, x0, #CORTEX_A75_CPUACTLR_EL1_DISABLE_LOAD_PASS_STORE
+ msr CORTEX_A75_CPUACTLR_EL1, x0
+ isb
+#endif
+
+#if ERRATA_DSU_798953
+ bl errata_dsu_798953_wa
+#endif
+
+#if ERRATA_DSU_936184
+ bl errata_dsu_936184_wa
+#endif
+
+#if ENABLE_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, actlr_el3
+ orr x0, x0, #CORTEX_A75_ACTLR_AMEN_BIT
+ msr actlr_el3, x0
+ isb
+
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ mrs x0, actlr_el2
+ orr x0, x0, #CORTEX_A75_ACTLR_AMEN_BIT
+ msr actlr_el2, x0
+ isb
+
+ /* Enable group0 counters */
+ mov x0, #CORTEX_A75_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET_EL0, x0
+ isb
+
+ /* Enable group1 counters */
+ mov x0, #CORTEX_A75_AMU_GROUP1_MASK
+ msr CPUAMCNTENSET_EL0, x0
+ isb
+#endif
+ ret x19
+endfunc cortex_a75_reset_func
+
+func check_errata_cve_2017_5715
+ cpu_check_csv2 x0, 1f
+#if WORKAROUND_CVE_2017_5715
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+1:
+ mov x0, #ERRATA_NOT_APPLIES
+ ret
+endfunc check_errata_cve_2017_5715
+
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2018_3639
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960
+ cpu_check_csv2 x0, 1f
+ mov x0, #ERRATA_APPLIES
+ ret
+1:
+# if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+# else
+ mov x0, #ERRATA_MISSING
+# endif /* WORKAROUND_CVE_2022_23960 */
+ ret
+#endif /* WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960 */
+ mov x0, #ERRATA_MISSING
+ ret
+endfunc check_errata_cve_2022_23960
+
+func check_smccc_arch_workaround_3
+ mov x0, #ERRATA_APPLIES
+ ret
+endfunc check_smccc_arch_workaround_3
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_a75_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A75_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A75_CORE_PWRDN_EN_MASK
+ msr CORTEX_A75_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a75_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A75. Must follow AAPCS.
+ */
+func cortex_a75_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A75_764081, cortex_a75, 764081
+ report_errata ERRATA_A75_790748, cortex_a75, 790748
+ report_errata WORKAROUND_CVE_2017_5715, cortex_a75, cve_2017_5715
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a75, cve_2018_3639
+ report_errata ERRATA_DSU_798953, cortex_a75, dsu_798953
+ report_errata ERRATA_DSU_936184, cortex_a75, dsu_936184
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a75, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a75_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_a75 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a75_regs, "aS"
+cortex_a75_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a75_cpu_reg_dump
+ adr x6, cortex_a75_regs
+ mrs x8, CORTEX_A75_CPUECTLR_EL1
+ ret
+endfunc cortex_a75_cpu_reg_dump
+
+declare_cpu_ops_wa cortex_a75, CORTEX_A75_MIDR, \
+ cortex_a75_reset_func, \
+ check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
+ check_smccc_arch_workaround_3, \
+ cortex_a75_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a75_pubsub.c b/lib/cpus/aarch64/cortex_a75_pubsub.c
new file mode 100644
index 0000000..bd2c697
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a75_pubsub.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cortex_a75.h>
+#include <cpuamu.h>
+#include <lib/el3_runtime/pubsub_events.h>
+
+static void *cortex_a75_context_save(const void *arg)
+{
+ if (midr_match(CORTEX_A75_MIDR) != 0)
+ cpuamu_context_save(CORTEX_A75_AMU_NR_COUNTERS);
+
+ return (void *)0;
+}
+
+static void *cortex_a75_context_restore(const void *arg)
+{
+ if (midr_match(CORTEX_A75_MIDR) != 0)
+ cpuamu_context_restore(CORTEX_A75_AMU_NR_COUNTERS);
+
+ return (void *)0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, cortex_a75_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, cortex_a75_context_restore);
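+
+/*
+ * For reference: the pubsub framework invokes these callbacks on each CPU
+ * that publishes the corresponding PSCI event, so the midr_match() guard
+ * keeps the AMU save/restore from running on non-A75 cores of a
+ * heterogeneous system. A hypothetical subscriber follows the same shape:
+ *
+ *	static void *my_pm_hook(const void *arg)
+ *	{
+ *		return (void *)0;	// arg carries event data, unused here
+ *	}
+ *	SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, my_pm_hook);
+ */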
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
new file mode 100644
index 0000000..36507de
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -0,0 +1,846 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a76.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include <services/arm_arch_svc.h>
+#include "wa_cve_2022_23960_bhb.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-A76 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+ .globl cortex_a76_reset_func
+ .globl cortex_a76_core_pwr_dwn
+ .globl cortex_a76_disable_wa_cve_2018_3639
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-A76 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#define ESR_EL3_A64_SMC0 0x5e000000
+#define ESR_EL3_A32_SMC0 0x4e000000
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ /*
+ * This macro applies the mitigation for CVE-2018-3639.
+ * It implements a fast path where `SMCCC_ARCH_WORKAROUND_2`
+ * SMC calls from a lower EL running in AArch32 or AArch64
+	 * will go through the fast path and return early.
+ *
+ * The macro saves x2-x3 to the context. In the fast path
+ * x0-x3 registers do not need to be restored as the calling
+ * context will have saved them. The macro also saves
+ * x29-x30 to the context in the sync_exception path.
+ */
+ .macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ .if \_is_sync_exception
+ stp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ mov_imm w2, \_esr_el3_val
+ bl apply_cve_2018_3639_sync_wa
+ ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ .endif
+ /*
+ * Always enable v4 mitigation during EL3 execution. This is not
+ * required for the fast path above because it does not perform any
+ * memory loads.
+ */
+ mrs x2, CORTEX_A76_CPUACTLR2_EL1
+ orr x2, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ msr CORTEX_A76_CPUACTLR2_EL1, x2
+ isb
+
+ /*
+ * The caller may have passed arguments to EL3 via x2-x3.
+ * Restore these registers from the context before jumping to the
+ * main runtime vector table entry.
+ */
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ .endm
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
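+
+/*
+ * For reference, the lower-EL side of this fast path is a plain SMCCC call;
+ * a minimal caller sketch, assuming the function ID from arm_arch_svc.h:
+ *
+ *	mov_imm	w0, SMCCC_ARCH_WORKAROUND_2
+ *	mov	w1, #1		// 1 = enable the mitigation, 0 = disable
+ *	smc	#0
+ *
+ * The vector entries below intercept exactly this pattern and return to the
+ * caller without ever reaching the generic runtime services handler.
+ */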
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960
+vector_base cortex_a76_wa_cve_vbar
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry cortex_a76_sync_exception_sp_el0
+ b sync_exception_sp_el0
+end_vector_entry cortex_a76_sync_exception_sp_el0
+
+vector_entry cortex_a76_irq_sp_el0
+ b irq_sp_el0
+end_vector_entry cortex_a76_irq_sp_el0
+
+vector_entry cortex_a76_fiq_sp_el0
+ b fiq_sp_el0
+end_vector_entry cortex_a76_fiq_sp_el0
+
+vector_entry cortex_a76_serror_sp_el0
+ b serror_sp_el0
+end_vector_entry cortex_a76_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry cortex_a76_sync_exception_sp_elx
+ b sync_exception_sp_elx
+end_vector_entry cortex_a76_sync_exception_sp_elx
+
+vector_entry cortex_a76_irq_sp_elx
+ b irq_sp_elx
+end_vector_entry cortex_a76_irq_sp_elx
+
+vector_entry cortex_a76_fiq_sp_elx
+ b fiq_sp_elx
+end_vector_entry cortex_a76_fiq_sp_elx
+
+vector_entry cortex_a76_serror_sp_elx
+ b serror_sp_elx
+end_vector_entry cortex_a76_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry cortex_a76_sync_exception_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
+ b sync_exception_aarch64
+end_vector_entry cortex_a76_sync_exception_aarch64
+
+vector_entry cortex_a76_irq_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
+ b irq_aarch64
+end_vector_entry cortex_a76_irq_aarch64
+
+vector_entry cortex_a76_fiq_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
+ b fiq_aarch64
+end_vector_entry cortex_a76_fiq_aarch64
+
+vector_entry cortex_a76_serror_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
+ b serror_aarch64
+end_vector_entry cortex_a76_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry cortex_a76_sync_exception_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
+ b sync_exception_aarch32
+end_vector_entry cortex_a76_sync_exception_aarch32
+
+vector_entry cortex_a76_irq_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
+ b irq_aarch32
+end_vector_entry cortex_a76_irq_aarch32
+
+vector_entry cortex_a76_fiq_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
+ b fiq_aarch32
+end_vector_entry cortex_a76_fiq_aarch32
+
+vector_entry cortex_a76_serror_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
+ b serror_aarch32
+end_vector_entry cortex_a76_serror_aarch32
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ /*
+ * -----------------------------------------------------------------
+ * This function applies the mitigation for CVE-2018-3639
+ * specifically for sync exceptions. It implements a fast path
+ * where `SMCCC_ARCH_WORKAROUND_2` SMC calls from a lower EL
+ * running in AArch64 will go through the fast path and return early.
+ *
+ * In the fast path x0-x3 registers do not need to be restored as the
+ * calling context will have saved them.
+ *
+ * Caller must pass value of esr_el3 to compare via x2.
+ * Save and restore these registers outside of this function from the
+ * context before jumping to the main runtime vector table entry.
+ *
+ * Shall clobber: x0-x3, x30
+ * -----------------------------------------------------------------
+ */
+func apply_cve_2018_3639_sync_wa
+ /*
+ * Ensure SMC is coming from A64/A32 state on #0
+ * with W0 = SMCCC_ARCH_WORKAROUND_2
+ *
+ * This sequence evaluates as:
+ * (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
+ * allowing use of a single branch operation
+	 * W2 is populated outside this function with the expected ESR_EL3 value.
+ */
+ orr w3, wzr, #SMCCC_ARCH_WORKAROUND_2
+ cmp x0, x3
+ mrs x3, esr_el3
+
+ ccmp w2, w3, #0, eq
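+	/*
+	 * ccmp performs the w2/w3 compare only when the preceding compare
+	 * was equal; otherwise it loads the immediate NZCV value #0, which
+	 * reads back as "not equal". Both checks thus collapse into the
+	 * single bne below.
+	 */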
+ /*
+ * Static predictor will predict a fall-through, optimizing
+ * the `SMCCC_ARCH_WORKAROUND_2` fast path.
+ */
+ bne 1f
+
+ /*
+ * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
+ * fast path.
+ */
+ cmp x1, xzr /* enable/disable check */
+
+ /*
+ * When the calling context wants mitigation disabled,
+ * we program the mitigation disable function in the
+ * CPU context, which gets invoked on subsequent exits from
+ * EL3 via the `el3_exit` function. Otherwise NULL is
+ * programmed in the CPU context, which results in caller's
+ * inheriting the EL3 mitigation state (enabled) on subsequent
+ * `el3_exit`.
+ */
+ mov x0, xzr
+ adr x1, cortex_a76_disable_wa_cve_2018_3639
+ csel x1, x1, x0, eq
+ str x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
+
+ mrs x2, CORTEX_A76_CPUACTLR2_EL1
+ orr x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ bic x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ csel x3, x3, x1, eq
+ msr CORTEX_A76_CPUACTLR2_EL1, x3
+ ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ /*
+	 * `SMCCC_ARCH_WORKAROUND_2` fast path return to lower EL.
+ */
+ exception_return /* exception_return contains ISB */
+1:
+ ret
+endfunc apply_cve_2018_3639_sync_wa
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1073348.
+ * This applies only to revision <= r1p0 of Cortex A76.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a76_1073348_wa
+ /*
+ * Compare x0 against revision r1p0
+ */
+ mov x17, x30
+ bl check_errata_1073348
+ cbz x0, 1f
+ mrs x1, CORTEX_A76_CPUACTLR_EL1
+	orr	x1, x1, #CORTEX_A76_CPUACTLR_EL1_DISABLE_STATIC_PREDICTION
+ msr CORTEX_A76_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a76_1073348_wa
+
+func check_errata_1073348
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1073348
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1130799.
+ * This applies only to revision <= r2p0 of Cortex A76.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a76_1130799_wa
+ /*
+ * Compare x0 against revision r2p0
+ */
+ mov x17, x30
+ bl check_errata_1130799
+ cbz x0, 1f
+ mrs x1, CORTEX_A76_CPUACTLR2_EL1
+	orr	x1, x1, #(1 << 59)
+ msr CORTEX_A76_CPUACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a76_1130799_wa
+
+func check_errata_1130799
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_1130799
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1220197.
+ * This applies only to revision <= r2p0 of Cortex A76.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a76_1220197_wa
+	/*
+	 * Compare x0 against revision r2p0
+	 */
+ mov x17, x30
+ bl check_errata_1220197
+ cbz x0, 1f
+ mrs x1, CORTEX_A76_CPUECTLR_EL1
+ orr x1, x1, #CORTEX_A76_CPUECTLR_EL1_WS_THR_L2
+ msr CORTEX_A76_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a76_1220197_wa
+
+func check_errata_1220197
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_1220197
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1257314.
+ * This applies only to revision <= r3p0 of Cortex A76.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a76_1257314_wa
+ /*
+ * Compare x0 against revision r3p0
+ */
+ mov x17, x30
+ bl check_errata_1257314
+ cbz x0, 1f
+ mrs x1, CORTEX_A76_CPUACTLR3_EL1
+ orr x1, x1, CORTEX_A76_CPUACTLR3_EL1_BIT_10
+ msr CORTEX_A76_CPUACTLR3_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a76_1257314_wa
+
+func check_errata_1257314
+ mov x1, #0x30
+ b cpu_rev_var_ls
+endfunc check_errata_1257314
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1262888.
+ * This applies only to revision <= r3p0 of Cortex A76.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a76_1262888_wa
+ /*
+ * Compare x0 against revision r3p0
+ */
+ mov x17, x30
+ bl check_errata_1262888
+ cbz x0, 1f
+ mrs x1, CORTEX_A76_CPUECTLR_EL1
+ orr x1, x1, CORTEX_A76_CPUECTLR_EL1_BIT_51
+ msr CORTEX_A76_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a76_1262888_wa
+
+func check_errata_1262888
+ mov x1, #0x30
+ b cpu_rev_var_ls
+endfunc check_errata_1262888
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1286807.
+ * This applies only to revision <= r3p0 of Cortex A76.
+ * Due to the nature of the erratum, the workaround is applied
+ * unconditionally when built in; report it as applicable in that case.
+ * ---------------------------------------------------
+ */
+func check_errata_1286807
+#if ERRATA_A76_1286807
+ mov x0, #ERRATA_APPLIES
+ ret
+#else
+ mov x1, #0x30
+ b cpu_rev_var_ls
+#endif
+endfunc check_errata_1286807
+
+ /* --------------------------------------------------
+ * Errata workaround for Cortex A76 Errata #1791580.
+ * This applies to revisions <= r4p0 of Cortex A76.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a76_1791580_wa
+ /* Compare x0 against revision r4p0 */
+ mov x17, x30
+ bl check_errata_1791580
+ cbz x0, 1f
+ mrs x1, CORTEX_A76_CPUACTLR2_EL1
+ orr x1, x1, CORTEX_A76_CPUACTLR2_EL1_BIT_2
+ msr CORTEX_A76_CPUACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a76_1791580_wa
+
+func check_errata_1791580
+	/* Applies to everything <= r4p0. */
+ mov x1, #0x40
+ b cpu_rev_var_ls
+endfunc check_errata_1791580
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1262606,
+ * #1275112, and #1868343. #1262606 and #1275112
+ * apply to revisions <= r3p0 and #1868343 applies to
+ * revisions <= r4p0.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+
+func errata_a76_1262606_1275112_1868343_wa
+ mov x17, x30
+
+/* Check for <= r3p0 cases and branch if check passes. */
+#if ERRATA_A76_1262606 || ERRATA_A76_1275112
+ bl check_errata_1262606
+ cbnz x0, 1f
+#endif
+
+/* Check for <= r4p0 cases and branch if check fails. */
+#if ERRATA_A76_1868343
+ bl check_errata_1868343
+ cbz x0, 2f
+#endif
+1:
+ mrs x1, CORTEX_A76_CPUACTLR_EL1
+ orr x1, x1, #CORTEX_A76_CPUACTLR_EL1_BIT_13
+ msr CORTEX_A76_CPUACTLR_EL1, x1
+ isb
+2:
+ ret x17
+endfunc errata_a76_1262606_1275112_1868343_wa
+
+func check_errata_1262606
+ mov x1, #0x30
+ b cpu_rev_var_ls
+endfunc check_errata_1262606
+
+func check_errata_1275112
+ mov x1, #0x30
+ b cpu_rev_var_ls
+endfunc check_errata_1275112
+
+func check_errata_1868343
+ mov x1, #0x40
+ b cpu_rev_var_ls
+endfunc check_errata_1868343
+
+/* --------------------------------------------------
+ * Errata Workaround for A76 Erratum 1946160.
+ * This applies to revisions r3p0 - r4p1 of A76.
+ * It also exists in r0p0 - r2p0 but there is no fix
+ * in those revisions.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a76_1946160_wa
+ /* Compare x0 against revisions r3p0 - r4p1 */
+ mov x17, x30
+ bl check_errata_1946160
+ cbz x0, 1f
+
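+	/*
+	 * The S3_6_C15_C8_n registers below are understood to be the same
+	 * IMPLEMENTATION DEFINED instruction-patching interface that the
+	 * Cortex-A77/A78 workarounds later in this patch name explicitly:
+	 * _0 CPUPSELR_EL3, _1 CPUPCR_EL3, _2 CPUPOR_EL3, _3 CPUPMR_EL3.
+	 */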
+ mov x0, #3
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3900002
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #4
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #5
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_a76_1946160_wa
+
+func check_errata_1946160
+ /* Applies to revisions r3p0 - r4p1. */
+ mov x1, #0x30
+ mov x2, #0x41
+ b cpu_rev_var_range
+endfunc check_errata_1946160
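+
+/*
+ * For reference: cpu_rev_var_range is the inclusive-range counterpart of
+ * cpu_rev_var_ls; it returns ERRATA_APPLIES in x0 when the rev-var value
+ * lies between the bounds in x1 and x2, here 0x30 (r3p0) and 0x41 (r4p1).
+ */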
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex-A76 Errata #2743102
+ * This applies to revisions <= r4p1 and is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ----------------------------------------------------
+ */
+func errata_a76_2743102_wa
+ mov x17, x30
+ bl check_errata_2743102
+ cbz x0, 1f
+
+ /* dsb before isb of power down sequence */
+ dsb sy
+1:
+ ret x17
+endfunc errata_a76_2743102_wa
+
+func check_errata_2743102
+ /* Applies to all revisions <= r4p1 */
+ mov x1, #0x41
+ b cpu_rev_var_ls
+endfunc check_errata_2743102
+
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2018_3639
+
+func cortex_a76_disable_wa_cve_2018_3639
+ mrs x0, CORTEX_A76_CPUACTLR2_EL1
+ bic x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ msr CORTEX_A76_CPUACTLR2_EL1, x0
+ isb
+ ret
+endfunc cortex_a76_disable_wa_cve_2018_3639
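+
+/*
+ * Note: the address of the function above is what the fast path stores at
+ * CTX_CVE_2018_3639_DISABLE in the CPU context. When that slot is non-NULL,
+ * el3_exit invokes it before ERET, so a lower EL that requested the
+ * mitigation off leaves EL3 with DISABLE_LOAD_PASS_STORE cleared again.
+ */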
+
+ /* --------------------------------------------------------------
+ * Errata Workaround for Cortex A76 Errata #1165522.
+ * This applies only to revisions <= r3p0 of Cortex A76.
+ * Due to the nature of the erratum, the workaround is applied
+ * unconditionally when built in; report it as applicable in that case.
+ * --------------------------------------------------------------
+ */
+func check_errata_1165522
+#if ERRATA_A76_1165522
+ mov x0, #ERRATA_APPLIES
+ ret
+#else
+ mov x1, #0x30
+ b cpu_rev_var_ls
+#endif
+endfunc check_errata_1165522
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif /* WORKAROUND_CVE_2022_23960 */
+ ret
+endfunc check_errata_cve_2022_23960
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A76.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
+func cortex_a76_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A76_1073348
+ mov x0, x18
+ bl errata_a76_1073348_wa
+#endif
+
+#if ERRATA_A76_1130799
+ mov x0, x18
+ bl errata_a76_1130799_wa
+#endif
+
+#if ERRATA_A76_1220197
+ mov x0, x18
+ bl errata_a76_1220197_wa
+#endif
+
+#if ERRATA_A76_1257314
+ mov x0, x18
+ bl errata_a76_1257314_wa
+#endif
+
+#if ERRATA_A76_1262606 || ERRATA_A76_1275112 || ERRATA_A76_1868343
+ mov x0, x18
+ bl errata_a76_1262606_1275112_1868343_wa
+#endif
+
+#if ERRATA_A76_1262888
+ mov x0, x18
+ bl errata_a76_1262888_wa
+#endif
+
+#if ERRATA_A76_1791580
+ mov x0, x18
+ bl errata_a76_1791580_wa
+#endif
+
+#if ERRATA_A76_1946160
+ mov x0, x18
+ bl errata_a76_1946160_wa
+#endif
+
+#if WORKAROUND_CVE_2018_3639
+ /* If the PE implements SSBS, we don't need the dynamic workaround */
+ mrs x0, id_aa64pfr1_el1
+ lsr x0, x0, #ID_AA64PFR1_EL1_SSBS_SHIFT
+ and x0, x0, #ID_AA64PFR1_EL1_SSBS_MASK
+#if !DYNAMIC_WORKAROUND_CVE_2018_3639 && ENABLE_ASSERTIONS
+	/*
+	 * Without the dynamic workaround, SSBS is the only available
+	 * mitigation, so assert that the PE implements it.
+	 */
+	cmp	x0, 0
+	ASM_ASSERT(ne)
+#endif
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ cbnz x0, 1f
+ mrs x0, CORTEX_A76_CPUACTLR2_EL1
+ orr x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ msr CORTEX_A76_CPUACTLR2_EL1, x0
+ isb
+
+#ifdef IMAGE_BL31
+ /*
+ * The Cortex-A76 generic vectors are overwritten to use the vectors
+ * defined above. This is required in order to apply mitigation
+ * against CVE-2018-3639 on exception entry from lower ELs.
+ * If the below vector table is used, skip overriding it again for
+ * CVE_2022_23960 as both use the same vbar.
+ */
+ adr x0, cortex_a76_wa_cve_vbar
+ msr vbar_el3, x0
+ isb
+ b 2f
+#endif /* IMAGE_BL31 */
+
+1:
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+#endif /* WORKAROUND_CVE_2018_3639 */
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-A76 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs. This will be bypassed
+ * if DYNAMIC_WORKAROUND_CVE_2018_3639 has overridden the vectors.
+ */
+ adr x0, cortex_a76_wa_cve_vbar
+ msr vbar_el3, x0
+ isb
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+2:
+
+#if ERRATA_DSU_798953
+ bl errata_dsu_798953_wa
+#endif
+
+#if ERRATA_DSU_936184
+ bl errata_dsu_936184_wa
+#endif
+
+ ret x19
+endfunc cortex_a76_reset_func
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_a76_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A76_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A76_CORE_PWRDN_EN_MASK
+ msr CORTEX_A76_CPUPWRCTLR_EL1, x0
+#if ERRATA_A76_2743102
+ mov x15, x30
+ bl cpu_get_rev_var
+ bl errata_a76_2743102_wa
+ mov x30, x15
+#endif /* ERRATA_A76_2743102 */
+ isb
+ ret
+endfunc cortex_a76_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A76. Must follow AAPCS.
+ */
+func cortex_a76_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A76_1073348, cortex_a76, 1073348
+ report_errata ERRATA_A76_1130799, cortex_a76, 1130799
+ report_errata ERRATA_A76_1165522, cortex_a76, 1165522
+ report_errata ERRATA_A76_1220197, cortex_a76, 1220197
+ report_errata ERRATA_A76_1257314, cortex_a76, 1257314
+ report_errata ERRATA_A76_1262606, cortex_a76, 1262606
+ report_errata ERRATA_A76_1262888, cortex_a76, 1262888
+ report_errata ERRATA_A76_1275112, cortex_a76, 1275112
+ report_errata ERRATA_A76_1286807, cortex_a76, 1286807
+ report_errata ERRATA_A76_1791580, cortex_a76, 1791580
+ report_errata ERRATA_A76_1868343, cortex_a76, 1868343
+ report_errata ERRATA_A76_1946160, cortex_a76, 1946160
+ report_errata ERRATA_A76_2743102, cortex_a76, 2743102
+ report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
+ report_errata ERRATA_DSU_798953, cortex_a76, dsu_798953
+ report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a76, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a76_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_a76 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a76_regs, "aS"
+cortex_a76_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a76_cpu_reg_dump
+ adr x6, cortex_a76_regs
+ mrs x8, CORTEX_A76_CPUECTLR_EL1
+ ret
+endfunc cortex_a76_cpu_reg_dump
+
+declare_cpu_ops_wa cortex_a76, CORTEX_A76_MIDR, \
+ cortex_a76_reset_func, \
+ CPU_NO_EXTRA1_FUNC, \
+ cortex_a76_disable_wa_cve_2018_3639, \
+ CPU_NO_EXTRA3_FUNC, \
+ cortex_a76_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a76ae.S b/lib/cpus/aarch64/cortex_a76ae.S
new file mode 100644
index 0000000..5c19548
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a76ae.S
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a76ae.h>
+#include <cpu_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-A76AE must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-A76AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A76AE_BHB_LOOP_COUNT, cortex_a76ae
+#endif /* WORKAROUND_CVE_2022_23960 */
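+
+/*
+ * For reference: this macro is understood to emit a vector table named
+ * wa_cve_vbar_cortex_a76ae in which every entry first executes a branch
+ * loop CORTEX_A76AE_BHB_LOOP_COUNT times to overwrite any attacker-trained
+ * branch history (the Spectre-BHB mitigation) before falling through to
+ * the corresponding generic EL3 vector.
+ */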
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif /* WORKAROUND_CVE_2022_23960 */
+ ret
+endfunc check_errata_cve_2022_23960
+
+ /* --------------------------------------------
+ * The CPU Ops reset function for Cortex-A76AE.
+ * Shall clobber: x0-x19
+ * --------------------------------------------
+ */
+func cortex_a76ae_reset_func
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-A76ae generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_a76ae
+ msr vbar_el3, x0
+ isb
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ ret
+endfunc cortex_a76ae_reset_func
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_a76ae_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_A76AE_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A76AE_CORE_PWRDN_EN_MASK
+ msr CORTEX_A76AE_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a76ae_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A76AE. Must follow AAPCS.
+ */
+func cortex_a76ae_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a76ae, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a76ae_errata_report
+#endif /* REPORT_ERRATA */
+
+ /* ---------------------------------------------
+ * This function provides cortex_a76ae specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a76ae_regs, "aS"
+cortex_a76ae_regs: /* The ASCII list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a76ae_cpu_reg_dump
+ adr x6, cortex_a76ae_regs
+ mrs x8, CORTEX_A76AE_CPUECTLR_EL1
+ ret
+endfunc cortex_a76ae_cpu_reg_dump
+
+declare_cpu_ops cortex_a76ae, CORTEX_A76AE_MIDR, cortex_a76ae_reset_func, \
+ cortex_a76ae_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S
new file mode 100644
index 0000000..2882df7
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a77.S
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a77.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-A77 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A77_BHB_LOOP_COUNT, cortex_a77
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A77 Errata #1508412.
+ * This applies only to revision <= r1p0 of Cortex A77.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a77_1508412_wa
+ /*
+ * Compare x0 against revision r1p0
+ */
+ mov x17, x30
+ bl check_errata_1508412
+ cbz x0, 3f
+ /*
+ * Compare x0 against revision r0p0
+ */
+ bl check_errata_1508412_0
+ cbz x0, 1f
+ ldr x0, =0x0
+ msr CORTEX_A77_CPUPSELR_EL3, x0
+ ldr x0, =0x00E8400000
+ msr CORTEX_A77_CPUPOR_EL3, x0
+ ldr x0, =0x00FFE00000
+ msr CORTEX_A77_CPUPMR_EL3, x0
+ ldr x0, =0x4004003FF
+ msr CORTEX_A77_CPUPCR_EL3, x0
+ ldr x0, =0x1
+ msr CORTEX_A77_CPUPSELR_EL3, x0
+ ldr x0, =0x00E8C00040
+ msr CORTEX_A77_CPUPOR_EL3, x0
+ ldr x0, =0x00FFE00040
+ msr CORTEX_A77_CPUPMR_EL3, x0
+ b 2f
+1:
+ ldr x0, =0x0
+ msr CORTEX_A77_CPUPSELR_EL3, x0
+ ldr x0, =0x00E8400000
+ msr CORTEX_A77_CPUPOR_EL3, x0
+ ldr x0, =0x00FF600000
+ msr CORTEX_A77_CPUPMR_EL3, x0
+ ldr x0, =0x00E8E00080
+ msr CORTEX_A77_CPUPOR2_EL3, x0
+ ldr x0, =0x00FFE000C0
+ msr CORTEX_A77_CPUPMR2_EL3, x0
+2:
+ ldr x0, =0x04004003FF
+ msr CORTEX_A77_CPUPCR_EL3, x0
+ isb
+3:
+ ret x17
+endfunc errata_a77_1508412_wa
+
+func check_errata_1508412
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1508412
+
+func check_errata_1508412_0
+ mov x1, #0x0
+ b cpu_rev_var_ls
+endfunc check_errata_1508412_0
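+
+/*
+ * Note: the workaround above drives what is understood to be the A77
+ * instruction-patching interface: CPUPSELR_EL3 selects a patch slot,
+ * CPUPOR_EL3/CPUPMR_EL3 hold the opcode pattern and mask to match, and
+ * CPUPCR_EL3 controls the patch; r0p0 additionally needs the second
+ * CPUPOR2/CPUPMR2 register pair.
+ */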
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A77 Errata #1925769.
+ * This applies to revision <= r1p1 of Cortex A77.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a77_1925769_wa
+ /* Compare x0 against revision <= r1p1 */
+ mov x17, x30
+ bl check_errata_1925769
+ cbz x0, 1f
+
+ /* Set bit 8 in ECTLR_EL1 */
+ mrs x1, CORTEX_A77_CPUECTLR_EL1
+ orr x1, x1, #CORTEX_A77_CPUECTLR_EL1_BIT_8
+ msr CORTEX_A77_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a77_1925769_wa
+
+func check_errata_1925769
+ /* Applies to everything <= r1p1 */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_1925769
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A77 Errata #1946167.
+ * This applies to revision <= r1p1 of Cortex A77.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a77_1946167_wa
+ /* Compare x0 against revision <= r1p1 */
+ mov x17, x30
+ bl check_errata_1946167
+ cbz x0, 1f
+
+	ldr	x0, =0x4
+	msr	CORTEX_A77_CPUPSELR_EL3, x0
+	ldr	x0, =0x10E3900002
+	msr	CORTEX_A77_CPUPOR_EL3, x0
+	ldr	x0, =0x10FFF00083
+	msr	CORTEX_A77_CPUPMR_EL3, x0
+	ldr	x0, =0x2001003FF
+	msr	CORTEX_A77_CPUPCR_EL3, x0
+
+	ldr	x0, =0x5
+	msr	CORTEX_A77_CPUPSELR_EL3, x0
+	ldr	x0, =0x10E3800082
+	msr	CORTEX_A77_CPUPOR_EL3, x0
+	ldr	x0, =0x10FFF00083
+	msr	CORTEX_A77_CPUPMR_EL3, x0
+	ldr	x0, =0x2001003FF
+	msr	CORTEX_A77_CPUPCR_EL3, x0
+
+	ldr	x0, =0x6
+	msr	CORTEX_A77_CPUPSELR_EL3, x0
+	ldr	x0, =0x10E3800200
+	msr	CORTEX_A77_CPUPOR_EL3, x0
+	ldr	x0, =0x10FFF003E0
+	msr	CORTEX_A77_CPUPMR_EL3, x0
+	ldr	x0, =0x2001003FF
+	msr	CORTEX_A77_CPUPCR_EL3, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_a77_1946167_wa
+
+func check_errata_1946167
+ /* Applies to everything <= r1p1 */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_1946167
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A77 Errata #1791578.
+ * This applies to revisions r0p0, r1p0, and r1p1 and is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a77_1791578_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_1791578
+ cbz x0, 1f
+
+ /* Set bit 2 in ACTLR2_EL1 */
+ mrs x1, CORTEX_A77_ACTLR2_EL1
+ orr x1, x1, #CORTEX_A77_ACTLR2_EL1_BIT_2
+ msr CORTEX_A77_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a77_1791578_wa
+
+func check_errata_1791578
+	/* Applies to r0p0, r1p0, and r1p1; the erratum is still open */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_1791578
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A77 Errata #2356587.
+ * This applies to revisions r0p0, r1p0, and r1p1 and is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a77_2356587_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2356587
+ cbz x0, 1f
+
+ /* Set bit 0 in ACTLR2_EL1 */
+ mrs x1, CORTEX_A77_ACTLR2_EL1
+ orr x1, x1, #CORTEX_A77_ACTLR2_EL1_BIT_0
+ msr CORTEX_A77_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a77_2356587_wa
+
+func check_errata_2356587
+	/* Applies to r0p0, r1p0, and r1p1; the erratum is still open */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_2356587
+
+ /* -----------------------------------------------------------------
+ * Errata Workaround for Cortex A77 Errata #2743100
+ * This applies to revisions r0p0, r1p0, and r1p1 and is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * -----------------------------------------------------------------
+ */
+func errata_a77_2743100_wa
+ mov x17, x30
+ bl check_errata_2743100
+ cbz x0, 1f
+
+ /* dsb before isb of power down sequence */
+ dsb sy
+1:
+ ret x17
+endfunc errata_a77_2743100_wa
+
+func check_errata_2743100
+	/* Applies to r0p0, r1p0, and r1p1; the erratum is still open */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_2743100
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A77 Errata #1800714.
+ * This applies to revision <= r1p1 of Cortex A77.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a77_1800714_wa
+ /* Compare x0 against revision <= r1p1 */
+ mov x17, x30
+ bl check_errata_1800714
+ cbz x0, 1f
+
+ /* Disable allocation of splintered pages in the L2 TLB */
+ mrs x1, CORTEX_A77_CPUECTLR_EL1
+ orr x1, x1, CORTEX_A77_CPUECTLR_EL1_BIT_53
+ msr CORTEX_A77_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a77_1800714_wa
+
+func check_errata_1800714
+ /* Applies to everything <= r1p1 */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_1800714
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A77.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
+func cortex_a77_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A77_1508412
+ mov x0, x18
+ bl errata_a77_1508412_wa
+#endif
+
+#if ERRATA_A77_1925769
+ mov x0, x18
+ bl errata_a77_1925769_wa
+#endif
+
+#if ERRATA_A77_1946167
+ mov x0, x18
+ bl errata_a77_1946167_wa
+#endif
+
+#if ERRATA_A77_1791578
+ mov x0, x18
+ bl errata_a77_1791578_wa
+#endif
+
+#if ERRATA_A77_2356587
+ mov x0, x18
+ bl errata_a77_2356587_wa
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-A77 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_a77
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+#if ERRATA_A77_1800714
+ mov x0, x18
+ bl errata_a77_1800714_wa
+#endif
+
+ isb
+ ret x19
+endfunc cortex_a77_reset_func
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_a77_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A77_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A77_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_A77_CPUPWRCTLR_EL1, x0
+#if ERRATA_A77_2743100
+ mov x15, x30
+ bl cpu_get_rev_var
+ bl errata_a77_2743100_wa
+ mov x30, x15
+#endif /* ERRATA_A77_2743100 */
+ isb
+ ret
+endfunc cortex_a77_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A77. Must follow AAPCS.
+ */
+func cortex_a77_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A77_1508412, cortex_a77, 1508412
+ report_errata ERRATA_A77_1791578, cortex_a77, 1791578
+ report_errata ERRATA_A77_1800714, cortex_a77, 1800714
+ report_errata ERRATA_A77_1925769, cortex_a77, 1925769
+ report_errata ERRATA_A77_1946167, cortex_a77, 1946167
+ report_errata ERRATA_A77_2356587, cortex_a77, 2356587
+ report_errata ERRATA_A77_2743100, cortex_a77, 2743100
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a77, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a77_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Cortex-A77 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a77_regs, "aS"
+cortex_a77_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a77_cpu_reg_dump
+ adr x6, cortex_a77_regs
+ mrs x8, CORTEX_A77_CPUECTLR_EL1
+ ret
+endfunc cortex_a77_cpu_reg_dump
+
+declare_cpu_ops cortex_a77, CORTEX_A77_MIDR, \
+ cortex_a77_reset_func, \
+ cortex_a77_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
new file mode 100644
index 0000000..dd3487a
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a78.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "cortex_a78 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+.globl cortex_a78_reset_func
+.globl cortex_a78_core_pwr_dwn
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A78_BHB_LOOP_COUNT, cortex_a78
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+/* --------------------------------------------------
+ * Errata Workaround for A78 Erratum 1688305.
+ * This applies to revision r0p0 and r1p0 of A78.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_1688305_wa
+ /* Compare x0 against revision r1p0 */
+ mov x17, x30
+ bl check_errata_1688305
+ cbz x0, 1f
+ mrs x1, CORTEX_A78_ACTLR2_EL1
+ orr x1, x1, #CORTEX_A78_ACTLR2_EL1_BIT_1
+ msr CORTEX_A78_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a78_1688305_wa
+
+func check_errata_1688305
+ /* Applies to r0p0 and r1p0 */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1688305
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata #1941498.
+ * This applies to revisions r0p0, r1p0, and r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_1941498_wa
+ /* Compare x0 against revision <= r1p1 */
+ mov x17, x30
+ bl check_errata_1941498
+ cbz x0, 1f
+
+ /* Set bit 8 in ECTLR_EL1 */
+ mrs x1, CORTEX_A78_CPUECTLR_EL1
+ orr x1, x1, #CORTEX_A78_CPUECTLR_EL1_BIT_8
+ msr CORTEX_A78_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a78_1941498_wa
+
+func check_errata_1941498
+	/* Check for revision <= r1p1; might need to be updated later. */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_1941498
+
+/* --------------------------------------------------
+ * Errata Workaround for A78 Erratum 1951500.
+ * This applies to revisions r1p0 and r1p1 of A78.
+ * The issue also exists in r0p0 but there is no fix
+ * in that revision.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_1951500_wa
+ /* Compare x0 against revisions r1p0 - r1p1 */
+ mov x17, x30
+ bl check_errata_1951500
+ cbz x0, 1f
+
+ msr S3_6_c15_c8_0, xzr
+ ldr x0, =0x10E3900002
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #1
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #2
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_a78_1951500_wa
+
+func check_errata_1951500
+ /* Applies to revisions r1p0 and r1p1. */
+ mov x1, #CPU_REV(1, 0)
+ mov x2, #CPU_REV(1, 1)
+ b cpu_rev_var_range
+endfunc check_errata_1951500
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata #1821534.
+ * This applies to revisions r0p0 and r1p0.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_1821534_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_1821534
+ cbz x0, 1f
+
+ /* Set bit 2 in ACTLR2_EL1 */
+ mrs x1, CORTEX_A78_ACTLR2_EL1
+ orr x1, x1, #CORTEX_A78_ACTLR2_EL1_BIT_2
+ msr CORTEX_A78_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_a78_1821534_wa
+
+func check_errata_1821534
+ /* Applies to r0p0 and r1p0 */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1821534
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata 1952683.
+ * This applies to revision r0p0.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_1952683_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_1952683
+ cbz x0, 1f
+
+	ldr	x0, =0x5
+	msr	S3_6_c15_c8_0, x0
+	ldr	x0, =0xEEE10A10
+	msr	S3_6_c15_c8_2, x0
+	ldr	x0, =0xFFEF0FFF
+	msr	S3_6_c15_c8_3, x0
+	ldr	x0, =0x0010F000
+	msr	S3_6_c15_c8_4, x0
+	ldr	x0, =0x0010F000
+	msr	S3_6_c15_c8_5, x0
+	ldr	x0, =0x40000080023ff
+	msr	S3_6_c15_c8_1, x0
+	ldr	x0, =0x6
+	msr	S3_6_c15_c8_0, x0
+	ldr	x0, =0xEE640F34
+	msr	S3_6_c15_c8_2, x0
+	ldr	x0, =0xFFEF0FFF
+	msr	S3_6_c15_c8_3, x0
+	ldr	x0, =0x40000080023ff
+	msr	S3_6_c15_c8_1, x0
+ isb
+1:
+ ret x17
+endfunc errata_a78_1952683_wa
+
+func check_errata_1952683
+ /* Applies to r0p0 only */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_1952683
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata 2132060.
+ * This applies to revisions r0p0, r1p0, r1p1, and r1p2.
+ * It is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_a78_2132060_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2132060
+ cbz x0, 1f
+
+ /* Apply the workaround. */
+ mrs x1, CORTEX_A78_CPUECTLR_EL1
+ mov x0, #CORTEX_A78_CPUECTLR_EL1_PF_MODE_CNSRV
+ bfi x1, x0, #CPUECTLR_EL1_PF_MODE_LSB, #CPUECTLR_EL1_PF_MODE_WIDTH
+ msr CORTEX_A78_CPUECTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_a78_2132060_wa
+
+func check_errata_2132060
+	/* Applies to r0p0, r1p0, r1p1, and r1p2 */
+ mov x1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_2132060
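+
+/*
+ * Note: unlike the single-bit workarounds in this file, 2132060 writes a
+ * multi-bit field: bfi copies CPUECTLR_EL1_PF_MODE_WIDTH bits of the
+ * conservative-prefetch value into CPUECTLR_EL1 at
+ * CPUECTLR_EL1_PF_MODE_LSB, leaving the rest of the register untouched.
+ */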
+
+/* --------------------------------------------------------------------
+ * Errata Workaround for A78 Erratum 2242635.
+ * This applies to revisions r1p0, r1p1, and r1p2 of the Cortex A78
+ * processor and is still open.
+ * The issue also exists in r0p0 but there is no fix in that revision.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------------------------
+ */
+func errata_a78_2242635_wa
+ /* Compare x0 against revisions r1p0 - r1p2 */
+ mov x17, x30
+ bl check_errata_2242635
+ cbz x0, 1f
+
+ ldr x0, =0x5
+ msr S3_6_c15_c8_0, x0 /* CPUPSELR_EL3 */
+ ldr x0, =0x10F600E000
+ msr S3_6_c15_c8_2, x0 /* CPUPOR_EL3 */
+ ldr x0, =0x10FF80E000
+ msr S3_6_c15_c8_3, x0 /* CPUPMR_EL3 */
+ ldr x0, =0x80000000003FF
+ msr S3_6_c15_c8_1, x0 /* CPUPCR_EL3 */
+
+ isb
+1:
+ ret x17
+endfunc errata_a78_2242635_wa
+
+func check_errata_2242635
+ /* Applies to revisions r1p0 through r1p2. */
+ mov x1, #CPU_REV(1, 0)
+ mov x2, #CPU_REV(1, 2)
+ b cpu_rev_var_range
+endfunc check_errata_2242635
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata 2376745.
+ * This applies to revisions r0p0, r1p0, r1p1, and r1p2.
+ * It is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_a78_2376745_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2376745
+ cbz x0, 1f
+
+ /* Apply the workaround. */
+ mrs x1, CORTEX_A78_ACTLR2_EL1
+ orr x1, x1, #BIT(0)
+ msr CORTEX_A78_ACTLR2_EL1, x1
+1:
+ ret x17
+endfunc errata_a78_2376745_wa
+
+func check_errata_2376745
+	/* Applies to r0p0, r1p0, r1p1, and r1p2 */
+ mov x1, #CPU_REV(1, 2)
+ b cpu_rev_var_ls
+endfunc check_errata_2376745
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78 Errata 2395406.
+ * This applies to revisions r0p0, r1p0, r1p1, and r1p2.
+ * It is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_a78_2395406_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2395406
+ cbz x0, 1f
+
+ /* Apply the workaround. */
+ mrs x1, CORTEX_A78_ACTLR2_EL1
+ orr x1, x1, #BIT(40)
+ msr CORTEX_A78_ACTLR2_EL1, x1
+1:
+ ret x17
+endfunc errata_a78_2395406_wa
+
+func check_errata_2395406
+	/* Applies to r0p0, r1p0, r1p1, and r1p2 */
+ mov x1, #CPU_REV(1, 2)
+ b cpu_rev_var_ls
+endfunc check_errata_2395406
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A78
+ * -------------------------------------------------
+ */
+func cortex_a78_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A78_1688305
+ mov x0, x18
+ bl errata_a78_1688305_wa
+#endif
+
+#if ERRATA_A78_1941498
+ mov x0, x18
+ bl errata_a78_1941498_wa
+#endif
+
+#if ERRATA_A78_1951500
+ mov x0, x18
+ bl errata_a78_1951500_wa
+#endif
+
+#if ERRATA_A78_1821534
+ mov x0, x18
+ bl errata_a78_1821534_wa
+#endif
+
+#if ERRATA_A78_1952683
+ mov x0, x18
+ bl errata_a78_1952683_wa
+#endif
+
+#if ERRATA_A78_2132060
+ mov x0, x18
+ bl errata_a78_2132060_wa
+#endif
+
+#if ERRATA_A78_2242635
+ mov x0, x18
+ bl errata_a78_2242635_wa
+#endif
+
+#if ERRATA_A78_2376745
+ mov x0, x18
+ bl errata_a78_2376745_wa
+#endif
+
+#if ERRATA_A78_2395406
+ mov x0, x18
+ bl errata_a78_2395406_wa
+#endif
+
+#if ENABLE_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, actlr_el3
+ bic x0, x0, #CORTEX_A78_ACTLR_TAM_BIT
+ msr actlr_el3, x0
+
+ /* Make sure accesses from non-secure EL0/EL1 are not trapped to EL2 */
+ mrs x0, actlr_el2
+ bic x0, x0, #CORTEX_A78_ACTLR_TAM_BIT
+ msr actlr_el2, x0
+
+ /* Enable group0 counters */
+ mov x0, #CORTEX_A78_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET0_EL0, x0
+
+ /* Enable group1 counters */
+ mov x0, #CORTEX_A78_AMU_GROUP1_MASK
+ msr CPUAMCNTENSET1_EL0, x0
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-A78 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_a78
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret x19
+endfunc cortex_a78_reset_func
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_a78_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_A78_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A78_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+ msr CORTEX_A78_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a78_core_pwr_dwn
+
+ /*
+ * Errata printing function for cortex_a78. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func cortex_a78_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+	 * Report all errata. The revision-variant information is passed to
+	 * the checking function of each erratum.
+ */
+ report_errata ERRATA_A78_1688305, cortex_a78, 1688305
+ report_errata ERRATA_A78_1941498, cortex_a78, 1941498
+ report_errata ERRATA_A78_1951500, cortex_a78, 1951500
+ report_errata ERRATA_A78_1821534, cortex_a78, 1821534
+ report_errata ERRATA_A78_1952683, cortex_a78, 1952683
+ report_errata ERRATA_A78_2132060, cortex_a78, 2132060
+ report_errata ERRATA_A78_2242635, cortex_a78, 2242635
+ report_errata ERRATA_A78_2376745, cortex_a78, 2376745
+ report_errata ERRATA_A78_2395406, cortex_a78, 2395406
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a78, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a78_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_a78 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a78_regs, "aS"
+cortex_a78_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a78_cpu_reg_dump
+ adr x6, cortex_a78_regs
+ mrs x8, CORTEX_A78_CPUECTLR_EL1
+ ret
+endfunc cortex_a78_cpu_reg_dump
+
+declare_cpu_ops cortex_a78, CORTEX_A78_MIDR, \
+ cortex_a78_reset_func, \
+ cortex_a78_core_pwr_dwn
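+
+/*
+ * declare_cpu_ops emits a cpu_ops descriptor, keyed by the MIDR given
+ * here, into the __CPU_OPS_START__ .. __CPU_OPS_END__ region that
+ * get_cpu_ops_ptr (see cpu_helpers.S below) walks at runtime to bind
+ * this reset and power-down code to the executing core.
+ */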
diff --git a/lib/cpus/aarch64/cortex_a78_ae.S b/lib/cpus/aarch64/cortex_a78_ae.S
new file mode 100644
index 0000000..27adc38
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a78_ae.S
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2019-2022, ARM Limited. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a78_ae.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "cortex_a78_ae must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A78_AE_BHB_LOOP_COUNT, cortex_a78_ae
+#endif /* WORKAROUND_CVE_2022_23960 */
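+
+/*
+ * The macro above instantiates a CPU-specific copy of the EL3 vectors
+ * (wa_cve_vbar_cortex_a78_ae, installed by the reset function below)
+ * whose lower-EL entries first execute a loop of
+ * CORTEX_A78_AE_BHB_LOOP_COUNT branches intended to overwrite the
+ * branch history, the generic Spectre-BHB (CVE-2022-23960) mitigation,
+ * before falling through to the regular vectors.
+ */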
+
+/* --------------------------------------------------
+ * Errata Workaround for A78 AE Erratum 1941500.
+ * This applies to revisions r0p0 and r0p1 of A78 AE.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_ae_1941500_wa
+ /* Compare x0 against revisions r0p0 - r0p1 */
+ mov x17, x30
+ bl check_errata_1941500
+ cbz x0, 1f
+
+	/* Clear bit 8 in CPUECTLR_EL1 */
+ mrs x0, CORTEX_A78_AE_CPUECTLR_EL1
+ bic x0, x0, #CORTEX_A78_AE_CPUECTLR_EL1_BIT_8
+ msr CORTEX_A78_AE_CPUECTLR_EL1, x0
+ isb
+1:
+ ret x17
+endfunc errata_a78_ae_1941500_wa
+
+func check_errata_1941500
+ /* Applies to revisions r0p0 and r0p1. */
+ mov x1, #CPU_REV(0, 0)
+ mov x2, #CPU_REV(0, 1)
+ b cpu_rev_var_range
+endfunc check_errata_1941500
+
+/* --------------------------------------------------
+ * Errata Workaround for A78 AE Erratum 1951502.
+ * This applies to revisions r0p0 and r0p1 of A78 AE.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_ae_1951502_wa
+ /* Compare x0 against revisions r0p0 - r0p1 */
+ mov x17, x30
+ bl check_errata_1951502
+ cbz x0, 1f
+
+ msr S3_6_c15_c8_0, xzr
+ ldr x0, =0x10E3900002
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #1
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #2
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_a78_ae_1951502_wa
+
+func check_errata_1951502
+ /* Applies to revisions r0p0 and r0p1. */
+ mov x1, #CPU_REV(0, 0)
+ mov x2, #CPU_REV(0, 1)
+ b cpu_rev_var_range
+endfunc check_errata_1951502
+
+/* --------------------------------------------------
+ * Errata Workaround for A78 AE Erratum 2376748.
+ * This applies to revisions r0p0 and r0p1 of A78 AE.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_ae_2376748_wa
+ /* Compare x0 against revisions r0p0 - r0p1 */
+ mov x17, x30
+ bl check_errata_2376748
+ cbz x0, 1f
+
+ /* -------------------------------------------------------
+ * Set CPUACTLR2_EL1[0] to 1 to force PLDW/PFRM ST to
+ * behave like PLD/PRFM LD and not cause invalidations to
+ * other PE caches. There might be a small performance
+	 * degradation with this workaround for certain workloads
+ * that share data.
+ * -------------------------------------------------------
+ */
+ mrs x0, CORTEX_A78_AE_ACTLR2_EL1
+ orr x0, x0, #CORTEX_A78_AE_ACTLR2_EL1_BIT_0
+ msr CORTEX_A78_AE_ACTLR2_EL1, x0
+ isb
+1:
+ ret x17
+endfunc errata_a78_ae_2376748_wa
+
+func check_errata_2376748
+ /* Applies to revisions r0p0 and r0p1. */
+ mov x1, #CPU_REV(0, 0)
+ mov x2, #CPU_REV(0, 1)
+ b cpu_rev_var_range
+endfunc check_errata_2376748
+
+/* --------------------------------------------------
+ * Errata Workaround for A78 AE Erratum 2395408.
+ * This applies to revisions r0p0 and r0p1 of A78 AE.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78_ae_2395408_wa
+ /* Compare x0 against revisions r0p0 - r0p1 */
+ mov x17, x30
+ bl check_errata_2395408
+ cbz x0, 1f
+
+ /* --------------------------------------------------------
+ * Disable folding of demand requests into older prefetches
+ * with L2 miss requests outstanding by setting the
+ * CPUACTLR2_EL1[40] to 1.
+ * --------------------------------------------------------
+ */
+ mrs x0, CORTEX_A78_AE_ACTLR2_EL1
+ orr x0, x0, #CORTEX_A78_AE_ACTLR2_EL1_BIT_40
+ msr CORTEX_A78_AE_ACTLR2_EL1, x0
+ isb
+1:
+ ret x17
+endfunc errata_a78_ae_2395408_wa
+
+func check_errata_2395408
+ /* Applies to revisions r0p0 and r0p1. */
+ mov x1, #CPU_REV(0, 0)
+ mov x2, #CPU_REV(0, 1)
+ b cpu_rev_var_range
+endfunc check_errata_2395408
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A78-AE
+ * -------------------------------------------------
+ */
+func cortex_a78_ae_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A78_AE_1941500
+ mov x0, x18
+ bl errata_a78_ae_1941500_wa
+#endif
+
+#if ERRATA_A78_AE_1951502
+ mov x0, x18
+ bl errata_a78_ae_1951502_wa
+#endif
+
+#if ERRATA_A78_AE_2376748
+ mov x0, x18
+ bl errata_a78_ae_2376748_wa
+#endif
+
+#if ERRATA_A78_AE_2395408
+ mov x0, x18
+ bl errata_a78_ae_2395408_wa
+#endif
+
+#if ENABLE_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, actlr_el3
+ bic x0, x0, #CORTEX_A78_ACTLR_TAM_BIT
+ msr actlr_el3, x0
+
+ /* Make sure accesses from non-secure EL0/EL1 are not trapped to EL2 */
+ mrs x0, actlr_el2
+ bic x0, x0, #CORTEX_A78_ACTLR_TAM_BIT
+ msr actlr_el2, x0
+
+ /* Enable group0 counters */
+ mov x0, #CORTEX_A78_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET0_EL0, x0
+
+ /* Enable group1 counters */
+ mov x0, #CORTEX_A78_AMU_GROUP1_MASK
+ msr CPUAMCNTENSET1_EL0, x0
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+	 * The Cortex-A78-AE generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_a78_ae
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret x19
+endfunc cortex_a78_ae_reset_func
+
+ /* -------------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * -------------------------------------------------------
+ */
+func cortex_a78_ae_core_pwr_dwn
+ /* -------------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * -------------------------------------------------------
+ */
+ mrs x0, CORTEX_A78_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A78_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+ msr CORTEX_A78_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a78_ae_core_pwr_dwn
+
+ /*
+ * Errata printing function for cortex_a78_ae. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func cortex_a78_ae_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+	 * Report all errata. The revision-variant information is passed to
+	 * the checking function of each erratum.
+ */
+ report_errata ERRATA_A78_AE_1941500, cortex_a78_ae, 1941500
+ report_errata ERRATA_A78_AE_1951502, cortex_a78_ae, 1951502
+ report_errata ERRATA_A78_AE_2376748, cortex_a78_ae, 2376748
+ report_errata ERRATA_A78_AE_2395408, cortex_a78_ae, 2395408
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a78_ae, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a78_ae_errata_report
+#endif
+
+ /* -------------------------------------------------------
+ * This function provides cortex_a78_ae specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * -------------------------------------------------------
+ */
+.section .rodata.cortex_a78_ae_regs, "aS"
+cortex_a78_ae_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a78_ae_cpu_reg_dump
+ adr x6, cortex_a78_ae_regs
+ mrs x8, CORTEX_A78_CPUECTLR_EL1
+ ret
+endfunc cortex_a78_ae_cpu_reg_dump
+
+declare_cpu_ops cortex_a78_ae, CORTEX_A78_AE_MIDR, \
+ cortex_a78_ae_reset_func, \
+ cortex_a78_ae_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a78c.S b/lib/cpus/aarch64/cortex_a78c.S
new file mode 100644
index 0000000..49cebfe
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a78c.S
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a78c.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "cortex_a78c must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78C Erratum 2376749.
+ * This applies to revisions r0p1 and r0p2 of the A78C
+ * and is currently open. It is a Cat B erratum.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x4, x17
+ * --------------------------------------------------
+ */
+func errata_a78c_2376749_wa
+ /* Check revision */
+ mov x17, x30
+ bl check_errata_2376749
+ cbz x0, 1f
+ /* Set CPUACTLR2_EL1[0] to 1. */
+ mrs x1, CORTEX_A78C_CPUACTLR2_EL1
+ orr x1, x1, #CORTEX_A78C_CPUACTLR2_EL1_BIT_0
+ msr CORTEX_A78C_CPUACTLR2_EL1, x1
+1:
+ ret x17
+endfunc errata_a78c_2376749_wa
+
+func check_errata_2376749
+	/* Applies to r0p1 and r0p2 */
+ mov x1, #0x01
+ mov x2, #0x02
+ b cpu_rev_var_range
+endfunc check_errata_2376749
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex A78C Erratum 2395411.
+ * This applies to revisions r0p1 and r0p2 of the A78C
+ * and is currently open. It is a Cat B erratum.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x4, x17
+ * --------------------------------------------------
+ */
+func errata_a78c_2395411_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2395411
+ cbz x0, 1f
+
+	/* Set CPUACTLR2_EL1[40] to 1. */
+ mrs x1, CORTEX_A78C_CPUACTLR2_EL1
+ orr x1, x1, #CORTEX_A78C_CPUACTLR2_EL1_BIT_40
+ msr CORTEX_A78C_CPUACTLR2_EL1, x1
+1:
+ ret x17
+endfunc errata_a78c_2395411_wa
+
+func check_errata_2395411
+ /* Applies to r0p1 and r0p2 */
+ mov x1, #0x01
+ mov x2, #0x02
+ b cpu_rev_var_range
+endfunc check_errata_2395411
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A78C_BHB_LOOP_COUNT, cortex_a78c
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+/* --------------------------------------------------
+ * Errata Workaround for A78C Erratum 2132064.
+ * This applies to revisions r0p1 and r0p2 of A78C
+ * and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_a78c_2132064_wa
+	/* Compare x0 against revisions r0p1 - r0p2 */
+ mov x17, x30
+ bl check_errata_2132064
+ cbz x0, 1f
+
+ /* --------------------------------------------------------
+ * Place the data prefetcher in the most conservative mode
+ * to reduce prefetches by writing the following bits to
+	 * the value indicated: CPUECTLR_EL1[7:6], PF_MODE = 2'b11
+ * --------------------------------------------------------
+ */
+ mrs x0, CORTEX_A78C_CPUECTLR_EL1
+ orr x0, x0, #CORTEX_A78C_CPUECTLR_EL1_BIT_6
+ orr x0, x0, #CORTEX_A78C_CPUECTLR_EL1_BIT_7
+ msr CORTEX_A78C_CPUECTLR_EL1, x0
+ isb
+1:
+ ret x17
+endfunc errata_a78c_2132064_wa
+
+func check_errata_2132064
+ /* Applies to revisions r0p1 and r0p2. */
+ mov x1, #CPU_REV(0, 1)
+ mov x2, #CPU_REV(0, 2)
+ b cpu_rev_var_range
+endfunc check_errata_2132064
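+
+/*
+ * Note: setting CPUECTLR_EL1 bits [7:6] with the two ORRs above leaves
+ * PF_MODE = 2'b11, the same end state as the BFI-based PF_MODE field
+ * write used for the equivalent prefetcher workaround on the
+ * Cortex-A78 (erratum 2132060).
+ */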
+
+/* --------------------------------------------------------------------
+ * Errata Workaround for A78C Erratum 2242638.
+ * This applies to revisions r0p1 and r0p2 of the Cortex A78C
+ * processor and is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------------------------
+ */
+func errata_a78c_2242638_wa
+ /* Compare x0 against revisions r0p1 - r0p2 */
+ mov x17, x30
+ bl check_errata_2242638
+ cbz x0, 1f
+
+ ldr x0, =0x5
+ msr CORTEX_A78C_IMP_CPUPSELR_EL3, x0
+ ldr x0, =0x10F600E000
+ msr CORTEX_A78C_IMP_CPUPOR_EL3, x0
+ ldr x0, =0x10FF80E000
+ msr CORTEX_A78C_IMP_CPUPMR_EL3, x0
+ ldr x0, =0x80000000003FF
+ msr CORTEX_A78C_IMP_CPUPCR_EL3, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_a78c_2242638_wa
+
+func check_errata_2242638
+ /* Applies to revisions r0p1-r0p2. */
+ mov x1, #CPU_REV(0, 1)
+ mov x2, #CPU_REV(0, 2)
+ b cpu_rev_var_range
+endfunc check_errata_2242638
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A78C
+ * -------------------------------------------------
+ */
+func cortex_a78c_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_A78C_2132064
+ mov x0, x18
+ bl errata_a78c_2132064_wa
+#endif
+
+#if ERRATA_A78C_2242638
+ mov x0, x18
+ bl errata_a78c_2242638_wa
+#endif
+
+#if ERRATA_A78C_2376749
+ mov x0, x18
+ bl errata_a78c_2376749_wa
+#endif
+
+#if ERRATA_A78C_2395411
+ mov x0, x18
+ bl errata_a78c_2395411_wa
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+	 * The Cortex-A78C generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_a78c
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret x19
+endfunc cortex_a78c_reset_func
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_a78c_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_A78C_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A78C_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+ msr CORTEX_A78C_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a78c_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A78C. Must follow AAPCS.
+ */
+func cortex_a78c_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+	 * Report all errata. The revision-variant information is passed to
+	 * the checking function of each erratum.
+ */
+ report_errata ERRATA_A78C_2132064, cortex_a78c, 2132064
+ report_errata ERRATA_A78C_2242638, cortex_a78c, 2242638
+ report_errata ERRATA_A78C_2376749, cortex_a78c, 2376749
+ report_errata ERRATA_A78C_2395411, cortex_a78c, 2395411
+ report_errata WORKAROUND_CVE_2022_23960, cortex_a78c, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a78c_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cortex_a78c specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a78c_regs, "aS"
+cortex_a78c_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a78c_cpu_reg_dump
+ adr x6, cortex_a78c_regs
+ mrs x8, CORTEX_A78C_CPUECTLR_EL1
+ ret
+endfunc cortex_a78c_cpu_reg_dump
+
+declare_cpu_ops cortex_a78c, CORTEX_A78C_MIDR, \
+ cortex_a78c_reset_func, \
+ cortex_a78c_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_hayes.S b/lib/cpus/aarch64/cortex_hayes.S
new file mode 100644
index 0000000..445a691
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_hayes.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_hayes.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Hayes must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Hayes supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_hayes_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_HAYES_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_HAYES_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_HAYES_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_hayes_core_pwr_dwn
+
+ /*
+ * Errata printing function for Cortex Hayes. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func cortex_hayes_errata_report
+ ret
+endfunc cortex_hayes_errata_report
+#endif
+
+func cortex_hayes_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+ isb
+ ret
+endfunc cortex_hayes_reset_func
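+
+/*
+ * "msr SSBS, xzr" above clears PSTATE.SSBS (Speculative Store Bypass
+ * Safe). With the bit at zero the hardware is not permitted to use
+ * speculative store bypassing, which is what the "disable speculative
+ * loads" comment refers to.
+ */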
+
+ /* ---------------------------------------------
+ * This function provides Cortex Hayes specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_hayes_regs, "aS"
+cortex_hayes_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_hayes_cpu_reg_dump
+ adr x6, cortex_hayes_regs
+ mrs x8, CORTEX_HAYES_CPUECTLR_EL1
+ ret
+endfunc cortex_hayes_cpu_reg_dump
+
+declare_cpu_ops cortex_hayes, CORTEX_HAYES_MIDR, \
+ cortex_hayes_reset_func, \
+ cortex_hayes_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_hunter.S b/lib/cpus/aarch64/cortex_hunter.S
new file mode 100644
index 0000000..973637e
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_hunter.S
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_hunter.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Hunter must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Hunter supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_HUNTER_BHB_LOOP_COUNT, cortex_hunter
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+func cortex_hunter_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex Hunter generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_hunter
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret
+endfunc cortex_hunter_reset_func
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_hunter_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_HUNTER_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_HUNTER_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_HUNTER_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_hunter_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex Hunter. Must follow AAPCS.
+ */
+func cortex_hunter_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+	 * Report all errata. The revision-variant information is passed to
+	 * the checking function of each erratum.
+ */
+ report_errata WORKAROUND_CVE_2022_23960, cortex_hunter, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_hunter_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Cortex Hunter-specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_hunter_regs, "aS"
+cortex_hunter_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_hunter_cpu_reg_dump
+ adr x6, cortex_hunter_regs
+ mrs x8, CORTEX_HUNTER_CPUECTLR_EL1
+ ret
+endfunc cortex_hunter_cpu_reg_dump
+
+declare_cpu_ops cortex_hunter, CORTEX_HUNTER_MIDR, \
+ cortex_hunter_reset_func, \
+ cortex_hunter_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_hunter_elp_arm.S b/lib/cpus/aarch64/cortex_hunter_elp_arm.S
new file mode 100644
index 0000000..5f86d4e
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_hunter_elp_arm.S
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_hunter_elp_arm.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Hunter ELP must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Hunter ELP supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_HUNTER_ELP_ARM_BHB_LOOP_COUNT, cortex_hunter_elp_arm
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+func cortex_hunter_elp_arm_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex Hunter ELP generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_hunter_elp_arm
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret
+endfunc cortex_hunter_elp_arm_reset_func
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_hunter_elp_arm_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_HUNTER_ELP_ARM_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_HUNTER_ELP_ARM_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_HUNTER_ELP_ARM_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_hunter_elp_arm_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex Hunter ELP. Must follow AAPCS.
+ */
+func cortex_hunter_elp_arm_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+	 * Report all errata. The revision-variant information is passed to
+	 * the checking function of each erratum.
+ */
+ report_errata WORKAROUND_CVE_2022_23960, cortex_hunter_elp_arm, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_hunter_elp_arm_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Cortex Hunter ELP-specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_hunter_elp_arm_regs, "aS"
+cortex_hunter_elp_arm_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_hunter_elp_arm_cpu_reg_dump
+ adr x6, cortex_hunter_elp_arm_regs
+ mrs x8, CORTEX_HUNTER_ELP_ARM_CPUECTLR_EL1
+ ret
+endfunc cortex_hunter_elp_arm_cpu_reg_dump
+
+declare_cpu_ops cortex_hunter_elp_arm, CORTEX_HUNTER_ELP_ARM_MIDR, \
+ cortex_hunter_elp_arm_reset_func, \
+ cortex_hunter_elp_arm_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x1.S b/lib/cpus/aarch64/cortex_x1.S
new file mode 100644
index 0000000..9a7f666
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_x1.S
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2022, Google LLC. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <cortex_x1.h>
+#include <cpu_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-X1 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-X1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_X1_BHB_LOOP_COUNT, cortex_x1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+/* --------------------------------------------------
+ * Errata Workaround for X1 Erratum 1821534.
+ * This applies to revisions r0p0 and r1p0 of X1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_x1_1821534_wa
+ /* Compare x0 against revision r1p0 */
+ mov x17, x30
+ bl check_errata_1821534
+ cbz x0, 1f
+ mrs x1, CORTEX_X1_ACTLR2_EL1
+ orr x1, x1, BIT(2)
+ msr CORTEX_X1_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_x1_1821534_wa
+
+func check_errata_1821534
+ /* Applies to r0p0 and r1p0 */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1821534
+
+/* --------------------------------------------------
+ * Errata Workaround for X1 Erratum 1688305.
+ * This applies to revisions r0p0 and r1p0 of X1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_x1_1688305_wa
+ /* Compare x0 against revision r1p0 */
+ mov x17, x30
+ bl check_errata_1688305
+ cbz x0, 1f
+ mrs x0, CORTEX_X1_ACTLR2_EL1
+ orr x0, x0, BIT(1)
+ msr CORTEX_X1_ACTLR2_EL1, x0
+ isb
+
+1:
+ ret x17
+endfunc errata_x1_1688305_wa
+
+func check_errata_1688305
+ /* Applies to r0p0 and r1p0 */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1688305
+
+/* --------------------------------------------------
+ * Errata Workaround for X1 Erratum 1827429.
+ * This applies to revisions r0p0 and r1p0 of X1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_x1_1827429_wa
+ /* Compare x0 against revision r1p0 */
+ mov x17, x30
+ bl check_errata_1827429
+ cbz x0, 1f
+ mrs x0, CORTEX_X1_CPUECTLR_EL1
+ orr x0, x0, BIT(53)
+ msr CORTEX_X1_CPUECTLR_EL1, x0
+ isb
+
+1:
+ ret x17
+endfunc errata_x1_1827429_wa
+
+func check_errata_1827429
+ /* Applies to r0p0 and r1p0 */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1827429
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-X1.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
+func cortex_x1_reset_func
+ mov x19, x30
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_X1_1821534
+ mov x0, x18
+ bl errata_x1_1821534_wa
+#endif
+
+#if ERRATA_X1_1688305
+ mov x0, x18
+ bl errata_x1_1688305_wa
+#endif
+
+#if ERRATA_X1_1827429
+ mov x0, x18
+ bl errata_x1_1827429_wa
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-X1 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_x1
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret x19
+endfunc cortex_x1_reset_func
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_x1_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, CORTEX_X1_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_X1_CORE_PWRDN_EN_MASK
+ msr CORTEX_X1_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_x1_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex X1. Must follow AAPCS.
+ */
+func cortex_x1_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+	 * Report all errata. The revision-variant information is passed to
+	 * the checking function of each erratum.
+ */
+ report_errata ERRATA_X1_1821534, cortex_x1, 1821534
+ report_errata ERRATA_X1_1688305, cortex_x1, 1688305
+ report_errata ERRATA_X1_1827429, cortex_x1, 1827429
+ report_errata WORKAROUND_CVE_2022_23960, cortex_x1, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_x1_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Cortex X1 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_x1_regs, "aS"
+cortex_x1_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_x1_cpu_reg_dump
+ adr x6, cortex_x1_regs
+ mrs x8, CORTEX_X1_CPUECTLR_EL1
+ ret
+endfunc cortex_x1_cpu_reg_dump
+
+declare_cpu_ops cortex_x1, CORTEX_X1_MIDR, \
+ cortex_x1_reset_func, \
+ cortex_x1_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
new file mode 100644
index 0000000..c810be6
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -0,0 +1,432 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_x2.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex X2 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex X2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_X2_BHB_LOOP_COUNT, cortex_x2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex X2 Erratum #2002765.
+ * This applies to revisions r0p0, r1p0, and r2p0 and
+ * is open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * --------------------------------------------------
+ */
+func errata_cortex_x2_2002765_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2002765
+ cbz x0, 1f
+
+ ldr x0, =0x6
+ msr S3_6_C15_C8_0, x0 /* CPUPSELR_EL3 */
+ ldr x0, =0xF3A08002
+ msr S3_6_C15_C8_2, x0 /* CPUPOR_EL3 */
+ ldr x0, =0xFFF0F7FE
+ msr S3_6_C15_C8_3, x0 /* CPUPMR_EL3 */
+ ldr x0, =0x40000001003ff
+ msr S3_6_C15_C8_1, x0 /* CPUPCR_EL3 */
+ isb
+
+1:
+ ret x17
+endfunc errata_cortex_x2_2002765_wa
+
+func check_errata_2002765
+ /* Applies to r0p0 - r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2002765
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex X2 Erratum #2058056.
+ * This applies to revisions r0p0, r1p0, and r2p0 and
+ * is open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * --------------------------------------------------
+ */
+func errata_cortex_x2_2058056_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2058056
+ cbz x0, 1f
+
+ mrs x1, CORTEX_X2_CPUECTLR2_EL1
+ mov x0, #CORTEX_X2_CPUECTLR2_EL1_PF_MODE_CNSRV
+ bfi x1, x0, #CORTEX_X2_CPUECTLR2_EL1_PF_MODE_SHIFT, #CORTEX_X2_CPUECTLR2_EL1_PF_MODE_WIDTH
+ msr CORTEX_X2_CPUECTLR2_EL1, x1
+
+1:
+ ret x17
+endfunc errata_cortex_x2_2058056_wa
+
+func check_errata_2058056
+ /* Applies to r0p0 - r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2058056
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex X2 Erratum #2083908.
+ * This applies to revision r2p0 and is open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x2, x17
+ * --------------------------------------------------
+ */
+func errata_cortex_x2_2083908_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2083908
+ cbz x0, 1f
+
+ /* Apply the workaround by setting bit 13 in CPUACTLR5_EL1. */
+ mrs x1, CORTEX_X2_CPUACTLR5_EL1
+ orr x1, x1, #BIT(13)
+ msr CORTEX_X2_CPUACTLR5_EL1, x1
+
+1:
+ ret x17
+endfunc errata_cortex_x2_2083908_wa
+
+func check_errata_2083908
+ /* Applies to r2p0 */
+ mov x1, #0x20
+ mov x2, #0x20
+ b cpu_rev_var_range
+endfunc check_errata_2083908
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex-X2 Erratum 2017096.
+ * This applies only to revisions r0p0, r1p0, and r2p0
+ * and is fixed in r2p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * --------------------------------------------------
+ */
+func errata_x2_2017096_wa
+	/* Compare x0 against revisions r0p0 to r2p0 */
+ mov x17, x30
+ bl check_errata_2017096
+ cbz x0, 1f
+ mrs x1, CORTEX_X2_CPUECTLR_EL1
+ orr x1, x1, CORTEX_X2_CPUECTLR_EL1_PFSTIDIS_BIT
+ msr CORTEX_X2_CPUECTLR_EL1, x1
+
+1:
+ ret x17
+endfunc errata_x2_2017096_wa
+
+func check_errata_2017096
+ /* Applies to r0p0, r1p0, r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2017096
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex-X2 Erratum 2081180.
+ * This applies to revisions r0p0, r1p0, and r2p0
+ * and is fixed in r2p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * --------------------------------------------------
+ */
+func errata_x2_2081180_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2081180
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ ldr x0, =0x3
+ msr CORTEX_X2_IMP_CPUPSELR_EL3, x0
+ ldr x0, =0xF3A08002
+ msr CORTEX_X2_IMP_CPUPOR_EL3, x0
+ ldr x0, =0xFFF0F7FE
+ msr CORTEX_X2_IMP_CPUPMR_EL3, x0
+ ldr x0, =0x10002001003FF
+ msr CORTEX_X2_IMP_CPUPCR_EL3, x0
+ ldr x0, =0x4
+ msr CORTEX_X2_IMP_CPUPSELR_EL3, x0
+ ldr x0, =0xBF200000
+ msr CORTEX_X2_IMP_CPUPOR_EL3, x0
+ ldr x0, =0xFFEF0000
+ msr CORTEX_X2_IMP_CPUPMR_EL3, x0
+ ldr x0, =0x10002001003F3
+ msr CORTEX_X2_IMP_CPUPCR_EL3, x0
+ isb
+1:
+ ret x17
+endfunc errata_x2_2081180_wa
+
+func check_errata_2081180
+ /* Applies to r0p0, r1p0 and r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2081180
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex X2 Erratum 2216384.
+ * This applies to revisions r0p0, r1p0, and r2p0
+ * and is fixed in r2p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * --------------------------------------------------
+ */
+func errata_x2_2216384_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2216384
+ cbz x0, 1f
+
+ mrs x1, CORTEX_X2_CPUACTLR5_EL1
+ orr x1, x1, CORTEX_X2_CPUACTLR5_EL1_BIT_17
+ msr CORTEX_X2_CPUACTLR5_EL1, x1
+
+ /* Apply instruction patching sequence */
+ ldr x0, =0x5
+ msr CORTEX_X2_IMP_CPUPSELR_EL3, x0
+ ldr x0, =0x10F600E000
+ msr CORTEX_X2_IMP_CPUPOR_EL3, x0
+ ldr x0, =0x10FF80E000
+ msr CORTEX_X2_IMP_CPUPMR_EL3, x0
+ ldr x0, =0x80000000003FF
+ msr CORTEX_X2_IMP_CPUPCR_EL3, x0
+ isb
+
+1:
+ ret x17
+endfunc errata_x2_2216384_wa
+
+func check_errata_2216384
+ /* Applies to r0p0 - r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2216384
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+ /* ---------------------------------------------------------
+	 * Errata Workaround for Cortex-X2 Erratum 2147715.
+	 * This applies only to revision r2p0 and is fixed in r2p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1, x17
+ * ---------------------------------------------------------
+ */
+func errata_x2_2147715_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_2147715
+ cbz x0, 1f
+
+ /* Apply the workaround by setting bit 22 in CPUACTLR_EL1. */
+ mrs x1, CORTEX_X2_CPUACTLR_EL1
+ orr x1, x1, CORTEX_X2_CPUACTLR_EL1_BIT_22
+ msr CORTEX_X2_CPUACTLR_EL1, x1
+
+1:
+ ret x17
+endfunc errata_x2_2147715_wa
+
+func check_errata_2147715
+ /* Applies to r2p0 */
+ mov x1, #0x20
+ mov x2, #0x20
+ b cpu_rev_var_range
+endfunc check_errata_2147715
+
+/* -------------------------------------------------------
+ * Errata Workaround for Cortex-X2 Erratum 2371105.
+ * This applies to revisions <= r2p0 and is fixed in r2p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * -------------------------------------------------------
+ */
+func errata_x2_2371105_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2371105
+ cbz x0, 1f
+
+ /* Set bit 40 in CPUACTLR2_EL1 */
+ mrs x1, CORTEX_X2_CPUACTLR2_EL1
+ orr x1, x1, #CORTEX_X2_CPUACTLR2_EL1_BIT_40
+ msr CORTEX_X2_CPUACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_x2_2371105_wa
+
+func check_errata_2371105
+ /* Applies to <= r2p0. */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_2371105
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_x2_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_X2_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_X2_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_X2_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_x2_core_pwr_dwn
+
+ /*
+ * Errata printing function for Cortex X2. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func cortex_x2_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+	 * Report all errata. The revision-variant information is passed to
+	 * the checking function of each erratum.
+ */
+ report_errata ERRATA_X2_2002765, cortex_x2, 2002765
+ report_errata ERRATA_X2_2017096, cortex_x2, 2017096
+ report_errata ERRATA_X2_2058056, cortex_x2, 2058056
+ report_errata ERRATA_X2_2081180, cortex_x2, 2081180
+ report_errata ERRATA_X2_2083908, cortex_x2, 2083908
+ report_errata ERRATA_X2_2147715, cortex_x2, 2147715
+ report_errata ERRATA_X2_2216384, cortex_x2, 2216384
+ report_errata ERRATA_X2_2371105, cortex_x2, 2371105
+ report_errata WORKAROUND_CVE_2022_23960, cortex_x2, cve_2022_23960
+ report_errata ERRATA_DSU_2313941, cortex_x2, dsu_2313941
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_x2_errata_report
+#endif
+
+func cortex_x2_reset_func
+ mov x19, x30
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+ /* Get the CPU revision and stash it in x18. */
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_DSU_2313941
+ bl errata_dsu_2313941_wa
+#endif
+
+#if ERRATA_X2_2002765
+ mov x0, x18
+ bl errata_cortex_x2_2002765_wa
+#endif
+
+#if ERRATA_X2_2058056
+ mov x0, x18
+ bl errata_cortex_x2_2058056_wa
+#endif
+
+#if ERRATA_X2_2083908
+ mov x0, x18
+ bl errata_cortex_x2_2083908_wa
+#endif
+
+#if ERRATA_X2_2017096
+ mov x0, x18
+ bl errata_x2_2017096_wa
+#endif
+
+#if ERRATA_X2_2081180
+ mov x0, x18
+ bl errata_x2_2081180_wa
+#endif
+
+#if ERRATA_X2_2216384
+ mov x0, x18
+ bl errata_x2_2216384_wa
+#endif
+
+#if ERRATA_X2_2147715
+ mov x0, x18
+ bl errata_x2_2147715_wa
+#endif
+
+#if ERRATA_X2_2371105
+ mov x0, x18
+ bl errata_x2_2371105_wa
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-X2 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_x2
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret x19
+endfunc cortex_x2_reset_func
+
+ /* ---------------------------------------------
+ * This function provides Cortex X2 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_x2_regs, "aS"
+cortex_x2_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_x2_cpu_reg_dump
+ adr x6, cortex_x2_regs
+ mrs x8, CORTEX_X2_CPUECTLR_EL1
+ ret
+endfunc cortex_x2_cpu_reg_dump
+
+declare_cpu_ops cortex_x2, CORTEX_X2_MIDR, \
+ cortex_x2_reset_func, \
+ cortex_x2_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x3.S b/lib/cpus/aarch64/cortex_x3.S
new file mode 100644
index 0000000..bf1b6ec
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_x3.S
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_x3.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-X3 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-X3 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_X3_BHB_LOOP_COUNT, cortex_x3
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_x3_core_pwr_dwn
+#if ERRATA_X3_2313909
+ mov x15, x30
+ bl cpu_get_rev_var
+ bl errata_cortex_x3_2313909_wa
+ mov x30, x15
+#endif /* ERRATA_X3_2313909 */
+
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_X3_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_X3_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_X3_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_x3_core_pwr_dwn
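+
+/*
+ * Unlike the reset-time workarounds in the files above, erratum
+ * 2313909 is applied on the power-down path, so cpu_get_rev_var is
+ * called here to reload the revision-variant into x0 immediately
+ * before the workaround; no stashed copy is available at this point.
+ */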
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+func cortex_x3_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-X3 generic vectors are overridden to apply
+ * errata mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_x3
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret
+endfunc cortex_x3_reset_func
+
+/* ----------------------------------------------------------------------
+ * Errata Workaround for Cortex-X3 Erratum 2313909 on power down request.
+ * This applies to revision r0p0 and r1p0 of Cortex-X3. Fixed in r1p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * ----------------------------------------------------------------------
+ */
+func errata_cortex_x3_2313909_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2313909
+ cbz x0, 1f
+
+	/* Set bit 36 in CPUACTLR2_EL1 */
+ mrs x1, CORTEX_X3_CPUACTLR2_EL1
+ orr x1, x1, #CORTEX_X3_CPUACTLR2_EL1_BIT_36
+ msr CORTEX_X3_CPUACTLR2_EL1, x1
+1:
+ ret x17
+endfunc errata_cortex_x3_2313909_wa
+
+func check_errata_2313909
+ /* Applies to r0p0 and r1p0 */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_2313909
+
+#if REPORT_ERRATA
+ /*
+ * Errata printing function for Cortex-X3. Must follow AAPCS.
+ */
+func cortex_x3_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+	 * Report all errata. The revision-variant information is passed to
+	 * the checking function of each erratum.
+ */
+ report_errata ERRATA_X3_2313909, cortex_x3, 2313909
+ report_errata WORKAROUND_CVE_2022_23960, cortex_x3, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_x3_errata_report
+#endif
+
+ /* ---------------------------------------------
+	 * This function provides Cortex-X3 specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_x3_regs, "aS"
+cortex_x3_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_x3_cpu_reg_dump
+ adr x6, cortex_x3_regs
+ mrs x8, CORTEX_X3_CPUECTLR_EL1
+ ret
+endfunc cortex_x3_cpu_reg_dump
+
+declare_cpu_ops cortex_x3, CORTEX_X3_MIDR, \
+ cortex_x3_reset_func, \
+ cortex_x3_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
new file mode 100644
index 0000000..2385627
--- /dev/null
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 2014-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cpu_macros.S>
+#include <lib/cpus/errata_report.h>
+#include <lib/el3_runtime/cpu_data.h>
+
+ /* Reset fn is needed in BL at reset vector */
+#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
+ /*
+ * The reset handler common to all platforms. After a matching
+	 * cpu_ops structure entry is found, the corresponding reset_handler
+ * in the cpu_ops is invoked.
+ * Clobbers: x0 - x19, x30
+ */
+ .globl reset_handler
+func reset_handler
+ mov x19, x30
+
+ /* The plat_reset_handler can clobber x0 - x18, x30 */
+ bl plat_reset_handler
+
+ /* Get the matching cpu_ops pointer */
+ bl get_cpu_ops_ptr
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+
+ /* Get the cpu_ops reset handler */
+ ldr x2, [x0, #CPU_RESET_FUNC]
+ mov x30, x19
+ cbz x2, 1f
+
+ /* The cpu_ops reset handler can clobber x0 - x19, x30 */
+ br x2
+1:
+ ret
+endfunc reset_handler
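+
+/*
+ * Note that the cpu_ops reset handler is entered with "br", not "blr":
+ * x30 has already been restored from x19, so the CPU-specific reset
+ * function (e.g. cortex_a78_reset_func) returns directly to the caller
+ * of reset_handler.
+ */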
+
+#endif
+
+#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
+ /*
+ * void prepare_cpu_pwr_dwn(unsigned int power_level)
+ *
+ * Prepare CPU power down function for all platforms. The function takes
+ * a domain level to be powered down as its parameter. After the cpu_ops
+ * pointer is retrieved from cpu_data, the handler for requested power
+ * level is called.
+ */
+ .globl prepare_cpu_pwr_dwn
+func prepare_cpu_pwr_dwn
+ /*
+	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS - 1, call
+	 * the power down handler for the highest implemented power level.
+ */
+ mov_imm x2, (CPU_MAX_PWR_DWN_OPS - 1)
+ cmp x0, x2
+ csel x2, x2, x0, hi
+
+ mrs x1, tpidr_el3
+ ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+
+ /* Get the appropriate power down handler */
+ mov x1, #CPU_PWR_DWN_OPS
+ add x1, x1, x2, lsl #3
+ ldr x1, [x0, x1]
+#if ENABLE_ASSERTIONS
+ cmp x1, #0
+ ASM_ASSERT(ne)
+#endif
+ br x1
+endfunc prepare_cpu_pwr_dwn
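+
+/*
+ * For example, power level 0 dispatches to the handler stored at
+ * cpu_ops + CPU_PWR_DWN_OPS (the core power-down entry, such as
+ * cortex_a78_core_pwr_dwn), level 1 to the entry 8 bytes after it,
+ * and any level above CPU_MAX_PWR_DWN_OPS - 1 is clamped to the last
+ * implemented handler by the csel above.
+ */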
+
+
+ /*
+ * Initializes the cpu_ops_ptr if not already initialized
+ * in cpu_data. This can be called without a runtime stack, but may
+ * only be called after the MMU is enabled.
+	 * Clobbers: x0 - x6, x10
+ */
+ .globl init_cpu_ops
+func init_cpu_ops
+ mrs x6, tpidr_el3
+ ldr x0, [x6, #CPU_DATA_CPU_OPS_PTR]
+ cbnz x0, 1f
+ mov x10, x30
+ bl get_cpu_ops_ptr
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ str x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
+ mov x30, x10
+1:
+ ret
+endfunc init_cpu_ops
+#endif /* IMAGE_BL31 */
+
+#if defined(IMAGE_BL31) && CRASH_REPORTING
+ /*
+ * The cpu specific registers which need to be reported in a crash
+ * are reported via cpu_ops cpu_reg_dump function. After a matching
+	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
+ * in the cpu_ops is invoked.
+ */
+ .globl do_cpu_reg_dump
+func do_cpu_reg_dump
+ mov x16, x30
+
+ /* Get the matching cpu_ops pointer */
+ bl get_cpu_ops_ptr
+ cbz x0, 1f
+
+ /* Get the cpu_ops cpu_reg_dump */
+ ldr x2, [x0, #CPU_REG_DUMP]
+ cbz x2, 1f
+ blr x2
+1:
+ mov x30, x16
+ ret
+endfunc do_cpu_reg_dump
+#endif
+
+ /*
+	 * The function below returns the cpu_ops structure matching the
+	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
+	 * entry in the cpu_ops list. Only the implementer and part number
+	 * are used to match the entries.
+ *
+ * If cpu_ops for the MIDR_EL1 cannot be found and
+ * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
+ * default cpu_ops with an MIDR value of 0.
+ * (Implementation number 0x0 should be reserved for software use
+ * and therefore no clashes should happen with that default value).
+ *
+	 * Return:
+	 * x0 - The matching cpu_ops pointer on success
+ * x0 - 0 on failure.
+ * Clobbers : x0 - x5
+ */
+ .globl get_cpu_ops_ptr
+func get_cpu_ops_ptr
+ /* Read the MIDR_EL1 */
+ mrs x2, midr_el1
+ mov_imm x3, CPU_IMPL_PN_MASK
+
+	/* Retain only the implementer and part number using the mask */
+ and w2, w2, w3
+
+ /* Get the cpu_ops end location */
+ adr x5, (__CPU_OPS_END__ + CPU_MIDR)
+
+ /* Initialize the return parameter */
+ mov x0, #0
+1:
+ /* Get the cpu_ops start location */
+ adr x4, (__CPU_OPS_START__ + CPU_MIDR)
+
+2:
+ /* Check if we have reached end of list */
+ cmp x4, x5
+ b.eq search_def_ptr
+
+ /* load the midr from the cpu_ops */
+ ldr x1, [x4], #CPU_OPS_SIZE
+ and w1, w1, w3
+
+ /* Check if midr matches to midr of this core */
+ cmp w1, w2
+ b.ne 2b
+
+ /* Subtract the increment and offset to get the cpu-ops pointer */
+ sub x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+#ifdef SUPPORT_UNKNOWN_MPID
+ cbnz x2, exit_mpid_found
+ /* Mark the unsupported MPID flag */
+ adrp x1, unsupported_mpid_flag
+ add x1, x1, :lo12:unsupported_mpid_flag
+ str w2, [x1]
+exit_mpid_found:
+#endif
+ ret
+
+ /*
+ * Search again for a default pointer (MIDR = 0x0)
+ * or return error if already searched.
+ */
+search_def_ptr:
+#ifdef SUPPORT_UNKNOWN_MPID
+ cbz x2, error_exit
+ mov x2, #0
+ b 1b
+error_exit:
+#endif
+ ret
+endfunc get_cpu_ops_ptr
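+
+/*
+ * Since only the implementer and part number take part in the match
+ * (CPU_IMPL_PN_MASK), an r0p0 and an r1p2 core of the same part
+ * resolve to the same cpu_ops entry; revision-specific behaviour is
+ * differentiated later via cpu_get_rev_var and the per-erratum check
+ * functions.
+ */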
+
+/*
+ * Extract CPU revision and variant, and combine them into a single value for
+ * easier comparison.
+ */
+ .globl cpu_get_rev_var
+func cpu_get_rev_var
+ mrs x1, midr_el1
+
+ /*
+ * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
+ * as variant[7:4] and revision[3:0] of x0.
+ *
+ * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
+ * extract x1[3:0] into x0[3:0] retaining other bits.
+ */
+ ubfx x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
+ bfxil x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
+ ret
+endfunc cpu_get_rev_var
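+
+/*
+ * In C terms the packing above amounts to (value name illustrative):
+ *
+ *	rev_var = ((midr >> 16) & 0xf0U) | (midr & 0x0fU);
+ *
+ * so, for example, an r2p1 part (variant 2, revision 1) yields 0x21.
+ */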
+
+/*
+ * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
+ * application purposes. If the revision-variant is less than or equal to the
+ * given value, this indicates that the erratum applies; otherwise it does not.
+ *
+ * Shall clobber: x0-x3
+ */
+ .globl cpu_rev_var_ls
+func cpu_rev_var_ls
+ mov x2, #ERRATA_APPLIES
+ mov x3, #ERRATA_NOT_APPLIES
+ cmp x0, x1
+ csel x0, x2, x3, ls
+ ret
+endfunc cpu_rev_var_ls
+
+/*
+ * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
+ * application purposes. If the revision-variant is higher than or equal to the
+ * given value, this indicates that the erratum applies; otherwise it does not.
+ *
+ * Shall clobber: x0-x3
+ */
+ .globl cpu_rev_var_hs
+func cpu_rev_var_hs
+ mov x2, #ERRATA_APPLIES
+ mov x3, #ERRATA_NOT_APPLIES
+ cmp x0, x1
+ csel x0, x2, x3, hs
+ ret
+endfunc cpu_rev_var_hs
+
+/*
+ * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
+ * errata application purposes. If the revision-variant falls within the given
+ * range (bounds inclusive), this indicates that the erratum applies; otherwise
+ * it does not.
+ *
+ * Shall clobber: x0-x4
+ */
+ .globl cpu_rev_var_range
+func cpu_rev_var_range
+ mov x3, #ERRATA_APPLIES
+ mov x4, #ERRATA_NOT_APPLIES
+ cmp x0, x1
+ csel x1, x3, x4, hs
+ cbz x1, 1f
+ cmp x0, x2
+ csel x1, x3, x4, ls
+1:
+ mov x0, x1
+ ret
+endfunc cpu_rev_var_range
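+
+/*
+ * The three comparators above reduce to the following C, with the packed
+ * rev_var in x0 and the bound(s) in x1 (and x2):
+ *
+ *	ls:    (rev_var <= max) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES
+ *	hs:    (rev_var >= min) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES
+ *	range: (rev_var >= min && rev_var <= max) ? ERRATA_APPLIES
+ *	                                          : ERRATA_NOT_APPLIES
+ */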
+
+#if REPORT_ERRATA
+/*
+ * void print_errata_status(void);
+ *
+ * Function to print the errata status for CPUs of its class. Must be called
+ * only:
+ *
+ * - with the MMU and data caches enabled;
+ * - after cpu_ops have been initialized in per-CPU data.
+ */
+ .globl print_errata_status
+func print_errata_status
+#ifdef IMAGE_BL1
+ /*
+ * BL1 doesn't have per-CPU data. So retrieve the CPU operations
+ * directly.
+ */
+ stp xzr, x30, [sp, #-16]!
+ bl get_cpu_ops_ptr
+ ldp xzr, x30, [sp], #16
+ ldr x1, [x0, #CPU_ERRATA_FUNC]
+ cbnz x1, .Lprint
+#else
+ /*
+ * Retrieve pointer to cpu_ops from per-CPU data, and further, the
+ * errata printing function. If it's non-NULL, jump to the function in
+ * turn.
+ */
+ mrs x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x1, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x1, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x1, #CPU_ERRATA_FUNC]
+ cbz x0, .Lnoprint
+
+ /*
+ * Printing errata status requires atomically testing the printed flag.
+ */
+ stp x19, x30, [sp, #-16]!
+ mov x19, x0
+
+ /*
+ * Load pointers to errata lock and printed flag. Call
+ * errata_needs_reporting to check whether this CPU needs to report
+ * errata status pertaining to its class.
+ */
+ ldr x0, [x1, #CPU_ERRATA_LOCK]
+ ldr x1, [x1, #CPU_ERRATA_PRINTED]
+ bl errata_needs_reporting
+ mov x1, x19
+ ldp x19, x30, [sp], #16
+ cbnz x0, .Lprint
+#endif
+.Lnoprint:
+ ret
+.Lprint:
+ /* Jump to errata reporting function for this CPU */
+ br x1
+endfunc print_errata_status
+#endif
+
+/*
+ * int check_wa_cve_2017_5715(void);
+ *
+ * This function returns:
+ * - ERRATA_APPLIES when firmware mitigation is required.
+ * - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
+ * - ERRATA_MISSING when firmware mitigation would be required but
+ * is not compiled in.
+ *
+ * NOTE: Must be called only after cpu_ops have been initialized
+ * in per-CPU data.
+ */
+ .globl check_wa_cve_2017_5715
+func check_wa_cve_2017_5715
+ mrs x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_EXTRA1_FUNC]
+ /*
+ * If the reserved function pointer is NULL, this CPU
+ * is unaffected by CVE-2017-5715 so bail out.
+ */
+ cmp x0, #CPU_NO_EXTRA1_FUNC
+ beq 1f
+ br x0
+1:
+ mov x0, #ERRATA_NOT_APPLIES
+ ret
+endfunc check_wa_cve_2017_5715
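+
+/*
+ * In rough C terms (slot names as used by cpu_macros.S; the wa3 dispatch
+ * further below follows the same shape with the extra3 slot):
+ *
+ *	func = cpu_ops->extra1;            // CPU_EXTRA1_FUNC slot
+ *	if (func == CPU_NO_EXTRA1_FUNC)
+ *		return ERRATA_NOT_APPLIES; // CPU unaffected
+ *	return func();                     // CPU-specific check
+ */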
+
+/*
+ * void *wa_cve_2018_3639_get_disable_ptr(void);
+ *
+ * Returns a function pointer which is used to disable mitigation
+ * for CVE-2018-3639.
+ * The function pointer is only returned on cores that employ
+ * dynamic mitigation. If the core uses static mitigation or is
+ * unaffected by CVE-2018-3639 this function returns NULL.
+ *
+ * NOTE: Must be called only after cpu_ops have been initialized
+ * in per-CPU data.
+ */
+ .globl wa_cve_2018_3639_get_disable_ptr
+func wa_cve_2018_3639_get_disable_ptr
+ mrs x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_EXTRA2_FUNC]
+ ret
+endfunc wa_cve_2018_3639_get_disable_ptr
+
+/*
+ * int check_smccc_arch_wa3_applies(void);
+ *
+ * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
+ * CVE-2022-23960 for this CPU. It returns:
+ * - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
+ * the CVE.
+ * - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
+ * mitigate the CVE.
+ *
+ * NOTE: Must be called only after cpu_ops have been initialized
+ * in per-CPU data.
+ */
+ .globl check_smccc_arch_wa3_applies
+func check_smccc_arch_wa3_applies
+ mrs x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_EXTRA3_FUNC]
+ /*
+ * If the reserved function pointer is NULL, this CPU
+ * is unaffected by CVE-2022-23960 so bail out.
+ */
+ cmp x0, #CPU_NO_EXTRA3_FUNC
+ beq 1f
+ br x0
+1:
+ mov x0, #ERRATA_NOT_APPLIES
+ ret
+endfunc check_smccc_arch_wa3_applies
diff --git a/lib/cpus/aarch64/cpuamu.c b/lib/cpus/aarch64/cpuamu.c
new file mode 100644
index 0000000..3a2fa81
--- /dev/null
+++ b/lib/cpus/aarch64/cpuamu.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <cpuamu.h>
+#include <lib/el3_runtime/pubsub_events.h>
+#include <plat/common/platform.h>
+
+#define CPUAMU_NR_COUNTERS 5U
+
+struct cpuamu_ctx {
+ uint64_t cnts[CPUAMU_NR_COUNTERS];
+ unsigned int mask;
+};
+
+static struct cpuamu_ctx cpuamu_ctxs[PLATFORM_CORE_COUNT];
+
+int midr_match(unsigned int cpu_midr)
+{
+ unsigned int midr, midr_mask;
+
+ midr = (unsigned int)read_midr();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
+ (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ return ((midr & midr_mask) == (cpu_midr & midr_mask));
+}
+
+void cpuamu_context_save(unsigned int nr_counters)
+{
+ struct cpuamu_ctx *ctx = &cpuamu_ctxs[plat_my_core_pos()];
+ unsigned int i;
+
+ assert(nr_counters <= CPUAMU_NR_COUNTERS);
+
+ /* Save counter configuration */
+ ctx->mask = cpuamu_read_cpuamcntenset_el0();
+
+ /* Disable counters */
+ cpuamu_write_cpuamcntenclr_el0(ctx->mask);
+ isb();
+
+ /* Save counters */
+ for (i = 0; i < nr_counters; i++)
+ ctx->cnts[i] = cpuamu_cnt_read(i);
+}
+
+void cpuamu_context_restore(unsigned int nr_counters)
+{
+ struct cpuamu_ctx *ctx = &cpuamu_ctxs[plat_my_core_pos()];
+ unsigned int i;
+
+ assert(nr_counters <= CPUAMU_NR_COUNTERS);
+
+ /*
+ * Disable counters. They were enabled early in the
+ * CPU reset function.
+ */
+ cpuamu_write_cpuamcntenclr_el0(ctx->mask);
+ isb();
+
+ /* Restore counters */
+ for (i = 0; i < nr_counters; i++)
+ cpuamu_cnt_write(i, ctx->cnts[i]);
+ isb();
+
+ /* Restore counter configuration */
+ cpuamu_write_cpuamcntenset_el0(ctx->mask);
+}
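+
+/*
+ * Typical pairing, sketched with a hypothetical MY_CPU_MIDR (the Neoverse N1
+ * pub/sub hooks later in this patch subscribe in exactly this shape):
+ *
+ *	static void *my_cpu_context_save(const void *arg)
+ *	{
+ *		if (midr_match(MY_CPU_MIDR) != 0)
+ *			cpuamu_context_save(CPUAMU_NR_COUNTERS);
+ *		return (void *)0;
+ *	}
+ *	SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, my_cpu_context_save);
+ */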
diff --git a/lib/cpus/aarch64/cpuamu_helpers.S b/lib/cpus/aarch64/cpuamu_helpers.S
new file mode 100644
index 0000000..5a77fc7
--- /dev/null
+++ b/lib/cpus/aarch64/cpuamu_helpers.S
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpuamu.h>
+
+ .globl cpuamu_cnt_read
+ .globl cpuamu_cnt_write
+ .globl cpuamu_read_cpuamcntenset_el0
+ .globl cpuamu_read_cpuamcntenclr_el0
+ .globl cpuamu_write_cpuamcntenset_el0
+ .globl cpuamu_write_cpuamcntenclr_el0
+
+/*
+ * uint64_t cpuamu_cnt_read(unsigned int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func cpuamu_cnt_read
+ adr x1, 1f
+ add x1, x1, x0, lsl #3 /* each mrs/ret sequence is 8 bytes */
+#if ENABLE_BTI
+ add x1, x1, x0, lsl #2 /* + "bti j" instruction */
+#endif
+ br x1
+
+1: read CPUAMEVCNTR0_EL0
+ read CPUAMEVCNTR1_EL0
+ read CPUAMEVCNTR2_EL0
+ read CPUAMEVCNTR3_EL0
+ read CPUAMEVCNTR4_EL0
+endfunc cpuamu_cnt_read
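+
+/*
+ * Branch-target arithmetic, per the stride comments above: without BTI each
+ * `read` entry is an 8-byte mrs/ret pair, so a hypothetical idx of 2 lands at
+ * 1f + (2 << 3) = 1f + 16. With ENABLE_BTI each entry grows by a 4-byte
+ * "bti j" landing pad, giving 1f + (2 << 3) + (2 << 2) = 1f + 24.
+ */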
+
+/*
+ * void cpuamu_cnt_write(unsigned int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func cpuamu_cnt_write
+ adr x2, 1f
+ add x2, x2, x0, lsl #3 /* each msr/ret sequence is 8 bytes */
+#if ENABLE_BTI
+ add x2, x2, x0, lsl #2 /* + "bti j" instruction */
+#endif
+ br x2
+
+1: write CPUAMEVCNTR0_EL0
+ write CPUAMEVCNTR1_EL0
+ write CPUAMEVCNTR2_EL0
+ write CPUAMEVCNTR3_EL0
+ write CPUAMEVCNTR4_EL0
+endfunc cpuamu_cnt_write
+
+/*
+ * unsigned int cpuamu_read_cpuamcntenset_el0(void);
+ *
+ * Read the `CPUAMCNTENSET_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cpuamu_read_cpuamcntenset_el0
+ mrs x0, CPUAMCNTENSET_EL0
+ ret
+endfunc cpuamu_read_cpuamcntenset_el0
+
+/*
+ * unsigned int cpuamu_read_cpuamcntenclr_el0(void);
+ *
+ * Read the `CPUAMCNTENCLR_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cpuamu_read_cpuamcntenclr_el0
+ mrs x0, CPUAMCNTENCLR_EL0
+ ret
+endfunc cpuamu_read_cpuamcntenclr_el0
+
+/*
+ * void cpuamu_write_cpuamcntenset_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENSET_EL0` CPU register.
+ */
+func cpuamu_write_cpuamcntenset_el0
+ msr CPUAMCNTENSET_EL0, x0
+ ret
+endfunc cpuamu_write_cpuamcntenset_el0
+
+/*
+ * void cpuamu_write_cpuamcntenclr_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENCLR_EL0` CPU register.
+ */
+func cpuamu_write_cpuamcntenclr_el0
+ msr CPUAMCNTENCLR_EL0, x0
+ ret
+endfunc cpuamu_write_cpuamcntenclr_el0
diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S
new file mode 100644
index 0000000..3c54a6f
--- /dev/null
+++ b/lib/cpus/aarch64/denver.S
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <context.h>
+#include <denver.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+ /* -------------------------------------------------
+ * CVE-2017-5715 mitigation
+ *
+ * Flush the indirect branch predictor and RSB on
+ * entry to EL3 by issuing a newly added instruction
+ * for Denver CPUs.
+ *
+ * To achieve this without performing any branch
+ * instruction, a per-cpu vbar is installed which
+ * executes the workaround and then branches off to
+ * the corresponding vector entry in the main vector
+ * table.
+ * -------------------------------------------------
+ */
+vector_base workaround_bpflush_runtime_exceptions
+
+ .macro apply_workaround
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+
+ /* Disable cycle counter when event counting is prohibited */
+ mrs x1, pmcr_el0
+ orr x0, x1, #PMCR_EL0_DP_BIT
+ msr pmcr_el0, x0
+ isb
+
+ /* -------------------------------------------------
+ * A new write-only system register where a write of
+ * 1 to bit 0 will cause the indirect branch predictor
+ * and RSB to be flushed.
+ *
+ * A write of 0 to bit 0 will be ignored. A write of
+ * 1 to any other bit will cause an MCA.
+ * -------------------------------------------------
+ */
+ mov x0, #1
+ msr s3_0_c15_c0_6, x0
+ isb
+
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpflush_sync_exception_sp_el0
+ b sync_exception_sp_el0
+end_vector_entry workaround_bpflush_sync_exception_sp_el0
+
+vector_entry workaround_bpflush_irq_sp_el0
+ b irq_sp_el0
+end_vector_entry workaround_bpflush_irq_sp_el0
+
+vector_entry workaround_bpflush_fiq_sp_el0
+ b fiq_sp_el0
+end_vector_entry workaround_bpflush_fiq_sp_el0
+
+vector_entry workaround_bpflush_serror_sp_el0
+ b serror_sp_el0
+end_vector_entry workaround_bpflush_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpflush_sync_exception_sp_elx
+ b sync_exception_sp_elx
+end_vector_entry workaround_bpflush_sync_exception_sp_elx
+
+vector_entry workaround_bpflush_irq_sp_elx
+ b irq_sp_elx
+end_vector_entry workaround_bpflush_irq_sp_elx
+
+vector_entry workaround_bpflush_fiq_sp_elx
+ b fiq_sp_elx
+end_vector_entry workaround_bpflush_fiq_sp_elx
+
+vector_entry workaround_bpflush_serror_sp_elx
+ b serror_sp_elx
+end_vector_entry workaround_bpflush_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpflush_sync_exception_aarch64
+ apply_workaround
+ b sync_exception_aarch64
+end_vector_entry workaround_bpflush_sync_exception_aarch64
+
+vector_entry workaround_bpflush_irq_aarch64
+ apply_workaround
+ b irq_aarch64
+end_vector_entry workaround_bpflush_irq_aarch64
+
+vector_entry workaround_bpflush_fiq_aarch64
+ apply_workaround
+ b fiq_aarch64
+end_vector_entry workaround_bpflush_fiq_aarch64
+
+vector_entry workaround_bpflush_serror_aarch64
+ apply_workaround
+ b serror_aarch64
+end_vector_entry workaround_bpflush_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpflush_sync_exception_aarch32
+ apply_workaround
+ b sync_exception_aarch32
+end_vector_entry workaround_bpflush_sync_exception_aarch32
+
+vector_entry workaround_bpflush_irq_aarch32
+ apply_workaround
+ b irq_aarch32
+end_vector_entry workaround_bpflush_irq_aarch32
+
+vector_entry workaround_bpflush_fiq_aarch32
+ apply_workaround
+ b fiq_aarch32
+end_vector_entry workaround_bpflush_fiq_aarch32
+
+vector_entry workaround_bpflush_serror_aarch32
+ apply_workaround
+ b serror_aarch32
+end_vector_entry workaround_bpflush_serror_aarch32
+
+ .global denver_disable_dco
+
+ /* ---------------------------------------------
+ * Disable debug interfaces
+ * ---------------------------------------------
+ */
+func denver_disable_ext_debug
+ mov x0, #1
+ msr osdlr_el1, x0
+ isb
+ dsb sy
+ ret
+endfunc denver_disable_ext_debug
+
+ /* ----------------------------------------------------
+ * Enable dynamic code optimizer (DCO)
+ * ----------------------------------------------------
+ */
+func denver_enable_dco
+ /* DCO is not supported on PN5 and later */
+ mrs x1, midr_el1
+ mov_imm x2, DENVER_MIDR_PN4
+ cmp x1, x2
+ b.hi 1f
+
+ mov x18, x30
+ bl plat_my_core_pos
+ mov x1, #1
+ lsl x1, x1, x0
+ msr s3_0_c15_c0_2, x1
+ mov x30, x18
+1: ret
+endfunc denver_enable_dco
+
+ /* ----------------------------------------------------
+ * Disable dynamic code optimizer (DCO)
+ * ----------------------------------------------------
+ */
+func denver_disable_dco
+ /* DCO is not supported on PN5 and later */
+ mrs x1, midr_el1
+ mov_imm x2, DENVER_MIDR_PN4
+ cmp x1, x2
+ b.hi 2f
+
+ /* turn off background work */
+ mov x18, x30
+ bl plat_my_core_pos
+ mov x1, #1
+ lsl x1, x1, x0
+ lsl x2, x1, #16
+ msr s3_0_c15_c0_2, x2
+ isb
+
+ /* wait till the background work turns off */
+1: mrs x2, s3_0_c15_c0_2
+ lsr x2, x2, #32
+ and w2, w2, 0xFFFF
+ and x2, x2, x1
+ cbnz x2, 1b
+
+ mov x30, x18
+2: ret
+endfunc denver_disable_dco
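+
+/*
+ * Layout of s3_0_c15_c0_2 as implied by the enable/disable/poll sequences
+ * above, with n = plat_my_core_pos(): writing 1 << n enables DCO for the
+ * core, writing 1 << (n + 16) requests it off, and bits [47:32] report which
+ * cores still have background work in flight, hence the poll loop.
+ */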
+
+func check_errata_cve_2017_5715
+ mov x0, #ERRATA_MISSING
+#if WORKAROUND_CVE_2017_5715
+ /*
+ * Check if the CPU supports the special instruction
+ * required to flush the indirect branch predictor and
+ * RSB. Support for this operation can be determined by
+ * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
+ */
+ mrs x1, id_afr0_el1
+ mov x2, #0x10000
+ and x1, x1, x2
+ cbz x1, 1f
+ mov x0, #ERRATA_APPLIES
+1:
+#endif
+ ret
+endfunc check_errata_cve_2017_5715
+
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2018_3639
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Denver.
+ * -------------------------------------------------
+ */
+func denver_reset_func
+
+ mov x19, x30
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ /*
+ * Check if the CPU supports the special instruction
+ * required to flush the indirect branch predictor and
+ * RSB. Support for this operation can be determined by
+ * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
+ */
+ mrs x0, id_afr0_el1
+ mov x1, #0x10000
+ and x0, x0, x1
+ cmp x0, #0
+ adr x1, workaround_bpflush_runtime_exceptions
+ mrs x2, vbar_el3
+ csel x0, x1, x2, ne
+ msr vbar_el3, x0
+#endif
+
+#if WORKAROUND_CVE_2018_3639
+ /*
+	 * Denver CPUs with DENVER_MIDR_PN3 or earlier use different
+	 * bits in the ACTLR_EL3 register to disable the speculative
+	 * store buffer and memory disambiguation.
+ */
+ mrs x0, midr_el1
+ mov_imm x1, DENVER_MIDR_PN4
+ cmp x0, x1
+ mrs x0, actlr_el3
+ mov x1, #(DENVER_CPU_DIS_MD_EL3 | DENVER_CPU_DIS_SSB_EL3)
+ mov x2, #(DENVER_PN4_CPU_DIS_MD_EL3 | DENVER_PN4_CPU_DIS_SSB_EL3)
+ csel x3, x1, x2, ne
+ orr x0, x0, x3
+ msr actlr_el3, x0
+ isb
+ dsb sy
+#endif
+
+ /* ----------------------------------------------------
+ * Reset ACTLR.PMSTATE to C1 state
+ * ----------------------------------------------------
+ */
+ mrs x0, actlr_el1
+ bic x0, x0, #DENVER_CPU_PMSTATE_MASK
+ orr x0, x0, #DENVER_CPU_PMSTATE_C1
+ msr actlr_el1, x0
+
+ /* ----------------------------------------------------
+ * Enable dynamic code optimizer (DCO)
+ * ----------------------------------------------------
+ */
+ bl denver_enable_dco
+
+ ret x19
+endfunc denver_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Denver.
+ * ----------------------------------------------------
+ */
+func denver_core_pwr_dwn
+
+ mov x19, x30
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ bl denver_disable_ext_debug
+
+ ret x19
+endfunc denver_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Denver.
+ * -------------------------------------------------------
+ */
+func denver_cluster_pwr_dwn
+ ret
+endfunc denver_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+ /*
+ * Errata printing function for Denver. Must follow AAPCS.
+ */
+func denver_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata WORKAROUND_CVE_2017_5715, denver, cve_2017_5715
+ report_errata WORKAROUND_CVE_2018_3639, denver, cve_2018_3639
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc denver_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Denver specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.denver_regs, "aS"
+denver_regs: /* The ascii list of register names to be reported */
+ .asciz "actlr_el1", ""
+
+func denver_cpu_reg_dump
+ adr x6, denver_regs
+ mrs x8, ACTLR_EL1
+ ret
+endfunc denver_cpu_reg_dump
+
+/* macro to declare cpu_ops for Denver SKUs */
+.macro denver_cpu_ops_wa midr
+ declare_cpu_ops_wa denver, \midr, \
+ denver_reset_func, \
+ check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
+ CPU_NO_EXTRA3_FUNC, \
+ denver_core_pwr_dwn, \
+ denver_cluster_pwr_dwn
+.endm
+
+denver_cpu_ops_wa DENVER_MIDR_PN0
+denver_cpu_ops_wa DENVER_MIDR_PN1
+denver_cpu_ops_wa DENVER_MIDR_PN2
+denver_cpu_ops_wa DENVER_MIDR_PN3
+denver_cpu_ops_wa DENVER_MIDR_PN4
+denver_cpu_ops_wa DENVER_MIDR_PN5
+denver_cpu_ops_wa DENVER_MIDR_PN6
+denver_cpu_ops_wa DENVER_MIDR_PN7
+denver_cpu_ops_wa DENVER_MIDR_PN8
+denver_cpu_ops_wa DENVER_MIDR_PN9
diff --git a/lib/cpus/aarch64/dsu_helpers.S b/lib/cpus/aarch64/dsu_helpers.S
new file mode 100644
index 0000000..419b6ea
--- /dev/null
+++ b/lib/cpus/aarch64/dsu_helpers.S
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2019-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <dsu_def.h>
+#include <lib/cpus/errata_report.h>
+
+ /* -----------------------------------------------------------------------
+ * DSU erratum 798953 check function
+ * Checks the DSU variant, revision and configuration to determine if
+ * the erratum applies. Erratum applies on all configurations of the
+ * DSU and if revision-variant is r0p0.
+ *
+ * The erratum was fixed in r0p1.
+ *
+ * This function is called from both assembly and C environment. So it
+ * follows AAPCS.
+ *
+ * Clobbers: x0-x3
+ * -----------------------------------------------------------------------
+ */
+ .globl check_errata_dsu_798953
+ .globl errata_dsu_798953_wa
+
+func check_errata_dsu_798953
+ mov x2, #ERRATA_APPLIES
+ mov x3, #ERRATA_NOT_APPLIES
+
+ /* Check if DSU is equal to r0p0 */
+ mrs x1, CLUSTERIDR_EL1
+
+ /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+ ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\
+ #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+ mov x1, #(0x0 << CLUSTERIDR_REV_SHIFT)
+ cmp x0, x1
+ csel x0, x2, x3, EQ
+ ret
+endfunc check_errata_dsu_798953
+
+ /* --------------------------------------------------
+ * Errata Workaround for DSU erratum #798953.
+ *
+ * Can clobber only: x0-x17
+ * --------------------------------------------------
+ */
+func errata_dsu_798953_wa
+ mov x17, x30
+ bl check_errata_dsu_798953
+ cbz x0, 1f
+
+ /* If erratum applies, disable high-level clock gating */
+ mrs x0, CLUSTERACTLR_EL1
+ orr x0, x0, #CLUSTERACTLR_EL1_DISABLE_CLOCK_GATING
+ msr CLUSTERACTLR_EL1, x0
+ isb
+1:
+ ret x17
+endfunc errata_dsu_798953_wa
+
+ /* -----------------------------------------------------------------------
+ * DSU erratum 936184 check function
+ * Checks the DSU variant, revision and configuration to determine if
+ * the erratum applies. Erratum applies if ACP interface is present
+ * in the DSU and revision-variant < r2p0.
+ *
+ * The erratum was fixed in r2p0.
+ *
+ * This function is called from both assembly and C environment. So it
+ * follows AAPCS.
+ *
+ * Clobbers: x0-x15
+ * -----------------------------------------------------------------------
+ */
+ .globl check_errata_dsu_936184
+ .globl errata_dsu_936184_wa
+ .weak is_scu_present_in_dsu
+
+ /* --------------------------------------------------------------------
+	 * Default behaviour assumes that the SCU is always present with the DSU.
+ * CPUs can override this definition if required.
+ *
+ * Can clobber only: x0-x14
+ * --------------------------------------------------------------------
+ */
+func is_scu_present_in_dsu
+ mov x0, #1
+ ret
+endfunc is_scu_present_in_dsu
+
+func check_errata_dsu_936184
+ mov x15, x30
+ bl is_scu_present_in_dsu
+ cmp x0, xzr
+ /* Default error status */
+ mov x0, #ERRATA_NOT_APPLIES
+
+ /* If SCU is not present, return without applying patch */
+ b.eq 1f
+
+ /* Erratum applies only if DSU has the ACP interface */
+ mrs x1, CLUSTERCFR_EL1
+ ubfx x1, x1, #CLUSTERCFR_ACP_SHIFT, #1
+ cbz x1, 1f
+
+ /* If ACP is present, check if DSU is older than r2p0 */
+ mrs x1, CLUSTERIDR_EL1
+
+ /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+ ubfx x2, x1, #CLUSTERIDR_REV_SHIFT,\
+ #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+ cmp x2, #(0x2 << CLUSTERIDR_VAR_SHIFT)
+ b.hs 1f
+ mov x0, #ERRATA_APPLIES
+1:
+ ret x15
+endfunc check_errata_dsu_936184
+
+ /* --------------------------------------------------
+ * Errata Workaround for DSU erratum #936184.
+ *
+ * Can clobber only: x0-x17
+ * --------------------------------------------------
+ */
+func errata_dsu_936184_wa
+ mov x17, x30
+ bl check_errata_dsu_936184
+ cbz x0, 1f
+
+ /* If erratum applies, we set a mask to a DSU control register */
+ mrs x0, CLUSTERACTLR_EL1
+ ldr x1, =DSU_ERRATA_936184_MASK
+ orr x0, x0, x1
+ msr CLUSTERACTLR_EL1, x0
+ isb
+1:
+ ret x17
+endfunc errata_dsu_936184_wa
+
+ /* -----------------------------------------------------------------------
+ * DSU erratum 2313941 check function
+ * Checks the DSU variant, revision and configuration to determine if
+ * the erratum applies. Erratum applies on all configurations of the
+ * DSU and if revision-variant is r0p0, r1p0, r2p0, r2p1, r3p0, r3p1.
+ *
+ * The erratum is still open.
+ *
+ * This function is called from both assembly and C environment. So it
+ * follows AAPCS.
+ *
+ * Clobbers: x0-x3
+ * -----------------------------------------------------------------------
+ */
+ .globl check_errata_dsu_2313941
+ .globl errata_dsu_2313941_wa
+
+func check_errata_dsu_2313941
+ mov x2, #ERRATA_APPLIES
+ mov x3, #ERRATA_NOT_APPLIES
+
+ /* Check if DSU version is less than or equal to r3p1 */
+ mrs x1, CLUSTERIDR_EL1
+
+ /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+ ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\
+ #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+ mov x1, #(0x31 << CLUSTERIDR_REV_SHIFT)
+ cmp x0, x1
+ csel x0, x2, x3, LS
+ ret
+endfunc check_errata_dsu_2313941
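+
+/*
+ * Encoding note: the ubfx above packs the CLUSTERIDR variant and revision as
+ * (variant << CLUSTERIDR_REV_BITS) | revision, so assuming the usual four-bit
+ * fields r3p1 encodes as 0x31 and the LS comparison accepts r0p0 through r3p1.
+ */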
+
+ /* --------------------------------------------------
+ * Errata Workaround for DSU erratum #2313941.
+ *
+ * Can clobber only: x0-x17
+ * --------------------------------------------------
+ */
+func errata_dsu_2313941_wa
+ mov x17, x30
+ bl check_errata_dsu_2313941
+ cbz x0, 1f
+
+ /* If erratum applies, disable high-level clock gating */
+ mrs x0, CLUSTERACTLR_EL1
+ orr x0, x0, #CLUSTERACTLR_EL1_DISABLE_SCLK_GATING
+ msr CLUSTERACTLR_EL1, x0
+ isb
+1:
+ ret x17
+endfunc errata_dsu_2313941_wa
+
diff --git a/lib/cpus/aarch64/generic.S b/lib/cpus/aarch64/generic.S
new file mode 100644
index 0000000..ef1f048
--- /dev/null
+++ b/lib/cpus/aarch64/generic.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <generic.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func generic_disable_dcache
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+ ret
+endfunc generic_disable_dcache
+
+func generic_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl generic_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ ret x18
+endfunc generic_core_pwr_dwn
+
+func generic_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl generic_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ ret x18
+
+endfunc generic_cluster_pwr_dwn
+
+/* ---------------------------------------------
+ * Unimplemented functions.
+ * ---------------------------------------------
+ */
+.equ generic_errata_report, 0
+.equ generic_cpu_reg_dump, 0
+.equ generic_reset_func, 0
+
+declare_cpu_ops generic, AARCH64_GENERIC_MIDR, \
+ generic_reset_func, \
+ generic_core_pwr_dwn, \
+ generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_e1.S b/lib/cpus/aarch64/neoverse_e1.S
new file mode 100644
index 0000000..96b63cf
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_e1.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <neoverse_e1.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse E1 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse-E1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Neoverse-E1.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
+func neoverse_e1_reset_func
+ mov x19, x30
+
+#if ERRATA_DSU_936184
+ bl errata_dsu_936184_wa
+#endif
+
+ ret x19
+endfunc neoverse_e1_reset_func
+
+func neoverse_e1_cpu_pwr_dwn
+ mrs x0, NEOVERSE_E1_CPUPWRCTLR_EL1
+ orr x0, x0, #NEOVERSE_E1_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr NEOVERSE_E1_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc neoverse_e1_cpu_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Neoverse E1. Must follow AAPCS.
+ */
+func neoverse_e1_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_DSU_936184, neoverse_e1, dsu_936184
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc neoverse_e1_errata_report
+#endif
+
+
+.section .rodata.neoverse_e1_regs, "aS"
+neoverse_e1_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func neoverse_e1_cpu_reg_dump
+ adr x6, neoverse_e1_regs
+ mrs x8, NEOVERSE_E1_ECTLR_EL1
+ ret
+endfunc neoverse_e1_cpu_reg_dump
+
+declare_cpu_ops neoverse_e1, NEOVERSE_E1_MIDR, \
+ neoverse_e1_reset_func, \
+ neoverse_e1_cpu_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
new file mode 100644
index 0000000..ec62519
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -0,0 +1,745 @@
+/*
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpuamu.h>
+#include <cpu_macros.S>
+#include <neoverse_n1.h>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse N1 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse-N1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ .global neoverse_n1_errata_ic_trap_handler
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table NEOVERSE_N1_BHB_LOOP_COUNT, neoverse_n1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Erratum 1043202.
+ * This applies to revision r0p0 and r1p0 of Neoverse N1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1043202_wa
+ /* Compare x0 against revision r1p0 */
+ mov x17, x30
+ bl check_errata_1043202
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ ldr x0, =0x0
+ msr CPUPSELR_EL3, x0
+ ldr x0, =0xF3BF8F2F
+ msr CPUPOR_EL3, x0
+ ldr x0, =0xFFFFFFFF
+ msr CPUPMR_EL3, x0
+ ldr x0, =0x800200071
+ msr CPUPCR_EL3, x0
+ isb
+1:
+ ret x17
+endfunc errata_n1_1043202_wa
+
+func check_errata_1043202
+ /* Applies to r0p0 and r1p0 */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1043202
+
+/* --------------------------------------------------
+ * Disable speculative loads if Neoverse N1 supports
+ * SSBS.
+ *
+ * Shall clobber: x0.
+ * --------------------------------------------------
+ */
+func neoverse_n1_disable_speculative_loads
+ /* Check if the PE implements SSBS */
+ mrs x0, id_aa64pfr1_el1
+ tst x0, #(ID_AA64PFR1_EL1_SSBS_MASK << ID_AA64PFR1_EL1_SSBS_SHIFT)
+ b.eq 1f
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+1:
+ ret
+endfunc neoverse_n1_disable_speculative_loads
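+
+/*
+ * In C terms, roughly (register accessor names hypothetical):
+ *
+ *	if ((read_id_aa64pfr1_el1() &
+ *	     (ID_AA64PFR1_EL1_SSBS_MASK << ID_AA64PFR1_EL1_SSBS_SHIFT)) != 0U)
+ *		write_ssbs(0);	// SSBS == 0 disables speculative loads
+ */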
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1073348
+ * This applies to revision r0p0 and r1p0 of Neoverse N1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1073348_wa
+ /* Compare x0 against revision r1p0 */
+ mov x17, x30
+ bl check_errata_1073348
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N1_CPUACTLR_EL1
+ orr x1, x1, NEOVERSE_N1_CPUACTLR_EL1_BIT_6
+ msr NEOVERSE_N1_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_n1_1073348_wa
+
+func check_errata_1073348
+ /* Applies to r0p0 and r1p0 */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1073348
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1130799
+ * This applies to revision <=r2p0 of Neoverse N1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1130799_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_1130799
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N1_CPUACTLR2_EL1
+ orr x1, x1, NEOVERSE_N1_CPUACTLR2_EL1_BIT_59
+ msr NEOVERSE_N1_CPUACTLR2_EL1, x1
+1:
+ ret x17
+endfunc errata_n1_1130799_wa
+
+func check_errata_1130799
+ /* Applies to <=r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_1130799
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1165347
+ * This applies to revision <=r2p0 of Neoverse N1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1165347_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_1165347
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N1_CPUACTLR2_EL1
+ orr x1, x1, NEOVERSE_N1_CPUACTLR2_EL1_BIT_0
+ orr x1, x1, NEOVERSE_N1_CPUACTLR2_EL1_BIT_15
+ msr NEOVERSE_N1_CPUACTLR2_EL1, x1
+1:
+ ret x17
+endfunc errata_n1_1165347_wa
+
+func check_errata_1165347
+ /* Applies to <=r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_1165347
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1207823
+ * This applies to revision <=r2p0 of Neoverse N1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1207823_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_1207823
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N1_CPUACTLR2_EL1
+ orr x1, x1, NEOVERSE_N1_CPUACTLR2_EL1_BIT_11
+ msr NEOVERSE_N1_CPUACTLR2_EL1, x1
+1:
+ ret x17
+endfunc errata_n1_1207823_wa
+
+func check_errata_1207823
+ /* Applies to <=r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_1207823
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1220197
+ * This applies to revision <=r2p0 of Neoverse N1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1220197_wa
+ /* Compare x0 against revision r2p0 */
+ mov x17, x30
+ bl check_errata_1220197
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N1_CPUECTLR_EL1
+ orr x1, x1, NEOVERSE_N1_WS_THR_L2_MASK
+ msr NEOVERSE_N1_CPUECTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_n1_1220197_wa
+
+func check_errata_1220197
+ /* Applies to <=r2p0 */
+ mov x1, #0x20
+ b cpu_rev_var_ls
+endfunc check_errata_1220197
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1257314
+ * This applies to revision <=r3p0 of Neoverse N1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1257314_wa
+ /* Compare x0 against revision r3p0 */
+ mov x17, x30
+ bl check_errata_1257314
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N1_CPUACTLR3_EL1
+ orr x1, x1, NEOVERSE_N1_CPUACTLR3_EL1_BIT_10
+ msr NEOVERSE_N1_CPUACTLR3_EL1, x1
+1:
+ ret x17
+endfunc errata_n1_1257314_wa
+
+func check_errata_1257314
+ /* Applies to <=r3p0 */
+ mov x1, #0x30
+ b cpu_rev_var_ls
+endfunc check_errata_1257314
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1262606
+ * This applies to revision <=r3p0 of Neoverse N1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1262606_wa
+ /* Compare x0 against revision r3p0 */
+ mov x17, x30
+ bl check_errata_1262606
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N1_CPUACTLR_EL1
+ orr x1, x1, NEOVERSE_N1_CPUACTLR_EL1_BIT_13
+ msr NEOVERSE_N1_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_n1_1262606_wa
+
+func check_errata_1262606
+ /* Applies to <=r3p0 */
+ mov x1, #0x30
+ b cpu_rev_var_ls
+endfunc check_errata_1262606
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1262888
+ * This applies to revision <=r3p0 of Neoverse N1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1262888_wa
+ /* Compare x0 against revision r3p0 */
+ mov x17, x30
+ bl check_errata_1262888
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N1_CPUECTLR_EL1
+ orr x1, x1, NEOVERSE_N1_CPUECTLR_EL1_MM_TLBPF_DIS_BIT
+ msr NEOVERSE_N1_CPUECTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_n1_1262888_wa
+
+func check_errata_1262888
+ /* Applies to <=r3p0 */
+ mov x1, #0x30
+ b cpu_rev_var_ls
+endfunc check_errata_1262888
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1275112
+ * This applies to revision <=r3p0 of Neoverse N1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1275112_wa
+ /* Compare x0 against revision r3p0 */
+ mov x17, x30
+ bl check_errata_1275112
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N1_CPUACTLR_EL1
+ orr x1, x1, NEOVERSE_N1_CPUACTLR_EL1_BIT_13
+ msr NEOVERSE_N1_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_n1_1275112_wa
+
+func check_errata_1275112
+ /* Applies to <=r3p0 */
+ mov x1, #0x30
+ b cpu_rev_var_ls
+endfunc check_errata_1275112
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Erratum 1315703.
+ * This applies to revision <= r3p0 of Neoverse N1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1315703_wa
+	/* Compare x0 against revision r3p0 */
+ mov x17, x30
+ bl check_errata_1315703
+ cbz x0, 1f
+
+ mrs x0, NEOVERSE_N1_CPUACTLR2_EL1
+ orr x0, x0, #NEOVERSE_N1_CPUACTLR2_EL1_BIT_16
+ msr NEOVERSE_N1_CPUACTLR2_EL1, x0
+
+1:
+ ret x17
+endfunc errata_n1_1315703_wa
+
+func check_errata_1315703
+ /* Applies to everything <= r3p0. */
+ mov x1, #0x30
+ b cpu_rev_var_ls
+endfunc check_errata_1315703
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Erratum 1542419.
+ * This applies to revisions r3p0 - r4p0 of Neoverse N1
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1542419_wa
+	/* Compare x0 against revisions r3p0 - r4p0 */
+ mov x17, x30
+ bl check_errata_1542419
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ ldr x0, =0x0
+ msr CPUPSELR_EL3, x0
+ ldr x0, =0xEE670D35
+ msr CPUPOR_EL3, x0
+ ldr x0, =0xFFFF0FFF
+ msr CPUPMR_EL3, x0
+ ldr x0, =0x08000020007D
+ msr CPUPCR_EL3, x0
+ isb
+1:
+ ret x17
+endfunc errata_n1_1542419_wa
+
+func check_errata_1542419
+ /* Applies to everything r3p0 - r4p0. */
+ mov x1, #0x30
+ mov x2, #0x40
+ b cpu_rev_var_range
+endfunc check_errata_1542419
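+
+/*
+ * Range-check example: with the (variant << 4) | revision packing, the bounds
+ * #0x30 and #0x40 passed to cpu_rev_var_range accept every packed rev_var
+ * from r3p0 (0x30) through r4p0 (0x40) inclusive.
+ */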
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1868343.
+ * This applies to revision <= r4p0 of Neoverse N1.
+ * This workaround is the same as the workaround for
+ * errata 1262606 and 1275112 but applies to a wider
+ * revision range.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1868343_wa
+ /*
+ * Compare x0 against revision r4p0
+ */
+ mov x17, x30
+ bl check_errata_1868343
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N1_CPUACTLR_EL1
+ orr x1, x1, NEOVERSE_N1_CPUACTLR_EL1_BIT_13
+ msr NEOVERSE_N1_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_n1_1868343_wa
+
+func check_errata_1868343
+ /* Applies to everything <= r4p0 */
+ mov x1, #0x40
+ b cpu_rev_var_ls
+endfunc check_errata_1868343
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1946160.
+ * This applies to revisions r3p0, r3p1, r4p0, and
+ * r4p1 of Neoverse N1. It also exists in r0p0, r1p0,
+ * and r2p0 but there is no fix in these revisions.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1946160_wa
+ /*
+ * Compare x0 against r3p0 - r4p1
+ */
+ mov x17, x30
+ bl check_errata_1946160
+ cbz x0, 1f
+
+ mov x0, #3
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3900002
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #4
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #5
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_n1_1946160_wa
+
+func check_errata_1946160
+ /* Applies to r3p0 - r4p1. */
+ mov x1, #0x30
+ mov x2, #0x41
+ b cpu_rev_var_range
+endfunc check_errata_1946160
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #2743102
+ * This applies to revisions <= r4p1 and is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ----------------------------------------------------
+ */
+func errata_n1_2743102_wa
+ mov x17, x30
+ bl check_errata_2743102
+ cbz x0, 1f
+
+ /* dsb before isb of power down sequence */
+ dsb sy
+1:
+ ret x17
+endfunc errata_n1_2743102_wa
+
+func check_errata_2743102
+ /* Applies to all revisions <= r4p1 */
+ mov x1, #0x41
+ b cpu_rev_var_ls
+endfunc check_errata_2743102
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+func neoverse_n1_reset_func
+ mov x19, x30
+
+ bl neoverse_n1_disable_speculative_loads
+
+ /* Forces all cacheable atomic instructions to be near */
+ mrs x0, NEOVERSE_N1_CPUACTLR2_EL1
+ orr x0, x0, #NEOVERSE_N1_CPUACTLR2_EL1_BIT_2
+ msr NEOVERSE_N1_CPUACTLR2_EL1, x0
+ isb
+
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_N1_1043202
+ mov x0, x18
+ bl errata_n1_1043202_wa
+#endif
+
+#if ERRATA_N1_1073348
+ mov x0, x18
+ bl errata_n1_1073348_wa
+#endif
+
+#if ERRATA_N1_1130799
+ mov x0, x18
+ bl errata_n1_1130799_wa
+#endif
+
+#if ERRATA_N1_1165347
+ mov x0, x18
+ bl errata_n1_1165347_wa
+#endif
+
+#if ERRATA_N1_1207823
+ mov x0, x18
+ bl errata_n1_1207823_wa
+#endif
+
+#if ERRATA_N1_1220197
+ mov x0, x18
+ bl errata_n1_1220197_wa
+#endif
+
+#if ERRATA_N1_1257314
+ mov x0, x18
+ bl errata_n1_1257314_wa
+#endif
+
+#if ERRATA_N1_1262606
+ mov x0, x18
+ bl errata_n1_1262606_wa
+#endif
+
+#if ERRATA_N1_1262888
+ mov x0, x18
+ bl errata_n1_1262888_wa
+#endif
+
+#if ERRATA_N1_1275112
+ mov x0, x18
+ bl errata_n1_1275112_wa
+#endif
+
+#if ERRATA_N1_1315703
+ mov x0, x18
+ bl errata_n1_1315703_wa
+#endif
+
+#if ERRATA_N1_1542419
+ mov x0, x18
+ bl errata_n1_1542419_wa
+#endif
+
+#if ERRATA_N1_1868343
+ mov x0, x18
+ bl errata_n1_1868343_wa
+#endif
+
+#if ERRATA_N1_1946160
+ mov x0, x18
+ bl errata_n1_1946160_wa
+#endif
+
+#if ENABLE_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, actlr_el3
+ orr x0, x0, #NEOVERSE_N1_ACTLR_AMEN_BIT
+ msr actlr_el3, x0
+
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ mrs x0, actlr_el2
+ orr x0, x0, #NEOVERSE_N1_ACTLR_AMEN_BIT
+ msr actlr_el2, x0
+
+ /* Enable group0 counters */
+ mov x0, #NEOVERSE_N1_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET_EL0, x0
+#endif
+
+#if NEOVERSE_Nx_EXTERNAL_LLC
+	/* Some systems may have an external LLC; the core needs to be made aware */
+ mrs x0, NEOVERSE_N1_CPUECTLR_EL1
+ orr x0, x0, NEOVERSE_N1_CPUECTLR_EL1_EXTLLC_BIT
+ msr NEOVERSE_N1_CPUECTLR_EL1, x0
+#endif
+
+#if ERRATA_DSU_936184
+ bl errata_dsu_936184_wa
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Neoverse-N1 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_neoverse_n1
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret x19
+endfunc neoverse_n1_reset_func
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func neoverse_n1_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, NEOVERSE_N1_CPUPWRCTLR_EL1
+ orr x0, x0, #NEOVERSE_N1_CORE_PWRDN_EN_MASK
+ msr NEOVERSE_N1_CPUPWRCTLR_EL1, x0
+#if ERRATA_N1_2743102
+ mov x15, x30
+ bl cpu_get_rev_var
+ bl errata_n1_2743102_wa
+ mov x30, x15
+#endif /* ERRATA_N1_2743102 */
+ isb
+ ret
+endfunc neoverse_n1_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Neoverse N1. Must follow AAPCS.
+ */
+func neoverse_n1_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_N1_1043202, neoverse_n1, 1043202
+ report_errata ERRATA_N1_1073348, neoverse_n1, 1073348
+ report_errata ERRATA_N1_1130799, neoverse_n1, 1130799
+ report_errata ERRATA_N1_1165347, neoverse_n1, 1165347
+ report_errata ERRATA_N1_1207823, neoverse_n1, 1207823
+ report_errata ERRATA_N1_1220197, neoverse_n1, 1220197
+ report_errata ERRATA_N1_1257314, neoverse_n1, 1257314
+ report_errata ERRATA_N1_1262606, neoverse_n1, 1262606
+ report_errata ERRATA_N1_1262888, neoverse_n1, 1262888
+ report_errata ERRATA_N1_1275112, neoverse_n1, 1275112
+ report_errata ERRATA_N1_1315703, neoverse_n1, 1315703
+ report_errata ERRATA_N1_1542419, neoverse_n1, 1542419
+ report_errata ERRATA_N1_1868343, neoverse_n1, 1868343
+ report_errata ERRATA_N1_1946160, neoverse_n1, 1946160
+ report_errata ERRATA_N1_2743102, neoverse_n1, 2743102
+ report_errata ERRATA_DSU_936184, neoverse_n1, dsu_936184
+ report_errata WORKAROUND_CVE_2022_23960, neoverse_n1, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc neoverse_n1_errata_report
+#endif
+
+/*
+ * Handle trap of EL0 IC IVAU instructions to EL3 by executing a TLB
+ * inner-shareable invalidation to an arbitrary address followed by a DSB.
+ *
+ * x1: Exception Syndrome
+ */
+func neoverse_n1_errata_ic_trap_handler
+ cmp x1, #NEOVERSE_N1_EC_IC_TRAP
+ b.ne 1f
+ tlbi vae3is, xzr
+ dsb sy
+
+	/* Skip the IC instruction itself */
+ mrs x3, elr_el3
+ add x3, x3, #4
+ msr elr_el3, x3
+
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+#if IMAGE_BL31 && RAS_EXTENSION
+ /*
+ * Issue Error Synchronization Barrier to synchronize SErrors before
+ * exiting EL3. We're running with EAs unmasked, so any synchronized
+ * errors would be taken immediately; therefore no need to inspect
+ * DISR_EL1 register.
+ */
+ esb
+#endif
+ exception_return
+1:
+ ret
+endfunc neoverse_n1_errata_ic_trap_handler
+
+ /* ---------------------------------------------
+ * This function provides neoverse_n1 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.neoverse_n1_regs, "aS"
+neoverse_n1_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func neoverse_n1_cpu_reg_dump
+ adr x6, neoverse_n1_regs
+ mrs x8, NEOVERSE_N1_CPUECTLR_EL1
+ ret
+endfunc neoverse_n1_cpu_reg_dump
+
+declare_cpu_ops_eh neoverse_n1, NEOVERSE_N1_MIDR, \
+ neoverse_n1_reset_func, \
+ neoverse_n1_errata_ic_trap_handler, \
+ neoverse_n1_core_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_n1_pubsub.c b/lib/cpus/aarch64/neoverse_n1_pubsub.c
new file mode 100644
index 0000000..b1b7bb8
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_n1_pubsub.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <neoverse_n1.h>
+#include <cpuamu.h>
+#include <lib/el3_runtime/pubsub_events.h>
+
+static void *neoverse_n1_context_save(const void *arg)
+{
+ if (midr_match(NEOVERSE_N1_MIDR) != 0)
+ cpuamu_context_save(NEOVERSE_N1_AMU_NR_COUNTERS);
+
+ return (void *)0;
+}
+
+static void *neoverse_n1_context_restore(const void *arg)
+{
+ if (midr_match(NEOVERSE_N1_MIDR) != 0)
+ cpuamu_context_restore(NEOVERSE_N1_AMU_NR_COUNTERS);
+
+ return (void *)0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, neoverse_n1_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, neoverse_n1_context_restore);
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
new file mode 100644
index 0000000..5861dec
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -0,0 +1,639 @@
+/*
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+#include <neoverse_n2.h>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse N2 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse-N2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table NEOVERSE_N2_BHB_LOOP_COUNT, neoverse_n2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2002655.
+ * This applies to revision r0p0 of Neoverse N2 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n2_2002655_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2002655
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+	ldr	x0, =0x6
+	msr	S3_6_c15_c8_0, x0
+	ldr	x0, =0xF3A08002
+	msr	S3_6_c15_c8_2, x0
+	ldr	x0, =0xFFF0F7FE
+	msr	S3_6_c15_c8_3, x0
+	ldr	x0, =0x40000001003ff
+	msr	S3_6_c15_c8_1, x0
+	ldr	x0, =0x7
+	msr	S3_6_c15_c8_0, x0
+	ldr	x0, =0xBF200000
+	msr	S3_6_c15_c8_2, x0
+	ldr	x0, =0xFFEF0000
+	msr	S3_6_c15_c8_3, x0
+	ldr	x0, =0x40000001003f3
+	msr	S3_6_c15_c8_1, x0
+ isb
+1:
+ ret x17
+endfunc errata_n2_2002655_wa
+
+func check_errata_2002655
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2002655
+
+/* ---------------------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2067956.
+ * This applies to revision r0p0 of Neoverse N2 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------
+ */
+func errata_n2_2067956_wa
+ /* Compare x0 against revision r0p0 */
+ mov x17, x30
+ bl check_errata_2067956
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N2_CPUACTLR_EL1
+ orr x1, x1, NEOVERSE_N2_CPUACTLR_EL1_BIT_46
+ msr NEOVERSE_N2_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_n2_2067956_wa
+
+func check_errata_2067956
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2067956
+
+/* ---------------------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2025414.
+ * This applies to revision r0p0 of Neoverse N2 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------
+ */
+func errata_n2_2025414_wa
+ /* Compare x0 against revision r0p0 */
+ mov x17, x30
+ bl check_errata_2025414
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N2_CPUECTLR_EL1
+ orr x1, x1, NEOVERSE_N2_CPUECTLR_EL1_PFSTIDIS_BIT
+ msr NEOVERSE_N2_CPUECTLR_EL1, x1
+
+1:
+ ret x17
+endfunc errata_n2_2025414_wa
+
+func check_errata_2025414
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2025414
+
+/* ---------------------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2189731.
+ * This applies to revision r0p0 of Neoverse N2 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ---------------------------------------------------------------
+ */
+func errata_n2_2189731_wa
+ /* Compare x0 against revision r0p0 */
+ mov x17, x30
+ bl check_errata_2189731
+ cbz x0, 1f
+ mrs x1, NEOVERSE_N2_CPUACTLR5_EL1
+ orr x1, x1, NEOVERSE_N2_CPUACTLR5_EL1_BIT_44
+ msr NEOVERSE_N2_CPUACTLR5_EL1, x1
+
+1:
+ ret x17
+endfunc errata_n2_2189731_wa
+
+func check_errata_2189731
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2189731
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2138956.
+ * This applies to revision r0p0 of Neoverse N2 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n2_2138956_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2138956
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+	ldr	x0, =0x3
+	msr	S3_6_c15_c8_0, x0
+	ldr	x0, =0xF3A08002
+	msr	S3_6_c15_c8_2, x0
+	ldr	x0, =0xFFF0F7FE
+	msr	S3_6_c15_c8_3, x0
+	ldr	x0, =0x10002001003FF
+	msr	S3_6_c15_c8_1, x0
+	ldr	x0, =0x4
+	msr	S3_6_c15_c8_0, x0
+	ldr	x0, =0xBF200000
+	msr	S3_6_c15_c8_2, x0
+	ldr	x0, =0xFFEF0000
+	msr	S3_6_c15_c8_3, x0
+	ldr	x0, =0x10002001003F3
+	msr	S3_6_c15_c8_1, x0
+ isb
+1:
+ ret x17
+endfunc errata_n2_2138956_wa
+
+func check_errata_2138956
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2138956
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2242415.
+ * This applies to revision r0p0 of Neoverse N2 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_n2_2242415_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2242415
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ mrs x1, NEOVERSE_N2_CPUACTLR_EL1
+ orr x1, x1, NEOVERSE_N2_CPUACTLR_EL1_BIT_22
+ msr NEOVERSE_N2_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_n2_2242415_wa
+
+func check_errata_2242415
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2242415
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2138953.
+ * This applies to revision r0p0 of Neoverse N2 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_n2_2138953_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2138953
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ mrs x1, NEOVERSE_N2_CPUECTLR2_EL1
+ mov x0, #NEOVERSE_N2_CPUECTLR2_EL1_PF_MODE_CNSRV
+ bfi x1, x0, #CPUECTLR2_EL1_PF_MODE_LSB, #CPUECTLR2_EL1_PF_MODE_WIDTH
+ msr NEOVERSE_N2_CPUECTLR2_EL1, x1
+1:
+ ret x17
+endfunc errata_n2_2138953_wa
+
+func check_errata_2138953
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2138953
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2138958.
+ * This applies to revision r0p0 of Neoverse N2 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_n2_2138958_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2138958
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ mrs x1, NEOVERSE_N2_CPUACTLR5_EL1
+ orr x1, x1, NEOVERSE_N2_CPUACTLR5_EL1_BIT_13
+ msr NEOVERSE_N2_CPUACTLR5_EL1, x1
+1:
+ ret x17
+endfunc errata_n2_2138958_wa
+
+func check_errata_2138958
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2138958
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2242400.
+ * This applies to revision r0p0 of Neoverse N2 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_n2_2242400_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2242400
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ mrs x1, NEOVERSE_N2_CPUACTLR5_EL1
+ orr x1, x1, NEOVERSE_N2_CPUACTLR5_EL1_BIT_17
+ msr NEOVERSE_N2_CPUACTLR5_EL1, x1
+ ldr x0, =0x2
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10F600E000
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FF80E000
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x80000000003FF
+ msr S3_6_c15_c8_1, x0
+ isb
+1:
+ ret x17
+endfunc errata_n2_2242400_wa
+
+func check_errata_2242400
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2242400
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2280757.
+ * This applies to revision r0p0 of Neoverse N2 and is still open.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_n2_2280757_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2280757
+ cbz x0, 1f
+
+ /* Apply instruction patching sequence */
+ mrs x1, NEOVERSE_N2_CPUACTLR_EL1
+ orr x1, x1, NEOVERSE_N2_CPUACTLR_EL1_BIT_22
+ msr NEOVERSE_N2_CPUACTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_n2_2280757_wa
+
+func check_errata_2280757
+ /* Applies to r0p0 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2280757
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2326639.
+ * This applies to revision r0p0 of Neoverse N2,
+ * fixed in r0p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_n2_2326639_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2326639
+ cbz x0, 1f
+
+ /* Set bit 36 in CPUACTLR2_EL1 */
+ mrs x1, NEOVERSE_N2_CPUACTLR2_EL1
+ orr x1, x1, #NEOVERSE_N2_CPUACTLR2_EL1_BIT_36
+ msr NEOVERSE_N2_CPUACTLR2_EL1, x1
+1:
+ ret x17
+endfunc errata_n2_2326639_wa
+
+func check_errata_2326639
+ /* Applies to r0p0, fixed in r0p1 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2326639
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2376738.
+ * This applies to revision r0p0 of Neoverse N2,
+ * fixed in r0p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current CPU.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_n2_2376738_wa
+ mov x17, x30
+ bl check_errata_2376738
+ cbz x0, 1f
+
+ /* Set CPUACTLR2_EL1[0] to 1 to force PLDW/PFRM
+ * ST to behave like PLD/PFRM LD and not cause
+ * invalidations to other PE caches.
+ */
+ mrs x1, NEOVERSE_N2_CPUACTLR2_EL1
+ orr x1, x1, NEOVERSE_N2_CPUACTLR2_EL1_BIT_0
+ msr NEOVERSE_N2_CPUACTLR2_EL1, x1
+1:
+ ret x17
+endfunc errata_n2_2376738_wa
+
+func check_errata_2376738
+ /* Applies to r0p0, fixed in r0p1 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2376738
+
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N2 Erratum 2388450.
+ * This applies to revision r0p0 of Neoverse N2,
+ * fixed in r0p1.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_n2_2388450_wa
+ /* Check revision. */
+ mov x17, x30
+ bl check_errata_2388450
+ cbz x0, 1f
+
+ /* Set bit 40 in CPUACTLR2_EL1 */
+ mrs x1, NEOVERSE_N2_CPUACTLR2_EL1
+ orr x1, x1, #NEOVERSE_N2_CPUACTLR2_EL1_BIT_40
+ msr NEOVERSE_N2_CPUACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_n2_2388450_wa
+
+func check_errata_2388450
+ /* Applies to r0p0, fixed in r0p1 */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_2388450
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+ /* -------------------------------------------
+ * The CPU Ops reset function for Neoverse N2.
+ * -------------------------------------------
+ */
+func neoverse_n2_reset_func
+ mov x19, x30
+
+ /* Check if the PE implements SSBS */
+ mrs x0, id_aa64pfr1_el1
+ tst x0, #(ID_AA64PFR1_EL1_SSBS_MASK << ID_AA64PFR1_EL1_SSBS_SHIFT)
+ b.eq 1f
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+1:
+ /* Force all cacheable atomic instructions to be near */
+ mrs x0, NEOVERSE_N2_CPUACTLR2_EL1
+ orr x0, x0, #NEOVERSE_N2_CPUACTLR2_EL1_BIT_2
+ msr NEOVERSE_N2_CPUACTLR2_EL1, x0
+
+ /* Get the CPU revision and stash it in x18. */
+ bl cpu_get_rev_var
+ mov x18, x0
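+ /*
+ * x18 is safe across the workaround calls below: each errata_*_wa
+ * routine clobbers at most x0-x17 (see the function headers above).
+ */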
+
+#if ERRATA_DSU_2313941
+ bl errata_dsu_2313941_wa
+#endif
+
+#if ERRATA_N2_2067956
+ mov x0, x18
+ bl errata_n2_2067956_wa
+#endif
+
+#if ERRATA_N2_2025414
+ mov x0, x18
+ bl errata_n2_2025414_wa
+#endif
+
+#if ERRATA_N2_2189731
+ mov x0, x18
+ bl errata_n2_2189731_wa
+#endif
+
+#if ERRATA_N2_2138956
+ mov x0, x18
+ bl errata_n2_2138956_wa
+#endif
+
+#if ERRATA_N2_2138953
+ mov x0, x18
+ bl errata_n2_2138953_wa
+#endif
+
+#if ERRATA_N2_2242415
+ mov x0, x18
+ bl errata_n2_2242415_wa
+#endif
+
+#if ERRATA_N2_2138958
+ mov x0, x18
+ bl errata_n2_2138958_wa
+#endif
+
+#if ERRATA_N2_2242400
+ mov x0, x18
+ bl errata_n2_2242400_wa
+#endif
+
+#if ERRATA_N2_2280757
+ mov x0, x18
+ bl errata_n2_2280757_wa
+#endif
+
+#if ERRATA_N2_2376738
+ mov x0, x18
+ bl errata_n2_2376738_wa
+#endif
+
+#if ERRATA_N2_2388450
+ mov x0, x18
+ bl errata_n2_2388450_wa
+#endif
+
+#if ENABLE_AMU
+ /*
+ * Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3.
+ * CPTR_EL3.TAM set to 1 traps AMU accesses, so the bit must be
+ * cleared here rather than set.
+ */
+ mrs x0, cptr_el3
+ bic x0, x0, #TAM_BIT
+ msr cptr_el3, x0
+
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ mrs x0, cptr_el2
+ bic x0, x0, #TAM_BIT
+ msr cptr_el2, x0
+
+ /* No need to enable the counters as this would be done at EL3 exit */
+#endif
+
+#if NEOVERSE_Nx_EXTERNAL_LLC
+ /* Some systems may have an external LLC; the core needs to be made aware */
+ mrs x0, NEOVERSE_N2_CPUECTLR_EL1
+ orr x0, x0, NEOVERSE_N2_CPUECTLR_EL1_EXTLLC_BIT
+ msr NEOVERSE_N2_CPUECTLR_EL1, x0
+#endif
+
+#if ERRATA_N2_2002655
+ mov x0, x18
+ bl errata_n2_2002655_wa
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Neoverse-N2 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_neoverse_n2
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret x19
+endfunc neoverse_n2_reset_func
+
+func neoverse_n2_core_pwr_dwn
+#if ERRATA_N2_2326639
+ mov x15, x30
+ bl cpu_get_rev_var
+ bl errata_n2_2326639_wa
+ mov x30, x15
+#endif /* ERRATA_N2_2326639 */
+
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * No need to do cache maintenance here.
+ * ---------------------------------------------------
+ */
+ mrs x0, NEOVERSE_N2_CPUPWRCTLR_EL1
+ orr x0, x0, #NEOVERSE_N2_CORE_PWRDN_EN_BIT
+ msr NEOVERSE_N2_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc neoverse_n2_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Neoverse N2 cores. Must follow AAPCS.
+ */
+func neoverse_n2_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * the checking function of each erratum.
+ */
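+ /*
+ * Each report_errata line below expands (when its build flag is set)
+ * to a call to the matching check_errata_<id> routine, with the
+ * revision-variant available in x8, followed by the common print
+ * helper that reports the workaround as enabled or missing.
+ */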
+ report_errata ERRATA_N2_2002655, neoverse_n2, 2002655
+ report_errata ERRATA_N2_2067956, neoverse_n2, 2067956
+ report_errata ERRATA_N2_2025414, neoverse_n2, 2025414
+ report_errata ERRATA_N2_2189731, neoverse_n2, 2189731
+ report_errata ERRATA_N2_2138956, neoverse_n2, 2138956
+ report_errata ERRATA_N2_2138953, neoverse_n2, 2138953
+ report_errata ERRATA_N2_2242415, neoverse_n2, 2242415
+ report_errata ERRATA_N2_2138958, neoverse_n2, 2138958
+ report_errata ERRATA_N2_2242400, neoverse_n2, 2242400
+ report_errata ERRATA_N2_2280757, neoverse_n2, 2280757
+ report_errata ERRATA_N2_2326639, neoverse_n2, 2326639
+ report_errata ERRATA_N2_2376738, neoverse_n2, 2376738
+ report_errata ERRATA_N2_2388450, neoverse_n2, 2388450
+ report_errata WORKAROUND_CVE_2022_23960, neoverse_n2, cve_2022_23960
+ report_errata ERRATA_DSU_2313941, neoverse_n2, dsu_2313941
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc neoverse_n2_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Neoverse N2 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ASCII and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.neoverse_n2_regs, "aS"
+neoverse_n2_regs: /* The ASCII list of register names to be reported */
+ .asciz "cpupwrctlr_el1", ""
+
+func neoverse_n2_cpu_reg_dump
+ adr x6, neoverse_n2_regs
+ mrs x8, NEOVERSE_N2_CPUPWRCTLR_EL1
+ ret
+endfunc neoverse_n2_cpu_reg_dump
+
+declare_cpu_ops neoverse_n2, NEOVERSE_N2_MIDR, \
+ neoverse_n2_reset_func, \
+ neoverse_n2_core_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_n_common.S b/lib/cpus/aarch64/neoverse_n_common.S
new file mode 100644
index 0000000..b816342
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_n_common.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <neoverse_n_common.h>
+
+ .global is_scu_present_in_dsu
+
+/*
+ * Check if the SCU L3 Unit is present on the DSU
+ * 1-> SCU present
+ * 0-> SCU not present
+ *
+ * This function is implemented as a weak function in dsu_helpers.S
+ * and must be overridden for Neoverse Nx cores.
+ */
+
+func is_scu_present_in_dsu
+ mrs x0, CPUCFR_EL1
+ ubfx x0, x0, #SCU_SHIFT, #1
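+ /*
+ * CPUCFR_EL1.SCU reads 0 when the SCU/L3 is present, so invert the
+ * extracted bit to match the return convention documented above.
+ */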
+ eor x0, x0, #1
+ ret
+endfunc is_scu_present_in_dsu
diff --git a/lib/cpus/aarch64/neoverse_poseidon.S b/lib/cpus/aarch64/neoverse_poseidon.S
new file mode 100644
index 0000000..030293d
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_poseidon.S
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <neoverse_poseidon.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse Poseidon must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse Poseidon supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table NEOVERSE_POSEIDON_BHB_LOOP_COUNT, neoverse_poseidon
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func neoverse_poseidon_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, NEOVERSE_POSEIDON_CPUPWRCTLR_EL1
+ orr x0, x0, #NEOVERSE_POSEIDON_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr NEOVERSE_POSEIDON_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc neoverse_poseidon_core_pwr_dwn
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+func neoverse_poseidon_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Neoverse Poseidon generic vectors are overridden to apply
+ * errata mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_neoverse_poseidon
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret
+endfunc neoverse_poseidon_reset_func
+
+#if REPORT_ERRATA
+ /*
+ * Errata printing function for Neoverse Poseidon. Must follow AAPCS.
+ */
+func neoverse_poseidon_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * the checking function of each erratum.
+ */
+ report_errata WORKAROUND_CVE_2022_23960, neoverse_poseidon, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc neoverse_poseidon_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Neoverse-Poseidon specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.neoverse_poseidon_regs, "aS"
+neoverse_poseidon_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func neoverse_poseidon_cpu_reg_dump
+ adr x6, neoverse_poseidon_regs
+ mrs x8, NEOVERSE_POSEIDON_CPUECTLR_EL1
+ ret
+endfunc neoverse_poseidon_cpu_reg_dump
+
+declare_cpu_ops neoverse_poseidon, NEOVERSE_POSEIDON_MIDR, \
+ neoverse_poseidon_reset_func, \
+ neoverse_poseidon_core_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S
new file mode 100644
index 0000000..3282fbc
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -0,0 +1,628 @@
+/*
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <neoverse_v1.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse V1 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table NEOVERSE_V1_BHB_LOOP_COUNT, neoverse_v1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #1618635.
+ * This applies to revision r0p0 and is fixed in
+ * r1p0.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_1618635_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_1618635
+ cbz x0, 1f
+
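+ /*
+ * Each patch below programs one numbered slot: CPUPSELR_EL3 selects
+ * the slot, CPUPOR_EL3/CPUPMR_EL3 give the opcode pattern and mask to
+ * match, and CPUPCR_EL3 enables the patch (here, injecting a DMB SY).
+ */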
+ /* Inserts a DMB SY before and after MRS PAR_EL1 */
+ ldr x0, =0x0
+ msr NEOVERSE_V1_CPUPSELR_EL3, x0
+ ldr x0, =0xEE070F14
+ msr NEOVERSE_V1_CPUPOR_EL3, x0
+ ldr x0, =0xFFFF0FFF
+ msr NEOVERSE_V1_CPUPMR_EL3, x0
+ ldr x0, =0x4005027FF
+ msr NEOVERSE_V1_CPUPCR_EL3, x0
+
+ /* Inserts a DMB SY before STREX imm offset */
+ ldr x0, =0x1
+ msr NEOVERSE_V1_CPUPSELR_EL3, x0
+ ldr x0, =0x00e8400000
+ msr NEOVERSE_V1_CPUPOR_EL3, x0
+ ldr x0, =0x00fff00000
+ msr NEOVERSE_V1_CPUPMR_EL3, x0
+ ldr x0, =0x4001027FF
+ msr NEOVERSE_V1_CPUPCR_EL3, x0
+
+ /* Inserts a DMB SY before STREX[BHD]/STLEX* */
+ ldr x0, =0x2
+ msr NEOVERSE_V1_CPUPSELR_EL3, x0
+ ldr x0, =0x00e8c00040
+ msr NEOVERSE_V1_CPUPOR_EL3, x0
+ ldr x0, =0x00fff00040
+ msr NEOVERSE_V1_CPUPMR_EL3, x0
+ ldr x0, =0x4001027FF
+ msr NEOVERSE_V1_CPUPCR_EL3, x0
+
+ /* Inserts a DMB SY after STREX imm offset */
+ ldr x0, =0x3
+ msr NEOVERSE_V1_CPUPSELR_EL3, x0
+ ldr x0, =0x00e8400000
+ msr NEOVERSE_V1_CPUPOR_EL3, x0
+ ldr x0, =0x00fff00000
+ msr NEOVERSE_V1_CPUPMR_EL3, x0
+ ldr x0, =0x4004027FF
+ msr NEOVERSE_V1_CPUPCR_EL3, x0
+
+ /* Inserts a DMB SY after STREX[BHD]/STLEX* */
+ ldr x0, =0x4
+ msr NEOVERSE_V1_CPUPSELR_EL3, x0
+ ldr x0, =0x00e8c00040
+ msr NEOVERSE_V1_CPUPOR_EL3, x0
+ ldr x0, =0x00fff00040
+ msr NEOVERSE_V1_CPUPMR_EL3, x0
+ ldr x0, =0x4004027FF
+ msr NEOVERSE_V1_CPUPCR_EL3, x0
+
+ /* Synchronize to enable patches */
+ isb
+1:
+ ret x17
+endfunc errata_neoverse_v1_1618635_wa
+
+func check_errata_1618635
+ /* Applies to revision r0p0. */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_1618635
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #1774420.
+ * This applies to revisions r0p0 and r1p0, fixed in r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_1774420_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_1774420
+ cbz x0, 1f
+
+ /* Set bit 53 in CPUECTLR_EL1 */
+ mrs x1, NEOVERSE_V1_CPUECTLR_EL1
+ orr x1, x1, #NEOVERSE_V1_CPUECTLR_EL1_BIT_53
+ msr NEOVERSE_V1_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_neoverse_v1_1774420_wa
+
+func check_errata_1774420
+ /* Applies to r0p0 and r1p0. */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1774420
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #1791573.
+ * This applies to revisions r0p0 and r1p0, fixed in r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_1791573_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_1791573
+ cbz x0, 1f
+
+ /* Set bit 2 in ACTLR2_EL1 */
+ mrs x1, NEOVERSE_V1_ACTLR2_EL1
+ orr x1, x1, #NEOVERSE_V1_ACTLR2_EL1_BIT_2
+ msr NEOVERSE_V1_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_neoverse_v1_1791573_wa
+
+func check_errata_1791573
+ /* Applies to r0p0 and r1p0. */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1791573
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #1852267.
+ * This applies to revisions r0p0 and r1p0, fixed in r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_1852267_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_1852267
+ cbz x0, 1f
+
+ /* Set bit 28 in ACTLR2_EL1 */
+ mrs x1, NEOVERSE_V1_ACTLR2_EL1
+ orr x1, x1, #NEOVERSE_V1_ACTLR2_EL1_BIT_28
+ msr NEOVERSE_V1_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_neoverse_v1_1852267_wa
+
+func check_errata_1852267
+ /* Applies to r0p0 and r1p0. */
+ mov x1, #0x10
+ b cpu_rev_var_ls
+endfunc check_errata_1852267
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #1925756.
+ * This applies to revisions <= r1p1.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_1925756_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_1925756
+ cbz x0, 1f
+
+ /* Set bit 8 in CPUECTLR_EL1 */
+ mrs x1, NEOVERSE_V1_CPUECTLR_EL1
+ orr x1, x1, #NEOVERSE_V1_CPUECTLR_EL1_BIT_8
+ msr NEOVERSE_V1_CPUECTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_neoverse_v1_1925756_wa
+
+func check_errata_1925756
+ /* Applies to <= r1p1. */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_1925756
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Erratum #1940577
+ * This applies to revisions r1p0 - r1p1 and is open.
+ * It also exists in r0p0 but there is no fix in that
+ * revision.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_1940577_wa
+ /* Compare x0 against revisions r1p0 - r1p1 */
+ mov x17, x30
+ bl check_errata_1940577
+ cbz x0, 1f
+
+ mov x0, #0
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3900002
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #1
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #2
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ isb
+1:
+ ret x17
+endfunc errata_neoverse_v1_1940577_wa
+
+func check_errata_1940577
+ /* Applies to revisions r1p0 - r1p1. */
+ mov x1, #0x10
+ mov x2, #0x11
+ b cpu_rev_var_range
+endfunc check_errata_1940577
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #1966096
+ * This applies to revisions r1p0 - r1p1 and is open.
+ * It also exists in r0p0 but there is no workaround
+ * for that revision.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_1966096_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_1966096
+ cbz x0, 1f
+
+ /* Apply the workaround. */
+ mov x0, #0x3
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0xEE010F12
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0xFFFF0FFF
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x80000000003FF
+ msr S3_6_C15_C8_1, x0
+ isb
+
+1:
+ ret x17
+endfunc errata_neoverse_v1_1966096_wa
+
+func check_errata_1966096
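+ /* Applies to r1p0 - r1p1. */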
+ mov x1, #0x10
+ mov x2, #0x11
+ b cpu_rev_var_range
+endfunc check_errata_1966096
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #2139242.
+ * This applies to revisions r0p0, r1p0, and r1p1. It
+ * is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_2139242_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2139242
+ cbz x0, 1f
+
+ /* Apply the workaround. */
+ mov x0, #0x3
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0xEE720F14
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0xFFFF0FDF
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x40000005003FF
+ msr S3_6_C15_C8_1, x0
+ isb
+
+1:
+ ret x17
+endfunc errata_neoverse_v1_2139242_wa
+
+func check_errata_2139242
+ /* Applies to r0p0, r1p0, r1p1 */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_2139242
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #2108267.
+ * This applies to revisions r0p0, r1p0, and r1p1. It
+ * is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x1, x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_2108267_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2108267
+ cbz x0, 1f
+
+ /* Apply the workaround. */
+ mrs x1, NEOVERSE_V1_CPUECTLR_EL1
+ mov x0, #NEOVERSE_V1_CPUECTLR_EL1_PF_MODE_CNSRV
+ bfi x1, x0, #CPUECTLR_EL1_PF_MODE_LSB, #CPUECTLR_EL1_PF_MODE_WIDTH
+ msr NEOVERSE_V1_CPUECTLR_EL1, x1
+1:
+ ret x17
+endfunc errata_neoverse_v1_2108267_wa
+
+func check_errata_2108267
+ /* Applies to r0p0, r1p0, r1p1 */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_2108267
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #2216392.
+ * This applies to revisions r1p0 and r1p1 and is
+ * still open.
+ * This issue is also present in r0p0 but there is no
+ * workaround in that revision.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_neoverse_v1_2216392_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2216392
+ cbz x0, 1f
+
+ ldr x0, =0x5
+ msr S3_6_c15_c8_0, x0 /* CPUPSELR_EL3 */
+ ldr x0, =0x10F600E000
+ msr S3_6_c15_c8_2, x0 /* CPUPOR_EL3 */
+ ldr x0, =0x10FF80E000
+ msr S3_6_c15_c8_3, x0 /* CPUPMR_EL3 */
+ ldr x0, =0x80000000003FF
+ msr S3_6_c15_c8_1, x0 /* CPUPCR_EL3 */
+
+ isb
+1:
+ ret x17
+endfunc errata_neoverse_v1_2216392_wa
+
+func check_errata_2216392
+ /* Applies to revisions r1p0 and r1p1. */
+ mov x1, #CPU_REV(1, 0)
+ mov x2, #CPU_REV(1, 1)
+ b cpu_rev_var_range
+endfunc check_errata_2216392
+
+ /* -----------------------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #2294912.
+ * This applies to revisions r0p0, r1p0, and r1p1 and is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * -----------------------------------------------------------------
+ */
+func errata_neoverse_v1_2294912_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2294912
+ cbz x0, 1f
+
+ /* Set bit 0 in ACTLR2_EL1 */
+ mrs x1, NEOVERSE_V1_ACTLR2_EL1
+ orr x1, x1, #NEOVERSE_V1_ACTLR2_EL1_BIT_0
+ msr NEOVERSE_V1_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_neoverse_v1_2294912_wa
+
+func check_errata_2294912
+ /* Applies to r0p0, r1p0, and r1p1; the erratum is still open */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_2294912
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Neoverse V1 Errata #2372203.
+ * This applies to revisions <= r1p1 and is still open.
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * ----------------------------------------------------
+ */
+func errata_neoverse_v1_2372203_wa
+ /* Check workaround compatibility. */
+ mov x17, x30
+ bl check_errata_2372203
+ cbz x0, 1f
+
+ /* Set bit 40 in ACTLR2_EL1 */
+ mrs x1, NEOVERSE_V1_ACTLR2_EL1
+ orr x1, x1, #NEOVERSE_V1_ACTLR2_EL1_BIT_40
+ msr NEOVERSE_V1_ACTLR2_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_neoverse_v1_2372203_wa
+
+func check_errata_2372203
+ /* Applies to <= r1p1. */
+ mov x1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_2372203
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func neoverse_v1_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, NEOVERSE_V1_CPUPWRCTLR_EL1
+ orr x0, x0, #NEOVERSE_V1_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr NEOVERSE_V1_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc neoverse_v1_core_pwr_dwn
+
+ /*
+ * Errata printing function for Neoverse V1. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func neoverse_v1_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * the checking function of each erratum.
+ */
+ report_errata ERRATA_V1_1618635, neoverse_v1, 1618635
+ report_errata ERRATA_V1_1774420, neoverse_v1, 1774420
+ report_errata ERRATA_V1_1791573, neoverse_v1, 1791573
+ report_errata ERRATA_V1_1852267, neoverse_v1, 1852267
+ report_errata ERRATA_V1_1925756, neoverse_v1, 1925756
+ report_errata ERRATA_V1_1940577, neoverse_v1, 1940577
+ report_errata ERRATA_V1_1966096, neoverse_v1, 1966096
+ report_errata ERRATA_V1_2108267, neoverse_v1, 2108267
+ report_errata ERRATA_V1_2139242, neoverse_v1, 2139242
+ report_errata ERRATA_V1_2216392, neoverse_v1, 2216392
+ report_errata ERRATA_V1_2294912, neoverse_v1, 2294912
+ report_errata ERRATA_V1_2372203, neoverse_v1, 2372203
+ report_errata WORKAROUND_CVE_2022_23960, neoverse_v1, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc neoverse_v1_errata_report
+#endif
+
+func neoverse_v1_reset_func
+ mov x19, x30
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+ isb
+
+ /* Get the CPU revision and stash it in x18. */
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_V1_1618635
+ mov x0, x18
+ bl errata_neoverse_v1_1618635_wa
+#endif
+
+#if ERRATA_V1_1774420
+ mov x0, x18
+ bl errata_neoverse_v1_1774420_wa
+#endif
+
+#if ERRATA_V1_1791573
+ mov x0, x18
+ bl errata_neoverse_v1_1791573_wa
+#endif
+
+#if ERRATA_V1_1852267
+ mov x0, x18
+ bl errata_neoverse_v1_1852267_wa
+#endif
+
+#if ERRATA_V1_1925756
+ mov x0, x18
+ bl errata_neoverse_v1_1925756_wa
+#endif
+
+#if ERRATA_V1_1940577
+ mov x0, x18
+ bl errata_neoverse_v1_1940577_wa
+#endif
+
+#if ERRATA_V1_1966096
+ mov x0, x18
+ bl errata_neoverse_v1_1966096_wa
+#endif
+
+#if ERRATA_V1_2139242
+ mov x0, x18
+ bl errata_neoverse_v1_2139242_wa
+#endif
+
+#if ERRATA_V1_2108267
+ mov x0, x18
+ bl errata_neoverse_v1_2108267_wa
+#endif
+
+#if ERRATA_V1_2216392
+ mov x0, x18
+ bl errata_neoverse_v1_2216392_wa
+#endif
+
+#if ERRATA_V1_2294912
+ mov x0, x18
+ bl errata_neoverse_v1_2294912_wa
+#endif
+
+#if ERRATA_V1_2372203
+ mov x0, x18
+ bl errata_neoverse_v1_2372203_wa
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Neoverse-V1 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_neoverse_v1
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+
+ isb
+ ret x19
+endfunc neoverse_v1_reset_func
+
+ /* ---------------------------------------------
+ * This function provides Neoverse-V1 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.neoverse_v1_regs, "aS"
+neoverse_v1_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func neoverse_v1_cpu_reg_dump
+ adr x6, neoverse_v1_regs
+ mrs x8, NEOVERSE_V1_CPUECTLR_EL1
+ ret
+endfunc neoverse_v1_cpu_reg_dump
+
+declare_cpu_ops neoverse_v1, NEOVERSE_V1_MIDR, \
+ neoverse_v1_reset_func, \
+ neoverse_v1_core_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_v2.S b/lib/cpus/aarch64/neoverse_v2.S
new file mode 100644
index 0000000..4ea887f
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_v2.S
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <neoverse_v2.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse V2 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse V2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table NEOVERSE_V2_BHB_LOOP_COUNT, neoverse_v2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func neoverse_v2_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, NEOVERSE_V2_CPUPWRCTLR_EL1
+ orr x0, x0, #NEOVERSE_V2_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr NEOVERSE_V2_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc neoverse_v2_core_pwr_dwn
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+endfunc check_errata_cve_2022_23960
+
+func neoverse_v2_reset_func
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Neoverse V2 vectors are overridden to apply
+ * errata mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_neoverse_v2
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+ isb
+ ret
+endfunc neoverse_v2_reset_func
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Neoverse V2. Must follow AAPCS.
+ */
+func neoverse_v2_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * the checking function of each erratum.
+ */
+ report_errata WORKAROUND_CVE_2022_23960, neoverse_v2, cve_2022_23960
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc neoverse_v2_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Neoverse V2
+ * specific register information for crash
+ * reporting. It needs to return with x6
+ * pointing to a list of register names in ascii
+ * and x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.neoverse_v2_regs, "aS"
+neoverse_v2_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func neoverse_v2_cpu_reg_dump
+ adr x6, neoverse_v2_regs
+ mrs x8, NEOVERSE_V2_CPUECTLR_EL1
+ ret
+endfunc neoverse_v2_cpu_reg_dump
+
+declare_cpu_ops neoverse_v2, NEOVERSE_V2_MIDR, \
+ neoverse_v2_reset_func, \
+ neoverse_v2_core_pwr_dwn
diff --git a/lib/cpus/aarch64/qemu_max.S b/lib/cpus/aarch64/qemu_max.S
new file mode 100644
index 0000000..8948fda
--- /dev/null
+++ b/lib/cpus/aarch64/qemu_max.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+#include <qemu_max.h>
+
+func qemu_max_core_pwr_dwn
+ /* ---------------------------------------------
+ * Disable the Data Cache.
+ * ---------------------------------------------
+ */
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+
+ /* ---------------------------------------------
+ * Flush L1 cache to L2.
+ * ---------------------------------------------
+ */
+ mov x18, lr
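+ /*
+ * DCCISW selects the clean-and-invalidate by set/way operation
+ * performed by the dcsw_op_level1 helper on the level 1 data cache.
+ */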
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+ mov lr, x18
+ ret
+endfunc qemu_max_core_pwr_dwn
+
+func qemu_max_cluster_pwr_dwn
+ /* ---------------------------------------------
+ * Disable the Data Cache.
+ * ---------------------------------------------
+ */
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+
+ /* ---------------------------------------------
+ * Flush all caches to PoC.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ b dcsw_op_all
+endfunc qemu_max_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for QEMU "max". Must follow AAPCS.
+ */
+func qemu_max_errata_report
+ ret
+endfunc qemu_max_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cpu specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.qemu_max_regs, "aS"
+qemu_max_regs: /* The ascii list of register names to be reported */
+ .asciz "" /* no registers to report */
+
+func qemu_max_cpu_reg_dump
+ adr x6, qemu_max_regs
+ ret
+endfunc qemu_max_cpu_reg_dump
+
+
+/* cpu_ops for QEMU MAX */
+declare_cpu_ops qemu_max, QEMU_MAX_MIDR, CPU_NO_RESET_FUNC, \
+ qemu_max_core_pwr_dwn, \
+ qemu_max_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/rainier.S b/lib/cpus/aarch64/rainier.S
new file mode 100644
index 0000000..584ab97
--- /dev/null
+++ b/lib/cpus/aarch64/rainier.S
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <cpu_macros.S>
+#include <cpuamu.h>
+#include <rainier.h>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Rainier CPU must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Rainier CPU supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+/* --------------------------------------------------
+ * Disable speculative loads if Rainier supports
+ * SSBS.
+ *
+ * Shall clobber: x0.
+ * --------------------------------------------------
+ */
+func rainier_disable_speculative_loads
+ /* Check if the PE implements SSBS */
+ mrs x0, id_aa64pfr1_el1
+ tst x0, #(ID_AA64PFR1_EL1_SSBS_MASK << ID_AA64PFR1_EL1_SSBS_SHIFT)
+ b.eq 1f
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+1:
+ ret
+endfunc rainier_disable_speculative_loads
+
+ /* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Errata #1868343.
+ * This applies to revisions <= r4p0 of Neoverse N1.
+ * This workaround is the same as the workaround for
+ * errata 1262606 and 1275112 but applies to a wider
+ * revision range.
+ * Rainier r0p0 is based on Neoverse N1 r4p0, so the
+ * workaround checks for the r0p0 version of the Rainier CPU.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0, x1 & x17
+ * --------------------------------------------------
+ */
+func errata_n1_1868343_wa
+ /*
+ * Compare x0 against revision r4p0
+ */
+ mov x17, x30
+ bl check_errata_1868343
+ cbz x0, 1f
+ mrs x1, RAINIER_CPUACTLR_EL1
+ orr x1, x1, RAINIER_CPUACTLR_EL1_BIT_13
+ msr RAINIER_CPUACTLR_EL1, x1
+ isb
+1:
+ ret x17
+endfunc errata_n1_1868343_wa
+
+func check_errata_1868343
+ /* Applies to r0p0 of Rainier CPU */
+ mov x1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_1868343
+
+func rainier_reset_func
+ mov x19, x30
+
+ bl rainier_disable_speculative_loads
+
+ /* Forces all cacheable atomic instructions to be near */
+ mrs x0, RAINIER_CPUACTLR2_EL1
+ orr x0, x0, #RAINIER_CPUACTLR2_EL1_BIT_2
+ msr RAINIER_CPUACTLR2_EL1, x0
+ isb
+
+ bl cpu_get_rev_var
+ mov x18, x0
+
+#if ERRATA_N1_1868343
+ mov x0, x18
+ bl errata_n1_1868343_wa
+#endif
+
+#if ENABLE_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, actlr_el3
+ orr x0, x0, #RAINIER_ACTLR_AMEN_BIT
+ msr actlr_el3, x0
+
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ mrs x0, actlr_el2
+ orr x0, x0, #RAINIER_ACTLR_AMEN_BIT
+ msr actlr_el2, x0
+
+ /* Enable group0 counters */
+ mov x0, #RAINIER_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET_EL0, x0
+#endif
+
+ isb
+ ret x19
+endfunc rainier_reset_func
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func rainier_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ mrs x0, RAINIER_CPUPWRCTLR_EL1
+ orr x0, x0, #RAINIER_CORE_PWRDN_EN_MASK
+ msr RAINIER_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc rainier_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Rainier. Must follow AAPCS.
+ */
+func rainier_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * the checking function of each erratum.
+ */
+ report_errata ERRATA_N1_1868343, rainier, 1868343
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc rainier_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides Rainier specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.rainier_regs, "aS"
+rainier_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func rainier_cpu_reg_dump
+ adr x6, rainier_regs
+ mrs x8, RAINIER_CPUECTLR_EL1
+ ret
+endfunc rainier_cpu_reg_dump
+
+declare_cpu_ops rainier, RAINIER_MIDR, \
+ rainier_reset_func, \
+ rainier_core_pwr_dwn
diff --git a/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
new file mode 100644
index 0000000..0222818
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <services/arm_arch_svc.h>
+
+ .globl wa_cve_2017_5715_bpiall_vbar
+
+#define EMIT_BPIALL 0xee070fd5
+#define EMIT_SMC 0xe1600070
+#define ESR_EL3_A64_SMC0 0x5e000000
+
+ .macro apply_cve_2017_5715_wa _from_vector
+ /*
+ * Save register state to enable a call to AArch32 S-EL1 and return
+ * Identify the original calling vector in w2 (==_from_vector)
+ * Use w3-w6 for additional register state preservation while in S-EL1
+ */
+
+ /* Save GP regs */
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+ /* Identify the original exception vector */
+ mov w2, \_from_vector
+
+ /* Preserve 32-bit system registers in GP registers through the workaround */
+ mrs x3, esr_el3
+ mrs x4, spsr_el3
+ mrs x5, scr_el3
+ mrs x6, sctlr_el1
+
+ /*
+ * Preserve LR and ELR_EL3 registers in the GP regs context.
+ * Temporarily use the CTX_GPREG_SP_EL0 slot to preserve ELR_EL3
+ * through the workaround. This is OK because at this point the
+ * current state for this context's SP_EL0 is in the live system
+ * register, which is unmodified by the workaround.
+ */
+ mrs x7, elr_el3
+ stp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+ /*
+ * Load system registers for entry to S-EL1.
+ */
+
+ /* Mask all interrupts and set AArch32 Supervisor mode */
+ movz w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK)
+
+ /* Switch EL3 exception vectors while the workaround is executing. */
+ adr x9, wa_cve_2017_5715_bpiall_ret_vbar
+
+ /* Set up SCTLR_EL1 with MMU off and I$ on */
+ ldr x10, stub_sel1_sctlr
+
+ /* Land at the S-EL1 workaround stub */
+ adr x11, aarch32_stub
+
+ /*
+ * Setting SCR_EL3 to all zeroes means that the NS, RW
+ * and SMD bits are configured as expected.
+ */
+ msr scr_el3, xzr
+ msr spsr_el3, x8
+ msr vbar_el3, x9
+ msr sctlr_el1, x10
+ msr elr_el3, x11
+
+ eret
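+
+ /*
+ * The ERET above lands on aarch32_stub in S-EL1, which executes
+ * BPIALL (invalidating the branch predictor) and then SMC #0 to
+ * re-enter EL3 via wa_cve_2017_5715_bpiall_ret_vbar.
+ */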
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * This vector table is used at runtime to enter the workaround at
+ * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions. If the workaround
+ * is not enabled, the existing runtime exception vector table is used.
+ * ---------------------------------------------------------------------
+ */
+vector_base wa_cve_2017_5715_bpiall_vbar
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_sync_exception_sp_el0
+ b sync_exception_sp_el0
+ nop /* to force 8 byte alignment for the following stub */
+
+ /*
+ * Since each vector table entry is 128 bytes, we can store the
+ * stub context in the unused space to minimize memory footprint.
+ */
+stub_sel1_sctlr:
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+
+aarch32_stub:
+ .word EMIT_BPIALL
+ .word EMIT_SMC
+
+end_vector_entry bpiall_sync_exception_sp_el0
+
+vector_entry bpiall_irq_sp_el0
+ b irq_sp_el0
+end_vector_entry bpiall_irq_sp_el0
+
+vector_entry bpiall_fiq_sp_el0
+ b fiq_sp_el0
+end_vector_entry bpiall_fiq_sp_el0
+
+vector_entry bpiall_serror_sp_el0
+ b serror_sp_el0
+end_vector_entry bpiall_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_sync_exception_sp_elx
+ b sync_exception_sp_elx
+end_vector_entry bpiall_sync_exception_sp_elx
+
+vector_entry bpiall_irq_sp_elx
+ b irq_sp_elx
+end_vector_entry bpiall_irq_sp_elx
+
+vector_entry bpiall_fiq_sp_elx
+ b fiq_sp_elx
+end_vector_entry bpiall_fiq_sp_elx
+
+vector_entry bpiall_serror_sp_elx
+ b serror_sp_elx
+end_vector_entry bpiall_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_sync_exception_aarch64
+ apply_cve_2017_5715_wa 1
+end_vector_entry bpiall_sync_exception_aarch64
+
+vector_entry bpiall_irq_aarch64
+ apply_cve_2017_5715_wa 2
+end_vector_entry bpiall_irq_aarch64
+
+vector_entry bpiall_fiq_aarch64
+ apply_cve_2017_5715_wa 4
+end_vector_entry bpiall_fiq_aarch64
+
+vector_entry bpiall_serror_aarch64
+ apply_cve_2017_5715_wa 8
+end_vector_entry bpiall_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_sync_exception_aarch32
+ apply_cve_2017_5715_wa 1
+end_vector_entry bpiall_sync_exception_aarch32
+
+vector_entry bpiall_irq_aarch32
+ apply_cve_2017_5715_wa 2
+end_vector_entry bpiall_irq_aarch32
+
+vector_entry bpiall_fiq_aarch32
+ apply_cve_2017_5715_wa 4
+end_vector_entry bpiall_fiq_aarch32
+
+vector_entry bpiall_serror_aarch32
+ apply_cve_2017_5715_wa 8
+end_vector_entry bpiall_serror_aarch32
+
+ /* ---------------------------------------------------------------------
+ * This vector table is used while the workaround is executing. It
+ * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError
+ * workaround stubs to enter EL3 from S-EL1. It restores the previous
+ * EL3 state before proceeding with the normal runtime exception vector.
+ * ---------------------------------------------------------------------
+ */
+vector_base wa_cve_2017_5715_bpiall_ret_vbar
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_ret_sync_exception_sp_el0
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_sync_exception_sp_el0
+
+vector_entry bpiall_ret_irq_sp_el0
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_irq_sp_el0
+
+vector_entry bpiall_ret_fiq_sp_el0
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_fiq_sp_el0
+
+vector_entry bpiall_ret_serror_sp_el0
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_ret_sync_exception_sp_elx
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_sync_exception_sp_elx
+
+vector_entry bpiall_ret_irq_sp_elx
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_irq_sp_elx
+
+vector_entry bpiall_ret_fiq_sp_elx
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_fiq_sp_elx
+
+vector_entry bpiall_ret_serror_sp_elx
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_ret_sync_exception_aarch64
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_sync_exception_aarch64
+
+vector_entry bpiall_ret_irq_aarch64
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_irq_aarch64
+
+vector_entry bpiall_ret_fiq_aarch64
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_fiq_aarch64
+
+vector_entry bpiall_ret_serror_aarch64
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_ret_sync_exception_aarch32
+ /*
+ * w2 indicates which S-EL1 stub was run and thus which original vector was used
+ * w3-w6 contain saved system register state (esr_el3 in w3)
+ * Restore LR and ELR_EL3 register state from the GP regs context
+ */
+ ldp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+ /* Apply the restored system register state */
+ msr esr_el3, x3
+ msr spsr_el3, x4
+ msr scr_el3, x5
+ msr sctlr_el1, x6
+ msr elr_el3, x7
+
+ /*
+ * Workaround is complete, so swap VBAR_EL3 to point
+ * to workaround entry table in preparation for subsequent
+ * Sync/IRQ/FIQ/SError exceptions.
+ */
+ adr x0, wa_cve_2017_5715_bpiall_vbar
+ msr vbar_el3, x0
+
+ /*
+ * Restore all GP regs except x2 and x3 (esr). The value in x2
+ * indicates the type of the original exception.
+ */
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+ /* Fast path Sync exceptions. Static predictor will fall through. */
+ tbz w2, #0, workaround_not_sync
+
+ /*
+ * Check if SMC is coming from A64 state on #0
+ * with W0 = SMCCC_ARCH_WORKAROUND_1 or W0 = SMCCC_ARCH_WORKAROUND_3
+ *
+ * This sequence evaluates as:
+ * (W0==SMCCC_ARCH_WORKAROUND_1) || (W0==SMCCC_ARCH_WORKAROUND_3) ?
+ * (ESR_EL3==SMC#0) : (NE)
+ * allowing use of a single branch operation
+ */
+ orr w2, wzr, #SMCCC_ARCH_WORKAROUND_1
+ cmp w0, w2
+ orr w2, wzr, #SMCCC_ARCH_WORKAROUND_3
+ ccmp w0, w2, #4, ne
+ mov_imm w2, ESR_EL3_A64_SMC0
+ ccmp w3, w2, #0, eq
+ /* Static predictor will predict a fall through */
+ bne 1f
+ eret
+1:
+ /* restore x2 and x3 and continue sync exception handling */
+ b bpiall_ret_sync_exception_aarch32_tail
+end_vector_entry bpiall_ret_sync_exception_aarch32
+
+vector_entry bpiall_ret_irq_aarch32
+ b report_unhandled_interrupt
+
+ /*
+ * Post-workaround fan-out for non-sync exceptions
+ */
+workaround_not_sync:
+ tbnz w2, #3, bpiall_ret_serror
+ tbnz w2, #2, bpiall_ret_fiq
+ /* IRQ */
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b irq_aarch64
+
+bpiall_ret_fiq:
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b fiq_aarch64
+
+bpiall_ret_serror:
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b serror_aarch64
+end_vector_entry bpiall_ret_irq_aarch32
+
+vector_entry bpiall_ret_fiq_aarch32
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_fiq_aarch32
+
+vector_entry bpiall_ret_serror_aarch32
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_serror_aarch32
+
+ /*
+ * Part of bpiall_ret_sync_exception_aarch32 to save vector space
+ */
+func bpiall_ret_sync_exception_aarch32_tail
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b sync_exception_aarch64
+endfunc bpiall_ret_sync_exception_aarch32_tail
diff --git a/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
new file mode 100644
index 0000000..ed0a549
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <services/arm_arch_svc.h>
+
+ .globl wa_cve_2017_5715_mmu_vbar
+
+#define ESR_EL3_A64_SMC0 0x5e000000
+#define ESR_EL3_A32_SMC0 0x4e000000
+
+vector_base wa_cve_2017_5715_mmu_vbar
+
+ .macro apply_cve_2017_5715_wa _is_sync_exception _esr_el3_val
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ mrs x1, sctlr_el3
+ /* Disable MMU */
+ bic x1, x1, #SCTLR_M_BIT
+ msr sctlr_el3, x1
+ isb
+ /* Enable MMU */
+ orr x1, x1, #SCTLR_M_BIT
+ msr sctlr_el3, x1
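+ /*
+ * On the CPUs that select this workaround variant, toggling
+ * SCTLR_EL3.M as above has the side effect of invalidating the
+ * branch predictor, which is the actual CVE-2017-5715 mitigation.
+ */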
+ /*
+ * Defer ISB to avoid synchronizing twice in case we hit
+ * the workaround SMC call which will implicitly synchronize
+ * because of the ERET instruction.
+ */
+
+ /*
+ * Ensure SMC is coming from A64/A32 state on #0
+ * with W0 = SMCCC_ARCH_WORKAROUND_1 or W0 = SMCCC_ARCH_WORKAROUND_3
+ *
+ * This sequence evaluates as:
+ * (W0==SMCCC_ARCH_WORKAROUND_1) || (W0==SMCCC_ARCH_WORKAROUND_3) ?
+ * (ESR_EL3==SMC#0) : (NE)
+ * allowing use of a single branch operation
+ */
+ .if \_is_sync_exception
+ orr w1, wzr, #SMCCC_ARCH_WORKAROUND_1
+ cmp w0, w1
+ orr w1, wzr, #SMCCC_ARCH_WORKAROUND_3
+ ccmp w0, w1, #4, ne
+ mrs x0, esr_el3
+ mov_imm w1, \_esr_el3_val
+ ccmp w0, w1, #0, eq
+ /* Static predictor will predict a fall through */
+ bne 1f
+ exception_return
+1:
+ .endif
+
+ /*
+ * Synchronize now to enable the MMU. This is required
+ * to ensure the load pair below reads the data stored earlier.
+ */
+ isb
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_sp_el0
+ b sync_exception_sp_el0
+end_vector_entry mmu_sync_exception_sp_el0
+
+vector_entry mmu_irq_sp_el0
+ b irq_sp_el0
+end_vector_entry mmu_irq_sp_el0
+
+vector_entry mmu_fiq_sp_el0
+ b fiq_sp_el0
+end_vector_entry mmu_fiq_sp_el0
+
+vector_entry mmu_serror_sp_el0
+ b serror_sp_el0
+end_vector_entry mmu_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_sp_elx
+ b sync_exception_sp_elx
+end_vector_entry mmu_sync_exception_sp_elx
+
+vector_entry mmu_irq_sp_elx
+ b irq_sp_elx
+end_vector_entry mmu_irq_sp_elx
+
+vector_entry mmu_fiq_sp_elx
+ b fiq_sp_elx
+end_vector_entry mmu_fiq_sp_elx
+
+vector_entry mmu_serror_sp_elx
+ b serror_sp_elx
+end_vector_entry mmu_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
+ b sync_exception_aarch64
+end_vector_entry mmu_sync_exception_aarch64
+
+vector_entry mmu_irq_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+ b irq_aarch64
+end_vector_entry mmu_irq_aarch64
+
+vector_entry mmu_fiq_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+ b fiq_aarch64
+end_vector_entry mmu_fiq_aarch64
+
+vector_entry mmu_serror_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+ b serror_aarch64
+end_vector_entry mmu_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
+ b sync_exception_aarch32
+end_vector_entry mmu_sync_exception_aarch32
+
+vector_entry mmu_irq_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+ b irq_aarch32
+end_vector_entry mmu_irq_aarch32
+
+vector_entry mmu_fiq_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+ b fiq_aarch32
+end_vector_entry mmu_fiq_aarch32
+
+vector_entry mmu_serror_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+ b serror_aarch32
+end_vector_entry mmu_serror_aarch32
diff --git a/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S b/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S
new file mode 100644
index 0000000..ceb93f1
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <context.h>
+
+#if WORKAROUND_CVE_2022_23960
+ /*
+ * This macro applies the mitigation for CVE-2022-23960.
+ * The macro saves x2 to the CPU context.
+ * SP should point to the CPU context.
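+	 * The mitigation itself is the loop below: executing _bhb_loop_count
+	 * discarding branches overwrites any attacker-controlled branch
+	 * history before the real exception handler runs.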
+ */
+ .macro apply_cve_2022_23960_bhb_wa _bhb_loop_count
+ str x2, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+
+ /* CVE-BHB-NUM loop count */
+ mov x2, \_bhb_loop_count
+
+1:
+ /* b pc+4 part of the workaround */
+ b 2f
+2:
+ subs x2, x2, #1
+ bne 1b
+ speculation_barrier
+ ldr x2, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ .endm
+#endif /* WORKAROUND_CVE_2022_23960 */
diff --git a/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S b/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S
new file mode 100644
index 0000000..220fa11
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <services/arm_arch_svc.h>
+#include "wa_cve_2022_23960_bhb.S"
+
+ /*
+ * This macro is used to isolate the vector table for relevant CPUs
+ * used in the mitigation for CVE_2022_23960.
+ */
+ .macro wa_cve_2022_23960_bhb_vector_table _bhb_loop_count, _cpu
+
+ .globl wa_cve_vbar_\_cpu
+
+vector_base wa_cve_vbar_\_cpu
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry bhb_sync_exception_sp_el0_\_cpu
+ b sync_exception_sp_el0
+end_vector_entry bhb_sync_exception_sp_el0_\_cpu
+
+vector_entry bhb_irq_sp_el0_\_cpu
+ b irq_sp_el0
+end_vector_entry bhb_irq_sp_el0_\_cpu
+
+vector_entry bhb_fiq_sp_el0_\_cpu
+ b fiq_sp_el0
+end_vector_entry bhb_fiq_sp_el0_\_cpu
+
+vector_entry bhb_serror_sp_el0_\_cpu
+ b serror_sp_el0
+end_vector_entry bhb_serror_sp_el0_\_cpu
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry bhb_sync_exception_sp_elx_\_cpu
+ b sync_exception_sp_elx
+end_vector_entry bhb_sync_exception_sp_elx_\_cpu
+
+vector_entry bhb_irq_sp_elx_\_cpu
+ b irq_sp_elx
+end_vector_entry bhb_irq_sp_elx_\_cpu
+
+vector_entry bhb_fiq_sp_elx_\_cpu
+ b fiq_sp_elx
+end_vector_entry bhb_fiq_sp_elx_\_cpu
+
+vector_entry bhb_serror_sp_elx_\_cpu
+ b serror_sp_elx
+end_vector_entry bhb_serror_sp_elx_\_cpu
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry bhb_sync_exception_aarch64_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b sync_exception_aarch64
+end_vector_entry bhb_sync_exception_aarch64_\_cpu
+
+vector_entry bhb_irq_aarch64_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b irq_aarch64
+end_vector_entry bhb_irq_aarch64_\_cpu
+
+vector_entry bhb_fiq_aarch64_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b fiq_aarch64
+end_vector_entry bhb_fiq_aarch64_\_cpu
+
+vector_entry bhb_serror_aarch64_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b serror_aarch64
+end_vector_entry bhb_serror_aarch64_\_cpu
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry bhb_sync_exception_aarch32_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b sync_exception_aarch32
+end_vector_entry bhb_sync_exception_aarch32_\_cpu
+
+vector_entry bhb_irq_aarch32_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b irq_aarch32
+end_vector_entry bhb_irq_aarch32_\_cpu
+
+vector_entry bhb_fiq_aarch32_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b fiq_aarch32
+end_vector_entry bhb_fiq_aarch32_\_cpu
+
+vector_entry bhb_serror_aarch32_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b serror_aarch32
+end_vector_entry bhb_serror_aarch32_\_cpu
+ .endm
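+
+	/*
+	 * A CPU file instantiates the table, for example (illustrative; the
+	 * loop count macro is defined per CPU):
+	 *
+	 *	wa_cve_2022_23960_bhb_vector_table CORTEX_A76_BHB_LOOP_COUNT, cortex_a76
+	 *
+	 * which emits wa_cve_vbar_cortex_a76 for the CPU's reset handler to
+	 * install in VBAR_EL3 when the mitigation is enabled.
+	 */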
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
new file mode 100644
index 0000000..f19c16e
--- /dev/null
+++ b/lib/cpus/cpu-ops.mk
@@ -0,0 +1,1358 @@
+#
+# Copyright (c) 2014-2022, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Cortex A57 specific optimisation to skip L1 cache flush when
+# cluster is powered down.
+SKIP_A57_L1_FLUSH_PWR_DWN ?=0
+
+# Flag to disable the cache non-temporal hint.
+# It is enabled by default.
+A53_DISABLE_NON_TEMPORAL_HINT ?=1
+
+# Flag to disable the cache non-temporal hint.
+# It is enabled by default.
+A57_DISABLE_NON_TEMPORAL_HINT ?=1
+
+# Flag to enable higher performance non-cacheable load forwarding.
+# It is disabled by default.
+A57_ENABLE_NONCACHEABLE_LOAD_FWD ?= 0
+
+WORKAROUND_CVE_2017_5715 ?=1
+WORKAROUND_CVE_2018_3639 ?=1
+DYNAMIC_WORKAROUND_CVE_2018_3639 ?=0
+WORKAROUND_CVE_2022_23960 ?=1
+
+# Flags to indicate internal or external Last level cache
+# By default internal
+NEOVERSE_Nx_EXTERNAL_LLC ?=0
+
+# Process A57_ENABLE_NONCACHEABLE_LOAD_FWD flag
+$(eval $(call assert_boolean,A57_ENABLE_NONCACHEABLE_LOAD_FWD))
+$(eval $(call add_define,A57_ENABLE_NONCACHEABLE_LOAD_FWD))
+
+# Process SKIP_A57_L1_FLUSH_PWR_DWN flag
+$(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN))
+$(eval $(call add_define,SKIP_A57_L1_FLUSH_PWR_DWN))
+
+# Process A53_DISABLE_NON_TEMPORAL_HINT flag
+$(eval $(call assert_boolean,A53_DISABLE_NON_TEMPORAL_HINT))
+$(eval $(call add_define,A53_DISABLE_NON_TEMPORAL_HINT))
+
+# Process A57_DISABLE_NON_TEMPORAL_HINT flag
+$(eval $(call assert_boolean,A57_DISABLE_NON_TEMPORAL_HINT))
+$(eval $(call add_define,A57_DISABLE_NON_TEMPORAL_HINT))
+
+# Process WORKAROUND_CVE_2017_5715 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2017_5715))
+$(eval $(call add_define,WORKAROUND_CVE_2017_5715))
+
+# Process WORKAROUND_CVE_2018_3639 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2018_3639))
+$(eval $(call add_define,WORKAROUND_CVE_2018_3639))
+
+$(eval $(call assert_boolean,DYNAMIC_WORKAROUND_CVE_2018_3639))
+$(eval $(call add_define,DYNAMIC_WORKAROUND_CVE_2018_3639))
+
+# Process WORKAROUND_CVE_2022_23960 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2022_23960))
+$(eval $(call add_define,WORKAROUND_CVE_2022_23960))
+
+$(eval $(call assert_boolean,NEOVERSE_Nx_EXTERNAL_LLC))
+$(eval $(call add_define,NEOVERSE_Nx_EXTERNAL_LLC))
+
+ifneq (${DYNAMIC_WORKAROUND_CVE_2018_3639},0)
+ ifeq (${WORKAROUND_CVE_2018_3639},0)
+ $(error "Error: WORKAROUND_CVE_2018_3639 must be 1 if DYNAMIC_WORKAROUND_CVE_2018_3639 is 1")
+ endif
+endif
+
+# CPU Errata Build flags.
+# These should be enabled by the platform if the erratum workaround needs to be
+# applied.
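+#
+# For example, a platform integrating Cortex-A57 cores would typically set,
+# in its platform makefile (illustrative; the required set depends on the
+# exact CPUs and silicon revisions):
+#
+#   ERRATA_A57_859972 := 1
+#   ERRATA_A57_1319537 := 1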
+
+# Flag to apply erratum 794073 workaround when disabling mmu.
+ERRATA_A9_794073 ?=0
+
+# Flag to apply erratum 816470 workaround during power down. This erratum
+# applies only to revision >= r3p0 of the Cortex A15 cpu.
+ERRATA_A15_816470 ?=0
+
+# Flag to apply erratum 827671 workaround during reset. This erratum applies
+# only to revision >= r3p0 of the Cortex A15 cpu.
+ERRATA_A15_827671 ?=0
+
+# Flag to apply erratum 852421 workaround during reset. This erratum applies
+# only to revision <= r1p2 of the Cortex A17 cpu.
+ERRATA_A17_852421 ?=0
+
+# Flag to apply erratum 852423 workaround during reset. This erratum applies
+# only to revision <= r1p2 of the Cortex A17 cpu.
+ERRATA_A17_852423 ?=0
+
+# Flag to apply erratum 855472 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A35 cpu.
+ERRATA_A35_855472 ?=0
+
+# Flag to apply erratum 819472 workaround during reset. This erratum applies
+# only to revision <= r0p1 of the Cortex A53 cpu.
+ERRATA_A53_819472 ?=0
+
+# Flag to apply erratum 824069 workaround during reset. This erratum applies
+# only to revision <= r0p2 of the Cortex A53 cpu.
+ERRATA_A53_824069 ?=0
+
+# Flag to apply erratum 826319 workaround during reset. This erratum applies
+# only to revision <= r0p2 of the Cortex A53 cpu.
+ERRATA_A53_826319 ?=0
+
+# Flag to apply erratum 827319 workaround during reset. This erratum applies
+# only to revision <= r0p2 of the Cortex A53 cpu.
+ERRATA_A53_827319 ?=0
+
+# Flag to apply erratum 835769 workaround at compile and link time. This
+# erratum applies to revision <= r0p4 of the Cortex A53 cpu. Enabling this
+# workaround can lead the linker to create "*.stub" sections.
+ERRATA_A53_835769 ?=0
+
+# Flag to apply erratum 836870 workaround during reset. This erratum applies
+# only to revision <= r0p3 of the Cortex A53 cpu. From r0p4 and onwards, this
+# erratum workaround is enabled by default in hardware.
+ERRATA_A53_836870 ?=0
+
+# Flag to apply erratum 843419 workaround at link time.
+# This erratum applies to revision <= r0p4 of the Cortex A53 cpu. Enabling this
+# workaround could lead the linker to emit "*.stub" sections which are 4kB
+# aligned.
+ERRATA_A53_843419 ?=0
+
+# Flag to apply erratum 855873 workaround during reset. This erratum applies
+# to all revisions of the Cortex A53 CPU, but this firmware workaround only
+# works for revisions r0p3 and higher. Earlier revisions are taken care
+# of by the rich OS.
+ERRATA_A53_855873 ?=0
+
+# Flag to apply erratum 1530924 workaround during reset. This erratum applies
+# to all revisions of Cortex A53 cpu.
+ERRATA_A53_1530924 ?=0
+
+# Flag to apply erratum 768277 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A55 cpu.
+ERRATA_A55_768277 ?=0
+
+# Flag to apply erratum 778703 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A55 cpu.
+ERRATA_A55_778703 ?=0
+
+# Flag to apply erratum 798797 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A55 cpu.
+ERRATA_A55_798797 ?=0
+
+# Flag to apply erratum 846532 workaround during reset. This erratum applies
+# only to revision <= r0p1 of the Cortex A55 cpu.
+ERRATA_A55_846532 ?=0
+
+# Flag to apply erratum 903758 workaround during reset. This erratum applies
+# only to revision <= r0p1 of the Cortex A55 cpu.
+ERRATA_A55_903758 ?=0
+
+# Flag to apply erratum 1221012 workaround during reset. This erratum applies
+# only to revision <= r1p0 of the Cortex A55 cpu.
+ERRATA_A55_1221012 ?=0
+
+# Flag to apply erratum 1530923 workaround during reset. This erratum applies
+# to all revisions of Cortex A55 cpu.
+ERRATA_A55_1530923 ?=0
+
+# Flag to apply erratum 806969 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
+ERRATA_A57_806969 ?=0
+
+# Flag to apply erratum 813419 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
+ERRATA_A57_813419 ?=0
+
+# Flag to apply erratum 813420 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
+ERRATA_A57_813420 ?=0
+
+# Flag to apply erratum 814670 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
+ERRATA_A57_814670 ?=0
+
+# Flag to apply erratum 817169 workaround during power down. This erratum
+# applies only to revision <= r0p1 of the Cortex A57 cpu.
+ERRATA_A57_817169 ?=0
+
+# Flag to apply erratum 826974 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A57 cpu.
+ERRATA_A57_826974 ?=0
+
+# Flag to apply erratum 826977 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A57 cpu.
+ERRATA_A57_826977 ?=0
+
+# Flag to apply erratum 828024 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A57 cpu.
+ERRATA_A57_828024 ?=0
+
+# Flag to apply erratum 829520 workaround during reset. This erratum applies
+# only to revision <= r1p2 of the Cortex A57 cpu.
+ERRATA_A57_829520 ?=0
+
+# Flag to apply erratum 833471 workaround during reset. This erratum applies
+# only to revision <= r1p2 of the Cortex A57 cpu.
+ERRATA_A57_833471 ?=0
+
+# Flag to apply erratum 859972 workaround during reset. This erratum applies
+# only to revision <= r1p3 of the Cortex A57 cpu.
+ERRATA_A57_859972 ?=0
+
+# Flag to apply erratum 1319537 workaround during reset. This erratum applies
+# to all revisions of Cortex A57 cpu.
+ERRATA_A57_1319537 ?=0
+
+# Flag to apply erratum 859971 workaround during reset. This erratum applies
+# only to revision <= r0p3 of the Cortex A72 cpu.
+ERRATA_A72_859971 ?=0
+
+# Flag to apply erratum 1319367 workaround during reset. This erratum applies
+# to all revisions of Cortex A72 cpu.
+ERRATA_A72_1319367 ?=0
+
+# Flag to apply erratum 852427 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A73 cpu.
+ERRATA_A73_852427 ?=0
+
+# Flag to apply erratum 855423 workaround during reset. This erratum applies
+# only to revision <= r0p1 of the Cortex A73 cpu.
+ERRATA_A73_855423 ?=0
+
+# Flag to apply erratum 764081 workaround during reset. This erratum applies
+# only to revision <= r0p0 of the Cortex A75 cpu.
+ERRATA_A75_764081 ?=0
+
+# Flag to apply erratum 790748 workaround during reset. This erratum applies
+# only to revision <= r0p0 of the Cortex A75 cpu.
+ERRATA_A75_790748 ?=0
+
+# Flag to apply erratum 1073348 workaround during reset. This erratum applies
+# only to revision <= r1p0 of the Cortex A76 cpu.
+ERRATA_A76_1073348 ?=0
+
+# Flag to apply erratum 1130799 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Cortex A76 cpu.
+ERRATA_A76_1130799 ?=0
+
+# Flag to apply erratum 1220197 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Cortex A76 cpu.
+ERRATA_A76_1220197 ?=0
+
+# Flag to apply erratum 1257314 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Cortex A76 cpu.
+ERRATA_A76_1257314 ?=0
+
+# Flag to apply erratum 1262606 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Cortex A76 cpu.
+ERRATA_A76_1262606 ?=0
+
+# Flag to apply erratum 1262888 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Cortex A76 cpu.
+ERRATA_A76_1262888 ?=0
+
+# Flag to apply erratum 1275112 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Cortex A76 cpu.
+ERRATA_A76_1275112 ?=0
+
+# Flag to apply erratum 1286807 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Cortex A76 cpu.
+ERRATA_A76_1286807 ?=0
+
+# Flag to apply erratum 1791580 workaround during reset. This erratum applies
+# only to revision <= r4p0 of the Cortex A76 cpu.
+ERRATA_A76_1791580 ?=0
+
+# Flag to apply erratum 1165522 workaround during reset. This erratum applies
+# to all revisions of Cortex A76 cpu.
+ERRATA_A76_1165522 ?=0
+
+# Flag to apply erratum 1868343 workaround during reset. This erratum applies
+# only to revision <= r4p0 of the Cortex A76 cpu.
+ERRATA_A76_1868343 ?=0
+
+# Flag to apply erratum 1946160 workaround during reset. This erratum applies
+# only to revisions r3p0 - r4p1 of the Cortex A76 cpu.
+ERRATA_A76_1946160 ?=0
+
+# Flag to apply erratum 2743102 workaround during powerdown. This erratum
+# applies to all revisions <= r4p1 of the Cortex A76 cpu and is still open.
+ERRATA_A76_2743102 ?=0
+
+# Flag to apply erratum 1508412 workaround during reset. This erratum applies
+# only to revision <= r1p0 of the Cortex A77 cpu.
+ERRATA_A77_1508412 ?=0
+
+# Flag to apply erratum 1925769 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A77 cpu.
+ERRATA_A77_1925769 ?=0
+
+# Flag to apply erratum 1946167 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A77 cpu.
+ERRATA_A77_1946167 ?=0
+
+# Flag to apply erratum 1791578 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1; it is still open.
+ERRATA_A77_1791578 ?=0
+
+# Flag to apply erratum 2356587 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1; it is still open.
+ERRATA_A77_2356587 ?=0
+
+# Flag to apply erratum 1800714 workaround during reset. This erratum applies
+# to revisions <= r1p1 of the Cortex A77 cpu.
+ERRATA_A77_1800714 ?=0
+
+# Flag to apply erratum 2743100 workaround during power down. This erratum
+# applies to revisions r0p0, r1p0, and r1p1; it is still open.
+ERRATA_A77_2743100 ?=0
+
+# Flag to apply erratum 1688305 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the A78 cpu.
+ERRATA_A78_1688305 ?=0
+
+# Flag to apply erratum 1941498 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1 of the A78 cpu.
+ERRATA_A78_1941498 ?=0
+
+# Flag to apply erratum 1951500 workaround during reset. This erratum applies
+# to revisions r1p0 and r1p1 of the A78 cpu. The issue is present in r0p0 as
+# well but there is no workaround for that revision.
+ERRATA_A78_1951500 ?=0
+
+# Flag to apply erratum 1821534 workaround during reset. This erratum applies
+# to revisions r0p0 and r1p0 of the A78 cpu.
+ERRATA_A78_1821534 ?=0
+
+# Flag to apply erratum 1952683 workaround during reset. This erratum applies
+# to revision r0p0 of the A78 cpu and was fixed in the revision r1p0.
+ERRATA_A78_1952683 ?=0
+
+# Flag to apply erratum 2132060 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu. It is still open.
+ERRATA_A78_2132060 ?=0
+
+# Flag to apply erratum 2242635 workaround during reset. This erratum applies
+# to revisions r1p0, r1p1, and r1p2 of the A78 cpu and is open. The issue is
+# present in r0p0 as well but there is no workaround for that revision.
+ERRATA_A78_2242635 ?=0
+
+# Flag to apply erratum 2376745 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu. It is still open.
+ERRATA_A78_2376745 ?=0
+
+# Flag to apply erratum 2395406 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu. It is still open.
+ERRATA_A78_2395406 ?=0
+
+# Flag to apply erratum 1941500 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1 of the A78 AE cpu. It is still open.
+ERRATA_A78_AE_1941500 ?=0
+
+# Flag to apply erratum 1951502 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1 of the A78 AE cpu. It is still open.
+ERRATA_A78_AE_1951502 ?=0
+
+# Flag to apply erratum 2376748 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1 of the A78 AE cpu. It is still open.
+ERRATA_A78_AE_2376748 ?=0
+
+# Flag to apply erratum 2395408 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1 of the A78 AE cpu. It is still open.
+ERRATA_A78_AE_2395408 ?=0
+
+# Flag to apply erratum 2132064 workaround during reset. This erratum applies
+# to revisions r0p1 and r0p2 of the A78C cpu. It is still open.
+ERRATA_A78C_2132064 ?=0
+
+# Flag to apply erratum 2242638 workaround during reset. This erratum applies
+# to revisions r0p1 and r0p2 of the A78C cpu. It is still open.
+ERRATA_A78C_2242638 ?=0
+
+# Flag to apply erratum 2376749 workaround during reset. This erratum applies
+# to revisions r0p1 and r0p2 of the A78C cpu. It is still open.
+ERRATA_A78C_2376749 ?=0
+
+# Flag to apply erratum 2395411 workaround during reset. This erratum applies
+# to revisions r0p1 and r0p2 of the A78C cpu. It is still open.
+ERRATA_A78C_2395411 ?=0
+
+# Flag to apply erratum 1821534 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the X1 cpu and fixed in r1p1.
+ERRATA_X1_1821534 ?=0
+
+# Flag to apply erratum 1688305 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the X1 cpu and fixed in r1p1.
+ERRATA_X1_1688305 ?=0
+
+# Flag to apply erratum 1827429 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the X1 cpu and fixed in r1p1.
+ERRATA_X1_1827429 ?=0
+
+# Flag to apply erratum 1043202 (T32 CLREX) workaround during reset. This
+# erratum applies only to revisions r0p0 and r1p0 of the Neoverse N1 cpu.
+ERRATA_N1_1043202 ?=0
+
+# Flag to apply erratum 1073348 workaround during reset. This erratum applies
+# only to revisions r0p0 and r1p0 of the Neoverse N1 cpu.
+ERRATA_N1_1073348 ?=0
+
+# Flag to apply erratum 1130799 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Neoverse N1 cpu.
+ERRATA_N1_1130799 ?=0
+
+# Flag to apply erratum 1165347 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Neoverse N1 cpu.
+ERRATA_N1_1165347 ?=0
+
+# Flag to apply erratum 1207823 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Neoverse N1 cpu.
+ERRATA_N1_1207823 ?=0
+
+# Flag to apply erratum 1220197 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Neoverse N1 cpu.
+ERRATA_N1_1220197 ?=0
+
+# Flag to apply erratum 1257314 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Neoverse N1 cpu.
+ERRATA_N1_1257314 ?=0
+
+# Flag to apply erratum 1262606 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Neoverse N1 cpu.
+ERRATA_N1_1262606 ?=0
+
+# Flag to apply erratum 1262888 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Neoverse N1 cpu.
+ERRATA_N1_1262888 ?=0
+
+# Flag to apply erratum 1275112 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Neoverse N1 cpu.
+ERRATA_N1_1275112 ?=0
+
+# Flag to apply erratum 1315703 workaround during reset. This erratum applies
+# to revisions before r3p1 of the Neoverse N1 cpu.
+ERRATA_N1_1315703 ?=0
+
+# Flag to apply erratum 1542419 workaround during reset. This erratum applies
+# to revisions r3p0 - r4p0 of the Neoverse N1 cpu.
+ERRATA_N1_1542419 ?=0
+
+# Flag to apply erratum 1868343 workaround during reset. This erratum applies
+# to revision <= r4p0 of the Neoverse N1 cpu.
+ERRATA_N1_1868343 ?=0
+
+# Flag to apply erratum 1946160 workaround during reset. This erratum applies
+# to revisions r3p0, r3p1, r4p0, and r4p1 of the Neoverse N1 cpu. The issue
+# exists in revisions r0p0, r1p0, and r2p0 as well but there is no workaround.
+ERRATA_N1_1946160 ?=0
+
+# Flag to apply erratum 2743102 workaround during powerdown. This erratum
+# applies to all revisions <= r4p1 of the Neoverse N1 cpu and is still open.
+ERRATA_N1_2743102 ?=0
+
+# Flag to apply erratum 2002655 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu; it is still open.
+ERRATA_N2_2002655 ?=0
+
+# Flag to apply erratum 1618635 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse V1 cpu and was fixed in the revision r1p0.
+ERRATA_V1_1618635 ?=0
+
+# Flag to apply erratum 1774420 workaround during reset. This erratum applies
+# to revisions r0p0 and r1p0 of the Neoverse V1 core, and was fixed in r1p1.
+ERRATA_V1_1774420 ?=0
+
+# Flag to apply erratum 1791573 workaround during reset. This erratum applies
+# to revisions r0p0 and r1p0 of the Neoverse V1 core, and was fixed in r1p1.
+ERRATA_V1_1791573 ?=0
+
+# Flag to apply erratum 1852267 workaround during reset. This erratum applies
+# to revisions r0p0 and r1p0 of the Neoverse V1 core, and was fixed in r1p1.
+ERRATA_V1_1852267 ?=0
+
+# Flag to apply erratum 1925756 workaround during reset. This needs to be
+# enabled for r0p0, r1p0, and r1p1 of the Neoverse V1 core; it is still open.
+ERRATA_V1_1925756 ?=0
+
+# Flag to apply erratum 1940577 workaround during reset. This erratum applies
+# to revisions r1p0 and r1p1 of the Neoverse V1 cpu.
+ERRATA_V1_1940577 ?=0
+
+# Flag to apply erratum 1966096 workaround during reset. This erratum applies
+# to revisions r1p0 and r1p1 of the Neoverse V1 CPU and is open. This issue
+# exists in r0p0 as well but there is no workaround for that revision.
+ERRATA_V1_1966096 ?=0
+
+# Flag to apply erratum 2139242 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1 of the Neoverse V1 cpu and is still open.
+ERRATA_V1_2139242 ?=0
+
+# Flag to apply erratum 2108267 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1 of the Neoverse V1 cpu and is still open.
+ERRATA_V1_2108267 ?=0
+
+# Flag to apply erratum 2216392 workaround during reset. This erratum applies
+# to revisions r1p0 and r1p1 of the Neoverse V1 cpu and is still open. This
+# issue exists in r0p0 as well but there is no workaround for that revision.
+ERRATA_V1_2216392 ?=0
+
+# Flag to apply erratum 2294912 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1 of the Neoverse V1 cpu and is still open.
+ERRATA_V1_2294912 ?=0
+
+# Flag to apply erratum 2372203 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r1p1 of the Neoverse V1 cpu and is still open.
+ERRATA_V1_2372203 ?=0
+
+# Flag to apply erratum 1987031 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
+ERRATA_A710_1987031 ?=0
+
+# Flag to apply erratum 2081180 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
+ERRATA_A710_2081180 ?=0
+
+# Flag to apply erratum 2083908 workaround during reset. This erratum applies
+# to revision r2p0 of the Cortex-A710 cpu and is still open.
+ERRATA_A710_2083908 ?=0
+
+# Flag to apply erratum 2058056 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
+ERRATA_A710_2058056 ?=0
+
+# Flag to apply erratum 2055002 workaround during reset. This erratum applies
+# to revisions r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
+ERRATA_A710_2055002 ?=0
+
+# Flag to apply erratum 2017096 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
+ERRATA_A710_2017096 ?=0
+
+# Flag to apply erratum 2267065 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+ERRATA_A710_2267065 ?=0
+
+# Flag to apply erratum 2136059 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+ERRATA_A710_2136059 ?=0
+
+# Flag to apply erratum 2147715 workaround during reset. This erratum applies
+# to revision r2p0 of the Cortex-A710 CPU and is fixed in revision r2p1.
+ERRATA_A710_2147715 ?=0
+
+# Flag to apply erratum 2216384 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+ERRATA_A710_2216384 ?=0
+
+# Flag to apply erratum 2282622 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+ERRATA_A710_2282622 ?=0
+
+# Flag to apply erratum 2291219 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+ERRATA_A710_2291219 ?=0
+
+# Flag to apply erratum 2008768 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+ERRATA_A710_2008768 ?=0
+
+# Flag to apply erratum 2371105 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+ERRATA_A710_2371105 ?=0
+
+# Flag to apply erratum 2067956 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2067956 ?=0
+
+# Flag to apply erratum 2025414 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2025414 ?=0
+
+# Flag to apply erratum 2189731 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2189731 ?=0
+
+# Flag to apply erratum 2138956 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2138956 ?=0
+
+# Flag to apply erratum 2138953 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2138953 ?=0
+
+# Flag to apply erratum 2242415 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2242415 ?=0
+
+# Flag to apply erratum 2138958 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2138958 ?=0
+
+# Flag to apply erratum 2242400 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2242400 ?=0
+
+# Flag to apply erratum 2280757 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is still open.
+ERRATA_N2_2280757 ?=0
+
+# Flag to apply erratum 2326639 workaround during powerdown. This erratum
+# applies to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
+ERRATA_N2_2326639 ?=0
+
+# Flag to apply erratum 2376738 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu; it is fixed in r0p1.
+ERRATA_N2_2376738 ?=0
+
+# Flag to apply erratum 2388450 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu; it is fixed in r0p1.
+ERRATA_N2_2388450 ?=0
+
+# Flag to apply erratum 2002765 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r2p0 of the Cortex-X2 cpu and is still open.
+ERRATA_X2_2002765 ?=0
+
+# Flag to apply erratum 2058056 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r2p0 of the Cortex-X2 cpu and is still open.
+ERRATA_X2_2058056 ?=0
+
+# Flag to apply erratum 2083908 workaround during reset. This erratum applies
+# to revision r2p0 of the Cortex-X2 cpu and is still open.
+ERRATA_X2_2083908 ?=0
+
+# Flag to apply erratum 2017096 workaround during reset. This erratum applies
+# only to revisions r0p0, r1p0 and r2p0 of the Cortex-X2 cpu; it is fixed in
+# r2p1.
+ERRATA_X2_2017096 ?=0
+
+# Flag to apply erratum 2081180 workaround during reset. This erratum applies
+# only to revisions r0p0, r1p0 and r2p0 of the Cortex-X2 cpu; it is fixed in
+# r2p1.
+ERRATA_X2_2081180 ?=0
+
+# Flag to apply erratum 2216384 workaround during reset. This erratum applies
+# only to revisions r0p0, r1p0 and r2p0 of the Cortex-X2 cpu; it is fixed in
+# r2p1.
+ERRATA_X2_2216384 ?=0
+
+# Flag to apply erratum 2147715 workaround during reset. This erratum applies
+# only to revision r2p0 of the Cortex-X2 cpu; it is fixed in r2p1.
+ERRATA_X2_2147715 ?=0
+
+# Flag to apply erratum 2371105 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-X2 cpu and is fixed in r2p1.
+ERRATA_X2_2371105 ?=0
+
+# Flag to apply erratum 2313909 workaround on powerdown. This erratum applies
+# to revisions r0p0 and r1p0 of the Cortex-X3 cpu; it is fixed in r1p1.
+ERRATA_X3_2313909 ?=0
+
+# Flag to apply erratum 1922240 workaround during reset. This erratum applies
+# to revision r0p0 of the Cortex-A510 cpu and is fixed in r0p1.
+ERRATA_A510_1922240 ?=0
+
+# Flag to apply erratum 2288014 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3 and r1p0 of the Cortex-A510 cpu and is
+# fixed in r1p1.
+ERRATA_A510_2288014 ?=0
+
+# Flag to apply erratum 2042739 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1 and r0p2 of the Cortex-A510 cpu and is fixed in r0p3.
+ERRATA_A510_2042739 ?=0
+
+# Flag to apply erratum 2041909 workaround during reset. This erratum applies
+# to revision r0p2 of the Cortex-A510 cpu and is fixed in r0p3. The issue is
+# present in r0p0 and r0p1 but there is no workaround for those revisions.
+ERRATA_A510_2041909 ?=0
+
+# Flag to apply erratum 2250311 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3 and r1p0, and is fixed in r1p1.
+ERRATA_A510_2250311 ?=0
+
+# Flag to apply erratum 2218950 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3 and r1p0, and is fixed in r1p1.
+ERRATA_A510_2218950 ?=0
+
+# Flag to apply erratum 2172148 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3 and r1p0, and is fixed in r1p1.
+ERRATA_A510_2172148 ?=0
+
+# Flag to apply erratum 2347730 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3, r1p0 and r1p1 of the Cortex-A510 CPU,
+# and is fixed in r1p2.
+ERRATA_A510_2347730 ?=0
+
+# Flag to apply erratum 2371937 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3, r1p0, and r1p1. It is fixed in r1p2.
+ERRATA_A510_2371937 ?=0
+
+# Flag to apply erratum 2666669 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3, r1p0, and r1p1. It is fixed in r1p2.
+ERRATA_A510_2666669 ?=0
+
+# Flag to apply DSU erratum 798953. This erratum applies to DSU revision r0p0.
+# Applying the workaround results in higher DSU power consumption on idle.
+ERRATA_DSU_798953 ?=0
+
+# Flag to apply DSU erratum 936184. This erratum applies to DSUs containing
+# the ACP interface and revision < r2p0. Applying the workaround results in
+# higher DSU power consumption on idle.
+ERRATA_DSU_936184 ?=0
+
+# Flag to apply DSU erratum 2313941. This erratum applies to DSU revisions
+# r0p0, r1p0, r2p0, r2p1, r3p0, r3p1 and is still open. Applying the workaround
+# results in higher DSU power consumption on idle.
+ERRATA_DSU_2313941 ?=0
+
+# Process ERRATA_A9_794073 flag
+$(eval $(call assert_boolean,ERRATA_A9_794073))
+$(eval $(call add_define,ERRATA_A9_794073))
+
+# Process ERRATA_A15_816470 flag
+$(eval $(call assert_boolean,ERRATA_A15_816470))
+$(eval $(call add_define,ERRATA_A15_816470))
+
+# Process ERRATA_A15_827671 flag
+$(eval $(call assert_boolean,ERRATA_A15_827671))
+$(eval $(call add_define,ERRATA_A15_827671))
+
+# Process ERRATA_A17_852421 flag
+$(eval $(call assert_boolean,ERRATA_A17_852421))
+$(eval $(call add_define,ERRATA_A17_852421))
+
+# Process ERRATA_A17_852423 flag
+$(eval $(call assert_boolean,ERRATA_A17_852423))
+$(eval $(call add_define,ERRATA_A17_852423))
+
+# Process ERRATA_A35_855472 flag
+$(eval $(call assert_boolean,ERRATA_A35_855472))
+$(eval $(call add_define,ERRATA_A35_855472))
+
+# Process ERRATA_A53_819472 flag
+$(eval $(call assert_boolean,ERRATA_A53_819472))
+$(eval $(call add_define,ERRATA_A53_819472))
+
+# Process ERRATA_A53_824069 flag
+$(eval $(call assert_boolean,ERRATA_A53_824069))
+$(eval $(call add_define,ERRATA_A53_824069))
+
+# Process ERRATA_A53_826319 flag
+$(eval $(call assert_boolean,ERRATA_A53_826319))
+$(eval $(call add_define,ERRATA_A53_826319))
+
+# Process ERRATA_A53_827319 flag
+$(eval $(call assert_boolean,ERRATA_A53_827319))
+$(eval $(call add_define,ERRATA_A53_827319))
+
+# Process ERRATA_A53_835769 flag
+$(eval $(call assert_boolean,ERRATA_A53_835769))
+$(eval $(call add_define,ERRATA_A53_835769))
+
+# Process ERRATA_A53_836870 flag
+$(eval $(call assert_boolean,ERRATA_A53_836870))
+$(eval $(call add_define,ERRATA_A53_836870))
+
+# Process ERRATA_A53_843419 flag
+$(eval $(call assert_boolean,ERRATA_A53_843419))
+$(eval $(call add_define,ERRATA_A53_843419))
+
+# Process ERRATA_A53_855873 flag
+$(eval $(call assert_boolean,ERRATA_A53_855873))
+$(eval $(call add_define,ERRATA_A53_855873))
+
+# Process ERRATA_A53_1530924 flag
+$(eval $(call assert_boolean,ERRATA_A53_1530924))
+$(eval $(call add_define,ERRATA_A53_1530924))
+
+# Process ERRATA_A55_768277 flag
+$(eval $(call assert_boolean,ERRATA_A55_768277))
+$(eval $(call add_define,ERRATA_A55_768277))
+
+# Process ERRATA_A55_778703 flag
+$(eval $(call assert_boolean,ERRATA_A55_778703))
+$(eval $(call add_define,ERRATA_A55_778703))
+
+# Process ERRATA_A55_798797 flag
+$(eval $(call assert_boolean,ERRATA_A55_798797))
+$(eval $(call add_define,ERRATA_A55_798797))
+
+# Process ERRATA_A55_846532 flag
+$(eval $(call assert_boolean,ERRATA_A55_846532))
+$(eval $(call add_define,ERRATA_A55_846532))
+
+# Process ERRATA_A55_903758 flag
+$(eval $(call assert_boolean,ERRATA_A55_903758))
+$(eval $(call add_define,ERRATA_A55_903758))
+
+# Process ERRATA_A55_1221012 flag
+$(eval $(call assert_boolean,ERRATA_A55_1221012))
+$(eval $(call add_define,ERRATA_A55_1221012))
+
+# Process ERRATA_A55_1530923 flag
+$(eval $(call assert_boolean,ERRATA_A55_1530923))
+$(eval $(call add_define,ERRATA_A55_1530923))
+
+# Process ERRATA_A57_806969 flag
+$(eval $(call assert_boolean,ERRATA_A57_806969))
+$(eval $(call add_define,ERRATA_A57_806969))
+
+# Process ERRATA_A57_813419 flag
+$(eval $(call assert_boolean,ERRATA_A57_813419))
+$(eval $(call add_define,ERRATA_A57_813419))
+
+# Process ERRATA_A57_813420 flag
+$(eval $(call assert_boolean,ERRATA_A57_813420))
+$(eval $(call add_define,ERRATA_A57_813420))
+
+# Process ERRATA_A57_814670 flag
+$(eval $(call assert_boolean,ERRATA_A57_814670))
+$(eval $(call add_define,ERRATA_A57_814670))
+
+# Process ERRATA_A57_817169 flag
+$(eval $(call assert_boolean,ERRATA_A57_817169))
+$(eval $(call add_define,ERRATA_A57_817169))
+
+# Process ERRATA_A57_826974 flag
+$(eval $(call assert_boolean,ERRATA_A57_826974))
+$(eval $(call add_define,ERRATA_A57_826974))
+
+# Process ERRATA_A57_826977 flag
+$(eval $(call assert_boolean,ERRATA_A57_826977))
+$(eval $(call add_define,ERRATA_A57_826977))
+
+# Process ERRATA_A57_828024 flag
+$(eval $(call assert_boolean,ERRATA_A57_828024))
+$(eval $(call add_define,ERRATA_A57_828024))
+
+# Process ERRATA_A57_829520 flag
+$(eval $(call assert_boolean,ERRATA_A57_829520))
+$(eval $(call add_define,ERRATA_A57_829520))
+
+# Process ERRATA_A57_833471 flag
+$(eval $(call assert_boolean,ERRATA_A57_833471))
+$(eval $(call add_define,ERRATA_A57_833471))
+
+# Process ERRATA_A57_859972 flag
+$(eval $(call assert_boolean,ERRATA_A57_859972))
+$(eval $(call add_define,ERRATA_A57_859972))
+
+# Process ERRATA_A57_1319537 flag
+$(eval $(call assert_boolean,ERRATA_A57_1319537))
+$(eval $(call add_define,ERRATA_A57_1319537))
+
+# Process ERRATA_A72_859971 flag
+$(eval $(call assert_boolean,ERRATA_A72_859971))
+$(eval $(call add_define,ERRATA_A72_859971))
+
+# Process ERRATA_A72_1319367 flag
+$(eval $(call assert_boolean,ERRATA_A72_1319367))
+$(eval $(call add_define,ERRATA_A72_1319367))
+
+# Process ERRATA_A73_852427 flag
+$(eval $(call assert_boolean,ERRATA_A73_852427))
+$(eval $(call add_define,ERRATA_A73_852427))
+
+# Process ERRATA_A73_855423 flag
+$(eval $(call assert_boolean,ERRATA_A73_855423))
+$(eval $(call add_define,ERRATA_A73_855423))
+
+# Process ERRATA_A75_764081 flag
+$(eval $(call assert_boolean,ERRATA_A75_764081))
+$(eval $(call add_define,ERRATA_A75_764081))
+
+# Process ERRATA_A75_790748 flag
+$(eval $(call assert_boolean,ERRATA_A75_790748))
+$(eval $(call add_define,ERRATA_A75_790748))
+
+# Process ERRATA_A76_1073348 flag
+$(eval $(call assert_boolean,ERRATA_A76_1073348))
+$(eval $(call add_define,ERRATA_A76_1073348))
+
+# Process ERRATA_A76_1130799 flag
+$(eval $(call assert_boolean,ERRATA_A76_1130799))
+$(eval $(call add_define,ERRATA_A76_1130799))
+
+# Process ERRATA_A76_1220197 flag
+$(eval $(call assert_boolean,ERRATA_A76_1220197))
+$(eval $(call add_define,ERRATA_A76_1220197))
+
+# Process ERRATA_A76_1257314 flag
+$(eval $(call assert_boolean,ERRATA_A76_1257314))
+$(eval $(call add_define,ERRATA_A76_1257314))
+
+# Process ERRATA_A76_1262606 flag
+$(eval $(call assert_boolean,ERRATA_A76_1262606))
+$(eval $(call add_define,ERRATA_A76_1262606))
+
+# Process ERRATA_A76_1262888 flag
+$(eval $(call assert_boolean,ERRATA_A76_1262888))
+$(eval $(call add_define,ERRATA_A76_1262888))
+
+# Process ERRATA_A76_1275112 flag
+$(eval $(call assert_boolean,ERRATA_A76_1275112))
+$(eval $(call add_define,ERRATA_A76_1275112))
+
+# Process ERRATA_A76_1286807 flag
+$(eval $(call assert_boolean,ERRATA_A76_1286807))
+$(eval $(call add_define,ERRATA_A76_1286807))
+
+# Process ERRATA_A76_1791580 flag
+$(eval $(call assert_boolean,ERRATA_A76_1791580))
+$(eval $(call add_define,ERRATA_A76_1791580))
+
+# Process ERRATA_A76_1165522 flag
+$(eval $(call assert_boolean,ERRATA_A76_1165522))
+$(eval $(call add_define,ERRATA_A76_1165522))
+
+# Process ERRATA_A76_1868343 flag
+$(eval $(call assert_boolean,ERRATA_A76_1868343))
+$(eval $(call add_define,ERRATA_A76_1868343))
+
+# Process ERRATA_A76_1946160 flag
+$(eval $(call assert_boolean,ERRATA_A76_1946160))
+$(eval $(call add_define,ERRATA_A76_1946160))
+
+# Process ERRATA_A76_2743102 flag
+$(eval $(call assert_boolean,ERRATA_A76_2743102))
+$(eval $(call add_define,ERRATA_A76_2743102))
+
+# Process ERRATA_A77_1508412 flag
+$(eval $(call assert_boolean,ERRATA_A77_1508412))
+$(eval $(call add_define,ERRATA_A77_1508412))
+
+# Process ERRATA_A77_1925769 flag
+$(eval $(call assert_boolean,ERRATA_A77_1925769))
+$(eval $(call add_define,ERRATA_A77_1925769))
+
+# Process ERRATA_A77_1946167 flag
+$(eval $(call assert_boolean,ERRATA_A77_1946167))
+$(eval $(call add_define,ERRATA_A77_1946167))
+
+# Process ERRATA_A77_1791578 flag
+$(eval $(call assert_boolean,ERRATA_A77_1791578))
+$(eval $(call add_define,ERRATA_A77_1791578))
+
+# Process ERRATA_A77_2356587 flag
+$(eval $(call assert_boolean,ERRATA_A77_2356587))
+$(eval $(call add_define,ERRATA_A77_2356587))
+
+# Process ERRATA_A77_1800714 flag
+$(eval $(call assert_boolean,ERRATA_A77_1800714))
+$(eval $(call add_define,ERRATA_A77_1800714))
+
+# Process ERRATA_A77_2743100 flag
+$(eval $(call assert_boolean,ERRATA_A77_2743100))
+$(eval $(call add_define,ERRATA_A77_2743100))
+
+# Process ERRATA_A78_1688305 flag
+$(eval $(call assert_boolean,ERRATA_A78_1688305))
+$(eval $(call add_define,ERRATA_A78_1688305))
+
+# Process ERRATA_A78_1941498 flag
+$(eval $(call assert_boolean,ERRATA_A78_1941498))
+$(eval $(call add_define,ERRATA_A78_1941498))
+
+# Process ERRATA_A78_1951500 flag
+$(eval $(call assert_boolean,ERRATA_A78_1951500))
+$(eval $(call add_define,ERRATA_A78_1951500))
+
+# Process ERRATA_A78_1821534 flag
+$(eval $(call assert_boolean,ERRATA_A78_1821534))
+$(eval $(call add_define,ERRATA_A78_1821534))
+
+# Process ERRATA_A78_1952683 flag
+$(eval $(call assert_boolean,ERRATA_A78_1952683))
+$(eval $(call add_define,ERRATA_A78_1952683))
+
+# Process ERRATA_A78_2132060 flag
+$(eval $(call assert_boolean,ERRATA_A78_2132060))
+$(eval $(call add_define,ERRATA_A78_2132060))
+
+# Process ERRATA_A78_2242635 flag
+$(eval $(call assert_boolean,ERRATA_A78_2242635))
+$(eval $(call add_define,ERRATA_A78_2242635))
+
+# Process ERRATA_A78_2376745 flag
+$(eval $(call assert_boolean,ERRATA_A78_2376745))
+$(eval $(call add_define,ERRATA_A78_2376745))
+
+# Process ERRATA_A78_2395406 flag
+$(eval $(call assert_boolean,ERRATA_A78_2395406))
+$(eval $(call add_define,ERRATA_A78_2395406))
+
+# Process ERRATA_A78_AE_1941500 flag
+$(eval $(call assert_boolean,ERRATA_A78_AE_1941500))
+$(eval $(call add_define,ERRATA_A78_AE_1941500))
+
+# Process ERRATA_A78_AE_1951502 flag
+$(eval $(call assert_boolean,ERRATA_A78_AE_1951502))
+$(eval $(call add_define,ERRATA_A78_AE_1951502))
+
+# Process ERRATA_A78_AE_2376748 flag
+$(eval $(call assert_boolean,ERRATA_A78_AE_2376748))
+$(eval $(call add_define,ERRATA_A78_AE_2376748))
+
+# Process ERRATA_A78_AE_2395408 flag
+$(eval $(call assert_boolean,ERRATA_A78_AE_2395408))
+$(eval $(call add_define,ERRATA_A78_AE_2395408))
+
+# Process ERRATA_A78C_2132064 flag
+$(eval $(call assert_boolean,ERRATA_A78C_2132064))
+$(eval $(call add_define,ERRATA_A78C_2132064))
+
+# Process ERRATA_A78C_2242638 flag
+$(eval $(call assert_boolean,ERRATA_A78C_2242638))
+$(eval $(call add_define,ERRATA_A78C_2242638))
+
+# Process ERRATA_A78C_2376749 flag
+$(eval $(call assert_boolean,ERRATA_A78C_2376749))
+$(eval $(call add_define,ERRATA_A78C_2376749))
+
+# Process ERRATA_A78C_2395411 flag
+$(eval $(call assert_boolean,ERRATA_A78C_2395411))
+$(eval $(call add_define,ERRATA_A78C_2395411))
+
+# Process ERRATA_X1_1821534 flag
+$(eval $(call assert_boolean,ERRATA_X1_1821534))
+$(eval $(call add_define,ERRATA_X1_1821534))
+
+# Process ERRATA_X1_1688305 flag
+$(eval $(call assert_boolean,ERRATA_X1_1688305))
+$(eval $(call add_define,ERRATA_X1_1688305))
+
+# Process ERRATA_X1_1827429 flag
+$(eval $(call assert_boolean,ERRATA_X1_1827429))
+$(eval $(call add_define,ERRATA_X1_1827429))
+
+# Process ERRATA_N1_1043202 flag
+$(eval $(call assert_boolean,ERRATA_N1_1043202))
+$(eval $(call add_define,ERRATA_N1_1043202))
+
+# Process ERRATA_N1_1073348 flag
+$(eval $(call assert_boolean,ERRATA_N1_1073348))
+$(eval $(call add_define,ERRATA_N1_1073348))
+
+# Process ERRATA_N1_1130799 flag
+$(eval $(call assert_boolean,ERRATA_N1_1130799))
+$(eval $(call add_define,ERRATA_N1_1130799))
+
+# Process ERRATA_N1_1165347 flag
+$(eval $(call assert_boolean,ERRATA_N1_1165347))
+$(eval $(call add_define,ERRATA_N1_1165347))
+
+# Process ERRATA_N1_1207823 flag
+$(eval $(call assert_boolean,ERRATA_N1_1207823))
+$(eval $(call add_define,ERRATA_N1_1207823))
+
+# Process ERRATA_N1_1220197 flag
+$(eval $(call assert_boolean,ERRATA_N1_1220197))
+$(eval $(call add_define,ERRATA_N1_1220197))
+
+# Process ERRATA_N1_1257314 flag
+$(eval $(call assert_boolean,ERRATA_N1_1257314))
+$(eval $(call add_define,ERRATA_N1_1257314))
+
+# Process ERRATA_N1_1262606 flag
+$(eval $(call assert_boolean,ERRATA_N1_1262606))
+$(eval $(call add_define,ERRATA_N1_1262606))
+
+# Process ERRATA_N1_1262888 flag
+$(eval $(call assert_boolean,ERRATA_N1_1262888))
+$(eval $(call add_define,ERRATA_N1_1262888))
+
+# Process ERRATA_N1_1275112 flag
+$(eval $(call assert_boolean,ERRATA_N1_1275112))
+$(eval $(call add_define,ERRATA_N1_1275112))
+
+# Process ERRATA_N1_1315703 flag
+$(eval $(call assert_boolean,ERRATA_N1_1315703))
+$(eval $(call add_define,ERRATA_N1_1315703))
+
+# Process ERRATA_N1_1542419 flag
+$(eval $(call assert_boolean,ERRATA_N1_1542419))
+$(eval $(call add_define,ERRATA_N1_1542419))
+
+# Process ERRATA_N1_1868343 flag
+$(eval $(call assert_boolean,ERRATA_N1_1868343))
+$(eval $(call add_define,ERRATA_N1_1868343))
+
+# Process ERRATA_N1_1946160 flag
+$(eval $(call assert_boolean,ERRATA_N1_1946160))
+$(eval $(call add_define,ERRATA_N1_1946160))
+
+# Process ERRATA_N1_2743102 flag
+$(eval $(call assert_boolean,ERRATA_N1_2743102))
+$(eval $(call add_define,ERRATA_N1_2743102))
+
+# Process ERRATA_N2_2002655 flag
+$(eval $(call assert_boolean,ERRATA_N2_2002655))
+$(eval $(call add_define,ERRATA_N2_2002655))
+
+# Process ERRATA_V1_1618635 flag
+$(eval $(call assert_boolean,ERRATA_V1_1618635))
+$(eval $(call add_define,ERRATA_V1_1618635))
+
+# Process ERRATA_V1_1774420 flag
+$(eval $(call assert_boolean,ERRATA_V1_1774420))
+$(eval $(call add_define,ERRATA_V1_1774420))
+
+# Process ERRATA_V1_1791573 flag
+$(eval $(call assert_boolean,ERRATA_V1_1791573))
+$(eval $(call add_define,ERRATA_V1_1791573))
+
+# Process ERRATA_V1_1852267 flag
+$(eval $(call assert_boolean,ERRATA_V1_1852267))
+$(eval $(call add_define,ERRATA_V1_1852267))
+
+# Process ERRATA_V1_1925756 flag
+$(eval $(call assert_boolean,ERRATA_V1_1925756))
+$(eval $(call add_define,ERRATA_V1_1925756))
+
+# Process ERRATA_V1_1940577 flag
+$(eval $(call assert_boolean,ERRATA_V1_1940577))
+$(eval $(call add_define,ERRATA_V1_1940577))
+
+# Process ERRATA_V1_1966096 flag
+$(eval $(call assert_boolean,ERRATA_V1_1966096))
+$(eval $(call add_define,ERRATA_V1_1966096))
+
+# Process ERRATA_V1_2139242 flag
+$(eval $(call assert_boolean,ERRATA_V1_2139242))
+$(eval $(call add_define,ERRATA_V1_2139242))
+
+# Process ERRATA_V1_2108267 flag
+$(eval $(call assert_boolean,ERRATA_V1_2108267))
+$(eval $(call add_define,ERRATA_V1_2108267))
+
+# Process ERRATA_V1_2216392 flag
+$(eval $(call assert_boolean,ERRATA_V1_2216392))
+$(eval $(call add_define,ERRATA_V1_2216392))
+
+# Process ERRATA_V1_2294912 flag
+$(eval $(call assert_boolean,ERRATA_V1_2294912))
+$(eval $(call add_define,ERRATA_V1_2294912))
+
+# Process ERRATA_V1_2372203 flag
+$(eval $(call assert_boolean,ERRATA_V1_2372203))
+$(eval $(call add_define,ERRATA_V1_2372203))
+
+# Process ERRATA_A710_1987031 flag
+$(eval $(call assert_boolean,ERRATA_A710_1987031))
+$(eval $(call add_define,ERRATA_A710_1987031))
+
+# Process ERRATA_A710_2081180 flag
+$(eval $(call assert_boolean,ERRATA_A710_2081180))
+$(eval $(call add_define,ERRATA_A710_2081180))
+
+# Process ERRATA_A710_2083908 flag
+$(eval $(call assert_boolean,ERRATA_A710_2083908))
+$(eval $(call add_define,ERRATA_A710_2083908))
+
+# Process ERRATA_A710_2058056 flag
+$(eval $(call assert_boolean,ERRATA_A710_2058056))
+$(eval $(call add_define,ERRATA_A710_2058056))
+
+# Process ERRATA_A710_2055002 flag
+$(eval $(call assert_boolean,ERRATA_A710_2055002))
+$(eval $(call add_define,ERRATA_A710_2055002))
+
+# Process ERRATA_A710_2017096 flag
+$(eval $(call assert_boolean,ERRATA_A710_2017096))
+$(eval $(call add_define,ERRATA_A710_2017096))
+
+# Process ERRATA_A710_2267065 flag
+$(eval $(call assert_boolean,ERRATA_A710_2267065))
+$(eval $(call add_define,ERRATA_A710_2267065))
+
+# Process ERRATA_A710_2136059 flag
+$(eval $(call assert_boolean,ERRATA_A710_2136059))
+$(eval $(call add_define,ERRATA_A710_2136059))
+
+# Process ERRATA_A710_2147715 flag
+$(eval $(call assert_boolean,ERRATA_A710_2147715))
+$(eval $(call add_define,ERRATA_A710_2147715))
+
+# Process ERRATA_A710_2216384 flag
+$(eval $(call assert_boolean,ERRATA_A710_2216384))
+$(eval $(call add_define,ERRATA_A710_2216384))
+
+# Process ERRATA_A710_2282622 flag
+$(eval $(call assert_boolean,ERRATA_A710_2282622))
+$(eval $(call add_define,ERRATA_A710_2282622))
+
+# Process ERRATA_A710_2291219 flag
+$(eval $(call assert_boolean,ERRATA_A710_2291219))
+$(eval $(call add_define,ERRATA_A710_2291219))
+
+# Process ERRATA_A710_2008768 flag
+$(eval $(call assert_boolean,ERRATA_A710_2008768))
+$(eval $(call add_define,ERRATA_A710_2008768))
+
+# Process ERRATA_A710_2371105 flag
+$(eval $(call assert_boolean,ERRATA_A710_2371105))
+$(eval $(call add_define,ERRATA_A710_2371105))
+
+# Process ERRATA_N2_2067956 flag
+$(eval $(call assert_boolean,ERRATA_N2_2067956))
+$(eval $(call add_define,ERRATA_N2_2067956))
+
+# Process ERRATA_N2_2025414 flag
+$(eval $(call assert_boolean,ERRATA_N2_2025414))
+$(eval $(call add_define,ERRATA_N2_2025414))
+
+# Process ERRATA_N2_2189731 flag
+$(eval $(call assert_boolean,ERRATA_N2_2189731))
+$(eval $(call add_define,ERRATA_N2_2189731))
+
+# Process ERRATA_N2_2138956 flag
+$(eval $(call assert_boolean,ERRATA_N2_2138956))
+$(eval $(call add_define,ERRATA_N2_2138956))
+
+# Process ERRATA_N2_2138953 flag
+$(eval $(call assert_boolean,ERRATA_N2_2138953))
+$(eval $(call add_define,ERRATA_N2_2138953))
+
+# Process ERRATA_N2_2242415 flag
+$(eval $(call assert_boolean,ERRATA_N2_2242415))
+$(eval $(call add_define,ERRATA_N2_2242415))
+
+# Process ERRATA_N2_2138958 flag
+$(eval $(call assert_boolean,ERRATA_N2_2138958))
+$(eval $(call add_define,ERRATA_N2_2138958))
+
+# Process ERRATA_N2_2242400 flag
+$(eval $(call assert_boolean,ERRATA_N2_2242400))
+$(eval $(call add_define,ERRATA_N2_2242400))
+
+# Process ERRATA_N2_2280757 flag
+$(eval $(call assert_boolean,ERRATA_N2_2280757))
+$(eval $(call add_define,ERRATA_N2_2280757))
+
+# Process ERRATA_N2_2326639 flag
+$(eval $(call assert_boolean,ERRATA_N2_2326639))
+$(eval $(call add_define,ERRATA_N2_2326639))
+
+# Process ERRATA_N2_2376738 flag
+$(eval $(call assert_boolean,ERRATA_N2_2376738))
+$(eval $(call add_define,ERRATA_N2_2376738))
+
+# Process ERRATA_N2_2388450 flag
+$(eval $(call assert_boolean,ERRATA_N2_2388450))
+$(eval $(call add_define,ERRATA_N2_2388450))
+
+# Process ERRATA_X2_2002765 flag
+$(eval $(call assert_boolean,ERRATA_X2_2002765))
+$(eval $(call add_define,ERRATA_X2_2002765))
+
+# Process ERRATA_X2_2058056 flag
+$(eval $(call assert_boolean,ERRATA_X2_2058056))
+$(eval $(call add_define,ERRATA_X2_2058056))
+
+# Process ERRATA_X2_2083908 flag
+$(eval $(call assert_boolean,ERRATA_X2_2083908))
+$(eval $(call add_define,ERRATA_X2_2083908))
+
+# Process ERRATA_X2_2017096 flag
+$(eval $(call assert_boolean,ERRATA_X2_2017096))
+$(eval $(call add_define,ERRATA_X2_2017096))
+
+# Process ERRATA_X2_2081180 flag
+$(eval $(call assert_boolean,ERRATA_X2_2081180))
+$(eval $(call add_define,ERRATA_X2_2081180))
+
+# Process ERRATA_X2_2216384 flag
+$(eval $(call assert_boolean,ERRATA_X2_2216384))
+$(eval $(call add_define,ERRATA_X2_2216384))
+
+# Process ERRATA_X2_2147715 flag
+$(eval $(call assert_boolean,ERRATA_X2_2147715))
+$(eval $(call add_define,ERRATA_X2_2147715))
+
+# Process ERRATA_X2_2371105 flag
+$(eval $(call assert_boolean,ERRATA_X2_2371105))
+$(eval $(call add_define,ERRATA_X2_2371105))
+
+# Process ERRATA_X3_2313909 flag
+$(eval $(call assert_boolean,ERRATA_X3_2313909))
+$(eval $(call add_define,ERRATA_X3_2313909))
+
+# Process ERRATA_A510_1922240 flag
+$(eval $(call assert_boolean,ERRATA_A510_1922240))
+$(eval $(call add_define,ERRATA_A510_1922240))
+
+# Process ERRATA_A510_2288014 flag
+$(eval $(call assert_boolean,ERRATA_A510_2288014))
+$(eval $(call add_define,ERRATA_A510_2288014))
+
+# Process ERRATA_A510_2042739 flag
+$(eval $(call assert_boolean,ERRATA_A510_2042739))
+$(eval $(call add_define,ERRATA_A510_2042739))
+
+# Process ERRATA_A510_2041909 flag
+$(eval $(call assert_boolean,ERRATA_A510_2041909))
+$(eval $(call add_define,ERRATA_A510_2041909))
+
+# Process ERRATA_A510_2250311 flag
+$(eval $(call assert_boolean,ERRATA_A510_2250311))
+$(eval $(call add_define,ERRATA_A510_2250311))
+
+# Process ERRATA_A510_2218950 flag
+$(eval $(call assert_boolean,ERRATA_A510_2218950))
+$(eval $(call add_define,ERRATA_A510_2218950))
+
+# Process ERRATA_A510_2172148 flag
+$(eval $(call assert_boolean,ERRATA_A510_2172148))
+$(eval $(call add_define,ERRATA_A510_2172148))
+
+# Process ERRATA_A510_2347730 flag
+$(eval $(call assert_boolean,ERRATA_A510_2347730))
+$(eval $(call add_define,ERRATA_A510_2347730))
+
+# Process ERRATA_A510_2371937 flag
+$(eval $(call assert_boolean,ERRATA_A510_2371937))
+$(eval $(call add_define,ERRATA_A510_2371937))
+
+# Process ERRATA_A510_2666669 flag
+$(eval $(call assert_boolean,ERRATA_A510_2666669))
+$(eval $(call add_define,ERRATA_A510_2666669))
+
+# Process ERRATA_DSU_798953 flag
+$(eval $(call assert_boolean,ERRATA_DSU_798953))
+$(eval $(call add_define,ERRATA_DSU_798953))
+
+# Process ERRATA_DSU_936184 flag
+$(eval $(call assert_boolean,ERRATA_DSU_936184))
+$(eval $(call add_define,ERRATA_DSU_936184))
+
+# Process ERRATA_DSU_2313941 flag
+$(eval $(call assert_boolean,ERRATA_DSU_2313941))
+$(eval $(call add_define,ERRATA_DSU_2313941))
+
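+# Note: every stanza above follows the same pattern. A platform opts in to a
+# CPU workaround by setting the corresponding flag to 1 in its platform
+# makefile before this file is processed, for example (illustrative only):
+#
+#   ERRATA_N2_2067956 := 1
+#
+# assert_boolean then rejects any value other than 0 or 1, and add_define
+# turns the flag into an equally-named preprocessor define consumed by the
+# CPU support code.
+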
+# Errata workarounds applied via toolchain (compiler/linker) flags
+ifneq (${ERRATA_A53_843419},0)
+TF_LDFLAGS_aarch64 += --fix-cortex-a53-843419
+endif
+
+ifneq (${ERRATA_A53_835769},0)
+TF_CFLAGS_aarch64 += -mfix-cortex-a53-835769
+TF_LDFLAGS_aarch64 += --fix-cortex-a53-835769
+endif
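+# The two Cortex-A53 errata above are worked around by the toolchain rather
+# than by runtime code: --fix-cortex-a53-843419 makes the linker patch the
+# affected instruction sequences, and -mfix-cortex-a53-835769 stops the
+# compiler from emitting the sequence that can trigger the erratum.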
+
+ifneq ($(filter 1,${ERRATA_A53_1530924} ${ERRATA_A55_1530923} \
+ ${ERRATA_A57_1319537} ${ERRATA_A72_1319367} ${ERRATA_A76_1165522}),)
+ERRATA_SPECULATIVE_AT := 1
+else
+ERRATA_SPECULATIVE_AT := 0
+endif
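+
+# ERRATA_SPECULATIVE_AT is derived rather than set directly: it is forced to 1
+# when any of the errata in the filter above is enabled (all of them share the
+# same speculative-AT workaround in the EL3 context management code), and to 0
+# otherwise.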
diff --git a/lib/cpus/errata_report.c b/lib/cpus/errata_report.c
new file mode 100644
index 0000000..93b2744
--- /dev/null
+++ b/lib/cpus/errata_report.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* Runtime firmware routines to report errata status for the current CPU. */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/cpus/errata_report.h>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/spinlock.h>
+
+#ifdef IMAGE_BL1
+# define BL_STRING "BL1"
+#elif defined(__aarch64__) && defined(IMAGE_BL31)
+# define BL_STRING "BL31"
+#elif !defined(__aarch64__) && defined(IMAGE_BL32)
+# define BL_STRING "BL32"
+#elif defined(IMAGE_BL2) && BL2_AT_EL3
+# define BL_STRING "BL2"
+#else
+# error This image should not be printing errata status
+#endif
+
+/* Errata format: BL stage, CPU, errata ID, message */
+#define ERRATA_FORMAT "%s: %s: CPU workaround for %s was %s\n"
+
+/*
+ * Return whether errata status for the calling CPU needs to be reported. The
+ * lock and reported-flag arguments are private to each CPU type.
+ */
+int errata_needs_reporting(spinlock_t *lock, uint32_t *reported)
+{
+ bool report_now;
+
+ /* If already reported, return false. */
+ if (*reported != 0U)
+ return 0;
+
+	/*
+	 * Acquire the lock, re-check whether the status still needs
+	 * reporting, and then mark it as reported.
+	 */
+ spin_lock(lock);
+ report_now = (*reported == 0U);
+ if (report_now)
+ *reported = 1;
+ spin_unlock(lock);
+
+ return report_now;
+}
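+
+/*
+ * Illustrative call site (a sketch only; the real callers are the per-CPU
+ * errata reporting paths, and the names below are hypothetical):
+ *
+ *	static spinlock_t my_cpu_errata_lock;
+ *	static uint32_t my_cpu_errata_reported;
+ *
+ *	if (errata_needs_reporting(&my_cpu_errata_lock,
+ *				   &my_cpu_errata_reported) != 0)
+ *		errata_print_msg(ERRATA_APPLIES, "My CPU", "1234567");
+ *
+ * The cheap unlocked check filters the common already-reported case; the
+ * re-check under the lock guarantees that exactly one CPU prints the report.
+ */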
+
+/*
+ * Print errata status message.
+ *
+ * Unknown: WARN
+ * Missing: WARN
+ * Applied: INFO
+ * Not applied: VERBOSE
+ */
+void errata_print_msg(unsigned int status, const char *cpu, const char *id)
+{
+ /* Errata status strings */
+ static const char *const errata_status_str[] = {
+ [ERRATA_NOT_APPLIES] = "not applied",
+ [ERRATA_APPLIES] = "applied",
+ [ERRATA_MISSING] = "missing!"
+ };
+ static const char *const __unused bl_str = BL_STRING;
+ const char *msg __unused;
+
+ assert(status < ARRAY_SIZE(errata_status_str));
+ assert(cpu != NULL);
+ assert(id != NULL);
+
+ msg = errata_status_str[status];
+
+ switch (status) {
+ case ERRATA_NOT_APPLIES:
+ VERBOSE(ERRATA_FORMAT, bl_str, cpu, id, msg);
+ break;
+
+ case ERRATA_APPLIES:
+ INFO(ERRATA_FORMAT, bl_str, cpu, id, msg);
+ break;
+
+ case ERRATA_MISSING:
+ WARN(ERRATA_FORMAT, bl_str, cpu, id, msg);
+ break;
+
+ default:
+ WARN(ERRATA_FORMAT, bl_str, cpu, id, "unknown");
+ break;
+ }
+}
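+
+/*
+ * Example of the resulting console output (illustrative; the "INFO:" prefix
+ * and its padding come from the debug logging macros):
+ *
+ *	INFO:    BL31: cortex_a76: CPU workaround for 1165522 was applied
+ */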