path: root/lib/cpus
Diffstat (limited to '')
-rw-r--r--  lib/cpus/aarch32/aem_generic.S | 55
-rw-r--r--  lib/cpus/aarch32/cortex_a12.S | 81
-rw-r--r--  lib/cpus/aarch32/cortex_a15.S | 180
-rw-r--r--  lib/cpus/aarch32/cortex_a17.S | 174
-rw-r--r--  lib/cpus/aarch32/cortex_a32.S | 125
-rw-r--r--  lib/cpus/aarch32/cortex_a5.S | 77
-rw-r--r--  lib/cpus/aarch32/cortex_a53.S | 305
-rw-r--r--  lib/cpus/aarch32/cortex_a57.S | 614
-rw-r--r--  lib/cpus/aarch32/cortex_a7.S | 81
-rw-r--r--  lib/cpus/aarch32/cortex_a72.S | 264
-rw-r--r--  lib/cpus/aarch32/cortex_a9.S | 105
-rw-r--r--  lib/cpus/aarch32/cpu_helpers.S | 207
-rw-r--r--  lib/cpus/aarch64/a64fx.S | 49
-rw-r--r--  lib/cpus/aarch64/aem_generic.S | 113
-rw-r--r--  lib/cpus/aarch64/cortex_a35.S | 138
-rw-r--r--  lib/cpus/aarch64/cortex_a510.S | 235
-rw-r--r--  lib/cpus/aarch64/cortex_a520.S | 66
-rw-r--r--  lib/cpus/aarch64/cortex_a53.S | 230
-rw-r--r--  lib/cpus/aarch64/cortex_a55.S | 152
-rw-r--r--  lib/cpus/aarch64/cortex_a57.S | 316
-rw-r--r--  lib/cpus/aarch64/cortex_a65.S | 81
-rw-r--r--  lib/cpus/aarch64/cortex_a65ae.S | 58
-rw-r--r--  lib/cpus/aarch64/cortex_a710.S | 254
-rw-r--r--  lib/cpus/aarch64/cortex_a715.S | 85
-rw-r--r--  lib/cpus/aarch64/cortex_a72.S | 303
-rw-r--r--  lib/cpus/aarch64/cortex_a720.S | 84
-rw-r--r--  lib/cpus/aarch64/cortex_a73.S | 210
-rw-r--r--  lib/cpus/aarch64/cortex_a75.S | 175
-rw-r--r--  lib/cpus/aarch64/cortex_a75_pubsub.c | 28
-rw-r--r--  lib/cpus/aarch64/cortex_a76.S | 540
-rw-r--r--  lib/cpus/aarch64/cortex_a76ae.S | 76
-rw-r--r--  lib/cpus/aarch64/cortex_a77.S | 192
-rw-r--r--  lib/cpus/aarch64/cortex_a78.S | 224
-rw-r--r--  lib/cpus/aarch64/cortex_a78_ae.S | 154
-rw-r--r--  lib/cpus/aarch64/cortex_a78c.S | 143
-rw-r--r--  lib/cpus/aarch64/cortex_blackhawk.S | 66
-rw-r--r--  lib/cpus/aarch64/cortex_chaberton.S | 66
-rw-r--r--  lib/cpus/aarch64/cortex_gelas.S | 84
-rw-r--r--  lib/cpus/aarch64/cortex_x1.S | 92
-rw-r--r--  lib/cpus/aarch64/cortex_x2.S | 207
-rw-r--r--  lib/cpus/aarch64/cortex_x3.S | 111
-rw-r--r--  lib/cpus/aarch64/cortex_x4.S | 83
-rw-r--r--  lib/cpus/aarch64/cpu_helpers.S | 393
-rw-r--r--  lib/cpus/aarch64/cpuamu.c | 70
-rw-r--r--  lib/cpus/aarch64/cpuamu_helpers.S | 99
-rw-r--r--  lib/cpus/aarch64/denver.S | 340
-rw-r--r--  lib/cpus/aarch64/dsu_helpers.S | 194
-rw-r--r--  lib/cpus/aarch64/generic.S | 89
-rw-r--r--  lib/cpus/aarch64/neoverse_e1.S | 59
-rw-r--r--  lib/cpus/aarch64/neoverse_hermes.S | 66
-rw-r--r--  lib/cpus/aarch64/neoverse_n1.S | 303
-rw-r--r--  lib/cpus/aarch64/neoverse_n1_pubsub.c | 28
-rw-r--r--  lib/cpus/aarch64/neoverse_n2.S | 308
-rw-r--r--  lib/cpus/aarch64/neoverse_n_common.S | 26
-rw-r--r--  lib/cpus/aarch64/neoverse_poseidon.S | 86
-rw-r--r--  lib/cpus/aarch64/neoverse_v1.S | 283
-rw-r--r--  lib/cpus/aarch64/neoverse_v2.S | 117
-rw-r--r--  lib/cpus/aarch64/nevis.S | 57
-rw-r--r--  lib/cpus/aarch64/qemu_max.S | 74
-rw-r--r--  lib/cpus/aarch64/rainier.S | 106
-rw-r--r--  lib/cpus/aarch64/travis.S | 71
-rw-r--r--  lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S | 368
-rw-r--r--  lib/cpus/aarch64/wa_cve_2017_5715_mmu.S | 152
-rw-r--r--  lib/cpus/aarch64/wa_cve_2022_23960_bhb.S | 30
-rw-r--r--  lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S | 108
-rw-r--r--  lib/cpus/cpu-ops.mk | 893
-rw-r--r--  lib/cpus/errata_report.c | 228
67 files changed, 11431 insertions, 0 deletions
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S
new file mode 100644
index 0000000..9f45e38
--- /dev/null
+++ b/lib/cpus/aarch32/aem_generic.S
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2016-2017, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <aem_generic.h>
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_macros.S>
+
+func aem_generic_core_pwr_dwn
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ /* ---------------------------------------------
+ * Flush L1 cache to PoU.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ b dcsw_op_louis
+endfunc aem_generic_core_pwr_dwn
+
+
+func aem_generic_cluster_pwr_dwn
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ /* ---------------------------------------------
+ * Flush L1 and L2 caches to PoC.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ b dcsw_op_all
+endfunc aem_generic_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for AEM. Must follow AAPCS.
+ */
+func aem_generic_errata_report
+ bx lr
+endfunc aem_generic_errata_report
+#endif
+
+/* cpu_ops for Base AEM FVP */
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+ aem_generic_core_pwr_dwn, \
+ aem_generic_cluster_pwr_dwn
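
Both power-down hooks above hand a DC_OP_CISW operation code to the generic dcsw_op_louis / dcsw_op_all helpers, which walk every set and way of the selected cache levels. As a rough illustration of what those helpers compute on each iteration (not TF-A code; the field widths come from CCSIDR as described in the Arm ARM, and the function name is invented for this sketch):

#include <stdint.h>

/*
 * Illustrative only: build the set/way operand for a DC CISW operation on
 * one (level, set, way) triple. 'log2_linelen' is CCSIDR.LineSize + 4 and
 * 'way_shift' is the leading-zero count of (number_of_ways - 1), which is
 * how the generic dcsw_op helpers derive the field positions.
 */
static inline uint32_t dc_cisw_operand(uint32_t level, uint32_t set,
				       uint32_t way, uint32_t log2_linelen,
				       uint32_t way_shift)
{
	return (way << way_shift) | (set << log2_linelen) | (level << 1);
}

The helpers issue this operation for every set and way of each level up to the Level of Unification Inner Shareable (dcsw_op_louis) or for every implemented level (dcsw_op_all).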
diff --git a/lib/cpus/aarch32/cortex_a12.S b/lib/cpus/aarch32/cortex_a12.S
new file mode 100644
index 0000000..8eec27c
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a12.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a12.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a12_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A12_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a12_disable_smp
+
+func cortex_a12_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A12_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a12_enable_smp
+
+func cortex_a12_reset_func
+ b cortex_a12_enable_smp
+endfunc cortex_a12_reset_func
+
+func cortex_a12_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a12_disable_smp
+endfunc cortex_a12_core_pwr_dwn
+
+func cortex_a12_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Flush L2 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a12_disable_smp
+endfunc cortex_a12_cluster_pwr_dwn
+
+errata_report_shim cortex_a12
+
+declare_cpu_ops cortex_a12, CORTEX_A12_MIDR, \
+ cortex_a12_reset_func, \
+ cortex_a12_core_pwr_dwn, \
+ cortex_a12_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a15.S b/lib/cpus/aarch32/cortex_a15.S
new file mode 100644
index 0000000..b41676d
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a15.S
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a15.h>
+#include <cpu_macros.S>
+
+/*
+ * Cortex-A15 supports LPAE and the Virtualization Extensions. Whether or
+ * not the configuration uses LPAE and VE does not matter here, so there
+ * is no need to check ARCH_IS_ARMV7_WITH_LPAE/VE.
+ */
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a15_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A15_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+#if ERRATA_A15_816470
+ /*
+ * Invalidate any TLB address
+ */
+ mov r0, #0
+ stcopr r0, TLBIMVA
+#endif
+ dsb sy
+ bx lr
+endfunc cortex_a15_disable_smp
+
+func cortex_a15_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A15_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a15_enable_smp
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex A15 Errata #816470.
+ * This applies only to revision >= r3p0 of Cortex A15.
+ * ----------------------------------------------------
+ */
+func check_errata_816470
+ /*
+ * Even though this is only needed for revision >= r3p0, it is always
+ * applied because of the low cost of the workaround.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_816470
+
+add_erratum_entry cortex_a15, ERRATUM(816470), ERRATA_A15_816470
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex A15 Errata #827671.
+ * This applies only to revision >= r3p0 of Cortex A15.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ----------------------------------------------------
+ */
+func errata_a15_827671_wa
+ /*
+ * Compare r0 against revision r3p0
+ */
+ mov r2, lr
+ bl check_errata_827671
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr r0, CORTEX_A15_ACTLR2
+ orr r0, #CORTEX_A15_ACTLR2_INV_DCC_BIT
+ stcopr r0, CORTEX_A15_ACTLR2
+ isb
+1:
+ bx r2
+endfunc errata_a15_827671_wa
+
+func check_errata_827671
+ mov r1, #0x30
+ b cpu_rev_var_hs
+endfunc check_errata_827671
+
+add_erratum_entry cortex_a15, ERRATUM(827671), ERRATA_A15_827671
+
+func check_errata_cve_2017_5715
+#if WORKAROUND_CVE_2017_5715
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2017_5715
+
+add_erratum_entry cortex_a15, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
+
+func check_errata_cve_2022_23960
+#if WORKAROUND_CVE_2022_23960
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2022_23960
+
+add_erratum_entry cortex_a15, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+func cortex_a15_reset_func
+ mov r5, lr
+ bl cpu_get_rev_var
+
+#if ERRATA_A15_827671
+ bl errata_a15_827671_wa
+#endif
+
+#if IMAGE_BL32 && (WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960)
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A15_ACTLR_INV_BTB_BIT
+ stcopr r0, ACTLR
+ ldr r0, =wa_cve_2017_5715_icache_inv_vbar
+ stcopr r0, VBAR
+ stcopr r0, MVBAR
+ /* isb will be applied in the course of the reset func */
+#endif
+
+ mov lr, r5
+ b cortex_a15_enable_smp
+endfunc cortex_a15_reset_func
+
+func cortex_a15_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a15_disable_smp
+endfunc cortex_a15_core_pwr_dwn
+
+func cortex_a15_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Flush L2 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a15_disable_smp
+endfunc cortex_a15_cluster_pwr_dwn
+
+errata_report_shim cortex_a15
+
+declare_cpu_ops cortex_a15, CORTEX_A15_MIDR, \
+ cortex_a15_reset_func, \
+ cortex_a15_core_pwr_dwn, \
+ cortex_a15_cluster_pwr_dwn
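
The two errata above show the revision gating used throughout these files: cpu_get_rev_var packs MIDR.Variant and MIDR.Revision into one byte (variant in bits [7:4], revision in bits [3:0]), and cpu_rev_var_ls / cpu_rev_var_hs report whether that byte is lower-or-same / higher-or-same than the threshold passed in r1 (0x30, i.e. r3p0, for erratum 827671). A minimal C model of that comparison, with illustrative names and placeholder return values:

#include <stdint.h>

#define ERRATA_APPLIES		1	/* placeholder values for the sketch */
#define ERRATA_NOT_APPLIES	0

/* Pack variant/revision the way cpu_get_rev_var does: r3p0 -> 0x30. */
static uint8_t pack_rev_var(uint8_t variant, uint8_t revision)
{
	return (uint8_t)((variant << 4) | (revision & 0xfu));
}

/* cpu_rev_var_ls: the erratum applies to revisions lower than or same as max. */
static int rev_var_ls(uint8_t rev_var, uint8_t max)
{
	return (rev_var <= max) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
}

/* cpu_rev_var_hs: the erratum applies to revisions higher than or same as min. */
static int rev_var_hs(uint8_t rev_var, uint8_t min)
{
	return (rev_var >= min) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
}

For example, rev_var_ls(pack_rev_var(1, 2), 0x12) returns ERRATA_APPLIES for an r1p2 part, matching the cpu_rev_var_ls calls with #0x12 in the Cortex-A17 file that follows.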
diff --git a/lib/cpus/aarch32/cortex_a17.S b/lib/cpus/aarch32/cortex_a17.S
new file mode 100644
index 0000000..1877570
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a17.S
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a17.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a17_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A17_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a17_disable_smp
+
+func cortex_a17_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A17_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a17_enable_smp
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex A17 Errata #852421.
+ * This applies only to revision <= r1p2 of Cortex A17.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ----------------------------------------------------
+ */
+func errata_a17_852421_wa
+ /*
+ * Compare r0 against revision r1p2
+ */
+ mov r2, lr
+ bl check_errata_852421
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr r0, CORTEX_A17_IMP_DEF_REG1
+ orr r0, r0, #(1<<24)
+ stcopr r0, CORTEX_A17_IMP_DEF_REG1
+1:
+ bx r2
+endfunc errata_a17_852421_wa
+
+func check_errata_852421
+ mov r1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_852421
+
+add_erratum_entry cortex_a17, ERRATUM(852421), ERRATA_A17_852421
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex A17 Errata #852423.
+ * This applies only to revision <= r1p2 of Cortex A17.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ----------------------------------------------------
+ */
+func errata_a17_852423_wa
+ /*
+ * Compare r0 against revision r1p2
+ */
+ mov r2, lr
+ bl check_errata_852423
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr r0, CORTEX_A17_IMP_DEF_REG1
+ orr r0, r0, #(1<<12)
+ stcopr r0, CORTEX_A17_IMP_DEF_REG1
+1:
+ bx r2
+endfunc errata_a17_852423_wa
+
+func check_errata_852423
+ mov r1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_852423
+
+add_erratum_entry cortex_a17, ERRATUM(852423), ERRATA_A17_852423
+
+func check_errata_cve_2017_5715
+#if WORKAROUND_CVE_2017_5715
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2017_5715
+
+add_erratum_entry cortex_a17, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
+
+errata_report_shim cortex_a17
+
+func cortex_a17_reset_func
+ mov r5, lr
+ bl cpu_get_rev_var
+ mov r4, r0
+
+#if ERRATA_A17_852421
+ mov r0, r4
+ bl errata_a17_852421_wa
+#endif
+
+#if ERRATA_A17_852423
+ mov r0, r4
+ bl errata_a17_852423_wa
+#endif
+
+#if IMAGE_BL32 && WORKAROUND_CVE_2017_5715
+ ldr r0, =wa_cve_2017_5715_bpiall_vbar
+ stcopr r0, VBAR
+ stcopr r0, MVBAR
+ /* isb will be applied in the course of the reset func */
+#endif
+
+ mov lr, r5
+ b cortex_a17_enable_smp
+endfunc cortex_a17_reset_func
+
+func cortex_a17_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a17_disable_smp
+endfunc cortex_a17_core_pwr_dwn
+
+func cortex_a17_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Flush L2 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a17_disable_smp
+endfunc cortex_a17_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a17, CORTEX_A17_MIDR, \
+ cortex_a17_reset_func, \
+ cortex_a17_core_pwr_dwn, \
+ cortex_a17_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a32.S b/lib/cpus/aarch32/cortex_a32.S
new file mode 100644
index 0000000..d08b4ff
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a32.S
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a32.h>
+#include <cpu_macros.S>
+
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * Clobbers: r0-r1
+ * ---------------------------------------------
+ */
+func cortex_a32_disable_smp
+ ldcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1
+ bic r0, r0, #CORTEX_A32_CPUECTLR_SMPEN_BIT
+ stcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a32_disable_smp
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A32.
+ * Clobbers: r0-r1
+ * -------------------------------------------------
+ */
+func cortex_a32_reset_func
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ ldcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1
+ orr r0, r0, #CORTEX_A32_CPUECTLR_SMPEN_BIT
+ stcopr16 r0, r1, CORTEX_A32_CPUECTLR_EL1
+ isb
+ bx lr
+endfunc cortex_a32_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A32.
+ * Clobbers: r0-r3
+ * ----------------------------------------------------
+ */
+func cortex_a32_core_pwr_dwn
+ /* r12 is pushed to meet the 8 byte stack alignment requirement */
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a32_disable_smp
+endfunc cortex_a32_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A32.
+ * Clobbers: r0-r3
+ * -------------------------------------------------------
+ */
+func cortex_a32_cluster_pwr_dwn
+ /* r12 is pushed to meet the 8 byte stack alignment requirement */
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Flush L1 cache.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 cache.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a32_disable_smp
+endfunc cortex_a32_cluster_pwr_dwn
+
+errata_report_shim cortex_a32
+
+declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \
+ cortex_a32_reset_func, \
+ cortex_a32_core_pwr_dwn, \
+ cortex_a32_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a5.S b/lib/cpus/aarch32/cortex_a5.S
new file mode 100644
index 0000000..625ea7b
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a5.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a5.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a5_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A5_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a5_disable_smp
+
+func cortex_a5_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A5_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a5_enable_smp
+
+func cortex_a5_reset_func
+ b cortex_a5_enable_smp
+endfunc cortex_a5_reset_func
+
+func cortex_a5_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a5_disable_smp
+endfunc cortex_a5_core_pwr_dwn
+
+func cortex_a5_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a5_disable_smp
+endfunc cortex_a5_cluster_pwr_dwn
+
+errata_report_shim cortex_a5
+
+declare_cpu_ops cortex_a5, CORTEX_A5_MIDR, \
+ cortex_a5_reset_func, \
+ cortex_a5_core_pwr_dwn, \
+ cortex_a5_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a53.S b/lib/cpus/aarch32/cortex_a53.S
new file mode 100644
index 0000000..89b238a
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a53.S
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <common/debug.h>
+#include <cortex_a53.h>
+#include <cpu_macros.S>
+
+#if A53_DISABLE_NON_TEMPORAL_HINT
+#undef ERRATA_A53_836870
+#define ERRATA_A53_836870 1
+#endif
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a53_disable_smp
+ ldcopr16 r0, r1, CORTEX_A53_ECTLR
+ bic64_imm r0, r1, CORTEX_A53_ECTLR_SMP_BIT
+ stcopr16 r0, r1, CORTEX_A53_ECTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a53_disable_smp
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #819472.
+ * This applies only to revision <= r0p1 of Cortex A53.
+ * ---------------------------------------------------
+ */
+func check_errata_819472
+ /*
+ * Even though this is only needed for revision <= r0p1, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_819472
+
+add_erratum_entry cortex_a53, ERRATUM(819472), ERRATA_A53_819472
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #824069.
+ * This applies only to revision <= r0p2 of Cortex A53.
+ * ---------------------------------------------------
+ */
+func check_errata_824069
+ /*
+ * Even though this is only needed for revision <= r0p2, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_824069
+
+add_erratum_entry cortex_a53, ERRATUM(824069), ERRATA_A53_824069
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #826319.
+ * This applies only to revision <= r0p2 of Cortex A53.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * --------------------------------------------------
+ */
+func errata_a53_826319_wa
+ /*
+ * Compare r0 against revision r0p2
+ */
+ mov r2, lr
+ bl check_errata_826319
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr r0, CORTEX_A53_L2ACTLR
+ bic r0, #CORTEX_A53_L2ACTLR_ENABLE_UNIQUECLEAN
+ orr r0, #CORTEX_A53_L2ACTLR_DISABLE_CLEAN_PUSH
+ stcopr r0, CORTEX_A53_L2ACTLR
+1:
+ bx lr
+endfunc errata_a53_826319_wa
+
+func check_errata_826319
+ mov r1, #0x02
+ b cpu_rev_var_ls
+endfunc check_errata_826319
+
+add_erratum_entry cortex_a53, ERRATUM(826319), ERRATA_A53_826319
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #827319.
+ * This applies only to revision <= r0p2 of Cortex A53.
+ * ---------------------------------------------------
+ */
+func check_errata_827319
+ /*
+ * Even though this is only needed for revision <= r0p2, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_827319
+
+add_erratum_entry cortex_a53, ERRATUM(827319), ERRATA_A53_827319
+
+ /* ---------------------------------------------------------------------
+ * Disable the cache non-temporal hint.
+ *
+ * This ignores the Transient allocation hint in the MAIR and treats
+ * allocations the same as non-transient allocation types. As a result,
+ * the LDNP and STNP instructions in AArch64 behave the same as the
+ * equivalent LDP and STP instructions.
+ *
+ * This is relevant only for revisions <= r0p3 of Cortex-A53.
+ * From r0p4 and onwards, the bit to disable the hint is enabled by
+ * default at reset.
+ *
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------------------------
+ */
+func a53_disable_non_temporal_hint
+ /*
+ * Compare r0 against revision r0p3
+ */
+ mov r2, lr
+ bl check_errata_disable_non_temporal_hint
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A53_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A53_CPUACTLR_DTAH
+ stcopr16 r0, r1, CORTEX_A53_CPUACTLR
+1:
+ bx lr
+endfunc a53_disable_non_temporal_hint
+
+func check_errata_disable_non_temporal_hint
+ mov r1, #0x03
+ b cpu_rev_var_ls
+endfunc check_errata_disable_non_temporal_hint
+
+add_erratum_entry cortex_a53, ERRATUM(836870), ERRATA_A53_836870 | A53_DISABLE_NON_TEMPORAL_HINT, \
+ disable_non_temporal_hint
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #855873.
+ *
+ * This applies only to revisions >= r0p3 of Cortex A53.
+ * Earlier revisions of the core are affected as well, but don't
+ * have the chicken bit in the CPUACTLR register. It is expected that
+ * the rich OS takes care of that, especially as the workaround is
+	 * shared with other errata in those revisions of the CPU.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * --------------------------------------------------
+ */
+func errata_a53_855873_wa
+ /*
+ * Compare r0 against revision r0p3 and higher
+ */
+ mov r2, lr
+ bl check_errata_855873
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A53_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A53_CPUACTLR_ENDCCASCI
+ stcopr16 r0, r1, CORTEX_A53_CPUACTLR
+1:
+ bx lr
+endfunc errata_a53_855873_wa
+
+func check_errata_855873
+ mov r1, #0x03
+ b cpu_rev_var_hs
+endfunc check_errata_855873
+
+add_erratum_entry cortex_a53, ERRATUM(855873), ERRATA_A53_855873
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A53.
+ * Shall clobber: r0-r6
+ * -------------------------------------------------
+ */
+func cortex_a53_reset_func
+ mov r5, lr
+ bl cpu_get_rev_var
+ mov r4, r0
+
+#if ERRATA_A53_826319
+ mov r0, r4
+ bl errata_a53_826319_wa
+#endif
+
+#if ERRATA_A53_836870
+ mov r0, r4
+ bl a53_disable_non_temporal_hint
+#endif
+
+#if ERRATA_A53_855873
+ mov r0, r4
+ bl errata_a53_855873_wa
+#endif
+
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ ldcopr16 r0, r1, CORTEX_A53_ECTLR
+ orr64_imm r0, r1, CORTEX_A53_ECTLR_SMP_BIT
+ stcopr16 r0, r1, CORTEX_A53_ECTLR
+ isb
+ bx r5
+endfunc cortex_a53_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A53.
+ * ----------------------------------------------------
+ */
+func cortex_a53_core_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a53_disable_smp
+endfunc cortex_a53_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A53.
+ * Clobbers: r0-r3
+ * -------------------------------------------------------
+ */
+func cortex_a53_cluster_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a53_disable_smp
+endfunc cortex_a53_cluster_pwr_dwn
+
+errata_report_shim cortex_a53
+
+declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
+ cortex_a53_reset_func, \
+ cortex_a53_core_pwr_dwn, \
+ cortex_a53_cluster_pwr_dwn
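
The reset function above follows the shape shared by every CPU in this library: read the packed variant/revision once, pass it to each build-time-enabled workaround (each of which re-checks the revision and becomes a no-op when it does not apply), and finish with the CPU-specific reset action, here setting the SMP bit in ECTLR. A hedged C rendering of that control flow, with stand-in names for the assembly routines:

#include <stdint.h>

/* Stand-ins for the assembly routines; the bodies are stubs for the sketch. */
static uint8_t cpu_get_rev_var_model(void) { return 0x03; }	/* e.g. r0p3 */
static void errata_a53_826319_apply(uint8_t rv) { (void)rv; }	/* <= r0p2 */
static void a53_disable_dtah_apply(uint8_t rv)  { (void)rv; }	/* <= r0p3 */
static void errata_a53_855873_apply(uint8_t rv) { (void)rv; }	/* >= r0p3 */
static void a53_enable_smp_model(void)          { }

static void cortex_a53_reset_model(void)
{
	uint8_t rev_var = cpu_get_rev_var_model();

	/* Each workaround checks rev_var itself and skips when not needed. */
	errata_a53_826319_apply(rev_var);
	a53_disable_dtah_apply(rev_var);
	errata_a53_855873_apply(rev_var);

	/* Enable intra-cluster coherency (ECTLR.SMPEN) last. */
	a53_enable_smp_model();
}

In the assembly, the build flags (ERRATA_A53_826319 and so on) additionally compile each call out entirely when the platform does not request the workaround.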
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
new file mode 100644
index 0000000..1e5377b
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -0,0 +1,614 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <common/debug.h>
+#include <cortex_a57.h>
+#include <cpu_macros.S>
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * Clobbers: r0-r1
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_smp
+ ldcopr16 r0, r1, CORTEX_A57_ECTLR
+ bic64_imm r0, r1, CORTEX_A57_ECTLR_SMP_BIT
+ stcopr16 r0, r1, CORTEX_A57_ECTLR
+ bx lr
+endfunc cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Disable all types of L2 prefetches.
+ * Clobbers: r0-r2
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_l2_prefetch
+ ldcopr16 r0, r1, CORTEX_A57_ECTLR
+ orr64_imm r0, r1, CORTEX_A57_ECTLR_DIS_TWD_ACC_PFTCH_BIT
+ bic64_imm r0, r1, (CORTEX_A57_ECTLR_L2_IPFTCH_DIST_MASK | \
+ CORTEX_A57_ECTLR_L2_DPFTCH_DIST_MASK)
+ stcopr16 r0, r1, CORTEX_A57_ECTLR
+ isb
+ dsb ish
+ bx lr
+endfunc cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable debug interfaces
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_ext_debug
+ mov r0, #1
+ stcopr r0, DBGOSDLR
+ isb
+#if ERRATA_A57_817169
+ /*
+ * Invalidate any TLB address
+ */
+ mov r0, #0
+ stcopr r0, TLBIMVA
+#endif
+ dsb sy
+ bx lr
+endfunc cortex_a57_disable_ext_debug
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #806969.
+ * This applies only to revision r0p0 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * --------------------------------------------------
+ */
+func errata_a57_806969_wa
+ /*
+ * Compare r0 against revision r0p0
+ */
+ mov r2, lr
+ bl check_errata_806969
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_NO_ALLOC_WBWA
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_806969_wa
+
+func check_errata_806969
+ mov r1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_806969
+
+add_erratum_entry cortex_a57, ERRATUM(806969), ERRATA_A57_806969
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #813419.
+ * This applies only to revision r0p0 of Cortex A57.
+ * ---------------------------------------------------
+ */
+func check_errata_813419
+ /*
+ * Even though this is only needed for revision r0p0, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_813419
+
+add_erratum_entry cortex_a57, ERRATUM(813419), ERRATA_A57_813419
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #813420.
+ * This applies only to revision r0p0 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_813420_wa
+ /*
+ * Compare r0 against revision r0p0
+ */
+ mov r2, lr
+ bl check_errata_813420
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DCC_AS_DCCI
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_813420_wa
+
+func check_errata_813420
+ mov r1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_813420
+
+add_erratum_entry cortex_a57, ERRATUM(813420), ERRATA_A57_813420
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #814670.
+ * This applies only to revision r0p0 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_814670_wa
+ /*
+ * Compare r0 against revision r0p0
+ */
+ mov r2, lr
+ bl check_errata_814670
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_DMB_NULLIFICATION
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ isb
+1:
+ bx r2
+endfunc errata_a57_814670_wa
+
+func check_errata_814670
+ mov r1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_814670
+
+add_erratum_entry cortex_a57, ERRATUM(814670), ERRATA_A57_814670
+
+ /* ----------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #817169.
+ * This applies only to revision <= r0p1 of Cortex A57.
+ * ----------------------------------------------------
+ */
+func check_errata_817169
+ /*
+ * Even though this is only needed for revision <= r0p1, it
+ * is always applied because of the low cost of the workaround.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_817169
+
+add_erratum_entry cortex_a57, ERRATUM(817169), ERRATA_A57_817169
+
+ /* --------------------------------------------------------------------
+ * Disable the over-read from the LDNP instruction.
+ *
+ * This applies to all revisions <= r1p2. The performance degradation
+ * observed with LDNP/STNP has been fixed on r1p3 and onwards.
+ *
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------------------------
+ */
+func a57_disable_ldnp_overread
+ /*
+ * Compare r0 against revision r1p2
+ */
+ mov r2, lr
+ bl check_errata_disable_ldnp_overread
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_OVERREAD
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc a57_disable_ldnp_overread
+
+func check_errata_disable_ldnp_overread
+ mov r1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_disable_ldnp_overread
+
+add_erratum_entry cortex_a57, ERRATUM(1), A57_DISABLE_NON_TEMPORAL_HINT, disable_ldnp_overread
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #826974.
+ * This applies only to revision <= r1p1 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_826974_wa
+ /*
+ * Compare r0 against revision r1p1
+ */
+ mov r2, lr
+ bl check_errata_826974
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_DMB
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_826974_wa
+
+func check_errata_826974
+ mov r1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_826974
+
+add_erratum_entry cortex_a57, ERRATUM(826974), ERRATA_A57_826974
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #826977.
+ * This applies only to revision <= r1p1 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_826977_wa
+ /*
+ * Compare r0 against revision r1p1
+ */
+ mov r2, lr
+ bl check_errata_826977
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_GRE_NGRE_AS_NGNRE
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_826977_wa
+
+func check_errata_826977
+ mov r1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_826977
+
+add_erratum_entry cortex_a57, ERRATUM(826977), ERRATA_A57_826977
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #828024.
+ * This applies only to revision <= r1p1 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_828024_wa
+ /*
+ * Compare r0 against revision r1p1
+ */
+ mov r2, lr
+ bl check_errata_828024
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ /*
+ * Setting the relevant bits in CORTEX_A57_CPUACTLR has to be done in 2
+ * instructions here because the resulting bitmask doesn't fit in a
+ * 16-bit value so it cannot be encoded in a single instruction.
+ */
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_NO_ALLOC_WBWA
+ orr64_imm r0, r1, (CORTEX_A57_CPUACTLR_DIS_L1_STREAMING | CORTEX_A57_CPUACTLR_DIS_STREAMING)
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_828024_wa
+
+func check_errata_828024
+ mov r1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_828024
+
+add_erratum_entry cortex_a57, ERRATUM(828024), ERRATA_A57_828024
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #829520.
+ * This applies only to revision <= r1p2 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_829520_wa
+ /*
+ * Compare r0 against revision r1p2
+ */
+ mov r2, lr
+ bl check_errata_829520
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_INDIRECT_PREDICTOR
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_829520_wa
+
+func check_errata_829520
+ mov r1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_829520
+
+add_erratum_entry cortex_a57, ERRATUM(829520), ERRATA_A57_829520
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #833471.
+ * This applies only to revision <= r1p2 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_833471_wa
+ /*
+ * Compare r0 against revision r1p2
+ */
+ mov r2, lr
+ bl check_errata_833471
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r1, r1, CORTEX_A57_CPUACTLR_FORCE_FPSCR_FLUSH
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_833471_wa
+
+func check_errata_833471
+ mov r1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_833471
+
+add_erratum_entry cortex_a57, ERRATUM(833471), ERRATA_A57_833471
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #859972.
+ * This applies only to revision <= r1p3 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_859972_wa
+ mov r2, lr
+ bl check_errata_859972
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r1, r1, CORTEX_A57_CPUACTLR_DIS_INSTR_PREFETCH
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+1:
+ bx lr
+endfunc errata_a57_859972_wa
+
+func check_errata_859972
+ mov r1, #0x13
+ b cpu_rev_var_ls
+endfunc check_errata_859972
+
+add_erratum_entry cortex_a57, ERRATUM(859972), ERRATA_A57_859972
+
+func check_errata_cve_2017_5715
+ mov r0, #ERRATA_MISSING
+ bx lr
+endfunc check_errata_cve_2017_5715
+
+add_erratum_entry cortex_a57, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
+
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2018_3639
+
+add_erratum_entry cortex_a57, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+
+func check_errata_cve_2022_23960
+ mov r0, #ERRATA_MISSING
+ bx lr
+endfunc check_errata_cve_2022_23960
+
+add_erratum_entry cortex_a57, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A57.
+ * Shall clobber: r0-r6
+ * -------------------------------------------------
+ */
+func cortex_a57_reset_func
+ mov r5, lr
+ bl cpu_get_rev_var
+ mov r4, r0
+
+#if ERRATA_A57_806969
+ mov r0, r4
+ bl errata_a57_806969_wa
+#endif
+
+#if ERRATA_A57_813420
+ mov r0, r4
+ bl errata_a57_813420_wa
+#endif
+
+#if ERRATA_A57_814670
+ mov r0, r4
+ bl errata_a57_814670_wa
+#endif
+
+#if A57_DISABLE_NON_TEMPORAL_HINT
+ mov r0, r4
+ bl a57_disable_ldnp_overread
+#endif
+
+#if ERRATA_A57_826974
+ mov r0, r4
+ bl errata_a57_826974_wa
+#endif
+
+#if ERRATA_A57_826977
+ mov r0, r4
+ bl errata_a57_826977_wa
+#endif
+
+#if ERRATA_A57_828024
+ mov r0, r4
+ bl errata_a57_828024_wa
+#endif
+
+#if ERRATA_A57_829520
+ mov r0, r4
+ bl errata_a57_829520_wa
+#endif
+
+#if ERRATA_A57_833471
+ mov r0, r4
+ bl errata_a57_833471_wa
+#endif
+
+#if ERRATA_A57_859972
+ mov r0, r4
+ bl errata_a57_859972_wa
+#endif
+
+#if WORKAROUND_CVE_2018_3639
+ ldcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_STORE
+ stcopr16 r0, r1, CORTEX_A57_CPUACTLR
+ isb
+ dsb sy
+#endif
+
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ ldcopr16 r0, r1, CORTEX_A57_ECTLR
+ orr64_imm r0, r1, CORTEX_A57_ECTLR_SMP_BIT
+ stcopr16 r0, r1, CORTEX_A57_ECTLR
+ isb
+ bx r5
+endfunc cortex_a57_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A57.
+ * ----------------------------------------------------
+ */
+func cortex_a57_core_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a57_disable_ext_debug
+endfunc cortex_a57_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A57.
+ * Clobbers: r0-r3
+ * -------------------------------------------------------
+ */
+func cortex_a57_cluster_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a57_disable_ext_debug
+endfunc cortex_a57_cluster_pwr_dwn
+
+errata_report_shim cortex_a57
+
+declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
+ cortex_a57_reset_func, \
+ cortex_a57_core_pwr_dwn, \
+ cortex_a57_cluster_pwr_dwn
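
CPUACTLR and ECTLR on this core are 64-bit registers, so from AArch32 they are accessed as a low/high word pair: the ldcopr16/stcopr16 macros wrap MRRC/MCRR, and orr64_imm/bic64_imm apply a 64-bit mask to whichever word actually holds the bit. A small C model of that read-modify-write, using a static variable in place of the real register (the accessor names are invented for the sketch):

#include <stdint.h>

static uint64_t cpuactlr_model;	/* stands in for the 64-bit CPUACTLR */

static void read_cpuactlr(uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)cpuactlr_model;			/* MRRC low word  */
	*hi = (uint32_t)(cpuactlr_model >> 32);		/* MRRC high word */
}

static void write_cpuactlr(uint32_t lo, uint32_t hi)
{
	cpuactlr_model = ((uint64_t)hi << 32) | lo;	/* MCRR word pair */
}

/* What an orr64_imm + stcopr16 sequence amounts to for a 64-bit mask. */
static void cpuactlr_set_bits(uint64_t mask)
{
	uint32_t lo, hi;

	read_cpuactlr(&lo, &hi);
	lo |= (uint32_t)mask;
	hi |= (uint32_t)(mask >> 32);
	write_cpuactlr(lo, hi);
}

This is presumably why the workarounds for 833471 and 859972 OR their bit into r1 (the high word) while most others OR into r0: the target bit lives above bit 31.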
diff --git a/lib/cpus/aarch32/cortex_a7.S b/lib/cpus/aarch32/cortex_a7.S
new file mode 100644
index 0000000..4842ca6
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a7.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a7.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a7_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A7_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a7_disable_smp
+
+func cortex_a7_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A7_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a7_enable_smp
+
+func cortex_a7_reset_func
+ b cortex_a7_enable_smp
+endfunc cortex_a7_reset_func
+
+func cortex_a7_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a7_disable_smp
+endfunc cortex_a7_core_pwr_dwn
+
+func cortex_a7_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Flush L2 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a7_disable_smp
+endfunc cortex_a7_cluster_pwr_dwn
+
+errata_report_shim cortex_a7
+
+declare_cpu_ops cortex_a7, CORTEX_A7_MIDR, \
+ cortex_a7_reset_func, \
+ cortex_a7_core_pwr_dwn, \
+ cortex_a7_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
new file mode 100644
index 0000000..77cf84d
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <common/debug.h>
+#include <cortex_a72.h>
+#include <cpu_macros.S>
+
+ /* ---------------------------------------------
+ * Disable all types of L2 prefetches.
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_l2_prefetch
+ ldcopr16 r0, r1, CORTEX_A72_ECTLR
+ orr64_imm r0, r1, CORTEX_A72_ECTLR_DIS_TWD_ACC_PFTCH_BIT
+ bic64_imm r0, r1, (CORTEX_A72_ECTLR_L2_IPFTCH_DIST_MASK | \
+ CORTEX_A72_ECTLR_L2_DPFTCH_DIST_MASK)
+ stcopr16 r0, r1, CORTEX_A72_ECTLR
+ isb
+ bx lr
+endfunc cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_hw_prefetcher
+ ldcopr16 r0, r1, CORTEX_A72_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A72_CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH
+ stcopr16 r0, r1, CORTEX_A72_CPUACTLR
+ isb
+ dsb ish
+ bx lr
+endfunc cortex_a72_disable_hw_prefetcher
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * Clobbers: r0-r1
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_smp
+ ldcopr16 r0, r1, CORTEX_A72_ECTLR
+ bic64_imm r0, r1, CORTEX_A72_ECTLR_SMP_BIT
+ stcopr16 r0, r1, CORTEX_A72_ECTLR
+ bx lr
+endfunc cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Disable debug interfaces
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_ext_debug
+ mov r0, #1
+ stcopr r0, DBGOSDLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a72_disable_ext_debug
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A72 Errata #859971.
+ * This applies only to revision <= r0p3 of Cortex A72.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a72_859971_wa
+	mov	r2, lr
+ bl check_errata_859971
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A72_CPUACTLR
+ orr64_imm r1, r1, CORTEX_A72_CPUACTLR_DIS_INSTR_PREFETCH
+ stcopr16 r0, r1, CORTEX_A72_CPUACTLR
+1:
+ bx lr
+endfunc errata_a72_859971_wa
+
+func check_errata_859971
+ mov r1, #0x03
+ b cpu_rev_var_ls
+endfunc check_errata_859971
+
+add_erratum_entry cortex_a72, ERRATUM(859971), ERRATA_A72_859971
+
+func check_errata_cve_2017_5715
+ mov r0, #ERRATA_MISSING
+ bx lr
+endfunc check_errata_cve_2017_5715
+
+add_erratum_entry cortex_a72, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
+
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2018_3639
+
+add_erratum_entry cortex_a72, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+
+func check_errata_cve_2022_23960
+ mov r0, #ERRATA_MISSING
+ bx lr
+endfunc check_errata_cve_2022_23960
+
+add_erratum_entry cortex_a72, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A72.
+ * -------------------------------------------------
+ */
+func cortex_a72_reset_func
+ mov r5, lr
+ bl cpu_get_rev_var
+ mov r4, r0
+
+#if ERRATA_A72_859971
+ mov r0, r4
+ bl errata_a72_859971_wa
+#endif
+
+#if WORKAROUND_CVE_2018_3639
+ ldcopr16 r0, r1, CORTEX_A72_CPUACTLR
+ orr64_imm r0, r1, CORTEX_A72_CPUACTLR_DIS_LOAD_PASS_STORE
+ stcopr16 r0, r1, CORTEX_A72_CPUACTLR
+ isb
+ dsb sy
+#endif
+
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ ldcopr16 r0, r1, CORTEX_A72_ECTLR
+ orr64_imm r0, r1, CORTEX_A72_ECTLR_SMP_BIT
+ stcopr16 r0, r1, CORTEX_A72_ECTLR
+ isb
+ bx r5
+endfunc cortex_a72_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A72.
+ * ----------------------------------------------------
+ */
+func cortex_a72_core_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_hw_prefetcher
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a72_disable_ext_debug
+endfunc cortex_a72_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A72.
+ * -------------------------------------------------------
+ */
+func cortex_a72_cluster_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_hw_prefetcher
+
+#if !SKIP_A72_L1_FLUSH_PWR_DWN
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+#endif
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* -------------------------------------------------
+ * Flush the L2 caches.
+ * -------------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a72_disable_ext_debug
+endfunc cortex_a72_cluster_pwr_dwn
+
+errata_report_shim cortex_a72
+
+declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
+ cortex_a72_reset_func, \
+ cortex_a72_core_pwr_dwn, \
+ cortex_a72_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a9.S b/lib/cpus/aarch32/cortex_a9.S
new file mode 100644
index 0000000..1e9757a
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a9.S
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a9.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a9_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A9_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a9_disable_smp
+
+func cortex_a9_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A9_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a9_enable_smp
+
+func check_errata_794073
+#if ERRATA_A9_794073
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_794073
+
+add_erratum_entry cortex_a9, ERRATUM(794073), ERRATA_A9_794073
+
+func check_errata_cve_2017_5715
+#if WORKAROUND_CVE_2017_5715
+ mov r0, #ERRATA_APPLIES
+#else
+ mov r0, #ERRATA_MISSING
+#endif
+ bx lr
+endfunc check_errata_cve_2017_5715
+
+add_erratum_entry cortex_a9, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
+
+errata_report_shim cortex_a9
+
+func cortex_a9_reset_func
+#if IMAGE_BL32 && WORKAROUND_CVE_2017_5715
+ ldr r0, =wa_cve_2017_5715_bpiall_vbar
+ stcopr r0, VBAR
+ stcopr r0, MVBAR
+ /* isb will be applied in the course of the reset func */
+#endif
+ b cortex_a9_enable_smp
+endfunc cortex_a9_reset_func
+
+func cortex_a9_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a9_disable_smp
+endfunc cortex_a9_core_pwr_dwn
+
+func cortex_a9_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a9_disable_smp
+endfunc cortex_a9_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a9, CORTEX_A9_MIDR, \
+ cortex_a9_reset_func, \
+ cortex_a9_core_pwr_dwn, \
+ cortex_a9_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
new file mode 100644
index 0000000..83e3e49
--- /dev/null
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_macros.S>
+#include <common/bl_common.h>
+#include <lib/cpus/cpu_ops.h>
+#include <lib/el3_runtime/cpu_data.h>
+
+#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
+ (defined(IMAGE_BL2) && RESET_TO_BL2)
+ /*
+ * The reset handler common to all platforms. After a matching
+	 * cpu_ops structure entry is found, the corresponding reset_handler
+ * in the cpu_ops is invoked. The reset handler is invoked very early
+ * in the boot sequence and it is assumed that we can clobber r0 - r10
+ * without the need to follow AAPCS.
+ * Clobbers: r0 - r10
+ */
+ .globl reset_handler
+func reset_handler
+ mov r8, lr
+
+ /* The plat_reset_handler can clobber r0 - r7 */
+ bl plat_reset_handler
+
+ /* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
+ bl get_cpu_ops_ptr
+
+#if ENABLE_ASSERTIONS
+ cmp r0, #0
+ ASM_ASSERT(ne)
+#endif
+
+ /* Get the cpu_ops reset handler */
+ ldr r1, [r0, #CPU_RESET_FUNC]
+ cmp r1, #0
+ mov lr, r8
+ bxne r1
+ bx lr
+endfunc reset_handler
+
+#endif
+
+#ifdef IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */
+ /*
+ * void prepare_cpu_pwr_dwn(unsigned int power_level)
+ *
+	 * Prepares the CPU for power down; common to all platforms. The function
+	 * takes the power domain level to be powered down as its parameter. After
+	 * the cpu_ops pointer is retrieved from cpu_data, the handler for the
+	 * requested power level is called.
+ */
+ .globl prepare_cpu_pwr_dwn
+func prepare_cpu_pwr_dwn
+ /*
+ * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
+ * power down handler for the last power level
+ */
+ mov r2, #(CPU_MAX_PWR_DWN_OPS - 1)
+ cmp r0, r2
+ movhi r0, r2
+
+ push {r0, lr}
+ bl _cpu_data
+ pop {r2, lr}
+
+ ldr r0, [r0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp r0, #0
+ ASM_ASSERT(ne)
+#endif
+
+ /* Get the appropriate power down handler */
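+	/* Each handler entry is a 4-byte function pointer, hence the 'lsl #2' scaling */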
+ mov r1, #CPU_PWR_DWN_OPS
+ add r1, r1, r2, lsl #2
+ ldr r1, [r0, r1]
+#if ENABLE_ASSERTIONS
+ cmp r1, #0
+ ASM_ASSERT(ne)
+#endif
+ bx r1
+endfunc prepare_cpu_pwr_dwn
+
+ /*
+ * Initializes the cpu_ops_ptr if not already initialized
+ * in cpu_data. This must only be called after the data cache
+ * is enabled. AAPCS is followed.
+ */
+ .globl init_cpu_ops
+func init_cpu_ops
+ push {r4 - r6, lr}
+ bl _cpu_data
+ mov r6, r0
+ ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+ cmp r1, #0
+ bne 1f
+ bl get_cpu_ops_ptr
+#if ENABLE_ASSERTIONS
+ cmp r0, #0
+ ASM_ASSERT(ne)
+#endif
+ str r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
+1:
+ pop {r4 - r6, pc}
+endfunc init_cpu_ops
+
+#endif /* IMAGE_BL32 */
+
+ /*
+ * The below function returns the cpu_ops structure matching the
+ * midr of the core. It reads the MIDR and finds the matching
+ * entry in cpu_ops entries. Only the implementation and part number
+ * are used to match the entries.
+ * Return :
+ * r0 - The matching cpu_ops pointer on Success
+ * r0 - 0 on failure.
+ * Clobbers: r0 - r5
+ */
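+	/*
+	 * Roughly equivalent C (a sketch for illustration only, not part of the
+	 * build; 'ops', 'midr' and 'read_midr()' are hypothetical names):
+	 *
+	 *   for (ops = __CPU_OPS_START__; ops < __CPU_OPS_END__; ops++)
+	 *           if ((ops->midr & CPU_IMPL_PN_MASK) ==
+	 *               (read_midr() & CPU_IMPL_PN_MASK))
+	 *                   return ops;
+	 *   return NULL;
+	 */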
+ .globl get_cpu_ops_ptr
+func get_cpu_ops_ptr
+ /* Get the cpu_ops start and end locations */
+ ldr r4, =(__CPU_OPS_START__ + CPU_MIDR)
+ ldr r5, =(__CPU_OPS_END__ + CPU_MIDR)
+
+ /* Initialize the return parameter */
+ mov r0, #0
+
+	/* Read the MIDR */
+ ldcopr r2, MIDR
+ ldr r3, =CPU_IMPL_PN_MASK
+
+ /* Retain only the implementation and part number using mask */
+ and r2, r2, r3
+1:
+ /* Check if we have reached end of list */
+ cmp r4, r5
+ bhs error_exit
+
+ /* load the midr from the cpu_ops */
+ ldr r1, [r4], #CPU_OPS_SIZE
+ and r1, r1, r3
+
+ /* Check if midr matches to midr of this core */
+ cmp r1, r2
+ bne 1b
+
+ /* Subtract the increment and offset to get the cpu-ops pointer */
+ sub r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
+#if ENABLE_ASSERTIONS
+ cmp r0, #0
+ ASM_ASSERT(ne)
+#endif
+error_exit:
+ bx lr
+endfunc get_cpu_ops_ptr
+
+/*
+ * Extract CPU revision and variant, and combine them into a single numeric for
+ * easier comparison.
+ */
+ .globl cpu_get_rev_var
+func cpu_get_rev_var
+ ldcopr r1, MIDR
+
+ /*
+	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them in
+	 * r0[7:0] as variant[7:4] and revision[3:0]:
+ *
+ * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
+ * extract r1[3:0] into r0[3:0] retaining other bits.
+ */
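+	/* For example, an r2p1 part is reported as 0x21 */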
+ ubfx r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
+ bfi r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
+ bx lr
+endfunc cpu_get_rev_var
+
+/*
+ * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
+ * application purposes. If the revision-variant is less than or the same as
+ * the given value, report that the erratum applies; otherwise report that it
+ * does not.
+ */
+ .globl cpu_rev_var_ls
+func cpu_rev_var_ls
+ cmp r0, r1
+ movls r0, #ERRATA_APPLIES
+ movhi r0, #ERRATA_NOT_APPLIES
+ bx lr
+endfunc cpu_rev_var_ls
+
+/*
+ * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
+ * application purposes. If the revision-variant is higher than or the same as
+ * the given value, report that the erratum applies; otherwise report that it
+ * does not.
+ */
+ .globl cpu_rev_var_hs
+func cpu_rev_var_hs
+ cmp r0, r1
+ movge r0, #ERRATA_APPLIES
+ movlt r0, #ERRATA_NOT_APPLIES
+ bx lr
+endfunc cpu_rev_var_hs
diff --git a/lib/cpus/aarch64/a64fx.S b/lib/cpus/aarch64/a64fx.S
new file mode 100644
index 0000000..54c20c3
--- /dev/null
+++ b/lib/cpus/aarch64/a64fx.S
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2022, Fujitsu Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <a64fx.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+func a64fx_core_pwr_dwn
+endfunc a64fx_core_pwr_dwn
+
+func a64fx_cluster_pwr_dwn
+endfunc a64fx_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for A64FX. Must follow AAPCS.
+ */
+func a64fx_errata_report
+ ret
+endfunc a64fx_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cpu specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.a64fx_regs, "aS"
+a64fx_regs: /* The ascii list of register names to be reported */
+ .asciz ""
+
+func a64fx_cpu_reg_dump
+ adr x6, a64fx_regs
+ ret
+endfunc a64fx_cpu_reg_dump
+
+declare_cpu_ops a64fx, A64FX_MIDR, CPU_NO_RESET_FUNC, \
+ a64fx_core_pwr_dwn, \
+ a64fx_cluster_pwr_dwn
+
diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S
new file mode 100644
index 0000000..d47279a
--- /dev/null
+++ b/lib/cpus/aarch64/aem_generic.S
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2014-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <aem_generic.h>
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+
+func aem_generic_core_pwr_dwn
+ /* ---------------------------------------------
+ * Disable the Data Cache.
+ * ---------------------------------------------
+ */
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+
+ /* ---------------------------------------------
+	 * The AEM model may implement an L3 cache, in which
+	 * case the L2 caches are private per core and a flush
+	 * from L1 to L2 alone is not sufficient.
+ * ---------------------------------------------
+ */
+ mrs x1, clidr_el1
+
+ /* ---------------------------------------------
+ * Check if L3 cache is implemented.
+ * ---------------------------------------------
+ */
+ tst x1, ((1 << CLIDR_FIELD_WIDTH) - 1) << CTYPE_SHIFT(3)
+
+ /* ---------------------------------------------
+	 * If no L3 cache is implemented, flushing L1 to L2 is sufficient.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ b.eq dcsw_op_level1
+
+ mov x18, x30
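+	/* Preserve the return address: the 'bl' below overwrites x30 */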
+
+ /* ---------------------------------------------
+ * Flush L1 cache to L2.
+ * ---------------------------------------------
+ */
+ bl dcsw_op_level1
+ mov x30, x18
+
+ /* ---------------------------------------------
+ * Flush L2 cache to L3.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ b dcsw_op_level2
+endfunc aem_generic_core_pwr_dwn
+
+func aem_generic_cluster_pwr_dwn
+ /* ---------------------------------------------
+ * Disable the Data Cache.
+ * ---------------------------------------------
+ */
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+
+ /* ---------------------------------------------
+ * Flush all caches to PoC.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ b dcsw_op_all
+endfunc aem_generic_cluster_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for AEM. Must follow AAPCS.
+ */
+func aem_generic_errata_report
+ ret
+endfunc aem_generic_errata_report
+#endif
+
+ /* ---------------------------------------------
+ * This function provides cpu specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.aem_generic_regs, "aS"
+aem_generic_regs: /* The ascii list of register names to be reported */
+ .asciz "" /* no registers to report */
+
+func aem_generic_cpu_reg_dump
+ adr x6, aem_generic_regs
+ ret
+endfunc aem_generic_cpu_reg_dump
+
+
+/* cpu_ops for Base AEM FVP */
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+ aem_generic_core_pwr_dwn, \
+ aem_generic_cluster_pwr_dwn
+
+/* cpu_ops for Foundation FVP */
+declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, CPU_NO_RESET_FUNC, \
+ aem_generic_core_pwr_dwn, \
+ aem_generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a35.S b/lib/cpus/aarch64/cortex_a35.S
new file mode 100644
index 0000000..6ffb944
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a35.S
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a35.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func cortex_a35_disable_dcache
+ sysreg_bit_clear sctlr_el3, SCTLR_C_BIT
+ isb
+ ret
+endfunc cortex_a35_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a35_disable_smp
+ sysreg_bit_clear CORTEX_A35_CPUECTLR_EL1, CORTEX_A35_CPUECTLR_SMPEN_BIT
+ isb
+ dsb sy
+ ret
+endfunc cortex_a35_disable_smp
+
+workaround_reset_start cortex_a35, ERRATUM(855472), ERRATA_A35_855472
+ sysreg_bit_set CORTEX_A35_CPUACTLR_EL1, CORTEX_A35_CPUACTLR_EL1_ENDCCASCI
+workaround_reset_end cortex_a35, ERRATUM(855472)
+
+check_erratum_ls cortex_a35, ERRATUM(855472), CPU_REV(0, 0)
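+/* The erratum applies only to revisions up to and including r0p0 */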
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A35.
+ * -------------------------------------------------
+ */
+cpu_reset_func_start cortex_a35
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A35_CPUECTLR_EL1, CORTEX_A35_CPUECTLR_SMPEN_BIT
+cpu_reset_func_end cortex_a35
+
+func cortex_a35_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a35_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a35_disable_smp
+endfunc cortex_a35_core_pwr_dwn
+
+func cortex_a35_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a35_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a35_disable_smp
+endfunc cortex_a35_cluster_pwr_dwn
+
+errata_report_shim cortex_a35
+
+ /* ---------------------------------------------
+ * This function provides cortex_a35 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a35_regs, "aS"
+cortex_a35_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a35_cpu_reg_dump
+ adr x6, cortex_a35_regs
+ mrs x8, CORTEX_A35_CPUECTLR_EL1
+ ret
+endfunc cortex_a35_cpu_reg_dump
+
+declare_cpu_ops cortex_a35, CORTEX_A35_MIDR, \
+ cortex_a35_reset_func, \
+ cortex_a35_core_pwr_dwn, \
+ cortex_a35_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a510.S b/lib/cpus/aarch64/cortex_a510.S
new file mode 100644
index 0000000..a59b92c
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a510.S
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a510.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-A510 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-A510 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+workaround_reset_start cortex_a510, ERRATUM(1922240), ERRATA_A510_1922240
+ /* Apply the workaround by setting IMP_CMPXACTLR_EL1[11:10] = 0b11. */
+ sysreg_bitfield_insert CORTEX_A510_CMPXACTLR_EL1, CORTEX_A510_CMPXACTLR_EL1_SNPPREFERUNIQUE_DISABLE, \
+ CORTEX_A510_CMPXACTLR_EL1_SNPPREFERUNIQUE_SHIFT, CORTEX_A510_CMPXACTLR_EL1_SNPPREFERUNIQUE_WIDTH
+workaround_reset_end cortex_a510, ERRATUM(1922240)
+
+check_erratum_ls cortex_a510, ERRATUM(1922240), CPU_REV(0, 0)
+
+workaround_reset_start cortex_a510, ERRATUM(2041909), ERRATA_A510_2041909
+ /* Apply workaround */
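+	/*
+	 * The writes below target IMPLEMENTATION DEFINED registers
+	 * (S3_6_C15_C4_n) with values taken from the published workaround;
+	 * treat the sequence as opaque.
+	 */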
+ mov x0, xzr
+ msr S3_6_C15_C4_0, x0
+ isb
+
+ mov x0, #0x8500000
+ msr S3_6_C15_C4_2, x0
+
+ mov x0, #0x1F700000
+ movk x0, #0x8, lsl #32
+ msr S3_6_C15_C4_3, x0
+
+ mov x0, #0x3F1
+ movk x0, #0x110, lsl #16
+ msr S3_6_C15_C4_1, x0
+workaround_reset_end cortex_a510, ERRATUM(2041909)
+
+check_erratum_range cortex_a510, ERRATUM(2041909), CPU_REV(0, 2), CPU_REV(0, 2)
+
+workaround_reset_start cortex_a510, ERRATUM(2042739), ERRATA_A510_2042739
+ /* Apply the workaround by disabling ReadPreferUnique. */
+ sysreg_bitfield_insert CORTEX_A510_CPUECTLR_EL1, CORTEX_A510_CPUECTLR_EL1_READPREFERUNIQUE_DISABLE, \
+ CORTEX_A510_CPUECTLR_EL1_READPREFERUNIQUE_SHIFT, CORTEX_A510_CPUECTLR_EL1_READPREFERUNIQUE_WIDTH
+workaround_reset_end cortex_a510, ERRATUM(2042739)
+
+check_erratum_ls cortex_a510, ERRATUM(2042739), CPU_REV(0, 2)
+
+workaround_reset_start cortex_a510, ERRATUM(2080326), ERRATA_A510_2080326
+ /* Apply workaround */
+ mov x0, #1
+ msr S3_6_C15_C4_0, x0
+ isb
+
+ mov x0, #0x0100
+ movk x0, #0x0E08, lsl #16
+ msr S3_6_C15_C4_2, x0
+
+ mov x0, #0x0300
+ movk x0, #0x0F1F, lsl #16
+ movk x0, #0x0008, lsl #32
+ msr S3_6_C15_C4_3, x0
+
+ mov x0, #0x03F1
+ movk x0, #0x00C0, lsl #16
+ msr S3_6_C15_C4_1, x0
+
+ isb
+workaround_reset_end cortex_a510, ERRATUM(2080326)
+
+check_erratum_range cortex_a510, ERRATUM(2080326), CPU_REV(0, 2), CPU_REV(0, 2)
+
+workaround_reset_start cortex_a510, ERRATUM(2172148), ERRATA_A510_2172148
+ /*
+ * Force L2 allocation of transient lines by setting
+ * CPUECTLR_EL1.RSCTL=0b01 and CPUECTLR_EL1.NTCTL=0b01.
+ */
+ mrs x0, CORTEX_A510_CPUECTLR_EL1
+ mov x1, #1
+ bfi x0, x1, #CORTEX_A510_CPUECTLR_EL1_RSCTL_SHIFT, #2
+ bfi x0, x1, #CORTEX_A510_CPUECTLR_EL1_NTCTL_SHIFT, #2
+ msr CORTEX_A510_CPUECTLR_EL1, x0
+workaround_reset_end cortex_a510, ERRATUM(2172148)
+
+check_erratum_ls cortex_a510, ERRATUM(2172148), CPU_REV(1, 0)
+
+workaround_reset_start cortex_a510, ERRATUM(2218950), ERRATA_A510_2218950
+ /* Set bit 18 in CPUACTLR_EL1 */
+ sysreg_bitfield_insert CORTEX_A510_CPUACTLR_EL1, CORTEX_A510_CPUACTLR_EL1_ALIAS_LOADSTORE_DISABLE, \
+ CORTEX_A510_CPUACTLR_EL1_ALIAS_LOADSTORE_SHIFT, CORTEX_A510_CPUACTLR_EL1_ALIAS_LOADSTORE_WIDTH
+
+ /* Set bit 25 in CMPXACTLR_EL1 */
+ sysreg_bitfield_insert CORTEX_A510_CMPXACTLR_EL1, CORTEX_A510_CMPXACTLR_EL1_ALIAS_LOADSTORE_DISABLE, \
+ CORTEX_A510_CMPXACTLR_EL1_ALIAS_LOADSTORE_SHIFT, CORTEX_A510_CMPXACTLR_EL1_ALIAS_LOADSTORE_WIDTH
+
+workaround_reset_end cortex_a510, ERRATUM(2218950)
+
+check_erratum_ls cortex_a510, ERRATUM(2218950), CPU_REV(1, 0)
+
+ /* --------------------------------------------------
+ * This workaround is not a typical errata fix. MPMM
+ * is disabled here, but this conflicts with the BL31
+ * MPMM support. So in addition to simply disabling
+ * the feature, a flag is set in the MPMM library
+ * indicating that it should not be enabled even if
+ * ENABLE_MPMM=1.
+ * --------------------------------------------------
+ */
+workaround_reset_start cortex_a510, ERRATUM(2250311), ERRATA_A510_2250311
+ /* Disable MPMM */
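+	/* Clear bit[0] of CPUMPMMCR_EL3, which holds the MPMM enable control */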
+ mrs x0, CPUMPMMCR_EL3
+ bfm x0, xzr, #0, #0 /* bfc instruction does not work in GCC */
+ msr CPUMPMMCR_EL3, x0
+
+#if ENABLE_MPMM && IMAGE_BL31
+ /* If ENABLE_MPMM is set, tell the runtime lib to skip enabling it. */
+ bl mpmm_errata_disable
+#endif
+workaround_reset_end cortex_a510, ERRATUM(2250311)
+
+check_erratum_ls cortex_a510, ERRATUM(2250311), CPU_REV(1, 0)
+
+workaround_reset_start cortex_a510, ERRATUM(2288014), ERRATA_A510_2288014
+ /* Apply the workaround by setting IMP_CPUACTLR_EL1[18] = 0b1. */
+ sysreg_bitfield_insert CORTEX_A510_CPUACTLR_EL1, CORTEX_A510_CPUACTLR_EL1_DATA_CORRUPT_DISABLE, \
+ CORTEX_A510_CPUACTLR_EL1_DATA_CORRUPT_SHIFT, CORTEX_A510_CPUACTLR_EL1_DATA_CORRUPT_WIDTH
+workaround_reset_end cortex_a510, ERRATUM(2288014)
+
+check_erratum_ls cortex_a510, ERRATUM(2288014), CPU_REV(1, 0)
+
+workaround_reset_start cortex_a510, ERRATUM(2347730), ERRATA_A510_2347730
+ /*
+ * Set CPUACTLR_EL1[17] to 1'b1, which disables
+ * specific microarchitectural clock gating
+ * behaviour.
+ */
+ sysreg_bit_set CORTEX_A510_CPUACTLR_EL1, CORTEX_A510_CPUACTLR_EL1_BIT_17
+workaround_reset_end cortex_a510, ERRATUM(2347730)
+
+check_erratum_ls cortex_a510, ERRATUM(2347730), CPU_REV(1, 1)
+
+workaround_reset_start cortex_a510, ERRATUM(2371937), ERRATA_A510_2371937
+ /*
+ * Cacheable atomic operations can be forced
+ * to be executed near by setting
+ * IMP_CPUECTLR_EL1.ATOM=0b010. ATOM is found
+ * in [40:38] of CPUECTLR_EL1.
+ */
+ sysreg_bitfield_insert CORTEX_A510_CPUECTLR_EL1, CORTEX_A510_CPUECTLR_EL1_ATOM_EXECALLINSTRNEAR, \
+ CORTEX_A510_CPUECTLR_EL1_ATOM_SHIFT, CORTEX_A510_CPUECTLR_EL1_ATOM_WIDTH
+workaround_reset_end cortex_a510, ERRATUM(2371937)
+
+check_erratum_ls cortex_a510, ERRATUM(2371937), CPU_REV(1, 1)
+
+workaround_reset_start cortex_a510, ERRATUM(2666669), ERRATA_A510_2666669
+ sysreg_bit_set CORTEX_A510_CPUACTLR_EL1, CORTEX_A510_CPUACTLR_EL1_BIT_38
+workaround_reset_end cortex_a510, ERRATUM(2666669)
+
+check_erratum_ls cortex_a510, ERRATUM(2666669), CPU_REV(1, 1)
+
+.global erratum_cortex_a510_2684597_wa
+workaround_runtime_start cortex_a510, ERRATUM(2684597), ERRATA_A510_2684597, CORTEX_A510_MIDR
+ /*
+ * Many assemblers do not yet understand the "tsb csync" mnemonic,
+ * so use the equivalent hint instruction.
+ */
+ hint #18 /* tsb csync */
+workaround_runtime_end cortex_a510, ERRATUM(2684597)
+
+check_erratum_ls cortex_a510, ERRATUM(2684597), CPU_REV(1, 2)
+
+/*
+ * ERRATA_DSU_2313941:
+ * This erratum is defined in dsu_helpers.S but applies to cortex_a510 as well.
+ * Hence, symbolic names are created for the existing workaround functions so
+ * that they get registered under the Errata Framework.
+ */
+.equ check_erratum_cortex_a510_2313941, check_errata_dsu_2313941
+.equ erratum_cortex_a510_2313941_wa, errata_dsu_2313941_wa
+add_erratum_entry cortex_a510, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_a510_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A510_CPUPWRCTLR_EL1, CORTEX_A510_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ isb
+ ret
+endfunc cortex_a510_core_pwr_dwn
+
+errata_report_shim cortex_a510
+
+cpu_reset_func_start cortex_a510
+ /* Disable speculative loads */
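+	/* SSBS = 0 is the 'safe' setting: speculative store bypassing is not permitted */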
+ msr SSBS, xzr
+cpu_reset_func_end cortex_a510
+
+ /* ---------------------------------------------
+ * This function provides Cortex-A510 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a510_regs, "aS"
+cortex_a510_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a510_cpu_reg_dump
+ adr x6, cortex_a510_regs
+ mrs x8, CORTEX_A510_CPUECTLR_EL1
+ ret
+endfunc cortex_a510_cpu_reg_dump
+
+declare_cpu_ops cortex_a510, CORTEX_A510_MIDR, \
+ cortex_a510_reset_func, \
+ cortex_a510_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a520.S b/lib/cpus/aarch64/cortex_a520.S
new file mode 100644
index 0000000..6c2f33e
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a520.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a520.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex A520 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex A520 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_a520_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A520_CPUPWRCTLR_EL1, CORTEX_A520_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ isb
+ ret
+endfunc cortex_a520_core_pwr_dwn
+
+errata_report_shim cortex_a520
+
+cpu_reset_func_start cortex_a520
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end cortex_a520
+
+ /* ---------------------------------------------
+ * This function provides Cortex A520 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a520_regs, "aS"
+cortex_a520_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a520_cpu_reg_dump
+ adr x6, cortex_a520_regs
+ mrs x8, CORTEX_A520_CPUECTLR_EL1
+ ret
+endfunc cortex_a520_cpu_reg_dump
+
+declare_cpu_ops cortex_a520, CORTEX_A520_MIDR, \
+ cortex_a520_reset_func, \
+ cortex_a520_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
new file mode 100644
index 0000000..e6fb08a
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cortex_a53.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include <lib/cpus/errata.h>
+
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func cortex_a53_disable_dcache
+ sysreg_bit_clear sctlr_el3, SCTLR_C_BIT
+ isb
+ ret
+endfunc cortex_a53_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a53_disable_smp
+ sysreg_bit_clear CORTEX_A53_ECTLR_EL1, CORTEX_A53_ECTLR_SMP_BIT
+ isb
+ dsb sy
+ ret
+endfunc cortex_a53_disable_smp
+
+/* Due to the nature of this erratum, the workaround is applied unconditionally when chosen */
+check_erratum_ls cortex_a53, ERRATUM(819472), CPU_REV(0, 1)
+/* erratum workaround is interleaved with generic code */
+add_erratum_entry cortex_a53, ERRATUM(819472), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
+
+/* Due to the nature of this erratum, the workaround is applied unconditionally when chosen */
+check_erratum_ls cortex_a53, ERRATUM(824069), CPU_REV(0, 2)
+/* erratum workaround is interleaved with generic code */
+add_erratum_entry cortex_a53, ERRATUM(824069), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
+
+workaround_reset_start cortex_a53, ERRATUM(826319), ERRATA_A53_826319
+ mrs x1, CORTEX_A53_L2ACTLR_EL1
+ bic x1, x1, #CORTEX_A53_L2ACTLR_ENABLE_UNIQUECLEAN
+ orr x1, x1, #CORTEX_A53_L2ACTLR_DISABLE_CLEAN_PUSH
+ msr CORTEX_A53_L2ACTLR_EL1, x1
+workaround_reset_end cortex_a53, ERRATUM(826319)
+
+check_erratum_ls cortex_a53, ERRATUM(826319), CPU_REV(0, 2)
+
+/* Due to the nature of this erratum, the workaround is applied unconditionally when chosen */
+check_erratum_ls cortex_a53, ERRATUM(827319), CPU_REV(0, 2)
+/* erratum workaround is interleaved with generic code */
+add_erratum_entry cortex_a53, ERRATUM(827319), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
+
+check_erratum_custom_start cortex_a53, ERRATUM(835769)
+ cmp x0, CPU_REV(0, 4)
+ b.hi errata_not_applies
+ /*
+ * Fix potentially available for revisions r0p2, r0p3 and r0p4.
+ * If r0p2, r0p3 or r0p4; check for fix in REVIDR, else exit.
+ */
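+	/* r0p0 and r0p1 have no fix available, so the erratum always applies to them */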
+ cmp x0, #0x01
+ mov x0, #ERRATA_APPLIES
+ b.ls exit_check_errata_835769
+ /* Load REVIDR. */
+ mrs x1, revidr_el1
+ /* If REVIDR[7] is set (fix exists) set ERRATA_NOT_APPLIES, else exit. */
+ tbz x1, #7, exit_check_errata_835769
+errata_not_applies:
+ mov x0, #ERRATA_NOT_APPLIES
+exit_check_errata_835769:
+ ret
+check_erratum_custom_end cortex_a53, ERRATUM(835769)
+
+/* workaround at build time */
+add_erratum_entry cortex_a53, ERRATUM(835769), ERRATA_A53_835769, NO_APPLY_AT_RESET
+
+ /*
+ * Disable the cache non-temporal hint.
+ *
+ * This ignores the Transient allocation hint in the MAIR and treats
+ * allocations the same as non-transient allocation types. As a result,
+ * the LDNP and STNP instructions in AArch64 behave the same as the
+ * equivalent LDP and STP instructions.
+ */
+workaround_reset_start cortex_a53, ERRATUM(836870), ERRATA_A53_836870 | A53_DISABLE_NON_TEMPORAL_HINT
+ sysreg_bit_set CORTEX_A53_CPUACTLR_EL1, CORTEX_A53_CPUACTLR_EL1_DTAH
+workaround_reset_end cortex_a53, ERRATUM(836870)
+
+check_erratum_ls cortex_a53, ERRATUM(836870), CPU_REV(0, 3)
+
+check_erratum_custom_start cortex_a53, ERRATUM(843419)
+ mov x1, #ERRATA_APPLIES
+ mov x2, #ERRATA_NOT_APPLIES
+ cmp x0, CPU_REV(0, 4)
+ csel x0, x1, x2, ls
+ /*
+ * Fix potentially available for revision r0p4.
+ * If r0p4 check for fix in REVIDR, else exit.
+ */
+ b.ne exit_check_errata_843419
+ /* Load REVIDR. */
+ mrs x3, revidr_el1
+ /* If REVIDR[8] is set (fix exists) set ERRATA_NOT_APPLIES, else exit. */
+ tbz x3, #8, exit_check_errata_843419
+ mov x0, x2
+exit_check_errata_843419:
+ ret
+check_erratum_custom_end cortex_a53, ERRATUM(843419)
+
+/* workaround at build time */
+add_erratum_entry cortex_a53, ERRATUM(843419), ERRATA_A53_843419, NO_APPLY_AT_RESET
+
+ /*
+ * Earlier revisions of the core are affected as well, but don't
+ * have the chicken bit in the CPUACTLR register. It is expected that
+ * the rich OS takes care of that, especially as the workaround is
+	 * shared with other errata in those revisions of the CPU.
+ */
+workaround_reset_start cortex_a53, ERRATUM(855873), ERRATA_A53_855873
+ sysreg_bit_set CORTEX_A53_CPUACTLR_EL1, CORTEX_A53_CPUACTLR_EL1_ENDCCASCI
+workaround_reset_end cortex_a53, ERRATUM(855873)
+
+check_erratum_hs cortex_a53, ERRATUM(855873), CPU_REV(0, 3)
+
+check_erratum_chosen cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924
+
+/* erratum has no workaround in the cpu. Generic code must take care */
+add_erratum_entry cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924, NO_APPLY_AT_RESET
+
+cpu_reset_func_start cortex_a53
+ /* Enable the SMP bit. */
+ sysreg_bit_set CORTEX_A53_ECTLR_EL1, CORTEX_A53_ECTLR_SMP_BIT
+cpu_reset_func_end cortex_a53
+
+func cortex_a53_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a53_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a53_disable_smp
+endfunc cortex_a53_core_pwr_dwn
+
+func cortex_a53_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a53_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a53_disable_smp
+endfunc cortex_a53_cluster_pwr_dwn
+
+errata_report_shim cortex_a53
+
+ /* ---------------------------------------------
+ * This function provides cortex_a53 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a53_regs, "aS"
+cortex_a53_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", "cpumerrsr_el1", "l2merrsr_el1", \
+ "cpuactlr_el1", ""
+
+func cortex_a53_cpu_reg_dump
+ adr x6, cortex_a53_regs
+ mrs x8, CORTEX_A53_ECTLR_EL1
+ mrs x9, CORTEX_A53_MERRSR_EL1
+ mrs x10, CORTEX_A53_L2MERRSR_EL1
+ mrs x11, CORTEX_A53_CPUACTLR_EL1
+ ret
+endfunc cortex_a53_cpu_reg_dump
+
+declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
+ cortex_a53_reset_func, \
+ cortex_a53_core_pwr_dwn, \
+ cortex_a53_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S
new file mode 100644
index 0000000..712b6e0
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a55.S
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a55.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-A55 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+ .globl cortex_a55_reset_func
+ .globl cortex_a55_core_pwr_dwn
+
+/* ERRATA_DSU_798953:
+ * This erratum is defined in dsu_helpers.S but applies to cortex_a55 as well.
+ * Hence, symbolic names are created for the existing workaround functions so
+ * that they get registered under the Errata Framework.
+ */
+.equ check_erratum_cortex_a55_798953, check_errata_dsu_798953
+.equ erratum_cortex_a55_798953_wa, errata_dsu_798953_wa
+add_erratum_entry cortex_a55, ERRATUM(798953), ERRATA_DSU_798953, APPLY_AT_RESET
+
+/* ERRATA_DSU_936184:
+ * This erratum is defined in dsu_helpers.S but applies to cortex_a55 as well.
+ * Hence, symbolic names are created for the existing workaround functions so
+ * that they get registered under the Errata Framework.
+ */
+.equ check_erratum_cortex_a55_936184, check_errata_dsu_936184
+.equ erratum_cortex_a55_936184_wa, errata_dsu_936184_wa
+add_erratum_entry cortex_a55, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+
+workaround_reset_start cortex_a55, ERRATUM(768277), ERRATA_A55_768277
+ sysreg_bit_set CORTEX_A55_CPUACTLR_EL1, CORTEX_A55_CPUACTLR_EL1_DISABLE_DUAL_ISSUE
+workaround_reset_end cortex_a55, ERRATUM(768277)
+
+check_erratum_ls cortex_a55, ERRATUM(768277), CPU_REV(0, 0)
+
+workaround_reset_start cortex_a55, ERRATUM(778703), ERRATA_A55_778703
+ sysreg_bit_set CORTEX_A55_CPUECTLR_EL1, CORTEX_A55_CPUECTLR_EL1_L1WSCTL
+ sysreg_bit_set CORTEX_A55_CPUACTLR_EL1, CORTEX_A55_CPUACTLR_EL1_DISABLE_WRITE_STREAMING
+workaround_reset_end cortex_a55, ERRATUM(778703)
+
+check_erratum_custom_start cortex_a55, ERRATUM(778703)
+ mov x16, x30
+ mov x1, #0x00
+ bl cpu_rev_var_ls
+ /*
+ * Check that no private L2 cache is configured
+ */
+ mrs x1, CORTEX_A55_CLIDR_EL1
+ and x1, x1, CORTEX_A55_CLIDR_EL1_CTYPE3
+ cmp x1, #0
+ mov x2, #ERRATA_NOT_APPLIES
+ csel x0, x0, x2, eq
+ ret x16
+check_erratum_custom_end cortex_a55, ERRATUM(778703)
+
+workaround_reset_start cortex_a55, ERRATUM(798797), ERRATA_A55_798797
+ sysreg_bit_set CORTEX_A55_CPUACTLR_EL1, CORTEX_A55_CPUACTLR_EL1_DISABLE_L1_PAGEWALKS
+workaround_reset_end cortex_a55, ERRATUM(798797)
+
+check_erratum_ls cortex_a55, ERRATUM(798797), CPU_REV(0, 0)
+
+workaround_reset_start cortex_a55, ERRATUM(846532), ERRATA_A55_846532
+ sysreg_bit_set CORTEX_A55_CPUACTLR_EL1, CORTEX_A55_CPUACTLR_EL1_DISABLE_DUAL_ISSUE
+workaround_reset_end cortex_a55, ERRATUM(846532)
+
+check_erratum_ls cortex_a55, ERRATUM(846532), CPU_REV(0, 1)
+
+workaround_reset_start cortex_a55, ERRATUM(903758), ERRATA_A55_903758
+ sysreg_bit_set CORTEX_A55_CPUACTLR_EL1, CORTEX_A55_CPUACTLR_EL1_DISABLE_L1_PAGEWALKS
+workaround_reset_end cortex_a55, ERRATUM(903758)
+
+check_erratum_ls cortex_a55, ERRATUM(903758), CPU_REV(0, 1)
+
+workaround_reset_start cortex_a55, ERRATUM(1221012), ERRATA_A55_1221012
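+	/*
+	 * Two entries of the implementation defined CPUPOR/CPUPMR/CPUPCR_EL3
+	 * register group are programmed below; CPUPSELR_EL3 selects the entry
+	 * being written.
+	 */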
+ mov x0, #0x0020
+ movk x0, #0x0850, lsl #16
+ msr CPUPOR_EL3, x0
+ mov x0, #0x0000
+ movk x0, #0x1FF0, lsl #16
+ movk x0, #0x2, lsl #32
+ msr CPUPMR_EL3, x0
+ mov x0, #0x03fd
+ movk x0, #0x0110, lsl #16
+ msr CPUPCR_EL3, x0
+ mov x0, #0x1
+ msr CPUPSELR_EL3, x0
+ mov x0, #0x0040
+ movk x0, #0x08D0, lsl #16
+ msr CPUPOR_EL3, x0
+ mov x0, #0x0040
+ movk x0, #0x1FF0, lsl #16
+ movk x0, #0x2, lsl #32
+ msr CPUPMR_EL3, x0
+ mov x0, #0x03fd
+ movk x0, #0x0110, lsl #16
+ msr CPUPCR_EL3, x0
+workaround_reset_end cortex_a55, ERRATUM(1221012)
+
+check_erratum_ls cortex_a55, ERRATUM(1221012), CPU_REV(1, 0)
+
+check_erratum_chosen cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923
+
+/* erratum has no workaround in the cpu. Generic code must take care */
+add_erratum_entry cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923, NO_APPLY_AT_RESET
+
+cpu_reset_func_start cortex_a55
+cpu_reset_func_end cortex_a55
+
+errata_report_shim cortex_a55
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_a55_core_pwr_dwn
+ sysreg_bit_set CORTEX_A55_CPUPWRCTLR_EL1, CORTEX_A55_CORE_PWRDN_EN_MASK
+ isb
+ ret
+endfunc cortex_a55_core_pwr_dwn
+
+ /* ---------------------------------------------
+ * This function provides cortex_a55 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a55_regs, "aS"
+cortex_a55_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a55_cpu_reg_dump
+ adr x6, cortex_a55_regs
+ mrs x8, CORTEX_A55_CPUECTLR_EL1
+ ret
+endfunc cortex_a55_cpu_reg_dump
+
+declare_cpu_ops cortex_a55, CORTEX_A55_MIDR, \
+ cortex_a55_reset_func, \
+ cortex_a55_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
new file mode 100644
index 0000000..8fafaca
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -0,0 +1,316 @@
+/*
+ * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cortex_a57.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_dcache
+ sysreg_bit_clear sctlr_el3, SCTLR_C_BIT
+ isb
+ ret
+endfunc cortex_a57_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable all types of L2 prefetches.
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_l2_prefetch
+ mrs x0, CORTEX_A57_ECTLR_EL1
+ orr x0, x0, #CORTEX_A57_ECTLR_DIS_TWD_ACC_PFTCH_BIT
+ mov x1, #CORTEX_A57_ECTLR_L2_IPFTCH_DIST_MASK
+ orr x1, x1, #CORTEX_A57_ECTLR_L2_DPFTCH_DIST_MASK
+ bic x0, x0, x1
+ msr CORTEX_A57_ECTLR_EL1, x0
+ isb
+ dsb ish
+ ret
+endfunc cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_smp
+ sysreg_bit_clear CORTEX_A57_ECTLR_EL1, CORTEX_A57_ECTLR_SMP_BIT
+ ret
+endfunc cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Disable debug interfaces
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_ext_debug
+ mov x0, #1
+ msr osdlr_el1, x0
+ isb
+
+ apply_erratum cortex_a57, ERRATUM(817169), ERRATA_A57_817169
+
+ dsb sy
+ ret
+endfunc cortex_a57_disable_ext_debug
+
+/*
+ * Disable the over-read from the LDNP/STNP instructions. The SDEN doesn't
+ * provide an erratum number, so assign it an obvious 1.
+ */
+workaround_reset_start cortex_a57, ERRATUM(1), A57_DISABLE_NON_TEMPORAL_HINT
+ sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_DIS_OVERREAD
+workaround_reset_end cortex_a57, ERRATUM(1)
+
+check_erratum_ls cortex_a57, ERRATUM(1), CPU_REV(1, 2)
+
+workaround_reset_start cortex_a57, ERRATUM(806969), ERRATA_A57_806969
+ sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_NO_ALLOC_WBWA
+workaround_reset_end cortex_a57, ERRATUM(806969)
+
+check_erratum_ls cortex_a57, ERRATUM(806969), CPU_REV(0, 0)
+
+/* erratum always worked around, but report it correctly */
+check_erratum_ls cortex_a57, ERRATUM(813419), CPU_REV(0, 0)
+add_erratum_entry cortex_a57, ERRATUM(813419), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
+
+workaround_reset_start cortex_a57, ERRATUM(813420), ERRATA_A57_813420
+ sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_DCC_AS_DCCI
+workaround_reset_end cortex_a57, ERRATUM(813420)
+
+check_erratum_ls cortex_a57, ERRATUM(813420), CPU_REV(0, 0)
+
+workaround_reset_start cortex_a57, ERRATUM(814670), ERRATA_A57_814670
+ sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_DIS_DMB_NULLIFICATION
+workaround_reset_end cortex_a57, ERRATUM(814670)
+
+check_erratum_ls cortex_a57, ERRATUM(814670), CPU_REV(0, 0)
+
+workaround_runtime_start cortex_a57, ERRATUM(817169), ERRATA_A57_817169, CORTEX_A57_MIDR
+ /* Invalidate any TLB address */
+ mov x0, #0
+ tlbi vae3, x0
+workaround_runtime_end cortex_a57, ERRATUM(817169), NO_ISB
+
+check_erratum_ls cortex_a57, ERRATUM(817169), CPU_REV(0, 1)
+
+workaround_reset_start cortex_a57, ERRATUM(826974), ERRATA_A57_826974
+ sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_DMB
+workaround_reset_end cortex_a57, ERRATUM(826974)
+
+check_erratum_ls cortex_a57, ERRATUM(826974), CPU_REV(1, 1)
+
+workaround_reset_start cortex_a57, ERRATUM(826977), ERRATA_A57_826977
+ sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_GRE_NGRE_AS_NGNRE
+workaround_reset_end cortex_a57, ERRATUM(826977)
+
+check_erratum_ls cortex_a57, ERRATUM(826977), CPU_REV(1, 1)
+
+workaround_reset_start cortex_a57, ERRATUM(828024), ERRATA_A57_828024
+ mrs x1, CORTEX_A57_CPUACTLR_EL1
+ /*
+ * Setting the relevant bits in CPUACTLR_EL1 has to be done in 2
+ * instructions here because the resulting bitmask doesn't fit in a
+ * 16-bit value so it cannot be encoded in a single instruction.
+ */
+ orr x1, x1, #CORTEX_A57_CPUACTLR_EL1_NO_ALLOC_WBWA
+ orr x1, x1, #(CORTEX_A57_CPUACTLR_EL1_DIS_L1_STREAMING | \
+ CORTEX_A57_CPUACTLR_EL1_DIS_STREAMING)
+ msr CORTEX_A57_CPUACTLR_EL1, x1
+workaround_reset_end cortex_a57, ERRATUM(828024)
+
+check_erratum_ls cortex_a57, ERRATUM(828024), CPU_REV(1, 1)
+
+workaround_reset_start cortex_a57, ERRATUM(829520), ERRATA_A57_829520
+ sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_DIS_INDIRECT_PREDICTOR
+workaround_reset_end cortex_a57, ERRATUM(829520)
+
+check_erratum_ls cortex_a57, ERRATUM(829520), CPU_REV(1, 2)
+
+workaround_reset_start cortex_a57, ERRATUM(833471), ERRATA_A57_833471
+ sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_FORCE_FPSCR_FLUSH
+workaround_reset_end cortex_a57, ERRATUM(833471)
+
+check_erratum_ls cortex_a57, ERRATUM(833471), CPU_REV(1, 2)
+
+workaround_reset_start cortex_a57, ERRATUM(859972), ERRATA_A57_859972
+ sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_DIS_INSTR_PREFETCH
+workaround_reset_end cortex_a57, ERRATUM(859972)
+
+check_erratum_ls cortex_a57, ERRATUM(859972), CPU_REV(1, 3)
+
+check_erratum_chosen cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537
+/* erratum has no workaround in the cpu. Generic code must take care */
+add_erratum_entry cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537, NO_APPLY_AT_RESET
+
+workaround_reset_start cortex_a57, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
+#if IMAGE_BL31
+ override_vector_table wa_cve_2017_5715_mmu_vbar
+#endif
+workaround_reset_end cortex_a57, CVE(2017, 5715)
+
+check_erratum_chosen cortex_a57, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
+
+workaround_reset_start cortex_a57, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+ sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_STORE
+ isb
+ dsb sy
+workaround_reset_end cortex_a57, CVE(2018, 3639)
+
+check_erratum_chosen cortex_a57, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+
+workaround_reset_start cortex_a57, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ override_vector_table wa_cve_2017_5715_mmu_vbar
+#endif
+workaround_reset_end cortex_a57, CVE(2022, 23960)
+
+check_erratum_chosen cortex_a57, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+cpu_reset_func_start cortex_a57
+#if A57_ENABLE_NONCACHEABLE_LOAD_FWD
+ /* Enable higher performance non-cacheable load forwarding */
+ sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_EN_NC_LOAD_FWD
+#endif
+ /* Enable the SMP bit. */
+ sysreg_bit_set CORTEX_A57_ECTLR_EL1, CORTEX_A57_ECTLR_SMP_BIT
+cpu_reset_func_end cortex_a57
+
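+/*
+ * Used as the SMCCC_ARCH_WORKAROUND_3 check in the cpu_ops declared below:
+ * the mitigation is reported as always required on this core.
+ */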
+func check_smccc_arch_workaround_3
+ mov x0, #ERRATA_APPLIES
+ ret
+endfunc check_smccc_arch_workaround_3
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A57.
+ * ----------------------------------------------------
+ */
+func cortex_a57_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a57_disable_ext_debug
+endfunc cortex_a57_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A57.
+ * -------------------------------------------------------
+ */
+func cortex_a57_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_l2_prefetch
+
+#if !SKIP_A57_L1_FLUSH_PWR_DWN
+ /* -------------------------------------------------
+ * Flush the L1 caches.
+ * -------------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+#endif
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* -------------------------------------------------
+ * Flush the L2 caches.
+ * -------------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a57_disable_ext_debug
+endfunc cortex_a57_cluster_pwr_dwn
+
+errata_report_shim cortex_a57
+
+ /* ---------------------------------------------
+ * This function provides cortex_a57 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a57_regs, "aS"
+cortex_a57_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", "cpumerrsr_el1", "l2merrsr_el1", ""
+
+func cortex_a57_cpu_reg_dump
+ adr x6, cortex_a57_regs
+ mrs x8, CORTEX_A57_ECTLR_EL1
+ mrs x9, CORTEX_A57_MERRSR_EL1
+ mrs x10, CORTEX_A57_L2MERRSR_EL1
+ ret
+endfunc cortex_a57_cpu_reg_dump
+
+declare_cpu_ops_wa cortex_a57, CORTEX_A57_MIDR, \
+ cortex_a57_reset_func, \
+ check_erratum_cortex_a57_5715, \
+ CPU_NO_EXTRA2_FUNC, \
+ check_smccc_arch_workaround_3, \
+ cortex_a57_core_pwr_dwn, \
+ cortex_a57_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a65.S b/lib/cpus/aarch64/cortex_a65.S
new file mode 100644
index 0000000..666324c
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a65.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cortex_a65.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if !HW_ASSISTED_COHERENCY
+#error "Cortex-A65 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS
+#error "Cortex-A65 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+/* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A65.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
+func cortex_a65_reset_func
+ mov x19, x30
+
+#if ERRATA_DSU_936184
+ bl errata_dsu_936184_wa
+#endif
+
+ ret x19
+endfunc cortex_a65_reset_func
+
+func cortex_a65_cpu_pwr_dwn
+ mrs x0, CORTEX_A65_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A65_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_A65_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a65_cpu_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A65. Must follow AAPCS.
+ */
+func cortex_a65_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+	 * the checking function of each erratum.
+ */
+ report_errata ERRATA_DSU_936184, cortex_a65, dsu_936184
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a65_errata_report
+#endif
+
+.section .rodata.cortex_a65_regs, "aS"
+cortex_a65_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a65_cpu_reg_dump
+ adr x6, cortex_a65_regs
+ mrs x8, CORTEX_A65_ECTLR_EL1
+ ret
+endfunc cortex_a65_cpu_reg_dump
+
+declare_cpu_ops cortex_a65, CORTEX_A65_MIDR, \
+ cortex_a65_reset_func, \
+ cortex_a65_cpu_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a65ae.S b/lib/cpus/aarch64/cortex_a65ae.S
new file mode 100644
index 0000000..85d1894
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a65ae.S
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cortex_a65ae.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if !HW_ASSISTED_COHERENCY
+#error "Cortex-A65AE must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS
+#error "Cortex-A65AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+	/*
+	 * ERRATA_DSU_936184:
+	 * This erratum is defined in dsu_helpers.S but applies to cortex_a65ae
+	 * as well. Hence, symbolic names are created for the existing workaround
+	 * functions so that they get registered under the Errata Framework.
+	 */
+.equ check_erratum_cortex_a65ae_936184, check_errata_dsu_936184
+.equ erratum_cortex_a65ae_936184_wa, errata_dsu_936184_wa
+add_erratum_entry cortex_a65ae, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+
+cpu_reset_func_start cortex_a65ae
+cpu_reset_func_end cortex_a65ae
+
+func cortex_a65ae_cpu_pwr_dwn
+ sysreg_bit_set CORTEX_A65AE_CPUPWRCTLR_EL1, CORTEX_A65AE_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ isb
+ ret
+endfunc cortex_a65ae_cpu_pwr_dwn
+
+errata_report_shim cortex_a65ae
+
+.section .rodata.cortex_a65ae_regs, "aS"
+cortex_a65ae_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a65ae_cpu_reg_dump
+ adr x6, cortex_a65ae_regs
+ mrs x8, CORTEX_A65AE_ECTLR_EL1
+ ret
+endfunc cortex_a65ae_cpu_reg_dump
+
+declare_cpu_ops cortex_a65ae, CORTEX_A65AE_MIDR, \
+ cortex_a65ae_reset_func, \
+ cortex_a65ae_cpu_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
new file mode 100644
index 0000000..f3931d7
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a710.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex A710 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex A710 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A710_BHB_LOOP_COUNT, cortex_a710
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+workaround_reset_start cortex_a710, ERRATUM(1987031), ERRATA_A710_1987031
+ ldr x0,=0x6
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xF3A08002
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFF0F7FE
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x40000001003ff
+ msr S3_6_c15_c8_1,x0
+ ldr x0,=0x7
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xBF200000
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFEF0000
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x40000001003f3
+ msr S3_6_c15_c8_1,x0
+workaround_reset_end cortex_a710, ERRATUM(1987031)
+
+check_erratum_ls cortex_a710, ERRATUM(1987031), CPU_REV(2, 0)
+
+workaround_runtime_start cortex_a710, ERRATUM(2008768), ERRATA_A710_2008768
+ /* Stash ERRSELR_EL1 in x2 */
+ mrs x2, ERRSELR_EL1
+
+ /* Select error record 0 and clear ED bit */
+ msr ERRSELR_EL1, xzr
+ mrs x1, ERXCTLR_EL1
+ bfi x1, xzr, #ERXCTLR_ED_SHIFT, #1
+ msr ERXCTLR_EL1, x1
+
+ /* Select error record 1 and clear ED bit */
+ mov x0, #1
+ msr ERRSELR_EL1, x0
+ mrs x1, ERXCTLR_EL1
+ bfi x1, xzr, #ERXCTLR_ED_SHIFT, #1
+ msr ERXCTLR_EL1, x1
+
+ /* Restore ERRSELR_EL1 from x2 */
+ msr ERRSELR_EL1, x2
+workaround_runtime_end cortex_a710, ERRATUM(2008768), NO_ISB
+
+check_erratum_ls cortex_a710, ERRATUM(2008768), CPU_REV(2, 0)
+
+workaround_reset_start cortex_a710, ERRATUM(2017096), ERRATA_A710_2017096
+ sysreg_bit_set CORTEX_A710_CPUECTLR_EL1, CORTEX_A710_CPUECTLR_EL1_PFSTIDIS_BIT
+workaround_reset_end cortex_a710, ERRATUM(2017096)
+
+check_erratum_ls cortex_a710, ERRATUM(2017096), CPU_REV(2, 0)
+
+workaround_reset_start cortex_a710, ERRATUM(2055002), ERRATA_A710_2055002
+ sysreg_bit_set CORTEX_A710_CPUACTLR_EL1, CORTEX_A710_CPUACTLR_EL1_BIT_46
+workaround_reset_end cortex_a710, ERRATUM(2055002)
+
+check_erratum_range cortex_a710, ERRATUM(2055002), CPU_REV(1, 0), CPU_REV(2, 0)
+
+workaround_reset_start cortex_a710, ERRATUM(2058056), ERRATA_A710_2058056
+ sysreg_bitfield_insert CORTEX_A710_CPUECTLR2_EL1, CORTEX_A710_CPUECTLR2_EL1_PF_MODE_CNSRV, \
+ CPUECTLR2_EL1_PF_MODE_LSB, CPUECTLR2_EL1_PF_MODE_WIDTH
+workaround_reset_end cortex_a710, ERRATUM(2058056)
+
+check_erratum_ls cortex_a710, ERRATUM(2058056), CPU_REV(2, 1)
+
+workaround_reset_start cortex_a710, ERRATUM(2081180), ERRATA_A710_2081180
+ ldr x0,=0x3
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xF3A08002
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFF0F7FE
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x10002001003FF
+ msr S3_6_c15_c8_1,x0
+ ldr x0,=0x4
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xBF200000
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFEF0000
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x10002001003F3
+ msr S3_6_c15_c8_1,x0
+workaround_reset_end cortex_a710, ERRATUM(2081180)
+
+check_erratum_ls cortex_a710, ERRATUM(2081180), CPU_REV(2, 0)
+
+workaround_reset_start cortex_a710, ERRATUM(2083908), ERRATA_A710_2083908
+ sysreg_bit_set CORTEX_A710_CPUACTLR5_EL1, CORTEX_A710_CPUACTLR5_EL1_BIT_13
+workaround_reset_end cortex_a710, ERRATUM(2083908)
+
+check_erratum_range cortex_a710, ERRATUM(2083908), CPU_REV(2, 0), CPU_REV(2, 0)
+
+workaround_reset_start cortex_a710, ERRATUM(2136059), ERRATA_A710_2136059
+ sysreg_bit_set CORTEX_A710_CPUACTLR5_EL1, CORTEX_A710_CPUACTLR5_EL1_BIT_44
+workaround_reset_end cortex_a710, ERRATUM(2136059)
+
+check_erratum_ls cortex_a710, ERRATUM(2136059), CPU_REV(2, 0)
+
+workaround_reset_start cortex_a710, ERRATUM(2147715), ERRATA_A710_2147715
+ sysreg_bit_set CORTEX_A710_CPUACTLR_EL1, CORTEX_A710_CPUACTLR_EL1_BIT_22
+workaround_reset_end cortex_a710, ERRATUM(2147715)
+
+check_erratum_range cortex_a710, ERRATUM(2147715), CPU_REV(2, 0), CPU_REV(2, 0)
+
+workaround_reset_start cortex_a710, ERRATUM(2216384), ERRATA_A710_2216384
+ sysreg_bit_set CORTEX_A710_CPUACTLR5_EL1, CORTEX_A710_CPUACTLR5_EL1_BIT_17
+
+ ldr x0,=0x5
+ msr CORTEX_A710_CPUPSELR_EL3, x0
+ ldr x0,=0x10F600E000
+ msr CORTEX_A710_CPUPOR_EL3, x0
+ ldr x0,=0x10FF80E000
+ msr CORTEX_A710_CPUPMR_EL3, x0
+ ldr x0,=0x80000000003FF
+ msr CORTEX_A710_CPUPCR_EL3, x0
+workaround_reset_end cortex_a710, ERRATUM(2216384)
+
+check_erratum_ls cortex_a710, ERRATUM(2216384), CPU_REV(2, 0)
+
+workaround_reset_start cortex_a710, ERRATUM(2267065), ERRATA_A710_2267065
+ sysreg_bit_set CORTEX_A710_CPUACTLR_EL1, CORTEX_A710_CPUACTLR_EL1_BIT_22
+workaround_reset_end cortex_a710, ERRATUM(2267065)
+
+check_erratum_ls cortex_a710, ERRATUM(2267065), CPU_REV(2, 0)
+
+workaround_reset_start cortex_a710, ERRATUM(2282622), ERRATA_A710_2282622
+ sysreg_bit_set CORTEX_A710_CPUACTLR2_EL1, BIT(0)
+workaround_reset_end cortex_a710, ERRATUM(2282622)
+
+check_erratum_ls cortex_a710, ERRATUM(2282622), CPU_REV(2, 1)
+
+workaround_runtime_start cortex_a710, ERRATUM(2291219), ERRATA_A710_2291219
+ /* Set bit 36 in ACTLR2_EL1 */
+ sysreg_bit_set CORTEX_A710_CPUACTLR2_EL1, CORTEX_A710_CPUACTLR2_EL1_BIT_36
+workaround_runtime_end cortex_a710, ERRATUM(2291219), NO_ISB
+
+check_erratum_ls cortex_a710, ERRATUM(2291219), CPU_REV(2, 0)
+
+/*
+ * ERRATA_DSU_2313941 is defined in dsu_helpers.S but applies to Cortex-A710 as
+ * well. Create symbolic aliases of the existing errata workaround functions
+ * to get them registered under the Errata Framework.
+ */
+.equ check_erratum_cortex_a710_2313941, check_errata_dsu_2313941
+.equ erratum_cortex_a710_2313941_wa, errata_dsu_2313941_wa
+add_erratum_entry cortex_a710, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET
+
+workaround_reset_start cortex_a710, ERRATUM(2371105), ERRATA_A710_2371105
+ /* Set bit 40 in CPUACTLR2_EL1 */
+ sysreg_bit_set CORTEX_A710_CPUACTLR2_EL1, CORTEX_A710_CPUACTLR2_EL1_BIT_40
+workaround_reset_end cortex_a710, ERRATUM(2371105)
+
+check_erratum_ls cortex_a710, ERRATUM(2371105), CPU_REV(2, 0)
+
+workaround_reset_start cortex_a710, ERRATUM(2742423), ERRATA_A710_2742423
+ /* Set CPUACTLR5_EL1[56:55] to 2'b01 */
+ sysreg_bit_set CORTEX_A710_CPUACTLR5_EL1, BIT(55)
+ sysreg_bit_clear CORTEX_A710_CPUACTLR5_EL1, BIT(56)
+workaround_reset_end cortex_a710, ERRATUM(2742423)
+
+check_erratum_ls cortex_a710, ERRATUM(2742423), CPU_REV(2, 1)
+
+workaround_runtime_start cortex_a710, ERRATUM(2768515), ERRATA_A710_2768515
+ /* dsb before isb of power down sequence */
+ dsb sy
+workaround_runtime_end cortex_a710, ERRATUM(2768515), NO_ISB
+
+check_erratum_ls cortex_a710, ERRATUM(2768515), CPU_REV(2, 1)
+
+workaround_reset_start cortex_a710, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+ * The Cortex-A710 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_cortex_a710
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_a710, CVE(2022, 23960)
+
+check_erratum_chosen cortex_a710, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_a710_core_pwr_dwn
+ apply_erratum cortex_a710, ERRATUM(2008768), ERRATA_A710_2008768
+ apply_erratum cortex_a710, ERRATUM(2291219), ERRATA_A710_2291219, NO_GET_CPU_REV
+
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A710_CPUPWRCTLR_EL1, CORTEX_A710_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ apply_erratum cortex_a710, ERRATUM(2768515), ERRATA_A710_2768515, NO_GET_CPU_REV
+ isb
+ ret
+endfunc cortex_a710_core_pwr_dwn
+
+errata_report_shim cortex_a710
+
+cpu_reset_func_start cortex_a710
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end cortex_a710
+
+ /* ---------------------------------------------
+ * This function provides Cortex-A710 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a710_regs, "aS"
+cortex_a710_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a710_cpu_reg_dump
+ adr x6, cortex_a710_regs
+ mrs x8, CORTEX_A710_CPUECTLR_EL1
+ ret
+endfunc cortex_a710_cpu_reg_dump
+
+declare_cpu_ops cortex_a710, CORTEX_A710_MIDR, \
+ cortex_a710_reset_func, \
+ cortex_a710_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a715.S b/lib/cpus/aarch64/cortex_a715.S
new file mode 100644
index 0000000..dd4c307
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a715.S
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a715.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-A715 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-A715 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A715_BHB_LOOP_COUNT, cortex_a715
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+workaround_reset_start cortex_a715, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+ * The Cortex-A715 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_cortex_a715
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_a715, CVE(2022, 23960)
+
+check_erratum_chosen cortex_a715, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+cpu_reset_func_start cortex_a715
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end cortex_a715
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_a715_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ mrs x0, CORTEX_A715_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A715_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_A715_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a715_core_pwr_dwn
+
+errata_report_shim cortex_a715
+
+ /* ---------------------------------------------
+ * This function provides Cortex-A715 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a715_regs, "aS"
+cortex_a715_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a715_cpu_reg_dump
+ adr x6, cortex_a715_regs
+ mrs x8, CORTEX_A715_CPUECTLR_EL1
+ ret
+endfunc cortex_a715_cpu_reg_dump
+
+declare_cpu_ops cortex_a715, CORTEX_A715_MIDR, \
+ cortex_a715_reset_func, \
+ cortex_a715_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
new file mode 100644
index 0000000..997f261
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a72.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A72_BHB_LOOP_COUNT, cortex_a72
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_dcache
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+ ret
+endfunc cortex_a72_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable all types of L2 prefetches.
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_l2_prefetch
+ mrs x0, CORTEX_A72_ECTLR_EL1
+ orr x0, x0, #CORTEX_A72_ECTLR_DIS_TWD_ACC_PFTCH_BIT
+ mov x1, #CORTEX_A72_ECTLR_L2_IPFTCH_DIST_MASK
+ orr x1, x1, #CORTEX_A72_ECTLR_L2_DPFTCH_DIST_MASK
+ bic x0, x0, x1
+ msr CORTEX_A72_ECTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_hw_prefetcher
+ sysreg_bit_set CORTEX_A72_CPUACTLR_EL1, CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH
+ isb
+ dsb ish
+ ret
+endfunc cortex_a72_disable_hw_prefetcher
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_smp
+ sysreg_bit_clear CORTEX_A72_ECTLR_EL1, CORTEX_A72_ECTLR_SMP_BIT
+ ret
+endfunc cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Disable debug interfaces
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_ext_debug
+ mov x0, #1
+ msr osdlr_el1, x0
+ isb
+ dsb sy
+ ret
+endfunc cortex_a72_disable_ext_debug
+
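+	/*
+	 * Report whether SMCCC_ARCH_WORKAROUND_3 must be applied: it is
+	 * reported as required on parts without CSV2 and as not applying
+	 * when CSV2 is implemented.
+	 */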
+func check_smccc_arch_workaround_3
+ cpu_check_csv2 x0, 1f
+ mov x0, #ERRATA_APPLIES
+ ret
+1:
+ mov x0, #ERRATA_NOT_APPLIES
+ ret
+endfunc check_smccc_arch_workaround_3
+
+workaround_reset_start cortex_a72, ERRATUM(859971), ERRATA_A72_859971
+ sysreg_bit_set CORTEX_A72_CPUACTLR_EL1, CORTEX_A72_CPUACTLR_EL1_DIS_INSTR_PREFETCH
+workaround_reset_end cortex_a72, ERRATUM(859971)
+
+check_erratum_ls cortex_a72, ERRATUM(859971), CPU_REV(0, 3)
+
+/* Due to the nature of the erratum, it is applied unconditionally when chosen */
+check_erratum_chosen cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367
+/* erratum workaround is interleaved with generic code */
+add_erratum_entry cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367, NO_APPLY_AT_RESET
+
+workaround_reset_start cortex_a72, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
+#if IMAGE_BL31
+ override_vector_table wa_cve_2017_5715_mmu_vbar
+#endif
+workaround_reset_end cortex_a72, CVE(2017, 5715)
+
+check_erratum_custom_start cortex_a72, CVE(2017, 5715)
+ cpu_check_csv2 x0, 1f
+#if WORKAROUND_CVE_2017_5715
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+1:
+ mov x0, #ERRATA_NOT_APPLIES
+ ret
+check_erratum_custom_end cortex_a72, CVE(2017, 5715)
+
+workaround_reset_start cortex_a72, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+ sysreg_bit_set CORTEX_A72_CPUACTLR_EL1, CORTEX_A72_CPUACTLR_EL1_DIS_LOAD_PASS_STORE
+ isb
+ dsb sy
+workaround_reset_end cortex_a72, CVE(2018, 3639)
+check_erratum_chosen cortex_a72, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+
+workaround_reset_start cortex_a72, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /* Skip installing vector table again if already done for CVE(2017, 5715) */
+ /*
+ * The Cortex-A72 generic vectors are overridden to apply the
+	 * mitigation on exception entry from lower ELs for revisions >= r1p0,
+	 * which have CSV2 implemented.
+ */
+ adr x0, wa_cve_vbar_cortex_a72
+ mrs x1, vbar_el3
+ cmp x0, x1
+ b.eq 1f
+ msr vbar_el3, x0
+1:
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_a72, CVE(2022, 23960)
+
+check_erratum_custom_start cortex_a72, CVE(2022, 23960)
+#if WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960
+ cpu_check_csv2 x0, 1f
+ mov x0, #ERRATA_APPLIES
+ ret
+1:
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif /* WORKAROUND_CVE_2022_23960 */
+ ret
+#endif /* WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960 */
+ mov x0, #ERRATA_MISSING
+ ret
+check_erratum_custom_end cortex_a72, CVE(2022, 23960)
+
+cpu_reset_func_start cortex_a72
+
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A72_ECTLR_EL1, CORTEX_A72_ECTLR_SMP_BIT
+
+cpu_reset_func_end cortex_a72
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A72.
+ * ----------------------------------------------------
+ */
+func cortex_a72_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_hw_prefetcher
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a72_disable_ext_debug
+endfunc cortex_a72_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A72.
+ * -------------------------------------------------------
+ */
+func cortex_a72_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_hw_prefetcher
+
+#if !SKIP_A72_L1_FLUSH_PWR_DWN
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+#endif
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* -------------------------------------------------
+ * Flush the L2 caches.
+ * -------------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a72_disable_ext_debug
+endfunc cortex_a72_cluster_pwr_dwn
+
+errata_report_shim cortex_a72
+
+ /* ---------------------------------------------
+ * This function provides cortex_a72 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a72_regs, "aS"
+cortex_a72_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", "cpumerrsr_el1", "l2merrsr_el1", ""
+
+func cortex_a72_cpu_reg_dump
+ adr x6, cortex_a72_regs
+ mrs x8, CORTEX_A72_ECTLR_EL1
+ mrs x9, CORTEX_A72_MERRSR_EL1
+ mrs x10, CORTEX_A72_L2MERRSR_EL1
+ ret
+endfunc cortex_a72_cpu_reg_dump
+
+declare_cpu_ops_wa cortex_a72, CORTEX_A72_MIDR, \
+ cortex_a72_reset_func, \
+ check_erratum_cortex_a72_5715, \
+ CPU_NO_EXTRA2_FUNC, \
+ check_smccc_arch_workaround_3, \
+ cortex_a72_core_pwr_dwn, \
+ cortex_a72_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a720.S b/lib/cpus/aarch64/cortex_a720.S
new file mode 100644
index 0000000..4b28fdb
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a720.S
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a720.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex A720 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex A720 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A720_BHB_LOOP_COUNT, cortex_a720
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+workaround_reset_start cortex_a720, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+ * The Cortex A720 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_cortex_a720
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_a720, CVE(2022, 23960)
+
+check_erratum_chosen cortex_a720, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+cpu_reset_func_start cortex_a720
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end cortex_a720
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_a720_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A720_CPUPWRCTLR_EL1, CORTEX_A720_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+
+ isb
+ ret
+endfunc cortex_a720_core_pwr_dwn
+
+errata_report_shim cortex_a720
+
+ /* ---------------------------------------------
+ * This function provides Cortex A720-specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a720_regs, "aS"
+cortex_a720_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a720_cpu_reg_dump
+ adr x6, cortex_a720_regs
+ mrs x8, CORTEX_A720_CPUECTLR_EL1
+ ret
+endfunc cortex_a720_cpu_reg_dump
+
+declare_cpu_ops cortex_a720, CORTEX_A720_MIDR, \
+ cortex_a720_reset_func, \
+ cortex_a720_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
new file mode 100644
index 0000000..3a6b922
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a73.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+ /* ---------------------------------------------
+ * Disable L1 data cache
+ * ---------------------------------------------
+ */
+func cortex_a73_disable_dcache
+ sysreg_bit_clear sctlr_el3, SCTLR_C_BIT
+ isb
+ ret
+endfunc cortex_a73_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a73_disable_smp
+ sysreg_bit_clear CORTEX_A73_CPUECTLR_EL1, CORTEX_A73_CPUECTLR_SMP_BIT
+ isb
+ dsb sy
+ ret
+endfunc cortex_a73_disable_smp
+
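+	/* SMCCC_ARCH_WORKAROUND_3 is always reported as required on Cortex-A73 */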
+func check_smccc_arch_workaround_3
+ mov x0, #ERRATA_APPLIES
+ ret
+endfunc check_smccc_arch_workaround_3
+
+workaround_reset_start cortex_a73, ERRATUM(852427), ERRATA_A73_852427
+ sysreg_bit_set CORTEX_A73_DIAGNOSTIC_REGISTER, BIT(12)
+workaround_reset_end cortex_a73, ERRATUM(852427)
+
+check_erratum_ls cortex_a73, ERRATUM(852427), CPU_REV(0, 0)
+
+workaround_reset_start cortex_a73, ERRATUM(855423), ERRATA_A73_855423
+ sysreg_bit_set CORTEX_A73_IMP_DEF_REG2, BIT(7)
+workaround_reset_end cortex_a73, ERRATUM(855423)
+
+check_erratum_ls cortex_a73, ERRATUM(855423), CPU_REV(0, 1)
+
+workaround_reset_start cortex_a73, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
+#if IMAGE_BL31
+ override_vector_table wa_cve_2017_5715_bpiall_vbar
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_a73, CVE(2017, 5715)
+
+check_erratum_custom_start cortex_a73, CVE(2017, 5715)
+ cpu_check_csv2 x0, 1f
+#if WORKAROUND_CVE_2017_5715
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+1:
+ mov x0, #ERRATA_NOT_APPLIES
+ ret
+check_erratum_custom_end cortex_a73, CVE(2017, 5715)
+
+workaround_reset_start cortex_a73, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+ sysreg_bit_set CORTEX_A73_IMP_DEF_REG1, CORTEX_A73_IMP_DEF_REG1_DISABLE_LOAD_PASS_STORE
+workaround_reset_end cortex_a73, CVE(2018, 3639)
+
+check_erratum_chosen cortex_a73, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+
+workaround_reset_start cortex_a73, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /* Skip installing vector table again for CVE_2022_23960 */
+ adr x0, wa_cve_2017_5715_bpiall_vbar
+ mrs x1, vbar_el3
+
+ cmp x0, x1
+ b.eq 1f
+ msr vbar_el3, x0
+1:
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_a73, CVE(2022, 23960)
+
+check_erratum_custom_start cortex_a73, CVE(2022, 23960)
+#if WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960
+ cpu_check_csv2 x0, 1f
+ mov x0, #ERRATA_APPLIES
+ ret
+1:
+#if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif /* WORKAROUND_CVE_2022_23960 */
+ ret
+#endif /* WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960 */
+ mov x0, #ERRATA_MISSING
+ ret
+check_erratum_custom_end cortex_a73, CVE(2022, 23960)
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A73.
+ * -------------------------------------------------
+ */
+
+cpu_reset_func_start cortex_a73
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * Clobbers : x0
+ * ---------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A73_CPUECTLR_EL1, CORTEX_A73_CPUECTLR_SMP_BIT
+cpu_reset_func_end cortex_a73
+
+func cortex_a73_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a73_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a73_disable_smp
+endfunc cortex_a73_core_pwr_dwn
+
+func cortex_a73_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a73_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a73_disable_smp
+endfunc cortex_a73_cluster_pwr_dwn
+
+errata_report_shim cortex_a73
+
+ /* ---------------------------------------------
+ * This function provides cortex_a73 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a73_regs, "aS"
+cortex_a73_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", "l2merrsr_el1", ""
+
+func cortex_a73_cpu_reg_dump
+ adr x6, cortex_a73_regs
+ mrs x8, CORTEX_A73_CPUECTLR_EL1
+ mrs x9, CORTEX_A73_L2MERRSR_EL1
+ ret
+endfunc cortex_a73_cpu_reg_dump
+
+declare_cpu_ops_wa cortex_a73, CORTEX_A73_MIDR, \
+ cortex_a73_reset_func, \
+ check_erratum_cortex_a73_5715, \
+ CPU_NO_EXTRA2_FUNC, \
+ check_smccc_arch_workaround_3, \
+ cortex_a73_core_pwr_dwn, \
+ cortex_a73_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
new file mode 100644
index 0000000..c90be67
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cortex_a75.h>
+#include <cpuamu.h>
+#include <cpu_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-A75 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+workaround_reset_start cortex_a75, ERRATUM(764081), ERRATA_A75_764081
+ sysreg_bit_set sctlr_el3, SCTLR_IESB_BIT
+workaround_reset_end cortex_a75, ERRATUM(764081)
+
+check_erratum_ls cortex_a75, ERRATUM(764081), CPU_REV(0, 0)
+
+workaround_reset_start cortex_a75, ERRATUM(790748), ERRATA_A75_790748
+ sysreg_bit_set CORTEX_A75_CPUACTLR_EL1, (1 << 13)
+workaround_reset_end cortex_a75, ERRATUM(790748)
+
+check_erratum_ls cortex_a75, ERRATUM(790748), CPU_REV(0, 0)
+
+/* ERRATA_DSU_798953:
+ * The erratum is defined in dsu_helpers.S but applies to cortex_a75
+ * as well. Hence we create symbolic names for the already existing errata
+ * workaround functions to get them registered under the Errata Framework.
+ */
+.equ check_erratum_cortex_a75_798953, check_errata_dsu_798953
+.equ erratum_cortex_a75_798953_wa, errata_dsu_798953_wa
+add_erratum_entry cortex_a75, ERRATUM(798953), ERRATA_DSU_798953, APPLY_AT_RESET
+
+/* ERRATA_DSU_936184:
+ * The erratum is defined in dsu_helpers.S but applies to cortex_a75
+ * as well. Hence we create symbolic names for the already existing errata
+ * workaround functions to get them registered under the Errata Framework.
+ */
+.equ check_erratum_cortex_a75_936184, check_errata_dsu_936184
+.equ erratum_cortex_a75_936184_wa, errata_dsu_936184_wa
+add_erratum_entry cortex_a75, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+
+workaround_reset_start cortex_a75, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
+#if IMAGE_BL31
+ override_vector_table wa_cve_2017_5715_bpiall_vbar
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_a75, CVE(2017, 5715)
+
+check_erratum_custom_start cortex_a75, CVE(2017, 5715)
+ cpu_check_csv2 x0, 1f
+#if WORKAROUND_CVE_2017_5715
+ mov x0, #ERRATA_APPLIES
+#else
+ mov x0, #ERRATA_MISSING
+#endif
+ ret
+1:
+ mov x0, #ERRATA_NOT_APPLIES
+ ret
+check_erratum_custom_end cortex_a75, CVE(2017, 5715)
+
+workaround_reset_start cortex_a75, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+ sysreg_bit_set CORTEX_A75_CPUACTLR_EL1, CORTEX_A75_CPUACTLR_EL1_DISABLE_LOAD_PASS_STORE
+workaround_reset_end cortex_a75, CVE(2018, 3639)
+
+check_erratum_chosen cortex_a75, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+
+workaround_reset_start cortex_a75, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /* Skip installing vector table again if already done for CVE(2017, 5715) */
+ adr x0, wa_cve_2017_5715_bpiall_vbar
+ mrs x1, vbar_el3
+ cmp x0, x1
+ b.eq 1f
+ msr vbar_el3, x0
+1:
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_a75, CVE(2022, 23960)
+
+check_erratum_custom_start cortex_a75, CVE(2022, 23960)
+#if WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960
+ cpu_check_csv2 x0, 1f
+ mov x0, #ERRATA_APPLIES
+ ret
+1:
+# if WORKAROUND_CVE_2022_23960
+ mov x0, #ERRATA_APPLIES
+# else
+ mov x0, #ERRATA_MISSING
+# endif /* WORKAROUND_CVE_2022_23960 */
+ ret
+#endif /* WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960 */
+ mov x0, #ERRATA_MISSING
+ ret
+check_erratum_custom_end cortex_a75, CVE(2022, 23960)
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A75.
+ * -------------------------------------------------
+ */
+
+cpu_reset_func_start cortex_a75
+#if ENABLE_FEAT_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ sysreg_bit_set actlr_el3, CORTEX_A75_ACTLR_AMEN_BIT
+ isb
+
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ sysreg_bit_set actlr_el2, CORTEX_A75_ACTLR_AMEN_BIT
+ isb
+
+ /* Enable group0 counters */
+ mov x0, #CORTEX_A75_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET_EL0, x0
+ isb
+
+ /* Enable group1 counters */
+ mov x0, #CORTEX_A75_AMU_GROUP1_MASK
+ msr CPUAMCNTENSET_EL0, x0
+ /* isb included in cpu_reset_func_end macro */
+#endif
+cpu_reset_func_end cortex_a75
+
+func check_smccc_arch_workaround_3
+ mov x0, #ERRATA_APPLIES
+ ret
+endfunc check_smccc_arch_workaround_3
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_a75_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A75_CPUPWRCTLR_EL1, \
+ CORTEX_A75_CORE_PWRDN_EN_MASK
+ isb
+ ret
+endfunc cortex_a75_core_pwr_dwn
+
+errata_report_shim cortex_a75
+
+ /* ---------------------------------------------
+ * This function provides cortex_a75 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a75_regs, "aS"
+cortex_a75_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a75_cpu_reg_dump
+ adr x6, cortex_a75_regs
+ mrs x8, CORTEX_A75_CPUECTLR_EL1
+ ret
+endfunc cortex_a75_cpu_reg_dump
+
+declare_cpu_ops_wa cortex_a75, CORTEX_A75_MIDR, \
+ cortex_a75_reset_func, \
+ check_erratum_cortex_a75_5715, \
+ CPU_NO_EXTRA2_FUNC, \
+ check_smccc_arch_workaround_3, \
+ cortex_a75_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a75_pubsub.c b/lib/cpus/aarch64/cortex_a75_pubsub.c
new file mode 100644
index 0000000..11190c8
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a75_pubsub.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017-2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cortex_a75.h>
+#include <cpuamu.h>
+#include <lib/el3_runtime/pubsub_events.h>
+
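+/*
+ * Save and restore the Cortex-A75 activity monitor (AMU) counters across a
+ * power-down suspend, but only when actually running on a Cortex-A75 core.
+ */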
+static void *cortex_a75_context_save(const void *arg)
+{
+ if (midr_match(CORTEX_A75_MIDR) != 0)
+ cpuamu_context_save(CORTEX_A75_AMU_NR_COUNTERS);
+
+ return (void *)0;
+}
+
+static void *cortex_a75_context_restore(const void *arg)
+{
+ if (midr_match(CORTEX_A75_MIDR) != 0)
+ cpuamu_context_restore(CORTEX_A75_AMU_NR_COUNTERS);
+
+ return (void *)0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, cortex_a75_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, cortex_a75_context_restore);
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
new file mode 100644
index 0000000..8b3d730
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -0,0 +1,540 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a76.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include <services/arm_arch_svc.h>
+#include "wa_cve_2022_23960_bhb.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-A76 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+ .globl cortex_a76_reset_func
+ .globl cortex_a76_core_pwr_dwn
+ .globl cortex_a76_disable_wa_cve_2018_3639
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-A76 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
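+/* ESR_EL3 syndrome values for an SMC #0 taken from AArch64 and AArch32 */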
+#define ESR_EL3_A64_SMC0 0x5e000000
+#define ESR_EL3_A32_SMC0 0x4e000000
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ /*
+ * This macro applies the mitigation for CVE-2018-3639.
+ * It implements a fast path where `SMCCC_ARCH_WORKAROUND_2`
+ * SMC calls from a lower EL running in AArch32 or AArch64
+	 * will go through the fast path and return early.
+ *
+ * The macro saves x2-x3 to the context. In the fast path
+ * x0-x3 registers do not need to be restored as the calling
+ * context will have saved them. The macro also saves
+ * x29-x30 to the context in the sync_exception path.
+ */
+ .macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ .if \_is_sync_exception
+ stp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ mov_imm w2, \_esr_el3_val
+ bl apply_cve_2018_3639_sync_wa
+ ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ .endif
+ /*
+ * Always enable v4 mitigation during EL3 execution. This is not
+ * required for the fast path above because it does not perform any
+ * memory loads.
+ */
+ mrs x2, CORTEX_A76_CPUACTLR2_EL1
+ orr x2, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ msr CORTEX_A76_CPUACTLR2_EL1, x2
+ isb
+
+ /*
+ * The caller may have passed arguments to EL3 via x2-x3.
+ * Restore these registers from the context before jumping to the
+ * main runtime vector table entry.
+ */
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ .endm
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960
+vector_base cortex_a76_wa_cve_vbar
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry cortex_a76_sync_exception_sp_el0
+ b sync_exception_sp_el0
+end_vector_entry cortex_a76_sync_exception_sp_el0
+
+vector_entry cortex_a76_irq_sp_el0
+ b irq_sp_el0
+end_vector_entry cortex_a76_irq_sp_el0
+
+vector_entry cortex_a76_fiq_sp_el0
+ b fiq_sp_el0
+end_vector_entry cortex_a76_fiq_sp_el0
+
+vector_entry cortex_a76_serror_sp_el0
+ b serror_sp_el0
+end_vector_entry cortex_a76_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry cortex_a76_sync_exception_sp_elx
+ b sync_exception_sp_elx
+end_vector_entry cortex_a76_sync_exception_sp_elx
+
+vector_entry cortex_a76_irq_sp_elx
+ b irq_sp_elx
+end_vector_entry cortex_a76_irq_sp_elx
+
+vector_entry cortex_a76_fiq_sp_elx
+ b fiq_sp_elx
+end_vector_entry cortex_a76_fiq_sp_elx
+
+vector_entry cortex_a76_serror_sp_elx
+ b serror_sp_elx
+end_vector_entry cortex_a76_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry cortex_a76_sync_exception_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639*/
+
+ b sync_exception_aarch64
+end_vector_entry cortex_a76_sync_exception_aarch64
+
+vector_entry cortex_a76_irq_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639*/
+
+ b irq_aarch64
+end_vector_entry cortex_a76_irq_aarch64
+
+vector_entry cortex_a76_fiq_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639*/
+
+ b fiq_aarch64
+end_vector_entry cortex_a76_fiq_aarch64
+
+vector_entry cortex_a76_serror_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639*/
+
+ b serror_aarch64
+end_vector_entry cortex_a76_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry cortex_a76_sync_exception_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639*/
+
+ b sync_exception_aarch32
+end_vector_entry cortex_a76_sync_exception_aarch32
+
+vector_entry cortex_a76_irq_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639*/
+
+ b irq_aarch32
+end_vector_entry cortex_a76_irq_aarch32
+
+vector_entry cortex_a76_fiq_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639*/
+
+ b fiq_aarch32
+end_vector_entry cortex_a76_fiq_aarch32
+
+vector_entry cortex_a76_serror_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639*/
+
+ b serror_aarch32
+end_vector_entry cortex_a76_serror_aarch32
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ /*
+ * -----------------------------------------------------------------
+ * This function applies the mitigation for CVE-2018-3639
+ * specifically for sync exceptions. It implements a fast path
+ * where `SMCCC_ARCH_WORKAROUND_2` SMC calls from a lower EL
+	 * running in AArch64 will go through the fast path and return early.
+ *
+ * In the fast path x0-x3 registers do not need to be restored as the
+ * calling context will have saved them.
+ *
+ * Caller must pass value of esr_el3 to compare via x2.
+ * Save and restore these registers outside of this function from the
+ * context before jumping to the main runtime vector table entry.
+ *
+ * Shall clobber: x0-x3, x30
+ * -----------------------------------------------------------------
+ */
+func apply_cve_2018_3639_sync_wa
+ /*
+ * Ensure SMC is coming from A64/A32 state on #0
+ * with W0 = SMCCC_ARCH_WORKAROUND_2
+ *
+ * This sequence evaluates as:
+ * (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing use of a single branch operation.
+	 * X2 is populated outside this function with the expected ESR_EL3 value.
+ */
+ orr w3, wzr, #SMCCC_ARCH_WORKAROUND_2
+ cmp x0, x3
+ mrs x3, esr_el3
+
+ ccmp w2, w3, #0, eq
+ /*
+ * Static predictor will predict a fall-through, optimizing
+ * the `SMCCC_ARCH_WORKAROUND_2` fast path.
+ */
+ bne 1f
+
+ /*
+ * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
+ * fast path.
+ */
+ cmp x1, xzr /* enable/disable check */
+
+ /*
+ * When the calling context wants mitigation disabled,
+ * we program the mitigation disable function in the
+ * CPU context, which gets invoked on subsequent exits from
+ * EL3 via the `el3_exit` function. Otherwise NULL is
+ * programmed in the CPU context, which results in caller's
+ * inheriting the EL3 mitigation state (enabled) on subsequent
+ * `el3_exit`.
+ */
+ mov x0, xzr
+ adr x1, cortex_a76_disable_wa_cve_2018_3639
+ csel x1, x1, x0, eq
+ str x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
+
+ mrs x2, CORTEX_A76_CPUACTLR2_EL1
+ orr x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ bic x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ csel x3, x3, x1, eq
+ msr CORTEX_A76_CPUACTLR2_EL1, x3
+ ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ /*
+	 * `SMCCC_ARCH_WORKAROUND_2` fast path return to lower EL.
+ */
+ exception_return /* exception_return contains ISB */
+1:
+ ret
+endfunc apply_cve_2018_3639_sync_wa
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
+workaround_reset_start cortex_a76, ERRATUM(1073348), ERRATA_A76_1073348
+	sysreg_bit_set CORTEX_A76_CPUACTLR_EL1, CORTEX_A76_CPUACTLR_EL1_DISABLE_STATIC_PREDICTION
+workaround_reset_end cortex_a76, ERRATUM(1073348)
+
+check_erratum_ls cortex_a76, ERRATUM(1073348), CPU_REV(1, 0)
+
+workaround_reset_start cortex_a76, ERRATUM(1130799), ERRATA_A76_1130799
+ sysreg_bit_set CORTEX_A76_CPUACTLR2_EL1, CORTEX_A76_CPUACTLR2_EL1_BIT_59
+ msr CORTEX_A76_CPUACTLR2_EL1, x1
+workaround_reset_end cortex_a76, ERRATUM(1130799)
+
+check_erratum_ls cortex_a76, ERRATUM(1130799), CPU_REV(2, 0)
+
+workaround_reset_start cortex_a76, ERRATUM(1220197), ERRATA_A76_1220197
+ sysreg_bit_set CORTEX_A76_CPUECTLR_EL1, CORTEX_A76_CPUECTLR_EL1_WS_THR_L2
+workaround_reset_end cortex_a76, ERRATUM(1220197)
+
+check_erratum_ls cortex_a76, ERRATUM(1220197), CPU_REV(2, 0)
+
+workaround_reset_start cortex_a76, ERRATUM(1257314), ERRATA_A76_1257314
+ sysreg_bit_set CORTEX_A76_CPUACTLR3_EL1, CORTEX_A76_CPUACTLR3_EL1_BIT_10
+workaround_reset_end cortex_a76, ERRATUM(1257314)
+
+check_erratum_ls cortex_a76, ERRATUM(1257314), CPU_REV(3, 0)
+
+workaround_reset_start cortex_a76, ERRATUM(1262606), ERRATA_A76_1262606
+ sysreg_bit_set CORTEX_A76_CPUACTLR_EL1, CORTEX_A76_CPUACTLR_EL1_BIT_13
+workaround_reset_end cortex_a76, ERRATUM(1262606)
+
+check_erratum_ls cortex_a76, ERRATUM(1262606), CPU_REV(3, 0)
+
+workaround_reset_start cortex_a76, ERRATUM(1262888), ERRATA_A76_1262888
+ sysreg_bit_set CORTEX_A76_CPUECTLR_EL1, CORTEX_A76_CPUECTLR_EL1_BIT_51
+workaround_reset_end cortex_a76, ERRATUM(1262888)
+
+check_erratum_ls cortex_a76, ERRATUM(1262888), CPU_REV(3, 0)
+
+workaround_reset_start cortex_a76, ERRATUM(1275112), ERRATA_A76_1275112
+ sysreg_bit_set CORTEX_A76_CPUACTLR_EL1, CORTEX_A76_CPUACTLR_EL1_BIT_13
+workaround_reset_end cortex_a76, ERRATUM(1275112)
+
+check_erratum_ls cortex_a76, ERRATUM(1275112), CPU_REV(3, 0)
+
+check_erratum_custom_start cortex_a76, ERRATUM(1286807)
+#if ERRATA_A76_1286807
+ mov x0, #ERRATA_APPLIES
+ ret
+#else
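+	/* Otherwise report based on the running revision; 0x30 corresponds to r3p0 */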
+ mov x1, #0x30
+ b cpu_rev_var_ls
+#endif
+check_erratum_custom_end cortex_a76, ERRATUM(1286807)
+
+workaround_reset_start cortex_a76, ERRATUM(1791580), ERRATA_A76_1791580
+ sysreg_bit_set CORTEX_A76_CPUACTLR2_EL1, CORTEX_A76_CPUACTLR2_EL1_BIT_2
+workaround_reset_end cortex_a76, ERRATUM(1791580)
+
+check_erratum_ls cortex_a76, ERRATUM(1791580), CPU_REV(4, 0)
+
+workaround_reset_start cortex_a76, ERRATUM(1868343), ERRATA_A76_1868343
+ sysreg_bit_set CORTEX_A76_CPUACTLR_EL1, CORTEX_A76_CPUACTLR_EL1_BIT_13
+workaround_reset_end cortex_a76, ERRATUM(1868343)
+
+check_erratum_ls cortex_a76, ERRATUM(1868343), CPU_REV(4, 0)
+
+workaround_reset_start cortex_a76, ERRATUM(1946160), ERRATA_A76_1946160
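+	/*
+	 * Programs instruction-patch entries 3 to 5 via the raw S3_6_C15_C8_*
+	 * (CPUPSELR/CPUPCR/CPUPOR/CPUPMR_EL3) encodings.
+	 */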
+ mov x0, #3
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3900002
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #4
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #5
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+workaround_reset_end cortex_a76, ERRATUM(1946160)
+
+check_erratum_range cortex_a76, ERRATUM(1946160), CPU_REV(3, 0), CPU_REV(4, 1)
+
+workaround_runtime_start cortex_a76, ERRATUM(2743102), ERRATA_A76_2743102
+ /* dsb before isb of power down sequence */
+ dsb sy
+workaround_runtime_end cortex_a76, ERRATUM(2743102)
+
+check_erratum_ls cortex_a76, ERRATUM(2743102), CPU_REV(4, 1)
+
+check_erratum_chosen cortex_a76, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+
+func cortex_a76_disable_wa_cve_2018_3639
+ sysreg_bit_clear CORTEX_A76_CPUACTLR2_EL1, CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ isb
+ ret
+endfunc cortex_a76_disable_wa_cve_2018_3639
+
+/* --------------------------------------------------------------
+ * Erratum workaround for Cortex-A76 erratum #1165522.
+ * This applies only to revisions <= r3p0 of Cortex-A76.
+ * Due to the nature of the erratum it is applied unconditionally
+ * when built in, so report it as applicable in that case.
+ * --------------------------------------------------------------
+ */
+check_erratum_custom_start cortex_a76, ERRATUM(1165522)
+#if ERRATA_A76_1165522
+ mov x0, #ERRATA_APPLIES
+ ret
+#else
+ mov x1, #0x30
+ b cpu_rev_var_ls
+#endif
+check_erratum_custom_end cortex_a76, ERRATUM(1165522)
+
+check_erratum_chosen cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+/* erratum has no workaround in the cpu. Generic code must take care */
+add_erratum_entry cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960, NO_APPLY_AT_RESET
+
+/* ERRATA_DSU_798953:
+ * The erratum is defined in dsu_helpers.S but applies to cortex_a76
+ * as well. Hence we create symbolic names for the already existing errata
+ * workaround functions to get them registered under the Errata Framework.
+ */
+.equ check_erratum_cortex_a76_798953, check_errata_dsu_798953
+.equ erratum_cortex_a76_798953_wa, errata_dsu_798953_wa
+add_erratum_entry cortex_a76, ERRATUM(798953), ERRATA_DSU_798953, APPLY_AT_RESET
+
+/* ERRATA_DSU_936184:
+ * The erratum is defined in dsu_helpers.S but applies to cortex_a76
+ * as well. Hence we create symbolic names for the already existing errata
+ * workaround functions to get them registered under the Errata Framework.
+ */
+.equ check_erratum_cortex_a76_936184, check_errata_dsu_936184
+.equ erratum_cortex_a76_936184_wa, errata_dsu_936184_wa
+add_erratum_entry cortex_a76, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+
+cpu_reset_func_start cortex_a76
+
+#if WORKAROUND_CVE_2018_3639
+ /* If the PE implements SSBS, we don't need the dynamic workaround */
+ mrs x0, id_aa64pfr1_el1
+ lsr x0, x0, #ID_AA64PFR1_EL1_SSBS_SHIFT
+ and x0, x0, #ID_AA64PFR1_EL1_SSBS_MASK
+#if !DYNAMIC_WORKAROUND_CVE_2018_3639 && ENABLE_ASSERTIONS
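+	/*
+	 * Without the dynamic workaround the mitigation relies on SSBS, so
+	 * assert that the core actually implements it.
+	 */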
+ cmp x0, 0
+ ASM_ASSERT(ne)
+#endif
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ cbnz x0, 1f
+ sysreg_bit_set CORTEX_A76_CPUACTLR2_EL1, CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ isb
+
+#ifdef IMAGE_BL31
+ /*
+ * The Cortex-A76 generic vectors are overwritten to use the vectors
+ * defined above. This is required in order to apply mitigation
+ * against CVE-2018-3639 on exception entry from lower ELs.
+ * If the below vector table is used, skip overriding it again for
+ * CVE_2022_23960 as both use the same vbar.
+ */
+ override_vector_table cortex_a76_wa_cve_vbar
+ isb
+ b 2f
+#endif /* IMAGE_BL31 */
+
+1:
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+#endif /* WORKAROUND_CVE_2018_3639 */
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-A76 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs. This will be bypassed
+ * if DYNAMIC_WORKAROUND_CVE_2018_3639 has overridden the vectors.
+ */
+ override_vector_table cortex_a76_wa_cve_vbar
+ isb
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+2:
+cpu_reset_func_end cortex_a76
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_a76_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A76_CPUPWRCTLR_EL1, CORTEX_A76_CORE_PWRDN_EN_MASK
+
+ apply_erratum cortex_a76, ERRATUM(2743102), ERRATA_A76_2743102
+
+ isb
+ ret
+endfunc cortex_a76_core_pwr_dwn
+
+errata_report_shim cortex_a76
+
+ /* ---------------------------------------------
+ * This function provides cortex_a76 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a76_regs, "aS"
+cortex_a76_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a76_cpu_reg_dump
+ adr x6, cortex_a76_regs
+ mrs x8, CORTEX_A76_CPUECTLR_EL1
+ ret
+endfunc cortex_a76_cpu_reg_dump
+
+declare_cpu_ops_wa cortex_a76, CORTEX_A76_MIDR, \
+ cortex_a76_reset_func, \
+ CPU_NO_EXTRA1_FUNC, \
+ cortex_a76_disable_wa_cve_2018_3639, \
+ CPU_NO_EXTRA3_FUNC, \
+ cortex_a76_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a76ae.S b/lib/cpus/aarch64/cortex_a76ae.S
new file mode 100644
index 0000000..08a6ef9
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a76ae.S
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a76ae.h>
+#include <cpu_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-A76AE must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-A76AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A76AE_BHB_LOOP_COUNT, cortex_a76ae
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+check_erratum_chosen cortex_a76ae, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+workaround_reset_start cortex_a76ae, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+ * The Cortex-A76ae generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_cortex_a76ae
+ isb
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_a76ae, CVE(2022, 23960)
+
+cpu_reset_func_start cortex_a76ae
+cpu_reset_func_end cortex_a76ae
+
+errata_report_shim cortex_a76ae
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_a76ae_core_pwr_dwn
+ sysreg_bit_set CORTEX_A76AE_CPUPWRCTLR_EL1, CORTEX_A76AE_CORE_PWRDN_EN_MASK
+ isb
+ ret
+endfunc cortex_a76ae_core_pwr_dwn
+
+ /* ---------------------------------------------
+ * This function provides cortex_a76ae specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a76ae_regs, "aS"
+cortex_a76ae_regs: /* The ASCII list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a76ae_cpu_reg_dump
+ adr x6, cortex_a76ae_regs
+ mrs x8, CORTEX_A76AE_CPUECTLR_EL1
+ ret
+endfunc cortex_a76ae_cpu_reg_dump
+
+declare_cpu_ops cortex_a76ae, CORTEX_A76AE_MIDR, cortex_a76ae_reset_func, \
+ cortex_a76ae_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S
new file mode 100644
index 0000000..86c2561
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a77.S
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2018-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a77.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-A77 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A77_BHB_LOOP_COUNT, cortex_a77
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+workaround_reset_start cortex_a77, ERRATUM(1508412), ERRATA_A77_1508412
+ /* move cpu revision in again and compare against r0p0 */
+ mov x0, x7
+ mov x1, #CPU_REV(0, 0)
+ bl cpu_rev_var_ls
+ cbz x0, 1f
+
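+	/* Fall-through: cpu_rev_var_ls reported r0p0; the path at label 1 handles r1p0 */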
+ ldr x0, =0x0
+ msr CORTEX_A77_CPUPSELR_EL3, x0
+ ldr x0, =0x00E8400000
+ msr CORTEX_A77_CPUPOR_EL3, x0
+ ldr x0, =0x00FFE00000
+ msr CORTEX_A77_CPUPMR_EL3, x0
+ ldr x0, =0x4004003FF
+ msr CORTEX_A77_CPUPCR_EL3, x0
+ ldr x0, =0x1
+ msr CORTEX_A77_CPUPSELR_EL3, x0
+ ldr x0, =0x00E8C00040
+ msr CORTEX_A77_CPUPOR_EL3, x0
+ ldr x0, =0x00FFE00040
+ msr CORTEX_A77_CPUPMR_EL3, x0
+ b 2f
+1:
+ ldr x0, =0x0
+ msr CORTEX_A77_CPUPSELR_EL3, x0
+ ldr x0, =0x00E8400000
+ msr CORTEX_A77_CPUPOR_EL3, x0
+ ldr x0, =0x00FF600000
+ msr CORTEX_A77_CPUPMR_EL3, x0
+ ldr x0, =0x00E8E00080
+ msr CORTEX_A77_CPUPOR2_EL3, x0
+ ldr x0, =0x00FFE000C0
+ msr CORTEX_A77_CPUPMR2_EL3, x0
+2:
+ ldr x0, =0x04004003FF
+ msr CORTEX_A77_CPUPCR_EL3, x0
+workaround_reset_end cortex_a77, ERRATUM(1508412)
+
+check_erratum_ls cortex_a77, ERRATUM(1508412), CPU_REV(1, 0)
+
+workaround_reset_start cortex_a77, ERRATUM(1791578), ERRATA_A77_1791578
+ sysreg_bit_set CORTEX_A77_ACTLR2_EL1, CORTEX_A77_ACTLR2_EL1_BIT_2
+workaround_reset_end cortex_a77, ERRATUM(1791578)
+
+check_erratum_ls cortex_a77, ERRATUM(1791578), CPU_REV(1, 1)
+
+workaround_reset_start cortex_a77, ERRATUM(1800714), ERRATA_A77_1800714
+ /* Disable allocation of splintered pages in the L2 TLB */
+ sysreg_bit_set CORTEX_A77_CPUECTLR_EL1, CORTEX_A77_CPUECTLR_EL1_BIT_53
+workaround_reset_end cortex_a77, ERRATUM(1800714)
+
+check_erratum_ls cortex_a77, ERRATUM(1800714), CPU_REV(1, 1)
+
+workaround_reset_start cortex_a77, ERRATUM(1925769), ERRATA_A77_1925769
+ sysreg_bit_set CORTEX_A77_CPUECTLR_EL1, CORTEX_A77_CPUECTLR_EL1_BIT_8
+workaround_reset_end cortex_a77, ERRATUM(1925769)
+
+check_erratum_ls cortex_a77, ERRATUM(1925769), CPU_REV(1, 1)
+
+workaround_reset_start cortex_a77, ERRATUM(1946167), ERRATA_A77_1946167
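+	/*
+	 * Three instruction-patch entries (4, 5 and 6) are programmed:
+	 * CPUPSELR_EL3 selects the entry, CPUPOR/CPUPMR_EL3 appear to give the
+	 * opcode and mask to match, and CPUPCR_EL3 controls the patch.
+	 */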
+	ldr x0, =0x4
+	msr CORTEX_A77_CPUPSELR_EL3, x0
+	ldr x0, =0x10E3900002
+	msr CORTEX_A77_CPUPOR_EL3, x0
+	ldr x0, =0x10FFF00083
+	msr CORTEX_A77_CPUPMR_EL3, x0
+	ldr x0, =0x2001003FF
+	msr CORTEX_A77_CPUPCR_EL3, x0
+
+	ldr x0, =0x5
+	msr CORTEX_A77_CPUPSELR_EL3, x0
+	ldr x0, =0x10E3800082
+	msr CORTEX_A77_CPUPOR_EL3, x0
+	ldr x0, =0x10FFF00083
+	msr CORTEX_A77_CPUPMR_EL3, x0
+	ldr x0, =0x2001003FF
+	msr CORTEX_A77_CPUPCR_EL3, x0
+
+	ldr x0, =0x6
+	msr CORTEX_A77_CPUPSELR_EL3, x0
+	ldr x0, =0x10E3800200
+	msr CORTEX_A77_CPUPOR_EL3, x0
+	ldr x0, =0x10FFF003E0
+	msr CORTEX_A77_CPUPMR_EL3, x0
+	ldr x0, =0x2001003FF
+	msr CORTEX_A77_CPUPCR_EL3, x0
+workaround_reset_end cortex_a77, ERRATUM(1946167)
+
+check_erratum_ls cortex_a77, ERRATUM(1946167), CPU_REV(1, 1)
+
+workaround_reset_start cortex_a77, ERRATUM(2356587), ERRATA_A77_2356587
+ sysreg_bit_set CORTEX_A77_ACTLR2_EL1, CORTEX_A77_ACTLR2_EL1_BIT_0
+workaround_reset_end cortex_a77, ERRATUM(2356587)
+
+check_erratum_ls cortex_a77, ERRATUM(2356587), CPU_REV(1, 1)
+
+workaround_runtime_start cortex_a77, ERRATUM(2743100), ERRATA_A77_2743100
+ /* dsb before isb of power down sequence */
+ dsb sy
+workaround_runtime_end cortex_a77, ERRATUM(2743100), NO_ISB
+
+check_erratum_ls cortex_a77, ERRATUM(2743100), CPU_REV(1, 1)
+
+workaround_reset_start cortex_a77, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+ * The Cortex-A77 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ adr x0, wa_cve_vbar_cortex_a77
+ msr vbar_el3, x0
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_a77, CVE(2022, 23960)
+
+check_erratum_chosen cortex_a77, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A77. Must follow AAPCS.
+ * -------------------------------------------------
+ */
+cpu_reset_func_start cortex_a77
+cpu_reset_func_end cortex_a77
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_a77_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A77_CPUPWRCTLR_EL1, \
+ CORTEX_A77_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+
+ apply_erratum cortex_a77, ERRATUM(2743100), ERRATA_A77_2743100
+
+ isb
+ ret
+endfunc cortex_a77_core_pwr_dwn
+
+errata_report_shim cortex_a77
+ /* ---------------------------------------------
+ * This function provides Cortex-A77 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a77_regs, "aS"
+cortex_a77_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a77_cpu_reg_dump
+ adr x6, cortex_a77_regs
+ mrs x8, CORTEX_A77_CPUECTLR_EL1
+ ret
+endfunc cortex_a77_cpu_reg_dump
+
+declare_cpu_ops cortex_a77, CORTEX_A77_MIDR, \
+ cortex_a77_reset_func, \
+ cortex_a77_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
new file mode 100644
index 0000000..b5c24e1
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a78.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "cortex_a78 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+.globl cortex_a78_reset_func
+.globl cortex_a78_core_pwr_dwn
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A78_BHB_LOOP_COUNT, cortex_a78
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+workaround_reset_start cortex_a78, ERRATUM(1688305), ERRATA_A78_1688305
+ sysreg_bit_set CORTEX_A78_ACTLR2_EL1, CORTEX_A78_ACTLR2_EL1_BIT_1
+workaround_reset_end cortex_a78, ERRATUM(1688305)
+
+check_erratum_ls cortex_a78, ERRATUM(1688305), CPU_REV(1, 0)
+
+workaround_reset_start cortex_a78, ERRATUM(1821534), ERRATA_A78_1821534
+ sysreg_bit_set CORTEX_A78_ACTLR2_EL1, CORTEX_A78_ACTLR2_EL1_BIT_2
+workaround_reset_end cortex_a78, ERRATUM(1821534)
+
+check_erratum_ls cortex_a78, ERRATUM(1821534), CPU_REV(1, 0)
+
+workaround_reset_start cortex_a78, ERRATUM(1941498), ERRATA_A78_1941498
+ sysreg_bit_set CORTEX_A78_CPUECTLR_EL1, CORTEX_A78_CPUECTLR_EL1_BIT_8
+workaround_reset_end cortex_a78, ERRATUM(1941498)
+
+check_erratum_ls cortex_a78, ERRATUM(1941498), CPU_REV(1, 1)
+
+workaround_reset_start cortex_a78, ERRATUM(1951500), ERRATA_A78_1951500
+ msr S3_6_c15_c8_0, xzr
+ ldr x0, =0x10E3900002
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #1
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #2
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+workaround_reset_end cortex_a78, ERRATUM(1951500)
+
+check_erratum_range cortex_a78, ERRATUM(1951500), CPU_REV(1, 0), CPU_REV(1, 1)
+
+workaround_reset_start cortex_a78, ERRATUM(1952683), ERRATA_A78_1952683
+	ldr	x0, =0x5
+	msr	S3_6_c15_c8_0, x0
+	ldr	x0, =0xEEE10A10
+	msr	S3_6_c15_c8_2, x0
+	ldr	x0, =0xFFEF0FFF
+	msr	S3_6_c15_c8_3, x0
+	ldr	x0, =0x0010F000
+	msr	S3_6_c15_c8_4, x0
+	ldr	x0, =0x0010F000
+	msr	S3_6_c15_c8_5, x0
+	ldr	x0, =0x40000080023ff
+	msr	S3_6_c15_c8_1, x0
+	ldr	x0, =0x6
+	msr	S3_6_c15_c8_0, x0
+	ldr	x0, =0xEE640F34
+	msr	S3_6_c15_c8_2, x0
+	ldr	x0, =0xFFEF0FFF
+	msr	S3_6_c15_c8_3, x0
+	ldr	x0, =0x40000080023ff
+	msr	S3_6_c15_c8_1, x0
+workaround_reset_end cortex_a78, ERRATUM(1952683)
+
+check_erratum_ls cortex_a78, ERRATUM(1952683), CPU_REV(0, 0)
+
+workaround_reset_start cortex_a78, ERRATUM(2132060), ERRATA_A78_2132060
+ /* Apply the workaround. */
+ mrs x1, CORTEX_A78_CPUECTLR_EL1
+ mov x0, #CORTEX_A78_CPUECTLR_EL1_PF_MODE_CNSRV
+ bfi x1, x0, #CPUECTLR_EL1_PF_MODE_LSB, #CPUECTLR_EL1_PF_MODE_WIDTH
+ msr CORTEX_A78_CPUECTLR_EL1, x1
+workaround_reset_end cortex_a78, ERRATUM(2132060)
+
+check_erratum_ls cortex_a78, ERRATUM(2132060), CPU_REV(1, 2)
+
+workaround_reset_start cortex_a78, ERRATUM(2242635), ERRATA_A78_2242635
+ ldr x0, =0x5
+ msr S3_6_c15_c8_0, x0 /* CPUPSELR_EL3 */
+ ldr x0, =0x10F600E000
+ msr S3_6_c15_c8_2, x0 /* CPUPOR_EL3 */
+ ldr x0, =0x10FF80E000
+ msr S3_6_c15_c8_3, x0 /* CPUPMR_EL3 */
+ ldr x0, =0x80000000003FF
+ msr S3_6_c15_c8_1, x0 /* CPUPCR_EL3 */
+workaround_reset_end cortex_a78, ERRATUM(2242635)
+
+check_erratum_range cortex_a78, ERRATUM(2242635), CPU_REV(1, 0), CPU_REV(1, 2)
+
+workaround_reset_start cortex_a78, ERRATUM(2376745), ERRATA_A78_2376745
+ sysreg_bit_set CORTEX_A78_ACTLR2_EL1, BIT(0)
+workaround_reset_end cortex_a78, ERRATUM(2376745)
+
+check_erratum_ls cortex_a78, ERRATUM(2376745), CPU_REV(1, 2)
+
+workaround_reset_start cortex_a78, ERRATUM(2395406), ERRATA_A78_2395406
+ sysreg_bit_set CORTEX_A78_ACTLR2_EL1, BIT(40)
+workaround_reset_end cortex_a78, ERRATUM(2395406)
+
+check_erratum_ls cortex_a78, ERRATUM(2395406), CPU_REV(1, 2)
+
+workaround_reset_start cortex_a78, ERRATUM(2742426), ERRATA_A78_2742426
+ /* Apply the workaround */
+ mrs x1, CORTEX_A78_ACTLR5_EL1
+ bic x1, x1, #BIT(56)
+ orr x1, x1, #BIT(55)
+ msr CORTEX_A78_ACTLR5_EL1, x1
+workaround_reset_end cortex_a78, ERRATUM(2742426)
+
+check_erratum_ls cortex_a78, ERRATUM(2742426), CPU_REV(1, 2)
+
+workaround_runtime_start cortex_a78, ERRATUM(2772019), ERRATA_A78_2772019
+ /* dsb before isb of power down sequence */
+ dsb sy
+workaround_runtime_end cortex_a78, ERRATUM(2772019)
+
+check_erratum_ls cortex_a78, ERRATUM(2772019), CPU_REV(1, 2)
+
+workaround_reset_start cortex_a78, ERRATUM(2779479), ERRATA_A78_2779479
+ sysreg_bit_set CORTEX_A78_ACTLR3_EL1, BIT(47)
+workaround_reset_end cortex_a78, ERRATUM(2779479)
+
+check_erratum_ls cortex_a78, ERRATUM(2779479), CPU_REV(1, 2)
+
+workaround_reset_start cortex_a78, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+	 * The Cortex-A78 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_cortex_a78
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_a78, CVE(2022, 23960)
+
+check_erratum_chosen cortex_a78, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+cpu_reset_func_start cortex_a78
+#if ENABLE_FEAT_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ sysreg_bit_clear actlr_el3, CORTEX_A78_ACTLR_TAM_BIT
+
+ /* Make sure accesses from non-secure EL0/EL1 are not trapped to EL2 */
+ sysreg_bit_clear actlr_el2, CORTEX_A78_ACTLR_TAM_BIT
+
+ /* Enable group0 counters */
+ mov x0, #CORTEX_A78_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET0_EL0, x0
+
+ /* Enable group1 counters */
+ mov x0, #CORTEX_A78_AMU_GROUP1_MASK
+ msr CPUAMCNTENSET1_EL0, x0
+#endif
+cpu_reset_func_end cortex_a78
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_a78_core_pwr_dwn
+ sysreg_bit_set CORTEX_A78_CPUPWRCTLR_EL1, CORTEX_A78_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+
+ apply_erratum cortex_a78, ERRATUM(2772019), ERRATA_A78_2772019
+
+ isb
+ ret
+endfunc cortex_a78_core_pwr_dwn
+
+errata_report_shim cortex_a78
+
+ /* ---------------------------------------------
+ * This function provides cortex_a78 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a78_regs, "aS"
+cortex_a78_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a78_cpu_reg_dump
+ adr x6, cortex_a78_regs
+ mrs x8, CORTEX_A78_CPUECTLR_EL1
+ ret
+endfunc cortex_a78_cpu_reg_dump
+
+declare_cpu_ops cortex_a78, CORTEX_A78_MIDR, \
+ cortex_a78_reset_func, \
+ cortex_a78_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a78_ae.S b/lib/cpus/aarch64/cortex_a78_ae.S
new file mode 100644
index 0000000..d3a3e5d
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a78_ae.S
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2023, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a78_ae.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "cortex_a78_ae must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A78_AE_BHB_LOOP_COUNT, cortex_a78_ae
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+workaround_reset_start cortex_a78_ae, ERRATUM(1941500), ERRATA_A78_AE_1941500
+ sysreg_bit_set CORTEX_A78_AE_CPUECTLR_EL1, CORTEX_A78_AE_CPUECTLR_EL1_BIT_8
+workaround_reset_end cortex_a78_ae, ERRATUM(1941500)
+
+check_erratum_ls cortex_a78_ae, ERRATUM(1941500), CPU_REV(0, 1)
+
+workaround_reset_start cortex_a78_ae, ERRATUM(1951502), ERRATA_A78_AE_1951502
+ msr S3_6_c15_c8_0, xzr
+ ldr x0, =0x10E3900002
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #1
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+
+ mov x0, #2
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_c15_c8_1, x0
+workaround_reset_end cortex_a78_ae, ERRATUM(1951502)
+
+check_erratum_ls cortex_a78_ae, ERRATUM(1951502), CPU_REV(0, 1)
+
+workaround_reset_start cortex_a78_ae, ERRATUM(2376748), ERRATA_A78_AE_2376748
+ /* -------------------------------------------------------
+	 * Set CPUACTLR2_EL1[0] to 1 to force PLDW/PRFM ST to
+	 * behave like PLD/PRFM LD and not cause invalidations to
+	 * other PE caches. There might be a small performance
+	 * degradation with this workaround for certain workloads
+ * that share data.
+ * -------------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A78_AE_ACTLR2_EL1, CORTEX_A78_AE_ACTLR2_EL1_BIT_0
+workaround_reset_end cortex_a78_ae, ERRATUM(2376748)
+
+check_erratum_ls cortex_a78_ae, ERRATUM(2376748), CPU_REV(0, 2)
+
+workaround_reset_start cortex_a78_ae, ERRATUM(2395408), ERRATA_A78_AE_2395408
+ /* --------------------------------------------------------
+ * Disable folding of demand requests into older prefetches
+ * with L2 miss requests outstanding by setting the
+ * CPUACTLR2_EL1[40] to 1.
+ * --------------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A78_AE_ACTLR2_EL1, CORTEX_A78_AE_ACTLR2_EL1_BIT_40
+workaround_reset_end cortex_a78_ae, ERRATUM(2395408)
+
+check_erratum_ls cortex_a78_ae, ERRATUM(2395408), CPU_REV(0, 1)
+
+workaround_reset_start cortex_a78_ae, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+ * The Cortex-A78AE generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_cortex_a78_ae
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_a78_ae, CVE(2022, 23960)
+
+check_erratum_chosen cortex_a78_ae, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+cpu_reset_func_start cortex_a78_ae
+#if ENABLE_FEAT_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ sysreg_bit_clear actlr_el3, CORTEX_A78_ACTLR_TAM_BIT
+
+ /* Make sure accesses from non-secure EL0/EL1 are not trapped to EL2 */
+ sysreg_bit_clear actlr_el2, CORTEX_A78_ACTLR_TAM_BIT
+
+ /* Enable group0 counters */
+ mov x0, #CORTEX_A78_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET0_EL0, x0
+
+ /* Enable group1 counters */
+ mov x0, #CORTEX_A78_AMU_GROUP1_MASK
+ msr CPUAMCNTENSET1_EL0, x0
+#endif
+cpu_reset_func_end cortex_a78_ae
+
+ /* -------------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * -------------------------------------------------------
+ */
+func cortex_a78_ae_core_pwr_dwn
+ /* -------------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * -------------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A78_CPUPWRCTLR_EL1, CORTEX_A78_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+ isb
+ ret
+endfunc cortex_a78_ae_core_pwr_dwn
+
+errata_report_shim cortex_a78_ae
+
+ /* -------------------------------------------------------
+ * This function provides cortex_a78_ae specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * -------------------------------------------------------
+ */
+.section .rodata.cortex_a78_ae_regs, "aS"
+cortex_a78_ae_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a78_ae_cpu_reg_dump
+ adr x6, cortex_a78_ae_regs
+ mrs x8, CORTEX_A78_CPUECTLR_EL1
+ ret
+endfunc cortex_a78_ae_cpu_reg_dump
+
+declare_cpu_ops cortex_a78_ae, CORTEX_A78_AE_MIDR, \
+ cortex_a78_ae_reset_func, \
+ cortex_a78_ae_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a78c.S b/lib/cpus/aarch64/cortex_a78c.S
new file mode 100644
index 0000000..d19c693
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a78c.S
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_a78c.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "cortex_a78c must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_A78C_BHB_LOOP_COUNT, cortex_a78c
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+workaround_reset_start cortex_a78c, ERRATUM(1827430), ERRATA_A78C_1827430
+ /* Disable allocation of splintered pages in the L2 TLB */
+ sysreg_bit_set CORTEX_A78C_CPUECTLR_EL1, CORTEX_A78C_CPUECTLR_EL1_MM_ASP_EN
+workaround_reset_end cortex_a78c, ERRATUM(1827430)
+
+check_erratum_ls cortex_a78c, ERRATUM(1827430), CPU_REV(0, 0)
+
+workaround_reset_start cortex_a78c, ERRATUM(1827440), ERRATA_A78C_1827440
+	/* Force atomic stores to WB memory to be done in the L1 data cache */
+ sysreg_bit_set CORTEX_A78C_CPUACTLR2_EL1, BIT(2)
+workaround_reset_end cortex_a78c, ERRATUM(1827440)
+
+check_erratum_ls cortex_a78c, ERRATUM(1827440), CPU_REV(0, 0)
+
+workaround_reset_start cortex_a78c, ERRATUM(2132064), ERRATA_A78C_2132064
+ /* --------------------------------------------------------
+ * Place the data prefetcher in the most conservative mode
+ * to reduce prefetches by writing the following bits to
+	 * the value indicated: CPUECTLR_EL1[7:6], PF_MODE = 2'b11
+ * --------------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A78C_CPUECTLR_EL1, (CORTEX_A78C_CPUECTLR_EL1_BIT_6 | CORTEX_A78C_CPUECTLR_EL1_BIT_7)
+workaround_reset_end cortex_a78c, ERRATUM(2132064)
+
+check_erratum_range cortex_a78c, ERRATUM(2132064), CPU_REV(0, 1), CPU_REV(0, 2)
+
+workaround_reset_start cortex_a78c, ERRATUM(2242638), ERRATA_A78C_2242638
+ ldr x0, =0x5
+ msr CORTEX_A78C_IMP_CPUPSELR_EL3, x0
+ ldr x0, =0x10F600E000
+ msr CORTEX_A78C_IMP_CPUPOR_EL3, x0
+ ldr x0, =0x10FF80E000
+ msr CORTEX_A78C_IMP_CPUPMR_EL3, x0
+ ldr x0, =0x80000000003FF
+ msr CORTEX_A78C_IMP_CPUPCR_EL3, x0
+workaround_reset_end cortex_a78c, ERRATUM(2242638)
+
+check_erratum_range cortex_a78c, ERRATUM(2242638), CPU_REV(0, 1), CPU_REV(0, 2)
+
+workaround_reset_start cortex_a78c, ERRATUM(2376749), ERRATA_A78C_2376749
+ sysreg_bit_set CORTEX_A78C_CPUACTLR2_EL1, CORTEX_A78C_CPUACTLR2_EL1_BIT_0
+workaround_reset_end cortex_a78c, ERRATUM(2376749)
+
+check_erratum_range cortex_a78c, ERRATUM(2376749), CPU_REV(0, 1), CPU_REV(0, 2)
+
+workaround_reset_start cortex_a78c, ERRATUM(2395411), ERRATA_A78C_2395411
+ sysreg_bit_set CORTEX_A78C_CPUACTLR2_EL1, CORTEX_A78C_CPUACTLR2_EL1_BIT_40
+workaround_reset_end cortex_a78c, ERRATUM(2395411)
+
+check_erratum_range cortex_a78c, ERRATUM(2395411), CPU_REV(0, 1), CPU_REV(0, 2)
+
+workaround_runtime_start cortex_a78c, ERRATUM(2772121), ERRATA_A78C_2772121
+ /* dsb before isb of power down sequence */
+ dsb sy
+workaround_runtime_end cortex_a78c, ERRATUM(2772121)
+
+check_erratum_ls cortex_a78c, ERRATUM(2772121), CPU_REV(0, 2)
+
+workaround_reset_start cortex_a78c, ERRATUM(2779484), ERRATA_A78C_2779484
+ sysreg_bit_set CORTEX_A78C_ACTLR3_EL1, BIT(47)
+workaround_reset_end cortex_a78c, ERRATUM(2779484)
+
+check_erratum_range cortex_a78c, ERRATUM(2779484), CPU_REV(0, 1), CPU_REV(0, 2)
+
+check_erratum_chosen cortex_a78c, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+workaround_reset_start cortex_a78c, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+	 * The Cortex-A78C generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_cortex_a78c
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_a78c, CVE(2022, 23960)
+
+cpu_reset_func_start cortex_a78c
+cpu_reset_func_end cortex_a78c
+
+errata_report_shim cortex_a78c
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_a78c_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_A78C_CPUPWRCTLR_EL1, CORTEX_A78C_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+
+ apply_erratum cortex_a78c, ERRATUM(2772121), ERRATA_A78C_2772121
+
+ isb
+ ret
+endfunc cortex_a78c_core_pwr_dwn
+
+ /* ---------------------------------------------
+ * This function provides cortex_a78c specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_a78c_regs, "aS"
+cortex_a78c_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a78c_cpu_reg_dump
+ adr x6, cortex_a78c_regs
+ mrs x8, CORTEX_A78C_CPUECTLR_EL1
+ ret
+endfunc cortex_a78c_cpu_reg_dump
+
+declare_cpu_ops cortex_a78c, CORTEX_A78C_MIDR, \
+ cortex_a78c_reset_func, \
+ cortex_a78c_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_blackhawk.S b/lib/cpus/aarch64/cortex_blackhawk.S
new file mode 100644
index 0000000..b7b7a2d
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_blackhawk.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_blackhawk.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Blackhawk must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Blackhawk supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+cpu_reset_func_start cortex_blackhawk
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end cortex_blackhawk
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_blackhawk_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_BLACKHAWK_CPUPWRCTLR_EL1, CORTEX_BLACKHAWK_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ isb
+ ret
+endfunc cortex_blackhawk_core_pwr_dwn
+
+errata_report_shim cortex_blackhawk
+
+ /* ---------------------------------------------
+ * This function provides Cortex Blackhawk specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_blackhawk_regs, "aS"
+cortex_blackhawk_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_blackhawk_cpu_reg_dump
+ adr x6, cortex_blackhawk_regs
+ mrs x8, CORTEX_BLACKHAWK_CPUECTLR_EL1
+ ret
+endfunc cortex_blackhawk_cpu_reg_dump
+
+declare_cpu_ops cortex_blackhawk, CORTEX_BLACKHAWK_MIDR, \
+ cortex_blackhawk_reset_func, \
+ cortex_blackhawk_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_chaberton.S b/lib/cpus/aarch64/cortex_chaberton.S
new file mode 100644
index 0000000..596fe4a
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_chaberton.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_chaberton.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex Chaberton must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex Chaberton supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+cpu_reset_func_start cortex_chaberton
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end cortex_chaberton
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_chaberton_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_CHABERTON_CPUPWRCTLR_EL1, CORTEX_CHABERTON_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ isb
+ ret
+endfunc cortex_chaberton_core_pwr_dwn
+
+errata_report_shim cortex_chaberton
+
+ /* ---------------------------------------------
+ * This function provides Cortex Chaberton specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_chaberton_regs, "aS"
+cortex_chaberton_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_chaberton_cpu_reg_dump
+ adr x6, cortex_chaberton_regs
+ mrs x8, CORTEX_CHABERTON_CPUECTLR_EL1
+ ret
+endfunc cortex_chaberton_cpu_reg_dump
+
+declare_cpu_ops cortex_chaberton, CORTEX_CHABERTON_MIDR, \
+ cortex_chaberton_reset_func, \
+ cortex_chaberton_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_gelas.S b/lib/cpus/aarch64/cortex_gelas.S
new file mode 100644
index 0000000..dc704f2
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_gelas.S
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_gelas.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Gelas must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Gelas supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+cpu_reset_func_start cortex_gelas
+ /* ----------------------------------------------------
+ * Disable speculative loads
+ * ----------------------------------------------------
+ */
+ msr SSBS, xzr
+cpu_reset_func_end cortex_gelas
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_gelas_core_pwr_dwn
+#if ENABLE_SME_FOR_NS
+ /* ---------------------------------------------------
+ * Disable SME if enabled and supported
+ * ---------------------------------------------------
+ */
+ mrs x0, ID_AA64PFR1_EL1
+ ubfx x0, x0, #ID_AA64PFR1_EL1_SME_SHIFT, \
+ #ID_AA64PFR1_EL1_SME_WIDTH
+ cmp x0, #ID_AA64PFR1_EL1_SME_NOT_SUPPORTED
+ b.eq 1f
+ msr CORTEX_GELAS_SVCRSM, xzr
+ msr CORTEX_GELAS_SVCRZA, xzr
+1:
+#endif
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_GELAS_CPUPWRCTLR_EL1, \
+ CORTEX_GELAS_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ isb
+ ret
+endfunc cortex_gelas_core_pwr_dwn
+
+errata_report_shim cortex_gelas
+
+ /* ---------------------------------------------
+ * This function provides Gelas specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_gelas_regs, "aS"
+cortex_gelas_regs: /* The ASCII list of register names to be reported */
+ .asciz "imp_cpuectlr_el1", ""
+
+func cortex_gelas_cpu_reg_dump
+ adr x6, cortex_gelas_regs
+ mrs x8, CORTEX_GELAS_IMP_CPUECTLR_EL1
+ ret
+endfunc cortex_gelas_cpu_reg_dump
+
+declare_cpu_ops cortex_gelas, CORTEX_GELAS_MIDR, \
+ cortex_gelas_reset_func, \
+ cortex_gelas_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x1.S b/lib/cpus/aarch64/cortex_x1.S
new file mode 100644
index 0000000..42634f1
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_x1.S
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2022-2023, Google LLC. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <cortex_x1.h>
+#include <cpu_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-X1 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-X1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_X1_BHB_LOOP_COUNT, cortex_x1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+workaround_reset_start cortex_x1, ERRATUM(1688305), ERRATA_X1_1688305
+ sysreg_bit_set CORTEX_X1_ACTLR2_EL1, BIT(1)
+workaround_reset_end cortex_x1, ERRATUM(1688305)
+
+check_erratum_ls cortex_x1, ERRATUM(1688305), CPU_REV(1, 0)
+
+workaround_reset_start cortex_x1, ERRATUM(1821534), ERRATA_X1_1821534
+ sysreg_bit_set CORTEX_X1_ACTLR2_EL1, BIT(2)
+workaround_reset_end cortex_x1, ERRATUM(1821534)
+
+check_erratum_ls cortex_x1, ERRATUM(1821534), CPU_REV(1, 0)
+
+workaround_reset_start cortex_x1, ERRATUM(1827429), ERRATA_X1_1827429
+ sysreg_bit_set CORTEX_X1_CPUECTLR_EL1, BIT(53)
+workaround_reset_end cortex_x1, ERRATUM(1827429)
+
+check_erratum_ls cortex_x1, ERRATUM(1827429), CPU_REV(1, 0)
+
+check_erratum_chosen cortex_x1, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+workaround_reset_start cortex_x1, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+ * The Cortex-X1 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_cortex_x1
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_x1, CVE(2022, 23960)
+
+cpu_reset_func_start cortex_x1
+cpu_reset_func_end cortex_x1
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func cortex_x1_core_pwr_dwn
+ sysreg_bit_set CORTEX_X1_CPUPWRCTLR_EL1, CORTEX_X1_CORE_PWRDN_EN_MASK
+ isb
+ ret
+endfunc cortex_x1_core_pwr_dwn
+
+errata_report_shim cortex_x1
+
+ /* ---------------------------------------------
+ * This function provides Cortex X1 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_x1_regs, "aS"
+cortex_x1_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_x1_cpu_reg_dump
+ adr x6, cortex_x1_regs
+ mrs x8, CORTEX_X1_CPUECTLR_EL1
+ ret
+endfunc cortex_x1_cpu_reg_dump
+
+declare_cpu_ops cortex_x1, CORTEX_X1_MIDR, \
+ cortex_x1_reset_func, \
+ cortex_x1_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
new file mode 100644
index 0000000..258288c
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_x2.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex X2 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex X2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_X2_BHB_LOOP_COUNT, cortex_x2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+workaround_reset_start cortex_x2, ERRATUM(2002765), ERRATA_X2_2002765
+ ldr x0, =0x6
+ msr S3_6_C15_C8_0, x0 /* CPUPSELR_EL3 */
+ ldr x0, =0xF3A08002
+ msr S3_6_C15_C8_2, x0 /* CPUPOR_EL3 */
+ ldr x0, =0xFFF0F7FE
+ msr S3_6_C15_C8_3, x0 /* CPUPMR_EL3 */
+ ldr x0, =0x40000001003ff
+ msr S3_6_C15_C8_1, x0 /* CPUPCR_EL3 */
+workaround_reset_end cortex_x2, ERRATUM(2002765)
+
+check_erratum_ls cortex_x2, ERRATUM(2002765), CPU_REV(2, 0)
+
+workaround_reset_start cortex_x2, ERRATUM(2017096), ERRATA_X2_2017096
+ sysreg_bit_set CORTEX_X2_CPUECTLR_EL1, CORTEX_X2_CPUECTLR_EL1_PFSTIDIS_BIT
+workaround_reset_end cortex_x2, ERRATUM(2017096)
+
+check_erratum_ls cortex_x2, ERRATUM(2017096), CPU_REV(2, 0)
+
+workaround_reset_start cortex_x2, ERRATUM(2058056), ERRATA_X2_2058056
+ sysreg_bitfield_insert CORTEX_X2_CPUECTLR2_EL1, CORTEX_X2_CPUECTLR2_EL1_PF_MODE_CNSRV, \
+ CORTEX_X2_CPUECTLR2_EL1_PF_MODE_SHIFT, CORTEX_X2_CPUECTLR2_EL1_PF_MODE_WIDTH
+workaround_reset_end cortex_x2, ERRATUM(2058056)
+
+check_erratum_ls cortex_x2, ERRATUM(2058056), CPU_REV(2, 1)
+
+workaround_reset_start cortex_x2, ERRATUM(2081180), ERRATA_X2_2081180
+ /* Apply instruction patching sequence */
+ ldr x0, =0x3
+ msr CORTEX_X2_IMP_CPUPSELR_EL3, x0
+ ldr x0, =0xF3A08002
+ msr CORTEX_X2_IMP_CPUPOR_EL3, x0
+ ldr x0, =0xFFF0F7FE
+ msr CORTEX_X2_IMP_CPUPMR_EL3, x0
+ ldr x0, =0x10002001003FF
+ msr CORTEX_X2_IMP_CPUPCR_EL3, x0
+ ldr x0, =0x4
+ msr CORTEX_X2_IMP_CPUPSELR_EL3, x0
+ ldr x0, =0xBF200000
+ msr CORTEX_X2_IMP_CPUPOR_EL3, x0
+ ldr x0, =0xFFEF0000
+ msr CORTEX_X2_IMP_CPUPMR_EL3, x0
+ ldr x0, =0x10002001003F3
+ msr CORTEX_X2_IMP_CPUPCR_EL3, x0
+workaround_reset_end cortex_x2, ERRATUM(2081180)
+
+check_erratum_ls cortex_x2, ERRATUM(2081180), CPU_REV(2, 0)
+
+workaround_reset_start cortex_x2, ERRATUM(2083908), ERRATA_X2_2083908
+ /* Apply the workaround by setting bit 13 in CPUACTLR5_EL1. */
+ sysreg_bit_set CORTEX_X2_CPUACTLR5_EL1, BIT(13)
+workaround_reset_end cortex_x2, ERRATUM(2083908)
+
+check_erratum_range cortex_x2, ERRATUM(2083908), CPU_REV(2, 0), CPU_REV(2, 0)
+
+workaround_reset_start cortex_x2, ERRATUM(2147715), ERRATA_X2_2147715
+ /* Apply the workaround by setting bit 22 in CPUACTLR_EL1. */
+ sysreg_bit_set CORTEX_X2_CPUACTLR_EL1, CORTEX_X2_CPUACTLR_EL1_BIT_22
+workaround_reset_end cortex_x2, ERRATUM(2147715)
+
+check_erratum_range cortex_x2, ERRATUM(2147715), CPU_REV(2, 0), CPU_REV(2, 0)
+
+workaround_reset_start cortex_x2, ERRATUM(2216384), ERRATA_X2_2216384
+ sysreg_bit_set CORTEX_X2_CPUACTLR5_EL1, CORTEX_X2_CPUACTLR5_EL1_BIT_17
+
+ /* Apply instruction patching sequence */
+ ldr x0, =0x5
+ msr CORTEX_X2_IMP_CPUPSELR_EL3, x0
+ ldr x0, =0x10F600E000
+ msr CORTEX_X2_IMP_CPUPOR_EL3, x0
+ ldr x0, =0x10FF80E000
+ msr CORTEX_X2_IMP_CPUPMR_EL3, x0
+ ldr x0, =0x80000000003FF
+ msr CORTEX_X2_IMP_CPUPCR_EL3, x0
+workaround_reset_end cortex_x2, ERRATUM(2216384)
+
+check_erratum_ls cortex_x2, ERRATUM(2216384), CPU_REV(2, 0)
+
+workaround_reset_start cortex_x2, ERRATUM(2282622), ERRATA_X2_2282622
+ /* Apply the workaround */
+ sysreg_bit_set CORTEX_X2_CPUACTLR2_EL1, BIT(0)
+workaround_reset_end cortex_x2, ERRATUM(2282622)
+
+check_erratum_ls cortex_x2, ERRATUM(2282622), CPU_REV(2, 1)
+
+workaround_reset_start cortex_x2, ERRATUM(2371105), ERRATA_X2_2371105
+ /* Set bit 40 in CPUACTLR2_EL1 */
+ sysreg_bit_set CORTEX_X2_CPUACTLR2_EL1, CORTEX_X2_CPUACTLR2_EL1_BIT_40
+workaround_reset_end cortex_x2, ERRATUM(2371105)
+
+check_erratum_ls cortex_x2, ERRATUM(2371105), CPU_REV(2, 0)
+
+workaround_reset_start cortex_x2, ERRATUM(2742423), ERRATA_X2_2742423
+ /* Set CPUACTLR5_EL1[56:55] to 2'b01 */
+ sysreg_bit_set CORTEX_X2_CPUACTLR5_EL1, BIT(55)
+ sysreg_bit_clear CORTEX_X2_CPUACTLR5_EL1, BIT(56)
+workaround_reset_end cortex_x2, ERRATUM(2742423)
+
+check_erratum_ls cortex_x2, ERRATUM(2742423), CPU_REV(2, 1)
+
+workaround_reset_start cortex_x2, ERRATUM(2768515), ERRATA_X2_2768515
+ /* dsb before isb of power down sequence */
+ dsb sy
+workaround_reset_end cortex_x2, ERRATUM(2768515)
+
+check_erratum_ls cortex_x2, ERRATUM(2768515), CPU_REV(2, 1)
+
+workaround_reset_start cortex_x2, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+ * The Cortex-X2 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_cortex_x2
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_x2, CVE(2022, 23960)
+
+check_erratum_chosen cortex_x2, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+/*
+ * ERRATA_DSU_2313941:
+ * The erratum is implemented in dsu_helpers.S but applies to cortex_x2
+ * as well. Hence, symbolic names are created for the already existing errata
+ * workaround functions so that they are registered under the Errata Framework.
+ */
+.equ check_erratum_cortex_x2_2313941, check_errata_dsu_2313941
+.equ erratum_cortex_x2_2313941_wa, errata_dsu_2313941_wa
+add_erratum_entry cortex_x2, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_x2_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_X2_CPUPWRCTLR_EL1, CORTEX_X2_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+
+#if ERRATA_X2_2768515
+ mov x15, x30
+ bl cpu_get_rev_var
+ bl erratum_cortex_x2_2768515_wa
+ mov x30, x15
+#endif /* ERRATA_X2_2768515 */
+ isb
+ ret
+endfunc cortex_x2_core_pwr_dwn
+
+errata_report_shim cortex_x2
+
+cpu_reset_func_start cortex_x2
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end cortex_x2
+
+ /* ---------------------------------------------
+ * This function provides Cortex X2 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_x2_regs, "aS"
+cortex_x2_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_x2_cpu_reg_dump
+ adr x6, cortex_x2_regs
+ mrs x8, CORTEX_X2_CPUECTLR_EL1
+ ret
+endfunc cortex_x2_cpu_reg_dump
+
+declare_cpu_ops cortex_x2, CORTEX_X2_MIDR, \
+ cortex_x2_reset_func, \
+ cortex_x2_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x3.S b/lib/cpus/aarch64/cortex_x3.S
new file mode 100644
index 0000000..0cb3b97
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_x3.S
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_x3.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex-X3 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex-X3 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_X3_BHB_LOOP_COUNT, cortex_x3
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+workaround_reset_start cortex_x3, ERRATUM(2070301), ERRATA_X3_2070301
+ sysreg_bitfield_insert CORTEX_X3_CPUECTLR2_EL1, CORTEX_X3_CPUECTLR2_EL1_PF_MODE_CNSRV, \
+ CORTEX_X3_CPUECTLR2_EL1_PF_MODE_LSB, CORTEX_X3_CPUECTLR2_EL1_PF_MODE_WIDTH
+workaround_reset_end cortex_x3, ERRATUM(2070301)
+
+check_erratum_ls cortex_x3, ERRATUM(2070301), CPU_REV(1, 2)
+
+workaround_runtime_start cortex_x3, ERRATUM(2313909), ERRATA_X3_2313909
+ sysreg_bit_set CORTEX_X3_CPUACTLR2_EL1, CORTEX_X3_CPUACTLR2_EL1_BIT_36
+workaround_runtime_end cortex_x3, ERRATUM(2313909), NO_ISB
+
+check_erratum_ls cortex_x3, ERRATUM(2313909), CPU_REV(1, 0)
+
+workaround_reset_start cortex_x3, ERRATUM(2615812), ERRATA_X3_2615812
+ /* Disable retention control for WFI and WFE. */
+ mrs x0, CORTEX_X3_CPUPWRCTLR_EL1
+ bfi x0, xzr, #CORTEX_X3_CPUPWRCTLR_EL1_WFI_RET_CTRL_BITS_SHIFT, #3
+ bfi x0, xzr, #CORTEX_X3_CPUPWRCTLR_EL1_WFE_RET_CTRL_BITS_SHIFT, #3
+ msr CORTEX_X3_CPUPWRCTLR_EL1, x0
+workaround_reset_end cortex_x3, ERRATUM(2615812)
+
+check_erratum_ls cortex_x3, ERRATUM(2615812), CPU_REV(1, 1)
+
+workaround_reset_start cortex_x3, ERRATUM(2742421), ERRATA_X3_2742421
+ /* Set CPUACTLR5_EL1[56:55] to 2'b01 */
+ sysreg_bit_set CORTEX_X3_CPUACTLR5_EL1, CORTEX_X3_CPUACTLR5_EL1_BIT_55
+ sysreg_bit_clear CORTEX_X3_CPUACTLR5_EL1, CORTEX_X3_CPUACTLR5_EL1_BIT_56
+workaround_reset_end cortex_x3, ERRATUM(2742421)
+
+check_erratum_ls cortex_x3, ERRATUM(2742421), CPU_REV(1, 1)
+
+workaround_reset_start cortex_x3, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ override_vector_table wa_cve_vbar_cortex_x3
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_x3, CVE(2022, 23960)
+
+check_erratum_chosen cortex_x3, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+cpu_reset_func_start cortex_x3
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end cortex_x3
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_x3_core_pwr_dwn
+	apply_erratum cortex_x3, ERRATUM(2313909), ERRATA_X3_2313909
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_X3_CPUPWRCTLR_EL1, CORTEX_X3_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ isb
+ ret
+endfunc cortex_x3_core_pwr_dwn
+
+errata_report_shim cortex_x3
+
+ /* ---------------------------------------------
+ * This function provides Cortex-X3-
+ * specific register information for crash
+ * reporting. It needs to return with x6
+ * pointing to a list of register names in ascii
+ * and x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_x3_regs, "aS"
+cortex_x3_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_x3_cpu_reg_dump
+ adr x6, cortex_x3_regs
+ mrs x8, CORTEX_X3_CPUECTLR_EL1
+ ret
+endfunc cortex_x3_cpu_reg_dump
+
+declare_cpu_ops cortex_x3, CORTEX_X3_MIDR, \
+ cortex_x3_reset_func, \
+ cortex_x3_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_x4.S b/lib/cpus/aarch64/cortex_x4.S
new file mode 100644
index 0000000..7619f9c
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_x4.S
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_x4.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Cortex X4 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Cortex X4 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table CORTEX_X4_BHB_LOOP_COUNT, cortex_x4
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+workaround_reset_start cortex_x4, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+ * The Cortex X4 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_cortex_x4
+#endif /* IMAGE_BL31 */
+workaround_reset_end cortex_x4, CVE(2022, 23960)
+
+check_erratum_chosen cortex_x4, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+cpu_reset_func_start cortex_x4
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end cortex_x4
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func cortex_x4_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_X4_CPUPWRCTLR_EL1, CORTEX_X4_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ isb
+ ret
+endfunc cortex_x4_core_pwr_dwn
+
+errata_report_shim cortex_x4
+
+ /* ---------------------------------------------
+ * This function provides Cortex X4-specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.cortex_x4_regs, "aS"
+cortex_x4_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_x4_cpu_reg_dump
+ adr x6, cortex_x4_regs
+ mrs x8, CORTEX_X4_CPUECTLR_EL1
+ ret
+endfunc cortex_x4_cpu_reg_dump
+
+declare_cpu_ops cortex_x4, CORTEX_X4_MIDR, \
+ cortex_x4_reset_func, \
+ cortex_x4_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
new file mode 100644
index 0000000..1ae3180
--- /dev/null
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -0,0 +1,393 @@
+/*
+ * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cpu_macros.S>
+#include <lib/cpus/cpu_ops.h>
+#include <lib/cpus/errata.h>
+#include <lib/el3_runtime/cpu_data.h>
+
+ /* Reset fn is needed in BL at reset vector */
+#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || \
+ (defined(IMAGE_BL2) && RESET_TO_BL2)
+ /*
+ * The reset handler common to all platforms. After a matching
+	 * cpu_ops structure entry is found, the corresponding reset_handler
+ * in the cpu_ops is invoked.
+ * Clobbers: x0 - x19, x30
+ */
+ .globl reset_handler
+func reset_handler
+ mov x19, x30
+
+ /* The plat_reset_handler can clobber x0 - x18, x30 */
+ bl plat_reset_handler
+
+ /* Get the matching cpu_ops pointer */
+ bl get_cpu_ops_ptr
+
+#if ENABLE_ASSERTIONS
+ /*
+ * Assert if invalid cpu_ops obtained. If this is not valid, it may
+ * suggest that the proper CPU file hasn't been included.
+ */
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+
+ /* Get the cpu_ops reset handler */
+ ldr x2, [x0, #CPU_RESET_FUNC]
+ mov x30, x19
+ cbz x2, 1f
+
+ /* The cpu_ops reset handler can clobber x0 - x19, x30 */
+ br x2
+1:
+ ret
+endfunc reset_handler
+
+#endif
+
+#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
+ /*
+ * void prepare_cpu_pwr_dwn(unsigned int power_level)
+ *
+ * Prepare CPU power down function for all platforms. The function takes
+ * a domain level to be powered down as its parameter. After the cpu_ops
+ * pointer is retrieved from cpu_data, the handler for requested power
+ * level is called.
+ */
+ .globl prepare_cpu_pwr_dwn
+func prepare_cpu_pwr_dwn
+ /*
+	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS - 1, call the
+	 * power down handler for the last (highest) implemented power level
+ */
+ mov_imm x2, (CPU_MAX_PWR_DWN_OPS - 1)
+ cmp x0, x2
+ csel x2, x2, x0, hi
+
+ mrs x1, tpidr_el3
+ ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+
+ /* Get the appropriate power down handler */
+ mov x1, #CPU_PWR_DWN_OPS
+ add x1, x1, x2, lsl #3
+ ldr x1, [x0, x1]
+#if ENABLE_ASSERTIONS
+ cmp x1, #0
+ ASM_ASSERT(ne)
+#endif
+ br x1
+endfunc prepare_cpu_pwr_dwn
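+
+	/*
+	 * Worked example (illustrative): power_level 0 selects the handler at
+	 * offset CPU_PWR_DWN_OPS in cpu_ops (core power down), level 1 the
+	 * next 8-byte slot (cluster), and any level at or above
+	 * CPU_MAX_PWR_DWN_OPS is clamped to the last implemented handler by
+	 * the csel above.
+	 */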
+
+
+ /*
+ * Initializes the cpu_ops_ptr if not already initialized
+ * in cpu_data. This can be called without a runtime stack, but may
+ * only be called after the MMU is enabled.
+ * clobbers: x0 - x6, x10
+ */
+ .globl init_cpu_ops
+func init_cpu_ops
+ mrs x6, tpidr_el3
+ ldr x0, [x6, #CPU_DATA_CPU_OPS_PTR]
+ cbnz x0, 1f
+ mov x10, x30
+ bl get_cpu_ops_ptr
+ str x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
+ mov x30, x10
+1:
+ ret
+endfunc init_cpu_ops
+#endif /* IMAGE_BL31 */
+
+#if defined(IMAGE_BL31) && CRASH_REPORTING
+ /*
+ * The cpu specific registers which need to be reported in a crash
+ * are reported via cpu_ops cpu_reg_dump function. After a matching
+	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
+ * in the cpu_ops is invoked.
+ */
+ .globl do_cpu_reg_dump
+func do_cpu_reg_dump
+ mov x16, x30
+
+ /* Get the matching cpu_ops pointer */
+ bl get_cpu_ops_ptr
+ cbz x0, 1f
+
+ /* Get the cpu_ops cpu_reg_dump */
+ ldr x2, [x0, #CPU_REG_DUMP]
+ cbz x2, 1f
+ blr x2
+1:
+ mov x30, x16
+ ret
+endfunc do_cpu_reg_dump
+#endif
+
+ /*
+ * The below function returns the cpu_ops structure matching the
+ * midr of the core. It reads the MIDR_EL1 and finds the matching
+ * entry in cpu_ops entries. Only the implementation and part number
+ * are used to match the entries.
+ *
+ * If cpu_ops for the MIDR_EL1 cannot be found and
+	 * SUPPORT_UNKNOWN_MPID is enabled, the lookup falls back to a
+	 * default cpu_ops entry with an MIDR value of 0.
+ * (Implementation number 0x0 should be reserved for software use
+ * and therefore no clashes should happen with that default value).
+ *
+ * Return :
+ * x0 - The matching cpu_ops pointer on Success
+ * x0 - 0 on failure.
+ * Clobbers : x0 - x5
+ */
+ .globl get_cpu_ops_ptr
+func get_cpu_ops_ptr
+ /* Read the MIDR_EL1 */
+ mrs x2, midr_el1
+ mov_imm x3, CPU_IMPL_PN_MASK
+
+ /* Retain only the implementation and part number using mask */
+ and w2, w2, w3
+
+ /* Get the cpu_ops end location */
+ adr x5, (__CPU_OPS_END__ + CPU_MIDR)
+
+ /* Initialize the return parameter */
+ mov x0, #0
+1:
+ /* Get the cpu_ops start location */
+ adr x4, (__CPU_OPS_START__ + CPU_MIDR)
+
+2:
+ /* Check if we have reached end of list */
+ cmp x4, x5
+ b.eq search_def_ptr
+
+ /* load the midr from the cpu_ops */
+ ldr x1, [x4], #CPU_OPS_SIZE
+ and w1, w1, w3
+
+ /* Check if midr matches to midr of this core */
+ cmp w1, w2
+ b.ne 2b
+
+ /* Subtract the increment and offset to get the cpu-ops pointer */
+ sub x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+#ifdef SUPPORT_UNKNOWN_MPID
+ cbnz x2, exit_mpid_found
+ /* Mark the unsupported MPID flag */
+ adrp x1, unsupported_mpid_flag
+ add x1, x1, :lo12:unsupported_mpid_flag
+ str w2, [x1]
+exit_mpid_found:
+#endif
+ ret
+
+ /*
+ * Search again for a default pointer (MIDR = 0x0)
+ * or return error if already searched.
+ */
+search_def_ptr:
+#ifdef SUPPORT_UNKNOWN_MPID
+ cbz x2, error_exit
+ mov x2, #0
+ b 1b
+error_exit:
+#endif
+ ret
+endfunc get_cpu_ops_ptr
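+
+/*
+ * For illustration: because only the implementer and part number are compared
+ * under CPU_IMPL_PN_MASK, different revisions of the same part (for example
+ * r0p0 and r1p1) resolve to the same cpu_ops entry; revision handling is left
+ * to the errata helpers below.
+ */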
+
+/*
+ * Extract CPU revision and variant, and combine them into a single numeric for
+ * easier comparison.
+ */
+ .globl cpu_get_rev_var
+func cpu_get_rev_var
+ mrs x1, midr_el1
+
+ /*
+ * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
+ * as variant[7:4] and revision[3:0] of x0.
+ *
+ * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
+ * extract x1[3:0] into x0[3:0] retaining other bits.
+ */
+ ubfx x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
+ bfxil x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
+ ret
+endfunc cpu_get_rev_var
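+
+/*
+ * For illustration (assumed example values): a core reporting variant 0x1 in
+ * MIDR_EL1[23:20] and revision 0x2 in MIDR_EL1[3:0] returns x0 = 0x12, the
+ * same encoding produced by CPU_REV(1, 2) and the value the cpu_rev_var_*
+ * helpers below compare against.
+ */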
+
+/*
+ * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
+ * application purposes. If the revision-variant is less than or same as a given
+ * value, indicates that errata applies; otherwise not.
+ *
+ * Shall clobber: x0-x3
+ */
+ .globl cpu_rev_var_ls
+func cpu_rev_var_ls
+ mov x2, #ERRATA_APPLIES
+ mov x3, #ERRATA_NOT_APPLIES
+ cmp x0, x1
+ csel x0, x2, x3, ls
+ ret
+endfunc cpu_rev_var_ls
+
+/*
+ * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
+ * application purposes. If the revision-variant is greater than or equal to
+ * the given value, the erratum applies; otherwise it does not.
+ *
+ * Shall clobber: x0-x3
+ */
+ .globl cpu_rev_var_hs
+func cpu_rev_var_hs
+ mov x2, #ERRATA_APPLIES
+ mov x3, #ERRATA_NOT_APPLIES
+ cmp x0, x1
+ csel x0, x2, x3, hs
+ ret
+endfunc cpu_rev_var_hs
+
+/*
+ * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for errata
+ * application purposes. If the revision-variant falls within the given range
+ * (inclusive of both bounds), the erratum applies; otherwise it does not.
+ *
+ * Shall clobber: x0-x4
+ */
+ .globl cpu_rev_var_range
+func cpu_rev_var_range
+ mov x3, #ERRATA_APPLIES
+ mov x4, #ERRATA_NOT_APPLIES
+ cmp x0, x1
+ csel x1, x3, x4, hs
+ cbz x1, 1f
+ cmp x0, x2
+ csel x1, x3, x4, ls
+1:
+ mov x0, x1
+ ret
+endfunc cpu_rev_var_range
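+
+/*
+ * Usage sketch (assumed typical flow): the check_erratum_ls / check_erratum_range
+ * macros used by the CPU files above expand to calls into these helpers. For
+ * example, an erratum limited to revisions at or below r1p0 calls
+ * cpu_rev_var_ls with the current revision-variant in x0 and CPU_REV(1, 0) in
+ * x1, and the caller then acts on the returned ERRATA_APPLIES or
+ * ERRATA_NOT_APPLIES value.
+ */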
+
+/*
+ * int check_wa_cve_2017_5715(void);
+ *
+ * This function returns:
+ * - ERRATA_APPLIES when firmware mitigation is required.
+ * - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
+ * - ERRATA_MISSING when firmware mitigation would be required but
+ * is not compiled in.
+ *
+ * NOTE: Must be called only after cpu_ops have been initialized
+ * in per-CPU data.
+ */
+ .globl check_wa_cve_2017_5715
+func check_wa_cve_2017_5715
+ mrs x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_EXTRA1_FUNC]
+ /*
+ * If the reserved function pointer is NULL, this CPU
+ * is unaffected by CVE-2017-5715 so bail out.
+ */
+ cmp x0, #CPU_NO_EXTRA1_FUNC
+ beq 1f
+ br x0
+1:
+ mov x0, #ERRATA_NOT_APPLIES
+ ret
+endfunc check_wa_cve_2017_5715
+
+/*
+ * void *wa_cve_2018_3639_get_disable_ptr(void);
+ *
+ * Returns a function pointer which is used to disable mitigation
+ * for CVE-2018-3639.
+ * The function pointer is only returned on cores that employ
+ * dynamic mitigation. If the core uses static mitigation or is
+ * unaffected by CVE-2018-3639 this function returns NULL.
+ *
+ * NOTE: Must be called only after cpu_ops have been initialized
+ * in per-CPU data.
+ */
+ .globl wa_cve_2018_3639_get_disable_ptr
+func wa_cve_2018_3639_get_disable_ptr
+ mrs x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_EXTRA2_FUNC]
+ ret
+endfunc wa_cve_2018_3639_get_disable_ptr
+
+/*
+ * int check_smccc_arch_wa3_applies(void);
+ *
+ * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
+ * CVE-2022-23960 for this CPU. It returns:
+ * - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
+ * the CVE.
+ * - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
+ * mitigate the CVE.
+ *
+ * NOTE: Must be called only after cpu_ops have been initialized
+ * in per-CPU data.
+ */
+ .globl check_smccc_arch_wa3_applies
+func check_smccc_arch_wa3_applies
+ mrs x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_EXTRA3_FUNC]
+ /*
+ * If the reserved function pointer is NULL, this CPU
+ * is unaffected by CVE-2022-23960 so bail out.
+ */
+ cmp x0, #CPU_NO_EXTRA3_FUNC
+ beq 1f
+ br x0
+1:
+ mov x0, #ERRATA_NOT_APPLIES
+ ret
+endfunc check_smccc_arch_wa3_applies
diff --git a/lib/cpus/aarch64/cpuamu.c b/lib/cpus/aarch64/cpuamu.c
new file mode 100644
index 0000000..3a2fa81
--- /dev/null
+++ b/lib/cpus/aarch64/cpuamu.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <cpuamu.h>
+#include <lib/el3_runtime/pubsub_events.h>
+#include <plat/common/platform.h>
+
+#define CPUAMU_NR_COUNTERS 5U
+
+struct cpuamu_ctx {
+ uint64_t cnts[CPUAMU_NR_COUNTERS];
+ unsigned int mask;
+};
+
+static struct cpuamu_ctx cpuamu_ctxs[PLATFORM_CORE_COUNT];
+
+int midr_match(unsigned int cpu_midr)
+{
+ unsigned int midr, midr_mask;
+
+ midr = (unsigned int)read_midr();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
+ (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ return ((midr & midr_mask) == (cpu_midr & midr_mask));
+}
+
+void cpuamu_context_save(unsigned int nr_counters)
+{
+ struct cpuamu_ctx *ctx = &cpuamu_ctxs[plat_my_core_pos()];
+ unsigned int i;
+
+ assert(nr_counters <= CPUAMU_NR_COUNTERS);
+
+ /* Save counter configuration */
+ ctx->mask = cpuamu_read_cpuamcntenset_el0();
+
+ /* Disable counters */
+ cpuamu_write_cpuamcntenclr_el0(ctx->mask);
+ isb();
+
+ /* Save counters */
+ for (i = 0; i < nr_counters; i++)
+ ctx->cnts[i] = cpuamu_cnt_read(i);
+}
+
+void cpuamu_context_restore(unsigned int nr_counters)
+{
+ struct cpuamu_ctx *ctx = &cpuamu_ctxs[plat_my_core_pos()];
+ unsigned int i;
+
+ assert(nr_counters <= CPUAMU_NR_COUNTERS);
+
+ /*
+ * Disable counters. They were enabled early in the
+ * CPU reset function.
+ */
+ cpuamu_write_cpuamcntenclr_el0(ctx->mask);
+ isb();
+
+ /* Restore counters */
+ for (i = 0; i < nr_counters; i++)
+ cpuamu_cnt_write(i, ctx->cnts[i]);
+ isb();
+
+ /* Restore counter configuration */
+ cpuamu_write_cpuamcntenset_el0(ctx->mask);
+}
diff --git a/lib/cpus/aarch64/cpuamu_helpers.S b/lib/cpus/aarch64/cpuamu_helpers.S
new file mode 100644
index 0000000..e71e345
--- /dev/null
+++ b/lib/cpus/aarch64/cpuamu_helpers.S
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpuamu.h>
+
+ .globl cpuamu_cnt_read
+ .globl cpuamu_cnt_write
+ .globl cpuamu_read_cpuamcntenset_el0
+ .globl cpuamu_read_cpuamcntenclr_el0
+ .globl cpuamu_write_cpuamcntenset_el0
+ .globl cpuamu_write_cpuamcntenclr_el0
+
+/*
+ * uint64_t cpuamu_cnt_read(unsigned int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
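+ *
+ * The read is dispatched through a small jump table: each entry below is
+ * an 8-byte mrs/ret pair (12 bytes when ENABLE_BTI adds a "bti j" landing
+ * pad per entry), so the branch target is 1f + idx * 8 (+ idx * 4 with
+ * BTI). For example, idx = 2 lands on the CPUAMEVCNTR2_EL0 entry.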
+ */
+func cpuamu_cnt_read
+ adr x1, 1f
+ add x1, x1, x0, lsl #3 /* each mrs/ret sequence is 8 bytes */
+#if ENABLE_BTI
+ add x1, x1, x0, lsl #2 /* + "bti j" instruction */
+#endif
+ br x1
+
+1: read CPUAMEVCNTR0_EL0
+ read CPUAMEVCNTR1_EL0
+ read CPUAMEVCNTR2_EL0
+ read CPUAMEVCNTR3_EL0
+ read CPUAMEVCNTR4_EL0
+endfunc cpuamu_cnt_read
+
+/*
+ * void cpuamu_cnt_write(unsigned int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func cpuamu_cnt_write
+ adr x2, 1f
+ add x2, x2, x0, lsl #3 /* each msr/ret sequence is 8 bytes */
+#if ENABLE_BTI
+ add x2, x2, x0, lsl #2 /* + "bti j" instruction */
+#endif
+ br x2
+
+1: write CPUAMEVCNTR0_EL0
+ write CPUAMEVCNTR1_EL0
+ write CPUAMEVCNTR2_EL0
+ write CPUAMEVCNTR3_EL0
+ write CPUAMEVCNTR4_EL0
+endfunc cpuamu_cnt_write
+
+/*
+ * unsigned int cpuamu_read_cpuamcntenset_el0(void);
+ *
+ * Read the `CPUAMCNTENSET_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cpuamu_read_cpuamcntenset_el0
+ mrs x0, CPUAMCNTENSET_EL0
+ ret
+endfunc cpuamu_read_cpuamcntenset_el0
+
+/*
+ * unsigned int cpuamu_read_cpuamcntenclr_el0(void);
+ *
+ * Read the `CPUAMCNTENCLR_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cpuamu_read_cpuamcntenclr_el0
+ mrs x0, CPUAMCNTENCLR_EL0
+ ret
+endfunc cpuamu_read_cpuamcntenclr_el0
+
+/*
+ * void cpuamu_write_cpuamcntenset_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENSET_EL0` CPU register.
+ */
+func cpuamu_write_cpuamcntenset_el0
+ msr CPUAMCNTENSET_EL0, x0
+ ret
+endfunc cpuamu_write_cpuamcntenset_el0
+
+/*
+ * void cpuamu_write_cpuamcntenclr_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENCLR_EL0` CPU register.
+ */
+func cpuamu_write_cpuamcntenclr_el0
+ msr CPUAMCNTENCLR_EL0, x0
+ ret
+endfunc cpuamu_write_cpuamcntenclr_el0
diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S
new file mode 100644
index 0000000..884281d
--- /dev/null
+++ b/lib/cpus/aarch64/denver.S
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <context.h>
+#include <denver.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+ /* -------------------------------------------------
+ * CVE-2017-5715 mitigation
+ *
+ * Flush the indirect branch predictor and RSB on
+ * entry to EL3 by issuing a newly added instruction
+ * for Denver CPUs.
+ *
+ * To achieve this without performing any branch
+ * instruction, a per-cpu vbar is installed which
+ * executes the workaround and then branches off to
+ * the corresponding vector entry in the main vector
+ * table.
+ * -------------------------------------------------
+ */
+vector_base workaround_bpflush_runtime_exceptions
+
+ .macro apply_workaround
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+
+ /* Disable cycle counter when event counting is prohibited */
+ mrs x1, pmcr_el0
+ orr x0, x1, #PMCR_EL0_DP_BIT
+ msr pmcr_el0, x0
+ isb
+
+ /* -------------------------------------------------
+ * A new write-only system register where a write of
+ * 1 to bit 0 will cause the indirect branch predictor
+ * and RSB to be flushed.
+ *
+ * A write of 0 to bit 0 will be ignored. A write of
+ * 1 to any other bit will cause an MCA.
+ * -------------------------------------------------
+ */
+ mov x0, #1
+ msr s3_0_c15_c0_6, x0
+ isb
+
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpflush_sync_exception_sp_el0
+ b sync_exception_sp_el0
+end_vector_entry workaround_bpflush_sync_exception_sp_el0
+
+vector_entry workaround_bpflush_irq_sp_el0
+ b irq_sp_el0
+end_vector_entry workaround_bpflush_irq_sp_el0
+
+vector_entry workaround_bpflush_fiq_sp_el0
+ b fiq_sp_el0
+end_vector_entry workaround_bpflush_fiq_sp_el0
+
+vector_entry workaround_bpflush_serror_sp_el0
+ b serror_sp_el0
+end_vector_entry workaround_bpflush_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpflush_sync_exception_sp_elx
+ b sync_exception_sp_elx
+end_vector_entry workaround_bpflush_sync_exception_sp_elx
+
+vector_entry workaround_bpflush_irq_sp_elx
+ b irq_sp_elx
+end_vector_entry workaround_bpflush_irq_sp_elx
+
+vector_entry workaround_bpflush_fiq_sp_elx
+ b fiq_sp_elx
+end_vector_entry workaround_bpflush_fiq_sp_elx
+
+vector_entry workaround_bpflush_serror_sp_elx
+ b serror_sp_elx
+end_vector_entry workaround_bpflush_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpflush_sync_exception_aarch64
+ apply_workaround
+ b sync_exception_aarch64
+end_vector_entry workaround_bpflush_sync_exception_aarch64
+
+vector_entry workaround_bpflush_irq_aarch64
+ apply_workaround
+ b irq_aarch64
+end_vector_entry workaround_bpflush_irq_aarch64
+
+vector_entry workaround_bpflush_fiq_aarch64
+ apply_workaround
+ b fiq_aarch64
+end_vector_entry workaround_bpflush_fiq_aarch64
+
+vector_entry workaround_bpflush_serror_aarch64
+ apply_workaround
+ b serror_aarch64
+end_vector_entry workaround_bpflush_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpflush_sync_exception_aarch32
+ apply_workaround
+ b sync_exception_aarch32
+end_vector_entry workaround_bpflush_sync_exception_aarch32
+
+vector_entry workaround_bpflush_irq_aarch32
+ apply_workaround
+ b irq_aarch32
+end_vector_entry workaround_bpflush_irq_aarch32
+
+vector_entry workaround_bpflush_fiq_aarch32
+ apply_workaround
+ b fiq_aarch32
+end_vector_entry workaround_bpflush_fiq_aarch32
+
+vector_entry workaround_bpflush_serror_aarch32
+ apply_workaround
+ b serror_aarch32
+end_vector_entry workaround_bpflush_serror_aarch32
+
+ .global denver_disable_dco
+
+ /* ---------------------------------------------
+ * Disable debug interfaces
+ * ---------------------------------------------
+ */
+func denver_disable_ext_debug
+ mov x0, #1
+ msr osdlr_el1, x0
+ isb
+ dsb sy
+ ret
+endfunc denver_disable_ext_debug
+
+ /* ----------------------------------------------------
+ * Enable dynamic code optimizer (DCO)
+ * ----------------------------------------------------
+ */
+func denver_enable_dco
+ /* DCO is not supported on PN5 and later */
+ mrs x1, midr_el1
+ mov_imm x2, DENVER_MIDR_PN4
+ cmp x1, x2
+ b.hi 1f
+
+ mov x18, x30
+ bl plat_my_core_pos
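+	/* x0 = this core's position; form the matching per-core DCO enable bit */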
+ mov x1, #1
+ lsl x1, x1, x0
+ msr s3_0_c15_c0_2, x1
+ mov x30, x18
+1: ret
+endfunc denver_enable_dco
+
+ /* ----------------------------------------------------
+ * Disable dynamic code optimizer (DCO)
+ * ----------------------------------------------------
+ */
+func denver_disable_dco
+ /* DCO is not supported on PN5 and later */
+ mrs x1, midr_el1
+ mov_imm x2, DENVER_MIDR_PN4
+ cmp x1, x2
+ b.hi 2f
+
+ /* turn off background work */
+ mov x18, x30
+ bl plat_my_core_pos
+ mov x1, #1
+ lsl x1, x1, x0
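+	/*
+	 * The per-core disable bit is written at bits [31:16] and the
+	 * corresponding status bit is polled at bits [47:32] below.
+	 */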
+ lsl x2, x1, #16
+ msr s3_0_c15_c0_2, x2
+ isb
+
+ /* wait till the background work turns off */
+1: mrs x2, s3_0_c15_c0_2
+ lsr x2, x2, #32
+ and w2, w2, 0xFFFF
+ and x2, x2, x1
+ cbnz x2, 1b
+
+ mov x30, x18
+2: ret
+endfunc denver_disable_dco
+
+workaround_reset_start denver, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
+#if IMAGE_BL31
+ adr x1, workaround_bpflush_runtime_exceptions
+ msr vbar_el3, x1
+#endif
+workaround_reset_end denver, CVE(2017, 5715)
+
+check_erratum_custom_start denver, CVE(2017, 5715)
+ mov x0, #ERRATA_MISSING
+#if WORKAROUND_CVE_2017_5715
+ /*
+ * Check if the CPU supports the special instruction
+ * required to flush the indirect branch predictor and
+ * RSB. Support for this operation can be determined by
+ * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
+ */
+ mrs x1, id_afr0_el1
+ mov x2, #0x10000
+ and x1, x1, x2
+ cbz x1, 1f
+ mov x0, #ERRATA_APPLIES
+1:
+#endif
+ ret
+check_erratum_custom_end denver, CVE(2017, 5715)
+
+workaround_reset_start denver, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+ /*
+	 * Denver CPUs with DENVER_MIDR_PN3 or earlier use different
+	 * bits in the ACTLR_EL3 register to disable the speculative
+	 * store buffer and memory disambiguation features.
+ */
+ mrs x0, midr_el1
+ mov_imm x1, DENVER_MIDR_PN4
+ cmp x0, x1
+ mrs x0, actlr_el3
+ mov x1, #(DENVER_CPU_DIS_MD_EL3 | DENVER_CPU_DIS_SSB_EL3)
+ mov x2, #(DENVER_PN4_CPU_DIS_MD_EL3 | DENVER_PN4_CPU_DIS_SSB_EL3)
+ csel x3, x1, x2, ne
+ orr x0, x0, x3
+ msr actlr_el3, x0
+ isb
+ dsb sy
+workaround_reset_end denver, CVE(2018, 3639)
+
+check_erratum_chosen denver, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
+
+cpu_reset_func_start denver
+ /* ----------------------------------------------------
+ * Reset ACTLR.PMSTATE to C1 state
+ * ----------------------------------------------------
+ */
+ mrs x0, actlr_el1
+ bic x0, x0, #DENVER_CPU_PMSTATE_MASK
+ orr x0, x0, #DENVER_CPU_PMSTATE_C1
+ msr actlr_el1, x0
+
+ /* ----------------------------------------------------
+ * Enable dynamic code optimizer (DCO)
+ * ----------------------------------------------------
+ */
+ bl denver_enable_dco
+cpu_reset_func_end denver
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Denver.
+ * ----------------------------------------------------
+ */
+func denver_core_pwr_dwn
+
+ mov x19, x30
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ bl denver_disable_ext_debug
+
+ ret x19
+endfunc denver_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Denver.
+ * -------------------------------------------------------
+ */
+func denver_cluster_pwr_dwn
+ ret
+endfunc denver_cluster_pwr_dwn
+
+errata_report_shim denver
+
+ /* ---------------------------------------------
+ * This function provides Denver specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.denver_regs, "aS"
+denver_regs: /* The ascii list of register names to be reported */
+ .asciz "actlr_el1", ""
+
+func denver_cpu_reg_dump
+ adr x6, denver_regs
+ mrs x8, ACTLR_EL1
+ ret
+endfunc denver_cpu_reg_dump
+
+/* macro to declare cpu_ops for Denver SKUs */
+.macro denver_cpu_ops_wa midr
+ declare_cpu_ops_wa denver, \midr, \
+ denver_reset_func, \
+ check_erratum_denver_5715, \
+ CPU_NO_EXTRA2_FUNC, \
+ CPU_NO_EXTRA3_FUNC, \
+ denver_core_pwr_dwn, \
+ denver_cluster_pwr_dwn
+.endm
+
+denver_cpu_ops_wa DENVER_MIDR_PN0
+denver_cpu_ops_wa DENVER_MIDR_PN1
+denver_cpu_ops_wa DENVER_MIDR_PN2
+denver_cpu_ops_wa DENVER_MIDR_PN3
+denver_cpu_ops_wa DENVER_MIDR_PN4
+denver_cpu_ops_wa DENVER_MIDR_PN5
+denver_cpu_ops_wa DENVER_MIDR_PN6
+denver_cpu_ops_wa DENVER_MIDR_PN7
+denver_cpu_ops_wa DENVER_MIDR_PN8
+denver_cpu_ops_wa DENVER_MIDR_PN9
diff --git a/lib/cpus/aarch64/dsu_helpers.S b/lib/cpus/aarch64/dsu_helpers.S
new file mode 100644
index 0000000..a34b9a6
--- /dev/null
+++ b/lib/cpus/aarch64/dsu_helpers.S
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <dsu_def.h>
+#include <lib/cpus/errata.h>
+
+ /* -----------------------------------------------------------------------
+ * DSU erratum 798953 check function
+ * Checks the DSU variant, revision and configuration to determine if
+ * the erratum applies. Erratum applies on all configurations of the
+ * DSU and if revision-variant is r0p0.
+ *
+ * The erratum was fixed in r0p1.
+ *
+ * This function is called from both assembly and C environment. So it
+ * follows AAPCS.
+ *
+ * Clobbers: x0-x3
+ * -----------------------------------------------------------------------
+ */
+ .globl check_errata_dsu_798953
+ .globl errata_dsu_798953_wa
+
+func check_errata_dsu_798953
+ mov x2, #ERRATA_APPLIES
+ mov x3, #ERRATA_NOT_APPLIES
+
+ /* Check if DSU is equal to r0p0 */
+ mrs x1, CLUSTERIDR_EL1
+
+ /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+ ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\
+ #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+ mov x1, #(0x0 << CLUSTERIDR_REV_SHIFT)
+ cmp x0, x1
+ csel x0, x2, x3, EQ
+ ret
+endfunc check_errata_dsu_798953
+
+ /* --------------------------------------------------
+ * Errata Workaround for DSU erratum #798953.
+ *
+ * Can clobber only: x0-x8
+ * --------------------------------------------------
+ */
+func errata_dsu_798953_wa
+ mov x8, x30
+ bl check_errata_dsu_798953
+ cbz x0, 1f
+
+ /* If erratum applies, disable high-level clock gating */
+ mrs x0, CLUSTERACTLR_EL1
+ orr x0, x0, #CLUSTERACTLR_EL1_DISABLE_CLOCK_GATING
+ msr CLUSTERACTLR_EL1, x0
+ isb
+1:
+ ret x8
+endfunc errata_dsu_798953_wa
+
+ /* -----------------------------------------------------------------------
+ * DSU erratum 936184 check function
+ * Checks the DSU variant, revision and configuration to determine if
+ * the erratum applies. Erratum applies if ACP interface is present
+ * in the DSU and revision-variant < r2p0.
+ *
+ * The erratum was fixed in r2p0.
+ *
+ * This function is called from both assembly and C environment. So it
+ * follows AAPCS.
+ *
+ * Clobbers: x0-x4
+ * -----------------------------------------------------------------------
+ */
+ .globl check_errata_dsu_936184
+ .globl errata_dsu_936184_wa
+ .weak is_scu_present_in_dsu
+
+ /* --------------------------------------------------------------------
+	 * The default behaviour reports that the SCU is always present with
+	 * the DSU. CPUs can override this definition if required.
+ *
+ * Can clobber only: x0-x3
+ * --------------------------------------------------------------------
+ */
+func is_scu_present_in_dsu
+ mov x0, #1
+ ret
+endfunc is_scu_present_in_dsu
+
+func check_errata_dsu_936184
+ mov x4, x30
+ bl is_scu_present_in_dsu
+ cmp x0, xzr
+ /* Default error status */
+ mov x0, #ERRATA_NOT_APPLIES
+
+ /* If SCU is not present, return without applying patch */
+ b.eq 1f
+
+ /* Erratum applies only if DSU has the ACP interface */
+ mrs x1, CLUSTERCFR_EL1
+ ubfx x1, x1, #CLUSTERCFR_ACP_SHIFT, #1
+ cbz x1, 1f
+
+ /* If ACP is present, check if DSU is older than r2p0 */
+ mrs x1, CLUSTERIDR_EL1
+
+ /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+ ubfx x2, x1, #CLUSTERIDR_REV_SHIFT,\
+ #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+ cmp x2, #(0x2 << CLUSTERIDR_VAR_SHIFT)
+ b.hs 1f
+ mov x0, #ERRATA_APPLIES
+1:
+ ret x4
+endfunc check_errata_dsu_936184
+
+ /* --------------------------------------------------
+ * Errata Workaround for DSU erratum #936184.
+ *
+ * Can clobber only: x0-x8
+ * --------------------------------------------------
+ */
+func errata_dsu_936184_wa
+ mov x8, x30
+ bl check_errata_dsu_936184
+ cbz x0, 1f
+
+ /* If erratum applies, we set a mask to a DSU control register */
+ mrs x0, CLUSTERACTLR_EL1
+ ldr x1, =DSU_ERRATA_936184_MASK
+ orr x0, x0, x1
+ msr CLUSTERACTLR_EL1, x0
+ isb
+1:
+ ret x8
+endfunc errata_dsu_936184_wa
+
+ /* -----------------------------------------------------------------------
+ * DSU erratum 2313941 check function
+ * Checks the DSU variant, revision and configuration to determine if
+ * the erratum applies. Erratum applies on all configurations of the
+ * DSU and if revision-variant is r0p0, r1p0, r2p0, r2p1, r3p0, r3p1.
+ *
+ * The erratum is still open.
+ *
+ * This function is called from both assembly and C environment. So it
+ * follows AAPCS.
+ *
+ * Clobbers: x0-x3
+ * -----------------------------------------------------------------------
+ */
+ .globl check_errata_dsu_2313941
+ .globl errata_dsu_2313941_wa
+
+func check_errata_dsu_2313941
+ mov x2, #ERRATA_APPLIES
+ mov x3, #ERRATA_NOT_APPLIES
+
+ /* Check if DSU version is less than or equal to r3p1 */
+ mrs x1, CLUSTERIDR_EL1
+
+ /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+ ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\
+ #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
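+	/* 0x31 in the combined variant:revision field encodes r3p1 */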
+ mov x1, #(0x31 << CLUSTERIDR_REV_SHIFT)
+ cmp x0, x1
+ csel x0, x2, x3, LS
+ ret
+endfunc check_errata_dsu_2313941
+
+ /* --------------------------------------------------
+ * Errata Workaround for DSU erratum #2313941.
+ *
+ * Can clobber only: x0-x8
+ * --------------------------------------------------
+ */
+func errata_dsu_2313941_wa
+ mov x8, x30
+ bl check_errata_dsu_2313941
+ cbz x0, 1f
+
+ /* If erratum applies, disable high-level clock gating */
+ mrs x0, CLUSTERACTLR_EL1
+ orr x0, x0, #CLUSTERACTLR_EL1_DISABLE_SCLK_GATING
+ msr CLUSTERACTLR_EL1, x0
+ isb
+1:
+ ret x8
+endfunc errata_dsu_2313941_wa
diff --git a/lib/cpus/aarch64/generic.S b/lib/cpus/aarch64/generic.S
new file mode 100644
index 0000000..ef1f048
--- /dev/null
+++ b/lib/cpus/aarch64/generic.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <generic.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func generic_disable_dcache
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+ ret
+endfunc generic_disable_dcache
+
+func generic_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl generic_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ ret x18
+endfunc generic_core_pwr_dwn
+
+func generic_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl generic_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_level2
+
+ ret x18
+
+endfunc generic_cluster_pwr_dwn
+
+/* ---------------------------------------------
+ * Unimplemented functions.
+ * ---------------------------------------------
+ */
+.equ generic_errata_report, 0
+.equ generic_cpu_reg_dump, 0
+.equ generic_reset_func, 0
+
+declare_cpu_ops generic, AARCH64_GENERIC_MIDR, \
+ generic_reset_func, \
+ generic_core_pwr_dwn, \
+ generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_e1.S b/lib/cpus/aarch64/neoverse_e1.S
new file mode 100644
index 0000000..45bd8d3
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_e1.S
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <neoverse_e1.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse E1 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse-E1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+/*
+ * ERRATA_DSU_936184:
+ * The erratum is defined in dsu_helpers.S and applies to Neoverse E1.
+ * Hence we create symbolic names for the already existing errata
+ * workaround functions to get them registered under the Errata Framework.
+ */
+.equ check_erratum_neoverse_e1_936184, check_errata_dsu_936184
+.equ erratum_neoverse_e1_936184_wa, errata_dsu_936184_wa
+add_erratum_entry neoverse_e1, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+
+cpu_reset_func_start neoverse_e1
+cpu_reset_func_end neoverse_e1
+
+func neoverse_e1_cpu_pwr_dwn
+ mrs x0, NEOVERSE_E1_CPUPWRCTLR_EL1
+ orr x0, x0, #NEOVERSE_E1_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr NEOVERSE_E1_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc neoverse_e1_cpu_pwr_dwn
+
+errata_report_shim neoverse_e1
+
+.section .rodata.neoverse_e1_regs, "aS"
+neoverse_e1_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func neoverse_e1_cpu_reg_dump
+ adr x6, neoverse_e1_regs
+ mrs x8, NEOVERSE_E1_ECTLR_EL1
+ ret
+endfunc neoverse_e1_cpu_reg_dump
+
+declare_cpu_ops neoverse_e1, NEOVERSE_E1_MIDR, \
+ neoverse_e1_reset_func, \
+ neoverse_e1_cpu_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_hermes.S b/lib/cpus/aarch64/neoverse_hermes.S
new file mode 100644
index 0000000..cb90b71
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_hermes.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <neoverse_hermes.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse Hermes must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse Hermes supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+cpu_reset_func_start neoverse_hermes
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end neoverse_hermes
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func neoverse_hermes_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set NEOVERSE_HERMES_CPUPWRCTLR_EL1, NEOVERSE_HERMES_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ isb
+ ret
+endfunc neoverse_hermes_core_pwr_dwn
+
+errata_report_shim neoverse_hermes
+
+ /* ---------------------------------------------
+ * This function provides Neoverse Hermes specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.neoverse_hermes_regs, "aS"
+neoverse_hermes_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func neoverse_hermes_cpu_reg_dump
+ adr x6, neoverse_hermes_regs
+ mrs x8, NEOVERSE_HERMES_CPUECTLR_EL1
+ ret
+endfunc neoverse_hermes_cpu_reg_dump
+
+declare_cpu_ops neoverse_hermes, NEOVERSE_HERMES_MIDR, \
+ neoverse_hermes_reset_func, \
+ neoverse_hermes_core_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
new file mode 100644
index 0000000..36a7ee7
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpuamu.h>
+#include <cpu_macros.S>
+#include <neoverse_n1.h>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse N1 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse-N1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+ .global neoverse_n1_errata_ic_trap_handler
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table NEOVERSE_N1_BHB_LOOP_COUNT, neoverse_n1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+/*
+ * ERRATA_DSU_936184:
+ * The erratum is defined in dsu_helpers.S and applies to Neoverse N1.
+ * Hence we create symbolic names for the already existing errata
+ * workaround functions to get them registered under the Errata Framework.
+ */
+.equ check_erratum_neoverse_n1_936184, check_errata_dsu_936184
+.equ erratum_neoverse_n1_936184_wa, errata_dsu_936184_wa
+add_erratum_entry neoverse_n1, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+
+workaround_reset_start neoverse_n1, ERRATUM(1043202), ERRATA_N1_1043202
+ /* Apply instruction patching sequence */
+ ldr x0, =0x0
+ msr CPUPSELR_EL3, x0
+ ldr x0, =0xF3BF8F2F
+ msr CPUPOR_EL3, x0
+ ldr x0, =0xFFFFFFFF
+ msr CPUPMR_EL3, x0
+ ldr x0, =0x800200071
+ msr CPUPCR_EL3, x0
+workaround_reset_end neoverse_n1, ERRATUM(1043202)
+
+check_erratum_ls neoverse_n1, ERRATUM(1043202), CPU_REV(1, 0)
+
+workaround_reset_start neoverse_n1, ERRATUM(1073348), ERRATA_N1_1073348
+ sysreg_bit_set NEOVERSE_N1_CPUACTLR_EL1, NEOVERSE_N1_CPUACTLR_EL1_BIT_6
+workaround_reset_end neoverse_n1, ERRATUM(1073348)
+
+check_erratum_ls neoverse_n1, ERRATUM(1073348), CPU_REV(1, 0)
+
+workaround_reset_start neoverse_n1, ERRATUM(1130799), ERRATA_N1_1130799
+ sysreg_bit_set NEOVERSE_N1_CPUACTLR2_EL1, NEOVERSE_N1_CPUACTLR2_EL1_BIT_59
+workaround_reset_end neoverse_n1, ERRATUM(1130799)
+
+check_erratum_ls neoverse_n1, ERRATUM(1130799), CPU_REV(2, 0)
+
+workaround_reset_start neoverse_n1, ERRATUM(1165347), ERRATA_N1_1165347
+ sysreg_bit_set NEOVERSE_N1_CPUACTLR2_EL1, NEOVERSE_N1_CPUACTLR2_EL1_BIT_0
+ sysreg_bit_set NEOVERSE_N1_CPUACTLR2_EL1, NEOVERSE_N1_CPUACTLR2_EL1_BIT_15
+workaround_reset_end neoverse_n1, ERRATUM(1165347)
+
+check_erratum_ls neoverse_n1, ERRATUM(1165347), CPU_REV(2, 0)
+
+workaround_reset_start neoverse_n1, ERRATUM(1207823), ERRATA_N1_1207823
+ sysreg_bit_set NEOVERSE_N1_CPUACTLR2_EL1, NEOVERSE_N1_CPUACTLR2_EL1_BIT_11
+workaround_reset_end neoverse_n1, ERRATUM(1207823)
+
+check_erratum_ls neoverse_n1, ERRATUM(1207823), CPU_REV(2, 0)
+
+workaround_reset_start neoverse_n1, ERRATUM(1220197), ERRATA_N1_1220197
+ sysreg_bit_set NEOVERSE_N1_CPUECTLR_EL1, NEOVERSE_N1_WS_THR_L2_MASK
+workaround_reset_end neoverse_n1, ERRATUM(1220197)
+
+check_erratum_ls neoverse_n1, ERRATUM(1220197), CPU_REV(2, 0)
+
+workaround_reset_start neoverse_n1, ERRATUM(1257314), ERRATA_N1_1257314
+ sysreg_bit_set NEOVERSE_N1_CPUACTLR3_EL1, NEOVERSE_N1_CPUACTLR3_EL1_BIT_10
+workaround_reset_end neoverse_n1, ERRATUM(1257314)
+
+check_erratum_ls neoverse_n1, ERRATUM(1257314), CPU_REV(3, 0)
+
+workaround_reset_start neoverse_n1, ERRATUM(1262606), ERRATA_N1_1262606
+ sysreg_bit_set NEOVERSE_N1_CPUACTLR_EL1, NEOVERSE_N1_CPUACTLR_EL1_BIT_13
+workaround_reset_end neoverse_n1, ERRATUM(1262606)
+
+check_erratum_ls neoverse_n1, ERRATUM(1262606), CPU_REV(3, 0)
+
+workaround_reset_start neoverse_n1, ERRATUM(1262888), ERRATA_N1_1262888
+ sysreg_bit_set NEOVERSE_N1_CPUECTLR_EL1, NEOVERSE_N1_CPUECTLR_EL1_MM_TLBPF_DIS_BIT
+workaround_reset_end neoverse_n1, ERRATUM(1262888)
+
+check_erratum_ls neoverse_n1, ERRATUM(1262888), CPU_REV(3, 0)
+
+workaround_reset_start neoverse_n1, ERRATUM(1275112), ERRATA_N1_1275112
+ sysreg_bit_set NEOVERSE_N1_CPUACTLR_EL1, NEOVERSE_N1_CPUACTLR_EL1_BIT_13
+workaround_reset_end neoverse_n1, ERRATUM(1275112)
+
+check_erratum_ls neoverse_n1, ERRATUM(1275112), CPU_REV(3, 0)
+
+workaround_reset_start neoverse_n1, ERRATUM(1315703), ERRATA_N1_1315703
+ sysreg_bit_set NEOVERSE_N1_CPUACTLR2_EL1, NEOVERSE_N1_CPUACTLR2_EL1_BIT_16
+workaround_reset_end neoverse_n1, ERRATUM(1315703)
+
+check_erratum_ls neoverse_n1, ERRATUM(1315703), CPU_REV(3, 0)
+
+workaround_reset_start neoverse_n1, ERRATUM(1542419), ERRATA_N1_1542419
+ /* Apply instruction patching sequence */
+ ldr x0, =0x0
+ msr CPUPSELR_EL3, x0
+ ldr x0, =0xEE670D35
+ msr CPUPOR_EL3, x0
+ ldr x0, =0xFFFF0FFF
+ msr CPUPMR_EL3, x0
+ ldr x0, =0x08000020007D
+ msr CPUPCR_EL3, x0
+ isb
+workaround_reset_end neoverse_n1, ERRATUM(1542419)
+
+check_erratum_range neoverse_n1, ERRATUM(1542419), CPU_REV(3, 0), CPU_REV(4, 0)
+
+workaround_reset_start neoverse_n1, ERRATUM(1868343), ERRATA_N1_1868343
+ sysreg_bit_set NEOVERSE_N1_CPUACTLR_EL1, NEOVERSE_N1_CPUACTLR_EL1_BIT_13
+workaround_reset_end neoverse_n1, ERRATUM(1868343)
+
+check_erratum_ls neoverse_n1, ERRATUM(1868343), CPU_REV(4, 0)
+
+workaround_reset_start neoverse_n1, ERRATUM(1946160), ERRATA_N1_1946160
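+	/*
+	 * Three instruction-patching slots are programmed below: each group
+	 * of four writes selects a slot via S3_6_C15_C8_0 (CPUPSELR_EL3) and
+	 * then sets its opcode, mask and control values via _2, _3 and _1
+	 * (CPUPOR_EL3, CPUPMR_EL3 and CPUPCR_EL3 respectively).
+	 */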
+ mov x0, #3
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3900002
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+ mov x0, #4
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+ mov x0, #5
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+ isb
+workaround_reset_end neoverse_n1, ERRATUM(1946160)
+
+check_erratum_range neoverse_n1, ERRATUM(1946160), CPU_REV(3, 0), CPU_REV(4, 1)
+
+workaround_runtime_start neoverse_n1, ERRATUM(2743102), ERRATA_N1_2743102
+ /* dsb before isb of power down sequence */
+ dsb sy
+workaround_runtime_end neoverse_n1, ERRATUM(2743102)
+
+check_erratum_ls neoverse_n1, ERRATUM(2743102), CPU_REV(4, 1)
+
+workaround_reset_start neoverse_n1, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+ * The Neoverse-N1 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_neoverse_n1
+#endif /* IMAGE_BL31 */
+workaround_reset_end neoverse_n1, CVE(2022, 23960)
+
+check_erratum_chosen neoverse_n1, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+/* --------------------------------------------------
+ * Disable speculative loads if Neoverse N1 supports
+ * SSBS.
+ *
+ * Shall clobber: x0.
+ * --------------------------------------------------
+ */
+func neoverse_n1_disable_speculative_loads
+ /* Check if the PE implements SSBS */
+ mrs x0, id_aa64pfr1_el1
+ tst x0, #(ID_AA64PFR1_EL1_SSBS_MASK << ID_AA64PFR1_EL1_SSBS_SHIFT)
+ b.eq 1f
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+1:
+ ret
+endfunc neoverse_n1_disable_speculative_loads
+
+cpu_reset_func_start neoverse_n1
+ bl neoverse_n1_disable_speculative_loads
+
+ /* Forces all cacheable atomic instructions to be near */
+ sysreg_bit_set NEOVERSE_N1_CPUACTLR2_EL1, NEOVERSE_N1_CPUACTLR2_EL1_BIT_2
+ isb
+
+#if ENABLE_FEAT_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ sysreg_bit_set actlr_el3, NEOVERSE_N1_ACTLR_AMEN_BIT
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ sysreg_bit_set actlr_el2, NEOVERSE_N1_ACTLR_AMEN_BIT
+ /* Enable group0 counters */
+ mov x0, #NEOVERSE_N1_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET_EL0, x0
+#endif
+
+#if NEOVERSE_Nx_EXTERNAL_LLC
+	/* Some systems may have an external LLC; the core needs to be made aware */
+ sysreg_bit_set NEOVERSE_N1_CPUECTLR_EL1, NEOVERSE_N1_CPUECTLR_EL1_EXTLLC_BIT
+#endif
+cpu_reset_func_end neoverse_n1
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func neoverse_n1_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ sysreg_bit_set NEOVERSE_N1_CPUPWRCTLR_EL1, NEOVERSE_N1_CORE_PWRDN_EN_MASK
+
+ apply_erratum neoverse_n1, ERRATUM(2743102), ERRATA_N1_2743102
+
+ isb
+ ret
+endfunc neoverse_n1_core_pwr_dwn
+
+errata_report_shim neoverse_n1
+
+/*
+ * Handle trap of EL0 IC IVAU instructions to EL3 by executing a TLB
+ * inner-shareable invalidation to an arbitrary address followed by a DSB.
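+ *
+ * This forms the runtime half of the erratum 1542419 workaround: the
+ * reset-time instruction patching above causes EL0 IC IVAU to trap to EL3,
+ * where this handler performs a TLBI and DSB on the instruction's behalf
+ * and then skips the trapped instruction.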
+ *
+ * x1: Exception Syndrome
+ */
+func neoverse_n1_errata_ic_trap_handler
+ cmp x1, #NEOVERSE_N1_EC_IC_TRAP
+ b.ne 1f
+ tlbi vae3is, xzr
+ dsb sy
+
+	/* Skip the IC instruction itself */
+ mrs x3, elr_el3
+ add x3, x3, #4
+ msr elr_el3, x3
+
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+ /*
+ * Issue Error Synchronization Barrier to synchronize SErrors before
+ * exiting EL3. We're running with EAs unmasked, so any synchronized
+ * errors would be taken immediately; therefore no need to inspect
+ * DISR_EL1 register.
+ */
+ esb
+ exception_return
+1:
+ ret
+endfunc neoverse_n1_errata_ic_trap_handler
+
+ /* ---------------------------------------------
+ * This function provides neoverse_n1 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.neoverse_n1_regs, "aS"
+neoverse_n1_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func neoverse_n1_cpu_reg_dump
+ adr x6, neoverse_n1_regs
+ mrs x8, NEOVERSE_N1_CPUECTLR_EL1
+ ret
+endfunc neoverse_n1_cpu_reg_dump
+
+declare_cpu_ops_eh neoverse_n1, NEOVERSE_N1_MIDR, \
+ neoverse_n1_reset_func, \
+ neoverse_n1_errata_ic_trap_handler, \
+ neoverse_n1_core_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_n1_pubsub.c b/lib/cpus/aarch64/neoverse_n1_pubsub.c
new file mode 100644
index 0000000..ecfb501
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_n1_pubsub.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <neoverse_n1.h>
+#include <cpuamu.h>
+#include <lib/el3_runtime/pubsub_events.h>
+
+static void *neoverse_n1_context_save(const void *arg)
+{
+ if (midr_match(NEOVERSE_N1_MIDR) != 0)
+ cpuamu_context_save(NEOVERSE_N1_AMU_NR_COUNTERS);
+
+ return (void *)0;
+}
+
+static void *neoverse_n1_context_restore(const void *arg)
+{
+ if (midr_match(NEOVERSE_N1_MIDR) != 0)
+ cpuamu_context_restore(NEOVERSE_N1_AMU_NR_COUNTERS);
+
+ return (void *)0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, neoverse_n1_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, neoverse_n1_context_restore);
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
new file mode 100644
index 0000000..477522f
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+#include <neoverse_n2.h>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse N2 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse-N2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table NEOVERSE_N2_BHB_LOOP_COUNT, neoverse_n2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+/*
+ * ERRATA_DSU_2313941:
+ * The erratum is defined in dsu_helpers.S and applies to Neoverse N2.
+ * Hence we create symbolic names for the already existing errata
+ * workaround functions to get them registered under the Errata Framework.
+ */
+.equ check_erratum_neoverse_n2_2313941, check_errata_dsu_2313941
+.equ erratum_neoverse_n2_2313941_wa, errata_dsu_2313941_wa
+add_erratum_entry neoverse_n2, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET
+
+workaround_reset_start neoverse_n2, ERRATUM(2002655), ERRATA_N2_2002655
+ /* Apply instruction patching sequence */
+ ldr x0,=0x6
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xF3A08002
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFF0F7FE
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x40000001003ff
+ msr S3_6_c15_c8_1,x0
+ ldr x0,=0x7
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xBF200000
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFEF0000
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x40000001003f3
+ msr S3_6_c15_c8_1,x0
+workaround_reset_end neoverse_n2, ERRATUM(2002655)
+
+check_erratum_ls neoverse_n2, ERRATUM(2002655), CPU_REV(0, 0)
+
+workaround_reset_start neoverse_n2, ERRATUM(2025414), ERRATA_N2_2025414
+ sysreg_bit_set NEOVERSE_N2_CPUECTLR_EL1, NEOVERSE_N2_CPUECTLR_EL1_PFSTIDIS_BIT
+workaround_reset_end neoverse_n2, ERRATUM(2025414)
+
+check_erratum_ls neoverse_n2, ERRATUM(2025414), CPU_REV(0, 0)
+
+workaround_reset_start neoverse_n2, ERRATUM(2067956), ERRATA_N2_2067956
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR_EL1, NEOVERSE_N2_CPUACTLR_EL1_BIT_46
+workaround_reset_end neoverse_n2, ERRATUM(2067956)
+
+check_erratum_ls neoverse_n2, ERRATUM(2067956), CPU_REV(0, 0)
+
+workaround_runtime_start neoverse_n2, ERRATUM(2009478), ERRATA_N2_2009478
+ /* Stash ERRSELR_EL1 in x2 */
+ mrs x2, ERRSELR_EL1
+
+ /* Select error record 0 and clear ED bit */
+ msr ERRSELR_EL1, xzr
+ mrs x1, ERXCTLR_EL1
+ bfi x1, xzr, #ERXCTLR_ED_SHIFT, #1
+ msr ERXCTLR_EL1, x1
+
+ /* Restore ERRSELR_EL1 from x2 */
+ msr ERRSELR_EL1, x2
+workaround_runtime_end neoverse_n2, ERRATUM(2009478), NO_ISB
+
+check_erratum_ls neoverse_n2, ERRATUM(2009478), CPU_REV(0, 0)
+
+workaround_reset_start neoverse_n2, ERRATUM(2138953), ERRATA_N2_2138953
+	/* Set PF_MODE in CPUECTLR2_EL1 to conservative mode */
+ mrs x1, NEOVERSE_N2_CPUECTLR2_EL1
+ mov x0, #NEOVERSE_N2_CPUECTLR2_EL1_PF_MODE_CNSRV
+ bfi x1, x0, #CPUECTLR2_EL1_PF_MODE_LSB, #CPUECTLR2_EL1_PF_MODE_WIDTH
+ msr NEOVERSE_N2_CPUECTLR2_EL1, x1
+workaround_reset_end neoverse_n2, ERRATUM(2138953)
+
+check_erratum_ls neoverse_n2, ERRATUM(2138953), CPU_REV(0, 3)
+
+workaround_reset_start neoverse_n2, ERRATUM(2138956), ERRATA_N2_2138956
+ /* Apply instruction patching sequence */
+ ldr x0,=0x3
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xF3A08002
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFF0F7FE
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x10002001003FF
+ msr S3_6_c15_c8_1,x0
+ ldr x0,=0x4
+ msr S3_6_c15_c8_0,x0
+ ldr x0,=0xBF200000
+ msr S3_6_c15_c8_2,x0
+ ldr x0,=0xFFEF0000
+ msr S3_6_c15_c8_3,x0
+ ldr x0,=0x10002001003F3
+ msr S3_6_c15_c8_1,x0
+workaround_reset_end neoverse_n2, ERRATUM(2138956)
+
+check_erratum_ls neoverse_n2, ERRATUM(2138956), CPU_REV(0, 0)
+
+
+workaround_reset_start neoverse_n2, ERRATUM(2138958), ERRATA_N2_2138958
+	/* Set bit 13 in CPUACTLR5_EL1 */
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR5_EL1, NEOVERSE_N2_CPUACTLR5_EL1_BIT_13
+workaround_reset_end neoverse_n2, ERRATUM(2138958)
+
+check_erratum_ls neoverse_n2, ERRATUM(2138958), CPU_REV(0, 0)
+
+workaround_reset_start neoverse_n2, ERRATUM(2189731), ERRATA_N2_2189731
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR5_EL1, NEOVERSE_N2_CPUACTLR5_EL1_BIT_44
+workaround_reset_end neoverse_n2, ERRATUM(2189731)
+
+check_erratum_ls neoverse_n2, ERRATUM(2189731), CPU_REV(0, 0)
+
+workaround_reset_start neoverse_n2, ERRATUM(2242400), ERRATA_N2_2242400
+ /* Apply instruction patching sequence */
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR5_EL1, NEOVERSE_N2_CPUACTLR5_EL1_BIT_17
+ ldr x0, =0x2
+ msr S3_6_c15_c8_0, x0
+ ldr x0, =0x10F600E000
+ msr S3_6_c15_c8_2, x0
+ ldr x0, =0x10FF80E000
+ msr S3_6_c15_c8_3, x0
+ ldr x0, =0x80000000003FF
+ msr S3_6_c15_c8_1, x0
+workaround_reset_end neoverse_n2, ERRATUM(2242400)
+
+check_erratum_ls neoverse_n2, ERRATUM(2242400), CPU_REV(0, 0)
+
+workaround_reset_start neoverse_n2, ERRATUM(2242415), ERRATA_N2_2242415
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR_EL1, NEOVERSE_N2_CPUACTLR_EL1_BIT_22
+workaround_reset_end neoverse_n2, ERRATUM(2242415)
+
+check_erratum_ls neoverse_n2, ERRATUM(2242415), CPU_REV(0, 0)
+
+workaround_reset_start neoverse_n2, ERRATUM(2280757), ERRATA_N2_2280757
+	/* Set bit 22 in CPUACTLR_EL1 */
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR_EL1, NEOVERSE_N2_CPUACTLR_EL1_BIT_22
+workaround_reset_end neoverse_n2, ERRATUM(2280757)
+
+check_erratum_ls neoverse_n2, ERRATUM(2280757), CPU_REV(0, 0)
+
+workaround_runtime_start neoverse_n2, ERRATUM(2326639), ERRATA_N2_2326639
+	/* Set bit 36 in CPUACTLR2_EL1 */
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR2_EL1, NEOVERSE_N2_CPUACTLR2_EL1_BIT_36
+workaround_runtime_end neoverse_n2, ERRATUM(2326639)
+
+check_erratum_ls neoverse_n2, ERRATUM(2326639), CPU_REV(0, 0)
+
+workaround_runtime_start neoverse_n2, ERRATUM(2340933), ERRATA_N2_2340933
+ /* Set bit 61 in CPUACTLR5_EL1 */
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR5_EL1, BIT(61)
+workaround_runtime_end neoverse_n2, ERRATUM(2340933)
+
+check_erratum_ls neoverse_n2, ERRATUM(2340933), CPU_REV(0, 0)
+
+workaround_runtime_start neoverse_n2, ERRATUM(2346952), ERRATA_N2_2346952
+ /* Set TXREQ to STATIC and full L2 TQ size */
+ mrs x1, NEOVERSE_N2_CPUECTLR2_EL1
+ mov x0, #CPUECTLR2_EL1_TXREQ_STATIC_FULL
+ bfi x1, x0, #CPUECTLR2_EL1_TXREQ_LSB, #CPUECTLR2_EL1_TXREQ_WIDTH
+ msr NEOVERSE_N2_CPUECTLR2_EL1, x1
+workaround_runtime_end neoverse_n2, ERRATUM(2346952)
+
+check_erratum_ls neoverse_n2, ERRATUM(2346952), CPU_REV(0, 2)
+
+workaround_reset_start neoverse_n2, ERRATUM(2376738), ERRATA_N2_2376738
+	/*
+	 * Set CPUACTLR2_EL1[0] to 1 to force PLDW/PFRM ST to behave like
+	 * PLD/PFRM LD and not cause invalidations to other PE caches.
+	 */
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR2_EL1, NEOVERSE_N2_CPUACTLR2_EL1_BIT_0
+workaround_reset_end neoverse_n2, ERRATUM(2376738)
+
+check_erratum_ls neoverse_n2, ERRATUM(2376738), CPU_REV(0, 3)
+
+workaround_reset_start neoverse_n2, ERRATUM(2388450), ERRATA_N2_2388450
+	/* Set bit 40 in CPUACTLR2_EL1 */
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR2_EL1, NEOVERSE_N2_CPUACTLR2_EL1_BIT_40
+workaround_reset_end neoverse_n2, ERRATUM(2388450)
+
+check_erratum_ls neoverse_n2, ERRATUM(2388450), CPU_REV(0, 0)
+
+workaround_reset_start neoverse_n2, ERRATUM(2743014), ERRATA_N2_2743014
+ /* Set CPUACTLR5_EL1[56:55] to 2'b01 */
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR5_EL1, NEOVERSE_N2_CPUACTLR5_EL1_BIT_55
+ sysreg_bit_clear NEOVERSE_N2_CPUACTLR5_EL1, NEOVERSE_N2_CPUACTLR5_EL1_BIT_56
+workaround_reset_end neoverse_n2, ERRATUM(2743014)
+
+check_erratum_ls neoverse_n2, ERRATUM(2743014), CPU_REV(0, 2)
+
+workaround_runtime_start neoverse_n2, ERRATUM(2743089), ERRATA_N2_2743089
+ /* dsb before isb of power down sequence */
+ dsb sy
+workaround_runtime_end neoverse_n2, ERRATUM(2743089)
+
+check_erratum_ls neoverse_n2, ERRATUM(2743089), CPU_REV(0, 2)
+
+workaround_reset_start neoverse_n2, ERRATUM(2779511), ERRATA_N2_2779511
+	/* Set bit 47 in CPUACTLR3_EL1 */
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR3_EL1, NEOVERSE_N2_CPUACTLR3_EL1_BIT_47
+workaround_reset_end neoverse_n2, ERRATUM(2779511)
+
+check_erratum_ls neoverse_n2, ERRATUM(2779511), CPU_REV(0, 2)
+
+workaround_reset_start neoverse_n2, CVE(2022,23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+ * The Neoverse-N2 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_neoverse_n2
+#endif /* IMAGE_BL31 */
+workaround_reset_end neoverse_n2, CVE(2022,23960)
+
+check_erratum_chosen neoverse_n2, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+ /* -------------------------------------------
+ * The CPU Ops reset function for Neoverse N2.
+ * -------------------------------------------
+ */
+cpu_reset_func_start neoverse_n2
+
+ /* Check if the PE implements SSBS */
+ mrs x0, id_aa64pfr1_el1
+ tst x0, #(ID_AA64PFR1_EL1_SSBS_MASK << ID_AA64PFR1_EL1_SSBS_SHIFT)
+ b.eq 1f
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+1:
+ /* Force all cacheable atomic instructions to be near */
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR2_EL1, NEOVERSE_N2_CPUACTLR2_EL1_BIT_2
+
+#if ENABLE_FEAT_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ sysreg_bit_set cptr_el3, TAM_BIT
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ sysreg_bit_set cptr_el2, TAM_BIT
+ /* No need to enable the counters as this would be done at el3 exit */
+#endif
+
+#if NEOVERSE_Nx_EXTERNAL_LLC
+	/* Some systems may have an external LLC; the core needs to be made aware */
+ sysreg_bit_set NEOVERSE_N2_CPUECTLR_EL1, NEOVERSE_N2_CPUECTLR_EL1_EXTLLC_BIT
+#endif
+cpu_reset_func_end neoverse_n2
+
+func neoverse_n2_core_pwr_dwn
+
+ apply_erratum neoverse_n2, ERRATUM(2009478), ERRATA_N2_2009478
+ apply_erratum neoverse_n2, ERRATUM(2326639), ERRATA_N2_2326639, NO_GET_CPU_REV
+
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * No need to do cache maintenance here.
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set NEOVERSE_N2_CPUPWRCTLR_EL1, NEOVERSE_N2_CORE_PWRDN_EN_BIT
+
+ apply_erratum neoverse_n2, ERRATUM(2743089), ERRATA_N2_2743089
+
+ isb
+ ret
+endfunc neoverse_n2_core_pwr_dwn
+
+errata_report_shim neoverse_n2
+
+ /* ---------------------------------------------
+ * This function provides Neoverse N2 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ASCII and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.neoverse_n2_regs, "aS"
+neoverse_n2_regs: /* The ASCII list of register names to be reported */
+ .asciz "cpupwrctlr_el1", ""
+
+func neoverse_n2_cpu_reg_dump
+ adr x6, neoverse_n2_regs
+ mrs x8, NEOVERSE_N2_CPUPWRCTLR_EL1
+ ret
+endfunc neoverse_n2_cpu_reg_dump
+
+declare_cpu_ops neoverse_n2, NEOVERSE_N2_MIDR, \
+ neoverse_n2_reset_func, \
+ neoverse_n2_core_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_n_common.S b/lib/cpus/aarch64/neoverse_n_common.S
new file mode 100644
index 0000000..b816342
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_n_common.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <neoverse_n_common.h>
+
+ .global is_scu_present_in_dsu
+
+/*
+ * Check if the SCU L3 Unit is present on the DSU
+ * 1-> SCU present
+ * 0-> SCU not present
+ *
+ * This function is implemented as a weak symbol in dsu_helpers.S and must
+ * be overridden for Neoverse Nx cores.
+ */
+
+func is_scu_present_in_dsu
+ mrs x0, CPUCFR_EL1
+ ubfx x0, x0, #SCU_SHIFT, #1
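+	/* CPUCFR_EL1.SCU is 1 when the SCU/L3 is not present, hence the inversion */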
+ eor x0, x0, #1
+ ret
+endfunc is_scu_present_in_dsu
diff --git a/lib/cpus/aarch64/neoverse_poseidon.S b/lib/cpus/aarch64/neoverse_poseidon.S
new file mode 100644
index 0000000..3b3245d
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_poseidon.S
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <neoverse_poseidon.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse Poseidon must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse Poseidon supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table NEOVERSE_POSEIDON_BHB_LOOP_COUNT, neoverse_poseidon
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+workaround_reset_start neoverse_poseidon, CVE(2022,23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+	 * The Neoverse-Poseidon generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_neoverse_poseidon
+
+#endif /* IMAGE_BL31 */
+workaround_reset_end neoverse_poseidon, CVE(2022,23960)
+
+check_erratum_chosen neoverse_poseidon, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func neoverse_poseidon_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ sysreg_bit_set NEOVERSE_POSEIDON_CPUPWRCTLR_EL1, \
+ NEOVERSE_POSEIDON_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+
+ isb
+ ret
+endfunc neoverse_poseidon_core_pwr_dwn
+
+cpu_reset_func_start neoverse_poseidon
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end neoverse_poseidon
+
+errata_report_shim neoverse_poseidon
+
+ /* ---------------------------------------------
+ * This function provides Neoverse-Poseidon specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.neoverse_poseidon_regs, "aS"
+neoverse_poseidon_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func neoverse_poseidon_cpu_reg_dump
+ adr x6, neoverse_poseidon_regs
+ mrs x8, NEOVERSE_POSEIDON_CPUECTLR_EL1
+ ret
+endfunc neoverse_poseidon_cpu_reg_dump
+
+declare_cpu_ops neoverse_poseidon, NEOVERSE_POSEIDON_MIDR, \
+ neoverse_poseidon_reset_func, \
+ neoverse_poseidon_core_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S
new file mode 100644
index 0000000..2a49134
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <neoverse_v1.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse V1 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+#if WORKAROUND_CVE_2022_23960
+ wa_cve_2022_23960_bhb_vector_table NEOVERSE_V1_BHB_LOOP_COUNT, neoverse_v1
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+workaround_reset_start neoverse_v1, ERRATUM(1618635), ERRATA_V1_1618635
+ /* Inserts a DMB SY before and after MRS PAR_EL1 */
+ ldr x0, =0x0
+ msr NEOVERSE_V1_CPUPSELR_EL3, x0
+ ldr x0, = 0xEE070F14
+ msr NEOVERSE_V1_CPUPOR_EL3, x0
+ ldr x0, = 0xFFFF0FFF
+ msr NEOVERSE_V1_CPUPMR_EL3, x0
+ ldr x0, =0x4005027FF
+ msr NEOVERSE_V1_CPUPCR_EL3, x0
+
+ /* Inserts a DMB SY before STREX imm offset */
+ ldr x0, =0x1
+ msr NEOVERSE_V1_CPUPSELR_EL3, x0
+ ldr x0, =0x00e8400000
+ msr NEOVERSE_V1_CPUPOR_EL3, x0
+ ldr x0, =0x00fff00000
+ msr NEOVERSE_V1_CPUPMR_EL3, x0
+ ldr x0, = 0x4001027FF
+ msr NEOVERSE_V1_CPUPCR_EL3, x0
+
+	/* Inserts a DMB SY before STREX[BHD]/STLEX* */
+ ldr x0, =0x2
+ msr NEOVERSE_V1_CPUPSELR_EL3, x0
+ ldr x0, =0x00e8c00040
+ msr NEOVERSE_V1_CPUPOR_EL3, x0
+ ldr x0, =0x00fff00040
+ msr NEOVERSE_V1_CPUPMR_EL3, x0
+ ldr x0, = 0x4001027FF
+ msr NEOVERSE_V1_CPUPCR_EL3, x0
+
+ /* Inserts a DMB SY after STREX imm offset */
+ ldr x0, =0x3
+ msr NEOVERSE_V1_CPUPSELR_EL3, x0
+ ldr x0, =0x00e8400000
+ msr NEOVERSE_V1_CPUPOR_EL3, x0
+ ldr x0, =0x00fff00000
+ msr NEOVERSE_V1_CPUPMR_EL3, x0
+ ldr x0, = 0x4004027FF
+ msr NEOVERSE_V1_CPUPCR_EL3, x0
+
+	/* Inserts a DMB SY after STREX[BHD]/STLEX* */
+ ldr x0, =0x4
+ msr NEOVERSE_V1_CPUPSELR_EL3, x0
+ ldr x0, =0x00e8c00040
+ msr NEOVERSE_V1_CPUPOR_EL3, x0
+ ldr x0, =0x00fff00040
+ msr NEOVERSE_V1_CPUPMR_EL3, x0
+ ldr x0, = 0x4004027FF
+ msr NEOVERSE_V1_CPUPCR_EL3, x0
+
+workaround_reset_end neoverse_v1, ERRATUM(1618635)
+
+check_erratum_ls neoverse_v1, ERRATUM(1618635), CPU_REV(0, 0)
+
+workaround_reset_start neoverse_v1, ERRATUM(1774420), ERRATA_V1_1774420
+ /* Set bit 53 in CPUECTLR_EL1 */
+ sysreg_bit_set NEOVERSE_V1_CPUECTLR_EL1, NEOVERSE_V1_CPUECTLR_EL1_BIT_53
+workaround_reset_end neoverse_v1, ERRATUM(1774420)
+
+check_erratum_ls neoverse_v1, ERRATUM(1774420), CPU_REV(1, 0)
+
+workaround_reset_start neoverse_v1, ERRATUM(1791573), ERRATA_V1_1791573
+ /* Set bit 2 in ACTLR2_EL1 */
+ sysreg_bit_set NEOVERSE_V1_ACTLR2_EL1, NEOVERSE_V1_ACTLR2_EL1_BIT_2
+workaround_reset_end neoverse_v1, ERRATUM(1791573)
+
+check_erratum_ls neoverse_v1, ERRATUM(1791573), CPU_REV(1, 0)
+
+workaround_reset_start neoverse_v1, ERRATUM(1852267), ERRATA_V1_1852267
+ /* Set bit 28 in ACTLR2_EL1 */
+ sysreg_bit_set NEOVERSE_V1_ACTLR2_EL1, NEOVERSE_V1_ACTLR2_EL1_BIT_28
+workaround_reset_end neoverse_v1, ERRATUM(1852267)
+
+check_erratum_ls neoverse_v1, ERRATUM(1852267), CPU_REV(1, 0)
+
+workaround_reset_start neoverse_v1, ERRATUM(1925756), ERRATA_V1_1925756
+ /* Set bit 8 in CPUECTLR_EL1 */
+ sysreg_bit_set NEOVERSE_V1_CPUECTLR_EL1, NEOVERSE_V1_CPUECTLR_EL1_BIT_8
+workaround_reset_end neoverse_v1, ERRATUM(1925756)
+
+check_erratum_ls neoverse_v1, ERRATUM(1925756), CPU_REV(1, 1)
+
+workaround_reset_start neoverse_v1, ERRATUM(1940577), ERRATA_V1_1940577
+ mov x0, #0
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3900002
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #1
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #2
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+workaround_reset_end neoverse_v1, ERRATUM(1940577)
+
+check_erratum_range neoverse_v1, ERRATUM(1940577), CPU_REV(1, 0), CPU_REV(1, 1)
+
+workaround_reset_start neoverse_v1, ERRATUM(1966096), ERRATA_V1_1966096
+ mov x0, #0x3
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0xEE010F12
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0xFFFF0FFF
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x80000000003FF
+ msr S3_6_C15_C8_1, x0
+workaround_reset_end neoverse_v1, ERRATUM(1966096)
+
+check_erratum_range neoverse_v1, ERRATUM(1966096), CPU_REV(1, 0), CPU_REV(1, 1)
+
+workaround_reset_start neoverse_v1, ERRATUM(2108267), ERRATA_V1_2108267
+ mrs x1, NEOVERSE_V1_CPUECTLR_EL1
+ mov x0, #NEOVERSE_V1_CPUECTLR_EL1_PF_MODE_CNSRV
+ bfi x1, x0, #CPUECTLR_EL1_PF_MODE_LSB, #CPUECTLR_EL1_PF_MODE_WIDTH
+ msr NEOVERSE_V1_CPUECTLR_EL1, x1
+workaround_reset_end neoverse_v1, ERRATUM(2108267)
+
+check_erratum_ls neoverse_v1, ERRATUM(2108267), CPU_REV(1, 2)
+
+workaround_reset_start neoverse_v1, ERRATUM(2139242), ERRATA_V1_2139242
+ mov x0, #0x3
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0xEE720F14
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0xFFFF0FDF
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x40000005003FF
+ msr S3_6_C15_C8_1, x0
+workaround_reset_end neoverse_v1, ERRATUM(2139242)
+
+check_erratum_ls neoverse_v1, ERRATUM(2139242), CPU_REV(1, 1)
+
+workaround_reset_start neoverse_v1, ERRATUM(2216392), ERRATA_V1_2216392
+ ldr x0, =0x5
+ msr S3_6_c15_c8_0, x0 /* CPUPSELR_EL3 */
+ ldr x0, =0x10F600E000
+ msr S3_6_c15_c8_2, x0 /* CPUPOR_EL3 */
+ ldr x0, =0x10FF80E000
+ msr S3_6_c15_c8_3, x0 /* CPUPMR_EL3 */
+ ldr x0, =0x80000000003FF
+ msr S3_6_c15_c8_1, x0 /* CPUPCR_EL3 */
+workaround_reset_end neoverse_v1, ERRATUM(2216392)
+
+check_erratum_range neoverse_v1, ERRATUM(2216392), CPU_REV(1, 0), CPU_REV(1, 1)
+
+workaround_reset_start neoverse_v1, ERRATUM(2294912), ERRATA_V1_2294912
+ /* Set bit 0 in ACTLR2_EL1 */
+ sysreg_bit_set NEOVERSE_V1_ACTLR2_EL1, NEOVERSE_V1_ACTLR2_EL1_BIT_0
+workaround_reset_end neoverse_v1, ERRATUM(2294912)
+
+check_erratum_ls neoverse_v1, ERRATUM(2294912), CPU_REV(1, 2)
+
+workaround_reset_start neoverse_v1, ERRATUM(2372203), ERRATA_V1_2372203
+ /* Set bit 40 in ACTLR2_EL1 */
+ sysreg_bit_set NEOVERSE_V1_ACTLR2_EL1, NEOVERSE_V1_ACTLR2_EL1_BIT_40
+workaround_reset_end neoverse_v1, ERRATUM(2372203)
+
+check_erratum_ls neoverse_v1, ERRATUM(2372203), CPU_REV(1, 1)
+
+workaround_runtime_start neoverse_v1, ERRATUM(2743093), ERRATA_V1_2743093
+ /* dsb before isb of power down sequence */
+ dsb sy
+workaround_runtime_end neoverse_v1, ERRATUM(2743093)
+
+check_erratum_ls neoverse_v1, ERRATUM(2743093), CPU_REV(1, 2)
+
+workaround_reset_start neoverse_v1, ERRATUM(2743233), ERRATA_V1_2743233
+ sysreg_bit_clear NEOVERSE_V1_ACTLR5_EL1, NEOVERSE_V1_ACTLR5_EL1_BIT_56
+ sysreg_bit_set NEOVERSE_V1_ACTLR5_EL1, NEOVERSE_V1_ACTLR5_EL1_BIT_55
+workaround_reset_end neoverse_v1, ERRATUM(2743233)
+
+check_erratum_ls neoverse_v1, ERRATUM(2743233), CPU_REV(1, 2)
+
+workaround_reset_start neoverse_v1, ERRATUM(2779461), ERRATA_V1_2779461
+ sysreg_bit_set NEOVERSE_V1_ACTLR3_EL1, NEOVERSE_V1_ACTLR3_EL1_BIT_47
+workaround_reset_end neoverse_v1, ERRATUM(2779461)
+
+check_erratum_ls neoverse_v1, ERRATUM(2779461), CPU_REV(1, 2)
+
+workaround_reset_start neoverse_v1, CVE(2022,23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+ * The Neoverse-V1 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_neoverse_v1
+#endif /* IMAGE_BL31 */
+workaround_reset_end neoverse_v1, CVE(2022,23960)
+
+check_erratum_chosen neoverse_v1, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func neoverse_v1_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ sysreg_bit_set NEOVERSE_V1_CPUPWRCTLR_EL1, NEOVERSE_V1_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ apply_erratum neoverse_v1, ERRATUM(2743093), ERRATA_V1_2743093
+
+ isb
+ ret
+endfunc neoverse_v1_core_pwr_dwn
+
+errata_report_shim neoverse_v1
+
+cpu_reset_func_start neoverse_v1
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end neoverse_v1
+
+ /* ---------------------------------------------
+ * This function provides Neoverse-V1 specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.neoverse_v1_regs, "aS"
+neoverse_v1_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func neoverse_v1_cpu_reg_dump
+ adr x6, neoverse_v1_regs
+ mrs x8, NEOVERSE_V1_CPUECTLR_EL1
+ ret
+endfunc neoverse_v1_cpu_reg_dump
+
+declare_cpu_ops neoverse_v1, NEOVERSE_V1_MIDR, \
+ neoverse_v1_reset_func, \
+ neoverse_v1_core_pwr_dwn
diff --git a/lib/cpus/aarch64/neoverse_v2.S b/lib/cpus/aarch64/neoverse_v2.S
new file mode 100644
index 0000000..bfd088d
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_v2.S
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <neoverse_v2.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+#include "wa_cve_2022_23960_bhb_vector.S"
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Neoverse V2 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Neoverse V2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+workaround_reset_start neoverse_v2, ERRATUM(2331132), ERRATA_V2_2331132
+ sysreg_bitfield_insert NEOVERSE_V2_CPUECTLR2_EL1, NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_CNSRV, \
+ NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_LSB, NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_WIDTH
+workaround_reset_end neoverse_v2, ERRATUM(2331132)
+
+check_erratum_ls neoverse_v2, ERRATUM(2331132), CPU_REV(0, 2)
+
+workaround_reset_start neoverse_v2, ERRATUM(2719105), ERRATA_V2_2719105
+ sysreg_bit_set NEOVERSE_V2_CPUACTLR2_EL1, NEOVERSE_V2_CPUACTLR2_EL1_BIT_0
+workaround_reset_end neoverse_v2, ERRATUM(2719105)
+
+check_erratum_ls neoverse_v2, ERRATUM(2719105), CPU_REV(0, 1)
+
+workaround_reset_start neoverse_v2, ERRATUM(2743011), ERRATA_V2_2743011
+ sysreg_bit_set NEOVERSE_V2_CPUACTLR5_EL1, NEOVERSE_V2_CPUACTLR5_EL1_BIT_55
+ sysreg_bit_clear NEOVERSE_V2_CPUACTLR5_EL1, NEOVERSE_V2_CPUACTLR5_EL1_BIT_56
+workaround_reset_end neoverse_v2, ERRATUM(2743011)
+
+check_erratum_ls neoverse_v2, ERRATUM(2743011), CPU_REV(0, 1)
+
+workaround_reset_start neoverse_v2, ERRATUM(2779510), ERRATA_V2_2779510
+ sysreg_bit_set NEOVERSE_V2_CPUACTLR3_EL1, NEOVERSE_V2_CPUACTLR3_EL1_BIT_47
+workaround_reset_end neoverse_v2, ERRATUM(2779510)
+
+check_erratum_ls neoverse_v2, ERRATUM(2779510), CPU_REV(0, 1)
+
+workaround_runtime_start neoverse_v2, ERRATUM(2801372), ERRATA_V2_2801372
+ /* dsb before isb of power down sequence */
+ dsb sy
+workaround_runtime_end neoverse_v2, ERRATUM(2801372), ERRATA_V2_2801372
+
+check_erratum_ls neoverse_v2, ERRATUM(2801372), CPU_REV(0, 1)
+
+workaround_reset_start neoverse_v2, CVE(2022,23960), WORKAROUND_CVE_2022_23960
+#if IMAGE_BL31
+ /*
+ * The Neoverse-V2 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs.
+ */
+ override_vector_table wa_cve_vbar_neoverse_v2
+#endif /* IMAGE_BL31 */
+workaround_reset_end neoverse_v2, CVE(2022,23960)
+
+check_erratum_chosen neoverse_v2, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+
+#if WORKAROUND_CVE_2022_23960
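+	/*
+	 * Instantiate the Spectre-BHB (CVE-2022-23960) mitigation vector
+	 * table for this core, using the Neoverse V2 specific branch loop
+	 * count (see wa_cve_2022_23960_bhb_vector.S).
+	 */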
+ wa_cve_2022_23960_bhb_vector_table NEOVERSE_V2_BHB_LOOP_COUNT, neoverse_v2
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+ /* ----------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ----------------------------------------------------
+ */
+func neoverse_v2_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set NEOVERSE_V2_CPUPWRCTLR_EL1, NEOVERSE_V2_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ apply_erratum neoverse_v2, ERRATUM(2801372), ERRATA_V2_2801372
+
+ isb
+ ret
+endfunc neoverse_v2_core_pwr_dwn
+
+cpu_reset_func_start neoverse_v2
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end neoverse_v2
+
+errata_report_shim neoverse_v2
+ /* ---------------------------------------------
+	 * This function provides Neoverse V2
+	 * specific register information for crash
+ * reporting. It needs to return with x6
+ * pointing to a list of register names in ascii
+ * and x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.neoverse_v2_regs, "aS"
+neoverse_v2_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func neoverse_v2_cpu_reg_dump
+ adr x6, neoverse_v2_regs
+ mrs x8, NEOVERSE_V2_CPUECTLR_EL1
+ ret
+endfunc neoverse_v2_cpu_reg_dump
+
+declare_cpu_ops neoverse_v2, NEOVERSE_V2_MIDR, \
+ neoverse_v2_reset_func, \
+ neoverse_v2_core_pwr_dwn
diff --git a/lib/cpus/aarch64/nevis.S b/lib/cpus/aarch64/nevis.S
new file mode 100644
index 0000000..36830a9
--- /dev/null
+++ b/lib/cpus/aarch64/nevis.S
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <nevis.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Nevis must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Nevis supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+cpu_reset_func_start nevis
+ /* ----------------------------------------------------
+ * Disable speculative loads
+ * ----------------------------------------------------
+ */
+ msr SSBS, xzr
+cpu_reset_func_end nevis
+
+func nevis_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set NEVIS_IMP_CPUPWRCTLR_EL1, \
+ NEVIS_IMP_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+ isb
+ ret
+endfunc nevis_core_pwr_dwn
+
+errata_report_shim nevis
+
+.section .rodata.nevis_regs, "aS"
+nevis_regs: /* The ASCII list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func nevis_cpu_reg_dump
+ adr x6, nevis_regs
+ mrs x8, NEVIS_CPUECTLR_EL1
+ ret
+endfunc nevis_cpu_reg_dump
+
+declare_cpu_ops nevis, NEVIS_MIDR, \
+ nevis_reset_func, \
+ nevis_core_pwr_dwn
diff --git a/lib/cpus/aarch64/qemu_max.S b/lib/cpus/aarch64/qemu_max.S
new file mode 100644
index 0000000..00963bc
--- /dev/null
+++ b/lib/cpus/aarch64/qemu_max.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+#include <qemu_max.h>
+
+func qemu_max_core_pwr_dwn
+ /* ---------------------------------------------
+ * Disable the Data Cache.
+ * ---------------------------------------------
+ */
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+
+ /* ---------------------------------------------
+ * Flush L1 cache to L2.
+ * ---------------------------------------------
+ */
+ mov x18, lr
+ mov x0, #DCCISW
+ bl dcsw_op_level1
+ mov lr, x18
+ ret
+endfunc qemu_max_core_pwr_dwn
+
+func qemu_max_cluster_pwr_dwn
+ /* ---------------------------------------------
+ * Disable the Data Cache.
+ * ---------------------------------------------
+ */
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+
+ /* ---------------------------------------------
+ * Flush all caches to PoC.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ b dcsw_op_all
+endfunc qemu_max_cluster_pwr_dwn
+
+errata_report_shim qemu_max
+
+ /* ---------------------------------------------
+ * This function provides cpu specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.qemu_max_regs, "aS"
+qemu_max_regs: /* The ascii list of register names to be reported */
+ .asciz "" /* no registers to report */
+
+func qemu_max_cpu_reg_dump
+ adr x6, qemu_max_regs
+ ret
+endfunc qemu_max_cpu_reg_dump
+
+
+/* cpu_ops for QEMU MAX */
+declare_cpu_ops qemu_max, QEMU_MAX_MIDR, CPU_NO_RESET_FUNC, \
+ qemu_max_core_pwr_dwn, \
+ qemu_max_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/rainier.S b/lib/cpus/aarch64/rainier.S
new file mode 100644
index 0000000..c770f54
--- /dev/null
+++ b/lib/cpus/aarch64/rainier.S
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <cpu_macros.S>
+#include <cpuamu.h>
+#include <rainier.h>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Rainier CPU must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Rainier CPU supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+/* --------------------------------------------------
+ * Disable speculative loads if Rainier supports
+ * SSBS.
+ *
+ * Shall clobber: x0.
+ * --------------------------------------------------
+ */
+func rainier_disable_speculative_loads
+ /* Check if the PE implements SSBS */
+ mrs x0, id_aa64pfr1_el1
+ tst x0, #(ID_AA64PFR1_EL1_SSBS_MASK << ID_AA64PFR1_EL1_SSBS_SHIFT)
+ b.eq 1f
+
+ /* Disable speculative loads */
+ msr SSBS, xzr
+
+1:
+ ret
+endfunc rainier_disable_speculative_loads
+
+ /* Rainier R0P0 is based on Neoverse N1 R4P0. */
+workaround_reset_start rainier, ERRATUM(1868343), ERRATA_N1_1868343
+ sysreg_bit_set RAINIER_CPUACTLR_EL1, RAINIER_CPUACTLR_EL1_BIT_13
+workaround_reset_end rainier, ERRATUM(1868343)
+
+check_erratum_ls rainier, ERRATUM(1868343), CPU_REV(0, 0)
+
+cpu_reset_func_start rainier
+ bl rainier_disable_speculative_loads
+ /* Forces all cacheable atomic instructions to be near */
+ sysreg_bit_set RAINIER_CPUACTLR2_EL1, RAINIER_CPUACTLR2_EL1_BIT_2
+
+#if ENABLE_FEAT_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ sysreg_bit_set actlr_el3, RAINIER_ACTLR_AMEN_BIT
+
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ sysreg_bit_set actlr_el2, RAINIER_ACTLR_AMEN_BIT
+
+ /* Enable group0 counters */
+ mov x0, #RAINIER_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET_EL0, x0
+#endif
+cpu_reset_func_end rainier
+
+ /* ---------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * ---------------------------------------------
+ */
+func rainier_core_pwr_dwn
+ /* ---------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------
+ */
+ sysreg_bit_set RAINIER_CPUPWRCTLR_EL1, RAINIER_CORE_PWRDN_EN_MASK
+ isb
+ ret
+endfunc rainier_core_pwr_dwn
+
+errata_report_shim rainier
+
+ /* ---------------------------------------------
+ * This function provides Rainier specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * ---------------------------------------------
+ */
+.section .rodata.rainier_regs, "aS"
+rainier_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func rainier_cpu_reg_dump
+ adr x6, rainier_regs
+ mrs x8, RAINIER_CPUECTLR_EL1
+ ret
+endfunc rainier_cpu_reg_dump
+
+declare_cpu_ops rainier, RAINIER_MIDR, \
+ rainier_reset_func, \
+ rainier_core_pwr_dwn
diff --git a/lib/cpus/aarch64/travis.S b/lib/cpus/aarch64/travis.S
new file mode 100644
index 0000000..2abefe9
--- /dev/null
+++ b/lib/cpus/aarch64/travis.S
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <travis.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Travis must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS == 1
+#error "Travis supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+cpu_reset_func_start travis
+ /* ----------------------------------------------------
+ * Disable speculative loads
+ * ----------------------------------------------------
+ */
+ msr SSBS, xzr
+cpu_reset_func_end travis
+
+func travis_core_pwr_dwn
+#if ENABLE_SME_FOR_NS
+ /* ---------------------------------------------------
+ * Disable SME if enabled and supported
+ * ---------------------------------------------------
+ */
+ mrs x0, ID_AA64PFR1_EL1
+ ubfx x0, x0, #ID_AA64PFR1_EL1_SME_SHIFT, \
+ #ID_AA64PFR1_EL1_SME_WIDTH
+ cmp x0, #ID_AA64PFR1_EL1_SME_NOT_SUPPORTED
+ b.eq 1f
+ msr TRAVIS_SVCRSM, xzr
+ msr TRAVIS_SVCRZA, xzr
+1:
+#endif
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set TRAVIS_IMP_CPUPWRCTLR_EL1, \
+ TRAVIS_IMP_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+ isb
+ ret
+endfunc travis_core_pwr_dwn
+
+errata_report_shim travis
+
+.section .rodata.travis_regs, "aS"
+travis_regs: /* The ASCII list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func travis_cpu_reg_dump
+ adr x6, travis_regs
+ mrs x8, TRAVIS_IMP_CPUECTLR_EL1
+ ret
+endfunc travis_cpu_reg_dump
+
+declare_cpu_ops travis, TRAVIS_MIDR, \
+ travis_reset_func, \
+ travis_core_pwr_dwn
diff --git a/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
new file mode 100644
index 0000000..8d9aa5e
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <services/arm_arch_svc.h>
+
+ .globl wa_cve_2017_5715_bpiall_vbar
+
+#define EMIT_BPIALL		0xee070fd5	/* AArch32 encoding of BPIALL */
+#define EMIT_SMC		0xe1600070	/* AArch32 encoding of SMC #0 */
+#define ESR_EL3_A64_SMC0	0x5e000000	/* ESR_EL3 syndrome: SMC #0 from AArch64 */
+
+ .macro apply_cve_2017_5715_wa _from_vector
+ /*
+ * Save register state to enable a call to AArch32 S-EL1 and return
+ * Identify the original calling vector in w2 (==_from_vector)
+ * Use w3-w6 for additional register state preservation while in S-EL1
+ */
+
+ /* Save GP regs */
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+ /* Identify the original exception vector */
+ mov w2, \_from_vector
+
+ /* Preserve 32-bit system registers in GP registers through the workaround */
+ mrs x3, esr_el3
+ mrs x4, spsr_el3
+ mrs x5, scr_el3
+ mrs x6, sctlr_el1
+
+ /*
+ * Preserve LR and ELR_EL3 registers in the GP regs context.
+ * Temporarily use the CTX_GPREG_SP_EL0 slot to preserve ELR_EL3
+ * through the workaround. This is OK because at this point the
+ * current state for this context's SP_EL0 is in the live system
+ * register, which is unmodified by the workaround.
+ */
+ mrs x7, elr_el3
+ stp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+ /*
+ * Load system registers for entry to S-EL1.
+ */
+
+ /* Mask all interrupts and set AArch32 Supervisor mode */
+ movz w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK)
+
+ /* Switch EL3 exception vectors while the workaround is executing. */
+ adr x9, wa_cve_2017_5715_bpiall_ret_vbar
+
+ /* Setup SCTLR_EL1 with MMU off and I$ on */
+ ldr x10, stub_sel1_sctlr
+
+ /* Land at the S-EL1 workaround stub */
+ adr x11, aarch32_stub
+
+ /*
+ * Setting SCR_EL3 to all zeroes means that the NS, RW
+ * and SMD bits are configured as expected.
+ */
+ msr scr_el3, xzr
+ msr spsr_el3, x8
+ msr vbar_el3, x9
+ msr sctlr_el1, x10
+ msr elr_el3, x11
+
+ eret
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * This vector table is used at runtime to enter the workaround at
+ * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions. If the workaround
+ * is not enabled, the existing runtime exception vector table is used.
+ * ---------------------------------------------------------------------
+ */
+vector_base wa_cve_2017_5715_bpiall_vbar
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_sync_exception_sp_el0
+ b sync_exception_sp_el0
+ nop /* to force 8 byte alignment for the following stub */
+
+ /*
+ * Since each vector table entry is 128 bytes, we can store the
+ * stub context in the unused space to minimize memory footprint.
+ */
+stub_sel1_sctlr:
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+
+aarch32_stub:
+ .word EMIT_BPIALL
+ .word EMIT_SMC
+
+end_vector_entry bpiall_sync_exception_sp_el0
+
+vector_entry bpiall_irq_sp_el0
+ b irq_sp_el0
+end_vector_entry bpiall_irq_sp_el0
+
+vector_entry bpiall_fiq_sp_el0
+ b fiq_sp_el0
+end_vector_entry bpiall_fiq_sp_el0
+
+vector_entry bpiall_serror_sp_el0
+ b serror_sp_el0
+end_vector_entry bpiall_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_sync_exception_sp_elx
+ b sync_exception_sp_elx
+end_vector_entry bpiall_sync_exception_sp_elx
+
+vector_entry bpiall_irq_sp_elx
+ b irq_sp_elx
+end_vector_entry bpiall_irq_sp_elx
+
+vector_entry bpiall_fiq_sp_elx
+ b fiq_sp_elx
+end_vector_entry bpiall_fiq_sp_elx
+
+vector_entry bpiall_serror_sp_elx
+ b serror_sp_elx
+end_vector_entry bpiall_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_sync_exception_aarch64
+ apply_cve_2017_5715_wa 1
+end_vector_entry bpiall_sync_exception_aarch64
+
+vector_entry bpiall_irq_aarch64
+ apply_cve_2017_5715_wa 2
+end_vector_entry bpiall_irq_aarch64
+
+vector_entry bpiall_fiq_aarch64
+ apply_cve_2017_5715_wa 4
+end_vector_entry bpiall_fiq_aarch64
+
+vector_entry bpiall_serror_aarch64
+ apply_cve_2017_5715_wa 8
+end_vector_entry bpiall_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_sync_exception_aarch32
+ apply_cve_2017_5715_wa 1
+end_vector_entry bpiall_sync_exception_aarch32
+
+vector_entry bpiall_irq_aarch32
+ apply_cve_2017_5715_wa 2
+end_vector_entry bpiall_irq_aarch32
+
+vector_entry bpiall_fiq_aarch32
+ apply_cve_2017_5715_wa 4
+end_vector_entry bpiall_fiq_aarch32
+
+vector_entry bpiall_serror_aarch32
+ apply_cve_2017_5715_wa 8
+end_vector_entry bpiall_serror_aarch32
+
+ /* ---------------------------------------------------------------------
+ * This vector table is used while the workaround is executing. It
+ * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError
+ * workaround stubs to enter EL3 from S-EL1. It restores the previous
+ * EL3 state before proceeding with the normal runtime exception vector.
+ * ---------------------------------------------------------------------
+ */
+vector_base wa_cve_2017_5715_bpiall_ret_vbar
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_ret_sync_exception_sp_el0
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_sync_exception_sp_el0
+
+vector_entry bpiall_ret_irq_sp_el0
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_irq_sp_el0
+
+vector_entry bpiall_ret_fiq_sp_el0
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_fiq_sp_el0
+
+vector_entry bpiall_ret_serror_sp_el0
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_ret_sync_exception_sp_elx
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_sync_exception_sp_elx
+
+vector_entry bpiall_ret_irq_sp_elx
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_irq_sp_elx
+
+vector_entry bpiall_ret_fiq_sp_elx
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_fiq_sp_elx
+
+vector_entry bpiall_ret_serror_sp_elx
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_ret_sync_exception_aarch64
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_sync_exception_aarch64
+
+vector_entry bpiall_ret_irq_aarch64
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_irq_aarch64
+
+vector_entry bpiall_ret_fiq_aarch64
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_fiq_aarch64
+
+vector_entry bpiall_ret_serror_aarch64
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_ret_sync_exception_aarch32
+ /*
+ * w2 indicates which SEL1 stub was run and thus which original vector was used
+ * w3-w6 contain saved system register state (esr_el3 in w3)
+ * Restore LR and ELR_EL3 register state from the GP regs context
+ */
+ ldp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+ /* Apply the restored system register state */
+ msr esr_el3, x3
+ msr spsr_el3, x4
+ msr scr_el3, x5
+ msr sctlr_el1, x6
+ msr elr_el3, x7
+
+ /*
+ * Workaround is complete, so swap VBAR_EL3 to point
+ * to workaround entry table in preparation for subsequent
+ * Sync/IRQ/FIQ/SError exceptions.
+ */
+ adr x0, wa_cve_2017_5715_bpiall_vbar
+ msr vbar_el3, x0
+
+ /*
+ * Restore all GP regs except x2 and x3 (esr). The value in x2
+ * indicates the type of the original exception.
+ */
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+ /* Fast path Sync exceptions. Static predictor will fall through. */
+ tbz w2, #0, workaround_not_sync
+
+ /*
+ * Check if SMC is coming from A64 state on #0
+ * with W0 = SMCCC_ARCH_WORKAROUND_1 or W0 = SMCCC_ARCH_WORKAROUND_3
+ *
+ * This sequence evaluates as:
+ * (W0==SMCCC_ARCH_WORKAROUND_1) || (W0==SMCCC_ARCH_WORKAROUND_3) ?
+ * (ESR_EL3==SMC#0) : (NE)
+ * allowing use of a single branch operation
+ */
+ orr w2, wzr, #SMCCC_ARCH_WORKAROUND_1
+ cmp w0, w2
+ orr w2, wzr, #SMCCC_ARCH_WORKAROUND_3
+ ccmp w0, w2, #4, ne
+ mov_imm w2, ESR_EL3_A64_SMC0
+ ccmp w3, w2, #0, eq
+ /* Static predictor will predict a fall through */
+ bne 1f
+ eret
+1:
+ /* restore x2 and x3 and continue sync exception handling */
+ b bpiall_ret_sync_exception_aarch32_tail
+end_vector_entry bpiall_ret_sync_exception_aarch32
+
+vector_entry bpiall_ret_irq_aarch32
+ b report_unhandled_interrupt
+
+ /*
+ * Post-workaround fan-out for non-sync exceptions
+ */
+workaround_not_sync:
+ tbnz w2, #3, bpiall_ret_serror
+ tbnz w2, #2, bpiall_ret_fiq
+ /* IRQ */
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b irq_aarch64
+
+bpiall_ret_fiq:
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b fiq_aarch64
+
+bpiall_ret_serror:
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b serror_aarch64
+end_vector_entry bpiall_ret_irq_aarch32
+
+vector_entry bpiall_ret_fiq_aarch32
+ b report_unhandled_interrupt
+end_vector_entry bpiall_ret_fiq_aarch32
+
+vector_entry bpiall_ret_serror_aarch32
+ b report_unhandled_exception
+end_vector_entry bpiall_ret_serror_aarch32
+
+ /*
+ * Part of bpiall_ret_sync_exception_aarch32 to save vector space
+ */
+func bpiall_ret_sync_exception_aarch32_tail
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b sync_exception_aarch64
+endfunc bpiall_ret_sync_exception_aarch32_tail
diff --git a/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
new file mode 100644
index 0000000..823f849
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <services/arm_arch_svc.h>
+
+ .globl wa_cve_2017_5715_mmu_vbar
+
+#define ESR_EL3_A64_SMC0	0x5e000000	/* ESR_EL3 syndrome: SMC #0 from AArch64 */
+#define ESR_EL3_A32_SMC0	0x4e000000	/* ESR_EL3 syndrome: SMC #0 from AArch32 */
+
+vector_base wa_cve_2017_5715_mmu_vbar
+
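+	/*
+	 * On the CPUs that install this vector table, disabling and
+	 * re-enabling the MMU at EL3 also invalidates the branch predictor,
+	 * which is the actual CVE-2017-5715 mitigation applied on exception
+	 * entry from lower ELs (hence the "mmu" variant of the workaround).
+	 */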
+ .macro apply_cve_2017_5715_wa _is_sync_exception _esr_el3_val
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ mrs x1, sctlr_el3
+ /* Disable MMU */
+ bic x1, x1, #SCTLR_M_BIT
+ msr sctlr_el3, x1
+ isb
+ /* Enable MMU */
+ orr x1, x1, #SCTLR_M_BIT
+ msr sctlr_el3, x1
+ /*
+ * Defer ISB to avoid synchronizing twice in case we hit
+ * the workaround SMC call which will implicitly synchronize
+ * because of the ERET instruction.
+ */
+
+ /*
+ * Ensure SMC is coming from A64/A32 state on #0
+ * with W0 = SMCCC_ARCH_WORKAROUND_1 or W0 = SMCCC_ARCH_WORKAROUND_3
+ *
+ * This sequence evaluates as:
+ * (W0==SMCCC_ARCH_WORKAROUND_1) || (W0==SMCCC_ARCH_WORKAROUND_3) ?
+ * (ESR_EL3==SMC#0) : (NE)
+ * allowing use of a single branch operation
+ */
+ .if \_is_sync_exception
+ orr w1, wzr, #SMCCC_ARCH_WORKAROUND_1
+ cmp w0, w1
+ orr w1, wzr, #SMCCC_ARCH_WORKAROUND_3
+ ccmp w0, w1, #4, ne
+ mrs x0, esr_el3
+ mov_imm w1, \_esr_el3_val
+ ccmp w0, w1, #0, eq
+ /* Static predictor will predict a fall through */
+ bne 1f
+ exception_return
+1:
+ .endif
+
+ /*
+ * Synchronize now to enable the MMU. This is required
+ * to ensure the load pair below reads the data stored earlier.
+ */
+ isb
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_sp_el0
+ b sync_exception_sp_el0
+end_vector_entry mmu_sync_exception_sp_el0
+
+vector_entry mmu_irq_sp_el0
+ b irq_sp_el0
+end_vector_entry mmu_irq_sp_el0
+
+vector_entry mmu_fiq_sp_el0
+ b fiq_sp_el0
+end_vector_entry mmu_fiq_sp_el0
+
+vector_entry mmu_serror_sp_el0
+ b serror_sp_el0
+end_vector_entry mmu_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_sp_elx
+ b sync_exception_sp_elx
+end_vector_entry mmu_sync_exception_sp_elx
+
+vector_entry mmu_irq_sp_elx
+ b irq_sp_elx
+end_vector_entry mmu_irq_sp_elx
+
+vector_entry mmu_fiq_sp_elx
+ b fiq_sp_elx
+end_vector_entry mmu_fiq_sp_elx
+
+vector_entry mmu_serror_sp_elx
+ b serror_sp_elx
+end_vector_entry mmu_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
+ b sync_exception_aarch64
+end_vector_entry mmu_sync_exception_aarch64
+
+vector_entry mmu_irq_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+ b irq_aarch64
+end_vector_entry mmu_irq_aarch64
+
+vector_entry mmu_fiq_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+ b fiq_aarch64
+end_vector_entry mmu_fiq_aarch64
+
+vector_entry mmu_serror_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+ b serror_aarch64
+end_vector_entry mmu_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
+ b sync_exception_aarch32
+end_vector_entry mmu_sync_exception_aarch32
+
+vector_entry mmu_irq_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+ b irq_aarch32
+end_vector_entry mmu_irq_aarch32
+
+vector_entry mmu_fiq_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+ b fiq_aarch32
+end_vector_entry mmu_fiq_aarch32
+
+vector_entry mmu_serror_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+ b serror_aarch32
+end_vector_entry mmu_serror_aarch32
diff --git a/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S b/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S
new file mode 100644
index 0000000..bf26658
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2022_23960_bhb.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <context.h>
+
+#if WORKAROUND_CVE_2022_23960
+ /*
+ * This macro applies the mitigation for CVE-2022-23960.
+ * The macro saves x2 to the CPU context.
+ * SP should point to the CPU context.
+ */
+ .macro apply_cve_2022_23960_bhb_wa _bhb_loop_count
+ str x2, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+
+ /* CVE-BHB-NUM loop count */
+ mov x2, \_bhb_loop_count
+
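+	/*
+	 * Execute the branch loop below _bhb_loop_count times so that the
+	 * branch history is overwritten before the regular vector code runs
+	 * (the "loop" variant of the Spectre-BHB mitigation).
+	 */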
+1:
+ /* b pc+4 part of the workaround */
+ b 2f
+2:
+ subs x2, x2, #1
+ bne 1b
+ speculation_barrier
+ ldr x2, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ .endm
+#endif /* WORKAROUND_CVE_2022_23960 */
diff --git a/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S b/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S
new file mode 100644
index 0000000..220fa11
--- /dev/null
+++ b/lib/cpus/aarch64/wa_cve_2022_23960_bhb_vector.S
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <services/arm_arch_svc.h>
+#include "wa_cve_2022_23960_bhb.S"
+
+ /*
+ * This macro is used to isolate the vector table for relevant CPUs
+ * used in the mitigation for CVE_2022_23960.
+ */
+ .macro wa_cve_2022_23960_bhb_vector_table _bhb_loop_count, _cpu
+
+ .globl wa_cve_vbar_\_cpu
+
+vector_base wa_cve_vbar_\_cpu
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry bhb_sync_exception_sp_el0_\_cpu
+ b sync_exception_sp_el0
+end_vector_entry bhb_sync_exception_sp_el0_\_cpu
+
+vector_entry bhb_irq_sp_el0_\_cpu
+ b irq_sp_el0
+end_vector_entry bhb_irq_sp_el0_\_cpu
+
+vector_entry bhb_fiq_sp_el0_\_cpu
+ b fiq_sp_el0
+end_vector_entry bhb_fiq_sp_el0_\_cpu
+
+vector_entry bhb_serror_sp_el0_\_cpu
+ b serror_sp_el0
+end_vector_entry bhb_serror_sp_el0_\_cpu
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry bhb_sync_exception_sp_elx_\_cpu
+ b sync_exception_sp_elx
+end_vector_entry bhb_sync_exception_sp_elx_\_cpu
+
+vector_entry bhb_irq_sp_elx_\_cpu
+ b irq_sp_elx
+end_vector_entry bhb_irq_sp_elx_\_cpu
+
+vector_entry bhb_fiq_sp_elx_\_cpu
+ b fiq_sp_elx
+end_vector_entry bhb_fiq_sp_elx_\_cpu
+
+vector_entry bhb_serror_sp_elx_\_cpu
+ b serror_sp_elx
+end_vector_entry bhb_serror_sp_elx_\_cpu
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry bhb_sync_exception_aarch64_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b sync_exception_aarch64
+end_vector_entry bhb_sync_exception_aarch64_\_cpu
+
+vector_entry bhb_irq_aarch64_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b irq_aarch64
+end_vector_entry bhb_irq_aarch64_\_cpu
+
+vector_entry bhb_fiq_aarch64_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b fiq_aarch64
+end_vector_entry bhb_fiq_aarch64_\_cpu
+
+vector_entry bhb_serror_aarch64_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b serror_aarch64
+end_vector_entry bhb_serror_aarch64_\_cpu
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry bhb_sync_exception_aarch32_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b sync_exception_aarch32
+end_vector_entry bhb_sync_exception_aarch32_\_cpu
+
+vector_entry bhb_irq_aarch32_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b irq_aarch32
+end_vector_entry bhb_irq_aarch32_\_cpu
+
+vector_entry bhb_fiq_aarch32_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b fiq_aarch32
+end_vector_entry bhb_fiq_aarch32_\_cpu
+
+vector_entry bhb_serror_aarch32_\_cpu
+ apply_cve_2022_23960_bhb_wa \_bhb_loop_count
+ b serror_aarch32
+end_vector_entry bhb_serror_aarch32_\_cpu
+ .endm
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
new file mode 100644
index 0000000..434ee08
--- /dev/null
+++ b/lib/cpus/cpu-ops.mk
@@ -0,0 +1,893 @@
+#
+# Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include ${MAKE_HELPERS_DIRECTORY}$/build_macros.mk
+
+# Cortex A57 specific optimisation to skip L1 cache flush when
+# cluster is powered down.
+CPU_FLAG_LIST += SKIP_A57_L1_FLUSH_PWR_DWN
+
+# Flag to disable the cache non-temporal hint.
+# It is enabled by default.
+A53_DISABLE_NON_TEMPORAL_HINT ?=1
+CPU_FLAG_LIST += A53_DISABLE_NON_TEMPORAL_HINT
+
+# Flag to disable the cache non-temporal hint.
+# It is enabled by default.
+A57_DISABLE_NON_TEMPORAL_HINT ?=1
+CPU_FLAG_LIST += A57_DISABLE_NON_TEMPORAL_HINT
+
+# Flag to enable higher performance non-cacheable load forwarding.
+# It is disabled by default.
+CPU_FLAG_LIST += A57_ENABLE_NONCACHEABLE_LOAD_FWD
+
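+# CVE mitigation build flags. The CVE-2017-5715, CVE-2018-3639 and
+# CVE-2022-23960 workarounds default to enabled; DYNAMIC_WORKAROUND_CVE_2018_3639
+# selects the runtime-controllable (SMCCC_ARCH_WORKAROUND_2) variant of the
+# CVE-2018-3639 mitigation and is opt-in.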
+WORKAROUND_CVE_2017_5715 ?=1
+CPU_FLAG_LIST += WORKAROUND_CVE_2017_5715
+WORKAROUND_CVE_2018_3639 ?=1
+CPU_FLAG_LIST += WORKAROUND_CVE_2018_3639
+CPU_FLAG_LIST += DYNAMIC_WORKAROUND_CVE_2018_3639
+WORKAROUND_CVE_2022_23960 ?=1
+CPU_FLAG_LIST += WORKAROUND_CVE_2022_23960
+
+# Flags to indicate internal or external Last level cache
+# By default internal
+CPU_FLAG_LIST += NEOVERSE_Nx_EXTERNAL_LLC
+
+# CPU Errata Build flags.
+# These should be enabled by the platform if the erratum workaround needs to be
+# applied.
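+# Each name added to CPU_FLAG_LIST below becomes a boolean build option and a
+# matching define, defaulting to 0 unless a default is given above. A platform
+# enables a given workaround from its own makefile, for example:
+#   ERRATA_A53_855873 := 1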
+
+# Flag to apply erratum 794073 workaround when disabling mmu.
+CPU_FLAG_LIST += ERRATA_A9_794073
+
+# Flag to apply erratum 816470 workaround during power down. This erratum
+# applies only to revision >= r3p0 of the Cortex A15 cpu.
+CPU_FLAG_LIST += ERRATA_A15_816470
+
+# Flag to apply erratum 827671 workaround during reset. This erratum applies
+# only to revision >= r3p0 of the Cortex A15 cpu.
+CPU_FLAG_LIST += ERRATA_A15_827671
+
+# Flag to apply erratum 852421 workaround during reset. This erratum applies
+# only to revision <= r1p2 of the Cortex A17 cpu.
+CPU_FLAG_LIST += ERRATA_A17_852421
+
+# Flag to apply erratum 852423 workaround during reset. This erratum applies
+# only to revision <= r1p2 of the Cortex A17 cpu.
+CPU_FLAG_LIST += ERRATA_A17_852423
+
+# Flag to apply erratum 855472 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A35 cpu.
+CPU_FLAG_LIST += ERRATA_A35_855472
+
+# Flag to apply erratum 819472 workaround during reset. This erratum applies
+# only to revision <= r0p1 of the Cortex A53 cpu.
+CPU_FLAG_LIST += ERRATA_A53_819472
+
+# Flag to apply erratum 824069 workaround during reset. This erratum applies
+# only to revision <= r0p2 of the Cortex A53 cpu.
+CPU_FLAG_LIST += ERRATA_A53_824069
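+	/* Preserve the return address in x18 across the cache helper call */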
+
+# Flag to apply erratum 826319 workaround during reset. This erratum applies
+# only to revision <= r0p2 of the Cortex A53 cpu.
+CPU_FLAG_LIST += ERRATA_A53_826319
+
+# Flag to apply erratum 827319 workaround during reset. This erratum applies
+# only to revision <= r0p2 of the Cortex A53 cpu.
+CPU_FLAG_LIST += ERRATA_A53_827319
+
+# Flag to apply erratum 835769 workaround at compile and link time. This
+# erratum applies to revision <= r0p4 of the Cortex A53 cpu. Enabling this
+# workaround can lead the linker to create "*.stub" sections.
+CPU_FLAG_LIST += ERRATA_A53_835769
+
+# Flag to apply erratum 836870 workaround during reset. This erratum applies
+# only to revision <= r0p3 of the Cortex A53 cpu. From r0p4 and onwards, this
+# erratum workaround is enabled by default in hardware.
+CPU_FLAG_LIST += ERRATA_A53_836870
+
+# Flag to apply erratum 843419 workaround at link time.
+# This erratum applies to revision <= r0p4 of the Cortex A53 cpu. Enabling this
+# workaround could lead the linker to emit "*.stub" sections which are 4kB
+# aligned.
+CPU_FLAG_LIST += ERRATA_A53_843419
+
+# Flag to apply erratum 855873 workaround during reset. This erratum applies
+# to all revisions of the Cortex A53 CPU, but this firmware workaround only
+# works for revisions r0p3 and higher. Earlier revisions are taken care
+# of by the rich OS.
+CPU_FLAG_LIST += ERRATA_A53_855873
+
+# Flag to apply erratum 1530924 workaround during reset. This erratum applies
+# to all revisions of Cortex A53 cpu.
+CPU_FLAG_LIST += ERRATA_A53_1530924
+
+# Flag to apply erratum 768277 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A55 cpu.
+CPU_FLAG_LIST += ERRATA_A55_768277
+
+# Flag to apply erratum 778703 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A55 cpu.
+CPU_FLAG_LIST += ERRATA_A55_778703
+
+# Flag to apply erratum 798797 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A55 cpu.
+CPU_FLAG_LIST += ERRATA_A55_798797
+
+# Flag to apply erratum 846532 workaround during reset. This erratum applies
+# only to revision <= r0p1 of the Cortex A55 cpu.
+CPU_FLAG_LIST += ERRATA_A55_846532
+
+# Flag to apply erratum 903758 workaround during reset. This erratum applies
+# only to revision <= r0p1 of the Cortex A55 cpu.
+CPU_FLAG_LIST += ERRATA_A55_903758
+
+# Flag to apply erratum 1221012 workaround during reset. This erratum applies
+# only to revision <= r1p0 of the Cortex A55 cpu.
+CPU_FLAG_LIST += ERRATA_A55_1221012
+
+# Flag to apply erratum 1530923 workaround during reset. This erratum applies
+# to all revisions of Cortex A55 cpu.
+CPU_FLAG_LIST += ERRATA_A55_1530923
+
+# Flag to apply erratum 806969 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
+CPU_FLAG_LIST += ERRATA_A57_806969
+
+# Flag to apply erratum 813419 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
+CPU_FLAG_LIST += ERRATA_A57_813419
+
+# Flag to apply erratum 813420 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
+CPU_FLAG_LIST += ERRATA_A57_813420
+
+# Flag to apply erratum 814670 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
+CPU_FLAG_LIST += ERRATA_A57_814670
+
+# Flag to apply erratum 817169 workaround during power down. This erratum
+# applies only to revision <= r0p1 of the Cortex A57 cpu.
+CPU_FLAG_LIST += ERRATA_A57_817169
+
+# Flag to apply erratum 826974 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A57 cpu.
+CPU_FLAG_LIST += ERRATA_A57_826974
+
+# Flag to apply erratum 826977 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A57 cpu.
+CPU_FLAG_LIST += ERRATA_A57_826977
+
+# Flag to apply erratum 828024 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A57 cpu.
+CPU_FLAG_LIST += ERRATA_A57_828024
+
+# Flag to apply erratum 829520 workaround during reset. This erratum applies
+# only to revision <= r1p2 of the Cortex A57 cpu.
+CPU_FLAG_LIST += ERRATA_A57_829520
+
+# Flag to apply erratum 833471 workaround during reset. This erratum applies
+# only to revision <= r1p2 of the Cortex A57 cpu.
+CPU_FLAG_LIST += ERRATA_A57_833471
+
+# Flag to apply erratum 859972 workaround during reset. This erratum applies
+# only to revision <= r1p3 of the Cortex A57 cpu.
+CPU_FLAG_LIST += ERRATA_A57_859972
+
+# Flag to apply erratum 1319537 workaround during reset. This erratum applies
+# to all revisions of Cortex A57 cpu.
+CPU_FLAG_LIST += ERRATA_A57_1319537
+
+# Flag to apply erratum 859971 workaround during reset. This erratum applies
+# only to revision <= r0p3 of the Cortex A72 cpu.
+CPU_FLAG_LIST += ERRATA_A72_859971
+
+# Flag to apply erratum 1319367 workaround during reset. This erratum applies
+# to all revisions of Cortex A72 cpu.
+CPU_FLAG_LIST += ERRATA_A72_1319367
+
+# Flag to apply erratum 852427 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A73 cpu.
+CPU_FLAG_LIST += ERRATA_A73_852427
+
+# Flag to apply erratum 855423 workaround during reset. This erratum applies
+# only to revision <= r0p1 of the Cortex A73 cpu.
+CPU_FLAG_LIST += ERRATA_A73_855423
+
+# Flag to apply erratum 764081 workaround during reset. This erratum applies
+# only to revision <= r0p0 of the Cortex A75 cpu.
+CPU_FLAG_LIST += ERRATA_A75_764081
+
+# Flag to apply erratum 790748 workaround during reset. This erratum applies
+# only to revision <= r0p0 of the Cortex A75 cpu.
+CPU_FLAG_LIST += ERRATA_A75_790748
+
+# Flag to apply erratum 1073348 workaround during reset. This erratum applies
+# only to revision <= r1p0 of the Cortex A76 cpu.
+CPU_FLAG_LIST += ERRATA_A76_1073348
+
+# Flag to apply erratum 1130799 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Cortex A76 cpu.
+CPU_FLAG_LIST += ERRATA_A76_1130799
+
+# Flag to apply erratum 1220197 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Cortex A76 cpu.
+CPU_FLAG_LIST += ERRATA_A76_1220197
+
+# Flag to apply erratum 1257314 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Cortex A76 cpu.
+CPU_FLAG_LIST += ERRATA_A76_1257314
+
+# Flag to apply erratum 1262606 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Cortex A76 cpu.
+CPU_FLAG_LIST += ERRATA_A76_1262606
+
+# Flag to apply erratum 1262888 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Cortex A76 cpu.
+CPU_FLAG_LIST += ERRATA_A76_1262888
+
+# Flag to apply erratum 1275112 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Cortex A76 cpu.
+CPU_FLAG_LIST += ERRATA_A76_1275112
+
+# Flag to apply erratum 1286807 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Cortex A76 cpu.
+CPU_FLAG_LIST += ERRATA_A76_1286807
+
+# Flag to apply erratum 1791580 workaround during reset. This erratum applies
+# only to revision <= r4p0 of the Cortex A76 cpu.
+CPU_FLAG_LIST += ERRATA_A76_1791580
+
+# Flag to apply erratum 1165522 workaround during reset. This erratum applies
+# to all revisions of Cortex A76 cpu.
+CPU_FLAG_LIST += ERRATA_A76_1165522
+
+# Flag to apply erratum 1868343 workaround during reset. This erratum applies
+# only to revision <= r4p0 of the Cortex A76 cpu.
+CPU_FLAG_LIST += ERRATA_A76_1868343
+
+# Flag to apply erratum 1946160 workaround during reset. This erratum applies
+# only to revisions r3p0 - r4p1 of the Cortex A76 cpu.
+CPU_FLAG_LIST += ERRATA_A76_1946160
+
+# Flag to apply erratum 2743102 workaround during powerdown. This erratum
+# applies to all revisions <= r4p1 of the Cortex A76 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_A76_2743102
+
+# Flag to apply erratum 1508412 workaround during reset. This erratum applies
+# only to revision <= r1p0 of the Cortex A77 cpu.
+CPU_FLAG_LIST += ERRATA_A77_1508412
+
+# Flag to apply erratum 1925769 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A77 cpu.
+CPU_FLAG_LIST += ERRATA_A77_1925769
+
+# Flag to apply erratum 1946167 workaround during reset. This erratum applies
+# only to revision <= r1p1 of the Cortex A77 cpu.
+CPU_FLAG_LIST += ERRATA_A77_1946167
+
+# Flag to apply erratum 1791578 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1; it is still open.
+CPU_FLAG_LIST += ERRATA_A77_1791578
+
+# Flag to apply erratum 2356587 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1; it is still open.
+CPU_FLAG_LIST += ERRATA_A77_2356587
+
+# Flag to apply erratum 1800714 workaround during reset. This erratum applies
+# to revisions <= r1p1 of the Cortex A77 cpu.
+CPU_FLAG_LIST += ERRATA_A77_1800714
+
+# Flag to apply erratum 2743100 workaround during power down. This erratum
+# applies to revisions r0p0, r1p0, and r1p1; it is still open.
+CPU_FLAG_LIST += ERRATA_A77_2743100
+
+# Flag to apply erratum 1688305 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the A78 cpu.
+CPU_FLAG_LIST += ERRATA_A78_1688305
+
+# Flag to apply erratum 1941498 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1 of the A78 cpu.
+CPU_FLAG_LIST += ERRATA_A78_1941498
+
+# Flag to apply erratum 1951500 workaround during reset. This erratum applies
+# to revisions r1p0 and r1p1 of the A78 cpu. The issue is present in r0p0 as
+# well but there is no workaround for that revision.
+CPU_FLAG_LIST += ERRATA_A78_1951500
+
+# Flag to apply erratum 1821534 workaround during reset. This erratum applies
+# to revisions r0p0 and r1p0 of the A78 cpu.
+CPU_FLAG_LIST += ERRATA_A78_1821534
+
+# Flag to apply erratum 1952683 workaround during reset. This erratum applies
+# to revision r0p0 of the A78 cpu and was fixed in the revision r1p0.
+CPU_FLAG_LIST += ERRATA_A78_1952683
+
+# Flag to apply erratum 2132060 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78_2132060
+
+# Flag to apply erratum 2242635 workaround during reset. This erratum applies
+# to revisions r1p0, r1p1, and r1p2 of the A78 cpu and is open. The issue is
+# present in r0p0 as well but there is no workaround for that revision.
+CPU_FLAG_LIST += ERRATA_A78_2242635
+
+# Flag to apply erratum 2376745 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78_2376745
+
+# Flag to apply erratum 2395406 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78_2395406
+
+# Flag to apply erratum 2712571 workaround for non-arm interconnect ip. This
+# erratum applies to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu.
+# It is still open.
+CPU_FLAG_LIST += ERRATA_A78_2712571
+
+# Flag to apply erratum 2742426 workaround during reset. This erratum
+# applies to revisions r0p0, r1p0, r1p1 and r1p2 of the A78 cpu. It is still
+# open.
+CPU_FLAG_LIST += ERRATA_A78_2742426
+
+# Flag to apply erratum 2772019 workaround during powerdown. This erratum
+# applies to revisions r0p0, r1p0, r1p1 and r1p2 of the A78 cpu. It is still
+# open.
+CPU_FLAG_LIST += ERRATA_A78_2772019
+
+# Flag to apply erratum 2779479 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1 and r1p2 of the A78 cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78_2779479
+
+# Flag to apply erratum 1941500 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1 of the A78 AE cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78_AE_1941500
+
+# Flag to apply erratum 1951502 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1 of the A78 AE cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78_AE_1951502
+
+# Flag to apply erratum 2376748 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1 and r0p2 of the A78 AE cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78_AE_2376748
+
+# Flag to apply erratum 2395408 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1 of the A78 AE cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78_AE_2395408
+
+# Flag to apply erratum 1827430 workaround during reset. This erratum applies
+# to revision r0p0 of the A78C cpu. It is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_A78C_1827430
+
+# Flag to apply erratum 1827440 workaround during reset. This erratum applies
+# to revision r0p0 of the A78C cpu. It is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_A78C_1827440
+
+# Flag to apply erratum 2712574 workaround for non-arm interconnect ip. This
+# erratum applies to revisions r0p0, r0p1 and r0p2 of the A78 AE cpu.
+# It is still open.
+CPU_FLAG_LIST += ERRATA_A78_AE_2712574
+
+# Flag to apply erratum 2132064 workaround during reset. This erratum applies
+# to revisions r0p1 and r0p2 of the A78C cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78C_2132064
+
+# Flag to apply erratum 2242638 workaround during reset. This erratum applies
+# to revisions r0p1 and r0p2 of the A78C cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78C_2242638
+
+# Flag to apply erratum 2376749 workaround during reset. This erratum applies
+# to revisions r0p1 and r0p2 of the A78C cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78C_2376749
+
+# Flag to apply erratum 2395411 workaround during reset. This erratum applies
+# to revisions r0p1 and r0p2 of the A78C cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78C_2395411
+
+# Flag to apply erratum 2712575 workaround for non-arm interconnect ip. This
+# erratum applies to revisions r0p1 and r0p2 of the A78C cpu.
+# It is still open.
+CPU_FLAG_LIST += ERRATA_A78C_2712575
+
+# Flag to apply erratum 2772121 workaround during powerdown. This erratum
+# applies to revisions r0p0, r0p1 and r0p2 of the A78C cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78C_2772121
+
+# Flag to apply erratum 2779484 workaround during reset. This erratum
+# applies to revisions r0p1 and r0p2 of the A78C cpu. It is still open.
+CPU_FLAG_LIST += ERRATA_A78C_2779484
+
+# Flag to apply erratum 1821534 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the X1 cpu and is fixed in r1p1.
+CPU_FLAG_LIST += ERRATA_X1_1821534
+
+# Flag to apply erratum 1688305 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the X1 cpu and is fixed in r1p1.
+CPU_FLAG_LIST += ERRATA_X1_1688305
+
+# Flag to apply erratum 1827429 workaround during reset. This erratum applies
+# to revisions r0p0 - r1p0 of the X1 cpu and is fixed in r1p1.
+CPU_FLAG_LIST += ERRATA_X1_1827429
+
+# Flag to apply T32 CLREX workaround during reset. This erratum applies
+# only to r0p0 and r1p0 of the Neoverse N1 cpu.
+CPU_FLAG_LIST += ERRATA_N1_1043202
+
+# Flag to apply erratum 1073348 workaround during reset. This erratum applies
+# only to revisions r0p0 and r1p0 of the Neoverse N1 cpu.
+CPU_FLAG_LIST += ERRATA_N1_1073348
+
+# Flag to apply erratum 1130799 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Neoverse N1 cpu.
+CPU_FLAG_LIST += ERRATA_N1_1130799
+
+# Flag to apply erratum 1165347 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Neoverse N1 cpu.
+CPU_FLAG_LIST += ERRATA_N1_1165347
+
+# Flag to apply erratum 1207823 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Neoverse N1 cpu.
+CPU_FLAG_LIST += ERRATA_N1_1207823
+
+# Flag to apply erratum 1220197 workaround during reset. This erratum applies
+# only to revision <= r2p0 of the Neoverse N1 cpu.
+CPU_FLAG_LIST += ERRATA_N1_1220197
+
+# Flag to apply erratum 1257314 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Neoverse N1 cpu.
+CPU_FLAG_LIST += ERRATA_N1_1257314
+
+# Flag to apply erratum 1262606 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Neoverse N1 cpu.
+CPU_FLAG_LIST += ERRATA_N1_1262606
+
+# Flag to apply erratum 1262888 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Neoverse N1 cpu.
+CPU_FLAG_LIST += ERRATA_N1_1262888
+
+# Flag to apply erratum 1275112 workaround during reset. This erratum applies
+# only to revision <= r3p0 of the Neoverse N1 cpu.
+CPU_FLAG_LIST += ERRATA_N1_1275112
+
+# Flag to apply erratum 1315703 workaround during reset. This erratum applies
+# to revisions before r3p1 of the Neoverse N1 cpu.
+CPU_FLAG_LIST += ERRATA_N1_1315703
+
+# Flag to apply erratum 1542419 workaround during reset. This erratum applies
+# to revisions r3p0 - r4p0 of the Neoverse N1 cpu.
+CPU_FLAG_LIST += ERRATA_N1_1542419
+
+# Flag to apply erratum 1868343 workaround during reset. This erratum applies
+# to revision <= r4p0 of the Neoverse N1 cpu.
+CPU_FLAG_LIST += ERRATA_N1_1868343
+
+# Flag to apply erratum 1946160 workaround during reset. This erratum applies
+# to revisions r3p0, r3p1, r4p0, and r4p1 of the Neoverse N1 cpu. The issue
+# exists in revisions r0p0, r1p0, and r2p0 as well but there is no workaround.
+CPU_FLAG_LIST += ERRATA_N1_1946160
+
+# Flag to apply erratum 2743102 workaround during powerdown. This erratum
+# applies to all revisions <= r4p1 of the Neoverse N1 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_N1_2743102
+
+# Flag to apply erratum 1618635 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse V1 cpu and was fixed in revision r1p0.
+CPU_FLAG_LIST += ERRATA_V1_1618635
+
+# Flag to apply erratum 1774420 workaround during reset. This erratum applies
+# to revisions r0p0 and r1p0 of the Neoverse V1 core, and was fixed in r1p1.
+CPU_FLAG_LIST += ERRATA_V1_1774420
+
+# Flag to apply erratum 1791573 workaround during reset. This erratum applies
+# to revisions r0p0 and r1p0 of the Neoverse V1 core, and was fixed in r1p1.
+CPU_FLAG_LIST += ERRATA_V1_1791573
+
+# Flag to apply erratum 1852267 workaround during reset. This erratum applies
+# to revisions r0p0 and r1p0 of the Neoverse V1 core, and was fixed in r1p1.
+CPU_FLAG_LIST += ERRATA_V1_1852267
+
+# Flag to apply erratum 1925756 workaround during reset. This needs to be
+# enabled for r0p0, r1p0, and r1p1 of the Neoverse V1 core, it is still open.
+CPU_FLAG_LIST += ERRATA_V1_1925756
+
+# Flag to apply erratum 1940577 workaround during reset. This erratum applies
+# to revisions r1p0 and r1p1 of the Neoverse V1 cpu.
+CPU_FLAG_LIST += ERRATA_V1_1940577
+
+# Flag to apply erratum 1966096 workaround during reset. This erratum applies
+# to revisions r1p0 and r1p1 of the Neoverse V1 CPU and is open. This issue
+# exists in r0p0 as well but there is no workaround for that revision.
+CPU_FLAG_LIST += ERRATA_V1_1966096
+
+# Flag to apply erratum 2139242 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1 of the Neoverse V1 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_V1_2139242
+
+# Flag to apply erratum 2108267 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r1p1 of the Neoverse V1 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_V1_2108267
+
+# Flag to apply erratum 2216392 workaround during reset. This erratum applies
+# to revisions r1p0 and r1p1 of the Neoverse V1 cpu and is still open. This
+# issue exists in r0p0 as well but there is no workaround for that revision.
+CPU_FLAG_LIST += ERRATA_V1_2216392
+
+# Flag to apply erratum 2294912 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1 and r1p2 of the Neoverse V1 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_V1_2294912
+
+# Flag to apply erratum 2372203 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r1p1 of the Neoverse V1 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_V1_2372203
+
+# Flag to apply erratum 2701953 workaround to non-arm interconnect ip. This
+# erratum applies to revisions r0p0, r1p0, r1p1 of the Neoverse V1 cpu,
+# it is fixed in r1p2.
+CPU_FLAG_LIST += ERRATA_V1_2701953
+
+# Flag to apply erratum 2743093 workaround during powerdown. This erratum
+# applies to revisions r0p0, r1p0, r1p1 and r1p2 of the Neoverse V1 cpu and is
+# still open.
+CPU_FLAG_LIST += ERRATA_V1_2743093
+
+# Flag to apply erratum 2743233 workaround during powerdown. This erratum
+# applies to revisions r0p0, r1p0, r1p1 and r1p2 of the Neoverse V1 cpu and is
+# still open.
+CPU_FLAG_LIST += ERRATA_V1_2743233
+
+# Flag to apply erratum 2779461 workaround during powerdown. This erratum
+# applies to revisions r0p0, r1p0, r1p1 and r1p2 of the Neoverse V1 cpu and is
+# still open.
+CPU_FLAG_LIST += ERRATA_V1_2779461
+
+# Flag to apply erratum 1987031 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_A710_1987031
+
+# Flag to apply erratum 2081180 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_A710_2081180
+
+# Flag to apply erratum 2083908 workaround during reset. This erratum applies
+# to revision r2p0 of the Cortex-A710 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_A710_2083908
+
+# Flag to apply erratum 2058056 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r2p0 and r2p1 of the Cortex-A710 cpu and is still
+# open.
+CPU_FLAG_LIST += ERRATA_A710_2058056
+
+# Flag to apply erratum 2055002 workaround during reset. This erratum applies
+# to revisions r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_A710_2055002
+
+# Flag to apply erratum 2017096 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_A710_2017096
+
+# Flag to apply erratum 2267065 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+CPU_FLAG_LIST += ERRATA_A710_2267065
+
+# Flag to apply erratum 2136059 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+CPU_FLAG_LIST += ERRATA_A710_2136059
+
+# Flag to apply erratum 2147715 workaround during reset. This erratum applies
+# to revision r2p0 of the Cortex-A710 CPU and is fixed in revision r2p1.
+CPU_FLAG_LIST += ERRATA_A710_2147715
+
+# Flag to apply erratum 2216384 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+CPU_FLAG_LIST += ERRATA_A710_2216384
+
+# Flag to apply erratum 2282622 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r2p0 and r2p1 of the Cortex-A710 cpu and is still
+# open.
+CPU_FLAG_LIST += ERRATA_A710_2282622
+
+# Flag to apply erratum 2291219 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+CPU_FLAG_LIST += ERRATA_A710_2291219
+
+# Flag to apply erratum 2008768 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+CPU_FLAG_LIST += ERRATA_A710_2008768
+
+# Flag to apply erratum 2371105 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-A710 cpu and is fixed in r2p1.
+CPU_FLAG_LIST += ERRATA_A710_2371105
+
+# Flag to apply erratum 2701952 workaround for non-arm interconnect ip. This
+# erratum applies to revisions r0p0, r1p0, r2p0, r2p1 of the Cortex-A710 cpu
+# and is still open.
+CPU_FLAG_LIST += ERRATA_A710_2701952
+
+# Flag to apply erratum 2742423 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r2p0 and r2p1 of the Cortex-A710 cpu and is still
+# open.
+CPU_FLAG_LIST += ERRATA_A710_2742423
+
+# Flag to apply erratum 2768515 workaround during power down. This erratum
+# applies to revisions r0p0, r1p0, r2p0 and r2p1 of the Cortex-A710 cpu and is
+# still open.
+CPU_FLAG_LIST += ERRATA_A710_2768515
+
+# Flag to apply erratum 2002655 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_N2_2002655
+
+# Flag to apply erratum 2009478 workaround during powerdown. This erratum
+# applies to revision r0p0 of the Neoverse N2 cpu, it is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_N2_2009478
+
+# Flag to apply erratum 2067956 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_N2_2067956
+
+# Flag to apply erratum 2025414 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_N2_2025414
+
+# Flag to apply erratum 2189731 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_N2_2189731
+
+# Flag to apply erratum 2138956 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_N2_2138956
+
+# Flag to apply erratum 2138953 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3 of the Neoverse N2 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_N2_2138953
+
+# Flag to apply erratum 2242415 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_N2_2242415
+
+# Flag to apply erratum 2138958 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_N2_2138958
+
+# Flag to apply erratum 2242400 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_N2_2242400
+
+# Flag to apply erratum 2280757 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_N2_2280757
+
+# Flag to apply erratum 2326639 workaround during powerdown. This erratum
+# applies to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_N2_2326639
+
+# Flag to apply erratum 2340933 workaround during reset. This erratum
+# applies to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_N2_2340933
+
+# Flag to apply erratum 2346952 workaround during reset. This erratum applies
+# to r0p0, r0p1, r0p2 of the Neoverse N2 cpu, it is fixed in r0p3.
+CPU_FLAG_LIST += ERRATA_N2_2346952
+
+# Flag to apply erratum 2376738 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3 of the Neoverse N2 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_N2_2376738
+
+# Flag to apply erratum 2388450 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse N2 cpu, it is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_N2_2388450
+
+# Flag to apply erratum 2728475 workaround for non-arm interconnect ip. This
+# erratum applies to r0p0, r0p1, r0p2 of the Neoverse N2 cpu, it is fixed in
+# r0p3.
+CPU_FLAG_LIST += ERRATA_N2_2728475
+
+# Flag to apply erratum 2743014 workaround during reset. This erratum applies
+# to r0p0, r0p1, r0p2 of the Neoverse N2 cpu, it is fixed in r0p3.
+CPU_FLAG_LIST += ERRATA_N2_2743014
+
+# Flag to apply erratum 2743089 workaround during powerdown. This erratum
+# applies to all revisions <= r0p2 of the Neoverse N2 cpu, it is fixed in r0p3.
+CPU_FLAG_LIST += ERRATA_N2_2743089
+
+# Flag to apply erratum 2779511 workaround during reset. This erratum applies
+# to r0p0, r0p1, r0p2 of the Neoverse N2 cpu, it is fixed in r0p3.
+CPU_FLAG_LIST += ERRATA_N2_2779511
+
+# Flag to apply erratum 2002765 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, and r2p0 of the Cortex-X2 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_X2_2002765
+
+# Flag to apply erratum 2058056 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r2p0 and r2p1 of the Cortex-X2 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_X2_2058056
+
+# Flag to apply erratum 2083908 workaround during reset. This erratum applies
+# to revision r2p0 of the Cortex-X2 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_X2_2083908
+
+# Flag to apply erratum 2017096 workaround during reset. This erratum applies
+# only to revisions r0p0, r1p0 and r2p0 of the Cortex-X2 cpu, it is fixed in
+# r2p1.
+CPU_FLAG_LIST += ERRATA_X2_2017096
+
+# Flag to apply erratum 2081180 workaround during reset. This erratum applies
+# only to revisions r0p0, r1p0 and r2p0 of the Cortex-X2 cpu, it is fixed in
+# r2p1.
+CPU_FLAG_LIST += ERRATA_X2_2081180
+
+# Flag to apply erratum 2216384 workaround during reset. This erratum applies
+# only to revisions r0p0, r1p0 and r2p0 of the Cortex-X2 cpu, it is fixed in
+# r2p1.
+CPU_FLAG_LIST += ERRATA_X2_2216384
+
+# Flag to apply erratum 2147715 workaround during reset. This erratum applies
+# only to revision r2p0 of the Cortex-X2 cpu, it is fixed in r2p1.
+CPU_FLAG_LIST += ERRATA_X2_2147715
+
+# Flag to apply erratum 2282622 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r2p0 and r2p1 of the Cortex-X2 cpu and is still
+# open.
+CPU_FLAG_LIST += ERRATA_X2_2282622
+
+# Flag to apply erratum 2371105 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0 and r2p0 of the Cortex-X2 cpu and is fixed in r2p1.
+CPU_FLAG_LIST += ERRATA_X2_2371105
+
+# Flag to apply erratum 2701952 workaround for non-arm interconnect ip. This
+# erratum applies to revisions r0p0, r1p0, r2p0, r2p1 of the Cortex-X2 cpu
+# and is still open.
+CPU_FLAG_LIST += ERRATA_X2_2701952
+
+# Flag to apply erratum 2742423 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r2p0 and r2p1 of the Cortex-X2 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_X2_2742423
+
+# Flag to apply erratum 2768515 workaround during power down. This erratum
+# applies to revisions r0p0, r1p0, r2p0 and r2p1 of the Cortex-X2 cpu and is
+# still open.
+CPU_FLAG_LIST += ERRATA_X2_2768515
+
+# Flag to apply erratum 2070301 workaround on reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1 and r1p2 of the Cortex-X3 cpu and is
+# still open.
+CPU_FLAG_LIST += ERRATA_X3_2070301
+
+# Flag to apply erratum 2313909 workaround on powerdown. This erratum applies
+# to revisions r0p0 and r1p0 of the Cortex-X3 cpu, it is fixed in r1p1.
+CPU_FLAG_LIST += ERRATA_X3_2313909
+
+# Flag to apply erratum 2615812 workaround on powerdown. This erratum applies
+# to revisions r0p0, r1p0, r1p1 of the Cortex-X3 cpu, it is still open.
+CPU_FLAG_LIST += ERRATA_X3_2615812
+
+# Flag to apply erratum 2742421 workaround on reset. This erratum applies
+# to revisions r0p0, r1p0 and r1p1 of the Cortex-X3 cpu, it is fixed in r1p2.
+CPU_FLAG_LIST += ERRATA_X3_2742421
+
+# Flag to apply erratum 1922240 workaround during reset. This erratum applies
+# to revision r0p0 of the Cortex-A510 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_A510_1922240
+
+# Flag to apply erratum 2288014 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3 and r1p0 of the Cortex-A510 cpu and is
+# fixed in r1p1.
+CPU_FLAG_LIST += ERRATA_A510_2288014
+
+# Flag to apply erratum 2042739 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1 and r0p2 of the Cortex-A510 cpu and is fixed in r0p3.
+CPU_FLAG_LIST += ERRATA_A510_2042739
+
+# Flag to apply erratum 2041909 workaround during reset. This erratum applies
+# to revision r0p2 of the Cortex-A510 cpu and is fixed in r0p3. The issue is
+# present in r0p0 and r0p1 but there is no workaround for those revisions.
+CPU_FLAG_LIST += ERRATA_A510_2041909
+
+# Flag to apply erratum 2080326 workaround during reset. This erratum applies
+# to revision r0p2 of the Cortex-A510 cpu and is fixed in r0p3. The issue is
+# also present in r0p0 and r0p1 but there is no workaround for those revisions.
+CPU_FLAG_LIST += ERRATA_A510_2080326
+
+# Flag to apply erratum 2250311 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3 and r1p0, and is fixed in r1p1.
+CPU_FLAG_LIST += ERRATA_A510_2250311
+
+# Flag to apply erratum 2218950 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3 and r1p0, and is fixed in r1p1.
+CPU_FLAG_LIST += ERRATA_A510_2218950
+
+# Flag to apply erratum 2172148 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3 and r1p0, and is fixed in r1p1.
+CPU_FLAG_LIST += ERRATA_A510_2172148
+
+# Flag to apply erratum 2347730 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3, r1p0 and r1p1 of the Cortex-A510 CPU,
+# and is fixed in r1p2.
+CPU_FLAG_LIST += ERRATA_A510_2347730
+
+# Flag to apply erratum 2371937 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3, r1p0, and r1p1. It is fixed in r1p2.
+CPU_FLAG_LIST += ERRATA_A510_2371937
+
+# Flag to apply erratum 2666669 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1, r0p2, r0p3, r1p0, and r1p1. It is fixed in r1p2.
+CPU_FLAG_LIST += ERRATA_A510_2666669
+
+# Flag to apply erratum 2684597 workaround during powerdown. This erratum
+# applies to revisions r0p0, r0p1, r0p2, r0p3, r1p0, r1p1 and r1p2 of the
+# Cortex-A510 cpu and is fixed in r1p3.
+CPU_FLAG_LIST += ERRATA_A510_2684597
+
+# Flag to apply erratum 2331132 workaround during reset. This erratum applies
+# to revisions r0p0, r0p1 and r0p2. It is still open.
+CPU_FLAG_LIST += ERRATA_V2_2331132
+
+# Flag to apply erratum 2719103 workaround for non-arm interconnect ip. This
+# erratum applies to revisions r0p0, r0p1. Fixed in r0p2.
+CPU_FLAG_LIST += ERRATA_V2_2719103
+
+# Flag to apply erratum 2719105 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1. It is fixed in r0p2.
+CPU_FLAG_LIST += ERRATA_V2_2719105
+
+# Flag to apply erratum 2743011 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1. It is fixed in r0p2.
+CPU_FLAG_LIST += ERRATA_V2_2743011
+
+# Flag to apply erratum 2779510 workaround during reset. This erratum applies
+# to revisions r0p0 and r0p1. It is fixed in r0p2.
+CPU_FLAG_LIST += ERRATA_V2_2779510
+
+# Flag to apply erratum 2801372 workaround for all configurations.
+# This erratum applies to revisions r0p0, r0p1. Fixed in r0p2.
+CPU_FLAG_LIST += ERRATA_V2_2801372
+
+# Flag to apply erratum 2701951 workaround for non-arm interconnect ip.
+# This erratum applies to revisions r0p0, r1p0, and r1p1. It is fixed in r1p2.
+CPU_FLAG_LIST += ERRATA_A715_2701951
+
+# Flag to apply DSU erratum 798953. This erratum applies to DSU revision r0p0.
+# Applying the workaround results in higher DSU power consumption on idle.
+CPU_FLAG_LIST += ERRATA_DSU_798953
+
+# Flag to apply DSU erratum 936184. This erratum applies to DSUs containing
+# the ACP interface and revision < r2p0. Applying the workaround results in
+# higher DSU power consumption on idle.
+CPU_FLAG_LIST += ERRATA_DSU_936184
+
+# Flag to apply DSU erratum 2313941. This erratum applies to DSU revisions
+# r0p0, r1p0, r2p0, r2p1, r3p0, r3p1 and is still open. Applying the workaround
+# results in higher DSU power consumption on idle.
+CPU_FLAG_LIST += ERRATA_DSU_2313941
+
+ifneq (${DYNAMIC_WORKAROUND_CVE_2018_3639},0)
+ ifeq (${WORKAROUND_CVE_2018_3639},0)
+ $(error "Error: WORKAROUND_CVE_2018_3639 must be 1 if DYNAMIC_WORKAROUND_CVE_2018_3639 is 1")
+ endif
+endif
+
+# process all flags
+$(eval $(call default_zeros, $(CPU_FLAG_LIST)))
+$(eval $(call add_defines, $(CPU_FLAG_LIST)))
+$(eval $(call assert_booleans, $(CPU_FLAG_LIST)))
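As their names suggest, the three helper macro calls above default every name in CPU_FLAG_LIST to 0, add it to the build defines, and assert that its value is boolean, so a platform enables a given workaround by setting the flag to 1 in its platform makefile or on the make command line before this file is processed. The sketch below is only an illustration of how such a 0/1 define is then consumed by CPU support code; the file and function names are hypothetical and not part of the sources, and only the ERRATA_N1_1946160 flag name is taken from the list above.

/* illustrative_reset.c - hypothetical sketch, not part of the TF-A tree.
 * ERRATA_N1_1946160 arrives as a -D define with value 0 or 1, so the
 * workaround body can be compiled in or out with a preprocessor guard. */
void apply_reset_workarounds(void)
{
#if ERRATA_N1_1946160
	/* erratum-specific register programming would go here */
#endif
}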
+
+# Errata build flags
+ifneq (${ERRATA_A53_843419},0)
+TF_LDFLAGS_aarch64 += --fix-cortex-a53-843419
+endif
+
+ifneq (${ERRATA_A53_835769},0)
+TF_CFLAGS_aarch64 += -mfix-cortex-a53-835769
+TF_LDFLAGS_aarch64 += --fix-cortex-a53-835769
+endif
+
+ifneq ($(filter 1,${ERRATA_A53_1530924} ${ERRATA_A55_1530923} \
+ ${ERRATA_A57_1319537} ${ERRATA_A72_1319367} ${ERRATA_A76_1165522}),)
+ERRATA_SPECULATIVE_AT := 1
+else
+ERRATA_SPECULATIVE_AT := 0
+endif
diff --git a/lib/cpus/errata_report.c b/lib/cpus/errata_report.c
new file mode 100644
index 0000000..4e9bdfc
--- /dev/null
+++ b/lib/cpus/errata_report.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* Runtime firmware routines to report errata status for the current CPU. */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/cpus/cpu_ops.h>
+#include <lib/cpus/errata.h>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/spinlock.h>
+
+#ifdef IMAGE_BL1
+# define BL_STRING "BL1"
+#elif defined(__aarch64__) && defined(IMAGE_BL31)
+# define BL_STRING "BL31"
+#elif !defined(__aarch64__) && defined(IMAGE_BL32)
+# define BL_STRING "BL32"
+#elif defined(IMAGE_BL2) && RESET_TO_BL2
+# define BL_STRING "BL2"
+#else
+# error This image should not be printing errata status
+#endif
+
+/* Errata format: BL stage, CPU, errata ID, message */
+#define ERRATA_FORMAT "%s: %s: CPU workaround for %s was %s\n"
+
+#define CVE_FORMAT "%s: %s: CPU workaround for CVE %u_%u was %s\n"
+#define ERRATUM_FORMAT "%s: %s: CPU workaround for erratum %u was %s\n"
+
+
+static __unused void print_status(int status, char *cpu_str, uint16_t cve, uint32_t id)
+{
+ if (status == ERRATA_MISSING) {
+ if (cve) {
+ WARN(CVE_FORMAT, BL_STRING, cpu_str, cve, id, "missing!");
+ } else {
+ WARN(ERRATUM_FORMAT, BL_STRING, cpu_str, id, "missing!");
+ }
+ } else if (status == ERRATA_APPLIES) {
+ if (cve) {
+ INFO(CVE_FORMAT, BL_STRING, cpu_str, cve, id, "applied");
+ } else {
+ INFO(ERRATUM_FORMAT, BL_STRING, cpu_str, id, "applied");
+ }
+ } else {
+ if (cve) {
+ VERBOSE(CVE_FORMAT, BL_STRING, cpu_str, cve, id, "not applied");
+ } else {
+ VERBOSE(ERRATUM_FORMAT, BL_STRING, cpu_str, id, "not applied");
+ }
+ }
+}
+
+#if !REPORT_ERRATA
+void print_errata_status(void) {}
+#else /* !REPORT_ERRATA */
+/*
+ * New errata status message printer
+ * The order checking function is hidden behind the FEATURE_DETECTION flag to
+ * save space. This functionality is only useful on development and platform
+ * bringup builds, when FEATURE_DETECTION should be used anyway
+ */
+void __unused generic_errata_report(void)
+{
+ struct cpu_ops *cpu_ops = get_cpu_ops_ptr();
+ struct erratum_entry *entry = cpu_ops->errata_list_start;
+ struct erratum_entry *end = cpu_ops->errata_list_end;
+ long rev_var = cpu_get_rev_var();
+#if FEATURE_DETECTION
+ uint32_t last_erratum_id = 0;
+ uint16_t last_cve_yr = 0;
+ bool check_cve = false;
+ bool failed = false;
+#endif /* FEATURE_DETECTION */
+
+ for (; entry != end; entry += 1) {
+ uint64_t status = entry->check_func(rev_var);
+
+ assert(entry->id != 0);
+
+ /*
+ * Errata workaround has not been compiled in. If the errata
+ * would have applied had it been compiled in, print its status
+ * as missing.
+ */
+ if (status == ERRATA_APPLIES && entry->chosen == 0) {
+ status = ERRATA_MISSING;
+ }
+
+ print_status(status, cpu_ops->cpu_str, entry->cve, entry->id);
+
+#if FEATURE_DETECTION
+ if (entry->cve) {
+ if (last_cve_yr > entry->cve ||
+ (last_cve_yr == entry->cve && last_erratum_id >= entry->id)) {
+ ERROR("CVE %u_%u was out of order!\n",
+ entry->cve, entry->id);
+ failed = true;
+ }
+ check_cve = true;
+ last_cve_yr = entry->cve;
+ } else {
+ if (last_erratum_id >= entry->id || check_cve) {
+ ERROR("Erratum %u was out of order!\n",
+ entry->id);
+ failed = true;
+ }
+ }
+ last_erratum_id = entry->id;
+#endif /* FEATURE_DETECTION */
+ }
+
+#if FEATURE_DETECTION
+ /*
+ * enforce errata and CVEs are in ascending order and that CVEs are
+ * after errata
+ */
+ assert(!failed);
+#endif /* FEATURE_DETECTION */
+}
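For readers following the loop above, the record it walks can be summarised as below. This is a sketch reconstructed purely from the field accesses in this file (check_func, id, cve and chosen); the authoritative struct erratum_entry definition lives in the cpu_ops/errata headers included at the top and may contain additional fields or a different layout.

/* Sketch of the per-erratum record walked by generic_errata_report(),
 * inferred from the accesses above; not the authoritative definition. */
#include <stdint.h>

struct erratum_entry_sketch {
	/* returns ERRATA_APPLIES or ERRATA_NOT_APPLIES for the CPU
	 * revision/variant value passed in */
	uint64_t (*check_func)(long rev_var);
	uint32_t id;		/* erratum number, or the CVE's numeric part */
	uint16_t cve;		/* CVE year, 0 for a regular erratum */
	uint8_t chosen;		/* non-zero if the workaround was compiled in */
};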
+
+/*
+ * Returns whether errata needs to be reported. Passed arguments are private to
+ * a CPU type.
+ */
+static __unused int errata_needs_reporting(spinlock_t *lock, uint32_t *reported)
+{
+ bool report_now;
+
+ /* If already reported, return false. */
+ if (*reported != 0U)
+ return 0;
+
+ /*
+ * Acquire lock. Determine whether status needs reporting, and then mark
+ * report status to true.
+ */
+ spin_lock(lock);
+ report_now = (*reported == 0U);
+ if (report_now)
+ *reported = 1;
+ spin_unlock(lock);
+
+ return report_now;
+}
+
+/*
+ * Function to print errata status for the calling CPU (and others of the same
+ * type). Must be called only:
+ * - when MMU and data caches are enabled;
+ * - after cpu_ops have been initialized in per-CPU data.
+ */
+void print_errata_status(void)
+{
+ struct cpu_ops *cpu_ops;
+#ifdef IMAGE_BL1
+ /*
+ * BL1 doesn't have per-CPU data. So retrieve the CPU operations
+ * directly.
+ */
+ cpu_ops = get_cpu_ops_ptr();
+
+ if (cpu_ops->errata_func != NULL) {
+ cpu_ops->errata_func();
+ }
+#else /* IMAGE_BL1 */
+ cpu_ops = (void *) get_cpu_data(cpu_ops_ptr);
+
+ assert(cpu_ops != NULL);
+
+ if (cpu_ops->errata_func == NULL) {
+ return;
+ }
+
+ if (errata_needs_reporting(cpu_ops->errata_lock, cpu_ops->errata_reported)) {
+ cpu_ops->errata_func();
+ }
+#endif /* IMAGE_BL1 */
+}
+
+/*
+ * Old errata status message printer
+ * TODO: remove once all cpus have been converted to the new printing method
+ */
+void __unused errata_print_msg(unsigned int status, const char *cpu, const char *id)
+{
+ /* Errata status strings */
+ static const char *const errata_status_str[] = {
+ [ERRATA_NOT_APPLIES] = "not applied",
+ [ERRATA_APPLIES] = "applied",
+ [ERRATA_MISSING] = "missing!"
+ };
+ static const char *const __unused bl_str = BL_STRING;
+ const char *msg __unused;
+
+
+ assert(status < ARRAY_SIZE(errata_status_str));
+ assert(cpu != NULL);
+ assert(id != NULL);
+
+ msg = errata_status_str[status];
+
+ switch (status) {
+ case ERRATA_NOT_APPLIES:
+ VERBOSE(ERRATA_FORMAT, bl_str, cpu, id, msg);
+ break;
+
+ case ERRATA_APPLIES:
+ INFO(ERRATA_FORMAT, bl_str, cpu, id, msg);
+ break;
+
+ case ERRATA_MISSING:
+ WARN(ERRATA_FORMAT, bl_str, cpu, id, msg);
+ break;
+
+ default:
+ WARN(ERRATA_FORMAT, bl_str, cpu, id, "unknown");
+ break;
+ }
+}
+#endif /* !REPORT_ERRATA */