author     Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-28 09:13:47 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-28 09:13:47 +0000
commit     102b0d2daa97dae68d3eed54d8fe37a9cc38a892 (patch)
tree       bcf648efac40ca6139842707f0eba5a4496a6dd2 /include/arch/aarch64
parent     Initial commit. (diff)
download   arm-trusted-firmware-upstream/2.8.0+dfsg.tar.xz
           arm-trusted-firmware-upstream/2.8.0+dfsg.zip
Adding upstream version 2.8.0+dfsg. (refs: upstream/2.8.0+dfsg, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include/arch/aarch64')
-rw-r--r--	include/arch/aarch64/arch.h			1316
-rw-r--r--	include/arch/aarch64/arch_features.h		 268
-rw-r--r--	include/arch/aarch64/arch_helpers.h		 671
-rw-r--r--	include/arch/aarch64/asm_macros.S		 244
-rw-r--r--	include/arch/aarch64/assert_macros.S		  29
-rw-r--r--	include/arch/aarch64/console_macros.S		  54
-rw-r--r--	include/arch/aarch64/el2_common_macros.S	 422
-rw-r--r--	include/arch/aarch64/el3_common_macros.S	 570
-rw-r--r--	include/arch/aarch64/smccc_helpers.h		 144
9 files changed, 3718 insertions, 0 deletions
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
new file mode 100644
index 0000000..f63e923
--- /dev/null
+++ b/include/arch/aarch64/arch.h
@@ -0,0 +1,1316 @@
+/*
+ * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_H
+#define ARCH_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * MIDR bit definitions
+ ******************************************************************************/
+#define MIDR_IMPL_MASK U(0xff)
+#define MIDR_IMPL_SHIFT U(0x18)
+#define MIDR_VAR_SHIFT U(20)
+#define MIDR_VAR_BITS U(4)
+#define MIDR_VAR_MASK U(0xf)
+#define MIDR_REV_SHIFT U(0)
+#define MIDR_REV_BITS U(4)
+#define MIDR_REV_MASK U(0xf)
+#define MIDR_PN_MASK U(0xfff)
+#define MIDR_PN_SHIFT U(0x4)
+
+/*******************************************************************************
+ * MPIDR macros
+ ******************************************************************************/
+#define MPIDR_MT_MASK (ULL(1) << 24)
+#define MPIDR_CPU_MASK MPIDR_AFFLVL_MASK
+#define MPIDR_CLUSTER_MASK (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
+#define MPIDR_AFFINITY_BITS U(8)
+#define MPIDR_AFFLVL_MASK ULL(0xff)
+#define MPIDR_AFF0_SHIFT U(0)
+#define MPIDR_AFF1_SHIFT U(8)
+#define MPIDR_AFF2_SHIFT U(16)
+#define MPIDR_AFF3_SHIFT U(32)
+#define MPIDR_AFF_SHIFT(_n) MPIDR_AFF##_n##_SHIFT
+#define MPIDR_AFFINITY_MASK ULL(0xff00ffffff)
+#define MPIDR_AFFLVL_SHIFT U(3)
+#define MPIDR_AFFLVL0 ULL(0x0)
+#define MPIDR_AFFLVL1 ULL(0x1)
+#define MPIDR_AFFLVL2 ULL(0x2)
+#define MPIDR_AFFLVL3 ULL(0x3)
+#define MPIDR_AFFLVL(_n) MPIDR_AFFLVL##_n
+#define MPIDR_AFFLVL0_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL1_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL2_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL3_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK)
+/*
+ * The MPIDR_MAX_AFFLVL count starts from 0. Take care to add one when
+ * using this macro to define array sizes.
+ * TODO: Only the first three affinity levels are supported for now.
+ */
+#define MPIDR_MAX_AFFLVL U(2)
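/*
 * Illustrative usage (editor's sketch, not part of the upstream header):
 * because the affinity-level count starts at 0, an array indexed by affinity
 * level needs MPIDR_MAX_AFFLVL + 1 entries. The EXAMPLE_ name below is
 * hypothetical and used only for illustration.
 */
#define EXAMPLE_NUM_AFF_LEVELS		(MPIDR_MAX_AFFLVL + U(1))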
+
+#define MPID_MASK (MPIDR_MT_MASK | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF3_SHIFT) | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT) | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT) | \
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF0_SHIFT))
+
+#define MPIDR_AFF_ID(mpid, n) \
+ (((mpid) >> MPIDR_AFF_SHIFT(n)) & MPIDR_AFFLVL_MASK)
+
+/*
+ * An invalid MPID. This value can be used by functions that return an MPID to
+ * indicate an error.
+ */
+#define INVALID_MPID U(0xFFFFFFFF)
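/*
 * Illustrative usage (editor's sketch, not part of the upstream header): a
 * hypothetical helper that treats INVALID_MPID as the error sentinel
 * described above.
 */
static inline int example_mpid_is_valid(unsigned int mpid)
{
	return mpid != INVALID_MPID;
}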
+
+/*******************************************************************************
+ * Definitions for CPU system register interface to GICv3
+ ******************************************************************************/
+#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
+#define ICC_SGI1R S3_0_C12_C11_5
+#define ICC_ASGI1R S3_0_C12_C11_6
+#define ICC_SRE_EL1 S3_0_C12_C12_5
+#define ICC_SRE_EL2 S3_4_C12_C9_5
+#define ICC_SRE_EL3 S3_6_C12_C12_5
+#define ICC_CTLR_EL1 S3_0_C12_C12_4
+#define ICC_CTLR_EL3 S3_6_C12_C12_4
+#define ICC_PMR_EL1 S3_0_C4_C6_0
+#define ICC_RPR_EL1 S3_0_C12_C11_3
+#define ICC_IGRPEN1_EL3 S3_6_c12_c12_7
+#define ICC_IGRPEN0_EL1 S3_0_c12_c12_6
+#define ICC_HPPIR0_EL1 S3_0_c12_c8_2
+#define ICC_HPPIR1_EL1 S3_0_c12_c12_2
+#define ICC_IAR0_EL1 S3_0_c12_c8_0
+#define ICC_IAR1_EL1 S3_0_c12_c12_0
+#define ICC_EOIR0_EL1 S3_0_c12_c8_1
+#define ICC_EOIR1_EL1 S3_0_c12_c12_1
+#define ICC_SGI0R_EL1 S3_0_c12_c11_7
+
+/*******************************************************************************
+ * Definitions for EL2 system registers for save/restore routine
+ ******************************************************************************/
+#define CNTPOFF_EL2 S3_4_C14_C0_6
+#define HAFGRTR_EL2 S3_4_C3_C1_6
+#define HDFGRTR_EL2 S3_4_C3_C1_4
+#define HDFGWTR_EL2 S3_4_C3_C1_5
+#define HFGITR_EL2 S3_4_C1_C1_6
+#define HFGRTR_EL2 S3_4_C1_C1_4
+#define HFGWTR_EL2 S3_4_C1_C1_5
+#define ICH_HCR_EL2 S3_4_C12_C11_0
+#define ICH_VMCR_EL2 S3_4_C12_C11_7
+#define MPAMVPM0_EL2 S3_4_C10_C6_0
+#define MPAMVPM1_EL2 S3_4_C10_C6_1
+#define MPAMVPM2_EL2 S3_4_C10_C6_2
+#define MPAMVPM3_EL2 S3_4_C10_C6_3
+#define MPAMVPM4_EL2 S3_4_C10_C6_4
+#define MPAMVPM5_EL2 S3_4_C10_C6_5
+#define MPAMVPM6_EL2 S3_4_C10_C6_6
+#define MPAMVPM7_EL2 S3_4_C10_C6_7
+#define MPAMVPMV_EL2 S3_4_C10_C4_1
+#define TRFCR_EL2 S3_4_C1_C2_1
+#define PMSCR_EL2 S3_4_C9_C9_0
+#define TFSR_EL2 S3_4_C5_C6_0
+
+/*******************************************************************************
+ * Generic timer memory mapped registers & offsets
+ ******************************************************************************/
+#define CNTCR_OFF U(0x000)
+#define CNTCV_OFF U(0x008)
+#define CNTFID_OFF U(0x020)
+
+#define CNTCR_EN (U(1) << 0)
+#define CNTCR_HDBG (U(1) << 1)
+#define CNTCR_FCREQ(x) ((x) << 8)
+
+/*******************************************************************************
+ * System register bit definitions
+ ******************************************************************************/
+/* CLIDR definitions */
+#define LOUIS_SHIFT U(21)
+#define LOC_SHIFT U(24)
+#define CTYPE_SHIFT(n) U(3 * (n - 1))
+#define CLIDR_FIELD_WIDTH U(3)
+
+/* CSSELR definitions */
+#define LEVEL_SHIFT U(1)
+
+/* Data cache set/way op type defines */
+#define DCISW U(0x0)
+#define DCCISW U(0x1)
+#if ERRATA_A53_827319
+#define DCCSW DCCISW
+#else
+#define DCCSW U(0x2)
+#endif
+
+/* ID_AA64PFR0_EL1 definitions */
+#define ID_AA64PFR0_EL0_SHIFT U(0)
+#define ID_AA64PFR0_EL1_SHIFT U(4)
+#define ID_AA64PFR0_EL2_SHIFT U(8)
+#define ID_AA64PFR0_EL3_SHIFT U(12)
+
+#define ID_AA64PFR0_AMU_SHIFT U(44)
+#define ID_AA64PFR0_AMU_MASK ULL(0xf)
+#define ID_AA64PFR0_AMU_NOT_SUPPORTED U(0x0)
+#define ID_AA64PFR0_AMU_V1 ULL(0x1)
+#define ID_AA64PFR0_AMU_V1P1 U(0x2)
+
+#define ID_AA64PFR0_ELX_MASK ULL(0xf)
+
+#define ID_AA64PFR0_GIC_SHIFT U(24)
+#define ID_AA64PFR0_GIC_WIDTH U(4)
+#define ID_AA64PFR0_GIC_MASK ULL(0xf)
+
+#define ID_AA64PFR0_SVE_SHIFT U(32)
+#define ID_AA64PFR0_SVE_MASK ULL(0xf)
+#define ID_AA64PFR0_SVE_SUPPORTED ULL(0x1)
+#define ID_AA64PFR0_SVE_LENGTH U(4)
+
+#define ID_AA64PFR0_SEL2_SHIFT U(36)
+#define ID_AA64PFR0_SEL2_MASK ULL(0xf)
+
+#define ID_AA64PFR0_MPAM_SHIFT U(40)
+#define ID_AA64PFR0_MPAM_MASK ULL(0xf)
+
+#define ID_AA64PFR0_DIT_SHIFT U(48)
+#define ID_AA64PFR0_DIT_MASK ULL(0xf)
+#define ID_AA64PFR0_DIT_LENGTH U(4)
+#define ID_AA64PFR0_DIT_SUPPORTED U(1)
+
+#define ID_AA64PFR0_CSV2_SHIFT U(56)
+#define ID_AA64PFR0_CSV2_MASK ULL(0xf)
+#define ID_AA64PFR0_CSV2_LENGTH U(4)
+#define ID_AA64PFR0_CSV2_2_SUPPORTED ULL(0x2)
+
+#define ID_AA64PFR0_FEAT_RME_SHIFT U(52)
+#define ID_AA64PFR0_FEAT_RME_MASK ULL(0xf)
+#define ID_AA64PFR0_FEAT_RME_LENGTH U(4)
+#define ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED U(0)
+#define ID_AA64PFR0_FEAT_RME_V1 U(1)
+
+#define ID_AA64PFR0_RAS_SHIFT U(28)
+#define ID_AA64PFR0_RAS_MASK ULL(0xf)
+#define ID_AA64PFR0_RAS_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR0_RAS_LENGTH U(4)
+
+/* Exception level handling */
+#define EL_IMPL_NONE ULL(0)
+#define EL_IMPL_A64ONLY ULL(1)
+#define EL_IMPL_A64_A32 ULL(2)
+
+/* ID_AA64DFR0_EL1.TraceVer definitions */
+#define ID_AA64DFR0_TRACEVER_SHIFT U(4)
+#define ID_AA64DFR0_TRACEVER_MASK ULL(0xf)
+#define ID_AA64DFR0_TRACEVER_SUPPORTED ULL(1)
+#define ID_AA64DFR0_TRACEVER_LENGTH U(4)
+#define ID_AA64DFR0_TRACEFILT_SHIFT U(40)
+#define ID_AA64DFR0_TRACEFILT_MASK U(0xf)
+#define ID_AA64DFR0_TRACEFILT_SUPPORTED U(1)
+#define ID_AA64DFR0_TRACEFILT_LENGTH U(4)
+
+/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
+#define ID_AA64DFR0_PMS_SHIFT U(32)
+#define ID_AA64DFR0_PMS_MASK ULL(0xf)
+#define ID_AA64DFR0_SPE_SUPPORTED ULL(0x1)
+#define ID_AA64DFR0_SPE_NOT_SUPPORTED ULL(0x0)
+
+/* ID_AA64DFR0_EL1.TraceBuffer definitions */
+#define ID_AA64DFR0_TRACEBUFFER_SHIFT U(44)
+#define ID_AA64DFR0_TRACEBUFFER_MASK ULL(0xf)
+#define ID_AA64DFR0_TRACEBUFFER_SUPPORTED ULL(1)
+
+/* ID_AA64DFR0_EL1.MTPMU definitions (for ARMv8.6+) */
+#define ID_AA64DFR0_MTPMU_SHIFT U(48)
+#define ID_AA64DFR0_MTPMU_MASK ULL(0xf)
+#define ID_AA64DFR0_MTPMU_SUPPORTED ULL(1)
+
+/* ID_AA64DFR0_EL1.BRBE definitions */
+#define ID_AA64DFR0_BRBE_SHIFT U(52)
+#define ID_AA64DFR0_BRBE_MASK ULL(0xf)
+#define ID_AA64DFR0_BRBE_SUPPORTED ULL(1)
+
+/* ID_AA64ISAR0_EL1 definitions */
+#define ID_AA64ISAR0_RNDR_SHIFT U(60)
+#define ID_AA64ISAR0_RNDR_MASK ULL(0xf)
+
+/* ID_AA64ISAR1_EL1 definitions */
+#define ID_AA64ISAR1_EL1 S3_0_C0_C6_1
+
+#define ID_AA64ISAR1_GPI_SHIFT U(28)
+#define ID_AA64ISAR1_GPI_MASK ULL(0xf)
+#define ID_AA64ISAR1_GPA_SHIFT U(24)
+#define ID_AA64ISAR1_GPA_MASK ULL(0xf)
+
+#define ID_AA64ISAR1_API_SHIFT U(8)
+#define ID_AA64ISAR1_API_MASK ULL(0xf)
+#define ID_AA64ISAR1_APA_SHIFT U(4)
+#define ID_AA64ISAR1_APA_MASK ULL(0xf)
+
+#define ID_AA64ISAR1_SB_SHIFT U(36)
+#define ID_AA64ISAR1_SB_MASK ULL(0xf)
+#define ID_AA64ISAR1_SB_SUPPORTED ULL(0x1)
+#define ID_AA64ISAR1_SB_NOT_SUPPORTED ULL(0x0)
+
+/* ID_AA64ISAR2_EL1 definitions */
+#define ID_AA64ISAR2_EL1 S3_0_C0_C6_2
+
+#define ID_AA64ISAR2_GPA3_SHIFT U(8)
+#define ID_AA64ISAR2_GPA3_MASK ULL(0xf)
+
+#define ID_AA64ISAR2_APA3_SHIFT U(12)
+#define ID_AA64ISAR2_APA3_MASK ULL(0xf)
+
+/* ID_AA64MMFR0_EL1 definitions */
+#define ID_AA64MMFR0_EL1_PARANGE_SHIFT U(0)
+#define ID_AA64MMFR0_EL1_PARANGE_MASK ULL(0xf)
+
+#define PARANGE_0000 U(32)
+#define PARANGE_0001 U(36)
+#define PARANGE_0010 U(40)
+#define PARANGE_0011 U(42)
+#define PARANGE_0100 U(44)
+#define PARANGE_0101 U(48)
+#define PARANGE_0110 U(52)
+
+#define ID_AA64MMFR0_EL1_ECV_SHIFT U(60)
+#define ID_AA64MMFR0_EL1_ECV_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_ECV_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_ECV_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_ECV_SELF_SYNCH ULL(0x2)
+
+#define ID_AA64MMFR0_EL1_FGT_SHIFT U(56)
+#define ID_AA64MMFR0_EL1_FGT_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_FGT_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_FGT_NOT_SUPPORTED ULL(0x0)
+
+#define ID_AA64MMFR0_EL1_TGRAN4_SHIFT U(28)
+#define ID_AA64MMFR0_EL1_TGRAN4_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN4_NOT_SUPPORTED ULL(0xf)
+
+#define ID_AA64MMFR0_EL1_TGRAN64_SHIFT U(24)
+#define ID_AA64MMFR0_EL1_TGRAN64_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN64_NOT_SUPPORTED ULL(0xf)
+
+#define ID_AA64MMFR0_EL1_TGRAN16_SHIFT U(20)
+#define ID_AA64MMFR0_EL1_TGRAN16_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN16_NOT_SUPPORTED ULL(0x0)
+
+/* ID_AA64MMFR1_EL1 definitions */
+#define ID_AA64MMFR1_EL1_TWED_SHIFT U(32)
+#define ID_AA64MMFR1_EL1_TWED_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_TWED_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_TWED_NOT_SUPPORTED ULL(0x0)
+
+#define ID_AA64MMFR1_EL1_PAN_SHIFT U(20)
+#define ID_AA64MMFR1_EL1_PAN_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_PAN_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR1_EL1_PAN_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_PAN2_SUPPORTED ULL(0x2)
+#define ID_AA64MMFR1_EL1_PAN3_SUPPORTED ULL(0x3)
+
+#define ID_AA64MMFR1_EL1_VHE_SHIFT U(8)
+#define ID_AA64MMFR1_EL1_VHE_MASK ULL(0xf)
+
+#define ID_AA64MMFR1_EL1_HCX_SHIFT U(40)
+#define ID_AA64MMFR1_EL1_HCX_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_HCX_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_HCX_NOT_SUPPORTED ULL(0x0)
+
+/* ID_AA64MMFR2_EL1 definitions */
+#define ID_AA64MMFR2_EL1 S3_0_C0_C7_2
+
+#define ID_AA64MMFR2_EL1_ST_SHIFT U(28)
+#define ID_AA64MMFR2_EL1_ST_MASK ULL(0xf)
+
+#define ID_AA64MMFR2_EL1_CCIDX_SHIFT U(20)
+#define ID_AA64MMFR2_EL1_CCIDX_MASK ULL(0xf)
+#define ID_AA64MMFR2_EL1_CCIDX_LENGTH U(4)
+
+#define ID_AA64MMFR2_EL1_CNP_SHIFT U(0)
+#define ID_AA64MMFR2_EL1_CNP_MASK ULL(0xf)
+
+#define ID_AA64MMFR2_EL1_NV_SHIFT U(24)
+#define ID_AA64MMFR2_EL1_NV_MASK ULL(0xf)
+#define ID_AA64MMFR2_EL1_NV_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR2_EL1_NV_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR2_EL1_NV2_SUPPORTED ULL(0x2)
+
+/* ID_AA64PFR1_EL1 definitions */
+#define ID_AA64PFR1_EL1_SSBS_SHIFT U(4)
+#define ID_AA64PFR1_EL1_SSBS_MASK ULL(0xf)
+
+#define SSBS_UNAVAILABLE ULL(0) /* No architectural SSBS support */
+
+#define ID_AA64PFR1_EL1_BT_SHIFT U(0)
+#define ID_AA64PFR1_EL1_BT_MASK ULL(0xf)
+
+#define BTI_IMPLEMENTED ULL(1) /* The BTI mechanism is implemented */
+
+#define ID_AA64PFR1_EL1_MTE_SHIFT U(8)
+#define ID_AA64PFR1_EL1_MTE_MASK ULL(0xf)
+
+#define ID_AA64PFR1_EL1_RNDR_TRAP_SHIFT U(28)
+#define ID_AA64PFR1_EL1_RNDR_TRAP_MASK U(0xf)
+
+#define ID_AA64PFR1_EL1_RNG_TRAP_SUPPORTED ULL(0x1)
+#define ID_AA64PFR1_EL1_RNG_TRAP_NOT_SUPPORTED ULL(0x0)
+
+/* Memory Tagging Extension is not implemented */
+#define MTE_UNIMPLEMENTED U(0)
+/* FEAT_MTE: MTE instructions accessible at EL0 are implemented */
+#define MTE_IMPLEMENTED_EL0 U(1)
+/* FEAT_MTE2: Full MTE is implemented */
+#define MTE_IMPLEMENTED_ELX U(2)
+/*
+ * FEAT_MTE3: MTE is implemented with support for
+ * asymmetric Tag Check Fault handling
+ */
+#define MTE_IMPLEMENTED_ASY U(3)
+
+#define ID_AA64PFR1_MPAM_FRAC_SHIFT ULL(16)
+#define ID_AA64PFR1_MPAM_FRAC_MASK ULL(0xf)
+
+#define ID_AA64PFR1_EL1_SME_SHIFT U(24)
+#define ID_AA64PFR1_EL1_SME_MASK ULL(0xf)
+
+/* ID_PFR1_EL1 definitions */
+#define ID_PFR1_VIRTEXT_SHIFT U(12)
+#define ID_PFR1_VIRTEXT_MASK U(0xf)
+#define GET_VIRT_EXT(id) (((id) >> ID_PFR1_VIRTEXT_SHIFT) \
+ & ID_PFR1_VIRTEXT_MASK)
+
+/* SCTLR definitions */
+#define SCTLR_EL2_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+ (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+ (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
+
+#define SCTLR_EL1_RES1 ((UL(1) << 29) | (UL(1) << 28) | (UL(1) << 23) | \
+ (UL(1) << 22) | (UL(1) << 20) | (UL(1) << 11))
+
+#define SCTLR_AARCH32_EL1_RES1 \
+ ((U(1) << 23) | (U(1) << 22) | (U(1) << 11) | \
+ (U(1) << 4) | (U(1) << 3))
+
+#define SCTLR_EL3_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+ (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+ (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
+
+#define SCTLR_M_BIT (ULL(1) << 0)
+#define SCTLR_A_BIT (ULL(1) << 1)
+#define SCTLR_C_BIT (ULL(1) << 2)
+#define SCTLR_SA_BIT (ULL(1) << 3)
+#define SCTLR_SA0_BIT (ULL(1) << 4)
+#define SCTLR_CP15BEN_BIT (ULL(1) << 5)
+#define SCTLR_nAA_BIT (ULL(1) << 6)
+#define SCTLR_ITD_BIT (ULL(1) << 7)
+#define SCTLR_SED_BIT (ULL(1) << 8)
+#define SCTLR_UMA_BIT (ULL(1) << 9)
+#define SCTLR_EnRCTX_BIT (ULL(1) << 10)
+#define SCTLR_EOS_BIT (ULL(1) << 11)
+#define SCTLR_I_BIT (ULL(1) << 12)
+#define SCTLR_EnDB_BIT (ULL(1) << 13)
+#define SCTLR_DZE_BIT (ULL(1) << 14)
+#define SCTLR_UCT_BIT (ULL(1) << 15)
+#define SCTLR_NTWI_BIT (ULL(1) << 16)
+#define SCTLR_NTWE_BIT (ULL(1) << 18)
+#define SCTLR_WXN_BIT (ULL(1) << 19)
+#define SCTLR_TSCXT_BIT (ULL(1) << 20)
+#define SCTLR_IESB_BIT (ULL(1) << 21)
+#define SCTLR_EIS_BIT (ULL(1) << 22)
+#define SCTLR_SPAN_BIT (ULL(1) << 23)
+#define SCTLR_E0E_BIT (ULL(1) << 24)
+#define SCTLR_EE_BIT (ULL(1) << 25)
+#define SCTLR_UCI_BIT (ULL(1) << 26)
+#define SCTLR_EnDA_BIT (ULL(1) << 27)
+#define SCTLR_nTLSMD_BIT (ULL(1) << 28)
+#define SCTLR_LSMAOE_BIT (ULL(1) << 29)
+#define SCTLR_EnIB_BIT (ULL(1) << 30)
+#define SCTLR_EnIA_BIT (ULL(1) << 31)
+#define SCTLR_BT0_BIT (ULL(1) << 35)
+#define SCTLR_BT1_BIT (ULL(1) << 36)
+#define SCTLR_BT_BIT (ULL(1) << 36)
+#define SCTLR_ITFSB_BIT (ULL(1) << 37)
+#define SCTLR_TCF0_SHIFT U(38)
+#define SCTLR_TCF0_MASK ULL(3)
+#define SCTLR_ENTP2_BIT (ULL(1) << 60)
+
+/* Tag Check Faults in EL0 have no effect on the PE */
+#define SCTLR_TCF0_NO_EFFECT U(0)
+/* Tag Check Faults in EL0 cause a synchronous exception */
+#define SCTLR_TCF0_SYNC U(1)
+/* Tag Check Faults in EL0 are asynchronously accumulated */
+#define SCTLR_TCF0_ASYNC U(2)
+/*
+ * Tag Check Faults in EL0 cause a synchronous exception on reads,
+ * and are asynchronously accumulated on writes
+ */
+#define SCTLR_TCF0_SYNCR_ASYNCW U(3)
+
+#define SCTLR_TCF_SHIFT U(40)
+#define SCTLR_TCF_MASK ULL(3)
+
+/* Tag Check Faults in EL1 have no effect on the PE */
+#define SCTLR_TCF_NO_EFFECT U(0)
+/* Tag Check Faults in EL1 cause a synchronous exception */
+#define SCTLR_TCF_SYNC U(1)
+/* Tag Check Faults in EL1 are asynchronously accumulated */
+#define SCTLR_TCF_ASYNC U(2)
+/*
+ * Tag Check Faults in EL1 cause a synchronous exception on reads,
+ * and are asynchronously accumulated on writes
+ */
+#define SCTLR_TCF_SYNCR_ASYNCW U(3)
+
+#define SCTLR_ATA0_BIT (ULL(1) << 42)
+#define SCTLR_ATA_BIT (ULL(1) << 43)
+#define SCTLR_DSSBS_SHIFT U(44)
+#define SCTLR_DSSBS_BIT (ULL(1) << SCTLR_DSSBS_SHIFT)
+#define SCTLR_TWEDEn_BIT (ULL(1) << 45)
+#define SCTLR_TWEDEL_SHIFT U(46)
+#define SCTLR_TWEDEL_MASK ULL(0xf)
+#define SCTLR_EnASR_BIT (ULL(1) << 54)
+#define SCTLR_EnAS0_BIT (ULL(1) << 55)
+#define SCTLR_EnALS_BIT (ULL(1) << 56)
+#define SCTLR_EPAN_BIT (ULL(1) << 57)
+#define SCTLR_RESET_VAL SCTLR_EL3_RES1
+
+/* CPACR_EL1 definitions */
+#define CPACR_EL1_FPEN(x) ((x) << 20)
+#define CPACR_EL1_FP_TRAP_EL0 UL(0x1)
+#define CPACR_EL1_FP_TRAP_ALL UL(0x2)
+#define CPACR_EL1_FP_TRAP_NONE UL(0x3)
+
+/* SCR definitions */
+#define SCR_RES1_BITS ((U(1) << 4) | (U(1) << 5))
+#define SCR_NSE_SHIFT U(62)
+#define SCR_NSE_BIT (ULL(1) << SCR_NSE_SHIFT)
+#define SCR_GPF_BIT (UL(1) << 48)
+#define SCR_TWEDEL_SHIFT U(30)
+#define SCR_TWEDEL_MASK ULL(0xf)
+#define SCR_TRNDR_BIT (UL(1) << 40)
+#define SCR_HXEn_BIT (UL(1) << 38)
+#define SCR_ENTP2_SHIFT U(41)
+#define SCR_ENTP2_BIT (UL(1) << SCR_ENTP2_SHIFT)
+#define SCR_AMVOFFEN_SHIFT U(35)
+#define SCR_AMVOFFEN_BIT (UL(1) << SCR_AMVOFFEN_SHIFT)
+#define SCR_TWEDEn_BIT (UL(1) << 29)
+#define SCR_ECVEN_BIT (UL(1) << 28)
+#define SCR_FGTEN_BIT (UL(1) << 27)
+#define SCR_ATA_BIT (UL(1) << 26)
+#define SCR_EnSCXT_BIT (UL(1) << 25)
+#define SCR_FIEN_BIT (UL(1) << 21)
+#define SCR_EEL2_BIT (UL(1) << 18)
+#define SCR_API_BIT (UL(1) << 17)
+#define SCR_APK_BIT (UL(1) << 16)
+#define SCR_TERR_BIT (UL(1) << 15)
+#define SCR_TWE_BIT (UL(1) << 13)
+#define SCR_TWI_BIT (UL(1) << 12)
+#define SCR_ST_BIT (UL(1) << 11)
+#define SCR_RW_BIT (UL(1) << 10)
+#define SCR_SIF_BIT (UL(1) << 9)
+#define SCR_HCE_BIT (UL(1) << 8)
+#define SCR_SMD_BIT (UL(1) << 7)
+#define SCR_EA_BIT (UL(1) << 3)
+#define SCR_FIQ_BIT (UL(1) << 2)
+#define SCR_IRQ_BIT (UL(1) << 1)
+#define SCR_NS_BIT (UL(1) << 0)
+#define SCR_VALID_BIT_MASK U(0x24000002F8F)
+#define SCR_RESET_VAL SCR_RES1_BITS
+
+/* MDCR_EL3 definitions */
+#define MDCR_EnPMSN_BIT (ULL(1) << 36)
+#define MDCR_MPMX_BIT (ULL(1) << 35)
+#define MDCR_MCCD_BIT (ULL(1) << 34)
+#define MDCR_SBRBE_SHIFT U(32)
+#define MDCR_SBRBE_MASK ULL(0x3)
+#define MDCR_NSTB(x) ((x) << 24)
+#define MDCR_NSTB_EL1 ULL(0x3)
+#define MDCR_NSTBE (ULL(1) << 26)
+#define MDCR_MTPME_BIT (ULL(1) << 28)
+#define MDCR_TDCC_BIT (ULL(1) << 27)
+#define MDCR_SCCD_BIT (ULL(1) << 23)
+#define MDCR_EPMAD_BIT (ULL(1) << 21)
+#define MDCR_EDAD_BIT (ULL(1) << 20)
+#define MDCR_TTRF_BIT (ULL(1) << 19)
+#define MDCR_STE_BIT (ULL(1) << 18)
+#define MDCR_SPME_BIT (ULL(1) << 17)
+#define MDCR_SDD_BIT (ULL(1) << 16)
+#define MDCR_SPD32(x) ((x) << 14)
+#define MDCR_SPD32_LEGACY ULL(0x0)
+#define MDCR_SPD32_DISABLE ULL(0x2)
+#define MDCR_SPD32_ENABLE ULL(0x3)
+#define MDCR_NSPB(x) ((x) << 12)
+#define MDCR_NSPB_EL1 ULL(0x3)
+#define MDCR_TDOSA_BIT (ULL(1) << 10)
+#define MDCR_TDA_BIT (ULL(1) << 9)
+#define MDCR_TPM_BIT (ULL(1) << 6)
+#define MDCR_EL3_RESET_VAL ULL(0x0)
+
+/* MDCR_EL2 definitions */
+#define MDCR_EL2_MTPME (U(1) << 28)
+#define MDCR_EL2_HLP (U(1) << 26)
+#define MDCR_EL2_E2TB(x) ((x) << 24)
+#define MDCR_EL2_E2TB_EL1 U(0x3)
+#define MDCR_EL2_HCCD (U(1) << 23)
+#define MDCR_EL2_TTRF (U(1) << 19)
+#define MDCR_EL2_HPMD (U(1) << 17)
+#define MDCR_EL2_TPMS (U(1) << 14)
+#define MDCR_EL2_E2PB(x) ((x) << 12)
+#define MDCR_EL2_E2PB_EL1 U(0x3)
+#define MDCR_EL2_TDRA_BIT (U(1) << 11)
+#define MDCR_EL2_TDOSA_BIT (U(1) << 10)
+#define MDCR_EL2_TDA_BIT (U(1) << 9)
+#define MDCR_EL2_TDE_BIT (U(1) << 8)
+#define MDCR_EL2_HPME_BIT (U(1) << 7)
+#define MDCR_EL2_TPM_BIT (U(1) << 6)
+#define MDCR_EL2_TPMCR_BIT (U(1) << 5)
+#define MDCR_EL2_RESET_VAL U(0x0)
+
+/* HSTR_EL2 definitions */
+#define HSTR_EL2_RESET_VAL U(0x0)
+#define HSTR_EL2_T_MASK U(0xff)
+
+/* CNTHP_CTL_EL2 definitions */
+#define CNTHP_CTL_ENABLE_BIT (U(1) << 0)
+#define CNTHP_CTL_RESET_VAL U(0x0)
+
+/* VTTBR_EL2 definitions */
+#define VTTBR_RESET_VAL ULL(0x0)
+#define VTTBR_VMID_MASK ULL(0xff)
+#define VTTBR_VMID_SHIFT U(48)
+#define VTTBR_BADDR_MASK ULL(0xffffffffffff)
+#define VTTBR_BADDR_SHIFT U(0)
+
+/* HCR definitions */
+#define HCR_RESET_VAL ULL(0x0)
+#define HCR_AMVOFFEN_SHIFT U(51)
+#define HCR_AMVOFFEN_BIT (ULL(1) << HCR_AMVOFFEN_SHIFT)
+#define HCR_TEA_BIT (ULL(1) << 47)
+#define HCR_API_BIT (ULL(1) << 41)
+#define HCR_APK_BIT (ULL(1) << 40)
+#define HCR_E2H_BIT (ULL(1) << 34)
+#define HCR_HCD_BIT (ULL(1) << 29)
+#define HCR_TGE_BIT (ULL(1) << 27)
+#define HCR_RW_SHIFT U(31)
+#define HCR_RW_BIT (ULL(1) << HCR_RW_SHIFT)
+#define HCR_TWE_BIT (ULL(1) << 14)
+#define HCR_TWI_BIT (ULL(1) << 13)
+#define HCR_AMO_BIT (ULL(1) << 5)
+#define HCR_IMO_BIT (ULL(1) << 4)
+#define HCR_FMO_BIT (ULL(1) << 3)
+
+/* ISR definitions */
+#define ISR_A_SHIFT U(8)
+#define ISR_I_SHIFT U(7)
+#define ISR_F_SHIFT U(6)
+
+/* CNTHCTL_EL2 definitions */
+#define CNTHCTL_RESET_VAL U(0x0)
+#define EVNTEN_BIT (U(1) << 2)
+#define EL1PCEN_BIT (U(1) << 1)
+#define EL1PCTEN_BIT (U(1) << 0)
+
+/* CNTKCTL_EL1 definitions */
+#define EL0PTEN_BIT (U(1) << 9)
+#define EL0VTEN_BIT (U(1) << 8)
+#define EL0PCTEN_BIT (U(1) << 0)
+#define EL0VCTEN_BIT (U(1) << 1)
+#define EVNTEN_BIT (U(1) << 2)
+#define EVNTDIR_BIT (U(1) << 3)
+#define EVNTI_SHIFT U(4)
+#define EVNTI_MASK U(0xf)
+
+/* CPTR_EL3 definitions */
+#define TCPAC_BIT (U(1) << 31)
+#define TAM_SHIFT U(30)
+#define TAM_BIT (U(1) << TAM_SHIFT)
+#define TTA_BIT (U(1) << 20)
+#define ESM_BIT (U(1) << 12)
+#define TFP_BIT (U(1) << 10)
+#define CPTR_EZ_BIT (U(1) << 8)
+#define CPTR_EL3_RESET_VAL ((TCPAC_BIT | TAM_BIT | TTA_BIT | TFP_BIT) & \
+ ~(CPTR_EZ_BIT | ESM_BIT))
+
+/* CPTR_EL2 definitions */
+#define CPTR_EL2_RES1 ((U(1) << 13) | (U(1) << 12) | (U(0x3ff)))
+#define CPTR_EL2_TCPAC_BIT (U(1) << 31)
+#define CPTR_EL2_TAM_SHIFT U(30)
+#define CPTR_EL2_TAM_BIT (U(1) << CPTR_EL2_TAM_SHIFT)
+#define CPTR_EL2_SMEN_MASK ULL(0x3)
+#define CPTR_EL2_SMEN_SHIFT U(24)
+#define CPTR_EL2_TTA_BIT (U(1) << 20)
+#define CPTR_EL2_TSM_BIT (U(1) << 12)
+#define CPTR_EL2_TFP_BIT (U(1) << 10)
+#define CPTR_EL2_TZ_BIT (U(1) << 8)
+#define CPTR_EL2_RESET_VAL CPTR_EL2_RES1
+
+/* VTCR_EL2 definitions */
+#define VTCR_RESET_VAL U(0x0)
+#define VTCR_EL2_MSA (U(1) << 31)
+
+/* CPSR/SPSR definitions */
+#define DAIF_FIQ_BIT (U(1) << 0)
+#define DAIF_IRQ_BIT (U(1) << 1)
+#define DAIF_ABT_BIT (U(1) << 2)
+#define DAIF_DBG_BIT (U(1) << 3)
+#define SPSR_DAIF_SHIFT U(6)
+#define SPSR_DAIF_MASK U(0xf)
+
+#define SPSR_AIF_SHIFT U(6)
+#define SPSR_AIF_MASK U(0x7)
+
+#define SPSR_E_SHIFT U(9)
+#define SPSR_E_MASK U(0x1)
+#define SPSR_E_LITTLE U(0x0)
+#define SPSR_E_BIG U(0x1)
+
+#define SPSR_T_SHIFT U(5)
+#define SPSR_T_MASK U(0x1)
+#define SPSR_T_ARM U(0x0)
+#define SPSR_T_THUMB U(0x1)
+
+#define SPSR_M_SHIFT U(4)
+#define SPSR_M_MASK U(0x1)
+#define SPSR_M_AARCH64 U(0x0)
+#define SPSR_M_AARCH32 U(0x1)
+#define SPSR_M_EL2H U(0x9)
+
+#define SPSR_EL_SHIFT U(2)
+#define SPSR_EL_WIDTH U(2)
+
+#define SPSR_SSBS_SHIFT_AARCH64 U(12)
+#define SPSR_SSBS_BIT_AARCH64 (ULL(1) << SPSR_SSBS_SHIFT_AARCH64)
+#define SPSR_SSBS_SHIFT_AARCH32 U(23)
+#define SPSR_SSBS_BIT_AARCH32 (ULL(1) << SPSR_SSBS_SHIFT_AARCH32)
+
+#define SPSR_PAN_BIT BIT_64(22)
+
+#define SPSR_DIT_BIT BIT(24)
+
+#define SPSR_TCO_BIT_AARCH64 BIT_64(25)
+
+#define DISABLE_ALL_EXCEPTIONS \
+ (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
+
+#define DISABLE_INTERRUPTS (DAIF_FIQ_BIT | DAIF_IRQ_BIT)
+
+/*
+ * RMR_EL3 definitions
+ */
+#define RMR_EL3_RR_BIT (U(1) << 1)
+#define RMR_EL3_AA64_BIT (U(1) << 0)
+
+/*
+ * HI-VECTOR address for AArch32 state
+ */
+#define HI_VECTOR_BASE U(0xFFFF0000)
+
+/*
+ * TCR definitions
+ */
+#define TCR_EL3_RES1 ((ULL(1) << 31) | (ULL(1) << 23))
+#define TCR_EL2_RES1 ((ULL(1) << 31) | (ULL(1) << 23))
+#define TCR_EL1_IPS_SHIFT U(32)
+#define TCR_EL2_PS_SHIFT U(16)
+#define TCR_EL3_PS_SHIFT U(16)
+
+#define TCR_TxSZ_MIN ULL(16)
+#define TCR_TxSZ_MAX ULL(39)
+#define TCR_TxSZ_MAX_TTST ULL(48)
+
+#define TCR_T0SZ_SHIFT U(0)
+#define TCR_T1SZ_SHIFT U(16)
+
+/* (internal) physical address size bits in EL3/EL1 */
+#define TCR_PS_BITS_4GB ULL(0x0)
+#define TCR_PS_BITS_64GB ULL(0x1)
+#define TCR_PS_BITS_1TB ULL(0x2)
+#define TCR_PS_BITS_4TB ULL(0x3)
+#define TCR_PS_BITS_16TB ULL(0x4)
+#define TCR_PS_BITS_256TB ULL(0x5)
+
+#define ADDR_MASK_48_TO_63 ULL(0xFFFF000000000000)
+#define ADDR_MASK_44_TO_47 ULL(0x0000F00000000000)
+#define ADDR_MASK_42_TO_43 ULL(0x00000C0000000000)
+#define ADDR_MASK_40_TO_41 ULL(0x0000030000000000)
+#define ADDR_MASK_36_TO_39 ULL(0x000000F000000000)
+#define ADDR_MASK_32_TO_35 ULL(0x0000000F00000000)
+
+#define TCR_RGN_INNER_NC (ULL(0x0) << 8)
+#define TCR_RGN_INNER_WBA (ULL(0x1) << 8)
+#define TCR_RGN_INNER_WT (ULL(0x2) << 8)
+#define TCR_RGN_INNER_WBNA (ULL(0x3) << 8)
+
+#define TCR_RGN_OUTER_NC (ULL(0x0) << 10)
+#define TCR_RGN_OUTER_WBA (ULL(0x1) << 10)
+#define TCR_RGN_OUTER_WT (ULL(0x2) << 10)
+#define TCR_RGN_OUTER_WBNA (ULL(0x3) << 10)
+
+#define TCR_SH_NON_SHAREABLE (ULL(0x0) << 12)
+#define TCR_SH_OUTER_SHAREABLE (ULL(0x2) << 12)
+#define TCR_SH_INNER_SHAREABLE (ULL(0x3) << 12)
+
+#define TCR_RGN1_INNER_NC (ULL(0x0) << 24)
+#define TCR_RGN1_INNER_WBA (ULL(0x1) << 24)
+#define TCR_RGN1_INNER_WT (ULL(0x2) << 24)
+#define TCR_RGN1_INNER_WBNA (ULL(0x3) << 24)
+
+#define TCR_RGN1_OUTER_NC (ULL(0x0) << 26)
+#define TCR_RGN1_OUTER_WBA (ULL(0x1) << 26)
+#define TCR_RGN1_OUTER_WT (ULL(0x2) << 26)
+#define TCR_RGN1_OUTER_WBNA (ULL(0x3) << 26)
+
+#define TCR_SH1_NON_SHAREABLE (ULL(0x0) << 28)
+#define TCR_SH1_OUTER_SHAREABLE (ULL(0x2) << 28)
+#define TCR_SH1_INNER_SHAREABLE (ULL(0x3) << 28)
+
+#define TCR_TG0_SHIFT U(14)
+#define TCR_TG0_MASK ULL(3)
+#define TCR_TG0_4K (ULL(0) << TCR_TG0_SHIFT)
+#define TCR_TG0_64K (ULL(1) << TCR_TG0_SHIFT)
+#define TCR_TG0_16K (ULL(2) << TCR_TG0_SHIFT)
+
+#define TCR_TG1_SHIFT U(30)
+#define TCR_TG1_MASK ULL(3)
+#define TCR_TG1_16K (ULL(1) << TCR_TG1_SHIFT)
+#define TCR_TG1_4K (ULL(2) << TCR_TG1_SHIFT)
+#define TCR_TG1_64K (ULL(3) << TCR_TG1_SHIFT)
+
+#define TCR_EPD0_BIT (ULL(1) << 7)
+#define TCR_EPD1_BIT (ULL(1) << 23)
+
+#define MODE_SP_SHIFT U(0x0)
+#define MODE_SP_MASK U(0x1)
+#define MODE_SP_EL0 U(0x0)
+#define MODE_SP_ELX U(0x1)
+
+#define MODE_RW_SHIFT U(0x4)
+#define MODE_RW_MASK U(0x1)
+#define MODE_RW_64 U(0x0)
+#define MODE_RW_32 U(0x1)
+
+#define MODE_EL_SHIFT U(0x2)
+#define MODE_EL_MASK U(0x3)
+#define MODE_EL_WIDTH U(0x2)
+#define MODE_EL3 U(0x3)
+#define MODE_EL2 U(0x2)
+#define MODE_EL1 U(0x1)
+#define MODE_EL0 U(0x0)
+
+#define MODE32_SHIFT U(0)
+#define MODE32_MASK U(0xf)
+#define MODE32_usr U(0x0)
+#define MODE32_fiq U(0x1)
+#define MODE32_irq U(0x2)
+#define MODE32_svc U(0x3)
+#define MODE32_mon U(0x6)
+#define MODE32_abt U(0x7)
+#define MODE32_hyp U(0xa)
+#define MODE32_und U(0xb)
+#define MODE32_sys U(0xf)
+
+#define GET_RW(mode) (((mode) >> MODE_RW_SHIFT) & MODE_RW_MASK)
+#define GET_EL(mode) (((mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
+#define GET_SP(mode) (((mode) >> MODE_SP_SHIFT) & MODE_SP_MASK)
+#define GET_M32(mode) (((mode) >> MODE32_SHIFT) & MODE32_MASK)
+
+#define SPSR_64(el, sp, daif) \
+ (((MODE_RW_64 << MODE_RW_SHIFT) | \
+ (((el) & MODE_EL_MASK) << MODE_EL_SHIFT) | \
+ (((sp) & MODE_SP_MASK) << MODE_SP_SHIFT) | \
+ (((daif) & SPSR_DAIF_MASK) << SPSR_DAIF_SHIFT)) & \
+ (~(SPSR_SSBS_BIT_AARCH64)))
+
+#define SPSR_MODE32(mode, isa, endian, aif) \
+ (((MODE_RW_32 << MODE_RW_SHIFT) | \
+ (((mode) & MODE32_MASK) << MODE32_SHIFT) | \
+ (((isa) & SPSR_T_MASK) << SPSR_T_SHIFT) | \
+ (((endian) & SPSR_E_MASK) << SPSR_E_SHIFT) | \
+ (((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT)) & \
+ (~(SPSR_SSBS_BIT_AARCH32)))
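/*
 * Illustrative usage (editor's sketch, not part of the upstream header):
 * SPSR_64() composes the mode fields above into an SPSR image, e.g. for an
 * AArch64 entry at EL2 using SP_ELx with all exceptions masked. The EXAMPLE_
 * name is hypothetical.
 */
#define EXAMPLE_SPSR_EL2H_ALL_MASKED \
	SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS)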
+
+/*
+ * TTBR Definitions
+ */
+#define TTBR_CNP_BIT ULL(0x1)
+
+/*
+ * CTR_EL0 definitions
+ */
+#define CTR_CWG_SHIFT U(24)
+#define CTR_CWG_MASK U(0xf)
+#define CTR_ERG_SHIFT U(20)
+#define CTR_ERG_MASK U(0xf)
+#define CTR_DMINLINE_SHIFT U(16)
+#define CTR_DMINLINE_MASK U(0xf)
+#define CTR_L1IP_SHIFT U(14)
+#define CTR_L1IP_MASK U(0x3)
+#define CTR_IMINLINE_SHIFT U(0)
+#define CTR_IMINLINE_MASK U(0xf)
+
+#define MAX_CACHE_LINE_SIZE U(0x800) /* 2KB */
+
+/* Physical timer control register bit fields shifts and masks */
+#define CNTP_CTL_ENABLE_SHIFT U(0)
+#define CNTP_CTL_IMASK_SHIFT U(1)
+#define CNTP_CTL_ISTATUS_SHIFT U(2)
+
+#define CNTP_CTL_ENABLE_MASK U(1)
+#define CNTP_CTL_IMASK_MASK U(1)
+#define CNTP_CTL_ISTATUS_MASK U(1)
+
+/* Physical timer control macros */
+#define CNTP_CTL_ENABLE_BIT (U(1) << CNTP_CTL_ENABLE_SHIFT)
+#define CNTP_CTL_IMASK_BIT (U(1) << CNTP_CTL_IMASK_SHIFT)
+
+/* Exception Syndrome register bits and bobs */
+#define ESR_EC_SHIFT U(26)
+#define ESR_EC_MASK U(0x3f)
+#define ESR_EC_LENGTH U(6)
+#define ESR_ISS_SHIFT U(0)
+#define ESR_ISS_LENGTH U(25)
+#define EC_UNKNOWN U(0x0)
+#define EC_WFE_WFI U(0x1)
+#define EC_AARCH32_CP15_MRC_MCR U(0x3)
+#define EC_AARCH32_CP15_MRRC_MCRR U(0x4)
+#define EC_AARCH32_CP14_MRC_MCR U(0x5)
+#define EC_AARCH32_CP14_LDC_STC U(0x6)
+#define EC_FP_SIMD U(0x7)
+#define EC_AARCH32_CP10_MRC U(0x8)
+#define EC_AARCH32_CP14_MRRC_MCRR U(0xc)
+#define EC_ILLEGAL U(0xe)
+#define EC_AARCH32_SVC U(0x11)
+#define EC_AARCH32_HVC U(0x12)
+#define EC_AARCH32_SMC U(0x13)
+#define EC_AARCH64_SVC U(0x15)
+#define EC_AARCH64_HVC U(0x16)
+#define EC_AARCH64_SMC U(0x17)
+#define EC_AARCH64_SYS U(0x18)
+#define EC_IABORT_LOWER_EL U(0x20)
+#define EC_IABORT_CUR_EL U(0x21)
+#define EC_PC_ALIGN U(0x22)
+#define EC_DABORT_LOWER_EL U(0x24)
+#define EC_DABORT_CUR_EL U(0x25)
+#define EC_SP_ALIGN U(0x26)
+#define EC_AARCH32_FP U(0x28)
+#define EC_AARCH64_FP U(0x2c)
+#define EC_SERROR U(0x2f)
+#define EC_BRK U(0x3c)
+
+/*
+ * External Abort bit in Instruction and Data Aborts synchronous exception
+ * syndromes.
+ */
+#define ESR_ISS_EABORT_EA_BIT U(9)
+
+#define EC_BITS(x) (((x) >> ESR_EC_SHIFT) & ESR_EC_MASK)
+
+/* Reset bit inside the Reset management register for EL3 (RMR_EL3) */
+#define RMR_RESET_REQUEST_SHIFT U(0x1)
+#define RMR_WARM_RESET_CPU (U(1) << RMR_RESET_REQUEST_SHIFT)
+
+/*******************************************************************************
+ * Definitions of register offsets, fields and macros for CPU system
+ * instructions.
+ ******************************************************************************/
+
+#define TLBI_ADDR_SHIFT U(12)
+#define TLBI_ADDR_MASK ULL(0x00000FFFFFFFFFFF)
+#define TLBI_ADDR(x) (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTCTLBase Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+#define CNTCTLBASE_CNTFRQ U(0x0)
+#define CNTNSAR U(0x4)
+#define CNTNSAR_NS_SHIFT(x) (x)
+
+#define CNTACR_BASE(x) (U(0x40) + ((x) << 2))
+#define CNTACR_RPCT_SHIFT U(0x0)
+#define CNTACR_RVCT_SHIFT U(0x1)
+#define CNTACR_RFRQ_SHIFT U(0x2)
+#define CNTACR_RVOFF_SHIFT U(0x3)
+#define CNTACR_RWVT_SHIFT U(0x4)
+#define CNTACR_RWPT_SHIFT U(0x5)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTBaseN Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+/* Physical Count register. */
+#define CNTPCT_LO U(0x0)
+/* Counter Frequency register. */
+#define CNTBASEN_CNTFRQ U(0x10)
+/* Physical Timer CompareValue register. */
+#define CNTP_CVAL_LO U(0x20)
+/* Physical Timer Control register. */
+#define CNTP_CTL U(0x2c)
+
+/* PMCR_EL0 definitions */
+#define PMCR_EL0_RESET_VAL U(0x0)
+#define PMCR_EL0_N_SHIFT U(11)
+#define PMCR_EL0_N_MASK U(0x1f)
+#define PMCR_EL0_N_BITS (PMCR_EL0_N_MASK << PMCR_EL0_N_SHIFT)
+#define PMCR_EL0_LP_BIT (U(1) << 7)
+#define PMCR_EL0_LC_BIT (U(1) << 6)
+#define PMCR_EL0_DP_BIT (U(1) << 5)
+#define PMCR_EL0_X_BIT (U(1) << 4)
+#define PMCR_EL0_D_BIT (U(1) << 3)
+#define PMCR_EL0_C_BIT (U(1) << 2)
+#define PMCR_EL0_P_BIT (U(1) << 1)
+#define PMCR_EL0_E_BIT (U(1) << 0)
+
+/*******************************************************************************
+ * Definitions for system register interface to SVE
+ ******************************************************************************/
+#define ZCR_EL3 S3_6_C1_C2_0
+#define ZCR_EL2 S3_4_C1_C2_0
+
+/* ZCR_EL3 definitions */
+#define ZCR_EL3_LEN_MASK U(0xf)
+
+/* ZCR_EL2 definitions */
+#define ZCR_EL2_LEN_MASK U(0xf)
+
+/*******************************************************************************
+ * Definitions for system register interface to SME as needed in EL3
+ ******************************************************************************/
+#define ID_AA64SMFR0_EL1 S3_0_C0_C4_5
+#define SMCR_EL3 S3_6_C1_C2_6
+
+/* ID_AA64SMFR0_EL1 definitions */
+#define ID_AA64SMFR0_EL1_FA64_BIT (UL(1) << 63)
+
+/* SMCR_ELx definitions */
+#define SMCR_ELX_LEN_SHIFT U(0)
+#define SMCR_ELX_LEN_MASK U(0x1ff)
+#define SMCR_ELX_FA64_BIT (U(1) << 31)
+
+/*******************************************************************************
+ * Definitions of MAIR encodings for device and normal memory
+ ******************************************************************************/
+/*
+ * MAIR encodings for device memory attributes.
+ */
+#define MAIR_DEV_nGnRnE ULL(0x0)
+#define MAIR_DEV_nGnRE ULL(0x4)
+#define MAIR_DEV_nGRE ULL(0x8)
+#define MAIR_DEV_GRE ULL(0xc)
+
+/*
+ * MAIR encodings for normal memory attributes.
+ *
+ * Cache Policy
+ * WT: Write Through
+ * WB: Write Back
+ * NC: Non-Cacheable
+ *
+ * Transient Hint
+ * NTR: Non-Transient
+ * TR: Transient
+ *
+ * Allocation Policy
+ * RA: Read Allocate
+ * WA: Write Allocate
+ * RWA: Read and Write Allocate
+ * NA: No Allocation
+ */
+#define MAIR_NORM_WT_TR_WA ULL(0x1)
+#define MAIR_NORM_WT_TR_RA ULL(0x2)
+#define MAIR_NORM_WT_TR_RWA ULL(0x3)
+#define MAIR_NORM_NC ULL(0x4)
+#define MAIR_NORM_WB_TR_WA ULL(0x5)
+#define MAIR_NORM_WB_TR_RA ULL(0x6)
+#define MAIR_NORM_WB_TR_RWA ULL(0x7)
+#define MAIR_NORM_WT_NTR_NA ULL(0x8)
+#define MAIR_NORM_WT_NTR_WA ULL(0x9)
+#define MAIR_NORM_WT_NTR_RA ULL(0xa)
+#define MAIR_NORM_WT_NTR_RWA ULL(0xb)
+#define MAIR_NORM_WB_NTR_NA ULL(0xc)
+#define MAIR_NORM_WB_NTR_WA ULL(0xd)
+#define MAIR_NORM_WB_NTR_RA ULL(0xe)
+#define MAIR_NORM_WB_NTR_RWA ULL(0xf)
+
+#define MAIR_NORM_OUTER_SHIFT U(4)
+
+#define MAKE_MAIR_NORMAL_MEMORY(inner, outer) \
+ ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
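/*
 * Illustrative usage (editor's sketch, not part of the upstream header):
 * compose a Normal memory attribute that is Write-Back, Non-Transient,
 * Read/Write-Allocate in both the inner and outer domains. The EXAMPLE_
 * name is hypothetical.
 */
#define EXAMPLE_MAIR_NORMAL_WB \
	MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA, MAIR_NORM_WB_NTR_RWA)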
+
+/* PAR_EL1 fields */
+#define PAR_F_SHIFT U(0)
+#define PAR_F_MASK ULL(0x1)
+#define PAR_ADDR_SHIFT U(12)
+#define PAR_ADDR_MASK (BIT(40) - ULL(1)) /* 40-bits-wide page address */
+
+/*******************************************************************************
+ * Definitions for system register interface to SPE
+ ******************************************************************************/
+#define PMBLIMITR_EL1 S3_0_C9_C10_0
+
+/*******************************************************************************
+ * Definitions for system register interface to MPAM
+ ******************************************************************************/
+#define MPAMIDR_EL1 S3_0_C10_C4_4
+#define MPAM2_EL2 S3_4_C10_C5_0
+#define MPAMHCR_EL2 S3_4_C10_C4_0
+#define MPAM3_EL3 S3_6_C10_C5_0
+
+/*******************************************************************************
+ * Definitions for system register interface to AMU for FEAT_AMUv1
+ ******************************************************************************/
+#define AMCR_EL0 S3_3_C13_C2_0
+#define AMCFGR_EL0 S3_3_C13_C2_1
+#define AMCGCR_EL0 S3_3_C13_C2_2
+#define AMUSERENR_EL0 S3_3_C13_C2_3
+#define AMCNTENCLR0_EL0 S3_3_C13_C2_4
+#define AMCNTENSET0_EL0 S3_3_C13_C2_5
+#define AMCNTENCLR1_EL0 S3_3_C13_C3_0
+#define AMCNTENSET1_EL0 S3_3_C13_C3_1
+
+/* Activity Monitor Group 0 Event Counter Registers */
+#define AMEVCNTR00_EL0 S3_3_C13_C4_0
+#define AMEVCNTR01_EL0 S3_3_C13_C4_1
+#define AMEVCNTR02_EL0 S3_3_C13_C4_2
+#define AMEVCNTR03_EL0 S3_3_C13_C4_3
+
+/* Activity Monitor Group 0 Event Type Registers */
+#define AMEVTYPER00_EL0 S3_3_C13_C6_0
+#define AMEVTYPER01_EL0 S3_3_C13_C6_1
+#define AMEVTYPER02_EL0 S3_3_C13_C6_2
+#define AMEVTYPER03_EL0 S3_3_C13_C6_3
+
+/* Activity Monitor Group 1 Event Counter Registers */
+#define AMEVCNTR10_EL0 S3_3_C13_C12_0
+#define AMEVCNTR11_EL0 S3_3_C13_C12_1
+#define AMEVCNTR12_EL0 S3_3_C13_C12_2
+#define AMEVCNTR13_EL0 S3_3_C13_C12_3
+#define AMEVCNTR14_EL0 S3_3_C13_C12_4
+#define AMEVCNTR15_EL0 S3_3_C13_C12_5
+#define AMEVCNTR16_EL0 S3_3_C13_C12_6
+#define AMEVCNTR17_EL0 S3_3_C13_C12_7
+#define AMEVCNTR18_EL0 S3_3_C13_C13_0
+#define AMEVCNTR19_EL0 S3_3_C13_C13_1
+#define AMEVCNTR1A_EL0 S3_3_C13_C13_2
+#define AMEVCNTR1B_EL0 S3_3_C13_C13_3
+#define AMEVCNTR1C_EL0 S3_3_C13_C13_4
+#define AMEVCNTR1D_EL0 S3_3_C13_C13_5
+#define AMEVCNTR1E_EL0 S3_3_C13_C13_6
+#define AMEVCNTR1F_EL0 S3_3_C13_C13_7
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10_EL0 S3_3_C13_C14_0
+#define AMEVTYPER11_EL0 S3_3_C13_C14_1
+#define AMEVTYPER12_EL0 S3_3_C13_C14_2
+#define AMEVTYPER13_EL0 S3_3_C13_C14_3
+#define AMEVTYPER14_EL0 S3_3_C13_C14_4
+#define AMEVTYPER15_EL0 S3_3_C13_C14_5
+#define AMEVTYPER16_EL0 S3_3_C13_C14_6
+#define AMEVTYPER17_EL0 S3_3_C13_C14_7
+#define AMEVTYPER18_EL0 S3_3_C13_C15_0
+#define AMEVTYPER19_EL0 S3_3_C13_C15_1
+#define AMEVTYPER1A_EL0 S3_3_C13_C15_2
+#define AMEVTYPER1B_EL0 S3_3_C13_C15_3
+#define AMEVTYPER1C_EL0 S3_3_C13_C15_4
+#define AMEVTYPER1D_EL0 S3_3_C13_C15_5
+#define AMEVTYPER1E_EL0 S3_3_C13_C15_6
+#define AMEVTYPER1F_EL0 S3_3_C13_C15_7
+
+/* AMCNTENSET0_EL0 definitions */
+#define AMCNTENSET0_EL0_Pn_SHIFT U(0)
+#define AMCNTENSET0_EL0_Pn_MASK ULL(0xffff)
+
+/* AMCNTENSET1_EL0 definitions */
+#define AMCNTENSET1_EL0_Pn_SHIFT U(0)
+#define AMCNTENSET1_EL0_Pn_MASK ULL(0xffff)
+
+/* AMCNTENCLR0_EL0 definitions */
+#define AMCNTENCLR0_EL0_Pn_SHIFT U(0)
+#define AMCNTENCLR0_EL0_Pn_MASK ULL(0xffff)
+
+/* AMCNTENCLR1_EL0 definitions */
+#define AMCNTENCLR1_EL0_Pn_SHIFT U(0)
+#define AMCNTENCLR1_EL0_Pn_MASK ULL(0xffff)
+
+/* AMCFGR_EL0 definitions */
+#define AMCFGR_EL0_NCG_SHIFT U(28)
+#define AMCFGR_EL0_NCG_MASK U(0xf)
+#define AMCFGR_EL0_N_SHIFT U(0)
+#define AMCFGR_EL0_N_MASK U(0xff)
+
+/* AMCGCR_EL0 definitions */
+#define AMCGCR_EL0_CG0NC_SHIFT U(0)
+#define AMCGCR_EL0_CG0NC_MASK U(0xff)
+#define AMCGCR_EL0_CG1NC_SHIFT U(8)
+#define AMCGCR_EL0_CG1NC_MASK U(0xff)
+
+/* MPAM register definitions */
+#define MPAM3_EL3_MPAMEN_BIT (ULL(1) << 63)
+#define MPAMHCR_EL2_TRAP_MPAMIDR_EL1 (ULL(1) << 31)
+
+#define MPAM2_EL2_TRAPMPAM0EL1 (ULL(1) << 49)
+#define MPAM2_EL2_TRAPMPAM1EL1 (ULL(1) << 48)
+
+#define MPAMIDR_HAS_HCR_BIT (ULL(1) << 17)
+
+/*******************************************************************************
+ * Definitions for system register interface to AMU for FEAT_AMUv1p1
+ ******************************************************************************/
+
+/* Definition for register defining which virtual offsets are implemented. */
+#define AMCG1IDR_EL0 S3_3_C13_C2_6
+#define AMCG1IDR_CTR_MASK ULL(0xffff)
+#define AMCG1IDR_CTR_SHIFT U(0)
+#define AMCG1IDR_VOFF_MASK ULL(0xffff)
+#define AMCG1IDR_VOFF_SHIFT U(16)
+
+/* New bit added to AMCR_EL0 */
+#define AMCR_CG1RZ_SHIFT U(17)
+#define AMCR_CG1RZ_BIT (ULL(0x1) << AMCR_CG1RZ_SHIFT)
+
+/*
+ * Definitions for virtual offset registers for architected activity monitor
+ * event counters.
+ * AMEVCNTVOFF01_EL2 intentionally left undefined, as it does not exist.
+ */
+#define AMEVCNTVOFF00_EL2 S3_4_C13_C8_0
+#define AMEVCNTVOFF02_EL2 S3_4_C13_C8_2
+#define AMEVCNTVOFF03_EL2 S3_4_C13_C8_3
+
+/*
+ * Definitions for virtual offset registers for auxiliary activity monitor event
+ * counters.
+ */
+#define AMEVCNTVOFF10_EL2 S3_4_C13_C10_0
+#define AMEVCNTVOFF11_EL2 S3_4_C13_C10_1
+#define AMEVCNTVOFF12_EL2 S3_4_C13_C10_2
+#define AMEVCNTVOFF13_EL2 S3_4_C13_C10_3
+#define AMEVCNTVOFF14_EL2 S3_4_C13_C10_4
+#define AMEVCNTVOFF15_EL2 S3_4_C13_C10_5
+#define AMEVCNTVOFF16_EL2 S3_4_C13_C10_6
+#define AMEVCNTVOFF17_EL2 S3_4_C13_C10_7
+#define AMEVCNTVOFF18_EL2 S3_4_C13_C11_0
+#define AMEVCNTVOFF19_EL2 S3_4_C13_C11_1
+#define AMEVCNTVOFF1A_EL2 S3_4_C13_C11_2
+#define AMEVCNTVOFF1B_EL2 S3_4_C13_C11_3
+#define AMEVCNTVOFF1C_EL2 S3_4_C13_C11_4
+#define AMEVCNTVOFF1D_EL2 S3_4_C13_C11_5
+#define AMEVCNTVOFF1E_EL2 S3_4_C13_C11_6
+#define AMEVCNTVOFF1F_EL2 S3_4_C13_C11_7
+
+/*******************************************************************************
+ * Realm management extension register definitions
+ ******************************************************************************/
+#define GPCCR_EL3 S3_6_C2_C1_6
+#define GPTBR_EL3 S3_6_C2_C1_4
+
+/*******************************************************************************
+ * RAS system registers
+ ******************************************************************************/
+#define DISR_EL1 S3_0_C12_C1_1
+#define DISR_A_BIT U(31)
+
+#define ERRIDR_EL1 S3_0_C5_C3_0
+#define ERRIDR_MASK U(0xffff)
+
+#define ERRSELR_EL1 S3_0_C5_C3_1
+
+/* System register access to Standard Error Record registers */
+#define ERXFR_EL1 S3_0_C5_C4_0
+#define ERXCTLR_EL1 S3_0_C5_C4_1
+#define ERXSTATUS_EL1 S3_0_C5_C4_2
+#define ERXADDR_EL1 S3_0_C5_C4_3
+#define ERXPFGF_EL1 S3_0_C5_C4_4
+#define ERXPFGCTL_EL1 S3_0_C5_C4_5
+#define ERXPFGCDN_EL1 S3_0_C5_C4_6
+#define ERXMISC0_EL1 S3_0_C5_C5_0
+#define ERXMISC1_EL1 S3_0_C5_C5_1
+
+#define ERXCTLR_ED_SHIFT U(0)
+#define ERXCTLR_ED_BIT (U(1) << ERXCTLR_ED_SHIFT)
+#define ERXCTLR_UE_BIT (U(1) << 4)
+
+#define ERXPFGCTL_UC_BIT (U(1) << 1)
+#define ERXPFGCTL_UEU_BIT (U(1) << 2)
+#define ERXPFGCTL_CDEN_BIT (U(1) << 31)
+
+/*******************************************************************************
+ * Armv8.3 Pointer Authentication Registers
+ ******************************************************************************/
+#define APIAKeyLo_EL1 S3_0_C2_C1_0
+#define APIAKeyHi_EL1 S3_0_C2_C1_1
+#define APIBKeyLo_EL1 S3_0_C2_C1_2
+#define APIBKeyHi_EL1 S3_0_C2_C1_3
+#define APDAKeyLo_EL1 S3_0_C2_C2_0
+#define APDAKeyHi_EL1 S3_0_C2_C2_1
+#define APDBKeyLo_EL1 S3_0_C2_C2_2
+#define APDBKeyHi_EL1 S3_0_C2_C2_3
+#define APGAKeyLo_EL1 S3_0_C2_C3_0
+#define APGAKeyHi_EL1 S3_0_C2_C3_1
+
+/*******************************************************************************
+ * Armv8.4 Data Independent Timing Registers
+ ******************************************************************************/
+#define DIT S3_3_C4_C2_5
+#define DIT_BIT BIT(24)
+
+/*******************************************************************************
+ * Armv8.5 - new MSR encoding to directly access PSTATE.SSBS field
+ ******************************************************************************/
+#define SSBS S3_3_C4_C2_6
+
+/*******************************************************************************
+ * Armv8.5 - Memory Tagging Extension Registers
+ ******************************************************************************/
+#define TFSRE0_EL1 S3_0_C5_C6_1
+#define TFSR_EL1 S3_0_C5_C6_0
+#define RGSR_EL1 S3_0_C1_C0_5
+#define GCR_EL1 S3_0_C1_C0_6
+
+/*******************************************************************************
+ * FEAT_HCX - Extended Hypervisor Configuration Register
+ ******************************************************************************/
+#define HCRX_EL2 S3_4_C1_C2_2
+#define HCRX_EL2_FGTnXS_BIT (UL(1) << 4)
+#define HCRX_EL2_FnXS_BIT (UL(1) << 3)
+#define HCRX_EL2_EnASR_BIT (UL(1) << 2)
+#define HCRX_EL2_EnALS_BIT (UL(1) << 1)
+#define HCRX_EL2_EnAS0_BIT (UL(1) << 0)
+
+/*******************************************************************************
+ * Definitions for DynamicIQ Shared Unit registers
+ ******************************************************************************/
+#define CLUSTERPWRDN_EL1 S3_0_c15_c3_6
+
+/* CLUSTERPWRDN_EL1 register definitions */
+#define DSU_CLUSTER_PWR_OFF 0
+#define DSU_CLUSTER_PWR_ON 1
+#define DSU_CLUSTER_PWR_MASK U(1)
+
+/*******************************************************************************
+ * Definitions for CPU Power/Performance Management registers
+ ******************************************************************************/
+
+#define CPUPPMCR_EL3 S3_6_C15_C2_0
+#define CPUPPMCR_EL3_MPMMPINCTL_SHIFT UINT64_C(0)
+#define CPUPPMCR_EL3_MPMMPINCTL_MASK UINT64_C(0x1)
+
+#define CPUMPMMCR_EL3 S3_6_C15_C2_1
+#define CPUMPMMCR_EL3_MPMM_EN_SHIFT UINT64_C(0)
+#define CPUMPMMCR_EL3_MPMM_EN_MASK UINT64_C(0x1)
+
+#endif /* ARCH_H */
diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h
new file mode 100644
index 0000000..932e885
--- /dev/null
+++ b/include/arch/aarch64/arch_features.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_FEATURES_H
+#define ARCH_FEATURES_H
+
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+
+static inline bool is_armv7_gentimer_present(void)
+{
+ /* The Generic Timer is always present in an ARMv8-A implementation */
+ return true;
+}
+
+static inline bool is_armv8_1_pan_present(void)
+{
+ return ((read_id_aa64mmfr1_el1() >> ID_AA64MMFR1_EL1_PAN_SHIFT) &
+ ID_AA64MMFR1_EL1_PAN_MASK) != 0U;
+}
+
+static inline bool is_armv8_1_vhe_present(void)
+{
+ return ((read_id_aa64mmfr1_el1() >> ID_AA64MMFR1_EL1_VHE_SHIFT) &
+ ID_AA64MMFR1_EL1_VHE_MASK) != 0U;
+}
+
+static inline bool is_armv8_2_ttcnp_present(void)
+{
+ return ((read_id_aa64mmfr2_el1() >> ID_AA64MMFR2_EL1_CNP_SHIFT) &
+ ID_AA64MMFR2_EL1_CNP_MASK) != 0U;
+}
+
+static inline bool is_feat_pacqarma3_present(void)
+{
+ uint64_t mask_id_aa64isar2 =
+ (ID_AA64ISAR2_GPA3_MASK << ID_AA64ISAR2_GPA3_SHIFT) |
+ (ID_AA64ISAR2_APA3_MASK << ID_AA64ISAR2_APA3_SHIFT);
+
+	/* If any of the fields is not zero, the QARMA3 algorithm is present */
+ return (read_id_aa64isar2_el1() & mask_id_aa64isar2) != 0U;
+}
+
+static inline bool is_armv8_3_pauth_present(void)
+{
+ uint64_t mask_id_aa64isar1 =
+ (ID_AA64ISAR1_GPI_MASK << ID_AA64ISAR1_GPI_SHIFT) |
+ (ID_AA64ISAR1_GPA_MASK << ID_AA64ISAR1_GPA_SHIFT) |
+ (ID_AA64ISAR1_API_MASK << ID_AA64ISAR1_API_SHIFT) |
+ (ID_AA64ISAR1_APA_MASK << ID_AA64ISAR1_APA_SHIFT);
+
+ /*
+ * If any of the fields is not zero or QARMA3 is present,
+ * PAuth is present
+ */
+ return ((read_id_aa64isar1_el1() & mask_id_aa64isar1) != 0U ||
+ is_feat_pacqarma3_present());
+}
+
+static inline bool is_armv8_4_dit_present(void)
+{
+ return ((read_id_aa64pfr0_el1() >> ID_AA64PFR0_DIT_SHIFT) &
+ ID_AA64PFR0_DIT_MASK) == 1U;
+}
+
+static inline bool is_armv8_4_ttst_present(void)
+{
+ return ((read_id_aa64mmfr2_el1() >> ID_AA64MMFR2_EL1_ST_SHIFT) &
+ ID_AA64MMFR2_EL1_ST_MASK) == 1U;
+}
+
+static inline bool is_armv8_5_bti_present(void)
+{
+ return ((read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_BT_SHIFT) &
+ ID_AA64PFR1_EL1_BT_MASK) == BTI_IMPLEMENTED;
+}
+
+static inline unsigned int get_armv8_5_mte_support(void)
+{
+ return ((read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_MTE_SHIFT) &
+ ID_AA64PFR1_EL1_MTE_MASK);
+}
+
+static inline bool is_armv8_4_sel2_present(void)
+{
+ return ((read_id_aa64pfr0_el1() >> ID_AA64PFR0_SEL2_SHIFT) &
+ ID_AA64PFR0_SEL2_MASK) == 1ULL;
+}
+
+static inline bool is_armv8_6_twed_present(void)
+{
+ return (((read_id_aa64mmfr1_el1() >> ID_AA64MMFR1_EL1_TWED_SHIFT) &
+ ID_AA64MMFR1_EL1_TWED_MASK) == ID_AA64MMFR1_EL1_TWED_SUPPORTED);
+}
+
+static inline bool is_armv8_6_fgt_present(void)
+{
+ return ((read_id_aa64mmfr0_el1() >> ID_AA64MMFR0_EL1_FGT_SHIFT) &
+ ID_AA64MMFR0_EL1_FGT_MASK) != 0U;
+}
+
+static inline unsigned long int get_armv8_6_ecv_support(void)
+{
+ return ((read_id_aa64mmfr0_el1() >> ID_AA64MMFR0_EL1_ECV_SHIFT) &
+ ID_AA64MMFR0_EL1_ECV_MASK);
+}
+
+static inline bool is_armv8_5_rng_present(void)
+{
+ return ((read_id_aa64isar0_el1() >> ID_AA64ISAR0_RNDR_SHIFT) &
+ ID_AA64ISAR0_RNDR_MASK);
+}
+
+static inline bool is_armv8_6_feat_amuv1p1_present(void)
+{
+ return (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
+ ID_AA64PFR0_AMU_MASK) >= ID_AA64PFR0_AMU_V1P1);
+}
+
+/*
+ * Return MPAM version:
+ *
+ * 0x00: None Armv8.0 or later
+ * 0x01: v0.1 Armv8.4 or later
+ * 0x10: v1.0 Armv8.2 or later
+ * 0x11: v1.1 Armv8.4 or later
+ *
+ */
+static inline unsigned int get_mpam_version(void)
+{
+ return (unsigned int)((((read_id_aa64pfr0_el1() >>
+ ID_AA64PFR0_MPAM_SHIFT) & ID_AA64PFR0_MPAM_MASK) << 4) |
+ ((read_id_aa64pfr1_el1() >>
+ ID_AA64PFR1_MPAM_FRAC_SHIFT) & ID_AA64PFR1_MPAM_FRAC_MASK));
+}
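/*
 * Illustrative usage (editor's sketch, not part of the upstream header):
 * with the encoding above, any non-zero return value means some version of
 * MPAM is implemented.
 */
static inline bool example_is_mpam_present(void)
{
	return get_mpam_version() != 0U;
}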
+
+static inline bool is_feat_hcx_present(void)
+{
+ return (((read_id_aa64mmfr1_el1() >> ID_AA64MMFR1_EL1_HCX_SHIFT) &
+ ID_AA64MMFR1_EL1_HCX_MASK) == ID_AA64MMFR1_EL1_HCX_SUPPORTED);
+}
+
+static inline bool is_feat_rng_trap_present(void)
+{
+ return (((read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_RNDR_TRAP_SHIFT) &
+ ID_AA64PFR1_EL1_RNDR_TRAP_MASK)
+ == ID_AA64PFR1_EL1_RNG_TRAP_SUPPORTED);
+}
+
+static inline unsigned int get_armv9_2_feat_rme_support(void)
+{
+	/*
+	 * Return the RME version, or zero if RME is not supported. The return
+	 * value can therefore be used both as the RME version number and,
+	 * when compared against zero, as a check for RME presence.
+	 */
+ return (unsigned int)(read_id_aa64pfr0_el1() >>
+ ID_AA64PFR0_FEAT_RME_SHIFT) & ID_AA64PFR0_FEAT_RME_MASK;
+}
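/*
 * Illustrative usage (editor's sketch, not part of the upstream header): the
 * return value doubles as a presence check, as the comment above notes.
 */
static inline bool example_is_feat_rme_present(void)
{
	return get_armv9_2_feat_rme_support() != 0U;
}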
+
+/*********************************************************************************
+ * Function to identify the presence of FEAT_SB (Speculation Barrier Instruction)
+ ********************************************************************************/
+static inline bool is_armv8_0_feat_sb_present(void)
+{
+ return (((read_id_aa64isar1_el1() >> ID_AA64ISAR1_SB_SHIFT) &
+ ID_AA64ISAR1_SB_MASK) == ID_AA64ISAR1_SB_SUPPORTED);
+}
+
+/*********************************************************************************
+ * Function to identify the presence of FEAT_CSV2_2 (Cache Speculation Variant 2)
+ ********************************************************************************/
+static inline bool is_armv8_0_feat_csv2_2_present(void)
+{
+ return (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_CSV2_SHIFT) &
+ ID_AA64PFR0_CSV2_MASK) == ID_AA64PFR0_CSV2_2_SUPPORTED);
+}
+
+/**********************************************************************************
+ * Function to identify the presence of FEAT_SPE (Statistical Profiling Extension)
+ *********************************************************************************/
+static inline bool is_armv8_2_feat_spe_present(void)
+{
+ return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT) &
+ ID_AA64DFR0_PMS_MASK) != ID_AA64DFR0_SPE_NOT_SUPPORTED);
+}
+
+/*******************************************************************************
+ * Function to identify the presence of FEAT_SVE (Scalable Vector Extension)
+ ******************************************************************************/
+static inline bool is_armv8_2_feat_sve_present(void)
+{
+ return (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT) &
+ ID_AA64PFR0_SVE_MASK) == ID_AA64PFR0_SVE_SUPPORTED);
+}
+
+/*******************************************************************************
+ * Function to identify the presence of FEAT_RAS (Reliability, Availability,
+ * and Serviceability Extension)
+ ******************************************************************************/
+static inline bool is_armv8_2_feat_ras_present(void)
+{
+ return (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_RAS_SHIFT) &
+ ID_AA64PFR0_RAS_MASK) != ID_AA64PFR0_RAS_NOT_SUPPORTED);
+}
+
+/**************************************************************************
+ * Function to identify the presence of FEAT_DIT (Data Independent Timing)
+ *************************************************************************/
+static inline bool is_armv8_4_feat_dit_present(void)
+{
+ return (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_DIT_SHIFT) &
+ ID_AA64PFR0_DIT_MASK) == ID_AA64PFR0_DIT_SUPPORTED);
+}
+
+/*************************************************************************
+ * Function to identify the presence of FEAT_TRF (Trace Filter Control)
+ ************************************************************************/
+static inline bool is_arm8_4_feat_trf_present(void)
+{
+ return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_TRACEFILT_SHIFT) &
+ ID_AA64DFR0_TRACEFILT_MASK) == ID_AA64DFR0_TRACEFILT_SUPPORTED);
+}
+
+/*******************************************************************************
+ * Function to identify the presence of FEAT_AMUv1 (Activity Monitors-
+ * Extension v1)
+ ******************************************************************************/
+static inline bool is_armv8_4_feat_amuv1_present(void)
+{
+ return (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
+ ID_AA64PFR0_AMU_MASK) >= ID_AA64PFR0_AMU_V1);
+}
+
+/********************************************************************************
+ * Function to identify the presence of FEAT_NV2 (Enhanced Nested Virtualization
+ * Support)
+ *******************************************************************************/
+static inline unsigned int get_armv8_4_feat_nv_support(void)
+{
+ return (((read_id_aa64mmfr2_el1() >> ID_AA64MMFR2_EL1_NV_SHIFT) &
+ ID_AA64MMFR2_EL1_NV_MASK));
+}
+
+/*******************************************************************************
+ * Function to identify the presence of FEAT_BRBE (Branch Record Buffer
+ * Extension)
+ ******************************************************************************/
+static inline bool is_feat_brbe_present(void)
+{
+ return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_BRBE_SHIFT) &
+ ID_AA64DFR0_BRBE_MASK) == ID_AA64DFR0_BRBE_SUPPORTED);
+}
+
+/*******************************************************************************
+ * Function to identify the presence of FEAT_TRBE (Trace Buffer Extension)
+ ******************************************************************************/
+static inline bool is_feat_trbe_present(void)
+{
+ return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_TRACEBUFFER_SHIFT) &
+ ID_AA64DFR0_TRACEBUFFER_MASK) == ID_AA64DFR0_TRACEBUFFER_SUPPORTED);
+}
+
+#endif /* ARCH_FEATURES_H */
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
new file mode 100644
index 0000000..50a5ad4
--- /dev/null
+++ b/include/arch/aarch64/arch_helpers.h
@@ -0,0 +1,671 @@
+/*
+ * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_HELPERS_H
+#define ARCH_HELPERS_H
+
+#include <cdefs.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch.h>
+
+/**********************************************************************
+ * Macros which create inline functions to read or write CPU system
+ * registers
+ *********************************************************************/
+
+#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name) \
+static inline u_register_t read_ ## _name(void) \
+{ \
+ u_register_t v; \
+ __asm__ volatile ("mrs %0, " #_reg_name : "=r" (v)); \
+ return v; \
+}
+
+#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name) \
+static inline void write_ ## _name(u_register_t v) \
+{ \
+ __asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v)); \
+}
+
+#define SYSREG_WRITE_CONST(reg_name, v) \
+ __asm__ volatile ("msr " #reg_name ", %0" : : "i" (v))
+
+/* Define read function for system register */
+#define DEFINE_SYSREG_READ_FUNC(_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _name)
+
+/* Define read & write function for system register */
+#define DEFINE_SYSREG_RW_FUNCS(_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _name)
+
+/* Define read & write function for renamed system register */
+#define DEFINE_RENAME_SYSREG_RW_FUNCS(_name, _reg_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _reg_name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
+
+/* Define read function for renamed system register */
+#define DEFINE_RENAME_SYSREG_READ_FUNC(_name, _reg_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)
+
+/* Define write function for renamed system register */
+#define DEFINE_RENAME_SYSREG_WRITE_FUNC(_name, _reg_name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
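+
+/*
+ * Illustrative note (not part of the original interface): a single
+ * DEFINE_SYSREG_RW_FUNCS(sctlr_el1) invocation expands to the
+ * read_sctlr_el1()/write_sctlr_el1() accessor pair, so callers can do,
+ * for example:
+ *
+ *	write_sctlr_el1(read_sctlr_el1() | SCTLR_I_BIT);
+ */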
+
+/**********************************************************************
+ * Macros to create inline functions for system instructions
+ *********************************************************************/
+
+/* Define function for simple system instruction */
+#define DEFINE_SYSOP_FUNC(_op) \
+static inline void _op(void) \
+{ \
+ __asm__ (#_op); \
+}
+
+/* Define function for system instruction with register parameter */
+#define DEFINE_SYSOP_PARAM_FUNC(_op) \
+static inline void _op(uint64_t v) \
+{ \
+ __asm__ (#_op " %0" : : "r" (v)); \
+}
+
+/* Define function for system instruction with type specifier */
+#define DEFINE_SYSOP_TYPE_FUNC(_op, _type) \
+static inline void _op ## _type(void) \
+{ \
+ __asm__ (#_op " " #_type : : : "memory"); \
+}
+
+/* Define function for system instruction with register parameter */
+#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type) \
+static inline void _op ## _type(uint64_t v) \
+{ \
+ __asm__ (#_op " " #_type ", %0" : : "r" (v)); \
+}
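+
+/*
+ * Illustrative note (not part of the original interface): for instance,
+ * DEFINE_SYSOP_TYPE_FUNC(dsb, ish) creates a dsbish() helper that emits a
+ * "dsb ish" instruction, and DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae2is)
+ * creates tlbivae2is(v), which emits "tlbi vae2is, <reg>" with the given
+ * virtual address in the register operand.
+ */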
+
+/*******************************************************************************
+ * TLB maintenance accessor prototypes
+ ******************************************************************************/
+
+#if ERRATA_A57_813419 || ERRATA_A76_1286807
+/*
+ * Define function for TLBI instruction with type specifier that implements
+ * the workaround for errata 813419 of Cortex-A57 or errata 1286807 of
+ * Cortex-A76.
+ */
+#define DEFINE_TLBIOP_ERRATA_TYPE_FUNC(_type)\
+static inline void tlbi ## _type(void) \
+{ \
+ __asm__("tlbi " #_type "\n" \
+ "dsb ish\n" \
+ "tlbi " #_type); \
+}
+
+/*
+ * Define function for TLBI instruction with register parameter that implements
+ * the workaround for errata 813419 of Cortex-A57 or errata 1286807 of
+ * Cortex-A76.
+ */
+#define DEFINE_TLBIOP_ERRATA_TYPE_PARAM_FUNC(_type) \
+static inline void tlbi ## _type(uint64_t v) \
+{ \
+ __asm__("tlbi " #_type ", %0\n" \
+ "dsb ish\n" \
+ "tlbi " #_type ", %0" : : "r" (v)); \
+}
+#endif /* ERRATA_A57_813419 || ERRATA_A76_1286807 */
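+
+/*
+ * Illustrative note (not part of the original interface): with one of the
+ * above errata workarounds enabled, DEFINE_TLBIOP_ERRATA_TYPE_FUNC(alle3)
+ * produces a tlbialle3() helper that emits "tlbi alle3; dsb ish; tlbi alle3"
+ * instead of a single TLBI, repeating the invalidation after the DSB as the
+ * workaround requires.
+ */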
+
+#if ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319
+/*
+ * Define function for DC instruction with register parameter that enables
+ * the workaround for errata 819472, 824069 and 827319 of Cortex-A53.
+ */
+#define DEFINE_DCOP_ERRATA_A53_TYPE_PARAM_FUNC(_name, _type) \
+static inline void dc ## _name(uint64_t v) \
+{ \
+ __asm__("dc " #_type ", %0" : : "r" (v)); \
+}
+#endif /* ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319 */
+
+#if ERRATA_A57_813419
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2is)
+DEFINE_TLBIOP_ERRATA_TYPE_FUNC(alle3)
+DEFINE_TLBIOP_ERRATA_TYPE_FUNC(alle3is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, vmalle1)
+#elif ERRATA_A76_1286807
+DEFINE_TLBIOP_ERRATA_TYPE_FUNC(alle1)
+DEFINE_TLBIOP_ERRATA_TYPE_FUNC(alle1is)
+DEFINE_TLBIOP_ERRATA_TYPE_FUNC(alle2)
+DEFINE_TLBIOP_ERRATA_TYPE_FUNC(alle2is)
+DEFINE_TLBIOP_ERRATA_TYPE_FUNC(alle3)
+DEFINE_TLBIOP_ERRATA_TYPE_FUNC(alle3is)
+DEFINE_TLBIOP_ERRATA_TYPE_FUNC(vmalle1)
+#else
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, vmalle1)
+#endif
+
+#if ERRATA_A57_813419
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaae1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaale1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae2is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale2is)
+DEFINE_TLBIOP_ERRATA_TYPE_PARAM_FUNC(vae3is)
+DEFINE_TLBIOP_ERRATA_TYPE_PARAM_FUNC(vale3is)
+#elif ERRATA_A76_1286807
+DEFINE_TLBIOP_ERRATA_TYPE_PARAM_FUNC(vaae1is)
+DEFINE_TLBIOP_ERRATA_TYPE_PARAM_FUNC(vaale1is)
+DEFINE_TLBIOP_ERRATA_TYPE_PARAM_FUNC(vae2is)
+DEFINE_TLBIOP_ERRATA_TYPE_PARAM_FUNC(vale2is)
+DEFINE_TLBIOP_ERRATA_TYPE_PARAM_FUNC(vae3is)
+DEFINE_TLBIOP_ERRATA_TYPE_PARAM_FUNC(vale3is)
+#else
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaae1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaale1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae2is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale2is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae3is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale3is)
+#endif
+
+/*******************************************************************************
+ * Cache maintenance accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, isw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cisw)
+#if ERRATA_A53_827319
+DEFINE_DCOP_ERRATA_A53_TYPE_PARAM_FUNC(csw, cisw)
+#else
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, csw)
+#endif
+#if ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319
+DEFINE_DCOP_ERRATA_A53_TYPE_PARAM_FUNC(cvac, civac)
+#else
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvac)
+#endif
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, ivac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, civac)
+#if ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319
+DEFINE_DCOP_ERRATA_A53_TYPE_PARAM_FUNC(cvau, civac)
+#else
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvau)
+#endif
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, zva)
+
+/*******************************************************************************
+ * Address translation accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1w)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0w)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e1r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e2r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e3r)
+
+/*******************************************************************************
+ * Strip Pointer Authentication Code
+ ******************************************************************************/
+DEFINE_SYSOP_PARAM_FUNC(xpaci)
+
+void flush_dcache_range(uintptr_t addr, size_t size);
+void flush_dcache_to_popa_range(uintptr_t addr, size_t size);
+void clean_dcache_range(uintptr_t addr, size_t size);
+void inv_dcache_range(uintptr_t addr, size_t size);
+bool is_dcache_enabled(void);
+
+void dcsw_op_louis(u_register_t op_type);
+void dcsw_op_all(u_register_t op_type);
+
+void disable_mmu_el1(void);
+void disable_mmu_el3(void);
+void disable_mpu_el2(void);
+void disable_mmu_icache_el1(void);
+void disable_mmu_icache_el3(void);
+void disable_mpu_icache_el2(void);
+
+/*******************************************************************************
+ * Misc. accessor prototypes
+ ******************************************************************************/
+
+#define write_daifclr(val) SYSREG_WRITE_CONST(daifclr, val)
+#define write_daifset(val) SYSREG_WRITE_CONST(daifset, val)
+
+DEFINE_SYSREG_RW_FUNCS(par_el1)
+DEFINE_SYSREG_READ_FUNC(id_pfr1_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64isar0_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64isar1_el1)
+DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64isar2_el1, ID_AA64ISAR2_EL1)
+DEFINE_SYSREG_READ_FUNC(id_aa64pfr0_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64pfr1_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64dfr0_el1)
+DEFINE_SYSREG_READ_FUNC(id_afr0_el1)
+DEFINE_SYSREG_READ_FUNC(CurrentEl)
+DEFINE_SYSREG_READ_FUNC(ctr_el0)
+DEFINE_SYSREG_RW_FUNCS(daif)
+DEFINE_SYSREG_RW_FUNCS(spsr_el1)
+DEFINE_SYSREG_RW_FUNCS(spsr_el2)
+DEFINE_SYSREG_RW_FUNCS(spsr_el3)
+DEFINE_SYSREG_RW_FUNCS(elr_el1)
+DEFINE_SYSREG_RW_FUNCS(elr_el2)
+DEFINE_SYSREG_RW_FUNCS(elr_el3)
+DEFINE_SYSREG_RW_FUNCS(mdccsr_el0)
+DEFINE_SYSREG_RW_FUNCS(dbgdtrrx_el0)
+DEFINE_SYSREG_RW_FUNCS(dbgdtrtx_el0)
+DEFINE_SYSREG_RW_FUNCS(sp_el1)
+DEFINE_SYSREG_RW_FUNCS(sp_el2)
+
+DEFINE_SYSOP_FUNC(wfi)
+DEFINE_SYSOP_FUNC(wfe)
+DEFINE_SYSOP_FUNC(sev)
+DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, st)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, osh)
+DEFINE_SYSOP_TYPE_FUNC(dsb, nsh)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dsb, oshst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, oshld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, oshst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, osh)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nshld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nshst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nsh)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
+DEFINE_SYSOP_FUNC(isb)
+
+static inline void enable_irq(void)
+{
+ /*
+ * The compiler memory barrier will prevent the compiler from
+ * scheduling non-volatile memory access after the write to the
+ * register.
+ *
+ * This could happen if some initialization code issues non-volatile
+ * accesses to an area used by an interrupt handler, in the assumption
+ * that it is safe as the interrupts are disabled at the time it does
+ * that (according to program order). However, non-volatile accesses
+ * are not necessarily in program order relative to volatile inline
+ * assembly statements (and volatile accesses).
+ */
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_IRQ_BIT);
+ isb();
+}
+
+static inline void enable_fiq(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_FIQ_BIT);
+ isb();
+}
+
+static inline void enable_serror(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_ABT_BIT);
+ isb();
+}
+
+static inline void enable_debug_exceptions(void)
+{
+ COMPILER_BARRIER();
+ write_daifclr(DAIF_DBG_BIT);
+ isb();
+}
+
+static inline void disable_irq(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_IRQ_BIT);
+ isb();
+}
+
+static inline void disable_fiq(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_FIQ_BIT);
+ isb();
+}
+
+static inline void disable_serror(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_ABT_BIT);
+ isb();
+}
+
+static inline void disable_debug_exceptions(void)
+{
+ COMPILER_BARRIER();
+ write_daifset(DAIF_DBG_BIT);
+ isb();
+}
+
+void __dead2 smc(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
+ uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
+
+/*******************************************************************************
+ * System register accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSREG_READ_FUNC(midr_el1)
+DEFINE_SYSREG_READ_FUNC(mpidr_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64mmfr0_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64mmfr1_el1)
+
+DEFINE_SYSREG_RW_FUNCS(scr_el3)
+DEFINE_SYSREG_RW_FUNCS(hcr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(vbar_el1)
+DEFINE_SYSREG_RW_FUNCS(vbar_el2)
+DEFINE_SYSREG_RW_FUNCS(vbar_el3)
+
+DEFINE_SYSREG_RW_FUNCS(sctlr_el1)
+DEFINE_SYSREG_RW_FUNCS(sctlr_el2)
+DEFINE_SYSREG_RW_FUNCS(sctlr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(actlr_el1)
+DEFINE_SYSREG_RW_FUNCS(actlr_el2)
+DEFINE_SYSREG_RW_FUNCS(actlr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(esr_el1)
+DEFINE_SYSREG_RW_FUNCS(esr_el2)
+DEFINE_SYSREG_RW_FUNCS(esr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(afsr0_el1)
+DEFINE_SYSREG_RW_FUNCS(afsr0_el2)
+DEFINE_SYSREG_RW_FUNCS(afsr0_el3)
+
+DEFINE_SYSREG_RW_FUNCS(afsr1_el1)
+DEFINE_SYSREG_RW_FUNCS(afsr1_el2)
+DEFINE_SYSREG_RW_FUNCS(afsr1_el3)
+
+DEFINE_SYSREG_RW_FUNCS(far_el1)
+DEFINE_SYSREG_RW_FUNCS(far_el2)
+DEFINE_SYSREG_RW_FUNCS(far_el3)
+
+DEFINE_SYSREG_RW_FUNCS(mair_el1)
+DEFINE_SYSREG_RW_FUNCS(mair_el2)
+DEFINE_SYSREG_RW_FUNCS(mair_el3)
+
+DEFINE_SYSREG_RW_FUNCS(amair_el1)
+DEFINE_SYSREG_RW_FUNCS(amair_el2)
+DEFINE_SYSREG_RW_FUNCS(amair_el3)
+
+DEFINE_SYSREG_READ_FUNC(rvbar_el1)
+DEFINE_SYSREG_READ_FUNC(rvbar_el2)
+DEFINE_SYSREG_READ_FUNC(rvbar_el3)
+
+DEFINE_SYSREG_RW_FUNCS(rmr_el1)
+DEFINE_SYSREG_RW_FUNCS(rmr_el2)
+DEFINE_SYSREG_RW_FUNCS(rmr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(tcr_el1)
+DEFINE_SYSREG_RW_FUNCS(tcr_el2)
+DEFINE_SYSREG_RW_FUNCS(tcr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el1)
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el2)
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el3)
+
+DEFINE_SYSREG_RW_FUNCS(ttbr1_el1)
+
+DEFINE_SYSREG_RW_FUNCS(vttbr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(cptr_el2)
+DEFINE_SYSREG_RW_FUNCS(cptr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(cpacr_el1)
+DEFINE_SYSREG_RW_FUNCS(cntfrq_el0)
+DEFINE_SYSREG_RW_FUNCS(cnthp_ctl_el2)
+DEFINE_SYSREG_RW_FUNCS(cnthp_tval_el2)
+DEFINE_SYSREG_RW_FUNCS(cnthp_cval_el2)
+DEFINE_SYSREG_RW_FUNCS(cntps_ctl_el1)
+DEFINE_SYSREG_RW_FUNCS(cntps_tval_el1)
+DEFINE_SYSREG_RW_FUNCS(cntps_cval_el1)
+DEFINE_SYSREG_RW_FUNCS(cntp_ctl_el0)
+DEFINE_SYSREG_RW_FUNCS(cntp_tval_el0)
+DEFINE_SYSREG_RW_FUNCS(cntp_cval_el0)
+DEFINE_SYSREG_READ_FUNC(cntpct_el0)
+DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
+
+DEFINE_SYSREG_RW_FUNCS(vtcr_el2)
+
+#define get_cntp_ctl_enable(x) (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
+ CNTP_CTL_ENABLE_MASK)
+#define get_cntp_ctl_imask(x) (((x) >> CNTP_CTL_IMASK_SHIFT) & \
+ CNTP_CTL_IMASK_MASK)
+#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
+ CNTP_CTL_ISTATUS_MASK)
+
+#define set_cntp_ctl_enable(x) ((x) |= (U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define set_cntp_ctl_imask(x) ((x) |= (U(1) << CNTP_CTL_IMASK_SHIFT))
+
+#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
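+
+/*
+ * Illustrative sketch (not part of the original interface): a caller could
+ * unmask and enable the EL0 physical timer with the helpers above, e.g.:
+ *
+ *	u_register_t ctl = read_cntp_ctl_el0();
+ *
+ *	clr_cntp_ctl_imask(ctl);
+ *	set_cntp_ctl_enable(ctl);
+ *	write_cntp_ctl_el0(ctl);
+ */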
+
+DEFINE_SYSREG_RW_FUNCS(tpidr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(cntvoff_el2)
+
+DEFINE_SYSREG_RW_FUNCS(vpidr_el2)
+DEFINE_SYSREG_RW_FUNCS(vmpidr_el2)
+
+DEFINE_SYSREG_READ_FUNC(isr_el1)
+
+DEFINE_SYSREG_RW_FUNCS(mdcr_el2)
+DEFINE_SYSREG_RW_FUNCS(mdcr_el3)
+DEFINE_SYSREG_RW_FUNCS(hstr_el2)
+DEFINE_SYSREG_RW_FUNCS(pmcr_el0)
+
+/* GICv3 System Registers */
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el1, ICC_SRE_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el2, ICC_SRE_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el3, ICC_SRE_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_pmr_el1, ICC_PMR_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_rpr_el1, ICC_RPR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen1_el3, ICC_IGRPEN1_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen1_el1, ICC_IGRPEN1_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen0_el1, ICC_IGRPEN0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_hppir0_el1, ICC_HPPIR0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_hppir1_el1, ICC_HPPIR1_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_iar0_el1, ICC_IAR0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_iar1_el1, ICC_IAR1_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir0_el1, ICC_EOIR0_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_sgi0r_el1, ICC_SGI0R_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sgi1r, ICC_SGI1R)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_asgi1r, ICC_ASGI1R)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(amcfgr_el0, AMCFGR_EL0)
+DEFINE_RENAME_SYSREG_READ_FUNC(amcgcr_el0, AMCGCR_EL0)
+DEFINE_RENAME_SYSREG_READ_FUNC(amcg1idr_el0, AMCG1IDR_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcr_el0, AMCR_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr0_el0, AMCNTENCLR0_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset1_el0, AMCNTENSET1_EL0)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(mpamidr_el1, MPAMIDR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpam3_el3, MPAM3_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpam2_el2, MPAM2_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamhcr_el2, MPAMHCR_EL2)
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmblimitr_el1, PMBLIMITR_EL1)
+
+DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el2, ZCR_EL2)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64smfr0_el1, ID_AA64SMFR0_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(smcr_el3, SMCR_EL3)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(erridr_el1, ERRIDR_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(errselr_el1, ERRSELR_EL1)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(erxfr_el1, ERXFR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxctlr_el1, ERXCTLR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxstatus_el1, ERXSTATUS_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxaddr_el1, ERXADDR_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc0_el1, ERXMISC0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc1_el1, ERXMISC1_EL1)
+
+/* Armv8.2 Registers */
+DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64mmfr2_el1, ID_AA64MMFR2_EL1)
+
+/* Armv8.3 Pointer Authentication Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(apiakeyhi_el1, APIAKeyHi_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(apiakeylo_el1, APIAKeyLo_EL1)
+
+/* Armv8.4 Data Independent Timing Register */
+DEFINE_RENAME_SYSREG_RW_FUNCS(dit, DIT)
+
+/* Armv8.5 MTE Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(tfsre0_el1, TFSRE0_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(tfsr_el1, TFSR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(rgsr_el1, RGSR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(gcr_el1, GCR_EL1)
+
+/* Armv8.5 FEAT_RNG Registers */
+DEFINE_SYSREG_READ_FUNC(rndr)
+DEFINE_SYSREG_READ_FUNC(rndrrs)
+
+/* FEAT_HCX Register */
+DEFINE_RENAME_SYSREG_RW_FUNCS(hcrx_el2, HCRX_EL2)
+
+/* DynamIQ Shared Unit power management */
+DEFINE_RENAME_SYSREG_RW_FUNCS(clusterpwrdn_el1, CLUSTERPWRDN_EL1)
+
+/* CPU Power/Performance Management registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(cpuppmcr_el3, CPUPPMCR_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(cpumpmmcr_el3, CPUMPMMCR_EL3)
+
+/* Armv9.2 RME Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(gptbr_el3, GPTBR_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(gpccr_el3, GPCCR_EL3)
+
+#define IS_IN_EL(x) \
+ (GET_EL(read_CurrentEl()) == MODE_EL##x)
+
+#define IS_IN_EL1() IS_IN_EL(1)
+#define IS_IN_EL2() IS_IN_EL(2)
+#define IS_IN_EL3() IS_IN_EL(3)
+
+static inline unsigned int get_current_el(void)
+{
+ return GET_EL(read_CurrentEl());
+}
+
+static inline unsigned int get_current_el_maybe_constant(void)
+{
+#if defined(IMAGE_AT_EL1)
+ return 1;
+#elif defined(IMAGE_AT_EL2)
+ return 2; /* no use-case in TF-A */
+#elif defined(IMAGE_AT_EL3)
+ return 3;
+#else
+ /*
+ * If we do not know which exception level this is being built for
+ * (e.g. when built as a library), fall back to run-time detection.
+ */
+ return get_current_el();
+#endif
+}
+
+/*
+ * Check if an EL is implemented from AA64PFR0 register fields.
+ */
+static inline uint64_t el_implemented(unsigned int el)
+{
+ if (el > 3U) {
+ return EL_IMPL_NONE;
+ } else {
+ unsigned int shift = ID_AA64PFR0_EL1_SHIFT * el;
+
+ return (read_id_aa64pfr0_el1() >> shift) & ID_AA64PFR0_ELX_MASK;
+ }
+}
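+
+/*
+ * Illustrative sketch (not part of the original interface): callers can gate
+ * EL2-specific setup on the result, e.g.:
+ *
+ *	if (el_implemented(2U) != EL_IMPL_NONE) {
+ *		write_hcr_el2(HCR_RESET_VAL);
+ *	}
+ *
+ * where HCR_RESET_VAL is the reset value defined in arch.h.
+ */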
+
+/*
+ * TLBIPAALLOS instruction
+ * (TLB Invalidate GPT Information by PA,
+ * All Entries, Outer Shareable)
+ */
+static inline void tlbipaallos(void)
+{
+ __asm__("SYS #6,c8,c1,#4");
+}
+
+/*
+ * Invalidate TLBs of GPT entries by Physical address, last level.
+ *
+ * @pa: the starting address of the range to invalidate
+ * @size: the size of the range to invalidate
+ */
+void gpt_tlbi_by_pa_ll(uint64_t pa, size_t size);
+
+
+/* Previously defined accessor functions with incomplete register names */
+
+#define read_current_el() read_CurrentEl()
+
+#define dsb() dsbsy()
+
+#define read_midr() read_midr_el1()
+
+#define read_mpidr() read_mpidr_el1()
+
+#define read_scr() read_scr_el3()
+#define write_scr(_v) write_scr_el3(_v)
+
+#define read_hcr() read_hcr_el2()
+#define write_hcr(_v) write_hcr_el2(_v)
+
+#define read_cpacr() read_cpacr_el1()
+#define write_cpacr(_v) write_cpacr_el1(_v)
+
+#define read_clusterpwrdn() read_clusterpwrdn_el1()
+#define write_clusterpwrdn(_v) write_clusterpwrdn_el1(_v)
+
+#if ERRATA_SPECULATIVE_AT
+/*
+ * Assuming the SCTLR_EL1.M bit is already set
+ * 1. Enable page table walk by clearing TCR_EL1.EPDx bits
+ * 2. Execute AT instruction for lower EL1/0
+ * 3. Disable page table walk by setting TCR_EL1.EPDx bits
+ */
+#define AT(_at_inst, _va) \
+{ \
+ assert((read_sctlr_el1() & SCTLR_M_BIT) != 0ULL); \
+ write_tcr_el1(read_tcr_el1() & ~(TCR_EPD0_BIT | TCR_EPD1_BIT)); \
+ isb(); \
+ _at_inst(_va); \
+ write_tcr_el1(read_tcr_el1() | (TCR_EPD0_BIT | TCR_EPD1_BIT)); \
+ isb(); \
+}
+#else
+#define AT(_at_inst, _va) _at_inst(_va);
+#endif
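+
+/*
+ * Illustrative sketch (not part of the original interface): a stage 1 EL1
+ * read translation with the speculative-AT workaround applied when
+ * ERRATA_SPECULATIVE_AT is enabled could look like:
+ *
+ *	AT(ats1e1r, va);
+ *	isb();
+ *	par = read_par_el1();
+ */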
+
+#endif /* ARCH_HELPERS_H */
diff --git a/include/arch/aarch64/asm_macros.S b/include/arch/aarch64/asm_macros.S
new file mode 100644
index 0000000..66c39e5
--- /dev/null
+++ b/include/arch/aarch64/asm_macros.S
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef ASM_MACROS_S
+#define ASM_MACROS_S
+
+#include <arch.h>
+#include <common/asm_macros_common.S>
+#include <lib/spinlock.h>
+
+/*
+ * TLBI instruction with type specifier that implements the workaround for
+ * errata 813419 of Cortex-A57 or errata 1286807 of Cortex-A76.
+ */
+#if ERRATA_A57_813419 || ERRATA_A76_1286807
+#define TLB_INVALIDATE(_type) \
+ tlbi _type; \
+ dsb ish; \
+ tlbi _type
+#else
+#define TLB_INVALIDATE(_type) \
+ tlbi _type
+#endif
+
+
+ .macro func_prologue
+ stp x29, x30, [sp, #-0x10]!
+ mov x29,sp
+ .endm
+
+ .macro func_epilogue
+ ldp x29, x30, [sp], #0x10
+ .endm
+
+
+ .macro dcache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ ubfx \tmp, \tmp, #16, #4
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+
+ .macro icache_line_size reg, tmp
+ mrs \tmp, ctr_el0
+ and \tmp, \tmp, #0xf
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
+ .endm
+
+
+ .macro smc_check label
+ mrs x0, esr_el3
+ ubfx x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+ cmp x0, #EC_AARCH64_SMC
	b.ne	\label
+ .endm
+
+ /*
+ * Declare the exception vector table, enforcing it is aligned on a
+ * 2KB boundary, as required by the ARMv8 architecture.
+ * Use zero bytes as the fill value to be stored in the padding bytes
+ * so that it inserts illegal AArch64 instructions. This increases
+ * security, robustness and potentially facilitates debugging.
+ */
+ .macro vector_base label, section_name=.vectors
+ .section \section_name, "ax"
+ .align 11, 0
+ \label:
+ .endm
+
+ /*
+ * Create an entry in the exception vector table, enforcing it is
+ * aligned on a 128-byte boundary, as required by the ARMv8 architecture.
+ * Use zero bytes as the fill value to be stored in the padding bytes
+ * so that it inserts illegal AArch64 instructions. This increases
+ * security, robustness and potentially facilitates debugging.
+ */
+ .macro vector_entry label, section_name=.vectors
+ .cfi_sections .debug_frame
+ .section \section_name, "ax"
+ .align 7, 0
+ .type \label, %function
+ .cfi_startproc
+ \label:
+ .endm
+
+ /*
	 * Pad the exception vector entry until it reaches its full size, which
	 * is always 32 instructions. If there are more than 32 instructions in the
+ * exception vector then an error is emitted.
+ */
+ .macro end_vector_entry label
+ .cfi_endproc
+ .fill \label + (32 * 4) - .
+ .endm
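+
+	/*
+	 * Illustrative sketch (not part of the original interface): a
+	 * hypothetical vector table using the macros above could be laid
+	 * out as
+	 *
+	 *	vector_base my_vectors
+	 *	vector_entry my_sync_exception
+	 *		b	.
+	 *	end_vector_entry my_sync_exception
+	 *
+	 * where "my_vectors" and "my_sync_exception" are placeholder names.
+	 */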
+
+ /*
+ * This macro calculates the base address of the current CPU's MP stack
+ * using the plat_my_core_pos() index, the name of the stack storage
+ * and the size of each stack
+ * Out: X0 = physical address of stack base
+ * Clobber: X30, X1, X2
+ */
+ .macro get_my_mp_stack _name, _size
+ bl plat_my_core_pos
+ adrp x2, (\_name + \_size)
+ add x2, x2, :lo12:(\_name + \_size)
+ mov x1, #\_size
+ madd x0, x0, x1, x2
+ .endm
+
+ /*
+ * This macro calculates the base address of a UP stack using the
+ * name of the stack storage and the size of the stack
+ * Out: X0 = physical address of stack base
+ */
+ .macro get_up_stack _name, _size
+ adrp x0, (\_name + \_size)
+ add x0, x0, :lo12:(\_name + \_size)
+ .endm
+
+ /*
	 * Helper macro to generate the best mov/movk combinations according to
	 * the value to be moved. The 16 bits from '_shift' are tested and,
+ * if not zero, they are moved into '_reg' without affecting
+ * other bits.
+ */
+ .macro _mov_imm16 _reg, _val, _shift
+ .if (\_val >> \_shift) & 0xffff
+ .if (\_val & (1 << \_shift - 1))
+ movk \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
+ .else
+ mov \_reg, \_val & (0xffff << \_shift)
+ .endif
+ .endif
+ .endm
+
+ /*
	 * Helper macro to load arbitrary values into 32 or 64-bit registers,
	 * generating the best mov/movk combinations. Many base addresses are
	 * 64KB aligned, so in that case the macro skips updating bits 15:0.
+ */
+ .macro mov_imm _reg, _val
+ .if (\_val) == 0
+ mov \_reg, #0
+ .else
+ _mov_imm16 \_reg, (\_val), 0
+ _mov_imm16 \_reg, (\_val), 16
+ _mov_imm16 \_reg, (\_val), 32
+ _mov_imm16 \_reg, (\_val), 48
+ .endif
+ .endm
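+
+	/*
+	 * Illustrative note (not part of the original interface): for example,
+	 *
+	 *	mov_imm	x0, 0x80001000
+	 *
+	 * only emits instructions for the non-zero 16-bit chunks of the value,
+	 * here a single "mov" for bits 15:0 plus one "movk" for bits 31:16.
+	 */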
+
+ /*
+ * Macro to mark instances where we're jumping to a function and don't
+ * expect a return. To provide the function being jumped to with
	 * additional information, we use the 'bl' instruction to jump rather than
+ * 'b'.
+ *
+ * Debuggers infer the location of a call from where LR points to, which
+ * is usually the instruction after 'bl'. If this macro expansion
+ * happens to be the last location in a function, that'll cause the LR
	 * to point to a location beyond the function, thereby misleading the
	 * debugger's back trace. We therefore insert a 'nop' after the function
	 * call for debug builds, unless the 'skip_nop' parameter is non-zero.
+ */
+ .macro no_ret _func:req, skip_nop=0
+ bl \_func
+#if DEBUG
+ .ifeq \skip_nop
+ nop
+ .endif
+#endif
+ .endm
+
+ /*
+ * Reserve space for a spin lock in assembly file.
+ */
+ .macro define_asm_spinlock _name:req
+ .align SPINLOCK_ASM_ALIGN
+ \_name:
+ .space SPINLOCK_ASM_SIZE
+ .endm
+
+#if RAS_EXTENSION
+ .macro esb
+ .inst 0xd503221f
+ .endm
+#endif
+
+ /*
+ * Helper macro to read system register value into x0
+ */
+ .macro read reg:req
+#if ENABLE_BTI
+ bti j
+#endif
+ mrs x0, \reg
+ ret
+ .endm
+
+ /*
+ * Helper macro to write value from x1 to system register
+ */
+ .macro write reg:req
+#if ENABLE_BTI
+ bti j
+#endif
+ msr \reg, x1
+ ret
+ .endm
+
+ /*
+ * Macro for using speculation barrier instruction introduced by
+ * FEAT_SB, if it's enabled.
+ */
+ .macro speculation_barrier
+#if ENABLE_FEAT_SB
+ sb
+#else
+ dsb sy
+ isb
+#endif
+ .endm
+
+ /*
+ * Macro for mitigating against speculative execution beyond ERET. Uses the
+ * speculation barrier instruction introduced by FEAT_SB, if it's enabled.
+ */
+ .macro exception_return
+ eret
+#if ENABLE_FEAT_SB
+ sb
+#else
+ dsb nsh
+ isb
+#endif
+ .endm
+
+#endif /* ASM_MACROS_S */
diff --git a/include/arch/aarch64/assert_macros.S b/include/arch/aarch64/assert_macros.S
new file mode 100644
index 0000000..06371c4
--- /dev/null
+++ b/include/arch/aarch64/assert_macros.S
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef ASSERT_MACROS_S
+#define ASSERT_MACROS_S
+
+ /*
+ * Assembler macro to enable asm_assert. Use this macro wherever
+ * assert is required in assembly. Please note that the macro makes
+ * use of label '300' to provide the logic and the caller
+ * should make sure that this label is not used to branch prior
+ * to calling this macro.
+ */
+#define ASM_ASSERT(_cc) \
+.ifndef .L_assert_filename ;\
+ .pushsection .rodata.str1.1, "aS" ;\
+ .L_assert_filename: ;\
+ .string __FILE__ ;\
+ .popsection ;\
+.endif ;\
+ b._cc 300f ;\
+ adr x0, .L_assert_filename ;\
+ mov x1, __LINE__ ;\
+ b asm_assert ;\
+300:
+
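+	/*
+	 * Illustrative sketch (not part of the original interface): a typical
+	 * use pairs a comparison with the macro, e.g.
+	 *
+	 *	cmp	x0, #ID_AA64PFR0_DIT_SUPPORTED
+	 *	ASM_ASSERT(eq)
+	 *
+	 * which branches to asm_assert with the file name in x0 and the line
+	 * number in x1 when the comparison fails.
+	 */
+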
+#endif /* ASSERT_MACROS_S */
diff --git a/include/arch/aarch64/console_macros.S b/include/arch/aarch64/console_macros.S
new file mode 100644
index 0000000..3285d85
--- /dev/null
+++ b/include/arch/aarch64/console_macros.S
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef CONSOLE_MACROS_S
+#define CONSOLE_MACROS_S
+
+#include <drivers/console.h>
+
+/*
+ * This macro encapsulates the common setup that has to be done at the end of
+ * a console driver's register function. It will register all of the driver's
+ * callbacks in the console_t structure and initialize the flags field (by
+ * default, consoles are enabled for the "boot" and "crash" states; this can be
+ * changed after registration with the console_set_scope() function). It ends
+ * with a tail call to console_register(), which returns directly to the caller.
+ * REQUIRES console_t pointer in x0 and a valid return address in x30.
+ */
+ .macro finish_console_register _driver, putc=0, getc=0, flush=0
+ /*
	 * If any of the callbacks is not specified or is set to 0, then the
	 * corresponding callback entry in console_t is set to 0.
+ */
+ .ifne \putc
+ adrp x1, console_\_driver\()_putc
+ add x1, x1, :lo12:console_\_driver\()_putc
+ str x1, [x0, #CONSOLE_T_PUTC]
+ .else
+ str xzr, [x0, #CONSOLE_T_PUTC]
+ .endif
+
+ .ifne \getc
+ adrp x1, console_\_driver\()_getc
+ add x1, x1, :lo12:console_\_driver\()_getc
+ str x1, [x0, #CONSOLE_T_GETC]
+ .else
+ str xzr, [x0, #CONSOLE_T_GETC]
+ .endif
+
+ .ifne \flush
+ adrp x1, console_\_driver\()_flush
+ add x1, x1, :lo12:console_\_driver\()_flush
+ str x1, [x0, #CONSOLE_T_FLUSH]
+ .else
+ str xzr, [x0, #CONSOLE_T_FLUSH]
+ .endif
+
+ mov x1, #(CONSOLE_FLAG_BOOT | CONSOLE_FLAG_CRASH)
+ str x1, [x0, #CONSOLE_T_FLAGS]
+ b console_register
+ .endm
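+
+/*
+ * Illustrative sketch (not part of the original interface): a hypothetical
+ * driver "foo" providing console_foo_putc and console_foo_flush callbacks but
+ * no getc would end its register function with:
+ *
+ *	finish_console_register foo putc=1, getc=0, flush=1
+ *
+ * with the console_t pointer in x0, as required above.
+ */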
+
+#endif /* CONSOLE_MACROS_S */
diff --git a/include/arch/aarch64/el2_common_macros.S b/include/arch/aarch64/el2_common_macros.S
new file mode 100644
index 0000000..7bf4806
--- /dev/null
+++ b/include/arch/aarch64/el2_common_macros.S
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef EL2_COMMON_MACROS_S
+#define EL2_COMMON_MACROS_S
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+#include <platform_def.h>
+
+ /*
+ * Helper macro to initialise system registers at EL2.
+ */
+ .macro el2_arch_init_common
+
+ /* ---------------------------------------------------------------------
+ * SCTLR_EL2 has already been initialised - read current value before
+ * modifying.
+ *
+ * SCTLR_EL2.I: Enable the instruction cache.
+ *
+ * SCTLR_EL2.SA: Enable Stack Alignment check. A SP alignment fault
+ * exception is generated if a load or store instruction executed at
+ * EL2 uses the SP as the base address and the SP is not aligned to a
+ * 16-byte boundary.
+ *
+ * SCTLR_EL2.A: Enable Alignment fault checking. All instructions that
+ * load or store one or more registers have an alignment check that the
+ * address being accessed is aligned to the size of the data element(s)
+ * being accessed.
+ * ---------------------------------------------------------------------
+ */
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+ mrs x0, sctlr_el2
+ orr x0, x0, x1
+ msr sctlr_el2, x0
+ isb
+
+ /* ---------------------------------------------------------------------
+ * Initialise HCR_EL2, setting all fields rather than relying on HW.
+ * All fields are architecturally UNKNOWN on reset. The following fields
+ * do not change during the TF lifetime. The remaining fields are set to
+ * zero here but are updated ahead of transitioning to a lower EL in the
+ * function cm_init_context_common().
+ *
+ * HCR_EL2.TWE: Set to zero so that execution of WFE instructions at
	 * EL2, EL1 and EL0 is not trapped to EL2.
+ *
+ * HCR_EL2.TWI: Set to zero so that execution of WFI instructions at
	 * EL2, EL1 and EL0 is not trapped to EL2.
+ *
+ * HCR_EL2.HCD: Set to zero to enable HVC calls at EL1 and above,
+ * from both Security states and both Execution states.
+ *
+ * HCR_EL2.TEA: Set to one to route External Aborts and SError
+ * Interrupts to EL2 when executing at any EL.
+ *
+ * HCR_EL2.{API,APK}: For Armv8.3 pointer authentication feature,
+ * disable traps to EL2 when accessing key registers or using
+ * pointer authentication instructions from lower ELs.
+ * ---------------------------------------------------------------------
+ */
+ mov_imm x0, ((HCR_RESET_VAL | HCR_TEA_BIT) \
+ & ~(HCR_TWE_BIT | HCR_TWI_BIT | HCR_HCD_BIT))
+#if CTX_INCLUDE_PAUTH_REGS
+ /*
+ * If the pointer authentication registers are saved during world
+ * switches, enable pointer authentication everywhere, as it is safe to
+ * do so.
+ */
+ orr x0, x0, #(HCR_API_BIT | HCR_APK_BIT)
+#endif /* CTX_INCLUDE_PAUTH_REGS */
+ msr hcr_el2, x0
+
+ /* ---------------------------------------------------------------------
+ * Initialise MDCR_EL2, setting all fields rather than relying on
+ * hw. Some fields are architecturally UNKNOWN on reset.
+ *
	 * MDCR_EL2.TDOSA: Set to zero so that EL1 System register
	 * accesses to the powerdown debug registers do not trap to EL2.
+ *
+ * MDCR_EL2.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
+ * debug registers, other than those registers that are controlled by
+ * MDCR_EL2.TDOSA.
+ *
+ * MDCR_EL2.TPM: Set to zero so that EL0, EL1, and EL2 System
+ * register accesses to all Performance Monitors registers do not trap
+ * to EL2.
+ *
+ * MDCR_EL2.HPMD: Set to zero so that event counting by the program-
+ * mable counters PMEVCNTR<n>_EL0 is prohibited in Secure state. If
+ * ARMv8.2 Debug is not implemented this bit does not have any effect
+ * on the counters unless there is support for the implementation
+ * defined authentication interface
+ * ExternalSecureNoninvasiveDebugEnabled().
+ * ---------------------------------------------------------------------
+ */
+ mov_imm x0, ((MDCR_EL2_RESET_VAL | \
+ MDCR_SPD32(MDCR_SPD32_DISABLE)) \
+ & ~(MDCR_EL2_HPMD | MDCR_TDOSA_BIT | \
+ MDCR_TDA_BIT | MDCR_TPM_BIT))
+
+ msr mdcr_el2, x0
+
+ /* ---------------------------------------------------------------------
+ * Initialise PMCR_EL0 setting all fields rather than relying
+ * on hw. Some fields are architecturally UNKNOWN on reset.
+ *
+ * PMCR_EL0.DP: Set to one so that the cycle counter,
+ * PMCCNTR_EL0 does not count when event counting is prohibited.
+ *
+ * PMCR_EL0.X: Set to zero to disable export of events.
+ *
+ * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
+ * counts on every clock cycle.
+ * ---------------------------------------------------------------------
+ */
+ mov_imm x0, ((PMCR_EL0_RESET_VAL | PMCR_EL0_DP_BIT) & \
+ ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT))
+
+ msr pmcr_el0, x0
+
+ /* ---------------------------------------------------------------------
+ * Enable External Aborts and SError Interrupts now that the exception
+ * vectors have been setup.
+ * ---------------------------------------------------------------------
+ */
+ msr daifclr, #DAIF_ABT_BIT
+
+ /* ---------------------------------------------------------------------
+ * Initialise CPTR_EL2, setting all fields rather than relying on hw.
+ * All fields are architecturally UNKNOWN on reset.
+ *
+ * CPTR_EL2.TCPAC: Set to zero so that any accesses to CPACR_EL1 do
+ * not trap to EL2.
+ *
+ * CPTR_EL2.TTA: Set to zero so that System register accesses to the
+ * trace registers do not trap to EL2.
+ *
+ * CPTR_EL2.TFP: Set to zero so that accesses to the V- or Z- registers
+ * by Advanced SIMD, floating-point or SVE instructions (if implemented)
+ * do not trap to EL2.
+ */
+
+ mov_imm x0, (CPTR_EL2_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
+ msr cptr_el2, x0
+
+ /*
+ * If Data Independent Timing (DIT) functionality is implemented,
+ * always enable DIT in EL2
+ */
+ mrs x0, id_aa64pfr0_el1
+ ubfx x0, x0, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
+ cmp x0, #ID_AA64PFR0_DIT_SUPPORTED
+ bne 1f
+ mov x0, #DIT_BIT
+ msr DIT, x0
+1:
+ .endm
+
+/* -----------------------------------------------------------------------------
+ * This is the superset of actions that need to be performed during a cold boot
+ * or a warm boot in EL2. This code is shared by BL1 and BL31.
+ *
+ * This macro will always perform reset handling, architectural initialisations
+ * and stack setup. The rest of the actions are optional because they might not
+ * be needed, depending on the context in which this macro is called. This is
+ * why this macro is parameterised; each parameter allows some actions to be
+ * enabled or disabled.
+ *
+ * _init_sctlr:
+ * Whether the macro needs to initialise SCTLR_EL2, including configuring
+ * the endianness of data accesses.
+ *
+ * _warm_boot_mailbox:
+ * Whether the macro needs to detect the type of boot (cold/warm). The
+ * detection is based on the platform entrypoint address: if it is zero
+ * then it is a cold boot, otherwise it is a warm boot. In the latter case,
+ * this macro jumps to the platform entrypoint address.
+ *
+ * _secondary_cold_boot:
+ * Whether the macro needs to identify the CPU that is calling it: primary
+ * CPU or secondary CPU. The primary CPU will be allowed to carry on with
+ * the platform initialisations, while the secondaries will be put in a
+ * platform-specific state in the meantime.
+ *
+ * If the caller knows this macro will only be called by the primary CPU
+ * then this parameter can be defined to 0 to skip this step.
+ *
+ * _init_memory:
+ * Whether the macro needs to initialise the memory.
+ *
+ * _init_c_runtime:
+ * Whether the macro needs to initialise the C runtime environment.
+ *
+ * _exception_vectors:
+ * Address of the exception vectors to program in the VBAR_EL2 register.
+ *
+ * _pie_fixup_size:
+ * Size of the memory region in which to fix up the Global Descriptor
+ * Table (GDT).
+ *
+ * A non-zero value is expected when the firmware needs the GDT to be fixed up.
+ *
+ * -----------------------------------------------------------------------------
+ */
+ .macro el2_entrypoint_common \
+ _init_sctlr, _warm_boot_mailbox, _secondary_cold_boot, \
+ _init_memory, _init_c_runtime, _exception_vectors, \
+ _pie_fixup_size
+
+ .if \_init_sctlr
+ /* -------------------------------------------------------------
+ * This is the initialisation of SCTLR_EL2 and so must ensure
+ * that all fields are explicitly set rather than relying on hw.
+ * Some fields reset to an IMPLEMENTATION DEFINED value and
+ * others are architecturally UNKNOWN on reset.
+ *
+ * SCTLR.EE: Set the CPU endianness before doing anything that
+ * might involve memory reads or writes. Set to zero to select
+ * Little Endian.
+ *
+ * SCTLR_EL2.WXN: For the EL2 translation regime, this field can
+ * force all memory regions that are writeable to be treated as
+ * XN (Execute-never). Set to zero so that this control has no
+ * effect on memory access permissions.
+ *
+ * SCTLR_EL2.SA: Set to zero to disable Stack Alignment check.
+ *
+ * SCTLR_EL2.A: Set to zero to disable Alignment fault checking.
+ *
+ * SCTLR.DSSBS: Set to zero to disable speculation store bypass
+ * safe behaviour upon exception entry to EL2.
+ * -------------------------------------------------------------
+ */
+ mov_imm x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
+ | SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
+ msr sctlr_el2, x0
+ isb
+ .endif /* _init_sctlr */
+
+#if DISABLE_MTPMU
+ bl mtpmu_disable
+#endif
+
+ .if \_warm_boot_mailbox
+ /* -------------------------------------------------------------
+ * This code will be executed for both warm and cold resets.
+ * Now is the time to distinguish between the two.
+ * Query the platform entrypoint address and if it is not zero
+ * then it means it is a warm boot so jump to this address.
+ * -------------------------------------------------------------
+ */
+ bl plat_get_my_entrypoint
+ cbz x0, do_cold_boot
+ br x0
+
+ do_cold_boot:
+ .endif /* _warm_boot_mailbox */
+
+ .if \_pie_fixup_size
+#if ENABLE_PIE
+ /*
+ * ------------------------------------------------------------
+ * If PIE is enabled, fix up the Global Descriptor Table only
+ * once, during the primary core cold boot path.
+ *
+ * The compile-time base address, required for the fixup, is
+ * calculated using the "pie_fixup" label present within the
+ * first page.
+ * ------------------------------------------------------------
+ */
+ pie_fixup:
+ ldr x0, =pie_fixup
+ and x0, x0, #~(PAGE_SIZE_MASK)
+ mov_imm x1, \_pie_fixup_size
+ add x1, x1, x0
+ bl fixup_gdt_reloc
+#endif /* ENABLE_PIE */
+ .endif /* _pie_fixup_size */
+
+ /* ---------------------------------------------------------------------
+ * Set the exception vectors.
+ * ---------------------------------------------------------------------
+ */
+ adr x0, \_exception_vectors
+ msr vbar_el2, x0
+ isb
+
+ /* ---------------------------------------------------------------------
+ * It is a cold boot.
+ * Perform any processor specific actions upon reset e.g. cache, TLB
+ * invalidations etc.
+ * ---------------------------------------------------------------------
+ */
+ bl reset_handler
+
+ el2_arch_init_common
+
+ .if \_secondary_cold_boot
+ /* -------------------------------------------------------------
+ * Check if this is a primary or secondary CPU cold boot.
+ * The primary CPU will set up the platform while the
+ * secondaries are placed in a platform-specific state until the
+ * primary CPU performs the necessary actions to bring them out
+ * of that state and allows entry into the OS.
+ * -------------------------------------------------------------
+ */
+ bl plat_is_my_cpu_primary
+ cbnz w0, do_primary_cold_boot
+
+ /* This is a cold boot on a secondary CPU */
+ bl plat_secondary_cold_boot_setup
+ /* plat_secondary_cold_boot_setup() is not supposed to return */
+ bl el2_panic
+ do_primary_cold_boot:
+ .endif /* _secondary_cold_boot */
+
+ /* ---------------------------------------------------------------------
+ * Initialize memory now. Secondary CPU initialization won't get to this
+ * point.
+ * ---------------------------------------------------------------------
+ */
+
+ .if \_init_memory
+ bl platform_mem_init
+ .endif /* _init_memory */
+
+ /* ---------------------------------------------------------------------
+ * Init C runtime environment:
+ * - Zero-initialise the NOBITS sections. There are 2 of them:
+ * - the .bss section;
+ * - the coherent memory section (if any).
+ * - Relocate the data section from ROM to RAM, if required.
+ * ---------------------------------------------------------------------
+ */
+ .if \_init_c_runtime
+ adrp x0, __BSS_START__
+ add x0, x0, :lo12:__BSS_START__
+
+ adrp x1, __BSS_END__
+ add x1, x1, :lo12:__BSS_END__
+ sub x1, x1, x0
+ bl zeromem
+
+#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
+ adrp x0, __DATA_RAM_START__
+ add x0, x0, :lo12:__DATA_RAM_START__
+ adrp x1, __DATA_ROM_START__
+ add x1, x1, :lo12:__DATA_ROM_START__
+ adrp x2, __DATA_RAM_END__
+ add x2, x2, :lo12:__DATA_RAM_END__
+ sub x2, x2, x0
+ bl memcpy16
+#endif
+ .endif /* _init_c_runtime */
+
+ /* ---------------------------------------------------------------------
+ * Use SP_EL0 for the C runtime stack.
+ * ---------------------------------------------------------------------
+ */
+ msr spsel, #0
+
+ /* ---------------------------------------------------------------------
+ * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
+ * the MMU is enabled. There is no risk of reading stale stack memory
+ * after enabling the MMU as only the primary CPU is running at the
+ * moment.
+ * ---------------------------------------------------------------------
+ */
+ bl plat_set_my_stack
+
+#if STACK_PROTECTOR_ENABLED
+ .if \_init_c_runtime
+ bl update_stack_protector_canary
+ .endif /* _init_c_runtime */
+#endif
+ .endm
+
+ .macro apply_at_speculative_wa
+#if ERRATA_SPECULATIVE_AT
+ /*
	 * Explicitly save x30 to free up a register and to enable
	 * branching, and also save x29, which will be used in the
	 * called function.
+ */
+ stp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ bl save_and_update_ptw_el1_sys_regs
+ ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+#endif
+ .endm
+
+ .macro restore_ptw_el1_sys_regs
+#if ERRATA_SPECULATIVE_AT
+ /* -----------------------------------------------------------
	 * In case of ERRATA_SPECULATIVE_AT, the order below must be
	 * followed to ensure that the page table walk is not enabled
	 * until all EL1 system registers have been restored. The
	 * TCR_EL1 register should be updated last, restoring the
	 * previous stage 1 page table walk setting (the TCR_EL1.EPDx
	 * bits). The ISBs ensure that the CPU performs the steps below
	 * in order.
+ *
+ * 1. Ensure all other system registers are written before
+ * updating SCTLR_EL1 using ISB.
+ * 2. Restore SCTLR_EL1 register.
+ * 3. Ensure SCTLR_EL1 written successfully using ISB.
+ * 4. Restore TCR_EL1 register.
+ * -----------------------------------------------------------
+ */
+ isb
+ ldp x28, x29, [sp, #CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1]
+ msr sctlr_el1, x28
+ isb
+ msr tcr_el1, x29
+#endif
+ .endm
+
+#endif /* EL2_COMMON_MACROS_S */
diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S
new file mode 100644
index 0000000..de2b931
--- /dev/null
+++ b/include/arch/aarch64/el3_common_macros.S
@@ -0,0 +1,570 @@
+/*
+ * Copyright (c) 2015-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef EL3_COMMON_MACROS_S
+#define EL3_COMMON_MACROS_S
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <context.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+ /*
+ * Helper macro to initialise EL3 registers we care about.
+ */
+ .macro el3_arch_init_common
+ /* ---------------------------------------------------------------------
+ * SCTLR_EL3 has already been initialised - read current value before
+ * modifying.
+ *
+ * SCTLR_EL3.I: Enable the instruction cache.
+ *
+ * SCTLR_EL3.SA: Enable Stack Alignment check. A SP alignment fault
+ * exception is generated if a load or store instruction executed at
+ * EL3 uses the SP as the base address and the SP is not aligned to a
+ * 16-byte boundary.
+ *
+ * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
+ * load or store one or more registers have an alignment check that the
+ * address being accessed is aligned to the size of the data element(s)
+ * being accessed.
+ * ---------------------------------------------------------------------
+ */
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+ mrs x0, sctlr_el3
+ orr x0, x0, x1
+ msr sctlr_el3, x0
+ isb
+
+#ifdef IMAGE_BL31
+ /* ---------------------------------------------------------------------
+ * Initialise the per-cpu cache pointer to the CPU.
+ * This is done early so that crash reporting has access to the crash
+ * stack. Since crash reporting depends on cpu_data to report the
+ * unhandled exception, not doing so can lead to recursive exceptions
+ * due to a NULL TPIDR_EL3.
+ * ---------------------------------------------------------------------
+ */
+ bl init_cpu_data_ptr
+#endif /* IMAGE_BL31 */
+
+ /* ---------------------------------------------------------------------
+ * Initialise SCR_EL3, setting all fields rather than relying on hw.
+ * All fields are architecturally UNKNOWN on reset. The following fields
+ * do not change during the TF lifetime. The remaining fields are set to
+ * zero here but are updated ahead of transitioning to a lower EL in the
+ * function cm_init_context_common().
+ *
+ * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
+ *
+ * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
	 * EL2, EL1 and EL0 is not trapped to EL3.
+ *
	 * SCR_EL3.SIF: Set to one to disable Secure state instruction
	 * fetches from Non-secure memory.
+ *
+ * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
+ * both Security states and both Execution states.
+ *
+ * SCR_EL3.EA: Set to one to route External Aborts and SError Interrupts
+ * to EL3 when executing at any EL.
+ *
+ * SCR_EL3.{API,APK}: For Armv8.3 pointer authentication feature,
+ * disable traps to EL3 when accessing key registers or using pointer
+ * authentication instructions from lower ELs.
+ * ---------------------------------------------------------------------
+ */
+ mov_imm x0, ((SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT) \
+ & ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT))
+#if CTX_INCLUDE_PAUTH_REGS
+ /*
+ * If the pointer authentication registers are saved during world
+ * switches, enable pointer authentication everywhere, as it is safe to
+ * do so.
+ */
+ orr x0, x0, #(SCR_API_BIT | SCR_APK_BIT)
+#endif
+#if ENABLE_RME
+ /*
+ * TODO: Setting the EEL2 bit to allow EL3 access to secure-only registers
+ * in context management. This will need to be refactored.
+ */
+ orr x0, x0, #SCR_EEL2_BIT
+#endif
+ msr scr_el3, x0
+
+ /* ---------------------------------------------------------------------
+ * Initialise MDCR_EL3, setting all fields rather than relying on hw.
+ * Some fields are architecturally UNKNOWN on reset.
+ *
+ * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
+ * Debug exceptions, other than Breakpoint Instruction exceptions, are
+ * disabled from all ELs in Secure state.
+ *
+ * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
+ * privileged debug from S-EL1.
+ *
	 * MDCR_EL3.TDOSA: Set to zero so that EL1 and EL2 System register
	 * accesses to the powerdown debug registers do not trap to EL3.
+ *
+ * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
+ * debug registers, other than those registers that are controlled by
+ * MDCR_EL3.TDOSA.
+ *
+ * MDCR_EL3.TPM: Set to zero so that EL0, EL1, and EL2 System register
+ * accesses to all Performance Monitors registers do not trap to EL3.
+ *
+ * MDCR_EL3.SCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
+ * prohibited in Secure state. This bit is RES0 in versions of the
+ * architecture with FEAT_PMUv3p5 not implemented, setting it to 1
+ * doesn't have any effect on them.
+ *
+ * MDCR_EL3.MCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
+ * prohibited in EL3. This bit is RES0 in versions of the
+ * architecture with FEAT_PMUv3p7 not implemented, setting it to 1
+ * doesn't have any effect on them.
+ *
+ * MDCR_EL3.SPME: Set to zero so that event counting by the programmable
+ * counters PMEVCNTR<n>_EL0 is prohibited in Secure state. If ARMv8.2
+ * Debug is not implemented this bit does not have any effect on the
+ * counters unless there is support for the implementation defined
+ * authentication interface ExternalSecureNoninvasiveDebugEnabled().
+ *
+ * MDCR_EL3.NSTB, MDCR_EL3.NSTBE: Set to zero so that Trace Buffer
+ * owning security state is Secure state. If FEAT_TRBE is implemented,
+ * accesses to Trace Buffer control registers at EL2 and EL1 in any
	 * security state generate trap exceptions to EL3.
+ * If FEAT_TRBE is not implemented, these bits are RES0.
+ *
	 * MDCR_EL3.TTRF: Set to one so that accesses to trace filter control
	 * registers in non-monitor mode generate an EL3 trap exception
	 * (unless the access generates a higher priority exception) when
	 * trace filter control (FEAT_TRF) is implemented.
	 * When FEAT_TRF is not implemented, this bit is RES0.
+ * ---------------------------------------------------------------------
+ */
+ mov_imm x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | \
+ MDCR_SPD32(MDCR_SPD32_DISABLE) | MDCR_SCCD_BIT | \
+ MDCR_MCCD_BIT) & ~(MDCR_SPME_BIT | MDCR_TDOSA_BIT | \
+ MDCR_TDA_BIT | MDCR_TPM_BIT | MDCR_NSTB(MDCR_NSTB_EL1) | \
+ MDCR_NSTBE | MDCR_TTRF_BIT))
+
+ mrs x1, id_aa64dfr0_el1
+ ubfx x1, x1, #ID_AA64DFR0_TRACEFILT_SHIFT, #ID_AA64DFR0_TRACEFILT_LENGTH
+ cbz x1, 1f
+ orr x0, x0, #MDCR_TTRF_BIT
+1:
+ msr mdcr_el3, x0
+
+ /* ---------------------------------------------------------------------
+ * Initialise PMCR_EL0 setting all fields rather than relying
+ * on hw. Some fields are architecturally UNKNOWN on reset.
+ *
+ * PMCR_EL0.LP: Set to one so that event counter overflow, that
+ * is recorded in PMOVSCLR_EL0[0-30], occurs on the increment
+ * that changes PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU
+ * is implemented. This bit is RES0 in versions of the architecture
	 * earlier than ARMv8.5; setting it to 1 doesn't have any effect
+ * on them.
+ *
+ * PMCR_EL0.LC: Set to one so that cycle counter overflow, that
+ * is recorded in PMOVSCLR_EL0[31], occurs on the increment
+ * that changes PMCCNTR_EL0[63] from 1 to 0.
+ *
+ * PMCR_EL0.DP: Set to one so that the cycle counter,
+ * PMCCNTR_EL0 does not count when event counting is prohibited.
+ *
+ * PMCR_EL0.X: Set to zero to disable export of events.
+ *
+ * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
+ * counts on every clock cycle.
+ * ---------------------------------------------------------------------
+ */
+ mov_imm x0, ((PMCR_EL0_RESET_VAL | PMCR_EL0_LP_BIT | \
+ PMCR_EL0_LC_BIT | PMCR_EL0_DP_BIT) & \
+ ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT))
+
+ msr pmcr_el0, x0
+
+ /* ---------------------------------------------------------------------
+ * Enable External Aborts and SError Interrupts now that the exception
+ * vectors have been setup.
+ * ---------------------------------------------------------------------
+ */
+ msr daifclr, #DAIF_ABT_BIT
+
+ /* ---------------------------------------------------------------------
+ * Initialise CPTR_EL3, setting all fields rather than relying on hw.
+ * All fields are architecturally UNKNOWN on reset.
+ *
+ * CPTR_EL3.TCPAC: Set to zero so that any accesses to CPACR_EL1,
+ * CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
+ *
+ * CPTR_EL3.TTA: Set to one so that accesses to the trace system
+ * registers trap to EL3 from all exception levels and security
+ * states when system register trace is implemented.
+ * When system register trace is not implemented, this bit is RES0 and
+ * hence set to zero.
	 *
+ * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
+ * by Advanced SIMD, floating-point or SVE instructions (if implemented)
+ * do not trap to EL3.
+ *
+ * CPTR_EL3.TAM: Set to one so that Activity Monitor access is
+ * trapped to EL3 by default.
+ *
+ * CPTR_EL3.EZ: Set to zero so that all SVE functionality is trapped
+ * to EL3 by default.
+ *
+ * CPTR_EL3.ESM: Set to zero so that all SME functionality is trapped
+ * to EL3 by default.
+ */
+
+ mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
+ mrs x1, id_aa64dfr0_el1
+ ubfx x1, x1, #ID_AA64DFR0_TRACEVER_SHIFT, #ID_AA64DFR0_TRACEVER_LENGTH
+ cbz x1, 1f
+ orr x0, x0, #TTA_BIT
+1:
+ msr cptr_el3, x0
+
+ /*
+ * If Data Independent Timing (DIT) functionality is implemented,
+ * always enable DIT in EL3.
+ * First assert that the FEAT_DIT build flag matches the feature id
+ * register value for DIT.
+ */
+#if ENABLE_FEAT_DIT
+#if ENABLE_ASSERTIONS
+ mrs x0, id_aa64pfr0_el1
+ ubfx x0, x0, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
+ cmp x0, #ID_AA64PFR0_DIT_SUPPORTED
+ ASM_ASSERT(eq)
+#endif /* ENABLE_ASSERTIONS */
+ mov x0, #DIT_BIT
+ msr DIT, x0
+#endif
+ .endm
+
+/* -----------------------------------------------------------------------------
+ * This is the superset of actions that need to be performed during a cold boot
+ * or a warm boot in EL3. This code is shared by BL1 and BL31.
+ *
+ * This macro will always perform reset handling, architectural initialisations
+ * and stack setup. The rest of the actions are optional because they might not
+ * be needed, depending on the context in which this macro is called. This is
+ * why this macro is parameterised; each parameter allows some actions to be
+ * enabled or disabled.
+ *
+ * _init_sctlr:
+ * Whether the macro needs to initialise SCTLR_EL3, including configuring
+ * the endianness of data accesses.
+ *
+ * _warm_boot_mailbox:
+ * Whether the macro needs to detect the type of boot (cold/warm). The
+ * detection is based on the platform entrypoint address: if it is zero
+ * then it is a cold boot, otherwise it is a warm boot. In the latter case,
+ * this macro jumps to the platform entrypoint address.
+ *
+ * _secondary_cold_boot:
+ * Whether the macro needs to identify the CPU that is calling it: primary
+ * CPU or secondary CPU. The primary CPU will be allowed to carry on with
+ * the platform initialisations, while the secondaries will be put in a
+ * platform-specific state in the meantime.
+ *
+ * If the caller knows this macro will only be called by the primary CPU
+ * then this parameter can be defined to 0 to skip this step.
+ *
+ * _init_memory:
+ * Whether the macro needs to initialise the memory.
+ *
+ * _init_c_runtime:
+ * Whether the macro needs to initialise the C runtime environment.
+ *
+ * _exception_vectors:
+ * Address of the exception vectors to program in the VBAR_EL3 register.
+ *
+ * _pie_fixup_size:
+ * Size of the memory region in which to fix up the Global Descriptor
+ * Table (GDT).
+ *
+ * A non-zero value is expected when the firmware needs the GDT to be fixed up.
+ *
+ * -----------------------------------------------------------------------------
+ */
+ .macro el3_entrypoint_common \
+ _init_sctlr, _warm_boot_mailbox, _secondary_cold_boot, \
+ _init_memory, _init_c_runtime, _exception_vectors, \
+ _pie_fixup_size
+
+ .if \_init_sctlr
+ /* -------------------------------------------------------------
+ * This is the initialisation of SCTLR_EL3 and so must ensure
+ * that all fields are explicitly set rather than relying on the hardware.
+ * Some fields reset to an IMPLEMENTATION DEFINED value and
+ * others are architecturally UNKNOWN on reset.
+ *
+ * SCTLR_EL3.EE: Set the CPU endianness before doing anything that
+ * might involve memory reads or writes. Set to zero to select
+ * Little Endian.
+ *
+ * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
+ * force all memory regions that are writeable to be treated as
+ * XN (Execute-never). Set to zero so that this control has no
+ * effect on memory access permissions.
+ *
+ * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
+ *
+ * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
+ *
+ * SCTLR_EL3.DSSBS: Set to zero to disable speculative store bypass
+ * safe behaviour upon exception entry to EL3.
+ * -------------------------------------------------------------
+ */
+ mov_imm x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
+ | SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
+ msr sctlr_el3, x0
+ isb
+ .endif /* _init_sctlr */
+
+#if DISABLE_MTPMU
+ bl mtpmu_disable
+#endif
+
+ .if \_warm_boot_mailbox
+ /* -------------------------------------------------------------
+ * This code will be executed for both warm and cold resets.
+ * Now is the time to distinguish between the two.
+ * Query the platform entrypoint address; if it is not zero,
+ * this is a warm boot, so jump to that address.
+ * -------------------------------------------------------------
+ */
+ bl plat_get_my_entrypoint
+ cbz x0, do_cold_boot
+ br x0
+
+ do_cold_boot:
+ .endif /* _warm_boot_mailbox */
+
+ .if \_pie_fixup_size
+#if ENABLE_PIE
+ /*
+ * ------------------------------------------------------------
+ * If PIE is enabled, fix up the Global Descriptor Table only
+ * once, during the primary core cold boot path.
+ *
+ * The compile-time base address required for the fixup is
+ * calculated using the "pie_fixup" label present within the
+ * first page.
+ * ------------------------------------------------------------
+ */
+ pie_fixup:
+ ldr x0, =pie_fixup
+ and x0, x0, #~(PAGE_SIZE_MASK)
+ mov_imm x1, \_pie_fixup_size
+ add x1, x1, x0
+ bl fixup_gdt_reloc
+#endif /* ENABLE_PIE */
+ .endif /* _pie_fixup_size */
+
+ /* ---------------------------------------------------------------------
+ * Set the exception vectors.
+ * ---------------------------------------------------------------------
+ */
+ adr x0, \_exception_vectors
+ msr vbar_el3, x0
+ isb
+
+#if !(defined(IMAGE_BL2) && ENABLE_RME)
+ /* ---------------------------------------------------------------------
+ * It is a cold boot.
+ * Perform any processor specific actions upon reset e.g. cache, TLB
+ * invalidations etc.
+ * ---------------------------------------------------------------------
+ */
+ bl reset_handler
+#endif
+
+ el3_arch_init_common
+
+ .if \_secondary_cold_boot
+ /* -------------------------------------------------------------
+ * Check if this is a primary or secondary CPU cold boot.
+ * The primary CPU will set up the platform while the
+ * secondaries are placed in a platform-specific state until the
+ * primary CPU performs the necessary actions to bring them out
+ * of that state and allows entry into the OS.
+ * -------------------------------------------------------------
+ */
+ bl plat_is_my_cpu_primary
+ cbnz w0, do_primary_cold_boot
+
+ /* This is a cold boot on a secondary CPU */
+ bl plat_secondary_cold_boot_setup
+ /* plat_secondary_cold_boot_setup() is not supposed to return */
+ bl el3_panic
+
+ do_primary_cold_boot:
+ .endif /* _secondary_cold_boot */
+
+ /* ---------------------------------------------------------------------
+ * Initialize memory now. Secondary CPU initialization won't get to this
+ * point.
+ * ---------------------------------------------------------------------
+ */
+
+ .if \_init_memory
+ bl platform_mem_init
+ .endif /* _init_memory */
+
+ /* ---------------------------------------------------------------------
+ * Init C runtime environment:
+ * - Zero-initialise the NOBITS sections. There are 2 of them:
+ * - the .bss section;
+ * - the coherent memory section (if any).
+ * - Relocate the data section from ROM to RAM, if required.
+ * ---------------------------------------------------------------------
+ */
+ .if \_init_c_runtime
+#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \
+ ((BL2_AT_EL3 && BL2_INV_DCACHE) || ENABLE_RME))
+ /* -------------------------------------------------------------
+ * Invalidate the RW memory used by the BL31 image. This
+ * includes the data and NOBITS sections. This is done to
+ * safeguard against possible corruption of this memory by
+ * dirty cache lines in a system cache as a result of use by
+ * an earlier boot loader stage. If PIE is enabled however,
+ * RO sections including the GOT may be modified during
+ * pie fixup. Therefore, to be on the safe side, invalidate
+ * the entire image region if PIE is enabled.
+ * -------------------------------------------------------------
+ */
+#if ENABLE_PIE
+#if SEPARATE_CODE_AND_RODATA
+ adrp x0, __TEXT_START__
+ add x0, x0, :lo12:__TEXT_START__
+#else
+ adrp x0, __RO_START__
+ add x0, x0, :lo12:__RO_START__
+#endif /* SEPARATE_CODE_AND_RODATA */
+#else
+ adrp x0, __RW_START__
+ add x0, x0, :lo12:__RW_START__
+#endif /* ENABLE_PIE */
+ adrp x1, __RW_END__
+ add x1, x1, :lo12:__RW_END__
+ sub x1, x1, x0
+ bl inv_dcache_range
+#if defined(IMAGE_BL31) && SEPARATE_NOBITS_REGION
+ adrp x0, __NOBITS_START__
+ add x0, x0, :lo12:__NOBITS_START__
+ adrp x1, __NOBITS_END__
+ add x1, x1, :lo12:__NOBITS_END__
+ sub x1, x1, x0
+ bl inv_dcache_range
+#endif
+#if defined(IMAGE_BL2) && SEPARATE_BL2_NOLOAD_REGION
+ adrp x0, __BL2_NOLOAD_START__
+ add x0, x0, :lo12:__BL2_NOLOAD_START__
+ adrp x1, __BL2_NOLOAD_END__
+ add x1, x1, :lo12:__BL2_NOLOAD_END__
+ sub x1, x1, x0
+ bl inv_dcache_range
+#endif
+#endif
+ adrp x0, __BSS_START__
+ add x0, x0, :lo12:__BSS_START__
+
+ adrp x1, __BSS_END__
+ add x1, x1, :lo12:__BSS_END__
+ sub x1, x1, x0
+ bl zeromem
+
+#if USE_COHERENT_MEM
+ adrp x0, __COHERENT_RAM_START__
+ add x0, x0, :lo12:__COHERENT_RAM_START__
+ adrp x1, __COHERENT_RAM_END_UNALIGNED__
+ add x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
+ sub x1, x1, x0
+ bl zeromem
+#endif
+
+#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
+ adrp x0, __DATA_RAM_START__
+ add x0, x0, :lo12:__DATA_RAM_START__
+ adrp x1, __DATA_ROM_START__
+ add x1, x1, :lo12:__DATA_ROM_START__
+ adrp x2, __DATA_RAM_END__
+ add x2, x2, :lo12:__DATA_RAM_END__
+ sub x2, x2, x0
+ bl memcpy16
+#endif
+ .endif /* _init_c_runtime */
+
+ /* ---------------------------------------------------------------------
+ * Use SP_EL0 for the C runtime stack.
+ * ---------------------------------------------------------------------
+ */
+ msr spsel, #0
+
+ /* ---------------------------------------------------------------------
+ * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
+ * the MMU is enabled. There is no risk of reading stale stack memory
+ * after enabling the MMU as only the primary CPU is running at the
+ * moment.
+ * ---------------------------------------------------------------------
+ */
+ bl plat_set_my_stack
+
+#if STACK_PROTECTOR_ENABLED
+ .if \_init_c_runtime
+ bl update_stack_protector_canary
+ .endif /* _init_c_runtime */
+#endif
+ .endm
+
+ .macro apply_at_speculative_wa
+#if ERRATA_SPECULATIVE_AT
+ /*
+ * Explicitly save x30 to free up a register for branching, and
+ * also save x29, which is used by the called function.
+ */
+ stp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ bl save_and_update_ptw_el1_sys_regs
+ ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+#endif
+ .endm
+
+ .macro restore_ptw_el1_sys_regs
+#if ERRATA_SPECULATIVE_AT
+ /* -----------------------------------------------------------
+ * In the ERRATA_SPECULATIVE_AT case, the order below must be
+ * followed to ensure that the page table walk is not enabled
+ * until all EL1 system registers have been restored. TCR_EL1
+ * must be updated last, as it restores the previous stage 1
+ * page table walk setting, i.e. the TCR_EL1.EPDx bits. The ISBs
+ * ensure that the CPU performs the steps below in order.
+ *
+ * 1. Ensure all other system registers are written before
+ * updating SCTLR_EL1 using ISB.
+ * 2. Restore SCTLR_EL1 register.
+ * 3. Ensure SCTLR_EL1 written successfully using ISB.
+ * 4. Restore TCR_EL1 register.
+ * -----------------------------------------------------------
+ */
+ isb
+ ldp x28, x29, [sp, #CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1]
+ msr sctlr_el1, x28
+ isb
+ msr tcr_el1, x29
+#endif
+ .endm
+
+#endif /* EL3_COMMON_MACROS_S */
diff --git a/include/arch/aarch64/smccc_helpers.h b/include/arch/aarch64/smccc_helpers.h
new file mode 100644
index 0000000..920f294
--- /dev/null
+++ b/include/arch/aarch64/smccc_helpers.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SMCCC_HELPERS_H
+#define SMCCC_HELPERS_H
+
+#include <lib/smccc.h>
+
+/* Definitions to help the assembler access the SMC/ERET args structure */
+#define SMC_ARGS_SIZE 0x40
+#define SMC_ARG0 0x0
+#define SMC_ARG1 0x8
+#define SMC_ARG2 0x10
+#define SMC_ARG3 0x18
+#define SMC_ARG4 0x20
+#define SMC_ARG5 0x28
+#define SMC_ARG6 0x30
+#define SMC_ARG7 0x38
+#define SMC_ARGS_END 0x40
+
+#ifndef __ASSEMBLER__
+
+#include <stdbool.h>
+
+#include <context.h>
+
+#include <platform_def.h> /* For CACHE_WRITEBACK_GRANULE */
+
+/* Convenience macros to return from an SMC handler */
+#define SMC_RET0(_h) { \
+ return (uint64_t) (_h); \
+}
+#define SMC_RET1(_h, _x0) { \
+ write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X0), (_x0)); \
+ SMC_RET0(_h); \
+}
+#define SMC_RET2(_h, _x0, _x1) { \
+ write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X1), (_x1)); \
+ SMC_RET1(_h, (_x0)); \
+}
+#define SMC_RET3(_h, _x0, _x1, _x2) { \
+ write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X2), (_x2)); \
+ SMC_RET2(_h, (_x0), (_x1)); \
+}
+#define SMC_RET4(_h, _x0, _x1, _x2, _x3) { \
+ write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X3), (_x3)); \
+ SMC_RET3(_h, (_x0), (_x1), (_x2)); \
+}
+#define SMC_RET5(_h, _x0, _x1, _x2, _x3, _x4) { \
+ write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X4), (_x4)); \
+ SMC_RET4(_h, (_x0), (_x1), (_x2), (_x3)); \
+}
+#define SMC_RET6(_h, _x0, _x1, _x2, _x3, _x4, _x5) { \
+ write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X5), (_x5)); \
+ SMC_RET5(_h, (_x0), (_x1), (_x2), (_x3), (_x4)); \
+}
+#define SMC_RET7(_h, _x0, _x1, _x2, _x3, _x4, _x5, _x6) { \
+ write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X6), (_x6)); \
+ SMC_RET6(_h, (_x0), (_x1), (_x2), (_x3), (_x4), (_x5)); \
+}
+#define SMC_RET8(_h, _x0, _x1, _x2, _x3, _x4, _x5, _x6, _x7) { \
+ write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X7), (_x7)); \
+ SMC_RET7(_h, (_x0), (_x1), (_x2), (_x3), (_x4), (_x5), (_x6)); \
+}
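As an illustration only, the following minimal sketch shows how the SMC_RETx macros are typically used from a C runtime-service handler. The handler name, the function ID value and the handler prototype are assumptions for the purpose of the example (the prototype follows the usual Trusted Firmware-A runtime service shape and is not defined in this header); u_register_t and SMC_UNK are expected to be available via this header and its includes.

    /* Hypothetical handler, for illustration only. */
    static uintptr_t example_smc_handler(uint32_t smc_fid, u_register_t x1,
                                         u_register_t x2, u_register_t x3,
                                         u_register_t x4, void *cookie,
                                         void *handle, u_register_t flags)
    {
            if (smc_fid == 0xC2000001U) {   /* hypothetical function ID */
                    /* Return 0 (success) in x0 and a payload value in x1. */
                    SMC_RET2(handle, 0, x1 + x2);
            }

            /* Unrecognised function ID: return SMC_UNK in x0. */
            SMC_RET1(handle, SMC_UNK);
    }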
+
+/*
+ * Convenience macros to access general-purpose registers using the handle
+ * provided to the SMC handler. These take the offset values defined in
+ * context.h.
+ */
+#define SMC_GET_GP(_h, _g) \
+ read_ctx_reg((get_gpregs_ctx(_h)), (_g))
+#define SMC_SET_GP(_h, _g, _v) \
+ write_ctx_reg((get_gpregs_ctx(_h)), (_g), (_v))
+
+/*
+ * Convenience macros to access EL3 context registers using the handle provided
+ * to the SMC handler. These take the offset values defined in context.h.
+ */
+#define SMC_GET_EL3(_h, _e) \
+ read_ctx_reg((get_el3state_ctx(_h)), (_e))
+#define SMC_SET_EL3(_h, _e, _v) \
+ write_ctx_reg((get_el3state_ctx(_h)), (_e), (_v))
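As a sketch of how the accessor macros above might be used (the function name is hypothetical; CTX_GPREG_X1 and CTX_SPSR_EL3 are offsets defined in context.h):

    /* Illustrative only: adjust the caller's saved x1 and rewrite SPSR_EL3. */
    static void example_tweak_saved_context(void *handle)
    {
            /* Read the caller's x1 from the saved GP-register context. */
            u_register_t arg = SMC_GET_GP(handle, CTX_GPREG_X1);

            /* Write a derived value back so the caller sees it in x1. */
            SMC_SET_GP(handle, CTX_GPREG_X1, arg | 1U);

            /* Read and write back the saved SPSR_EL3 from the EL3 state context. */
            u_register_t spsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
            SMC_SET_EL3(handle, CTX_SPSR_EL3, spsr);
    }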
+
+/*
+ * Helper macro to retrieve the SMC parameters from cpu_context_t.
+ */
+#define get_smc_params_from_ctx(_hdl, _x1, _x2, _x3, _x4) \
+ do { \
+ const gp_regs_t *regs = get_gpregs_ctx(_hdl); \
+ _x1 = read_ctx_reg(regs, CTX_GPREG_X1); \
+ _x2 = read_ctx_reg(regs, CTX_GPREG_X2); \
+ _x3 = read_ctx_reg(regs, CTX_GPREG_X3); \
+ _x4 = read_ctx_reg(regs, CTX_GPREG_X4); \
+ } while (false)
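For illustration, inside a handler that receives the context handle (here called handle, an assumed variable name), the first four SMC arguments can be unpacked in one step:

    u_register_t x1, x2, x3, x4;

    /* Fills x1-x4 from the caller's saved GP-register context. */
    get_smc_params_from_ctx(handle, x1, x2, x3, x4);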
+
+typedef struct {
+ uint64_t _regs[SMC_ARGS_END >> 3];
+} __aligned(CACHE_WRITEBACK_GRANULE) smc_args_t;
+
+/*
+ * Ensure that the assembler's view of the size of the SMC args structure is
+ * the same as the compiler's.
+ */
+CASSERT(sizeof(smc_args_t) == SMC_ARGS_SIZE, assert_sp_args_size_mismatch);
+
+static inline smc_args_t smc_helper(uint32_t func, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4,
+ uint64_t arg5, uint64_t arg6)
+{
+ smc_args_t ret_args = {0};
+
+ register uint64_t r0 __asm__("x0") = func;
+ register uint64_t r1 __asm__("x1") = arg0;
+ register uint64_t r2 __asm__("x2") = arg1;
+ register uint64_t r3 __asm__("x3") = arg2;
+ register uint64_t r4 __asm__("x4") = arg3;
+ register uint64_t r5 __asm__("x5") = arg4;
+ register uint64_t r6 __asm__("x6") = arg5;
+ register uint64_t r7 __asm__("x7") = arg6;
+
+ /* Output registers, also used as inputs ('+' constraint). */
+ __asm__ volatile("smc #0"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4),
+ "+r"(r5), "+r"(r6), "+r"(r7));
+
+ ret_args._regs[0] = r0;
+ ret_args._regs[1] = r1;
+ ret_args._regs[2] = r2;
+ ret_args._regs[3] = r3;
+ ret_args._regs[4] = r4;
+ ret_args._regs[5] = r5;
+ ret_args._regs[6] = r6;
+ ret_args._regs[7] = r7;
+
+ return ret_args;
+}
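A possible call site, shown only as a sketch: issue an SMC (the function ID below, 0x84000000, is the SMCCC PSCI_VERSION ID and is used here purely as an example) and read the first return value, which smc_helper places in _regs[0].

    smc_args_t ret = smc_helper(0x84000000U, 0, 0, 0, 0, 0, 0, 0);
    uint64_t version = ret._regs[0];   /* x0 on return from the SMC */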
+
+#endif /*__ASSEMBLER__*/
+
+#endif /* SMCCC_HELPERS_H */