author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:13:47 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:13:47 +0000
commit    102b0d2daa97dae68d3eed54d8fe37a9cc38a892 (patch)
tree      bcf648efac40ca6139842707f0eba5a4496a6dd2 /include/arch/aarch32
parent    Initial commit. (diff)
Adding upstream version 2.8.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include/arch/aarch32')
-rw-r--r--  include/arch/aarch32/arch.h               784
-rw-r--r--  include/arch/aarch32/arch_features.h       26
-rw-r--r--  include/arch/aarch32/arch_helpers.h       474
-rw-r--r--  include/arch/aarch32/asm_macros.S         237
-rw-r--r--  include/arch/aarch32/assert_macros.S       26
-rw-r--r--  include/arch/aarch32/console_macros.S      51
-rw-r--r--  include/arch/aarch32/el3_common_macros.S  458
-rw-r--r--  include/arch/aarch32/smccc_helpers.h      177
-rw-r--r--  include/arch/aarch32/smccc_macros.S       241
9 files changed, 2474 insertions, 0 deletions
diff --git a/include/arch/aarch32/arch.h b/include/arch/aarch32/arch.h
new file mode 100644
index 0000000..8678bf3
--- /dev/null
+++ b/include/arch/aarch32/arch.h
@@ -0,0 +1,784 @@
+/*
+ * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_H
+#define ARCH_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * MIDR bit definitions
+ ******************************************************************************/
+#define MIDR_IMPL_MASK U(0xff)
+#define MIDR_IMPL_SHIFT U(24)
+#define MIDR_VAR_SHIFT U(20)
+#define MIDR_VAR_BITS U(4)
+#define MIDR_REV_SHIFT U(0)
+#define MIDR_REV_BITS U(4)
+#define MIDR_PN_MASK U(0xfff)
+#define MIDR_PN_SHIFT U(4)
+
+/*******************************************************************************
+ * MPIDR macros
+ ******************************************************************************/
+#define MPIDR_MT_MASK (U(1) << 24)
+#define MPIDR_CPU_MASK MPIDR_AFFLVL_MASK
+#define MPIDR_CLUSTER_MASK (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
+#define MPIDR_AFFINITY_BITS U(8)
+#define MPIDR_AFFLVL_MASK U(0xff)
+#define MPIDR_AFFLVL_SHIFT U(3)
+#define MPIDR_AFF0_SHIFT U(0)
+#define MPIDR_AFF1_SHIFT U(8)
+#define MPIDR_AFF2_SHIFT U(16)
+#define MPIDR_AFF_SHIFT(_n) MPIDR_AFF##_n##_SHIFT
+#define MPIDR_AFFINITY_MASK U(0x00ffffff)
+#define MPIDR_AFFLVL0 U(0)
+#define MPIDR_AFFLVL1 U(1)
+#define MPIDR_AFFLVL2 U(2)
+#define MPIDR_AFFLVL(_n) MPIDR_AFFLVL##_n
+
+#define MPIDR_AFFLVL0_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL1_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL2_VAL(mpidr) \
+ (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL3_VAL(mpidr) U(0)
+
+#define MPIDR_AFF_ID(mpid, n) \
+ (((mpid) >> MPIDR_AFF_SHIFT(n)) & MPIDR_AFFLVL_MASK)
+
+#define MPID_MASK (MPIDR_MT_MASK |\
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT)|\
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT)|\
+ (MPIDR_AFFLVL_MASK << MPIDR_AFF0_SHIFT))
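As a worked example of how these field macros compose, here is a minimal C sketch of decoding an MPIDR value (the helper name is illustrative, not part of this header; it assumes arch.h and TF-A's u_register_t are available):

    /* Split an MPIDR into its three AArch32 affinity fields. */
    static void decode_mpidr(u_register_t mpidr)
    {
            unsigned int aff0 = MPIDR_AFFLVL0_VAL(mpidr); /* CPU within cluster */
            unsigned int aff1 = MPIDR_AFFLVL1_VAL(mpidr); /* cluster */
            unsigned int aff2 = MPIDR_AFFLVL2_VAL(mpidr); /* higher-level container */

            (void)aff0; (void)aff1; (void)aff2;
    }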
+
+/*
+ * An invalid MPID. This value can be used by functions that return an MPID to
+ * indicate an error.
+ */
+#define INVALID_MPID U(0xFFFFFFFF)
+
+/*
+ * The MPIDR_MAX_AFFLVL count starts from 0. Take care to
+ * add one while using this macro to define array sizes.
+ */
+#define MPIDR_MAX_AFFLVL U(2)
+
+/* Data Cache set/way op type defines */
+#define DC_OP_ISW U(0x0)
+#define DC_OP_CISW U(0x1)
+#if ERRATA_A53_827319
+#define DC_OP_CSW DC_OP_CISW
+#else
+#define DC_OP_CSW U(0x2)
+#endif
+
+/*******************************************************************************
+ * Generic timer memory mapped registers & offsets
+ ******************************************************************************/
+#define CNTCR_OFF U(0x000)
+/* Counter Count Value Lower register */
+#define CNTCVL_OFF U(0x008)
+/* Counter Count Value Upper register */
+#define CNTCVU_OFF U(0x00C)
+#define CNTFID_OFF U(0x020)
+
+#define CNTCR_EN (U(1) << 0)
+#define CNTCR_HDBG (U(1) << 1)
+#define CNTCR_FCREQ(x) ((x) << 8)
+
+/*******************************************************************************
+ * System register bit definitions
+ ******************************************************************************/
+/* CLIDR definitions */
+#define LOUIS_SHIFT U(21)
+#define LOC_SHIFT U(24)
+#define CLIDR_FIELD_WIDTH U(3)
+
+/* CSSELR definitions */
+#define LEVEL_SHIFT U(1)
+
+/* ID_DFR0_EL1 definitions */
+#define ID_DFR0_COPTRC_SHIFT U(12)
+#define ID_DFR0_COPTRC_MASK U(0xf)
+#define ID_DFR0_COPTRC_SUPPORTED U(1)
+#define ID_DFR0_COPTRC_LENGTH U(4)
+#define ID_DFR0_TRACEFILT_SHIFT U(28)
+#define ID_DFR0_TRACEFILT_MASK U(0xf)
+#define ID_DFR0_TRACEFILT_SUPPORTED U(1)
+#define ID_DFR0_TRACEFILT_LENGTH U(4)
+
+/* ID_DFR1_EL1 definitions */
+#define ID_DFR1_MTPMU_SHIFT U(0)
+#define ID_DFR1_MTPMU_MASK U(0xf)
+#define ID_DFR1_MTPMU_SUPPORTED U(1)
+
+/* ID_MMFR4 definitions */
+#define ID_MMFR4_CNP_SHIFT U(12)
+#define ID_MMFR4_CNP_LENGTH U(4)
+#define ID_MMFR4_CNP_MASK U(0xf)
+
+#define ID_MMFR4_CCIDX_SHIFT U(24)
+#define ID_MMFR4_CCIDX_LENGTH U(4)
+#define ID_MMFR4_CCIDX_MASK U(0xf)
+
+/* ID_PFR0 definitions */
+#define ID_PFR0_AMU_SHIFT U(20)
+#define ID_PFR0_AMU_LENGTH U(4)
+#define ID_PFR0_AMU_MASK U(0xf)
+#define ID_PFR0_AMU_NOT_SUPPORTED U(0x0)
+#define ID_PFR0_AMU_V1 U(0x1)
+#define ID_PFR0_AMU_V1P1 U(0x2)
+
+#define ID_PFR0_DIT_SHIFT U(24)
+#define ID_PFR0_DIT_LENGTH U(4)
+#define ID_PFR0_DIT_MASK U(0xf)
+#define ID_PFR0_DIT_SUPPORTED (U(1) << ID_PFR0_DIT_SHIFT)
+
+/* ID_PFR1 definitions */
+#define ID_PFR1_VIRTEXT_SHIFT U(12)
+#define ID_PFR1_VIRTEXT_MASK U(0xf)
+#define GET_VIRT_EXT(id) (((id) >> ID_PFR1_VIRTEXT_SHIFT) \
+ & ID_PFR1_VIRTEXT_MASK)
+#define ID_PFR1_GENTIMER_SHIFT U(16)
+#define ID_PFR1_GENTIMER_MASK U(0xf)
+#define ID_PFR1_GIC_SHIFT U(28)
+#define ID_PFR1_GIC_MASK U(0xf)
+#define ID_PFR1_SEC_SHIFT U(4)
+#define ID_PFR1_SEC_MASK U(0xf)
+#define ID_PFR1_ELx_ENABLED U(1)
+
+/* SCTLR definitions */
+#define SCTLR_RES1_DEF ((U(1) << 23) | (U(1) << 22) | (U(1) << 4) | \
+ (U(1) << 3))
+#if ARM_ARCH_MAJOR == 7
+#define SCTLR_RES1 SCTLR_RES1_DEF
+#else
+#define SCTLR_RES1 (SCTLR_RES1_DEF | (U(1) << 11))
+#endif
+#define SCTLR_M_BIT (U(1) << 0)
+#define SCTLR_A_BIT (U(1) << 1)
+#define SCTLR_C_BIT (U(1) << 2)
+#define SCTLR_CP15BEN_BIT (U(1) << 5)
+#define SCTLR_ITD_BIT (U(1) << 7)
+#define SCTLR_Z_BIT (U(1) << 11)
+#define SCTLR_I_BIT (U(1) << 12)
+#define SCTLR_V_BIT (U(1) << 13)
+#define SCTLR_RR_BIT (U(1) << 14)
+#define SCTLR_NTWI_BIT (U(1) << 16)
+#define SCTLR_NTWE_BIT (U(1) << 18)
+#define SCTLR_WXN_BIT (U(1) << 19)
+#define SCTLR_UWXN_BIT (U(1) << 20)
+#define SCTLR_EE_BIT (U(1) << 25)
+#define SCTLR_TRE_BIT (U(1) << 28)
+#define SCTLR_AFE_BIT (U(1) << 29)
+#define SCTLR_TE_BIT (U(1) << 30)
+#define SCTLR_DSSBS_BIT (U(1) << 31)
+#define SCTLR_RESET_VAL (SCTLR_RES1 | SCTLR_NTWE_BIT | \
+ SCTLR_NTWI_BIT | SCTLR_CP15BEN_BIT)
+
+/* SDCR definitions */
+#define SDCR_SPD(x) ((x) << 14)
+#define SDCR_SPD_LEGACY U(0x0)
+#define SDCR_SPD_DISABLE U(0x2)
+#define SDCR_SPD_ENABLE U(0x3)
+#define SDCR_SCCD_BIT (U(1) << 23)
+#define SDCR_TTRF_BIT (U(1) << 19)
+#define SDCR_SPME_BIT (U(1) << 17)
+#define SDCR_RESET_VAL U(0x0)
+#define SDCR_MTPME_BIT (U(1) << 28)
+
+/* HSCTLR definitions */
+#define HSCTLR_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+ (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+ (U(1) << 11) | (U(1) << 4) | (U(1) << 3))
+
+#define HSCTLR_M_BIT (U(1) << 0)
+#define HSCTLR_A_BIT (U(1) << 1)
+#define HSCTLR_C_BIT (U(1) << 2)
+#define HSCTLR_CP15BEN_BIT (U(1) << 5)
+#define HSCTLR_ITD_BIT (U(1) << 7)
+#define HSCTLR_SED_BIT (U(1) << 8)
+#define HSCTLR_I_BIT (U(1) << 12)
+#define HSCTLR_WXN_BIT (U(1) << 19)
+#define HSCTLR_EE_BIT (U(1) << 25)
+#define HSCTLR_TE_BIT (U(1) << 30)
+
+/* CPACR definitions */
+#define CPACR_FPEN(x) ((x) << 20)
+#define CPACR_FP_TRAP_PL0 UL(0x1)
+#define CPACR_FP_TRAP_ALL UL(0x2)
+#define CPACR_FP_TRAP_NONE UL(0x3)
+
+/* SCR definitions */
+#define SCR_TWE_BIT (UL(1) << 13)
+#define SCR_TWI_BIT (UL(1) << 12)
+#define SCR_SIF_BIT (UL(1) << 9)
+#define SCR_HCE_BIT (UL(1) << 8)
+#define SCR_SCD_BIT (UL(1) << 7)
+#define SCR_NET_BIT (UL(1) << 6)
+#define SCR_AW_BIT (UL(1) << 5)
+#define SCR_FW_BIT (UL(1) << 4)
+#define SCR_EA_BIT (UL(1) << 3)
+#define SCR_FIQ_BIT (UL(1) << 2)
+#define SCR_IRQ_BIT (UL(1) << 1)
+#define SCR_NS_BIT (UL(1) << 0)
+#define SCR_VALID_BIT_MASK U(0x33ff)
+#define SCR_RESET_VAL U(0x0)
+
+#define GET_NS_BIT(scr) ((scr) & SCR_NS_BIT)
+
+/* HCR definitions */
+#define HCR_TGE_BIT (U(1) << 27)
+#define HCR_AMO_BIT (U(1) << 5)
+#define HCR_IMO_BIT (U(1) << 4)
+#define HCR_FMO_BIT (U(1) << 3)
+#define HCR_RESET_VAL U(0x0)
+
+/* CNTHCTL definitions */
+#define CNTHCTL_RESET_VAL U(0x0)
+#define PL1PCEN_BIT (U(1) << 1)
+#define PL1PCTEN_BIT (U(1) << 0)
+
+/* CNTKCTL definitions */
+#define PL0PTEN_BIT (U(1) << 9)
+#define PL0VTEN_BIT (U(1) << 8)
+#define PL0PCTEN_BIT (U(1) << 0)
+#define PL0VCTEN_BIT (U(1) << 1)
+#define EVNTEN_BIT (U(1) << 2)
+#define EVNTDIR_BIT (U(1) << 3)
+#define EVNTI_SHIFT U(4)
+#define EVNTI_MASK U(0xf)
+
+/* HCPTR definitions */
+#define HCPTR_RES1 ((U(1) << 13) | (U(1) << 12) | U(0x3ff))
+#define TCPAC_BIT (U(1) << 31)
+#define TAM_SHIFT U(30)
+#define TAM_BIT (U(1) << TAM_SHIFT)
+#define TTA_BIT (U(1) << 20)
+#define TCP11_BIT (U(1) << 11)
+#define TCP10_BIT (U(1) << 10)
+#define HCPTR_RESET_VAL HCPTR_RES1
+
+/* VTTBR definitions */
+#define VTTBR_RESET_VAL ULL(0x0)
+#define VTTBR_VMID_MASK ULL(0xff)
+#define VTTBR_VMID_SHIFT U(48)
+#define VTTBR_BADDR_MASK ULL(0xffffffffffff)
+#define VTTBR_BADDR_SHIFT U(0)
+
+/* HDCR definitions */
+#define HDCR_MTPME_BIT (U(1) << 28)
+#define HDCR_HLP_BIT (U(1) << 26)
+#define HDCR_HPME_BIT (U(1) << 7)
+#define HDCR_RESET_VAL U(0x0)
+
+/* HSTR definitions */
+#define HSTR_RESET_VAL U(0x0)
+
+/* CNTHP_CTL definitions */
+#define CNTHP_CTL_RESET_VAL U(0x0)
+
+/* NSACR definitions */
+#define NSASEDIS_BIT (U(1) << 15)
+#define NSTRCDIS_BIT (U(1) << 20)
+#define NSACR_CP11_BIT (U(1) << 11)
+#define NSACR_CP10_BIT (U(1) << 10)
+#define NSACR_IMP_DEF_MASK (U(0x7) << 16)
+#define NSACR_ENABLE_FP_ACCESS (NSACR_CP11_BIT | NSACR_CP10_BIT)
+#define NSACR_RESET_VAL U(0x0)
+
+/* CPACR definitions */
+#define ASEDIS_BIT (U(1) << 31)
+#define TRCDIS_BIT (U(1) << 28)
+#define CPACR_CP11_SHIFT U(22)
+#define CPACR_CP10_SHIFT U(20)
+#define CPACR_ENABLE_FP_ACCESS ((U(0x3) << CPACR_CP11_SHIFT) |\
+ (U(0x3) << CPACR_CP10_SHIFT))
+#define CPACR_RESET_VAL U(0x0)
+
+/* FPEXC definitions */
+#define FPEXC_RES1 ((U(1) << 10) | (U(1) << 9) | (U(1) << 8))
+#define FPEXC_EN_BIT (U(1) << 30)
+#define FPEXC_RESET_VAL FPEXC_RES1
+
+/* SPSR/CPSR definitions */
+#define SPSR_FIQ_BIT (U(1) << 0)
+#define SPSR_IRQ_BIT (U(1) << 1)
+#define SPSR_ABT_BIT (U(1) << 2)
+#define SPSR_AIF_SHIFT U(6)
+#define SPSR_AIF_MASK U(0x7)
+
+#define SPSR_E_SHIFT U(9)
+#define SPSR_E_MASK U(0x1)
+#define SPSR_E_LITTLE U(0)
+#define SPSR_E_BIG U(1)
+
+#define SPSR_T_SHIFT U(5)
+#define SPSR_T_MASK U(0x1)
+#define SPSR_T_ARM U(0)
+#define SPSR_T_THUMB U(1)
+
+#define SPSR_MODE_SHIFT U(0)
+#define SPSR_MODE_MASK U(0x7)
+
+#define SPSR_SSBS_BIT BIT_32(23)
+
+#define DISABLE_ALL_EXCEPTIONS \
+ (SPSR_FIQ_BIT | SPSR_IRQ_BIT | SPSR_ABT_BIT)
+
+#define CPSR_DIT_BIT (U(1) << 21)
+/*
+ * TTBCR definitions
+ */
+#define TTBCR_EAE_BIT (U(1) << 31)
+
+#define TTBCR_SH1_NON_SHAREABLE (U(0x0) << 28)
+#define TTBCR_SH1_OUTER_SHAREABLE (U(0x2) << 28)
+#define TTBCR_SH1_INNER_SHAREABLE (U(0x3) << 28)
+
+#define TTBCR_RGN1_OUTER_NC (U(0x0) << 26)
+#define TTBCR_RGN1_OUTER_WBA (U(0x1) << 26)
+#define TTBCR_RGN1_OUTER_WT (U(0x2) << 26)
+#define TTBCR_RGN1_OUTER_WBNA (U(0x3) << 26)
+
+#define TTBCR_RGN1_INNER_NC (U(0x0) << 24)
+#define TTBCR_RGN1_INNER_WBA (U(0x1) << 24)
+#define TTBCR_RGN1_INNER_WT (U(0x2) << 24)
+#define TTBCR_RGN1_INNER_WBNA (U(0x3) << 24)
+
+#define TTBCR_EPD1_BIT (U(1) << 23)
+#define TTBCR_A1_BIT (U(1) << 22)
+
+#define TTBCR_T1SZ_SHIFT U(16)
+#define TTBCR_T1SZ_MASK U(0x7)
+#define TTBCR_TxSZ_MIN U(0)
+#define TTBCR_TxSZ_MAX U(7)
+
+#define TTBCR_SH0_NON_SHAREABLE (U(0x0) << 12)
+#define TTBCR_SH0_OUTER_SHAREABLE (U(0x2) << 12)
+#define TTBCR_SH0_INNER_SHAREABLE (U(0x3) << 12)
+
+#define TTBCR_RGN0_OUTER_NC (U(0x0) << 10)
+#define TTBCR_RGN0_OUTER_WBA (U(0x1) << 10)
+#define TTBCR_RGN0_OUTER_WT (U(0x2) << 10)
+#define TTBCR_RGN0_OUTER_WBNA (U(0x3) << 10)
+
+#define TTBCR_RGN0_INNER_NC (U(0x0) << 8)
+#define TTBCR_RGN0_INNER_WBA (U(0x1) << 8)
+#define TTBCR_RGN0_INNER_WT (U(0x2) << 8)
+#define TTBCR_RGN0_INNER_WBNA (U(0x3) << 8)
+
+#define TTBCR_EPD0_BIT (U(1) << 7)
+#define TTBCR_T0SZ_SHIFT U(0)
+#define TTBCR_T0SZ_MASK U(0x7)
+
+/*
+ * HTCR definitions
+ */
+#define HTCR_RES1 ((U(1) << 31) | (U(1) << 23))
+
+#define HTCR_SH0_NON_SHAREABLE (U(0x0) << 12)
+#define HTCR_SH0_OUTER_SHAREABLE (U(0x2) << 12)
+#define HTCR_SH0_INNER_SHAREABLE (U(0x3) << 12)
+
+#define HTCR_RGN0_OUTER_NC (U(0x0) << 10)
+#define HTCR_RGN0_OUTER_WBA (U(0x1) << 10)
+#define HTCR_RGN0_OUTER_WT (U(0x2) << 10)
+#define HTCR_RGN0_OUTER_WBNA (U(0x3) << 10)
+
+#define HTCR_RGN0_INNER_NC (U(0x0) << 8)
+#define HTCR_RGN0_INNER_WBA (U(0x1) << 8)
+#define HTCR_RGN0_INNER_WT (U(0x2) << 8)
+#define HTCR_RGN0_INNER_WBNA (U(0x3) << 8)
+
+#define HTCR_T0SZ_SHIFT U(0)
+#define HTCR_T0SZ_MASK U(0x7)
+
+#define MODE_RW_SHIFT U(0x4)
+#define MODE_RW_MASK U(0x1)
+#define MODE_RW_32 U(0x1)
+
+#define MODE32_SHIFT U(0)
+#define MODE32_MASK U(0x1f)
+#define MODE32_usr U(0x10)
+#define MODE32_fiq U(0x11)
+#define MODE32_irq U(0x12)
+#define MODE32_svc U(0x13)
+#define MODE32_mon U(0x16)
+#define MODE32_abt U(0x17)
+#define MODE32_hyp U(0x1a)
+#define MODE32_und U(0x1b)
+#define MODE32_sys U(0x1f)
+
+#define GET_M32(mode) (((mode) >> MODE32_SHIFT) & MODE32_MASK)
+
+#define SPSR_MODE32(mode, isa, endian, aif) \
+( \
+ ( \
+ (MODE_RW_32 << MODE_RW_SHIFT) | \
+ (((mode) & MODE32_MASK) << MODE32_SHIFT) | \
+ (((isa) & SPSR_T_MASK) << SPSR_T_SHIFT) | \
+ (((endian) & SPSR_E_MASK) << SPSR_E_SHIFT) | \
+ (((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT) \
+ ) & \
+ (~(SPSR_SSBS_BIT)) \
+)
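To make the macro concrete, a hedged sketch of building an SPSR for a little-endian, ARM-state entry into monitor mode with asynchronous aborts, IRQs and FIQs all masked (the variable is illustrative; note the macro clears SPSR.SSBS itself):

    u_register_t spsr = SPSR_MODE32(MODE32_mon, SPSR_T_ARM, SPSR_E_LITTLE,
                                    DISABLE_ALL_EXCEPTIONS);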
+
+/*
+ * TTBR definitions
+ */
+#define TTBR_CNP_BIT ULL(0x1)
+
+/*
+ * CTR definitions
+ */
+#define CTR_CWG_SHIFT U(24)
+#define CTR_CWG_MASK U(0xf)
+#define CTR_ERG_SHIFT U(20)
+#define CTR_ERG_MASK U(0xf)
+#define CTR_DMINLINE_SHIFT U(16)
+#define CTR_DMINLINE_WIDTH U(4)
+#define CTR_DMINLINE_MASK ((U(1) << 4) - U(1))
+#define CTR_L1IP_SHIFT U(14)
+#define CTR_L1IP_MASK U(0x3)
+#define CTR_IMINLINE_SHIFT U(0)
+#define CTR_IMINLINE_MASK U(0xf)
+
+#define MAX_CACHE_LINE_SIZE U(0x800) /* 2KB */
+
+/* PMCR definitions */
+#define PMCR_N_SHIFT U(11)
+#define PMCR_N_MASK U(0x1f)
+#define PMCR_N_BITS (PMCR_N_MASK << PMCR_N_SHIFT)
+#define PMCR_LP_BIT (U(1) << 7)
+#define PMCR_LC_BIT (U(1) << 6)
+#define PMCR_DP_BIT (U(1) << 5)
+#define PMCR_RESET_VAL U(0x0)
+
+/*******************************************************************************
+ * Definitions of register offsets, fields and macros for CPU system
+ * instructions.
+ ******************************************************************************/
+
+#define TLBI_ADDR_SHIFT U(0)
+#define TLBI_ADDR_MASK U(0xFFFFF000)
+#define TLBI_ADDR(x) (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTCTLBase Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+#define CNTCTLBASE_CNTFRQ U(0x0)
+#define CNTNSAR U(0x4)
+#define CNTNSAR_NS_SHIFT(x) (x)
+
+#define CNTACR_BASE(x) (U(0x40) + ((x) << 2))
+#define CNTACR_RPCT_SHIFT U(0x0)
+#define CNTACR_RVCT_SHIFT U(0x1)
+#define CNTACR_RFRQ_SHIFT U(0x2)
+#define CNTACR_RVOFF_SHIFT U(0x3)
+#define CNTACR_RWVT_SHIFT U(0x4)
+#define CNTACR_RWPT_SHIFT U(0x5)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTBaseN Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+/* Physical Count register. */
+#define CNTPCT_LO U(0x0)
+/* Counter Frequency register. */
+#define CNTBASEN_CNTFRQ U(0x10)
+/* Physical Timer CompareValue register. */
+#define CNTP_CVAL_LO U(0x20)
+/* Physical Timer Control register. */
+#define CNTP_CTL U(0x2c)
+
+/* Physical timer control register bit fields shifts and masks */
+#define CNTP_CTL_ENABLE_SHIFT 0
+#define CNTP_CTL_IMASK_SHIFT 1
+#define CNTP_CTL_ISTATUS_SHIFT 2
+
+#define CNTP_CTL_ENABLE_MASK U(1)
+#define CNTP_CTL_IMASK_MASK U(1)
+#define CNTP_CTL_ISTATUS_MASK U(1)
+
+/* MAIR macros */
+#define MAIR0_ATTR_SET(attr, index) ((attr) << ((index) << U(3)))
+#define MAIR1_ATTR_SET(attr, index) ((attr) << (((index) - U(3)) << U(3)))
+
+/* System register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
+#define SCR p15, 0, c1, c1, 0
+#define SCTLR p15, 0, c1, c0, 0
+#define ACTLR p15, 0, c1, c0, 1
+#define SDCR p15, 0, c1, c3, 1
+#define MPIDR p15, 0, c0, c0, 5
+#define MIDR p15, 0, c0, c0, 0
+#define HVBAR p15, 4, c12, c0, 0
+#define VBAR p15, 0, c12, c0, 0
+#define MVBAR p15, 0, c12, c0, 1
+#define NSACR p15, 0, c1, c1, 2
+#define CPACR p15, 0, c1, c0, 2
+#define DCCIMVAC p15, 0, c7, c14, 1
+#define DCCMVAC p15, 0, c7, c10, 1
+#define DCIMVAC p15, 0, c7, c6, 1
+#define DCCISW p15, 0, c7, c14, 2
+#define DCCSW p15, 0, c7, c10, 2
+#define DCISW p15, 0, c7, c6, 2
+#define CTR p15, 0, c0, c0, 1
+#define CNTFRQ p15, 0, c14, c0, 0
+#define ID_MMFR4 p15, 0, c0, c2, 6
+#define ID_DFR0 p15, 0, c0, c1, 2
+#define ID_DFR1 p15, 0, c0, c3, 5
+#define ID_PFR0 p15, 0, c0, c1, 0
+#define ID_PFR1 p15, 0, c0, c1, 1
+#define MAIR0 p15, 0, c10, c2, 0
+#define MAIR1 p15, 0, c10, c2, 1
+#define TTBCR p15, 0, c2, c0, 2
+#define TTBR0 p15, 0, c2, c0, 0
+#define TTBR1 p15, 0, c2, c0, 1
+#define TLBIALL p15, 0, c8, c7, 0
+#define TLBIALLH p15, 4, c8, c7, 0
+#define TLBIALLIS p15, 0, c8, c3, 0
+#define TLBIMVA p15, 0, c8, c7, 1
+#define TLBIMVAA p15, 0, c8, c7, 3
+#define TLBIMVAAIS p15, 0, c8, c3, 3
+#define TLBIMVAHIS p15, 4, c8, c3, 1
+#define BPIALLIS p15, 0, c7, c1, 6
+#define BPIALL p15, 0, c7, c5, 6
+#define ICIALLU p15, 0, c7, c5, 0
+#define HSCTLR p15, 4, c1, c0, 0
+#define HCR p15, 4, c1, c1, 0
+#define HCPTR p15, 4, c1, c1, 2
+#define HSTR p15, 4, c1, c1, 3
+#define CNTHCTL p15, 4, c14, c1, 0
+#define CNTKCTL p15, 0, c14, c1, 0
+#define VPIDR p15, 4, c0, c0, 0
+#define VMPIDR p15, 4, c0, c0, 5
+#define ISR p15, 0, c12, c1, 0
+#define CLIDR p15, 1, c0, c0, 1
+#define CSSELR p15, 2, c0, c0, 0
+#define CCSIDR p15, 1, c0, c0, 0
+#define CCSIDR2 p15, 1, c0, c0, 2
+#define HTCR p15, 4, c2, c0, 2
+#define HMAIR0 p15, 4, c10, c2, 0
+#define ATS1CPR p15, 0, c7, c8, 0
+#define ATS1HR p15, 4, c7, c8, 0
+#define DBGOSDLR p14, 0, c1, c3, 4
+
+/* Debug register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
+#define HDCR p15, 4, c1, c1, 1
+#define PMCR p15, 0, c9, c12, 0
+#define CNTHP_TVAL p15, 4, c14, c2, 0
+#define CNTHP_CTL p15, 4, c14, c2, 1
+
+/* AArch32 coproc registers for 32bit MMU descriptor support */
+#define PRRR p15, 0, c10, c2, 0
+#define NMRR p15, 0, c10, c2, 1
+#define DACR p15, 0, c3, c0, 0
+
+/* GICv3 CPU Interface system register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
+#define ICC_IAR1 p15, 0, c12, c12, 0
+#define ICC_IAR0 p15, 0, c12, c8, 0
+#define ICC_EOIR1 p15, 0, c12, c12, 1
+#define ICC_EOIR0 p15, 0, c12, c8, 1
+#define ICC_HPPIR1 p15, 0, c12, c12, 2
+#define ICC_HPPIR0 p15, 0, c12, c8, 2
+#define ICC_BPR1 p15, 0, c12, c12, 3
+#define ICC_BPR0 p15, 0, c12, c8, 3
+#define ICC_DIR p15, 0, c12, c11, 1
+#define ICC_PMR p15, 0, c4, c6, 0
+#define ICC_RPR p15, 0, c12, c11, 3
+#define ICC_CTLR p15, 0, c12, c12, 4
+#define ICC_MCTLR p15, 6, c12, c12, 4
+#define ICC_SRE p15, 0, c12, c12, 5
+#define ICC_HSRE p15, 4, c12, c9, 5
+#define ICC_MSRE p15, 6, c12, c12, 5
+#define ICC_IGRPEN0 p15, 0, c12, c12, 6
+#define ICC_IGRPEN1 p15, 0, c12, c12, 7
+#define ICC_MGRPEN1 p15, 6, c12, c12, 7
+
+/* 64 bit system register defines. The format is: coproc, opt1, CRm */
+#define TTBR0_64 p15, 0, c2
+#define TTBR1_64 p15, 1, c2
+#define CNTVOFF_64 p15, 4, c14
+#define VTTBR_64 p15, 6, c2
+#define CNTPCT_64 p15, 0, c14
+#define HTTBR_64 p15, 4, c2
+#define CNTHP_CVAL_64 p15, 6, c14
+#define PAR_64 p15, 0, c7
+
+/* 64 bit GICv3 CPU Interface system register defines. The format is: coproc, opt1, CRm */
+#define ICC_SGI1R_EL1_64 p15, 0, c12
+#define ICC_ASGI1R_EL1_64 p15, 1, c12
+#define ICC_SGI0R_EL1_64 p15, 2, c12
+
+/* Fault registers. The format is: coproc, opt1, CRn, CRm, opt2 */
+#define DFSR p15, 0, c5, c0, 0
+#define IFSR p15, 0, c5, c0, 1
+#define DFAR p15, 0, c6, c0, 0
+#define IFAR p15, 0, c6, c0, 2
+
+/*******************************************************************************
+ * Definitions of MAIR encodings for device and normal memory
+ ******************************************************************************/
+/*
+ * MAIR encodings for device memory attributes.
+ */
+#define MAIR_DEV_nGnRnE U(0x0)
+#define MAIR_DEV_nGnRE U(0x4)
+#define MAIR_DEV_nGRE U(0x8)
+#define MAIR_DEV_GRE U(0xc)
+
+/*
+ * MAIR encodings for normal memory attributes.
+ *
+ * Cache Policy
+ * WT: Write Through
+ * WB: Write Back
+ * NC: Non-Cacheable
+ *
+ * Transient Hint
+ * NTR: Non-Transient
+ * TR: Transient
+ *
+ * Allocation Policy
+ * RA: Read Allocate
+ * WA: Write Allocate
+ * RWA: Read and Write Allocate
+ * NA: No Allocation
+ */
+#define MAIR_NORM_WT_TR_WA U(0x1)
+#define MAIR_NORM_WT_TR_RA U(0x2)
+#define MAIR_NORM_WT_TR_RWA U(0x3)
+#define MAIR_NORM_NC U(0x4)
+#define MAIR_NORM_WB_TR_WA U(0x5)
+#define MAIR_NORM_WB_TR_RA U(0x6)
+#define MAIR_NORM_WB_TR_RWA U(0x7)
+#define MAIR_NORM_WT_NTR_NA U(0x8)
+#define MAIR_NORM_WT_NTR_WA U(0x9)
+#define MAIR_NORM_WT_NTR_RA U(0xa)
+#define MAIR_NORM_WT_NTR_RWA U(0xb)
+#define MAIR_NORM_WB_NTR_NA U(0xc)
+#define MAIR_NORM_WB_NTR_WA U(0xd)
+#define MAIR_NORM_WB_NTR_RA U(0xe)
+#define MAIR_NORM_WB_NTR_RWA U(0xf)
+
+#define MAIR_NORM_OUTER_SHIFT U(4)
+
+#define MAKE_MAIR_NORMAL_MEMORY(inner, outer) \
+ ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
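Putting the MAIR pieces together, a minimal sketch (the index assignment is illustrative, and it assumes the write_mair0() accessor defined in arch_helpers.h later in this patch): program attribute index 0 as Device-nGnRE and index 1 as inner/outer write-back, non-transient, read/write-allocate normal memory:

    u_register_t mair0 = MAIR0_ATTR_SET(MAIR_DEV_nGnRE, U(0)) |
                         MAIR0_ATTR_SET(MAKE_MAIR_NORMAL_MEMORY(
                                 MAIR_NORM_WB_NTR_RWA,
                                 MAIR_NORM_WB_NTR_RWA), U(1));

    write_mair0(mair0);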
+
+/* PAR fields */
+#define PAR_F_SHIFT U(0)
+#define PAR_F_MASK ULL(0x1)
+#define PAR_ADDR_SHIFT U(12)
+#define PAR_ADDR_MASK (BIT_64(40) - ULL(1)) /* 40-bits-wide page address */
+
+/*******************************************************************************
+ * Definitions for system register interface to AMU for FEAT_AMUv1
+ ******************************************************************************/
+#define AMCR p15, 0, c13, c2, 0
+#define AMCFGR p15, 0, c13, c2, 1
+#define AMCGCR p15, 0, c13, c2, 2
+#define AMUSERENR p15, 0, c13, c2, 3
+#define AMCNTENCLR0 p15, 0, c13, c2, 4
+#define AMCNTENSET0 p15, 0, c13, c2, 5
+#define AMCNTENCLR1 p15, 0, c13, c3, 0
+#define AMCNTENSET1 p15, 0, c13, c3, 1
+
+/* Activity Monitor Group 0 Event Counter Registers */
+#define AMEVCNTR00 p15, 0, c0
+#define AMEVCNTR01 p15, 1, c0
+#define AMEVCNTR02 p15, 2, c0
+#define AMEVCNTR03 p15, 3, c0
+
+/* Activity Monitor Group 0 Event Type Registers */
+#define AMEVTYPER00 p15, 0, c13, c6, 0
+#define AMEVTYPER01 p15, 0, c13, c6, 1
+#define AMEVTYPER02 p15, 0, c13, c6, 2
+#define AMEVTYPER03 p15, 0, c13, c6, 3
+
+/* Activity Monitor Group 1 Event Counter Registers */
+#define AMEVCNTR10 p15, 0, c4
+#define AMEVCNTR11 p15, 1, c4
+#define AMEVCNTR12 p15, 2, c4
+#define AMEVCNTR13 p15, 3, c4
+#define AMEVCNTR14 p15, 4, c4
+#define AMEVCNTR15 p15, 5, c4
+#define AMEVCNTR16 p15, 6, c4
+#define AMEVCNTR17 p15, 7, c4
+#define AMEVCNTR18 p15, 0, c5
+#define AMEVCNTR19 p15, 1, c5
+#define AMEVCNTR1A p15, 2, c5
+#define AMEVCNTR1B p15, 3, c5
+#define AMEVCNTR1C p15, 4, c5
+#define AMEVCNTR1D p15, 5, c5
+#define AMEVCNTR1E p15, 6, c5
+#define AMEVCNTR1F p15, 7, c5
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10 p15, 0, c13, c14, 0
+#define AMEVTYPER11 p15, 0, c13, c14, 1
+#define AMEVTYPER12 p15, 0, c13, c14, 2
+#define AMEVTYPER13 p15, 0, c13, c14, 3
+#define AMEVTYPER14 p15, 0, c13, c14, 4
+#define AMEVTYPER15 p15, 0, c13, c14, 5
+#define AMEVTYPER16 p15, 0, c13, c14, 6
+#define AMEVTYPER17 p15, 0, c13, c14, 7
+#define AMEVTYPER18 p15, 0, c13, c15, 0
+#define AMEVTYPER19 p15, 0, c13, c15, 1
+#define AMEVTYPER1A p15, 0, c13, c15, 2
+#define AMEVTYPER1B p15, 0, c13, c15, 3
+#define AMEVTYPER1C p15, 0, c13, c15, 4
+#define AMEVTYPER1D p15, 0, c13, c15, 5
+#define AMEVTYPER1E p15, 0, c13, c15, 6
+#define AMEVTYPER1F p15, 0, c13, c15, 7
+
+/* AMCNTENSET0 definitions */
+#define AMCNTENSET0_Pn_SHIFT U(0)
+#define AMCNTENSET0_Pn_MASK U(0xffff)
+
+/* AMCNTENSET1 definitions */
+#define AMCNTENSET1_Pn_SHIFT U(0)
+#define AMCNTENSET1_Pn_MASK U(0xffff)
+
+/* AMCNTENCLR0 definitions */
+#define AMCNTENCLR0_Pn_SHIFT U(0)
+#define AMCNTENCLR0_Pn_MASK U(0xffff)
+
+/* AMCNTENCLR1 definitions */
+#define AMCNTENCLR1_Pn_SHIFT U(0)
+#define AMCNTENCLR1_Pn_MASK U(0xffff)
+
+/* AMCR definitions */
+#define AMCR_CG1RZ_SHIFT U(17)
+#define AMCR_CG1RZ_BIT (ULL(1) << AMCR_CG1RZ_SHIFT)
+
+/* AMCFGR definitions */
+#define AMCFGR_NCG_SHIFT U(28)
+#define AMCFGR_NCG_MASK U(0xf)
+#define AMCFGR_N_SHIFT U(0)
+#define AMCFGR_N_MASK U(0xff)
+
+/* AMCGCR definitions */
+#define AMCGCR_CG0NC_SHIFT U(0)
+#define AMCGCR_CG0NC_MASK U(0xff)
+#define AMCGCR_CG1NC_SHIFT U(8)
+#define AMCGCR_CG1NC_MASK U(0xff)
+
+/*******************************************************************************
+ * Definitions for DynamIQ Shared Unit registers
+ ******************************************************************************/
+#define CLUSTERPWRDN p15, 0, c15, c3, 6
+
+/* CLUSTERPWRDN register definitions */
+#define DSU_CLUSTER_PWR_OFF 0
+#define DSU_CLUSTER_PWR_ON 1
+#define DSU_CLUSTER_PWR_MASK U(1)
+
+#endif /* ARCH_H */
diff --git a/include/arch/aarch32/arch_features.h b/include/arch/aarch32/arch_features.h
new file mode 100644
index 0000000..ddf0968
--- /dev/null
+++ b/include/arch/aarch32/arch_features.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_FEATURES_H
+#define ARCH_FEATURES_H
+
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+
+static inline bool is_armv7_gentimer_present(void)
+{
+ return ((read_id_pfr1() >> ID_PFR1_GENTIMER_SHIFT) &
+ ID_PFR1_GENTIMER_MASK) != 0U;
+}
+
+static inline bool is_armv8_2_ttcnp_present(void)
+{
+ return ((read_id_mmfr4() >> ID_MMFR4_CNP_SHIFT) &
+ ID_MMFR4_CNP_MASK) != 0U;
+}
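A usage sketch (hedged; the sequence is illustrative and relies on TTBR_CNP_BIT and the 64-bit TTBR0 accessors defined elsewhere in this patch): the Common-not-Private bit should only be set when the feature check passes:

    if (is_armv8_2_ttcnp_present()) {
            /* Long-descriptor TTBR0: set the Common-not-Private bit. */
            write64_ttbr0(read64_ttbr0() | TTBR_CNP_BIT);
    }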
+
+#endif /* ARCH_FEATURES_H */
diff --git a/include/arch/aarch32/arch_helpers.h b/include/arch/aarch32/arch_helpers.h
new file mode 100644
index 0000000..95d056f
--- /dev/null
+++ b/include/arch/aarch32/arch_helpers.h
@@ -0,0 +1,474 @@
+/*
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
+ * Portions copyright (c) 2021-2022, ProvenRun S.A.S. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_HELPERS_H
+#define ARCH_HELPERS_H
+
+#include <cdefs.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch.h>
+
+/**********************************************************************
+ * Macros which create inline functions to read or write CPU system
+ * registers
+ *********************************************************************/
+
+#define _DEFINE_COPROCR_WRITE_FUNC(_name, coproc, opc1, CRn, CRm, opc2) \
+static inline void write_## _name(u_register_t v) \
+{ \
+ __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_COPROCR_READ_FUNC(_name, coproc, opc1, CRn, CRm, opc2) \
+static inline u_register_t read_ ## _name(void) \
+{ \
+ u_register_t v; \
+ __asm__ volatile ("mrc "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : "=r" (v));\
+ return v; \
+}
+
+/*
+ * The undocumented %Q and %R extended asm operand modifiers are used to
+ * implement the 64 bit `mrrc` and `mcrr` accessors below.
+ */
+
+#define _DEFINE_COPROCR_WRITE_FUNC_64(_name, coproc, opc1, CRm) \
+static inline void write64_## _name(uint64_t v) \
+{ \
+ __asm__ volatile ("mcrr "#coproc","#opc1", %Q0, %R0,"#CRm : : "r" (v));\
+}
+
+#define _DEFINE_COPROCR_READ_FUNC_64(_name, coproc, opc1, CRm) \
+static inline uint64_t read64_## _name(void) \
+{ uint64_t v; \
+ __asm__ volatile ("mrrc "#coproc","#opc1", %Q0, %R0,"#CRm : "=r" (v));\
+ return v; \
+}
+
+#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name) \
+static inline u_register_t read_ ## _name(void) \
+{ \
+ u_register_t v; \
+ __asm__ volatile ("mrs %0, " #_reg_name : "=r" (v)); \
+ return v; \
+}
+
+#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name) \
+static inline void write_ ## _name(u_register_t v) \
+{ \
+ __asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v)); \
+}
+
+#define _DEFINE_SYSREG_WRITE_CONST_FUNC(_name, _reg_name) \
+static inline void write_ ## _name(const u_register_t v) \
+{ \
+ __asm__ volatile ("msr " #_reg_name ", %0" : : "i" (v)); \
+}
+
+/* Define read function for coproc register */
+#define DEFINE_COPROCR_READ_FUNC(_name, ...) \
+ _DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__)
+
+/* Define write function for coproc register */
+#define DEFINE_COPROCR_WRITE_FUNC(_name, ...) \
+ _DEFINE_COPROCR_WRITE_FUNC(_name, __VA_ARGS__)
+
+/* Define read & write function for coproc register */
+#define DEFINE_COPROCR_RW_FUNCS(_name, ...) \
+ _DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__) \
+ _DEFINE_COPROCR_WRITE_FUNC(_name, __VA_ARGS__)
+
+/* Define 64 bit read function for coproc register */
+#define DEFINE_COPROCR_READ_FUNC_64(_name, ...) \
+ _DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__)
+
+/* Define 64 bit write function for coproc register */
+#define DEFINE_COPROCR_WRITE_FUNC_64(_name, ...) \
+ _DEFINE_COPROCR_WRITE_FUNC_64(_name, __VA_ARGS__)
+
+/* Define 64 bit read & write function for coproc register */
+#define DEFINE_COPROCR_RW_FUNCS_64(_name, ...) \
+ _DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__) \
+ _DEFINE_COPROCR_WRITE_FUNC_64(_name, __VA_ARGS__)
+
+/* Define read & write function for system register */
+#define DEFINE_SYSREG_RW_FUNCS(_name) \
+ _DEFINE_SYSREG_READ_FUNC(_name, _name) \
+ _DEFINE_SYSREG_WRITE_FUNC(_name, _name)
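To make the expansion concrete: given the SCR encoding (p15, 0, c1, c1, 0) from arch.h, DEFINE_COPROCR_RW_FUNCS(scr, SCR), used further down in this header, generates code equivalent to this sketch:

    static inline u_register_t read_scr(void)
    {
            u_register_t v;

            __asm__ volatile ("mrc p15, 0, %0, c1, c1, 0" : "=r" (v));
            return v;
    }

    static inline void write_scr(u_register_t v)
    {
            __asm__ volatile ("mcr p15, 0, %0, c1, c1, 0" : : "r" (v));
    }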
+
+/**********************************************************************
+ * Macros to create inline functions for tlbi operations
+ *********************************************************************/
+
+#define _DEFINE_TLBIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2) \
+static inline void tlbi##_op(void) \
+{ \
+ u_register_t v = 0; \
+ __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_BPIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2) \
+static inline void bpi##_op(void) \
+{ \
+ u_register_t v = 0; \
+ __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_TLBIOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2) \
+static inline void tlbi##_op(u_register_t v) \
+{ \
+ __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+/* Define function for simple TLBI operation */
+#define DEFINE_TLBIOP_FUNC(_op, ...) \
+ _DEFINE_TLBIOP_FUNC(_op, __VA_ARGS__)
+
+/* Define function for TLBI operation with register parameter */
+#define DEFINE_TLBIOP_PARAM_FUNC(_op, ...) \
+ _DEFINE_TLBIOP_PARAM_FUNC(_op, __VA_ARGS__)
+
+/* Define function for simple BPI operation */
+#define DEFINE_BPIOP_FUNC(_op, ...) \
+ _DEFINE_BPIOP_FUNC(_op, __VA_ARGS__)
+
+/**********************************************************************
+ * Macros to create inline functions for DC operations
+ *********************************************************************/
+#define _DEFINE_DCOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2) \
+static inline void dc##_op(u_register_t v) \
+{ \
+ __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+/* Define function for DC operation with register parameter */
+#define DEFINE_DCOP_PARAM_FUNC(_op, ...) \
+ _DEFINE_DCOP_PARAM_FUNC(_op, __VA_ARGS__)
+
+/**********************************************************************
+ * Macros to create inline functions for system instructions
+ *********************************************************************/
+ /* Define function for simple system instruction */
+#define DEFINE_SYSOP_FUNC(_op) \
+static inline void _op(void) \
+{ \
+ __asm__ (#_op); \
+}
+
+
+/* Define function for system instruction with type specifier */
+#define DEFINE_SYSOP_TYPE_FUNC(_op, _type) \
+static inline void _op ## _type(void) \
+{ \
+ __asm__ (#_op " " #_type : : : "memory"); \
+}
+
+/* Define function for system instruction with register parameter */
+#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type) \
+static inline void _op ## _type(u_register_t v) \
+{ \
+ __asm__ (#_op " " #_type ", %0" : : "r" (v)); \
+}
+
+void flush_dcache_range(uintptr_t addr, size_t size);
+void clean_dcache_range(uintptr_t addr, size_t size);
+void inv_dcache_range(uintptr_t addr, size_t size);
+bool is_dcache_enabled(void);
+
+void dcsw_op_louis(u_register_t op_type);
+void dcsw_op_all(u_register_t op_type);
+
+void disable_mmu_secure(void);
+void disable_mmu_icache_secure(void);
+
+DEFINE_SYSOP_FUNC(wfi)
+DEFINE_SYSOP_FUNC(wfe)
+DEFINE_SYSOP_FUNC(sev)
+DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, st)
+
+/* dmb ld is not valid for armv7/thumb machines */
+#if ARM_ARCH_MAJOR != 7
+DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
+#endif
+
+DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
+DEFINE_SYSOP_FUNC(isb)
+
+void __dead2 smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
+ uint32_t r4, uint32_t r5, uint32_t r6, uint32_t r7);
+
+DEFINE_SYSREG_RW_FUNCS(spsr)
+DEFINE_SYSREG_RW_FUNCS(cpsr)
+
+/*******************************************************************************
+ * System register accessor prototypes
+ ******************************************************************************/
+DEFINE_COPROCR_READ_FUNC(mpidr, MPIDR)
+DEFINE_COPROCR_READ_FUNC(midr, MIDR)
+DEFINE_COPROCR_READ_FUNC(id_mmfr4, ID_MMFR4)
+DEFINE_COPROCR_READ_FUNC(id_dfr0, ID_DFR0)
+DEFINE_COPROCR_READ_FUNC(id_pfr0, ID_PFR0)
+DEFINE_COPROCR_READ_FUNC(id_pfr1, ID_PFR1)
+DEFINE_COPROCR_READ_FUNC(isr, ISR)
+DEFINE_COPROCR_READ_FUNC(clidr, CLIDR)
+DEFINE_COPROCR_READ_FUNC_64(cntpct, CNTPCT_64)
+
+DEFINE_COPROCR_RW_FUNCS(scr, SCR)
+DEFINE_COPROCR_RW_FUNCS(ctr, CTR)
+DEFINE_COPROCR_RW_FUNCS(sctlr, SCTLR)
+DEFINE_COPROCR_RW_FUNCS(actlr, ACTLR)
+DEFINE_COPROCR_RW_FUNCS(hsctlr, HSCTLR)
+DEFINE_COPROCR_RW_FUNCS(hcr, HCR)
+DEFINE_COPROCR_RW_FUNCS(hcptr, HCPTR)
+DEFINE_COPROCR_RW_FUNCS(cntfrq, CNTFRQ)
+DEFINE_COPROCR_RW_FUNCS(cnthctl, CNTHCTL)
+DEFINE_COPROCR_RW_FUNCS(mair0, MAIR0)
+DEFINE_COPROCR_RW_FUNCS(mair1, MAIR1)
+DEFINE_COPROCR_RW_FUNCS(hmair0, HMAIR0)
+DEFINE_COPROCR_RW_FUNCS(ttbcr, TTBCR)
+DEFINE_COPROCR_RW_FUNCS(htcr, HTCR)
+DEFINE_COPROCR_RW_FUNCS(ttbr0, TTBR0)
+DEFINE_COPROCR_RW_FUNCS_64(ttbr0, TTBR0_64)
+DEFINE_COPROCR_RW_FUNCS(ttbr1, TTBR1)
+DEFINE_COPROCR_RW_FUNCS_64(httbr, HTTBR_64)
+DEFINE_COPROCR_RW_FUNCS(vpidr, VPIDR)
+DEFINE_COPROCR_RW_FUNCS(vmpidr, VMPIDR)
+DEFINE_COPROCR_RW_FUNCS_64(vttbr, VTTBR_64)
+DEFINE_COPROCR_RW_FUNCS_64(ttbr1, TTBR1_64)
+DEFINE_COPROCR_RW_FUNCS_64(cntvoff, CNTVOFF_64)
+DEFINE_COPROCR_RW_FUNCS(csselr, CSSELR)
+DEFINE_COPROCR_RW_FUNCS(hstr, HSTR)
+DEFINE_COPROCR_RW_FUNCS(cnthp_ctl_el2, CNTHP_CTL)
+DEFINE_COPROCR_RW_FUNCS(cnthp_tval_el2, CNTHP_TVAL)
+DEFINE_COPROCR_RW_FUNCS_64(cnthp_cval_el2, CNTHP_CVAL_64)
+
+#define get_cntp_ctl_enable(x) (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
+ CNTP_CTL_ENABLE_MASK)
+#define get_cntp_ctl_imask(x) (((x) >> CNTP_CTL_IMASK_SHIFT) & \
+ CNTP_CTL_IMASK_MASK)
+#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
+ CNTP_CTL_ISTATUS_MASK)
+
+#define set_cntp_ctl_enable(x) ((x) |= U(1) << CNTP_CTL_ENABLE_SHIFT)
+#define set_cntp_ctl_imask(x) ((x) |= U(1) << CNTP_CTL_IMASK_SHIFT)
+
+#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
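These helpers modify an in-memory copy of the control value, so a read-modify-write of the register is still needed; a hedged sketch of arming the EL2 physical timer with them (programming of the timer deadline is omitted):

    u_register_t ctl = read_cnthp_ctl_el2();

    set_cntp_ctl_enable(ctl);   /* set the ENABLE bit */
    clr_cntp_ctl_imask(ctl);    /* unmask the timer interrupt */
    write_cnthp_ctl_el2(ctl);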
+
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el1, ICC_SRE)
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el2, ICC_HSRE)
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el3, ICC_MSRE)
+DEFINE_COPROCR_RW_FUNCS(icc_pmr_el1, ICC_PMR)
+DEFINE_COPROCR_RW_FUNCS(icc_rpr_el1, ICC_RPR)
+DEFINE_COPROCR_RW_FUNCS(icc_igrpen1_el3, ICC_MGRPEN1)
+DEFINE_COPROCR_RW_FUNCS(icc_igrpen1_el1, ICC_IGRPEN1)
+DEFINE_COPROCR_RW_FUNCS(icc_igrpen0_el1, ICC_IGRPEN0)
+DEFINE_COPROCR_RW_FUNCS(icc_hppir0_el1, ICC_HPPIR0)
+DEFINE_COPROCR_RW_FUNCS(icc_hppir1_el1, ICC_HPPIR1)
+DEFINE_COPROCR_RW_FUNCS(icc_iar0_el1, ICC_IAR0)
+DEFINE_COPROCR_RW_FUNCS(icc_iar1_el1, ICC_IAR1)
+DEFINE_COPROCR_RW_FUNCS(icc_eoir0_el1, ICC_EOIR0)
+DEFINE_COPROCR_RW_FUNCS(icc_eoir1_el1, ICC_EOIR1)
+DEFINE_COPROCR_RW_FUNCS_64(icc_sgi0r_el1, ICC_SGI0R_EL1_64)
+DEFINE_COPROCR_WRITE_FUNC_64(icc_sgi1r, ICC_SGI1R_EL1_64)
+DEFINE_COPROCR_WRITE_FUNC_64(icc_asgi1r, ICC_ASGI1R_EL1_64)
+
+DEFINE_COPROCR_RW_FUNCS(sdcr, SDCR)
+DEFINE_COPROCR_RW_FUNCS(hdcr, HDCR)
+DEFINE_COPROCR_RW_FUNCS(cnthp_ctl, CNTHP_CTL)
+DEFINE_COPROCR_READ_FUNC(pmcr, PMCR)
+
+/*
+ * Address translation
+ */
+DEFINE_COPROCR_WRITE_FUNC(ats1cpr, ATS1CPR)
+DEFINE_COPROCR_WRITE_FUNC(ats1hr, ATS1HR)
+DEFINE_COPROCR_RW_FUNCS_64(par, PAR_64)
+
+DEFINE_COPROCR_RW_FUNCS(nsacr, NSACR)
+
+/* AArch32 coproc registers for 32bit MMU descriptor support */
+DEFINE_COPROCR_RW_FUNCS(prrr, PRRR)
+DEFINE_COPROCR_RW_FUNCS(nmrr, NMRR)
+DEFINE_COPROCR_RW_FUNCS(dacr, DACR)
+
+/* Coproc registers for 32bit AMU support */
+DEFINE_COPROCR_READ_FUNC(amcfgr, AMCFGR)
+DEFINE_COPROCR_READ_FUNC(amcgcr, AMCGCR)
+DEFINE_COPROCR_RW_FUNCS(amcr, AMCR)
+
+DEFINE_COPROCR_RW_FUNCS(amcntenset0, AMCNTENSET0)
+DEFINE_COPROCR_RW_FUNCS(amcntenset1, AMCNTENSET1)
+DEFINE_COPROCR_RW_FUNCS(amcntenclr0, AMCNTENCLR0)
+DEFINE_COPROCR_RW_FUNCS(amcntenclr1, AMCNTENCLR1)
+
+/* Coproc registers for 64bit AMU support */
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr00, AMEVCNTR00)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr01, AMEVCNTR01)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr02, AMEVCNTR02)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr03, AMEVCNTR03)
+
+/*
+ * TLBI operation prototypes
+ */
+DEFINE_TLBIOP_FUNC(all, TLBIALL)
+DEFINE_TLBIOP_FUNC(allis, TLBIALLIS)
+DEFINE_TLBIOP_PARAM_FUNC(mva, TLBIMVA)
+DEFINE_TLBIOP_PARAM_FUNC(mvaa, TLBIMVAA)
+DEFINE_TLBIOP_PARAM_FUNC(mvaais, TLBIMVAAIS)
+DEFINE_TLBIOP_PARAM_FUNC(mvahis, TLBIMVAHIS)
+
+/*
+ * BPI operation prototypes.
+ */
+DEFINE_BPIOP_FUNC(allis, BPIALLIS)
+
+/*
+ * DC operation prototypes
+ */
+DEFINE_DCOP_PARAM_FUNC(civac, DCCIMVAC)
+DEFINE_DCOP_PARAM_FUNC(ivac, DCIMVAC)
+#if ERRATA_A53_819472 || ERRATA_A53_824069 || ERRATA_A53_827319
+DEFINE_DCOP_PARAM_FUNC(cvac, DCCIMVAC)
+#else
+DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC)
+#endif
+
+/*
+ * DynamIQ Shared Unit power management
+ */
+DEFINE_COPROCR_RW_FUNCS(clusterpwrdn, CLUSTERPWRDN)
+
+/* Previously defined accessor functions with incomplete register names */
+#define dsb() dsbsy()
+#define dmb() dmbsy()
+
+/* dmb ld is not valid for armv7/thumb machines, so alias it to dmb */
+#if ARM_ARCH_MAJOR == 7
+#define dmbld() dmb()
+#endif
+
+#define IS_IN_SECURE() \
+ (GET_NS_BIT(read_scr()) == 0)
+
+#define IS_IN_HYP() (GET_M32(read_cpsr()) == MODE32_hyp)
+#define IS_IN_SVC() (GET_M32(read_cpsr()) == MODE32_svc)
+#define IS_IN_MON() (GET_M32(read_cpsr()) == MODE32_mon)
+#define IS_IN_EL2() IS_IN_HYP()
+/* If EL3 is AArch32, then secure PL1 and monitor mode correspond to EL3 */
+#define IS_IN_EL3() \
+ ((GET_M32(read_cpsr()) == MODE32_mon) || \
+ (IS_IN_SECURE() && (GET_M32(read_cpsr()) != MODE32_usr)))
+
+static inline unsigned int get_current_el(void)
+{
+ if (IS_IN_EL3()) {
+ return 3U;
+ } else if (IS_IN_EL2()) {
+ return 2U;
+ } else {
+ return 1U;
+ }
+}
+
+/* Macros for compatibility with AArch64 system registers */
+#define read_mpidr_el1() read_mpidr()
+
+#define read_scr_el3() read_scr()
+#define write_scr_el3(_v) write_scr(_v)
+
+#define read_hcr_el2() read_hcr()
+#define write_hcr_el2(_v) write_hcr(_v)
+
+#define read_cpacr_el1() read_cpacr()
+#define write_cpacr_el1(_v) write_cpacr(_v)
+
+#define read_cntfrq_el0() read_cntfrq()
+#define write_cntfrq_el0(_v) write_cntfrq(_v)
+#define read_isr_el1() read_isr()
+
+#define read_cntpct_el0() read64_cntpct()
+
+#define read_ctr_el0() read_ctr()
+
+#define write_icc_sgi0r_el1(_v) write64_icc_sgi0r_el1(_v)
+#define write_icc_sgi1r(_v) write64_icc_sgi1r(_v)
+#define write_icc_asgi1r(_v) write64_icc_asgi1r(_v)
+
+#define read_daif() read_cpsr()
+#define write_daif(flags) write_cpsr(flags)
+
+#define read_cnthp_cval_el2() read64_cnthp_cval_el2()
+#define write_cnthp_cval_el2(v) write64_cnthp_cval_el2(v)
+
+#define read_amcntenset0_el0() read_amcntenset0()
+#define read_amcntenset1_el0() read_amcntenset1()
+
+/* Helper functions to manipulate CPSR */
+static inline void enable_irq(void)
+{
+ /*
+ * The compiler memory barrier will prevent the compiler from
+ * scheduling non-volatile memory access after the write to the
+ * register.
+ *
+ * This could happen if some initialization code issues non-volatile
+	 * accesses to an area used by an interrupt handler, on the assumption
+	 * that it is safe because interrupts are disabled at the time it does
+	 * that (according to program order). However, non-volatile accesses
+	 * are not necessarily kept in program order relative to volatile
+	 * inline assembly statements (and volatile accesses).
+ */
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsie i");
+ isb();
+}
+
+static inline void enable_serror(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsie a");
+ isb();
+}
+
+static inline void enable_fiq(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsie f");
+ isb();
+}
+
+static inline void disable_irq(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsid i");
+ isb();
+}
+
+static inline void disable_serror(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsid a");
+ isb();
+}
+
+static inline void disable_fiq(void)
+{
+ COMPILER_BARRIER();
+ __asm__ volatile ("cpsid f");
+ isb();
+}
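A usage sketch of the hazard those comments describe (the names are illustrative): a plain store published to an interrupt handler must not be compiler-reordered past the enabling write, which is exactly what the COMPILER_BARRIER() in enable_irq() prevents:

    static bool handler_ready;          /* plain, non-volatile state */

    static void finish_irq_setup(void)
    {
            handler_ready = true;       /* must complete before IRQs can be taken */
            enable_irq();               /* COMPILER_BARRIER() + "cpsie i" + isb */
    }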
+
+#endif /* ARCH_HELPERS_H */
diff --git a/include/arch/aarch32/asm_macros.S b/include/arch/aarch32/asm_macros.S
new file mode 100644
index 0000000..483f9fe
--- /dev/null
+++ b/include/arch/aarch32/asm_macros.S
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef ASM_MACROS_S
+#define ASM_MACROS_S
+
+#include <arch.h>
+#include <common/asm_macros_common.S>
+#include <lib/spinlock.h>
+
+/*
+ * TLBI instruction with type specifier that implements the workaround for
+ * errata 813419 of Cortex-A57.
+ */
+#if ERRATA_A57_813419
+#define TLB_INVALIDATE(_reg, _coproc) \
+ stcopr _reg, _coproc; \
+ dsb ish; \
+ stcopr _reg, _coproc
+#else
+#define TLB_INVALIDATE(_reg, _coproc) \
+ stcopr _reg, _coproc
+#endif
+
+#define WORD_SIZE 4
+
+ /*
+	 * Coprocessor register accessors
+ */
+ .macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
+ mrc \coproc, \opc1, \reg, \CRn, \CRm, \opc2
+ .endm
+
+ .macro ldcopr16 reg1, reg2, coproc, opc1, CRm
+ mrrc \coproc, \opc1, \reg1, \reg2, \CRm
+ .endm
+
+ .macro stcopr reg, coproc, opc1, CRn, CRm, opc2
+ mcr \coproc, \opc1, \reg, \CRn, \CRm, \opc2
+ .endm
+
+ .macro stcopr16 reg1, reg2, coproc, opc1, CRm
+ mcrr \coproc, \opc1, \reg1, \reg2, \CRm
+ .endm
+
+ /* Cache line size helpers */
+ .macro dcache_line_size reg, tmp
+ ldcopr \tmp, CTR
+ ubfx \tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
+ mov \reg, #WORD_SIZE
+ lsl \reg, \reg, \tmp
+ .endm
+
+ .macro icache_line_size reg, tmp
+ ldcopr \tmp, CTR
+ and \tmp, \tmp, #CTR_IMINLINE_MASK
+ mov \reg, #WORD_SIZE
+ lsl \reg, \reg, \tmp
+ .endm
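In C terms the computation is: line size in bytes = WORD_SIZE << CTR.DminLine, since DminLine encodes the log2 of the line length in words; a hedged equivalent using the read_ctr() accessor from arch_helpers.h:

    static inline u_register_t dcache_line_bytes(void)
    {
            u_register_t ctr = read_ctr();

            /* 4 bytes per word (WORD_SIZE), shifted by CTR.DminLine. */
            return (u_register_t)4 <<
                   ((ctr >> CTR_DMINLINE_SHIFT) & CTR_DMINLINE_MASK);
    }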
+
+ /*
+ * Declare the exception vector table, enforcing it is aligned on a
+ * 32 byte boundary.
+ */
+ .macro vector_base label
+ .section .vectors, "ax"
+ .align 5
+ \label:
+ .endm
+
+ /*
+	 * This macro calculates the base address of the current CPU's
+	 * multiprocessor (MP) stack using the plat_my_core_pos() index, the
+	 * name of the stack storage and the size of each stack.
+ * Out: r0 = physical address of stack base
+ * Clobber: r14, r1, r2
+ */
+ .macro get_my_mp_stack _name, _size
+ bl plat_my_core_pos
+ ldr r2, =(\_name + \_size)
+ mov r1, #\_size
+ mla r0, r0, r1, r2
+ .endm
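In C terms the address computed is the top of this CPU's stack, assuming _name labels the start of a per-CPU stack array and stacks grow downwards (stacks and STACK_SIZE are illustrative names, not part of this file):

    uintptr_t stack_top = (uintptr_t)stacks +
                          ((uintptr_t)plat_my_core_pos() + 1U) * STACK_SIZE;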
+
+ /*
+	 * This macro calculates the base address of a uniprocessor (UP) stack
+	 * using the name of the stack storage and the size of the stack.
+ * Out: r0 = physical address of stack base
+ */
+ .macro get_up_stack _name, _size
+ ldr r0, =(\_name + \_size)
+ .endm
+
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
+ /*
+ * Macro for mitigating against speculative execution.
+ * ARMv7 cores without Virtualization extension do not support the
+ * eret instruction.
+ */
+ .macro exception_return
+ movs pc, lr
+ dsb nsh
+ isb
+ .endm
+
+#else
+ /*
+ * Macro for mitigating against speculative execution beyond ERET. Uses the
+ * speculation barrier instruction introduced by FEAT_SB, if it's enabled.
+ */
+ .macro exception_return
+ eret
+#if ENABLE_FEAT_SB
+ sb
+#else
+ dsb nsh
+ isb
+#endif
+ .endm
+#endif
+
+#if (ARM_ARCH_MAJOR == 7)
+ /* ARMv7 does not support stl instruction */
+ .macro stl _reg, _write_lock
+ dmb
+ str \_reg, \_write_lock
+ dsb
+ .endm
+#endif
+
+ /*
+ * Helper macro to generate the best mov/movw/movt combinations
+ * according to the value to be moved.
+ */
+ .macro mov_imm _reg, _val
+ .if ((\_val) & 0xffff0000) == 0
+ mov \_reg, #(\_val)
+ .else
+ movw \_reg, #((\_val) & 0xffff)
+ movt \_reg, #((\_val) >> 16)
+ .endif
+ .endm
+
+ /*
+ * Macro to mark instances where we're jumping to a function and don't
+ * expect a return. To provide the function being jumped to with
+ * additional information, we use 'bl' instruction to jump rather than
+ * 'b'.
+ *
+	 * Debuggers infer the location of a call from where LR points, which
+	 * is usually the instruction after 'bl'. If this macro expansion
+	 * happens to be the last location in a function, the LR will point to
+	 * a location beyond the function, thereby misleading the debugger's
+	 * backtrace. We therefore insert a 'nop' after the function call for
+	 * debug builds, unless the 'skip_nop' parameter is non-zero.
+ */
+ .macro no_ret _func:req, skip_nop=0
+ bl \_func
+#if DEBUG
+ .ifeq \skip_nop
+ nop
+ .endif
+#endif
+ .endm
+
+ /*
+ * Reserve space for a spin lock in assembly file.
+ */
+ .macro define_asm_spinlock _name:req
+ .align SPINLOCK_ASM_ALIGN
+ \_name:
+ .space SPINLOCK_ASM_SIZE
+ .endm
+
+ /*
+ * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
+ * and the top 32 bits of `_val` into `_reg_h`. If either the bottom
+ * or top word of `_val` is zero, the corresponding OR operation
+ * is skipped.
+ */
+ .macro orr64_imm _reg_l, _reg_h, _val
+ .if (\_val >> 32)
+ orr \_reg_h, \_reg_h, #(\_val >> 32)
+ .endif
+ .if (\_val & 0xffffffff)
+ orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
+ .endif
+ .endm
+
+ /*
+ * Helper macro to bitwise-clear bits in `_reg_l` and
+ * `_reg_h` given a 64 bit immediate `_val`. The set bits
+ * in the bottom word of `_val` dictate which bits from
+ * `_reg_l` should be cleared. Similarly, the set bits in
+ * the top word of `_val` dictate which bits from `_reg_h`
+ * should be cleared. If either the bottom or top word of
+ * `_val` is zero, the corresponding BIC operation is skipped.
+ */
+ .macro bic64_imm _reg_l, _reg_h, _val
+ .if (\_val >> 32)
+ bic \_reg_h, \_reg_h, #(\_val >> 32)
+ .endif
+ .if (\_val & 0xffffffff)
+ bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
+ .endif
+ .endm
+
+ /*
+	 * Helper macro for carrying out division in software when
+	 * hardware division is not supported. \top holds the dividend
+	 * on entry and the remainder on exit. \bot holds the divisor.
+	 * \div holds the quotient and \temp is a temporary register
+	 * used in the calculation.
+ * The division algorithm has been obtained from:
+ * http://www.keil.com/support/man/docs/armasm/armasm_dom1359731155623.htm
+ */
+ .macro softudiv div:req,top:req,bot:req,temp:req
+
+ mov \temp, \bot
+ cmp \temp, \top, lsr #1
+div1:
+ movls \temp, \temp, lsl #1
+ cmp \temp, \top, lsr #1
+ bls div1
+ mov \div, #0
+
+div2:
+ cmp \top, \temp
+ subcs \top, \top,\temp
+	adc	\div, \div, \div
+ mov \temp, \temp, lsr #1
+ cmp \temp, \bot
+ bhs div2
+ .endm
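For reference, a hedged C rendering of the same shift-and-subtract algorithm (it assumes bot != 0 and mirrors the register roles: the dividend is passed in *top and the remainder is left there):

    static unsigned int softudiv_c(unsigned int *top, unsigned int bot)
    {
            unsigned int temp = bot;
            unsigned int div = 0U;

            /* div1: shift the divisor up until it sits just below the dividend */
            while (temp <= (*top >> 1))
                    temp <<= 1;

            /* div2: subtract and shift down, collecting quotient bits */
            do {
                    unsigned int bit = (*top >= temp) ? 1U : 0U;

                    if (bit != 0U)
                            *top -= temp;
                    div = (div << 1) | bit; /* the adc step */
                    temp >>= 1;
            } while (temp >= bot);

            return div;
    }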
+#endif /* ASM_MACROS_S */
diff --git a/include/arch/aarch32/assert_macros.S b/include/arch/aarch32/assert_macros.S
new file mode 100644
index 0000000..ab3a2eb
--- /dev/null
+++ b/include/arch/aarch32/assert_macros.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef ASSERT_MACROS_S
+#define ASSERT_MACROS_S
+
+ /*
+ * Assembler macro to enable asm_assert. We assume that the stack is
+ * initialized prior to invoking this macro.
+ */
+#define ASM_ASSERT(_cc) \
+.ifndef .L_assert_filename ;\
+ .pushsection .rodata.str1.1, "aS" ;\
+ .L_assert_filename: ;\
+ .string __FILE__ ;\
+ .popsection ;\
+.endif ;\
+ b##_cc 300f ;\
+ ldr r0, =.L_assert_filename ;\
+ ldr r1, =__LINE__ ;\
+ b asm_assert;\
+300:
+
+#endif /* ASSERT_MACROS_S */
diff --git a/include/arch/aarch32/console_macros.S b/include/arch/aarch32/console_macros.S
new file mode 100644
index 0000000..996cb32
--- /dev/null
+++ b/include/arch/aarch32/console_macros.S
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef CONSOLE_MACROS_S
+#define CONSOLE_MACROS_S
+
+#include <drivers/console.h>
+
+/*
+ * This macro encapsulates the common setup that has to be done at the end of
+ * a console driver's register function. It will register all of the driver's
+ * callbacks in the console_t structure and initialize the flags field (by
+ * default consoles are enabled for the "boot" and "crash" states; this can be
+ * changed after registration with the console_set_scope() function). It ends
+ * with a tail call to console_register, whose return goes straight back to
+ * the caller.
+ * REQUIRES console_t pointer in r0 and a valid return address in lr.
+ */
+ .macro finish_console_register _driver, putc=0, getc=0, flush=0
+ /*
+	 * If any of the callbacks is not specified or is set to 0, then the
+ * corresponding callback entry in console_t is set to 0.
+ */
+ .ifne \putc
+ ldr r1, =console_\_driver\()_putc
+ .else
+ mov r1, #0
+ .endif
+ str r1, [r0, #CONSOLE_T_PUTC]
+
+ .ifne \getc
+ ldr r1, =console_\_driver\()_getc
+ .else
+ mov r1, #0
+ .endif
+ str r1, [r0, #CONSOLE_T_GETC]
+
+ .ifne \flush
+ ldr r1, =console_\_driver\()_flush
+ .else
+ mov r1, #0
+ .endif
+ str r1, [r0, #CONSOLE_T_FLUSH]
+
+ mov r1, #(CONSOLE_FLAG_BOOT | CONSOLE_FLAG_CRASH)
+ str r1, [r0, #CONSOLE_T_FLAGS]
+ b console_register
+ .endm
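In C terms the macro performs the equivalent of the following before tail-calling console_register (a sketch only; the field names mirror the CONSOLE_T_* offsets but are assumptions here, and xxx stands for the driver name):

    console->putc  = putc  ? console_xxx_putc  : NULL;
    console->getc  = getc  ? console_xxx_getc  : NULL;
    console->flush = flush ? console_xxx_flush : NULL;
    console->flags = CONSOLE_FLAG_BOOT | CONSOLE_FLAG_CRASH;
    return console_register(console);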
+
+#endif /* CONSOLE_MACROS_S */
diff --git a/include/arch/aarch32/el3_common_macros.S b/include/arch/aarch32/el3_common_macros.S
new file mode 100644
index 0000000..8b6765a
--- /dev/null
+++ b/include/arch/aarch32/el3_common_macros.S
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef EL3_COMMON_MACROS_S
+#define EL3_COMMON_MACROS_S
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+#define PAGE_START_MASK ~(PAGE_SIZE_MASK)
+
+ /*
+ * Helper macro to initialise EL3 registers we care about.
+ */
+ .macro el3_arch_init_common
+ /* ---------------------------------------------------------------------
+ * SCTLR has already been initialised - read current value before
+ * modifying.
+ *
+ * SCTLR.I: Enable the instruction cache.
+ *
+ * SCTLR.A: Enable Alignment fault checking. All instructions that load
+ * or store one or more registers have an alignment check that the
+ * address being accessed is aligned to the size of the data element(s)
+ * being accessed.
+ * ---------------------------------------------------------------------
+ */
+ ldr r1, =(SCTLR_I_BIT | SCTLR_A_BIT)
+ ldcopr r0, SCTLR
+ orr r0, r0, r1
+ stcopr r0, SCTLR
+ isb
+
+ /* ---------------------------------------------------------------------
+ * Initialise SCR, setting all fields rather than relying on the hw.
+ *
+ * SCR.SIF: Enabled so that Secure state instruction fetches from
+ * Non-secure memory are not permitted.
+ * ---------------------------------------------------------------------
+ */
+ ldr r0, =(SCR_RESET_VAL | SCR_SIF_BIT)
+ stcopr r0, SCR
+
+ /* -----------------------------------------------------
+ * Enable the Asynchronous data abort now that the
+ * exception vectors have been setup.
+ * -----------------------------------------------------
+ */
+ cpsie a
+ isb
+
+ /* ---------------------------------------------------------------------
+ * Initialise NSACR, setting all the fields, except for the
+ * IMPLEMENTATION DEFINED field, rather than relying on the hw. Some
+ * fields are architecturally UNKNOWN on reset.
+ *
+ * NSACR_ENABLE_FP_ACCESS: Represents NSACR.cp11 and NSACR.cp10. The
+ * cp11 field is ignored, but is set to same value as cp10. The cp10
+ * field is set to allow access to Advanced SIMD and floating point
+ * features from both Security states.
+ *
+	 * NSACR.NSTRCDIS: When system register trace is implemented, set to one
+ * so that NS System register accesses to all implemented trace
+ * registers are disabled.
+ * When system register trace is not implemented, this bit is RES0 and
+ * hence set to zero.
+ * ---------------------------------------------------------------------
+ */
+ ldcopr r0, NSACR
+ and r0, r0, #NSACR_IMP_DEF_MASK
+ orr r0, r0, #(NSACR_RESET_VAL | NSACR_ENABLE_FP_ACCESS)
+ ldcopr r1, ID_DFR0
+ ubfx r1, r1, #ID_DFR0_COPTRC_SHIFT, #ID_DFR0_COPTRC_LENGTH
+ cmp r1, #ID_DFR0_COPTRC_SUPPORTED
+ bne 1f
+ orr r0, r0, #NSTRCDIS_BIT
+1:
+ stcopr r0, NSACR
+ isb
+
+ /* ---------------------------------------------------------------------
+ * Initialise CPACR, setting all fields rather than relying on hw. Some
+ * fields are architecturally UNKNOWN on reset.
+ *
+ * CPACR.TRCDIS: Trap control for PL0 and PL1 System register accesses
+ * to trace registers. Set to zero to allow access.
+ *
+ * CPACR_ENABLE_FP_ACCESS: Represents CPACR.cp11 and CPACR.cp10. The
+ * cp11 field is ignored, but is set to same value as cp10. The cp10
+ * field is set to allow full access from PL0 and PL1 to floating-point
+ * and Advanced SIMD features.
+ * ---------------------------------------------------------------------
+ */
+ ldr r0, =((CPACR_RESET_VAL | CPACR_ENABLE_FP_ACCESS) & ~(TRCDIS_BIT))
+ stcopr r0, CPACR
+ isb
+
+ /* ---------------------------------------------------------------------
+ * Initialise FPEXC, setting all fields rather than relying on hw. Some
+ * fields are architecturally UNKNOWN on reset and are set to zero
+ * except for field(s) listed below.
+ *
+ * FPEXC.EN: Enable access to Advanced SIMD and floating point features
+ * from all exception levels.
+ *
+ * __SOFTFP__: Predefined macro exposed by soft-float toolchain.
+	 * ARMv7 and Cortex-A32 (ARMv8/AArch32) have both soft-float and
+	 * hard-float toolchain variants; avoid compiling the code below with a
+	 * soft-float toolchain, as the "vmsr" instruction will not be recognized.
+ * ---------------------------------------------------------------------
+ */
+#if ((ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_VFP)) && !(__SOFTFP__)
+ ldr r0, =(FPEXC_RESET_VAL | FPEXC_EN_BIT)
+ vmsr FPEXC, r0
+ isb
+#endif
+
+#if (ARM_ARCH_MAJOR > 7)
+ /* ---------------------------------------------------------------------
+ * Initialise SDCR, setting all the fields rather than relying on hw.
+ *
+ * SDCR.SPD: Disable AArch32 privileged debug. Debug exceptions from
+ * Secure EL1 are disabled.
+ *
+ * SDCR.SCCD: Set to one so that cycle counting by PMCCNTR is prohibited
+ * in Secure state. This bit is RES0 in versions of the architecture
+ * earlier than ARMv8.5, setting it to 1 doesn't have any effect on
+ * them.
+ *
+	 * SDCR.TTRF: Set to one so that accesses to trace filter control
+	 * registers in non-monitor mode generate a Monitor trap exception,
+	 * unless the access generates a higher priority exception, when
+	 * trace filter control (FEAT_TRF) is implemented.
+ * When FEAT_TRF is not implemented, this bit is RES0.
+ * ---------------------------------------------------------------------
+ */
+ ldr r0, =((SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE) | \
+ SDCR_SCCD_BIT) & ~SDCR_TTRF_BIT)
+ ldcopr r1, ID_DFR0
+ ubfx r1, r1, #ID_DFR0_TRACEFILT_SHIFT, #ID_DFR0_TRACEFILT_LENGTH
+ cmp r1, #ID_DFR0_TRACEFILT_SUPPORTED
+ bne 1f
+ orr r0, r0, #SDCR_TTRF_BIT
+1:
+ stcopr r0, SDCR
+
+ /* ---------------------------------------------------------------------
+ * Initialise PMCR, setting all fields rather than relying
+ * on hw. Some fields are architecturally UNKNOWN on reset.
+ *
+ * PMCR.LP: Set to one so that event counter overflow, which
+ * is recorded in PMOVSCLR[0-30], occurs on the increment
+ * that changes PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU
+ * is implemented. This bit is RES0 in versions of the architecture
+ * earlier than ARMv8.5; setting it to 1 doesn't have any effect
+ * on them.
+ * This bit is Reserved, UNK/SBZP in ARMv7.
+ *
+ * PMCR.LC: Set to one so that cycle counter overflow, which
+ * is recorded in PMOVSCLR[31], occurs on the increment
+ * that changes PMCCNTR[63] from 1 to 0.
+ * This bit is Reserved, UNK/SBZP in ARMv7.
+ *
+ * PMCR.DP: Set to one to prohibit cycle counting whilst in Secure mode.
+ * ---------------------------------------------------------------------
+ */
+ ldr r0, =(PMCR_RESET_VAL | PMCR_DP_BIT | PMCR_LC_BIT | \
+ PMCR_LP_BIT)
+#else
+ ldr r0, =(PMCR_RESET_VAL | PMCR_DP_BIT)
+#endif
+ stcopr r0, PMCR
+
+ /*
+ * If Data Independent Timing (DIT) functionality is implemented,
+ * always enable DIT in EL3.
+ */
+ ldcopr r0, ID_PFR0
+ and r0, r0, #(ID_PFR0_DIT_MASK << ID_PFR0_DIT_SHIFT)
+ cmp r0, #ID_PFR0_DIT_SUPPORTED
+ bne 1f
+ mrs r0, cpsr
+ orr r0, r0, #CPSR_DIT_BIT
+ msr cpsr_cxsf, r0
+1:
+ .endm
+
+/* -----------------------------------------------------------------------------
+ * This is the superset of actions that need to be performed during a cold boot
+ * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
+ *
+ * This macro will always perform reset handling, architectural initialisations
+ * and stack setup. The rest of the actions are optional because they might not
+ * be needed, depending on the context in which this macro is called. This is
+ * why this macro is parameterised; each parameter allows certain actions to be
+ * enabled or disabled.
+ *
+ * _init_sctlr:
+ * Whether the macro needs to initialise the SCTLR register, including
+ * configuring the endianness of data accesses.
+ *
+ * _warm_boot_mailbox:
+ * Whether the macro needs to detect the type of boot (cold/warm). The
+ * detection is based on the platform entrypoint address: if it is zero
+ * then it is a cold boot, otherwise it is a warm boot. In the latter case,
+ * this macro jumps to the platform entrypoint address.
+ *
+ * _secondary_cold_boot:
+ * Whether the macro needs to identify the CPU that is calling it: primary
+ * CPU or secondary CPU. The primary CPU will be allowed to carry on with
+ * the platform initialisations, while the secondaries will be put in a
+ * platform-specific state in the meantime.
+ *
+ * If the caller knows this macro will only be called by the primary CPU
+ * then this parameter can be defined to 0 to skip this step.
+ *
+ * _init_memory:
+ * Whether the macro needs to initialise the memory.
+ *
+ * _init_c_runtime:
+ * Whether the macro needs to initialise the C runtime environment.
+ *
+ * _exception_vectors:
+ * Address of the exception vectors to program in the VBAR and MVBAR
+ * registers.
+ *
+ * _pie_fixup_size:
+ * Size of the memory region within which to fix up the Global
+ * Descriptor Table (GDT).
+ *
+ * A non-zero value is expected when the firmware needs the GDT to be
+ * fixed up.
+ *
+ * -----------------------------------------------------------------------------
+ */
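+
+/*
+ * An illustrative invocation, modelled on a typical BL1 entrypoint. The
+ * argument values and the `bl1_exceptions` vector symbol are examples
+ * rather than a prescription; each BL image chooses its own:
+ *
+ *	el3_entrypoint_common					\
+ *		_init_sctlr=1					\
+ *		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
+ *		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
+ *		_init_memory=1					\
+ *		_init_c_runtime=1				\
+ *		_exception_vectors=bl1_exceptions		\
+ *		_pie_fixup_size=0
+ */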
+ .macro el3_entrypoint_common \
+ _init_sctlr, _warm_boot_mailbox, _secondary_cold_boot, \
+ _init_memory, _init_c_runtime, _exception_vectors, \
+ _pie_fixup_size
+
+ /* Make sure we are in Secure Mode */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCR
+ tst r0, #SCR_NS_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ .if \_init_sctlr
+ /* -------------------------------------------------------------
+ * This is the initialisation of SCTLR and so must ensure that
+ * all fields are explicitly set rather than relying on hw. Some
+ * fields reset to an IMPLEMENTATION DEFINED value.
+ *
+ * SCTLR.TE: Set to zero so that exceptions to an Exception
+ * Level executing at PL1 are taken to A32 state.
+ *
+ * SCTLR.EE: Set the CPU endianness before doing anything that
+ * might involve memory reads or writes. Set to zero to select
+ * Little Endian.
+ *
+ * SCTLR.V: Set to zero to select the normal exception vectors
+ * with base address held in VBAR.
+ *
+ * SCTLR.DSSBS: Set to zero to disable speculation store bypass
+ * safe behaviour upon exception entry to EL3.
+ * -------------------------------------------------------------
+ */
+ ldr r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | \
+ SCTLR_V_BIT | SCTLR_DSSBS_BIT))
+ stcopr r0, SCTLR
+ isb
+ .endif /* _init_sctlr */
+
+ /* Switch to monitor mode */
+ cps #MODE32_mon
+ isb
+
+#if DISABLE_MTPMU
+ bl mtpmu_disable
+#endif
+
+ .if \_warm_boot_mailbox
+ /* -------------------------------------------------------------
+ * This code will be executed for both warm and cold resets.
+ * Now is the time to distinguish between the two.
+ * Query the platform entrypoint address and if it is not zero
+ * then it means it is a warm boot so jump to this address.
+ * -------------------------------------------------------------
+ */
+ bl plat_get_my_entrypoint
+ cmp r0, #0
+ bxne r0
+ .endif /* _warm_boot_mailbox */
+
+ .if \_pie_fixup_size
+#if ENABLE_PIE
+ /*
+ * ------------------------------------------------------------
+ * If PIE is enabled, fix up the Global Descriptor Table (GDT)
+ * only once, during the primary core's cold boot path.
+ *
+ * The compile-time base address required for the fixup is
+ * calculated using the "pie_fixup" label within the first page.
+ * ------------------------------------------------------------
+ */
+ pie_fixup:
+ ldr r0, =pie_fixup
+ ldr r1, =PAGE_START_MASK
+ and r0, r0, r1
+ mov_imm r1, \_pie_fixup_size
+ add r1, r1, r0
+ bl fixup_gdt_reloc
+#endif /* ENABLE_PIE */
+ .endif /* _pie_fixup_size */
+
+ /* ---------------------------------------------------------------------
+ * Set the exception vectors (VBAR/MVBAR).
+ * ---------------------------------------------------------------------
+ */
+ ldr r0, =\_exception_vectors
+ stcopr r0, VBAR
+ stcopr r0, MVBAR
+ isb
+
+ /* ---------------------------------------------------------------------
+ * It is a cold boot.
+ * Perform any processor specific actions upon reset e.g. cache, TLB
+ * invalidations etc.
+ * ---------------------------------------------------------------------
+ */
+ bl reset_handler
+
+ el3_arch_init_common
+
+ .if \_secondary_cold_boot
+ /* -------------------------------------------------------------
+ * Check if this is a primary or secondary CPU cold boot.
+ * The primary CPU will set up the platform while the
+ * secondaries are placed in a platform-specific state until the
+ * primary CPU performs the necessary actions to bring them out
+ * of that state and allows entry into the OS.
+ * -------------------------------------------------------------
+ */
+ bl plat_is_my_cpu_primary
+ cmp r0, #0
+ bne do_primary_cold_boot
+
+ /* This is a cold boot on a secondary CPU */
+ bl plat_secondary_cold_boot_setup
+ /* plat_secondary_cold_boot_setup() is not supposed to return */
+ no_ret plat_panic_handler
+
+ do_primary_cold_boot:
+ .endif /* _secondary_cold_boot */
+
+ /* ---------------------------------------------------------------------
+ * Initialize memory now. Secondary CPU initialization won't get to this
+ * point.
+ * ---------------------------------------------------------------------
+ */
+
+ .if \_init_memory
+ bl platform_mem_init
+ .endif /* _init_memory */
+
+ /* ---------------------------------------------------------------------
+ * Init C runtime environment:
+ * - Zero-initialise the NOBITS sections. There are 2 of them:
+ * - the .bss section;
+ * - the coherent memory section (if any).
+ * - Relocate the data section from ROM to RAM, if required.
+ * ---------------------------------------------------------------------
+ */
+ .if \_init_c_runtime
+#if defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
+ /* -----------------------------------------------------------------
+ * Invalidate the RW memory used by the image. This
+ * includes the data and NOBITS sections. This is done to
+ * safeguard against possible corruption of this memory by
+ * dirty cache lines in a system cache as a result of use by
+ * an earlier boot loader stage. If PIE is enabled however,
+ * RO sections including the GOT may be modified during
+ * pie fixup. Therefore, to be on the safe side, invalidate
+ * the entire image region if PIE is enabled.
+ * -----------------------------------------------------------------
+ */
+#if ENABLE_PIE
+#if SEPARATE_CODE_AND_RODATA
+ ldr r0, =__TEXT_START__
+#else
+ ldr r0, =__RO_START__
+#endif /* SEPARATE_CODE_AND_RODATA */
+#else
+ ldr r0, =__RW_START__
+#endif /* ENABLE_PIE */
+ ldr r1, =__RW_END__
+ sub r1, r1, r0
+ bl inv_dcache_range
+#if defined(IMAGE_BL2) && SEPARATE_BL2_NOLOAD_REGION
+ ldr r0, =__BL2_NOLOAD_START__
+ ldr r1, =__BL2_NOLOAD_END__
+ sub r1, r1, r0
+ bl inv_dcache_range
+#endif
+#endif
+
+ /*
+ * zeromem clobbers r12, which holds the previous BL stage's
+ * arg3, so preserve it in r7.
+ */
+ mov r7, r12
+ ldr r0, =__BSS_START__
+ ldr r1, =__BSS_END__
+ sub r1, r1, r0
+ bl zeromem
+
+#if USE_COHERENT_MEM
+ ldr r0, =__COHERENT_RAM_START__
+ ldr r1, =__COHERENT_RAM_END_UNALIGNED__
+ sub r1, r1, r0
+ bl zeromem
+#endif
+
+ /* Restore r12 */
+ mov r12, r7
+
+#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
+ /* -----------------------------------------------------
+ * Copy data from ROM to RAM.
+ * -----------------------------------------------------
+ */
+ ldr r0, =__DATA_RAM_START__
+ ldr r1, =__DATA_ROM_START__
+ ldr r2, =__DATA_RAM_END__
+ sub r2, r2, r0
+ bl memcpy4
+#endif
+ .endif /* _init_c_runtime */
+
+ /* ---------------------------------------------------------------------
+ * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
+ * the MMU is enabled. There is no risk of reading stale stack memory
+ * after enabling the MMU as only the primary CPU is running at the
+ * moment.
+ * ---------------------------------------------------------------------
+ */
+ bl plat_set_my_stack
+
+#if STACK_PROTECTOR_ENABLED
+ .if \_init_c_runtime
+ bl update_stack_protector_canary
+ .endif /* _init_c_runtime */
+#endif
+ .endm
+
+#endif /* EL3_COMMON_MACROS_S */
diff --git a/include/arch/aarch32/smccc_helpers.h b/include/arch/aarch32/smccc_helpers.h
new file mode 100644
index 0000000..2ce7874
--- /dev/null
+++ b/include/arch/aarch32/smccc_helpers.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SMCCC_HELPERS_H
+#define SMCCC_HELPERS_H
+
+#include <lib/smccc.h>
+
+/* These are offsets to registers in smc_ctx_t */
+#define SMC_CTX_GPREG_R0 U(0x0)
+#define SMC_CTX_GPREG_R1 U(0x4)
+#define SMC_CTX_GPREG_R2 U(0x8)
+#define SMC_CTX_GPREG_R3 U(0xC)
+#define SMC_CTX_GPREG_R4 U(0x10)
+#define SMC_CTX_GPREG_R5 U(0x14)
+#define SMC_CTX_SP_USR U(0x34)
+#define SMC_CTX_SPSR_MON U(0x78)
+#define SMC_CTX_SP_MON U(0x7C)
+#define SMC_CTX_LR_MON U(0x80)
+#define SMC_CTX_SCR U(0x84)
+#define SMC_CTX_PMCR U(0x88)
+#define SMC_CTX_SIZE U(0x90)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <lib/cassert.h>
+
+/*
+ * The generic structure used to save arguments and callee-saved registers
+ * during an SMC. This structure is also used to store the result/return
+ * values after completion of the SMC service.
+ */
+typedef struct smc_ctx {
+ u_register_t r0;
+ u_register_t r1;
+ u_register_t r2;
+ u_register_t r3;
+ u_register_t r4;
+ u_register_t r5;
+ u_register_t r6;
+ u_register_t r7;
+ u_register_t r8;
+ u_register_t r9;
+ u_register_t r10;
+ u_register_t r11;
+ u_register_t r12;
+ /* spsr_usr doesn't exist */
+ u_register_t sp_usr;
+ u_register_t lr_usr;
+ u_register_t spsr_irq;
+ u_register_t sp_irq;
+ u_register_t lr_irq;
+ u_register_t spsr_fiq;
+ u_register_t sp_fiq;
+ u_register_t lr_fiq;
+ u_register_t spsr_svc;
+ u_register_t sp_svc;
+ u_register_t lr_svc;
+ u_register_t spsr_abt;
+ u_register_t sp_abt;
+ u_register_t lr_abt;
+ u_register_t spsr_und;
+ u_register_t sp_und;
+ u_register_t lr_und;
+ u_register_t spsr_mon;
+ /*
+ * `sp_mon` will point to the C runtime stack in monitor mode. However,
+ * prior to exit from the SMC, this will point to the `smc_ctx_t` so
+ * that, on the next entry due to an SMC, the `smc_ctx_t` can be
+ * accessed easily.
+ */
+ u_register_t sp_mon;
+ u_register_t lr_mon;
+ u_register_t scr;
+ u_register_t pmcr;
+ /*
+ * The workaround for CVE-2017-5715 requires storing information in
+ * the bottom 3 bits of the stack pointer. Add a padding field to
+ * force the size of the struct to be a multiple of 8.
+ */
+ u_register_t pad;
+} smc_ctx_t __aligned(8);
+
+/*
+ * Compile time assertions related to the 'smc_context' structure to
+ * ensure that the assembler and the compiler view of the offsets of
+ * the structure members is the same.
+ */
+CASSERT(SMC_CTX_GPREG_R0 == __builtin_offsetof(smc_ctx_t, r0), \
+ assert_smc_ctx_greg_r0_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R1 == __builtin_offsetof(smc_ctx_t, r1), \
+ assert_smc_ctx_greg_r1_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R2 == __builtin_offsetof(smc_ctx_t, r2), \
+ assert_smc_ctx_greg_r2_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R3 == __builtin_offsetof(smc_ctx_t, r3), \
+ assert_smc_ctx_greg_r3_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R4 == __builtin_offsetof(smc_ctx_t, r4), \
+ assert_smc_ctx_greg_r4_offset_mismatch);
+CASSERT(SMC_CTX_SP_USR == __builtin_offsetof(smc_ctx_t, sp_usr), \
+ assert_smc_ctx_sp_usr_offset_mismatch);
+CASSERT(SMC_CTX_LR_MON == __builtin_offsetof(smc_ctx_t, lr_mon), \
+ assert_smc_ctx_lr_mon_offset_mismatch);
+CASSERT(SMC_CTX_SPSR_MON == __builtin_offsetof(smc_ctx_t, spsr_mon), \
+ assert_smc_ctx_spsr_mon_offset_mismatch);
+
+CASSERT((sizeof(smc_ctx_t) & 0x7U) == 0U, assert_smc_ctx_not_aligned);
+CASSERT(SMC_CTX_SIZE == sizeof(smc_ctx_t), assert_smc_ctx_size_mismatch);
+
+/* Convenience macros to return from SMC handler */
+#define SMC_RET0(_h) { \
+ return (uintptr_t)(_h); \
+}
+#define SMC_RET1(_h, _r0) { \
+ ((smc_ctx_t *)(_h))->r0 = (_r0); \
+ SMC_RET0(_h); \
+}
+#define SMC_RET2(_h, _r0, _r1) { \
+ ((smc_ctx_t *)(_h))->r1 = (_r1); \
+ SMC_RET1(_h, (_r0)); \
+}
+#define SMC_RET3(_h, _r0, _r1, _r2) { \
+ ((smc_ctx_t *)(_h))->r2 = (_r2); \
+ SMC_RET2(_h, (_r0), (_r1)); \
+}
+#define SMC_RET4(_h, _r0, _r1, _r2, _r3) { \
+ ((smc_ctx_t *)(_h))->r3 = (_r3); \
+ SMC_RET3(_h, (_r0), (_r1), (_r2)); \
+}
+#define SMC_RET5(_h, _r0, _r1, _r2, _r3, _r4) { \
+ ((smc_ctx_t *)(_h))->r4 = (_r4); \
+ SMC_RET4(_h, (_r0), (_r1), (_r2), (_r3)); \
+}
+#define SMC_RET6(_h, _r0, _r1, _r2, _r3, _r4, _r5) { \
+ ((smc_ctx_t *)(_h))->r5 = (_r5); \
+ SMC_RET5(_h, (_r0), (_r1), (_r2), (_r3), (_r4)); \
+}
+#define SMC_RET7(_h, _r0, _r1, _r2, _r3, _r4, _r5, _r6) { \
+ ((smc_ctx_t *)(_h))->r6 = (_r6); \
+ SMC_RET6(_h, (_r0), (_r1), (_r2), (_r3), (_r4), (_r5)); \
+}
+#define SMC_RET8(_h, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) { \
+ ((smc_ctx_t *)(_h))->r7 = (_r7); \
+ SMC_RET7(_h, (_r0), (_r1), (_r2), (_r3), (_r4), (_r5), (_r6)); \
+}
+
+/*
+ * Helper macro to retrieve the SMC parameters from smc_ctx_t.
+ */
+#define get_smc_params_from_ctx(_hdl, _r1, _r2, _r3, _r4) { \
+ _r1 = ((smc_ctx_t *)_hdl)->r1; \
+ _r2 = ((smc_ctx_t *)_hdl)->r2; \
+ _r3 = ((smc_ctx_t *)_hdl)->r3; \
+ _r4 = ((smc_ctx_t *)_hdl)->r4; \
+ }
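+
+/*
+ * Illustrative handler skeleton (hypothetical: `foo_smc_handler` and
+ * `FOO_SVC_ADD` are invented for this example and are not part of this
+ * header). It shows the intended pairing of get_smc_params_from_ctx()
+ * with the SMC_RETx macros; SMC_UNK comes from <lib/smccc.h>:
+ *
+ *	static uintptr_t foo_smc_handler(uint32_t smc_fid, void *handle)
+ *	{
+ *		u_register_t r1, r2, r3, r4;
+ *
+ *		get_smc_params_from_ctx(handle, r1, r2, r3, r4);
+ *
+ *		if (smc_fid == FOO_SVC_ADD)
+ *			SMC_RET1(handle, r1 + r2);
+ *
+ *		SMC_RET1(handle, SMC_UNK);
+ *	}
+ */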
+
+/* ------------------------------------------------------------------------
+ * Helper APIs for setting and retrieving the appropriate `smc_ctx_t`.
+ * These functions need to be implemented by the BL image including this
+ * library.
+ * ------------------------------------------------------------------------
+ */
+
+/* Get the pointer to `smc_ctx_t` corresponding to the security state. */
+void *smc_get_ctx(unsigned int security_state);
+
+/* Set the next `smc_ctx_t` corresponding to the security state. */
+void smc_set_next_ctx(unsigned int security_state);
+
+/* Get the pointer to next `smc_ctx_t` already set by `smc_set_next_ctx()`. */
+void *smc_get_next_ctx(void);
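+
+/*
+ * Usage sketch (hypothetical; the real flow is owned by the BL image,
+ * e.g. SP_MIN, and `NON_SECURE` stands for whatever security state
+ * constant that image uses). A dispatcher switching worlds would do
+ * something along the lines of:
+ *
+ *	smc_set_next_ctx(NON_SECURE);
+ *	return smc_get_next_ctx();
+ */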
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* SMCCC_HELPERS_H */
diff --git a/include/arch/aarch32/smccc_macros.S b/include/arch/aarch32/smccc_macros.S
new file mode 100644
index 0000000..ea7835a
--- /dev/null
+++ b/include/arch/aarch32/smccc_macros.S
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef SMCCC_MACROS_S
+#define SMCCC_MACROS_S
+
+#include <arch.h>
+
+/*
+ * Macro to save the general purpose registers (r0 - r12), the banked
+ * spsr, lr and sp registers, and the `scr` register to the SMC context
+ * on entry due to an SMC call. The `lr` of the current mode (monitor) is
+ * expected to have been saved already. The `sp` must point to the
+ * `smc_ctx_t` to save to. Additionally, the 'pmcr' register is also
+ * saved as it is updated whilst executing in the secure world.
+ */
+ .macro smccc_save_gp_mode_regs
+ /* Save r0 - r12 in the SMC context */
+ stm sp, {r0-r12}
+ mov r0, sp
+ add r0, r0, #SMC_CTX_SP_USR
+
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
+ /* Must be in secure state to restore Monitor mode */
+ ldcopr r4, SCR
+ bic r2, r4, #SCR_NS_BIT
+ stcopr r2, SCR
+ isb
+
+ cps #MODE32_sys
+ stm r0!, {sp, lr}
+
+ cps #MODE32_irq
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ cps #MODE32_fiq
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ cps #MODE32_svc
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ cps #MODE32_abt
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ cps #MODE32_und
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ /* lr_mon is already saved by caller */
+ cps #MODE32_mon
+ mrs r2, spsr
+ stm r0!, {r2}
+
+ stcopr r4, SCR
+#else
+ /* Save the banked registers including the current SPSR and LR */
+ mrs r4, sp_usr
+ mrs r5, lr_usr
+ mrs r6, spsr_irq
+ mrs r7, sp_irq
+ mrs r8, lr_irq
+ mrs r9, spsr_fiq
+ mrs r10, sp_fiq
+ mrs r11, lr_fiq
+ mrs r12, spsr_svc
+ stm r0!, {r4-r12}
+
+ mrs r4, sp_svc
+ mrs r5, lr_svc
+ mrs r6, spsr_abt
+ mrs r7, sp_abt
+ mrs r8, lr_abt
+ mrs r9, spsr_und
+ mrs r10, sp_und
+ mrs r11, lr_und
+ mrs r12, spsr
+ stm r0!, {r4-r12}
+ /* lr_mon is already saved by caller */
+
+ ldcopr r4, SCR
+
+#if ARM_ARCH_MAJOR > 7
+ /*
+ * Check if the earlier initialisation of SDCR.SCCD to 1
+ * failed, meaning that ARMv8.5-PMU is not implemented,
+ * cycle counting is not disabled and PMCR should be
+ * saved in the Non-secure context.
+ */
+ ldcopr r5, SDCR
+ tst r5, #SDCR_SCCD_BIT
+ bne 1f
+#endif
+ /* Secure Cycle Counter is not disabled */
+#endif
+ ldcopr r5, PMCR
+
+ /* Check caller's security state */
+ tst r4, #SCR_NS_BIT
+ beq 2f
+
+ /* Save PMCR if called from Non-secure state */
+ str r5, [sp, #SMC_CTX_PMCR]
+
+ /* Disable cycle counter when event counting is prohibited */
+2: orr r5, r5, #PMCR_DP_BIT
+ stcopr r5, PMCR
+ isb
+1: str r4, [sp, #SMC_CTX_SCR]
+ .endm
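+
+/*
+ * Illustrative entry sequence (hypothetical vector code; the real
+ * sequence belongs to the BL image, e.g. SP_MIN's SMC entry path). On
+ * entry, `sp` is assumed to already point at the current `smc_ctx_t`:
+ *
+ *	str	lr, [sp, #SMC_CTX_LR_MON]
+ *	smccc_save_gp_mode_regs
+ */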
+
+/*
+ * Macro to restore the `smc_ctx_t`, which includes the general purpose
+ * registers and banked mode registers, and to exit from monitor mode.
+ * r0 must point to the `smc_ctx_t` to restore from.
+ */
+ .macro monitor_exit
+ /*
+ * Save the current sp and restore the smc context
+ * pointer to sp which will be used for handling the
+ * next SMC.
+ */
+ str sp, [r0, #SMC_CTX_SP_MON]
+ mov sp, r0
+
+ /*
+ * Restore SCR first so that we access the right banked register
+ * when the other mode registers are restored.
+ */
+ ldr r1, [r0, #SMC_CTX_SCR]
+ stcopr r1, SCR
+ isb
+
+ /*
+ * Restore PMCR when returning to Non-secure state
+ */
+ tst r1, #SCR_NS_BIT
+ beq 2f
+
+ /*
+ * Back to Non-secure state
+ */
+#if ARM_ARCH_MAJOR > 7
+ /*
+ * Check if the earlier initialisation of SDCR.SCCD to 1
+ * failed, meaning that ARMv8.5-PMU is not implemented and
+ * PMCR should be restored from the Non-secure context.
+ */
+ ldcopr r1, SDCR
+ tst r1, #SDCR_SCCD_BIT
+ bne 2f
+#endif
+ /*
+ * Restore the PMCR register.
+ */
+ ldr r1, [r0, #SMC_CTX_PMCR]
+ stcopr r1, PMCR
+2:
+ /* Restore the banked registers including the current SPSR */
+ add r1, r0, #SMC_CTX_SP_USR
+
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
+ /* Must be in secure state to restore Monitor mode */
+ ldcopr r4, SCR
+ bic r2, r4, #SCR_NS_BIT
+ stcopr r2, SCR
+ isb
+
+ cps #MODE32_sys
+ ldm r1!, {sp, lr}
+
+ cps #MODE32_irq
+ ldm r1!, {r2, sp, lr}
+ msr spsr_fsxc, r2
+
+ cps #MODE32_fiq
+ ldm r1!, {r2, sp, lr}
+ msr spsr_fsxc, r2
+
+ cps #MODE32_svc
+ ldm r1!, {r2, sp, lr}
+ msr spsr_fsxc, r2
+
+ cps #MODE32_abt
+ ldm r1!, {r2, sp, lr}
+ msr spsr_fsxc, r2
+
+ cps #MODE32_und
+ ldm r1!, {r2, sp, lr}
+ msr spsr_fsxc, r2
+
+ cps #MODE32_mon
+ ldm r1!, {r2}
+ msr spsr_fsxc, r2
+
+ stcopr r4, SCR
+ isb
+#else
+ ldm r1!, {r4-r12}
+ msr sp_usr, r4
+ msr lr_usr, r5
+ msr spsr_irq, r6
+ msr sp_irq, r7
+ msr lr_irq, r8
+ msr spsr_fiq, r9
+ msr sp_fiq, r10
+ msr lr_fiq, r11
+ msr spsr_svc, r12
+
+ ldm r1!, {r4-r12}
+ msr sp_svc, r4
+ msr lr_svc, r5
+ msr spsr_abt, r6
+ msr sp_abt, r7
+ msr lr_abt, r8
+ msr spsr_und, r9
+ msr sp_und, r10
+ msr lr_und, r11
+ /*
+ * Use the `_fsxc` suffix explicitly to instruct the assembler
+ * to update all 32 bits of the SPSR. Otherwise, by default,
+ * the assembler assumes the `_fc` suffix, which only modifies
+ * the f->[31:24] and c->[7:0] bits of the SPSR.
+ */
+ msr spsr_fsxc, r12
+#endif
+
+ /* Restore the LR */
+ ldr lr, [r0, #SMC_CTX_LR_MON]
+
+ /* Restore the rest of the general purpose registers */
+ ldm r0, {r0-r12}
+ exception_return
+ .endm
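+
+/*
+ * Illustrative exit sequence (hypothetical; in practice the context
+ * pointer in r0 is typically obtained from smc_get_next_ctx(), which
+ * returns it in r0 per the AAPCS):
+ *
+ *	bl	smc_get_next_ctx
+ *	monitor_exit
+ */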
+
+#endif /* SMCCC_MACROS_S */