path: root/bl31
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 17:43:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 17:43:51 +0000
commit    be58c81aff4cd4c0ccf43dbd7998da4a6a08c03b (patch)
tree      779c248fb61c83f65d1f0dc867f2053d76b4e03a /bl31
parent    Initial commit. (diff)
download  arm-trusted-firmware-be58c81aff4cd4c0ccf43dbd7998da4a6a08c03b.tar.xz
          arm-trusted-firmware-be58c81aff4cd4c0ccf43dbd7998da4a6a08c03b.zip

Adding upstream version 2.10.0+dfsg. (tags: upstream/2.10.0+dfsg, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'bl31')
-rw-r--r--  bl31/aarch64/bl31_entrypoint.S      229
-rw-r--r--  bl31/aarch64/crash_reporting.S      469
-rw-r--r--  bl31/aarch64/ea_delegate.S          325
-rw-r--r--  bl31/aarch64/runtime_exceptions.S   747
-rw-r--r--  bl31/bl31.ld.S                      210
-rw-r--r--  bl31/bl31.mk                        186
-rw-r--r--  bl31/bl31_context_mgmt.c             66
-rw-r--r--  bl31/bl31_main.c                    306
-rw-r--r--  bl31/bl31_traps.c                    30
-rw-r--r--  bl31/ehf.c                          533
-rw-r--r--  bl31/interrupt_mgmt.c               227
11 files changed, 3328 insertions(+), 0 deletions(-)
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
new file mode 100644
index 0000000..dfb14e9
--- /dev/null
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <common/bl_common.h>
+#include <el3_common_macros.S>
+#include <lib/pmf/aarch64/pmf_asm_macros.S>
+#include <lib/runtime_instr.h>
+#include <lib/xlat_tables/xlat_mmu_helpers.h>
+
+ .globl bl31_entrypoint
+ .globl bl31_warm_entrypoint
+
+ /* -----------------------------------------------------
+ * bl31_entrypoint() is the cold boot entrypoint,
+ * executed only by the primary cpu.
+ * -----------------------------------------------------
+ */
+
+func bl31_entrypoint
+ /* ---------------------------------------------------------------
+ * Stash the previous bootloader arguments x0 - x3 for later use.
+ * ---------------------------------------------------------------
+ */
+ mov x20, x0
+ mov x21, x1
+ mov x22, x2
+ mov x23, x3
+
+#if !RESET_TO_BL31
+ /* ---------------------------------------------------------------------
+ * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
+ * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
+ * and primary/secondary CPU logic should not be executed in this case.
+ *
+ * Also, assume that the previous bootloader has already initialised the
+ * SCTLR_EL3, including the endianness, and has initialised the memory.
+ * ---------------------------------------------------------------------
+ */
+ el3_entrypoint_common \
+ _init_sctlr=0 \
+ _warm_boot_mailbox=0 \
+ _secondary_cold_boot=0 \
+ _init_memory=0 \
+ _init_c_runtime=1 \
+ _exception_vectors=runtime_exceptions \
+ _pie_fixup_size=BL31_LIMIT - BL31_BASE
+#else
+
+ /* ---------------------------------------------------------------------
+ * For RESET_TO_BL31 systems which have a programmable reset address,
+ * bl31_entrypoint() is executed only on the cold boot path so we can
+ * skip the warm boot mailbox mechanism.
+ * ---------------------------------------------------------------------
+ */
+ el3_entrypoint_common \
+ _init_sctlr=1 \
+ _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \
+ _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \
+ _init_memory=1 \
+ _init_c_runtime=1 \
+ _exception_vectors=runtime_exceptions \
+ _pie_fixup_size=BL31_LIMIT - BL31_BASE
+#endif /* RESET_TO_BL31 */
+
+ /* --------------------------------------------------------------------
+ * Perform BL31 setup
+ * --------------------------------------------------------------------
+ */
+ mov x0, x20
+ mov x1, x21
+ mov x2, x22
+ mov x3, x23
+ bl bl31_setup
+
+#if ENABLE_PAUTH
+ /* --------------------------------------------------------------------
+ * Program APIAKey_EL1 and enable pointer authentication
+ * --------------------------------------------------------------------
+ */
+ bl pauth_init_enable_el3
+#endif /* ENABLE_PAUTH */
+
+ /* --------------------------------------------------------------------
+ * Jump to main function
+ * --------------------------------------------------------------------
+ */
+ bl bl31_main
+
+ /* --------------------------------------------------------------------
+ * Clean the .data & .bss sections to main memory. This ensures
+ * that any global data which was initialised by the primary CPU
+ * is visible to secondary CPUs before they enable their data
+ * caches and participate in coherency.
+ * --------------------------------------------------------------------
+ */
+ adrp x0, __DATA_START__
+ add x0, x0, :lo12:__DATA_START__
+ adrp x1, __DATA_END__
+ add x1, x1, :lo12:__DATA_END__
+ sub x1, x1, x0
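+ /* clean_dcache_range takes the base address in x0 and the size in x1 */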
+ bl clean_dcache_range
+
+ adrp x0, __BSS_START__
+ add x0, x0, :lo12:__BSS_START__
+ adrp x1, __BSS_END__
+ add x1, x1, :lo12:__BSS_END__
+ sub x1, x1, x0
+ bl clean_dcache_range
+
+ b el3_exit
+endfunc bl31_entrypoint
+
+ /* --------------------------------------------------------------------
+ * This CPU has been physically powered up. It is either resuming from
+ * suspend or has simply been turned on. In both cases, call the BL31
+ * warmboot entrypoint
+ * --------------------------------------------------------------------
+ */
+func bl31_warm_entrypoint
+#if ENABLE_RUNTIME_INSTRUMENTATION
+
+ /*
+ * This timestamp update happens with cache off. The next
+ * timestamp collection will need to do cache maintenance prior
+ * to timestamp update.
+ */
+ pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
+ mrs x1, cntpct_el0
+ str x1, [x0]
+#endif
+
+ /*
+ * On the warm boot path, most of the EL3 initialisations performed by
+ * 'el3_entrypoint_common' must be skipped:
+ *
+ * - Only when the platform bypasses the BL1/BL31 entrypoint by
+ * programming the reset address do we need to initialise SCTLR_EL3.
+ * In other cases, we assume this has been taken care by the
+ * entrypoint code.
+ *
+ * - No need to determine the type of boot, we know it is a warm boot.
+ *
+ * - Do not try to distinguish between primary and secondary CPUs, this
+ * notion only exists for a cold boot.
+ *
+ * - No need to initialise the memory or the C runtime environment,
+ * it has been done once and for all on the cold boot path.
+ */
+ el3_entrypoint_common \
+ _init_sctlr=PROGRAMMABLE_RESET_ADDRESS \
+ _warm_boot_mailbox=0 \
+ _secondary_cold_boot=0 \
+ _init_memory=0 \
+ _init_c_runtime=0 \
+ _exception_vectors=runtime_exceptions \
+ _pie_fixup_size=0
+
+ /*
+ * We're about to enable MMU and participate in PSCI state coordination.
+ *
+ * The PSCI implementation invokes platform routines that enable CPUs to
+ * participate in coherency. On a system where CPUs are not
+ * cache-coherent without appropriate platform specific programming,
+ * having caches enabled until such time might lead to coherency issues
+ * (resulting from stale data getting speculatively fetched, among
+ * others). Therefore we keep data caches disabled even after enabling
+ * the MMU for such platforms.
+ *
+ * On systems with hardware-assisted coherency, or on single cluster
+ * platforms, such platform specific programming is not required to
+ * enter coherency (as CPUs already are); and there's no reason to have
+ * caches disabled either.
+ */
+#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
+ mov x0, xzr
+#else
+ mov x0, #DISABLE_DCACHE
+#endif
+ bl bl31_plat_enable_mmu
+
+#if ENABLE_RME
+ /*
+ * At warm boot the GPT data structures have already been initialized in
+ * RAM, but the sysregs for this CPU need to be initialized. Note that GPT
+ * accesses are controlled by attributes in GPCCR and do not depend on the
+ * SCR_EL3.C bit.
+ */
+ bl gpt_enable
+ cbz x0, 1f
+ no_ret plat_panic_handler
+1:
+#endif
+
+#if ENABLE_PAUTH
+ /* --------------------------------------------------------------------
+ * Program APIAKey_EL1 and enable pointer authentication
+ * --------------------------------------------------------------------
+ */
+ bl pauth_init_enable_el3
+#endif /* ENABLE_PAUTH */
+
+ bl psci_warmboot_entrypoint
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
+ mov x19, x0
+
+ /*
+ * Invalidate before updating timestamp to ensure previous timestamp
+ * updates on the same cache line with caches disabled are properly
+ * seen by the same core. Without the cache invalidate, the core might
+ * write into a stale cache line.
+ */
+ mov x1, #PMF_TS_SIZE
+ mov x20, x30
+ bl inv_dcache_range
+ mov x30, x20
+
+ mrs x0, cntpct_el0
+ str x0, [x19]
+#endif
+ b el3_exit
+endfunc bl31_warm_entrypoint
diff --git a/bl31/aarch64/crash_reporting.S b/bl31/aarch64/crash_reporting.S
new file mode 100644
index 0000000..4cec110
--- /dev/null
+++ b/bl31/aarch64/crash_reporting.S
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat_macros.S>
+#include <platform_def.h>
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/utils_def.h>
+
+ .globl report_unhandled_exception
+ .globl report_unhandled_interrupt
+ .globl report_el3_panic
+ .globl report_elx_panic
+
+#if CRASH_REPORTING
+
+ /* ------------------------------------------------------
+ * The below section deals with dumping the system state
+ * when an unhandled exception is taken in EL3.
+ * The layout and the names of the registers which will
+ * be dumped during an unhandled exception are given below.
+ * ------------------------------------------------------
+ */
+.section .rodata.crash_prints, "aS"
+print_spacer:
+ .asciz " = 0x"
+
+gp_regs:
+ .asciz "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",\
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",\
+ "x16", "x17", "x18", "x19", "x20", "x21", "x22",\
+ "x23", "x24", "x25", "x26", "x27", "x28", "x29", ""
+el3_sys_regs:
+ .asciz "scr_el3", "sctlr_el3", "cptr_el3", "tcr_el3",\
+ "daif", "mair_el3", "spsr_el3", "elr_el3", "ttbr0_el3",\
+ "esr_el3", "far_el3", ""
+
+non_el3_sys_regs:
+ .asciz "spsr_el1", "elr_el1", "spsr_abt", "spsr_und",\
+ "spsr_irq", "spsr_fiq", "sctlr_el1", "actlr_el1", "cpacr_el1",\
+ "csselr_el1", "sp_el1", "esr_el1", "ttbr0_el1", "ttbr1_el1",\
+ "mair_el1", "amair_el1", "tcr_el1", "tpidr_el1", "tpidr_el0",\
+ "tpidrro_el0", "par_el1", "mpidr_el1", "afsr0_el1", "afsr1_el1",\
+ "contextidr_el1", "vbar_el1", "cntp_ctl_el0", "cntp_cval_el0",\
+ "cntv_ctl_el0", "cntv_cval_el0", "cntkctl_el1", "sp_el0", "isr_el1", ""
+
+#if CTX_INCLUDE_AARCH32_REGS
+aarch32_regs:
+ .asciz "dacr32_el2", "ifsr32_el2", ""
+#endif /* CTX_INCLUDE_AARCH32_REGS */
+
+panic_msg:
+ .asciz "PANIC in EL3.\nx30"
+excpt_msg:
+ .asciz "Unhandled Exception in EL3.\nx30"
+intr_excpt_msg:
+ .ascii "Unhandled Interrupt Exception in EL3.\n"
+x30_msg:
+ .asciz "x30"
+excpt_msg_el:
+ .asciz "Unhandled Exception from lower EL.\n"
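+
+/*
+ * Note: panic_msg and excpt_msg deliberately end in "x30", and intr_excpt_msg
+ * (a non-terminated .ascii) runs straight into x30_msg. The crash dump code
+ * prints the message and then immediately prints the value of x30, so the
+ * trailing "x30" doubles as the label for that register.
+ */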
+
+ /*
+ * Helper function to print from crash buf.
+ * The print loop is controlled by the buf size and the
+ * ASCII register name list, which is passed in x6. The
+ * function returns the crash buf address in x0.
+ * Clobbers : x0 - x7, sp
+ */
+func size_controlled_print
+ /* Save the lr */
+ mov sp, x30
+ /* load the crash buf address */
+ mrs x7, tpidr_el3
+test_size_list:
+ /* Calculate x5 always as it will be clobbered by asm_print_hex */
+ mrs x5, tpidr_el3
+ add x5, x5, #CPU_DATA_CRASH_BUF_SIZE
+ /* Test whether we have reached end of crash buf */
+ cmp x7, x5
+ b.eq exit_size_print
+ ldrb w4, [x6]
+ /* Test whether we are at end of list */
+ cbz w4, exit_size_print
+ mov x4, x6
+ /* asm_print_str updates x4 to point to next entry in list */
+ bl asm_print_str
+ /* x0 = number of symbols printed + 1 */
+ sub x0, x4, x6
+ /* update x6 with the updated list pointer */
+ mov x6, x4
+ bl print_alignment
+ ldr x4, [x7], #REGSZ
+ bl asm_print_hex
+ bl asm_print_newline
+ b test_size_list
+exit_size_print:
+ mov x30, sp
+ ret
+endfunc size_controlled_print
+
+ /* -----------------------------------------------------
+ * This function calculates and prints required number
+ * of space characters followed by "= 0x", based on the
+ * length of ascii register name.
+ * x0: length of ascii register name + 1
+ * ------------------------------------------------------
+ */
+func print_alignment
+ /* The minimum ascii length is 3, e.g. for "x0" */
+ adr x4, print_spacer - 3
+ add x4, x4, x0
+ b asm_print_str
+endfunc print_alignment
+
+ /*
+ * Helper function to store x8 - x15 registers to
+ * the crash buf. The system register values are
+ * copied to x8 - x15 by the caller, and are then
+ * copied to the crash buf by this function.
+ * x0 points to the crash buf. It then calls
+ * size_controlled_print to print to console.
+ * Clobbers : x0 - x7, sp
+ */
+func str_in_crash_buf_print
+ /* restore the crash buf address in x0 */
+ mrs x0, tpidr_el3
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #REGSZ * 2]
+ stp x12, x13, [x0, #REGSZ * 4]
+ stp x14, x15, [x0, #REGSZ * 6]
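+ /*
+ * Each stp above stores a register pair, hence the stride of 2 * REGSZ;
+ * one batch therefore fills the crash buf with 8 registers, matching the
+ * groups of 8 printed per call to size_controlled_print.
+ */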
+ b size_controlled_print
+endfunc str_in_crash_buf_print
+
+ /* ------------------------------------------------------
+ * This macro calculates the offset to crash buf from
+ * cpu_data and stores it in tpidr_el3. It also saves x0
+ * and x1 in the crash buf by using sp as a temporary
+ * register.
+ * ------------------------------------------------------
+ */
+ .macro prepare_crash_buf_save_x0_x1
+ /* we can corrupt this reg to free up x0 */
+ mov sp, x0
+ /* tpidr_el3 contains the address to cpu_data structure */
+ mrs x0, tpidr_el3
+ /* Calculate the Crash buffer offset in cpu_data */
+ add x0, x0, #CPU_DATA_CRASH_BUF_OFFSET
+ /* Store crash buffer address in tpidr_el3 */
+ msr tpidr_el3, x0
+ str x1, [x0, #REGSZ]
+ mov x1, sp
+ str x1, [x0]
+ .endm
+
+ /* -----------------------------------------------------
+ * This function reports a crash (if crash
+ * reporting is enabled) when an unhandled exception
+ * occurs. It prints the CPU state via the crash console
+ * making use of the crash buf. This function will
+ * not return.
+ * -----------------------------------------------------
+ */
+func report_unhandled_exception
+ prepare_crash_buf_save_x0_x1
+ adr x0, excpt_msg
+ mov sp, x0
+ /* This call will not return */
+ b do_crash_reporting
+endfunc report_unhandled_exception
+
+ /* -----------------------------------------------------
+ * This function reports a crash (if crash
+ * reporting is enabled) when an unhandled interrupt
+ * occurs. It prints the CPU state via the crash console
+ * making use of the crash buf. This function will
+ * not return.
+ * -----------------------------------------------------
+ */
+func report_unhandled_interrupt
+ prepare_crash_buf_save_x0_x1
+ adr x0, intr_excpt_msg
+ mov sp, x0
+ /* This call will not return */
+ b do_crash_reporting
+endfunc report_unhandled_interrupt
+
+ /* -----------------------------------------------------
+ * This function reports a crash from a lower
+ * exception level (if crash reporting is enabled) when
+ * lower_el_panic() is invoked from C Runtime.
+ * It prints the CPU state via the crash console making
+ * use of 'cpu_context' structure where general purpose
+ * registers are saved and the crash buf.
+ * This function will not return.
+ * -----------------------------------------------------
+ */
+func report_elx_panic
+ msr spsel, #MODE_SP_ELX
+
+ /* Print the crash message */
+ adr x4, excpt_msg_el
+ bl asm_print_str
+
+ /* Report x0 - x29 values stored in 'gpregs_ctx' structure */
+ /* Store the ascii list pointer in x6 */
+ adr x6, gp_regs
+ add x7, sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0
+
+print_next:
+ ldrb w4, [x6]
+ /* Test whether we are at end of list */
+ cbz w4, print_x30
+ mov x4, x6
+ /* asm_print_str updates x4 to point to next entry in list */
+ bl asm_print_str
+ /* x0 = number of symbols printed + 1 */
+ sub x0, x4, x6
+ /* Update x6 with the updated list pointer */
+ mov x6, x4
+ bl print_alignment
+ ldr x4, [x7], #REGSZ
+ bl asm_print_hex
+ bl asm_print_newline
+ b print_next
+
+print_x30:
+ adr x4, x30_msg
+ bl asm_print_str
+
+ /* Print spaces to align "x30" string */
+ mov x0, #4
+ bl print_alignment
+
+ /* Report x30 */
+ ldr x4, [x7]
+
+ /* ----------------------------------------------------------------
+ * A different virtual address space size can be defined for each EL.
+ * Ensure that we use the proper one by reading the corresponding
+ * TCR_ELx register.
+ * ----------------------------------------------------------------
+ */
+ cmp x8, #MODE_EL2
+ b.lt from_el1 /* EL1 */
+ mrs x2, sctlr_el2
+ mrs x1, tcr_el2
+
+ /* ----------------------------------------------------------------
+ * Check if pointer authentication is enabled at the specified EL.
+ * If it isn't, we can then skip stripping a PAC code.
+ * ----------------------------------------------------------------
+ */
+test_pauth:
+ tst x2, #(SCTLR_EnIA_BIT | SCTLR_EnIB_BIT)
+ b.eq no_pauth
+
+ /* Demangle address */
+ and x1, x1, #0x3F /* T0SZ = TCR_ELx[5:0] */
+ sub x1, x1, #64
+ neg x1, x1 /* bottom_pac_bit = 64 - T0SZ */
+ mov x2, #-1
+ lsl x2, x2, x1
+ bic x4, x4, x2
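+ /*
+ * For example, with T0SZ = 25 the bottom PAC bit is 64 - 25 = 39, so the
+ * mask built above clears bits [63:39] of the reported value, stripping
+ * any PAC before it is printed.
+ */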
+
+no_pauth:
+ bl asm_print_hex
+ bl asm_print_newline
+
+ /* tpidr_el3 contains the address to cpu_data structure */
+ mrs x0, tpidr_el3
+ /* Calculate the Crash buffer offset in cpu_data */
+ add x0, x0, #CPU_DATA_CRASH_BUF_OFFSET
+ /* Store crash buffer address in tpidr_el3 */
+ msr tpidr_el3, x0
+
+ /* Print the rest of crash dump */
+ b print_el3_sys_regs
+
+from_el1:
+ mrs x2, sctlr_el1
+ mrs x1, tcr_el1
+ b test_pauth
+endfunc report_elx_panic
+
+ /* -----------------------------------------------------
+ * This function reports a crash (if crash
+ * reporting is enabled) when panic() is invoked from
+ * C Runtime. It prints the CPU state via the crash
+ * console making use of the crash buf. This function
+ * will not return.
+ * -----------------------------------------------------
+ */
+func report_el3_panic
+ msr spsel, #MODE_SP_ELX
+ prepare_crash_buf_save_x0_x1
+ adr x0, panic_msg
+ mov sp, x0
+ /* Fall through to 'do_crash_reporting' */
+
+ /* ------------------------------------------------------------
+ * The common crash reporting functionality. It requires that x0
+ * and x1 have already been stored in the crash buf, that sp points
+ * to the crash message and that tpidr_el3 contains the crash buf address.
+ * The function does the following:
+ * - Retrieve the crash buffer from tpidr_el3
+ * - Store x2 to x6 in the crash buffer
+ * - Initialise the crash console.
+ * - Print the crash message by using the address in sp.
+ * - Print x30 value to the crash console.
+ * - Print x0 - x7 from the crash buf to the crash console.
+ * - Print x8 - x29 (in groups of 8 registers) using the
+ * crash buf to the crash console.
+ * - Print el3 sys regs (in groups of 8 registers) using the
+ * crash buf to the crash console.
+ * - Print non el3 sys regs (in groups of 8 registers) using
+ * the crash buf to the crash console.
+ * ------------------------------------------------------------
+ */
+do_crash_reporting:
+ /* Retrieve the crash buf from tpidr_el3 */
+ mrs x0, tpidr_el3
+ /* Store x2 - x6, x30 in the crash buffer */
+ stp x2, x3, [x0, #REGSZ * 2]
+ stp x4, x5, [x0, #REGSZ * 4]
+ stp x6, x30, [x0, #REGSZ * 6]
+ /* Initialize the crash console */
+ bl plat_crash_console_init
+ /* Verify the console is initialized */
+ cbz x0, crash_panic
+ /* Print the crash message. sp points to the crash message */
+ mov x4, sp
+ bl asm_print_str
+ /* Print spaces to align "x30" string */
+ mov x0, #4
+ bl print_alignment
+ /* Load the crash buf address */
+ mrs x0, tpidr_el3
+ /* Report x30 first from the crash buf */
+ ldr x4, [x0, #REGSZ * 7]
+
+#if ENABLE_PAUTH
+ /* Demangle address */
+ xpaci x4
+#endif
+ bl asm_print_hex
+ bl asm_print_newline
+ /* Load the crash buf address */
+ mrs x0, tpidr_el3
+ /* Now mov x7 into crash buf */
+ str x7, [x0, #REGSZ * 7]
+
+ /* Report x0 - x29 values stored in crash buf */
+ /* Store the ascii list pointer in x6 */
+ adr x6, gp_regs
+ /* Print x0 to x7 from the crash buf */
+ bl size_controlled_print
+ /* Store x8 - x15 in crash buf and print */
+ bl str_in_crash_buf_print
+ /* Load the crash buf address */
+ mrs x0, tpidr_el3
+ /* Store the rest of gp regs and print */
+ stp x16, x17, [x0]
+ stp x18, x19, [x0, #REGSZ * 2]
+ stp x20, x21, [x0, #REGSZ * 4]
+ stp x22, x23, [x0, #REGSZ * 6]
+ bl size_controlled_print
+ /* Load the crash buf address */
+ mrs x0, tpidr_el3
+ stp x24, x25, [x0]
+ stp x26, x27, [x0, #REGSZ * 2]
+ stp x28, x29, [x0, #REGSZ * 4]
+ bl size_controlled_print
+
+ /* Print the el3 sys registers */
+print_el3_sys_regs:
+ adr x6, el3_sys_regs
+ mrs x8, scr_el3
+ mrs x9, sctlr_el3
+ mrs x10, cptr_el3
+ mrs x11, tcr_el3
+ mrs x12, daif
+ mrs x13, mair_el3
+ mrs x14, spsr_el3
+ mrs x15, elr_el3
+ bl str_in_crash_buf_print
+ mrs x8, ttbr0_el3
+ mrs x9, esr_el3
+ mrs x10, far_el3
+ bl str_in_crash_buf_print
+
+ /* Print the non el3 sys registers */
+ adr x6, non_el3_sys_regs
+ mrs x8, spsr_el1
+ mrs x9, elr_el1
+ mrs x10, spsr_abt
+ mrs x11, spsr_und
+ mrs x12, spsr_irq
+ mrs x13, spsr_fiq
+ mrs x14, sctlr_el1
+ mrs x15, actlr_el1
+ bl str_in_crash_buf_print
+ mrs x8, cpacr_el1
+ mrs x9, csselr_el1
+ mrs x10, sp_el1
+ mrs x11, esr_el1
+ mrs x12, ttbr0_el1
+ mrs x13, ttbr1_el1
+ mrs x14, mair_el1
+ mrs x15, amair_el1
+ bl str_in_crash_buf_print
+ mrs x8, tcr_el1
+ mrs x9, tpidr_el1
+ mrs x10, tpidr_el0
+ mrs x11, tpidrro_el0
+ mrs x12, par_el1
+ mrs x13, mpidr_el1
+ mrs x14, afsr0_el1
+ mrs x15, afsr1_el1
+ bl str_in_crash_buf_print
+ mrs x8, contextidr_el1
+ mrs x9, vbar_el1
+ mrs x10, cntp_ctl_el0
+ mrs x11, cntp_cval_el0
+ mrs x12, cntv_ctl_el0
+ mrs x13, cntv_cval_el0
+ mrs x14, cntkctl_el1
+ mrs x15, sp_el0
+ bl str_in_crash_buf_print
+ mrs x8, isr_el1
+ bl str_in_crash_buf_print
+
+#if CTX_INCLUDE_AARCH32_REGS
+ /* Print the AArch32 registers */
+ adr x6, aarch32_regs
+ mrs x8, dacr32_el2
+ mrs x9, ifsr32_el2
+ bl str_in_crash_buf_print
+#endif /* CTX_INCLUDE_AARCH32_REGS */
+
+ /* Get the cpu specific registers to report */
+ bl do_cpu_reg_dump
+ bl str_in_crash_buf_print
+
+ /* Print some platform registers */
+ plat_crash_print_regs
+
+ bl plat_crash_console_flush
+
+ /* Done reporting */
+ no_ret plat_panic_handler
+endfunc report_el3_panic
+
+#else /* CRASH_REPORTING */
+func report_unhandled_exception
+report_unhandled_interrupt:
+ no_ret plat_panic_handler
+endfunc report_unhandled_exception
+#endif /* CRASH_REPORTING */
+
+func crash_panic
+ no_ret plat_panic_handler
+endfunc crash_panic
diff --git a/bl31/aarch64/ea_delegate.S b/bl31/aarch64/ea_delegate.S
new file mode 100644
index 0000000..28d2187
--- /dev/null
+++ b/bl31/aarch64/ea_delegate.S
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <assert_macros.S>
+#include <asm_macros.S>
+#include <bl31/ea_handle.h>
+#include <context.h>
+#include <lib/extensions/ras_arch.h>
+#include <cpu_macros.S>
+
+ .globl handle_lower_el_sync_ea
+ .globl handle_lower_el_async_ea
+ .globl handle_pending_async_ea
+/*
+ * This function handles Synchronous External Aborts from lower EL.
+ *
+ * It delegates the handling of the EA to the platform handler and, upon
+ * successfully handling the EA, exits EL3; otherwise it panics.
+ *
+ * This function assumes x30 has been saved.
+ */
+func handle_lower_el_sync_ea
+ mrs x30, esr_el3
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+ /* Check for I/D aborts from lower EL */
+ cmp x30, #EC_IABORT_LOWER_EL
+ b.eq 1f
+
+ cmp x30, #EC_DABORT_LOWER_EL
+ b.eq 1f
+
+ /* EA other than above are unhandled exceptions */
+ no_ret report_unhandled_exception
+1:
+ /*
+ * Save general purpose and ARMv8.3-PAuth registers (if enabled).
+ * Also save PMCR_EL0 and set the PSTATE to a known state.
+ */
+ bl prepare_el3_entry
+
+#if ENABLE_PAUTH
+ /* Load and program APIAKey firmware key */
+ bl pauth_load_bl31_apiakey
+#endif
+
+ /* Setup exception class and syndrome arguments for platform handler */
+ mov x0, #ERROR_EA_SYNC
+ mrs x1, esr_el3
+ bl delegate_sync_ea
+
+ /* el3_exit assumes SP_EL0 on entry */
+ msr spsel, #MODE_SP_EL0
+ b el3_exit
+endfunc handle_lower_el_sync_ea
+
+
+/*
+ * This function handles SErrors from lower ELs.
+ *
+ * It delegates the handling of the EA to the platform handler and, upon
+ * successfully handling the EA, exits EL3; otherwise it panics.
+ *
+ * This function assumes x30 has been saved.
+ */
+func handle_lower_el_async_ea
+
+ /*
+ * Save general purpose and ARMv8.3-PAuth registers (if enabled).
+ * Also save PMCR_EL0 and set the PSTATE to a known state.
+ */
+ bl prepare_el3_entry
+
+#if ENABLE_PAUTH
+ /* Load and program APIAKey firmware key */
+ bl pauth_load_bl31_apiakey
+#endif
+
+ /* Setup exception class and syndrome arguments for platform handler */
+ mov x0, #ERROR_EA_ASYNC
+ mrs x1, esr_el3
+ bl delegate_async_ea
+
+ /* el3_exit assumes SP_EL0 on entry */
+ msr spsel, #MODE_SP_EL0
+ b el3_exit
+endfunc handle_lower_el_async_ea
+
+/*
+ * Handler for async EA from lower EL synchronized at EL3 entry in FFH mode.
+ *
+ * This scenario may arise when there is an error (EA) in the system which has not
+ * yet been signaled to the PE while executing in a lower EL. During entry into EL3,
+ * the errors are synchronized either implicitly or explicitly, causing an async EA
+ * to pend at EL3.
+ *
+ * On detecting the pending EA (via ISR_EL1.A), if the EA routing model is Firmware
+ * First handling (FFH, SCR_EL3.EA = 1) this handler first handles the pending EA
+ * and then handles the original exception.
+ *
+ * This function assumes x30 has been saved.
+ */
+func handle_pending_async_ea
+ /*
+ * Prepare for nested handling of EA. Stash sysregs clobbered by nested
+ * exception and handler
+ */
+ str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_GPREG_LR]
+ mrs x30, esr_el3
+ str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ESR_EL3]
+ mrs x30, spsr_el3
+ str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_SPSR_EL3]
+ mrs x30, elr_el3
+ str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
+
+ mov x30, #1
+ str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
+ /*
+ * Restore the original x30 saved as part of entering EL3. This is not
+ * required for the current function but for EL3 SError vector entry
+ * once PSTATE.A bit is unmasked. We restore x30 and then the same
+ * value is stored in EL3 SError vector entry.
+ */
+ ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+ /*
+ * After clearing the PSTATE.A bit, the pending SError will trigger at the
+ * current EL. Issue an explicit synchronization event to ensure the newly
+ * unmasked interrupt is taken immediately.
+ */
+ unmask_async_ea
+
+ /* Restore the original exception information along with zeroing the storage */
+ ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
+ msr elr_el3, x30
+ str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
+ ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_SPSR_EL3]
+ msr spsr_el3, x30
+ str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_SPSR_EL3]
+ ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ESR_EL3]
+ msr esr_el3, x30
+ str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ESR_EL3]
+
+ /*
+ * If the original exception corresponds to an SError from a lower EL, eret
+ * back to the lower EL; otherwise return to the vector table for original
+ * exception handling.
+ */
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+ cmp x30, #EC_SERROR
+ ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_GPREG_LR]
+ str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_GPREG_LR]
+ b.eq 1f
+ ret
+1:
+ exception_return
+endfunc handle_pending_async_ea
+
+/*
+ * Prelude for Synchronous External Abort handling. This function assumes that
+ * all GP registers have been saved by the caller.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ */
+func delegate_sync_ea
+#if ENABLE_FEAT_RAS
+ /*
+ * Check for Uncontainable error type. If so, route to the platform
+ * fatal error handler rather than the generic EA one.
+ */
+ ubfx x2, x1, #EABORT_SET_SHIFT, #EABORT_SET_WIDTH
+ cmp x2, #ERROR_STATUS_SET_UC
+ b.ne 1f
+
+ /* Check fault status code */
+ ubfx x3, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
+ cmp x3, #SYNC_EA_FSC
+ b.ne 1f
+
+ no_ret plat_handle_uncontainable_ea
+1:
+#endif
+
+ b ea_proceed
+endfunc delegate_sync_ea
+
+
+/*
+ * Prelude for Asynchronous External Abort handling. This function assumes that
+ * all GP registers have been saved by the caller.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ */
+func delegate_async_ea
+#if ENABLE_FEAT_RAS
+ /*
+ * Check the Exception Class to ensure this is an SError, as this function
+ * should only be invoked for SErrors. If that is not the case, which implies
+ * either an HW error or a programming error, panic.
+ */
+ ubfx x2, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+ cmp x2, EC_SERROR
+ b.ne el3_panic
+ /*
+ * Check for Implementation Defined Syndrome. If so, skip checking
+ * Uncontainable error type from the syndrome as the format is unknown.
+ */
+ tbnz x1, #SERROR_IDS_BIT, 1f
+
+ /* AET only valid when DFSC is 0x11 */
+ ubfx x2, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
+ cmp x2, #DFSC_SERROR
+ b.ne 1f
+
+ /*
+ * Check for Uncontainable error type. If so, route to the platform
+ * fatal error handler rather than the generic EA one.
+ */
+ ubfx x3, x1, #EABORT_AET_SHIFT, #EABORT_AET_WIDTH
+ cmp x3, #ERROR_STATUS_UET_UC
+ b.ne 1f
+
+ no_ret plat_handle_uncontainable_ea
+1:
+#endif
+
+ b ea_proceed
+endfunc delegate_async_ea
+
+
+/*
+ * Delegate External Abort handling to platform's EA handler. This function
+ * assumes that all GP registers have been saved by the caller.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ */
+func ea_proceed
+ /*
+ * If the ESR loaded earlier is not zero, we were processing an EA
+ * already, and this is a double fault.
+ */
+ ldr x5, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]
+ cbz x5, 1f
+ no_ret plat_handle_double_fault
+
+1:
+ /* Save EL3 state */
+ mrs x2, spsr_el3
+ mrs x3, elr_el3
+ stp x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+
+ /*
+ * Save ESR as handling might involve lower ELs, and returning back to
+ * EL3 from there would trample the original ESR.
+ */
+ mrs x4, scr_el3
+ mrs x5, esr_el3
+ stp x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+
+ /*
+ * Setup rest of arguments, and call platform External Abort handler.
+ *
+ * x0: EA reason (already in place)
+ * x1: Exception syndrome (already in place).
+ * x2: Cookie (unused for now).
+ * x3: Context pointer.
+ * x4: Flags (security state from SCR for now).
+ */
+ mov x2, xzr
+ mov x3, sp
+ ubfx x4, x4, #0, #1
+
+ /* Switch to runtime stack */
+ ldr x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+ msr spsel, #MODE_SP_EL0
+ mov sp, x5
+
+ mov x29, x30
+#if ENABLE_ASSERTIONS
+ /* Stash the stack pointer */
+ mov x28, sp
+#endif
+ bl plat_ea_handler
+
+#if ENABLE_ASSERTIONS
+ /*
+ * Error handling flows might involve long jumps, so upon returning from
+ * the platform error handler, validate that we've completely
+ * unwound the stack.
+ */
+ mov x27, sp
+ cmp x28, x27
+ ASM_ASSERT(eq)
+#endif
+
+ /* Make SP point to context */
+ msr spsel, #MODE_SP_ELX
+
+ /* Restore EL3 state and ESR */
+ ldp x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+ msr spsr_el3, x1
+ msr elr_el3, x2
+
+ /* Restore ESR_EL3 and SCR_EL3 */
+ ldp x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+ msr scr_el3, x3
+ msr esr_el3, x4
+
+#if ENABLE_ASSERTIONS
+ cmp x4, xzr
+ ASM_ASSERT(ne)
+#endif
+
+ /* Clear ESR storage */
+ str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]
+
+ ret x29
+endfunc ea_proceed
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
new file mode 100644
index 0000000..ed48311
--- /dev/null
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -0,0 +1,747 @@
+/*
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl31/ea_handle.h>
+#include <bl31/interrupt_mgmt.h>
+#include <bl31/sync_handle.h>
+#include <common/runtime_svc.h>
+#include <context.h>
+#include <cpu_macros.S>
+#include <el3_common_macros.S>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/smccc.h>
+
+ .globl runtime_exceptions
+
+ .globl sync_exception_sp_el0
+ .globl irq_sp_el0
+ .globl fiq_sp_el0
+ .globl serror_sp_el0
+
+ .globl sync_exception_sp_elx
+ .globl irq_sp_elx
+ .globl fiq_sp_elx
+ .globl serror_sp_elx
+
+ .globl sync_exception_aarch64
+ .globl irq_aarch64
+ .globl fiq_aarch64
+ .globl serror_aarch64
+
+ .globl sync_exception_aarch32
+ .globl irq_aarch32
+ .globl fiq_aarch32
+ .globl serror_aarch32
+
+ /*
+ * Save LR and make x30 available, as most of the routines in the vector
+ * entries need a free register.
+ */
+ .macro save_x30
+ str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ .endm
+
+ .macro restore_x30
+ ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ .endm
+
+ /*
+ * Macro that synchronizes errors (EA) and checks for pending SError.
+ * On detecting a pending SError it either reflects it back to the lower
+ * EL (KFH) or handles it in EL3 (FFH), based on the EA routing model.
+ */
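+ /*
+ * In other words: with FFH_SUPPORT and SCR_EL3.EA = 1 (FFH) the pending
+ * SError is handled here in EL3 via handle_pending_async_ea; with
+ * SCR_EL3.EA = 0 (KFH) it is reflected back to the lower EL via
+ * reflect_pending_async_ea_to_lower_el, which does not return.
+ */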
+ .macro sync_and_handle_pending_serror
+ synchronize_errors
+ mrs x30, ISR_EL1
+ tbz x30, #ISR_A_SHIFT, 2f
+#if FFH_SUPPORT
+ mrs x30, scr_el3
+ tst x30, #SCR_EA_BIT
+ b.eq 1f
+ bl handle_pending_async_ea
+ b 2f
+#endif
+1:
+ /* This function never returns, but needs LR for decision making */
+ bl reflect_pending_async_ea_to_lower_el
+2:
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * This macro handles Synchronous exceptions.
+ * Only SMC exceptions are supported.
+ * ---------------------------------------------------------------------
+ */
+ .macro handle_sync_exception
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ /*
+ * Read the timestamp value and store it in per-cpu data. The value
+ * will be extracted from per-cpu data by the C level SMC handler and
+ * saved to the PMF timestamp region.
+ */
+ mrs x30, cntpct_el0
+ str x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ mrs x29, tpidr_el3
+ str x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
+ ldr x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+#endif
+
+ mrs x30, esr_el3
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+ /* Handle SMC exceptions separately from other synchronous exceptions */
+ cmp x30, #EC_AARCH32_SMC
+ b.eq smc_handler32
+
+ cmp x30, #EC_AARCH64_SMC
+ b.eq sync_handler64
+
+ cmp x30, #EC_AARCH64_SYS
+ b.eq sync_handler64
+
+ cmp x30, #EC_IMP_DEF_EL3
+ b.eq imp_def_el3_handler
+
+ /* If FFH Support then try to handle lower EL EA exceptions. */
+#if FFH_SUPPORT
+ mrs x30, scr_el3
+ tst x30, #SCR_EA_BIT
+ b.eq 1f
+ b handle_lower_el_sync_ea
+#endif
+1:
+ /* Synchronous exceptions other than the above are unhandled */
+ b report_unhandled_exception
+ .endm
+
+vector_base runtime_exceptions
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry sync_exception_sp_el0
+#ifdef MONITOR_TRAPS
+ stp x29, x30, [sp, #-16]!
+
+ mrs x30, esr_el3
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+ /* Check for BRK */
+ cmp x30, #EC_BRK
+ b.eq brk_handler
+
+ ldp x29, x30, [sp], #16
+#endif /* MONITOR_TRAPS */
+
+ /* We don't expect any synchronous exceptions from EL3 */
+ b report_unhandled_exception
+end_vector_entry sync_exception_sp_el0
+
+vector_entry irq_sp_el0
+ /*
+ * EL3 code is non-reentrant. Any asynchronous exception is a serious
+ * error. Loop infinitely.
+ */
+ b report_unhandled_interrupt
+end_vector_entry irq_sp_el0
+
+
+vector_entry fiq_sp_el0
+ b report_unhandled_interrupt
+end_vector_entry fiq_sp_el0
+
+
+vector_entry serror_sp_el0
+ no_ret plat_handle_el3_ea
+end_vector_entry serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry sync_exception_sp_elx
+ /*
+ * This exception will trigger if anything went wrong during a previous
+ * exception entry or exit or while handling an earlier unexpected
+ * synchronous exception. There is a high probability that SP_EL3 is
+ * corrupted.
+ */
+ b report_unhandled_exception
+end_vector_entry sync_exception_sp_elx
+
+vector_entry irq_sp_elx
+ b report_unhandled_interrupt
+end_vector_entry irq_sp_elx
+
+vector_entry fiq_sp_elx
+ b report_unhandled_interrupt
+end_vector_entry fiq_sp_elx
+
+vector_entry serror_sp_elx
+#if FFH_SUPPORT
+ /*
+ * This will trigger if the exception was taken due to SError in EL3 or
+ * because of pending asynchronous external aborts from lower EL that got
+ * triggered due to implicit/explicit synchronization in EL3 (SCR_EL3.EA=1)
+ * during EL3 entry. For the former case we continue with "plat_handle_el3_ea".
+ * The latter case will occur when the PSTATE.A bit is cleared in
+ * "handle_pending_async_ea". This means we are taking a nested
+ * exception in EL3. Call the handler for async EA, which will eret back to
+ * the original EL3 handler if it is a nested exception. Also, unmask EA so
+ * that we catch any further EAs that arise while handling this nested
+ * exception at EL3.
+ */
+ save_x30
+ ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
+ cbz x30, 1f
+ /*
+ * This is nested exception handling, clear the flag to avoid taking this
+ * path for further exceptions caused by EA handling
+ */
+ str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
+ unmask_async_ea
+ b handle_lower_el_async_ea
+1:
+ restore_x30
+#endif
+ no_ret plat_handle_el3_ea
+
+end_vector_entry serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry sync_exception_aarch64
+ /*
+ * This exception vector is most commonly the entry point for SMCs and for
+ * traps that are unhandled at lower ELs. SP_EL3 should point
+ * to a valid cpu context where the general purpose and system register
+ * state can be saved.
+ */
+ save_x30
+ apply_at_speculative_wa
+ sync_and_handle_pending_serror
+ unmask_async_ea
+ handle_sync_exception
+end_vector_entry sync_exception_aarch64
+
+vector_entry irq_aarch64
+ save_x30
+ apply_at_speculative_wa
+ sync_and_handle_pending_serror
+ unmask_async_ea
+ b handle_interrupt_exception
+end_vector_entry irq_aarch64
+
+vector_entry fiq_aarch64
+ save_x30
+ apply_at_speculative_wa
+ sync_and_handle_pending_serror
+ unmask_async_ea
+ b handle_interrupt_exception
+end_vector_entry fiq_aarch64
+
+ /*
+ * Need to synchronize any outstanding SError since we can get a burst of errors.
+ * So reuse the sync mechanism to catch any further errors which are pending.
+ */
+vector_entry serror_aarch64
+#if FFH_SUPPORT
+ save_x30
+ apply_at_speculative_wa
+ sync_and_handle_pending_serror
+ unmask_async_ea
+ b handle_lower_el_async_ea
+#else
+ b report_unhandled_exception
+#endif
+end_vector_entry serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry sync_exception_aarch32
+ /*
+ * This exception vector is most commonly the entry point for SMCs and for
+ * traps that are unhandled at lower ELs. SP_EL3 should point
+ * to a valid cpu context where the general purpose and system register
+ * state can be saved.
+ */
+ save_x30
+ apply_at_speculative_wa
+ sync_and_handle_pending_serror
+ unmask_async_ea
+ handle_sync_exception
+end_vector_entry sync_exception_aarch32
+
+vector_entry irq_aarch32
+ save_x30
+ apply_at_speculative_wa
+ sync_and_handle_pending_serror
+ unmask_async_ea
+ b handle_interrupt_exception
+end_vector_entry irq_aarch32
+
+vector_entry fiq_aarch32
+ save_x30
+ apply_at_speculative_wa
+ sync_and_handle_pending_serror
+ unmask_async_ea
+ b handle_interrupt_exception
+end_vector_entry fiq_aarch32
+
+ /*
+ * Need to synchronize any outstanding SError since we can get a burst of errors.
+ * So reuse the sync mechanism to catch any further errors which are pending.
+ */
+vector_entry serror_aarch32
+#if FFH_SUPPORT
+ save_x30
+ apply_at_speculative_wa
+ sync_and_handle_pending_serror
+ unmask_async_ea
+ b handle_lower_el_async_ea
+#else
+ b report_unhandled_exception
+#endif
+end_vector_entry serror_aarch32
+
+#ifdef MONITOR_TRAPS
+ .section .rodata.brk_string, "aS"
+brk_location:
+ .asciz "Error at instruction 0x"
+brk_message:
+ .asciz "Unexpected BRK instruction with value 0x"
+#endif /* MONITOR_TRAPS */
+
+ /* ---------------------------------------------------------------------
+ * The following code handles secure monitor calls.
+ * Depending upon the execution state from which the SMC has been
+ * invoked, it frees some general purpose registers to perform the
+ * remaining tasks. These involve finding the runtime service handler
+ * that is the target of the SMC and switching to the runtime stack
+ * (SP_EL0) before calling the handler.
+ *
+ * Note that x30 has been explicitly saved and can be used here
+ * ---------------------------------------------------------------------
+ */
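+ /*
+ * For reference, the SMC function ID arriving in x0 is laid out per SMCCC as:
+ * bit 31 = call type (Fast/Yielding), bit 30 = calling convention
+ * (SMC64/SMC32), bits [29:24] = owning entity number (OEN), bits [23:17]
+ * must be zero for Fast calls, bit 16 = SVE hint (SMCCC v1.3) and
+ * bits [15:0] = function number. The dispatch below extracts the OEN and
+ * call type to index the registered runtime service descriptors.
+ */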
+func sync_exception_handler
+smc_handler32:
+ /* Check whether aarch32 issued an SMC64 */
+ tbnz x0, #FUNCID_CC_SHIFT, smc_prohibited
+
+sync_handler64:
+ /* NOTE: The code below must preserve x0-x4 */
+
+ /*
+ * Save general purpose and ARMv8.3-PAuth registers (if enabled).
+ * Also save PMCR_EL0 and set the PSTATE to a known state.
+ */
+ bl prepare_el3_entry
+
+#if ENABLE_PAUTH
+ /* Load and program APIAKey firmware key */
+ bl pauth_load_bl31_apiakey
+#endif
+
+ /*
+ * Populate the parameters for the SMC handler.
+ * We already have x0-x4 in place. x5 will point to a cookie (not used
+ * now). x6 will point to the context structure (SP_EL3) and x7 will
+ * contain flags we need to pass to the handler.
+ */
+ mov x5, xzr
+ mov x6, sp
+
+ /*
+ * Restore the saved C runtime stack value which will become the new
+ * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
+ * structure prior to the last ERET from EL3.
+ */
+ ldr x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+
+ /* Switch to SP_EL0 */
+ msr spsel, #MODE_SP_EL0
+
+ /*
+ * Save the SPSR_EL3 and ELR_EL3 in case there is a world
+ * switch during SMC handling.
+ * TODO: Revisit if all system registers can be saved later.
+ */
+ mrs x16, spsr_el3
+ mrs x17, elr_el3
+ stp x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+
+ /* Load SCR_EL3 */
+ mrs x18, scr_el3
+
+ /* check for system register traps */
+ mrs x16, esr_el3
+ ubfx x17, x16, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+ cmp x17, #EC_AARCH64_SYS
+ b.eq sysreg_handler64
+
+ /* Clear flag register */
+ mov x7, xzr
+
+#if ENABLE_RME
+ /* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */
+ ubfx x7, x18, #SCR_NSE_SHIFT, #1
+
+ /*
+ * Shift copied SCR_EL3.NSE bit by 5 to create space for
+ * SCR_EL3.NS bit. Bit 5 of the flag corresponds to
+ * the SCR_EL3.NSE bit.
+ */
+ lsl x7, x7, #5
+#endif /* ENABLE_RME */
+
+ /* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
+ bfi x7, x18, #0, #1
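+
+ /*
+ * At this point the flags in x7 identify the caller's security state:
+ * bit 0 holds SCR_EL3.NS and, when RME is enabled, bit 5 holds
+ * SCR_EL3.NSE. The SVE hint bit is OR'ed in further below.
+ */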
+
+ mov sp, x12
+
+ /*
+ * Per SMCCC documentation, bits [23:17] must be zero for Fast
+ * SMCs. Other values are reserved for future use. Ensure that
+ * these bits are zero; if not, report it as an unknown SMC.
+ */
+ tbz x0, #FUNCID_TYPE_SHIFT, 2f /* Skip the check if it's a Yield Call */
+ tst x0, #(FUNCID_FC_RESERVED_MASK << FUNCID_FC_RESERVED_SHIFT)
+ b.ne smc_unknown
+
+ /*
+ * Per SMCCCv1.3 a caller can set the SVE hint bit in the SMC FID
+ * passed through x0. Copy the SVE hint bit to flags and mask the
+ * bit in smc_fid passed to the standard service dispatcher.
+ * A service/dispatcher can retrieve the SVE hint bit state from
+ * flags using the appropriate helper.
+ */
+2:
+ and x16, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
+ orr x7, x7, x16
+ bic x0, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
+
+ /* Get the unique owning entity number */
+ ubfx x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
+ ubfx x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
+ orr x16, x16, x15, lsl #FUNCID_OEN_WIDTH
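+ /* i.e. x16 = OEN | (call type << FUNCID_OEN_WIDTH), a unique index per service */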
+
+ /* Load descriptor index from array of indices */
+ adrp x14, rt_svc_descs_indices
+ add x14, x14, :lo12:rt_svc_descs_indices
+ ldrb w15, [x14, x16]
+
+ /* Any index greater than 127 is invalid. Check bit 7. */
+ tbnz w15, 7, smc_unknown
+
+ /*
+ * Get the descriptor using the index
+ * x11 = (base + off), w15 = index
+ *
+ * handler = (base + off) + (index << log2(size))
+ */
+ adr x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
+ lsl w10, w15, #RT_SVC_SIZE_LOG2
+ ldr x15, [x11, w10, uxtw]
+
+ /*
+ * Call the Secure Monitor Call handler and then drop directly into
+ * el3_exit() which will program any remaining architectural state
+ * prior to issuing the ERET to the desired lower EL.
+ */
+#if DEBUG
+ cbz x15, rt_svc_fw_critical_error
+#endif
+ blr x15
+
+ b el3_exit
+
+sysreg_handler64:
+ mov x0, x16 /* ESR_EL3, containing syndrome information */
+ mov x1, x6 /* lower EL's context */
+ mov x19, x6 /* save context pointer for after the call */
+ mov sp, x12 /* EL3 runtime stack, as loaded above */
+
+ /* int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx); */
+ bl handle_sysreg_trap
+ /*
+ * returns:
+ * -1: unhandled trap, panic
+ * 0: handled trap, return to the trapping instruction (repeating it)
+ * 1: handled trap, return to the next instruction
+ */
+
+ tst w0, w0
+ b.mi elx_panic /* negative return value: panic */
+ b.eq 1f /* zero: do not change ELR_EL3 */
+
+ /* advance the PC to continue after the instruction */
+ ldr x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
+ add x1, x1, #4
+ str x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
+1:
+ b el3_exit
+
+smc_unknown:
+ /*
+ * Unknown SMC call. Populate return value with SMC_UNK and call
+ * el3_exit() which will restore the remaining architectural state
+ * i.e., SYS, GP and PAuth registers(if any) prior to issuing the ERET
+ * to the desired lower EL.
+ */
+ mov x0, #SMC_UNK
+ str x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b el3_exit
+
+smc_prohibited:
+ restore_ptw_el1_sys_regs
+ ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+ ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ mov x0, #SMC_UNK
+ exception_return
+
+#if DEBUG
+rt_svc_fw_critical_error:
+ /* Switch to SP_ELx */
+ msr spsel, #MODE_SP_ELX
+ no_ret report_unhandled_exception
+#endif
+endfunc sync_exception_handler
+
+ /* ---------------------------------------------------------------------
+ * This function handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
+ * interrupts.
+ *
+ * Note that x30 has been explicitly saved and can be used here
+ * ---------------------------------------------------------------------
+ */
+func handle_interrupt_exception
+ /*
+ * Save general purpose and ARMv8.3-PAuth registers (if enabled).
+ * Also save PMCR_EL0 and set the PSTATE to a known state.
+ */
+ bl prepare_el3_entry
+
+#if ENABLE_PAUTH
+ /* Load and program APIAKey firmware key */
+ bl pauth_load_bl31_apiakey
+#endif
+
+ /* Save the EL3 system registers needed to return from this exception */
+ mrs x0, spsr_el3
+ mrs x1, elr_el3
+ stp x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+
+ /* Switch to the runtime stack i.e. SP_EL0 */
+ ldr x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+ mov x20, sp
+ msr spsel, #MODE_SP_EL0
+ mov sp, x2
+
+ /*
+ * Find out whether this is a valid interrupt type.
+ * If the interrupt controller reports a spurious interrupt then return
+ * to where we came from.
+ */
+ bl plat_ic_get_pending_interrupt_type
+ cmp x0, #INTR_TYPE_INVAL
+ b.eq interrupt_exit
+
+ /*
+ * Get the registered handler for this interrupt type.
+ * A NULL return value could be because of one of the following conditions:
+ *
+ * a. An interrupt of a type was routed correctly but a handler for its
+ * type was not registered.
+ *
+ * b. An interrupt of a type was not routed correctly so a handler for
+ * its type was not registered.
+ *
+ * c. An interrupt of a type was routed correctly to EL3, but was
+ * deasserted before its pending state could be read. Another
+ * interrupt of a different type pended at the same time and its
+ * type was reported as pending instead. However, a handler for this
+ * type was not registered.
+ *
+ * a. and b. can only happen due to a programming error. The
+ * occurrence of c. could be beyond the control of Trusted Firmware.
+ * It makes sense to return from this exception instead of reporting an
+ * error.
+ */
+ bl get_interrupt_type_handler
+ cbz x0, interrupt_exit
+ mov x21, x0
+
+ mov x0, #INTR_ID_UNAVAILABLE
+
+ /* Set the current security state in the 'flags' parameter */
+ mrs x2, scr_el3
+ ubfx x1, x2, #0, #1
+
+ /* Restore the reference to the 'handle' i.e. SP_EL3 */
+ mov x2, x20
+
+ /* x3 will point to a cookie (not used now) */
+ mov x3, xzr
+
+ /* Call the interrupt type handler */
+ blr x21
+
+interrupt_exit:
+ /* Return from exception, possibly in a different security state */
+ b el3_exit
+endfunc handle_interrupt_exception
+
+func imp_def_el3_handler
+ /* Save GP registers */
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+
+ /* Get the cpu_ops pointer */
+ bl get_cpu_ops_ptr
+
+ /* Get the cpu_ops exception handler */
+ ldr x0, [x0, #CPU_E_HANDLER_FUNC]
+
+ /*
+ * If the reserved function pointer is NULL, this CPU does not have an
+ * implementation-defined exception handler function
+ */
+ cbz x0, el3_handler_exit
+ mrs x1, esr_el3
+ ubfx x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+ blr x0
+el3_handler_exit:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ restore_x30
+ no_ret report_unhandled_exception
+endfunc imp_def_el3_handler
+
+/*
+ * Handler for async EA from lower EL synchronized at EL3 entry in KFH mode.
+ *
+ * This scenario may arise when there is an error (EA) in the system which has not
+ * yet been signaled to the PE while executing in a lower EL. During entry into EL3,
+ * the errors are synchronized either implicitly or explicitly, causing an async EA
+ * to pend at EL3.
+ *
+ * On detecting the pending EA (via ISR_EL1.A), if the EA routing model is
+ * KFH (SCR_EL3.EA = 0) this handler reflects the error back to the lower EL.
+ *
+ * This function assumes x30 has been saved.
+ */
+func reflect_pending_async_ea_to_lower_el
+ /*
+ * As the original exception was not handled, we need to ensure that we return
+ * to the instruction which caused the exception. To achieve that, eret
+ * to "elr-4" (Label "subtract_elr_el3") for SMC or simply eret otherwise
+ * (Label "skip_smc_check").
+ *
+ * LIMITATION: It could be that the async EA is masked at the target exception
+ * level, or that the priority of the async EA with respect to the EL3/secure
+ * interrupt is lower, which causes back and forth between the lower EL and EL3.
+ * In that case we can track the loop count in "CTX_NESTED_EA_FLAG" and leverage
+ * the previous ELR in "CTX_SAVED_ELR_EL3" to detect this cycle and panic
+ * to indicate a problem here (Label "check_loop_ctr"). If we are in this cycle,
+ * the loop counter retains its value, but if we do a normal el3_exit this flag
+ * gets cleared. However, setting SCR_EL3.IESB = 1 should give priority to SError
+ * handling, as per the AArch64.TakeException pseudocode in the Arm ARM.
+ *
+ * TODO: In future if EL3 gets a capability to inject a virtual SError to lower
+ * ELs, we can remove the el3_panic and handle the original exception first and
+ * inject SError to lower EL before ereting back.
+ */
+ stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+ ldr x29, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
+ mrs x28, elr_el3
+ cmp x29, x28
+ b.eq check_loop_ctr
+ str x28, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
+ /* Zero the loop counter */
+ str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
+ b skip_loop_ctr
+check_loop_ctr:
+ ldr x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
+ add x29, x29, #1
+ str x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
+ cmp x29, #ASYNC_EA_REPLAY_COUNTER
+ b.ge el3_panic
+skip_loop_ctr:
+ /*
+ * Logic to distinguish if we came from SMC or any other exception.
+ * Use offsets in vector entry to get which exception we are handling.
+ * In each vector entry of size 0x200, address "0x0-0x80" is for sync
+ * exception and "0x80-0x200" is for async exceptions.
+ * Use vector base address (vbar_el3) and exception offset (LR) to
+ * calculate whether the address we came from is any of the following
+ * "0x0-0x80", "0x200-0x280", "0x400-0x480" or "0x600-0x680"
+ */
+ mrs x29, vbar_el3
+ sub x30, x30, x29
+ and x30, x30, #0x1ff
+ cmp x30, #0x80
+ b.ge skip_smc_check
+ /* It's a synchronous exception. Now check whether it is an SMC. */
+ mrs x30, esr_el3
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+ cmp x30, #EC_AARCH32_SMC
+ b.eq subtract_elr_el3
+ cmp x30, #EC_AARCH64_SMC
+ b.eq subtract_elr_el3
+ b skip_smc_check
+subtract_elr_el3:
+ sub x28, x28, #4
+skip_smc_check:
+ msr elr_el3, x28
+ ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+ ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ exception_return
+endfunc reflect_pending_async_ea_to_lower_el
+
+ /* ---------------------------------------------------------------------
+ * The following code handles exceptions caused by BRK instructions.
+ * Following a BRK instruction, the only real valid course of action is
+ * to print some information and panic, as the code that caused it is
+ * likely in an inconsistent internal state.
+ *
+ * This is initially intended to be used in conjunction with
+ * __builtin_trap.
+ * ---------------------------------------------------------------------
+ */
+#ifdef MONITOR_TRAPS
+func brk_handler
+ /* Extract the ISS */
+ mrs x10, esr_el3
+ ubfx x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH
+
+ /* Ensure the console is initialized */
+ bl plat_crash_console_init
+
+ adr x4, brk_location
+ bl asm_print_str
+ mrs x4, elr_el3
+ bl asm_print_hex
+ bl asm_print_newline
+
+ adr x4, brk_message
+ bl asm_print_str
+ mov x4, x10
+ mov x5, #28
+ bl asm_print_hex_bits
+ bl asm_print_newline
+
+ no_ret plat_panic_handler
+endfunc brk_handler
+#endif /* MONITOR_TRAPS */
diff --git a/bl31/bl31.ld.S b/bl31/bl31.ld.S
new file mode 100644
index 0000000..773b41d
--- /dev/null
+++ b/bl31/bl31.ld.S
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/bl_common.ld.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+ENTRY(bl31_entrypoint)
+
+MEMORY {
+ RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
+
+#if SEPARATE_NOBITS_REGION
+ NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
+#else /* SEPARATE_NOBITS_REGION */
+# define NOBITS RAM
+#endif /* SEPARATE_NOBITS_REGION */
+}
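+
+/*
+ * With SEPARATE_NOBITS_REGION the zero-initialised sections (stacks, BSS,
+ * translation tables) are placed in the separate NOBITS memory region defined
+ * above; otherwise NOBITS is simply an alias for RAM.
+ */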
+
+#ifdef PLAT_EXTRA_LD_SCRIPT
+# include <plat.ld.S>
+#endif /* PLAT_EXTRA_LD_SCRIPT */
+
+SECTIONS {
+ RAM_REGION_START = ORIGIN(RAM);
+ RAM_REGION_LENGTH = LENGTH(RAM);
+ . = BL31_BASE;
+
+ ASSERT(. == ALIGN(PAGE_SIZE),
+ "BL31_BASE address is not aligned on a page boundary.")
+
+ __BL31_START__ = .;
+
+#if SEPARATE_CODE_AND_RODATA
+ .text . : {
+ __TEXT_START__ = .;
+
+ *bl31_entrypoint.o(.text*)
+ *(SORT_BY_ALIGNMENT(SORT(.text*)))
+ *(.vectors)
+ __TEXT_END_UNALIGNED__ = .;
+
+ . = ALIGN(PAGE_SIZE);
+
+ __TEXT_END__ = .;
+ } >RAM
+
+ .rodata . : {
+ __RODATA_START__ = .;
+
+ *(SORT_BY_ALIGNMENT(.rodata*))
+
+# if PLAT_EXTRA_RODATA_INCLUDES
+# include <plat.ld.rodata.inc>
+# endif /* PLAT_EXTRA_RODATA_INCLUDES */
+
+ RODATA_COMMON
+
+ . = ALIGN(8);
+
+# include <lib/el3_runtime/pubsub_events.h>
+ __RODATA_END_UNALIGNED__ = .;
+
+ . = ALIGN(PAGE_SIZE);
+
+ __RODATA_END__ = .;
+ } >RAM
+#else /* SEPARATE_CODE_AND_RODATA */
+ .ro . : {
+ __RO_START__ = .;
+
+ *bl31_entrypoint.o(.text*)
+ *(SORT_BY_ALIGNMENT(.text*))
+ *(SORT_BY_ALIGNMENT(.rodata*))
+
+ RODATA_COMMON
+
+ . = ALIGN(8);
+
+# include <lib/el3_runtime/pubsub_events.h>
+
+ *(.vectors)
+
+ __RO_END_UNALIGNED__ = .;
+
+ /*
+ * Memory page(s) mapped to this section will be marked as read-only,
+ * executable. No RW data from the next section must creep in. Ensure
+ * that the rest of the current memory page is unused.
+ */
+ . = ALIGN(PAGE_SIZE);
+
+ __RO_END__ = .;
+ } >RAM
+#endif /* SEPARATE_CODE_AND_RODATA */
+
+ ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
+ "cpu_ops not defined for this platform.")
+
+#if SPM_MM || (SPMC_AT_EL3 && SPMC_AT_EL3_SEL0_SP)
+# ifndef SPM_SHIM_EXCEPTIONS_VMA
+# define SPM_SHIM_EXCEPTIONS_VMA RAM
+# endif /* SPM_SHIM_EXCEPTIONS_VMA */
+
+ /*
+     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
+     * boundary, but we place them in a separate page so that individual
+     * permissions can be set on them; the actual alignment required is
+     * therefore the page size.
+ *
+ * There's no need to include this into the RO section of BL31 because it
+ * doesn't need to be accessed by BL31.
+ */
+ .spm_shim_exceptions : ALIGN(PAGE_SIZE) {
+ __SPM_SHIM_EXCEPTIONS_START__ = .;
+
+ *(.spm_shim_exceptions)
+
+ . = ALIGN(PAGE_SIZE);
+
+ __SPM_SHIM_EXCEPTIONS_END__ = .;
+ } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM
+
+ PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(.spm_shim_exceptions));
+
+ . = LOADADDR(.spm_shim_exceptions) + SIZEOF(.spm_shim_exceptions);
+#endif /* SPM_MM || (SPMC_AT_EL3 && SPMC_AT_EL3_SEL0_SP) */
+
+ __RW_START__ = .;
+
+ DATA_SECTION >RAM
+ RELA_SECTION >RAM
+
+#ifdef BL31_PROGBITS_LIMIT
+ ASSERT(
+ . <= BL31_PROGBITS_LIMIT,
+ "BL31 progbits has exceeded its limit. Consider disabling some features."
+ )
+#endif /* BL31_PROGBITS_LIMIT */
+
+#if SEPARATE_NOBITS_REGION
+ . = ALIGN(PAGE_SIZE);
+
+ __RW_END__ = .;
+ __BL31_END__ = .;
+
+ ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
+
+ . = BL31_NOBITS_BASE;
+
+ ASSERT(. == ALIGN(PAGE_SIZE),
+ "BL31 NOBITS base address is not aligned on a page boundary.")
+
+ __NOBITS_START__ = .;
+#endif /* SEPARATE_NOBITS_REGION */
+
+ STACK_SECTION >NOBITS
+ BSS_SECTION >NOBITS
+ XLAT_TABLE_SECTION >NOBITS
+
+#if USE_COHERENT_MEM
+ /*
+ * The base address of the coherent memory section must be page-aligned to
+ * guarantee that the coherent data are stored on their own pages and are
+ * not mixed with normal data. This is required to set up the correct
+ * memory attributes for the coherent data page tables.
+ */
+ .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+ __COHERENT_RAM_START__ = .;
+
+ /*
+ * Bakery locks are stored in coherent memory. Each lock's data is
+ * contiguous and fully allocated by the compiler.
+ */
+ *(.bakery_lock)
+ *(.tzfw_coherent_mem)
+
+ __COHERENT_RAM_END_UNALIGNED__ = .;
+
+ /*
+ * Memory page(s) mapped to this section will be marked as device
+ * memory. No other unexpected data must creep in. Ensure the rest of
+ * the current memory page is unused.
+ */
+ . = ALIGN(PAGE_SIZE);
+
+ __COHERENT_RAM_END__ = .;
+ } >NOBITS
+#endif /* USE_COHERENT_MEM */
+
+#if SEPARATE_NOBITS_REGION
+ __NOBITS_END__ = .;
+
+ ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
+#else /* SEPARATE_NOBITS_REGION */
+ __RW_END__ = .;
+ __BL31_END__ = .;
+
+ ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
+#endif /* SEPARATE_NOBITS_REGION */
+ RAM_REGION_END = .;
+
+ /DISCARD/ : {
+ *(.dynsym .dynstr .hash .gnu.hash)
+ }
+}
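As a hedged illustration of how the linker-defined symbols above are typically consumed from C: a sketch assuming TF-A's IMPORT_SYM() helper from utils_def.h; the chosen symbols and the bl31_image_size() helper are illustrative only.

#include <stddef.h>
#include <stdint.h>

#include <lib/utils_def.h>

/* Import linker symbols as usable addresses (not contents). */
IMPORT_SYM(uintptr_t, __BL31_START__, BL31_START);
IMPORT_SYM(uintptr_t, __BL31_END__, BL31_END);
IMPORT_SYM(uintptr_t, __RW_START__, BL31_RW_START);
IMPORT_SYM(uintptr_t, __RW_END__, BL31_RW_END);

/* Hypothetical helper: total footprint of the BL31 image in RAM. */
static inline size_t bl31_image_size(void)
{
	return (size_t)(BL31_END - BL31_START);
}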
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
new file mode 100644
index 0000000..f0776c4
--- /dev/null
+++ b/bl31/bl31.mk
@@ -0,0 +1,186 @@
+#
+# Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+################################################################################
+# Include Makefile for the SPM-MM implementation
+################################################################################
+ifeq (${SUPPORT_UNKNOWN_MPID},1)
+ ifeq (${DEBUG},0)
+ $(warning WARNING: SUPPORT_UNKNOWN_MPID enabled)
+ endif
+endif
+
+ifeq (${SPM_MM},1)
+ ifeq (${EL3_EXCEPTION_HANDLING},0)
+ $(error EL3_EXCEPTION_HANDLING must be 1 for SPM-MM support)
+ else
+ $(info Including SPM Management Mode (MM) makefile)
+ include services/std_svc/spm/common/spm.mk
+ include services/std_svc/spm/spm_mm/spm_mm.mk
+ endif
+endif
+
+include lib/extensions/amu/amu.mk
+include lib/mpmm/mpmm.mk
+
+ifeq (${SPMC_AT_EL3},1)
+ $(info Including EL3 SPMC makefile)
+ include services/std_svc/spm/common/spm.mk
+ include services/std_svc/spm/el3_spmc/spmc.mk
+endif
+
+include lib/psci/psci_lib.mk
+
+BL31_SOURCES += bl31/bl31_main.c \
+ bl31/interrupt_mgmt.c \
+ bl31/aarch64/bl31_entrypoint.S \
+ bl31/aarch64/crash_reporting.S \
+ bl31/aarch64/runtime_exceptions.S \
+ bl31/bl31_context_mgmt.c \
+ bl31/bl31_traps.c \
+ common/runtime_svc.c \
+ lib/cpus/aarch64/dsu_helpers.S \
+ plat/common/aarch64/platform_mp_stack.S \
+ services/arm_arch_svc/arm_arch_svc_setup.c \
+ services/std_svc/std_svc_setup.c \
+ ${PSCI_LIB_SOURCES} \
+ ${SPMD_SOURCES} \
+ ${SPM_MM_SOURCES} \
+ ${SPMC_SOURCES} \
+ ${SPM_SOURCES}
+
+ifeq (${ENABLE_PMF}, 1)
+BL31_SOURCES += lib/pmf/pmf_main.c
+endif
+
+include lib/debugfs/debugfs.mk
+ifeq (${USE_DEBUGFS},1)
+ BL31_SOURCES += $(DEBUGFS_SRCS)
+endif
+
+ifeq (${EL3_EXCEPTION_HANDLING},1)
+BL31_SOURCES += bl31/ehf.c
+endif
+
+ifeq (${FFH_SUPPORT},1)
+BL31_SOURCES += bl31/aarch64/ea_delegate.S
+endif
+
+ifeq (${SDEI_SUPPORT},1)
+ifeq (${EL3_EXCEPTION_HANDLING},0)
+ $(error EL3_EXCEPTION_HANDLING must be 1 for SDEI support)
+endif
+BL31_SOURCES += services/std_svc/sdei/sdei_dispatch.S \
+ services/std_svc/sdei/sdei_event.c \
+ services/std_svc/sdei/sdei_intr_mgmt.c \
+ services/std_svc/sdei/sdei_main.c \
+ services/std_svc/sdei/sdei_state.c
+endif
+
+ifeq (${TRNG_SUPPORT},1)
+BL31_SOURCES += services/std_svc/trng/trng_main.c \
+ services/std_svc/trng/trng_entropy_pool.c
+endif
+
+ifneq (${ENABLE_SPE_FOR_NS},0)
+BL31_SOURCES += lib/extensions/spe/spe.c
+endif
+
+ifeq (${ERRATA_ABI_SUPPORT},1)
+BL31_SOURCES += services/std_svc/errata_abi/errata_abi_main.c
+endif
+
+ifneq (${ENABLE_FEAT_AMU},0)
+BL31_SOURCES += ${AMU_SOURCES}
+endif
+
+ifeq (${ENABLE_MPMM},1)
+BL31_SOURCES += ${MPMM_SOURCES}
+endif
+
+ifneq (${ENABLE_SME_FOR_NS},0)
+BL31_SOURCES += lib/extensions/sme/sme.c
+endif
+ifneq (${ENABLE_SVE_FOR_NS},0)
+BL31_SOURCES += lib/extensions/sve/sve.c
+endif
+
+ifneq (${ENABLE_FEAT_MPAM},0)
+BL31_SOURCES += lib/extensions/mpam/mpam.c
+endif
+
+ifneq (${ENABLE_TRBE_FOR_NS},0)
+BL31_SOURCES += lib/extensions/trbe/trbe.c
+endif
+
+ifneq (${ENABLE_BRBE_FOR_NS},0)
+BL31_SOURCES += lib/extensions/brbe/brbe.c
+endif
+
+ifneq (${ENABLE_SYS_REG_TRACE_FOR_NS},0)
+BL31_SOURCES += lib/extensions/sys_reg_trace/aarch64/sys_reg_trace.c
+endif
+
+ifneq (${ENABLE_TRF_FOR_NS},0)
+BL31_SOURCES += lib/extensions/trf/aarch64/trf.c
+endif
+
+ifeq (${WORKAROUND_CVE_2017_5715},1)
+BL31_SOURCES += lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S \
+ lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
+endif
+
+ifeq ($(SMC_PCI_SUPPORT),1)
+BL31_SOURCES += services/std_svc/pci_svc.c
+endif
+
+ifeq (${ENABLE_RME},1)
+include lib/gpt_rme/gpt_rme.mk
+
+BL31_SOURCES += ${GPT_LIB_SRCS} \
+ ${RMMD_SOURCES}
+endif
+
+ifeq ($(FEATURE_DETECTION),1)
+BL31_SOURCES += common/feat_detect.c
+endif
+
+ifeq (${DRTM_SUPPORT},1)
+BL31_SOURCES += services/std_svc/drtm/drtm_main.c \
+ services/std_svc/drtm/drtm_dma_prot.c \
+ services/std_svc/drtm/drtm_res_address_map.c \
+ services/std_svc/drtm/drtm_measurements.c \
+ services/std_svc/drtm/drtm_remediation.c \
+ ${MBEDTLS_SOURCES}
+endif
+
+BL31_DEFAULT_LINKER_SCRIPT_SOURCE := bl31/bl31.ld.S
+
+ifneq ($(findstring gcc,$(notdir $(LD))),)
+ BL31_LDFLAGS += -Wl,--sort-section=alignment
+else ifneq ($(findstring ld,$(notdir $(LD))),)
+ BL31_LDFLAGS += --sort-section=alignment
+endif
+
+# Flag used to indicate whether crash reporting via the console should be
+# included in BL31. It defaults to being enabled in DEBUG builds only.
+ifndef CRASH_REPORTING
+CRASH_REPORTING := $(DEBUG)
+endif
+
+$(eval $(call assert_booleans,\
+ $(sort \
+ CRASH_REPORTING \
+ EL3_EXCEPTION_HANDLING \
+ SDEI_SUPPORT \
+)))
+
+$(eval $(call add_defines,\
+ $(sort \
+ CRASH_REPORTING \
+ EL3_EXCEPTION_HANDLING \
+ SDEI_SUPPORT \
+)))
diff --git a/bl31/bl31_context_mgmt.c b/bl31/bl31_context_mgmt.c
new file mode 100644
index 0000000..34f69ad
--- /dev/null
+++ b/bl31/bl31_context_mgmt.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <bl31/bl31.h>
+#include <common/bl_common.h>
+#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/el3_runtime/cpu_data.h>
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the calling CPU that was set as the context for the specified security
+ * state. NULL is returned if no such structure has been specified.
+ ******************************************************************************/
+void *cm_get_context(uint32_t security_state)
+{
+ assert(sec_state_is_valid(security_state));
+
+ return get_cpu_data(cpu_context[get_cpu_context_index(security_state)]);
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the calling CPU
+ ******************************************************************************/
+void cm_set_context(void *context, uint32_t security_state)
+{
+ assert(sec_state_is_valid(security_state));
+
+ set_cpu_data(cpu_context[get_cpu_context_index(security_state)],
+ context);
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the CPU identified by `cpu_idx` that was set as the context for the
+ * specified security state. NULL is returned if no such structure has been
+ * specified.
+ ******************************************************************************/
+void *cm_get_context_by_index(unsigned int cpu_idx,
+ unsigned int security_state)
+{
+ assert(sec_state_is_valid(security_state));
+
+ return get_cpu_data_by_index(cpu_idx,
+ cpu_context[get_cpu_context_index(security_state)]);
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the CPU identified by CPU index.
+ ******************************************************************************/
+void cm_set_context_by_index(unsigned int cpu_idx, void *context,
+ unsigned int security_state)
+{
+ assert(sec_state_is_valid(security_state));
+
+ set_cpu_data_by_index(cpu_idx,
+ cpu_context[get_cpu_context_index(security_state)],
+ context);
+}
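A hedged sketch of how a Secure Payload Dispatcher typically uses these accessors on the calling CPU: it owns a cpu_context_t for the Secure state, publishes it with cm_set_context(), initialises it from the SP's entry point, and later retrieves it before re-entry. The sp_ctx variable and the spd_* function names are assumptions for illustration; real SPDs keep one such context per CPU.

#include <assert.h>

#include <common/bl_common.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>

/* Hypothetical Secure-state context owned by an SPD (single CPU shown). */
static cpu_context_t sp_ctx;

void spd_setup_secure_context(entry_point_info_t *sp_ep_info)
{
	/* Publish this CPU's Secure context so EL3 code can find it. */
	cm_set_context(&sp_ctx, SECURE);

	/* Populate the context from the Secure Payload's entry point info. */
	cm_init_my_context(sp_ep_info);
}

void spd_enter_sp(void)
{
	/* Retrieve the context registered above before restoring it. */
	cpu_context_t *ctx = cm_get_context(SECURE);

	assert(ctx == &sp_ctx);

	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}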
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
new file mode 100644
index 0000000..925c6a6
--- /dev/null
+++ b/bl31/bl31_main.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch.h>
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <bl31/ehf.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/feat_detect.h>
+#include <common/runtime_svc.h>
+#include <drivers/console.h>
+#include <lib/bootmarker_capture.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/pmf/pmf.h>
+#include <lib/runtime_instr.h>
+#include <plat/common/platform.h>
+#include <services/std_svc.h>
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ PMF_REGISTER_SERVICE_SMC(rt_instr_svc, PMF_RT_INSTR_SVC_ID,
+ RT_INSTR_TOTAL_IDS, PMF_STORE_ENABLE)
+#endif
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ PMF_REGISTER_SERVICE(bl_svc, PMF_RT_INSTR_SVC_ID,
+ BL_TOTAL_IDS, PMF_DUMP_ENABLE)
+#endif
+
+/*******************************************************************************
+ * This function pointer is used to initialise the BL32 image. It is set by the
+ * SPD calling bl31_register_bl32_init() after setting up everything necessary
+ * for SP execution. In cases where both the SPD and the SP are absent, or when
+ * the SPD finds it impossible to execute the SP, this pointer is left as NULL.
+ ******************************************************************************/
+static int32_t (*bl32_init)(void);
+
+/*****************************************************************************
+ * Function used to initialise RMM if RME is enabled
+ *****************************************************************************/
+#if ENABLE_RME
+static int32_t (*rmm_init)(void);
+#endif
+
+/*******************************************************************************
+ * Variable to indicate whether next image to execute after BL31 is BL33
+ * (non-secure & default) or BL32 (secure).
+ ******************************************************************************/
+static uint32_t next_image_type = NON_SECURE;
+
+#ifdef SUPPORT_UNKNOWN_MPID
+/*
+ * Flag to know whether an unsupported MPID has been detected. To avoid having
+ * it land in the .bss section, it is initialized to a non-zero value; this way
+ * we avoid potential WAW hazards during system bring-up.
+ */
+volatile uint32_t unsupported_mpid_flag = 1;
+#endif
+
+/*
+ * Implement the ARM Standard Service function to get arguments for a
+ * particular service.
+ */
+uintptr_t get_arm_std_svc_args(unsigned int svc_mask)
+{
+ /* Setup the arguments for PSCI Library */
+ DEFINE_STATIC_PSCI_LIB_ARGS_V1(psci_args, bl31_warm_entrypoint);
+
+ /* PSCI is the only ARM Standard Service implemented */
+ assert(svc_mask == PSCI_FID_MASK);
+
+ return (uintptr_t)&psci_args;
+}
+
+/*******************************************************************************
+ * Simple function to initialise all BL31 helper libraries.
+ ******************************************************************************/
+void __init bl31_lib_init(void)
+{
+ cm_init();
+}
+
+/*******************************************************************************
+ * Setup function for BL31.
+ ******************************************************************************/
+void bl31_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
+ u_register_t arg3)
+{
+ /* Perform early platform-specific setup */
+ bl31_early_platform_setup2(arg0, arg1, arg2, arg3);
+
+ /* Perform late platform-specific setup */
+ bl31_plat_arch_setup();
+
+#if CTX_INCLUDE_PAUTH_REGS
+ /*
+ * Assert that the ARMv8.3-PAuth registers are present or an access
+ * fault will be triggered when they are being saved or restored.
+ */
+ assert(is_armv8_3_pauth_present());
+#endif /* CTX_INCLUDE_PAUTH_REGS */
+}
+
+/*******************************************************************************
+ * BL31 is responsible for setting up the runtime services for the primary cpu
+ * before passing control to the bootloader or an Operating System. This
+ * function calls runtime_svc_init() which initializes all registered runtime
+ * services. The runtime services set up enough context for the core to
+ * switch to the next exception level. When this function returns, the core will
+ * switch to the programmed exception level via an ERET.
+ ******************************************************************************/
+void bl31_main(void)
+{
+ /* Init registers that never change for the lifetime of TF-A */
+ cm_manage_extensions_el3();
+
+ /* Init per-world context registers for non-secure world */
+ manage_extensions_nonsecure_per_world();
+
+ NOTICE("BL31: %s\n", version_string);
+ NOTICE("BL31: %s\n", build_message);
+
+#if FEATURE_DETECTION
+ /* Detect if features enabled during compilation are supported by PE. */
+ detect_arch_features();
+#endif /* FEATURE_DETECTION */
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ PMF_CAPTURE_TIMESTAMP(bl_svc, BL31_ENTRY, PMF_CACHE_MAINT);
+#endif
+
+#ifdef SUPPORT_UNKNOWN_MPID
+ if (unsupported_mpid_flag == 0) {
+ NOTICE("Unsupported MPID detected!\n");
+ }
+#endif
+
+ /* Perform platform setup in BL31 */
+ bl31_platform_setup();
+
+ /* Initialise helper libraries */
+ bl31_lib_init();
+
+#if EL3_EXCEPTION_HANDLING
+ INFO("BL31: Initialising Exception Handling Framework\n");
+ ehf_init();
+#endif
+
+ /* Initialize the runtime services e.g. psci. */
+ INFO("BL31: Initializing runtime services\n");
+ runtime_svc_init();
+
+ /*
+ * All the cold boot actions on the primary cpu are done. We now need to
+ * decide which is the next image and how to execute it.
+ * If the SPD runtime service is present, it would want to pass control
+ * to BL32 first in S-EL1. In that case, SPD would have registered a
+ * function to initialize bl32 where it takes responsibility of entering
+ * S-EL1 and returning control back to bl31_main. Similarly, if RME is
+ * enabled and a function is registered to initialize RMM, control is
+ * transferred to RMM in R-EL2. After RMM initialization, control is
+ * returned back to bl31_main. Once this is done we can prepare entry
+ * into BL33 as normal.
+ */
+
+ /*
+ * If SPD had registered an init hook, invoke it.
+ */
+ if (bl32_init != NULL) {
+ INFO("BL31: Initializing BL32\n");
+
+ console_flush();
+ int32_t rc = (*bl32_init)();
+
+ if (rc == 0) {
+ WARN("BL31: BL32 initialization failed\n");
+ }
+ }
+
+ /*
+ * If RME is enabled and init hook is registered, initialize RMM
+ * in R-EL2.
+ */
+#if ENABLE_RME
+ if (rmm_init != NULL) {
+ INFO("BL31: Initializing RMM\n");
+
+ console_flush();
+ int32_t rc = (*rmm_init)();
+
+ if (rc == 0) {
+ WARN("BL31: RMM initialization failed\n");
+ }
+ }
+#endif
+
+ /*
+ * We are ready to enter the next EL. Prepare entry into the image
+ * corresponding to the desired security state after the next ERET.
+ */
+ bl31_prepare_next_image_entry();
+
+ console_flush();
+
+ /*
+ * Perform any platform specific runtime setup prior to cold boot exit
+ * from BL31
+ */
+ bl31_plat_runtime_setup();
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ PMF_CAPTURE_TIMESTAMP(bl_svc, BL31_EXIT, PMF_CACHE_MAINT);
+ console_flush();
+#endif
+}
+
+/*******************************************************************************
+ * Accessor functions to help runtime services decide which image should be
+ * executed after BL31. This is BL33 or the non-secure bootloader image by
+ * default but the Secure payload dispatcher could override this by requesting
+ * an entry into BL32 (Secure payload) first. If it does so then it should use
+ * the same API to program an entry into BL33 once BL32 initialisation is
+ * complete.
+ ******************************************************************************/
+void bl31_set_next_image_type(uint32_t security_state)
+{
+ assert(sec_state_is_valid(security_state));
+ next_image_type = security_state;
+}
+
+uint32_t bl31_get_next_image_type(void)
+{
+ return next_image_type;
+}
+
+/*******************************************************************************
+ * This function programs EL3 registers and performs other setup to enable entry
+ * into the next image after BL31 at the next ERET.
+ ******************************************************************************/
+void __init bl31_prepare_next_image_entry(void)
+{
+ entry_point_info_t *next_image_info;
+ uint32_t image_type;
+
+#if CTX_INCLUDE_AARCH32_REGS
+ /*
+ * Ensure that the build flag to save AArch32 system registers in CPU
+ * context is not set for AArch64-only platforms.
+ */
+ if (el_implemented(1) == EL_IMPL_A64ONLY) {
+ ERROR("EL1 supports AArch64-only. Please set build flag "
+ "CTX_INCLUDE_AARCH32_REGS = 0\n");
+ panic();
+ }
+#endif
+
+ /* Determine which image to execute next */
+ image_type = bl31_get_next_image_type();
+
+ /* Program EL3 registers to enable entry into the next EL */
+ next_image_info = bl31_plat_get_next_image_ep_info(image_type);
+ assert(next_image_info != NULL);
+ assert(image_type == GET_SECURITY_STATE(next_image_info->h.attr));
+
+ INFO("BL31: Preparing for EL3 exit to %s world\n",
+ (image_type == SECURE) ? "secure" : "normal");
+ print_entry_point_info(next_image_info);
+ cm_init_my_context(next_image_info);
+
+ /*
+ * If we are entering the Non-secure world, use
+ * 'cm_prepare_el3_exit_ns' to exit.
+ */
+ if (image_type == NON_SECURE) {
+ cm_prepare_el3_exit_ns();
+ } else {
+ cm_prepare_el3_exit(image_type);
+ }
+}
+
+/*******************************************************************************
+ * This function initializes the pointer to BL32 init function. This is expected
+ * to be called by the SPD after it finishes all its initialization
+ ******************************************************************************/
+void bl31_register_bl32_init(int32_t (*func)(void))
+{
+ bl32_init = func;
+}
+
+#if ENABLE_RME
+/*******************************************************************************
+ * This function initializes the pointer to RMM init function. This is expected
+ * to be called by the RMMD after it finishes all its initialization
+ ******************************************************************************/
+void bl31_register_rmm_init(int32_t (*func)(void))
+{
+ rmm_init = func;
+}
+#endif
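To show how the bl32_init hook above is populated in practice: a hedged sketch of an SPD setup routine registering its init function with bl31_register_bl32_init(), so that bl31_main() invokes it before preparing entry into BL33. The my_spd_* names are hypothetical; note that, per the check in bl31_main(), a zero return value is treated as failure.

#include <stdint.h>

#include <bl31/bl31.h>

/* Hypothetical SPD init hook: enters the SP once and reports the outcome. */
static int32_t my_spd_init(void)
{
	/* ... enter S-EL1, let the Secure Payload initialise, return here ... */

	return 1;	/* non-zero indicates success to bl31_main() */
}

/* Hypothetical SPD setup, run during runtime service initialisation. */
static int32_t my_spd_setup(void)
{
	/* Ask BL31 to call my_spd_init() from bl31_main() later on. */
	bl31_register_bl32_init(my_spd_init);

	return 0;
}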
diff --git a/bl31/bl31_traps.c b/bl31/bl31_traps.c
new file mode 100644
index 0000000..2cfe14a
--- /dev/null
+++ b/bl31/bl31_traps.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2022, ARM Limited. All rights reserved.
+ * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Dispatch synchronous system register traps from lower ELs.
+ */
+
+#include <bl31/sync_handle.h>
+#include <context.h>
+
+int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx)
+{
+ uint64_t __unused opcode = esr_el3 & ISS_SYSREG_OPCODE_MASK;
+
+#if ENABLE_FEAT_RNG_TRAP
+ if ((opcode == ISS_SYSREG_OPCODE_RNDR) || (opcode == ISS_SYSREG_OPCODE_RNDRRS)) {
+ return plat_handle_rng_trap(esr_el3, ctx);
+ }
+#endif
+
+#if IMPDEF_SYSREG_TRAP
+ if ((opcode & ISS_SYSREG_OPCODE_IMPDEF) == ISS_SYSREG_OPCODE_IMPDEF) {
+ return plat_handle_impdef_trap(esr_el3, ctx);
+ }
+#endif
+
+ return TRAP_RET_UNHANDLED;
+}
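As a hedged illustration of the ENABLE_FEAT_RNG_TRAP path above: a sketch of a platform handler that emulates RNDR/RNDRRS by writing a value into the trapped instruction's destination register and resuming after it. The Rt field position (ISS bits [9:5]) follows the Arm ARM encoding for trapped MRS accesses; plat_get_entropy64() is a hypothetical helper, the 8-byte GP register stride in cpu_context_t is assumed, and a production handler would also update PSTATE.NZCV as the RNDR* instructions require.

#include <stdint.h>

#include <bl31/sync_handle.h>
#include <context.h>

/* Hypothetical platform entropy source. */
extern uint64_t plat_get_entropy64(void);

int plat_handle_rng_trap(uint64_t esr_el3, cpu_context_t *ctx)
{
	/* ISS bits [9:5] hold Rt, the destination register of the MRS. */
	unsigned int rt = (unsigned int)((esr_el3 >> 5) & 0x1fU);

	/* Writes targeting xzr (Rt == 31) can simply be discarded. */
	if (rt != 31U) {
		write_ctx_reg(get_gpregs_ctx(ctx),
			      CTX_GPREG_X0 + (rt << 3), plat_get_entropy64());
	}

	/* Skip the trapped RNDR/RNDRRS instruction and continue. */
	return TRAP_RET_CONTINUE;
}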
diff --git a/bl31/ehf.c b/bl31/ehf.c
new file mode 100644
index 0000000..6f3d941
--- /dev/null
+++ b/bl31/ehf.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Exception handlers at EL3, their priority levels, and management.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <bl31/ehf.h>
+#include <bl31/interrupt_mgmt.h>
+#include <context.h>
+#include <common/debug.h>
+#include <drivers/arm/gic_common.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/el3_runtime/pubsub_events.h>
+#include <plat/common/platform.h>
+
+/* Output EHF logs as verbose */
+#define EHF_LOG(...) VERBOSE("EHF: " __VA_ARGS__)
+
+#define EHF_INVALID_IDX (-1)
+
+/* For a valid handler, return the actual function pointer; otherwise, 0. */
+#define RAW_HANDLER(h) \
+ ((ehf_handler_t) ((((h) & EHF_PRI_VALID_) != 0U) ? \
+ ((h) & ~EHF_PRI_VALID_) : 0U))
+
+#define PRI_BIT(idx) (((ehf_pri_bits_t) 1u) << (idx))
+
+/*
+ * Convert index into secure priority using the platform-defined priority bits
+ * field.
+ */
+#define IDX_TO_PRI(idx) \
+ ((((unsigned) idx) << (7u - exception_data.pri_bits)) & 0x7fU)
+
+/* Check whether a given index is valid */
+#define IS_IDX_VALID(idx) \
+ ((exception_data.ehf_priorities[idx].ehf_handler & EHF_PRI_VALID_) != 0U)
+
+/* Returns whether given priority is in secure priority range */
+#define IS_PRI_SECURE(pri) (((pri) & 0x80U) == 0U)
+
+/* To be defined by the platform */
+extern const ehf_priorities_t exception_data;
+
+/* Translate priority to the index in the priority array */
+static unsigned int pri_to_idx(unsigned int priority)
+{
+ unsigned int idx;
+
+ idx = EHF_PRI_TO_IDX(priority, exception_data.pri_bits);
+ assert(idx < exception_data.num_priorities);
+ assert(IS_IDX_VALID(idx));
+
+ return idx;
+}
+
+/* Return whether there are outstanding priority activations */
+static bool has_valid_pri_activations(pe_exc_data_t *pe_data)
+{
+ return pe_data->active_pri_bits != 0U;
+}
+
+static pe_exc_data_t *this_cpu_data(void)
+{
+ return &get_cpu_data(ehf_data);
+}
+
+/*
+ * Return the current priority index of this CPU. If no priority is active,
+ * return EHF_INVALID_IDX.
+ */
+static int get_pe_highest_active_idx(pe_exc_data_t *pe_data)
+{
+ if (!has_valid_pri_activations(pe_data))
+ return EHF_INVALID_IDX;
+
+ /* Current priority is the right-most bit */
+ return (int) __builtin_ctz(pe_data->active_pri_bits);
+}
+
+/*
+ * Mark priority active by setting the corresponding bit in active_pri_bits and
+ * programming the priority mask.
+ *
+ * This API is to be used as part of delegating to lower ELs other than for
+ * interrupts; e.g. while handling synchronous exceptions.
+ *
+ * This API is expected to be invoked before restoring context (Secure or
+ * Non-secure) in preparation for the respective dispatch.
+ */
+void ehf_activate_priority(unsigned int priority)
+{
+ int cur_pri_idx;
+ unsigned int old_mask, run_pri, idx;
+ pe_exc_data_t *pe_data = this_cpu_data();
+
+ /*
+ * Query interrupt controller for the running priority, or idle priority
+ * if no interrupts are being handled. The requested priority must be
+ * less (higher priority) than the active running priority.
+ */
+ run_pri = plat_ic_get_running_priority();
+ if (priority >= run_pri) {
+ ERROR("Running priority higher (0x%x) than requested (0x%x)\n",
+ run_pri, priority);
+ panic();
+ }
+
+ /*
+ * If there were priority activations already, the requested priority
+ * must be less (higher priority) than the current highest priority
+ * activation so far.
+ */
+ cur_pri_idx = get_pe_highest_active_idx(pe_data);
+ idx = pri_to_idx(priority);
+ if ((cur_pri_idx != EHF_INVALID_IDX) &&
+ (idx >= ((unsigned int) cur_pri_idx))) {
+ ERROR("Activation priority mismatch: req=0x%x current=0x%x\n",
+ priority, IDX_TO_PRI(cur_pri_idx));
+ panic();
+ }
+
+ /* Set the bit corresponding to the requested priority */
+ pe_data->active_pri_bits |= PRI_BIT(idx);
+
+ /*
+ * Program priority mask for the activated level. Check that the new
+ * priority mask is setting a higher priority level than the existing
+ * mask.
+ */
+ old_mask = plat_ic_set_priority_mask(priority);
+ if (priority >= old_mask) {
+ ERROR("Requested priority (0x%x) lower than Priority Mask (0x%x)\n",
+ priority, old_mask);
+ panic();
+ }
+
+ /*
+ * If this is the first activation, save the priority mask. This will be
+ * restored after the last deactivation.
+ */
+ if (cur_pri_idx == EHF_INVALID_IDX)
+ pe_data->init_pri_mask = (uint8_t) old_mask;
+
+ EHF_LOG("activate prio=%d\n", get_pe_highest_active_idx(pe_data));
+}
+
+/*
+ * Mark priority inactive by clearing the corresponding bit in active_pri_bits,
+ * and programming the priority mask.
+ *
+ * This API is expected to be used as part of delegating to lower ELs other
+ * than for interrupts; e.g. while handling synchronous exceptions.
+ *
+ * This API is expected to be invoked after saving context (Secure or
+ * Non-secure), having concluded the respective dispatch.
+ */
+void ehf_deactivate_priority(unsigned int priority)
+{
+ int cur_pri_idx;
+ pe_exc_data_t *pe_data = this_cpu_data();
+ unsigned int old_mask, run_pri, idx;
+
+ /*
+ * Query interrupt controller for the running priority, or idle priority
+ * if no interrupts are being handled. The requested priority must be
+ * less (higher priority) than the active running priority.
+ */
+ run_pri = plat_ic_get_running_priority();
+ if (priority >= run_pri) {
+ ERROR("Running priority higher (0x%x) than requested (0x%x)\n",
+ run_pri, priority);
+ panic();
+ }
+
+ /*
+ * Deactivation is allowed only when there are priority activations, and
+ * the deactivation priority level must match the current activated
+ * priority.
+ */
+ cur_pri_idx = get_pe_highest_active_idx(pe_data);
+ idx = pri_to_idx(priority);
+ if ((cur_pri_idx == EHF_INVALID_IDX) ||
+ (idx != ((unsigned int) cur_pri_idx))) {
+ ERROR("Deactivation priority mismatch: req=0x%x current=0x%x\n",
+ priority, IDX_TO_PRI(cur_pri_idx));
+ panic();
+ }
+
+ /* Clear bit corresponding to highest priority */
+ pe_data->active_pri_bits &= (pe_data->active_pri_bits - 1u);
+
+ /*
+ * Restore priority mask corresponding to the next priority, or the
+ * one stashed earlier if there are no more to deactivate.
+ */
+ cur_pri_idx = get_pe_highest_active_idx(pe_data);
+ if (cur_pri_idx == EHF_INVALID_IDX)
+ old_mask = plat_ic_set_priority_mask(pe_data->init_pri_mask);
+ else
+ old_mask = plat_ic_set_priority_mask(priority);
+
+ if (old_mask > priority) {
+ ERROR("Deactivation priority (0x%x) lower than Priority Mask (0x%x)\n",
+ priority, old_mask);
+ panic();
+ }
+
+ EHF_LOG("deactivate prio=%d\n", get_pe_highest_active_idx(pe_data));
+}
+
+/*
+ * After leaving Non-secure world, stash current Non-secure Priority Mask, and
+ * set Priority Mask to the highest Non-secure priority so that Non-secure
+ * interrupts cannot preempt Secure execution.
+ *
+ * If the current running priority is in the secure range, or if there are
+ * outstanding priority activations, this function does nothing.
+ *
+ * This function subscribes to the 'cm_exited_normal_world' event published by
+ * the Context Management Library.
+ */
+static void *ehf_exited_normal_world(const void *arg)
+{
+ unsigned int run_pri;
+ pe_exc_data_t *pe_data = this_cpu_data();
+
+ /* If the running priority is in the secure range, do nothing */
+ run_pri = plat_ic_get_running_priority();
+ if (IS_PRI_SECURE(run_pri))
+ return NULL;
+
+ /* Do nothing if there are explicit activations */
+ if (has_valid_pri_activations(pe_data))
+ return NULL;
+
+ assert(pe_data->ns_pri_mask == 0u);
+
+ pe_data->ns_pri_mask =
+ (uint8_t) plat_ic_set_priority_mask(GIC_HIGHEST_NS_PRIORITY);
+
+ /* The previous Priority Mask is not expected to be in secure range */
+ if (IS_PRI_SECURE(pe_data->ns_pri_mask)) {
+ ERROR("Priority Mask (0x%x) already in secure range\n",
+ pe_data->ns_pri_mask);
+ panic();
+ }
+
+ EHF_LOG("Priority Mask: 0x%x => 0x%x\n", pe_data->ns_pri_mask,
+ GIC_HIGHEST_NS_PRIORITY);
+
+ return NULL;
+}
+
+/*
+ * Conclude Secure execution and prepare for return to Non-secure world. Restore
+ * the Non-secure Priority Mask previously stashed upon leaving Non-secure
+ * world.
+ *
+ * If the current running priority is in the secure range, or if there are
+ * outstanding priority activations, this function does nothing.
+ *
+ * This function subscribes to the 'cm_entering_normal_world' event published by
+ * the Context Management Library.
+ */
+static void *ehf_entering_normal_world(const void *arg)
+{
+ unsigned int old_pmr, run_pri;
+ pe_exc_data_t *pe_data = this_cpu_data();
+
+ /* If the running priority is in the secure range, do nothing */
+ run_pri = plat_ic_get_running_priority();
+ if (IS_PRI_SECURE(run_pri))
+ return NULL;
+
+ /*
+ * If there are explicit activations, do nothing. The Priority Mask will
+ * be restored upon the last deactivation.
+ */
+ if (has_valid_pri_activations(pe_data))
+ return NULL;
+
+ /* Do nothing if we don't have a valid Priority Mask to restore */
+ if (pe_data->ns_pri_mask == 0U)
+ return NULL;
+
+ old_pmr = plat_ic_set_priority_mask(pe_data->ns_pri_mask);
+
+ /*
+ * When exiting secure world, the current Priority Mask must be
+ * GIC_HIGHEST_NS_PRIORITY (as set during entry), or the Non-secure
+ * priority mask set upon calling ehf_allow_ns_preemption()
+ */
+ if ((old_pmr != GIC_HIGHEST_NS_PRIORITY) &&
+ (old_pmr != pe_data->ns_pri_mask)) {
+ ERROR("Invalid Priority Mask (0x%x) restored\n", old_pmr);
+ panic();
+ }
+
+ EHF_LOG("Priority Mask: 0x%x => 0x%x\n", old_pmr, pe_data->ns_pri_mask);
+
+ pe_data->ns_pri_mask = 0;
+
+ return NULL;
+}
+
+/*
+ * Program Priority Mask to the original Non-secure priority such that
+ * Non-secure interrupts may preempt Secure execution (for example, during
+ * Yielding SMC calls). The 'preempt_ret_code' parameter indicates the Yielding
+ * SMC's return value in case the call was preempted.
+ *
+ * This API is expected to be invoked before delegating a yielding SMC to Secure
+ * EL1, i.e. within the window of Secure execution after the Non-secure context
+ * has been saved (on entry into EL3) and before the Secure context is restored
+ * (prior to entering Secure EL1).
+ */
+void ehf_allow_ns_preemption(uint64_t preempt_ret_code)
+{
+ cpu_context_t *ns_ctx;
+ unsigned int old_pmr __unused;
+ pe_exc_data_t *pe_data = this_cpu_data();
+
+ /*
+ * We should have been notified earlier of entering secure world, and
+ * therefore have stashed the Non-secure priority mask.
+ */
+ assert(pe_data->ns_pri_mask != 0U);
+
+ /* Make sure no priority levels are active when requesting this */
+ if (has_valid_pri_activations(pe_data)) {
+ ERROR("PE %lx has priority activations: 0x%x\n",
+ read_mpidr_el1(), pe_data->active_pri_bits);
+ panic();
+ }
+
+ /*
+ * Program preempted return code to x0 right away so that, if the
+ * Yielding SMC was indeed preempted before a dispatcher gets a chance
+ * to populate it, the caller would find the correct return value.
+ */
+ ns_ctx = cm_get_context(NON_SECURE);
+ assert(ns_ctx != NULL);
+ write_ctx_reg(get_gpregs_ctx(ns_ctx), CTX_GPREG_X0, preempt_ret_code);
+
+ old_pmr = plat_ic_set_priority_mask(pe_data->ns_pri_mask);
+
+ EHF_LOG("Priority Mask: 0x%x => 0x%x\n", old_pmr, pe_data->ns_pri_mask);
+
+ pe_data->ns_pri_mask = 0;
+}
+
+/*
+ * Return whether Secure execution has explicitly allowed Non-secure interrupts
+ * to preempt itself (for example, during Yielding SMC calls).
+ */
+unsigned int ehf_is_ns_preemption_allowed(void)
+{
+ unsigned int run_pri;
+ pe_exc_data_t *pe_data = this_cpu_data();
+
+ /* If running priority is in secure range, return false */
+ run_pri = plat_ic_get_running_priority();
+ if (IS_PRI_SECURE(run_pri))
+ return 0;
+
+ /*
+ * If Non-secure preemption was permitted by calling
+ * ehf_allow_ns_preemption() earlier:
+ *
+ * - There wouldn't have been priority activations;
+	 * - We would have cleared the stashed Non-secure Priority Mask.
+ */
+ if (has_valid_pri_activations(pe_data))
+ return 0;
+ if (pe_data->ns_pri_mask != 0U)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Top-level EL3 interrupt handler.
+ */
+static uint64_t ehf_el3_interrupt_handler(uint32_t id, uint32_t flags,
+ void *handle, void *cookie)
+{
+ int ret = 0;
+ uint32_t intr_raw;
+ unsigned int intr, pri, idx;
+ ehf_handler_t handler;
+
+ /*
+ * Top-level interrupt type handler from Interrupt Management Framework
+ * doesn't acknowledge the interrupt; so the interrupt ID must be
+ * invalid.
+ */
+ assert(id == INTR_ID_UNAVAILABLE);
+
+ /*
+ * Acknowledge interrupt. Proceed with handling only for valid interrupt
+	 * IDs. This situation may arise when the Interrupt Management
+	 * Framework identifies an EL3 interrupt but, before it is acknowledged
+	 * here, the interrupt is either deasserted or superseded by a
+	 * higher-priority interrupt of another type.
+ */
+ intr_raw = plat_ic_acknowledge_interrupt();
+ intr = plat_ic_get_interrupt_id(intr_raw);
+ if (intr == INTR_ID_UNAVAILABLE)
+ return 0;
+
+ /* Having acknowledged the interrupt, get the running priority */
+ pri = plat_ic_get_running_priority();
+
+ /* Check EL3 interrupt priority is in secure range */
+ assert(IS_PRI_SECURE(pri));
+
+ /*
+ * Translate the priority to a descriptor index. We do this by masking
+ * and shifting the running priority value (platform-supplied).
+ */
+ idx = pri_to_idx(pri);
+
+ /* Validate priority */
+ assert(pri == IDX_TO_PRI(idx));
+
+ handler = (ehf_handler_t) RAW_HANDLER(
+ exception_data.ehf_priorities[idx].ehf_handler);
+ if (handler == NULL) {
+ ERROR("No EL3 exception handler for priority 0x%x\n",
+ IDX_TO_PRI(idx));
+ panic();
+ }
+
+ /*
+ * Call registered handler. Pass the raw interrupt value to registered
+ * handlers.
+ */
+ ret = handler(intr_raw, flags, handle, cookie);
+
+ return (uint64_t) ret;
+}
+
+/*
+ * Initialize the EL3 exception handling.
+ */
+void __init ehf_init(void)
+{
+ unsigned int flags = 0;
+ int ret __unused;
+
+ /* Ensure EL3 interrupts are supported */
+ assert(plat_ic_has_interrupt_type(INTR_TYPE_EL3));
+
+ /*
+	 * Make sure that the active-priority bit field (ehf_pri_bits_t) has
+	 * enough bits to represent the whole priority array.
+ */
+ assert(exception_data.num_priorities <= (sizeof(ehf_pri_bits_t) * 8U));
+
+ assert(exception_data.ehf_priorities != NULL);
+
+ /*
+ * Bit 7 of GIC priority must be 0 for secure interrupts. This means
+ * platforms must use at least 1 of the remaining 7 bits.
+ */
+	assert((exception_data.pri_bits >= 1U) &&
+		(exception_data.pri_bits < 8U));
+
+ /* Route EL3 interrupts when in Non-secure. */
+ set_interrupt_rm_flag(flags, NON_SECURE);
+
+ /*
+ * Route EL3 interrupts when in secure, only when SPMC is not present
+ * in S-EL2.
+ */
+#if !(defined(SPD_spmd) && (SPMD_SPM_AT_SEL2 == 1))
+ set_interrupt_rm_flag(flags, SECURE);
+#endif /* !(defined(SPD_spmd) && (SPMD_SPM_AT_SEL2 == 1)) */
+
+ /* Register handler for EL3 interrupts */
+ ret = register_interrupt_type_handler(INTR_TYPE_EL3,
+ ehf_el3_interrupt_handler, flags);
+ assert(ret == 0);
+}
+
+/*
+ * Register a handler at the supplied priority. Registration is allowed only if
+ * a handler hasn't been registered before, or one wasn't provided at build
+ * time. The priority for which the handler is being registered must also accord
+ * with the platform-supplied data.
+ */
+void ehf_register_priority_handler(unsigned int pri, ehf_handler_t handler)
+{
+ unsigned int idx;
+
+ /* Sanity check for handler */
+ assert(handler != NULL);
+
+ /* Handler ought to be 4-byte aligned */
+ assert((((uintptr_t) handler) & 3U) == 0U);
+
+ /* Ensure we register for valid priority */
+ idx = pri_to_idx(pri);
+ assert(idx < exception_data.num_priorities);
+ assert(IDX_TO_PRI(idx) == pri);
+
+	/* Panic if a handler was already registered */
+ if (exception_data.ehf_priorities[idx].ehf_handler != EHF_NO_HANDLER_) {
+ ERROR("Handler already registered for priority 0x%x\n", pri);
+ panic();
+ }
+
+ /*
+ * Install handler, and retain the valid bit. We assume that the handler
+ * is 4-byte aligned, which is usually the case.
+ */
+ exception_data.ehf_priorities[idx].ehf_handler =
+ (((uintptr_t) handler) | EHF_PRI_VALID_);
+
+ EHF_LOG("register pri=0x%x handler=%p\n", pri, handler);
+}
+
+SUBSCRIBE_TO_EVENT(cm_entering_normal_world, ehf_entering_normal_world);
+SUBSCRIBE_TO_EVENT(cm_exited_normal_world, ehf_exited_normal_world);
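A hedged sketch of how a dispatcher typically uses the EHF interfaces above: it registers a handler for a platform-reserved secure priority at initialisation, and brackets an explicit (non-interrupt) dispatch to a lower Secure EL with ehf_activate_priority()/ehf_deactivate_priority(). PLAT_MY_DISP_PRI and the my_disp_* names are hypothetical; real dispatchers such as SDEI follow the same pattern with their own priorities.

#include <stdint.h>

#include <bl31/ehf.h>

/* Hypothetical secure priority reserved by the platform for this dispatcher. */
#define PLAT_MY_DISP_PRI	0x10

/* Invoked by ehf_el3_interrupt_handler() for interrupts at this priority. */
static int my_disp_el3_interrupt(uint32_t intr_raw, uint32_t flags,
				 void *handle, void *cookie)
{
	/* ... acknowledge the event and hand it to a lower Secure EL ... */

	return 0;
}

/* Hypothetical explicit dispatch, e.g. triggered from an SMC handler. */
static void my_disp_explicit_dispatch(void)
{
	/* Mark the priority active while the event runs at a lower Secure EL. */
	ehf_activate_priority(PLAT_MY_DISP_PRI);

	/* ... save the current context, prepare and enter the Secure EL ... */

	/* Once the dispatch has concluded, drop the activation again. */
	ehf_deactivate_priority(PLAT_MY_DISP_PRI);
}

static void my_disp_init(void)
{
	ehf_register_priority_handler(PLAT_MY_DISP_PRI, my_disp_el3_interrupt);
}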
diff --git a/bl31/interrupt_mgmt.c b/bl31/interrupt_mgmt.c
new file mode 100644
index 0000000..68c7f10
--- /dev/null
+++ b/bl31/interrupt_mgmt.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <common/bl_common.h>
+#include <bl31/interrupt_mgmt.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <plat/common/platform.h>
+
+/*******************************************************************************
+ * Local structure and corresponding array to keep track of the state of the
+ * registered interrupt handlers for each interrupt type.
+ * The field descriptions are:
+ *
+ * 'scr_el3[2]' : Mapping of the routing model in the 'flags' field to the
+ * value of the SCR_EL3.IRQ or FIQ bit for each security state.
+ * There are two instances of this field corresponding to the
+ * two security states.
+ *
+ * 'flags' : Bit[0], Routing model for this interrupt type when execution is
+ * not in EL3 in the secure state. '1' implies that this
+ * interrupt will be routed to EL3. '0' implies that this
+ * interrupt will be routed to the current exception level.
+ *
+ * Bit[1], Routing model for this interrupt type when execution is
+ * not in EL3 in the non-secure state. '1' implies that this
+ * interrupt will be routed to EL3. '0' implies that this
+ * interrupt will be routed to the current exception level.
+ *
+ * All other bits are reserved and SBZ.
+ ******************************************************************************/
+typedef struct intr_type_desc {
+ interrupt_type_handler_t handler;
+ u_register_t scr_el3[2];
+ uint32_t flags;
+} intr_type_desc_t;
+
+static intr_type_desc_t intr_type_descs[MAX_INTR_TYPES];
+
+/*******************************************************************************
+ * This function validates the interrupt type.
+ ******************************************************************************/
+static int32_t validate_interrupt_type(uint32_t type)
+{
+ if (plat_ic_has_interrupt_type(type)) {
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*******************************************************************************
+ * This function validates the routing model for this type of interrupt.
+ ******************************************************************************/
+static int32_t validate_routing_model(uint32_t type, uint32_t flags)
+{
+ uint32_t rm_flags = (flags >> INTR_RM_FLAGS_SHIFT) & INTR_RM_FLAGS_MASK;
+
+ if (type == INTR_TYPE_S_EL1)
+ return validate_sel1_interrupt_rm(rm_flags);
+
+ if (type == INTR_TYPE_NS)
+ return validate_ns_interrupt_rm(rm_flags);
+
+ if (type == INTR_TYPE_EL3)
+ return validate_el3_interrupt_rm(rm_flags);
+
+ return -EINVAL;
+}
+
+/*******************************************************************************
+ * This function returns the cached copy of the SCR_EL3 which contains the
+ * routing model (expressed through the IRQ and FIQ bits) for a security state
+ * which was stored through a call to 'set_routing_model()' earlier.
+ ******************************************************************************/
+u_register_t get_scr_el3_from_routing_model(uint32_t security_state)
+{
+ u_register_t scr_el3;
+
+ assert(sec_state_is_valid(security_state));
+ scr_el3 = intr_type_descs[INTR_TYPE_NS].scr_el3[security_state];
+ scr_el3 |= intr_type_descs[INTR_TYPE_S_EL1].scr_el3[security_state];
+ scr_el3 |= intr_type_descs[INTR_TYPE_EL3].scr_el3[security_state];
+ return scr_el3;
+}
+
+/*******************************************************************************
+ * This function uses the 'interrupt_type_flags' parameter to obtain the value
+ * of the trap bit (IRQ/FIQ) in the SCR_EL3 for a security state for this
+ * interrupt type. It uses it to update the SCR_EL3 in the cpu context and the
+ * 'intr_type_desc' for that security state.
+ ******************************************************************************/
+static void set_scr_el3_from_rm(uint32_t type,
+ uint32_t interrupt_type_flags,
+ uint32_t security_state)
+{
+ uint32_t flag, bit_pos;
+
+ flag = get_interrupt_rm_flag(interrupt_type_flags, security_state);
+ bit_pos = plat_interrupt_type_to_line(type, security_state);
+ intr_type_descs[type].scr_el3[security_state] = (u_register_t)flag << bit_pos;
+
+ /*
+ * Update scr_el3 only if there is a context available. If not, it
+ * will be updated later during context initialization which will obtain
+ * the scr_el3 value to be used via get_scr_el3_from_routing_model()
+ */
+ if (cm_get_context(security_state) != NULL)
+ cm_write_scr_el3_bit(security_state, bit_pos, flag);
+}
+
+/*******************************************************************************
+ * This function validates the routing model specified in the 'flags' and
+ * updates internal data structures to reflect the new routing model. It also
+ * updates the copy of SCR_EL3 for each security state with the new routing
+ * model in the 'cpu_context' structure for this cpu.
+ ******************************************************************************/
+int32_t set_routing_model(uint32_t type, uint32_t flags)
+{
+ int32_t rc;
+
+ rc = validate_interrupt_type(type);
+ if (rc != 0)
+ return rc;
+
+ rc = validate_routing_model(type, flags);
+ if (rc != 0)
+ return rc;
+
+ /* Update the routing model in internal data structures */
+ intr_type_descs[type].flags = flags;
+ set_scr_el3_from_rm(type, flags, SECURE);
+ set_scr_el3_from_rm(type, flags, NON_SECURE);
+
+ return 0;
+}
+
+/******************************************************************************
+ * This function disables the routing model of interrupt 'type' from the
+ * specified 'security_state' on the local core. The disable is in effect
+ * till the core powers down or till the next enable for that interrupt
+ * type.
+ *****************************************************************************/
+int disable_intr_rm_local(uint32_t type, uint32_t security_state)
+{
+ uint32_t bit_pos, flag;
+
+ assert(intr_type_descs[type].handler != NULL);
+
+ flag = get_interrupt_rm_flag(INTR_DEFAULT_RM, security_state);
+
+ bit_pos = plat_interrupt_type_to_line(type, security_state);
+ cm_write_scr_el3_bit(security_state, bit_pos, flag);
+
+ return 0;
+}
+
+/******************************************************************************
+ * This function enables the routing model of interrupt 'type' from the
+ * specified 'security_state' on the local core.
+ *****************************************************************************/
+int enable_intr_rm_local(uint32_t type, uint32_t security_state)
+{
+ uint32_t bit_pos, flag;
+
+ assert(intr_type_descs[type].handler != NULL);
+
+ flag = get_interrupt_rm_flag(intr_type_descs[type].flags,
+ security_state);
+
+ bit_pos = plat_interrupt_type_to_line(type, security_state);
+ cm_write_scr_el3_bit(security_state, bit_pos, flag);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * This function registers a handler for the 'type' of interrupt specified. It
+ * also validates the routing model specified in the 'flags' for this type of
+ * interrupt.
+ ******************************************************************************/
+int32_t register_interrupt_type_handler(uint32_t type,
+ interrupt_type_handler_t handler,
+ uint32_t flags)
+{
+ int32_t rc;
+
+ /* Validate the 'handler' parameter */
+ if (handler == NULL)
+ return -EINVAL;
+
+ /* Validate the 'flags' parameter */
+ if ((flags & INTR_TYPE_FLAGS_MASK) != 0U)
+ return -EINVAL;
+
+ /* Check if a handler has already been registered */
+ if (intr_type_descs[type].handler != NULL)
+ return -EALREADY;
+
+ rc = set_routing_model(type, flags);
+ if (rc != 0)
+ return rc;
+
+ /* Save the handler */
+ intr_type_descs[type].handler = handler;
+
+ return 0;
+}
+
+/*******************************************************************************
+ * This function is called when an interrupt is generated and returns the
+ * handler for the interrupt type (if registered). It returns NULL if the
+ * interrupt type is not supported or its handler has not been registered.
+ ******************************************************************************/
+interrupt_type_handler_t get_interrupt_type_handler(uint32_t type)
+{
+ if (validate_interrupt_type(type) != 0)
+ return NULL;
+
+ return intr_type_descs[type].handler;
+}
+
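Finally, a hedged sketch of how a Secure Payload Dispatcher typically registers for Secure-EL1 interrupts with the framework above: it builds routing flags with set_interrupt_rm_flag() so that S-EL1 interrupts taken in the Non-secure state are routed to EL3, then installs its handler. The my_spd_* names are hypothetical.

#include <stdint.h>

#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>

/* Hypothetical handler entered when an S-EL1 interrupt is routed to EL3. */
static uint64_t my_spd_sel1_interrupt_handler(uint32_t id, uint32_t flags,
					      void *handle, void *cookie)
{
	/* ... save the NS context and pass the interrupt to the Secure Payload ... */

	return 0U;
}

void my_spd_register_interrupts(void)
{
	uint32_t flags = 0U;
	int32_t rc;

	/* Route S-EL1 interrupts taken in the Non-secure state to EL3. */
	set_interrupt_rm_flag(flags, NON_SECURE);

	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     my_spd_sel1_interrupt_handler,
					     flags);
	if (rc != 0) {
		ERROR("Failed to register S-EL1 interrupt handler (%d)\n", rc);
		panic();
	}
}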