Diffstat (limited to 'bl31/aarch64')
-rw-r--r--  bl31/aarch64/bl31_entrypoint.S     | 242
-rw-r--r--  bl31/aarch64/crash_reporting.S     | 477
-rw-r--r--  bl31/aarch64/ea_delegate.S         | 320
-rw-r--r--  bl31/aarch64/runtime_exceptions.S  | 631
4 files changed, 1670 insertions(+), 0 deletions(-)
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
new file mode 100644
index 0000000..b0c46dc
--- /dev/null
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <common/bl_common.h>
+#include <el3_common_macros.S>
+#include <lib/pmf/aarch64/pmf_asm_macros.S>
+#include <lib/runtime_instr.h>
+#include <lib/xlat_tables/xlat_mmu_helpers.h>
+
+ .globl bl31_entrypoint
+ .globl bl31_warm_entrypoint
+
+ /* -----------------------------------------------------
+ * bl31_entrypoint() is the cold boot entrypoint,
+ * executed only by the primary cpu.
+ * -----------------------------------------------------
+ */
+
+func bl31_entrypoint
+ /* ---------------------------------------------------------------
+ * Stash the previous bootloader arguments x0 - x3 for later use.
+ * ---------------------------------------------------------------
+ */
+ mov x20, x0
+ mov x21, x1
+ mov x22, x2
+ mov x23, x3
+
+#if !RESET_TO_BL31
+ /* ---------------------------------------------------------------------
+ * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
+ * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
+ * and primary/secondary CPU logic should not be executed in this case.
+ *
+ * Also, assume that the previous bootloader has already initialised the
+ * SCTLR_EL3, including the endianness, and has initialised the memory.
+ * ---------------------------------------------------------------------
+ */
+ el3_entrypoint_common \
+ _init_sctlr=0 \
+ _warm_boot_mailbox=0 \
+ _secondary_cold_boot=0 \
+ _init_memory=0 \
+ _init_c_runtime=1 \
+ _exception_vectors=runtime_exceptions \
+ _pie_fixup_size=BL31_LIMIT - BL31_BASE
+#else
+
+ /* ---------------------------------------------------------------------
+ * For RESET_TO_BL31 systems which have a programmable reset address,
+ * bl31_entrypoint() is executed only on the cold boot path so we can
+ * skip the warm boot mailbox mechanism.
+ * ---------------------------------------------------------------------
+ */
+ el3_entrypoint_common \
+ _init_sctlr=1 \
+ _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \
+ _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \
+ _init_memory=1 \
+ _init_c_runtime=1 \
+ _exception_vectors=runtime_exceptions \
+ _pie_fixup_size=BL31_LIMIT - BL31_BASE
+
+#if !RESET_TO_BL31_WITH_PARAMS
+ /* ---------------------------------------------------------------------
+ * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
+ * there's no argument to relay from a previous bootloader. Zero the
+ * arguments passed to the platform layer to reflect that.
+ * ---------------------------------------------------------------------
+ */
+ mov x20, 0
+ mov x21, 0
+ mov x22, 0
+ mov x23, 0
+#endif /* RESET_TO_BL31_WITH_PARAMS */
+#endif /* RESET_TO_BL31 */
+
+ /* --------------------------------------------------------------------
+ * Perform BL31 setup
+ * --------------------------------------------------------------------
+ */
+ mov x0, x20
+ mov x1, x21
+ mov x2, x22
+ mov x3, x23
+ bl bl31_setup
+
+#if ENABLE_PAUTH
+ /* --------------------------------------------------------------------
+ * Program APIAKey_EL1 and enable pointer authentication
+ * --------------------------------------------------------------------
+ */
+ bl pauth_init_enable_el3
+#endif /* ENABLE_PAUTH */
+
+ /* --------------------------------------------------------------------
+ * Jump to main function
+ * --------------------------------------------------------------------
+ */
+ bl bl31_main
+
+ /* --------------------------------------------------------------------
+ * Clean the .data & .bss sections to main memory. This ensures
+ * that any global data which was initialised by the primary CPU
+ * is visible to secondary CPUs before they enable their data
+ * caches and participate in coherency.
+ * --------------------------------------------------------------------
+ */
+ adrp x0, __DATA_START__
+ add x0, x0, :lo12:__DATA_START__
+ adrp x1, __DATA_END__
+ add x1, x1, :lo12:__DATA_END__
+ sub x1, x1, x0
+ bl clean_dcache_range
+
+ adrp x0, __BSS_START__
+ add x0, x0, :lo12:__BSS_START__
+ adrp x1, __BSS_END__
+ add x1, x1, :lo12:__BSS_END__
+ sub x1, x1, x0
+ bl clean_dcache_range
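+
+	/*
+	 * Note: the adrp/:lo12: pairs above materialise each symbol's
+	 * address position-independently (4KB page base plus low 12 bits),
+	 * so the flush works wherever BL31 was loaded or PIE-fixed-up.
+	 */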
+
+ b el3_exit
+endfunc bl31_entrypoint
+
+ /* --------------------------------------------------------------------
+ * This CPU has been physically powered up. It is either resuming from
+ * suspend or has simply been turned on. In both cases, call the BL31
+ * warmboot entrypoint
+ * --------------------------------------------------------------------
+ */
+func bl31_warm_entrypoint
+#if ENABLE_RUNTIME_INSTRUMENTATION
+
+ /*
+ * This timestamp update happens with cache off. The next
+ * timestamp collection will need to do cache maintenance prior
+ * to timestamp update.
+ */
+ pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
+ mrs x1, cntpct_el0
+ str x1, [x0]
+#endif
+
+ /*
+ * On the warm boot path, most of the EL3 initialisations performed by
+ * 'el3_entrypoint_common' must be skipped:
+ *
+ * - Only when the platform bypasses the BL1/BL31 entrypoint by
+ * programming the reset address do we need to initialise SCTLR_EL3.
+	 * In other cases, we assume this has been taken care of by the
+ * entrypoint code.
+ *
+ * - No need to determine the type of boot, we know it is a warm boot.
+ *
+ * - Do not try to distinguish between primary and secondary CPUs, this
+ * notion only exists for a cold boot.
+ *
+ * - No need to initialise the memory or the C runtime environment,
+ * it has been done once and for all on the cold boot path.
+ */
+ el3_entrypoint_common \
+ _init_sctlr=PROGRAMMABLE_RESET_ADDRESS \
+ _warm_boot_mailbox=0 \
+ _secondary_cold_boot=0 \
+ _init_memory=0 \
+ _init_c_runtime=0 \
+ _exception_vectors=runtime_exceptions \
+ _pie_fixup_size=0
+
+ /*
+ * We're about to enable MMU and participate in PSCI state coordination.
+ *
+ * The PSCI implementation invokes platform routines that enable CPUs to
+ * participate in coherency. On a system where CPUs are not
+ * cache-coherent without appropriate platform specific programming,
+ * having caches enabled until such time might lead to coherency issues
+ * (resulting from stale data getting speculatively fetched, among
+ * others). Therefore we keep data caches disabled even after enabling
+ * the MMU for such platforms.
+ *
+ * On systems with hardware-assisted coherency, or on single cluster
+ * platforms, such platform specific programming is not required to
+ * enter coherency (as CPUs already are); and there's no reason to have
+ * caches disabled either.
+ */
+#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
+ mov x0, xzr
+#else
+ mov x0, #DISABLE_DCACHE
+#endif
+ bl bl31_plat_enable_mmu
+
+#if ENABLE_RME
+ /*
+	 * At warm boot the GPT data structures have already been initialized
+	 * in RAM, but the sysregs for this CPU need to be initialized. Note
+	 * that GPT accesses are controlled by attributes in GPCCR and do not
+	 * depend on the SCR_EL3.C bit.
+ */
+ bl gpt_enable
+ cbz x0, 1f
+ no_ret plat_panic_handler
+1:
+#endif
+
+#if ENABLE_PAUTH
+ /* --------------------------------------------------------------------
+ * Program APIAKey_EL1 and enable pointer authentication
+ * --------------------------------------------------------------------
+ */
+ bl pauth_init_enable_el3
+#endif /* ENABLE_PAUTH */
+
+ bl psci_warmboot_entrypoint
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
+ mov x19, x0
+
+ /*
+ * Invalidate before updating timestamp to ensure previous timestamp
+ * updates on the same cache line with caches disabled are properly
+ * seen by the same core. Without the cache invalidate, the core might
+ * write into a stale cache line.
+ */
+ mov x1, #PMF_TS_SIZE
+ mov x20, x30
+ bl inv_dcache_range
+ mov x30, x20
+
+ mrs x0, cntpct_el0
+ str x0, [x19]
+#endif
+ b el3_exit
+endfunc bl31_warm_entrypoint
diff --git a/bl31/aarch64/crash_reporting.S b/bl31/aarch64/crash_reporting.S
new file mode 100644
index 0000000..d56b513
--- /dev/null
+++ b/bl31/aarch64/crash_reporting.S
@@ -0,0 +1,477 @@
+/*
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat_macros.S>
+#include <platform_def.h>
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/utils_def.h>
+
+ .globl report_unhandled_exception
+ .globl report_unhandled_interrupt
+ .globl el3_panic
+ .globl elx_panic
+
+#if CRASH_REPORTING
+
+ /* ------------------------------------------------------
+ * The below section deals with dumping the system state
+ * when an unhandled exception is taken in EL3.
+ * The layout and the names of the registers which will
+	 * be dumped during an unhandled exception are given below.
+ * ------------------------------------------------------
+ */
+.section .rodata.crash_prints, "aS"
+print_spacer:
+ .asciz " = 0x"
+
+gp_regs:
+ .asciz "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",\
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",\
+ "x16", "x17", "x18", "x19", "x20", "x21", "x22",\
+ "x23", "x24", "x25", "x26", "x27", "x28", "x29", ""
+el3_sys_regs:
+ .asciz "scr_el3", "sctlr_el3", "cptr_el3", "tcr_el3",\
+ "daif", "mair_el3", "spsr_el3", "elr_el3", "ttbr0_el3",\
+ "esr_el3", "far_el3", ""
+
+non_el3_sys_regs:
+ .asciz "spsr_el1", "elr_el1", "spsr_abt", "spsr_und",\
+ "spsr_irq", "spsr_fiq", "sctlr_el1", "actlr_el1", "cpacr_el1",\
+ "csselr_el1", "sp_el1", "esr_el1", "ttbr0_el1", "ttbr1_el1",\
+ "mair_el1", "amair_el1", "tcr_el1", "tpidr_el1", "tpidr_el0",\
+ "tpidrro_el0", "par_el1", "mpidr_el1", "afsr0_el1", "afsr1_el1",\
+ "contextidr_el1", "vbar_el1", "cntp_ctl_el0", "cntp_cval_el0",\
+ "cntv_ctl_el0", "cntv_cval_el0", "cntkctl_el1", "sp_el0", "isr_el1", ""
+
+#if CTX_INCLUDE_AARCH32_REGS
+aarch32_regs:
+ .asciz "dacr32_el2", "ifsr32_el2", ""
+#endif /* CTX_INCLUDE_AARCH32_REGS */
+
+panic_msg:
+ .asciz "PANIC in EL3.\nx30"
+excpt_msg:
+ .asciz "Unhandled Exception in EL3.\nx30"
+intr_excpt_msg:
+ .ascii "Unhandled Interrupt Exception in EL3.\n"
+x30_msg:
+ .asciz "x30"
+excpt_msg_el:
+ .asciz "Unhandled Exception from EL"
+
+ /*
+ * Helper function to print from crash buf.
+	 * The print loop is controlled by the buf size and
+	 * the ascii reg name list passed in x6. The
+ * function returns the crash buf address in x0.
+ * Clobbers : x0 - x7, sp
+ */
+func size_controlled_print
+ /* Save the lr */
+ mov sp, x30
+ /* load the crash buf address */
+ mrs x7, tpidr_el3
+test_size_list:
+ /* Calculate x5 always as it will be clobbered by asm_print_hex */
+ mrs x5, tpidr_el3
+ add x5, x5, #CPU_DATA_CRASH_BUF_SIZE
+ /* Test whether we have reached end of crash buf */
+ cmp x7, x5
+ b.eq exit_size_print
+ ldrb w4, [x6]
+ /* Test whether we are at end of list */
+ cbz w4, exit_size_print
+ mov x4, x6
+ /* asm_print_str updates x4 to point to next entry in list */
+ bl asm_print_str
+ /* x0 = number of symbols printed + 1 */
+ sub x0, x4, x6
+ /* update x6 with the updated list pointer */
+ mov x6, x4
+ bl print_alignment
+ ldr x4, [x7], #REGSZ
+ bl asm_print_hex
+ bl asm_print_newline
+ b test_size_list
+exit_size_print:
+ mov x30, sp
+ ret
+endfunc size_controlled_print
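+
+	/*
+	 * Illustrative output of size_controlled_print (register values are
+	 * examples only): each loop iteration emits one line of the form
+	 *
+	 *	x19	 = 0x0000000080000000
+	 *
+	 * and the loop stops when either the name list terminates or a full
+	 * crash buf worth of values has been printed.
+	 */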
+
+ /* -----------------------------------------------------
+ * This function calculates and prints required number
+ * of space characters followed by "= 0x", based on the
+ * length of ascii register name.
+ * x0: length of ascii register name + 1
+ * ------------------------------------------------------
+ */
+func print_alignment
+ /* The minimum ascii length is 3, e.g. for "x0" */
+ adr x4, print_spacer - 3
+ add x4, x4, x0
+ b asm_print_str
+endfunc print_alignment
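+
+	/*
+	 * Worked example (for illustration): for the two-character name "x0",
+	 * x0 arrives as 3 (name length + 1), so 'print_spacer - 3 + 3' points
+	 * at the start of print_spacer; longer names index further into the
+	 * string, printing fewer leading characters so that the "= 0x" columns
+	 * of successive lines stay aligned.
+	 */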
+
+ /*
+ * Helper function to store x8 - x15 registers to
+	 * Helper function to store the x8 - x15 registers in
+	 * the crash buf. The system register values are
+	 * copied to x8 - x15 by the caller, and this function
+	 * then copies them to the crash buf.
+	 * x0 points to the crash buf. It then calls
+ * Clobbers : x0 - x7, sp
+ */
+func str_in_crash_buf_print
+ /* restore the crash buf address in x0 */
+ mrs x0, tpidr_el3
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #REGSZ * 2]
+ stp x12, x13, [x0, #REGSZ * 4]
+ stp x14, x15, [x0, #REGSZ * 6]
+ b size_controlled_print
+endfunc str_in_crash_buf_print
+
+ /* ------------------------------------------------------
+ * This macro calculates the offset to crash buf from
+ * cpu_data and stores it in tpidr_el3. It also saves x0
+ * and x1 in the crash buf by using sp as a temporary
+ * register.
+ * ------------------------------------------------------
+ */
+ .macro prepare_crash_buf_save_x0_x1
+ /* we can corrupt this reg to free up x0 */
+ mov sp, x0
+ /* tpidr_el3 contains the address to cpu_data structure */
+ mrs x0, tpidr_el3
+ /* Calculate the Crash buffer offset in cpu_data */
+ add x0, x0, #CPU_DATA_CRASH_BUF_OFFSET
+ /* Store crash buffer address in tpidr_el3 */
+ msr tpidr_el3, x0
+ str x1, [x0, #REGSZ]
+ mov x1, sp
+ str x1, [x0]
+ .endm
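+
+	/*
+	 * Resulting crash buf layout (sketch): after this macro and the
+	 * stores performed in do_crash_reporting, the buffer holds x0 and
+	 * x1 at offsets 0 and REGSZ, x2 - x6 from offset REGSZ * 2, and
+	 * x30 at offset REGSZ * 7. The same buffer is then reused in
+	 * groups of eight registers for the remaining GP and system
+	 * register dumps.
+	 */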
+
+ /* -----------------------------------------------------
+	 * This function reports a crash (if crash reporting
+	 * is enabled) when an unhandled exception
+ * occurs. It prints the CPU state via the crash console
+ * making use of the crash buf. This function will
+ * not return.
+ * -----------------------------------------------------
+ */
+func report_unhandled_exception
+ prepare_crash_buf_save_x0_x1
+ adr x0, excpt_msg
+ mov sp, x0
+ /* This call will not return */
+ b do_crash_reporting
+endfunc report_unhandled_exception
+
+ /* -----------------------------------------------------
+	 * This function reports a crash (if crash reporting
+	 * is enabled) when an unhandled interrupt
+ * occurs. It prints the CPU state via the crash console
+ * making use of the crash buf. This function will
+ * not return.
+ * -----------------------------------------------------
+ */
+func report_unhandled_interrupt
+ prepare_crash_buf_save_x0_x1
+ adr x0, intr_excpt_msg
+ mov sp, x0
+ /* This call will not return */
+ b do_crash_reporting
+endfunc report_unhandled_interrupt
+
+ /* -----------------------------------------------------
+	 * This function reports a crash from a lower
+	 * exception level (if crash reporting is enabled) when
+	 * panic() is invoked from the C runtime.
+ * It prints the CPU state via the crash console making
+ * use of 'cpu_context' structure where general purpose
+ * registers are saved and the crash buf.
+ * This function will not return.
+ *
+ * x0: Exception level
+ * -----------------------------------------------------
+ */
+func elx_panic
+ msr spsel, #MODE_SP_ELX
+ mov x8, x0
+
+ /* Print the crash message */
+ adr x4, excpt_msg_el
+ bl asm_print_str
+
+ /* Print exception level */
+ add x0, x8, #'0'
+ bl plat_crash_console_putc
+ bl asm_print_newline
+
+ /* Report x0 - x29 values stored in 'gpregs_ctx' structure */
+ /* Store the ascii list pointer in x6 */
+ adr x6, gp_regs
+ add x7, sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0
+
+print_next:
+ ldrb w4, [x6]
+ /* Test whether we are at end of list */
+ cbz w4, print_x30
+ mov x4, x6
+ /* asm_print_str updates x4 to point to next entry in list */
+ bl asm_print_str
+ /* x0 = number of symbols printed + 1 */
+ sub x0, x4, x6
+ /* Update x6 with the updated list pointer */
+ mov x6, x4
+ bl print_alignment
+ ldr x4, [x7], #REGSZ
+ bl asm_print_hex
+ bl asm_print_newline
+ b print_next
+
+print_x30:
+ adr x4, x30_msg
+ bl asm_print_str
+
+ /* Print spaces to align "x30" string */
+ mov x0, #4
+ bl print_alignment
+
+ /* Report x30 */
+ ldr x4, [x7]
+
+ /* ----------------------------------------------------------------
+	 * A different virtual address space size can be defined for each EL.
+ * Ensure that we use the proper one by reading the corresponding
+ * TCR_ELx register.
+ * ----------------------------------------------------------------
+ */
+ cmp x8, #MODE_EL2
+ b.lt from_el1 /* EL1 */
+ mrs x2, sctlr_el2
+ mrs x1, tcr_el2
+
+ /* ----------------------------------------------------------------
+ * Check if pointer authentication is enabled at the specified EL.
+ * If it isn't, we can then skip stripping a PAC code.
+ * ----------------------------------------------------------------
+ */
+test_pauth:
+ tst x2, #(SCTLR_EnIA_BIT | SCTLR_EnIB_BIT)
+ b.eq no_pauth
+
+ /* Demangle address */
+ and x1, x1, #0x3F /* T0SZ = TCR_ELx[5:0] */
+ sub x1, x1, #64
+ neg x1, x1 /* bottom_pac_bit = 64 - T0SZ */
+ mov x2, #-1
+ lsl x2, x2, x1
+ bic x4, x4, x2
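+
+	/*
+	 * Worked example (illustrative): with T0SZ = 16, i.e. a 48-bit
+	 * virtual address space, bottom_pac_bit = 64 - 16 = 48, the mask
+	 * becomes 0xFFFF000000000000, and the bic clears the PAC field in
+	 * bits [63:48] while leaving the 48 address bits of x30 intact.
+	 */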
+
+no_pauth:
+ bl asm_print_hex
+ bl asm_print_newline
+
+ /* tpidr_el3 contains the address to cpu_data structure */
+ mrs x0, tpidr_el3
+ /* Calculate the Crash buffer offset in cpu_data */
+ add x0, x0, #CPU_DATA_CRASH_BUF_OFFSET
+ /* Store crash buffer address in tpidr_el3 */
+ msr tpidr_el3, x0
+
+ /* Print the rest of crash dump */
+ b print_el3_sys_regs
+
+from_el1:
+ mrs x2, sctlr_el1
+ mrs x1, tcr_el1
+ b test_pauth
+endfunc elx_panic
+
+ /* -----------------------------------------------------
+	 * This function reports a crash (if crash reporting
+	 * is enabled) when panic() is invoked from
+	 * the C runtime. It prints the CPU state via the crash
+ * console making use of the crash buf. This function
+ * will not return.
+ * -----------------------------------------------------
+ */
+func el3_panic
+ msr spsel, #MODE_SP_ELX
+ prepare_crash_buf_save_x0_x1
+ adr x0, panic_msg
+ mov sp, x0
+ /* Fall through to 'do_crash_reporting' */
+
+ /* ------------------------------------------------------------
+	 * The common crash reporting functionality. It requires that x0
+	 * and x1 have already been stored in the crash buf, that sp points
+	 * to the crash message and that tpidr_el3 contains the crash buf
+	 * address.
+ * The function does the following:
+ * - Retrieve the crash buffer from tpidr_el3
+	 * - Store x2 to x6 and x30 in the crash buffer
+ * - Initialise the crash console.
+ * - Print the crash message by using the address in sp.
+ * - Print x30 value to the crash console.
+ * - Print x0 - x7 from the crash buf to the crash console.
+ * - Print x8 - x29 (in groups of 8 registers) using the
+ * crash buf to the crash console.
+ * - Print el3 sys regs (in groups of 8 registers) using the
+ * crash buf to the crash console.
+ * - Print non el3 sys regs (in groups of 8 registers) using
+ * the crash buf to the crash console.
+ * ------------------------------------------------------------
+ */
+do_crash_reporting:
+ /* Retrieve the crash buf from tpidr_el3 */
+ mrs x0, tpidr_el3
+ /* Store x2 - x6, x30 in the crash buffer */
+ stp x2, x3, [x0, #REGSZ * 2]
+ stp x4, x5, [x0, #REGSZ * 4]
+ stp x6, x30, [x0, #REGSZ * 6]
+ /* Initialize the crash console */
+ bl plat_crash_console_init
+ /* Verify the console is initialized */
+ cbz x0, crash_panic
+ /* Print the crash message. sp points to the crash message */
+ mov x4, sp
+ bl asm_print_str
+ /* Print spaces to align "x30" string */
+ mov x0, #4
+ bl print_alignment
+ /* Load the crash buf address */
+ mrs x0, tpidr_el3
+ /* Report x30 first from the crash buf */
+ ldr x4, [x0, #REGSZ * 7]
+
+#if ENABLE_PAUTH
+ /* Demangle address */
+ xpaci x4
+#endif
+ bl asm_print_hex
+ bl asm_print_newline
+ /* Load the crash buf address */
+ mrs x0, tpidr_el3
+	/* Now move x7 into the crash buf */
+ str x7, [x0, #REGSZ * 7]
+
+ /* Report x0 - x29 values stored in crash buf */
+ /* Store the ascii list pointer in x6 */
+ adr x6, gp_regs
+ /* Print x0 to x7 from the crash buf */
+ bl size_controlled_print
+ /* Store x8 - x15 in crash buf and print */
+ bl str_in_crash_buf_print
+ /* Load the crash buf address */
+ mrs x0, tpidr_el3
+ /* Store the rest of gp regs and print */
+ stp x16, x17, [x0]
+ stp x18, x19, [x0, #REGSZ * 2]
+ stp x20, x21, [x0, #REGSZ * 4]
+ stp x22, x23, [x0, #REGSZ * 6]
+ bl size_controlled_print
+ /* Load the crash buf address */
+ mrs x0, tpidr_el3
+ stp x24, x25, [x0]
+ stp x26, x27, [x0, #REGSZ * 2]
+ stp x28, x29, [x0, #REGSZ * 4]
+ bl size_controlled_print
+
+ /* Print the el3 sys registers */
+print_el3_sys_regs:
+ adr x6, el3_sys_regs
+ mrs x8, scr_el3
+ mrs x9, sctlr_el3
+ mrs x10, cptr_el3
+ mrs x11, tcr_el3
+ mrs x12, daif
+ mrs x13, mair_el3
+ mrs x14, spsr_el3
+ mrs x15, elr_el3
+ bl str_in_crash_buf_print
+ mrs x8, ttbr0_el3
+ mrs x9, esr_el3
+ mrs x10, far_el3
+ bl str_in_crash_buf_print
+
+ /* Print the non el3 sys registers */
+ adr x6, non_el3_sys_regs
+ mrs x8, spsr_el1
+ mrs x9, elr_el1
+ mrs x10, spsr_abt
+ mrs x11, spsr_und
+ mrs x12, spsr_irq
+ mrs x13, spsr_fiq
+ mrs x14, sctlr_el1
+ mrs x15, actlr_el1
+ bl str_in_crash_buf_print
+ mrs x8, cpacr_el1
+ mrs x9, csselr_el1
+ mrs x10, sp_el1
+ mrs x11, esr_el1
+ mrs x12, ttbr0_el1
+ mrs x13, ttbr1_el1
+ mrs x14, mair_el1
+ mrs x15, amair_el1
+ bl str_in_crash_buf_print
+ mrs x8, tcr_el1
+ mrs x9, tpidr_el1
+ mrs x10, tpidr_el0
+ mrs x11, tpidrro_el0
+ mrs x12, par_el1
+ mrs x13, mpidr_el1
+ mrs x14, afsr0_el1
+ mrs x15, afsr1_el1
+ bl str_in_crash_buf_print
+ mrs x8, contextidr_el1
+ mrs x9, vbar_el1
+ mrs x10, cntp_ctl_el0
+ mrs x11, cntp_cval_el0
+ mrs x12, cntv_ctl_el0
+ mrs x13, cntv_cval_el0
+ mrs x14, cntkctl_el1
+ mrs x15, sp_el0
+ bl str_in_crash_buf_print
+ mrs x8, isr_el1
+ bl str_in_crash_buf_print
+
+#if CTX_INCLUDE_AARCH32_REGS
+ /* Print the AArch32 registers */
+ adr x6, aarch32_regs
+ mrs x8, dacr32_el2
+ mrs x9, ifsr32_el2
+ bl str_in_crash_buf_print
+#endif /* CTX_INCLUDE_AARCH32_REGS */
+
+ /* Get the cpu specific registers to report */
+ bl do_cpu_reg_dump
+ bl str_in_crash_buf_print
+
+ /* Print some platform registers */
+ plat_crash_print_regs
+
+ bl plat_crash_console_flush
+
+ /* Done reporting */
+ no_ret plat_panic_handler
+endfunc el3_panic
+
+#else /* CRASH_REPORTING */
+func report_unhandled_exception
+report_unhandled_interrupt:
+ no_ret plat_panic_handler
+endfunc report_unhandled_exception
+#endif /* CRASH_REPORTING */
+
+func crash_panic
+ no_ret plat_panic_handler
+endfunc crash_panic
diff --git a/bl31/aarch64/ea_delegate.S b/bl31/aarch64/ea_delegate.S
new file mode 100644
index 0000000..dbb3234
--- /dev/null
+++ b/bl31/aarch64/ea_delegate.S
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <bl31/ea_handle.h>
+#include <context.h>
+#include <cpu_macros.S>
+#include <lib/extensions/ras_arch.h>
+
+ .globl handle_lower_el_ea_esb
+ .globl handle_lower_el_async_ea
+ .globl enter_lower_el_sync_ea
+ .globl enter_lower_el_async_ea
+
+
+/*
+ * Function to delegate External Aborts synchronized by ESB instruction at EL3
+ * vector entry. This function assumes GP registers x0-x29 have been saved, and
+ * are available for use. It delegates the handling of the EA to platform
+ * are available for use. It delegates the handling of the EA to the platform
+ * panics. On return from this function, the original exception handler is
+ * expected to resume.
+ */
+func handle_lower_el_ea_esb
+ mov x0, #ERROR_EA_ESB
+ mrs x1, DISR_EL1
+ b ea_proceed
+endfunc handle_lower_el_ea_esb
+
+
+/*
+ * This function forms the tail end of Synchronous Exception entry from lower
+ * EL, and expects to handle Synchronous External Aborts from lower EL and CPU
+ * Implementation Defined Exceptions. If any other kind of exception is detected,
+ * then this function reports an unhandled exception.
+ *
+ * Since it's part of exception vector, this function doesn't expect any GP
+ * registers to have been saved. It delegates the handling of the EA to the platform
+ * handler, and upon successfully handling the EA, exits EL3; otherwise panics.
+ */
+func enter_lower_el_sync_ea
+ /*
+ * Explicitly save x30 so as to free up a register and to enable
+ * branching.
+ */
+ str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+ mrs x30, esr_el3
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+ /* Check for I/D aborts from lower EL */
+ cmp x30, #EC_IABORT_LOWER_EL
+ b.eq 1f
+
+ cmp x30, #EC_DABORT_LOWER_EL
+ b.eq 1f
+
+ /* Save GP registers */
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+
+ /* Get the cpu_ops pointer */
+ bl get_cpu_ops_ptr
+
+ /* Get the cpu_ops exception handler */
+ ldr x0, [x0, #CPU_E_HANDLER_FUNC]
+
+ /*
+	 * If the retrieved function pointer is NULL, this CPU does not have an
+	 * implementation defined exception handler function.
+ */
+ cbz x0, 2f
+ mrs x1, esr_el3
+ ubfx x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+ blr x0
+ b 2f
+
+1:
+ /*
+ * Save general purpose and ARMv8.3-PAuth registers (if enabled).
+ * If Secure Cycle Counter is not disabled in MDCR_EL3 when
+ * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
+ * Also set the PSTATE to a known state.
+ */
+ bl prepare_el3_entry
+
+#if ENABLE_PAUTH
+ /* Load and program APIAKey firmware key */
+ bl pauth_load_bl31_apiakey
+#endif
+
+ /* Setup exception class and syndrome arguments for platform handler */
+ mov x0, #ERROR_EA_SYNC
+ mrs x1, esr_el3
+ bl delegate_sync_ea
+
+ /* el3_exit assumes SP_EL0 on entry */
+ msr spsel, #MODE_SP_EL0
+ b el3_exit
+2:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+
+ /* Synchronous exceptions other than the above are assumed to be EA */
+ ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ no_ret report_unhandled_exception
+endfunc enter_lower_el_sync_ea
+
+
+/*
+ * This function handles SErrors from lower ELs.
+ *
+ * Since it's part of exception vector, this function doesn't expect any GP
+ * registers to have been saved. It delegates the handling of the EA to the platform
+ * handler, and upon successfully handling the EA, exits EL3; otherwise panics.
+ */
+func enter_lower_el_async_ea
+ /*
+ * Explicitly save x30 so as to free up a register and to enable
+ * branching
+ */
+ str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+handle_lower_el_async_ea:
+ /*
+ * Save general purpose and ARMv8.3-PAuth registers (if enabled).
+ * If Secure Cycle Counter is not disabled in MDCR_EL3 when
+ * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
+ * Also set the PSTATE to a known state.
+ */
+ bl prepare_el3_entry
+
+#if ENABLE_PAUTH
+ /* Load and program APIAKey firmware key */
+ bl pauth_load_bl31_apiakey
+#endif
+
+ /* Setup exception class and syndrome arguments for platform handler */
+ mov x0, #ERROR_EA_ASYNC
+ mrs x1, esr_el3
+ bl delegate_async_ea
+
+ /* el3_exit assumes SP_EL0 on entry */
+ msr spsel, #MODE_SP_EL0
+ b el3_exit
+endfunc enter_lower_el_async_ea
+
+
+/*
+ * Prelude for Synchronous External Abort handling. This function assumes that
+ * all GP registers have been saved by the caller.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ */
+func delegate_sync_ea
+#if RAS_EXTENSION
+ /*
+ * Check for Uncontainable error type. If so, route to the platform
+ * fatal error handler rather than the generic EA one.
+ */
+ ubfx x2, x1, #EABORT_SET_SHIFT, #EABORT_SET_WIDTH
+ cmp x2, #ERROR_STATUS_SET_UC
+ b.ne 1f
+
+ /* Check fault status code */
+ ubfx x3, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
+ cmp x3, #SYNC_EA_FSC
+ b.ne 1f
+
+ no_ret plat_handle_uncontainable_ea
+1:
+#endif
+
+ b ea_proceed
+endfunc delegate_sync_ea
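+
+/*
+ * For reference (illustrative): in the ESR syndrome checked above, the DFSC
+ * field occupies the low bits of the ISS and reads 0b010000 for a synchronous
+ * external abort, while the SET field encodes the error state type. Only the
+ * Uncontainable (UC) encoding is diverted to the platform fatal-error
+ * handler; everything else falls through to ea_proceed.
+ */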
+
+
+/*
+ * Prelude for Asynchronous External Abort handling. This function assumes that
+ * all GP registers have been saved by the caller.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ */
+func delegate_async_ea
+#if RAS_EXTENSION
+	/*
+	 * Check the Exception Class to ensure this is an SError, as this
+	 * function should only be invoked for SErrors. If that is not the
+	 * case, which implies either a HW error or a programming error,
+	 * panic.
+	 */
+ ubfx x2, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+	cmp	x2, #EC_SERROR
+ b.ne do_panic
+ /*
+ * Check for Implementation Defined Syndrome. If so, skip checking
+ * Uncontainable error type from the syndrome as the format is unknown.
+ */
+ tbnz x1, #SERROR_IDS_BIT, 1f
+
+ /* AET only valid when DFSC is 0x11 */
+ ubfx x2, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
+ cmp x2, #DFSC_SERROR
+ b.ne 1f
+
+ /*
+ * Check for Uncontainable error type. If so, route to the platform
+ * fatal error handler rather than the generic EA one.
+ */
+ ubfx x3, x1, #EABORT_AET_SHIFT, #EABORT_AET_WIDTH
+ cmp x3, #ERROR_STATUS_UET_UC
+ b.ne 1f
+
+ no_ret plat_handle_uncontainable_ea
+1:
+#endif
+
+ b ea_proceed
+endfunc delegate_async_ea
+
+
+/*
+ * Delegate External Abort handling to platform's EA handler. This function
+ * assumes that all GP registers have been saved by the caller.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ */
+func ea_proceed
+ /*
+ * If the ESR loaded earlier is not zero, we were processing an EA
+ * already, and this is a double fault.
+ */
+ ldr x5, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]
+ cbz x5, 1f
+ no_ret plat_handle_double_fault
+
+1:
+ /* Save EL3 state */
+ mrs x2, spsr_el3
+ mrs x3, elr_el3
+ stp x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+
+ /*
+	 * Save ESR as handling might involve lower ELs, and returning to
+ * EL3 from there would trample the original ESR.
+ */
+ mrs x4, scr_el3
+ mrs x5, esr_el3
+ stp x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+
+ /*
+ * Setup rest of arguments, and call platform External Abort handler.
+ *
+ * x0: EA reason (already in place)
+ * x1: Exception syndrome (already in place).
+ * x2: Cookie (unused for now).
+ * x3: Context pointer.
+ * x4: Flags (security state from SCR for now).
+ */
+ mov x2, xzr
+ mov x3, sp
+ ubfx x4, x4, #0, #1
+
+ /* Switch to runtime stack */
+ ldr x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+ msr spsel, #MODE_SP_EL0
+ mov sp, x5
+
+ mov x29, x30
+#if ENABLE_ASSERTIONS
+ /* Stash the stack pointer */
+ mov x28, sp
+#endif
+ bl plat_ea_handler
+
+#if ENABLE_ASSERTIONS
+ /*
+ * Error handling flows might involve long jumps; so upon returning from
+	 * the platform error handler, validate that we've completely
+ * unwound the stack.
+ */
+ mov x27, sp
+ cmp x28, x27
+ ASM_ASSERT(eq)
+#endif
+
+ /* Make SP point to context */
+ msr spsel, #MODE_SP_ELX
+
+ /* Restore EL3 state and ESR */
+ ldp x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+ msr spsr_el3, x1
+ msr elr_el3, x2
+
+ /* Restore ESR_EL3 and SCR_EL3 */
+ ldp x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+ msr scr_el3, x3
+ msr esr_el3, x4
+
+#if ENABLE_ASSERTIONS
+ cmp x4, xzr
+ ASM_ASSERT(ne)
+#endif
+
+ /* Clear ESR storage */
+ str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]
+
+ ret x29
+endfunc ea_proceed
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
new file mode 100644
index 0000000..0283553
--- /dev/null
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -0,0 +1,631 @@
+/*
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl31/ea_handle.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/runtime_svc.h>
+#include <context.h>
+#include <el3_common_macros.S>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/smccc.h>
+
+ .globl runtime_exceptions
+
+ .globl sync_exception_sp_el0
+ .globl irq_sp_el0
+ .globl fiq_sp_el0
+ .globl serror_sp_el0
+
+ .globl sync_exception_sp_elx
+ .globl irq_sp_elx
+ .globl fiq_sp_elx
+ .globl serror_sp_elx
+
+ .globl sync_exception_aarch64
+ .globl irq_aarch64
+ .globl fiq_aarch64
+ .globl serror_aarch64
+
+ .globl sync_exception_aarch32
+ .globl irq_aarch32
+ .globl fiq_aarch32
+ .globl serror_aarch32
+
+ /*
+ * Macro that prepares entry to EL3 upon taking an exception.
+ *
+ * With RAS_EXTENSION, this macro synchronizes pending errors with an ESB
+ * instruction. When an error is thus synchronized, the handling is
+	 * delegated to the platform EA handler.
+ *
+ * Without RAS_EXTENSION, this macro synchronizes pending errors using
+ * a DSB, unmasks Asynchronous External Aborts and saves X30 before
+ * setting the flag CTX_IS_IN_EL3.
+ */
+ .macro check_and_unmask_ea
+#if RAS_EXTENSION
+ /* Synchronize pending External Aborts */
+ esb
+
+ /* Unmask the SError interrupt */
+ msr daifclr, #DAIF_ABT_BIT
+
+ /*
+ * Explicitly save x30 so as to free up a register and to enable
+ * branching
+ */
+ str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+ /* Check for SErrors synchronized by the ESB instruction */
+ mrs x30, DISR_EL1
+ tbz x30, #DISR_A_BIT, 1f
+
+ /*
+ * Save general purpose and ARMv8.3-PAuth registers (if enabled).
+ * If Secure Cycle Counter is not disabled in MDCR_EL3 when
+ * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
+ * Also set the PSTATE to a known state.
+ */
+ bl prepare_el3_entry
+
+ bl handle_lower_el_ea_esb
+
+ /* Restore general purpose, PMCR_EL0 and ARMv8.3-PAuth registers */
+ bl restore_gp_pmcr_pauth_regs
+1:
+#else
+ /*
+ * For SoCs which do not implement RAS, use DSB as a barrier to
+ * synchronize pending external aborts.
+ */
+ dsb sy
+
+ /* Unmask the SError interrupt */
+ msr daifclr, #DAIF_ABT_BIT
+
+ /* Use ISB for the above unmask operation to take effect immediately */
+ isb
+
+ /*
+	 * Refer to Note 1. No need to restore X30 as both the
+	 * handle_sync_exception and handle_interrupt_exception macros which
+	 * follow this macro modify X30 anyway.
+ */
+ str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ mov x30, #1
+ str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
+ dmb sy
+#endif
+ .endm
+
+#if !RAS_EXTENSION
+ /*
+ * Note 1: The explicit DSB at the entry of various exception vectors
+ * for handling exceptions from lower ELs can inadvertently trigger an
+ * SError exception in EL3 due to pending asynchronous aborts in lower
+ * ELs. This will end up being handled by serror_sp_elx which will
+ * ultimately panic and die.
+	 * The workaround is to update a flag to indicate if the exception
+	 * truly came from EL3. This flag is allocated in the cpu_context
+	 * structure and located at offset "CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3".
+	 * This is not a bullet-proof solution to the problem at hand because
+	 * we assume the instructions following "isb" that help to update the
+	 * flag execute without causing further exceptions.
+ */
+
+ /* ---------------------------------------------------------------------
+ * This macro handles Asynchronous External Aborts.
+ * ---------------------------------------------------------------------
+ */
+ .macro handle_async_ea
+ /*
+ * Use a barrier to synchronize pending external aborts.
+ */
+ dsb sy
+
+ /* Unmask the SError interrupt */
+ msr daifclr, #DAIF_ABT_BIT
+
+ /* Use ISB for the above unmask operation to take effect immediately */
+ isb
+
+	/* Refer to Note 1 */
+ str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ mov x30, #1
+ str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
+ dmb sy
+
+ b handle_lower_el_async_ea
+ .endm
+
+ /*
+ * This macro checks if the exception was taken due to SError in EL3 or
+ * because of pending asynchronous external aborts from lower EL that got
+	 * triggered due to explicit synchronization in EL3. Refer to Note 1.
+ */
+ .macro check_if_serror_from_EL3
+ /* Assumes SP_EL3 on entry */
+ str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
+ cbnz x30, exp_from_EL3
+
+ /* Handle asynchronous external abort from lower EL */
+ b handle_lower_el_async_ea
+
+exp_from_EL3:
+ /* Jump to plat_handle_el3_ea which does not return */
+ .endm
+#endif
+
+ /* ---------------------------------------------------------------------
+ * This macro handles Synchronous exceptions.
+ * Only SMC exceptions are supported.
+ * ---------------------------------------------------------------------
+ */
+ .macro handle_sync_exception
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ /*
+ * Read the timestamp value and store it in per-cpu data. The value
+ * will be extracted from per-cpu data by the C level SMC handler and
+ * saved to the PMF timestamp region.
+ */
+ mrs x30, cntpct_el0
+ str x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ mrs x29, tpidr_el3
+ str x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
+ ldr x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+#endif
+
+ mrs x30, esr_el3
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+ /* Handle SMC exceptions separately from other synchronous exceptions */
+ cmp x30, #EC_AARCH32_SMC
+ b.eq smc_handler32
+
+ cmp x30, #EC_AARCH64_SMC
+ b.eq smc_handler64
+
+ /* Synchronous exceptions other than the above are assumed to be EA */
+ ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ b enter_lower_el_sync_ea
+ .endm
+
+
+ /* ---------------------------------------------------------------------
+ * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
+ * interrupts.
+ * ---------------------------------------------------------------------
+ */
+ .macro handle_interrupt_exception label
+
+ /*
+ * Save general purpose and ARMv8.3-PAuth registers (if enabled).
+ * If Secure Cycle Counter is not disabled in MDCR_EL3 when
+ * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
+ * Also set the PSTATE to a known state.
+ */
+ bl prepare_el3_entry
+
+#if ENABLE_PAUTH
+ /* Load and program APIAKey firmware key */
+ bl pauth_load_bl31_apiakey
+#endif
+
+ /* Save the EL3 system registers needed to return from this exception */
+ mrs x0, spsr_el3
+ mrs x1, elr_el3
+ stp x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+
+ /* Switch to the runtime stack i.e. SP_EL0 */
+ ldr x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+ mov x20, sp
+ msr spsel, #MODE_SP_EL0
+ mov sp, x2
+
+ /*
+ * Find out whether this is a valid interrupt type.
+ * If the interrupt controller reports a spurious interrupt then return
+ * to where we came from.
+ */
+ bl plat_ic_get_pending_interrupt_type
+ cmp x0, #INTR_TYPE_INVAL
+ b.eq interrupt_exit_\label
+
+ /*
+ * Get the registered handler for this interrupt type.
+	 * A NULL return value could be because of the following conditions:
+ *
+ * a. An interrupt of a type was routed correctly but a handler for its
+ * type was not registered.
+ *
+ * b. An interrupt of a type was not routed correctly so a handler for
+ * its type was not registered.
+ *
+ * c. An interrupt of a type was routed correctly to EL3, but was
+ * deasserted before its pending state could be read. Another
+ * interrupt of a different type pended at the same time and its
+ * type was reported as pending instead. However, a handler for this
+ * type was not registered.
+ *
+ * a. and b. can only happen due to a programming error. The
+ * occurrence of c. could be beyond the control of Trusted Firmware.
+ * It makes sense to return from this exception instead of reporting an
+ * error.
+ */
+ bl get_interrupt_type_handler
+ cbz x0, interrupt_exit_\label
+ mov x21, x0
+
+ mov x0, #INTR_ID_UNAVAILABLE
+
+ /* Set the current security state in the 'flags' parameter */
+ mrs x2, scr_el3
+ ubfx x1, x2, #0, #1
+
+ /* Restore the reference to the 'handle' i.e. SP_EL3 */
+ mov x2, x20
+
+ /* x3 will point to a cookie (not used now) */
+ mov x3, xzr
+
+ /* Call the interrupt type handler */
+ blr x21
+
+interrupt_exit_\label:
+ /* Return from exception, possibly in a different security state */
+ b el3_exit
+
+ .endm
+
+
+vector_base runtime_exceptions
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry sync_exception_sp_el0
+#ifdef MONITOR_TRAPS
+ stp x29, x30, [sp, #-16]!
+
+ mrs x30, esr_el3
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+ /* Check for BRK */
+ cmp x30, #EC_BRK
+ b.eq brk_handler
+
+ ldp x29, x30, [sp], #16
+#endif /* MONITOR_TRAPS */
+
+ /* We don't expect any synchronous exceptions from EL3 */
+ b report_unhandled_exception
+end_vector_entry sync_exception_sp_el0
+
+vector_entry irq_sp_el0
+ /*
+ * EL3 code is non-reentrant. Any asynchronous exception is a serious
+ * error. Loop infinitely.
+ */
+ b report_unhandled_interrupt
+end_vector_entry irq_sp_el0
+
+
+vector_entry fiq_sp_el0
+ b report_unhandled_interrupt
+end_vector_entry fiq_sp_el0
+
+
+vector_entry serror_sp_el0
+ no_ret plat_handle_el3_ea
+end_vector_entry serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry sync_exception_sp_elx
+ /*
+ * This exception will trigger if anything went wrong during a previous
+ * exception entry or exit or while handling an earlier unexpected
+ * synchronous exception. There is a high probability that SP_EL3 is
+ * corrupted.
+ */
+ b report_unhandled_exception
+end_vector_entry sync_exception_sp_elx
+
+vector_entry irq_sp_elx
+ b report_unhandled_interrupt
+end_vector_entry irq_sp_elx
+
+vector_entry fiq_sp_elx
+ b report_unhandled_interrupt
+end_vector_entry fiq_sp_elx
+
+vector_entry serror_sp_elx
+#if !RAS_EXTENSION
+ check_if_serror_from_EL3
+#endif
+ no_ret plat_handle_el3_ea
+end_vector_entry serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry sync_exception_aarch64
+ /*
+	 * This exception vector is most commonly the entry point for SMCs
+	 * and traps that are unhandled at lower ELs. SP_EL3 should point
+ * to a valid cpu context where the general purpose and system register
+ * state can be saved.
+ */
+ apply_at_speculative_wa
+ check_and_unmask_ea
+ handle_sync_exception
+end_vector_entry sync_exception_aarch64
+
+vector_entry irq_aarch64
+ apply_at_speculative_wa
+ check_and_unmask_ea
+ handle_interrupt_exception irq_aarch64
+end_vector_entry irq_aarch64
+
+vector_entry fiq_aarch64
+ apply_at_speculative_wa
+ check_and_unmask_ea
+ handle_interrupt_exception fiq_aarch64
+end_vector_entry fiq_aarch64
+
+vector_entry serror_aarch64
+ apply_at_speculative_wa
+#if RAS_EXTENSION
+ msr daifclr, #DAIF_ABT_BIT
+ b enter_lower_el_async_ea
+#else
+ handle_async_ea
+#endif
+end_vector_entry serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry sync_exception_aarch32
+ /*
+	 * This exception vector is most commonly the entry point for SMCs
+	 * and traps that are unhandled at lower ELs. SP_EL3 should point
+ * to a valid cpu context where the general purpose and system register
+ * state can be saved.
+ */
+ apply_at_speculative_wa
+ check_and_unmask_ea
+ handle_sync_exception
+end_vector_entry sync_exception_aarch32
+
+vector_entry irq_aarch32
+ apply_at_speculative_wa
+ check_and_unmask_ea
+ handle_interrupt_exception irq_aarch32
+end_vector_entry irq_aarch32
+
+vector_entry fiq_aarch32
+ apply_at_speculative_wa
+ check_and_unmask_ea
+ handle_interrupt_exception fiq_aarch32
+end_vector_entry fiq_aarch32
+
+vector_entry serror_aarch32
+ apply_at_speculative_wa
+#if RAS_EXTENSION
+ msr daifclr, #DAIF_ABT_BIT
+ b enter_lower_el_async_ea
+#else
+ handle_async_ea
+#endif
+end_vector_entry serror_aarch32
+
+#ifdef MONITOR_TRAPS
+ .section .rodata.brk_string, "aS"
+brk_location:
+ .asciz "Error at instruction 0x"
+brk_message:
+ .asciz "Unexpected BRK instruction with value 0x"
+#endif /* MONITOR_TRAPS */
+
+ /* ---------------------------------------------------------------------
+ * The following code handles secure monitor calls.
+ * Depending upon the execution state from where the SMC has been
+ * invoked, it frees some general purpose registers to perform the
+ * remaining tasks. They involve finding the runtime service handler
+	 * that is the target of the SMC & switching to the runtime stack (SP_EL0)
+ * before calling the handler.
+ *
+ * Note that x30 has been explicitly saved and can be used here
+ * ---------------------------------------------------------------------
+ */
+func smc_handler
+smc_handler32:
+ /* Check whether aarch32 issued an SMC64 */
+ tbnz x0, #FUNCID_CC_SHIFT, smc_prohibited
+
+smc_handler64:
+ /* NOTE: The code below must preserve x0-x4 */
+
+ /*
+ * Save general purpose and ARMv8.3-PAuth registers (if enabled).
+ * If Secure Cycle Counter is not disabled in MDCR_EL3 when
+ * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
+ * Also set the PSTATE to a known state.
+ */
+ bl prepare_el3_entry
+
+#if ENABLE_PAUTH
+ /* Load and program APIAKey firmware key */
+ bl pauth_load_bl31_apiakey
+#endif
+
+ /*
+ * Populate the parameters for the SMC handler.
+ * We already have x0-x4 in place. x5 will point to a cookie (not used
+ * now). x6 will point to the context structure (SP_EL3) and x7 will
+ * contain flags we need to pass to the handler.
+ */
+ mov x5, xzr
+ mov x6, sp
+
+ /*
+ * Restore the saved C runtime stack value which will become the new
+ * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
+ * structure prior to the last ERET from EL3.
+ */
+ ldr x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+
+ /* Switch to SP_EL0 */
+ msr spsel, #MODE_SP_EL0
+
+ /*
+ * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there is a world
+ * switch during SMC handling.
+ * TODO: Revisit if all system registers can be saved later.
+ */
+ mrs x16, spsr_el3
+ mrs x17, elr_el3
+ mrs x18, scr_el3
+ stp x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+ str x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+
+ /* Clear flag register */
+ mov x7, xzr
+
+#if ENABLE_RME
+ /* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */
+	ubfx	x7, x18, #SCR_NSE_SHIFT, #1
+
+ /*
+ * Shift copied SCR_EL3.NSE bit by 5 to create space for
+ * SCR_EL3.NS bit. Bit 5 of the flag corresponds to
+ * the SCR_EL3.NSE bit.
+ */
+ lsl x7, x7, #5
+#endif /* ENABLE_RME */
+
+ /* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
+ bfi x7, x18, #0, #1
+
+ /*
+	 * Per SMCCC v1.3, a caller can set the SVE hint bit in the SMC FID
+ * passed through x0. Copy the SVE hint bit to flags and mask the
+ * bit in smc_fid passed to the standard service dispatcher.
+ * A service/dispatcher can retrieve the SVE hint bit state from
+ * flags using the appropriate helper.
+ */
+ bfi x7, x0, #FUNCID_SVE_HINT_SHIFT, #FUNCID_SVE_HINT_MASK
+ bic x0, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
+
+ mov sp, x12
+
+ /* Get the unique owning entity number */
+ ubfx x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
+ ubfx x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
+ orr x16, x16, x15, lsl #FUNCID_OEN_WIDTH
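+
+	/*
+	 * Worked example (illustrative): for a fast SMC32 call with
+	 * FID 0x84000000, the OEN field (FID[29:24]) is 4 and the
+	 * call-type bit (FID[31]) is 1, so x16 becomes
+	 * 4 | (1 << FUNCID_OEN_WIDTH) = 0x44, i.e. the slot consulted
+	 * in rt_svc_descs_indices below.
+	 */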
+
+ /* Load descriptor index from array of indices */
+ adrp x14, rt_svc_descs_indices
+ add x14, x14, :lo12:rt_svc_descs_indices
+ ldrb w15, [x14, x16]
+
+ /* Any index greater than 127 is invalid. Check bit 7. */
+ tbnz w15, 7, smc_unknown
+
+ /*
+ * Get the descriptor using the index
+ * x11 = (base + off), w15 = index
+ *
+ * handler = (base + off) + (index << log2(size))
+ */
+ adr x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
+ lsl w10, w15, #RT_SVC_SIZE_LOG2
+ ldr x15, [x11, w10, uxtw]
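+
+	/*
+	 * For example (assuming the typical 32-byte descriptor, i.e.
+	 * RT_SVC_SIZE_LOG2 = 5): index 0x44 from the worked example above
+	 * selects the handler at __RT_SVC_DESCS_START__ +
+	 * RT_SVC_DESC_HANDLE + (0x44 << 5).
+	 */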
+
+ /*
+ * Call the Secure Monitor Call handler and then drop directly into
+ * el3_exit() which will program any remaining architectural state
+ * prior to issuing the ERET to the desired lower EL.
+ */
+#if DEBUG
+ cbz x15, rt_svc_fw_critical_error
+#endif
+ blr x15
+
+ b el3_exit
+
+smc_unknown:
+ /*
+ * Unknown SMC call. Populate return value with SMC_UNK and call
+ * el3_exit() which will restore the remaining architectural state
+	 * i.e., SYS, GP and PAuth registers (if any) prior to issuing the ERET
+ * to the desired lower EL.
+ */
+ mov x0, #SMC_UNK
+ str x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b el3_exit
+
+smc_prohibited:
+ restore_ptw_el1_sys_regs
+ ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+ ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ mov x0, #SMC_UNK
+ exception_return
+
+#if DEBUG
+rt_svc_fw_critical_error:
+ /* Switch to SP_ELx */
+ msr spsel, #MODE_SP_ELX
+ no_ret report_unhandled_exception
+#endif
+endfunc smc_handler
+
+ /* ---------------------------------------------------------------------
+ * The following code handles exceptions caused by BRK instructions.
+	 * Following a BRK instruction, the only real valid course of action is
+ * to print some information and panic, as the code that caused it is
+ * likely in an inconsistent internal state.
+ *
+ * This is initially intended to be used in conjunction with
+ * __builtin_trap.
+ * ---------------------------------------------------------------------
+ */
+#ifdef MONITOR_TRAPS
+func brk_handler
+ /* Extract the ISS */
+ mrs x10, esr_el3
+ ubfx x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH
+
+ /* Ensure the console is initialized */
+ bl plat_crash_console_init
+
+ adr x4, brk_location
+ bl asm_print_str
+ mrs x4, elr_el3
+ bl asm_print_hex
+ bl asm_print_newline
+
+ adr x4, brk_message
+ bl asm_print_str
+ mov x4, x10
+ mov x5, #28
+ bl asm_print_hex_bits
+ bl asm_print_newline
+
+ no_ret plat_panic_handler
+endfunc brk_handler
+#endif /* MONITOR_TRAPS */