Diffstat (limited to 'lib/el3_runtime')
-rw-r--r-- | lib/el3_runtime/aarch32/context_mgmt.c | 343
-rw-r--r-- | lib/el3_runtime/aarch32/cpu_data.S | 42
-rw-r--r-- | lib/el3_runtime/aarch64/context.S | 1136
-rw-r--r-- | lib/el3_runtime/aarch64/context_mgmt.c | 1098
-rw-r--r-- | lib/el3_runtime/aarch64/cpu_data.S | 48
-rw-r--r-- | lib/el3_runtime/cpu_data_array.c | 13
6 files changed, 2680 insertions, 0 deletions
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c new file mode 100644 index 0000000..af8edf5 --- /dev/null +++ b/lib/el3_runtime/aarch32/context_mgmt.c @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <stdbool.h> +#include <string.h> + +#include <platform_def.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <context.h> +#include <lib/el3_runtime/context_mgmt.h> +#include <lib/extensions/amu.h> +#include <lib/extensions/sys_reg_trace.h> +#include <lib/extensions/trf.h> +#include <lib/utils.h> + +/******************************************************************************* + * Context management library initialisation routine. This library is used by + * runtime services to share pointers to 'cpu_context' structures for the secure + * and non-secure states. Management of the structures and their associated + * memory is not done by the context management library e.g. the PSCI service + * manages the cpu context used for entry from and exit to the non-secure state. + * The Secure payload manages the context(s) corresponding to the secure state. + * It also uses this library to get access to the non-secure + * state cpu context pointers. + ******************************************************************************/ +void cm_init(void) +{ + /* + * The context management library has only global data to initialize, but + * that will be done when the BSS is zeroed out + */ +} + +/******************************************************************************* + * The following function initializes the cpu_context 'ctx' for + * first use, and sets the initial entrypoint state as specified by the + * entry_point_info structure. + * + * The security state to initialize is determined by the SECURE attribute + * of the entry_point_info. + * + * The EE and ST attributes are used to configure the endianness and secure + * timer availability for the new execution context. + * + * To prepare the register state for entry call cm_prepare_el3_exit() and + * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to + * cm_el1_sysregs_context_restore(). + ******************************************************************************/ +void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep) +{ + unsigned int security_state; + uint32_t scr, sctlr; + regs_t *reg_ctx; + + assert(ctx != NULL); + + security_state = GET_SECURITY_STATE(ep->h.attr); + + /* Clear any residual register values from the context */ + zeromem(ctx, sizeof(*ctx)); + + reg_ctx = get_regs_ctx(ctx); + + /* + * Base the context SCR on the current value, adjust for entry point + * specific requirements + */ + scr = read_scr(); + scr &= ~(SCR_NS_BIT | SCR_HCE_BIT); + + if (security_state != SECURE) + scr |= SCR_NS_BIT; + + if (security_state != SECURE) { + /* + * Set up SCTLR for the Non-secure context. + * + * SCTLR.EE: Endianness is taken from the entrypoint attributes. + * + * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as + * required by PSCI specification) + * + * Set remaining SCTLR fields to their architecturally defined + * values. Some fields reset to an IMPLEMENTATION DEFINED value: + * + * SCTLR.TE: Set to zero so that exceptions to an Exception + * Level executing at PL1 are taken to A32 state. 
+ * + * SCTLR.V: Set to zero to select the normal exception vectors + * with base address held in VBAR. + */ + assert(((ep->spsr >> SPSR_E_SHIFT) & SPSR_E_MASK) == + (EP_GET_EE(ep->h.attr) >> EP_EE_SHIFT)); + + sctlr = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U; + sctlr |= (SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_V_BIT)); + write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr); + } + + /* + * The target exception level is based on the spsr mode requested. If + * execution is requested to hyp mode, HVC is enabled via SCR.HCE. + */ + if (GET_M32(ep->spsr) == MODE32_hyp) + scr |= SCR_HCE_BIT; + + /* + * Store the initialised values for SCTLR and SCR in the cpu_context. + * The Hyp mode registers are not part of the saved context and are + * set-up in cm_prepare_el3_exit(). + */ + write_ctx_reg(reg_ctx, CTX_SCR, scr); + write_ctx_reg(reg_ctx, CTX_LR, ep->pc); + write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr); + + /* + * Store the r0-r3 value from the entrypoint into the context + * Use memcpy as we are in control of the layout of the structures + */ + memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t)); +} + +/******************************************************************************* + * Enable architecture extensions on first entry to Non-secure world. + * When EL2 is implemented but unused `el2_unused` is non-zero, otherwise + * it is zero. + ******************************************************************************/ +static void enable_extensions_nonsecure(bool el2_unused) +{ +#if IMAGE_BL32 +#if ENABLE_AMU + amu_enable(el2_unused); +#endif + +#if ENABLE_SYS_REG_TRACE_FOR_NS + sys_reg_trace_enable(); +#endif /* ENABLE_SYS_REG_TRACE_FOR_NS */ + +#if ENABLE_TRF_FOR_NS + trf_enable(); +#endif /* ENABLE_TRF_FOR_NS */ +#endif +} + +/******************************************************************************* + * The following function initializes the cpu_context for a CPU specified by + * its `cpu_idx` for first use, and sets the initial entrypoint state as + * specified by the entry_point_info structure. + ******************************************************************************/ +void cm_init_context_by_index(unsigned int cpu_idx, + const entry_point_info_t *ep) +{ + cpu_context_t *ctx; + ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr)); + cm_setup_context(ctx, ep); +} + +/******************************************************************************* + * The following function initializes the cpu_context for the current CPU + * for first use, and sets the initial entrypoint state as specified by the + * entry_point_info structure. + ******************************************************************************/ +void cm_init_my_context(const entry_point_info_t *ep) +{ + cpu_context_t *ctx; + ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr)); + cm_setup_context(ctx, ep); +} + +/******************************************************************************* + * Prepare the CPU system registers for first entry into secure or normal world + * + * If execution is requested to hyp mode, HSCTLR is initialized + * If execution is requested to non-secure PL1, and the CPU supports + * HYP mode then HYP mode is disabled by configuring all necessary HYP mode + * registers. 
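For orientation, this is how a runtime service is expected to drive the two calls documented above on the way out to the normal world. A minimal sketch, assuming an already-populated entry_point_info_t; the prepare_ns_entry wrapper itself is illustrative and not part of this patch:

#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>

/* Illustrative only: set up this CPU's Non-secure context and the
 * EL3/Hyp system registers ahead of the first exit to it. */
static void prepare_ns_entry(const entry_point_info_t *ns_ep)
{
	/* ns_ep->h.attr must carry the NON_SECURE security state */
	cm_init_my_context(ns_ep);

	/* Initialises HSCTLR, or safely disables an unused Hyp mode */
	cm_prepare_el3_exit(NON_SECURE);
}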
+ ******************************************************************************/ +void cm_prepare_el3_exit(uint32_t security_state) +{ + uint32_t hsctlr, scr; + cpu_context_t *ctx = cm_get_context(security_state); + bool el2_unused = false; + + assert(ctx != NULL); + + if (security_state == NON_SECURE) { + scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR); + if ((scr & SCR_HCE_BIT) != 0U) { + /* Use SCTLR value to initialize HSCTLR */ + hsctlr = read_ctx_reg(get_regs_ctx(ctx), + CTX_NS_SCTLR); + hsctlr |= HSCTLR_RES1; + /* Temporarily set the NS bit to access HSCTLR */ + write_scr(read_scr() | SCR_NS_BIT); + /* + * Make sure the write to SCR is complete so that + * we can access HSCTLR + */ + isb(); + write_hsctlr(hsctlr); + isb(); + + write_scr(read_scr() & ~SCR_NS_BIT); + isb(); + } else if ((read_id_pfr1() & + (ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) != 0U) { + el2_unused = true; + + /* + * Set the NS bit to access NS copies of certain banked + * registers + */ + write_scr(read_scr() | SCR_NS_BIT); + isb(); + + /* + * Hyp / PL2 present but unused, need to disable safely. + * HSCTLR can be ignored in this case. + * + * Set HCR to its architectural reset value so that + * Non-secure operations do not trap to Hyp mode. + */ + write_hcr(HCR_RESET_VAL); + + /* + * Set HCPTR to its architectural reset value so that + * Non-secure access from EL1 or EL0 to trace and to + * Advanced SIMD and floating point functionality does + * not trap to Hyp mode. + */ + write_hcptr(HCPTR_RESET_VAL); + + /* + * Initialise CNTHCTL. All fields are architecturally + * UNKNOWN on reset and are set to zero except for + * field(s) listed below. + * + * CNTHCTL.PL1PCEN: Disable traps to Hyp mode of + * Non-secure EL0 and EL1 accesses to the physical + * timer registers. + * + * CNTHCTL.PL1PCTEN: Disable traps to Hyp mode of + * Non-secure EL0 and EL1 accesses to the physical + * counter registers. + */ + write_cnthctl(CNTHCTL_RESET_VAL | + PL1PCEN_BIT | PL1PCTEN_BIT); + + /* + * Initialise CNTVOFF to zero as it resets to an + * IMPLEMENTATION DEFINED value. + */ + write64_cntvoff(0); + + /* + * Set VPIDR and VMPIDR to match MIDR_EL1 and MPIDR + * respectively. + */ + write_vpidr(read_midr()); + write_vmpidr(read_mpidr()); + + /* + * Initialise VTTBR, setting all fields rather than + * relying on the hw. Some fields are architecturally + * UNKNOWN at reset. + * + * VTTBR.VMID: Set to zero which is the architecturally + * defined reset value. Even though EL1&0 stage 2 + * address translation is disabled, cache maintenance + * operations depend on the VMID. + * + * VTTBR.BADDR: Set to zero as EL1&0 stage 2 address + * translation is disabled. + */ + write64_vttbr(VTTBR_RESET_VAL & + ~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT) + | (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT))); + + /* + * Initialise HDCR, setting all the fields rather than + * relying on hw. + * + * HDCR.HPMN: Set to value of PMCR.N which is the + * architecturally-defined reset value. + * + * HDCR.HLP: Set to one so that event counter + * overflow, that is recorded in PMOVSCLR[0-30], + * occurs on the increment that changes + * PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU is + * implemented. This bit is RES0 in versions of the + * architecture earlier than ARMv8.5, setting it to 1 + * doesn't have any effect on them. + * This bit is Reserved, UNK/SBZP in ARMv7. + * + * HDCR.HPME: Set to zero to disable EL2 Event + * counters.
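The read_id_pfr1() test used above is the architectural check for a Hyp/PL2 implementation on AArch32; pulled out as a predicate it would look roughly like this (a sketch, the helper name is ours):

#include <stdbool.h>
#include <arch_helpers.h>

/* True if the Virtualization Extensions, and therefore Hyp mode,
 * are implemented on this core (ID_PFR1.Virtualization != 0). */
static bool is_hyp_implemented(void)
{
	return (read_id_pfr1() &
		(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) != 0U;
}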
+ */ +#if (ARM_ARCH_MAJOR > 7) + write_hdcr((HDCR_RESET_VAL | HDCR_HLP_BIT | + ((read_pmcr() & PMCR_N_BITS) >> + PMCR_N_SHIFT)) & ~HDCR_HPME_BIT); +#else + write_hdcr((HDCR_RESET_VAL | + ((read_pmcr() & PMCR_N_BITS) >> + PMCR_N_SHIFT)) & ~HDCR_HPME_BIT); +#endif + /* + * Set HSTR to its architectural reset value so that + * access to system registers in the cproc=1111 + * encoding space do not trap to Hyp mode. + */ + write_hstr(HSTR_RESET_VAL); + /* + * Set CNTHP_CTL to its architectural reset value to + * disable the EL2 physical timer and prevent timer + * interrupts. Some fields are architecturally UNKNOWN + * on reset and are set to zero. + */ + write_cnthp_ctl(CNTHP_CTL_RESET_VAL); + isb(); + + write_scr(read_scr() & ~SCR_NS_BIT); + isb(); + } + enable_extensions_nonsecure(el2_unused); + } +} + +/******************************************************************************* + * This function is used to exit to Non-secure world. It simply calls the + * cm_prepare_el3_exit function for AArch32. + ******************************************************************************/ +void cm_prepare_el3_exit_ns(void) +{ + cm_prepare_el3_exit(NON_SECURE); +} diff --git a/lib/el3_runtime/aarch32/cpu_data.S b/lib/el3_runtime/aarch32/cpu_data.S new file mode 100644 index 0000000..bdad2c1 --- /dev/null +++ b/lib/el3_runtime/aarch32/cpu_data.S @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <asm_macros.S> +#include <lib/el3_runtime/cpu_data.h> + + .globl _cpu_data + .globl _cpu_data_by_index + +/* ----------------------------------------------------------------- + * cpu_data_t *_cpu_data(void) + * + * Return the cpu_data structure for the current CPU. + * ----------------------------------------------------------------- + */ +func _cpu_data + /* r12 is pushed to meet the 8 byte stack alignment requirement */ + push {r12, lr} + bl plat_my_core_pos + pop {r12, lr} + b _cpu_data_by_index +endfunc _cpu_data + +/* ----------------------------------------------------------------- + * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index) + * + * Return the cpu_data structure for the CPU with given linear index + * + * This can be called without a valid stack. + * clobbers: r0, r1 + * ----------------------------------------------------------------- + */ +func _cpu_data_by_index + mov_imm r1, CPU_DATA_SIZE + mul r0, r0, r1 + ldr r1, =percpu_data + add r0, r0, r1 + bx lr +endfunc _cpu_data_by_index diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S new file mode 100644 index 0000000..6b88a90 --- /dev/null +++ b/lib/el3_runtime/aarch64/context.S @@ -0,0 +1,1136 @@ +/* + * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved. 
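The _cpu_data_by_index routine in the cpu_data.S hunk above is plain pointer arithmetic over the per-CPU array; its C equivalent is roughly the following sketch, assuming the cpu_data_t and percpu_data declarations from cpu_data.h:

#include <lib/el3_runtime/cpu_data.h>
#include <plat/common/platform.h>

/* C rendering of the assembly: percpu_data + cpu_index * CPU_DATA_SIZE */
static cpu_data_t *cpu_data_by_index(unsigned int cpu_index)
{
	return &percpu_data[cpu_index];
}

/* Equivalent of _cpu_data: index with this core's linear position */
static cpu_data_t *my_cpu_data(void)
{
	return cpu_data_by_index(plat_my_core_pos());
}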
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <assert_macros.S> +#include <context.h> +#include <el3_common_macros.S> + +#if CTX_INCLUDE_EL2_REGS + .global el2_sysregs_context_save_common + .global el2_sysregs_context_restore_common +#if ENABLE_SPE_FOR_LOWER_ELS + .global el2_sysregs_context_save_spe + .global el2_sysregs_context_restore_spe +#endif /* ENABLE_SPE_FOR_LOWER_ELS */ +#if CTX_INCLUDE_MTE_REGS + .global el2_sysregs_context_save_mte + .global el2_sysregs_context_restore_mte +#endif /* CTX_INCLUDE_MTE_REGS */ +#if ENABLE_MPAM_FOR_LOWER_ELS + .global el2_sysregs_context_save_mpam + .global el2_sysregs_context_restore_mpam +#endif /* ENABLE_MPAM_FOR_LOWER_ELS */ +#if ENABLE_FEAT_FGT + .global el2_sysregs_context_save_fgt + .global el2_sysregs_context_restore_fgt +#endif /* ENABLE_FEAT_FGT */ +#if ENABLE_FEAT_ECV + .global el2_sysregs_context_save_ecv + .global el2_sysregs_context_restore_ecv +#endif /* ENABLE_FEAT_ECV */ +#if ENABLE_FEAT_VHE + .global el2_sysregs_context_save_vhe + .global el2_sysregs_context_restore_vhe +#endif /* ENABLE_FEAT_VHE */ +#if RAS_EXTENSION + .global el2_sysregs_context_save_ras + .global el2_sysregs_context_restore_ras +#endif /* RAS_EXTENSION */ +#if CTX_INCLUDE_NEVE_REGS + .global el2_sysregs_context_save_nv2 + .global el2_sysregs_context_restore_nv2 +#endif /* CTX_INCLUDE_NEVE_REGS */ +#if ENABLE_TRF_FOR_NS + .global el2_sysregs_context_save_trf + .global el2_sysregs_context_restore_trf +#endif /* ENABLE_TRF_FOR_NS */ +#if ENABLE_FEAT_CSV2_2 + .global el2_sysregs_context_save_csv2 + .global el2_sysregs_context_restore_csv2 +#endif /* ENABLE_FEAT_CSV2_2 */ +#if ENABLE_FEAT_HCX + .global el2_sysregs_context_save_hcx + .global el2_sysregs_context_restore_hcx +#endif /* ENABLE_FEAT_HCX */ +#endif /* CTX_INCLUDE_EL2_REGS */ + + .global el1_sysregs_context_save + .global el1_sysregs_context_restore +#if CTX_INCLUDE_FPREGS + .global fpregs_context_save + .global fpregs_context_restore +#endif /* CTX_INCLUDE_FPREGS */ + .global prepare_el3_entry + .global restore_gp_pmcr_pauth_regs + .global save_and_update_ptw_el1_sys_regs + .global el3_exit + +#if CTX_INCLUDE_EL2_REGS + +/* ----------------------------------------------------- + * The following functions strictly follow the AArch64 + * PCS to use x9-x16 (temporary caller-saved registers) + * to save/restore EL2 system register context. + * el2_sysregs_context_save/restore_common functions + * save and restore registers that are common to all + * configurations. The rest of the functions save and + * restore EL2 system registers that are present when a + * particular feature is enabled. All functions assume + * that 'x0' is pointing to a 'el2_sys_regs' structure + * where the register context will be saved/restored. + * + * The following registers are not added. 
+ * AMEVCNTVOFF0<n>_EL2 + * AMEVCNTVOFF1<n>_EL2 + * ICH_AP0R<n>_EL2 + * ICH_AP1R<n>_EL2 + * ICH_LR<n>_EL2 + * ----------------------------------------------------- + */ +func el2_sysregs_context_save_common + mrs x9, actlr_el2 + mrs x10, afsr0_el2 + stp x9, x10, [x0, #CTX_ACTLR_EL2] + + mrs x11, afsr1_el2 + mrs x12, amair_el2 + stp x11, x12, [x0, #CTX_AFSR1_EL2] + + mrs x13, cnthctl_el2 + mrs x14, cntvoff_el2 + stp x13, x14, [x0, #CTX_CNTHCTL_EL2] + + mrs x15, cptr_el2 + str x15, [x0, #CTX_CPTR_EL2] + +#if CTX_INCLUDE_AARCH32_REGS + mrs x16, dbgvcr32_el2 + str x16, [x0, #CTX_DBGVCR32_EL2] +#endif /* CTX_INCLUDE_AARCH32_REGS */ + + mrs x9, elr_el2 + mrs x10, esr_el2 + stp x9, x10, [x0, #CTX_ELR_EL2] + + mrs x11, far_el2 + mrs x12, hacr_el2 + stp x11, x12, [x0, #CTX_FAR_EL2] + + mrs x13, hcr_el2 + mrs x14, hpfar_el2 + stp x13, x14, [x0, #CTX_HCR_EL2] + + mrs x15, hstr_el2 + mrs x16, ICC_SRE_EL2 + stp x15, x16, [x0, #CTX_HSTR_EL2] + + mrs x9, ICH_HCR_EL2 + mrs x10, ICH_VMCR_EL2 + stp x9, x10, [x0, #CTX_ICH_HCR_EL2] + + mrs x11, mair_el2 + mrs x12, mdcr_el2 + stp x11, x12, [x0, #CTX_MAIR_EL2] + + mrs x14, sctlr_el2 + str x14, [x0, #CTX_SCTLR_EL2] + + mrs x15, spsr_el2 + mrs x16, sp_el2 + stp x15, x16, [x0, #CTX_SPSR_EL2] + + mrs x9, tcr_el2 + mrs x10, tpidr_el2 + stp x9, x10, [x0, #CTX_TCR_EL2] + + mrs x11, ttbr0_el2 + mrs x12, vbar_el2 + stp x11, x12, [x0, #CTX_TTBR0_EL2] + + mrs x13, vmpidr_el2 + mrs x14, vpidr_el2 + stp x13, x14, [x0, #CTX_VMPIDR_EL2] + + mrs x15, vtcr_el2 + mrs x16, vttbr_el2 + stp x15, x16, [x0, #CTX_VTCR_EL2] + ret +endfunc el2_sysregs_context_save_common + +func el2_sysregs_context_restore_common + ldp x9, x10, [x0, #CTX_ACTLR_EL2] + msr actlr_el2, x9 + msr afsr0_el2, x10 + + ldp x11, x12, [x0, #CTX_AFSR1_EL2] + msr afsr1_el2, x11 + msr amair_el2, x12 + + ldp x13, x14, [x0, #CTX_CNTHCTL_EL2] + msr cnthctl_el2, x13 + msr cntvoff_el2, x14 + + ldr x15, [x0, #CTX_CPTR_EL2] + msr cptr_el2, x15 + +#if CTX_INCLUDE_AARCH32_REGS + ldr x16, [x0, #CTX_DBGVCR32_EL2] + msr dbgvcr32_el2, x16 +#endif /* CTX_INCLUDE_AARCH32_REGS */ + + ldp x9, x10, [x0, #CTX_ELR_EL2] + msr elr_el2, x9 + msr esr_el2, x10 + + ldp x11, x12, [x0, #CTX_FAR_EL2] + msr far_el2, x11 + msr hacr_el2, x12 + + ldp x13, x14, [x0, #CTX_HCR_EL2] + msr hcr_el2, x13 + msr hpfar_el2, x14 + + ldp x15, x16, [x0, #CTX_HSTR_EL2] + msr hstr_el2, x15 + msr ICC_SRE_EL2, x16 + + ldp x9, x10, [x0, #CTX_ICH_HCR_EL2] + msr ICH_HCR_EL2, x9 + msr ICH_VMCR_EL2, x10 + + ldp x11, x12, [x0, #CTX_MAIR_EL2] + msr mair_el2, x11 + msr mdcr_el2, x12 + + ldr x14, [x0, #CTX_SCTLR_EL2] + msr sctlr_el2, x14 + + ldp x15, x16, [x0, #CTX_SPSR_EL2] + msr spsr_el2, x15 + msr sp_el2, x16 + + ldp x9, x10, [x0, #CTX_TCR_EL2] + msr tcr_el2, x9 + msr tpidr_el2, x10 + + ldp x11, x12, [x0, #CTX_TTBR0_EL2] + msr ttbr0_el2, x11 + msr vbar_el2, x12 + + ldp x13, x14, [x0, #CTX_VMPIDR_EL2] + msr vmpidr_el2, x13 + msr vpidr_el2, x14 + + ldp x15, x16, [x0, #CTX_VTCR_EL2] + msr vtcr_el2, x15 + msr vttbr_el2, x16 + ret +endfunc el2_sysregs_context_restore_common + +#if ENABLE_SPE_FOR_LOWER_ELS +func el2_sysregs_context_save_spe + mrs x13, PMSCR_EL2 + str x13, [x0, #CTX_PMSCR_EL2] + ret +endfunc el2_sysregs_context_save_spe + +func el2_sysregs_context_restore_spe + ldr x13, [x0, #CTX_PMSCR_EL2] + msr PMSCR_EL2, x13 + ret +endfunc el2_sysregs_context_restore_spe +#endif /* ENABLE_SPE_FOR_LOWER_ELS */ + +#if CTX_INCLUDE_MTE_REGS +func el2_sysregs_context_save_mte + mrs x9, TFSR_EL2 + str x9, [x0, #CTX_TFSR_EL2] + ret +endfunc el2_sysregs_context_save_mte + +func 
el2_sysregs_context_restore_mte + ldr x9, [x0, #CTX_TFSR_EL2] + msr TFSR_EL2, x9 + ret +endfunc el2_sysregs_context_restore_mte +#endif /* CTX_INCLUDE_MTE_REGS */ + +#if ENABLE_MPAM_FOR_LOWER_ELS +func el2_sysregs_context_save_mpam + mrs x10, MPAM2_EL2 + str x10, [x0, #CTX_MPAM2_EL2] + + mrs x11, MPAMHCR_EL2 + mrs x12, MPAMVPM0_EL2 + stp x11, x12, [x0, #CTX_MPAMHCR_EL2] + + mrs x13, MPAMVPM1_EL2 + mrs x14, MPAMVPM2_EL2 + stp x13, x14, [x0, #CTX_MPAMVPM1_EL2] + + mrs x15, MPAMVPM3_EL2 + mrs x16, MPAMVPM4_EL2 + stp x15, x16, [x0, #CTX_MPAMVPM3_EL2] + + mrs x9, MPAMVPM5_EL2 + mrs x10, MPAMVPM6_EL2 + stp x9, x10, [x0, #CTX_MPAMVPM5_EL2] + + mrs x11, MPAMVPM7_EL2 + mrs x12, MPAMVPMV_EL2 + stp x11, x12, [x0, #CTX_MPAMVPM7_EL2] + ret +endfunc el2_sysregs_context_save_mpam + +func el2_sysregs_context_restore_mpam + ldr x10, [x0, #CTX_MPAM2_EL2] + msr MPAM2_EL2, x10 + + ldp x11, x12, [x0, #CTX_MPAMHCR_EL2] + msr MPAMHCR_EL2, x11 + msr MPAMVPM0_EL2, x12 + + ldp x13, x14, [x0, #CTX_MPAMVPM1_EL2] + msr MPAMVPM1_EL2, x13 + msr MPAMVPM2_EL2, x14 + + ldp x15, x16, [x0, #CTX_MPAMVPM3_EL2] + msr MPAMVPM3_EL2, x15 + msr MPAMVPM4_EL2, x16 + + ldp x9, x10, [x0, #CTX_MPAMVPM5_EL2] + msr MPAMVPM5_EL2, x9 + msr MPAMVPM6_EL2, x10 + + ldp x11, x12, [x0, #CTX_MPAMVPM7_EL2] + msr MPAMVPM7_EL2, x11 + msr MPAMVPMV_EL2, x12 + ret +endfunc el2_sysregs_context_restore_mpam +#endif /* ENABLE_MPAM_FOR_LOWER_ELS */ + +#if ENABLE_FEAT_FGT +func el2_sysregs_context_save_fgt + mrs x13, HDFGRTR_EL2 +#if ENABLE_FEAT_AMUv1 + mrs x14, HAFGRTR_EL2 + stp x13, x14, [x0, #CTX_HDFGRTR_EL2] +#else + str x13, [x0, #CTX_HDFGRTR_EL2] +#endif /* ENABLE_FEAT_AMUv1 */ + mrs x15, HDFGWTR_EL2 + mrs x16, HFGITR_EL2 + stp x15, x16, [x0, #CTX_HDFGWTR_EL2] + + mrs x9, HFGRTR_EL2 + mrs x10, HFGWTR_EL2 + stp x9, x10, [x0, #CTX_HFGRTR_EL2] + ret +endfunc el2_sysregs_context_save_fgt + +func el2_sysregs_context_restore_fgt +#if ENABLE_FEAT_AMUv1 + ldp x13, x14, [x0, #CTX_HDFGRTR_EL2] + msr HAFGRTR_EL2, x14 +#else + ldr x13, [x0, #CTX_HDFGRTR_EL2] +#endif /* ENABLE_FEAT_AMUv1 */ + msr HDFGRTR_EL2, x13 + + ldp x15, x16, [x0, #CTX_HDFGWTR_EL2] + msr HDFGWTR_EL2, x15 + msr HFGITR_EL2, x16 + + ldp x9, x10, [x0, #CTX_HFGRTR_EL2] + msr HFGRTR_EL2, x9 + msr HFGWTR_EL2, x10 + ret +endfunc el2_sysregs_context_restore_fgt +#endif /* ENABLE_FEAT_FGT */ + +#if ENABLE_FEAT_ECV +func el2_sysregs_context_save_ecv + mrs x11, CNTPOFF_EL2 + str x11, [x0, #CTX_CNTPOFF_EL2] + ret +endfunc el2_sysregs_context_save_ecv + +func el2_sysregs_context_restore_ecv + ldr x11, [x0, #CTX_CNTPOFF_EL2] + msr CNTPOFF_EL2, x11 + ret +endfunc el2_sysregs_context_restore_ecv +#endif /* ENABLE_FEAT_ECV */ + +#if ENABLE_FEAT_VHE +func el2_sysregs_context_save_vhe + /* + * CONTEXTIDR_EL2 register is saved only when FEAT_VHE or + * FEAT_Debugv8p2 (currently not in TF-A) is supported. + */ + mrs x9, contextidr_el2 + mrs x10, ttbr1_el2 + stp x9, x10, [x0, #CTX_CONTEXTIDR_EL2] + ret +endfunc el2_sysregs_context_save_vhe + +func el2_sysregs_context_restore_vhe + /* + * CONTEXTIDR_EL2 register is restored only when FEAT_VHE or + * FEAT_Debugv8p2 (currently not in TF-A) is supported. + */ + ldp x9, x10, [x0, #CTX_CONTEXTIDR_EL2] + msr contextidr_el2, x9 + msr ttbr1_el2, x10 + ret +endfunc el2_sysregs_context_restore_vhe +#endif /* ENABLE_FEAT_VHE */ + +#if RAS_EXTENSION +func el2_sysregs_context_save_ras + /* + * VDISR_EL2 and VSESR_EL2 registers are saved only when + * FEAT_RAS is supported.
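Each feature-gated pair above saves or restores one register group; the intent is that C code composes them under the same build flags. A sketch of such a caller follows (the wrapper below is ours, not part of this patch):

#include <context.h>

/* Illustrative composition of the save half for a subset of groups */
static void save_el2_sysregs(el2_sysregs_t *regs)
{
	el2_sysregs_context_save_common(regs);
#if ENABLE_SPE_FOR_LOWER_ELS
	el2_sysregs_context_save_spe(regs);
#endif
#if ENABLE_MPAM_FOR_LOWER_ELS
	el2_sysregs_context_save_mpam(regs);
#endif
#if ENABLE_FEAT_FGT
	el2_sysregs_context_save_fgt(regs);
#endif
	/* ...likewise for ECV, VHE, RAS, NV2, TRF, CSV2 and HCX */
}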
+ */ + mrs x11, vdisr_el2 + mrs x12, vsesr_el2 + stp x11, x12, [x0, #CTX_VDISR_EL2] + ret +endfunc el2_sysregs_context_save_ras + +func el2_sysregs_context_restore_ras + /* + * VDISR_EL2 and VSESR_EL2 registers are restored only when FEAT_RAS + * is supported. + */ + ldp x11, x12, [x0, #CTX_VDISR_EL2] + msr vdisr_el2, x11 + msr vsesr_el2, x12 + ret +endfunc el2_sysregs_context_restore_ras +#endif /* RAS_EXTENSION */ + +#if CTX_INCLUDE_NEVE_REGS +func el2_sysregs_context_save_nv2 + /* + * VNCR_EL2 register is saved only when FEAT_NV2 is supported. + */ + mrs x16, vncr_el2 + str x16, [x0, #CTX_VNCR_EL2] + ret +endfunc el2_sysregs_context_save_nv2 + +func el2_sysregs_context_restore_nv2 + /* + * VNCR_EL2 register is restored only when FEAT_NV2 is supported. + */ + ldr x16, [x0, #CTX_VNCR_EL2] + msr vncr_el2, x16 + ret +endfunc el2_sysregs_context_restore_nv2 +#endif /* CTX_INCLUDE_NEVE_REGS */ + +#if ENABLE_TRF_FOR_NS +func el2_sysregs_context_save_trf + /* + * TRFCR_EL2 register is saved only when FEAT_TRF is supported. + */ + mrs x12, TRFCR_EL2 + str x12, [x0, #CTX_TRFCR_EL2] + ret +endfunc el2_sysregs_context_save_trf + +func el2_sysregs_context_restore_trf + /* + * TRFCR_EL2 register is restored only when FEAT_TRF is supported. + */ + ldr x12, [x0, #CTX_TRFCR_EL2] + msr TRFCR_EL2, x12 + ret +endfunc el2_sysregs_context_restore_trf +#endif /* ENABLE_TRF_FOR_NS */ + +#if ENABLE_FEAT_CSV2_2 +func el2_sysregs_context_save_csv2 + /* + * SCXTNUM_EL2 register is saved only when FEAT_CSV2_2 is supported. + */ + mrs x13, scxtnum_el2 + str x13, [x0, #CTX_SCXTNUM_EL2] + ret +endfunc el2_sysregs_context_save_csv2 + +func el2_sysregs_context_restore_csv2 + /* + * SCXTNUM_EL2 register is restored only when FEAT_CSV2_2 is supported. + */ + ldr x13, [x0, #CTX_SCXTNUM_EL2] + msr scxtnum_el2, x13 + ret +endfunc el2_sysregs_context_restore_csv2 +#endif /* ENABLE_FEAT_CSV2_2 */ + +#if ENABLE_FEAT_HCX +func el2_sysregs_context_save_hcx + mrs x14, hcrx_el2 + str x14, [x0, #CTX_HCRX_EL2] + ret +endfunc el2_sysregs_context_save_hcx + +func el2_sysregs_context_restore_hcx + ldr x14, [x0, #CTX_HCRX_EL2] + msr hcrx_el2, x14 + ret +endfunc el2_sysregs_context_restore_hcx +#endif /* ENABLE_FEAT_HCX */ +#endif /* CTX_INCLUDE_EL2_REGS */ + +/* ------------------------------------------------------------------ + * The following function strictly follows the AArch64 PCS to use + * x9-x17 (temporary caller-saved registers) to save EL1 system + * register context. It assumes that 'x0' is pointing to a + * 'el1_sys_regs' structure where the register context will be saved. 
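Together with its restore counterpart below, this function provides the EL1 half of a world switch: the outgoing state's registers are captured before the incoming state's image is loaded. A sketch of that pairing, assuming the cm_get_context/get_el1_sysregs_ctx accessors from this library (the switch_el1_context helper is illustrative):

#include <stdint.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>

/* Illustrative EL1 register-file switch between security states */
static void switch_el1_context(uint32_t from_state, uint32_t to_state)
{
	el1_sysregs_context_save(get_el1_sysregs_ctx(cm_get_context(from_state)));
	el1_sysregs_context_restore(get_el1_sysregs_ctx(cm_get_context(to_state)));
}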
+ * ------------------------------------------------------------------ + */ +func el1_sysregs_context_save + + mrs x9, spsr_el1 + mrs x10, elr_el1 + stp x9, x10, [x0, #CTX_SPSR_EL1] + +#if !ERRATA_SPECULATIVE_AT + mrs x15, sctlr_el1 + mrs x16, tcr_el1 + stp x15, x16, [x0, #CTX_SCTLR_EL1] +#endif /* ERRATA_SPECULATIVE_AT */ + + mrs x17, cpacr_el1 + mrs x9, csselr_el1 + stp x17, x9, [x0, #CTX_CPACR_EL1] + + mrs x10, sp_el1 + mrs x11, esr_el1 + stp x10, x11, [x0, #CTX_SP_EL1] + + mrs x12, ttbr0_el1 + mrs x13, ttbr1_el1 + stp x12, x13, [x0, #CTX_TTBR0_EL1] + + mrs x14, mair_el1 + mrs x15, amair_el1 + stp x14, x15, [x0, #CTX_MAIR_EL1] + + mrs x16, actlr_el1 + mrs x17, tpidr_el1 + stp x16, x17, [x0, #CTX_ACTLR_EL1] + + mrs x9, tpidr_el0 + mrs x10, tpidrro_el0 + stp x9, x10, [x0, #CTX_TPIDR_EL0] + + mrs x13, par_el1 + mrs x14, far_el1 + stp x13, x14, [x0, #CTX_PAR_EL1] + + mrs x15, afsr0_el1 + mrs x16, afsr1_el1 + stp x15, x16, [x0, #CTX_AFSR0_EL1] + + mrs x17, contextidr_el1 + mrs x9, vbar_el1 + stp x17, x9, [x0, #CTX_CONTEXTIDR_EL1] + + /* Save AArch32 system registers if the build has instructed so */ +#if CTX_INCLUDE_AARCH32_REGS + mrs x11, spsr_abt + mrs x12, spsr_und + stp x11, x12, [x0, #CTX_SPSR_ABT] + + mrs x13, spsr_irq + mrs x14, spsr_fiq + stp x13, x14, [x0, #CTX_SPSR_IRQ] + + mrs x15, dacr32_el2 + mrs x16, ifsr32_el2 + stp x15, x16, [x0, #CTX_DACR32_EL2] +#endif /* CTX_INCLUDE_AARCH32_REGS */ + + /* Save NS timer registers if the build has instructed so */ +#if NS_TIMER_SWITCH + mrs x10, cntp_ctl_el0 + mrs x11, cntp_cval_el0 + stp x10, x11, [x0, #CTX_CNTP_CTL_EL0] + + mrs x12, cntv_ctl_el0 + mrs x13, cntv_cval_el0 + stp x12, x13, [x0, #CTX_CNTV_CTL_EL0] + + mrs x14, cntkctl_el1 + str x14, [x0, #CTX_CNTKCTL_EL1] +#endif /* NS_TIMER_SWITCH */ + + /* Save MTE system registers if the build has instructed so */ +#if CTX_INCLUDE_MTE_REGS + mrs x15, TFSRE0_EL1 + mrs x16, TFSR_EL1 + stp x15, x16, [x0, #CTX_TFSRE0_EL1] + + mrs x9, RGSR_EL1 + mrs x10, GCR_EL1 + stp x9, x10, [x0, #CTX_RGSR_EL1] +#endif /* CTX_INCLUDE_MTE_REGS */ + + ret +endfunc el1_sysregs_context_save + +/* ------------------------------------------------------------------ + * The following function strictly follows the AArch64 PCS to use + * x9-x17 (temporary caller-saved registers) to restore EL1 system + * register context. 
It assumes that 'x0' is pointing to a + * 'el1_sys_regs' structure from where the register context will be + * restored + * ------------------------------------------------------------------ + */ +func el1_sysregs_context_restore + + ldp x9, x10, [x0, #CTX_SPSR_EL1] + msr spsr_el1, x9 + msr elr_el1, x10 + +#if !ERRATA_SPECULATIVE_AT + ldp x15, x16, [x0, #CTX_SCTLR_EL1] + msr sctlr_el1, x15 + msr tcr_el1, x16 +#endif /* ERRATA_SPECULATIVE_AT */ + + ldp x17, x9, [x0, #CTX_CPACR_EL1] + msr cpacr_el1, x17 + msr csselr_el1, x9 + + ldp x10, x11, [x0, #CTX_SP_EL1] + msr sp_el1, x10 + msr esr_el1, x11 + + ldp x12, x13, [x0, #CTX_TTBR0_EL1] + msr ttbr0_el1, x12 + msr ttbr1_el1, x13 + + ldp x14, x15, [x0, #CTX_MAIR_EL1] + msr mair_el1, x14 + msr amair_el1, x15 + + ldp x16, x17, [x0, #CTX_ACTLR_EL1] + msr actlr_el1, x16 + msr tpidr_el1, x17 + + ldp x9, x10, [x0, #CTX_TPIDR_EL0] + msr tpidr_el0, x9 + msr tpidrro_el0, x10 + + ldp x13, x14, [x0, #CTX_PAR_EL1] + msr par_el1, x13 + msr far_el1, x14 + + ldp x15, x16, [x0, #CTX_AFSR0_EL1] + msr afsr0_el1, x15 + msr afsr1_el1, x16 + + ldp x17, x9, [x0, #CTX_CONTEXTIDR_EL1] + msr contextidr_el1, x17 + msr vbar_el1, x9 + + /* Restore AArch32 system registers if the build has instructed so */ +#if CTX_INCLUDE_AARCH32_REGS + ldp x11, x12, [x0, #CTX_SPSR_ABT] + msr spsr_abt, x11 + msr spsr_und, x12 + + ldp x13, x14, [x0, #CTX_SPSR_IRQ] + msr spsr_irq, x13 + msr spsr_fiq, x14 + + ldp x15, x16, [x0, #CTX_DACR32_EL2] + msr dacr32_el2, x15 + msr ifsr32_el2, x16 +#endif /* CTX_INCLUDE_AARCH32_REGS */ + + /* Restore NS timer registers if the build has instructed so */ +#if NS_TIMER_SWITCH + ldp x10, x11, [x0, #CTX_CNTP_CTL_EL0] + msr cntp_ctl_el0, x10 + msr cntp_cval_el0, x11 + + ldp x12, x13, [x0, #CTX_CNTV_CTL_EL0] + msr cntv_ctl_el0, x12 + msr cntv_cval_el0, x13 + + ldr x14, [x0, #CTX_CNTKCTL_EL1] + msr cntkctl_el1, x14 +#endif /* NS_TIMER_SWITCH */ + + /* Restore MTE system registers if the build has instructed so */ +#if CTX_INCLUDE_MTE_REGS + ldp x11, x12, [x0, #CTX_TFSRE0_EL1] + msr TFSRE0_EL1, x11 + msr TFSR_EL1, x12 + + ldp x13, x14, [x0, #CTX_RGSR_EL1] + msr RGSR_EL1, x13 + msr GCR_EL1, x14 +#endif /* CTX_INCLUDE_MTE_REGS */ + + /* No explict ISB required here as ERET covers it */ + ret +endfunc el1_sysregs_context_restore + +/* ------------------------------------------------------------------ + * The following function follows the aapcs_64 strictly to use + * x9-x17 (temporary caller-saved registers according to AArch64 PCS) + * to save floating point register context. It assumes that 'x0' is + * pointing to a 'fp_regs' structure where the register context will + * be saved. + * + * Access to VFP registers will trap if CPTR_EL3.TFP is set. + * However currently we don't use VFP registers nor set traps in + * Trusted Firmware, and assume it's cleared. 
+ * + * TODO: Revisit when VFP is used in secure world + * ------------------------------------------------------------------ + */ +#if CTX_INCLUDE_FPREGS +func fpregs_context_save + stp q0, q1, [x0, #CTX_FP_Q0] + stp q2, q3, [x0, #CTX_FP_Q2] + stp q4, q5, [x0, #CTX_FP_Q4] + stp q6, q7, [x0, #CTX_FP_Q6] + stp q8, q9, [x0, #CTX_FP_Q8] + stp q10, q11, [x0, #CTX_FP_Q10] + stp q12, q13, [x0, #CTX_FP_Q12] + stp q14, q15, [x0, #CTX_FP_Q14] + stp q16, q17, [x0, #CTX_FP_Q16] + stp q18, q19, [x0, #CTX_FP_Q18] + stp q20, q21, [x0, #CTX_FP_Q20] + stp q22, q23, [x0, #CTX_FP_Q22] + stp q24, q25, [x0, #CTX_FP_Q24] + stp q26, q27, [x0, #CTX_FP_Q26] + stp q28, q29, [x0, #CTX_FP_Q28] + stp q30, q31, [x0, #CTX_FP_Q30] + + mrs x9, fpsr + str x9, [x0, #CTX_FP_FPSR] + + mrs x10, fpcr + str x10, [x0, #CTX_FP_FPCR] + +#if CTX_INCLUDE_AARCH32_REGS + mrs x11, fpexc32_el2 + str x11, [x0, #CTX_FP_FPEXC32_EL2] +#endif /* CTX_INCLUDE_AARCH32_REGS */ + ret +endfunc fpregs_context_save + +/* ------------------------------------------------------------------ + * The following function follows the aapcs_64 strictly to use x9-x17 + * (temporary caller-saved registers according to AArch64 PCS) to + * restore floating point register context. It assumes that 'x0' is + * pointing to a 'fp_regs' structure from where the register context + * will be restored. + * + * Access to VFP registers will trap if CPTR_EL3.TFP is set. + * However currently we don't use VFP registers nor set traps in + * Trusted Firmware, and assume it's cleared. + * + * TODO: Revisit when VFP is used in secure world + * ------------------------------------------------------------------ + */ +func fpregs_context_restore + ldp q0, q1, [x0, #CTX_FP_Q0] + ldp q2, q3, [x0, #CTX_FP_Q2] + ldp q4, q5, [x0, #CTX_FP_Q4] + ldp q6, q7, [x0, #CTX_FP_Q6] + ldp q8, q9, [x0, #CTX_FP_Q8] + ldp q10, q11, [x0, #CTX_FP_Q10] + ldp q12, q13, [x0, #CTX_FP_Q12] + ldp q14, q15, [x0, #CTX_FP_Q14] + ldp q16, q17, [x0, #CTX_FP_Q16] + ldp q18, q19, [x0, #CTX_FP_Q18] + ldp q20, q21, [x0, #CTX_FP_Q20] + ldp q22, q23, [x0, #CTX_FP_Q22] + ldp q24, q25, [x0, #CTX_FP_Q24] + ldp q26, q27, [x0, #CTX_FP_Q26] + ldp q28, q29, [x0, #CTX_FP_Q28] + ldp q30, q31, [x0, #CTX_FP_Q30] + + ldr x9, [x0, #CTX_FP_FPSR] + msr fpsr, x9 + + ldr x10, [x0, #CTX_FP_FPCR] + msr fpcr, x10 + +#if CTX_INCLUDE_AARCH32_REGS + ldr x11, [x0, #CTX_FP_FPEXC32_EL2] + msr fpexc32_el2, x11 +#endif /* CTX_INCLUDE_AARCH32_REGS */ + + /* + * No explict ISB required here as ERET to + * switch to secure EL1 or non-secure world + * covers it + */ + + ret +endfunc fpregs_context_restore +#endif /* CTX_INCLUDE_FPREGS */ + + /* + * Set the PSTATE bits not set when the exception was taken as + * described in the AArch64.TakeException() pseudocode function + * in ARM DDI 0487F.c page J1-7635 to a default value. + */ + .macro set_unset_pstate_bits + /* + * If Data Independent Timing (DIT) functionality is implemented, + * always enable DIT in EL3 + */ +#if ENABLE_FEAT_DIT + mov x8, #DIT_BIT + msr DIT, x8 +#endif /* ENABLE_FEAT_DIT */ + .endm /* set_unset_pstate_bits */ + +/* ------------------------------------------------------------------ + * The following macro is used to save and restore all the general + * purpose and ARMv8.3-PAuth (if enabled) registers. + * It also checks if the Secure Cycle Counter (PMCCNTR_EL0) + * is disabled in EL3/Secure (ARMv8.5-PMU), wherein PMCCNTR_EL0 + * needs not to be saved/restored during world switch. 
+ * + * Ideally we would only save and restore the callee saved registers + * when a world switch occurs but that type of implementation is more + * complex. So currently we will always save and restore these + * registers on entry and exit of EL3. + * clobbers: x18 + * ------------------------------------------------------------------ + */ + .macro save_gp_pmcr_pauth_regs + stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] + stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] + stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] + stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] + stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] + stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] + stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] + stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] + stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] + stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] + stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20] + stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22] + stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24] + stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26] + stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28] + mrs x18, sp_el0 + str x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0] + + /* ---------------------------------------------------------- + * Check if earlier initialization of MDCR_EL3.SCCD/MCCD to 1 + * has failed. + * + * MDCR_EL3: + * MCCD bit set, Prohibits the Cycle Counter PMCCNTR_EL0 from + * counting at EL3. + * SCCD bit set, Secure Cycle Counter Disable. Prohibits PMCCNTR_EL0 + * from counting in Secure state. + * If these bits are not set, meaning that FEAT_PMUv3p5/7 is + * not implemented and PMCR_EL0 should be saved in non-secure + * context. + * ---------------------------------------------------------- + */ + mov_imm x10, (MDCR_SCCD_BIT | MDCR_MCCD_BIT) + mrs x9, mdcr_el3 + tst x9, x10 + bne 1f + + /* ---------------------------------------------------------- + * If control reaches here, it ensures the Secure Cycle + * Counter (PMCCNTR_EL0) is not prohibited from counting at + * EL3 and in secure states. + * Henceforth, PMCR_EL0 to be saved before world switch. 
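Restated in C, the PMU handling that the macro implements next encodes the following decision (a sketch; the helper and its argument are ours):

#include <arch_helpers.h>

/* Only preserve PMCR_EL0 when MDCR_EL3.SCCD/MCCD could not be set,
 * i.e. when FEAT_PMUv3p5/7 is absent, then freeze the cycle counter
 * for the time spent in EL3. */
static void save_pmcr_el0_if_needed(u_register_t *ns_pmcr_slot)
{
	if ((read_mdcr_el3() & (MDCR_SCCD_BIT | MDCR_MCCD_BIT)) != 0U)
		return; /* cycle counter already prohibited in EL3/Secure */

	u_register_t pmcr = read_pmcr_el0();

	/* The context slot is only written for Non-secure callers */
	if ((read_scr_el3() & SCR_NS_BIT) != 0U)
		*ns_pmcr_slot = pmcr;

	/* Disable cycle counting while event counting is prohibited */
	write_pmcr_el0(pmcr | PMCR_EL0_DP_BIT);
	isb();
}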
+ * ---------------------------------------------------------- + */ + mrs x9, pmcr_el0 + + /* Check caller's security state */ + mrs x10, scr_el3 + tst x10, #SCR_NS_BIT + beq 2f + + /* Save PMCR_EL0 if called from Non-secure state */ + str x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0] + + /* Disable cycle counter when event counting is prohibited */ +2: orr x9, x9, #PMCR_EL0_DP_BIT + msr pmcr_el0, x9 + isb +1: +#if CTX_INCLUDE_PAUTH_REGS + /* ---------------------------------------------------------- + * Save the ARMv8.3-PAuth keys as they are not banked + * by exception level + * ---------------------------------------------------------- + */ + add x19, sp, #CTX_PAUTH_REGS_OFFSET + + mrs x20, APIAKeyLo_EL1 /* x21:x20 = APIAKey */ + mrs x21, APIAKeyHi_EL1 + mrs x22, APIBKeyLo_EL1 /* x23:x22 = APIBKey */ + mrs x23, APIBKeyHi_EL1 + mrs x24, APDAKeyLo_EL1 /* x25:x24 = APDAKey */ + mrs x25, APDAKeyHi_EL1 + mrs x26, APDBKeyLo_EL1 /* x27:x26 = APDBKey */ + mrs x27, APDBKeyHi_EL1 + mrs x28, APGAKeyLo_EL1 /* x29:x28 = APGAKey */ + mrs x29, APGAKeyHi_EL1 + + stp x20, x21, [x19, #CTX_PACIAKEY_LO] + stp x22, x23, [x19, #CTX_PACIBKEY_LO] + stp x24, x25, [x19, #CTX_PACDAKEY_LO] + stp x26, x27, [x19, #CTX_PACDBKEY_LO] + stp x28, x29, [x19, #CTX_PACGAKEY_LO] +#endif /* CTX_INCLUDE_PAUTH_REGS */ + .endm /* save_gp_pmcr_pauth_regs */ + +/* ----------------------------------------------------------------- + * This function saves the context and sets the PSTATE to a known + * state, preparing entry to el3. + * Save all the general purpose and ARMv8.3-PAuth (if enabled) + * registers. + * Then set any of the PSTATE bits that are not set by hardware + * according to the Aarch64.TakeException pseudocode in the Arm + * Architecture Reference Manual to a default value for EL3. + * clobbers: x17 + * ----------------------------------------------------------------- + */ +func prepare_el3_entry + save_gp_pmcr_pauth_regs + /* + * Set the PSTATE bits not described in the Aarch64.TakeException + * pseudocode to their default values. + */ + set_unset_pstate_bits + ret +endfunc prepare_el3_entry + +/* ------------------------------------------------------------------ + * This function restores ARMv8.3-PAuth (if enabled) and all general + * purpose registers except x30 from the CPU context. + * x30 register must be explicitly restored by the caller. + * ------------------------------------------------------------------ + */ +func restore_gp_pmcr_pauth_regs +#if CTX_INCLUDE_PAUTH_REGS + /* Restore the ARMv8.3 PAuth keys */ + add x10, sp, #CTX_PAUTH_REGS_OFFSET + + ldp x0, x1, [x10, #CTX_PACIAKEY_LO] /* x1:x0 = APIAKey */ + ldp x2, x3, [x10, #CTX_PACIBKEY_LO] /* x3:x2 = APIBKey */ + ldp x4, x5, [x10, #CTX_PACDAKEY_LO] /* x5:x4 = APDAKey */ + ldp x6, x7, [x10, #CTX_PACDBKEY_LO] /* x7:x6 = APDBKey */ + ldp x8, x9, [x10, #CTX_PACGAKEY_LO] /* x9:x8 = APGAKey */ + + msr APIAKeyLo_EL1, x0 + msr APIAKeyHi_EL1, x1 + msr APIBKeyLo_EL1, x2 + msr APIBKeyHi_EL1, x3 + msr APDAKeyLo_EL1, x4 + msr APDAKeyHi_EL1, x5 + msr APDBKeyLo_EL1, x6 + msr APDBKeyHi_EL1, x7 + msr APGAKeyLo_EL1, x8 + msr APGAKeyHi_EL1, x9 +#endif /* CTX_INCLUDE_PAUTH_REGS */ + + /* ---------------------------------------------------------- + * Restore PMCR_EL0 when returning to Non-secure state if + * Secure Cycle Counter is not disabled in MDCR_EL3 when + * ARMv8.5-PMU is implemented. 
+ * ---------------------------------------------------------- + */ + mrs x0, scr_el3 + tst x0, #SCR_NS_BIT + beq 2f + + /* ---------------------------------------------------------- + * Back to Non-secure state. + * Check if earlier initialization MDCR_EL3.SCCD/MCCD to 1 + * failed, meaning that FEAT_PMUv3p5/7 is not implemented and + * PMCR_EL0 should be restored from non-secure context. + * ---------------------------------------------------------- + */ + mov_imm x1, (MDCR_SCCD_BIT | MDCR_MCCD_BIT) + mrs x0, mdcr_el3 + tst x0, x1 + bne 2f + ldr x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0] + msr pmcr_el0, x0 +2: + ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] + ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] + ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] + ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] + ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] + ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] + ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] + ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] + ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] + ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] + ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20] + ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22] + ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24] + ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26] + ldr x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0] + msr sp_el0, x28 + ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28] + ret +endfunc restore_gp_pmcr_pauth_regs + +/* + * In case of ERRATA_SPECULATIVE_AT, save SCTLR_EL1 and TCR_EL1 + * registers and update EL1 registers to disable stage1 and stage2 + * page table walk + */ +func save_and_update_ptw_el1_sys_regs + /* ---------------------------------------------------------- + * Save only sctlr_el1 and tcr_el1 registers + * ---------------------------------------------------------- + */ + mrs x29, sctlr_el1 + str x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1)] + mrs x29, tcr_el1 + str x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_TCR_EL1)] + + /* ------------------------------------------------------------ + * Must follow below order in order to disable page table + * walk for lower ELs (EL1 and EL0). First step ensures that + * page table walk is disabled for stage1 and second step + * ensures that page table walker should use TCR_EL1.EPDx + * bits to perform address translation. ISB ensures that CPU + * does these 2 steps in order. + * + * 1. Update TCR_EL1.EPDx bits to disable page table walk by + * stage1. + * 2. Enable MMU bit to avoid identity mapping via stage2 + * and force TCR_EL1.EPDx to be used by the page table + * walker. + * ------------------------------------------------------------ + */ + orr x29, x29, #(TCR_EPD0_BIT) + orr x29, x29, #(TCR_EPD1_BIT) + msr tcr_el1, x29 + isb + mrs x29, sctlr_el1 + orr x29, x29, #SCTLR_M_BIT + msr sctlr_el1, x29 + isb + + ret +endfunc save_and_update_ptw_el1_sys_regs + +/* ------------------------------------------------------------------ + * This routine assumes that the SP_EL3 is pointing to a valid + * context structure from where the gp regs and other special + * registers can be retrieved. 
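In C, the ordering contract of save_and_update_ptw_el1_sys_regs above reads as follows (a sketch of the sequence only; the real workaround stays in assembly for use on the exception path):

#include <arch_helpers.h>

/* ERRATA_SPECULATIVE_AT: stop stage 1 walks first, then set the MMU
 * bit so the walker honours TCR_EL1.EPDx rather than performing an
 * identity-mapped stage 2 walk. The ISB orders the two writes. */
static void disable_el1_ptw(void)
{
	write_tcr_el1(read_tcr_el1() | TCR_EPD0_BIT | TCR_EPD1_BIT);
	isb();
	write_sctlr_el1(read_sctlr_el1() | SCTLR_M_BIT);
	isb();
}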
+ * ------------------------------------------------------------------ + */ +func el3_exit +#if ENABLE_ASSERTIONS + /* el3_exit assumes SP_EL0 on entry */ + mrs x17, spsel + cmp x17, #MODE_SP_EL0 + ASM_ASSERT(eq) +#endif /* ENABLE_ASSERTIONS */ + + /* ---------------------------------------------------------- + * Save the current SP_EL0 i.e. the EL3 runtime stack which + * will be used for handling the next SMC. + * Then switch to SP_EL3. + * ---------------------------------------------------------- + */ + mov x17, sp + msr spsel, #MODE_SP_ELX + str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP] + + /* ---------------------------------------------------------- + * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET + * ---------------------------------------------------------- + */ + ldr x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3] + ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3] + msr scr_el3, x18 + msr spsr_el3, x16 + msr elr_el3, x17 + +#if IMAGE_BL31 + /* ---------------------------------------------------------- + * Restore CPTR_EL3. + * ZCR is only restored if SVE is supported and enabled. + * Synchronization is required before zcr_el3 is addressed. + * ---------------------------------------------------------- + */ + ldp x19, x20, [sp, #CTX_EL3STATE_OFFSET + CTX_CPTR_EL3] + msr cptr_el3, x19 + + ands x19, x19, #CPTR_EZ_BIT + beq sve_not_enabled + + isb + msr S3_6_C1_C2_0, x20 /* zcr_el3 */ +sve_not_enabled: +#endif /* IMAGE_BL31 */ + +#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 + /* ---------------------------------------------------------- + * Restore mitigation state as it was on entry to EL3 + * ---------------------------------------------------------- + */ + ldr x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE] + cbz x17, 1f + blr x17 +1: +#endif /* IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 */ + + restore_ptw_el1_sys_regs + + /* ---------------------------------------------------------- + * Restore general purpose (including x30), PMCR_EL0 and + * ARMv8.3-PAuth registers. + * Exit EL3 via ERET to a lower exception level. + * ---------------------------------------------------------- + */ + bl restore_gp_pmcr_pauth_regs + ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] + +#if IMAGE_BL31 && RAS_EXTENSION + /* ---------------------------------------------------------- + * Issue Error Synchronization Barrier to synchronize SErrors + * before exiting EL3. We're running with EAs unmasked, so + * any synchronized errors would be taken immediately; + * therefore no need to inspect DISR_EL1 register. + * ---------------------------------------------------------- + */ + esb +#else + dsb sy +#endif /* IMAGE_BL31 && RAS_EXTENSION */ + +#ifdef IMAGE_BL31 + str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3] +#endif /* IMAGE_BL31 */ + + exception_return + +endfunc el3_exit diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c new file mode 100644 index 0000000..866ac41 --- /dev/null +++ b/lib/el3_runtime/aarch64/context_mgmt.c @@ -0,0 +1,1098 @@ +/* + * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2022, NVIDIA Corporation. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <stdbool.h> +#include <string.h> + +#include <platform_def.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <arch_features.h> +#include <bl31/interrupt_mgmt.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <context.h> +#include <drivers/arm/gicv3.h> +#include <lib/el3_runtime/context_mgmt.h> +#include <lib/el3_runtime/pubsub_events.h> +#include <lib/extensions/amu.h> +#include <lib/extensions/brbe.h> +#include <lib/extensions/mpam.h> +#include <lib/extensions/sme.h> +#include <lib/extensions/spe.h> +#include <lib/extensions/sve.h> +#include <lib/extensions/sys_reg_trace.h> +#include <lib/extensions/trbe.h> +#include <lib/extensions/trf.h> +#include <lib/utils.h> + +#if ENABLE_FEAT_TWED +/* Make sure delay value fits within the range (0-15) */ +CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check); +#endif /* ENABLE_FEAT_TWED */ + +static void manage_extensions_secure(cpu_context_t *ctx); + +static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep) +{ + u_register_t sctlr_elx, actlr_elx; + + /* + * Initialise SCTLR_EL1 to the reset value corresponding to the target + * execution state setting all fields rather than relying on the hw. + * Some fields have architecturally UNKNOWN reset values and these are + * set to zero. + * + * SCTLR.EE: Endianness is taken from the entrypoint attributes. + * + * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as + * required by PSCI specification) + */ + sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL; + if (GET_RW(ep->spsr) == MODE_RW_64) { + sctlr_elx |= SCTLR_EL1_RES1; + } else { + /* + * If the target execution state is AArch32 then the following + * fields need to be set. + * + * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE + * instructions is not trapped to EL1. + * + * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI + * instructions is not trapped to EL1. + * + * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the + * CP15DMB, CP15DSB, and CP15ISB instructions. + */ + sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT + | SCTLR_NTWI_BIT | SCTLR_NTWE_BIT; + } + +#if ERRATA_A75_764081 + /* + * If workaround of errata 764081 for Cortex-A75 is used then set + * SCTLR_EL1.IESB to enable Implicit Error Synchronization Barrier. + */ + sctlr_elx |= SCTLR_IESB_BIT; +#endif + /* Store the initialised SCTLR_EL1 value in the cpu_context */ + write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx); + + /* + * Base the context ACTLR_EL1 on the current value, as it is + * implementation defined. The context restore process will write + * the value from the context to the actual register and can cause + * problems for processor cores that don't expect certain bits to + * be zero. + */ + actlr_elx = read_actlr_el1(); + write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx)); +} + +/****************************************************************************** + * This function performs initializations that are specific to SECURE state + * and updates the cpu context specified by 'ctx'.
+ *****************************************************************************/ +static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep) +{ + u_register_t scr_el3; + el3_state_t *state; + + state = get_el3state_ctx(ctx); + scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); + +#if defined(IMAGE_BL31) && !defined(SPD_spmd) + /* + * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as + * indicated by the interrupt routing model for BL31. + */ + scr_el3 |= get_scr_el3_from_routing_model(SECURE); +#endif + +#if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS + /* Get Memory Tagging Extension support level */ + unsigned int mte = get_armv8_5_mte_support(); +#endif + /* + * Allow access to Allocation Tags when CTX_INCLUDE_MTE_REGS + * is set, or when MTE is only implemented at EL0. + */ +#if CTX_INCLUDE_MTE_REGS + assert((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY)); + scr_el3 |= SCR_ATA_BIT; +#else + if (mte == MTE_IMPLEMENTED_EL0) { + scr_el3 |= SCR_ATA_BIT; + } +#endif /* CTX_INCLUDE_MTE_REGS */ + + /* Enable S-EL2 if the next EL is EL2 and S-EL2 is present */ + if ((GET_EL(ep->spsr) == MODE_EL2) && is_armv8_4_sel2_present()) { + if (GET_RW(ep->spsr) != MODE_RW_64) { + ERROR("S-EL2 can not be used in AArch32.\n"); + panic(); + } + + scr_el3 |= SCR_EEL2_BIT; + } + + write_ctx_reg(state, CTX_SCR_EL3, scr_el3); + + /* + * Initialize EL1 context registers unless SPMC is running + * at S-EL2. + */ +#if !SPMD_SPM_AT_SEL2 + setup_el1_context(ctx, ep); +#endif + + manage_extensions_secure(ctx); +} + +#if ENABLE_RME +/****************************************************************************** + * This function performs initializations that are specific to REALM state + * and updates the cpu context specified by 'ctx'. + *****************************************************************************/ +static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep) +{ + u_register_t scr_el3; + el3_state_t *state; + + state = get_el3state_ctx(ctx); + scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); + + scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT | SCR_EnSCXT_BIT; + + write_ctx_reg(state, CTX_SCR_EL3, scr_el3); +} +#endif /* ENABLE_RME */ + +/****************************************************************************** + * This function performs initializations that are specific to NON-SECURE state + * and updates the cpu context specified by 'ctx'. + *****************************************************************************/ +static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep) +{ + u_register_t scr_el3; + el3_state_t *state; + + state = get_el3state_ctx(ctx); + scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); + + /* SCR_NS: Set the NS bit */ + scr_el3 |= SCR_NS_BIT; + +#if !CTX_INCLUDE_PAUTH_REGS + /* + * If the pointer authentication registers aren't saved during world + * switches, the value of the registers can be leaked from the Secure to + * the Non-secure world. To prevent this, rather than enabling pointer + * authentication everywhere, we only enable it in the Non-secure world. + * + * If the Secure world wants to use pointer authentication, + * CTX_INCLUDE_PAUTH_REGS must be set to 1. + */ + scr_el3 |= SCR_API_BIT | SCR_APK_BIT; +#endif /* !CTX_INCLUDE_PAUTH_REGS */ + + /* Allow access to Allocation Tags when MTE is implemented. */ + scr_el3 |= SCR_ATA_BIT; + +#if HANDLE_EA_EL3_FIRST_NS + /* SCR_EL3.EA: Route External Abort and SError Interrupt to EL3.
*/ + scr_el3 |= SCR_EA_BIT; +#endif + +#if RAS_TRAP_NS_ERR_REC_ACCESS + /* + * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR + * and RAS ERX registers from EL1 and EL2(from any security state) + * are trapped to EL3. + * Set here to trap only for NS EL1/EL2 + * + */ + scr_el3 |= SCR_TERR_BIT; +#endif + +#ifdef IMAGE_BL31 + /* + * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as + * indicated by the interrupt routing model for BL31. + */ + scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE); +#endif + write_ctx_reg(state, CTX_SCR_EL3, scr_el3); + + /* Initialize EL1 context registers */ + setup_el1_context(ctx, ep); + + /* Initialize EL2 context registers */ +#if CTX_INCLUDE_EL2_REGS + + /* + * Initialize SCTLR_EL2 context register using Endianness value + * taken from the entrypoint attribute. + */ + u_register_t sctlr_el2 = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL; + sctlr_el2 |= SCTLR_EL2_RES1; + write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_SCTLR_EL2, + sctlr_el2); + + /* + * Program the ICC_SRE_EL2 to make sure the correct bits are set + * when restoring NS context. + */ + u_register_t icc_sre_el2 = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT | + ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT; + write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_ICC_SRE_EL2, + icc_sre_el2); + + /* + * Initialize MDCR_EL2.HPMN to its hardware reset value so we don't + * throw anyone off who expects this to be sensible. + * TODO: A similar thing happens in cm_prepare_el3_exit. They should be + * unified with the proper PMU implementation + */ + u_register_t mdcr_el2 = ((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & + PMCR_EL0_N_MASK); + write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_MDCR_EL2, mdcr_el2); +#endif /* CTX_INCLUDE_EL2_REGS */ +} + +/******************************************************************************* + * The following function performs initialization of the cpu_context 'ctx' + * for first use that is common to all security states, and sets the + * initial entrypoint state as specified by the entry_point_info structure. + * + * The EE and ST attributes are used to configure the endianness and secure + * timer availability for the new execution context. + ******************************************************************************/ +static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep) +{ + u_register_t scr_el3; + el3_state_t *state; + gp_regs_t *gp_regs; + + /* Clear any residual register values from the context */ + zeromem(ctx, sizeof(*ctx)); + + /* + * SCR_EL3 was initialised during reset sequence in macro + * el3_arch_init_common. This code modifies the SCR_EL3 fields that + * affect the next EL. + * + * The following fields are initially set to zero and then updated to + * the required value depending on the state of the SPSR_EL3 and the + * Security state and entrypoint attributes of the next EL. + */ + scr_el3 = read_scr(); + scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_EA_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT | + SCR_ST_BIT | SCR_HCE_BIT | SCR_NSE_BIT); + + /* + * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for next + * Exception level as specified by SPSR. + */ + if (GET_RW(ep->spsr) == MODE_RW_64) { + scr_el3 |= SCR_RW_BIT; + } + + /* + * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical + * Secure timer registers to EL3, from AArch64 state only, if specified + * by the entrypoint attributes. If SEL2 is present and enabled, the ST + * bit always behaves as 1 (i.e. 
secure physical timer register access + * is not trapped) + */ + if (EP_GET_ST(ep->h.attr) != 0U) { + scr_el3 |= SCR_ST_BIT; + } + + /* + * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting + * SCR_EL3.HXEn. + */ +#if ENABLE_FEAT_HCX + scr_el3 |= SCR_HXEn_BIT; +#endif + + /* + * If FEAT_RNG_TRAP is enabled, all reads of the RNDR and RNDRRS + * registers are trapped to EL3. + */ +#if ENABLE_FEAT_RNG_TRAP + scr_el3 |= SCR_TRNDR_BIT; +#endif + +#if FAULT_INJECTION_SUPPORT + /* Enable fault injection from lower ELs */ + scr_el3 |= SCR_FIEN_BIT; +#endif + + /* + * CPTR_EL3 was initialized out of reset, copy that value to the + * context register. + */ + write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, read_cptr_el3()); + + /* + * SCR_EL3.HCE: Enable HVC instructions if next execution state is + * AArch64 and next EL is EL2, or if next execution state is AArch32 and + * next mode is Hyp. + * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the + * same conditions as HVC instructions and when the processor supports + * ARMv8.6-FGT. + * SCR_EL3.ECVEn: Enable Enhanced Counter Virtualization (ECV) + * CNTPOFF_EL2 register under the same conditions as HVC instructions + * and when the processor supports ECV. + */ + if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2)) + || ((GET_RW(ep->spsr) != MODE_RW_64) + && (GET_M32(ep->spsr) == MODE32_hyp))) { + scr_el3 |= SCR_HCE_BIT; + + if (is_armv8_6_fgt_present()) { + scr_el3 |= SCR_FGTEN_BIT; + } + + if (get_armv8_6_ecv_support() + == ID_AA64MMFR0_EL1_ECV_SELF_SYNCH) { + scr_el3 |= SCR_ECVEN_BIT; + } + } + +#if ENABLE_FEAT_TWED + /* Enable WFE trap delay in SCR_EL3 if supported and configured */ + /* Set delay in SCR_EL3 */ + scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT); + scr_el3 |= ((TWED_DELAY & SCR_TWEDEL_MASK) + << SCR_TWEDEL_SHIFT); + + /* Enable WFE delay */ + scr_el3 |= SCR_TWEDEn_BIT; +#endif /* ENABLE_FEAT_TWED */ + + /* + * Populate EL3 state so that we've the right context + * before doing ERET + */ + state = get_el3state_ctx(ctx); + write_ctx_reg(state, CTX_SCR_EL3, scr_el3); + write_ctx_reg(state, CTX_ELR_EL3, ep->pc); + write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr); + + /* + * Store the X0-X7 value from the entrypoint into the context + * Use memcpy as we are in control of the layout of the structures + */ + gp_regs = get_gpregs_ctx(ctx); + memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t)); +} + +/******************************************************************************* + * Context management library initialization routine. This library is used by + * runtime services to share pointers to 'cpu_context' structures for secure + * non-secure and realm states. Management of the structures and their associated + * memory is not done by the context management library e.g. the PSCI service + * manages the cpu context used for entry from and exit to the non-secure state. + * The Secure payload dispatcher service manages the context(s) corresponding to + * the secure state. It also uses this library to get access to the non-secure + * state cpu context pointers. + * Lastly, this library provides the API to make SP_EL3 point to the cpu context + * which will be used for programming an entry into a lower EL. The same context + * will be used to save state upon exception entry from that EL. 
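As a usage sketch of the API described above, a BL31-resident service could describe an AArch64 EL2 Non-secure payload and initialise this CPU's context for it like so (the wrapper and its pc argument are illustrative, not part of this patch):

#include <common/bl_common.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/utils.h>

/* Illustrative: build an entry_point_info and hand it to the library */
static void init_ns_payload_context(uintptr_t pc)
{
	entry_point_info_t ep;

	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, EP_NON_SECURE);
	ep.pc = pc;
	ep.spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	zeromem(&ep.args, sizeof(ep.args));

	cm_init_my_context(&ep); /* runs cm_setup_context() shown below */
}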
+
+/*******************************************************************************
+ * Context management library initialization routine. This library is used by
+ * runtime services to share pointers to 'cpu_context' structures for the
+ * secure, non-secure and realm states. Management of the structures and their
+ * associated memory is not done by the context management library, e.g. the
+ * PSCI service manages the cpu context used for entry from and exit to the
+ * non-secure state. The Secure payload dispatcher service manages the
+ * context(s) corresponding to the secure state. It also uses this library to
+ * get access to the non-secure state cpu context pointers.
+ * Lastly, this library provides the API to make SP_EL3 point to the cpu
+ * context which will be used for programming an entry into a lower EL. The
+ * same context will be used to save state upon exception entry from that EL.
+ ******************************************************************************/
+void __init cm_init(void)
+{
+	/*
+	 * The context management library has only global data to initialize,
+	 * but that will be done when the BSS is zeroed out.
+	 */
+}
+
+/*******************************************************************************
+ * This is the high-level function used to initialize the cpu_context 'ctx'
+ * for first use. It performs initializations that are common to all security
+ * states and initializations specific to the security state specified in
+ * 'ep'.
+ ******************************************************************************/
+void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
+{
+	unsigned int security_state;
+
+	assert(ctx != NULL);
+
+	/*
+	 * Perform initializations that are common
+	 * to all security states.
+	 */
+	setup_context_common(ctx, ep);
+
+	security_state = GET_SECURITY_STATE(ep->h.attr);
+
+	/* Perform security state specific initializations */
+	switch (security_state) {
+	case SECURE:
+		setup_secure_context(ctx, ep);
+		break;
+#if ENABLE_RME
+	case REALM:
+		setup_realm_context(ctx, ep);
+		break;
+#endif
+	case NON_SECURE:
+		setup_ns_context(ctx, ep);
+		break;
+	default:
+		ERROR("Invalid security state\n");
+		panic();
+		break;
+	}
+}
+
+/*******************************************************************************
+ * Enable architecture extensions on first entry to the Non-secure world.
+ * When EL2 is implemented but unused, `el2_unused` is non-zero; otherwise
+ * it is zero.
+ ******************************************************************************/
+static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
+{
+#if IMAGE_BL31
+#if ENABLE_SPE_FOR_LOWER_ELS
+	spe_enable(el2_unused);
+#endif
+
+#if ENABLE_AMU
+	amu_enable(el2_unused, ctx);
+#endif
+
+#if ENABLE_SME_FOR_NS
+	/* Enable SME, SVE and FPU/SIMD for the non-secure world. */
+	sme_enable(ctx);
+#elif ENABLE_SVE_FOR_NS
+	/* Enable SVE and FPU/SIMD for the non-secure world. */
+	sve_enable(ctx);
+#endif
+
+#if ENABLE_MPAM_FOR_LOWER_ELS
+	mpam_enable(el2_unused);
+#endif
+
+#if ENABLE_TRBE_FOR_NS
+	trbe_enable();
+#endif /* ENABLE_TRBE_FOR_NS */
+
+#if ENABLE_BRBE_FOR_NS
+	brbe_enable();
+#endif /* ENABLE_BRBE_FOR_NS */
+
+#if ENABLE_SYS_REG_TRACE_FOR_NS
+	sys_reg_trace_enable(ctx);
+#endif /* ENABLE_SYS_REG_TRACE_FOR_NS */
+
+#if ENABLE_TRF_FOR_NS
+	trf_enable();
+#endif /* ENABLE_TRF_FOR_NS */
+#endif /* IMAGE_BL31 */
+}
+
+/*******************************************************************************
+ * Enable architecture extensions on first entry to the Secure world.
+ ******************************************************************************/
+static void manage_extensions_secure(cpu_context_t *ctx)
+{
+#if IMAGE_BL31
+#if ENABLE_SME_FOR_NS
+#if ENABLE_SME_FOR_SWD
+	/*
+	 * Enable SME, SVE and FPU/SIMD in the secure context; the secure
+	 * manager must ensure that the SME, SVE and FPU/SIMD contexts are
+	 * properly managed.
+	 */
+	sme_enable(ctx);
+#else /* ENABLE_SME_FOR_SWD */
+	/*
+	 * Disable SME, SVE and FPU/SIMD in the secure context so the
+	 * non-secure world can safely use the associated registers.
+	 */
+	sme_disable(ctx);
+#endif /* ENABLE_SME_FOR_SWD */
+#elif ENABLE_SVE_FOR_NS
+#if ENABLE_SVE_FOR_SWD
+	/*
+	 * Enable SVE and FPU in the secure context; the secure manager must
+	 * ensure that the SVE and FPU register contexts are properly managed.
+	 */
+	sve_enable(ctx);
+#else /* ENABLE_SVE_FOR_SWD */
+	/*
+	 * Disable SVE and FPU in the secure context so the non-secure world
+	 * can safely use them.
+	 */
+	sve_disable(ctx);
+#endif /* ENABLE_SVE_FOR_SWD */
+#endif /* ENABLE_SVE_FOR_NS */
+#endif /* IMAGE_BL31 */
+}
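All of these enable/disable helpers share one pattern: rather than writing the live system registers, they edit the register images saved in 'ctx', so the change only takes effect on the next ERET into that world. A minimal sketch of the pattern, using only the context accessors this file already uses (the helper name is invented):

	#include <context.h>	/* cpu_context_t, get_el3state_ctx, CTX_CPTR_EL3 */

	/* Sketch: read-modify-write of the CPTR_EL3 image saved in a context */
	static void ctx_update_cptr_el3(cpu_context_t *ctx, u_register_t set,
					u_register_t clear)
	{
		el3_state_t *state = get_el3state_ctx(ctx);
		u_register_t cptr_el3 = read_ctx_reg(state, CTX_CPTR_EL3);

		cptr_el3 &= ~clear;
		cptr_el3 |= set;
		write_ctx_reg(state, CTX_CPTR_EL3, cptr_el3);
	}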
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for a CPU specified by
+ * its `cpu_idx` for first use, and sets the initial entrypoint state as
+ * specified by the entry_point_info structure.
+ ******************************************************************************/
+void cm_init_context_by_index(unsigned int cpu_idx,
+			      const entry_point_info_t *ep)
+{
+	cpu_context_t *ctx;
+	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
+	cm_setup_context(ctx, ep);
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for the current CPU
+ * for first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ ******************************************************************************/
+void cm_init_my_context(const entry_point_info_t *ep)
+{
+	cpu_context_t *ctx;
+	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
+	cm_setup_context(ctx, ep);
+}
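A typical caller fills in an entry_point_info_t and lets the library do the rest. A hedged sketch of a PSCI-style caller (the function name, 'target_idx' and 'ns_entrypoint' are placeholders, and real PSCI code derives the target EL and attributes from platform state rather than hard-coding them):

	#include <arch.h>
	#include <common/bl_common.h>
	#include <lib/el3_runtime/context_mgmt.h>
	#include <lib/utils.h>

	/* Sketch: initialise another CPU's NS context before powering it on */
	static void prepare_cpu_on(unsigned int target_idx, uintptr_t ns_entrypoint)
	{
		entry_point_info_t ep;

		SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
			       NON_SECURE | EP_ST_DISABLE);
		ep.pc = ns_entrypoint;
		ep.spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
		zeromem(&ep.args, sizeof(ep.args));

		cm_init_context_by_index(target_idx, &ep);
	}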
+
+/*******************************************************************************
+ * Prepare the CPU system registers for first entry into the realm, secure or
+ * normal world.
+ *
+ * If execution is requested to EL2 or Hyp mode, SCTLR_EL2 is initialized.
+ * If execution is requested to non-secure EL1 or svc mode, and the CPU
+ * supports EL2, then EL2 is disabled by configuring all necessary EL2
+ * registers.
+ * For all entries, the EL1 registers are initialized from the cpu_context.
+ ******************************************************************************/
+void cm_prepare_el3_exit(uint32_t security_state)
+{
+	u_register_t sctlr_elx, scr_el3, mdcr_el2;
+	cpu_context_t *ctx = cm_get_context(security_state);
+	bool el2_unused = false;
+	uint64_t hcr_el2 = 0U;
+
+	assert(ctx != NULL);
+
+	if (security_state == NON_SECURE) {
+		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
+				       CTX_SCR_EL3);
+		if ((scr_el3 & SCR_HCE_BIT) != 0U) {
+			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
+			sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx),
+						 CTX_SCTLR_EL1);
+			sctlr_elx &= SCTLR_EE_BIT;
+			sctlr_elx |= SCTLR_EL2_RES1;
+#if ERRATA_A75_764081
+			/*
+			 * If the workaround for Cortex-A75 erratum 764081 is
+			 * used then set SCTLR_EL2.IESB to enable the Implicit
+			 * Error Synchronization Barrier.
+			 */
+			sctlr_elx |= SCTLR_IESB_BIT;
+#endif
+			write_sctlr_el2(sctlr_elx);
+		} else if (el_implemented(2) != EL_IMPL_NONE) {
+			el2_unused = true;
+
+			/*
+			 * EL2 present but unused, need to disable safely.
+			 * SCTLR_EL2 can be ignored in this case.
+			 *
+			 * Set the EL2 register width appropriately: set the
+			 * HCR_EL2 field to match SCR_EL3.RW.
+			 */
+			if ((scr_el3 & SCR_RW_BIT) != 0U)
+				hcr_el2 |= HCR_RW_BIT;
+
+			/*
+			 * For the Armv8.3 pointer authentication feature,
+			 * disable traps to EL2 when accessing key registers
+			 * or using pointer authentication instructions from
+			 * lower ELs.
+			 */
+			hcr_el2 |= (HCR_API_BIT | HCR_APK_BIT);
+
+			write_hcr_el2(hcr_el2);
+
+			/*
+			 * Initialise CPTR_EL2, setting all fields rather than
+			 * relying on the hw. All fields have architecturally
+			 * UNKNOWN reset values.
+			 *
+			 * CPTR_EL2.TCPAC: Set to zero so that Non-secure EL1
+			 * accesses to the CPACR_EL1 or CPACR from both
+			 * Execution states do not trap to EL2.
+			 *
+			 * CPTR_EL2.TTA: Set to zero so that Non-secure System
+			 * register accesses to the trace registers from both
+			 * Execution states do not trap to EL2.
+			 * If PE trace unit System registers are not
+			 * implemented then this bit is reserved, and must be
+			 * set to zero.
+			 *
+			 * CPTR_EL2.TFP: Set to zero so that Non-secure
+			 * accesses to SIMD and floating-point functionality
+			 * from both Execution states do not trap to EL2.
+			 */
+			write_cptr_el2(CPTR_EL2_RESET_VAL &
+					~(CPTR_EL2_TCPAC_BIT | CPTR_EL2_TTA_BIT
+					| CPTR_EL2_TFP_BIT));
+
+			/*
+			 * Initialise CNTHCTL_EL2. All fields are
+			 * architecturally UNKNOWN on reset and are set to zero
+			 * except for the field(s) listed below.
+			 *
+			 * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to
+			 * Hyp mode of Non-secure EL0 and EL1 accesses to the
+			 * physical timer registers.
+			 *
+			 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to
+			 * Hyp mode of Non-secure EL0 and EL1 accesses to the
+			 * physical counter registers.
+			 */
+			write_cnthctl_el2(CNTHCTL_RESET_VAL |
+						EL1PCEN_BIT | EL1PCTEN_BIT);
+
+			/*
+			 * Initialise CNTVOFF_EL2 to zero as it resets to an
+			 * architecturally UNKNOWN value.
+			 */
+			write_cntvoff_el2(0);
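+
+			/*
+			 * Note: the architecture defines CNTVCT_EL0 as
+			 * CNTPCT_EL0 - CNTVOFF_EL2, so zeroing the offset
+			 * keeps the virtual and physical counters in step
+			 * for the incoming Non-secure software.
+			 */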
+
+			/*
+			 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and
+			 * MPIDR_EL1 respectively.
+			 */
+			write_vpidr_el2(read_midr_el1());
+			write_vmpidr_el2(read_mpidr_el1());
+
+			/*
+			 * Initialise VTTBR_EL2. All fields are architecturally
+			 * UNKNOWN on reset.
+			 *
+			 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage
+			 * 2 address translation is disabled, cache maintenance
+			 * operations depend on the VMID.
+			 *
+			 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address
+			 * translation is disabled.
+			 */
+			write_vttbr_el2(VTTBR_RESET_VAL &
+				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
+				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));
+
+			/*
+			 * Initialise MDCR_EL2, setting all fields rather than
+			 * relying on hw. Some fields are architecturally
+			 * UNKNOWN on reset.
+			 *
+			 * MDCR_EL2.HLP: Set to one so that event counter
+			 * overflow, that is recorded in PMOVSCLR_EL0[0-30],
+			 * occurs on the increment that changes
+			 * PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU
+			 * is implemented. This bit is RES0 in versions of the
+			 * architecture earlier than ARMv8.5; setting it to 1
+			 * doesn't have any effect on them.
+			 *
+			 * MDCR_EL2.TTRF: Set to zero so that access to the
+			 * Trace Filter Control register TRFCR_EL1 at EL1 is
+			 * not trapped to EL2. This bit is RES0 in versions of
+			 * the architecture earlier than ARMv8.4.
+			 *
+			 * MDCR_EL2.HPMD: Set to one so that event counting is
+			 * prohibited at EL2. This bit is RES0 in versions of
+			 * the architecture earlier than ARMv8.1; setting it
+			 * to 1 doesn't have any effect on them.
+			 *
+			 * MDCR_EL2.TPMS: Set to zero so that accesses to
+			 * Statistical Profiling control registers from EL1
+			 * do not trap to EL2. This bit is RES0 when SPE is
+			 * not implemented.
+			 *
+			 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0
+			 * and EL1 System register accesses to the Debug ROM
+			 * registers are not trapped to EL2.
+			 *
+			 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1
+			 * System register accesses to the powerdown debug
+			 * registers are not trapped to EL2.
+			 *
+			 * MDCR_EL2.TDA: Set to zero so that System register
+			 * accesses to the debug registers do not trap to EL2.
+			 *
+			 * MDCR_EL2.TDE: Set to zero so that debug exceptions
+			 * are not routed to EL2.
+			 *
+			 * MDCR_EL2.HPME: Set to zero to disable the EL2
+			 * Performance Monitors.
+			 *
+			 * MDCR_EL2.TPM: Set to zero so that Non-secure EL0 and
+			 * EL1 accesses to all Performance Monitors registers
+			 * are not trapped to EL2.
+			 *
+			 * MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
+			 * and EL1 accesses to the PMCR_EL0 or PMCR are not
+			 * trapped to EL2.
+			 *
+			 * MDCR_EL2.HPMN: Set to the value of PMCR_EL0.N, which
+			 * is the architecturally-defined reset value.
+			 *
+			 * MDCR_EL2.E2TB: Set to zero so that the trace buffer
+			 * owning exception level is NS-EL1 and tracing is
+			 * prohibited at NS-EL2. These bits are RES0 when
+			 * FEAT_TRBE is not implemented.
+			 */
+			mdcr_el2 = ((MDCR_EL2_RESET_VAL | MDCR_EL2_HLP |
+				     MDCR_EL2_HPMD) |
+				   ((read_pmcr_el0() & PMCR_EL0_N_BITS)
+				   >> PMCR_EL0_N_SHIFT)) &
+				   ~(MDCR_EL2_TTRF | MDCR_EL2_TPMS |
+				     MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT |
+				     MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT |
+				     MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT |
+				     MDCR_EL2_TPMCR_BIT |
+				     MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1));
+
+			write_mdcr_el2(mdcr_el2);
+
+			/*
+			 * Initialise HSTR_EL2. All fields are architecturally
+			 * UNKNOWN on reset.
+			 *
+			 * HSTR_EL2.T<n>: Set all these fields to zero so that
+			 * Non-secure EL0 or EL1 accesses to System registers
+			 * do not trap to EL2.
+			 */
+			write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));
+
+			/*
+			 * Initialise CNTHP_CTL_EL2. All fields are
+			 * architecturally UNKNOWN on reset.
+			 *
+			 * CNTHP_CTL_EL2.ENABLE: Set to zero to disable the EL2
+			 * physical timer and prevent timer interrupts.
+			 */
+			write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
+						~(CNTHP_CTL_ENABLE_BIT));
+		}
+		manage_extensions_nonsecure(el2_unused, ctx);
+	}
+
+	cm_el1_sysregs_context_restore(security_state);
+	cm_set_next_eret_context(security_state);
+}
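Both functions in the block below gate on the same condition; note that the '(security_state == SECURE) &&' term in their tests is redundant once the first disjunct has failed. A hypothetical helper (sketch only, not in the tree) makes the intent explicit:

	#include <stdbool.h>

	/* Sketch: the gate shared by the EL2 save/restore pair below */
	static bool el2_ctx_in_use(uint32_t security_state, u_register_t scr_el3)
	{
		/* NS and Realm always carry EL2 state; Secure only with S-EL2 on */
		return (security_state != SECURE) ||
		       ((scr_el3 & SCR_EEL2_BIT) != 0U);
	}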
+
+#if CTX_INCLUDE_EL2_REGS
+/*******************************************************************************
+ * Save EL2 sysreg context
+ ******************************************************************************/
+void cm_el2_sysregs_context_save(uint32_t security_state)
+{
+	u_register_t scr_el3 = read_scr();
+
+	/*
+	 * Always save the non-secure and realm EL2 context; only save the
+	 * S-EL2 context if S-EL2 is enabled.
+	 */
+	if ((security_state != SECURE) ||
+	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
+		cpu_context_t *ctx;
+		el2_sysregs_t *el2_sysregs_ctx;
+
+		ctx = cm_get_context(security_state);
+		assert(ctx != NULL);
+
+		el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);
+
+		el2_sysregs_context_save_common(el2_sysregs_ctx);
+#if ENABLE_SPE_FOR_LOWER_ELS
+		el2_sysregs_context_save_spe(el2_sysregs_ctx);
+#endif
+#if CTX_INCLUDE_MTE_REGS
+		el2_sysregs_context_save_mte(el2_sysregs_ctx);
+#endif
+#if ENABLE_MPAM_FOR_LOWER_ELS
+		el2_sysregs_context_save_mpam(el2_sysregs_ctx);
+#endif
+#if ENABLE_FEAT_FGT
+		el2_sysregs_context_save_fgt(el2_sysregs_ctx);
+#endif
+#if ENABLE_FEAT_ECV
+		el2_sysregs_context_save_ecv(el2_sysregs_ctx);
+#endif
+#if ENABLE_FEAT_VHE
+		el2_sysregs_context_save_vhe(el2_sysregs_ctx);
+#endif
+#if RAS_EXTENSION
+		el2_sysregs_context_save_ras(el2_sysregs_ctx);
+#endif
+#if CTX_INCLUDE_NEVE_REGS
+		el2_sysregs_context_save_nv2(el2_sysregs_ctx);
+#endif
+#if ENABLE_TRF_FOR_NS
+		el2_sysregs_context_save_trf(el2_sysregs_ctx);
+#endif
+#if ENABLE_FEAT_CSV2_2
+		el2_sysregs_context_save_csv2(el2_sysregs_ctx);
+#endif
+#if ENABLE_FEAT_HCX
+		el2_sysregs_context_save_hcx(el2_sysregs_ctx);
+#endif
+	}
+}
+
+/*******************************************************************************
+ * Restore EL2 sysreg context
+ ******************************************************************************/
+void cm_el2_sysregs_context_restore(uint32_t security_state)
+{
+	u_register_t scr_el3 = read_scr();
+
+	/*
+	 * Always restore the non-secure and realm EL2 context; only restore
+	 * the S-EL2 context if S-EL2 is enabled.
+	 */
+	if ((security_state != SECURE) ||
+	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
+		cpu_context_t *ctx;
+		el2_sysregs_t *el2_sysregs_ctx;
+
+		ctx = cm_get_context(security_state);
+		assert(ctx != NULL);
+
+		el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);
+
+		el2_sysregs_context_restore_common(el2_sysregs_ctx);
+#if ENABLE_SPE_FOR_LOWER_ELS
+		el2_sysregs_context_restore_spe(el2_sysregs_ctx);
+#endif
+#if CTX_INCLUDE_MTE_REGS
+		el2_sysregs_context_restore_mte(el2_sysregs_ctx);
+#endif
+#if ENABLE_MPAM_FOR_LOWER_ELS
+		el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
+#endif
+#if ENABLE_FEAT_FGT
+		el2_sysregs_context_restore_fgt(el2_sysregs_ctx);
+#endif
+#if ENABLE_FEAT_ECV
+		el2_sysregs_context_restore_ecv(el2_sysregs_ctx);
+#endif
+#if ENABLE_FEAT_VHE
+		el2_sysregs_context_restore_vhe(el2_sysregs_ctx);
+#endif
+#if RAS_EXTENSION
+		el2_sysregs_context_restore_ras(el2_sysregs_ctx);
+#endif
+#if CTX_INCLUDE_NEVE_REGS
+		el2_sysregs_context_restore_nv2(el2_sysregs_ctx);
+#endif
+#if ENABLE_TRF_FOR_NS
+		el2_sysregs_context_restore_trf(el2_sysregs_ctx);
+#endif
+#if ENABLE_FEAT_CSV2_2
+		el2_sysregs_context_restore_csv2(el2_sysregs_ctx);
+#endif
+#if ENABLE_FEAT_HCX
+		el2_sysregs_context_restore_hcx(el2_sysregs_ctx);
+#endif
+	}
+}
+#endif /* CTX_INCLUDE_EL2_REGS */
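Taken together, the EL1 and EL2 save/restore entry points compose into a world switch. A hypothetical SPD-style sequence, using only functions defined in this file:

	/* Sketch: a hypothetical SPD-style switch from Secure to Non-secure */
	static void world_switch_s_to_ns(void)
	{
		cm_el1_sysregs_context_save(SECURE);
	#if CTX_INCLUDE_EL2_REGS
		cm_el2_sysregs_context_save(SECURE);
		cm_el2_sysregs_context_restore(NON_SECURE);
	#endif
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);
		/* ... followed by el3_exit() in the actual SMC path */
	}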
+
+/*******************************************************************************
+ * This function is used to exit to the Non-secure world. If
+ * CTX_INCLUDE_EL2_REGS is enabled, it restores the EL1 and EL2 sysreg contexts
+ * instead of directly updating the EL1 and EL2 registers. Otherwise, it calls
+ * the generic cm_prepare_el3_exit function.
+ ******************************************************************************/
+void cm_prepare_el3_exit_ns(void)
+{
+#if CTX_INCLUDE_EL2_REGS
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	assert(ctx != NULL);
+
+	/* Assert that EL2 is used. */
+#if ENABLE_ASSERTIONS
+	el3_state_t *state = get_el3state_ctx(ctx);
+	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
+#endif
+	assert(((scr_el3 & SCR_HCE_BIT) != 0UL) &&
+	       (el_implemented(2U) != EL_IMPL_NONE));
+
+	/*
+	 * Currently some extensions are configured using direct register
+	 * updates. Therefore, do this here instead of when setting up the
+	 * context.
+	 */
+	manage_extensions_nonsecure(false, ctx);
+
+	/*
+	 * Set the NS bit to be able to access the ICC_SRE_EL2 register
+	 * when restoring the context.
+	 */
+	write_scr_el3(read_scr_el3() | SCR_NS_BIT);
+
+	/*
+	 * Ensure the NS bit change is committed before the EL2/EL1
+	 * state restoration.
+	 */
+	isb();
+
+	/* Restore EL2 and EL1 sysreg contexts */
+	cm_el2_sysregs_context_restore(NON_SECURE);
+	cm_el1_sysregs_context_restore(NON_SECURE);
+	cm_set_next_eret_context(NON_SECURE);
+#else
+	cm_prepare_el3_exit(NON_SECURE);
+#endif /* CTX_INCLUDE_EL2_REGS */
+}
+
+/*******************************************************************************
+ * The next two functions are used by runtime services to save and restore
+ * the EL1 context on the 'cpu_context' structure for the specified security
+ * state.
+ ******************************************************************************/
+void cm_el1_sysregs_context_save(uint32_t security_state)
+{
+	cpu_context_t *ctx;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx != NULL);
+
+	el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));
+
+#if IMAGE_BL31
+	if (security_state == SECURE)
+		PUBLISH_EVENT(cm_exited_secure_world);
+	else
+		PUBLISH_EVENT(cm_exited_normal_world);
+#endif
+}
+
+void cm_el1_sysregs_context_restore(uint32_t security_state)
+{
+	cpu_context_t *ctx;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx != NULL);
+
+	el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));
+
+#if IMAGE_BL31
+	if (security_state == SECURE)
+		PUBLISH_EVENT(cm_entering_secure_world);
+	else
+		PUBLISH_EVENT(cm_entering_normal_world);
+#endif
+}
+
+/*******************************************************************************
+ * This function populates the ELR_EL3 member of the 'cpu_context' pertaining
+ * to the given security state with the given entrypoint.
+ ******************************************************************************/
+void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
+{
+	cpu_context_t *ctx;
+	el3_state_t *state;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx != NULL);
+
+	/* Populate EL3 state so that ERET jumps to the correct entry */
+	state = get_el3state_ctx(ctx);
+	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
+}
+
+/*******************************************************************************
+ * This function populates the ELR_EL3 and SPSR_EL3 members of the
+ * 'cpu_context' pertaining to the given security state.
+ ******************************************************************************/
+void cm_set_elr_spsr_el3(uint32_t security_state,
+			 uintptr_t entrypoint, uint32_t spsr)
+{
+	cpu_context_t *ctx;
+	el3_state_t *state;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx != NULL);
+
+	/* Populate EL3 state so that ERET jumps to the correct entry */
+	state = get_el3state_ctx(ctx);
+	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
+	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
+}
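cm_set_elr_spsr_el3() is typically paired with cm_set_next_eret_context() to redirect the next exception return. A sketch under the assumption of an AArch64 S-EL1 payload (the wrapper name and 'secure_entry' are invented):

	/* Sketch: aim the next ERET at an AArch64 S-EL1 entry point */
	static void route_next_eret_to_sel1(uintptr_t secure_entry)
	{
		cm_set_elr_spsr_el3(SECURE, secure_entry,
				    SPSR_64(MODE_EL1, MODE_SP_ELX,
					    DISABLE_ALL_EXCEPTIONS));
		cm_set_next_eret_context(SECURE);
	}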
+
+/*******************************************************************************
+ * This function updates a single bit in the SCR_EL3 member of the
+ * 'cpu_context' pertaining to the given security state, using the value and
+ * bit position specified in the parameters. It preserves all other bits.
+ ******************************************************************************/
+void cm_write_scr_el3_bit(uint32_t security_state,
+			  uint32_t bit_pos,
+			  uint32_t value)
+{
+	cpu_context_t *ctx;
+	el3_state_t *state;
+	u_register_t scr_el3;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx != NULL);
+
+	/* Ensure that the bit position is a valid one */
+	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);
+
+	/* Ensure that the 'value' is only a bit wide */
+	assert(value <= 1U);
+
+	/*
+	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
+	 * and set it to its new value.
+	 */
+	state = get_el3state_ctx(ctx);
+	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
+	scr_el3 &= ~(1UL << bit_pos);
+	scr_el3 |= (u_register_t)value << bit_pos;
+	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
+}
+
+/*******************************************************************************
+ * This function retrieves the SCR_EL3 member of the 'cpu_context' pertaining
+ * to the given security state.
+ ******************************************************************************/
+u_register_t cm_get_scr_el3(uint32_t security_state)
+{
+	cpu_context_t *ctx;
+	el3_state_t *state;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx != NULL);
+
+	/* Return the SCR_EL3 value saved in the cpu context */
+	state = get_el3state_ctx(ctx);
+	return read_ctx_reg(state, CTX_SCR_EL3);
+}
+
+/*******************************************************************************
+ * This function is used to program the context that's used for exception
+ * return. It initializes SP_EL3 to a pointer to a 'cpu_context' set for
+ * the required security state.
+ ******************************************************************************/
+void cm_set_next_eret_context(uint32_t security_state)
+{
+	cpu_context_t *ctx;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx != NULL);
+
+	cm_set_next_context(ctx);
+}
diff --git a/lib/el3_runtime/aarch64/cpu_data.S b/lib/el3_runtime/aarch64/cpu_data.S
new file mode 100644
index 0000000..2392d6b
--- /dev/null
+++ b/lib/el3_runtime/aarch64/cpu_data.S
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <lib/el3_runtime/cpu_data.h>
+
+.globl	init_cpu_data_ptr
+.globl	_cpu_data_by_index
+
+/* -----------------------------------------------------------------
+ * void init_cpu_data_ptr(void)
+ *
+ * Initialise the TPIDR_EL3 register to refer to the cpu_data_t
+ * for the calling CPU. This must be called before cm_get_cpu_data().
+ *
+ * This can be called without a valid stack. It assumes that
+ * plat_my_core_pos() does not clobber register x10.
+ * clobbers: x0, x1, x10
+ * -----------------------------------------------------------------
+ */
+func init_cpu_data_ptr
+	mov	x10, x30
+	bl	plat_my_core_pos
+	bl	_cpu_data_by_index
+	msr	tpidr_el3, x0
+	ret	x10
+endfunc init_cpu_data_ptr
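For reference, the address computation performed by the routine that follows is the ordinary C array indexing it replaces; the lookup stays in assembly so it can run without a stack. A sketch (the C function name is invented):

	#include <lib/el3_runtime/cpu_data.h>	/* cpu_data_t, percpu_data */

	/* Sketch: C equivalent of _cpu_data_by_index below */
	static cpu_data_t *cpu_data_by_index_c(uint32_t cpu_index)
	{
		/* percpu_data + cpu_index * CPU_DATA_SIZE, as the assembly computes */
		return &percpu_data[cpu_index];
	}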
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
+ *
+ * Return the cpu_data structure for the CPU with the given linear
+ * index.
+ *
+ * This can be called without a valid stack.
+ * clobbers: x0, x1
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_index
+	mov_imm	x1, CPU_DATA_SIZE
+	mul	x0, x0, x1
+	adrp	x1, percpu_data
+	add	x1, x1, :lo12:percpu_data
+	add	x0, x0, x1
+	ret
+endfunc _cpu_data_by_index
diff --git a/lib/el3_runtime/cpu_data_array.c b/lib/el3_runtime/cpu_data_array.c
new file mode 100644
index 0000000..13d464c
--- /dev/null
+++ b/lib/el3_runtime/cpu_data_array.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#include <lib/cassert.h>
+#include <lib/el3_runtime/cpu_data.h>
+
+/* The per-CPU 'cpu_data_t' array, one entry per core in the system */
+cpu_data_t percpu_data[PLATFORM_CORE_COUNT];
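Once init_cpu_data_ptr has seeded TPIDR_EL3, per-CPU data is reachable with a single system-register read; the _cpu_data() helper in cpu_data.h does effectively this. A sketch (the helper name here is invented):

	#include <arch_helpers.h>		/* read_tpidr_el3() */
	#include <lib/el3_runtime/cpu_data.h>

	/* Sketch: fetch the calling CPU's cpu_data_t via TPIDR_EL3 */
	static inline cpu_data_t *my_cpu_data(void)
	{
		return (cpu_data_t *)read_tpidr_el3();
	}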