From 102b0d2daa97dae68d3eed54d8fe37a9cc38a892 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 28 Apr 2024 11:13:47 +0200
Subject: Adding upstream version 2.8.0+dfsg.

Signed-off-by: Daniel Baumann
---
 services/spd/tspd/tspd.mk        |  46 +++
 services/spd/tspd/tspd_common.c  | 140 +++++++
 services/spd/tspd/tspd_helpers.S |  79 ++++
 services/spd/tspd/tspd_main.c    | 819 +++++++++++++++++++++++++++++++++++++++
 services/spd/tspd/tspd_pm.c      | 254 ++++++++++++
 services/spd/tspd/tspd_private.h | 233 +++++++++++
 6 files changed, 1571 insertions(+)
 create mode 100644 services/spd/tspd/tspd.mk
 create mode 100644 services/spd/tspd/tspd_common.c
 create mode 100644 services/spd/tspd/tspd_helpers.S
 create mode 100644 services/spd/tspd/tspd_main.c
 create mode 100644 services/spd/tspd/tspd_pm.c
 create mode 100644 services/spd/tspd/tspd_private.h

diff --git a/services/spd/tspd/tspd.mk b/services/spd/tspd/tspd.mk
new file mode 100644
index 0000000..bda8338
--- /dev/null
+++ b/services/spd/tspd/tspd.mk
@@ -0,0 +1,46 @@
+#
+# Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TSPD_DIR		:=	services/spd/tspd
+
+ifeq (${ERROR_DEPRECATED},0)
+SPD_INCLUDES		:=	-Iinclude/bl32/tsp
+endif
+
+SPD_SOURCES		:=	services/spd/tspd/tspd_common.c		\
+				services/spd/tspd/tspd_helpers.S	\
+				services/spd/tspd/tspd_main.c		\
+				services/spd/tspd/tspd_pm.c
+
+# This dispatcher is paired with a Test Secure Payload source and we intend to
+# build the Test Secure Payload along with this dispatcher.
+#
+# In cases where an associated Secure Payload lies outside this build
+# system/source tree, the dispatcher Makefile can either invoke an external
+# build command or assume it is pre-built.
+
+BL32_ROOT		:=	bl32/tsp
+
+# Include the SP's Makefile. The assumption is that the TSP's build system is
+# compatible with that of Trusted Firmware, and it'll add and populate the
+# necessary build targets and variables.
+include ${BL32_ROOT}/tsp.mk
+
+# Let the top-level Makefile know that we intend to build the SP from source
+NEED_BL32		:=	yes
+
+# Flag used to enable routing of non-secure interrupts to EL3 when they are
+# generated while the code is executing in S-EL1/0.
+TSP_NS_INTR_ASYNC_PREEMPT	:=	0
+
+ifeq ($(EL3_EXCEPTION_HANDLING),1)
+ifeq ($(TSP_NS_INTR_ASYNC_PREEMPT),0)
+$(error When EL3_EXCEPTION_HANDLING=1, TSP_NS_INTR_ASYNC_PREEMPT must also be 1)
+endif
+endif
+
+$(eval $(call assert_boolean,TSP_NS_INTR_ASYNC_PREEMPT))
+$(eval $(call add_define,TSP_NS_INTR_ASYNC_PREEMPT))
diff --git a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c
new file mode 100644
index 0000000..063fd01
--- /dev/null
+++ b/services/spd/tspd/tspd_common.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/utils.h>
+
+#include "tspd_private.h"
+
+/*******************************************************************************
+ * Given a secure payload entrypoint info pointer, entry point PC, register
+ * width, cpu id & pointer to a context data structure, this function will
+ * initialize the tsp context and entry point info for the secure payload.
+ ******************************************************************************/
+void tspd_init_tsp_ep_state(struct entry_point_info *tsp_entry_point,
+			    uint32_t rw,
+			    uint64_t pc,
+			    tsp_context_t *tsp_ctx)
+{
+	uint32_t ep_attr;
+
+	/* Passing a NULL context is a critical programming error */
+	assert(tsp_ctx);
+	assert(tsp_entry_point);
+	assert(pc);
+
+	/*
+	 * We support AArch64 TSP for now.
+	 * TODO: Add support for AArch32 TSP
+	 */
+	assert(rw == TSP_AARCH64);
+
+	/* Associate this context with the cpu specified */
+	tsp_ctx->mpidr = read_mpidr_el1();
+	tsp_ctx->state = 0;
+	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
+	clr_yield_smc_active_flag(tsp_ctx->state);
+
+	cm_set_context(&tsp_ctx->cpu_ctx, SECURE);
+
+	/* initialise an entrypoint to set up the CPU context */
+	ep_attr = SECURE | EP_ST_ENABLE;
+	if (read_sctlr_el3() & SCTLR_EE_BIT)
+		ep_attr |= EP_EE_BIG;
+	SET_PARAM_HEAD(tsp_entry_point, PARAM_EP, VERSION_1, ep_attr);
+
+	tsp_entry_point->pc = pc;
+	tsp_entry_point->spsr = SPSR_64(MODE_EL1,
+					MODE_SP_ELX,
+					DISABLE_ALL_EXCEPTIONS);
+	zeromem(&tsp_entry_point->args, sizeof(tsp_entry_point->args));
+}
+
+/*******************************************************************************
+ * This function takes an SP context pointer and:
+ * 1. Applies the S-EL1 system register context from tsp_ctx->cpu_ctx.
+ * 2. Saves the current C runtime state (callee saved registers) on the stack
+ *    frame and saves a reference to this state.
+ * 3. Calls el3_exit() so that the EL3 system and general purpose registers
+ *    from the tsp_ctx->cpu_ctx are used to enter the secure payload image.
+ ******************************************************************************/
+uint64_t tspd_synchronous_sp_entry(tsp_context_t *tsp_ctx)
+{
+	uint64_t rc;
+
+	assert(tsp_ctx != NULL);
+	assert(tsp_ctx->c_rt_ctx == 0);
+
+	/* Apply the Secure EL1 system register context and switch to it */
+	assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
+	cm_el1_sysregs_context_restore(SECURE);
+	cm_set_next_eret_context(SECURE);
+
+	rc = tspd_enter_sp(&tsp_ctx->c_rt_ctx);
+#if ENABLE_ASSERTIONS
+	tsp_ctx->c_rt_ctx = 0;
+#endif
+
+	return rc;
+}
+
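For orientation, the usual calling pattern around tspd_synchronous_sp_entry() is: pick an entry in the TSP's vector table, program it as the secure world's ELR_EL3, then enter synchronously and sanity-check the result. A minimal sketch, mirroring what tspd_abort_preempted_smc() below and the handlers in tspd_pm.c do ('spd_run_tsp_vector' is a hypothetical helper name, not part of the patch):

	/*
	 * Illustrative sketch only: the common "program an entry point,
	 * enter the TSP, check the result" pattern used by the dispatcher.
	 */
	static void spd_run_tsp_vector(tsp_context_t *tsp_ctx, uintptr_t entry)
	{
		/* The next ERET into the secure world starts at 'entry' */
		cm_set_elr_el3(SECURE, (uint64_t) entry);

		/* Blocks until the TSP answers with the matching *_DONE SMC */
		if (tspd_synchronous_sp_entry(tsp_ctx) != 0)
			panic();
	}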
+
+/*******************************************************************************
+ * This function takes an SP context pointer and:
+ * 1. Saves the S-EL1 system register context to tsp_ctx->cpu_ctx.
+ * 2. Restores the current C runtime state (callee saved registers) from the
+ *    stack frame using the reference to this state saved in tspd_enter_sp().
+ * 3. It does not need to save any general purpose or EL3 system register state
+ *    as the generic smc entry routine should have saved those.
+ ******************************************************************************/
+void tspd_synchronous_sp_exit(tsp_context_t *tsp_ctx, uint64_t ret)
+{
+	assert(tsp_ctx != NULL);
+	/* Save the Secure EL1 system register context */
+	assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
+	cm_el1_sysregs_context_save(SECURE);
+
+	assert(tsp_ctx->c_rt_ctx != 0);
+	tspd_exit_sp(tsp_ctx->c_rt_ctx, ret);
+
+	/* Should never reach here */
+	assert(0);
+}
+
+/*******************************************************************************
+ * This function takes an SP context pointer and aborts any preempted SMC
+ * request.
+ * It returns 1 if there was a preempted SMC request, 0 otherwise.
+ ******************************************************************************/
+int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx)
+{
+	if (!get_yield_smc_active_flag(tsp_ctx->state))
+		return 0;
+
+	/* Abort any preempted SMC request */
+	clr_yield_smc_active_flag(tsp_ctx->state);
+
+	/*
+	 * Arrange for an entry into the test secure payload. It will
+	 * be returned via the TSP_ABORT_DONE case in tspd_smc_handler.
+	 */
+	cm_set_elr_el3(SECURE,
+		       (uint64_t) &tsp_vectors->abort_yield_smc_entry);
+	uint64_t rc = tspd_synchronous_sp_entry(tsp_ctx);
+
+	if (rc != 0)
+		panic();
+
+	return 1;
+}
diff --git a/services/spd/tspd/tspd_helpers.S b/services/spd/tspd/tspd_helpers.S
new file mode 100644
index 0000000..f15d66b
--- /dev/null
+++ b/services/spd/tspd/tspd_helpers.S
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "tspd_private.h"
+
+	.global	tspd_enter_sp
+	/* ---------------------------------------------
+	 * This function is called with SP_EL0 as stack.
+	 * Here we stash our EL3 callee-saved registers
+	 * on to the stack as a part of saving the C
+	 * runtime and enter the secure payload.
+	 * 'x0' contains a pointer to the memory where
+	 * the address of the C runtime context is to be
+	 * saved.
+	 * ---------------------------------------------
+	 */
+func tspd_enter_sp
+	/* Make space for the registers that we're going to save */
+	mov	x3, sp
+	str	x3, [x0, #0]
+	sub	sp, sp, #TSPD_C_RT_CTX_SIZE
+
+	/* Save callee-saved registers on to the stack */
+	stp	x19, x20, [sp, #TSPD_C_RT_CTX_X19]
+	stp	x21, x22, [sp, #TSPD_C_RT_CTX_X21]
+	stp	x23, x24, [sp, #TSPD_C_RT_CTX_X23]
+	stp	x25, x26, [sp, #TSPD_C_RT_CTX_X25]
+	stp	x27, x28, [sp, #TSPD_C_RT_CTX_X27]
+	stp	x29, x30, [sp, #TSPD_C_RT_CTX_X29]
+
+	/* ---------------------------------------------
+	 * Everything is set up now. el3_exit() will
+	 * use the secure context to restore the
+	 * general purpose and EL3 system registers to
+	 * ERET into the secure payload.
+	 * ---------------------------------------------
+	 */
+	b	el3_exit
+endfunc tspd_enter_sp
+
+	/* ---------------------------------------------
+	 * This function is called with 'x0' pointing to
+	 * a C runtime context saved in tspd_enter_sp().
+	 * It restores the saved registers and jumps to
+	 * that runtime with 'x0' as the new sp. This
+	 * destroys the C runtime context that had been
+	 * built on the stack below the saved context by
+	 * the caller. The second parameter 'x1' is then
+	 * passed as the return value to the caller.
+	 * ---------------------------------------------
+	 */
+	.global	tspd_exit_sp
+func tspd_exit_sp
+	/* Restore the previous stack */
+	mov	sp, x0
+
+	/* Restore callee-saved registers from the stack */
+	ldp	x19, x20, [x0, #(TSPD_C_RT_CTX_X19 - TSPD_C_RT_CTX_SIZE)]
+	ldp	x21, x22, [x0, #(TSPD_C_RT_CTX_X21 - TSPD_C_RT_CTX_SIZE)]
+	ldp	x23, x24, [x0, #(TSPD_C_RT_CTX_X23 - TSPD_C_RT_CTX_SIZE)]
+	ldp	x25, x26, [x0, #(TSPD_C_RT_CTX_X25 - TSPD_C_RT_CTX_SIZE)]
+	ldp	x27, x28, [x0, #(TSPD_C_RT_CTX_X27 - TSPD_C_RT_CTX_SIZE)]
+	ldp	x29, x30, [x0, #(TSPD_C_RT_CTX_X29 - TSPD_C_RT_CTX_SIZE)]
+
+	/* ---------------------------------------------
+	 * This should take us back to the instruction
+	 * after the call to the last tspd_enter_sp().
+	 * Place the second parameter in x0 so that the
+	 * caller will see it as the return value from
+	 * the original entry call.
+	 * ---------------------------------------------
+	 */
+	mov	x0, x1
+	ret
+endfunc tspd_exit_sp
diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c
new file mode 100644
index 0000000..6cb4992
--- /dev/null
+++ b/services/spd/tspd/tspd_main.c
@@ -0,0 +1,819 @@
+/*
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/*******************************************************************************
+ * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
+ * plug-in component to the Secure Monitor, registered as a runtime service. The
+ * SPD is expected to be a functional extension of the Secure Payload (SP) that
+ * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
+ * the Trusted OS/Applications range to the dispatcher. The SPD will either
+ * handle the request locally or delegate it to the Secure Payload. It is also
+ * responsible for initialising and maintaining communication with the SP.
+ ******************************************************************************/
+#include <assert.h>
+#include <errno.h>
+#include <stddef.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <bl31/ehf.h>
+#include <bl32/tsp/tsp.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <plat/common/platform.h>
+#include <tools_share/uuid.h>
+
+#include "tspd_private.h"
+
+/*******************************************************************************
+ * Address of the entrypoint vector table in the Secure Payload. It is
+ * initialised once on the primary core after a cold boot.
+ ******************************************************************************/
+tsp_vectors_t *tsp_vectors;
+
+/*******************************************************************************
+ * Array to keep track of per-cpu Secure Payload state
+ ******************************************************************************/
+tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];
+
+
+/* TSP UID */
+DEFINE_SVC_UUID2(tsp_uuid,
+	0xa056305b, 0x9132, 0x7b42, 0x98, 0x11,
+	0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa);
+
+int32_t tspd_init(void);
+
+/*
+ * This helper function handles Secure EL1 preemption. The preemption could be
+ * due to non-secure interrupts or EL3 interrupts. In both cases we context
+ * switch to the normal world, and in the case of EL3 interrupts, the interrupt
+ * will again be routed to EL3 and handled at the exception vectors.
+ */
+uint64_t tspd_handle_sp_preemption(void *handle)
+{
+	cpu_context_t *ns_cpu_context;
+
+	assert(handle == cm_get_context(SECURE));
+	cm_el1_sysregs_context_save(SECURE);
+	/* Get a reference to the non-secure context */
+	ns_cpu_context = cm_get_context(NON_SECURE);
+	assert(ns_cpu_context);
+
+	/*
+	 * To allow the Secure EL1 interrupt handler to re-enter the TSP while
+	 * the TSP is preempted, the secure system register context which will
+	 * get overwritten must be additionally saved. This is currently done
+	 * by the TSPD S-EL1 interrupt handler.
+	 */
+
+	/*
+	 * Restore non-secure state.
+	 */
+	cm_el1_sysregs_context_restore(NON_SECURE);
+	cm_set_next_eret_context(NON_SECURE);
+
+	/*
+	 * The TSP was preempted during execution of a Yielding SMC Call.
+	 * Return back to the normal world with SMC_PREEMPTED as error
+	 * code in x0.
+	 */
+	SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
+}
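From the normal world's point of view, SMC_PREEMPTED in x0 means the Yielding SMC did not complete and should either be resumed with TSP_FID_RESUME or cancelled with TSP_FID_ABORT, both handled later in tspd_smc_handler(). A sketch of a non-secure client loop, assuming a hypothetical smc64() wrapper that issues an SMC with the given function ID and returns x0:

	/* Hypothetical wrapper, not part of the patch or of TF-A */
	extern uint64_t smc64(uint64_t fid, uint64_t x1, uint64_t x2);

	static uint64_t call_tsp_yielding(uint64_t fid, uint64_t x1, uint64_t x2)
	{
		uint64_t ret = smc64(fid, x1, x2);

		/* If the TSP was preempted, ask the dispatcher to re-enter it */
		while (ret == SMC_PREEMPTED)
			ret = smc64(TSP_FID_RESUME, 0, 0);

		return ret;
	}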
+
+/*******************************************************************************
+ * This function is the handler registered for S-EL1 interrupts by the TSPD. It
+ * validates the interrupt and upon success arranges entry into the TSP at
+ * 'tsp_sel1_intr_entry()' for handling the interrupt.
+ * Typically, interrupts for a specific security state get handled in the same
+ * security exception level if the execution is in the same security state. For
+ * example, if a non-secure interrupt gets fired when the CPU is executing in
+ * NS-EL2 it gets handled in the non-secure world.
+ * However, interrupts belonging to the opposite security state typically demand
+ * a world(context) switch. This is in line with the security principle which
+ * states that a secure interrupt has to be handled in the secure world.
+ * Hence, the TSPD in EL3 expects the context(handle) for a secure interrupt to
+ * be non-secure and vice versa.
+ * However, a race condition between non-secure and secure interrupts can lead
+ * to a scenario where the above assumptions do not hold true. This is
+ * demonstrated below through Note 1.
+ ******************************************************************************/
+static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
+					    uint32_t flags,
+					    void *handle,
+					    void *cookie)
+{
+	uint32_t linear_id;
+	tsp_context_t *tsp_ctx;
+
+	/* Get a reference to this cpu's TSP context */
+	linear_id = plat_my_core_pos();
+	tsp_ctx = &tspd_sp_context[linear_id];
+
+#if TSP_NS_INTR_ASYNC_PREEMPT
+
+	/*
+	 * Note 1:
+	 * Under the current interrupt routing model, interrupts from the other
+	 * world are routed to EL3 when TSP_NS_INTR_ASYNC_PREEMPT is enabled.
+	 * Consider the following scenario:
+	 * 1/ A non-secure payload(like tftf) requests a secure service from
+	 *    the TSP by invoking a yielding SMC call.
+	 * 2/ Later, execution jumps to the TSP in S-EL1 with the help of the
+	 *    TSP Dispatcher in the Secure Monitor(EL3).
+	 * 3/ While the CPU is executing the TSP, a non-secure interrupt gets
+	 *    fired. This demands a context switch to the non-secure world
+	 *    through the secure monitor.
+	 * 4/ Consequently, the TSP in S-EL1 gets asynchronously pre-empted and
+	 *    execution switches to the secure monitor(EL3).
+	 * 5/ EL3 tries to triage the (non-secure) interrupt based on the
+	 *    highest pending interrupt.
+	 * 6/ However, while the NS interrupt was pending, the secure timer gets
+	 *    fired, which makes an S-EL1 interrupt pending as well.
+	 * 7/ Hence, execution jumps to this companion handler of the S-EL1
+	 *    interrupt (i.e., tspd_sel1_interrupt_handler) even though the TSP
+	 *    was pre-empted due to the non-secure interrupt.
+	 * 8/ The above sequence of events explains how the TSP can be
+	 *    pre-empted by an S-EL1 interrupt indirectly, in an asynchronous
+	 *    way.
+	 * 9/ Hence, we track TSP pre-emption by S-EL1 interrupt using a
+	 *    boolean variable on each core.
+	 * 10/ This helps us to indicate that the SMC call for the TSP service
+	 *    was pre-empted when execution resumes in the non-secure world.
+	 */
+
+	/* Check the security state when the exception was generated */
+	if (get_interrupt_src_ss(flags) == NON_SECURE) {
+		/* Sanity check the pointer to this cpu's context */
+		assert(handle == cm_get_context(NON_SECURE));
+
+		/* Save the non-secure context before entering the TSP */
+		cm_el1_sysregs_context_save(NON_SECURE);
+		tsp_ctx->preempted_by_sel1_intr = false;
+	} else {
+		/* Sanity check the pointer to this cpu's context */
+		assert(handle == cm_get_context(SECURE));
+
+		/* Save the secure context before entering the TSP for S-EL1
+		 * interrupt handling
+		 */
+		cm_el1_sysregs_context_save(SECURE);
+		tsp_ctx->preempted_by_sel1_intr = true;
+	}
+#else
+	/* Check the security state when the exception was generated */
+	assert(get_interrupt_src_ss(flags) == NON_SECURE);
+
+	/* Sanity check the pointer to this cpu's context */
+	assert(handle == cm_get_context(NON_SECURE));
+
+	/* Save the non-secure context before entering the TSP */
+	cm_el1_sysregs_context_save(NON_SECURE);
+#endif
+
+	assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));
+
+	/*
+	 * Determine if the TSP was previously preempted. Its last known
+	 * context has to be preserved in this case.
+	 * The TSP should return control to the TSPD after handling this
+	 * S-EL1 interrupt. Preserve essential EL3 context to allow entry into
+	 * the TSP at the S-EL1 interrupt entry point using the 'cpu_context'
+	 * structure. There is no need to save the secure system register
+	 * context since the TSP is supposed to preserve it during S-EL1
+	 * interrupt handling.
+	 */
+	if (get_yield_smc_active_flag(tsp_ctx->state)) {
+		tsp_ctx->saved_spsr_el3 = (uint32_t)SMC_GET_EL3(&tsp_ctx->cpu_ctx,
+						      CTX_SPSR_EL3);
+		tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
+						     CTX_ELR_EL3);
+#if TSP_NS_INTR_ASYNC_PREEMPT
+		memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE);
+#endif
+	}
+
+	cm_el1_sysregs_context_restore(SECURE);
+	cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->sel1_intr_entry,
+		    SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));
+
+	cm_set_next_eret_context(SECURE);
+
+	/*
+	 * Tell the TSP that it has to handle an S-EL1 interrupt synchronously.
+	 * Also the instruction in the normal world where the interrupt was
+	 * generated is passed for debugging purposes. It is safe to retrieve
+	 * this address from ELR_EL3 as the secure context will not take effect
+	 * until el3_exit().
+	 */
+	SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_SEL1_INTR_AND_RETURN, read_elr_el3());
+}
+
+#if TSP_NS_INTR_ASYNC_PREEMPT
+/*******************************************************************************
+ * This function is the handler registered for non-secure interrupts by the
+ * TSPD. It validates the interrupt and upon success arranges entry into the
+ * normal world for handling the interrupt.
+ ******************************************************************************/
+static uint64_t tspd_ns_interrupt_handler(uint32_t id,
+					  uint32_t flags,
+					  void *handle,
+					  void *cookie)
+{
+	/* Check the security state when the exception was generated */
+	assert(get_interrupt_src_ss(flags) == SECURE);
+
+	/*
+	 * Disable the routing of NS interrupts from secure world to EL3 while
+	 * interrupted on this core.
+	 */
+	disable_intr_rm_local(INTR_TYPE_NS, SECURE);
+
+	return tspd_handle_sp_preemption(handle);
+}
+#endif
+
+/*******************************************************************************
+ * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
+ * (aarch32/aarch64) if not already known and initialises the context for entry
+ * into the SP for its initialisation.
+ ******************************************************************************/
+static int32_t tspd_setup(void)
+{
+	entry_point_info_t *tsp_ep_info;
+	uint32_t linear_id;
+
+	linear_id = plat_my_core_pos();
+
+	/*
+	 * Get information about the Secure Payload (BL32) image. Its
+	 * absence is a critical failure. TODO: Add support to
+	 * conditionally include the SPD service
+	 */
+	tsp_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+	if (!tsp_ep_info) {
+		WARN("No TSP provided by BL2 boot loader; booting device"
+			" without TSP initialization. SMCs destined for the TSP"
+			" will return SMC_UNK\n");
+		return 1;
+	}
+
+	/*
+	 * If there's no valid entry point for the SP, we return a non-zero
+	 * value signalling failure to initialize the service. We bail out
+	 * without registering any handlers.
+	 */
+	if (!tsp_ep_info->pc)
+		return 1;
+
+	/*
+	 * We could inspect the SP image and determine its execution
+	 * state i.e. whether AArch32 or AArch64. Assuming it's AArch64
+	 * for the time being.
+	 */
+	tspd_init_tsp_ep_state(tsp_ep_info,
+			       TSP_AARCH64,
+			       tsp_ep_info->pc,
+			       &tspd_sp_context[linear_id]);
+
+#if TSP_INIT_ASYNC
+	bl31_set_next_image_type(SECURE);
+#else
+	/*
+	 * All TSPD initialization done. Now register our init function with
+	 * BL31 for deferred invocation.
+	 */
+	bl31_register_bl32_init(&tspd_init);
+#endif
+	return 0;
+}
+
+/*******************************************************************************
+ * This function passes control to the Secure Payload image (BL32) for the first
+ * time on the primary cpu after a cold boot. It assumes that a valid secure
+ * context has already been created by tspd_setup() which can be directly used.
+ * It also assumes that a valid non-secure context has been initialised by PSCI
+ * so it does not need to save and restore any non-secure state. This function
+ * performs a synchronous entry into the Secure payload. The SP passes control
+ * back to this routine through an SMC.
+ ******************************************************************************/
+int32_t tspd_init(void)
+{
+	uint32_t linear_id = plat_my_core_pos();
+	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
+	entry_point_info_t *tsp_entry_point;
+	uint64_t rc;
+
+	/*
+	 * Get information about the Secure Payload (BL32) image. Its
+	 * absence is a critical failure.
+	 */
+	tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
+	assert(tsp_entry_point);
+
+	cm_init_my_context(tsp_entry_point);
+
+	/*
+	 * Arrange for an entry into the test secure payload. It will be
+	 * returned via the TSP_ENTRY_DONE case.
+	 */
+	rc = tspd_synchronous_sp_entry(tsp_ctx);
+	assert(rc != 0);
+
+	return rc;
+}
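The other half of this handshake lives in the TSP itself (bl32/tsp, not part of this patch): once initialised, the TSP issues TSP_ENTRY_DONE with the address of its vector table in x1, which the TSP_ENTRY_DONE case of tspd_smc_handler() below stashes in tsp_vectors. A hedged sketch of that TSP-side call, assuming the same hypothetical smc64() wrapper as earlier and a TSP-defined tsp_vector_table symbol:

	/* Illustrative TSP-side sketch; both names below are assumptions */
	extern tsp_vectors_t tsp_vector_table;
	extern uint64_t smc64(uint64_t fid, uint64_t x1, uint64_t x2);

	static void tsp_report_entry_done(void)
	{
		/*
		 * x1 of this SMC becomes 'tsp_vectors' in the
		 * TSP_ENTRY_DONE case of tspd_smc_handler().
		 */
		smc64(TSP_ENTRY_DONE, (uint64_t) &tsp_vector_table, 0);
	}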
It will be + * returned via TSP_ENTRY_DONE case + */ + rc = tspd_synchronous_sp_entry(tsp_ctx); + assert(rc != 0); + + return rc; +} + + +/******************************************************************************* + * This function is responsible for handling all SMCs in the Trusted OS/App + * range from the non-secure state as defined in the SMC Calling Convention + * Document. It is also responsible for communicating with the Secure payload + * to delegate work and return results back to the non-secure state. Lastly it + * will also return any information that the secure payload needs to do the + * work assigned to it. + ******************************************************************************/ +static uintptr_t tspd_smc_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + cpu_context_t *ns_cpu_context; + uint32_t linear_id = plat_my_core_pos(), ns; + tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; + uint64_t rc; +#if TSP_INIT_ASYNC + entry_point_info_t *next_image_info; +#endif + + /* Determine which security state this SMC originated from */ + ns = is_caller_non_secure(flags); + + switch (smc_fid) { + + /* + * This function ID is used by TSP to indicate that it was + * preempted by a normal world IRQ. + * + */ + case TSP_PREEMPTED: + if (ns) + SMC_RET1(handle, SMC_UNK); + + return tspd_handle_sp_preemption(handle); + + /* + * This function ID is used only by the TSP to indicate that it has + * finished handling a S-EL1 interrupt or was preempted by a higher + * priority pending EL3 interrupt. Execution should resume + * in the normal world. + */ + case TSP_HANDLED_S_EL1_INTR: + if (ns) + SMC_RET1(handle, SMC_UNK); + + assert(handle == cm_get_context(SECURE)); + + /* + * Restore the relevant EL3 state which saved to service + * this SMC. + */ + if (get_yield_smc_active_flag(tsp_ctx->state)) { + SMC_SET_EL3(&tsp_ctx->cpu_ctx, + CTX_SPSR_EL3, + tsp_ctx->saved_spsr_el3); + SMC_SET_EL3(&tsp_ctx->cpu_ctx, + CTX_ELR_EL3, + tsp_ctx->saved_elr_el3); +#if TSP_NS_INTR_ASYNC_PREEMPT + /* + * Need to restore the previously interrupted + * secure context. + */ + memcpy(&tsp_ctx->cpu_ctx, &tsp_ctx->sp_ctx, + TSPD_SP_CTX_SIZE); +#endif + } + + /* Get a reference to the non-secure context */ + ns_cpu_context = cm_get_context(NON_SECURE); + assert(ns_cpu_context); + + /* + * Restore non-secure state. There is no need to save the + * secure system register context since the TSP was supposed + * to preserve it during S-EL1 interrupt handling. + */ + cm_el1_sysregs_context_restore(NON_SECURE); + cm_set_next_eret_context(NON_SECURE); + + /* Refer to Note 1 in function tspd_sel1_interrupt_handler()*/ +#if TSP_NS_INTR_ASYNC_PREEMPT + if (tsp_ctx->preempted_by_sel1_intr) { + /* Reset the flag */ + tsp_ctx->preempted_by_sel1_intr = false; + + SMC_RET1(ns_cpu_context, SMC_PREEMPTED); + } else { + SMC_RET0((uint64_t) ns_cpu_context); + } +#else + SMC_RET0((uint64_t) ns_cpu_context); +#endif + + + /* + * This function ID is used only by the SP to indicate it has + * finished initialising itself after a cold boot + */ + case TSP_ENTRY_DONE: + if (ns) + SMC_RET1(handle, SMC_UNK); + + /* + * Stash the SP entry points information. This is done + * only once on the primary cpu + */ + assert(tsp_vectors == NULL); + tsp_vectors = (tsp_vectors_t *) x1; + + if (tsp_vectors) { + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON); + + /* + * TSP has been successfully initialized. 
+			/*
+			 * TSP has been successfully initialized. Register
+			 * power management hooks with PSCI.
+			 */
+			psci_register_spd_pm_hook(&tspd_pm);
+
+			/*
+			 * Register an interrupt handler for S-EL1 interrupts
+			 * when generated during code executing in the
+			 * non-secure state.
+			 */
+			flags = 0;
+			set_interrupt_rm_flag(flags, NON_SECURE);
+			rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+						tspd_sel1_interrupt_handler,
+						flags);
+			if (rc)
+				panic();
+
+#if TSP_NS_INTR_ASYNC_PREEMPT
+			/*
+			 * Register an interrupt handler for NS interrupts,
+			 * which are routed to EL3 when generated during code
+			 * executing in the secure state.
+			 */
+			flags = 0;
+			set_interrupt_rm_flag(flags, SECURE);
+
+			rc = register_interrupt_type_handler(INTR_TYPE_NS,
+						tspd_ns_interrupt_handler,
+						flags);
+			if (rc)
+				panic();
+
+			/*
+			 * Disable the NS interrupt locally.
+			 */
+			disable_intr_rm_local(INTR_TYPE_NS, SECURE);
+#endif
+		}
+
+
+#if TSP_INIT_ASYNC
+		/* Save the Secure EL1 system register context */
+		assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
+		cm_el1_sysregs_context_save(SECURE);
+
+		/* Program EL3 registers to enable entry into the next EL */
+		next_image_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
+		assert(next_image_info);
+		assert(NON_SECURE ==
+				GET_SECURITY_STATE(next_image_info->h.attr));
+
+		cm_init_my_context(next_image_info);
+		cm_prepare_el3_exit(NON_SECURE);
+		SMC_RET0(cm_get_context(NON_SECURE));
+#else
+		/*
+		 * SP reports completion. The SPD must have initiated
+		 * the original request through a synchronous entry
+		 * into the SP. Jump back to the original C runtime
+		 * context.
+		 */
+		tspd_synchronous_sp_exit(tsp_ctx, x1);
+		break;
+#endif
+	/*
+	 * This function ID is used only by the SP to indicate it has finished
+	 * aborting a preempted Yielding SMC Call.
+	 */
+	case TSP_ABORT_DONE:
+
+	/*
+	 * These function IDs are used only by the SP to indicate it has
+	 * finished:
+	 * 1. turning itself on in response to an earlier psci
+	 *    cpu_on request
+	 * 2. resuming itself after an earlier psci cpu_suspend
+	 *    request.
+	 */
+	case TSP_ON_DONE:
+	case TSP_RESUME_DONE:
+
+	/*
+	 * These function IDs are used only by the SP to indicate it has
+	 * finished:
+	 * 1. suspending itself after an earlier psci cpu_suspend
+	 *    request.
+	 * 2. turning itself off in response to an earlier psci
+	 *    cpu_off request.
+	 */
+	case TSP_OFF_DONE:
+	case TSP_SUSPEND_DONE:
+	case TSP_SYSTEM_OFF_DONE:
+	case TSP_SYSTEM_RESET_DONE:
+		if (ns)
+			SMC_RET1(handle, SMC_UNK);
+
+		/*
+		 * SP reports completion. The SPD must have initiated the
+		 * original request through a synchronous entry into the SP.
+		 * Jump back to the original C runtime context, and pass x1 as
+		 * return value to the caller.
+		 */
+		tspd_synchronous_sp_exit(tsp_ctx, x1);
+		break;
+
+	/*
+	 * Request from non-secure client to perform an
+	 * arithmetic operation or response from secure
+	 * payload to an earlier request.
+	 */
+	case TSP_FAST_FID(TSP_ADD):
+	case TSP_FAST_FID(TSP_SUB):
+	case TSP_FAST_FID(TSP_MUL):
+	case TSP_FAST_FID(TSP_DIV):
+
+	case TSP_YIELD_FID(TSP_ADD):
+	case TSP_YIELD_FID(TSP_SUB):
+	case TSP_YIELD_FID(TSP_MUL):
+	case TSP_YIELD_FID(TSP_DIV):
+	/*
+	 * Request from non-secure client to perform a check
+	 * of the DIT PSTATE bit.
+	 */
+	case TSP_YIELD_FID(TSP_CHECK_DIT):
+		if (ns) {
+			/*
+			 * This is a fresh request from the non-secure client.
+			 * The parameters are in x1 and x2. Figure out which
+			 * registers need to be preserved, save the non-secure
+			 * state and send the request to the secure payload.
+ */ + assert(handle == cm_get_context(NON_SECURE)); + + /* Check if we are already preempted */ + if (get_yield_smc_active_flag(tsp_ctx->state)) + SMC_RET1(handle, SMC_UNK); + + cm_el1_sysregs_context_save(NON_SECURE); + + /* Save x1 and x2 for use by TSP_GET_ARGS call below */ + store_tsp_args(tsp_ctx, x1, x2); + + /* + * We are done stashing the non-secure context. Ask the + * secure payload to do the work now. + */ + + /* + * Verify if there is a valid context to use, copy the + * operation type and parameters to the secure context + * and jump to the fast smc entry point in the secure + * payload. Entry into S-EL1 will take place upon exit + * from this function. + */ + assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE)); + + /* Set appropriate entry for SMC. + * We expect the TSP to manage the PSTATE.I and PSTATE.F + * flags as appropriate. + */ + if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) { + cm_set_elr_el3(SECURE, (uint64_t) + &tsp_vectors->fast_smc_entry); + } else { + set_yield_smc_active_flag(tsp_ctx->state); + cm_set_elr_el3(SECURE, (uint64_t) + &tsp_vectors->yield_smc_entry); +#if TSP_NS_INTR_ASYNC_PREEMPT + /* + * Enable the routing of NS interrupts to EL3 + * during processing of a Yielding SMC Call on + * this core. + */ + enable_intr_rm_local(INTR_TYPE_NS, SECURE); +#endif + +#if EL3_EXCEPTION_HANDLING + /* + * With EL3 exception handling, while an SMC is + * being processed, Non-secure interrupts can't + * preempt Secure execution. However, for + * yielding SMCs, we want preemption to happen; + * so explicitly allow NS preemption in this + * case, and supply the preemption return code + * for TSP. + */ + ehf_allow_ns_preemption(TSP_PREEMPTED); +#endif + } + + cm_el1_sysregs_context_restore(SECURE); + cm_set_next_eret_context(SECURE); + SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2); + } else { + /* + * This is the result from the secure client of an + * earlier request. The results are in x1-x3. Copy it + * into the non-secure context, save the secure state + * and return to the non-secure state. + */ + assert(handle == cm_get_context(SECURE)); + cm_el1_sysregs_context_save(SECURE); + + /* Get a reference to the non-secure context */ + ns_cpu_context = cm_get_context(NON_SECURE); + assert(ns_cpu_context); + + /* Restore non-secure state */ + cm_el1_sysregs_context_restore(NON_SECURE); + cm_set_next_eret_context(NON_SECURE); + if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_YIELD) { + clr_yield_smc_active_flag(tsp_ctx->state); +#if TSP_NS_INTR_ASYNC_PREEMPT + /* + * Disable the routing of NS interrupts to EL3 + * after processing of a Yielding SMC Call on + * this core is finished. + */ + disable_intr_rm_local(INTR_TYPE_NS, SECURE); +#endif + } + + SMC_RET3(ns_cpu_context, x1, x2, x3); + } + assert(0); /* Unreachable */ + + /* + * Request from the non-secure world to abort a preempted Yielding SMC + * Call. + */ + case TSP_FID_ABORT: + /* ABORT should only be invoked by normal world */ + if (!ns) { + assert(0); + break; + } + + assert(handle == cm_get_context(NON_SECURE)); + cm_el1_sysregs_context_save(NON_SECURE); + + /* Abort the preempted SMC request */ + if (!tspd_abort_preempted_smc(tsp_ctx)) { + /* + * If there was no preempted SMC to abort, return + * SMC_UNK. + * + * Restoring the NON_SECURE context is not necessary as + * the synchronous entry did not take place if the + * return code of tspd_abort_preempted_smc is zero. 
+			 */
+			cm_set_next_eret_context(NON_SECURE);
+			break;
+		}
+
+		cm_el1_sysregs_context_restore(NON_SECURE);
+		cm_set_next_eret_context(NON_SECURE);
+		SMC_RET1(handle, SMC_OK);
+
+	/*
+	 * Request from the non-secure world to resume the preempted
+	 * Yielding SMC Call.
+	 */
+	case TSP_FID_RESUME:
+		/* RESUME should be invoked only by the normal world */
+		if (!ns) {
+			assert(0);
+			break;
+		}
+
+		/*
+		 * This is a resume request from the non-secure client.
+		 * Save the non-secure state and send the request to
+		 * the secure payload.
+		 */
+		assert(handle == cm_get_context(NON_SECURE));
+
+		/* Check if we are already preempted before resume */
+		if (!get_yield_smc_active_flag(tsp_ctx->state))
+			SMC_RET1(handle, SMC_UNK);
+
+		cm_el1_sysregs_context_save(NON_SECURE);
+
+		/*
+		 * We are done stashing the non-secure context. Ask the
+		 * secure payload to do the work now.
+		 */
+#if TSP_NS_INTR_ASYNC_PREEMPT
+		/*
+		 * Enable the routing of NS interrupts to EL3 during resumption
+		 * of a Yielding SMC Call on this core.
+		 */
+		enable_intr_rm_local(INTR_TYPE_NS, SECURE);
+#endif
+
+#if EL3_EXCEPTION_HANDLING
+		/*
+		 * Allow the resumed yielding SMC processing to be preempted by
+		 * Non-secure interrupts. Also, supply the preemption return
+		 * code for the TSP.
+		 */
+		ehf_allow_ns_preemption(TSP_PREEMPTED);
+#endif
+
+		/* We just need to return to the preempted point in
+		 * the TSP and the execution will resume as normal.
+		 */
+		cm_el1_sysregs_context_restore(SECURE);
+		cm_set_next_eret_context(SECURE);
+		SMC_RET0(&tsp_ctx->cpu_ctx);
+
+	/*
+	 * This is a request from the secure payload for more arguments
+	 * for an ongoing arithmetic operation requested by the
+	 * non-secure world. Simply return the arguments from the non-
+	 * secure client in the original call.
+	 */
+	case TSP_GET_ARGS:
+		if (ns)
+			SMC_RET1(handle, SMC_UNK);
+
+		get_tsp_args(tsp_ctx, x1, x2);
+		SMC_RET2(handle, x1, x2);
+
+	case TOS_CALL_COUNT:
+		/*
+		 * Return the number of service function IDs implemented to
+		 * provide service to the non-secure world.
+		 */
+		SMC_RET1(handle, TSP_NUM_FID);
+
+	case TOS_UID:
+		/* Return TSP UID to the caller */
+		SMC_UUID_RET(handle, tsp_uuid);
+
+	case TOS_CALL_VERSION:
+		/* Return the version of the current implementation */
+		SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);
+
+	default:
+		break;
+	}
+
+	SMC_RET1(handle, SMC_UNK);
+}
+
+/* Define a SPD runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+	tspd_fast,
+
+	OEN_TOS_START,
+	OEN_TOS_END,
+	SMC_TYPE_FAST,
+	tspd_setup,
+	tspd_smc_handler
+);
+
+/* Define a SPD runtime service descriptor for Yielding SMC Calls */
+DECLARE_RT_SVC(
+	tspd_std,
+
+	OEN_TOS_START,
+	OEN_TOS_END,
+	SMC_TYPE_YIELD,
+	NULL,
+	tspd_smc_handler
+);
diff --git a/services/spd/tspd/tspd_pm.c b/services/spd/tspd/tspd_pm.c
new file mode 100644
index 0000000..b95ee8f
--- /dev/null
+++ b/services/spd/tspd/tspd_pm.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/psci/psci.h>
+#include <plat/common/platform.h>
+
+#include "tspd_private.h"
+
+/*******************************************************************************
+ * The target cpu is being turned on. Allow the TSPD/TSP to perform any actions
+ * needed. Nothing at the moment.
+ ******************************************************************************/ +static void tspd_cpu_on_handler(u_register_t target_cpu) +{ +} + +/******************************************************************************* + * This cpu is being turned off. Allow the TSPD/TSP to perform any actions + * needed + ******************************************************************************/ +static int32_t tspd_cpu_off_handler(u_register_t unused) +{ + int32_t rc = 0; + uint32_t linear_id = plat_my_core_pos(); + tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; + + assert(tsp_vectors); + assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON); + + /* + * Abort any preempted SMC request before overwriting the SECURE + * context. + */ + tspd_abort_preempted_smc(tsp_ctx); + + /* Program the entry point and enter the TSP */ + cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_off_entry); + rc = tspd_synchronous_sp_entry(tsp_ctx); + + /* + * Read the response from the TSP. A non-zero return means that + * something went wrong while communicating with the TSP. + */ + if (rc != 0) + panic(); + + /* + * Reset TSP's context for a fresh start when this cpu is turned on + * subsequently. + */ + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF); + + return 0; +} + +/******************************************************************************* + * This cpu is being suspended. S-EL1 state must have been saved in the + * resident cpu (mpidr format) if it is a UP/UP migratable TSP. + ******************************************************************************/ +static void tspd_cpu_suspend_handler(u_register_t max_off_pwrlvl) +{ + int32_t rc = 0; + uint32_t linear_id = plat_my_core_pos(); + tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; + + assert(tsp_vectors); + assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON); + + /* + * Abort any preempted SMC request before overwriting the SECURE + * context. + */ + tspd_abort_preempted_smc(tsp_ctx); + + /* Program the entry point and enter the TSP */ + cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_suspend_entry); + rc = tspd_synchronous_sp_entry(tsp_ctx); + + /* + * Read the response from the TSP. A non-zero return means that + * something went wrong while communicating with the TSP. + */ + if (rc) + panic(); + + /* Update its context to reflect the state the TSP is in */ + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_SUSPEND); +} + +/******************************************************************************* + * This cpu has been turned on. Enter the TSP to initialise S-EL1 and other bits + * before passing control back to the Secure Monitor. Entry in S-EL1 is done + * after initialising minimal architectural state that guarantees safe + * execution. + ******************************************************************************/ +static void tspd_cpu_on_finish_handler(u_register_t unused) +{ + int32_t rc = 0; + uint32_t linear_id = plat_my_core_pos(); + tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; + entry_point_info_t tsp_on_entrypoint; + + assert(tsp_vectors); + assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_OFF); + + tspd_init_tsp_ep_state(&tsp_on_entrypoint, + TSP_AARCH64, + (uint64_t) &tsp_vectors->cpu_on_entry, + tsp_ctx); + + /* Initialise this cpu's secure context */ + cm_init_my_context(&tsp_on_entrypoint); + +#if TSP_NS_INTR_ASYNC_PREEMPT + /* + * Disable the NS interrupt locally since it will be enabled globally + * within cm_init_my_context. 
+	 */
+	disable_intr_rm_local(INTR_TYPE_NS, SECURE);
+#endif
+
+	/* Enter the TSP */
+	rc = tspd_synchronous_sp_entry(tsp_ctx);
+
+	/*
+	 * Read the response from the TSP. A non-zero return means that
+	 * something went wrong while communicating with the SP.
+	 */
+	if (rc != 0)
+		panic();
+
+	/* Update its context to reflect the state the SP is in */
+	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);
+}
+
+/*******************************************************************************
+ * This cpu has resumed from suspend. The SPD saved the TSP context when it
+ * completed the preceding suspend call. Use that context to program an entry
+ * into the TSP to allow it to do any remaining bookkeeping.
+ ******************************************************************************/
+static void tspd_cpu_suspend_finish_handler(u_register_t max_off_pwrlvl)
+{
+	int32_t rc = 0;
+	uint32_t linear_id = plat_my_core_pos();
+	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
+
+	assert(tsp_vectors);
+	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_SUSPEND);
+
+	/* Program the entry point, max_off_pwrlvl and enter the SP */
+	write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx),
+		      CTX_GPREG_X0,
+		      max_off_pwrlvl);
+	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_resume_entry);
+	rc = tspd_synchronous_sp_entry(tsp_ctx);
+
+	/*
+	 * Read the response from the TSP. A non-zero return means that
+	 * something went wrong while communicating with the TSP.
+	 */
+	if (rc != 0)
+		panic();
+
+	/* Update its context to reflect the state the SP is in */
+	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);
+}
+
+/*******************************************************************************
+ * Return the type of TSP the TSPD is dealing with. Report the current resident
+ * cpu (mpidr format) if it is a UP/UP migratable TSP.
+ ******************************************************************************/
+static int32_t tspd_cpu_migrate_info(u_register_t *resident_cpu)
+{
+	return TSP_MIGRATE_INFO;
+}
+
+/*******************************************************************************
+ * System is about to be switched off. Allow the TSPD/TSP to perform
+ * any actions needed.
+ ******************************************************************************/
+static void tspd_system_off(void)
+{
+	uint32_t linear_id = plat_my_core_pos();
+	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
+
+	assert(tsp_vectors);
+	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
+
+	/*
+	 * Abort any preempted SMC request before overwriting the SECURE
+	 * context.
+	 */
+	tspd_abort_preempted_smc(tsp_ctx);
+
+	/* Program the entry point */
+	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->system_off_entry);
+
+	/*
+	 * Enter the TSP. We do not care about the return value because we
+	 * must continue the shutdown anyway.
+	 */
+	tspd_synchronous_sp_entry(tsp_ctx);
+}
+
+/*******************************************************************************
+ * System is about to be reset. Allow the TSPD/TSP to perform
+ * any actions needed.
+ ******************************************************************************/
+static void tspd_system_reset(void)
+{
+	uint32_t linear_id = plat_my_core_pos();
+	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
+
+	assert(tsp_vectors);
+	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
+
+	/*
+	 * Abort any preempted SMC request before overwriting the SECURE
+	 * context.
+	 */
+	tspd_abort_preempted_smc(tsp_ctx);
+
+	/* Program the entry point */
+	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->system_reset_entry);
+
+	/*
+	 * Enter the TSP. We do not care about the return value because we
+	 * must continue the reset anyway.
+	 */
+	tspd_synchronous_sp_entry(tsp_ctx);
+}
+
+/*******************************************************************************
+ * Structure populated by the TSP Dispatcher to be given a chance to perform any
+ * TSP bookkeeping before PSCI executes a power mgmt. operation.
+ ******************************************************************************/
+const spd_pm_ops_t tspd_pm = {
+	.svc_on = tspd_cpu_on_handler,
+	.svc_off = tspd_cpu_off_handler,
+	.svc_suspend = tspd_cpu_suspend_handler,
+	.svc_on_finish = tspd_cpu_on_finish_handler,
+	.svc_suspend_finish = tspd_cpu_suspend_finish_handler,
+	.svc_migrate = NULL,
+	.svc_migrate_info = tspd_cpu_migrate_info,
+	.svc_system_off = tspd_system_off,
+	.svc_system_reset = tspd_system_reset
+};
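These hooks are consumed by the generic PSCI layer once psci_register_spd_pm_hook(&tspd_pm) has run (see the TSP_ENTRY_DONE case in tspd_main.c). A simplified sketch of the caller side, modelled on TF-A's lib/psci code (details vary between versions; treat the names as illustrative):

	/* Set by psci_register_spd_pm_hook(); lives in the PSCI library */
	extern const spd_pm_ops_t *psci_spd_pm;

	static void psci_cpu_off_sketch(void)
	{
		/* Give the dispatcher a chance to park the TSP first */
		if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_off != NULL))
			psci_spd_pm->svc_off(0);

		/* ... then do the architectural power-down work ... */
	}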
diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h
new file mode 100644
index 0000000..d6c03c9
--- /dev/null
+++ b/services/spd/tspd/tspd_private.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TSPD_PRIVATE_H
+#define TSPD_PRIVATE_H
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <bl31/interrupt_mgmt.h>
+#include <context.h>
+#include <lib/psci/psci.h>
+
+/*******************************************************************************
+ * Secure Payload PM state information e.g. SP is suspended, uninitialised etc.,
+ * and macros to access the state information in the per-cpu 'state' flags
+ ******************************************************************************/
+#define TSP_PSTATE_OFF		0
+#define TSP_PSTATE_ON		1
+#define TSP_PSTATE_SUSPEND	2
+#define TSP_PSTATE_SHIFT	0
+#define TSP_PSTATE_MASK		0x3
+#define get_tsp_pstate(state)	((state >> TSP_PSTATE_SHIFT) & TSP_PSTATE_MASK)
+#define clr_tsp_pstate(state)	(state &= ~(TSP_PSTATE_MASK \
+					    << TSP_PSTATE_SHIFT))
+#define set_tsp_pstate(st, pst)	do {					\
+					clr_tsp_pstate(st);		\
+					st |= (pst & TSP_PSTATE_MASK) << \
+						TSP_PSTATE_SHIFT;	\
+				} while (0)
+
+
+/*
+ * This flag is used by the TSPD to determine if the TSP is servicing a yielding
+ * SMC request prior to programming the next entry into the TSP e.g. if TSP
+ * execution is preempted by a non-secure interrupt and handed control to the
+ * normal world. If another request which is distinct from what the TSP was
+ * previously doing arrives, then this flag will help the TSPD to either
+ * reject the new request or service it while ensuring that the previous context
+ * is not corrupted.
+ */
+#define YIELD_SMC_ACTIVE_FLAG_SHIFT	2
+#define YIELD_SMC_ACTIVE_FLAG_MASK	1
+#define get_yield_smc_active_flag(state)				\
+				((state >> YIELD_SMC_ACTIVE_FLAG_SHIFT)	\
+				& YIELD_SMC_ACTIVE_FLAG_MASK)
+#define set_yield_smc_active_flag(state)	(state |=		\
+					1 << YIELD_SMC_ACTIVE_FLAG_SHIFT)
+#define clr_yield_smc_active_flag(state)	(state &=		\
+					~(YIELD_SMC_ACTIVE_FLAG_MASK	\
+					<< YIELD_SMC_ACTIVE_FLAG_SHIFT))
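The pstate occupies bits [1:0] of the per-cpu 'state' word and the yield-SMC-active flag bit 2, so the two can be updated independently. A small usage sketch of the accessors above (illustrative only):

	static void state_flags_example(void)
	{
		uint32_t state = 0;

		/* Drive the power state without disturbing the flag bit */
		set_tsp_pstate(state, TSP_PSTATE_ON);

		/* Mark a yielding SMC in flight, then clear it */
		set_yield_smc_active_flag(state);
		if (get_yield_smc_active_flag(state))
			clr_yield_smc_active_flag(state);

		/* The pstate is unaffected by the flag manipulation */
		assert(get_tsp_pstate(state) == TSP_PSTATE_ON);
	}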
+
+/*******************************************************************************
+ * Secure Payload execution state information i.e. aarch32 or aarch64
+ ******************************************************************************/
+#define TSP_AARCH32		MODE_RW_32
+#define TSP_AARCH64		MODE_RW_64
+
+/*******************************************************************************
+ * The SPD should know the type of Secure Payload.
+ ******************************************************************************/
+#define TSP_TYPE_UP		PSCI_TOS_NOT_UP_MIG_CAP
+#define TSP_TYPE_UPM		PSCI_TOS_UP_MIG_CAP
+#define TSP_TYPE_MP		PSCI_TOS_NOT_PRESENT_MP
+
+/*******************************************************************************
+ * Secure Payload migrate type information as known to the SPD. We assume that
+ * the SPD is dealing with an MP Secure Payload.
+ ******************************************************************************/
+#define TSP_MIGRATE_INFO		TSP_TYPE_MP
+
+/*******************************************************************************
+ * Number of cpus present on this platform. TODO: Rely on a topology
+ * tree to determine this in the future to avoid assumptions about mpidr
+ * allocation
+ ******************************************************************************/
+#define TSPD_CORE_COUNT		PLATFORM_CORE_COUNT
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define TSPD_C_RT_CTX_X19		0x0
+#define TSPD_C_RT_CTX_X20		0x8
+#define TSPD_C_RT_CTX_X21		0x10
+#define TSPD_C_RT_CTX_X22		0x18
+#define TSPD_C_RT_CTX_X23		0x20
+#define TSPD_C_RT_CTX_X24		0x28
+#define TSPD_C_RT_CTX_X25		0x30
+#define TSPD_C_RT_CTX_X26		0x38
+#define TSPD_C_RT_CTX_X27		0x40
+#define TSPD_C_RT_CTX_X28		0x48
+#define TSPD_C_RT_CTX_X29		0x50
+#define TSPD_C_RT_CTX_X30		0x58
+#define TSPD_C_RT_CTX_SIZE		0x60
+#define TSPD_C_RT_CTX_ENTRIES		(TSPD_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve caller-saved registers of the
+ * SP context while performing a TSP preemption.
+ * Note: These offsets have to match with the offsets for the corresponding
+ * registers in cpu_context as we are using memcpy to copy the values from
+ * cpu_context to sp_ctx.
+ ******************************************************************************/
+#define TSPD_SP_CTX_X0		0x0
+#define TSPD_SP_CTX_X1		0x8
+#define TSPD_SP_CTX_X2		0x10
+#define TSPD_SP_CTX_X3		0x18
+#define TSPD_SP_CTX_X4		0x20
+#define TSPD_SP_CTX_X5		0x28
+#define TSPD_SP_CTX_X6		0x30
+#define TSPD_SP_CTX_X7		0x38
+#define TSPD_SP_CTX_X8		0x40
+#define TSPD_SP_CTX_X9		0x48
+#define TSPD_SP_CTX_X10		0x50
+#define TSPD_SP_CTX_X11		0x58
+#define TSPD_SP_CTX_X12		0x60
+#define TSPD_SP_CTX_X13		0x68
+#define TSPD_SP_CTX_X14		0x70
+#define TSPD_SP_CTX_X15		0x78
+#define TSPD_SP_CTX_X16		0x80
+#define TSPD_SP_CTX_X17		0x88
+#define TSPD_SP_CTX_SIZE	0x90
+#define TSPD_SP_CTX_ENTRIES	(TSPD_SP_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <lib/cassert.h>
+
+/*
+ * The number of arguments to save during an SMC call for the TSP.
+ * Currently only x1 and x2 are used by the TSP.
+ */
+#define TSP_NUM_ARGS	0x2
+
+/* AArch64 callee saved general purpose register context structure. */
+DEFINE_REG_STRUCT(c_rt_regs, TSPD_C_RT_CTX_ENTRIES);
+
+/*
+ * Compile time assertion to ensure that both the compiler and linker
+ * have the same double word aligned view of the size of the C runtime
+ * register context.
+ */
+CASSERT(TSPD_C_RT_CTX_SIZE == sizeof(c_rt_regs_t),	\
+	assert_spd_c_rt_regs_size_mismatch);
+
+/* SEL1 Secure payload (SP) caller saved register context structure. */
+DEFINE_REG_STRUCT(sp_ctx_regs, TSPD_SP_CTX_ENTRIES);
+
+/*
+ * Compile time assertion to ensure that both the compiler and linker
+ * have the same double word aligned view of the size of the SEL1 Secure
+ * Payload register context.
+ */
+CASSERT(TSPD_SP_CTX_SIZE == sizeof(sp_ctx_regs_t),	\
+	assert_spd_sp_regs_size_mismatch);
+
+/*******************************************************************************
+ * Structure which helps the SPD to maintain the per-cpu state of the SP.
+ * 'saved_spsr_el3' - temporary copy to allow S-EL1 interrupt handling when
+ *                    the TSP has been preempted.
+ * 'saved_elr_el3'  - temporary copy to allow S-EL1 interrupt handling when
+ *                    the TSP has been preempted.
+ * 'state'          - collection of flags to track SP state e.g. on/off
+ * 'mpidr'          - mpidr to associate a context with a cpu
+ * 'c_rt_ctx'       - stack address to restore C runtime context from after
+ *                    returning from a synchronous entry into the SP.
+ * 'cpu_ctx'        - space to maintain SP architectural state
+ * 'saved_tsp_args' - space to store arguments for TSP arithmetic operations
+ *                    which will be queried by the TSP using the TSP_GET_ARGS
+ *                    SMC.
+ * 'sp_ctx'         - space to save the SEL1 Secure Payload(SP) caller saved
+ *                    register context after it has been preempted by an EL3
+ *                    routed NS interrupt and when a Secure Interrupt is taken
+ *                    to SP.
+ ******************************************************************************/
+typedef struct tsp_context {
+	uint64_t saved_elr_el3;
+	uint32_t saved_spsr_el3;
+	uint32_t state;
+	uint64_t mpidr;
+	uint64_t c_rt_ctx;
+	cpu_context_t cpu_ctx;
+	uint64_t saved_tsp_args[TSP_NUM_ARGS];
+#if TSP_NS_INTR_ASYNC_PREEMPT
+	sp_ctx_regs_t sp_ctx;
+	bool preempted_by_sel1_intr;
+#endif
+} tsp_context_t;
+
+/* Helper macros to store and retrieve tsp args from tsp_context */
+#define store_tsp_args(_tsp_ctx, _x1, _x2)	do {\
+				_tsp_ctx->saved_tsp_args[0] = _x1;\
+				_tsp_ctx->saved_tsp_args[1] = _x2;\
+			} while (0)
+
+#define get_tsp_args(_tsp_ctx, _x1, _x2)	do {\
+				_x1 = _tsp_ctx->saved_tsp_args[0];\
+				_x2 = _tsp_ctx->saved_tsp_args[1];\
+			} while (0)
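The saved_tsp_args stash closes the loop between a fresh Yielding SMC and the TSP's later TSP_GET_ARGS request: x1/x2 are stored on the way into the secure world and handed back when the TSP asks for them (see tspd_smc_handler() in tspd_main.c). A compact sketch of that flow, with both macro invocations condensed into one function for illustration:

	static void args_handoff_example(tsp_context_t *tsp_ctx,
					 uint64_t x1, uint64_t x2)
	{
		/* New yielding request from the normal world: stash x1/x2 */
		store_tsp_args(tsp_ctx, x1, x2);

		/* Later, on TSP_GET_ARGS, the stash is read back out */
		get_tsp_args(tsp_ctx, x1, x2);
	}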
+
+/* TSPD power management handlers */
+extern const spd_pm_ops_t tspd_pm;
+
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+typedef struct tsp_vectors tsp_vectors_t;
+
+/*******************************************************************************
+ * Function & Data prototypes
+ ******************************************************************************/
+uint64_t tspd_enter_sp(uint64_t *c_rt_ctx);
+void __dead2 tspd_exit_sp(uint64_t c_rt_ctx, uint64_t ret);
+uint64_t tspd_synchronous_sp_entry(tsp_context_t *tsp_ctx);
+void __dead2 tspd_synchronous_sp_exit(tsp_context_t *tsp_ctx, uint64_t ret);
+void tspd_init_tsp_ep_state(struct entry_point_info *tsp_entry_point,
+			    uint32_t rw,
+			    uint64_t pc,
+			    tsp_context_t *tsp_ctx);
+int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx);
+
+uint64_t tspd_handle_sp_preemption(void *handle);
+
+extern tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];
+extern tsp_vectors_t *tsp_vectors;
+#endif /*__ASSEMBLER__*/
+
+#endif /* TSPD_PRIVATE_H */