path: root/bl32
Diffstat (limited to 'bl32')
-rw-r--r--  bl32/optee/optee.mk                        |  15
-rw-r--r--  bl32/sp_min/aarch32/entrypoint.S           | 367
-rw-r--r--  bl32/sp_min/sp_min.ld.S                    | 157
-rw-r--r--  bl32/sp_min/sp_min.mk                      |  84
-rw-r--r--  bl32/sp_min/sp_min_main.c                  | 250
-rw-r--r--  bl32/sp_min/sp_min_private.h               |  14
-rw-r--r--  bl32/sp_min/wa_cve_2017_5715_bpiall.S      |  74
-rw-r--r--  bl32/sp_min/wa_cve_2017_5715_icache_inv.S  |  75
-rw-r--r--  bl32/tsp/aarch64/tsp_entrypoint.S          | 489
-rw-r--r--  bl32/tsp/aarch64/tsp_exceptions.S          | 162
-rw-r--r--  bl32/tsp/aarch64/tsp_request.S             |  30
-rw-r--r--  bl32/tsp/ffa_helpers.c                     | 252
-rw-r--r--  bl32/tsp/ffa_helpers.h                     | 116
-rw-r--r--  bl32/tsp/tsp.ld.S                          | 129
-rw-r--r--  bl32/tsp/tsp.mk                            |  49
-rw-r--r--  bl32/tsp/tsp_common.c                      | 156
-rw-r--r--  bl32/tsp/tsp_ffa_main.c                    | 656
-rw-r--r--  bl32/tsp/tsp_interrupt.c                   | 115
-rw-r--r--  bl32/tsp/tsp_main.c                        | 288
-rw-r--r--  bl32/tsp/tsp_private.h                     | 145
-rw-r--r--  bl32/tsp/tsp_timer.c                       |  91
21 files changed, 3714 insertions, 0 deletions
diff --git a/bl32/optee/optee.mk b/bl32/optee/optee.mk
new file mode 100644
index 0000000..c8aa7ce
--- /dev/null
+++ b/bl32/optee/optee.mk
@@ -0,0 +1,15 @@
+#
+# Copyright (c) 2016-2019, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# This makefile only aims at complying with the Trusted Firmware-A build
+# process so that "optee" is a valid TF-A AArch32 Secure Payload identifier.
+
+ifneq ($(ARCH),aarch32)
+$(error This directory targets AArch32 platforms only)
+endif
+
+$(eval $(call add_define,AARCH32_SP_OPTEE))
+
+$(info Trusted Firmware-A built for OP-TEE payload support)
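
The only effect of this makefile is registering the AARCH32_SP_OPTEE define. As a
hedged sketch (not part of the patch), platform C code can then gate OP-TEE-specific
hand-off behaviour on it; both helper names below are hypothetical:

    #include <common/bl_common.h>

    extern void plat_setup_optee_boot_args(entry_point_info_t *ep); /* hypothetical */
    extern void plat_setup_sp_min_args(entry_point_info_t *ep);     /* hypothetical */

    void plat_prepare_bl32_args(entry_point_info_t *bl32_ep_info)
    {
    #ifdef AARCH32_SP_OPTEE
    	/* OP-TEE is the BL32 payload: use its boot-argument convention. */
    	plat_setup_optee_boot_args(bl32_ep_info);
    #else
    	/* Generic SP_MIN hand-off. */
    	plat_setup_sp_min_args(bl32_ep_info);
    #endif
    }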
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
new file mode 100644
index 0000000..693dd4b
--- /dev/null
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <common/runtime_svc.h>
+#include <context.h>
+#include <el3_common_macros.S>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/pmf/aarch32/pmf_asm_macros.S>
+#include <lib/runtime_instr.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <smccc_helpers.h>
+#include <smccc_macros.S>
+
+ .globl sp_min_vector_table
+ .globl sp_min_entrypoint
+ .globl sp_min_warm_entrypoint
+ .globl sp_min_handle_smc
+ .globl sp_min_handle_fiq
+
+#define FIXUP_SIZE ((BL32_LIMIT) - (BL32_BASE))
+
+ .macro route_fiq_to_sp_min reg
+ /* -----------------------------------------------------
+	 * FIQs are secure interrupts trapped by the Monitor; the
+	 * non-secure world is not allowed to mask FIQs.
+ * -----------------------------------------------------
+ */
+ ldcopr \reg, SCR
+ orr \reg, \reg, #SCR_FIQ_BIT
+ bic \reg, \reg, #SCR_FW_BIT
+ stcopr \reg, SCR
+ .endm
+
+ .macro clrex_on_monitor_entry
+#if (ARM_ARCH_MAJOR == 7)
+ /*
+	 * On ARMv7, the local exclusive monitor must be cleared when
+	 * entering Monitor mode.
+ */
+ clrex
+#endif
+ .endm
+
+vector_base sp_min_vector_table
+ b sp_min_entrypoint
+ b plat_panic_handler /* Undef */
+ b sp_min_handle_smc /* Syscall */
+ b report_prefetch_abort /* Prefetch abort */
+ b report_data_abort /* Data abort */
+ b plat_panic_handler /* Reserved */
+ b plat_panic_handler /* IRQ */
+ b sp_min_handle_fiq /* FIQ */
+
+
+/*
+ * The Cold boot/Reset entrypoint for SP_MIN
+ */
+func sp_min_entrypoint
+ /* ---------------------------------------------------------------
+ * Stash the previous bootloader arguments r0 - r3 for later use.
+ * ---------------------------------------------------------------
+ */
+ mov r9, r0
+ mov r10, r1
+ mov r11, r2
+ mov r12, r3
+
+#if !RESET_TO_SP_MIN
+ /* ---------------------------------------------------------------------
+ * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
+ * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
+ * and primary/secondary CPU logic should not be executed in this case.
+ *
+ * Also, assume that the previous bootloader has already initialised the
+ * SCTLR, including the CPU endianness, and has initialised the memory.
+ * ---------------------------------------------------------------------
+ */
+ el3_entrypoint_common \
+ _init_sctlr=0 \
+ _warm_boot_mailbox=0 \
+ _secondary_cold_boot=0 \
+ _init_memory=0 \
+ _init_c_runtime=1 \
+ _exception_vectors=sp_min_vector_table \
+ _pie_fixup_size=FIXUP_SIZE
+#else
+ /* ---------------------------------------------------------------------
+ * For RESET_TO_SP_MIN systems which have a programmable reset address,
+ * sp_min_entrypoint() is executed only on the cold boot path so we can
+ * skip the warm boot mailbox mechanism.
+ * ---------------------------------------------------------------------
+ */
+ el3_entrypoint_common \
+ _init_sctlr=1 \
+ _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \
+ _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \
+ _init_memory=1 \
+ _init_c_runtime=1 \
+ _exception_vectors=sp_min_vector_table \
+ _pie_fixup_size=FIXUP_SIZE
+#endif /* RESET_TO_SP_MIN */
+
+#if SP_MIN_WITH_SECURE_FIQ
+ route_fiq_to_sp_min r4
+#endif
+
+ /* ---------------------------------------------------------------------
+ * Relay the previous bootloader's arguments to the platform layer
+ * ---------------------------------------------------------------------
+ */
+ mov r0, r9
+ mov r1, r10
+ mov r2, r11
+ mov r3, r12
+ bl sp_min_early_platform_setup2
+ bl sp_min_plat_arch_setup
+
+ /* Jump to the main function */
+ bl sp_min_main
+
+ /* -------------------------------------------------------------
+ * Clean the .data & .bss sections to main memory. This ensures
+ * that any global data which was initialised by the primary CPU
+ * is visible to secondary CPUs before they enable their data
+ * caches and participate in coherency.
+ * -------------------------------------------------------------
+ */
+ ldr r0, =__DATA_START__
+ ldr r1, =__DATA_END__
+ sub r1, r1, r0
+ bl clean_dcache_range
+
+ ldr r0, =__BSS_START__
+ ldr r1, =__BSS_END__
+ sub r1, r1, r0
+ bl clean_dcache_range
+
+ bl smc_get_next_ctx
+
+ /* r0 points to `smc_ctx_t` */
+ /* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
+ b sp_min_exit
+endfunc sp_min_entrypoint
+
+
+/*
+ * SMC handling function for SP_MIN.
+ */
+func sp_min_handle_smc
+ /* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
+ str lr, [sp, #SMC_CTX_LR_MON]
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ /*
+ * Read the timestamp value and store it on top of the C runtime stack.
+ * The value will be saved to the per-cpu data once the C stack is
+ * available, as a valid stack is needed to call _cpu_data()
+ */
+ strd r0, r1, [sp, #SMC_CTX_GPREG_R0]
+ ldcopr16 r0, r1, CNTPCT_64
+ ldr lr, [sp, #SMC_CTX_SP_MON]
+ strd r0, r1, [lr, #-8]!
+ str lr, [sp, #SMC_CTX_SP_MON]
+ ldrd r0, r1, [sp, #SMC_CTX_GPREG_R0]
+#endif
+
+ smccc_save_gp_mode_regs
+
+ clrex_on_monitor_entry
+
+ /*
+ * `sp` still points to `smc_ctx_t`. Save it to a register
+ * and restore the C runtime stack pointer to `sp`.
+ */
+ mov r2, sp /* handle */
+ ldr sp, [r2, #SMC_CTX_SP_MON]
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ /* Save handle to a callee saved register */
+ mov r6, r2
+
+ /*
+ * Restore the timestamp value and store it in per-cpu data. The value
+ * will be extracted from per-cpu data by the C level SMC handler and
+ * saved to the PMF timestamp region.
+ */
+ ldrd r4, r5, [sp], #8
+ bl _cpu_data
+ strd r4, r5, [r0, #CPU_DATA_PMF_TS0_OFFSET]
+
+ /* Restore handle */
+ mov r2, r6
+#endif
+
+ ldr r0, [r2, #SMC_CTX_SCR]
+ and r3, r0, #SCR_NS_BIT /* flags */
+
+	/* Switch to Secure Mode */
+ bic r0, #SCR_NS_BIT
+ stcopr r0, SCR
+ isb
+
+ ldr r0, [r2, #SMC_CTX_GPREG_R0] /* smc_fid */
+ /* Check whether an SMC64 is issued */
+ tst r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
+ beq 1f
+	/* An SMC64 was issued; return an error to the caller */
+ mov r0, #SMC_UNK
+ str r0, [r2, #SMC_CTX_GPREG_R0]
+ mov r0, r2
+ b sp_min_exit
+1:
+ /* SMC32 is detected */
+ mov r1, #0 /* cookie */
+ bl handle_runtime_svc
+
+ /* `r0` points to `smc_ctx_t` */
+ b sp_min_exit
+endfunc sp_min_handle_smc
+
+/*
+ * Secure Interrupts handling function for SP_MIN.
+ */
+func sp_min_handle_fiq
+#if !SP_MIN_WITH_SECURE_FIQ
+ b plat_panic_handler
+#else
+ /* FIQ has a +4 offset for lr compared to preferred return address */
+ sub lr, lr, #4
+ /* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
+ str lr, [sp, #SMC_CTX_LR_MON]
+
+ smccc_save_gp_mode_regs
+
+ clrex_on_monitor_entry
+
+ /* load run-time stack */
+ mov r2, sp
+ ldr sp, [r2, #SMC_CTX_SP_MON]
+
+ /* Switch to Secure Mode */
+ ldr r0, [r2, #SMC_CTX_SCR]
+ bic r0, #SCR_NS_BIT
+ stcopr r0, SCR
+ isb
+
+ push {r2, r3}
+ bl sp_min_fiq
+ pop {r0, r3}
+
+ b sp_min_exit
+#endif
+endfunc sp_min_handle_fiq
+
+/*
+ * The Warm boot entrypoint for SP_MIN.
+ */
+func sp_min_warm_entrypoint
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ /*
+ * This timestamp update happens with cache off. The next
+ * timestamp collection will need to do cache maintenance prior
+ * to timestamp update.
+ */
+ pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
+ ldcopr16 r2, r3, CNTPCT_64
+ strd r2, r3, [r0]
+#endif
+ /*
+ * On the warm boot path, most of the EL3 initialisations performed by
+ * 'el3_entrypoint_common' must be skipped:
+ *
+	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
+	 *   programming the reset address do we need to initialise the SCTLR.
+	 *   In other cases, we assume this has already been taken care of by
+	 *   the entrypoint code.
+ *
+ * - No need to determine the type of boot, we know it is a warm boot.
+ *
+ * - Do not try to distinguish between primary and secondary CPUs, this
+ * notion only exists for a cold boot.
+ *
+ * - No need to initialise the memory or the C runtime environment,
+ * it has been done once and for all on the cold boot path.
+ */
+ el3_entrypoint_common \
+ _init_sctlr=PROGRAMMABLE_RESET_ADDRESS \
+ _warm_boot_mailbox=0 \
+ _secondary_cold_boot=0 \
+ _init_memory=0 \
+ _init_c_runtime=0 \
+ _exception_vectors=sp_min_vector_table \
+ _pie_fixup_size=0
+
+ /*
+ * We're about to enable MMU and participate in PSCI state coordination.
+ *
+ * The PSCI implementation invokes platform routines that enable CPUs to
+ * participate in coherency. On a system where CPUs are not
+ * cache-coherent without appropriate platform specific programming,
+ * having caches enabled until such time might lead to coherency issues
+ * (resulting from stale data getting speculatively fetched, among
+ * others). Therefore we keep data caches disabled even after enabling
+ * the MMU for such platforms.
+ *
+ * On systems with hardware-assisted coherency, or on single cluster
+ * platforms, such platform specific programming is not required to
+ * enter coherency (as CPUs already are); and there's no reason to have
+ * caches disabled either.
+ */
+#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
+ mov r0, #0
+#else
+ mov r0, #DISABLE_DCACHE
+#endif
+ bl bl32_plat_enable_mmu
+
+#if SP_MIN_WITH_SECURE_FIQ
+ route_fiq_to_sp_min r0
+#endif
+
+ bl sp_min_warm_boot
+ bl smc_get_next_ctx
+ /* r0 points to `smc_ctx_t` */
+ /* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ /* Save smc_ctx_t */
+ mov r5, r0
+
+ pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
+ mov r4, r0
+
+ /*
+ * Invalidate before updating timestamp to ensure previous timestamp
+ * updates on the same cache line with caches disabled are properly
+ * seen by the same core. Without the cache invalidate, the core might
+ * write into a stale cache line.
+ */
+ mov r1, #PMF_TS_SIZE
+ bl inv_dcache_range
+
+ ldcopr16 r0, r1, CNTPCT_64
+ strd r0, r1, [r4]
+
+ /* Restore smc_ctx_t */
+ mov r0, r5
+#endif
+
+ b sp_min_exit
+endfunc sp_min_warm_entrypoint
+
+/*
+ * This function restores the registers from the SMC context and returns
+ * to the mode specified in the saved SPSR.
+ *
+ * Arguments : r0 must point to the SMC context to restore from.
+ */
+func sp_min_exit
+ monitor_exit
+endfunc sp_min_exit
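
sp_min_handle_smc above rejects SMC64 calls by testing the calling-convention bit of
the function ID. A minimal C model of that check, assuming the standard SMCCC
function-ID layout (bit 30 selects the calling convention) and a locally defined
error code:

    #include <stdint.h>

    #define FUNCID_CC_SHIFT 30U
    #define FUNCID_CC_MASK  0x1U
    #define SMC_UNK         0xFFFFFFFFU /* "unknown function" error */

    static uint32_t sp_min_check_cc(uint32_t smc_fid)
    {
    	/* SP_MIN is AArch32-only, so any SMC64 function ID is rejected. */
    	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) != 0U) {
    		return SMC_UNK;
    	}

    	return 0U; /* SMC32: dispatch to handle_runtime_svc() */
    }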
diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S
new file mode 100644
index 0000000..dd81973
--- /dev/null
+++ b/bl32/sp_min/sp_min.ld.S
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/bl_common.ld.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+OUTPUT_FORMAT(elf32-littlearm)
+OUTPUT_ARCH(arm)
+ENTRY(sp_min_vector_table)
+
+MEMORY {
+ RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
+}
+
+#ifdef PLAT_SP_MIN_EXTRA_LD_SCRIPT
+# include <plat_sp_min.ld.S>
+#endif /* PLAT_SP_MIN_EXTRA_LD_SCRIPT */
+
+SECTIONS {
+ RAM_REGION_START = ORIGIN(RAM);
+ RAM_REGION_LENGTH = LENGTH(RAM);
+ . = BL32_BASE;
+
+ ASSERT(. == ALIGN(PAGE_SIZE),
+ "BL32_BASE address is not aligned on a page boundary.")
+
+#if SEPARATE_CODE_AND_RODATA
+ .text . : {
+ __TEXT_START__ = .;
+
+ *entrypoint.o(.text*)
+ *(SORT_BY_ALIGNMENT(.text*))
+ *(.vectors)
+ __TEXT_END_UNALIGNED__ = .;
+
+ . = ALIGN(PAGE_SIZE);
+
+ __TEXT_END__ = .;
+ } >RAM
+
+ /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
+ .ARM.extab . : {
+ *(.ARM.extab* .gnu.linkonce.armextab.*)
+ } >RAM
+
+ .ARM.exidx . : {
+ *(.ARM.exidx* .gnu.linkonce.armexidx.*)
+ } >RAM
+
+ .rodata . : {
+ __RODATA_START__ = .;
+ *(SORT_BY_ALIGNMENT(.rodata*))
+
+ RODATA_COMMON
+
+ . = ALIGN(8);
+
+# include <lib/el3_runtime/pubsub_events.h>
+ __RODATA_END_UNALIGNED__ = .;
+
+ . = ALIGN(PAGE_SIZE);
+
+ __RODATA_END__ = .;
+ } >RAM
+#else /* SEPARATE_CODE_AND_RODATA */
+ .ro . : {
+ __RO_START__ = .;
+
+ *entrypoint.o(.text*)
+ *(SORT_BY_ALIGNMENT(.text*))
+ *(SORT_BY_ALIGNMENT(.rodata*))
+
+ RODATA_COMMON
+
+ . = ALIGN(8);
+
+# include <lib/el3_runtime/pubsub_events.h>
+
+ *(.vectors)
+
+ __RO_END_UNALIGNED__ = .;
+
+ /*
+ * Memory page(s) mapped to this section will be marked as device
+ * memory. No other unexpected data must creep in. Ensure that the rest
+ * of the current memory page is unused.
+ */
+ . = ALIGN(PAGE_SIZE);
+
+ __RO_END__ = .;
+ } >RAM
+#endif /* SEPARATE_CODE_AND_RODATA */
+
+ ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
+ "cpu_ops not defined for this platform.")
+
+ __RW_START__ = .;
+
+ DATA_SECTION >RAM
+ RELA_SECTION >RAM
+
+#ifdef BL32_PROGBITS_LIMIT
+ ASSERT(. <= BL32_PROGBITS_LIMIT, "BL32 progbits has exceeded its limit.")
+#endif /* BL32_PROGBITS_LIMIT */
+
+ STACK_SECTION >RAM
+ BSS_SECTION >RAM
+ XLAT_TABLE_SECTION >RAM
+
+ __BSS_SIZE__ = SIZEOF(.bss);
+
+#if USE_COHERENT_MEM
+ /*
+ * The base address of the coherent memory section must be page-aligned to
+ * guarantee that the coherent data are stored on their own pages and are
+ * not mixed with normal data. This is required to set up the correct
+ * memory attributes for the coherent data page tables.
+ */
+ .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+ __COHERENT_RAM_START__ = .;
+
+ /*
+ * Bakery locks are stored in coherent memory. Each lock's data is
+ * contiguous and fully allocated by the compiler.
+ */
+ *(.bakery_lock)
+ *(.tzfw_coherent_mem)
+
+ __COHERENT_RAM_END_UNALIGNED__ = .;
+
+ /*
+ * Memory page(s) mapped to this section will be marked as device
+ * memory. No other unexpected data must creep in. Ensure that the rest
+ * of the current memory page is unused.
+ */
+ . = ALIGN(PAGE_SIZE);
+
+ __COHERENT_RAM_END__ = .;
+ } >RAM
+
+ __COHERENT_RAM_UNALIGNED_SIZE__ =
+ __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
+#endif /* USE_COHERENT_MEM */
+
+ __RW_END__ = .;
+ __BL32_END__ = .;
+
+ /DISCARD/ : {
+ *(.dynsym .dynstr .hash .gnu.hash)
+ }
+
+ ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.")
+ RAM_REGION_END = .;
+}
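
The .bakery_lock input section collected into coherent_ram above is populated from C.
A sketch of how a lock ends up there, assuming TF-A's bakery_lock.h helpers and a
build with USE_COHERENT_MEM=1:

    #include <lib/locks/bakery/bakery_lock.h>

    /* DEFINE_BAKERY_LOCK places the lock in .bakery_lock, which this
     * linker script maps into the device-memory coherent_ram page. */
    DEFINE_BAKERY_LOCK(plat_shared_lock);

    void plat_locked_op(void)
    {
    	bakery_lock_get(&plat_shared_lock);
    	/* ... critical section, safe even with the D-cache disabled ... */
    	bakery_lock_release(&plat_shared_lock);
    }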
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
new file mode 100644
index 0000000..065468c
--- /dev/null
+++ b/bl32/sp_min/sp_min.mk
@@ -0,0 +1,84 @@
+#
+# Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH}, aarch32)
+ $(error SP_MIN is only supported on AArch32 platforms)
+endif
+
+include lib/extensions/amu/amu.mk
+include lib/psci/psci_lib.mk
+
+INCLUDES += -Iinclude/bl32/sp_min
+
+BL32_SOURCES += bl32/sp_min/sp_min_main.c \
+ bl32/sp_min/aarch32/entrypoint.S \
+ common/runtime_svc.c \
+			plat/common/aarch32/plat_sp_min_common.c \
+ services/arm_arch_svc/arm_arch_svc_setup.c \
+ services/std_svc/std_svc_setup.c \
+ ${PSCI_LIB_SOURCES}
+
+ifeq (${ENABLE_PMF}, 1)
+BL32_SOURCES += lib/pmf/pmf_main.c
+endif
+
+ifneq (${ENABLE_FEAT_AMU},0)
+BL32_SOURCES += ${AMU_SOURCES}
+endif
+
+ifeq (${WORKAROUND_CVE_2017_5715},1)
+BL32_SOURCES += bl32/sp_min/wa_cve_2017_5715_bpiall.S \
+ bl32/sp_min/wa_cve_2017_5715_icache_inv.S
+else
+ifeq (${WORKAROUND_CVE_2022_23960},1)
+BL32_SOURCES += bl32/sp_min/wa_cve_2017_5715_icache_inv.S
+endif
+endif
+
+ifeq (${TRNG_SUPPORT},1)
+BL32_SOURCES += services/std_svc/trng/trng_main.c \
+ services/std_svc/trng/trng_entropy_pool.c
+endif
+
+ifeq (${ERRATA_ABI_SUPPORT}, 1)
+BL32_SOURCES += services/std_svc/errata_abi/errata_abi_main.c
+endif
+
+ifneq (${ENABLE_SYS_REG_TRACE_FOR_NS},0)
+BL32_SOURCES += lib/extensions/sys_reg_trace/aarch32/sys_reg_trace.c
+endif
+
+ifneq (${ENABLE_TRF_FOR_NS},0)
+BL32_SOURCES += lib/extensions/trf/aarch32/trf.c
+endif
+
+BL32_DEFAULT_LINKER_SCRIPT_SOURCE := bl32/sp_min/sp_min.ld.S
+
+ifneq ($(findstring gcc,$(notdir $(LD))),)
+ BL32_LDFLAGS += -Wl,--sort-section=alignment
+else ifneq ($(findstring ld,$(notdir $(LD))),)
+ BL32_LDFLAGS += --sort-section=alignment
+endif
+
+# Include the platform-specific SP_MIN Makefile
+# If no platform-specific SP_MIN Makefile exists, it means SP_MIN is not supported
+# on this platform.
+SP_MIN_PLAT_MAKEFILE := $(wildcard ${PLAT_DIR}/sp_min/sp_min-${PLAT}.mk)
+ifeq (,${SP_MIN_PLAT_MAKEFILE})
+ $(error SP_MIN is not supported on platform ${PLAT})
+else
+ include ${SP_MIN_PLAT_MAKEFILE}
+endif
+
+RESET_TO_SP_MIN := 0
+$(eval $(call add_define,RESET_TO_SP_MIN))
+$(eval $(call assert_boolean,RESET_TO_SP_MIN))
+
+# Flag to allow SP_MIN to handle FIQ interrupts in monitor mode. The platform
+# port is free to override this value. It is disabled by default.
+SP_MIN_WITH_SECURE_FIQ ?= 0
+$(eval $(call add_define,SP_MIN_WITH_SECURE_FIQ))
+$(eval $(call assert_boolean,SP_MIN_WITH_SECURE_FIQ))
diff --git a/bl32/sp_min/sp_min_main.c b/bl32/sp_min/sp_min_main.c
new file mode 100644
index 0000000..26cf207
--- /dev/null
+++ b/bl32/sp_min/sp_min_main.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <context.h>
+#include <drivers/console.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/pmf/pmf.h>
+#include <lib/psci/psci.h>
+#include <lib/runtime_instr.h>
+#include <lib/utils.h>
+#include <plat/common/platform.h>
+#include <platform_sp_min.h>
+#include <services/std_svc.h>
+#include <smccc_helpers.h>
+
+#include "sp_min_private.h"
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+PMF_REGISTER_SERVICE_SMC(rt_instr_svc, PMF_RT_INSTR_SVC_ID,
+ RT_INSTR_TOTAL_IDS, PMF_STORE_ENABLE)
+#endif
+
+/* Pointers to per-core cpu contexts */
+static void *sp_min_cpu_ctx_ptr[PLATFORM_CORE_COUNT];
+
+/* SP_MIN only stores the non secure smc context */
+static smc_ctx_t sp_min_smc_context[PLATFORM_CORE_COUNT];
+
+/******************************************************************************
+ * Define the smccc helper library APIs
+ *****************************************************************************/
+void *smc_get_ctx(unsigned int security_state)
+{
+ assert(security_state == NON_SECURE);
+ return &sp_min_smc_context[plat_my_core_pos()];
+}
+
+void smc_set_next_ctx(unsigned int security_state)
+{
+ assert(security_state == NON_SECURE);
+ /* SP_MIN stores only non secure smc context. Nothing to do here */
+}
+
+void *smc_get_next_ctx(void)
+{
+ return &sp_min_smc_context[plat_my_core_pos()];
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the calling CPU that was set as the context for the specified security
+ * state. NULL is returned if no such structure has been specified.
+ ******************************************************************************/
+void *cm_get_context(uint32_t security_state)
+{
+ assert(security_state == NON_SECURE);
+ return sp_min_cpu_ctx_ptr[plat_my_core_pos()];
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the calling CPU
+ ******************************************************************************/
+void cm_set_context(void *context, uint32_t security_state)
+{
+ assert(security_state == NON_SECURE);
+ sp_min_cpu_ctx_ptr[plat_my_core_pos()] = context;
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the CPU identified by `cpu_idx` that was set as the context for the
+ * specified security state. NULL is returned if no such structure has been
+ * specified.
+ ******************************************************************************/
+void *cm_get_context_by_index(unsigned int cpu_idx,
+ unsigned int security_state)
+{
+ assert(security_state == NON_SECURE);
+ return sp_min_cpu_ctx_ptr[cpu_idx];
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the CPU identified by CPU index.
+ ******************************************************************************/
+void cm_set_context_by_index(unsigned int cpu_idx, void *context,
+ unsigned int security_state)
+{
+ assert(security_state == NON_SECURE);
+ sp_min_cpu_ctx_ptr[cpu_idx] = context;
+}
+
+static void copy_cpu_ctx_to_smc_stx(const regs_t *cpu_reg_ctx,
+ smc_ctx_t *next_smc_ctx)
+{
+ next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
+ next_smc_ctx->r1 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R1);
+ next_smc_ctx->r2 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R2);
+ next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
+ next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
+ next_smc_ctx->scr = read_ctx_reg(cpu_reg_ctx, CTX_SCR);
+}
+
+/*******************************************************************************
+ * This function invokes the PSCI library interface to initialize the
+ * non secure cpu context and copies the relevant cpu context register values
+ * to smc context. These registers will get programmed during `smc_exit`.
+ ******************************************************************************/
+static void sp_min_prepare_next_image_entry(void)
+{
+ entry_point_info_t *next_image_info;
+ cpu_context_t *ctx = cm_get_context(NON_SECURE);
+ u_register_t ns_sctlr;
+
+ /* Program system registers to proceed to non-secure */
+ next_image_info = sp_min_plat_get_bl33_ep_info();
+ assert(next_image_info);
+ assert(NON_SECURE == GET_SECURITY_STATE(next_image_info->h.attr));
+
+ INFO("SP_MIN: Preparing exit to normal world\n");
+ print_entry_point_info(next_image_info);
+
+ psci_prepare_next_non_secure_ctx(next_image_info);
+ smc_set_next_ctx(NON_SECURE);
+
+	/* Copy r0-r2, lr_mon, spsr_mon and scr from cpu context to SMC context */
+ copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
+ smc_get_next_ctx());
+
+ /* Temporarily set the NS bit to access NS SCTLR */
+ write_scr(read_scr() | SCR_NS_BIT);
+ isb();
+ ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
+ write_sctlr(ns_sctlr);
+ isb();
+
+ write_scr(read_scr() & ~SCR_NS_BIT);
+ isb();
+}
+
+/******************************************************************************
+ * Implement the ARM Standard Service function to get arguments for a
+ * particular service.
+ *****************************************************************************/
+uintptr_t get_arm_std_svc_args(unsigned int svc_mask)
+{
+ /* Setup the arguments for PSCI Library */
+ DEFINE_STATIC_PSCI_LIB_ARGS_V1(psci_args, sp_min_warm_entrypoint);
+
+ /* PSCI is the only ARM Standard Service implemented */
+ assert(svc_mask == PSCI_FID_MASK);
+
+ return (uintptr_t)&psci_args;
+}
+
+/******************************************************************************
+ * The SP_MIN main function. Do the platform and PSCI Library setup. Also
+ * initialize the runtime service framework.
+ *****************************************************************************/
+void sp_min_main(void)
+{
+ NOTICE("SP_MIN: %s\n", version_string);
+ NOTICE("SP_MIN: %s\n", build_message);
+
+ /* Perform the SP_MIN platform setup */
+ sp_min_platform_setup();
+
+ /* Initialize the runtime services e.g. psci */
+ INFO("SP_MIN: Initializing runtime services\n");
+ runtime_svc_init();
+
+ /*
+ * We are ready to enter the next EL. Prepare entry into the image
+ * corresponding to the desired security state after the next ERET.
+ */
+ sp_min_prepare_next_image_entry();
+
+ /*
+ * Perform any platform specific runtime setup prior to cold boot exit
+ * from SP_MIN.
+ */
+ sp_min_plat_runtime_setup();
+
+ console_flush();
+}
+
+/******************************************************************************
+ * This function is invoked during warm boot. Invoke the PSCI library
+ * warm boot entry point which takes care of Architectural and platform setup/
+ * restore. Copy the relevant cpu_context register values to smc context which
+ * will get programmed during `smc_exit`.
+ *****************************************************************************/
+void sp_min_warm_boot(void)
+{
+ smc_ctx_t *next_smc_ctx;
+ cpu_context_t *ctx = cm_get_context(NON_SECURE);
+ u_register_t ns_sctlr;
+
+ psci_warmboot_entrypoint();
+
+ smc_set_next_ctx(NON_SECURE);
+
+ next_smc_ctx = smc_get_next_ctx();
+ zeromem(next_smc_ctx, sizeof(smc_ctx_t));
+
+ copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
+ next_smc_ctx);
+
+ /* Temporarily set the NS bit to access NS SCTLR */
+ write_scr(read_scr() | SCR_NS_BIT);
+ isb();
+ ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
+ write_sctlr(ns_sctlr);
+ isb();
+
+ write_scr(read_scr() & ~SCR_NS_BIT);
+ isb();
+}
+
+#if SP_MIN_WITH_SECURE_FIQ
+/******************************************************************************
+ * This function is invoked on secure interrupts. By construction of
+ * SP_MIN, secure interrupts can only be handled while the core executes
+ * in the non-secure state.
+ *****************************************************************************/
+void sp_min_fiq(void)
+{
+ uint32_t id;
+
+ id = plat_ic_acknowledge_interrupt();
+ sp_min_plat_fiq_handler(id);
+ plat_ic_end_of_interrupt(id);
+}
+#endif /* SP_MIN_WITH_SECURE_FIQ */
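
sp_min_fiq() above brackets the platform hook sp_min_plat_fiq_handler() between the
acknowledge and end-of-interrupt calls. A hedged sketch of such a hook; the interrupt
ID and timer helper are hypothetical:

    #include <stdint.h>

    #include <common/debug.h>

    #define PLAT_SECURE_TIMER_IRQ	29U /* hypothetical secure PPI */

    extern void plat_secure_timer_service(void); /* hypothetical */

    void sp_min_plat_fiq_handler(uint32_t id)
    {
    	if (id == PLAT_SECURE_TIMER_IRQ) {
    		plat_secure_timer_service();
    	} else {
    		WARN("SP_MIN: unexpected FIQ %u\n", id);
    	}
    }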
diff --git a/bl32/sp_min/sp_min_private.h b/bl32/sp_min/sp_min_private.h
new file mode 100644
index 0000000..628581a
--- /dev/null
+++ b/bl32/sp_min/sp_min_private.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SP_MIN_PRIVATE_H
+#define SP_MIN_PRIVATE_H
+
+void sp_min_main(void);
+void sp_min_warm_boot(void);
+void sp_min_fiq(void);
+
+#endif /* SP_MIN_PRIVATE_H */
diff --git a/bl32/sp_min/wa_cve_2017_5715_bpiall.S b/bl32/sp_min/wa_cve_2017_5715_bpiall.S
new file mode 100644
index 0000000..385f3d4
--- /dev/null
+++ b/bl32/sp_min/wa_cve_2017_5715_bpiall.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl wa_cve_2017_5715_bpiall_vbar
+
+vector_base wa_cve_2017_5715_bpiall_vbar
+ /* We encode the exception entry in the bottom 3 bits of SP */
+ add sp, sp, #1 /* Reset: 0b111 */
+ add sp, sp, #1 /* Undef: 0b110 */
+ add sp, sp, #1 /* Syscall: 0b101 */
+ add sp, sp, #1 /* Prefetch abort: 0b100 */
+ add sp, sp, #1 /* Data abort: 0b011 */
+ add sp, sp, #1 /* Reserved: 0b010 */
+ add sp, sp, #1 /* IRQ: 0b001 */
+ nop /* FIQ: 0b000 */
+
+ /*
+ * Invalidate the branch predictor, `r0` is a dummy register
+ * and is unused.
+ */
+ stcopr r0, BPIALL
+ isb
+
+ /*
+ * As we cannot use any temporary registers and cannot
+ * clobber SP, we can decode the exception entry using
+ * an unrolled binary search.
+ *
+ * Note, if this code is re-used by other secure payloads,
+ * the below exception entry vectors must be changed to
+ * the vectors specific to that secure payload.
+ */
+
+ tst sp, #4
+ bne 1f
+
+ tst sp, #2
+ bne 3f
+
+ /* Expected encoding: 0x1 and 0x0 */
+ tst sp, #1
+ /* Restore original value of SP by clearing the bottom 3 bits */
+ bic sp, sp, #0x7
+ bne plat_panic_handler /* IRQ */
+ b sp_min_handle_fiq /* FIQ */
+
+1:
+ tst sp, #2
+ bne 2f
+
+ /* Expected encoding: 0x4 and 0x5 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne sp_min_handle_smc /* Syscall */
+ b plat_panic_handler /* Prefetch abort */
+
+2:
+ /* Expected encoding: 0x7 and 0x6 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne sp_min_entrypoint /* Reset */
+ b plat_panic_handler /* Undef */
+
+3:
+ /* Expected encoding: 0x2 and 0x3 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne plat_panic_handler /* Data abort */
+ b plat_panic_handler /* Reserved */
diff --git a/bl32/sp_min/wa_cve_2017_5715_icache_inv.S b/bl32/sp_min/wa_cve_2017_5715_icache_inv.S
new file mode 100644
index 0000000..d0a4625
--- /dev/null
+++ b/bl32/sp_min/wa_cve_2017_5715_icache_inv.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl wa_cve_2017_5715_icache_inv_vbar
+
+vector_base wa_cve_2017_5715_icache_inv_vbar
+ /* We encode the exception entry in the bottom 3 bits of SP */
+ add sp, sp, #1 /* Reset: 0b111 */
+ add sp, sp, #1 /* Undef: 0b110 */
+ add sp, sp, #1 /* Syscall: 0b101 */
+ add sp, sp, #1 /* Prefetch abort: 0b100 */
+ add sp, sp, #1 /* Data abort: 0b011 */
+ add sp, sp, #1 /* Reserved: 0b010 */
+ add sp, sp, #1 /* IRQ: 0b001 */
+ nop /* FIQ: 0b000 */
+
+ /*
+ * Invalidate the instruction cache, which we assume also
+ * invalidates the branch predictor. This may depend on
+ * other CPU specific changes (e.g. an ACTLR setting).
+ */
+ stcopr r0, ICIALLU
+ isb
+
+ /*
+ * As we cannot use any temporary registers and cannot
+ * clobber SP, we can decode the exception entry using
+ * an unrolled binary search.
+ *
+ * Note, if this code is re-used by other secure payloads,
+ * the below exception entry vectors must be changed to
+ * the vectors specific to that secure payload.
+ */
+
+ tst sp, #4
+ bne 1f
+
+ tst sp, #2
+ bne 3f
+
+ /* Expected encoding: 0x1 and 0x0 */
+ tst sp, #1
+ /* Restore original value of SP by clearing the bottom 3 bits */
+ bic sp, sp, #0x7
+ bne plat_panic_handler /* IRQ */
+ b sp_min_handle_fiq /* FIQ */
+
+1:
+ /* Expected encoding: 0x4 and 0x5 */
+ tst sp, #2
+ bne 2f
+
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne sp_min_handle_smc /* Syscall */
+ b plat_panic_handler /* Prefetch abort */
+
+2:
+ /* Expected encoding: 0x7 and 0x6 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne sp_min_entrypoint /* Reset */
+ b plat_panic_handler /* Undef */
+
+3:
+ /* Expected encoding: 0x2 and 0x3 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne plat_panic_handler /* Data abort */
+ b plat_panic_handler /* Reserved */
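
Both workaround vectors above encode the exception slot in SP[2:0] by falling through
a run of "add sp, sp, #1" instructions before decoding it with tst/bic. A C model of
the resulting mapping:

    #include <stdint.h>

    /* Slot values left in SP[2:0] after the fall-through adds. */
    enum wa_entry {
    	WA_FIQ  = 0x0, WA_IRQ   = 0x1, WA_RSVD  = 0x2, WA_DABT  = 0x3,
    	WA_PABT = 0x4, WA_SMC   = 0x5, WA_UNDEF = 0x6, WA_RESET = 0x7,
    };

    static const char *wa_target(uintptr_t sp)
    {
    	switch (sp & 0x7U) { /* mirrors the unrolled tst #4 / #2 / #1 search */
    	case WA_RESET: return "sp_min_entrypoint";
    	case WA_SMC:   return "sp_min_handle_smc";
    	case WA_FIQ:   return "sp_min_handle_fiq";
    	default:       return "plat_panic_handler";
    	}
    }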
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
new file mode 100644
index 0000000..e5ea88c
--- /dev/null
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl32/tsp/tsp.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <smccc_helpers.h>
+
+#include "../tsp_private.h"
+
+
+ .globl tsp_entrypoint
+ .globl tsp_vector_table
+#if SPMC_AT_EL3
+ .globl tsp_cpu_on_entry
+#endif
+
+
+
+ /* ---------------------------------------------
+ * Populate the params in x0-x7 from the pointer
+ * to the smc args structure in x0.
+ * ---------------------------------------------
+ */
+ .macro restore_args_call_smc
+ ldp x6, x7, [x0, #SMC_ARG6]
+ ldp x4, x5, [x0, #SMC_ARG4]
+ ldp x2, x3, [x0, #SMC_ARG2]
+ ldp x0, x1, [x0, #SMC_ARG0]
+ smc #0
+ .endm
+
+ .macro save_eret_context reg1 reg2
+ mrs \reg1, elr_el1
+ mrs \reg2, spsr_el1
+ stp \reg1, \reg2, [sp, #-0x10]!
+ stp x30, x18, [sp, #-0x10]!
+ .endm
+
+ .macro restore_eret_context reg1 reg2
+ ldp x30, x18, [sp], #0x10
+ ldp \reg1, \reg2, [sp], #0x10
+ msr elr_el1, \reg1
+ msr spsr_el1, \reg2
+ .endm
+
+func tsp_entrypoint _align=3
+
+#if ENABLE_PIE
+ /*
+ * ------------------------------------------------------------
+	 * If PIE is enabled, fix up the Global Descriptor Table; this is
+	 * done only once, on the primary core's cold boot path.
+	 *
+	 * The compile-time base address required for the fixup is
+	 * calculated using the "pie_fixup" label in the first page.
+ * ------------------------------------------------------------
+ */
+ pie_fixup:
+ ldr x0, =pie_fixup
+ and x0, x0, #~(PAGE_SIZE_MASK)
+ mov_imm x1, (BL32_LIMIT - BL32_BASE)
+ add x1, x1, x0
+ bl fixup_gdt_reloc
+#endif /* ENABLE_PIE */
+
+ /* ---------------------------------------------
+ * Set the exception vector to something sane.
+ * ---------------------------------------------
+ */
+ adr x0, tsp_exceptions
+ msr vbar_el1, x0
+ isb
+
+ /* ---------------------------------------------
+ * Enable the SError interrupt now that the
+ * exception vectors have been setup.
+ * ---------------------------------------------
+ */
+ msr daifclr, #DAIF_ABT_BIT
+
+ /* ---------------------------------------------
+ * Enable the instruction cache, stack pointer
+ * and data access alignment checks and disable
+ * speculative loads.
+ * ---------------------------------------------
+ */
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+ mrs x0, sctlr_el1
+ orr x0, x0, x1
+ bic x0, x0, #SCTLR_DSSBS_BIT
+ msr sctlr_el1, x0
+ isb
+
+ /* ---------------------------------------------
+ * Invalidate the RW memory used by the BL32
+ * image. This includes the data and NOBITS
+ * sections. This is done to safeguard against
+ * possible corruption of this memory by dirty
+ * cache lines in a system cache as a result of
+ * use by an earlier boot loader stage. If PIE
+ * is enabled however, RO sections including the
+ * GOT may be modified during pie fixup.
+ * Therefore, to be on the safe side, invalidate
+ * the entire image region if PIE is enabled.
+ * ---------------------------------------------
+ */
+#if ENABLE_PIE
+#if SEPARATE_CODE_AND_RODATA
+ adrp x0, __TEXT_START__
+ add x0, x0, :lo12:__TEXT_START__
+#else
+ adrp x0, __RO_START__
+ add x0, x0, :lo12:__RO_START__
+#endif /* SEPARATE_CODE_AND_RODATA */
+#else
+ adrp x0, __RW_START__
+ add x0, x0, :lo12:__RW_START__
+#endif /* ENABLE_PIE */
+ adrp x1, __RW_END__
+ add x1, x1, :lo12:__RW_END__
+ sub x1, x1, x0
+ bl inv_dcache_range
+
+ /* ---------------------------------------------
+ * Zero out NOBITS sections. There are 2 of them:
+ * - the .bss section;
+ * - the coherent memory section.
+ * ---------------------------------------------
+ */
+ adrp x0, __BSS_START__
+ add x0, x0, :lo12:__BSS_START__
+ adrp x1, __BSS_END__
+ add x1, x1, :lo12:__BSS_END__
+ sub x1, x1, x0
+ bl zeromem
+
+#if USE_COHERENT_MEM
+ adrp x0, __COHERENT_RAM_START__
+ add x0, x0, :lo12:__COHERENT_RAM_START__
+ adrp x1, __COHERENT_RAM_END_UNALIGNED__
+ add x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
+ sub x1, x1, x0
+ bl zeromem
+#endif
+
+ /* --------------------------------------------
+ * Allocate a stack whose memory will be marked
+ * as Normal-IS-WBWA when the MMU is enabled.
+ * There is no risk of reading stale stack
+ * memory after enabling the MMU as only the
+ * primary cpu is running at the moment.
+ * --------------------------------------------
+ */
+ bl plat_set_my_stack
+
+ /* ---------------------------------------------
+ * Initialize the stack protector canary before
+ * any C code is called.
+ * ---------------------------------------------
+ */
+#if STACK_PROTECTOR_ENABLED
+ bl update_stack_protector_canary
+#endif
+
+ /* ---------------------------------------------
+ * Perform TSP setup
+ * ---------------------------------------------
+ */
+ bl tsp_setup
+
+#if ENABLE_PAUTH
+ /* ---------------------------------------------
+ * Program APIAKey_EL1
+ * and enable pointer authentication
+ * ---------------------------------------------
+ */
+ bl pauth_init_enable_el1
+#endif /* ENABLE_PAUTH */
+
+ /* ---------------------------------------------
+ * Jump to main function.
+ * ---------------------------------------------
+ */
+ bl tsp_main
+
+ /* ---------------------------------------------
+ * Tell TSPD that we are done initialising
+ * ---------------------------------------------
+ */
+ mov x1, x0
+ mov x0, #TSP_ENTRY_DONE
+ smc #0
+
+tsp_entrypoint_panic:
+ b tsp_entrypoint_panic
+endfunc tsp_entrypoint
+
+
+ /* -------------------------------------------
+ * Table of entrypoint vectors provided to the
+ * TSPD for the various entrypoints
+ * -------------------------------------------
+ */
+vector_base tsp_vector_table
+ b tsp_yield_smc_entry
+ b tsp_fast_smc_entry
+ b tsp_cpu_on_entry
+ b tsp_cpu_off_entry
+ b tsp_cpu_resume_entry
+ b tsp_cpu_suspend_entry
+ b tsp_sel1_intr_entry
+ b tsp_system_off_entry
+ b tsp_system_reset_entry
+ b tsp_abort_yield_smc_entry
+
+ /*---------------------------------------------
+ * This entrypoint is used by the TSPD when this
+ * cpu is to be turned off through a CPU_OFF
+ * psci call to ask the TSP to perform any
+	 * bookkeeping necessary. In the current
+ * implementation, the TSPD expects the TSP to
+ * re-initialise its state so nothing is done
+ * here except for acknowledging the request.
+ * ---------------------------------------------
+ */
+func tsp_cpu_off_entry
+ bl tsp_cpu_off_main
+ restore_args_call_smc
+endfunc tsp_cpu_off_entry
+
+ /*---------------------------------------------
+ * This entrypoint is used by the TSPD when the
+ * system is about to be switched off (through
+ * a SYSTEM_OFF psci call) to ask the TSP to
+ * perform any necessary bookkeeping.
+ * ---------------------------------------------
+ */
+func tsp_system_off_entry
+ bl tsp_system_off_main
+ restore_args_call_smc
+endfunc tsp_system_off_entry
+
+ /*---------------------------------------------
+ * This entrypoint is used by the TSPD when the
+ * system is about to be reset (through a
+ * SYSTEM_RESET psci call) to ask the TSP to
+ * perform any necessary bookkeeping.
+ * ---------------------------------------------
+ */
+func tsp_system_reset_entry
+ bl tsp_system_reset_main
+ restore_args_call_smc
+endfunc tsp_system_reset_entry
+
+ /*---------------------------------------------
+ * This entrypoint is used by the TSPD when this
+ * cpu is turned on using a CPU_ON psci call to
+ * ask the TSP to initialise itself i.e. setup
+ * the mmu, stacks etc. Minimal architectural
+ * state will be initialised by the TSPD when
+	 * this function is entered i.e. caches and MMU
+	 * will be turned off, the execution state
+	 * will be AArch64 and exceptions masked.
+ * ---------------------------------------------
+ */
+func tsp_cpu_on_entry
+ /* ---------------------------------------------
+ * Set the exception vector to something sane.
+ * ---------------------------------------------
+ */
+ adr x0, tsp_exceptions
+ msr vbar_el1, x0
+ isb
+
+ /* Enable the SError interrupt */
+ msr daifclr, #DAIF_ABT_BIT
+
+ /* ---------------------------------------------
+ * Enable the instruction cache, stack pointer
+ * and data access alignment checks
+ * ---------------------------------------------
+ */
+ mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+ mrs x0, sctlr_el1
+ orr x0, x0, x1
+ msr sctlr_el1, x0
+ isb
+
+ /* --------------------------------------------
+ * Give ourselves a stack whose memory will be
+ * marked as Normal-IS-WBWA when the MMU is
+ * enabled.
+ * --------------------------------------------
+ */
+ bl plat_set_my_stack
+
+ /* --------------------------------------------
+ * Enable MMU and D-caches together.
+ * --------------------------------------------
+ */
+ mov x0, #0
+ bl bl32_plat_enable_mmu
+
+#if ENABLE_PAUTH
+ /* ---------------------------------------------
+ * Program APIAKey_EL1
+ * and enable pointer authentication
+ * ---------------------------------------------
+ */
+ bl pauth_init_enable_el1
+#endif /* ENABLE_PAUTH */
+
+ /* ---------------------------------------------
+ * Enter C runtime to perform any remaining
+ * book keeping
+ * ---------------------------------------------
+ */
+ bl tsp_cpu_on_main
+ restore_args_call_smc
+
+ /* Should never reach here */
+tsp_cpu_on_entry_panic:
+ b tsp_cpu_on_entry_panic
+endfunc tsp_cpu_on_entry
+
+ /*---------------------------------------------
+ * This entrypoint is used by the TSPD when this
+ * cpu is to be suspended through a CPU_SUSPEND
+ * psci call to ask the TSP to perform any
+	 * bookkeeping necessary. In the current
+ * implementation, the TSPD saves and restores
+ * the EL1 state.
+ * ---------------------------------------------
+ */
+func tsp_cpu_suspend_entry
+ bl tsp_cpu_suspend_main
+ restore_args_call_smc
+endfunc tsp_cpu_suspend_entry
+
+ /*-------------------------------------------------
+ * This entrypoint is used by the TSPD to pass
+	 * control for `synchronously` handling an S-EL1
+	 * interrupt which was triggered while executing
+	 * in the normal world. 'x0' contains a magic number
+ * which indicates this. TSPD expects control to
+ * be handed back at the end of interrupt
+ * processing. This is done through an SMC.
+ * The handover agreement is:
+ *
+ * 1. PSTATE.DAIF are set upon entry. 'x1' has
+ * the ELR_EL3 from the non-secure state.
+ * 2. TSP has to preserve the callee saved
+ * general purpose registers, SP_EL1/EL0 and
+ * LR.
+ * 3. TSP has to preserve the system and vfp
+ * registers (if applicable).
+ * 4. TSP can use 'x0-x18' to enable its C
+ * runtime.
+ * 5. TSP returns to TSPD using an SMC with
+ * 'x0' = TSP_HANDLED_S_EL1_INTR
+ * ------------------------------------------------
+ */
+func tsp_sel1_intr_entry
+#if DEBUG
+ mov_imm x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
+ cmp x0, x2
+ b.ne tsp_sel1_int_entry_panic
+#endif
+ /*-------------------------------------------------
+ * Save any previous context needed to perform
+ * an exception return from S-EL1 e.g. context
+ * from a previous Non secure Interrupt.
+ * Update statistics and handle the S-EL1
+ * interrupt before returning to the TSPD.
+ * IRQ/FIQs are not enabled since that will
+ * complicate the implementation. Execution
+ * will be transferred back to the normal world
+ * in any case. The handler can return 0
+ * if the interrupt was handled or TSP_PREEMPTED
+ * if the expected interrupt was preempted
+ * by an interrupt that should be handled in EL3
+ * e.g. Group 0 interrupt in GICv3. In both
+ * the cases switch to EL3 using SMC with id
+ * TSP_HANDLED_S_EL1_INTR. Any other return value
+ * from the handler will result in panic.
+ * ------------------------------------------------
+ */
+ save_eret_context x2 x3
+ bl tsp_update_sync_sel1_intr_stats
+ bl tsp_common_int_handler
+ /* Check if the S-EL1 interrupt has been handled */
+ cbnz x0, tsp_sel1_intr_check_preemption
+ b tsp_sel1_intr_return
+tsp_sel1_intr_check_preemption:
+ /* Check if the S-EL1 interrupt has been preempted */
+ mov_imm x1, TSP_PREEMPTED
+ cmp x0, x1
+ b.ne tsp_sel1_int_entry_panic
+tsp_sel1_intr_return:
+ mov_imm x0, TSP_HANDLED_S_EL1_INTR
+ restore_eret_context x2 x3
+ smc #0
+
+ /* Should never reach here */
+tsp_sel1_int_entry_panic:
+ no_ret plat_panic_handler
+endfunc tsp_sel1_intr_entry
+
+ /*---------------------------------------------
+ * This entrypoint is used by the TSPD when this
+ * cpu resumes execution after an earlier
+ * CPU_SUSPEND psci call to ask the TSP to
+ * restore its saved context. In the current
+ * implementation, the TSPD saves and restores
+ * EL1 state so nothing is done here apart from
+ * acknowledging the request.
+ * ---------------------------------------------
+ */
+func tsp_cpu_resume_entry
+ bl tsp_cpu_resume_main
+ restore_args_call_smc
+
+ /* Should never reach here */
+ no_ret plat_panic_handler
+endfunc tsp_cpu_resume_entry
+
+ /*---------------------------------------------
+ * This entrypoint is used by the TSPD to ask
+ * the TSP to service a fast smc request.
+ * ---------------------------------------------
+ */
+func tsp_fast_smc_entry
+ bl tsp_smc_handler
+ restore_args_call_smc
+
+ /* Should never reach here */
+ no_ret plat_panic_handler
+endfunc tsp_fast_smc_entry
+
+ /*---------------------------------------------
+ * This entrypoint is used by the TSPD to ask
+ * the TSP to service a Yielding SMC request.
+ * We will enable preemption during execution
+ * of tsp_smc_handler.
+ * ---------------------------------------------
+ */
+func tsp_yield_smc_entry
+ msr daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
+ bl tsp_smc_handler
+ msr daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
+ restore_args_call_smc
+
+ /* Should never reach here */
+ no_ret plat_panic_handler
+endfunc tsp_yield_smc_entry
+
+ /*---------------------------------------------------------------------
+	 * This entrypoint is used by the TSPD to abort a preempted Yielding
+	 * SMC. It could be on behalf of the non-secure world or because a CPU
+ * suspend/CPU off request needs to abort the preempted SMC.
+ * --------------------------------------------------------------------
+ */
+func tsp_abort_yield_smc_entry
+
+ /*
+ * Exceptions masking is already done by the TSPD when entering this
+ * hook so there is no need to do it here.
+ */
+
+ /* Reset the stack used by the pre-empted SMC */
+ bl plat_set_my_stack
+
+ /*
+ * Allow some cleanup such as releasing locks.
+ */
+ bl tsp_abort_smc_handler
+
+ restore_args_call_smc
+
+ /* Should never reach here */
+ bl plat_panic_handler
+endfunc tsp_abort_yield_smc_entry
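
tsp_vector_table above packs one branch instruction per entrypoint, so a dispatcher
enters the TSP by ERET-ing to the address of the relevant slot. A sketch assuming the
tsp_vectors_t layout from TF-A's include/bl32/tsp/tsp.h (which is not part of this
diff):

    #include <stdint.h>

    typedef uint32_t tsp_vector_isn_t; /* one AArch64 instruction per slot */

    typedef struct tsp_vectors {
    	tsp_vector_isn_t yield_smc_entry;
    	tsp_vector_isn_t fast_smc_entry;
    	tsp_vector_isn_t cpu_on_entry;
    	tsp_vector_isn_t cpu_off_entry;
    	tsp_vector_isn_t cpu_resume_entry;
    	tsp_vector_isn_t cpu_suspend_entry;
    	tsp_vector_isn_t sel1_intr_entry;
    	tsp_vector_isn_t system_off_entry;
    	tsp_vector_isn_t system_reset_entry;
    	tsp_vector_isn_t abort_yield_smc_entry;
    } tsp_vectors_t;

    /*
     * e.g. on CPU_ON the dispatcher would point the secure ERET address at
     * the slot itself (call assumed from the TSPD sources):
     *	cm_set_elr_el3(SECURE, (uint64_t)&tsp_vectors->cpu_on_entry);
     */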
diff --git a/bl32/tsp/aarch64/tsp_exceptions.S b/bl32/tsp/aarch64/tsp_exceptions.S
new file mode 100644
index 0000000..4c6a56a
--- /dev/null
+++ b/bl32/tsp/aarch64/tsp_exceptions.S
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl32/tsp/tsp.h>
+#include <common/bl_common.h>
+
+ /* ----------------------------------------------------
+ * The caller-saved registers x0-x18 and LR are saved
+ * here.
+ * ----------------------------------------------------
+ */
+
+#define SCRATCH_REG_SIZE #(20 * 8)
+
+ .macro save_caller_regs_and_lr
+ sub sp, sp, SCRATCH_REG_SIZE
+ stp x0, x1, [sp]
+ stp x2, x3, [sp, #0x10]
+ stp x4, x5, [sp, #0x20]
+ stp x6, x7, [sp, #0x30]
+ stp x8, x9, [sp, #0x40]
+ stp x10, x11, [sp, #0x50]
+ stp x12, x13, [sp, #0x60]
+ stp x14, x15, [sp, #0x70]
+ stp x16, x17, [sp, #0x80]
+ stp x18, x30, [sp, #0x90]
+ .endm
+
+ .macro restore_caller_regs_and_lr
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #0x10]
+ ldp x4, x5, [sp, #0x20]
+ ldp x6, x7, [sp, #0x30]
+ ldp x8, x9, [sp, #0x40]
+ ldp x10, x11, [sp, #0x50]
+ ldp x12, x13, [sp, #0x60]
+ ldp x14, x15, [sp, #0x70]
+ ldp x16, x17, [sp, #0x80]
+ ldp x18, x30, [sp, #0x90]
+ add sp, sp, SCRATCH_REG_SIZE
+ .endm
+
+ /* ----------------------------------------------------
+ * Common TSP interrupt handling routine
+ * ----------------------------------------------------
+ */
+ .macro handle_tsp_interrupt label
+ /* Enable the SError interrupt */
+ msr daifclr, #DAIF_ABT_BIT
+
+ save_caller_regs_and_lr
+ bl tsp_common_int_handler
+ cbz x0, interrupt_exit_\label
+
+ /*
	 * This interrupt was not targeted at S-EL1 so send it to
+ * the monitor and wait for execution to resume.
+ */
+ smc #0
+interrupt_exit_\label:
+ restore_caller_regs_and_lr
+ exception_return
+ .endm
+
+ .globl tsp_exceptions
+
+ /* -----------------------------------------------------
+ * TSP exception handlers.
+ * -----------------------------------------------------
+ */
+vector_base tsp_exceptions
+ /* -----------------------------------------------------
+ * Current EL with _sp_el0 : 0x0 - 0x200. No exceptions
+ * are expected and treated as irrecoverable errors.
+ * -----------------------------------------------------
+ */
+vector_entry sync_exception_sp_el0
+ b plat_panic_handler
+end_vector_entry sync_exception_sp_el0
+
+vector_entry irq_sp_el0
+ b plat_panic_handler
+end_vector_entry irq_sp_el0
+
+vector_entry fiq_sp_el0
+ b plat_panic_handler
+end_vector_entry fiq_sp_el0
+
+vector_entry serror_sp_el0
+ b plat_panic_handler
+end_vector_entry serror_sp_el0
+
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x400. Only IRQs/FIQs
+ * are expected and handled
+ * -----------------------------------------------------
+ */
+vector_entry sync_exception_sp_elx
+ b plat_panic_handler
+end_vector_entry sync_exception_sp_elx
+
+vector_entry irq_sp_elx
+ handle_tsp_interrupt irq_sp_elx
+end_vector_entry irq_sp_elx
+
+vector_entry fiq_sp_elx
+ handle_tsp_interrupt fiq_sp_elx
+end_vector_entry fiq_sp_elx
+
+vector_entry serror_sp_elx
+ b plat_panic_handler
+end_vector_entry serror_sp_elx
+
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600. No exceptions
+ * are handled since TSP does not implement a lower EL
+ * -----------------------------------------------------
+ */
+vector_entry sync_exception_aarch64
+ b plat_panic_handler
+end_vector_entry sync_exception_aarch64
+
+vector_entry irq_aarch64
+ b plat_panic_handler
+end_vector_entry irq_aarch64
+
+vector_entry fiq_aarch64
+ b plat_panic_handler
+end_vector_entry fiq_aarch64
+
+vector_entry serror_aarch64
+ b plat_panic_handler
+end_vector_entry serror_aarch64
+
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800. No exceptions
+ * handled since the TSP does not implement a lower EL.
+ * -----------------------------------------------------
+ */
+vector_entry sync_exception_aarch32
+ b plat_panic_handler
+end_vector_entry sync_exception_aarch32
+
+vector_entry irq_aarch32
+ b plat_panic_handler
+end_vector_entry irq_aarch32
+
+vector_entry fiq_aarch32
+ b plat_panic_handler
+end_vector_entry fiq_aarch32
+
+vector_entry serror_aarch32
+ b plat_panic_handler
+end_vector_entry serror_aarch32
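
The handle_tsp_interrupt macro above branches on the return value of
tsp_common_int_handler(): zero means the interrupt was fully handled at S-EL1,
anything else hands it to EL3 via SMC. A hedged model of that contract (the real
implementation lives in tsp_interrupt.c, outside this excerpt, and its signature may
differ):

    #include <stdint.h>

    #define TSP_INT_HANDLED 0
    #define TSP_INT_TO_EL3  (-1) /* stand-in for e.g. TSP_PREEMPTED */

    static int32_t tsp_int_handler_model(int targeted_to_sel1)
    {
    	if (targeted_to_sel1) {
    		/* acknowledge, service and signal end-of-interrupt here */
    		return TSP_INT_HANDLED; /* the macro ERETs back */
    	}

    	return TSP_INT_TO_EL3; /* the macro issues smc #0 instead */
    }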
diff --git a/bl32/tsp/aarch64/tsp_request.S b/bl32/tsp/aarch64/tsp_request.S
new file mode 100644
index 0000000..6e238ea
--- /dev/null
+++ b/bl32/tsp/aarch64/tsp_request.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <bl32/tsp/tsp.h>
+
+ .globl tsp_get_magic
+
+/*
+ * This function raises an SMC to retrieve arguments from the secure
+ * monitor/dispatcher and then returns them to the caller in registers
+ * x0 and x1.
+ */
+func tsp_get_magic
+ /* Load arguments */
+ ldr w0, _tsp_fid_get_magic
+
+ /* Raise SMC */
+ smc #0
+
+ /* Return arguments in x1:x0 */
+ ret
+endfunc tsp_get_magic
+
+ .align 2
+_tsp_fid_get_magic:
+ .word TSP_GET_ARGS
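
The routine above returns the retrieved arguments in x0 and x1. Under AAPCS64 a
128-bit integer return value travels in exactly those registers (x0 holding the low
half), so a hedged C-side usage sketch looks like this (the declaration is an
assumption, not taken from this diff):

    #include <stdint.h>

    extern __uint128_t tsp_get_magic(void);

    static void tsp_check_magic(void)
    {
    	__uint128_t v = tsp_get_magic();
    	uint64_t lo = (uint64_t)v;         /* returned in x0 */
    	uint64_t hi = (uint64_t)(v >> 64); /* returned in x1 */

    	/* ... compare lo/hi against the dispatcher's expected magic ... */
    	(void)lo;
    	(void)hi;
    }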
diff --git a/bl32/tsp/ffa_helpers.c b/bl32/tsp/ffa_helpers.c
new file mode 100644
index 0000000..ad70c2b
--- /dev/null
+++ b/bl32/tsp/ffa_helpers.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include "ffa_helpers.h"
+#include <services/ffa_svc.h>
+#include "tsp_private.h"
+
+/*******************************************************************************
+ * Wrapper function to send a direct request.
+ ******************************************************************************/
+smc_args_t ffa_msg_send_direct_req(ffa_endpoint_id16_t sender,
+ ffa_endpoint_id16_t receiver,
+ uint32_t arg3,
+ uint32_t arg4,
+ uint32_t arg5,
+ uint32_t arg6,
+ uint32_t arg7)
+{
+ uint32_t src_dst_ids = (sender << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+ (receiver << FFA_DIRECT_MSG_DESTINATION_SHIFT);
+
+
+ /* Send Direct Request. */
+ return smc_helper(FFA_MSG_SEND_DIRECT_REQ_SMC64, src_dst_ids,
+ 0, arg3, arg4, arg5, arg6, arg7);
+}
+
+/*******************************************************************************
+ * Wrapper function to send a direct response.
+ ******************************************************************************/
+smc_args_t *ffa_msg_send_direct_resp(ffa_endpoint_id16_t sender,
+ ffa_endpoint_id16_t receiver,
+ uint32_t arg3,
+ uint32_t arg4,
+ uint32_t arg5,
+ uint32_t arg6,
+ uint32_t arg7)
+{
+ uint32_t src_dst_ids = (sender << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+ (receiver << FFA_DIRECT_MSG_DESTINATION_SHIFT);
+
+ return set_smc_args(FFA_MSG_SEND_DIRECT_RESP_SMC64, src_dst_ids,
+ 0, arg3, arg4, arg5, arg6, arg7);
+}
+
+/*******************************************************************************
+ * Memory Management Helpers.
+ ******************************************************************************/
+
+/**
+ * Initialises the header of the given `ffa_mtd`, not including the
+ * composite memory region offset.
+ */
+static void ffa_memory_region_init_header(
+ struct ffa_mtd *memory_region, ffa_endpoint_id16_t sender,
+ ffa_mem_attr16_t attributes, ffa_mtd_flag32_t flags,
+ uint64_t handle, uint64_t tag, ffa_endpoint_id16_t *receivers,
+ uint32_t receiver_count, ffa_mem_perm8_t permissions)
+{
+ struct ffa_emad_v1_0 *emad;
+
+ memory_region->emad_offset = sizeof(struct ffa_mtd);
+ memory_region->emad_size = sizeof(struct ffa_emad_v1_0);
+ emad = (struct ffa_emad_v1_0 *)
+ ((uint8_t *) memory_region +
+ memory_region->emad_offset);
+ memory_region->sender_id = sender;
+ memory_region->memory_region_attributes = attributes;
+ memory_region->reserved_36_39 = 0;
+ memory_region->flags = flags;
+ memory_region->handle = handle;
+ memory_region->tag = tag;
+ memory_region->reserved_40_47 = 0;
+ memory_region->emad_count = receiver_count;
+ for (uint32_t i = 0U; i < receiver_count; i++) {
+ emad[i].mapd.endpoint_id = receivers[i];
+ emad[i].mapd.memory_access_permissions = permissions;
+ emad[i].mapd.flags = 0;
+ emad[i].comp_mrd_offset = 0;
+ emad[i].reserved_8_15 = 0;
+ }
+}
+/**
+ * Initialises the given `ffa_mtd` to be used for an
+ * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
+ * TODO: Support differing attributes per receiver.
+ *
+ * Returns the size of the descriptor written.
+ */
+static uint32_t ffa_memory_retrieve_request_init(
+ struct ffa_mtd *memory_region, uint64_t handle,
+ ffa_endpoint_id16_t sender, ffa_endpoint_id16_t *receivers, uint32_t receiver_count,
+ uint64_t tag, ffa_mtd_flag32_t flags,
+ ffa_mem_perm8_t permissions,
+ ffa_mem_attr16_t attributes)
+{
+ ffa_memory_region_init_header(memory_region, sender, attributes, flags,
+ handle, tag, receivers,
+ receiver_count, permissions);
+
+ return sizeof(struct ffa_mtd) +
+ memory_region->emad_count * sizeof(struct ffa_emad_v1_0);
+}
+
+/* Relinquish access to memory region. */
+bool ffa_mem_relinquish(void)
+{
+ smc_args_t ret;
+
+ ret = smc_helper(FFA_MEM_RELINQUISH, 0, 0, 0, 0, 0, 0, 0);
+ if (ffa_func_id(ret) != FFA_SUCCESS_SMC32) {
+ ERROR("%s failed to relinquish memory! error: (%x) %x\n",
+ __func__, ffa_func_id(ret), ffa_error_code(ret));
+ return false;
+ }
+ return true;
+}
+
+/* Retrieve memory shared by another partition. */
+smc_args_t ffa_mem_retrieve_req(uint32_t descriptor_length,
+ uint32_t fragment_length)
+{
+ return smc_helper(FFA_MEM_RETRIEVE_REQ_SMC32,
+ descriptor_length,
+ fragment_length,
+ 0, 0, 0, 0, 0);
+}
+
+/* Retrieve the next memory descriptor fragment. */
+smc_args_t ffa_mem_frag_rx(uint64_t handle, uint32_t recv_length)
+{
+ return smc_helper(FFA_MEM_FRAG_RX,
+ FFA_MEM_HANDLE_LOW(handle),
+ FFA_MEM_HANDLE_HIGH(handle),
+ recv_length,
+ 0, 0, 0, 0);
+}
+
+bool memory_retrieve(struct mailbox *mb,
+ struct ffa_mtd **retrieved,
+ uint64_t handle, ffa_endpoint_id16_t sender,
+ ffa_endpoint_id16_t *receivers, uint32_t receiver_count,
+ ffa_mtd_flag32_t flags, uint32_t *frag_length,
+ uint32_t *total_length)
+{
+ smc_args_t ret;
+ uint32_t descriptor_size;
+ struct ffa_mtd *memory_region;
+
+ if (retrieved == NULL || mb == NULL) {
+ ERROR("Invalid parameters!\n");
+ return false;
+ }
+
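+	/* Build the retrieve request descriptor in the partition's TX buffer. */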
+ memory_region = (struct ffa_mtd *)mb->tx_buffer;
+
+ /* Clear TX buffer. */
+ memset(memory_region, 0, PAGE_SIZE);
+
+ /* Clear local buffer. */
+ memset(mem_region_buffer, 0, REGION_BUF_SIZE);
+
+ descriptor_size = ffa_memory_retrieve_request_init(
+ memory_region, handle, sender, receivers, receiver_count, 0, flags,
+ FFA_MEM_PERM_RW | FFA_MEM_PERM_NX,
+ FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB |
+ FFA_MEM_ATTR_INNER_SHAREABLE);
+
+ ret = ffa_mem_retrieve_req(descriptor_size, descriptor_size);
+
+ if (ffa_func_id(ret) == FFA_ERROR) {
+ ERROR("Couldn't retrieve the memory page. Error: %x\n",
+ ffa_error_code(ret));
+ return false;
+ }
+
+	/*
+	 * The returned total_length and frag_length are used to track the
+	 * state of the transaction: once the sum of the fragment lengths of
+	 * all received fragments equals total_length, the memory transaction
+	 * is complete.
+	 */
+ *total_length = ret._regs[1];
+ *frag_length = ret._regs[2];
+
+ /* Validate frag_length is less than total_length and mailbox size. */
+ if (*frag_length == 0U || *total_length == 0U ||
+ *frag_length > *total_length || *frag_length > (mb->rxtx_page_count * PAGE_SIZE)) {
+ ERROR("Invalid parameters!\n");
+ return false;
+ }
+
+ /* Copy response to local buffer. */
+ memcpy(mem_region_buffer, mb->rx_buffer, *frag_length);
+
+ if (ffa_rx_release()) {
+ ERROR("Failed to release buffer!\n");
+ return false;
+ }
+
+ *retrieved = (struct ffa_mtd *) mem_region_buffer;
+
+ if ((*retrieved)->emad_count > MAX_MEM_SHARE_RECIPIENTS) {
+ VERBOSE("SPMC memory sharing supports max of %u receivers!\n",
+ MAX_MEM_SHARE_RECIPIENTS);
+ return false;
+ }
+
+	/*
+	 * We are retrieving memory shared from the normal world, therefore
+	 * validate that the NS bit was set by the SPMC.
+	 */
+ if (((*retrieved)->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) == 0U) {
+ ERROR("SPMC has not set the NS bit! 0x%x\n",
+ (*retrieved)->memory_region_attributes);
+ return false;
+ }
+
+ VERBOSE("Memory Descriptor Retrieved!\n");
+
+ return true;
+}
+
+/* Relinquish the memory region. */
+bool memory_relinquish(struct ffa_mem_relinquish_descriptor *m, uint64_t handle,
+ ffa_endpoint_id16_t id)
+{
+ ffa_mem_relinquish_init(m, handle, 0, id);
+ return ffa_mem_relinquish();
+}
+
+/*
+ * Tell the SPMC that the partition's RX buffer can be released.
+ * Returns true on failure.
+ */
+bool ffa_rx_release(void)
+{
+ smc_args_t ret;
+
+ ret = smc_helper(FFA_RX_RELEASE, 0, 0, 0, 0, 0, 0, 0);
+ return ret._regs[SMC_ARG0] != FFA_SUCCESS_SMC32;
+}
+
+/* Map the provided RX/TX buffers with the SPMC. Returns true on failure. */
+bool ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages)
+{
+ smc_args_t ret;
+
+ ret = smc_helper(FFA_RXTX_MAP_SMC64, send, recv, pages, 0, 0, 0, 0);
+ return ret._regs[0] != FFA_SUCCESS_SMC32;
+}
diff --git a/bl32/tsp/ffa_helpers.h b/bl32/tsp/ffa_helpers.h
new file mode 100644
index 0000000..e650a07
--- /dev/null
+++ b/bl32/tsp/ffa_helpers.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FFA_HELPERS_H
+#define FFA_HELPERS_H
+
+#include <stdint.h>
+
+#include "../../services/std_svc/spm/el3_spmc/spmc.h"
+#include "../../services/std_svc/spm/el3_spmc/spmc_shared_mem.h"
+#include <services/el3_spmc_ffa_memory.h>
+#include <services/ffa_svc.h>
+#include "tsp_private.h"
+
+static inline uint32_t ffa_func_id(smc_args_t val)
+{
+ return (uint32_t) val._regs[0];
+}
+
+static inline int32_t ffa_error_code(smc_args_t val)
+{
+	return (int32_t) val._regs[2];
+}
+
+extern uint8_t mem_region_buffer[4096 * 2] __aligned(PAGE_SIZE);
+#define REGION_BUF_SIZE sizeof(mem_region_buffer)
+
+/** The maximum number of recipients a memory region may be sent to. */
+#define MAX_MEM_SHARE_RECIPIENTS 2U
+
+/* FFA Memory Management mode flags. */
+#define FFA_FLAG_SHARE_MEMORY (1U << 3)
+#define FFA_FLAG_LEND_MEMORY (1U << 4)
+
+#define FFA_FLAG_MEMORY_MASK (3U << 3)
+
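+/* A 64-bit memory handle is passed to the SPMC in two 32-bit halves. */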
+#define FFA_MEM_HANDLE_LOW(x)	((x) & 0xFFFFFFFF)
+#define FFA_MEM_HANDLE_HIGH(x)	((x) >> 32)
+
+#define FFA_MEM_PERM_DATA_OFFSET 0
+#define FFA_MEM_PERM_DATA_MASK 0x3
+
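+/**
+ * Initialises the given `ffa_mem_relinquish_descriptor` for a single
+ * endpoint and returns the size of the descriptor written.
+ */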
+static inline uint32_t ffa_mem_relinquish_init(
+ struct ffa_mem_relinquish_descriptor *relinquish_request,
+ uint64_t handle, ffa_mtd_flag32_t flags,
+ ffa_endpoint_id16_t sender)
+{
+ relinquish_request->handle = handle;
+ relinquish_request->flags = flags;
+ relinquish_request->endpoint_count = 1;
+ relinquish_request->endpoint_array[0] = sender;
+
+ return sizeof(struct ffa_mem_relinquish_descriptor) + sizeof(ffa_endpoint_id16_t);
+}
+
+/**
+ * Gets the `ffa_comp_mrd` for the given receiver from an
+ * `ffa_mtd`, or NULL if it is not valid.
+ */
+static inline struct ffa_comp_mrd *
+ffa_memory_region_get_composite(struct ffa_mtd *memory_region,
+ uint32_t receiver_index)
+{
+ struct ffa_emad_v1_0 *receivers;
+ uint32_t offset;
+
+ receivers = (struct ffa_emad_v1_0 *)
+ ((uint8_t *) memory_region +
+ memory_region->emad_offset +
+ (memory_region->emad_size * receiver_index));
+ offset = receivers->comp_mrd_offset;
+
+ if (offset == 0U) {
+ return NULL;
+ }
+
+ return (struct ffa_comp_mrd *)
+ ((uint8_t *) memory_region + offset);
+}
+
+static inline uint32_t ffa_get_data_access_attr(ffa_mem_perm8_t perm)
+{
+ return ((perm >> FFA_MEM_PERM_DATA_OFFSET) & FFA_MEM_PERM_DATA_MASK);
+}
+
+smc_args_t ffa_mem_frag_rx(uint64_t handle, uint32_t recv_length);
+bool ffa_mem_relinquish(void);
+bool ffa_rx_release(void);
+bool memory_relinquish(struct ffa_mem_relinquish_descriptor *m, uint64_t handle,
+ ffa_endpoint_id16_t id);
+bool ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages);
+bool memory_retrieve(struct mailbox *mb,
+ struct ffa_mtd **retrieved,
+ uint64_t handle, ffa_endpoint_id16_t sender,
+ ffa_endpoint_id16_t *receivers, uint32_t receiver_count,
+ ffa_mtd_flag32_t flags, uint32_t *frag_length,
+ uint32_t *total_length);
+
+smc_args_t ffa_msg_send_direct_req(ffa_endpoint_id16_t sender,
+ ffa_endpoint_id16_t receiver,
+ uint32_t arg3,
+ uint32_t arg4,
+ uint32_t arg5,
+ uint32_t arg6,
+ uint32_t arg7);
+smc_args_t *ffa_msg_send_direct_resp(ffa_endpoint_id16_t sender,
+ ffa_endpoint_id16_t receiver,
+ uint32_t arg3,
+ uint32_t arg4,
+ uint32_t arg5,
+ uint32_t arg6,
+ uint32_t arg7);
+#endif /* FFA_HELPERS_H */
diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S
new file mode 100644
index 0000000..22bf11d
--- /dev/null
+++ b/bl32/tsp/tsp.ld.S
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/bl_common.ld.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+ENTRY(tsp_entrypoint)
+
+MEMORY {
+ RAM (rwx): ORIGIN = TSP_SEC_MEM_BASE, LENGTH = TSP_SEC_MEM_SIZE
+}
+
+SECTIONS {
+ RAM_REGION_START = ORIGIN(RAM);
+ RAM_REGION_LENGTH = LENGTH(RAM);
+ . = BL32_BASE;
+
+ ASSERT(. == ALIGN(PAGE_SIZE),
+ "BL32_BASE address is not aligned on a page boundary.")
+
+#if SEPARATE_CODE_AND_RODATA
+ .text . : {
+ __TEXT_START__ = .;
+
+ *tsp_entrypoint.o(.text*)
+ *(.text*)
+ *(.vectors)
+ __TEXT_END_UNALIGNED__ = .;
+
+ . = ALIGN(PAGE_SIZE);
+
+ __TEXT_END__ = .;
+ } >RAM
+
+ .rodata . : {
+ __RODATA_START__ = .;
+
+ *(.rodata*)
+
+ RODATA_COMMON
+
+ __RODATA_END_UNALIGNED__ = .;
+ . = ALIGN(PAGE_SIZE);
+
+ __RODATA_END__ = .;
+ } >RAM
+#else /* SEPARATE_CODE_AND_RODATA */
+ .ro . : {
+ __RO_START__ = .;
+
+ *tsp_entrypoint.o(.text*)
+ *(.text*)
+ *(.rodata*)
+
+ RODATA_COMMON
+
+ *(.vectors)
+
+ __RO_END_UNALIGNED__ = .;
+
+ /*
+ * Memory page(s) mapped to this section will be marked as read-only,
+ * executable. No RW data from the next section must creep in. Ensure
+ * that the rest of the current memory page is unused.
+ */
+ . = ALIGN(PAGE_SIZE);
+
+ __RO_END__ = .;
+ } >RAM
+#endif /* SEPARATE_CODE_AND_RODATA */
+
+ __RW_START__ = .;
+
+ DATA_SECTION >RAM
+ RELA_SECTION >RAM
+
+#ifdef TSP_PROGBITS_LIMIT
+ ASSERT(. <= TSP_PROGBITS_LIMIT, "TSP progbits has exceeded its limit.")
+#endif /* TSP_PROGBITS_LIMIT */
+
+ STACK_SECTION >RAM
+ BSS_SECTION >RAM
+ XLAT_TABLE_SECTION >RAM
+
+#if USE_COHERENT_MEM
+ /*
+ * The base address of the coherent memory section must be page-aligned to
+ * guarantee that the coherent data are stored on their own pages and are
+ * not mixed with normal data. This is required to set up the correct memory
+ * attributes for the coherent data page tables.
+ */
+ .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+ __COHERENT_RAM_START__ = .;
+ *(.tzfw_coherent_mem)
+ __COHERENT_RAM_END_UNALIGNED__ = .;
+
+ /*
+ * Memory page(s) mapped to this section will be marked as device
+ * memory. No other unexpected data must creep in. Ensure that the rest
+ * of the current memory page is unused.
+ */
+ . = ALIGN(PAGE_SIZE);
+
+ __COHERENT_RAM_END__ = .;
+ } >RAM
+#endif /* USE_COHERENT_MEM */
+
+ __RW_END__ = .;
+ __BL32_END__ = .;
+
+ /DISCARD/ : {
+ *(.dynsym .dynstr .hash .gnu.hash)
+ }
+
+ __BSS_SIZE__ = SIZEOF(.bss);
+
+#if USE_COHERENT_MEM
+ __COHERENT_RAM_UNALIGNED_SIZE__ =
+ __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
+#endif /* USE_COHERENT_MEM */
+
+ ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.")
+ RAM_REGION_END = .;
+}
diff --git a/bl32/tsp/tsp.mk b/bl32/tsp/tsp.mk
new file mode 100644
index 0000000..4c18131
--- /dev/null
+++ b/bl32/tsp/tsp.mk
@@ -0,0 +1,49 @@
+#
+# Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+INCLUDES += -Iinclude/bl32/tsp
+
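+# Build the FF-A variant of the TSP when the SPMC resides at EL3, otherwise
+# build the TSPD-based variant.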
+ifeq (${SPMC_AT_EL3},1)
+ BL32_SOURCES += bl32/tsp/tsp_ffa_main.c \
+ bl32/tsp/ffa_helpers.c
+else
+ BL32_SOURCES += bl32/tsp/tsp_main.c
+endif
+
+BL32_SOURCES += bl32/tsp/aarch64/tsp_entrypoint.S \
+ bl32/tsp/aarch64/tsp_exceptions.S \
+ bl32/tsp/aarch64/tsp_request.S \
+ bl32/tsp/tsp_interrupt.c \
+ bl32/tsp/tsp_timer.c \
+ bl32/tsp/tsp_common.c \
+ common/aarch64/early_exceptions.S \
+ lib/locks/exclusive/aarch64/spinlock.S
+
+BL32_DEFAULT_LINKER_SCRIPT_SOURCE := bl32/tsp/tsp.ld.S
+
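+# Sort sections by alignment to minimise padding. The flag is passed via -Wl
+# when linking through the gcc driver and directly when invoking ld.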
+ifneq ($(findstring gcc,$(notdir $(LD))),)
+ BL32_LDFLAGS += -Wl,--sort-section=alignment
+else ifneq ($(findstring ld,$(notdir $(LD))),)
+ BL32_LDFLAGS += --sort-section=alignment
+endif
+
+# This flag determines if the TSPD initializes BL32 in tspd_init() (synchronous
+# method) or configures BL31 to pass control to BL32 instead of BL33
+# (asynchronous method).
+TSP_INIT_ASYNC := 0
+
+$(eval $(call assert_boolean,TSP_INIT_ASYNC))
+$(eval $(call add_define,TSP_INIT_ASYNC))
+
+# Include the platform-specific TSP Makefile
+# If no platform-specific TSP Makefile exists, it means TSP is not supported
+# on this platform.
+TSP_PLAT_MAKEFILE := $(wildcard ${PLAT_DIR}/tsp/tsp-${PLAT}.mk)
+ifeq (,${TSP_PLAT_MAKEFILE})
+ $(error TSP is not supported on platform ${PLAT})
+else
+ include ${TSP_PLAT_MAKEFILE}
+endif
diff --git a/bl32/tsp/tsp_common.c b/bl32/tsp/tsp_common.c
new file mode 100644
index 0000000..908b4ff
--- /dev/null
+++ b/bl32/tsp/tsp_common.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stdint.h>
+
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <bl32/tsp/tsp.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/spinlock.h>
+#include <plat/common/platform.h>
+#include <platform_tsp.h>
+#include "tsp_private.h"
+
+#include <platform_def.h>
+
+/*******************************************************************************
+ * Per cpu data structure to populate parameters for an SMC in C code and use
+ * a pointer to this structure in assembler code to populate x0-x7.
+ ******************************************************************************/
+static smc_args_t tsp_smc_args[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Per cpu data structure to keep track of TSP activity
+ ******************************************************************************/
+work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];
+
+smc_args_t *set_smc_args(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint32_t linear_id;
+ smc_args_t *pcpu_smc_args;
+
+	/*
+	 * Return to the Secure Monitor by raising an SMC. The results of the
+	 * service are passed as arguments to the SMC.
+	 */
+ linear_id = plat_my_core_pos();
+ pcpu_smc_args = &tsp_smc_args[linear_id];
+ write_sp_arg(pcpu_smc_args, SMC_ARG0, arg0);
+ write_sp_arg(pcpu_smc_args, SMC_ARG1, arg1);
+ write_sp_arg(pcpu_smc_args, SMC_ARG2, arg2);
+ write_sp_arg(pcpu_smc_args, SMC_ARG3, arg3);
+ write_sp_arg(pcpu_smc_args, SMC_ARG4, arg4);
+ write_sp_arg(pcpu_smc_args, SMC_ARG5, arg5);
+ write_sp_arg(pcpu_smc_args, SMC_ARG6, arg6);
+ write_sp_arg(pcpu_smc_args, SMC_ARG7, arg7);
+
+ return pcpu_smc_args;
+}
+
+/*******************************************************************************
+ * Setup function for TSP.
+ ******************************************************************************/
+void tsp_setup(void)
+{
+ /* Perform early platform-specific setup. */
+ tsp_early_platform_setup();
+
+ /* Perform late platform-specific setup. */
+ tsp_plat_arch_setup();
+
+#if ENABLE_PAUTH
+ /*
+ * Assert that the ARMv8.3-PAuth registers are present or an access
+ * fault will be triggered when they are being saved or restored.
+ */
+ assert(is_armv8_3_pauth_present());
+#endif /* ENABLE_PAUTH */
+}
+
+/*******************************************************************************
+ * This function performs any remaining bookkeeping in the test secure payload
+ * before the system is switched off (in response to a psci SYSTEM_OFF request).
+ ******************************************************************************/
+smc_args_t *tsp_system_off_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ /* Update this cpu's statistics. */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+
+ INFO("TSP: cpu 0x%lx SYSTEM_OFF request\n", read_mpidr());
+	INFO("TSP: cpu 0x%lx: %d smcs, %d erets\n", read_mpidr(),
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count);
+
+ /* Indicate to the SPD that we have completed this request. */
+ return set_smc_args(TSP_SYSTEM_OFF_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * This function performs any remaining bookkeeping in the test secure payload
+ * before the system is reset (in response to a psci SYSTEM_RESET request).
+ ******************************************************************************/
+smc_args_t *tsp_system_reset_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ /* Update this cpu's statistics. */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+
+ INFO("TSP: cpu 0x%lx SYSTEM_RESET request\n", read_mpidr());
+	INFO("TSP: cpu 0x%lx: %d smcs, %d erets\n", read_mpidr(),
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count);
+
+ /* Indicate to the SPD that we have completed this request. */
+ return set_smc_args(TSP_SYSTEM_RESET_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * TSP SMC abort handler. This function is called when aborting a preempted
+ * yielding SMC request. It should clean up all resources owned by the SMC
+ * handler, such as locks or dynamically allocated memory, so that following
+ * SMC requests are executed in a clean environment.
+ ******************************************************************************/
+smc_args_t *tsp_abort_smc_handler(uint64_t func,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ return set_smc_args(TSP_ABORT_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
diff --git a/bl32/tsp/tsp_ffa_main.c b/bl32/tsp/tsp_ffa_main.c
new file mode 100644
index 0000000..1c8c68f
--- /dev/null
+++ b/bl32/tsp/tsp_ffa_main.c
@@ -0,0 +1,656 @@
+/*
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stdint.h>
+
+#include "../../services/std_svc/spm/el3_spmc/spmc.h"
+#include "../../services/std_svc/spm/el3_spmc/spmc_shared_mem.h"
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <bl32/tsp/tsp.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include "ffa_helpers.h"
+#include <lib/psci/psci.h>
+#include <lib/spinlock.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/platform.h>
+#include <platform_tsp.h>
+#include <services/ffa_svc.h>
+#include "tsp_private.h"
+
+#include <platform_def.h>
+
+static ffa_endpoint_id16_t tsp_id, spmc_id;
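+
+/* Scratch buffer used to assemble retrieved memory region descriptors. */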
+uint8_t mem_region_buffer[4096 * 2] __aligned(PAGE_SIZE);
+
+/* Partition Mailbox. */
+static uint8_t send_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+static uint8_t recv_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+/*
+ * Declare a global mailbox for use within the TSP.
+ * This will be initialized appropriately when the buffers
+ * are mapped with the SPMC.
+ */
+static struct mailbox mailbox;
+
+/*******************************************************************************
+ * This enum is used to handle test cases driven from the FF-A Test Driver.
+ ******************************************************************************/
+/* Keep in Sync with FF-A Test Driver. */
+enum message_t {
+ /* Partition Only Messages. */
+ FF_A_RELAY_MESSAGE = 0,
+
+ /* Basic Functionality. */
+ FF_A_ECHO_MESSAGE,
+ FF_A_RELAY_MESSAGE_EL3,
+
+ /* Memory Sharing. */
+ FF_A_MEMORY_SHARE,
+ FF_A_MEMORY_SHARE_FRAGMENTED,
+ FF_A_MEMORY_LEND,
+ FF_A_MEMORY_LEND_FRAGMENTED,
+
+ FF_A_MEMORY_SHARE_MULTI_ENDPOINT,
+ FF_A_MEMORY_LEND_MULTI_ENDPOINT,
+
+ LAST,
+ FF_A_RUN_ALL = 255,
+ FF_A_OP_MAX = 256
+};
+
+#if SPMC_AT_EL3
+extern void tsp_cpu_on_entry(void);
+#endif
+
+/*******************************************************************************
+ * Test Functions.
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Forwards the received message to another partition and asks it to echo the
+ * value back, in order to validate direct messaging functionality.
+ ******************************************************************************/
+static int ffa_test_relay(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ smc_args_t ffa_forward_result;
+ ffa_endpoint_id16_t receiver = arg5;
+
+ ffa_forward_result = ffa_msg_send_direct_req(tsp_id,
+ receiver,
+ FF_A_ECHO_MESSAGE, arg4,
+ 0, 0, 0);
+ return ffa_forward_result._regs[3];
+}
+
+/*******************************************************************************
+ * This function handles memory management tests, currently share and lend.
+ * The test supports the use of FRAG_RX to handle memory descriptors that do
+ * not fit in a single 4KB buffer.
+ ******************************************************************************/
+static int test_memory_send(ffa_endpoint_id16_t sender, uint64_t handle,
+ ffa_mtd_flag32_t flags, bool multi_endpoint)
+{
+ struct ffa_mtd *m;
+ struct ffa_emad_v1_0 *receivers;
+ struct ffa_comp_mrd *composite;
+ int ret, status = 0;
+ unsigned int mem_attrs;
+ char *ptr;
+ ffa_endpoint_id16_t source = sender;
+ uint32_t total_length, recv_length = 0;
+
+ /*
+	 * When testing multiple endpoints, choose a partition ID that resides
+	 * in the normal world so that the SPMC won't detect it as invalid.
+ * TODO: Should get endpoint receiver id and flag as input from NWd.
+ */
+ uint32_t receiver_count = multi_endpoint ? 2 : 1;
+ ffa_endpoint_id16_t test_receivers[2] = { tsp_id, 0x10 };
+
+ /* Ensure that the sender ID resides in the normal world. */
+ if (ffa_is_secure_world_id(sender)) {
+ ERROR("Invalid sender ID 0x%x.\n", sender);
+ return FFA_ERROR_DENIED;
+ }
+
+ if (!memory_retrieve(&mailbox, &m, handle, source, test_receivers,
+ receiver_count, flags, &recv_length,
+ &total_length)) {
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ receivers = (struct ffa_emad_v1_0 *)
+ ((uint8_t *) m + m->emad_offset);
+ while (total_length != recv_length) {
+ smc_args_t ffa_return;
+ uint32_t frag_length;
+
+ ffa_return = ffa_mem_frag_rx(handle, recv_length);
+
+ if (ffa_return._regs[0] == FFA_ERROR) {
+ WARN("TSP: failed to resume mem with handle %lx\n",
+ handle);
+ return ffa_return._regs[2];
+ }
+ frag_length = ffa_return._regs[3];
+
+ /* Validate frag_length is less than total_length and mailbox size. */
+ if (frag_length > total_length ||
+ frag_length > (mailbox.rxtx_page_count * PAGE_SIZE)) {
+ ERROR("Invalid parameters!\n");
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ /* Validate frag_length is less than remaining mem_region_buffer size. */
+ if (frag_length + recv_length >= REGION_BUF_SIZE) {
+ ERROR("Out of memory!\n");
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ memcpy(&mem_region_buffer[recv_length], mailbox.rx_buffer,
+ frag_length);
+
+ if (ffa_rx_release()) {
+ ERROR("Failed to release buffer!\n");
+ return FFA_ERROR_DENIED;
+ }
+
+ recv_length += frag_length;
+
+ assert(recv_length <= total_length);
+ }
+
+ composite = ffa_memory_region_get_composite(m, 0);
+ if (composite == NULL) {
+ WARN("Failed to get composite descriptor!\n");
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ VERBOSE("Address: %p; page_count: %x %lx\n",
+ (void *)composite->address_range_array[0].address,
+ composite->address_range_array[0].page_count, PAGE_SIZE);
+
+ /* This test is only concerned with RW permissions. */
+ if (ffa_get_data_access_attr(
+ receivers[0].mapd.memory_access_permissions) != FFA_MEM_PERM_RW) {
+ ERROR("Data permission in retrieve response %x does not match share/lend %x!\n",
+ ffa_get_data_access_attr(receivers[0].mapd.memory_access_permissions),
+ FFA_MEM_PERM_RW);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ mem_attrs = MT_RW_DATA | MT_EXECUTE_NEVER;
+
+ /* Only expecting to be sent memory from NWd so map accordingly. */
+ mem_attrs |= MT_NS;
+
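+	/*
+	 * Map each constituent range, update its contents to validate the
+	 * mapping, then unmap and relinquish the region below.
+	 */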
+ for (int32_t i = 0; i < (int32_t)composite->address_range_count; i++) {
+ size_t size = composite->address_range_array[i].page_count * PAGE_SIZE;
+
+ ptr = (char *) composite->address_range_array[i].address;
+ ret = mmap_add_dynamic_region(
+ (uint64_t)ptr,
+ (uint64_t)ptr,
+ size, mem_attrs);
+
+ if (ret != 0) {
+ ERROR("Failed [%d] mmap_add_dynamic_region %u (%lx) (%lx) (%x)!\n",
+ i, ret,
+ (uint64_t)composite->address_range_array[i].address,
+ size, mem_attrs);
+
+ /* Remove mappings previously created in this transaction. */
+ for (i--; i >= 0; i--) {
+ ret = mmap_remove_dynamic_region(
+ (uint64_t)composite->address_range_array[i].address,
+ composite->address_range_array[i].page_count * PAGE_SIZE);
+
+ if (ret != 0) {
+ ERROR("Failed [%d] mmap_remove_dynamic_region!\n", i);
+ panic();
+ }
+ }
+
+ return FFA_ERROR_NO_MEMORY;
+ }
+
+ /* Increment memory region for validation purposes. */
+ ++(*ptr);
+
+ /*
+ * Read initial magic number from memory region for
+ * validation purposes.
+ */
+		if (i == 0) {
+ status = *ptr;
+ }
+ }
+
+ for (uint32_t i = 0U; i < composite->address_range_count; i++) {
+ ret = mmap_remove_dynamic_region(
+ (uint64_t)composite->address_range_array[i].address,
+ composite->address_range_array[i].page_count * PAGE_SIZE);
+
+ if (ret != 0) {
+ ERROR("Failed [%d] mmap_remove_dynamic_region!\n", i);
+ return FFA_ERROR_NO_MEMORY;
+ }
+ }
+
+ if (!memory_relinquish((struct ffa_mem_relinquish_descriptor *)mailbox.tx_buffer,
+ m->handle, tsp_id)) {
+ ERROR("Failed to relinquish memory region!\n");
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+ return status;
+}
+
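+/* Build the direct response framework message for a PM request. */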
+static smc_args_t *send_ffa_pm_success(void)
+{
+ return set_smc_args(FFA_MSG_SEND_DIRECT_RESP_SMC32,
+ ((tsp_id & FFA_DIRECT_MSG_ENDPOINT_ID_MASK)
+ << FFA_DIRECT_MSG_SOURCE_SHIFT) | spmc_id,
+ FFA_FWK_MSG_BIT |
+ (FFA_PM_MSG_PM_RESP & FFA_FWK_MSG_MASK),
+ 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * This function performs any remaining bookkeeping in the test secure payload
+ * before this cpu is turned off in response to a psci cpu_off request.
+ ******************************************************************************/
+smc_args_t *tsp_cpu_off_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ /*
+ * This cpu is being turned off, so disable the timer to prevent the
+ * secure timer interrupt from interfering with power down. A pending
+ * interrupt will be lost but we do not care as we are turning off.
+ */
+ tsp_generic_timer_stop();
+
+ /* Update this cpu's statistics. */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_off_count++;
+
+ VERBOSE("TSP: cpu 0x%lx off request\n", read_mpidr());
+ VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu off requests\n",
+ read_mpidr(),
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_off_count);
+
+ return send_ffa_pm_success();
+}
+
+/*******************************************************************************
+ * This function performs any bookkeeping in the test secure payload before
+ * this cpu's architectural state is saved in response to an earlier psci
+ * cpu_suspend request.
+ ******************************************************************************/
+smc_args_t *tsp_cpu_suspend_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ /*
+	 * Save the timer context and stop the timer to prevent the secure
+	 * timer interrupt from interfering with wakeup from the suspend state.
+ */
+ tsp_generic_timer_save();
+ tsp_generic_timer_stop();
+
+ /* Update this cpu's statistics. */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_suspend_count++;
+
+ VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
+ read_mpidr(),
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_suspend_count);
+
+ return send_ffa_pm_success();
+}
+
+/*******************************************************************************
+ * This function performs any bookkeeping in the test secure payload after this
+ * cpu's architectural state has been restored after wakeup from an earlier psci
+ * cpu_suspend request.
+ ******************************************************************************/
+smc_args_t *tsp_cpu_resume_main(uint64_t max_off_pwrlvl,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ /* Restore the generic timer context. */
+ tsp_generic_timer_restore();
+
+ /* Update this cpu's statistics. */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_resume_count++;
+
+ VERBOSE("TSP: cpu 0x%lx resumed. maximum off power level %" PRId64 "\n",
+ read_mpidr(), max_off_pwrlvl);
+ VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu resume requests\n",
+ read_mpidr(),
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_resume_count);
+
+ return send_ffa_pm_success();
+}
+
+/*******************************************************************************
+ * This function handles framework messages. Currently only PM.
+ ******************************************************************************/
+static smc_args_t *handle_framework_message(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ /* Check if it is a power management message from the SPMC. */
+ if (ffa_endpoint_source(arg1) != spmc_id) {
+ goto err;
+ }
+
+ /* Check if it is a PM request message. */
+ if ((arg2 & FFA_FWK_MSG_MASK) == FFA_FWK_MSG_PSCI) {
+ /* Check if it is a PSCI CPU_OFF request. */
+ if (arg3 == PSCI_CPU_OFF) {
+ return tsp_cpu_off_main(arg0, arg1, arg2, arg3,
+ arg4, arg5, arg6, arg7);
+ } else if (arg3 == PSCI_CPU_SUSPEND_AARCH64) {
+ return tsp_cpu_suspend_main(arg0, arg1, arg2, arg3,
+ arg4, arg5, arg6, arg7);
+ }
+ } else if ((arg2 & FFA_FWK_MSG_MASK) == FFA_PM_MSG_WB_REQ) {
+		/* Check if it is a PSCI Warm Boot request. */
+ if (arg3 == FFA_WB_TYPE_NOTS2RAM) {
+ return tsp_cpu_resume_main(arg0, arg1, arg2, arg3,
+ arg4, arg5, arg6, arg7);
+ }
+ }
+
+err:
+ ERROR("%s: Unknown framework message!\n", __func__);
+ panic();
+}
+
+/*******************************************************************************
+ * Handles partition messages. Exercised from the FF-A Test Driver.
+ ******************************************************************************/
+static smc_args_t *handle_partition_message(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint16_t sender = ffa_endpoint_source(arg1);
+ uint16_t receiver = ffa_endpoint_destination(arg1);
+ int status = -1;
+ const bool multi_endpoint = true;
+
+ switch (arg3) {
+ case FF_A_MEMORY_SHARE:
+ INFO("TSP Tests: Memory Share Request--\n");
+ status = test_memory_send(sender, arg4, FFA_FLAG_SHARE_MEMORY, !multi_endpoint);
+ break;
+
+ case FF_A_MEMORY_LEND:
+ INFO("TSP Tests: Memory Lend Request--\n");
+ status = test_memory_send(sender, arg4, FFA_FLAG_LEND_MEMORY, !multi_endpoint);
+ break;
+
+ case FF_A_MEMORY_SHARE_MULTI_ENDPOINT:
+ INFO("TSP Tests: Multi Endpoint Memory Share Request--\n");
+ status = test_memory_send(sender, arg4, FFA_FLAG_SHARE_MEMORY, multi_endpoint);
+ break;
+
+ case FF_A_MEMORY_LEND_MULTI_ENDPOINT:
+ INFO("TSP Tests: Multi Endpoint Memory Lend Request--\n");
+ status = test_memory_send(sender, arg4, FFA_FLAG_LEND_MEMORY, multi_endpoint);
+		break;
+
+ case FF_A_RELAY_MESSAGE:
+ INFO("TSP Tests: Relaying message--\n");
+ status = ffa_test_relay(arg0, arg1, arg2, arg3, arg4,
+ arg5, arg6, arg7);
+ break;
+
+ case FF_A_ECHO_MESSAGE:
+ INFO("TSP Tests: echo message--\n");
+ status = arg4;
+ break;
+
+ default:
+ INFO("TSP Tests: Unknown request ID %d--\n", (int) arg3);
+ }
+
+ /* Swap the sender and receiver in the response. */
+ return ffa_msg_send_direct_resp(receiver, sender, status, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * This function implements the event loop for handling FF-A ABI invocations.
+ ******************************************************************************/
+static smc_args_t *tsp_event_loop(uint64_t smc_fid,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ /* Panic if the SPMC did not forward an FF-A call. */
+ if (!is_ffa_fid(smc_fid)) {
+ ERROR("%s: Unknown SMC FID (0x%lx)\n", __func__, smc_fid);
+ panic();
+ }
+
+ switch (smc_fid) {
+ case FFA_INTERRUPT:
+ /*
+ * IRQs were enabled upon re-entry into the TSP. The interrupt
+ * must have been handled by now. Return to the SPMC indicating
+ * the same.
+ */
+ return set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0);
+
+ case FFA_MSG_SEND_DIRECT_REQ_SMC64:
+ case FFA_MSG_SEND_DIRECT_REQ_SMC32:
+		/* Check if this is a framework message and handle it accordingly. */
+ if ((arg2 & FFA_FWK_MSG_BIT)) {
+ return handle_framework_message(smc_fid, arg1, arg2, arg3,
+ arg4, arg5, arg6, arg7);
+ }
+ return handle_partition_message(smc_fid, arg1, arg2, arg3,
+ arg4, arg5, arg6, arg7);
+ }
+
+ ERROR("%s: Unsupported FF-A FID (0x%lx)\n", __func__, smc_fid);
+ panic();
+}
+
+static smc_args_t *tsp_loop(smc_args_t *args)
+{
+ smc_args_t ret;
+
+ do {
+		/* ---------------------------------------------
+		 * Mask FIQs to avoid preemption in case the EL3
+		 * SPMC next delegates an IRQ or a managed exit.
+		 * Then unmask IRQs so that they can be handled
+		 * immediately upon re-entry.
+		 * ---------------------------------------------
+		 */
+ write_daifset(DAIF_FIQ_BIT);
+ write_daifclr(DAIF_IRQ_BIT);
+ ret = smc_helper(args->_regs[0], args->_regs[1], args->_regs[2],
+ args->_regs[3], args->_regs[4], args->_regs[5],
+ args->_regs[6], args->_regs[7]);
+ args = tsp_event_loop(ret._regs[0], ret._regs[1], ret._regs[2],
+ ret._regs[3], ret._regs[4], ret._regs[5],
+ ret._regs[6], ret._regs[7]);
+ } while (1);
+
+ /* Not Reached. */
+ return NULL;
+}
+
+/*******************************************************************************
+ * TSP main entry point where it gets the opportunity to initialize its secure
+ * state/applications. Once the state is initialized, it must return to the
+ * SPD with a pointer to the 'tsp_vector_table' jump table.
+ ******************************************************************************/
+uint64_t tsp_main(void)
+{
+ smc_args_t smc_args = {0};
+
+ NOTICE("TSP: %s\n", version_string);
+ NOTICE("TSP: %s\n", build_message);
+ INFO("TSP: Total memory base : 0x%lx\n", (unsigned long) BL32_BASE);
+ INFO("TSP: Total memory size : 0x%lx bytes\n", BL32_TOTAL_SIZE);
+ uint32_t linear_id = plat_my_core_pos();
+
+ /* Initialize the platform. */
+ tsp_platform_setup();
+
+ /* Initialize secure/applications state here. */
+ tsp_generic_timer_start();
+
+ /* Register secondary entrypoint with the SPMC. */
+ smc_args = smc_helper(FFA_SECONDARY_EP_REGISTER_SMC64,
+ (uint64_t) tsp_cpu_on_entry,
+ 0, 0, 0, 0, 0, 0);
+ if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
+ ERROR("TSP could not register secondary ep (0x%lx)\n",
+ smc_args._regs[2]);
+ panic();
+ }
+ /* Get TSP's endpoint id. */
+ smc_args = smc_helper(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);
+ if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
+ ERROR("TSP could not get own ID (0x%lx) on core%d\n",
+ smc_args._regs[2], linear_id);
+ panic();
+ }
+
+ tsp_id = smc_args._regs[2];
+ INFO("TSP FF-A endpoint id = 0x%x\n", tsp_id);
+
+ /* Get the SPMC ID. */
+ smc_args = smc_helper(FFA_SPM_ID_GET, 0, 0, 0, 0, 0, 0, 0);
+ if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
+ ERROR("TSP could not get SPMC ID (0x%lx) on core%d\n",
+ smc_args._regs[2], linear_id);
+ panic();
+ }
+
+ spmc_id = smc_args._regs[2];
+
+ /* Call RXTX_MAP to map a 4k RX and TX buffer. */
+ if (ffa_rxtx_map((uintptr_t) send_page,
+ (uintptr_t) recv_page, 1)) {
+		ERROR("TSP could not map its RX/TX buffers\n");
+ panic();
+ }
+
+ mailbox.tx_buffer = send_page;
+ mailbox.rx_buffer = recv_page;
+ mailbox.rxtx_page_count = 1;
+
+ /* Update this cpu's statistics. */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_on_count++;
+
+ VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
+ read_mpidr(),
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_on_count);
+
+ /* Tell SPMD that we are done initialising. */
+ tsp_loop(set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0));
+
+ /* Not reached. */
+ return 0;
+}
+
+/*******************************************************************************
+ * This function performs any remaining book keeping in the test secure payload
+ * after this cpu's architectural state has been setup in response to an earlier
+ * psci cpu_on request.
+ ******************************************************************************/
+smc_args_t *tsp_cpu_on_main(void)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ /* Initialize secure/applications state here. */
+ tsp_generic_timer_start();
+
+ /* Update this cpu's statistics. */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_on_count++;
+ VERBOSE("TSP: cpu 0x%lx turned on\n", read_mpidr());
+ VERBOSE("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
+ read_mpidr(),
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_on_count);
+ /* ---------------------------------------------
+ * Jump to the main event loop to return to EL3
+ * and be ready for the next request on this cpu.
+ * ---------------------------------------------
+ */
+ return tsp_loop(set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0));
+}
diff --git a/bl32/tsp/tsp_interrupt.c b/bl32/tsp/tsp_interrupt.c
new file mode 100644
index 0000000..a847b6c
--- /dev/null
+++ b/bl32/tsp/tsp_interrupt.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2014-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <inttypes.h>
+
+#include <platform_def.h>
+
+#include <arch_helpers.h>
+#include <bl32/tsp/tsp.h>
+#include <common/debug.h>
+#include <plat/common/platform.h>
+
+#include "tsp_private.h"
+
+/*******************************************************************************
+ * This function updates the TSP statistics for S-EL1 interrupts handled
+ * synchronously, i.e. the ones that have been handed over by the TSPD. It also
+ * keeps count of the number of times control was passed back to the TSPD
+ * after handling the interrupt. In the future it will be possible that the
+ * TSPD hands over an S-EL1 interrupt to the TSP but does not expect it to
+ * return execution. This statistic will be useful to distinguish between these
+ * two models of synchronous S-EL1 interrupt handling. The 'elr_el3' parameter
+ * contains the address of the instruction in normal world where this S-EL1
+ * interrupt was generated.
+ ******************************************************************************/
+void tsp_update_sync_sel1_intr_stats(uint32_t type, uint64_t elr_el3)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ tsp_stats[linear_id].sync_sel1_intr_count++;
+ if (type == TSP_HANDLE_SEL1_INTR_AND_RETURN)
+ tsp_stats[linear_id].sync_sel1_intr_ret_count++;
+
+ VERBOSE("TSP: cpu 0x%lx sync s-el1 interrupt request from 0x%" PRIx64 "\n",
+ read_mpidr(), elr_el3);
+ VERBOSE("TSP: cpu 0x%lx: %d sync s-el1 interrupt requests,"
+ " %d sync s-el1 interrupt returns\n",
+ read_mpidr(),
+ tsp_stats[linear_id].sync_sel1_intr_count,
+ tsp_stats[linear_id].sync_sel1_intr_ret_count);
+}
+
+/******************************************************************************
+ * This function is invoked when a non S-EL1 interrupt is received and causes
+ * the preemption of TSP. This function returns TSP_PREEMPTED and results
+ * in the control being handed over to EL3 for handling the interrupt.
+ *****************************************************************************/
+int32_t tsp_handle_preemption(void)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ tsp_stats[linear_id].preempt_intr_count++;
+ VERBOSE("TSP: cpu 0x%lx: %d preempt interrupt requests\n",
+ read_mpidr(), tsp_stats[linear_id].preempt_intr_count);
+ return TSP_PREEMPTED;
+}
+
+/*******************************************************************************
+ * TSP interrupt handler is called as a part of both synchronous and
+ * asynchronous handling of TSP interrupts. Currently the physical timer
+ * interrupt is the only S-EL1 interrupt that this handler expects. It returns
+ * 0 upon successfully handling the expected interrupt and all other
+ * interrupts are treated as normal world or EL3 interrupts.
+ ******************************************************************************/
+int32_t tsp_common_int_handler(void)
+{
+ uint32_t linear_id = plat_my_core_pos(), id;
+
+ /*
+ * Get the highest priority pending interrupt id and see if it is the
+ * secure physical generic timer interrupt in which case, handle it.
+ * Otherwise throw this interrupt at the EL3 firmware.
+ *
+ * There is a small time window between reading the highest priority
+ * pending interrupt and acknowledging it during which another
+ * interrupt of higher priority could become the highest pending
+ * interrupt. This is not expected to happen currently for TSP.
+ */
+ id = plat_ic_get_pending_interrupt_id();
+
+ /* TSP can only handle the secure physical timer interrupt */
+ if (id != TSP_IRQ_SEC_PHY_TIMER) {
+#if SPMC_AT_EL3
+ /*
+	 * With the EL3 FF-A SPMC we expect only the secure timer interrupt to
+	 * fire in the TSP, so panic if any other interrupt does.
+ */
+ ERROR("Unexpected interrupt id %u\n", id);
+ panic();
+#else
+ return tsp_handle_preemption();
+#endif
+ }
+
+ /*
+ * Acknowledge and handle the secure timer interrupt. Also sanity check
+ * if it has been preempted by another interrupt through an assertion.
+ */
+ id = plat_ic_acknowledge_interrupt();
+ assert(id == TSP_IRQ_SEC_PHY_TIMER);
+ tsp_generic_timer_handler();
+ plat_ic_end_of_interrupt(id);
+
+ /* Update the statistics and print some messages */
+ tsp_stats[linear_id].sel1_intr_count++;
+ VERBOSE("TSP: cpu 0x%lx handled S-EL1 interrupt %d\n",
+ read_mpidr(), id);
+ VERBOSE("TSP: cpu 0x%lx: %d S-EL1 requests\n",
+ read_mpidr(), tsp_stats[linear_id].sel1_intr_count);
+ return 0;
+}
diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c
new file mode 100644
index 0000000..1ab2260
--- /dev/null
+++ b/bl32/tsp/tsp_main.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stdint.h>
+
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <bl32/tsp/tsp.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/spinlock.h>
+#include <plat/common/platform.h>
+#include <platform_tsp.h>
+#include "tsp_private.h"
+
+#include <platform_def.h>
+
+/*******************************************************************************
+ * TSP main entry point where it gets the opportunity to initialize its secure
+ * state/applications. Once the state is initialized, it must return to the
+ * SPD with a pointer to the 'tsp_vector_table' jump table.
+ ******************************************************************************/
+uint64_t tsp_main(void)
+{
+ NOTICE("TSP: %s\n", version_string);
+ NOTICE("TSP: %s\n", build_message);
+ INFO("TSP: Total memory base : 0x%lx\n", (unsigned long) BL32_BASE);
+ INFO("TSP: Total memory size : 0x%lx bytes\n", BL32_TOTAL_SIZE);
+
+ uint32_t linear_id = plat_my_core_pos();
+
+ /* Initialize the platform */
+ tsp_platform_setup();
+
+ /* Initialize secure/applications state here */
+ tsp_generic_timer_start();
+
+ /* Update this cpu's statistics */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_on_count++;
+
+ INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
+ read_mpidr(),
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_on_count);
+
+ console_flush();
+ return (uint64_t) &tsp_vector_table;
+}
+
+/*******************************************************************************
+ * This function performs any remaining bookkeeping in the test secure payload
+ * after this cpu's architectural state has been setup in response to an earlier
+ * psci cpu_on request.
+ ******************************************************************************/
+smc_args_t *tsp_cpu_on_main(void)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ /* Initialize secure/applications state here */
+ tsp_generic_timer_start();
+
+ /* Update this cpu's statistics */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_on_count++;
+
+ INFO("TSP: cpu 0x%lx turned on\n", read_mpidr());
+ INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
+ read_mpidr(),
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_on_count);
+	/* Indicate to the SPD that we have completed turning ourselves on */
+ return set_smc_args(TSP_ON_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * This function performs any remaining bookkeeping in the test secure payload
+ * before this cpu is turned off in response to a psci cpu_off request.
+ ******************************************************************************/
+smc_args_t *tsp_cpu_off_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ /*
+ * This cpu is being turned off, so disable the timer to prevent the
+ * secure timer interrupt from interfering with power down. A pending
+ * interrupt will be lost but we do not care as we are turning off.
+ */
+ tsp_generic_timer_stop();
+
+ /* Update this cpu's statistics */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_off_count++;
+
+ INFO("TSP: cpu 0x%lx off request\n", read_mpidr());
+ INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu off requests\n",
+ read_mpidr(),
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_off_count);
+
+ /* Indicate to the SPD that we have completed this request */
+ return set_smc_args(TSP_OFF_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * This function performs any bookkeeping in the test secure payload before
+ * this cpu's architectural state is saved in response to an earlier psci
+ * cpu_suspend request.
+ ******************************************************************************/
+smc_args_t *tsp_cpu_suspend_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ /*
+	 * Save the timer context and stop the timer to prevent the secure
+	 * timer interrupt from interfering with wakeup from the suspend state.
+ */
+ tsp_generic_timer_save();
+ tsp_generic_timer_stop();
+
+ /* Update this cpu's statistics */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_suspend_count++;
+
+ INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
+ read_mpidr(),
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_suspend_count);
+
+ /* Indicate to the SPD that we have completed this request */
+ return set_smc_args(TSP_SUSPEND_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * This function performs any bookkeeping in the test secure payload after this
+ * cpu's architectural state has been restored after wakeup from an earlier psci
+ * cpu_suspend request.
+ ******************************************************************************/
+smc_args_t *tsp_cpu_resume_main(uint64_t max_off_pwrlvl,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ /* Restore the generic timer context */
+ tsp_generic_timer_restore();
+
+ /* Update this cpu's statistics */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+ tsp_stats[linear_id].cpu_resume_count++;
+
+ INFO("TSP: cpu 0x%lx resumed. maximum off power level %" PRId64 "\n",
+ read_mpidr(), max_off_pwrlvl);
+ INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu resume requests\n",
+ read_mpidr(),
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count,
+ tsp_stats[linear_id].cpu_resume_count);
+ /* Indicate to the SPD that we have completed this request */
+ return set_smc_args(TSP_RESUME_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * TSP fast SMC handler. The secure monitor jumps to this function by
+ * performing an ERET after populating registers X0-X7. The arguments are
+ * received in the function parameters in order. Once the service is rendered,
+ * this function returns to the Secure Monitor by raising an SMC.
+ ******************************************************************************/
+smc_args_t *tsp_smc_handler(uint64_t func,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint128_t service_args;
+ uint64_t service_arg0;
+ uint64_t service_arg1;
+ uint64_t results[2];
+ uint32_t linear_id = plat_my_core_pos();
+ u_register_t dit;
+
+ /* Update this cpu's statistics */
+ tsp_stats[linear_id].smc_count++;
+ tsp_stats[linear_id].eret_count++;
+
+ INFO("TSP: cpu 0x%lx received %s smc 0x%" PRIx64 "\n", read_mpidr(),
+ ((func >> 31) & 1) == 1 ? "fast" : "yielding",
+ func);
+ INFO("TSP: cpu 0x%lx: %d smcs, %d erets\n", read_mpidr(),
+ tsp_stats[linear_id].smc_count,
+ tsp_stats[linear_id].eret_count);
+
+ /* Render secure services and obtain results here */
+ results[0] = arg1;
+ results[1] = arg2;
+
+ /*
+	 * Request a service back from the dispatcher/secure monitor.
+	 * Execution resumes here once the request has been serviced.
+ */
+ service_args = tsp_get_magic();
+ service_arg0 = (uint64_t)service_args;
+ service_arg1 = (uint64_t)(service_args >> 64U);
+
+#if CTX_INCLUDE_MTE_REGS
+ /*
+ * Write a dummy value to an MTE register, to simulate usage in the
+ * secure world
+ */
+ write_gcr_el1(0x99);
+#endif
+
+ /* Determine the function to perform based on the function ID */
+ switch (TSP_BARE_FID(func)) {
+ case TSP_ADD:
+ results[0] += service_arg0;
+ results[1] += service_arg1;
+ break;
+ case TSP_SUB:
+ results[0] -= service_arg0;
+ results[1] -= service_arg1;
+ break;
+ case TSP_MUL:
+ results[0] *= service_arg0;
+ results[1] *= service_arg1;
+ break;
+ case TSP_DIV:
+ results[0] /= service_arg0 ? service_arg0 : 1;
+ results[1] /= service_arg1 ? service_arg1 : 1;
+ break;
+ case TSP_CHECK_DIT:
+ if (!is_feat_dit_supported()) {
+ ERROR("DIT not supported\n");
+ results[0] = 0;
+ results[1] = 0xffff;
+ break;
+ }
+ dit = read_dit();
+ results[0] = dit == service_arg0;
+ results[1] = dit;
+ /* Toggle the dit bit */
+ write_dit(service_arg0 != 0U ? 0 : DIT_BIT);
+ break;
+ default:
+ break;
+ }
+
+ return set_smc_args(func, 0,
+ results[0],
+ results[1],
+ 0, 0, 0, 0);
+}
diff --git a/bl32/tsp/tsp_private.h b/bl32/tsp/tsp_private.h
new file mode 100644
index 0000000..66873e2
--- /dev/null
+++ b/bl32/tsp/tsp_private.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2014-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TSP_PRIVATE_H
+#define TSP_PRIVATE_H
+
+/*******************************************************************************
+ * The TSP memory footprint starts at address BL32_BASE and ends with the
+ * linker symbol __BL32_END__. Use these addresses to compute the TSP image
+ * size.
+ ******************************************************************************/
+#define BL32_TOTAL_LIMIT BL32_END
+#define BL32_TOTAL_SIZE (BL32_TOTAL_LIMIT - (unsigned long) BL32_BASE)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <bl32/tsp/tsp.h>
+#include <lib/cassert.h>
+#include <lib/spinlock.h>
+#include <smccc_helpers.h>
+
+typedef struct work_statistics {
+ /* Number of s-el1 interrupts on this cpu */
+ uint32_t sel1_intr_count;
+ /* Number of non s-el1 interrupts on this cpu which preempted TSP */
+ uint32_t preempt_intr_count;
+ /* Number of sync s-el1 interrupts on this cpu */
+ uint32_t sync_sel1_intr_count;
+ /* Number of s-el1 interrupts returns on this cpu */
+ uint32_t sync_sel1_intr_ret_count;
+ uint32_t smc_count; /* Number of returns on this cpu */
+ uint32_t eret_count; /* Number of entries on this cpu */
+ uint32_t cpu_on_count; /* Number of cpu on requests */
+ uint32_t cpu_off_count; /* Number of cpu off requests */
+ uint32_t cpu_suspend_count; /* Number of cpu suspend requests */
+ uint32_t cpu_resume_count; /* Number of cpu resume requests */
+} __aligned(CACHE_WRITEBACK_GRANULE) work_statistics_t;
+
+/* Macros to access members of the above structure using their offsets */
+#define read_sp_arg(args, offset) ((args)->_regs[offset >> 3])
+#define write_sp_arg(args, offset, val) (((args)->_regs[offset >> 3]) \
+ = val)
+
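+/* Returns the two 64-bit service arguments packed into a uint128_t. */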
+uint128_t tsp_get_magic(void);
+
+smc_args_t *set_smc_args(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+smc_args_t *tsp_cpu_resume_main(uint64_t max_off_pwrlvl,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+smc_args_t *tsp_cpu_suspend_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+smc_args_t *tsp_cpu_on_main(void);
+smc_args_t *tsp_cpu_off_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+
+/* Generic Timer functions */
+void tsp_generic_timer_start(void);
+void tsp_generic_timer_handler(void);
+void tsp_generic_timer_stop(void);
+void tsp_generic_timer_save(void);
+void tsp_generic_timer_restore(void);
+
+/* S-EL1 interrupt management functions */
+void tsp_update_sync_sel1_intr_stats(uint32_t type, uint64_t elr_el3);
+
+/* Data structure to keep track of TSP statistics */
+extern work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];
+
+/* Vector table of jumps */
+extern tsp_vectors_t tsp_vector_table;
+
+/* functions */
+int32_t tsp_common_int_handler(void);
+int32_t tsp_handle_preemption(void);
+
+smc_args_t *tsp_abort_smc_handler(uint64_t func,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+
+smc_args_t *tsp_smc_handler(uint64_t func,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+
+smc_args_t *tsp_system_reset_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+
+smc_args_t *tsp_system_off_main(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7);
+
+uint64_t tsp_main(void);
+#endif /* __ASSEMBLER__ */
+
+#endif /* TSP_PRIVATE_H */
diff --git a/bl32/tsp/tsp_timer.c b/bl32/tsp/tsp_timer.c
new file mode 100644
index 0000000..d1ff2b0
--- /dev/null
+++ b/bl32/tsp/tsp_timer.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <plat/common/platform.h>
+
+#include "tsp_private.h"
+
+/*******************************************************************************
+ * Data structure to keep track of per-cpu secure generic timer context across
+ * power management operations.
+ ******************************************************************************/
+typedef struct timer_context {
+ uint64_t cval;
+ uint32_t ctl;
+} timer_context_t;
+
+static timer_context_t pcpu_timer_context[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * This function initializes the generic timer to fire every 0.5 second
+ ******************************************************************************/
+void tsp_generic_timer_start(void)
+{
+ uint64_t cval;
+ uint32_t ctl = 0;
+
+	/* The timer will fire every 0.5 second: cval = now + (freq / 2) ticks */
+ cval = read_cntpct_el0() + (read_cntfrq_el0() >> 1);
+ write_cntps_cval_el1(cval);
+
+ /* Enable the secure physical timer */
+ set_cntp_ctl_enable(ctl);
+ write_cntps_ctl_el1(ctl);
+}
+
+/*******************************************************************************
+ * This function deasserts the timer interrupt and sets it up again
+ ******************************************************************************/
+void tsp_generic_timer_handler(void)
+{
+ /* Ensure that the timer did assert the interrupt */
+ assert(get_cntp_ctl_istatus(read_cntps_ctl_el1()));
+
+ /*
+ * Disable the timer and reprogram it. The barriers ensure that there is
+ * no reordering of instructions around the reprogramming code.
+ */
+ isb();
+ write_cntps_ctl_el1(0);
+ tsp_generic_timer_start();
+ isb();
+}
+
+/*******************************************************************************
+ * This function deasserts the timer interrupt prior to cpu power down
+ ******************************************************************************/
+void tsp_generic_timer_stop(void)
+{
+ /* Disable the timer */
+ write_cntps_ctl_el1(0);
+}
+
+/*******************************************************************************
+ * This function saves the timer context prior to cpu suspension
+ ******************************************************************************/
+void tsp_generic_timer_save(void)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ pcpu_timer_context[linear_id].cval = read_cntps_cval_el1();
+ pcpu_timer_context[linear_id].ctl = read_cntps_ctl_el1();
+ flush_dcache_range((uint64_t) &pcpu_timer_context[linear_id],
+ sizeof(pcpu_timer_context[linear_id]));
+}
+
+/*******************************************************************************
+ * This function restores the timer context post cpu resumption
+ ******************************************************************************/
+void tsp_generic_timer_restore(void)
+{
+ uint32_t linear_id = plat_my_core_pos();
+
+ write_cntps_cval_el1(pcpu_timer_context[linear_id].cval);
+ write_cntps_ctl_el1(pcpu_timer_context[linear_id].ctl);
+}