author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 17:43:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 17:43:51 +0000
commit     be58c81aff4cd4c0ccf43dbd7998da4a6a08c03b
tree       779c248fb61c83f65d1f0dc867f2053d76b4e03a /services
parent     Initial commit.
Adding upstream version 2.10.0+dfsg. (upstream/2.10.0+dfsg, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'services')
-rw-r--r--  services/arm_arch_svc/arm_arch_svc_setup.c | 179
-rw-r--r--  services/spd/opteed/opteed.mk | 35
-rw-r--r--  services/spd/opteed/opteed_common.c | 111
-rw-r--r--  services/spd/opteed/opteed_helpers.S | 79
-rw-r--r--  services/spd/opteed/opteed_main.c | 695
-rw-r--r--  services/spd/opteed/opteed_pm.c | 252
-rw-r--r--  services/spd/opteed/opteed_private.h | 164
-rw-r--r--  services/spd/opteed/teesmc_opteed.h | 169
-rw-r--r--  services/spd/opteed/teesmc_opteed_macros.h | 23
-rw-r--r--  services/spd/pncd/pncd.mk | 24
-rw-r--r--  services/spd/pncd/pncd_common.c | 102
-rw-r--r--  services/spd/pncd/pncd_helpers.S | 79
-rw-r--r--  services/spd/pncd/pncd_main.c | 471
-rw-r--r--  services/spd/pncd/pncd_private.h | 79
-rw-r--r--  services/spd/tlkd/tlkd.mk | 14
-rw-r--r--  services/spd/tlkd/tlkd_common.c | 165
-rw-r--r--  services/spd/tlkd/tlkd_helpers.S | 80
-rw-r--r--  services/spd/tlkd/tlkd_main.c | 546
-rw-r--r--  services/spd/tlkd/tlkd_pm.c | 109
-rw-r--r--  services/spd/tlkd/tlkd_private.h | 124
-rw-r--r--  services/spd/trusty/generic-arm64-smcall.c | 116
-rw-r--r--  services/spd/trusty/generic-arm64-smcall.h | 28
-rw-r--r--  services/spd/trusty/sm_err.h | 22
-rw-r--r--  services/spd/trusty/smcall.h | 82
-rw-r--r--  services/spd/trusty/trusty.c | 541
-rw-r--r--  services/spd/trusty/trusty.mk | 18
-rw-r--r--  services/spd/trusty/trusty_helpers.S | 69
-rw-r--r--  services/spd/tspd/tspd.mk | 46
-rw-r--r--  services/spd/tspd/tspd_common.c | 140
-rw-r--r--  services/spd/tspd/tspd_helpers.S | 79
-rw-r--r--  services/spd/tspd/tspd_main.c | 819
-rw-r--r--  services/spd/tspd/tspd_pm.c | 254
-rw-r--r--  services/spd/tspd/tspd_private.h | 233
-rw-r--r--  services/std_svc/drtm/drtm_dma_prot.c | 263
-rw-r--r--  services/std_svc/drtm/drtm_dma_prot.h | 50
-rw-r--r--  services/std_svc/drtm/drtm_main.c | 839
-rw-r--r--  services/std_svc/drtm/drtm_main.h | 106
-rw-r--r--  services/std_svc/drtm/drtm_measurements.c | 214
-rw-r--r--  services/std_svc/drtm/drtm_measurements.h | 40
-rw-r--r--  services/std_svc/drtm/drtm_remediation.c | 59
-rw-r--r--  services/std_svc/drtm/drtm_remediation.h | 15
-rw-r--r--  services/std_svc/drtm/drtm_res_address_map.c | 88
-rw-r--r--  services/std_svc/errata_abi/cpu_errata_info.h | 73
-rw-r--r--  services/std_svc/errata_abi/errata_abi_main.c | 603
-rw-r--r--  services/std_svc/pci_svc.c | 113
-rw-r--r--  services/std_svc/rmmd/aarch64/rmmd_helpers.S | 73
-rw-r--r--  services/std_svc/rmmd/rmmd.mk | 19
-rw-r--r--  services/std_svc/rmmd/rmmd_attest.c | 153
-rw-r--r--  services/std_svc/rmmd/rmmd_initial_context.h | 33
-rw-r--r--  services/std_svc/rmmd/rmmd_main.c | 485
-rw-r--r--  services/std_svc/rmmd/rmmd_private.h | 63
-rw-r--r--  services/std_svc/rmmd/trp/linker.ld.S | 72
-rw-r--r--  services/std_svc/rmmd/trp/trp.mk | 27
-rw-r--r--  services/std_svc/rmmd/trp/trp_entry.S | 153
-rw-r--r--  services/std_svc/rmmd/trp/trp_helpers.c | 58
-rw-r--r--  services/std_svc/rmmd/trp/trp_main.c | 185
-rw-r--r--  services/std_svc/rmmd/trp/trp_private.h | 61
-rw-r--r--  services/std_svc/sdei/sdei_dispatch.S | 26
-rw-r--r--  services/std_svc/sdei/sdei_event.c | 122
-rw-r--r--  services/std_svc/sdei/sdei_intr_mgmt.c | 774
-rw-r--r--  services/std_svc/sdei/sdei_main.c | 1112
-rw-r--r--  services/std_svc/sdei/sdei_private.h | 248
-rw-r--r--  services/std_svc/sdei/sdei_state.c | 150
-rw-r--r--  services/std_svc/spm/common/aarch64/spm_helpers.S | 74
-rw-r--r--  services/std_svc/spm/common/aarch64/spm_shim_exceptions.S | 128
-rw-r--r--  services/std_svc/spm/common/include/spm_common.h | 46
-rw-r--r--  services/std_svc/spm/common/include/spm_shim_private.h | 26
-rw-r--r--  services/std_svc/spm/common/spm.mk | 23
-rw-r--r--  services/std_svc/spm/common/spm_xlat_common.c | 30
-rw-r--r--  services/std_svc/spm/el3_spmc/logical_sp.c | 107
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc.h | 281
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc.mk | 46
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc_main.c | 2112
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc_pm.c | 285
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc_setup.c | 278
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc_shared_mem.c | 1934
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc_shared_mem.h | 115
-rw-r--r--  services/std_svc/spm/spm_mm/spm_mm.mk | 33
-rw-r--r--  services/std_svc/spm/spm_mm/spm_mm_main.c | 370
-rw-r--r--  services/std_svc/spm/spm_mm/spm_mm_private.h | 67
-rw-r--r--  services/std_svc/spm/spm_mm/spm_mm_setup.c | 260
-rw-r--r--  services/std_svc/spm/spm_mm/spm_mm_xlat.c | 137
-rw-r--r--  services/std_svc/spmd/aarch64/spmd_helpers.S | 73
-rw-r--r--  services/std_svc/spmd/spmd.mk | 29
-rw-r--r--  services/std_svc/spmd/spmd_logical_sp.c | 742
-rw-r--r--  services/std_svc/spmd/spmd_main.c | 1292
-rw-r--r--  services/std_svc/spmd/spmd_pm.c | 168
-rw-r--r--  services/std_svc/spmd/spmd_private.h | 115
-rw-r--r--  services/std_svc/std_svc_setup.c | 246
-rw-r--r--  services/std_svc/trng/trng_entropy_pool.c | 236
-rw-r--r--  services/std_svc/trng/trng_entropy_pool.h | 16
-rw-r--r--  services/std_svc/trng/trng_main.c | 146
92 files changed, 21639 insertions, 0 deletions
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
new file mode 100644
index 0000000..57d211e
--- /dev/null
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2018-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/cpus/errata.h>
+#include <lib/cpus/wa_cve_2017_5715.h>
+#include <lib/cpus/wa_cve_2018_3639.h>
+#include <lib/cpus/wa_cve_2022_23960.h>
+#include <lib/smccc.h>
+#include <services/arm_arch_svc.h>
+#include <smccc_helpers.h>
+#include <plat/common/platform.h>
+
+static int32_t smccc_version(void)
+{
+ return MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION);
+}
+
+static int32_t smccc_arch_features(u_register_t arg1)
+{
+ switch (arg1) {
+ case SMCCC_VERSION:
+ case SMCCC_ARCH_FEATURES:
+ return SMC_ARCH_CALL_SUCCESS;
+ case SMCCC_ARCH_SOC_ID:
+ return plat_is_smccc_feature_available(arg1);
+#ifdef __aarch64__
+ /* Workaround checks are currently only implemented for aarch64 */
+#if WORKAROUND_CVE_2017_5715
+ case SMCCC_ARCH_WORKAROUND_1:
+ if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)
+ return 1;
+ return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
+#endif
+
+#if WORKAROUND_CVE_2018_3639
+ case SMCCC_ARCH_WORKAROUND_2: {
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ unsigned long long ssbs;
+
+ /*
+ * Firmware doesn't have to carry out dynamic workaround if the
+ * PE implements architectural Speculation Store Bypass Safe
+ * (SSBS) feature.
+ */
+ ssbs = (read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SSBS_SHIFT) &
+ ID_AA64PFR1_EL1_SSBS_MASK;
+
+ /*
+ * If architectural SSBS is available on this PE, no firmware
+ * mitigation via SMCCC_ARCH_WORKAROUND_2 is required.
+ */
+ if (ssbs != SSBS_UNAVAILABLE)
+ return 1;
+
+ /*
+ * On a platform where at least one CPU requires
+ * dynamic mitigation but others are either unaffected
+ * or permanently mitigated, report the latter as not
+ * needing dynamic mitigation.
+ */
+ if (wa_cve_2018_3639_get_disable_ptr() == NULL)
+ return 1;
+ /*
+ * If we get here, this CPU requires dynamic mitigation
+ * so report it as such.
+ */
+ return 0;
+#else
+ /* Either the CPUs are unaffected or permanently mitigated */
+ return SMC_ARCH_CALL_NOT_REQUIRED;
+#endif
+ }
+#endif
+
+#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
+ case SMCCC_ARCH_WORKAROUND_3:
+ /*
+ * SMCCC_ARCH_WORKAROUND_3 should also take into account
+ * CVE-2017-5715 since this SMC can be used instead of
+ * SMCCC_ARCH_WORKAROUND_1.
+ */
+ if ((check_smccc_arch_wa3_applies() == ERRATA_NOT_APPLIES) &&
+ (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)) {
+ return 1;
+ }
+ return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
+#endif
+#endif /* __aarch64__ */
+
+ /* Fallthrough */
+
+ default:
+ return SMC_UNK;
+ }
+}
+
+/* Return the SoC revision or SoC version on success; otherwise
+ * return SMC_ARCH_CALL_INVAL_PARAM. */
+static int32_t smccc_arch_id(u_register_t arg1)
+{
+ if (arg1 == SMCCC_GET_SOC_REVISION) {
+ return plat_get_soc_revision();
+ }
+ if (arg1 == SMCCC_GET_SOC_VERSION) {
+ return plat_get_soc_version();
+ }
+ return SMC_ARCH_CALL_INVAL_PARAM;
+}
+
+/*
+ * Top-level Arm Architectural Service SMC handler.
+ */
+static uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ switch (smc_fid) {
+ case SMCCC_VERSION:
+ SMC_RET1(handle, smccc_version());
+ case SMCCC_ARCH_FEATURES:
+ SMC_RET1(handle, smccc_arch_features(x1));
+ case SMCCC_ARCH_SOC_ID:
+ SMC_RET1(handle, smccc_arch_id(x1));
+#ifdef __aarch64__
+#if WORKAROUND_CVE_2017_5715
+ case SMCCC_ARCH_WORKAROUND_1:
+ /*
+ * The workaround has already been applied on affected PEs
+ * during entry to EL3. On unaffected PEs, this function
+ * has no effect.
+ */
+ SMC_RET0(handle);
+#endif
+#if WORKAROUND_CVE_2018_3639
+ case SMCCC_ARCH_WORKAROUND_2:
+ /*
+ * The workaround has already been applied on affected PEs
+ * requiring dynamic mitigation during entry to EL3.
+ * On unaffected or statically mitigated PEs, this function
+ * has no effect.
+ */
+ SMC_RET0(handle);
+#endif
+#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
+ case SMCCC_ARCH_WORKAROUND_3:
+ /*
+ * The workaround has already been applied on affected PEs
+ * during entry to EL3. On unaffected PEs, this function
+ * has no effect.
+ */
+ SMC_RET0(handle);
+#endif
+#endif /* __aarch64__ */
+ default:
+ WARN("Unimplemented Arm Architecture Service Call: 0x%x \n",
+ smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
+
+/* Register Standard Service Calls as runtime service */
+DECLARE_RT_SVC(
+ arm_arch_svc,
+ OEN_ARM_START,
+ OEN_ARM_END,
+ SMC_TYPE_FAST,
+ NULL,
+ arm_arch_svc_smc_handler
+);
diff --git a/services/spd/opteed/opteed.mk b/services/spd/opteed/opteed.mk
new file mode 100644
index 0000000..f394744
--- /dev/null
+++ b/services/spd/opteed/opteed.mk
@@ -0,0 +1,35 @@
+#
+# Copyright (c) 2013-2023, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+OPTEED_DIR := services/spd/opteed
+SPD_INCLUDES :=
+
+SPD_SOURCES := services/spd/opteed/opteed_common.c \
+ services/spd/opteed/opteed_helpers.S \
+ services/spd/opteed/opteed_main.c \
+ services/spd/opteed/opteed_pm.c
+
+NEED_BL32 := yes
+
+# required so that optee code can control access to the timer registers
+NS_TIMER_SWITCH := 1
+
+# WARNING: This enables loading of OP-TEE via an SMC, which can be potentially
+# insecure. This removes the boundary between the startup of the secure and
+# non-secure worlds until the point where this SMC is invoked. Only use this
+# setting if you can ensure that the non-secure OS can remain trusted up until
+# the point where this SMC is invoked.
+OPTEE_ALLOW_SMC_LOAD := 0
+ifeq ($(OPTEE_ALLOW_SMC_LOAD),1)
+ifeq ($(PLAT_XLAT_TABLES_DYNAMIC),0)
+$(error When OPTEE_ALLOW_SMC_LOAD=1, PLAT_XLAT_TABLES_DYNAMIC must also be 1)
+endif
+$(warning "OPTEE_ALLOW_SMC_LOAD is enabled which may result in an insecure \
+ platform")
+$(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC))
+$(eval $(call add_define,OPTEE_ALLOW_SMC_LOAD))
+include lib/libfdt/libfdt.mk
+endif
diff --git a/services/spd/opteed/opteed_common.c b/services/spd/opteed/opteed_common.c
new file mode 100644
index 0000000..9aa19c5
--- /dev/null
+++ b/services/spd/opteed/opteed_common.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/utils.h>
+
+#include "opteed_private.h"
+
+/*******************************************************************************
+ * Given an OPTEE entrypoint info pointer, entry point PC, register width,
+ * cpu id & pointer to a context data structure, this function will
+ * initialize OPTEE context and entry point info for OPTEE.
+ ******************************************************************************/
+void opteed_init_optee_ep_state(struct entry_point_info *optee_entry_point,
+ uint32_t rw, uint64_t pc,
+ uint64_t pageable_part, uint64_t mem_limit,
+ uint64_t dt_addr, optee_context_t *optee_ctx)
+{
+ uint32_t ep_attr;
+
+ /* Passing a NULL context is a critical programming error */
+ assert(optee_ctx);
+ assert(optee_entry_point);
+ assert(pc);
+
+ /* Associate this context with the cpu specified */
+ optee_ctx->mpidr = read_mpidr_el1();
+ optee_ctx->state = 0;
+ set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_OFF);
+
+ cm_set_context(&optee_ctx->cpu_ctx, SECURE);
+
+ /* initialise an entrypoint to set up the CPU context */
+ ep_attr = SECURE | EP_ST_ENABLE;
+ if (read_sctlr_el3() & SCTLR_EE_BIT)
+ ep_attr |= EP_EE_BIG;
+ SET_PARAM_HEAD(optee_entry_point, PARAM_EP, VERSION_1, ep_attr);
+ optee_entry_point->pc = pc;
+ if (rw == OPTEE_AARCH64)
+ optee_entry_point->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+ else
+ optee_entry_point->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
+ SPSR_E_LITTLE,
+ DAIF_FIQ_BIT |
+ DAIF_IRQ_BIT |
+ DAIF_ABT_BIT);
+ zeromem(&optee_entry_point->args, sizeof(optee_entry_point->args));
+ optee_entry_point->args.arg0 = pageable_part;
+ optee_entry_point->args.arg1 = mem_limit;
+ optee_entry_point->args.arg2 = dt_addr;
+}
+
+/*******************************************************************************
+ * This function takes an OPTEE context pointer and:
+ * 1. Applies the S-EL1 system register context from optee_ctx->cpu_ctx.
+ * 2. Saves the current C runtime state (callee saved registers) on the stack
+ * frame and saves a reference to this state.
+ * 3. Calls el3_exit() so that the EL3 system and general purpose registers
+ * from the optee_ctx->cpu_ctx are used to enter the OPTEE image.
+ ******************************************************************************/
+uint64_t opteed_synchronous_sp_entry(optee_context_t *optee_ctx)
+{
+ uint64_t rc;
+
+ assert(optee_ctx != NULL);
+ assert(optee_ctx->c_rt_ctx == 0);
+
+ /* Apply the Secure EL1 system register context and switch to it */
+ assert(cm_get_context(SECURE) == &optee_ctx->cpu_ctx);
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ rc = opteed_enter_sp(&optee_ctx->c_rt_ctx);
+#if ENABLE_ASSERTIONS
+ optee_ctx->c_rt_ctx = 0;
+#endif
+
+ return rc;
+}
+
+
+/*******************************************************************************
+ * This function takes an OPTEE context pointer and:
+ * 1. Saves the S-EL1 system register context to optee_ctx->cpu_ctx.
+ * 2. Restores the current C runtime state (callee saved registers) from the
+ * stack frame using the reference to this state saved in opteed_enter_sp().
+ * 3. It does not need to save any general purpose or EL3 system register state
+ * as the generic smc entry routine should have saved those.
+ ******************************************************************************/
+void opteed_synchronous_sp_exit(optee_context_t *optee_ctx, uint64_t ret)
+{
+ assert(optee_ctx != NULL);
+ /* Save the Secure EL1 system register context */
+ assert(cm_get_context(SECURE) == &optee_ctx->cpu_ctx);
+ cm_el1_sysregs_context_save(SECURE);
+
+ assert(optee_ctx->c_rt_ctx != 0);
+ opteed_exit_sp(optee_ctx->c_rt_ctx, ret);
+
+ /* Should never reach here */
+ assert(0);
+}
diff --git a/services/spd/opteed/opteed_helpers.S b/services/spd/opteed/opteed_helpers.S
new file mode 100644
index 0000000..075a71b
--- /dev/null
+++ b/services/spd/opteed/opteed_helpers.S
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "opteed_private.h"
+
+ .global opteed_enter_sp
+ /* ---------------------------------------------
+ * This function is called with SP_EL0 as stack.
+ * Here we stash our EL3 callee-saved registers
+ * on to the stack as a part of saving the C
+ * runtime and enter the secure payload.
+ * 'x0' contains a pointer to the memory where
+ * the address of the C runtime context is to be
+ * saved.
+ * ---------------------------------------------
+ */
+func opteed_enter_sp
+ /* Make space for the registers that we're going to save */
+ mov x3, sp
+ str x3, [x0, #0]
+ sub sp, sp, #OPTEED_C_RT_CTX_SIZE
+
+ /* Save callee-saved registers on to the stack */
+ stp x19, x20, [sp, #OPTEED_C_RT_CTX_X19]
+ stp x21, x22, [sp, #OPTEED_C_RT_CTX_X21]
+ stp x23, x24, [sp, #OPTEED_C_RT_CTX_X23]
+ stp x25, x26, [sp, #OPTEED_C_RT_CTX_X25]
+ stp x27, x28, [sp, #OPTEED_C_RT_CTX_X27]
+ stp x29, x30, [sp, #OPTEED_C_RT_CTX_X29]
+
+ /* ---------------------------------------------
+ * Everything is set up now. el3_exit() will
+ * use the secure context to restore the
+ * general purpose and EL3 system registers to
+ * ERET into OPTEE.
+ * ---------------------------------------------
+ */
+ b el3_exit
+endfunc opteed_enter_sp
+
+ /* ---------------------------------------------
+ * This function is called with 'x0' pointing to a C
+ * runtime context saved in opteed_enter_sp(). It
+ * restores the saved registers and jumps to
+ * that runtime with 'x0' as the new sp. This
+ * destroys the C runtime context that had been
+ * built on the stack below the saved context by
+ * the caller. Later the second parameter 'x1'
+ * is passed as return value to the caller
+ * ---------------------------------------------
+ */
+ .global opteed_exit_sp
+func opteed_exit_sp
+ /* Restore the previous stack */
+ mov sp, x0
+
+ /* Restore callee-saved registers from the stack */
+ ldp x19, x20, [x0, #(OPTEED_C_RT_CTX_X19 - OPTEED_C_RT_CTX_SIZE)]
+ ldp x21, x22, [x0, #(OPTEED_C_RT_CTX_X21 - OPTEED_C_RT_CTX_SIZE)]
+ ldp x23, x24, [x0, #(OPTEED_C_RT_CTX_X23 - OPTEED_C_RT_CTX_SIZE)]
+ ldp x25, x26, [x0, #(OPTEED_C_RT_CTX_X25 - OPTEED_C_RT_CTX_SIZE)]
+ ldp x27, x28, [x0, #(OPTEED_C_RT_CTX_X27 - OPTEED_C_RT_CTX_SIZE)]
+ ldp x29, x30, [x0, #(OPTEED_C_RT_CTX_X29 - OPTEED_C_RT_CTX_SIZE)]
+
+ /* ---------------------------------------------
+ * This should take us back to the instruction
+ * after the call to the last opteed_enter_sp().
+ * Place the second parameter to x0 so that the
+ * caller will see it as a return value from the
+ * original entry call
+ * ---------------------------------------------
+ */
+ mov x0, x1
+ ret
+endfunc opteed_exit_sp
diff --git a/services/spd/opteed/opteed_main.c b/services/spd/opteed/opteed_main.c
new file mode 100644
index 0000000..4d055db
--- /dev/null
+++ b/services/spd/opteed/opteed_main.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright (c) 2013-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/*******************************************************************************
+ * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
+ * plug-in component to the Secure Monitor, registered as a runtime service. The
+ * SPD is expected to be a functional extension of the Secure Payload (SP) that
+ * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
+ * the Trusted OS/Applications range to the dispatcher. The SPD will either
+ * handle the request locally or delegate it to the Secure Payload. It is also
+ * responsible for initialising and maintaining communication with the SP.
+ ******************************************************************************/
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stddef.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/coreboot.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/optee_utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#if OPTEE_ALLOW_SMC_LOAD
+#include <libfdt.h>
+#endif /* OPTEE_ALLOW_SMC_LOAD */
+#include <plat/common/platform.h>
+#include <tools_share/uuid.h>
+
+#include "opteed_private.h"
+#include "teesmc_opteed.h"
+
+/*******************************************************************************
+ * Address of the entrypoint vector table in OPTEE. It is
+ * initialised once on the primary core after a cold boot.
+ ******************************************************************************/
+struct optee_vectors *optee_vector_table;
+
+/*******************************************************************************
+ * Array to keep track of per-cpu OPTEE state
+ ******************************************************************************/
+optee_context_t opteed_sp_context[OPTEED_CORE_COUNT];
+uint32_t opteed_rw;
+
+#if OPTEE_ALLOW_SMC_LOAD
+static bool opteed_allow_load;
+/* OP-TEE image loading service UUID */
+DEFINE_SVC_UUID2(optee_image_load_uuid,
+ 0xb1eafba3, 0x5d31, 0x4612, 0xb9, 0x06,
+ 0xc4, 0xc7, 0xa4, 0xbe, 0x3c, 0xc0);
+
+#define OPTEED_FDT_SIZE 256
+static uint8_t fdt_buf[OPTEED_FDT_SIZE] __aligned(CACHE_WRITEBACK_GRANULE);
+
+#else
+static int32_t opteed_init(void);
+#endif
+
+uint64_t dual32to64(uint32_t high, uint32_t low)
+{
+ return ((uint64_t)high << 32) | low;
+}
+
+/*******************************************************************************
+ * This function is the handler registered for S-EL1 interrupts by the
+ * OPTEED. It validates the interrupt and upon success arranges entry into
+ * the OPTEE at 'optee_fiq_entry()' for handling the interrupt.
+ ******************************************************************************/
+static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ uint32_t linear_id;
+ optee_context_t *optee_ctx;
+
+ /* Check the security state when the exception was generated */
+ assert(get_interrupt_src_ss(flags) == NON_SECURE);
+
+ /* Sanity check the pointer to this cpu's context */
+ assert(handle == cm_get_context(NON_SECURE));
+
+ /* Save the non-secure context before entering the OPTEE */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /* Get a reference to this cpu's OPTEE context */
+ linear_id = plat_my_core_pos();
+ optee_ctx = &opteed_sp_context[linear_id];
+ assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));
+
+ cm_set_elr_el3(SECURE, (uint64_t)&optee_vector_table->fiq_entry);
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ /*
+ * Tell the OPTEE that it has to handle an FIQ (synchronously).
+ * Also the instruction in normal world where the interrupt was
+ * generated is passed for debugging purposes. It is safe to
+ * retrieve this address from ELR_EL3 as the secure context will
+ * not take effect until el3_exit().
+ */
+ SMC_RET1(&optee_ctx->cpu_ctx, read_elr_el3());
+}
+
+/*******************************************************************************
+ * OPTEE Dispatcher setup. The OPTEED finds out the OPTEE entrypoint and type
+ * (aarch32/aarch64) if not already known and initialises the context for entry
+ * into OPTEE for its initialization.
+ ******************************************************************************/
+static int32_t opteed_setup(void)
+{
+#if OPTEE_ALLOW_SMC_LOAD
+ opteed_allow_load = true;
+ INFO("Delaying OP-TEE setup until we receive an SMC call to load it\n");
+ return 0;
+#else
+ entry_point_info_t *optee_ep_info;
+ uint32_t linear_id;
+ uint64_t opteed_pageable_part;
+ uint64_t opteed_mem_limit;
+ uint64_t dt_addr;
+
+ linear_id = plat_my_core_pos();
+
+ /*
+ * Get information about the Secure Payload (BL32) image. Its
+ * absence is a critical failure. TODO: Add support to
+ * conditionally include the SPD service
+ */
+ optee_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (!optee_ep_info) {
+ WARN("No OPTEE provided by BL2 boot loader, booting device"
+ " without OPTEE initialization. SMCs destined for OPTEE"
+ " will return SMC_UNK\n");
+ return 1;
+ }
+
+ /*
+ * If there's no valid entry point for SP, we return a non-zero value
+ * signalling failure initializing the service. We bail out without
+ * registering any handlers
+ */
+ if (!optee_ep_info->pc)
+ return 1;
+
+ opteed_rw = optee_ep_info->args.arg0;
+ opteed_pageable_part = optee_ep_info->args.arg1;
+ opteed_mem_limit = optee_ep_info->args.arg2;
+ dt_addr = optee_ep_info->args.arg3;
+
+ opteed_init_optee_ep_state(optee_ep_info,
+ opteed_rw,
+ optee_ep_info->pc,
+ opteed_pageable_part,
+ opteed_mem_limit,
+ dt_addr,
+ &opteed_sp_context[linear_id]);
+
+ /*
+ * All OPTEED initialization done. Now register our init function with
+ * BL31 for deferred invocation
+ */
+ bl31_register_bl32_init(&opteed_init);
+
+ return 0;
+#endif /* OPTEE_ALLOW_SMC_LOAD */
+}
+
+/*******************************************************************************
+ * This function passes control to the OPTEE image (BL32) for the first time
+ * on the primary cpu after a cold boot. It assumes that a valid secure
+ * context has already been created by opteed_setup() which can be directly
+ * used. It also assumes that a valid non-secure context has been
+ * initialised by PSCI so it does not need to save and restore any
+ * non-secure state. This function performs a synchronous entry into
+ * OPTEE. OPTEE passes control back to this routine through a SMC. This returns
+ * a non-zero value on success and zero on failure.
+ ******************************************************************************/
+static int32_t
+opteed_init_with_entry_point(entry_point_info_t *optee_entry_point)
+{
+ uint32_t linear_id = plat_my_core_pos();
+ optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
+ uint64_t rc;
+ assert(optee_entry_point);
+
+ cm_init_my_context(optee_entry_point);
+
+ /*
+ * Arrange for an entry into OPTEE. It will be returned via
+ * OPTEE_ENTRY_DONE case
+ */
+ rc = opteed_synchronous_sp_entry(optee_ctx);
+ assert(rc != 0);
+
+ return rc;
+}
+
+#if !OPTEE_ALLOW_SMC_LOAD
+static int32_t opteed_init(void)
+{
+ entry_point_info_t *optee_entry_point;
+ /*
+ * Get information about the OP-TEE (BL32) image. Its
+ * absence is a critical failure.
+ */
+ optee_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
+ return opteed_init_with_entry_point(optee_entry_point);
+}
+#endif /* !OPTEE_ALLOW_SMC_LOAD */
+
+#if OPTEE_ALLOW_SMC_LOAD
+#if COREBOOT
+/*
+ * Adds a firmware/coreboot node with the coreboot table information to a device
+ * tree. Returns zero on success or if there is no coreboot table information;
+ * failure code otherwise.
+ */
+static int add_coreboot_node(void *fdt)
+{
+ int ret;
+ uint64_t coreboot_table_addr;
+ uint32_t coreboot_table_size;
+ struct {
+ uint64_t addr;
+ uint32_t size;
+ } reg_node;
+ coreboot_get_table_location(&coreboot_table_addr, &coreboot_table_size);
+ if (!coreboot_table_addr || !coreboot_table_size) {
+ WARN("Unable to get coreboot table location for device tree");
+ return 0;
+ }
+ ret = fdt_begin_node(fdt, "firmware");
+ if (ret)
+ return ret;
+
+ ret = fdt_property(fdt, "ranges", NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = fdt_begin_node(fdt, "coreboot");
+ if (ret)
+ return ret;
+
+ ret = fdt_property_string(fdt, "compatible", "coreboot");
+ if (ret)
+ return ret;
+
+ reg_node.addr = cpu_to_fdt64(coreboot_table_addr);
+ reg_node.size = cpu_to_fdt32(coreboot_table_size);
+ ret = fdt_property(fdt, "reg", &reg_node,
+ sizeof(uint64_t) + sizeof(uint32_t));
+ if (ret)
+ return ret;
+
+ ret = fdt_end_node(fdt);
+ if (ret)
+ return ret;
+
+ return fdt_end_node(fdt);
+}
+#endif /* COREBOOT */
+
+/*
+ * Creates a device tree for passing into OP-TEE. Currently it is populated with
+ * the coreboot table address.
+ * Returns 0 on success, error code otherwise.
+ */
+static int create_opteed_dt(void)
+{
+ int ret;
+
+ ret = fdt_create(fdt_buf, OPTEED_FDT_SIZE);
+ if (ret)
+ return ret;
+
+ ret = fdt_finish_reservemap(fdt_buf);
+ if (ret)
+ return ret;
+
+ ret = fdt_begin_node(fdt_buf, "");
+ if (ret)
+ return ret;
+
+#if COREBOOT
+ ret = add_coreboot_node(fdt_buf);
+ if (ret)
+ return ret;
+#endif /* COREBOOT */
+
+ ret = fdt_end_node(fdt_buf);
+ if (ret)
+ return ret;
+
+ return fdt_finish(fdt_buf);
+}
+
+/*******************************************************************************
+ * This function is responsible for handling the SMC that loads the OP-TEE
+ * binary image via a non-secure SMC call. It takes the size and physical
+ * address of the payload as parameters.
+ ******************************************************************************/
+static int32_t opteed_handle_smc_load(uint64_t data_size, uint32_t data_pa)
+{
+ uintptr_t data_va = data_pa;
+ uint64_t mapped_data_pa;
+ uintptr_t mapped_data_va;
+ uint64_t data_map_size;
+ int32_t rc;
+ optee_header_t *image_header;
+ uint8_t *image_ptr;
+ uint64_t target_pa;
+ uint64_t target_end_pa;
+ uint64_t image_pa;
+ uintptr_t image_va;
+ optee_image_t *curr_image;
+ uintptr_t target_va;
+ uint64_t target_size;
+ entry_point_info_t optee_ep_info;
+ uint32_t linear_id = plat_my_core_pos();
+ uint64_t dt_addr = 0;
+
+ mapped_data_pa = page_align(data_pa, DOWN);
+ mapped_data_va = mapped_data_pa;
+ data_map_size = page_align(data_size + (mapped_data_pa - data_pa), UP);
+
+ /*
+ * We do not validate the passed in address because we are trusting the
+ * non-secure world at this point still.
+ */
+ rc = mmap_add_dynamic_region(mapped_data_pa, mapped_data_va,
+ data_map_size, MT_MEMORY | MT_RO | MT_NS);
+ if (rc != 0) {
+ return rc;
+ }
+
+ image_header = (optee_header_t *)data_va;
+ if (image_header->magic != TEE_MAGIC_NUM_OPTEE ||
+ image_header->version != 2 || image_header->nb_images != 1) {
+ mmap_remove_dynamic_region(mapped_data_va, data_map_size);
+ return -EINVAL;
+ }
+
+ image_ptr = (uint8_t *)data_va + sizeof(optee_header_t) +
+ sizeof(optee_image_t);
+ if (image_header->arch == 1) {
+ opteed_rw = OPTEE_AARCH64;
+ } else {
+ opteed_rw = OPTEE_AARCH32;
+ }
+
+ curr_image = &image_header->optee_image_list[0];
+ image_pa = dual32to64(curr_image->load_addr_hi,
+ curr_image->load_addr_lo);
+ image_va = image_pa;
+ target_end_pa = image_pa + curr_image->size;
+
+ /* Now also map the memory we want to copy it to. */
+ target_pa = page_align(image_pa, DOWN);
+ target_va = target_pa;
+ target_size = page_align(target_end_pa, UP) - target_pa;
+
+ rc = mmap_add_dynamic_region(target_pa, target_va, target_size,
+ MT_MEMORY | MT_RW | MT_SECURE);
+ if (rc != 0) {
+ mmap_remove_dynamic_region(mapped_data_va, data_map_size);
+ return rc;
+ }
+
+ INFO("Loaded OP-TEE via SMC: size %d addr 0x%" PRIx64 "\n",
+ curr_image->size, image_va);
+
+ memcpy((void *)image_va, image_ptr, curr_image->size);
+ flush_dcache_range(target_pa, target_size);
+
+ mmap_remove_dynamic_region(mapped_data_va, data_map_size);
+ mmap_remove_dynamic_region(target_va, target_size);
+
+ /* Save the non-secure state */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ rc = create_opteed_dt();
+ if (rc) {
+ ERROR("Failed device tree creation %d\n", rc);
+ return rc;
+ }
+ dt_addr = (uint64_t)fdt_buf;
+ flush_dcache_range(dt_addr, OPTEED_FDT_SIZE);
+
+ opteed_init_optee_ep_state(&optee_ep_info,
+ opteed_rw,
+ image_pa,
+ 0,
+ 0,
+ dt_addr,
+ &opteed_sp_context[linear_id]);
+ if (opteed_init_with_entry_point(&optee_ep_info) == 0) {
+ rc = -EFAULT;
+ }
+
+ /* Restore non-secure state */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ return rc;
+}
+#endif /* OPTEE_ALLOW_SMC_LOAD */
+
+/*******************************************************************************
+ * This function is responsible for handling all SMCs in the Trusted OS/App
+ * range from the non-secure state as defined in the SMC Calling Convention
+ * Document. It is also responsible for communicating with the Secure
+ * payload to delegate work and return results back to the non-secure
+ * state. Lastly it will also return any information that OPTEE needs to do
+ * the work assigned to it.
+ ******************************************************************************/
+static uintptr_t opteed_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ cpu_context_t *ns_cpu_context;
+ uint32_t linear_id = plat_my_core_pos();
+ optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
+ uint64_t rc;
+
+ /*
+ * Determine which security state this SMC originated from
+ */
+
+ if (is_caller_non_secure(flags)) {
+#if OPTEE_ALLOW_SMC_LOAD
+ if (opteed_allow_load && smc_fid == NSSMC_OPTEED_CALL_UID) {
+ /* Provide the UUID of the image loading service. */
+ SMC_UUID_RET(handle, optee_image_load_uuid);
+ }
+ if (smc_fid == NSSMC_OPTEED_CALL_LOAD_IMAGE) {
+ /*
+ * TODO: Consider wiping the code for SMC loading from
+ * memory after it has been invoked similar to what is
+ * done under RECLAIM_INIT, but extended to happen
+ * later.
+ */
+ if (!opteed_allow_load) {
+ SMC_RET1(handle, -EPERM);
+ }
+
+ opteed_allow_load = false;
+ uint64_t data_size = dual32to64(x1, x2);
+ uint64_t data_pa = dual32to64(x3, x4);
+ if (!data_size || !data_pa) {
+ /*
+ * This is invoked when the OP-TEE image didn't
+ * load correctly in the kernel but we want to
+ * block off loading of it later for security
+ * reasons.
+ */
+ SMC_RET1(handle, -EINVAL);
+ }
+ SMC_RET1(handle, opteed_handle_smc_load(
+ data_size, data_pa));
+ }
+#endif /* OPTEE_ALLOW_SMC_LOAD */
+ /*
+ * This is a fresh request from the non-secure client.
+ * The parameters are in x1 and x2. Figure out which
+ * registers need to be preserved, save the non-secure
+ * state and send the request to the secure payload.
+ */
+ assert(handle == cm_get_context(NON_SECURE));
+
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /*
+ * We are done stashing the non-secure context. Ask the
+ * OP-TEE to do the work now. If we are loading via an SMC,
+ * then we also need to init this CPU context if not done
+ * already.
+ */
+ if (optee_vector_table == NULL) {
+ SMC_RET1(handle, -EINVAL);
+ }
+
+ if (get_optee_pstate(optee_ctx->state) ==
+ OPTEE_PSTATE_UNKNOWN) {
+ opteed_cpu_on_finish_handler(0);
+ }
+
+ /*
+ * Verify if there is a valid context to use, copy the
+ * operation type and parameters to the secure context
+ * and jump to the fast smc entry point in the secure
+ * payload. Entry into S-EL1 will take place upon exit
+ * from this function.
+ */
+ assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));
+
+ /* Set appropriate entry for SMC.
+ * We expect OPTEE to manage the PSTATE.I and PSTATE.F
+ * flags as appropriate.
+ */
+ if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
+ cm_set_elr_el3(SECURE, (uint64_t)
+ &optee_vector_table->fast_smc_entry);
+ } else {
+ cm_set_elr_el3(SECURE, (uint64_t)
+ &optee_vector_table->yield_smc_entry);
+ }
+
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
+ CTX_GPREG_X4,
+ read_ctx_reg(get_gpregs_ctx(handle),
+ CTX_GPREG_X4));
+ write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
+ CTX_GPREG_X5,
+ read_ctx_reg(get_gpregs_ctx(handle),
+ CTX_GPREG_X5));
+ write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
+ CTX_GPREG_X6,
+ read_ctx_reg(get_gpregs_ctx(handle),
+ CTX_GPREG_X6));
+ /* Propagate hypervisor client ID */
+ write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
+ CTX_GPREG_X7,
+ read_ctx_reg(get_gpregs_ctx(handle),
+ CTX_GPREG_X7));
+
+ SMC_RET4(&optee_ctx->cpu_ctx, smc_fid, x1, x2, x3);
+ }
+
+ /*
+ * Returning from OPTEE
+ */
+
+ switch (smc_fid) {
+ /*
+ * OPTEE has finished initialising itself after a cold boot
+ */
+ case TEESMC_OPTEED_RETURN_ENTRY_DONE:
+ /*
+ * Stash the OPTEE entry points information. This is done
+ * only once on the primary cpu
+ */
+ assert(optee_vector_table == NULL);
+ optee_vector_table = (optee_vectors_t *) x1;
+
+ if (optee_vector_table) {
+ set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_ON);
+
+ /*
+ * OPTEE has been successfully initialized.
+ * Register power management hooks with PSCI
+ */
+ psci_register_spd_pm_hook(&opteed_pm);
+
+ /*
+ * Register an interrupt handler for S-EL1 interrupts
+ * when generated during code executing in the
+ * non-secure state.
+ */
+ flags = 0;
+ set_interrupt_rm_flag(flags, NON_SECURE);
+ rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+ opteed_sel1_interrupt_handler,
+ flags);
+ if (rc)
+ panic();
+ }
+
+ /*
+ * OPTEE reports completion. The OPTEED must have initiated
+ * the original request through a synchronous entry into
+ * OPTEE. Jump back to the original C runtime context.
+ */
+ opteed_synchronous_sp_exit(optee_ctx, x1);
+ break;
+
+
+ /*
+ * These function IDs are used only by OP-TEE to indicate it has
+ * finished:
+ * 1. turning itself on in response to an earlier psci
+ * cpu_on request
+ * 2. resuming itself after an earlier psci cpu_suspend
+ * request.
+ */
+ case TEESMC_OPTEED_RETURN_ON_DONE:
+ case TEESMC_OPTEED_RETURN_RESUME_DONE:
+
+
+ /*
+ * These function IDs are used only by the SP to indicate it has
+ * finished:
+ * 1. suspending itself after an earlier psci cpu_suspend
+ * request.
+ * 2. turning itself off in response to an earlier psci
+ * cpu_off request.
+ */
+ case TEESMC_OPTEED_RETURN_OFF_DONE:
+ case TEESMC_OPTEED_RETURN_SUSPEND_DONE:
+ case TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE:
+ case TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE:
+
+ /*
+ * OPTEE reports completion. The OPTEED must have initiated the
+ * original request through a synchronous entry into OPTEE.
+ * Jump back to the original C runtime context, and pass x1 as
+ * return value to the caller
+ */
+ opteed_synchronous_sp_exit(optee_ctx, x1);
+ break;
+
+ /*
+ * OPTEE is returning from a call or being preempted from a call, in
+ * either case execution should resume in the normal world.
+ */
+ case TEESMC_OPTEED_RETURN_CALL_DONE:
+ /*
+ * This is the result from the secure client of an
+ * earlier request. The results are in x0-x3. Copy it
+ * into the non-secure context, save the secure state
+ * and return to the non-secure state.
+ */
+ assert(handle == cm_get_context(SECURE));
+ cm_el1_sysregs_context_save(SECURE);
+
+ /* Get a reference to the non-secure context */
+ ns_cpu_context = cm_get_context(NON_SECURE);
+ assert(ns_cpu_context);
+
+ /* Restore non-secure state */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ SMC_RET4(ns_cpu_context, x1, x2, x3, x4);
+
+ /*
+ * OPTEE has finished handling a S-EL1 FIQ interrupt. Execution
+ * should resume in the normal world.
+ */
+ case TEESMC_OPTEED_RETURN_FIQ_DONE:
+ /* Get a reference to the non-secure context */
+ ns_cpu_context = cm_get_context(NON_SECURE);
+ assert(ns_cpu_context);
+
+ /*
+ * Restore non-secure state. There is no need to save the
+ * secure system register context since OPTEE was supposed
+ * to preserve it during S-EL1 interrupt handling.
+ */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ SMC_RET0((uint64_t) ns_cpu_context);
+
+ default:
+ panic();
+ }
+}
+
+/* Define an OPTEED runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+ opteed_fast,
+
+ OEN_TOS_START,
+ OEN_TOS_END,
+ SMC_TYPE_FAST,
+ opteed_setup,
+ opteed_smc_handler
+);
+
+/* Define an OPTEED runtime service descriptor for yielding SMC calls */
+DECLARE_RT_SVC(
+ opteed_std,
+
+ OEN_TOS_START,
+ OEN_TOS_END,
+ SMC_TYPE_YIELD,
+ NULL,
+ opteed_smc_handler
+);
diff --git a/services/spd/opteed/opteed_pm.c b/services/spd/opteed/opteed_pm.c
new file mode 100644
index 0000000..fa724a1
--- /dev/null
+++ b/services/spd/opteed/opteed_pm.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2013-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <plat/common/platform.h>
+
+#include "opteed_private.h"
+
+/*******************************************************************************
+ * The target cpu is being turned on. Allow the OPTEED/OPTEE to perform any
+ * actions needed. Nothing at the moment.
+ ******************************************************************************/
+static void opteed_cpu_on_handler(u_register_t target_cpu)
+{
+}
+
+/*******************************************************************************
+ * This cpu is being turned off. Allow the OPTEED/OPTEE to perform any actions
+ * needed
+ ******************************************************************************/
+static int32_t opteed_cpu_off_handler(u_register_t unused)
+{
+ int32_t rc = 0;
+ uint32_t linear_id = plat_my_core_pos();
+ optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
+
+ if (get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_UNKNOWN) {
+ return 0;
+ }
+
+ assert(optee_vector_table);
+ assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_ON);
+
+ /* Program the entry point and enter OPTEE */
+ cm_set_elr_el3(SECURE, (uint64_t) &optee_vector_table->cpu_off_entry);
+ rc = opteed_synchronous_sp_entry(optee_ctx);
+
+ /*
+ * Read the response from OPTEE. A non-zero return means that
+ * something went wrong while communicating with OPTEE.
+ */
+ if (rc != 0)
+ panic();
+
+ /*
+ * Reset OPTEE's context for a fresh start when this cpu is turned on
+ * subsequently.
+ */
+ set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_OFF);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * This cpu is being suspended. S-EL1 state must have been saved in the
+ * resident cpu (mpidr format) if it is a UP/UP migratable OPTEE.
+ ******************************************************************************/
+static void opteed_cpu_suspend_handler(u_register_t max_off_pwrlvl)
+{
+ int32_t rc = 0;
+ uint32_t linear_id = plat_my_core_pos();
+ optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
+
+ if (get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_UNKNOWN) {
+ return;
+ }
+
+ assert(optee_vector_table);
+ assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_ON);
+
+ write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx), CTX_GPREG_X0,
+ max_off_pwrlvl);
+
+ /* Program the entry point and enter OPTEE */
+ cm_set_elr_el3(SECURE, (uint64_t) &optee_vector_table->cpu_suspend_entry);
+ rc = opteed_synchronous_sp_entry(optee_ctx);
+
+ /*
+ * Read the response from OPTEE. A non-zero return means that
+ * something went wrong while communicating with OPTEE.
+ */
+ if (rc != 0)
+ panic();
+
+ /* Update its context to reflect the state OPTEE is in */
+ set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_SUSPEND);
+}
+
+/*******************************************************************************
+ * This cpu has been turned on. Enter OPTEE to initialise S-EL1 and other bits
+ * before passing control back to the Secure Monitor. Entry into S-EL1 is done
+ * after initialising minimal architectural state that guarantees safe
+ * execution.
+ ******************************************************************************/
+void opteed_cpu_on_finish_handler(u_register_t unused)
+{
+ int32_t rc = 0;
+ uint32_t linear_id = plat_my_core_pos();
+ optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
+ entry_point_info_t optee_on_entrypoint;
+
+ assert(optee_vector_table);
+ assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_OFF ||
+ get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_UNKNOWN);
+
+ opteed_init_optee_ep_state(&optee_on_entrypoint, opteed_rw,
+ (uint64_t)&optee_vector_table->cpu_on_entry,
+ 0, 0, 0, optee_ctx);
+
+ /* Initialise this cpu's secure context */
+ cm_init_my_context(&optee_on_entrypoint);
+
+ /* Enter OPTEE */
+ rc = opteed_synchronous_sp_entry(optee_ctx);
+
+ /*
+ * Read the response from OPTEE. A non-zero return means that
+ * something went wrong while communicating with OPTEE.
+ */
+ if (rc != 0)
+ panic();
+
+ /* Update its context to reflect the state OPTEE is in */
+ set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_ON);
+}
+
+/*******************************************************************************
+ * This cpu has resumed from suspend. The OPTEED saved the OPTEE context when it
+ * completed the preceding suspend call. Use that context to program an entry
+ * into OPTEE to allow it to do any remaining book keeping
+ ******************************************************************************/
+static void opteed_cpu_suspend_finish_handler(u_register_t max_off_pwrlvl)
+{
+ int32_t rc = 0;
+ uint32_t linear_id = plat_my_core_pos();
+ optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
+
+ if (get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_UNKNOWN) {
+ return;
+ }
+
+ assert(optee_vector_table);
+ assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_SUSPEND);
+
+ /* Program the entry point, max_off_pwrlvl and enter the SP */
+ write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
+ CTX_GPREG_X0,
+ max_off_pwrlvl);
+ cm_set_elr_el3(SECURE, (uint64_t) &optee_vector_table->cpu_resume_entry);
+ rc = opteed_synchronous_sp_entry(optee_ctx);
+
+ /*
+ * Read the response from OPTEE. A non-zero return means that
+ * something went wrong while communicating with OPTEE.
+ */
+ if (rc != 0)
+ panic();
+
+ /* Update its context to reflect the state OPTEE is in */
+ set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_ON);
+}
+
+/*******************************************************************************
+ * Return the type of OPTEE the OPTEED is dealing with. Report the current
+ * resident cpu (mpidr format) if it is a UP/UP migratable OPTEE.
+ ******************************************************************************/
+static int32_t opteed_cpu_migrate_info(u_register_t *resident_cpu)
+{
+ return OPTEE_MIGRATE_INFO;
+}
+
+/*******************************************************************************
+ * System is about to be switched off. Allow the OPTEED/OPTEE to perform
+ * any actions needed.
+ ******************************************************************************/
+static void opteed_system_off(void)
+{
+ uint32_t linear_id = plat_my_core_pos();
+ optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
+
+ /*
+ * OP-TEE must have been initialized in order to reach this location so
+ * it is safe to init the CPU context if not already done for this core.
+ */
+ if (get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_UNKNOWN) {
+ opteed_cpu_on_finish_handler(0);
+ }
+
+ assert(optee_vector_table);
+ assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_ON);
+
+ /* Program the entry point */
+ cm_set_elr_el3(SECURE, (uint64_t) &optee_vector_table->system_off_entry);
+
+ /* Enter OPTEE. We do not care about the return value because we
+ * must continue the shutdown anyway */
+ opteed_synchronous_sp_entry(optee_ctx);
+}
+
+/*******************************************************************************
+ * System is about to be reset. Allow the OPTEED/OPTEE to perform
+ * any actions needed.
+ ******************************************************************************/
+static void opteed_system_reset(void)
+{
+ uint32_t linear_id = plat_my_core_pos();
+ optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
+
+ /*
+ * OP-TEE must have been initialized in order to reach this location so
+ * it is safe to init the CPU context if not already done for this core.
+ */
+ if (get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_UNKNOWN) {
+ opteed_cpu_on_finish_handler(0);
+ }
+
+ assert(optee_vector_table);
+ assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_ON);
+
+ /* Program the entry point */
+ cm_set_elr_el3(SECURE, (uint64_t) &optee_vector_table->system_reset_entry);
+
+ /* Enter OPTEE. We do not care about the return value because we
+ * must continue the reset anyway */
+ opteed_synchronous_sp_entry(optee_ctx);
+}
+
+
+/*******************************************************************************
+ * Structure populated by the OPTEE Dispatcher to be given a chance to
+ * perform any OPTEE bookkeeping before PSCI executes a power mgmt.
+ * operation.
+ ******************************************************************************/
+const spd_pm_ops_t opteed_pm = {
+ .svc_on = opteed_cpu_on_handler,
+ .svc_off = opteed_cpu_off_handler,
+ .svc_suspend = opteed_cpu_suspend_handler,
+ .svc_on_finish = opteed_cpu_on_finish_handler,
+ .svc_suspend_finish = opteed_cpu_suspend_finish_handler,
+ .svc_migrate = NULL,
+ .svc_migrate_info = opteed_cpu_migrate_info,
+ .svc_system_off = opteed_system_off,
+ .svc_system_reset = opteed_system_reset,
+};
diff --git a/services/spd/opteed/opteed_private.h b/services/spd/opteed/opteed_private.h
new file mode 100644
index 0000000..c8fbc22
--- /dev/null
+++ b/services/spd/opteed/opteed_private.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2013-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef OPTEED_PRIVATE_H
+#define OPTEED_PRIVATE_H
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <bl31/interrupt_mgmt.h>
+#include <context.h>
+#include <lib/psci/psci.h>
+
+/*******************************************************************************
+ * OPTEE PM state information e.g. OPTEE is suspended, uninitialised etc
+ * and macros to access the state information in the per-cpu 'state' flags
+ ******************************************************************************/
+#define OPTEE_PSTATE_OFF 1
+#define OPTEE_PSTATE_ON 2
+#define OPTEE_PSTATE_SUSPEND 3
+#define OPTEE_PSTATE_UNKNOWN 0
+#define OPTEE_PSTATE_SHIFT 0
+#define OPTEE_PSTATE_MASK 0x3
+#define get_optee_pstate(state) ((state >> OPTEE_PSTATE_SHIFT) & \
+ OPTEE_PSTATE_MASK)
+#define clr_optee_pstate(state) (state &= ~(OPTEE_PSTATE_MASK \
+ << OPTEE_PSTATE_SHIFT))
+#define set_optee_pstate(st, pst) do { \
+ clr_optee_pstate(st); \
+ st |= (pst & OPTEE_PSTATE_MASK) << \
+ OPTEE_PSTATE_SHIFT; \
+ } while (0)
+
+
+/*******************************************************************************
+ * OPTEE execution state information i.e. aarch32 or aarch64
+ ******************************************************************************/
+#define OPTEE_AARCH32 MODE_RW_32
+#define OPTEE_AARCH64 MODE_RW_64
+
+/*******************************************************************************
+ * The OPTEED should know the type of OPTEE
+ ******************************************************************************/
+#define OPTEE_TYPE_UP PSCI_TOS_NOT_UP_MIG_CAP
+#define OPTEE_TYPE_UPM PSCI_TOS_UP_MIG_CAP
+#define OPTEE_TYPE_MP PSCI_TOS_NOT_PRESENT_MP
+
+/*******************************************************************************
+ * OPTEE migrate type information as known to the OPTEED. We assume that
+ * the OPTEED is dealing with an MP Secure Payload.
+ ******************************************************************************/
+#define OPTEE_MIGRATE_INFO OPTEE_TYPE_MP
+
+/*******************************************************************************
+ * Number of cpus that are present on this platform. TODO: Rely on a topology
+ * tree to determine this in the future to avoid assumptions about mpidr
+ * allocation
+ ******************************************************************************/
+#define OPTEED_CORE_COUNT PLATFORM_CORE_COUNT
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define OPTEED_C_RT_CTX_X19 0x0
+#define OPTEED_C_RT_CTX_X20 0x8
+#define OPTEED_C_RT_CTX_X21 0x10
+#define OPTEED_C_RT_CTX_X22 0x18
+#define OPTEED_C_RT_CTX_X23 0x20
+#define OPTEED_C_RT_CTX_X24 0x28
+#define OPTEED_C_RT_CTX_X25 0x30
+#define OPTEED_C_RT_CTX_X26 0x38
+#define OPTEED_C_RT_CTX_X27 0x40
+#define OPTEED_C_RT_CTX_X28 0x48
+#define OPTEED_C_RT_CTX_X29 0x50
+#define OPTEED_C_RT_CTX_X30 0x58
+#define OPTEED_C_RT_CTX_SIZE 0x60
+#define OPTEED_C_RT_CTX_ENTRIES (OPTEED_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <lib/cassert.h>
+
+typedef uint32_t optee_vector_isn_t;
+
+typedef struct optee_vectors {
+ optee_vector_isn_t yield_smc_entry;
+ optee_vector_isn_t fast_smc_entry;
+ optee_vector_isn_t cpu_on_entry;
+ optee_vector_isn_t cpu_off_entry;
+ optee_vector_isn_t cpu_resume_entry;
+ optee_vector_isn_t cpu_suspend_entry;
+ optee_vector_isn_t fiq_entry;
+ optee_vector_isn_t system_off_entry;
+ optee_vector_isn_t system_reset_entry;
+} optee_vectors_t;
+
+/*
+ * The number of arguments to save during an SMC call for OPTEE.
+ * Currently only x1 and x2 are used by OPTEE.
+ */
+#define OPTEE_NUM_ARGS 0x2
+
+/* AArch64 callee saved general purpose register context structure. */
+DEFINE_REG_STRUCT(c_rt_regs, OPTEED_C_RT_CTX_ENTRIES);
+
+/*
+ * Compile time assertion to ensure that both the compiler and linker
+ * have the same double word aligned view of the size of the C runtime
+ * register context.
+ */
+CASSERT(OPTEED_C_RT_CTX_SIZE == sizeof(c_rt_regs_t),
+ assert_spd_c_rt_regs_size_mismatch);
+
+/*******************************************************************************
+ * Structure which helps the OPTEED to maintain the per-cpu state of OPTEE.
+ * 'state' - collection of flags to track OPTEE state e.g. on/off
+ * 'mpidr' - mpidr to associate a context with a cpu
+ * 'c_rt_ctx' - stack address to restore C runtime context from after
+ * returning from a synchronous entry into OPTEE.
+ * 'cpu_ctx' - space to maintain OPTEE architectural state
+ ******************************************************************************/
+typedef struct optee_context {
+ uint32_t state;
+ uint64_t mpidr;
+ uint64_t c_rt_ctx;
+ cpu_context_t cpu_ctx;
+} optee_context_t;
+
+/* OPTEED power management handlers */
+extern const spd_pm_ops_t opteed_pm;
+
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+struct optee_vectors;
+
+/*******************************************************************************
+ * Function & Data prototypes
+ ******************************************************************************/
+uint64_t opteed_enter_sp(uint64_t *c_rt_ctx);
+void __dead2 opteed_exit_sp(uint64_t c_rt_ctx, uint64_t ret);
+uint64_t opteed_synchronous_sp_entry(optee_context_t *optee_ctx);
+void __dead2 opteed_synchronous_sp_exit(optee_context_t *optee_ctx, uint64_t ret);
+void opteed_init_optee_ep_state(struct entry_point_info *optee_entry_point,
+ uint32_t rw,
+ uint64_t pc,
+ uint64_t pageable_part,
+ uint64_t mem_limit,
+ uint64_t dt_addr,
+ optee_context_t *optee_ctx);
+void opteed_cpu_on_finish_handler(u_register_t unused);
+
+extern optee_context_t opteed_sp_context[OPTEED_CORE_COUNT];
+extern uint32_t opteed_rw;
+extern struct optee_vectors *optee_vector_table;
+#endif /*__ASSEMBLER__*/
+
+#endif /* OPTEED_PRIVATE_H */
diff --git a/services/spd/opteed/teesmc_opteed.h b/services/spd/opteed/teesmc_opteed.h
new file mode 100644
index 0000000..4026fa4
--- /dev/null
+++ b/services/spd/opteed/teesmc_opteed.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2014-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* Copyright (c) 2014, Linaro Limited. All rights reserved. */
+
+#ifndef TEESMC_OPTEED_H
+#define TEESMC_OPTEED_H
+
+#include "teesmc_opteed_macros.h"
+
+/*
+ * This section specifies SMC function IDs used when returning from TEE to the
+ * secure monitor.
+ *
+ * All SMC Function IDs indicate the SMC32 Calling Convention but will carry
+ * full 64 bit values in the argument registers if invoked from AArch64
+ * mode. This violates the SMC Calling Convention, but since this
+ * convention only covers the API towards the Normal World, it's something that
+ * only concerns the OP-TEE Dispatcher in Trusted Firmware-A and OP-TEE
+ * OS at Secure EL1.
+ */
+
+/*
+ * Issued when returning from initial entry.
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_ENTRY_DONE
+ * r1/x1 Pointer to entry vector
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_ENTRY_DONE 0
+#define TEESMC_OPTEED_RETURN_ENTRY_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_ENTRY_DONE)
+
+
+
+/*
+ * Issued when returning from "cpu_on" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_ON_DONE
+ * r1/x1 0 on success and anything else to indicate error condition
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_ON_DONE 1
+#define TEESMC_OPTEED_RETURN_ON_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_ON_DONE)
+
+/*
+ * Issued when returning from "cpu_off" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_OFF_DONE
+ * r1/x1 0 on success and anything else to indicate error condition
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_OFF_DONE 2
+#define TEESMC_OPTEED_RETURN_OFF_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_OFF_DONE)
+
+/*
+ * Issued when returning from "cpu_suspend" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_SUSPEND_DONE
+ * r1/x1 0 on success and anything else to indicate error condition
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_SUSPEND_DONE 3
+#define TEESMC_OPTEED_RETURN_SUSPEND_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_SUSPEND_DONE)
+
+/*
+ * Issued when returning from "cpu_resume" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_RESUME_DONE
+ * r1/x1 0 on success and anything else to indicate error condition
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_RESUME_DONE 4
+#define TEESMC_OPTEED_RETURN_RESUME_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_RESUME_DONE)
+
+/*
+ * Issued when returning from "std_smc" or "fast_smc" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_CALL_DONE
+ * r1-4/x1-4	Return values 0-3 which will be passed to the normal world
+ *		in r0-3/x0-3
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_CALL_DONE 5
+#define TEESMC_OPTEED_RETURN_CALL_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_CALL_DONE)
+
+/*
+ * Issued when returning from "fiq" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_FIQ_DONE
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_FIQ_DONE 6
+#define TEESMC_OPTEED_RETURN_FIQ_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_FIQ_DONE)
+
+/*
+ * Issued when returning from "system_off" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_SYSTEM_OFF_DONE 7
+#define TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_SYSTEM_OFF_DONE)
+
+/*
+ * Issued when returning from "system_reset" vector
+ *
+ * Register usage:
+ * r0/x0 SMC Function ID, TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
+ */
+#define TEESMC_OPTEED_FUNCID_RETURN_SYSTEM_RESET_DONE 8
+#define TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE \
+ TEESMC_OPTEED_RV(TEESMC_OPTEED_FUNCID_RETURN_SYSTEM_RESET_DONE)
+
+/*
+ * This section specifies SMC function IDs used when the secure monitor is
+ * invoked from the non-secure world.
+ */
+
+/*
+ * Load OP-TEE image from the payload specified in the registers.
+ *
+ * WARNING: Use this cautiously as it could lead to insecure loading of the
+ * Trusted OS. Further details are in opteed.mk.
+ *
+ * Call register usage:
+ * x0 SMC Function ID, OPTEE_SMC_CALL_LOAD_IMAGE
+ * x1	Upper 32 bits of the 64-bit payload size
+ * x2	Lower 32 bits of the 64-bit payload size
+ * x3	Upper 32 bits of the physical address of the payload
+ * x4	Lower 32 bits of the physical address of the payload
+ *
+ * The payload consists of an optee_header struct that contains optee_image
+ * structs in a flex array; immediately following that in memory is the data
+ * referenced by the optee_image structs.
+ * Example:
+ *
+ * struct optee_header (with n images specified)
+ * image 0 data
+ * image 1 data
+ * ...
+ * image n-1 data
+ *
+ * Returns 0 on success and an error code otherwise.
+ */
+#define NSSMC_OPTEED_FUNCID_LOAD_IMAGE 2
+#define NSSMC_OPTEED_CALL_LOAD_IMAGE \
+ NSSMC_OPTEED_CALL(NSSMC_OPTEED_FUNCID_LOAD_IMAGE)
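+
+/*
+ * Illustrative sketch only: the optee_header/optee_image definitions used by
+ * the dispatcher live elsewhere in the source tree, so the field names below
+ * are assumptions meant to show the expected payload layout rather than the
+ * authoritative definitions.
+ */
+#if 0	/* documentation-only sketch, not compiled */
+struct optee_image {
+	uint32_t load_addr_hi;	/* upper 32 bits of the image load address */
+	uint32_t load_addr_lo;	/* lower 32 bits of the image load address */
+	uint32_t image_id;	/* identifies the image */
+	uint32_t size;		/* image size in bytes */
+};
+
+struct optee_header {
+	uint32_t magic;		/* magic value identifying an OP-TEE payload */
+	uint8_t version;	/* header format version */
+	uint8_t arch;		/* assumed: 0 for AArch32, 1 for AArch64 */
+	uint16_t flags;
+	uint32_t nb_images;	/* number of entries in optee_image_list */
+	struct optee_image optee_image_list[];	/* image data follows in order */
+};
+#endif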
+
+/*
+ * Returns the UID of the OP-TEE image loading service if image loading is
+ * enabled and the image has not been loaded yet. Otherwise, this call will be
+ * passed through to OP-TEE where it will return the OP-TEE UID.
+ */
+#define NSSMC_OPTEED_FUNCID_CALLS_UID 0xFF01
+#define NSSMC_OPTEED_CALL_UID \
+ NSSMC_OPTEED_CALL(NSSMC_OPTEED_FUNCID_CALLS_UID)
+
+#endif /*TEESMC_OPTEED_H*/
diff --git a/services/spd/opteed/teesmc_opteed_macros.h b/services/spd/opteed/teesmc_opteed_macros.h
new file mode 100644
index 0000000..7219140
--- /dev/null
+++ b/services/spd/opteed/teesmc_opteed_macros.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2014-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef TEESMC_OPTEED_MACROS_H
+#define TEESMC_OPTEED_MACROS_H
+
+#include <common/runtime_svc.h>
+
+#define TEESMC_OPTEED_RV(func_num) \
+ ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \
+ ((SMC_32) << FUNCID_CC_SHIFT) | \
+ (62 << FUNCID_OEN_SHIFT) | \
+ ((func_num) & FUNCID_NUM_MASK))
+
+#define NSSMC_OPTEED_CALL(func_num) \
+ ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \
+ ((SMC_32) << FUNCID_CC_SHIFT) | \
+ (63 << FUNCID_OEN_SHIFT) | \
+ ((func_num) & FUNCID_NUM_MASK))
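+
+/*
+ * Worked example (derived from the standard SMCCC field layout: call type in
+ * bit 31, calling convention in bit 30, OEN in bits 29:24, function number in
+ * bits 15:0):
+ *
+ *   TEESMC_OPTEED_RV(5)       = 0x80000000 | (62 << 24) | 0x0005 = 0xBE000005
+ *   NSSMC_OPTEED_CALL(0xFF01) = 0x80000000 | (63 << 24) | 0xFF01 = 0xBF00FF01
+ *
+ * i.e. fast SMC32 calls within the Trusted OS call range.
+ */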
+
+#endif /* TEESMC_OPTEED_MACROS_H */
diff --git a/services/spd/pncd/pncd.mk b/services/spd/pncd/pncd.mk
new file mode 100644
index 0000000..0f8eb25
--- /dev/null
+++ b/services/spd/pncd/pncd.mk
@@ -0,0 +1,24 @@
+# Copyright (c) 2021-2022, ProvenRun S.A.S. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+PNCD_DIR := services/spd/pncd
+SPD_INCLUDES := -Iinclude/bl32/pnc
+SPD_INCLUDES += -Iinclude/common/
+
+SPD_SOURCES := services/spd/pncd/pncd_common.c \
+ services/spd/pncd/pncd_helpers.S \
+ services/spd/pncd/pncd_main.c
+
+NEED_BL32 := yes
+
+# The following constants need to be defined:
+# - SPD_PNCD_NS_IRQ: IRQ number used to notify NS world when SMC_ACTION_FROM_S is received
+# - SPD_PNCD_S_IRQ: IRQ number used to notify S world when SMC_ACTION_FROM_NS is received
+$(eval $(call assert_numerics, SPD_PNCD_NS_IRQ SPD_PNCD_S_IRQ))
+
+$(eval $(call add_defines,\
+ $(sort \
+ SPD_PNCD_NS_IRQ \
+ SPD_PNCD_S_IRQ \
+)))
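+
+# Hypothetical example invocation (the IRQ numbers are placeholders and must
+# match the platform's interrupt map):
+#   make PLAT=<platform> SPD=pncd SPD_PNCD_NS_IRQ=14 SPD_PNCD_S_IRQ=15 all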
diff --git a/services/spd/pncd/pncd_common.c b/services/spd/pncd/pncd_common.c
new file mode 100644
index 0000000..6fdb629
--- /dev/null
+++ b/services/spd/pncd/pncd_common.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2021-2022, ProvenRun S.A.S. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/utils.h>
+#include <plat/common/platform.h>
+
+#include "pncd_private.h"
+
+/*******************************************************************************
+ * Given a secure payload entrypoint info pointer, entry point PC & pointer to a
+ * context data structure, this function will initialize the PNC context and
+ * entry point info for the secure payload.
+ ******************************************************************************/
+void pncd_init_pnc_ep_state(struct entry_point_info *pnc_entry_point,
+ uint64_t pc,
+ pnc_context_t *pnc_ctx)
+{
+ uint32_t ep_attr;
+
+ /* Passing a NULL context is a critical programming error */
+ assert(pnc_ctx);
+ assert(pnc_entry_point);
+ assert(pc);
+
+ /* Associate this context with the current cpu */
+ pnc_ctx->mpidr = read_mpidr();
+
+ cm_set_context(&pnc_ctx->cpu_ctx, SECURE);
+
+ /* initialise an entrypoint to set up the CPU context */
+ ep_attr = SECURE | EP_ST_ENABLE;
+ if (read_sctlr_el3() & SCTLR_EE_BIT) {
+ ep_attr |= EP_EE_BIG;
+ }
+ SET_PARAM_HEAD(pnc_entry_point, PARAM_EP, VERSION_1, ep_attr);
+
+ pnc_entry_point->pc = pc;
+ pnc_entry_point->spsr = SPSR_64(MODE_EL1,
+ MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+ memset(&pnc_entry_point->args, 0, sizeof(pnc_entry_point->args));
+}
+
+/*******************************************************************************
+ * This function takes an SP context pointer and:
+ * 1. Applies the S-EL1 system register context from pnc_ctx->cpu_ctx.
+ * 2. Saves the current C runtime state (callee saved registers) on the stack
+ * frame and saves a reference to this state.
+ * 3. Calls el3_exit() so that the EL3 system and general purpose registers
+ * from the pnc_ctx->cpu_ctx are used to enter the secure payload image.
+ ******************************************************************************/
+uint64_t pncd_synchronous_sp_entry(pnc_context_t *pnc_ctx)
+{
+ assert(pnc_ctx != NULL);
+ assert(pnc_ctx->c_rt_ctx == 0U);
+
+ /* Apply the Secure EL1 system register context and switch to it */
+ assert(cm_get_context(SECURE) == &pnc_ctx->cpu_ctx);
+ cm_el1_sysregs_context_restore(SECURE);
+#if CTX_INCLUDE_FPREGS
+ fpregs_context_restore(get_fpregs_ctx(cm_get_context(SECURE)));
+#endif
+ cm_set_next_eret_context(SECURE);
+
+ return pncd_enter_sp(&pnc_ctx->c_rt_ctx);
+}
+
+
+/*******************************************************************************
+ * This function takes an SP context pointer and:
+ * 1. Saves the S-EL1 system register context to pnc_ctx->cpu_ctx.
+ * 2. Restores the current C runtime state (callee saved registers) from the
+ * stack frame using the reference to this state saved in pncd_enter_sp().
+ * 3. It does not need to save any general purpose or EL3 system register state
+ * as the generic smc entry routine should have saved those.
+ ******************************************************************************/
+void pncd_synchronous_sp_exit(pnc_context_t *pnc_ctx, uint64_t ret)
+{
+ assert(pnc_ctx != NULL);
+ /* Save the Secure EL1 system register context */
+ assert(cm_get_context(SECURE) == &pnc_ctx->cpu_ctx);
+ cm_el1_sysregs_context_save(SECURE);
+#if CTX_INCLUDE_FPREGS
+ fpregs_context_save(get_fpregs_ctx(cm_get_context(SECURE)));
+#endif
+
+ assert(pnc_ctx->c_rt_ctx != 0);
+ pncd_exit_sp(pnc_ctx->c_rt_ctx, ret);
+
+ /* Should never reach here */
+ panic();
+}
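+
+/*
+ * Usage sketch (illustrative only, based on the callers in this dispatcher):
+ * a synchronous entry made from one C context only "returns" when another
+ * code path performs the matching exit.
+ *
+ *	uint64_t rc;
+ *
+ *	rc = pncd_synchronous_sp_entry(&pncd_sp_context);
+ *	// Execution resumes here once pncd_synchronous_sp_exit(
+ *	//	&pncd_sp_context, value) is called; 'value' becomes 'rc'.
+ */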
diff --git a/services/spd/pncd/pncd_helpers.S b/services/spd/pncd/pncd_helpers.S
new file mode 100644
index 0000000..736b30f
--- /dev/null
+++ b/services/spd/pncd/pncd_helpers.S
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2021-2022, ProvenRun S.A.S. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "pncd_private.h"
+
+ .global pncd_enter_sp
+ /* ---------------------------------------------
+ * This function is called with SP_EL0 as stack.
+ * Here we stash our EL3 callee-saved registers
+ * on to the stack as a part of saving the C
+ * runtime and enter the secure payload.
+ * 'x0' contains a pointer to the memory where
+ * the address of the C runtime context is to be
+ * saved.
+ * ---------------------------------------------
+ */
+func pncd_enter_sp
+ /* Make space for the registers that we're going to save */
+ mov x3, sp
+ str x3, [x0, #0]
+ sub sp, sp, #PNCD_C_RT_CTX_SIZE
+
+ /* Save callee-saved registers on to the stack */
+ stp x19, x20, [sp, #PNCD_C_RT_CTX_X19]
+ stp x21, x22, [sp, #PNCD_C_RT_CTX_X21]
+ stp x23, x24, [sp, #PNCD_C_RT_CTX_X23]
+ stp x25, x26, [sp, #PNCD_C_RT_CTX_X25]
+ stp x27, x28, [sp, #PNCD_C_RT_CTX_X27]
+ stp x29, x30, [sp, #PNCD_C_RT_CTX_X29]
+
+ /* ---------------------------------------------
+ * Everything is setup now. el3_exit() will
+ * use the secure context to restore to the
+ * general purpose and EL3 system registers to
+ * ERET into the secure payload.
+ * ---------------------------------------------
+ */
+ b el3_exit
+endfunc pncd_enter_sp
+
+ /* ---------------------------------------------
+	 * This function is called with 'x0' pointing
+	 * to a C runtime context saved in
+	 * pncd_enter_sp(). It restores the saved
+	 * registers and jumps to that runtime with
+	 * 'x0' as the new sp. This destroys the C
+	 * runtime context that had been built on the
+	 * stack below the saved context by the caller.
+	 * The second parameter 'x1' is then passed as
+	 * the return value to the caller.
+ */
+ .global pncd_exit_sp
+func pncd_exit_sp
+ /* Restore the previous stack */
+ mov sp, x0
+
+	/* Restore callee-saved registers from the stack */
+ ldp x19, x20, [x0, #(PNCD_C_RT_CTX_X19 - PNCD_C_RT_CTX_SIZE)]
+ ldp x21, x22, [x0, #(PNCD_C_RT_CTX_X21 - PNCD_C_RT_CTX_SIZE)]
+ ldp x23, x24, [x0, #(PNCD_C_RT_CTX_X23 - PNCD_C_RT_CTX_SIZE)]
+ ldp x25, x26, [x0, #(PNCD_C_RT_CTX_X25 - PNCD_C_RT_CTX_SIZE)]
+ ldp x27, x28, [x0, #(PNCD_C_RT_CTX_X27 - PNCD_C_RT_CTX_SIZE)]
+ ldp x29, x30, [x0, #(PNCD_C_RT_CTX_X29 - PNCD_C_RT_CTX_SIZE)]
+
+ /* ---------------------------------------------
+ * This should take us back to the instruction
+ * after the call to the last pncd_enter_sp().
+ * Place the second parameter to x0 so that the
+ * caller will see it as a return value from the
+ * original entry call
+ * ---------------------------------------------
+ */
+ mov x0, x1
+ ret
+endfunc pncd_exit_sp
diff --git a/services/spd/pncd/pncd_main.c b/services/spd/pncd/pncd_main.c
new file mode 100644
index 0000000..99c4aa1
--- /dev/null
+++ b/services/spd/pncd/pncd_main.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2021-2022, ProvenRun S.A.S. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*******************************************************************************
+ * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
+ * plug-in component to the Secure Monitor, registered as a runtime service. The
+ * SPD is expected to be a functional extension of the Secure Payload (SP) that
+ * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
+ * the Trusted OS/Applications range to the dispatcher. The SPD will either
+ * handle the request locally or delegate it to the Secure Payload. It is also
+ * responsible for initialising and maintaining communication with the SP.
+ ******************************************************************************/
+
+#include <assert.h>
+#include <errno.h>
+#include <stddef.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <bl31/interrupt_mgmt.h>
+#include <bl_common.h>
+#include <common/debug.h>
+#include <common/ep_info.h>
+#include <drivers/arm/gic_common.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/spinlock.h>
+#include <plat/common/platform.h>
+#include <pnc.h>
+#include "pncd_private.h"
+#include <runtime_svc.h>
+#include <tools_share/uuid.h>
+
+/*******************************************************************************
+ * Structure to keep track of ProvenCore state
+ ******************************************************************************/
+static pnc_context_t pncd_sp_context;
+
+static bool ree_info;
+static uint64_t ree_base_addr;
+static uint64_t ree_length;
+static uint64_t ree_tag;
+
+static bool pnc_initialized;
+
+static spinlock_t smc_handler_lock;
+
+static int pncd_init(void);
+
+static void context_save(unsigned long security_state)
+{
+ assert(sec_state_is_valid(security_state));
+
+ cm_el1_sysregs_context_save((uint32_t) security_state);
+#if CTX_INCLUDE_FPREGS
+ fpregs_context_save(get_fpregs_ctx(cm_get_context(security_state)));
+#endif
+}
+
+static void *context_restore(unsigned long security_state)
+{
+ void *handle;
+
+ assert(sec_state_is_valid(security_state));
+
+ /* Get a reference to the next context */
+ handle = cm_get_context((uint32_t) security_state);
+ assert(handle);
+
+ /* Restore state */
+ cm_el1_sysregs_context_restore((uint32_t) security_state);
+#if CTX_INCLUDE_FPREGS
+ fpregs_context_restore(get_fpregs_ctx(cm_get_context(security_state)));
+#endif
+
+ cm_set_next_eret_context((uint32_t) security_state);
+
+ return handle;
+}
+
+static uint64_t pncd_sel1_interrupt_handler(uint32_t id,
+ uint32_t flags, void *handle, void *cookie);
+
+/*******************************************************************************
+ * Switch context to the specified security state and return the targeted
+ * handle. Note that the context may remain unchanged if the switch is not
+ * allowed.
+ ******************************************************************************/
+void *pncd_context_switch_to(unsigned long security_state)
+{
+ unsigned long sec_state_from =
+ security_state == SECURE ? NON_SECURE : SECURE;
+
+ assert(sec_state_is_valid(security_state));
+
+ /* Check if this is the first world switch */
+ if (!pnc_initialized) {
+ int rc;
+ uint32_t flags;
+
+ assert(sec_state_from == SECURE);
+
+ INFO("PnC initialization done\n");
+
+ /*
+ * Register an interrupt handler for S-EL1 interrupts
+ * when generated during code executing in the
+ * non-secure state.
+ */
+ flags = 0U;
+ set_interrupt_rm_flag(flags, NON_SECURE);
+ rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+ pncd_sel1_interrupt_handler,
+ flags);
+ if (rc != 0) {
+ ERROR("Failed to register S-EL1 interrupt handler (%d)\n",
+ rc);
+ panic();
+ }
+
+ context_save(SECURE);
+
+ pnc_initialized = true;
+
+ /*
+ * Release the lock before restoring the EL3 context to
+ * bl31_main.
+ */
+ spin_unlock(&smc_handler_lock);
+
+ /*
+ * SP reports completion. The SPD must have initiated
+ * the original request through a synchronous entry
+ * into the SP. Jump back to the original C runtime
+ * context.
+ */
+ pncd_synchronous_sp_exit(&pncd_sp_context, (uint64_t) 0x0);
+
+ /* Unreachable */
+ ERROR("Returned from pncd_synchronous_sp_exit... Should not happen\n");
+ panic();
+ }
+
+ /* Check that the world switch is allowed */
+ if (read_mpidr() != pncd_sp_context.mpidr) {
+ if (sec_state_from == SECURE) {
+ /*
+ * Secure -> Non-Secure world switch initiated on a CPU where there
+ * should be no Trusted OS running
+ */
+ WARN("Secure to Non-Secure switch requested on CPU where ProvenCore is not supposed to be running...\n");
+ }
+
+ /*
+ * Secure or Non-Secure world wants to switch world but there is no Secure
+ * software on this core
+ */
+ return cm_get_context((uint32_t) sec_state_from);
+ }
+
+ context_save(sec_state_from);
+
+ return context_restore(security_state);
+}
+
+/*******************************************************************************
+ * This function is the handler registered for S-EL1 interrupts by the PNCD. It
+ * validates the interrupt and upon success arranges entry into the PNC at
+ * 'pnc_sel1_intr_entry()' for handling the interrupt.
+ ******************************************************************************/
+static uint64_t pncd_sel1_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ /* Check the security state when the exception was generated */
+ assert(get_interrupt_src_ss(flags) == NON_SECURE);
+
+ /* Sanity check the pointer to this cpu's context */
+ assert(handle == cm_get_context(NON_SECURE));
+
+ /* switch to PnC */
+ handle = pncd_context_switch_to(SECURE);
+
+ assert(handle != NULL);
+
+ SMC_RET0(handle);
+}
+
+#pragma weak plat_pncd_setup
+int plat_pncd_setup(void)
+{
+ return 0;
+}
+
+/*******************************************************************************
+ * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
+ * (aarch32/aarch64) if not already known and initialises the context for entry
+ * into the SP for its initialisation.
+ ******************************************************************************/
+static int pncd_setup(void)
+{
+ entry_point_info_t *pnc_ep_info;
+
+ /*
+ * Get information about the Secure Payload (BL32) image. Its
+ * absence is a critical failure.
+ *
+ * TODO: Add support to conditionally include the SPD service
+ */
+ pnc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (!pnc_ep_info) {
+		WARN("No PNC provided by BL2 boot loader. Booting device without PNC initialization; SMCs destined for PNC will return SMC_UNK\n");
+ return 1;
+ }
+
+ /*
+	 * If there is no valid entry point for the SP, we return a non-zero
+	 * value signalling failure to initialize the service. We bail out
+	 * without registering any handlers.
+ */
+ if (!pnc_ep_info->pc) {
+ return 1;
+ }
+
+ pncd_init_pnc_ep_state(pnc_ep_info,
+ pnc_ep_info->pc,
+ &pncd_sp_context);
+
+ /*
+ * All PNCD initialization done. Now register our init function with
+ * BL31 for deferred invocation
+ */
+ bl31_register_bl32_init(&pncd_init);
+ bl31_set_next_image_type(NON_SECURE);
+
+ return plat_pncd_setup();
+}
+
+/*******************************************************************************
+ * This function passes control to the Secure Payload image (BL32) for the first
+ * time on the primary cpu after a cold boot. It assumes that a valid secure
+ * context has already been created by pncd_setup() which can be directly used.
+ * It also assumes that a valid non-secure context has been initialised by PSCI
+ * so it does not need to save and restore any non-secure state. This function
+ * performs a synchronous entry into the Secure payload. The SP passes control
+ * back to this routine through a SMC.
+ ******************************************************************************/
+static int32_t pncd_init(void)
+{
+ entry_point_info_t *pnc_entry_point;
+ uint64_t rc = 0;
+
+ /*
+ * Get information about the Secure Payload (BL32) image. Its
+ * absence is a critical failure.
+ */
+ pnc_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
+ assert(pnc_entry_point);
+
+ cm_init_my_context(pnc_entry_point);
+
+ /*
+	 * Arrange for an entry into the secure payload. Control will be
+	 * returned via the PNC_ENTRY_DONE case.
+ */
+ rc = pncd_synchronous_sp_entry(&pncd_sp_context);
+
+ /*
+ * If everything went well at this point, the return value should be 0.
+ */
+ return rc == 0;
+}
+
+#pragma weak plat_pncd_smc_handler
+/*******************************************************************************
+ * This function is responsible for handling the platform-specific SMCs in the
+ * Trusted OS/App range as defined in the SMC Calling Convention Document.
+ ******************************************************************************/
+uintptr_t plat_pncd_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ (void) smc_fid;
+ (void) x1;
+ (void) x2;
+ (void) x3;
+ (void) x4;
+ (void) cookie;
+ (void) flags;
+
+ SMC_RET1(handle, SMC_UNK);
+}
+
+/*******************************************************************************
+ * This function is responsible for handling all SMCs in the Trusted OS/App
+ * range as defined in the SMC Calling Convention Document. It is also
+ * responsible for communicating with the Secure payload to delegate work and
+ * return results back to the non-secure state. Lastly it will also return any
+ * information that the secure payload needs to do the work assigned to it.
+ *
+ * It should only be called with the smc_handler_lock held.
+ ******************************************************************************/
+static uintptr_t pncd_smc_handler_unsafe(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ uint32_t ns;
+
+ /* Determine which security state this SMC originated from */
+ ns = is_caller_non_secure(flags);
+
+ assert(ns != 0 || read_mpidr() == pncd_sp_context.mpidr);
+
+ switch (smc_fid) {
+ case SMC_CONFIG_SHAREDMEM:
+ if (ree_info) {
+ /* Do not Yield */
+ SMC_RET0(handle);
+ }
+
+ /*
+		 * Fetch the physical base address (x1: low word, x2: high
+		 * word), size (x3) and tag (x4) of the shared memory allocated
+		 * by the Non-Secure world. This memory will be used by PNC to
+		 * communicate with the Non-Secure world.
+ * Verifying the validity of these values is up to the Trusted
+ * OS.
+ */
+ ree_base_addr = x1 | (x2 << 32);
+ ree_length = x3;
+ ree_tag = x4;
+
+ INFO("IN SMC_CONFIG_SHAREDMEM: addr=%lx, length=%lx, tag=%lx\n",
+ (unsigned long) ree_base_addr,
+ (unsigned long) ree_length,
+ (unsigned long) ree_tag);
+
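+		/* Both the base address and the length must be 2MB aligned */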
+ if ((ree_base_addr % 0x200000) != 0) {
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ if ((ree_length % 0x200000) != 0) {
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ ree_info = true;
+
+ /* Do not Yield */
+ SMC_RET4(handle, 0, 0, 0, 0);
+
+ break;
+
+ case SMC_GET_SHAREDMEM:
+ if (ree_info) {
+ x1 = (1U << 16) | ree_tag;
+ x2 = ree_base_addr & 0xFFFFFFFF;
+ x3 = (ree_base_addr >> 32) & 0xFFFFFFFF;
+ x4 = ree_length & 0xFFFFFFFF;
+ SMC_RET4(handle, x1, x2, x3, x4);
+ } else {
+ SMC_RET4(handle, 0, 0, 0, 0);
+ }
+
+ break;
+
+ case SMC_ACTION_FROM_NS:
+ if (ns == 0) {
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ if (SPD_PNCD_S_IRQ < MIN_PPI_ID) {
+ plat_ic_raise_s_el1_sgi(SPD_PNCD_S_IRQ,
+ pncd_sp_context.mpidr);
+ } else {
+ plat_ic_set_interrupt_pending(SPD_PNCD_S_IRQ);
+ }
+
+ SMC_RET0(handle);
+
+ break;
+
+ case SMC_ACTION_FROM_S:
+ if (ns != 0) {
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ if (SPD_PNCD_NS_IRQ < MIN_PPI_ID) {
+ /*
+ * NS SGI is sent to the same core as the one running
+ * PNC
+ */
+ plat_ic_raise_ns_sgi(SPD_PNCD_NS_IRQ, read_mpidr());
+ } else {
+ plat_ic_set_interrupt_pending(SPD_PNCD_NS_IRQ);
+ }
+
+ SMC_RET0(handle);
+
+ break;
+
+ case SMC_YIELD:
+ assert(handle == cm_get_context(ns != 0 ? NON_SECURE : SECURE));
+ handle = pncd_context_switch_to(ns != 0 ? SECURE : NON_SECURE);
+
+ assert(handle != NULL);
+
+ SMC_RET0(handle);
+
+ break;
+
+ default:
+ INFO("Unknown smc: %x\n", smc_fid);
+ break;
+ }
+
+ return plat_pncd_smc_handler(smc_fid, x1, x2, x3, x4,
+ cookie, handle, flags);
+}
+
+static uintptr_t pncd_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ uintptr_t ret;
+
+ /* SMC handling is serialized */
+ spin_lock(&smc_handler_lock);
+ ret = pncd_smc_handler_unsafe(smc_fid, x1, x2, x3, x4, cookie, handle,
+ flags);
+ spin_unlock(&smc_handler_lock);
+
+ return ret;
+}
+
+/* Define a SPD runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+ pncd_fast,
+ OEN_TOS_START,
+ OEN_TOS_END,
+ SMC_TYPE_FAST,
+ pncd_setup,
+ pncd_smc_handler
+);
+
+/* Define a SPD runtime service descriptor for standard SMC calls */
+DECLARE_RT_SVC(
+ pncd_std,
+ OEN_TOS_START,
+ OEN_TOS_END,
+ SMC_TYPE_YIELD,
+ NULL,
+ pncd_smc_handler
+);
diff --git a/services/spd/pncd/pncd_private.h b/services/spd/pncd/pncd_private.h
new file mode 100644
index 0000000..8c9b634
--- /dev/null
+++ b/services/spd/pncd/pncd_private.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved.
+ * Portions copyright (c) 2021-2022, ProvenRun S.A.S. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PNCD_PRIVATE_H__
+#define __PNCD_PRIVATE_H__
+
+#ifndef __ASSEMBLER__
+#include <stdint.h>
+#endif /* __ASSEMBLER__ */
+
+#include <context.h>
+#ifndef __ASSEMBLER__
+#include <lib/cassert.h>
+#endif /* __ASSEMBLER__ */
+
+#include <platform_def.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define PNCD_C_RT_CTX_X19 U(0x0)
+#define PNCD_C_RT_CTX_X20 U(0x8)
+#define PNCD_C_RT_CTX_X21 U(0x10)
+#define PNCD_C_RT_CTX_X22 U(0x18)
+#define PNCD_C_RT_CTX_X23 U(0x20)
+#define PNCD_C_RT_CTX_X24 U(0x28)
+#define PNCD_C_RT_CTX_X25 U(0x30)
+#define PNCD_C_RT_CTX_X26 U(0x38)
+#define PNCD_C_RT_CTX_X27 U(0x40)
+#define PNCD_C_RT_CTX_X28 U(0x48)
+#define PNCD_C_RT_CTX_X29 U(0x50)
+#define PNCD_C_RT_CTX_X30 U(0x58)
+#define PNCD_C_RT_CTX_SIZE U(0x60)
+#define PNCD_C_RT_CTX_ENTRIES (PNCD_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+
+/* AArch64 callee saved general purpose register context structure. */
+DEFINE_REG_STRUCT(c_rt_regs, PNCD_C_RT_CTX_ENTRIES);
+
+/*
+ * Compile time assertion to ensure that both the compiler and linker
+ * have the same double word aligned view of the size of the C runtime
+ * register context.
+ */
+CASSERT(sizeof(c_rt_regs_t) == PNCD_C_RT_CTX_SIZE,
+ assert_spd_c_rt_regs_size_mismatch);
+
+/*******************************************************************************
+ * Structure which helps the SPD to maintain the per-cpu state of the SP.
+ * 'mpidr' - mpidr of the CPU running PNC
+ * 'c_rt_ctx' - stack address to restore C runtime context from after
+ * returning from a synchronous entry into the SP.
+ * 'cpu_ctx' - space to maintain SP architectural state
+ ******************************************************************************/
+typedef struct pnc_context {
+ uint64_t mpidr;
+ uint64_t c_rt_ctx;
+ cpu_context_t cpu_ctx;
+} pnc_context_t;
+
+/*******************************************************************************
+ * Function & Data prototypes
+ ******************************************************************************/
+uint64_t pncd_enter_sp(uint64_t *c_rt_ctx);
+void __dead2 pncd_exit_sp(uint64_t c_rt_ctx, uint64_t ret);
+uint64_t pncd_synchronous_sp_entry(pnc_context_t *pnc_ctx);
+void __dead2 pncd_synchronous_sp_exit(pnc_context_t *pnc_ctx, uint64_t ret);
+void pncd_init_pnc_ep_state(struct entry_point_info *pnc_ep,
+ uint64_t pc,
+ pnc_context_t *pnc_ctx);
+#endif /* __ASSEMBLER__ */
+
+#endif /* __PNCD_PRIVATE_H__ */
diff --git a/services/spd/tlkd/tlkd.mk b/services/spd/tlkd/tlkd.mk
new file mode 100644
index 0000000..56de0a6
--- /dev/null
+++ b/services/spd/tlkd/tlkd.mk
@@ -0,0 +1,14 @@
+#
+# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${ERROR_DEPRECATED},0)
+SPD_INCLUDES := -Iinclude/bl32/payloads
+endif
+
+SPD_SOURCES := services/spd/tlkd/tlkd_common.c \
+ services/spd/tlkd/tlkd_helpers.S \
+ services/spd/tlkd/tlkd_main.c \
+ services/spd/tlkd/tlkd_pm.c
diff --git a/services/spd/tlkd/tlkd_common.c b/services/spd/tlkd/tlkd_common.c
new file mode 100644
index 0000000..820bd8a
--- /dev/null
+++ b/services/spd/tlkd/tlkd_common.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <lib/el3_runtime/context_mgmt.h>
+
+#include "tlkd_private.h"
+
+#define AT_MASK 3
+
+/*******************************************************************************
+ * This function helps the SP to translate NS/S virtual addresses.
+ ******************************************************************************/
+uint64_t tlkd_va_translate(uintptr_t va, int type)
+{
+ uint64_t pa;
+
+ if (type & TLK_TRANSLATE_NS_VADDR) {
+
+ /* save secure context */
+ cm_el1_sysregs_context_save(SECURE);
+
+ /* restore non-secure context */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+
+ /* switch NS bit to start using 64-bit, non-secure mappings */
+ write_scr(cm_get_scr_el3(NON_SECURE));
+ isb();
+ }
+
+ int at = type & AT_MASK;
+ switch (at) {
+ case 0:
+ AT(ats12e1r, va);
+ break;
+ case 1:
+ AT(ats12e1w, va);
+ break;
+ case 2:
+ AT(ats12e0r, va);
+ break;
+ case 3:
+ AT(ats12e0w, va);
+ break;
+ default:
+ assert(0); /* Unreachable */
+ break;
+ }
+
+ /* get the (NS/S) physical address */
+ isb();
+ pa = read_par_el1();
+
+ /* Restore secure state */
+ if (type & TLK_TRANSLATE_NS_VADDR) {
+
+ /* restore secure context */
+ cm_el1_sysregs_context_restore(SECURE);
+
+ /* switch NS bit to start using 32-bit, secure mappings */
+ write_scr(cm_get_scr_el3(SECURE));
+ isb();
+ }
+
+ return pa;
+}
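+
+/*
+ * For reference (derived from the AT_MASK handling above): bits [1:0] of
+ * 'type' select the stage 1+2 lookup variant (0: EL1 read, 1: EL1 write,
+ * 2: EL0 read, 3: EL0 write), while bit 2 (TLK_TRANSLATE_NS_VADDR) makes the
+ * walk use the non-secure translation regime instead of the secure one.
+ */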
+
+/*******************************************************************************
+ * Given a secure payload entrypoint, register width, cpu id & pointer to a
+ * context data structure, this function will create a secure context ready for
+ * programming an entry into the secure payload.
+ ******************************************************************************/
+void tlkd_init_tlk_ep_state(struct entry_point_info *tlk_entry_point,
+ uint32_t rw,
+ uint64_t pc,
+ tlk_context_t *tlk_ctx)
+{
+ uint32_t ep_attr, spsr;
+
+ /* Passing a NULL context is a critical programming error */
+ assert(tlk_ctx);
+ assert(tlk_entry_point);
+ assert(pc);
+
+ /* Associate this context with the cpu specified */
+ tlk_ctx->mpidr = read_mpidr_el1();
+ clr_yield_smc_active_flag(tlk_ctx->state);
+ cm_set_context(&tlk_ctx->cpu_ctx, SECURE);
+
+ if (rw == SP_AARCH64)
+ spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ else
+ spsr = SPSR_MODE32(MODE32_svc,
+ SPSR_T_ARM,
+ read_sctlr_el3() & SCTLR_EE_BIT,
+ DISABLE_ALL_EXCEPTIONS);
+
+ /* initialise an entrypoint to set up the CPU context */
+ ep_attr = SECURE | EP_ST_ENABLE;
+ if (read_sctlr_el3() & SCTLR_EE_BIT)
+ ep_attr |= EP_EE_BIG;
+ SET_PARAM_HEAD(tlk_entry_point, PARAM_EP, VERSION_1, ep_attr);
+
+ tlk_entry_point->pc = pc;
+ tlk_entry_point->spsr = spsr;
+}
+
+/*******************************************************************************
+ * This function takes a TLK context pointer and:
+ * 1. Applies the S-EL1 system register context from tlk_ctx->cpu_ctx.
+ * 2. Saves the current C runtime state (callee saved registers) on the stack
+ * frame and saves a reference to this state.
+ * 3. Calls el3_exit() so that the EL3 system and general purpose registers
+ * from the tlk_ctx->cpu_ctx are used to enter the secure payload image.
+ ******************************************************************************/
+uint64_t tlkd_synchronous_sp_entry(tlk_context_t *tlk_ctx)
+{
+ uint64_t rc;
+
+ /* Passing a NULL context is a critical programming error */
+ assert(tlk_ctx);
+
+ /* Apply the Secure EL1 system register context and switch to it */
+ assert(cm_get_context(SECURE) == &tlk_ctx->cpu_ctx);
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ rc = tlkd_enter_sp(&tlk_ctx->c_rt_ctx);
+#if ENABLE_ASSERTIONS
+ tlk_ctx->c_rt_ctx = 0;
+#endif
+
+ return rc;
+}
+
+/*******************************************************************************
+ * This function takes a TLK context pointer and:
+ * 1. Saves the S-EL1 system register context to tlk_ctx->cpu_ctx.
+ * 2. Restores the current C runtime state (callee saved registers) from the
+ * stack frame using reference to this state saved in tlkd_enter_sp().
+ *    stack frame using the reference to this state saved in tlkd_enter_sp().
+ * as the generic smc entry routine should have saved those.
+ ******************************************************************************/
+void tlkd_synchronous_sp_exit(tlk_context_t *tlk_ctx, uint64_t ret)
+{
+ /* Passing a NULL context is a critical programming error */
+ assert(tlk_ctx);
+
+ /* Save the Secure EL1 system register context */
+ assert(cm_get_context(SECURE) == &tlk_ctx->cpu_ctx);
+ cm_el1_sysregs_context_save(SECURE);
+
+ assert(tlk_ctx->c_rt_ctx != 0);
+ tlkd_exit_sp(tlk_ctx->c_rt_ctx, ret);
+
+ /* Should never reach here */
+ assert(0);
+}
diff --git a/services/spd/tlkd/tlkd_helpers.S b/services/spd/tlkd/tlkd_helpers.S
new file mode 100644
index 0000000..6e616a6
--- /dev/null
+++ b/services/spd/tlkd/tlkd_helpers.S
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "tlkd_private.h"
+
+ .global tlkd_enter_sp
+ .global tlkd_exit_sp
+
+ /* ---------------------------------------------
+ * This function is called with SP_EL0 as stack.
+ * Here we stash our EL3 callee-saved registers
+ * on to the stack as a part of saving the C
+ * runtime and enter the secure payload.
+ * 'x0' contains a pointer to the memory where
+ * the address of the C runtime context is to be
+ * saved.
+ * ---------------------------------------------
+ */
+func tlkd_enter_sp
+ /* Make space for the registers that we're going to save */
+ mov x3, sp
+ str x3, [x0, #0]
+ sub sp, sp, #TLKD_C_RT_CTX_SIZE
+
+ /* Save callee-saved registers on to the stack */
+ stp x19, x20, [sp, #TLKD_C_RT_CTX_X19]
+ stp x21, x22, [sp, #TLKD_C_RT_CTX_X21]
+ stp x23, x24, [sp, #TLKD_C_RT_CTX_X23]
+ stp x25, x26, [sp, #TLKD_C_RT_CTX_X25]
+ stp x27, x28, [sp, #TLKD_C_RT_CTX_X27]
+ stp x29, x30, [sp, #TLKD_C_RT_CTX_X29]
+
+ /* ----------------------------------------------
+ * Everything is setup now. el3_exit() will
+ * use the secure context to restore to the
+ * general purpose and EL3 system registers to
+ * ERET into the secure payload.
+ * ----------------------------------------------
+ */
+ b el3_exit
+endfunc tlkd_enter_sp
+
+ /* ----------------------------------------------
+ * This function is called with 'x0' pointing to
+ * a C runtime context saved in tlkd_enter_sp().
+ * It restores the saved registers and jumps to
+ * that runtime with 'x0' as the new sp. This
+ * destroys the C runtime context that had been
+ * built on the stack below the saved context by
+	 * the caller. The second parameter 'x1' is then
+	 * passed as the return value to the caller.
+ * ----------------------------------------------
+ */
+func tlkd_exit_sp
+ /* Restore the previous stack */
+ mov sp, x0
+
+	/* Restore callee-saved registers from the stack */
+ ldp x19, x20, [x0, #(TLKD_C_RT_CTX_X19 - TLKD_C_RT_CTX_SIZE)]
+ ldp x21, x22, [x0, #(TLKD_C_RT_CTX_X21 - TLKD_C_RT_CTX_SIZE)]
+ ldp x23, x24, [x0, #(TLKD_C_RT_CTX_X23 - TLKD_C_RT_CTX_SIZE)]
+ ldp x25, x26, [x0, #(TLKD_C_RT_CTX_X25 - TLKD_C_RT_CTX_SIZE)]
+ ldp x27, x28, [x0, #(TLKD_C_RT_CTX_X27 - TLKD_C_RT_CTX_SIZE)]
+ ldp x29, x30, [x0, #(TLKD_C_RT_CTX_X29 - TLKD_C_RT_CTX_SIZE)]
+
+ /* ------------------------------------------------
+ * This should take us back to the instruction
+ * after the call to the last tlkd_enter_sp().
+ * Place the second parameter to x0 so that the
+ * caller will see it as a return value from the
+ * original entry call
+ * ------------------------------------------------
+ */
+ mov x0, x1
+ ret
+endfunc tlkd_exit_sp
diff --git a/services/spd/tlkd/tlkd_main.c b/services/spd/tlkd/tlkd_main.c
new file mode 100644
index 0000000..ecac435
--- /dev/null
+++ b/services/spd/tlkd/tlkd_main.c
@@ -0,0 +1,546 @@
+/*
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*******************************************************************************
+ * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
+ * plug-in component to the Secure Monitor, registered as a runtime service. The
+ * SPD is expected to be a functional extension of the Secure Payload (SP) that
+ * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
+ * the Trusted OS/Applications range to the dispatcher. The SPD will either
+ * handle the request locally or delegate it to the Secure Payload. It is also
+ * responsible for initialising and maintaining communication with the SP.
+ ******************************************************************************/
+#include <assert.h>
+#include <bl31/interrupt_mgmt.h>
+#include <errno.h>
+#include <stddef.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <bl32/payloads/tlk.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <plat/common/platform.h>
+#include <tools_share/uuid.h>
+
+#include "tlkd_private.h"
+
+extern const spd_pm_ops_t tlkd_pm_ops;
+
+/*******************************************************************************
+ * Per-cpu Secure Payload state
+ ******************************************************************************/
+tlk_context_t tlk_ctx;
+
+/*******************************************************************************
+ * CPU number on which TLK booted up
+ ******************************************************************************/
+static uint32_t boot_cpu;
+
+/* TLK UID: RFC-4122 compliant UUID (version-5, sha-1) */
+DEFINE_SVC_UUID2(tlk_uuid,
+ 0xc9e911bd, 0xba2b, 0xee52, 0xb1, 0x72,
+ 0x46, 0x1f, 0xba, 0x97, 0x7f, 0x63);
+
+static int32_t tlkd_init(void);
+
+/*******************************************************************************
+ * Secure Payload Dispatcher's timer interrupt handler
+ ******************************************************************************/
+static uint64_t tlkd_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ cpu_context_t *s_cpu_context;
+ int irq = plat_ic_get_pending_interrupt_id();
+
+ /* acknowledge the interrupt and mark it complete */
+ (void)plat_ic_acknowledge_interrupt();
+ plat_ic_end_of_interrupt(irq);
+
+ /*
+ * Disable the routing of NS interrupts from secure world to
+ * EL3 while interrupted on this core.
+ */
+ disable_intr_rm_local(INTR_TYPE_S_EL1, SECURE);
+
+ /* Check the security state when the exception was generated */
+ assert(get_interrupt_src_ss(flags) == NON_SECURE);
+ assert(handle == cm_get_context(NON_SECURE));
+
+ /* Save non-secure state */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /* Get a reference to the secure context */
+ s_cpu_context = cm_get_context(SECURE);
+ assert(s_cpu_context);
+
+ /*
+	 * Restore the secure state. There is no need to save the
+ * secure system register context since the SP was supposed
+ * to preserve it during S-EL1 interrupt handling.
+ */
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+	/* Provide the IRQ number to the SP */
+ SMC_RET4(s_cpu_context, (uint32_t)TLK_IRQ_FIRED, 0, (uint32_t)irq, 0);
+}
+
+/*******************************************************************************
+ * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
+ * (aarch32/aarch64) if not already known and initialises the context for entry
+ * into the SP for its initialisation.
+ ******************************************************************************/
+static int32_t tlkd_setup(void)
+{
+ entry_point_info_t *tlk_ep_info;
+ uint32_t flags;
+ int32_t ret;
+
+ /*
+ * Get information about the Secure Payload (BL32) image. Its
+ * absence is a critical failure.
+ */
+ tlk_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (!tlk_ep_info) {
+ WARN("No SP provided. Booting device without SP"
+		" initialization. SMCs destined for SP"
+ " will return SMC_UNK\n");
+ return 1;
+ }
+
+ /*
+	 * If there is no valid entry point for the SP, we return a non-zero
+	 * value signalling failure to initialize the service. We bail out
+	 * without registering any handlers.
+ */
+ if (!tlk_ep_info->pc)
+ return 1;
+
+ /*
+	 * Inspect the SP image's SPSR and determine its execution state,
+	 * i.e. whether AArch32 or AArch64.
+ */
+ tlkd_init_tlk_ep_state(tlk_ep_info,
+ (tlk_ep_info->spsr >> MODE_RW_SHIFT) & MODE_RW_MASK,
+ tlk_ep_info->pc,
+ &tlk_ctx);
+
+ /* get a list of all S-EL1 IRQs from the platform */
+
+ /* register interrupt handler */
+ flags = 0;
+ set_interrupt_rm_flag(flags, NON_SECURE);
+ ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+ tlkd_interrupt_handler,
+ flags);
+ if (ret != 0) {
+ ERROR("failed to register tlkd interrupt handler (%d)\n", ret);
+ }
+
+ /*
+ * All TLK SPD initialization done. Now register our init function
+ * with BL31 for deferred invocation
+ */
+ bl31_register_bl32_init(&tlkd_init);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * This function passes control to the Secure Payload image (BL32) for the first
+ * time on the primary cpu after a cold boot. It assumes that a valid secure
+ * context has already been created by tlkd_setup() which can be directly
+ * used. This function performs a synchronous entry into the Secure payload.
+ * The SP passes control back to this routine through a SMC.
+ ******************************************************************************/
+static int32_t tlkd_init(void)
+{
+ entry_point_info_t *tlk_entry_point;
+
+ /*
+ * Get information about the Secure Payload (BL32) image. Its
+ * absence is a critical failure.
+ */
+ tlk_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
+ assert(tlk_entry_point);
+
+ cm_init_my_context(tlk_entry_point);
+
+ /*
+ * TLK runs only on a single CPU. Store the value of the boot
+ * CPU for sanity checking later.
+ */
+ boot_cpu = plat_my_core_pos();
+
+ /*
+	 * Arrange for an entry into the secure payload.
+ */
+ return tlkd_synchronous_sp_entry(&tlk_ctx);
+}
+
+/*******************************************************************************
+ * This function is responsible for handling all SMCs in the Trusted OS/App
+ * range from the non-secure state as defined in the SMC Calling Convention
+ * Document. It is also responsible for communicating with the Secure payload
+ * to delegate work and return results back to the non-secure state. Lastly it
+ * will also return any information that the secure payload needs to do the
+ * work assigned to it.
+ ******************************************************************************/
+static uintptr_t tlkd_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ cpu_context_t *ns_cpu_context;
+ gp_regs_t *gp_regs;
+ uint32_t ns;
+ uint64_t par;
+
+ /* Passing a NULL context is a critical programming error */
+ assert(handle);
+
+ /* These SMCs are only supported by a single CPU */
+ if (boot_cpu != plat_my_core_pos())
+ SMC_RET1(handle, SMC_UNK);
+
+ /* Determine which security state this SMC originated from */
+ ns = is_caller_non_secure(flags);
+
+ switch (smc_fid) {
+
+ /*
+ * This function ID is used by SP to indicate that it was
+ * preempted by a non-secure world IRQ.
+ */
+ case TLK_PREEMPTED:
+
+ if (ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ assert(handle == cm_get_context(SECURE));
+ cm_el1_sysregs_context_save(SECURE);
+
+ /* Get a reference to the non-secure context */
+ ns_cpu_context = cm_get_context(NON_SECURE);
+ assert(ns_cpu_context);
+
+ /*
+ * Restore non-secure state. There is no need to save the
+ * secure system register context since the SP was supposed
+ * to preserve it during S-EL1 interrupt handling.
+ */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ SMC_RET1(ns_cpu_context, x1);
+
+ /*
+ * This is a request from the non-secure context to:
+ *
+	 * a. register shared memory with the SP for storing its
+ * activity logs.
+ * b. register shared memory with the SP for passing args
+ * required for maintaining sessions with the Trusted
+ * Applications.
+ * c. register shared persistent buffers for secure storage
+ * d. register NS DRAM ranges passed by Cboot
+ * e. register Root of Trust parameters from Cboot for Verified Boot
+ * f. open/close sessions
+ * g. issue commands to the Trusted Apps
+ * h. resume the preempted yielding SMC call.
+ */
+ case TLK_REGISTER_LOGBUF:
+ case TLK_REGISTER_REQBUF:
+ case TLK_SS_REGISTER_HANDLER:
+ case TLK_REGISTER_NS_DRAM_RANGES:
+ case TLK_SET_ROOT_OF_TRUST:
+ case TLK_OPEN_TA_SESSION:
+ case TLK_CLOSE_TA_SESSION:
+ case TLK_TA_LAUNCH_OP:
+ case TLK_TA_SEND_EVENT:
+ case TLK_RESUME_FID:
+ case TLK_SET_BL_VERSION:
+ case TLK_LOCK_BL_INTERFACE:
+ case TLK_BL_RPMB_SERVICE:
+
+ if (!ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ /*
+ * This is a fresh request from the non-secure client.
+ * The parameters are in x1 and x2. Figure out which
+ * registers need to be preserved, save the non-secure
+ * state and send the request to the secure payload.
+ */
+ assert(handle == cm_get_context(NON_SECURE));
+
+ /*
+ * Check if we are already processing a yielding SMC
+ * call. Of all the supported fids, only the "resume"
+ * fid expects the flag to be set.
+ */
+ if (smc_fid == TLK_RESUME_FID) {
+ if (!get_yield_smc_active_flag(tlk_ctx.state))
+ SMC_RET1(handle, SMC_UNK);
+ } else {
+ if (get_yield_smc_active_flag(tlk_ctx.state))
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /*
+ * Verify if there is a valid context to use.
+ */
+ assert(&tlk_ctx.cpu_ctx == cm_get_context(SECURE));
+
+ /*
+ * Mark the SP state as active.
+ */
+ set_yield_smc_active_flag(tlk_ctx.state);
+
+ /*
+ * We are done stashing the non-secure context. Ask the
+ * secure payload to do the work now.
+ */
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ /*
+ * TLK is a 32-bit Trusted OS and so expects the SMC
+ * arguments via r0-r7. TLK expects the monitor frame
+		 * registers to be 64 bits long. Hence, we pass x0 in
+		 * r0-r1, x1 in r2-r3, x2 in r4-r5 and x3 in r6-r7.
+ *
+ * As smc_fid is a uint32 value, r1 contains 0.
+ */
+ gp_regs = get_gpregs_ctx(&tlk_ctx.cpu_ctx);
+ write_ctx_reg(gp_regs, CTX_GPREG_X4, (uint32_t)x2);
+ write_ctx_reg(gp_regs, CTX_GPREG_X5, (uint32_t)(x2 >> 32));
+ write_ctx_reg(gp_regs, CTX_GPREG_X6, (uint32_t)x3);
+ write_ctx_reg(gp_regs, CTX_GPREG_X7, (uint32_t)(x3 >> 32));
+ SMC_RET4(&tlk_ctx.cpu_ctx, smc_fid, 0, (uint32_t)x1,
+ (uint32_t)(x1 >> 32));
+
+ /*
+ * Translate NS/EL1-S virtual addresses.
+ *
+ * x1 = virtual address
+ * x3 = type (NS/S)
+ *
+ * Returns PA:lo in r0, PA:hi in r1.
+ */
+ case TLK_VA_TRANSLATE:
+
+ /* Should be invoked only by secure world */
+ if (ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ /* NS virtual addresses are 64-bit long */
+ if (x3 & TLK_TRANSLATE_NS_VADDR)
+ x1 = (uint32_t)x1 | (x2 << 32);
+
+ if (!x1)
+ SMC_RET1(handle, SMC_UNK);
+
+ /*
+ * TODO: Sanity check x1. This would require platform
+ * support.
+ */
+
+ /* virtual address and type: ns/s */
+ par = tlkd_va_translate(x1, x3);
+
+ /* return physical address in r0-r1 */
+ SMC_RET4(handle, (uint32_t)par, (uint32_t)(par >> 32), 0, 0);
+
+ /*
+ * This is a request from the SP to mark completion of
+ * a yielding function ID.
+ */
+ case TLK_REQUEST_DONE:
+ if (ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ /*
+ * Mark the SP state as inactive.
+ */
+ clr_yield_smc_active_flag(tlk_ctx.state);
+
+ /* Get a reference to the non-secure context */
+ ns_cpu_context = cm_get_context(NON_SECURE);
+ assert(ns_cpu_context);
+
+ /*
+ * This is a request completion SMC and we must switch to
+ * the non-secure world to pass the result.
+ */
+ cm_el1_sysregs_context_save(SECURE);
+
+ /*
+ * We are done stashing the secure context. Switch to the
+ * non-secure context and return the result.
+ */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+ SMC_RET1(ns_cpu_context, x1);
+
+ /*
+ * This function ID is used only by the SP to indicate it has
+ * finished initialising itself after a cold boot
+ */
+ case TLK_ENTRY_DONE:
+ if (ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ /*
+ * SP has been successfully initialized. Register power
+ * management hooks with PSCI
+ */
+ psci_register_spd_pm_hook(&tlkd_pm_ops);
+
+ /*
+ * TLK reports completion. The SPD must have initiated
+ * the original request through a synchronous entry
+ * into the SP. Jump back to the original C runtime
+ * context.
+ */
+ tlkd_synchronous_sp_exit(&tlk_ctx, x1);
+ break;
+
+ /*
+ * These function IDs are used only by TLK to indicate it has
+ * finished:
+ * 1. suspending itself after an earlier psci cpu_suspend
+ * request.
+ * 2. resuming itself after an earlier psci cpu_suspend
+ * request.
+ * 3. powering down after an earlier psci system_off/system_reset
+ * request.
+ */
+ case TLK_SUSPEND_DONE:
+ case TLK_RESUME_DONE:
+
+ if (ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ /*
+ * TLK reports completion. TLKD must have initiated the
+ * original request through a synchronous entry into the SP.
+ * Jump back to the original C runtime context, and pass x1 as
+ * return value to the caller
+ */
+ tlkd_synchronous_sp_exit(&tlk_ctx, x1);
+ break;
+
+ /*
+ * This function ID is used by SP to indicate that it has completed
+ * handling the secure interrupt.
+ */
+ case TLK_IRQ_DONE:
+
+ if (ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ assert(handle == cm_get_context(SECURE));
+
+ /* save secure world context */
+ cm_el1_sysregs_context_save(SECURE);
+
+ /* Get a reference to the non-secure context */
+ ns_cpu_context = cm_get_context(NON_SECURE);
+ assert(ns_cpu_context);
+
+ /*
+ * Restore non-secure state. There is no need to save the
+ * secure system register context since the SP was supposed
+ * to preserve it during S-EL1 interrupt handling.
+ */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ SMC_RET0(ns_cpu_context);
+
+ /*
+ * Return the number of service function IDs implemented to
+ * provide service to non-secure
+ */
+ case TOS_CALL_COUNT:
+ SMC_RET1(handle, TLK_NUM_FID);
+
+ /*
+ * Return TLK's UID to the caller
+ */
+ case TOS_UID:
+ SMC_UUID_RET(handle, tlk_uuid);
+
+ /*
+ * Return the version of current implementation
+ */
+ case TOS_CALL_VERSION:
+ SMC_RET2(handle, TLK_VERSION_MAJOR, TLK_VERSION_MINOR);
+
+ default:
+ WARN("%s: Unhandled SMC: 0x%x\n", __func__, smc_fid);
+ break;
+ }
+
+ SMC_RET1(handle, SMC_UNK);
+}
+
+/* Define a SPD runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+ tlkd_tos_fast,
+
+ OEN_TOS_START,
+ OEN_TOS_END,
+ SMC_TYPE_FAST,
+ tlkd_setup,
+ tlkd_smc_handler
+);
+
+/* Define a SPD runtime service descriptor for yielding SMC calls */
+DECLARE_RT_SVC(
+ tlkd_tos_std,
+
+ OEN_TOS_START,
+ OEN_TOS_END,
+ SMC_TYPE_YIELD,
+ NULL,
+ tlkd_smc_handler
+);
+
+/* Define a SPD runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+ tlkd_tap_fast,
+
+ OEN_TAP_START,
+ OEN_TAP_END,
+ SMC_TYPE_FAST,
+ NULL,
+ tlkd_smc_handler
+);
+
+/* Define a SPD runtime service descriptor for yielding SMC calls */
+DECLARE_RT_SVC(
+ tlkd_tap_std,
+
+ OEN_TAP_START,
+ OEN_TAP_END,
+ SMC_TYPE_YIELD,
+ NULL,
+ tlkd_smc_handler
+);
diff --git a/services/spd/tlkd/tlkd_pm.c b/services/spd/tlkd/tlkd_pm.c
new file mode 100644
index 0000000..ed5bf77
--- /dev/null
+++ b/services/spd/tlkd/tlkd_pm.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <bl32/payloads/tlk.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/psci/psci.h>
+
+#include "tlkd_private.h"
+
+extern tlk_context_t tlk_ctx;
+
+#define MPIDR_CPU0 0x80000000
+
+/*******************************************************************************
+ * Return the type of payload TLKD is dealing with. Report the current
+ * resident cpu (mpidr format) if it is a UP/UP migratable payload.
+ ******************************************************************************/
+static int32_t cpu_migrate_info(u_register_t *resident_cpu)
+{
+ /* the payload runs only on CPU0 */
+ *resident_cpu = MPIDR_CPU0;
+
+ /* Uniprocessor, not migrate capable payload */
+ return PSCI_TOS_NOT_UP_MIG_CAP;
+}
+
+/*******************************************************************************
+ * This cpu is being suspended. Inform TLK of the SYSTEM_SUSPEND event, so
+ * that it can pass this information to its Trusted Apps.
+ ******************************************************************************/
+static void cpu_suspend_handler(u_register_t suspend_level)
+{
+ gp_regs_t *gp_regs;
+ int cpu = read_mpidr() & MPIDR_CPU_MASK;
+ int32_t rc = 0;
+
+ /*
+ * TLK runs only on CPU0 and suspends its Trusted Apps during
+ * SYSTEM_SUSPEND. It has no role to play during CPU_SUSPEND.
+ */
+ if ((cpu != 0) || (suspend_level != PLAT_MAX_PWR_LVL))
+ return;
+
+ /* pass system suspend event to TLK */
+ gp_regs = get_gpregs_ctx(&tlk_ctx.cpu_ctx);
+ write_ctx_reg(gp_regs, CTX_GPREG_X0, TLK_SYSTEM_SUSPEND);
+
+ /* Program the entry point and enter TLK */
+ rc = tlkd_synchronous_sp_entry(&tlk_ctx);
+
+ /*
+ * Read the response from TLK. A non-zero return means that
+ * something went wrong while communicating with it.
+ */
+ if (rc != 0)
+ panic();
+}
+
+/*******************************************************************************
+ * This cpu is being resumed. Inform TLK of the SYSTEM_SUSPEND exit, so
+ * that it can pass this information to its Trusted Apps.
+ ******************************************************************************/
+static void cpu_resume_handler(u_register_t suspend_level)
+{
+ gp_regs_t *gp_regs;
+ int cpu = read_mpidr() & MPIDR_CPU_MASK;
+ int32_t rc = 0;
+
+ /*
+ * TLK runs only on CPU0 and resumes its Trusted Apps during
+ * SYSTEM_SUSPEND exit. It has no role to play during CPU_SUSPEND
+ * exit.
+ */
+ if ((cpu != 0) || (suspend_level != PLAT_MAX_PWR_LVL))
+ return;
+
+ /* pass system resume event to TLK */
+ gp_regs = get_gpregs_ctx(&tlk_ctx.cpu_ctx);
+ write_ctx_reg(gp_regs, CTX_GPREG_X0, TLK_SYSTEM_RESUME);
+
+ /* Program the entry point and enter TLK */
+ rc = tlkd_synchronous_sp_entry(&tlk_ctx);
+
+ /*
+ * Read the response from TLK. A non-zero return means that
+ * something went wrong while communicating with it.
+ */
+ if (rc != 0)
+ panic();
+}
+
+/*******************************************************************************
+ * Structure populated by the Dispatcher to be given a chance to perform any
+ * bookkeeping before PSCI executes a power mgmt. operation.
+ ******************************************************************************/
+const spd_pm_ops_t tlkd_pm_ops = {
+ .svc_migrate_info = cpu_migrate_info,
+ .svc_suspend = cpu_suspend_handler,
+ .svc_suspend_finish = cpu_resume_handler,
+};
diff --git a/services/spd/tlkd/tlkd_private.h b/services/spd/tlkd/tlkd_private.h
new file mode 100644
index 0000000..ad36f5e
--- /dev/null
+++ b/services/spd/tlkd/tlkd_private.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TLKD_PRIVATE_H
+#define TLKD_PRIVATE_H
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <bl31/interrupt_mgmt.h>
+#include <context.h>
+#include <lib/psci/psci.h>
+
+/*
+ * This flag is used by the TLKD to determine if the SP is servicing a yielding
+ * SMC request prior to programming the next entry into the SP e.g. if SP
+ * execution is preempted by a non-secure interrupt and handed control to the
+ * normal world. If another request which is distinct from what the SP was
+ * previously doing arrives, then this flag will help the TLKD to either
+ * reject the new request or service it while ensuring that the previous context
+ * is not corrupted.
+ */
+#define YIELD_SMC_ACTIVE_FLAG_SHIFT 2
+#define YIELD_SMC_ACTIVE_FLAG_MASK 1
+#define get_yield_smc_active_flag(state) \
+ (((state) >> YIELD_SMC_ACTIVE_FLAG_SHIFT) \
+ & YIELD_SMC_ACTIVE_FLAG_MASK)
+#define set_yield_smc_active_flag(state) ((state) |= \
+ (1 << YIELD_SMC_ACTIVE_FLAG_SHIFT))
+#define clr_yield_smc_active_flag(state) ((state) &= \
+ ~(YIELD_SMC_ACTIVE_FLAG_MASK \
+ << YIELD_SMC_ACTIVE_FLAG_SHIFT))
+
+/*******************************************************************************
+ * Translate virtual address received from the NS world
+ ******************************************************************************/
+#define TLK_TRANSLATE_NS_VADDR 4
+
+/*******************************************************************************
+ * Secure Payload execution state information i.e. aarch32 or aarch64
+ ******************************************************************************/
+#define SP_AARCH32 MODE_RW_32
+#define SP_AARCH64 MODE_RW_64
+
+/*******************************************************************************
+ * Number of cpus present on this platform. TODO: Rely on a topology
+ * tree to determine this in the future to avoid assumptions about mpidr
+ * allocation
+ ******************************************************************************/
+#define TLKD_CORE_COUNT PLATFORM_CORE_COUNT
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define TLKD_C_RT_CTX_X19 0x0
+#define TLKD_C_RT_CTX_X20 0x8
+#define TLKD_C_RT_CTX_X21 0x10
+#define TLKD_C_RT_CTX_X22 0x18
+#define TLKD_C_RT_CTX_X23 0x20
+#define TLKD_C_RT_CTX_X24 0x28
+#define TLKD_C_RT_CTX_X25 0x30
+#define TLKD_C_RT_CTX_X26 0x38
+#define TLKD_C_RT_CTX_X27 0x40
+#define TLKD_C_RT_CTX_X28 0x48
+#define TLKD_C_RT_CTX_X29 0x50
+#define TLKD_C_RT_CTX_X30 0x58
+#define TLKD_C_RT_CTX_SIZE 0x60
+#define TLKD_C_RT_CTX_ENTRIES (TLKD_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <lib/cassert.h>
+
+/* AArch64 callee saved general purpose register context structure. */
+DEFINE_REG_STRUCT(c_rt_regs, TLKD_C_RT_CTX_ENTRIES);
+
+/*
+ * Compile time assertion to ensure that both the compiler and linker
+ * have the same double word aligned view of the size of the C runtime
+ * register context.
+ */
+CASSERT(TLKD_C_RT_CTX_SIZE == sizeof(c_rt_regs_t),
+ assert_tlkd_c_rt_regs_size_mismatch);
+
+/*******************************************************************************
+ * Structure which helps the SPD to maintain the per-cpu state of the SP.
+ * 'state' - collection of flags to track SP state e.g. on/off
+ * 'mpidr' - mpidr to associate a context with a cpu
+ * 'c_rt_ctx' - stack address to restore C runtime context from after
+ * returning from a synchronous entry into the SP.
+ * 'cpu_ctx' - space to maintain SP architectural state
+ ******************************************************************************/
+typedef struct tlk_context {
+ uint32_t state;
+ uint64_t mpidr;
+ uint64_t c_rt_ctx;
+ cpu_context_t cpu_ctx;
+} tlk_context_t;
+
+/*******************************************************************************
+ * Function & Data prototypes
+ ******************************************************************************/
+uint64_t tlkd_va_translate(uintptr_t va, int type);
+uint64_t tlkd_enter_sp(uint64_t *c_rt_ctx);
+void __dead2 tlkd_exit_sp(uint64_t c_rt_ctx, uint64_t ret);
+uint64_t tlkd_synchronous_sp_entry(tlk_context_t *tlk_ctx);
+void __dead2 tlkd_synchronous_sp_exit(tlk_context_t *tlk_ctx,
+ uint64_t ret);
+void tlkd_init_tlk_ep_state(struct entry_point_info *tlk_entry_point,
+ uint32_t rw,
+ uint64_t pc,
+ tlk_context_t *tlk_ctx);
+
+#endif /*__ASSEMBLER__*/
+
+#endif /* TLKD_PRIVATE_H */
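
A quick host-side check of how the yield-SMC-active helpers above compose: the flag is a single bit at position 2 of the per-cpu 'state' word, so set/get/clr must round-trip cleanly. The macro bodies below are copied from the header; only the main() harness is illustrative.

#include <assert.h>
#include <stdint.h>

/* Same bit layout as tlkd_private.h: one flag at bit 2 of 'state'. */
#define YIELD_SMC_ACTIVE_FLAG_SHIFT     2
#define YIELD_SMC_ACTIVE_FLAG_MASK      1
#define get_yield_smc_active_flag(state) \
        (((state) >> YIELD_SMC_ACTIVE_FLAG_SHIFT) & YIELD_SMC_ACTIVE_FLAG_MASK)
#define set_yield_smc_active_flag(state) \
        ((state) |= (1 << YIELD_SMC_ACTIVE_FLAG_SHIFT))
#define clr_yield_smc_active_flag(state) \
        ((state) &= ~(YIELD_SMC_ACTIVE_FLAG_MASK << YIELD_SMC_ACTIVE_FLAG_SHIFT))

int main(void)
{
        uint32_t state = 0;

        assert(get_yield_smc_active_flag(state) == 0);
        set_yield_smc_active_flag(state);       /* a yielding SMC is in flight */
        assert(get_yield_smc_active_flag(state) == 1);
        clr_yield_smc_active_flag(state);       /* request completed or aborted */
        assert(get_yield_smc_active_flag(state) == 0);
        return 0;
}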
diff --git a/services/spd/trusty/generic-arm64-smcall.c b/services/spd/trusty/generic-arm64-smcall.c
new file mode 100644
index 0000000..5c3a628
--- /dev/null
+++ b/services/spd/trusty/generic-arm64-smcall.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <platform_def.h>
+
+#include "generic-arm64-smcall.h"
+
+#ifndef PLAT_ARM_GICD_BASE
+#ifdef GICD_BASE
+#define PLAT_ARM_GICD_BASE GICD_BASE
+#define PLAT_ARM_GICC_BASE GICC_BASE
+#ifdef GICR_BASE
+#define PLAT_ARM_GICR_BASE GICR_BASE
+#endif
+#else
+#error PLAT_ARM_GICD_BASE or GICD_BASE must be defined
+#endif
+#endif
+
+#ifndef PLAT_ARM_GICR_BASE
+#define PLAT_ARM_GICR_BASE SMC_UNK
+#endif
+
+int trusty_disable_serial_debug;
+
+struct dputc_state {
+ char linebuf[128];
+ unsigned l;
+};
+
+static struct dputc_state dputc_state[2];
+
+static void trusty_dputc(char ch, int secure)
+{
+ unsigned i;
+ struct dputc_state *s = &dputc_state[!secure];
+
+ if (trusty_disable_serial_debug)
+ return;
+
+ s->linebuf[s->l++] = ch;
+ if (s->l == sizeof(s->linebuf) || ch == '\n') {
+ if (secure)
+ printf("secure os: ");
+ else
+ printf("non-secure os: ");
+ for (i = 0; i < s->l; i++) {
+ putchar(s->linebuf[i]);
+ }
+ if (ch != '\n') {
+ printf(" <...>\n");
+ }
+ s->l = 0;
+ }
+}
+
+static uint64_t trusty_get_reg_base(uint32_t reg)
+{
+ switch (reg) {
+ case SMC_GET_GIC_BASE_GICD:
+ return PLAT_ARM_GICD_BASE;
+
+ case SMC_GET_GIC_BASE_GICC:
+ return PLAT_ARM_GICC_BASE;
+
+ case SMC_GET_GIC_BASE_GICR:
+ return PLAT_ARM_GICR_BASE;
+
+ default:
+ NOTICE("%s(0x%x) unknown reg\n", __func__, reg);
+ return SMC_UNK;
+ }
+}
+
+static uintptr_t trusty_generic_platform_smc(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ switch (smc_fid) {
+ case SMC_FC_DEBUG_PUTC:
+ trusty_dputc(x1, is_caller_secure(flags));
+ SMC_RET1(handle, 0);
+
+ case SMC_FC_GET_REG_BASE:
+ case SMC_FC64_GET_REG_BASE:
+ SMC_RET1(handle, trusty_get_reg_base(x1));
+
+ default:
+ NOTICE("%s(0x%x, 0x%lx) unknown smc\n", __func__, smc_fid, x1);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
+
+/* Define a SPD runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+ trusty_fast,
+
+ SMC_ENTITY_PLATFORM_MONITOR,
+ SMC_ENTITY_PLATFORM_MONITOR,
+ SMC_TYPE_FAST,
+ NULL,
+ trusty_generic_platform_smc
+);
+
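trusty_dputc() above implements a small line buffer per world: characters accumulate until a newline or 128 bytes arrive, then the whole line is printed with a "secure os:" or "non-secure os:" prefix, and " <...>" marks lines that were cut short. A host-side analogue of that buffering logic (standalone; main() and the sample string are illustrative only):

#include <stdio.h>

/* Host-side analogue of trusty_dputc(): buffer one line per source and
 * flush with a prefix on newline or when the buffer fills up. */
struct dputc_state {
        char linebuf[128];
        unsigned l;
};

static struct dputc_state dputc_state[2];       /* [0] secure, [1] non-secure */

static void dputc(char ch, int secure)
{
        struct dputc_state *s = &dputc_state[!secure];

        s->linebuf[s->l++] = ch;
        if (s->l == sizeof(s->linebuf) || ch == '\n') {
                fputs(secure ? "secure os: " : "non-secure os: ", stdout);
                for (unsigned i = 0; i < s->l; i++)
                        putchar(s->linebuf[i]);
                if (ch != '\n')
                        printf(" <...>\n");     /* line was cut short */
                s->l = 0;
        }
}

int main(void)
{
        const char *msg = "hello from the secure world\n";

        for (const char *p = msg; *p != '\0'; p++)
                dputc(*p, 1);
        return 0;
}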
diff --git a/services/spd/trusty/generic-arm64-smcall.h b/services/spd/trusty/generic-arm64-smcall.h
new file mode 100644
index 0000000..ac03469
--- /dev/null
+++ b/services/spd/trusty/generic-arm64-smcall.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "smcall.h"
+
+#define SMC_ENTITY_PLATFORM_MONITOR 61
+
+/*
+ * SMC calls implemented by EL3 monitor
+ */
+
+/*
+ * Write character in r1 to debug console
+ */
+#define SMC_FC_DEBUG_PUTC SMC_FASTCALL_NR(SMC_ENTITY_PLATFORM_MONITOR, 0x0)
+
+/*
+ * Get register base address
+ * r1: SMC_GET_GIC_BASE_GICD or SMC_GET_GIC_BASE_GICC
+ */
+#define SMC_GET_GIC_BASE_GICD 0
+#define SMC_GET_GIC_BASE_GICC 1
+#define SMC_GET_GIC_BASE_GICR 2
+#define SMC_FC_GET_REG_BASE SMC_FASTCALL_NR(SMC_ENTITY_PLATFORM_MONITOR, 0x1)
+#define SMC_FC64_GET_REG_BASE SMC_FASTCALL64_NR(SMC_ENTITY_PLATFORM_MONITOR, 0x1)
diff --git a/services/spd/trusty/sm_err.h b/services/spd/trusty/sm_err.h
new file mode 100644
index 0000000..80a8748
--- /dev/null
+++ b/services/spd/trusty/sm_err.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SM_ERR_H
+#define SM_ERR_H
+
+/* Errors from the secure monitor */
+#define SM_ERR_UNDEFINED_SMC 0xFFFFFFFF /* Unknown SMC (defined by ARM DEN 0028A(0.9.0)) */
+#define SM_ERR_INVALID_PARAMETERS -2
+#define SM_ERR_INTERRUPTED -3 /* Got interrupted. Call back with restart SMC */
+#define SM_ERR_UNEXPECTED_RESTART -4 /* Got a restart SMC when we didn't expect it */
+#define SM_ERR_BUSY -5 /* Temporarily busy. Call back with original args */
+#define SM_ERR_INTERLEAVED_SMC -6 /* Got a trusted_service SMC when a restart SMC is required */
+#define SM_ERR_INTERNAL_FAILURE -7 /* Unknown error */
+#define SM_ERR_NOT_SUPPORTED -8
+#define SM_ERR_NOT_ALLOWED -9 /* SMC call not allowed */
+#define SM_ERR_END_OF_INPUT -10
+
+#endif /* SM_ERR_H */
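
When debugging SMC failures it can help to print these codes symbolically. A minimal helper along those lines (the function name sm_err_str is illustrative, not part of Trusty or TF-A, and the snippet assumes sm_err.h is on the include path):

#include <stdint.h>
#include <stdio.h>

#include "sm_err.h"

static const char *sm_err_str(int32_t err)
{
        switch (err) {
        case (int32_t)SM_ERR_UNDEFINED_SMC:     return "undefined SMC";
        case SM_ERR_INVALID_PARAMETERS:         return "invalid parameters";
        case SM_ERR_INTERRUPTED:                return "interrupted";
        case SM_ERR_UNEXPECTED_RESTART:         return "unexpected restart";
        case SM_ERR_BUSY:                       return "busy";
        case SM_ERR_INTERLEAVED_SMC:            return "interleaved SMC";
        case SM_ERR_INTERNAL_FAILURE:           return "internal failure";
        case SM_ERR_NOT_SUPPORTED:              return "not supported";
        case SM_ERR_NOT_ALLOWED:                return "not allowed";
        case SM_ERR_END_OF_INPUT:               return "end of input";
        default:                                return "unknown error";
        }
}

int main(void)
{
        printf("%d -> %s\n", SM_ERR_BUSY, sm_err_str(SM_ERR_BUSY));
        return 0;
}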
diff --git a/services/spd/trusty/smcall.h b/services/spd/trusty/smcall.h
new file mode 100644
index 0000000..c66f7db
--- /dev/null
+++ b/services/spd/trusty/smcall.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SMCALL_H
+#define SMCALL_H
+
+#define SMC_NUM_ENTITIES 64U
+#define SMC_NUM_ARGS 4U
+#define SMC_NUM_PARAMS (SMC_NUM_ARGS - 1U)
+
+#define SMC_IS_FASTCALL(smc_nr) ((smc_nr) & 0x80000000U)
+#define SMC_IS_SMC64(smc_nr) ((smc_nr) & 0x40000000U)
+#define SMC_ENTITY(smc_nr) (((smc_nr) & 0x3F000000U) >> 24U)
+#define SMC_FUNCTION(smc_nr) ((smc_nr) & 0x0000FFFFU)
+
+#define SMC_NR(entity, fn, fastcall, smc64) \
+ (((((uint32_t)(fastcall)) & 0x1U) << 31U) | \
+ (((smc64) & 0x1U) << 30U) | \
+ (((entity) & 0x3FU) << 24U) | \
+ ((fn) & 0xFFFFU))
+
+#define SMC_FASTCALL_NR(entity, fn) SMC_NR((entity), (fn), 1U, 0U)
+#define SMC_FASTCALL64_NR(entity, fn) SMC_NR((entity), (fn), 1U, 1U)
+#define SMC_YIELDCALL_NR(entity, fn) SMC_NR((entity), (fn), 0U, 0U)
+#define SMC_YIELDCALL64_NR(entity, fn) SMC_NR((entity), (fn), 0U, 1U)
+
+#define SMC_ENTITY_ARCH 0U /* ARM Architecture calls */
+#define SMC_ENTITY_CPU 1U /* CPU Service calls */
+#define SMC_ENTITY_SIP 2U /* SIP Service calls */
+#define SMC_ENTITY_OEM 3U /* OEM Service calls */
+#define SMC_ENTITY_STD 4U /* Standard Service calls */
+#define SMC_ENTITY_RESERVED 5U /* Reserved for future use */
+#define SMC_ENTITY_TRUSTED_APP 48U /* Trusted Application calls */
+#define SMC_ENTITY_TRUSTED_OS 50U /* Trusted OS calls */
+#define SMC_ENTITY_LOGGING 51U /* Used for secure -> nonsecure logging */
+#define SMC_ENTITY_SECURE_MONITOR 60U /* Trusted OS calls internal to secure monitor */
+
+/* FC = Fast call, YC = Yielding call */
+#define SMC_YC_RESTART_LAST SMC_YIELDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0U)
+#define SMC_YC_NOP SMC_YIELDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1U)
+
+/*
+ * Return from secure os to non-secure os with return value in r1
+ */
+#define SMC_YC_NS_RETURN SMC_YIELDCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0U)
+
+#define SMC_FC_RESERVED SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0U)
+#define SMC_FC_FIQ_EXIT SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1U)
+#define SMC_FC_REQUEST_FIQ SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2U)
+#define SMC_FC_GET_NEXT_IRQ SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 3U)
+#define SMC_FC_FIQ_ENTER SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 4U)
+
+#define SMC_FC64_SET_FIQ_HANDLER SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 5U)
+#define SMC_FC64_GET_FIQ_REGS SMC_FASTCALL64_NR (SMC_ENTITY_SECURE_MONITOR, 6U)
+
+#define SMC_FC_CPU_SUSPEND SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 7U)
+#define SMC_FC_CPU_RESUME SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 8U)
+
+#define SMC_FC_AARCH_SWITCH SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 9U)
+#define SMC_FC_GET_VERSION_STR SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 10U)
+
+/* Trusted OS entity calls */
+#define SMC_YC_VIRTIO_GET_DESCR SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20U)
+#define SMC_YC_VIRTIO_START SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21U)
+#define SMC_YC_VIRTIO_STOP SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22U)
+
+#define SMC_YC_VDEV_RESET SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23U)
+#define SMC_YC_VDEV_KICK_VQ SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24U)
+#define SMC_YC_SET_ROT_PARAMS SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 65535U)
+
+/*
+ * Standard Trusted OS Function IDs that fall under Trusted OS call range
+ * according to SMC calling convention
+ */
+#define SMC_FC64_GET_UUID SMC_FASTCALL64_NR(63U, 0xFF01U) /* Implementation UID */
+#define SMC_FC_GET_UUID SMC_FASTCALL_NR(63U, 0xFF01U) /* Implementation UID */
+
+#endif /* SMCALL_H */
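
The SMC_NR() encoding above packs the call type into bits 31:30, the entity into bits 29:24 and the function number into bits 15:0, and the SMC_IS_*/SMC_ENTITY/SMC_FUNCTION macros undo it. A small round-trip check, assuming smcall.h is on the include path (the harness itself is illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#include "smcall.h"

int main(void)
{
        /* Encode a yielding 32-bit call to the secure monitor entity... */
        uint32_t fid = SMC_YIELDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1U);

        /* ...and check that the field extractors give the pieces back. */
        assert(!SMC_IS_FASTCALL(fid));
        assert(!SMC_IS_SMC64(fid));
        assert(SMC_ENTITY(fid) == SMC_ENTITY_SECURE_MONITOR);
        assert(SMC_FUNCTION(fid) == 1U);
        assert(fid == SMC_YC_NOP);

        printf("SMC_YC_NOP = 0x%08x\n", (unsigned int)fid);
        return 0;
}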
diff --git a/services/spd/trusty/trusty.c b/services/spd/trusty/trusty.c
new file mode 100644
index 0000000..7daebcd
--- /dev/null
+++ b/services/spd/trusty/trusty.c
@@ -0,0 +1,541 @@
+/*
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <inttypes.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/smccc.h>
+#include <plat/common/platform.h>
+#include <tools_share/uuid.h>
+
+#include "sm_err.h"
+#include "smcall.h"
+
+/* Trusty UID: RFC-4122 compliant UUID version 4 */
+DEFINE_SVC_UUID2(trusty_uuid,
+ 0x40ee25f0, 0xa2bc, 0x304c, 0x8c, 0x4c,
+ 0xa1, 0x73, 0xc5, 0x7d, 0x8a, 0xf1);
+
+/* macro to check if Hypervisor is enabled in the HCR_EL2 register */
+#define HYP_ENABLE_FLAG 0x286001U
+
+/* length of Trusty's input parameters (in bytes) */
+#define TRUSTY_PARAMS_LEN_BYTES (4096U * 2)
+
+struct trusty_stack {
+ uint8_t space[PLATFORM_STACK_SIZE] __aligned(16);
+ uint32_t end;
+};
+
+struct trusty_cpu_ctx {
+ cpu_context_t cpu_ctx;
+ void *saved_sp;
+ uint32_t saved_security_state;
+ int32_t fiq_handler_active;
+ uint64_t fiq_handler_pc;
+ uint64_t fiq_handler_cpsr;
+ uint64_t fiq_handler_sp;
+ uint64_t fiq_pc;
+ uint64_t fiq_cpsr;
+ uint64_t fiq_sp_el1;
+ gp_regs_t fiq_gpregs;
+ struct trusty_stack secure_stack;
+};
+
+struct smc_args {
+ uint64_t r0;
+ uint64_t r1;
+ uint64_t r2;
+ uint64_t r3;
+ uint64_t r4;
+ uint64_t r5;
+ uint64_t r6;
+ uint64_t r7;
+};
+
+static struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];
+
+struct smc_args trusty_init_context_stack(void **sp, void *new_stack);
+struct smc_args trusty_context_switch_helper(void **sp, void *smc_params);
+
+static uint32_t current_vmid;
+
+static struct trusty_cpu_ctx *get_trusty_ctx(void)
+{
+ return &trusty_cpu_ctx[plat_my_core_pos()];
+}
+
+static bool is_hypervisor_mode(void)
+{
+ uint64_t hcr = read_hcr();
+
+ return ((hcr & HYP_ENABLE_FLAG) != 0U) ? true : false;
+}
+
+static struct smc_args trusty_context_switch(uint32_t security_state, uint64_t r0,
+ uint64_t r1, uint64_t r2, uint64_t r3)
+{
+ struct smc_args args, ret_args;
+ struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+ struct trusty_cpu_ctx *ctx_smc;
+
+ assert(ctx->saved_security_state != security_state);
+
+ args.r7 = 0;
+ if (is_hypervisor_mode()) {
+ /* According to the ARM DEN0028A spec, VMID is stored in x7 */
+ ctx_smc = cm_get_context(NON_SECURE);
+ assert(ctx_smc != NULL);
+ args.r7 = SMC_GET_GP(ctx_smc, CTX_GPREG_X7);
+ }
+ /* r4, r5, r6 reserved for future use. */
+ args.r6 = 0;
+ args.r5 = 0;
+ args.r4 = 0;
+ args.r3 = r3;
+ args.r2 = r2;
+ args.r1 = r1;
+ args.r0 = r0;
+
+ /*
+ * To avoid the additional overhead in PSCI flow, skip FP context
+ * saving/restoring in case of CPU suspend and resume, assuming that
+ * when it's needed the PSCI caller has preserved FP context before
+ * going here.
+ */
+ if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
+ fpregs_context_save(get_fpregs_ctx(cm_get_context(security_state)));
+ cm_el1_sysregs_context_save(security_state);
+
+ ctx->saved_security_state = security_state;
+ ret_args = trusty_context_switch_helper(&ctx->saved_sp, &args);
+
+ assert(ctx->saved_security_state == ((security_state == 0U) ? 1U : 0U));
+
+ cm_el1_sysregs_context_restore(security_state);
+ if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
+ fpregs_context_restore(get_fpregs_ctx(cm_get_context(security_state)));
+
+ cm_set_next_eret_context(security_state);
+
+ return ret_args;
+}
+
+static uint64_t trusty_fiq_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ struct smc_args ret;
+ struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+ assert(!is_caller_secure(flags));
+
+ ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_ENTER, 0, 0, 0);
+ if (ret.r0 != 0U) {
+ SMC_RET0(handle);
+ }
+
+ if (ctx->fiq_handler_active != 0) {
+ INFO("%s: fiq handler already active\n", __func__);
+ SMC_RET0(handle);
+ }
+
+ ctx->fiq_handler_active = 1;
+ (void)memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
+ ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
+ ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
+ ctx->fiq_sp_el1 = read_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1);
+
+ write_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
+ cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, (uint32_t)ctx->fiq_handler_cpsr);
+
+ SMC_RET0(handle);
+}
+
+static uint64_t trusty_set_fiq_handler(void *handle, uint64_t cpu,
+ uint64_t handler, uint64_t stack)
+{
+ struct trusty_cpu_ctx *ctx;
+
+ if (cpu >= (uint64_t)PLATFORM_CORE_COUNT) {
+ ERROR("%s: cpu %" PRId64 " >= %d\n", __func__, cpu, PLATFORM_CORE_COUNT);
+ return (uint64_t)SM_ERR_INVALID_PARAMETERS;
+ }
+
+ ctx = &trusty_cpu_ctx[cpu];
+ ctx->fiq_handler_pc = handler;
+ ctx->fiq_handler_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
+ ctx->fiq_handler_sp = stack;
+
+ SMC_RET1(handle, 0);
+}
+
+static uint64_t trusty_get_fiq_regs(void *handle)
+{
+ struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+ uint64_t sp_el0 = read_ctx_reg(&ctx->fiq_gpregs, CTX_GPREG_SP_EL0);
+
+ SMC_RET4(handle, ctx->fiq_pc, ctx->fiq_cpsr, sp_el0, ctx->fiq_sp_el1);
+}
+
+static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t x3)
+{
+ struct smc_args ret;
+ struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+ if (ctx->fiq_handler_active == 0) {
+ NOTICE("%s: fiq handler not active\n", __func__);
+ SMC_RET1(handle, (uint64_t)SM_ERR_INVALID_PARAMETERS);
+ }
+
+ ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_EXIT, 0, 0, 0);
+ if (ret.r0 != 1U) {
+ INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %" PRId64 "\n",
+ __func__, handle, ret.r0);
+ }
+
+ /*
+ * Restore register state to state recorded on fiq entry.
+ *
+ * x0, sp_el1, pc and cpsr need to be restored because el1 cannot
+ * restore them.
+ *
+ * x1-x4 and x8-x17 need to be restored here because smc_handler64
+ * corrupts them (el1 code also restored them).
+ */
+ (void)memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
+ ctx->fiq_handler_active = 0;
+ write_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
+ cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, (uint32_t)ctx->fiq_cpsr);
+
+ SMC_RET0(handle);
+}
+
+static uintptr_t trusty_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ struct smc_args ret;
+ uint32_t vmid = 0U;
+ entry_point_info_t *ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+
+ /*
+ * Return success for SET_ROT_PARAMS if Trusty is not present, as
+ * Verified Boot is not even supported and returning success here
+ * would not compromise the boot process.
+ */
+ if ((ep_info == NULL) && (smc_fid == SMC_YC_SET_ROT_PARAMS)) {
+ SMC_RET1(handle, 0);
+ } else if (ep_info == NULL) {
+ SMC_RET1(handle, SMC_UNK);
+ } else {
+ ; /* do nothing */
+ }
+
+ if (is_caller_secure(flags)) {
+ if (smc_fid == SMC_YC_NS_RETURN) {
+ ret = trusty_context_switch(SECURE, x1, 0, 0, 0);
+ SMC_RET8(handle, ret.r0, ret.r1, ret.r2, ret.r3,
+ ret.r4, ret.r5, ret.r6, ret.r7);
+ }
+ INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) \
+ cpu %d, unknown smc\n",
+ __func__, smc_fid, x1, x2, x3, x4, cookie, handle, flags,
+ plat_my_core_pos());
+ SMC_RET1(handle, SMC_UNK);
+ } else {
+ switch (smc_fid) {
+ case SMC_FC64_GET_UUID:
+ case SMC_FC_GET_UUID:
+ /* provide the UUID for the service to the client */
+ SMC_UUID_RET(handle, trusty_uuid);
+ break;
+ case SMC_FC64_SET_FIQ_HANDLER:
+ return trusty_set_fiq_handler(handle, x1, x2, x3);
+ case SMC_FC64_GET_FIQ_REGS:
+ return trusty_get_fiq_regs(handle);
+ case SMC_FC_FIQ_EXIT:
+ return trusty_fiq_exit(handle, x1, x2, x3);
+ default:
+ /* Not all OENs greater than SMC_ENTITY_SECURE_MONITOR are supported */
+ if (SMC_ENTITY(smc_fid) > SMC_ENTITY_SECURE_MONITOR) {
+ VERBOSE("%s: unsupported SMC FID (0x%x)\n", __func__, smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ if (is_hypervisor_mode())
+ vmid = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+ if ((current_vmid != 0) && (current_vmid != vmid)) {
+ /* Printing this message can make the SMC
+ * mechanism misbehave in a multi-guest
+ * environment. Change it to WARN if you need it.
+ */
+ VERBOSE("Previous SMC not finished.\n");
+ SMC_RET1(handle, SM_ERR_BUSY);
+ }
+ current_vmid = vmid;
+ ret = trusty_context_switch(NON_SECURE, smc_fid, x1,
+ x2, x3);
+ current_vmid = 0;
+ SMC_RET1(handle, ret.r0);
+ }
+ }
+}
+
+static int32_t trusty_init(void)
+{
+ entry_point_info_t *ep_info;
+ struct smc_args zero_args = {0};
+ struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+ uint32_t cpu = plat_my_core_pos();
+ uint64_t reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
+ CTX_SPSR_EL3));
+
+ /*
+ * Get information about the Trusty image. Its absence is a critical
+ * failure.
+ */
+ ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ assert(ep_info != NULL);
+
+ fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ cm_set_context(&ctx->cpu_ctx, SECURE);
+ cm_init_my_context(ep_info);
+
+ /*
+ * Adjust secondary cpu entry point for 32 bit images to the
+ * end of exception vectors
+ */
+ if ((cpu != 0U) && (reg_width == MODE_RW_32)) {
+ INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
+ cpu, ep_info->pc + (1U << 5));
+ cm_set_elr_el3(SECURE, ep_info->pc + (1U << 5));
+ }
+
+ cm_el1_sysregs_context_restore(SECURE);
+ fpregs_context_restore(get_fpregs_ctx(cm_get_context(SECURE)));
+ cm_set_next_eret_context(SECURE);
+
+ ctx->saved_security_state = ~0U; /* initial saved state is invalid */
+ (void)trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack.end);
+
+ (void)trusty_context_switch_helper(&ctx->saved_sp, &zero_args);
+
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
+ cm_set_next_eret_context(NON_SECURE);
+
+ return 1;
+}
+
+static void trusty_cpu_suspend(uint32_t off)
+{
+ struct smc_args ret;
+
+ ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_SUSPEND, off, 0, 0);
+ if (ret.r0 != 0U) {
+ INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %" PRId64 "\n",
+ __func__, plat_my_core_pos(), ret.r0);
+ }
+}
+
+static void trusty_cpu_resume(uint32_t on)
+{
+ struct smc_args ret;
+
+ ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_RESUME, on, 0, 0);
+ if (ret.r0 != 0U) {
+ INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %" PRId64 "\n",
+ __func__, plat_my_core_pos(), ret.r0);
+ }
+}
+
+static int32_t trusty_cpu_off_handler(u_register_t max_off_lvl)
+{
+ trusty_cpu_suspend(max_off_lvl);
+
+ return 0;
+}
+
+static void trusty_cpu_on_finish_handler(u_register_t max_off_lvl)
+{
+ struct trusty_cpu_ctx *ctx = get_trusty_ctx();
+
+ if (ctx->saved_sp == NULL) {
+ (void)trusty_init();
+ } else {
+ trusty_cpu_resume(max_off_lvl);
+ }
+}
+
+static void trusty_cpu_suspend_handler(u_register_t max_off_lvl)
+{
+ trusty_cpu_suspend(max_off_lvl);
+}
+
+static void trusty_cpu_suspend_finish_handler(u_register_t max_off_lvl)
+{
+ trusty_cpu_resume(max_off_lvl);
+}
+
+static const spd_pm_ops_t trusty_pm = {
+ .svc_off = trusty_cpu_off_handler,
+ .svc_suspend = trusty_cpu_suspend_handler,
+ .svc_on_finish = trusty_cpu_on_finish_handler,
+ .svc_suspend_finish = trusty_cpu_suspend_finish_handler,
+};
+
+void plat_trusty_set_boot_args(aapcs64_params_t *args);
+
+#if !defined(TSP_SEC_MEM_SIZE) && defined(BL32_MEM_SIZE)
+#define TSP_SEC_MEM_SIZE BL32_MEM_SIZE
+#endif
+
+#ifdef TSP_SEC_MEM_SIZE
+#pragma weak plat_trusty_set_boot_args
+void plat_trusty_set_boot_args(aapcs64_params_t *args)
+{
+ args->arg0 = TSP_SEC_MEM_SIZE;
+}
+#endif
+
+static int32_t trusty_setup(void)
+{
+ entry_point_info_t *ep_info;
+ uint32_t instr;
+ uint32_t flags;
+ int32_t ret;
+ bool aarch32 = false;
+
+ /* Get trusty's entry point info */
+ ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (ep_info == NULL) {
+ VERBOSE("Trusty image missing.\n");
+ return -1;
+ }
+
+ /* memmap first page of trusty's code memory before peeking */
+ ret = mmap_add_dynamic_region(ep_info->pc, /* PA */
+ ep_info->pc, /* VA */
+ PAGE_SIZE, /* size */
+ MT_SECURE | MT_RW_DATA); /* attrs */
+ assert(ret == 0);
+
+ /* peek into trusty's code to see if we have a 32-bit or 64-bit image */
+ instr = *(uint32_t *)ep_info->pc;
+
+ if (instr >> 24 == 0xeaU) {
+ INFO("trusty: Found 32 bit image\n");
+ aarch32 = true;
+ } else if (instr >> 8 == 0xd53810U || instr >> 16 == 0x9400U) {
+ INFO("trusty: Found 64 bit image\n");
+ } else {
+ ERROR("trusty: Found unknown image, 0x%x\n", instr);
+ return -1;
+ }
+
+ /* unmap trusty's memory page */
+ (void)mmap_remove_dynamic_region(ep_info->pc, PAGE_SIZE);
+
+ SET_PARAM_HEAD(ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
+ if (!aarch32)
+ ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+ else
+ ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
+ SPSR_E_LITTLE,
+ DAIF_FIQ_BIT |
+ DAIF_IRQ_BIT |
+ DAIF_ABT_BIT);
+ (void)memset(&ep_info->args, 0, sizeof(ep_info->args));
+ plat_trusty_set_boot_args(&ep_info->args);
+
+ /* register init handler */
+ bl31_register_bl32_init(trusty_init);
+
+ /* register power management hooks */
+ psci_register_spd_pm_hook(&trusty_pm);
+
+ /* register interrupt handler */
+ flags = 0;
+ set_interrupt_rm_flag(flags, NON_SECURE);
+ ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+ trusty_fiq_handler,
+ flags);
+ if (ret != 0) {
+ VERBOSE("trusty: failed to register fiq handler, ret = %d\n", ret);
+ }
+
+ if (aarch32) {
+ entry_point_info_t *ns_ep_info;
+ uint32_t spsr;
+
+ ns_ep_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
+ if (ns_ep_info == NULL) {
+ NOTICE("Trusty: non-secure image missing.\n");
+ return -1;
+ }
+ spsr = ns_ep_info->spsr;
+ if (GET_RW(spsr) == MODE_RW_64 && GET_EL(spsr) == MODE_EL2) {
+ spsr &= ~(MODE_EL_MASK << MODE_EL_SHIFT);
+ spsr |= MODE_EL1 << MODE_EL_SHIFT;
+ }
+ if (GET_RW(spsr) == MODE_RW_32 && GET_M32(spsr) == MODE32_hyp) {
+ spsr &= ~(MODE32_MASK << MODE32_SHIFT);
+ spsr |= MODE32_svc << MODE32_SHIFT;
+ }
+ if (spsr != ns_ep_info->spsr) {
+ NOTICE("Trusty: Switch bl33 from EL2 to EL1 (spsr 0x%x -> 0x%x)\n",
+ ns_ep_info->spsr, spsr);
+ ns_ep_info->spsr = spsr;
+ }
+ }
+
+ return 0;
+}
+
+/* Define a SPD runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+ trusty_fast,
+
+ OEN_TOS_START,
+ OEN_TOS_END,
+ SMC_TYPE_FAST,
+ trusty_setup,
+ trusty_smc_handler
+);
+
+/* Define a SPD runtime service descriptor for yielding SMC calls */
+DECLARE_RT_SVC(
+ trusty_std,
+
+ OEN_TAP_START,
+ SMC_ENTITY_SECURE_MONITOR,
+ SMC_TYPE_YIELD,
+ NULL,
+ trusty_smc_handler
+);
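
trusty_setup() above decides the image's execution state by peeking at its first instruction word: a 0xEA top byte is an AArch32 unconditional branch, while the 0xD53810xx and 0x9400xxxx patterns are the AArch64 first instructions it accepts. A host-side mirror of that classification (function name and sample values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the instruction peek in trusty_setup(): classify an image
 * from its first word. Returns 32, 64, or -1 for unknown. */
static int trusty_image_bits(uint32_t first_instr)
{
        if ((first_instr >> 24) == 0xeaU)               /* AArch32 unconditional branch (B) */
                return 32;
        if ((first_instr >> 8) == 0xd53810U ||          /* AArch64 MRS-type first instruction */
            (first_instr >> 16) == 0x9400U)             /* AArch64 BL with a small forward offset */
                return 64;
        return -1;
}

int main(void)
{
        printf("%d\n", trusty_image_bits(0xea000006U)); /* AArch32 branch */
        printf("%d\n", trusty_image_bits(0x94000010U)); /* AArch64 BL */
        return 0;
}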
diff --git a/services/spd/trusty/trusty.mk b/services/spd/trusty/trusty.mk
new file mode 100644
index 0000000..43b80bb
--- /dev/null
+++ b/services/spd/trusty/trusty.mk
@@ -0,0 +1,18 @@
+#
+# Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+SPD_INCLUDES :=
+
+SPD_SOURCES := services/spd/trusty/trusty.c \
+ services/spd/trusty/trusty_helpers.S
+
+ifeq (${TRUSTY_SPD_WITH_GENERIC_SERVICES},1)
+SPD_SOURCES += services/spd/trusty/generic-arm64-smcall.c
+endif
+
+NEED_BL32 := yes
+
+CTX_INCLUDE_FPREGS := 1
diff --git a/services/spd/trusty/trusty_helpers.S b/services/spd/trusty/trusty_helpers.S
new file mode 100644
index 0000000..da5cb57
--- /dev/null
+++ b/services/spd/trusty/trusty_helpers.S
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+.macro push ra, rb, sp=sp
+ stp \ra, \rb, [\sp,#-16]!
+.endm
+
+.macro pop ra, rb, sp=sp
+ ldp \ra, \rb, [\sp], #16
+.endm
+
+ .global trusty_context_switch_helper
+func trusty_context_switch_helper
+ push x8, xzr
+ push x19, x20
+ push x21, x22
+ push x23, x24
+ push x25, x26
+ push x27, x28
+ push x29, x30
+
+ mov x9, sp
+ ldr x10, [x0]
+ mov sp, x10
+ str x9, [x0]
+
+ pop x29, x30
+ pop x27, x28
+ pop x25, x26
+ pop x23, x24
+ pop x21, x22
+ pop x19, x20
+ pop x8, xzr
+
+ ldr x2, [x1]
+ ldr x3, [x1, #0x08]
+ ldr x4, [x1, #0x10]
+ ldr x5, [x1, #0x18]
+ ldr x6, [x1, #0x20]
+ ldr x7, [x1, #0x28]
+ ldr x10, [x1, #0x30]
+ ldr x11, [x1, #0x38]
+
+ stp x2, x3, [x8]
+ stp x4, x5, [x8, #16]
+ stp x6, x7, [x8, #32]
+ stp x10, x11, [x8, #48]
+
+ ret
+endfunc trusty_context_switch_helper
+
+ .global trusty_init_context_stack
+func trusty_init_context_stack
+ push x8, xzr, x1
+ push xzr, xzr, x1
+ push xzr, xzr, x1
+ push xzr, xzr, x1
+ push xzr, xzr, x1
+ push xzr, xzr, x1
+ adr x9, el3_exit
+ push xzr, x9, x1
+ str x1, [x0]
+ ret
+endfunc trusty_init_context_stack
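
trusty_init_context_stack() above builds a 112-byte frame (seven 16-byte pushes) that trusty_context_switch_helper() later unwinds. The frame is zero-filled apart from the saved x8 and the saved x30, which points at el3_exit so the first switch into this context "returns" through el3_exit. A C view of that frame, with offsets inferred from the push/pop order in this file (illustrative only, not a TF-A type):

#include <stdint.h>

/* Saved-register frame, lowest address first (the order the helper pops). */
struct trusty_saved_frame {
        uint64_t x29, x30;      /* x30 is seeded with the address of el3_exit */
        uint64_t x27, x28;
        uint64_t x25, x26;
        uint64_t x23, x24;
        uint64_t x21, x22;
        uint64_t x19, x20;
        uint64_t x8, pad;       /* x8: pointer used to store the returned smc_args */
};

_Static_assert(sizeof(struct trusty_saved_frame) == 112,
               "frame must match the seven 16-byte pushes");

int main(void)
{
        return (sizeof(struct trusty_saved_frame) == 112) ? 0 : 1;
}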
diff --git a/services/spd/tspd/tspd.mk b/services/spd/tspd/tspd.mk
new file mode 100644
index 0000000..bda8338
--- /dev/null
+++ b/services/spd/tspd/tspd.mk
@@ -0,0 +1,46 @@
+#
+# Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TSPD_DIR := services/spd/tspd
+
+ifeq (${ERROR_DEPRECATED},0)
+SPD_INCLUDES := -Iinclude/bl32/tsp
+endif
+
+SPD_SOURCES := services/spd/tspd/tspd_common.c \
+ services/spd/tspd/tspd_helpers.S \
+ services/spd/tspd/tspd_main.c \
+ services/spd/tspd/tspd_pm.c
+
+# This dispatcher is paired with a Test Secure Payload source and we intend to
+# build the Test Secure Payload along with this dispatcher.
+#
+# In cases where an associated Secure Payload lies outside this build
+# system/source tree, the dispatcher Makefile can either invoke an external
+# build command or assume it is pre-built.
+
+BL32_ROOT := bl32/tsp
+
+# Include SP's Makefile. The assumption is that the TSP's build system is
+# compatible with that of Trusted Firmware, and it'll add and populate necessary
+# build targets and variables
+include ${BL32_ROOT}/tsp.mk
+
+# Let the top-level Makefile know that we intend to build the SP from source
+NEED_BL32 := yes
+
+# Flag used to enable routing of non-secure interrupts to EL3 when they are
+# generated while the code is executing in S-EL1/0.
+TSP_NS_INTR_ASYNC_PREEMPT := 0
+
+ifeq ($(EL3_EXCEPTION_HANDLING),1)
+ifeq ($(TSP_NS_INTR_ASYNC_PREEMPT),0)
+$(error When EL3_EXCEPTION_HANDLING=1, TSP_NS_INTR_ASYNC_PREEMPT must also be 1)
+endif
+endif
+
+$(eval $(call assert_boolean,TSP_NS_INTR_ASYNC_PREEMPT))
+$(eval $(call add_define,TSP_NS_INTR_ASYNC_PREEMPT))
diff --git a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c
new file mode 100644
index 0000000..063fd01
--- /dev/null
+++ b/services/spd/tspd/tspd_common.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <bl32/tsp/tsp.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/utils.h>
+
+#include "tspd_private.h"
+
+/*******************************************************************************
+ * Given a secure payload entrypoint info pointer, entry point PC, register
+ * width, cpu id & pointer to a context data structure, this function will
+ * initialize tsp context and entry point info for the secure payload
+ ******************************************************************************/
+void tspd_init_tsp_ep_state(struct entry_point_info *tsp_entry_point,
+ uint32_t rw,
+ uint64_t pc,
+ tsp_context_t *tsp_ctx)
+{
+ uint32_t ep_attr;
+
+ /* Passing a NULL context is a critical programming error */
+ assert(tsp_ctx);
+ assert(tsp_entry_point);
+ assert(pc);
+
+ /*
+ * We support AArch64 TSP for now.
+ * TODO: Add support for AArch32 TSP
+ */
+ assert(rw == TSP_AARCH64);
+
+ /* Associate this context with the cpu specified */
+ tsp_ctx->mpidr = read_mpidr_el1();
+ tsp_ctx->state = 0;
+ set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
+ clr_yield_smc_active_flag(tsp_ctx->state);
+
+ cm_set_context(&tsp_ctx->cpu_ctx, SECURE);
+
+ /* initialise an entrypoint to set up the CPU context */
+ ep_attr = SECURE | EP_ST_ENABLE;
+ if (read_sctlr_el3() & SCTLR_EE_BIT)
+ ep_attr |= EP_EE_BIG;
+ SET_PARAM_HEAD(tsp_entry_point, PARAM_EP, VERSION_1, ep_attr);
+
+ tsp_entry_point->pc = pc;
+ tsp_entry_point->spsr = SPSR_64(MODE_EL1,
+ MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+ zeromem(&tsp_entry_point->args, sizeof(tsp_entry_point->args));
+}
+
+/*******************************************************************************
+ * This function takes an SP context pointer and:
+ * 1. Applies the S-EL1 system register context from tsp_ctx->cpu_ctx.
+ * 2. Saves the current C runtime state (callee saved registers) on the stack
+ * frame and saves a reference to this state.
+ * 3. Calls el3_exit() so that the EL3 system and general purpose registers
+ * from the tsp_ctx->cpu_ctx are used to enter the secure payload image.
+ ******************************************************************************/
+uint64_t tspd_synchronous_sp_entry(tsp_context_t *tsp_ctx)
+{
+ uint64_t rc;
+
+ assert(tsp_ctx != NULL);
+ assert(tsp_ctx->c_rt_ctx == 0);
+
+ /* Apply the Secure EL1 system register context and switch to it */
+ assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ rc = tspd_enter_sp(&tsp_ctx->c_rt_ctx);
+#if ENABLE_ASSERTIONS
+ tsp_ctx->c_rt_ctx = 0;
+#endif
+
+ return rc;
+}
+
+
+/*******************************************************************************
+ * This function takes an SP context pointer and:
+ * 1. Saves the S-EL1 system register context to tsp_ctx->cpu_ctx.
+ * 2. Restores the current C runtime state (callee saved registers) from the
+ * stack frame using the reference to this state saved in tspd_enter_sp().
+ * 3. It does not need to save any general purpose or EL3 system register state
+ * as the generic smc entry routine should have saved those.
+ ******************************************************************************/
+void tspd_synchronous_sp_exit(tsp_context_t *tsp_ctx, uint64_t ret)
+{
+ assert(tsp_ctx != NULL);
+ /* Save the Secure EL1 system register context */
+ assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
+ cm_el1_sysregs_context_save(SECURE);
+
+ assert(tsp_ctx->c_rt_ctx != 0);
+ tspd_exit_sp(tsp_ctx->c_rt_ctx, ret);
+
+ /* Should never reach here */
+ assert(0);
+}
+
+/*******************************************************************************
+ * This function takes an SP context pointer and aborts any preempted SMC
+ * request.
+ * It returns 1 if there was a preempted SMC request, 0 otherwise.
+ ******************************************************************************/
+int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx)
+{
+ if (!get_yield_smc_active_flag(tsp_ctx->state))
+ return 0;
+
+ /* Abort any preempted SMC request */
+ clr_yield_smc_active_flag(tsp_ctx->state);
+
+ /*
+ * Arrange for an entry into the test secure payload. It will
+ * be returned via TSP_ABORT_DONE case in tspd_smc_handler.
+ */
+ cm_set_elr_el3(SECURE,
+ (uint64_t) &tsp_vectors->abort_yield_smc_entry);
+ uint64_t rc = tspd_synchronous_sp_entry(tsp_ctx);
+
+ if (rc != 0)
+ panic();
+
+ return 1;
+}
+
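The synchronous entry/exit pair above behaves much like setjmp/longjmp across the world switch: tspd_synchronous_sp_entry() saves the C runtime context and only "returns" when the SP later drives tspd_synchronous_sp_exit() with a return value. A plain C sketch of that control flow (purely an analogy; the real code saves callee-saved registers in tspd_enter_sp/tspd_exit_sp rather than using setjmp):

#include <setjmp.h>
#include <stdint.h>
#include <stdio.h>

static jmp_buf c_rt_ctx;        /* plays the role of tsp_ctx->c_rt_ctx */

/* Stand-in for running the secure payload until it issues a *_DONE SMC. */
static void fake_secure_payload(void)
{
        puts("payload: doing work, then reporting completion");
        longjmp(c_rt_ctx, 42);  /* like tspd_synchronous_sp_exit(ctx, 42) */
}

/* Like tspd_synchronous_sp_entry(): save the C context, enter the payload,
 * and resume here with the value passed to the exit call. */
static uint64_t synchronous_sp_entry(void)
{
        int rc = setjmp(c_rt_ctx);

        if (rc == 0)
                fake_secure_payload();  /* first pass: enter the payload */
        return (uint64_t)rc;            /* second pass: payload has exited */
}

int main(void)
{
        printf("payload returned %llu\n",
               (unsigned long long)synchronous_sp_entry());
        return 0;
}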
diff --git a/services/spd/tspd/tspd_helpers.S b/services/spd/tspd/tspd_helpers.S
new file mode 100644
index 0000000..f15d66b
--- /dev/null
+++ b/services/spd/tspd/tspd_helpers.S
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "tspd_private.h"
+
+ .global tspd_enter_sp
+ /* ---------------------------------------------
+ * This function is called with SP_EL0 as stack.
+ * Here we stash our EL3 callee-saved registers
+ * on to the stack as a part of saving the C
+ * runtime and enter the secure payload.
+ * 'x0' contains a pointer to the memory where
+ * the address of the C runtime context is to be
+ * saved.
+ * ---------------------------------------------
+ */
+func tspd_enter_sp
+ /* Make space for the registers that we're going to save */
+ mov x3, sp
+ str x3, [x0, #0]
+ sub sp, sp, #TSPD_C_RT_CTX_SIZE
+
+ /* Save callee-saved registers on to the stack */
+ stp x19, x20, [sp, #TSPD_C_RT_CTX_X19]
+ stp x21, x22, [sp, #TSPD_C_RT_CTX_X21]
+ stp x23, x24, [sp, #TSPD_C_RT_CTX_X23]
+ stp x25, x26, [sp, #TSPD_C_RT_CTX_X25]
+ stp x27, x28, [sp, #TSPD_C_RT_CTX_X27]
+ stp x29, x30, [sp, #TSPD_C_RT_CTX_X29]
+
+ /* ---------------------------------------------
+ * Everything is set up now. el3_exit() will
+ * use the secure context to restore the
+ * general purpose and EL3 system registers
+ * before the ERET into the secure payload.
+ * ---------------------------------------------
+ */
+ b el3_exit
+endfunc tspd_enter_sp
+
+ /* ---------------------------------------------
+ * This function is called with 'x0' pointing to a C
+ * runtime context saved in tspd_enter_sp(). It
+ * restores the saved registers and jumps to
+ * that runtime with 'x0' as the new sp. This
+ * destroys the C runtime context that had been
+ * built on the stack below the saved context by
+ * the caller. Later the second parameter 'x1'
+ * is passed as return value to the caller
+ * ---------------------------------------------
+ */
+ .global tspd_exit_sp
+func tspd_exit_sp
+ /* Restore the previous stack */
+ mov sp, x0
+
+ /* Restore callee-saved registers from the stack */
+ ldp x19, x20, [x0, #(TSPD_C_RT_CTX_X19 - TSPD_C_RT_CTX_SIZE)]
+ ldp x21, x22, [x0, #(TSPD_C_RT_CTX_X21 - TSPD_C_RT_CTX_SIZE)]
+ ldp x23, x24, [x0, #(TSPD_C_RT_CTX_X23 - TSPD_C_RT_CTX_SIZE)]
+ ldp x25, x26, [x0, #(TSPD_C_RT_CTX_X25 - TSPD_C_RT_CTX_SIZE)]
+ ldp x27, x28, [x0, #(TSPD_C_RT_CTX_X27 - TSPD_C_RT_CTX_SIZE)]
+ ldp x29, x30, [x0, #(TSPD_C_RT_CTX_X29 - TSPD_C_RT_CTX_SIZE)]
+
+ /* ---------------------------------------------
+ * This should take us back to the instruction
+ * after the call to the last tspd_enter_sp().
+ * Place the second parameter in x0 so that the
+ * caller will see it as a return value from the
+ * original entry call
+ * ---------------------------------------------
+ */
+ mov x0, x1
+ ret
+endfunc tspd_exit_sp
diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c
new file mode 100644
index 0000000..6cb4992
--- /dev/null
+++ b/services/spd/tspd/tspd_main.c
@@ -0,0 +1,819 @@
+/*
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/*******************************************************************************
+ * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
+ * plug-in component to the Secure Monitor, registered as a runtime service. The
+ * SPD is expected to be a functional extension of the Secure Payload (SP) that
+ * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
+ * the Trusted OS/Applications range to the dispatcher. The SPD will either
+ * handle the request locally or delegate it to the Secure Payload. It is also
+ * responsible for initialising and maintaining communication with the SP.
+ ******************************************************************************/
+#include <assert.h>
+#include <errno.h>
+#include <stddef.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <bl31/ehf.h>
+#include <bl32/tsp/tsp.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <plat/common/platform.h>
+#include <tools_share/uuid.h>
+
+#include "tspd_private.h"
+
+/*******************************************************************************
+ * Address of the entrypoint vector table in the Secure Payload. It is
+ * initialised once on the primary core after a cold boot.
+ ******************************************************************************/
+tsp_vectors_t *tsp_vectors;
+
+/*******************************************************************************
+ * Array to keep track of per-cpu Secure Payload state
+ ******************************************************************************/
+tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];
+
+
+/* TSP UID */
+DEFINE_SVC_UUID2(tsp_uuid,
+ 0xa056305b, 0x9132, 0x7b42, 0x98, 0x11,
+ 0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa);
+
+int32_t tspd_init(void);
+
+/*
+ * This helper function handles Secure EL1 preemption. The preemption could be
+ * due to Non Secure interrupts or EL3 interrupts. In both cases we context
+ * switch to the normal world and in case of EL3 interrupts, it will again be
+ * routed to EL3 which will get handled at the exception vectors.
+ */
+uint64_t tspd_handle_sp_preemption(void *handle)
+{
+ cpu_context_t *ns_cpu_context;
+
+ assert(handle == cm_get_context(SECURE));
+ cm_el1_sysregs_context_save(SECURE);
+ /* Get a reference to the non-secure context */
+ ns_cpu_context = cm_get_context(NON_SECURE);
+ assert(ns_cpu_context);
+
+ /*
+ * To allow Secure EL1 interrupt handler to re-enter TSP while TSP
+ * is preempted, the secure system register context which will get
+ * overwritten must be additionally saved. This is currently done
+ * by the TSPD S-EL1 interrupt handler.
+ */
+
+ /*
+ * Restore non-secure state.
+ */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ /*
+ * The TSP was preempted during execution of a Yielding SMC Call.
+ * Return back to the normal world with SMC_PREEMPTED as error
+ * code in x0.
+ */
+ SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
+}
+
+/*******************************************************************************
+ * This function is the handler registered for S-EL1 interrupts by the TSPD. It
+ * validates the interrupt and upon success arranges entry into the TSP at
+ * 'tsp_sel1_intr_entry()' for handling the interrupt.
+ * Typically, interrupts for a specific security state get handled in the same
+ * security exception level if the execution is in the same security state. For
+ * example, if a non-secure interrupt gets fired when the CPU is executing in
+ * NS-EL2 it gets handled in the non-secure world.
+ * However, interrupts belonging to the opposite security state typically demand
+ * a world (context) switch. This is in line with the security principle which
+ * states that a secure interrupt has to be handled in the secure world.
+ * Hence, the TSPD in EL3 expects the context (handle) for a secure interrupt to
+ * be non-secure and vice versa.
+ * However, a race condition between non-secure and secure interrupts can lead to
+ * a scenario where the above assumptions do not hold true. This is demonstrated
+ * below through Note 1.
+ ******************************************************************************/
+static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ uint32_t linear_id;
+ tsp_context_t *tsp_ctx;
+
+ /* Get a reference to this cpu's TSP context */
+ linear_id = plat_my_core_pos();
+ tsp_ctx = &tspd_sp_context[linear_id];
+
+#if TSP_NS_INTR_ASYNC_PREEMPT
+
+ /*
+ * Note 1:
+ * Under the current interrupt routing model, interrupts from the other
+ * world are routed to EL3 when TSP_NS_INTR_ASYNC_PREEMPT is enabled.
+ * Consider the following scenario:
+ * 1/ A non-secure payload (like tftf) requests a secure service from
+ * the TSP by invoking a yielding SMC call.
+ * 2/ Later, execution jumps to the TSP in S-EL1 with the help of the
+ * TSP Dispatcher in the Secure Monitor (EL3).
+ * 3/ While the CPU is executing the TSP, a non-secure interrupt fires;
+ * this demands a context switch to the non-secure world through the
+ * secure monitor.
+ * 4/ Consequently, the TSP in S-EL1 gets asynchronously pre-empted and
+ * execution switches to the secure monitor (EL3).
+ * 5/ EL3 tries to triage the (non-secure) interrupt based on the
+ * highest pending interrupt.
+ * 6/ However, while the NS interrupt was pending, the secure timer
+ * fires, which makes an S-EL1 interrupt pending as well.
+ * 7/ Hence, execution jumps to this companion handler of the S-EL1
+ * interrupt (i.e., tspd_sel1_interrupt_handler) even though the TSP
+ * was pre-empted due to the non-secure interrupt.
+ * 8/ The above sequence of events explains how the TSP can be pre-empted
+ * by an S-EL1 interrupt indirectly, in an asynchronous way.
+ * 9/ Hence, we track TSP pre-emption by an S-EL1 interrupt using a
+ * boolean variable per core.
+ * 10/ This helps us indicate that the SMC call for the TSP service was
+ * pre-empted when execution resumes in the non-secure world.
+ */
+
+ /* Check the security state when the exception was generated */
+ if (get_interrupt_src_ss(flags) == NON_SECURE) {
+ /* Sanity check the pointer to this cpu's context */
+ assert(handle == cm_get_context(NON_SECURE));
+
+ /* Save the non-secure context before entering the TSP */
+ cm_el1_sysregs_context_save(NON_SECURE);
+ tsp_ctx->preempted_by_sel1_intr = false;
+ } else {
+ /* Sanity check the pointer to this cpu's context */
+ assert(handle == cm_get_context(SECURE));
+
+ /* Save the secure context before entering the TSP for S-EL1
+ * interrupt handling
+ */
+ cm_el1_sysregs_context_save(SECURE);
+ tsp_ctx->preempted_by_sel1_intr = true;
+ }
+#else
+ /* Check the security state when the exception was generated */
+ assert(get_interrupt_src_ss(flags) == NON_SECURE);
+
+ /* Sanity check the pointer to this cpu's context */
+ assert(handle == cm_get_context(NON_SECURE));
+
+ /* Save the non-secure context before entering the TSP */
+ cm_el1_sysregs_context_save(NON_SECURE);
+#endif
+
+ assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));
+
+ /*
+ * Determine if the TSP was previously preempted. Its last known
+ * context has to be preserved in this case.
+ * The TSP should return control to the TSPD after handling this
+ * S-EL1 interrupt. Preserve essential EL3 context to allow entry into
+ * the TSP at the S-EL1 interrupt entry point using the 'cpu_context'
+ * structure. There is no need to save the secure system register
+ * context since the TSP is supposed to preserve it during S-EL1
+ * interrupt handling.
+ */
+ if (get_yield_smc_active_flag(tsp_ctx->state)) {
+ tsp_ctx->saved_spsr_el3 = (uint32_t)SMC_GET_EL3(&tsp_ctx->cpu_ctx,
+ CTX_SPSR_EL3);
+ tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
+ CTX_ELR_EL3);
+#if TSP_NS_INTR_ASYNC_PREEMPT
+ memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE);
+#endif
+ }
+
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->sel1_intr_entry,
+ SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));
+
+ cm_set_next_eret_context(SECURE);
+
+ /*
+ * Tell the TSP that it has to handle a S-EL1 interrupt synchronously.
+ * Also the instruction in normal world where the interrupt was
+ * generated is passed for debugging purposes. It is safe to retrieve
+ * this address from ELR_EL3 as the secure context will not take effect
+ * until el3_exit().
+ */
+ SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_SEL1_INTR_AND_RETURN, read_elr_el3());
+}
+
+#if TSP_NS_INTR_ASYNC_PREEMPT
+/*******************************************************************************
+ * This function is the handler registered for Non secure interrupts by the
+ * TSPD. It validates the interrupt and upon success arranges entry into the
+ * normal world for handling the interrupt.
+ ******************************************************************************/
+static uint64_t tspd_ns_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ /* Check the security state when the exception was generated */
+ assert(get_interrupt_src_ss(flags) == SECURE);
+
+ /*
+ * Disable the routing of NS interrupts from secure world to EL3 while
+ * interrupted on this core.
+ */
+ disable_intr_rm_local(INTR_TYPE_NS, SECURE);
+
+ return tspd_handle_sp_preemption(handle);
+}
+#endif
+
+/*******************************************************************************
+ * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
+ * (aarch32/aarch64) if not already known and initialises the context for entry
+ * into the SP for its initialisation.
+ ******************************************************************************/
+static int32_t tspd_setup(void)
+{
+ entry_point_info_t *tsp_ep_info;
+ uint32_t linear_id;
+
+ linear_id = plat_my_core_pos();
+
+ /*
+ * Get information about the Secure Payload (BL32) image. Its
+ * absence is a critical failure. TODO: Add support to
+ * conditionally include the SPD service
+ */
+ tsp_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (!tsp_ep_info) {
+ WARN("No TSP provided by BL2 boot loader, booting device"
+ " without TSP initialization. SMCs destined for TSP"
+ " will return SMC_UNK\n");
+ return 1;
+ }
+
+ /*
+ * If there's no valid entry point for SP, we return a non-zero value
+ * signalling failure to initialize the service. We bail out without
+ * registering any handlers
+ */
+ if (!tsp_ep_info->pc)
+ return 1;
+
+ /*
+ * We could inspect the SP image and determine its execution
+ * state i.e whether AArch32 or AArch64. Assuming it's AArch64
+ * for the time being.
+ */
+ tspd_init_tsp_ep_state(tsp_ep_info,
+ TSP_AARCH64,
+ tsp_ep_info->pc,
+ &tspd_sp_context[linear_id]);
+
+#if TSP_INIT_ASYNC
+ bl31_set_next_image_type(SECURE);
+#else
+ /*
+ * All TSPD initialization done. Now register our init function with
+ * BL31 for deferred invocation
+ */
+ bl31_register_bl32_init(&tspd_init);
+#endif
+ return 0;
+}
+
+/*******************************************************************************
+ * This function passes control to the Secure Payload image (BL32) for the first
+ * time on the primary cpu after a cold boot. It assumes that a valid secure
+ * context has already been created by tspd_setup() which can be directly used.
+ * It also assumes that a valid non-secure context has been initialised by PSCI
+ * so it does not need to save and restore any non-secure state. This function
+ * performs a synchronous entry into the Secure payload. The SP passes control
+ * back to this routine through a SMC.
+ ******************************************************************************/
+int32_t tspd_init(void)
+{
+ uint32_t linear_id = plat_my_core_pos();
+ tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
+ entry_point_info_t *tsp_entry_point;
+ uint64_t rc;
+
+ /*
+ * Get information about the Secure Payload (BL32) image. Its
+ * absence is a critical failure.
+ */
+ tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
+ assert(tsp_entry_point);
+
+ cm_init_my_context(tsp_entry_point);
+
+ /*
+ * Arrange for an entry into the test secure payload. It will be
+ * returned via TSP_ENTRY_DONE case
+ */
+ rc = tspd_synchronous_sp_entry(tsp_ctx);
+ assert(rc != 0);
+
+ return rc;
+}
+
+
+/*******************************************************************************
+ * This function is responsible for handling all SMCs in the Trusted OS/App
+ * range from the non-secure state as defined in the SMC Calling Convention
+ * Document. It is also responsible for communicating with the Secure payload
+ * to delegate work and return results back to the non-secure state. Lastly it
+ * will also return any information that the secure payload needs to do the
+ * work assigned to it.
+ ******************************************************************************/
+static uintptr_t tspd_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ cpu_context_t *ns_cpu_context;
+ uint32_t linear_id = plat_my_core_pos(), ns;
+ tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
+ uint64_t rc;
+#if TSP_INIT_ASYNC
+ entry_point_info_t *next_image_info;
+#endif
+
+ /* Determine which security state this SMC originated from */
+ ns = is_caller_non_secure(flags);
+
+ switch (smc_fid) {
+
+ /*
+ * This function ID is used by TSP to indicate that it was
+ * preempted by a normal world IRQ.
+ *
+ */
+ case TSP_PREEMPTED:
+ if (ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ return tspd_handle_sp_preemption(handle);
+
+ /*
+ * This function ID is used only by the TSP to indicate that it has
+ * finished handling a S-EL1 interrupt or was preempted by a higher
+ * priority pending EL3 interrupt. Execution should resume
+ * in the normal world.
+ */
+ case TSP_HANDLED_S_EL1_INTR:
+ if (ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ assert(handle == cm_get_context(SECURE));
+
+ /*
+ * Restore the relevant EL3 state which was saved to service
+ * this SMC.
+ */
+ if (get_yield_smc_active_flag(tsp_ctx->state)) {
+ SMC_SET_EL3(&tsp_ctx->cpu_ctx,
+ CTX_SPSR_EL3,
+ tsp_ctx->saved_spsr_el3);
+ SMC_SET_EL3(&tsp_ctx->cpu_ctx,
+ CTX_ELR_EL3,
+ tsp_ctx->saved_elr_el3);
+#if TSP_NS_INTR_ASYNC_PREEMPT
+ /*
+ * Need to restore the previously interrupted
+ * secure context.
+ */
+ memcpy(&tsp_ctx->cpu_ctx, &tsp_ctx->sp_ctx,
+ TSPD_SP_CTX_SIZE);
+#endif
+ }
+
+ /* Get a reference to the non-secure context */
+ ns_cpu_context = cm_get_context(NON_SECURE);
+ assert(ns_cpu_context);
+
+ /*
+ * Restore non-secure state. There is no need to save the
+ * secure system register context since the TSP was supposed
+ * to preserve it during S-EL1 interrupt handling.
+ */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ /* Refer to Note 1 in function tspd_sel1_interrupt_handler()*/
+#if TSP_NS_INTR_ASYNC_PREEMPT
+ if (tsp_ctx->preempted_by_sel1_intr) {
+ /* Reset the flag */
+ tsp_ctx->preempted_by_sel1_intr = false;
+
+ SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
+ } else {
+ SMC_RET0((uint64_t) ns_cpu_context);
+ }
+#else
+ SMC_RET0((uint64_t) ns_cpu_context);
+#endif
+
+
+ /*
+ * This function ID is used only by the SP to indicate it has
+ * finished initialising itself after a cold boot
+ */
+ case TSP_ENTRY_DONE:
+ if (ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ /*
+ * Stash the SP entry points information. This is done
+ * only once on the primary cpu
+ */
+ assert(tsp_vectors == NULL);
+ tsp_vectors = (tsp_vectors_t *) x1;
+
+ if (tsp_vectors) {
+ set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);
+
+ /*
+ * TSP has been successfully initialized. Register power
+ * management hooks with PSCI
+ */
+ psci_register_spd_pm_hook(&tspd_pm);
+
+ /*
+ * Register an interrupt handler for S-EL1 interrupts
+ * when generated during code executing in the
+ * non-secure state.
+ */
+ flags = 0;
+ set_interrupt_rm_flag(flags, NON_SECURE);
+ rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+ tspd_sel1_interrupt_handler,
+ flags);
+ if (rc)
+ panic();
+
+#if TSP_NS_INTR_ASYNC_PREEMPT
+ /*
+			 * Register an interrupt handler for NS interrupts that
+			 * are generated while code is executing in the secure
+			 * state and are routed to EL3.
+ */
+ flags = 0;
+ set_interrupt_rm_flag(flags, SECURE);
+
+ rc = register_interrupt_type_handler(INTR_TYPE_NS,
+ tspd_ns_interrupt_handler,
+ flags);
+ if (rc)
+ panic();
+
+ /*
+ * Disable the NS interrupt locally.
+ */
+ disable_intr_rm_local(INTR_TYPE_NS, SECURE);
+#endif
+ }
+
+
+#if TSP_INIT_ASYNC
+ /* Save the Secure EL1 system register context */
+ assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
+ cm_el1_sysregs_context_save(SECURE);
+
+ /* Program EL3 registers to enable entry into the next EL */
+ next_image_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
+ assert(next_image_info);
+ assert(NON_SECURE ==
+ GET_SECURITY_STATE(next_image_info->h.attr));
+
+ cm_init_my_context(next_image_info);
+ cm_prepare_el3_exit(NON_SECURE);
+ SMC_RET0(cm_get_context(NON_SECURE));
+#else
+ /*
+ * SP reports completion. The SPD must have initiated
+ * the original request through a synchronous entry
+ * into the SP. Jump back to the original C runtime
+ * context.
+ */
+ tspd_synchronous_sp_exit(tsp_ctx, x1);
+ break;
+#endif
+ /*
+ * This function ID is used only by the SP to indicate it has finished
+ * aborting a preempted Yielding SMC Call.
+ */
+ case TSP_ABORT_DONE:
+
+ /*
+ * These function IDs are used only by the SP to indicate it has
+ * finished:
+ * 1. turning itself on in response to an earlier psci
+ * cpu_on request
+ * 2. resuming itself after an earlier psci cpu_suspend
+ * request.
+ */
+ case TSP_ON_DONE:
+ case TSP_RESUME_DONE:
+
+ /*
+ * These function IDs are used only by the SP to indicate it has
+ * finished:
+ * 1. suspending itself after an earlier psci cpu_suspend
+ * request.
+ * 2. turning itself off in response to an earlier psci
+ * cpu_off request.
+ */
+ case TSP_OFF_DONE:
+ case TSP_SUSPEND_DONE:
+ case TSP_SYSTEM_OFF_DONE:
+ case TSP_SYSTEM_RESET_DONE:
+ if (ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ /*
+ * SP reports completion. The SPD must have initiated the
+ * original request through a synchronous entry into the SP.
+ * Jump back to the original C runtime context, and pass x1 as
+ * return value to the caller
+ */
+ tspd_synchronous_sp_exit(tsp_ctx, x1);
+ break;
+
+ /*
+ * Request from non-secure client to perform an
+ * arithmetic operation or response from secure
+ * payload to an earlier request.
+ */
+ case TSP_FAST_FID(TSP_ADD):
+ case TSP_FAST_FID(TSP_SUB):
+ case TSP_FAST_FID(TSP_MUL):
+ case TSP_FAST_FID(TSP_DIV):
+
+ case TSP_YIELD_FID(TSP_ADD):
+ case TSP_YIELD_FID(TSP_SUB):
+ case TSP_YIELD_FID(TSP_MUL):
+ case TSP_YIELD_FID(TSP_DIV):
+ /*
+ * Request from non-secure client to perform a check
+ * of the DIT PSTATE bit.
+ */
+ case TSP_YIELD_FID(TSP_CHECK_DIT):
+ if (ns) {
+ /*
+ * This is a fresh request from the non-secure client.
+ * The parameters are in x1 and x2. Figure out which
+ * registers need to be preserved, save the non-secure
+ * state and send the request to the secure payload.
+ */
+ assert(handle == cm_get_context(NON_SECURE));
+
+ /* Check if we are already preempted */
+ if (get_yield_smc_active_flag(tsp_ctx->state))
+ SMC_RET1(handle, SMC_UNK);
+
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /* Save x1 and x2 for use by TSP_GET_ARGS call below */
+ store_tsp_args(tsp_ctx, x1, x2);
+
+ /*
+ * We are done stashing the non-secure context. Ask the
+ * secure payload to do the work now.
+ */
+
+ /*
+ * Verify if there is a valid context to use, copy the
+ * operation type and parameters to the secure context
+ * and jump to the fast smc entry point in the secure
+ * payload. Entry into S-EL1 will take place upon exit
+ * from this function.
+ */
+ assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));
+
+			/*
+			 * Set appropriate entry for SMC.
+			 * We expect the TSP to manage the PSTATE.I and PSTATE.F
+			 * flags as appropriate.
+			 */
+ if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
+ cm_set_elr_el3(SECURE, (uint64_t)
+ &tsp_vectors->fast_smc_entry);
+ } else {
+ set_yield_smc_active_flag(tsp_ctx->state);
+ cm_set_elr_el3(SECURE, (uint64_t)
+ &tsp_vectors->yield_smc_entry);
+#if TSP_NS_INTR_ASYNC_PREEMPT
+ /*
+ * Enable the routing of NS interrupts to EL3
+ * during processing of a Yielding SMC Call on
+ * this core.
+ */
+ enable_intr_rm_local(INTR_TYPE_NS, SECURE);
+#endif
+
+#if EL3_EXCEPTION_HANDLING
+ /*
+ * With EL3 exception handling, while an SMC is
+ * being processed, Non-secure interrupts can't
+ * preempt Secure execution. However, for
+ * yielding SMCs, we want preemption to happen;
+ * so explicitly allow NS preemption in this
+ * case, and supply the preemption return code
+ * for TSP.
+ */
+ ehf_allow_ns_preemption(TSP_PREEMPTED);
+#endif
+ }
+
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+ SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
+ } else {
+ /*
+ * This is the result from the secure client of an
+ * earlier request. The results are in x1-x3. Copy it
+ * into the non-secure context, save the secure state
+ * and return to the non-secure state.
+ */
+ assert(handle == cm_get_context(SECURE));
+ cm_el1_sysregs_context_save(SECURE);
+
+ /* Get a reference to the non-secure context */
+ ns_cpu_context = cm_get_context(NON_SECURE);
+ assert(ns_cpu_context);
+
+ /* Restore non-secure state */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+ if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_YIELD) {
+ clr_yield_smc_active_flag(tsp_ctx->state);
+#if TSP_NS_INTR_ASYNC_PREEMPT
+ /*
+ * Disable the routing of NS interrupts to EL3
+ * after processing of a Yielding SMC Call on
+ * this core is finished.
+ */
+ disable_intr_rm_local(INTR_TYPE_NS, SECURE);
+#endif
+ }
+
+ SMC_RET3(ns_cpu_context, x1, x2, x3);
+ }
+ assert(0); /* Unreachable */
+
+ /*
+ * Request from the non-secure world to abort a preempted Yielding SMC
+ * Call.
+ */
+ case TSP_FID_ABORT:
+ /* ABORT should only be invoked by normal world */
+ if (!ns) {
+ assert(0);
+ break;
+ }
+
+ assert(handle == cm_get_context(NON_SECURE));
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /* Abort the preempted SMC request */
+ if (!tspd_abort_preempted_smc(tsp_ctx)) {
+ /*
+ * If there was no preempted SMC to abort, return
+ * SMC_UNK.
+ *
+ * Restoring the NON_SECURE context is not necessary as
+ * the synchronous entry did not take place if the
+ * return code of tspd_abort_preempted_smc is zero.
+ */
+ cm_set_next_eret_context(NON_SECURE);
+ break;
+ }
+
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+ SMC_RET1(handle, SMC_OK);
+
+ /*
+ * Request from non secure world to resume the preempted
+ * Yielding SMC Call.
+ */
+ case TSP_FID_RESUME:
+ /* RESUME should be invoked only by normal world */
+ if (!ns) {
+ assert(0);
+ break;
+ }
+
+ /*
+ * This is a resume request from the non-secure client.
+ * save the non-secure state and send the request to
+ * the secure payload.
+ */
+ assert(handle == cm_get_context(NON_SECURE));
+
+ /* Check if we are already preempted before resume */
+ if (!get_yield_smc_active_flag(tsp_ctx->state))
+ SMC_RET1(handle, SMC_UNK);
+
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /*
+ * We are done stashing the non-secure context. Ask the
+ * secure payload to do the work now.
+ */
+#if TSP_NS_INTR_ASYNC_PREEMPT
+ /*
+ * Enable the routing of NS interrupts to EL3 during resumption
+ * of a Yielding SMC Call on this core.
+ */
+ enable_intr_rm_local(INTR_TYPE_NS, SECURE);
+#endif
+
+#if EL3_EXCEPTION_HANDLING
+ /*
+ * Allow the resumed yielding SMC processing to be preempted by
+ * Non-secure interrupts. Also, supply the preemption return
+ * code for TSP.
+ */
+ ehf_allow_ns_preemption(TSP_PREEMPTED);
+#endif
+
+ /* We just need to return to the preempted point in
+ * TSP and the execution will resume as normal.
+ */
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+ SMC_RET0(&tsp_ctx->cpu_ctx);
+
+ /*
+ * This is a request from the secure payload for more arguments
+ * for an ongoing arithmetic operation requested by the
+ * non-secure world. Simply return the arguments from the non-
+ * secure client in the original call.
+ */
+ case TSP_GET_ARGS:
+ if (ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ get_tsp_args(tsp_ctx, x1, x2);
+ SMC_RET2(handle, x1, x2);
+
+ case TOS_CALL_COUNT:
+ /*
+ * Return the number of service function IDs implemented to
+		 * provide service to the non-secure world.
+ */
+ SMC_RET1(handle, TSP_NUM_FID);
+
+ case TOS_UID:
+ /* Return TSP UID to the caller */
+ SMC_UUID_RET(handle, tsp_uuid);
+
+ case TOS_CALL_VERSION:
+ /* Return the version of current implementation */
+ SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);
+
+ default:
+ break;
+ }
+
+ SMC_RET1(handle, SMC_UNK);
+}
+
+/* Define a SPD runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+ tspd_fast,
+
+ OEN_TOS_START,
+ OEN_TOS_END,
+ SMC_TYPE_FAST,
+ tspd_setup,
+ tspd_smc_handler
+);
+
+/* Define a SPD runtime service descriptor for Yielding SMC Calls */
+DECLARE_RT_SVC(
+ tspd_std,
+
+ OEN_TOS_START,
+ OEN_TOS_END,
+ SMC_TYPE_YIELD,
+ NULL,
+ tspd_smc_handler
+);
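+
+/*
+ * Illustrative (non-normative) sketch of a yielding arithmetic request as
+ * handled by tspd_smc_handler() above; the identifiers are from this file,
+ * the flow itself is only an example:
+ *
+ *   NS client:  SMC(TSP_YIELD_FID(TSP_ADD), x1, x2)
+ *   TSPD:       store_tsp_args(tsp_ctx, x1, x2);
+ *               set_yield_smc_active_flag(tsp_ctx->state);
+ *               ERET to tsp_vectors->yield_smc_entry
+ *   TSP:        SMC(TSP_GET_ARGS)            -> TSPD hands back x1, x2
+ *   (optional)  NS IRQ preempts the TSP      -> NS client sees SMC_PREEMPTED
+ *   NS client:  SMC(TSP_FID_RESUME)          -> TSPD re-enters the TSP
+ *   TSP:        SMC(TSP_YIELD_FID(TSP_ADD), results)
+ *   TSPD:       clr_yield_smc_active_flag(tsp_ctx->state);
+ *               SMC_RET3(ns_cpu_context, x1, x2, x3)
+ */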
diff --git a/services/spd/tspd/tspd_pm.c b/services/spd/tspd/tspd_pm.c
new file mode 100644
index 0000000..b95ee8f
--- /dev/null
+++ b/services/spd/tspd/tspd_pm.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <bl32/tsp/tsp.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <plat/common/platform.h>
+
+#include "tspd_private.h"
+
+/*******************************************************************************
+ * The target cpu is being turned on. Allow the TSPD/TSP to perform any actions
+ * needed. Nothing at the moment.
+ ******************************************************************************/
+static void tspd_cpu_on_handler(u_register_t target_cpu)
+{
+}
+
+/*******************************************************************************
+ * This cpu is being turned off. Allow the TSPD/TSP to perform any actions
+ * needed
+ ******************************************************************************/
+static int32_t tspd_cpu_off_handler(u_register_t unused)
+{
+ int32_t rc = 0;
+ uint32_t linear_id = plat_my_core_pos();
+ tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
+
+ assert(tsp_vectors);
+ assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
+
+ /*
+ * Abort any preempted SMC request before overwriting the SECURE
+ * context.
+ */
+ tspd_abort_preempted_smc(tsp_ctx);
+
+ /* Program the entry point and enter the TSP */
+ cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_off_entry);
+ rc = tspd_synchronous_sp_entry(tsp_ctx);
+
+ /*
+ * Read the response from the TSP. A non-zero return means that
+ * something went wrong while communicating with the TSP.
+ */
+ if (rc != 0)
+ panic();
+
+ /*
+ * Reset TSP's context for a fresh start when this cpu is turned on
+ * subsequently.
+ */
+ set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * This cpu is being suspended. S-EL1 state must have been saved in the
+ * resident cpu (mpidr format) if it is a UP/UP migratable TSP.
+ ******************************************************************************/
+static void tspd_cpu_suspend_handler(u_register_t max_off_pwrlvl)
+{
+ int32_t rc = 0;
+ uint32_t linear_id = plat_my_core_pos();
+ tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
+
+ assert(tsp_vectors);
+ assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
+
+ /*
+ * Abort any preempted SMC request before overwriting the SECURE
+ * context.
+ */
+ tspd_abort_preempted_smc(tsp_ctx);
+
+ /* Program the entry point and enter the TSP */
+ cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_suspend_entry);
+ rc = tspd_synchronous_sp_entry(tsp_ctx);
+
+ /*
+ * Read the response from the TSP. A non-zero return means that
+ * something went wrong while communicating with the TSP.
+ */
+ if (rc)
+ panic();
+
+ /* Update its context to reflect the state the TSP is in */
+ set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_SUSPEND);
+}
+
+/*******************************************************************************
+ * This cpu has been turned on. Enter the TSP to initialise S-EL1 and other bits
+ * before passing control back to the Secure Monitor. Entry in S-EL1 is done
+ * after initialising minimal architectural state that guarantees safe
+ * execution.
+ ******************************************************************************/
+static void tspd_cpu_on_finish_handler(u_register_t unused)
+{
+ int32_t rc = 0;
+ uint32_t linear_id = plat_my_core_pos();
+ tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
+ entry_point_info_t tsp_on_entrypoint;
+
+ assert(tsp_vectors);
+ assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_OFF);
+
+ tspd_init_tsp_ep_state(&tsp_on_entrypoint,
+ TSP_AARCH64,
+ (uint64_t) &tsp_vectors->cpu_on_entry,
+ tsp_ctx);
+
+ /* Initialise this cpu's secure context */
+ cm_init_my_context(&tsp_on_entrypoint);
+
+#if TSP_NS_INTR_ASYNC_PREEMPT
+ /*
+ * Disable the NS interrupt locally since it will be enabled globally
+ * within cm_init_my_context.
+ */
+ disable_intr_rm_local(INTR_TYPE_NS, SECURE);
+#endif
+
+ /* Enter the TSP */
+ rc = tspd_synchronous_sp_entry(tsp_ctx);
+
+ /*
+ * Read the response from the TSP. A non-zero return means that
+ * something went wrong while communicating with the SP.
+ */
+ if (rc != 0)
+ panic();
+
+ /* Update its context to reflect the state the SP is in */
+ set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);
+}
+
+/*******************************************************************************
+ * This cpu has resumed from suspend. The SPD saved the TSP context when it
+ * completed the preceding suspend call. Use that context to program an entry
+ * into the TSP to allow it to do any remaining bookkeeping.
+ ******************************************************************************/
+static void tspd_cpu_suspend_finish_handler(u_register_t max_off_pwrlvl)
+{
+ int32_t rc = 0;
+ uint32_t linear_id = plat_my_core_pos();
+ tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
+
+ assert(tsp_vectors);
+ assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_SUSPEND);
+
+ /* Program the entry point, max_off_pwrlvl and enter the SP */
+ write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx),
+ CTX_GPREG_X0,
+ max_off_pwrlvl);
+ cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_resume_entry);
+ rc = tspd_synchronous_sp_entry(tsp_ctx);
+
+ /*
+ * Read the response from the TSP. A non-zero return means that
+ * something went wrong while communicating with the TSP.
+ */
+ if (rc != 0)
+ panic();
+
+ /* Update its context to reflect the state the SP is in */
+ set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);
+}
+
+/*******************************************************************************
+ * Return the type of TSP the TSPD is dealing with. Report the current resident
+ * cpu (mpidr format) if it is a UP/UP migratable TSP.
+ ******************************************************************************/
+static int32_t tspd_cpu_migrate_info(u_register_t *resident_cpu)
+{
+ return TSP_MIGRATE_INFO;
+}
+
+/*******************************************************************************
+ * System is about to be switched off. Allow the TSPD/TSP to perform
+ * any actions needed.
+ ******************************************************************************/
+static void tspd_system_off(void)
+{
+ uint32_t linear_id = plat_my_core_pos();
+ tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
+
+ assert(tsp_vectors);
+ assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
+
+ /*
+ * Abort any preempted SMC request before overwriting the SECURE
+ * context.
+ */
+ tspd_abort_preempted_smc(tsp_ctx);
+
+ /* Program the entry point */
+ cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->system_off_entry);
+
+	/*
+	 * Enter the TSP. We do not care about the return value because we
+	 * must continue the shutdown anyway.
+	 */
+ tspd_synchronous_sp_entry(tsp_ctx);
+}
+
+/*******************************************************************************
+ * System is about to be reset. Allow the TSPD/TSP to perform
+ * any actions needed.
+ ******************************************************************************/
+static void tspd_system_reset(void)
+{
+ uint32_t linear_id = plat_my_core_pos();
+ tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
+
+ assert(tsp_vectors);
+ assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
+
+ /*
+ * Abort any preempted SMC request before overwriting the SECURE
+ * context.
+ */
+ tspd_abort_preempted_smc(tsp_ctx);
+
+ /* Program the entry point */
+ cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->system_reset_entry);
+
+ /*
+ * Enter the TSP. We do not care about the return value because we
+ * must continue the reset anyway
+ */
+ tspd_synchronous_sp_entry(tsp_ctx);
+}
+
+/*******************************************************************************
+ * Structure populated by the TSP Dispatcher to be given a chance to perform any
+ * TSP bookkeeping before PSCI executes a power mgmt. operation.
+ ******************************************************************************/
+const spd_pm_ops_t tspd_pm = {
+ .svc_on = tspd_cpu_on_handler,
+ .svc_off = tspd_cpu_off_handler,
+ .svc_suspend = tspd_cpu_suspend_handler,
+ .svc_on_finish = tspd_cpu_on_finish_handler,
+ .svc_suspend_finish = tspd_cpu_suspend_finish_handler,
+ .svc_migrate = NULL,
+ .svc_migrate_info = tspd_cpu_migrate_info,
+ .svc_system_off = tspd_system_off,
+ .svc_system_reset = tspd_system_reset
+};
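+
+/*
+ * Illustrative flow (a sketch, not additional functionality): these hooks are
+ * registered via psci_register_spd_pm_hook(&tspd_pm) once the TSP reports
+ * TSP_ENTRY_DONE. From then on PSCI invokes, for example, svc_suspend()
+ * before a CPU_SUSPEND request so the TSP can save its state, and
+ * svc_suspend_finish() on wakeup so it can restore that state, each via a
+ * synchronous entry into the TSP.
+ */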
diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h
new file mode 100644
index 0000000..043644a
--- /dev/null
+++ b/services/spd/tspd/tspd_private.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TSPD_PRIVATE_H
+#define TSPD_PRIVATE_H
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <bl31/interrupt_mgmt.h>
+#include <context.h>
+#include <lib/psci/psci.h>
+
+/*******************************************************************************
+ * Secure Payload PM state information e.g. SP is suspended, uninitialised etc
+ * and macros to access the state information in the per-cpu 'state' flags
+ ******************************************************************************/
+#define TSP_PSTATE_OFF 0
+#define TSP_PSTATE_ON 1
+#define TSP_PSTATE_SUSPEND 2
+#define TSP_PSTATE_SHIFT 0
+#define TSP_PSTATE_MASK 0x3
+#define get_tsp_pstate(state) ((state >> TSP_PSTATE_SHIFT) & TSP_PSTATE_MASK)
+#define clr_tsp_pstate(state) (state &= ~(TSP_PSTATE_MASK \
+ << TSP_PSTATE_SHIFT))
+#define set_tsp_pstate(st, pst) do { \
+ clr_tsp_pstate(st); \
+ st |= (pst & TSP_PSTATE_MASK) << \
+ TSP_PSTATE_SHIFT; \
+ } while (0);
+
+
+/*
+ * This flag is used by the TSPD to determine whether the TSP is servicing a
+ * yielding SMC request prior to programming the next entry into the TSP,
+ * e.g. if TSP execution was preempted by a non-secure interrupt and control
+ * was handed to the normal world. If another request, distinct from what the
+ * TSP was previously doing, arrives, this flag helps the TSPD either reject
+ * the new request or service it while ensuring that the previous context is
+ * not corrupted.
+ */
+#define YIELD_SMC_ACTIVE_FLAG_SHIFT 2
+#define YIELD_SMC_ACTIVE_FLAG_MASK 1
+#define get_yield_smc_active_flag(state) \
+ ((state >> YIELD_SMC_ACTIVE_FLAG_SHIFT) \
+ & YIELD_SMC_ACTIVE_FLAG_MASK)
+#define set_yield_smc_active_flag(state) (state |= \
+ 1 << YIELD_SMC_ACTIVE_FLAG_SHIFT)
+#define clr_yield_smc_active_flag(state) (state &= \
+ ~(YIELD_SMC_ACTIVE_FLAG_MASK \
+ << YIELD_SMC_ACTIVE_FLAG_SHIFT))
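+
+/*
+ * Illustrative usage of the per-cpu 'state' flags (example values only):
+ *
+ *   uint32_t state = 0;
+ *   set_tsp_pstate(state, TSP_PSTATE_ON);    // state == 0x1
+ *   set_yield_smc_active_flag(state);        // state == 0x5 (bit 2 set)
+ *   assert(get_tsp_pstate(state) == TSP_PSTATE_ON);
+ *   clr_yield_smc_active_flag(state);        // state == 0x1 again
+ */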
+
+/*******************************************************************************
+ * Secure Payload execution state information i.e. aarch32 or aarch64
+ ******************************************************************************/
+#define TSP_AARCH32 MODE_RW_32
+#define TSP_AARCH64 MODE_RW_64
+
+/*******************************************************************************
+ * The SPD should know the type of Secure Payload.
+ ******************************************************************************/
+#define TSP_TYPE_UP PSCI_TOS_NOT_UP_MIG_CAP
+#define TSP_TYPE_UPM PSCI_TOS_UP_MIG_CAP
+#define TSP_TYPE_MP PSCI_TOS_NOT_PRESENT_MP
+
+/*******************************************************************************
+ * Secure Payload migrate type information as known to the SPD. We assume that
+ * the SPD is dealing with an MP Secure Payload.
+ ******************************************************************************/
+#define TSP_MIGRATE_INFO TSP_TYPE_MP
+
+/*******************************************************************************
+ * Number of cpus present on this platform. TODO: Rely on a topology
+ * tree to determine this in the future to avoid assumptions about mpidr
+ * allocation
+ ******************************************************************************/
+#define TSPD_CORE_COUNT PLATFORM_CORE_COUNT
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define TSPD_C_RT_CTX_X19 0x0
+#define TSPD_C_RT_CTX_X20 0x8
+#define TSPD_C_RT_CTX_X21 0x10
+#define TSPD_C_RT_CTX_X22 0x18
+#define TSPD_C_RT_CTX_X23 0x20
+#define TSPD_C_RT_CTX_X24 0x28
+#define TSPD_C_RT_CTX_X25 0x30
+#define TSPD_C_RT_CTX_X26 0x38
+#define TSPD_C_RT_CTX_X27 0x40
+#define TSPD_C_RT_CTX_X28 0x48
+#define TSPD_C_RT_CTX_X29 0x50
+#define TSPD_C_RT_CTX_X30 0x58
+#define TSPD_C_RT_CTX_SIZE 0x60
+#define TSPD_C_RT_CTX_ENTRIES (TSPD_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve caller-saved registers of the
+ * SP context while performing a TSP preemption.
+ * Note: These offsets have to match with the offsets for the corresponding
+ * registers in cpu_context as we are using memcpy to copy the values from
+ * cpu_context to sp_ctx.
+ ******************************************************************************/
+#define TSPD_SP_CTX_X0 0x0
+#define TSPD_SP_CTX_X1 0x8
+#define TSPD_SP_CTX_X2 0x10
+#define TSPD_SP_CTX_X3 0x18
+#define TSPD_SP_CTX_X4 0x20
+#define TSPD_SP_CTX_X5 0x28
+#define TSPD_SP_CTX_X6 0x30
+#define TSPD_SP_CTX_X7 0x38
+#define TSPD_SP_CTX_X8 0x40
+#define TSPD_SP_CTX_X9 0x48
+#define TSPD_SP_CTX_X10 0x50
+#define TSPD_SP_CTX_X11 0x58
+#define TSPD_SP_CTX_X12 0x60
+#define TSPD_SP_CTX_X13 0x68
+#define TSPD_SP_CTX_X14 0x70
+#define TSPD_SP_CTX_X15 0x78
+#define TSPD_SP_CTX_X16 0x80
+#define TSPD_SP_CTX_X17 0x88
+#define TSPD_SP_CTX_SIZE 0x90
+#define TSPD_SP_CTX_ENTRIES (TSPD_SP_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <lib/cassert.h>
+
+/*
+ * The number of arguments to save during a SMC call for TSP.
+ * Currently only x1 and x2 are used by TSP.
+ */
+#define TSP_NUM_ARGS 0x2
+
+/* AArch64 callee saved general purpose register context structure. */
+DEFINE_REG_STRUCT(c_rt_regs, TSPD_C_RT_CTX_ENTRIES);
+
+/*
+ * Compile time assertion to ensure that both the compiler and linker
+ * have the same double word aligned view of the size of the C runtime
+ * register context.
+ */
+CASSERT(TSPD_C_RT_CTX_SIZE == sizeof(c_rt_regs_t),
+ assert_spd_c_rt_regs_size_mismatch);
+
+/* SEL1 Secure payload (SP) caller saved register context structure. */
+DEFINE_REG_STRUCT(sp_ctx_regs, TSPD_SP_CTX_ENTRIES);
+
+/*
+ * Compile time assertion to ensure that both the compiler and linker
+ * have the same double word aligned view of the size of the SEL1 Secure
+ * Payload (SP) caller saved register context.
+ */
+CASSERT(TSPD_SP_CTX_SIZE == sizeof(sp_ctx_regs_t),
+ assert_spd_sp_regs_size_mismatch);
+
+/*******************************************************************************
+ * Structure which helps the SPD to maintain the per-cpu state of the SP.
+ * 'saved_spsr_el3' - temporary copy to allow S-EL1 interrupt handling when
+ * the TSP has been preempted.
+ * 'saved_elr_el3' - temporary copy to allow S-EL1 interrupt handling when
+ * the TSP has been preempted.
+ * 'state' - collection of flags to track SP state e.g. on/off
+ * 'mpidr' - mpidr to associate a context with a cpu
+ * 'c_rt_ctx' - stack address to restore C runtime context from after
+ * returning from a synchronous entry into the SP.
+ * 'cpu_ctx' - space to maintain SP architectural state
+ * 'saved_tsp_args' - space to store arguments for TSP arithmetic operations
+ *                    which will be queried by the TSP using the TSP_GET_ARGS SMC.
+ * 'sp_ctx'         - space to save the SEL1 Secure Payload (SP) caller-saved
+ *                    register context after it has been preempted by an
+ *                    EL3-routed NS interrupt and when a Secure interrupt is
+ *                    taken to the SP.
+ ******************************************************************************/
+typedef struct tsp_context {
+ uint64_t saved_elr_el3;
+ uint32_t saved_spsr_el3;
+ uint32_t state;
+ uint64_t mpidr;
+ uint64_t c_rt_ctx;
+ cpu_context_t cpu_ctx;
+ uint64_t saved_tsp_args[TSP_NUM_ARGS];
+#if TSP_NS_INTR_ASYNC_PREEMPT
+ sp_ctx_regs_t sp_ctx;
+ bool preempted_by_sel1_intr;
+#endif
+} tsp_context_t;
+
+/* Helper macros to store and retrieve tsp args from tsp_context */
+#define store_tsp_args(_tsp_ctx, _x1, _x2) do {\
+ _tsp_ctx->saved_tsp_args[0] = _x1;\
+ _tsp_ctx->saved_tsp_args[1] = _x2;\
+ } while (0)
+
+#define get_tsp_args(_tsp_ctx, _x1, _x2) do {\
+ _x1 = _tsp_ctx->saved_tsp_args[0];\
+ _x2 = _tsp_ctx->saved_tsp_args[1];\
+ } while (0)
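+
+/*
+ * Illustrative usage (mirrors tspd_main.c, shown here only as an example):
+ *
+ *   store_tsp_args(tsp_ctx, x1, x2);   // on a fresh request from the NS world
+ *   ...
+ *   get_tsp_args(tsp_ctx, x1, x2);     // when the TSP issues TSP_GET_ARGS
+ *   SMC_RET2(handle, x1, x2);
+ */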
+
+/* TSPD power management handlers */
+extern const spd_pm_ops_t tspd_pm;
+
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+typedef struct tsp_vectors tsp_vectors_t;
+
+/*******************************************************************************
+ * Function & Data prototypes
+ ******************************************************************************/
+uint64_t tspd_enter_sp(uint64_t *c_rt_ctx);
+void __dead2 tspd_exit_sp(uint64_t c_rt_ctx, uint64_t ret);
+uint64_t tspd_synchronous_sp_entry(tsp_context_t *tsp_ctx);
+void __dead2 tspd_synchronous_sp_exit(tsp_context_t *tsp_ctx, uint64_t ret);
+void tspd_init_tsp_ep_state(struct entry_point_info *tsp_entry_point,
+ uint32_t rw,
+ uint64_t pc,
+ tsp_context_t *tsp_ctx);
+int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx);
+
+uint64_t tspd_handle_sp_preemption(void *handle);
+
+extern tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];
+extern tsp_vectors_t *tsp_vectors;
+#endif /*__ASSEMBLER__*/
+
+#endif /* TSPD_PRIVATE_H */
diff --git a/services/std_svc/drtm/drtm_dma_prot.c b/services/std_svc/drtm/drtm_dma_prot.c
new file mode 100644
index 0000000..48317fd
--- /dev/null
+++ b/services/std_svc/drtm/drtm_dma_prot.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * DRTM DMA protection.
+ *
+ * Authors:
+ * Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
+ *
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <drivers/arm/smmu_v3.h>
+#include "drtm_dma_prot.h"
+#include "drtm_main.h"
+#include "drtm_remediation.h"
+#include <plat/common/platform.h>
+#include <smccc_helpers.h>
+
+/*
+ * ________________________ LAUNCH success ________________________
+ * | Initial | -------------------> | Prot engaged |
+ * |````````````````````````| |````````````````````````|
+ * | request.type == NONE | | request.type != NONE |
+ * | | <------------------- | |
+ * `________________________' UNPROTECT_MEM `________________________'
+ *
+ * Transitions that are not shown correspond to ABI calls that do not change
+ * state and result in an error being returned to the caller.
+ */
+static struct dma_prot active_prot = {
+ .type = PROTECT_NONE,
+};
+
+/* Version-independent type. */
+typedef struct drtm_dl_dma_prot_args_v1 struct_drtm_dl_dma_prot_args;
+
+/*
+ * This function checks whether the platform supports complete DMA protection.
+ * It returns false if the platform supports complete DMA protection, and true
+ * if it does not (in which case DRTM initialisation must fail).
+ */
+bool drtm_dma_prot_init(void)
+{
+ bool must_init_fail = false;
+ const uintptr_t *smmus;
+ size_t num_smmus = 0;
+ unsigned int total_smmus;
+
+	/* Warn if non-host platforms (trusted DMA-capable devices) are present. */
+ if (plat_has_non_host_platforms()) {
+ WARN("DRTM: the platform includes trusted DMA-capable devices"
+ " (non-host platforms)\n");
+ }
+
+ /*
+ * DLME protection is uncertain on platforms with peripherals whose
+ * DMA is not managed by an SMMU. DRTM doesn't work on such platforms.
+ */
+ if (plat_has_unmanaged_dma_peripherals()) {
+ ERROR("DRTM: this platform does not provide DMA protection\n");
+ must_init_fail = true;
+ }
+
+ /*
+ * Check that the platform reported all SMMUs.
+ * It is acceptable if the platform doesn't have any SMMUs when it
+ * doesn't have any DMA-capable devices.
+ */
+ total_smmus = plat_get_total_smmus();
+ plat_enumerate_smmus(&smmus, &num_smmus);
+ if (num_smmus != total_smmus) {
+ ERROR("DRTM: could not discover all SMMUs\n");
+ must_init_fail = true;
+ }
+
+ return must_init_fail;
+}
+
+/*
+ * Checks that the DMA protection arguments are valid and that the given
+ * protected regions are covered by DMA protection.
+ */
+enum drtm_retc drtm_dma_prot_check_args(const struct_drtm_dl_dma_prot_args *a,
+ int a_dma_prot_type,
+ drtm_mem_region_t p)
+{
+ switch ((enum dma_prot_type)a_dma_prot_type) {
+ case PROTECT_MEM_ALL:
+ if (a->dma_prot_table_paddr || a->dma_prot_table_size) {
+ ERROR("DRTM: invalid launch due to inconsistent"
+ " DMA protection arguments\n");
+ return MEM_PROTECT_INVALID;
+ }
+ /*
+ * Full DMA protection ought to ensure that the DLME and NWd
+ * DCE regions are protected, no further checks required.
+ */
+ return SUCCESS;
+
+ default:
+ ERROR("DRTM: invalid launch due to unsupported DMA protection type\n");
+ return MEM_PROTECT_INVALID;
+ }
+}
+
+enum drtm_retc drtm_dma_prot_engage(const struct_drtm_dl_dma_prot_args *a,
+ int a_dma_prot_type)
+{
+ const uintptr_t *smmus;
+ size_t num_smmus = 0;
+
+ if (active_prot.type != PROTECT_NONE) {
+ ERROR("DRTM: launch denied as previous DMA protection"
+ " is still engaged\n");
+ return DENIED;
+ }
+
+ if (a_dma_prot_type == PROTECT_NONE) {
+ return SUCCESS;
+ /* Only PROTECT_MEM_ALL is supported currently. */
+ } else if (a_dma_prot_type != PROTECT_MEM_ALL) {
+ ERROR("%s(): unimplemented DMA protection type\n", __func__);
+ panic();
+ }
+
+ /*
+ * Engage SMMUs in accordance with the request we have previously received.
+ * Only PROTECT_MEM_ALL is implemented currently.
+ */
+ plat_enumerate_smmus(&smmus, &num_smmus);
+ for (const uintptr_t *smmu = smmus; smmu < smmus+num_smmus; smmu++) {
+ /*
+ * TODO: Invalidate SMMU's Stage-1 and Stage-2 TLB entries. This ensures
+ * that any outstanding device transactions are completed, see Section
+ * 3.21.1, specification IHI_0070_C_a for an approximate reference.
+ */
+ int rc = smmuv3_ns_set_abort_all(*smmu);
+ if (rc != 0) {
+ ERROR("DRTM: SMMU at PA 0x%lx failed to engage DMA protection"
+ " rc=%d\n", *smmu, rc);
+ return INTERNAL_ERROR;
+ }
+ }
+
+ /*
+ * TODO: Restrict DMA from the GIC.
+ *
+ * Full DMA protection may be achieved as follows:
+ *
+ * With a GICv3:
+ * - Set GICR_CTLR.EnableLPIs to 0, for each GICR;
+ * GICR_CTLR.RWP == 0 must be the case before finishing, for each GICR.
+ * - Set GITS_CTLR.Enabled to 0;
+ * GITS_CTLR.Quiescent == 1 must be the case before finishing.
+ *
+ * In addition, with a GICv4:
+ * - Set GICR_VPENDBASER.Valid to 0, for each GICR;
+ * GICR_CTLR.RWP == 0 must be the case before finishing, for each GICR.
+ *
+ * Alternatively, e.g. if some bit values cannot be changed at runtime,
+ * this procedure should return an error if the LPI Pending and
+ * Configuration tables overlap the regions being protected.
+ */
+
+ active_prot.type = a_dma_prot_type;
+
+ return SUCCESS;
+}
+
+/*
+ * Undo what has previously been done in drtm_dma_prot_engage(), or enter
+ * remediation if it is not possible.
+ */
+enum drtm_retc drtm_dma_prot_disengage(void)
+{
+ const uintptr_t *smmus;
+ size_t num_smmus = 0;
+ const char *err_str = "cannot undo PROTECT_MEM_ALL SMMU config";
+
+ if (active_prot.type == PROTECT_NONE) {
+ return SUCCESS;
+ /* Only PROTECT_MEM_ALL is supported currently. */
+ } else if (active_prot.type != PROTECT_MEM_ALL) {
+ ERROR("%s(): unimplemented DMA protection type\n", __func__);
+ panic();
+ }
+
+ /*
+ * For PROTECT_MEM_ALL, undo the SMMU configuration for "abort all" mode
+ * done during engage().
+ */
+ /* Simply enter remediation for now. */
+ (void)smmus;
+ (void)num_smmus;
+ drtm_enter_remediation(1ULL, err_str);
+
+ /* TODO: Undo GIC DMA restrictions. */
+
+ active_prot.type = PROTECT_NONE;
+
+ return SUCCESS;
+}
+
+uint64_t drtm_unprotect_mem(void *ctx)
+{
+ enum drtm_retc ret;
+
+ switch (active_prot.type) {
+ case PROTECT_NONE:
+ ERROR("DRTM: invalid UNPROTECT_MEM, no DMA protection has"
+ " previously been engaged\n");
+ ret = DENIED;
+ break;
+
+ case PROTECT_MEM_ALL:
+ /*
+ * UNPROTECT_MEM is a no-op for PROTECT_MEM_ALL: DRTM must not touch
+ * the NS SMMU as it is expected that the DLME has configured it.
+ */
+ active_prot.type = PROTECT_NONE;
+
+ ret = SUCCESS;
+ break;
+
+ default:
+ ret = drtm_dma_prot_disengage();
+ break;
+ }
+
+ SMC_RET1(ctx, ret);
+}
+
+void drtm_dma_prot_serialise_table(uint8_t *dst, size_t *size_out)
+{
+ if (active_prot.type == PROTECT_NONE) {
+ return;
+ } else if (active_prot.type != PROTECT_MEM_ALL) {
+ ERROR("%s(): unimplemented DMA protection type\n", __func__);
+ panic();
+ }
+
+ struct __packed descr_table_1 {
+ drtm_memory_region_descriptor_table_t header;
+ drtm_mem_region_t regions[1];
+ } prot_table = {
+ .header = {
+ .revision = 1,
+ .num_regions = sizeof(((struct descr_table_1 *)NULL)->regions) /
+ sizeof(((struct descr_table_1 *)NULL)->regions[0])
+ },
+ .regions = {
+ {.region_address = 0, PAGES_AND_TYPE(UINT64_MAX, 0x3)},
+ }
+ };
+
+ memcpy(dst, &prot_table, sizeof(prot_table));
+ *size_out = sizeof(prot_table);
+}
diff --git a/services/std_svc/drtm/drtm_dma_prot.h b/services/std_svc/drtm/drtm_dma_prot.h
new file mode 100644
index 0000000..79dc9cb
--- /dev/null
+++ b/services/std_svc/drtm/drtm_dma_prot.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef DRTM_DMA_PROT_H
+#define DRTM_DMA_PROT_H
+
+#include <stdint.h>
+#include <plat/common/platform.h>
+#include <services/drtm_svc.h>
+
+struct __packed drtm_dl_dma_prot_args_v1 {
+ uint64_t dma_prot_table_paddr;
+ uint64_t dma_prot_table_size;
+};
+
+/* Values for DRTM_PROTECT_MEMORY */
+enum dma_prot_type {
+ PROTECT_NONE = -1,
+ PROTECT_MEM_ALL = 0,
+ PROTECT_MEM_REGION = 2,
+};
+
+struct dma_prot {
+ enum dma_prot_type type;
+};
+
+#define DRTM_MEM_REGION_PAGES_AND_TYPE(pages, type) \
+ (((uint64_t)(pages) & (((uint64_t)1 << 52) - 1)) \
+ | (((uint64_t)(type) & 0x7) << 52))
+
+#define PAGES_AND_TYPE(pages, type) \
+ .region_size_type = DRTM_MEM_REGION_PAGES_AND_TYPE(pages, type)
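+
+/*
+ * Illustrative encoding (example values only, a sketch): a 16-page region of
+ * type 0x1 starting at 0x80000000 could be described as
+ *
+ *   drtm_mem_region_t r = {
+ *           .region_address = 0x80000000ULL,
+ *           PAGES_AND_TYPE(16, 0x1),
+ *   };
+ *
+ * i.e. r.region_size_type == (16ULL | (0x1ULL << 52)).
+ */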
+
+/* Opaque / encapsulated type. */
+typedef struct drtm_dl_dma_prot_args_v1 drtm_dl_dma_prot_args_v1_t;
+
+bool drtm_dma_prot_init(void);
+enum drtm_retc drtm_dma_prot_check_args(const drtm_dl_dma_prot_args_v1_t *a,
+ int a_dma_prot_type,
+ drtm_mem_region_t p);
+enum drtm_retc drtm_dma_prot_engage(const drtm_dl_dma_prot_args_v1_t *a,
+ int a_dma_prot_type);
+enum drtm_retc drtm_dma_prot_disengage(void);
+uint64_t drtm_unprotect_mem(void *ctx);
+void drtm_dma_prot_serialise_table(uint8_t *dst, size_t *size_out);
+
+#endif /* DRTM_DMA_PROT_H */
diff --git a/services/std_svc/drtm/drtm_main.c b/services/std_svc/drtm/drtm_main.c
new file mode 100644
index 0000000..3acf683
--- /dev/null
+++ b/services/std_svc/drtm/drtm_main.c
@@ -0,0 +1,839 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * DRTM service
+ *
+ * Authors:
+ * Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
+ * Brian Nezvadovitz <brinez@microsoft.com> 2021-02-01
+ */
+
+#include <stdint.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <drivers/auth/crypto_mod.h>
+#include "drtm_main.h"
+#include "drtm_measurements.h"
+#include "drtm_remediation.h"
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/psci/psci_lib.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/platform.h>
+#include <services/drtm_svc.h>
+#include <services/sdei.h>
+#include <platform_def.h>
+
+/* Structure to store DRTM features specific to the platform. */
+static drtm_features_t plat_drtm_features;
+
+/* DRTM-formatted memory map. */
+static drtm_memory_region_descriptor_table_t *plat_drtm_mem_map;
+
+/* DLME header */
+struct_dlme_data_header dlme_data_hdr_init;
+
+/* Minimum data memory requirement */
+uint64_t dlme_data_min_size;
+
+int drtm_setup(void)
+{
+ bool rc;
+ const plat_drtm_tpm_features_t *plat_tpm_feat;
+ const plat_drtm_dma_prot_features_t *plat_dma_prot_feat;
+
+ INFO("DRTM service setup\n");
+
+ /* Read boot PE ID from MPIDR */
+ plat_drtm_features.boot_pe_id = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
+
+ rc = drtm_dma_prot_init();
+ if (rc) {
+ return INTERNAL_ERROR;
+ }
+
+ /*
+	 * Initialise the platform-supported crypto module that will be used
+	 * by the DRTM service to calculate the hash of DRTM implementation-
+	 * specific components.
+ */
+ crypto_mod_init();
+
+ /* Build DRTM-compatible address map. */
+ plat_drtm_mem_map = drtm_build_address_map();
+ if (plat_drtm_mem_map == NULL) {
+ return INTERNAL_ERROR;
+ }
+
+ /* Get DRTM features from platform hooks. */
+ plat_tpm_feat = plat_drtm_get_tpm_features();
+ if (plat_tpm_feat == NULL) {
+ return INTERNAL_ERROR;
+ }
+
+ plat_dma_prot_feat = plat_drtm_get_dma_prot_features();
+ if (plat_dma_prot_feat == NULL) {
+ return INTERNAL_ERROR;
+ }
+
+ /*
+ * Add up minimum DLME data memory.
+ *
+ * For systems with complete DMA protection there is only one entry in
+ * the protected regions table.
+ */
+ if (plat_dma_prot_feat->dma_protection_support ==
+ ARM_DRTM_DMA_PROT_FEATURES_DMA_SUPPORT_COMPLETE) {
+ dlme_data_min_size =
+ sizeof(drtm_memory_region_descriptor_table_t) +
+ sizeof(drtm_mem_region_t);
+ dlme_data_hdr_init.dlme_prot_regions_size = dlme_data_min_size;
+ } else {
+ /*
+ * TODO set protected regions table size based on platform DMA
+ * protection configuration
+ */
+ panic();
+ }
+
+ dlme_data_hdr_init.dlme_addr_map_size = drtm_get_address_map_size();
+ dlme_data_hdr_init.dlme_tcb_hashes_table_size =
+ plat_drtm_get_tcb_hash_table_size();
+ dlme_data_hdr_init.dlme_impdef_region_size =
+ plat_drtm_get_imp_def_dlme_region_size();
+
+ dlme_data_min_size += dlme_data_hdr_init.dlme_addr_map_size +
+ PLAT_DRTM_EVENT_LOG_MAX_SIZE +
+ dlme_data_hdr_init.dlme_tcb_hashes_table_size +
+ dlme_data_hdr_init.dlme_impdef_region_size;
+
+ dlme_data_min_size = page_align(dlme_data_min_size, UP)/PAGE_SIZE;
+
+ /* Fill out platform DRTM features structure */
+ /* Only support default PCR schema (0x1) in this implementation. */
+ ARM_DRTM_TPM_FEATURES_SET_PCR_SCHEMA(plat_drtm_features.tpm_features,
+ ARM_DRTM_TPM_FEATURES_PCR_SCHEMA_DEFAULT);
+ ARM_DRTM_TPM_FEATURES_SET_TPM_HASH(plat_drtm_features.tpm_features,
+ plat_tpm_feat->tpm_based_hash_support);
+ ARM_DRTM_TPM_FEATURES_SET_FW_HASH(plat_drtm_features.tpm_features,
+ plat_tpm_feat->firmware_hash_algorithm);
+ ARM_DRTM_MIN_MEM_REQ_SET_MIN_DLME_DATA_SIZE(plat_drtm_features.minimum_memory_requirement,
+ dlme_data_min_size);
+ ARM_DRTM_MIN_MEM_REQ_SET_DCE_SIZE(plat_drtm_features.minimum_memory_requirement,
+ plat_drtm_get_min_size_normal_world_dce());
+ ARM_DRTM_DMA_PROT_FEATURES_SET_MAX_REGIONS(plat_drtm_features.dma_prot_features,
+ plat_dma_prot_feat->max_num_mem_prot_regions);
+ ARM_DRTM_DMA_PROT_FEATURES_SET_DMA_SUPPORT(plat_drtm_features.dma_prot_features,
+ plat_dma_prot_feat->dma_protection_support);
+ ARM_DRTM_TCB_HASH_FEATURES_SET_MAX_NUM_HASHES(plat_drtm_features.tcb_hash_features,
+ plat_drtm_get_tcb_hash_features());
+
+ return 0;
+}
+
+static inline void invalidate_icache_all(void)
+{
+ __asm__ volatile("ic ialluis");
+ dsb();
+ isb();
+}
+
+static inline uint64_t drtm_features_tpm(void *ctx)
+{
+ SMC_RET2(ctx, 1ULL, /* TPM feature is supported */
+ plat_drtm_features.tpm_features);
+}
+
+static inline uint64_t drtm_features_mem_req(void *ctx)
+{
+ SMC_RET2(ctx, 1ULL, /* memory req Feature is supported */
+ plat_drtm_features.minimum_memory_requirement);
+}
+
+static inline uint64_t drtm_features_boot_pe_id(void *ctx)
+{
+ SMC_RET2(ctx, 1ULL, /* Boot PE feature is supported */
+ plat_drtm_features.boot_pe_id);
+}
+
+static inline uint64_t drtm_features_dma_prot(void *ctx)
+{
+ SMC_RET2(ctx, 1ULL, /* DMA protection feature is supported */
+ plat_drtm_features.dma_prot_features);
+}
+
+static inline uint64_t drtm_features_tcb_hashes(void *ctx)
+{
+ SMC_RET2(ctx, 1ULL, /* TCB hash feature is supported */
+ plat_drtm_features.tcb_hash_features);
+}
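+
+/*
+ * Illustrative caller-side view (a sketch only; the SMC conduit itself is not
+ * part of this file): a Normal World caller issuing ARM_DRTM_SVC_FEATURES
+ * with x1 selecting ARM_DRTM_FEATURES_MEM_REQ receives, on success, x0 == 1
+ * and x1 == plat_drtm_features.minimum_memory_requirement, as returned by
+ * drtm_features_mem_req() above.
+ */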
+
+static enum drtm_retc drtm_dl_check_caller_el(void *ctx)
+{
+ uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);
+ uint64_t dl_caller_el;
+ uint64_t dl_caller_aarch;
+
+ dl_caller_el = spsr_el3 >> MODE_EL_SHIFT & MODE_EL_MASK;
+ dl_caller_aarch = spsr_el3 >> MODE_RW_SHIFT & MODE_RW_MASK;
+
+	/* The caller's security state is checked in drtm_smc_handler(). */
+
+ /* Caller can be NS-EL2/EL1 */
+ if (dl_caller_el == MODE_EL3) {
+ ERROR("DRTM: invalid launch from EL3\n");
+ return DENIED;
+ }
+
+ if (dl_caller_aarch != MODE_RW_64) {
+ ERROR("DRTM: invalid launch from non-AArch64 execution state\n");
+ return DENIED;
+ }
+
+ return SUCCESS;
+}
+
+static enum drtm_retc drtm_dl_check_cores(void)
+{
+ bool running_on_single_core;
+ uint64_t this_pe_aff_value = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
+
+ if (this_pe_aff_value != plat_drtm_features.boot_pe_id) {
+ ERROR("DRTM: invalid launch on a non-boot PE\n");
+ return DENIED;
+ }
+
+ running_on_single_core = psci_is_last_on_cpu_safe();
+ if (!running_on_single_core) {
+ ERROR("DRTM: invalid launch due to non-boot PE not being turned off\n");
+ return DENIED;
+ }
+
+ return SUCCESS;
+}
+
+static enum drtm_retc drtm_dl_prepare_dlme_data(const struct_drtm_dl_args *args)
+{
+ int rc;
+ uint64_t dlme_data_paddr;
+ size_t dlme_data_max_size;
+ uintptr_t dlme_data_mapping;
+ struct_dlme_data_header *dlme_data_hdr;
+ uint8_t *dlme_data_cursor;
+ size_t dlme_data_mapping_bytes;
+ size_t serialised_bytes_actual;
+
+ dlme_data_paddr = args->dlme_paddr + args->dlme_data_off;
+ dlme_data_max_size = args->dlme_size - args->dlme_data_off;
+
+ /*
+ * The capacity of the given DLME data region is checked when
+ * the other dynamic launch arguments are.
+ */
+ if (dlme_data_max_size < dlme_data_min_size) {
+ ERROR("%s: assertion failed:"
+ " dlme_data_max_size (%ld) < dlme_data_total_bytes_req (%ld)\n",
+ __func__, dlme_data_max_size, dlme_data_min_size);
+ panic();
+ }
+
+ /* Map the DLME data region as NS memory. */
+ dlme_data_mapping_bytes = ALIGNED_UP(dlme_data_max_size, DRTM_PAGE_SIZE);
+ rc = mmap_add_dynamic_region_alloc_va(dlme_data_paddr,
+ &dlme_data_mapping,
+ dlme_data_mapping_bytes,
+ MT_RW_DATA | MT_NS |
+ MT_SHAREABILITY_ISH);
+ if (rc != 0) {
+ WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
+ __func__, rc);
+ return INTERNAL_ERROR;
+ }
+ dlme_data_hdr = (struct_dlme_data_header *)dlme_data_mapping;
+ dlme_data_cursor = (uint8_t *)dlme_data_hdr + sizeof(*dlme_data_hdr);
+
+ memcpy(dlme_data_hdr, (const void *)&dlme_data_hdr_init,
+ sizeof(*dlme_data_hdr));
+
+ /* Set the header version and size. */
+ dlme_data_hdr->version = 1;
+ dlme_data_hdr->this_hdr_size = sizeof(*dlme_data_hdr);
+
+ /* Prepare DLME protected regions. */
+ drtm_dma_prot_serialise_table(dlme_data_cursor,
+ &serialised_bytes_actual);
+ assert(serialised_bytes_actual ==
+ dlme_data_hdr->dlme_prot_regions_size);
+ dlme_data_cursor += serialised_bytes_actual;
+
+ /* Prepare DLME address map. */
+ if (plat_drtm_mem_map != NULL) {
+ memcpy(dlme_data_cursor, plat_drtm_mem_map,
+ dlme_data_hdr->dlme_addr_map_size);
+ } else {
+ WARN("DRTM: DLME address map is not in the cache\n");
+ }
+ dlme_data_cursor += dlme_data_hdr->dlme_addr_map_size;
+
+ /* Prepare DRTM event log for DLME. */
+ drtm_serialise_event_log(dlme_data_cursor, &serialised_bytes_actual);
+ assert(serialised_bytes_actual <= PLAT_DRTM_EVENT_LOG_MAX_SIZE);
+ dlme_data_hdr->dlme_tpm_log_size = serialised_bytes_actual;
+ dlme_data_cursor += serialised_bytes_actual;
+
+ /*
+	 * TODO: Prepare the TCB hashes for the DLME; their size is
+	 * currently 0.
+ */
+ dlme_data_cursor += dlme_data_hdr->dlme_tcb_hashes_table_size;
+
+ /* Implementation-specific region size is unused. */
+ dlme_data_cursor += dlme_data_hdr->dlme_impdef_region_size;
+
+ /*
+	 * Prepare the DLME data size; it includes all data regions referenced
+	 * above along with the DLME data header.
+ */
+ dlme_data_hdr->dlme_data_size = dlme_data_cursor - (uint8_t *)dlme_data_hdr;
+
+ /* Unmap the DLME data region. */
+ rc = mmap_remove_dynamic_region(dlme_data_mapping, dlme_data_mapping_bytes);
+ if (rc != 0) {
+ ERROR("%s(): mmap_remove_dynamic_region() failed"
+ " unexpectedly rc=%d\n", __func__, rc);
+ panic();
+ }
+
+ return SUCCESS;
+}
+
+/*
+ * Note: accesses to the dynamic launch args, and to the DLME data are
+ * little-endian as required, thanks to TF-A BL31 init requirements.
+ */
+static enum drtm_retc drtm_dl_check_args(uint64_t x1,
+ struct_drtm_dl_args *a_out)
+{
+ uint64_t dlme_start, dlme_end;
+ uint64_t dlme_img_start, dlme_img_ep, dlme_img_end;
+ uint64_t dlme_data_start, dlme_data_end;
+ uintptr_t va_mapping;
+ size_t va_mapping_size;
+ struct_drtm_dl_args *a;
+ struct_drtm_dl_args args_buf;
+ int rc;
+
+ if (x1 % DRTM_PAGE_SIZE != 0) {
+ ERROR("DRTM: parameters structure is not "
+ DRTM_PAGE_SIZE_STR "-aligned\n");
+ return INVALID_PARAMETERS;
+ }
+
+ va_mapping_size = ALIGNED_UP(sizeof(struct_drtm_dl_args), DRTM_PAGE_SIZE);
+
+ /* check DRTM parameters are within NS address region */
+ rc = plat_drtm_validate_ns_region(x1, va_mapping_size);
+ if (rc != 0) {
+ ERROR("DRTM: parameters lies within secure memory\n");
+ return INVALID_PARAMETERS;
+ }
+
+ rc = mmap_add_dynamic_region_alloc_va(x1, &va_mapping, va_mapping_size,
+ MT_MEMORY | MT_NS | MT_RO |
+ MT_SHAREABILITY_ISH);
+ if (rc != 0) {
+ WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
+ __func__, rc);
+ return INTERNAL_ERROR;
+ }
+ a = (struct_drtm_dl_args *)va_mapping;
+
+ /* Sanitize cache of data passed in args by the DCE Preamble. */
+ flush_dcache_range(va_mapping, va_mapping_size);
+
+ args_buf = *a;
+
+ rc = mmap_remove_dynamic_region(va_mapping, va_mapping_size);
+ if (rc) {
+ ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
+ " rc=%d\n", __func__, rc);
+ panic();
+ }
+ a = &args_buf;
+
+ if (!((a->version >= ARM_DRTM_PARAMS_MIN_VERSION) &&
+ (a->version <= ARM_DRTM_PARAMS_MAX_VERSION))) {
+ ERROR("DRTM: parameters structure version %u is unsupported\n",
+ a->version);
+ return NOT_SUPPORTED;
+ }
+
+ if (!(a->dlme_img_off < a->dlme_size &&
+ a->dlme_data_off < a->dlme_size)) {
+ ERROR("DRTM: argument offset is outside of the DLME region\n");
+ return INVALID_PARAMETERS;
+ }
+ dlme_start = a->dlme_paddr;
+ dlme_end = a->dlme_paddr + a->dlme_size;
+ dlme_img_start = a->dlme_paddr + a->dlme_img_off;
+ dlme_img_ep = dlme_img_start + a->dlme_img_ep_off;
+ dlme_img_end = dlme_img_start + a->dlme_img_size;
+ dlme_data_start = a->dlme_paddr + a->dlme_data_off;
+ dlme_data_end = dlme_end;
+
+ /* Check the DLME regions arguments. */
+ if ((dlme_start % DRTM_PAGE_SIZE) != 0) {
+ ERROR("DRTM: argument DLME region is not "
+ DRTM_PAGE_SIZE_STR "-aligned\n");
+ return INVALID_PARAMETERS;
+ }
+
+ if (!(dlme_start < dlme_end &&
+ dlme_start <= dlme_img_start && dlme_img_start < dlme_img_end &&
+ dlme_start <= dlme_data_start && dlme_data_start < dlme_data_end)) {
+ ERROR("DRTM: argument DLME region is discontiguous\n");
+ return INVALID_PARAMETERS;
+ }
+
+ if (dlme_img_start < dlme_data_end && dlme_data_start < dlme_img_end) {
+ ERROR("DRTM: argument DLME regions overlap\n");
+ return INVALID_PARAMETERS;
+ }
+
+ /* Check the DLME image region arguments. */
+ if ((dlme_img_start % DRTM_PAGE_SIZE) != 0) {
+ ERROR("DRTM: argument DLME image region is not "
+ DRTM_PAGE_SIZE_STR "-aligned\n");
+ return INVALID_PARAMETERS;
+ }
+
+ if (!(dlme_img_start <= dlme_img_ep && dlme_img_ep < dlme_img_end)) {
+ ERROR("DRTM: DLME entry point is outside of the DLME image region\n");
+ return INVALID_PARAMETERS;
+ }
+
+ if ((dlme_img_ep % 4) != 0) {
+ ERROR("DRTM: DLME image entry point is not 4-byte-aligned\n");
+ return INVALID_PARAMETERS;
+ }
+
+ /* Check the DLME data region arguments. */
+ if ((dlme_data_start % DRTM_PAGE_SIZE) != 0) {
+ ERROR("DRTM: argument DLME data region is not "
+ DRTM_PAGE_SIZE_STR "-aligned\n");
+ return INVALID_PARAMETERS;
+ }
+
+ if (dlme_data_end - dlme_data_start < dlme_data_min_size) {
+ ERROR("DRTM: argument DLME data region is short of %lu bytes\n",
+ dlme_data_min_size - (size_t)(dlme_data_end - dlme_data_start));
+ return INVALID_PARAMETERS;
+ }
+
+ /* check DLME region (paddr + size) is within a NS address region */
+ rc = plat_drtm_validate_ns_region(dlme_start, (size_t)a->dlme_size);
+ if (rc != 0) {
+ ERROR("DRTM: DLME region lies within secure memory\n");
+ return INVALID_PARAMETERS;
+ }
+
+ /* Check the Normal World DCE region arguments. */
+ if (a->dce_nwd_paddr != 0) {
+ uint32_t dce_nwd_start = a->dce_nwd_paddr;
+ uint32_t dce_nwd_end = dce_nwd_start + a->dce_nwd_size;
+
+ if (!(dce_nwd_start < dce_nwd_end)) {
+ ERROR("DRTM: argument Normal World DCE region is dicontiguous\n");
+ return INVALID_PARAMETERS;
+ }
+
+ if (dce_nwd_start < dlme_end && dlme_start < dce_nwd_end) {
+ ERROR("DRTM: argument Normal World DCE regions overlap\n");
+ return INVALID_PARAMETERS;
+ }
+ }
+
+ /*
+	 * Map and sanitize the cache for the data range passed by the DCE
+	 * Preamble. This is required to defend against racing with cache evictions.
+ */
+ va_mapping_size = ALIGNED_UP((dlme_end - dlme_start), DRTM_PAGE_SIZE);
+ rc = mmap_add_dynamic_region_alloc_va(dlme_img_start, &va_mapping, va_mapping_size,
+ MT_MEMORY | MT_NS | MT_RO |
+ MT_SHAREABILITY_ISH);
+ if (rc != 0) {
+ ERROR("DRTM: %s: mmap_add_dynamic_region_alloc_va() failed rc=%d\n",
+ __func__, rc);
+ return INTERNAL_ERROR;
+ }
+ flush_dcache_range(va_mapping, va_mapping_size);
+
+ rc = mmap_remove_dynamic_region(va_mapping, va_mapping_size);
+ if (rc) {
+ ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
+ " rc=%d\n", __func__, rc);
+ panic();
+ }
+
+ *a_out = *a;
+ return SUCCESS;
+}
+
+static void drtm_dl_reset_dlme_el_state(enum drtm_dlme_el dlme_el)
+{
+ uint64_t sctlr;
+
+ /*
+ * TODO: Set PE state according to the PSCI's specification of the initial
+ * state after CPU_ON, or to reset values if unspecified, where they exist,
+ * or define sensible values otherwise.
+ */
+
+ switch (dlme_el) {
+ case DLME_AT_EL1:
+ sctlr = read_sctlr_el1();
+ break;
+
+ case DLME_AT_EL2:
+ sctlr = read_sctlr_el2();
+ break;
+
+ default: /* Not reached */
+ ERROR("%s(): dlme_el has the unexpected value %d\n",
+ __func__, dlme_el);
+ panic();
+ }
+
+ sctlr &= ~(/* Disable DLME's EL MMU, since the existing page-tables are untrusted. */
+ SCTLR_M_BIT
+ | SCTLR_EE_BIT /* Little-endian data accesses. */
+ );
+
+ sctlr |= SCTLR_C_BIT | SCTLR_I_BIT; /* Allow instruction and data caching. */
+
+ switch (dlme_el) {
+ case DLME_AT_EL1:
+ write_sctlr_el1(sctlr);
+ break;
+
+ case DLME_AT_EL2:
+ write_sctlr_el2(sctlr);
+ break;
+ }
+}
+
+static void drtm_dl_reset_dlme_context(enum drtm_dlme_el dlme_el)
+{
+ void *ns_ctx = cm_get_context(NON_SECURE);
+ gp_regs_t *gpregs = get_gpregs_ctx(ns_ctx);
+ uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3);
+
+ /* Reset all gpregs, including SP_EL0. */
+ memset(gpregs, 0, sizeof(*gpregs));
+
+ /* Reset SP_ELx. */
+ switch (dlme_el) {
+ case DLME_AT_EL1:
+ write_sp_el1(0);
+ break;
+
+ case DLME_AT_EL2:
+ write_sp_el2(0);
+ break;
+ }
+
+ /*
+ * DLME's async exceptions are masked to avoid a NWd attacker's timed
+ * interference with any state we established trust in or measured.
+ */
+ spsr_el3 |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;
+
+ write_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3, spsr_el3);
+}
+
+static void drtm_dl_prepare_eret_to_dlme(const struct_drtm_dl_args *args, enum drtm_dlme_el dlme_el)
+{
+ void *ctx = cm_get_context(NON_SECURE);
+ uint64_t dlme_ep = DL_ARGS_GET_DLME_ENTRY_POINT(args);
+ uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);
+
+ /* Next ERET is to the DLME's EL. */
+ spsr_el3 &= ~(MODE_EL_MASK << MODE_EL_SHIFT);
+ switch (dlme_el) {
+ case DLME_AT_EL1:
+ spsr_el3 |= MODE_EL1 << MODE_EL_SHIFT;
+ break;
+
+ case DLME_AT_EL2:
+ spsr_el3 |= MODE_EL2 << MODE_EL_SHIFT;
+ break;
+ }
+
+ /* Next ERET is to the DLME entry point. */
+ cm_set_elr_spsr_el3(NON_SECURE, dlme_ep, spsr_el3);
+}
+
+static uint64_t drtm_dynamic_launch(uint64_t x1, void *handle)
+{
+ enum drtm_retc ret = SUCCESS;
+ enum drtm_retc dma_prot_ret;
+ struct_drtm_dl_args args;
+ /* DLME should be highest NS exception level */
+ enum drtm_dlme_el dlme_el = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
+
+ /* Ensure that only boot PE is powered on */
+ ret = drtm_dl_check_cores();
+ if (ret != SUCCESS) {
+ SMC_RET1(handle, ret);
+ }
+
+ /*
+ * Ensure that execution state is AArch64 and the caller
+ * is highest non-secure exception level
+ */
+ ret = drtm_dl_check_caller_el(handle);
+ if (ret != SUCCESS) {
+ SMC_RET1(handle, ret);
+ }
+
+ ret = drtm_dl_check_args(x1, &args);
+ if (ret != SUCCESS) {
+ SMC_RET1(handle, ret);
+ }
+
+	/* Ensure that there are no SDEI events registered */
+#if SDEI_SUPPORT
+ if (sdei_get_registered_event_count() != 0) {
+ SMC_RET1(handle, DENIED);
+ }
+#endif /* SDEI_SUPPORT */
+
+ /*
+ * Engage the DMA protections. The launch cannot proceed without the DMA
+ * protections due to potential TOC/TOU vulnerabilities w.r.t. the DLME
+ * region (and to the NWd DCE region).
+ */
+ ret = drtm_dma_prot_engage(&args.dma_prot_args,
+ DL_ARGS_GET_DMA_PROT_TYPE(&args));
+ if (ret != SUCCESS) {
+ SMC_RET1(handle, ret);
+ }
+
+ /*
+ * The DMA protection is now engaged. Note that any failure mode that
+ * returns an error to the DRTM-launch caller must now disengage DMA
+ * protections before returning to the caller.
+ */
+
+ ret = drtm_take_measurements(&args);
+ if (ret != SUCCESS) {
+ goto err_undo_dma_prot;
+ }
+
+ ret = drtm_dl_prepare_dlme_data(&args);
+ if (ret != SUCCESS) {
+ goto err_undo_dma_prot;
+ }
+
+ /*
+ * Note that, at the time of writing, the DRTM spec allows a successful
+ * launch from NS-EL1 to return to a DLME in NS-EL2. The practical risk
+ * of a privilege escalation, e.g. due to a compromised hypervisor, is
+ * considered small enough not to warrant the specification of additional
+ * DRTM conduits that would be necessary to maintain OSs' abstraction from
+ * the presence of EL2 were the dynamic launch only be allowed from the
+ * highest NS EL.
+ */
+
+ dlme_el = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
+
+ drtm_dl_reset_dlme_el_state(dlme_el);
+ drtm_dl_reset_dlme_context(dlme_el);
+
+ drtm_dl_prepare_eret_to_dlme(&args, dlme_el);
+
+ /*
+ * As per DRTM beta0 spec table #28 invalidate the instruction cache
+ * before jumping to the DLME. This is required to defend against
+ * potentially-malicious cache contents.
+ */
+ invalidate_icache_all();
+
+ /* Return the DLME region's address in x0, and the DLME data offset in x1.*/
+ SMC_RET2(handle, args.dlme_paddr, args.dlme_data_off);
+
+err_undo_dma_prot:
+ dma_prot_ret = drtm_dma_prot_disengage();
+ if (dma_prot_ret != SUCCESS) {
+ ERROR("%s(): drtm_dma_prot_disengage() failed unexpectedly"
+ " rc=%d\n", __func__, ret);
+ panic();
+ }
+
+ SMC_RET1(handle, ret);
+}
+
+uint64_t drtm_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ /* Check that the SMC call is from the Normal World. */
+ if (!is_caller_non_secure(flags)) {
+ SMC_RET1(handle, NOT_SUPPORTED);
+ }
+
+ switch (smc_fid) {
+ case ARM_DRTM_SVC_VERSION:
+ INFO("DRTM service handler: version\n");
+ /* Return the version of the current implementation */
+ SMC_RET1(handle, ARM_DRTM_VERSION);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_FEATURES:
+ if (((x1 >> ARM_DRTM_FUNC_SHIFT) & ARM_DRTM_FUNC_MASK) ==
+ ARM_DRTM_FUNC_ID) {
+ /* Dispatch function-based queries. */
+ switch (x1 & FUNCID_MASK) {
+ case ARM_DRTM_SVC_VERSION:
+ SMC_RET1(handle, SUCCESS);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_FEATURES:
+ SMC_RET1(handle, SUCCESS);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_UNPROTECT_MEM:
+ SMC_RET1(handle, SUCCESS);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
+ SMC_RET1(handle, SUCCESS);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_CLOSE_LOCALITY:
+ WARN("ARM_DRTM_SVC_CLOSE_LOCALITY feature %s",
+ "is not supported\n");
+ SMC_RET1(handle, NOT_SUPPORTED);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_GET_ERROR:
+ SMC_RET1(handle, SUCCESS);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_SET_ERROR:
+ SMC_RET1(handle, SUCCESS);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_SET_TCB_HASH:
+ WARN("ARM_DRTM_SVC_TCB_HASH feature %s",
+ "is not supported\n");
+ SMC_RET1(handle, NOT_SUPPORTED);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_LOCK_TCB_HASH:
+ WARN("ARM_DRTM_SVC_LOCK_TCB_HASH feature %s",
+ "is not supported\n");
+ SMC_RET1(handle, NOT_SUPPORTED);
+ break; /* not reached */
+
+ default:
+ ERROR("Unknown DRTM service function\n");
+ SMC_RET1(handle, NOT_SUPPORTED);
+ break; /* not reached */
+ }
+ } else {
+ /* Dispatch feature-based queries. */
+ switch (x1 & ARM_DRTM_FEAT_ID_MASK) {
+ case ARM_DRTM_FEATURES_TPM:
+ INFO("++ DRTM service handler: TPM features\n");
+ return drtm_features_tpm(handle);
+ break; /* not reached */
+
+ case ARM_DRTM_FEATURES_MEM_REQ:
+ INFO("++ DRTM service handler: Min. mem."
+ " requirement features\n");
+ return drtm_features_mem_req(handle);
+ break; /* not reached */
+
+ case ARM_DRTM_FEATURES_DMA_PROT:
+ INFO("++ DRTM service handler: "
+ "DMA protection features\n");
+ return drtm_features_dma_prot(handle);
+ break; /* not reached */
+
+ case ARM_DRTM_FEATURES_BOOT_PE_ID:
+ INFO("++ DRTM service handler: "
+ "Boot PE ID features\n");
+ return drtm_features_boot_pe_id(handle);
+ break; /* not reached */
+
+ case ARM_DRTM_FEATURES_TCB_HASHES:
+ INFO("++ DRTM service handler: "
+ "TCB-hashes features\n");
+ return drtm_features_tcb_hashes(handle);
+ break; /* not reached */
+
+ default:
+ ERROR("Unknown ARM DRTM service feature\n");
+ SMC_RET1(handle, NOT_SUPPORTED);
+ break; /* not reached */
+ }
+ }
+
+ case ARM_DRTM_SVC_UNPROTECT_MEM:
+ INFO("DRTM service handler: unprotect mem\n");
+ return drtm_unprotect_mem(handle);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
+ INFO("DRTM service handler: dynamic launch\n");
+ return drtm_dynamic_launch(x1, handle);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_CLOSE_LOCALITY:
+ WARN("DRTM service handler: close locality %s\n",
+ "is not supported");
+ SMC_RET1(handle, NOT_SUPPORTED);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_GET_ERROR:
+ INFO("DRTM service handler: get error\n");
+ drtm_get_error(handle);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_SET_ERROR:
+ INFO("DRTM service handler: set error\n");
+ drtm_set_error(x1, handle);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_SET_TCB_HASH:
+ WARN("DRTM service handler: set TCB hash %s\n",
+ "is not supported");
+ SMC_RET1(handle, NOT_SUPPORTED);
+ break; /* not reached */
+
+ case ARM_DRTM_SVC_LOCK_TCB_HASH:
+ WARN("DRTM service handler: lock TCB hash %s\n",
+ "is not supported");
+ SMC_RET1(handle, NOT_SUPPORTED);
+ break; /* not reached */
+
+ default:
+ ERROR("Unknown DRTM service function: 0x%x\n", smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ break; /* not reached */
+ }
+
+ /* not reached */
+ SMC_RET1(handle, SMC_UNK);
+}
diff --git a/services/std_svc/drtm/drtm_main.h b/services/std_svc/drtm/drtm_main.h
new file mode 100644
index 0000000..6005163
--- /dev/null
+++ b/services/std_svc/drtm/drtm_main.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef DRTM_MAIN_H
+#define DRTM_MAIN_H
+
+#include <stdint.h>
+
+#include <assert.h>
+#include <lib/smccc.h>
+
+#include "drtm_dma_prot.h"
+
+#define ALIGNED_UP(x, a) __extension__ ({ \
+ __typeof__(a) _a = (a); \
+ __typeof__(a) _one = 1; \
+ assert(IS_POWER_OF_TWO(_a)); \
+ ((x) + (_a - _one)) & ~(_a - _one); \
+})
+
+#define ALIGNED_DOWN(x, a) __extension__ ({ \
+ __typeof__(a) _a = (a); \
+ __typeof__(a) _one = 1; \
+ assert(IS_POWER_OF_TWO(_a)); \
+ (x) & ~(_a - _one); \
+})
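+
+/*
+ * Usage sketch for the alignment helpers above: 'a' must be a power of two
+ * (enforced by the assert). For example, with a 4-KiB alignment:
+ *   ALIGNED_UP(0x1234, 0x1000)   evaluates to 0x2000
+ *   ALIGNED_DOWN(0x1234, 0x1000) evaluates to 0x1000
+ * Values that are already aligned are returned unchanged.
+ */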
+
+#define DRTM_PAGE_SIZE (4 * (1 << 10))
+#define DRTM_PAGE_SIZE_STR "4-KiB"
+
+#define DL_ARGS_GET_DMA_PROT_TYPE(a) (((a)->features >> 3) & 0x7U)
+#define DL_ARGS_GET_PCR_SCHEMA(a) (((a)->features >> 1) & 0x3U)
+#define DL_ARGS_GET_DLME_ENTRY_POINT(a) \
+ (((a)->dlme_paddr + (a)->dlme_img_off + (a)->dlme_img_ep_off))
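+
+/*
+ * The accessors above decode the 'features' field of struct drtm_dl_args_v1:
+ * bits [2:1] carry the PCR schema and bits [5:3] the DMA protection type.
+ * For example, features == 0x8 yields a DMA protection type of 1 and a PCR
+ * schema of 0. The DLME entry point is the physical address
+ * dlme_paddr + dlme_img_off + dlme_img_ep_off.
+ */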
+
+/*
+ * Range(Min/Max) of DRTM parameter structure versions supported
+ */
+#define ARM_DRTM_PARAMS_MIN_VERSION U(1)
+#define ARM_DRTM_PARAMS_MAX_VERSION U(1)
+
+enum drtm_dlme_el {
+ DLME_AT_EL1 = MODE_EL1,
+ DLME_AT_EL2 = MODE_EL2
+};
+
+enum drtm_retc {
+ SUCCESS = SMC_OK,
+ NOT_SUPPORTED = SMC_UNK,
+ INVALID_PARAMETERS = -2,
+ DENIED = -3,
+ NOT_FOUND = -4,
+ INTERNAL_ERROR = -5,
+ MEM_PROTECT_INVALID = -6,
+};
+
+typedef struct {
+ uint64_t tpm_features;
+ uint64_t minimum_memory_requirement;
+ uint64_t dma_prot_features;
+ uint64_t boot_pe_id;
+ uint64_t tcb_hash_features;
+} drtm_features_t;
+
+struct __packed drtm_dl_args_v1 {
+ uint16_t version; /* Must be 1. */
+ uint8_t __res[2];
+ uint32_t features;
+ uint64_t dlme_paddr;
+ uint64_t dlme_size;
+ uint64_t dlme_img_off;
+ uint64_t dlme_img_ep_off;
+ uint64_t dlme_img_size;
+ uint64_t dlme_data_off;
+ uint64_t dce_nwd_paddr;
+ uint64_t dce_nwd_size;
+ drtm_dl_dma_prot_args_v1_t dma_prot_args;
+} __aligned(__alignof(uint16_t /* First member's type, `uint16_t version' */));
+
+struct __packed dlme_data_header_v1 {
+ uint16_t version; /* Must be 1. */
+ uint16_t this_hdr_size;
+ uint8_t __res[4];
+ uint64_t dlme_data_size;
+ uint64_t dlme_prot_regions_size;
+ uint64_t dlme_addr_map_size;
+ uint64_t dlme_tpm_log_size;
+ uint64_t dlme_tcb_hashes_table_size;
+ uint64_t dlme_impdef_region_size;
+} __aligned(__alignof(uint16_t /* First member's type, `uint16_t version'. */));
+
+typedef struct dlme_data_header_v1 struct_dlme_data_header;
+
+drtm_memory_region_descriptor_table_t *drtm_build_address_map(void);
+uint64_t drtm_get_address_map_size(void);
+
+/*
+ * Version-independent type. May be used to avoid excessive code changes
+ * when migrating to new struct versions.
+ */
+typedef struct drtm_dl_args_v1 struct_drtm_dl_args;
+
+#endif /* DRTM_MAIN_H */
diff --git a/services/std_svc/drtm/drtm_measurements.c b/services/std_svc/drtm/drtm_measurements.c
new file mode 100644
index 0000000..8d514b7
--- /dev/null
+++ b/services/std_svc/drtm/drtm_measurements.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * DRTM measurements into TPM PCRs.
+ *
+ * Authors:
+ * Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
+ *
+ */
+#include <assert.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <drivers/auth/crypto_mod.h>
+#include <drivers/measured_boot/event_log/event_log.h>
+#include "drtm_main.h"
+#include "drtm_measurements.h"
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+/* Event Log buffer */
+static uint8_t drtm_event_log[PLAT_DRTM_EVENT_LOG_MAX_SIZE];
+
+/*
+ * Calculate and write hash of various payloads as per DRTM specification
+ * to Event Log.
+ *
+ * @param[in] data_base Address of data
+ * @param[in] data_size Size of data
+ * @param[in] event_type Type of Event
+ * @param[in] event_name Name of the Event
+ * @return:
+ * 0 = success
+ * < 0 = error
+ */
+static int drtm_event_log_measure_and_record(uintptr_t data_base,
+ uint32_t data_size,
+ uint32_t event_type,
+ const char *event_name,
+ unsigned int pcr)
+{
+ int rc;
+ unsigned char hash_data[CRYPTO_MD_MAX_SIZE];
+ event_log_metadata_t metadata = {0};
+
+ metadata.name = event_name;
+ metadata.pcr = pcr;
+
+ /*
+ * Measure the payloads requested by D-CRTM and DCE components
+ * Hash algorithm decided by the Event Log driver at build-time
+ */
+ rc = event_log_measure(data_base, data_size, hash_data);
+ if (rc != 0) {
+ return rc;
+ }
+
+ /* Record the measurement in the Event Log buffer */
+ event_log_record(hash_data, event_type, &metadata);
+
+ return 0;
+}
+
+/*
+ * Initialise Event Log global variables, used during the recording
+ * of various payload measurements into the Event Log buffer
+ *
+ * @param[in] event_log_start Base address of Event Log buffer
+ * @param[in] event_log_finish End address of Event Log buffer,
+ * i.e. the first byte past the end
+ * of the buffer
+ */
+static void drtm_event_log_init(uint8_t *event_log_start,
+ uint8_t *event_log_finish)
+{
+ event_log_buf_init(event_log_start, event_log_finish);
+ event_log_write_specid_event();
+}
+
+enum drtm_retc drtm_take_measurements(const struct_drtm_dl_args *a)
+{
+ int rc;
+ uintptr_t dlme_img_mapping;
+ uint64_t dlme_img_ep;
+ size_t dlme_img_mapping_bytes;
+ uint8_t drtm_null_data = 0U;
+ uint8_t pcr_schema = DL_ARGS_GET_PCR_SCHEMA(a);
+ const char *drtm_event_arm_sep_data = "ARM_DRTM";
+
+ /* Initialise the EventLog driver */
+ drtm_event_log_init(drtm_event_log, drtm_event_log +
+ sizeof(drtm_event_log));
+
+ /**
+ * Measurements extended into PCR-17.
+ *
+ * PCR-17: Measure the DCE image. Extend digest of (char)0 into PCR-17
+ * since the D-CRTM and the DCE are not separate.
+ */
+ rc = drtm_event_log_measure_and_record((uintptr_t)&drtm_null_data,
+ sizeof(drtm_null_data),
+ DRTM_EVENT_ARM_DCE, NULL,
+ PCR_17);
+ CHECK_RC(rc, drtm_event_log_measure_and_record(DRTM_EVENT_ARM_DCE));
+
+ /* PCR-17: Measure the PCR schema DRTM launch argument. */
+ rc = drtm_event_log_measure_and_record((uintptr_t)&pcr_schema,
+ sizeof(pcr_schema),
+ DRTM_EVENT_ARM_PCR_SCHEMA,
+ NULL, PCR_17);
+ CHECK_RC(rc,
+ drtm_event_log_measure_and_record(DRTM_EVENT_ARM_PCR_SCHEMA));
+
+ /* PCR-17: Measure the enable state of external-debug, and trace. */
+ /*
+ * TODO: Measure the enable state of external-debug and trace. This should
+ * be returned through a platform-specific hook.
+ */
+
+ /* PCR-17: Measure the security lifecycle state. */
+ /*
+ * TODO: Measure the security lifecycle state. This is an implementation-
+ * defined value, retrieved through an implementation-defined mechanisms.
+ */
+
+ /*
+ * PCR-17: Optionally measure the NWd DCE.
+ * It is expected that such subsequent DCE stages are signed and verified.
+ * Whether they are measured in addition to signing is
+ * implementation-defined.
+ * Here the choice is to not measure any NWd DCE, in favour of PCR value
+ * resilience to any NWd DCE updates.
+ */
+
+ /* PCR-17: End of DCE measurements. */
+ rc = drtm_event_log_measure_and_record((uintptr_t)drtm_event_arm_sep_data,
+ strlen(drtm_event_arm_sep_data),
+ DRTM_EVENT_ARM_SEPARATOR, NULL,
+ PCR_17);
+ CHECK_RC(rc, drtm_event_log_measure_and_record(DRTM_EVENT_ARM_SEPARATOR));
+
+ /**
+ * Measurements extended into PCR-18.
+ *
+ * PCR-18: Measure the PCR schema DRTM launch argument.
+ */
+ rc = drtm_event_log_measure_and_record((uintptr_t)&pcr_schema,
+ sizeof(pcr_schema),
+ DRTM_EVENT_ARM_PCR_SCHEMA,
+ NULL, PCR_18);
+ CHECK_RC(rc,
+ drtm_event_log_measure_and_record(DRTM_EVENT_ARM_PCR_SCHEMA));
+
+ /*
+ * PCR-18: Measure the public key used to verify DCE image(s) signatures.
+ * Extend digest of (char)0, since we do not expect the NWd DCE to be
+ * present.
+ */
+ assert(a->dce_nwd_size == 0);
+ rc = drtm_event_log_measure_and_record((uintptr_t)&drtm_null_data,
+ sizeof(drtm_null_data),
+ DRTM_EVENT_ARM_DCE_PUBKEY,
+ NULL, PCR_18);
+ CHECK_RC(rc,
+ drtm_event_log_measure_and_record(DRTM_EVENT_ARM_DCE_PUBKEY));
+
+ /* PCR-18: Measure the DLME image. */
+ dlme_img_mapping_bytes = page_align(a->dlme_img_size, UP);
+ rc = mmap_add_dynamic_region_alloc_va(a->dlme_paddr + a->dlme_img_off,
+ &dlme_img_mapping,
+ dlme_img_mapping_bytes, MT_RO_DATA | MT_NS);
+ if (rc) {
+ WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
+ __func__, rc);
+ return INTERNAL_ERROR;
+ }
+
+ rc = drtm_event_log_measure_and_record(dlme_img_mapping, a->dlme_img_size,
+ DRTM_EVENT_ARM_DLME, NULL,
+ PCR_18);
+ CHECK_RC(rc, drtm_event_log_measure_and_record(DRTM_EVENT_ARM_DLME));
+
+ rc = mmap_remove_dynamic_region(dlme_img_mapping, dlme_img_mapping_bytes);
+ CHECK_RC(rc, mmap_remove_dynamic_region);
+
+ /* PCR-18: Measure the DLME image entry point. */
+ dlme_img_ep = DL_ARGS_GET_DLME_ENTRY_POINT(a);
+ rc = drtm_event_log_measure_and_record((uintptr_t)&dlme_img_ep,
+ sizeof(dlme_img_ep),
+ DRTM_EVENT_ARM_DLME_EP, NULL,
+ PCR_18);
+ CHECK_RC(rc, drtm_event_log_measure_and_record(DRTM_EVENT_ARM_DLME_EP));
+
+ /* PCR-18: End of measurements. */
+ rc = drtm_event_log_measure_and_record((uintptr_t)drtm_event_arm_sep_data,
+ strlen(drtm_event_arm_sep_data),
+ DRTM_EVENT_ARM_SEPARATOR, NULL,
+ PCR_18);
+ CHECK_RC(rc,
+ drtm_event_log_measure_and_record(DRTM_EVENT_ARM_SEPARATOR));
+ /*
+ * If the DCE is unable to log a measurement because there is no available
+ * space in the event log region, the DCE must extend a hash of the value
+ * 0xFF (1 byte in size) into PCR[17] and PCR[18] and enter remediation.
+ */
+
+ return SUCCESS;
+}
+
+void drtm_serialise_event_log(uint8_t *dst, size_t *event_log_size_out)
+{
+ *event_log_size_out = event_log_get_cur_size(drtm_event_log);
+ memcpy(dst, drtm_event_log, *event_log_size_out);
+}
diff --git a/services/std_svc/drtm/drtm_measurements.h b/services/std_svc/drtm/drtm_measurements.h
new file mode 100644
index 0000000..6d7a84e
--- /dev/null
+++ b/services/std_svc/drtm/drtm_measurements.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef DRTM_MEASUREMENTS_H
+#define DRTM_MEASUREMENTS_H
+
+#include <stdint.h>
+
+#include "drtm_main.h"
+#include <platform_def.h>
+
+#define DRTM_EVENT_ARM_BASE 0x9000U
+#define DRTM_EVENT_TYPE(n) (DRTM_EVENT_ARM_BASE + (unsigned int)(n))
+
+#define DRTM_EVENT_ARM_PCR_SCHEMA DRTM_EVENT_TYPE(1)
+#define DRTM_EVENT_ARM_DCE DRTM_EVENT_TYPE(2)
+#define DRTM_EVENT_ARM_DCE_PUBKEY DRTM_EVENT_TYPE(3)
+#define DRTM_EVENT_ARM_DLME DRTM_EVENT_TYPE(4)
+#define DRTM_EVENT_ARM_DLME_EP DRTM_EVENT_TYPE(5)
+#define DRTM_EVENT_ARM_DEBUG_CONFIG DRTM_EVENT_TYPE(6)
+#define DRTM_EVENT_ARM_NONSECURE_CONFIG DRTM_EVENT_TYPE(7)
+#define DRTM_EVENT_ARM_DCE_SECONDARY DRTM_EVENT_TYPE(8)
+#define DRTM_EVENT_ARM_TZFW DRTM_EVENT_TYPE(9)
+#define DRTM_EVENT_ARM_SEPARATOR DRTM_EVENT_TYPE(10)
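+
+/*
+ * The resulting event type values therefore occupy the range 0x9001-0x900A,
+ * e.g. DRTM_EVENT_ARM_PCR_SCHEMA is 0x9001 and DRTM_EVENT_ARM_SEPARATOR is
+ * 0x900A.
+ */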
+
+#define CHECK_RC(rc, func_call) { \
+ if (rc != 0) { \
+ ERROR("%s(): " #func_call "failed unexpectedly rc=%d\n", \
+ __func__, rc); \
+ panic(); \
+ } \
+}
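+
+/*
+ * Illustrative use of CHECK_RC, as in drtm_measurements.c: the second
+ * argument is only stringified for the error message, so it is typically
+ * the call (or a shorthand for it) whose return code is being checked:
+ *
+ *   rc = mmap_remove_dynamic_region(dlme_img_mapping, dlme_img_mapping_bytes);
+ *   CHECK_RC(rc, mmap_remove_dynamic_region);
+ *
+ * A non-zero rc logs the failing call and panics.
+ */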
+
+enum drtm_retc drtm_take_measurements(const struct_drtm_dl_args *a);
+void drtm_serialise_event_log(uint8_t *dst, size_t *event_log_size_out);
+
+#endif /* DRTM_MEASUREMENTS_H */
diff --git a/services/std_svc/drtm/drtm_remediation.c b/services/std_svc/drtm/drtm_remediation.c
new file mode 100644
index 0000000..696b4ea
--- /dev/null
+++ b/services/std_svc/drtm/drtm_remediation.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * DRTM support for DRTM error remediation.
+ *
+ */
+#include <inttypes.h>
+#include <stdint.h>
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include "drtm_main.h"
+#include <plat/common/platform.h>
+
+uint64_t drtm_set_error(uint64_t x1, void *ctx)
+{
+ int rc;
+
+ rc = plat_set_drtm_error(x1);
+
+ if (rc != 0) {
+ SMC_RET1(ctx, INTERNAL_ERROR);
+ }
+
+ SMC_RET1(ctx, SUCCESS);
+}
+
+uint64_t drtm_get_error(void *ctx)
+{
+ uint64_t error_code;
+ int rc;
+
+ rc = plat_get_drtm_error(&error_code);
+
+ if (rc != 0) {
+ SMC_RET1(ctx, INTERNAL_ERROR);
+ }
+
+ SMC_RET2(ctx, SUCCESS, error_code);
+}
+
+void drtm_enter_remediation(uint64_t err_code, const char *err_str)
+{
+ int rc = plat_set_drtm_error(err_code);
+
+ if (rc != 0) {
+ ERROR("%s(): drtm_error_set() failed unexpectedly rc=%d\n",
+ __func__, rc);
+ panic();
+ }
+
+ ERROR("DRTM: entering remediation of error:\n%" PRIu64 "\t\'%s\'\n",
+ err_code, err_str);
+
+ ERROR("%s(): system reset is not yet supported\n", __func__);
+ plat_system_reset();
+}
diff --git a/services/std_svc/drtm/drtm_remediation.h b/services/std_svc/drtm/drtm_remediation.h
new file mode 100644
index 0000000..8f965f1
--- /dev/null
+++ b/services/std_svc/drtm/drtm_remediation.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef DRTM_REMEDIATION_H
+#define DRTM_REMEDIATION_H
+
+uint64_t drtm_set_error(uint64_t x1, void *ctx);
+uint64_t drtm_get_error(void *ctx);
+
+void drtm_enter_remediation(uint64_t error_code, const char *error_str);
+
+#endif /* DRTM_REMEDIATION_H */
diff --git a/services/std_svc/drtm/drtm_res_address_map.c b/services/std_svc/drtm/drtm_res_address_map.c
new file mode 100644
index 0000000..8636706
--- /dev/null
+++ b/services/std_svc/drtm/drtm_res_address_map.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+
+#include <plat/common/platform.h>
+#include <services/drtm_svc.h>
+#include <platform_def.h>
+
+/* Address map revision generated by this code. */
+#define DRTM_ADDRESS_MAP_REVISION U(0x0001)
+
+/* Amount of space needed for address map based on PLAT_DRTM_MMAP_ENTRIES */
+#define DRTM_ADDRESS_MAP_SIZE (sizeof(drtm_memory_region_descriptor_table_t) + \
+ (sizeof(drtm_mem_region_t) * \
+ PLAT_DRTM_MMAP_ENTRIES))
+
+/* Allocate space for DRTM-formatted address map to be constructed. */
+static uint8_t drtm_address_map[DRTM_ADDRESS_MAP_SIZE];
+
+static uint64_t drtm_address_map_size;
+
+drtm_memory_region_descriptor_table_t *drtm_build_address_map(void)
+{
+ /* Set up pointer to DRTM memory map. */
+ drtm_memory_region_descriptor_table_t *map =
+ (drtm_memory_region_descriptor_table_t *)drtm_address_map;
+
+ /* Get the platform memory map. */
+ const mmap_region_t *mmap = plat_get_addr_mmap();
+ unsigned int i;
+
+ /* Set up header for address map structure. */
+ map->revision = DRTM_ADDRESS_MAP_REVISION;
+ map->reserved = 0x0000;
+
+ /* Iterate through mmap and generate DRTM address map. */
+ for (i = 0U; mmap[i].base_pa != 0UL; i++) {
+ /* Set PA of region. */
+ map->region[i].region_address = mmap[i].base_pa;
+
+ /* Set size of region (in 4kb chunks). */
+ map->region[i].region_size_type = 0;
+ ARM_DRTM_REGION_SIZE_TYPE_SET_4K_PAGE_NUM(
+ map->region[i].region_size_type,
+ mmap[i].size / PAGE_SIZE_4KB);
+
+ /* Set type and cacheability. */
+ switch (MT_TYPE(mmap[i].attr)) {
+ case MT_DEVICE:
+ ARM_DRTM_REGION_SIZE_TYPE_SET_REGION_TYPE(
+ map->region[i].region_size_type,
+ ARM_DRTM_REGION_SIZE_TYPE_REGION_TYPE_DEVICE);
+ break;
+ case MT_NON_CACHEABLE:
+ ARM_DRTM_REGION_SIZE_TYPE_SET_REGION_TYPE(
+ map->region[i].region_size_type,
+ ARM_DRTM_REGION_SIZE_TYPE_REGION_TYPE_NCAR);
+ ARM_DRTM_REGION_SIZE_TYPE_SET_CACHEABILITY(
+ map->region[i].region_size_type,
+ ARM_DRTM_REGION_SIZE_TYPE_CACHEABILITY_NC);
+ break;
+ case MT_MEMORY:
+ ARM_DRTM_REGION_SIZE_TYPE_SET_REGION_TYPE(
+ map->region[i].region_size_type,
+ ARM_DRTM_REGION_SIZE_TYPE_REGION_TYPE_NORMAL);
+ break;
+ default:
+ return NULL;
+ }
+ }
+
+ map->num_regions = i;
+
+ /* Store total size of address map. */
+ drtm_address_map_size = sizeof(drtm_memory_region_descriptor_table_t);
+ drtm_address_map_size += (i * sizeof(drtm_mem_region_t));
+
+ return map;
+}
+
+uint64_t drtm_get_address_map_size(void)
+{
+ return drtm_address_map_size;
+}
diff --git a/services/std_svc/errata_abi/cpu_errata_info.h b/services/std_svc/errata_abi/cpu_errata_info.h
new file mode 100644
index 0000000..e24a621
--- /dev/null
+++ b/services/std_svc/errata_abi/cpu_errata_info.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ERRATA_CPUSPEC_H
+#define ERRATA_CPUSPEC_H
+
+#include <limits.h>
+#include <stdint.h>
+#include <arch_helpers.h>
+
+#if __aarch64__
+#include <cortex_a35.h>
+#include <cortex_a510.h>
+#include <cortex_a53.h>
+#include <cortex_a57.h>
+#include <cortex_a55.h>
+#include <cortex_a710.h>
+#include <cortex_a72.h>
+#include <cortex_a73.h>
+#include <cortex_a75.h>
+#include <cortex_a76.h>
+#include <cortex_a77.h>
+#include <cortex_a78.h>
+#include <cortex_a78_ae.h>
+#include <cortex_a78c.h>
+#include <cortex_a715.h>
+#include <cortex_x1.h>
+#include <cortex_x2.h>
+#include <cortex_x3.h>
+#include <neoverse_n1.h>
+#include <neoverse_n2.h>
+#include <neoverse_v1.h>
+#include <neoverse_v2.h>
+#else
+#include <cortex_a15.h>
+#include <cortex_a17.h>
+#include <cortex_a57.h>
+#include <cortex_a9.h>
+#endif
+
+#define MAX_ERRATA_ENTRIES 32
+
+#define ERRATA_LIST_END (MAX_ERRATA_ENTRIES - 1)
+
+/* Default values for unused memory in the array */
+#define UNDEF_ERRATA {UINT_MAX, UCHAR_MAX, UCHAR_MAX, false, false}
+
+#define EXTRACT_PARTNUM(x) ((x >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
+
+#define RXPX_RANGE(x, y, z) (((x >= y) && (x <= z)) ? true : false)
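+
+/*
+ * The rxpx values compared here are assumed to encode rXpY as 0xXY (variant
+ * in the upper nibble, revision in the lower), matching cpu_get_rev_var().
+ * For example, an r1p2 part yields 0x12, so RXPX_RANGE(0x12, 0x00, 0x20) is
+ * true while RXPX_RANGE(0x30, 0x00, 0x20) is false.
+ */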
+
+/*
+ * CPU specific values for errata handling
+ */
+struct em_cpu {
+ unsigned int em_errata_id;
+ unsigned char em_rxpx_lo; /* lowest revision of errata applicable for the cpu */
+ unsigned char em_rxpx_hi; /* highest revision of errata applicable for the cpu */
+ bool errata_enabled; /* indicate if errata enabled */
+ /* flag to indicate if errata query is based out of non-arm interconnect */
+ bool non_arm_interconnect;
+};
+
+struct em_cpu_list {
+ /* field to hold cpu specific part number defined in midr reg */
+ unsigned long cpu_partnumber;
+ struct em_cpu cpu_errata_list[MAX_ERRATA_ENTRIES];
+};
+
+int32_t verify_errata_implemented(uint32_t errata_id, uint32_t forward_flag);
+#endif /* ERRATA_CPUSPEC_H */
diff --git a/services/std_svc/errata_abi/errata_abi_main.c b/services/std_svc/errata_abi/errata_abi_main.c
new file mode 100644
index 0000000..0b263e5
--- /dev/null
+++ b/services/std_svc/errata_abi/errata_abi_main.c
@@ -0,0 +1,603 @@
+/*
+ * Copyright (c) 2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include "cpu_errata_info.h"
+#include <lib/smccc.h>
+#include <lib/utils_def.h>
+#include <services/errata_abi_svc.h>
+#include <smccc_helpers.h>
+
+/*
+ * Global pointer that points to the specific
+ * structure based on the MIDR part number
+ */
+struct em_cpu_list *cpu_ptr;
+
+extern uint8_t cpu_get_rev_var(void);
+
+/* Structure array that holds CPU specific errata information */
+struct em_cpu_list cpu_list[] = {
+#if CORTEX_A9_H_INC
+{
+ .cpu_partnumber = CORTEX_A9_MIDR,
+ .cpu_errata_list = {
+ [0] = {794073, 0x00, 0xFF, ERRATA_A9_794073},
+ [1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A9_H_INC */
+
+#if CORTEX_A15_H_INC
+{
+ .cpu_partnumber = CORTEX_A15_MIDR,
+ .cpu_errata_list = {
+ [0] = {816470, 0x30, 0xFF, ERRATA_A15_816470},
+ [1] = {827671, 0x30, 0xFF, ERRATA_A15_827671},
+ [2 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A15_H_INC */
+
+#if CORTEX_A17_H_INC
+{
+ .cpu_partnumber = CORTEX_A17_MIDR,
+ .cpu_errata_list = {
+ [0] = {852421, 0x00, 0x12, ERRATA_A17_852421},
+ [1] = {852423, 0x00, 0x12, ERRATA_A17_852423},
+ [2 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A17_H_INC */
+
+#if CORTEX_A35_H_INC
+{
+ .cpu_partnumber = CORTEX_A35_MIDR,
+ .cpu_errata_list = {
+ [0] = {855472, 0x00, 0x00, ERRATA_A35_855472},
+ [1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A35_H_INC */
+
+#if CORTEX_A53_H_INC
+{
+ .cpu_partnumber = CORTEX_A53_MIDR,
+ .cpu_errata_list = {
+ [0] = {819472, 0x00, 0x01, ERRATA_A53_819472},
+ [1] = {824069, 0x00, 0x02, ERRATA_A53_824069},
+ [2] = {826319, 0x00, 0x02, ERRATA_A53_826319},
+ [3] = {827319, 0x00, 0x02, ERRATA_A53_827319},
+ [4] = {835769, 0x00, 0x04, ERRATA_A53_835769},
+ [5] = {836870, 0x00, 0x03, ERRATA_A53_836870},
+ [6] = {843419, 0x00, 0x04, ERRATA_A53_843419},
+ [7] = {855873, 0x03, 0xFF, ERRATA_A53_855873},
+ [8] = {1530924, 0x00, 0xFF, ERRATA_A53_1530924},
+ [9 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A53_H_INC */
+
+#if CORTEX_A55_H_INC
+{
+ .cpu_partnumber = CORTEX_A55_MIDR,
+ .cpu_errata_list = {
+ [0] = {768277, 0x00, 0x00, ERRATA_A55_768277},
+ [1] = {778703, 0x00, 0x00, ERRATA_A55_778703},
+ [2] = {798797, 0x00, 0x00, ERRATA_A55_798797},
+ [3] = {846532, 0x00, 0x01, ERRATA_A55_846532},
+ [4] = {903758, 0x00, 0x01, ERRATA_A55_903758},
+ [5] = {1221012, 0x00, 0x10, ERRATA_A55_1221012},
+ [6] = {1530923, 0x00, 0xFF, ERRATA_A55_1530923},
+ [7 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A55_H_INC */
+
+#if CORTEX_A57_H_INC
+{
+ .cpu_partnumber = CORTEX_A57_MIDR,
+ .cpu_errata_list = {
+ [0] = {806969, 0x00, 0x00, ERRATA_A57_806969},
+ [1] = {813419, 0x00, 0x00, ERRATA_A57_813419},
+ [2] = {813420, 0x00, 0x00, ERRATA_A57_813420},
+ [3] = {814670, 0x00, 0x00, ERRATA_A57_814670},
+ [4] = {817169, 0x00, 0x01, ERRATA_A57_817169},
+ [5] = {826974, 0x00, 0x11, ERRATA_A57_826974},
+ [6] = {826977, 0x00, 0x11, ERRATA_A57_826977},
+ [7] = {828024, 0x00, 0x11, ERRATA_A57_828024},
+ [8] = {829520, 0x00, 0x12, ERRATA_A57_829520},
+ [9] = {833471, 0x00, 0x12, ERRATA_A57_833471},
+ [10] = {859972, 0x00, 0x13, ERRATA_A57_859972},
+ [11] = {1319537, 0x00, 0xFF, ERRATA_A57_1319537},
+ [12 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A57_H_INC */
+
+#if CORTEX_A72_H_INC
+{
+ .cpu_partnumber = CORTEX_A72_MIDR,
+ .cpu_errata_list = {
+ [0] = {859971, 0x00, 0x03, ERRATA_A72_859971},
+ [1] = {1319367, 0x00, 0xFF, ERRATA_A72_1319367},
+ [2 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A72_H_INC */
+
+#if CORTEX_A73_H_INC
+{
+ .cpu_partnumber = CORTEX_A73_MIDR,
+ .cpu_errata_list = {
+ [0] = {852427, 0x00, 0x00, ERRATA_A73_852427},
+ [1] = {855423, 0x00, 0x01, ERRATA_A73_855423},
+ [2 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A73_H_INC */
+
+#if CORTEX_A75_H_INC
+{
+ .cpu_partnumber = CORTEX_A75_MIDR,
+ .cpu_errata_list = {
+ [0] = {764081, 0x00, 0x00, ERRATA_A75_764081},
+ [1] = {790748, 0x00, 0x00, ERRATA_A75_790748},
+ [2 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A75_H_INC */
+
+#if CORTEX_A76_H_INC
+{
+ .cpu_partnumber = CORTEX_A76_MIDR,
+ .cpu_errata_list = {
+ [0] = {1073348, 0x00, 0x10, ERRATA_A76_1073348},
+ [1] = {1130799, 0x00, 0x20, ERRATA_A76_1130799},
+ [2] = {1165522, 0x00, 0xFF, ERRATA_A76_1165522},
+ [3] = {1220197, 0x00, 0x20, ERRATA_A76_1220197},
+ [4] = {1257314, 0x00, 0x30, ERRATA_A76_1257314},
+ [5] = {1262606, 0x00, 0x30, ERRATA_A76_1262606},
+ [6] = {1262888, 0x00, 0x30, ERRATA_A76_1262888},
+ [7] = {1275112, 0x00, 0x30, ERRATA_A76_1275112},
+ [8] = {1286807, 0x00, 0x30, ERRATA_A76_1286807},
+ [9] = {1791580, 0x00, 0x40, ERRATA_A76_1791580},
+ [10] = {1868343, 0x00, 0x40, ERRATA_A76_1868343},
+ [11] = {1946160, 0x30, 0x41, ERRATA_A76_1946160},
+ [12] = {2743102, 0x00, 0x41, ERRATA_A76_2743102},
+ [13 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A76_H_INC */
+
+#if CORTEX_A77_H_INC
+{
+ .cpu_partnumber = CORTEX_A77_MIDR,
+ .cpu_errata_list = {
+ [0] = {1508412, 0x00, 0x10, ERRATA_A77_1508412},
+ [1] = {1791578, 0x00, 0x11, ERRATA_A77_1791578},
+ [2] = {1800714, 0x00, 0x11, ERRATA_A77_1800714},
+ [3] = {1925769, 0x00, 0x11, ERRATA_A77_1925769},
+ [4] = {1946167, 0x00, 0x11, ERRATA_A77_1946167},
+ [5] = {2356587, 0x00, 0x11, ERRATA_A77_2356587},
+ [6] = {2743100, 0x00, 0x11, ERRATA_A77_2743100},
+ [7 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A77_H_INC */
+
+#if CORTEX_A78_H_INC
+{
+ .cpu_partnumber = CORTEX_A78_MIDR,
+ .cpu_errata_list = {
+ [0] = {1688305, 0x00, 0x10, ERRATA_A78_1688305},
+ [1] = {1821534, 0x00, 0x10, ERRATA_A78_1821534},
+ [2] = {1941498, 0x00, 0x11, ERRATA_A78_1941498},
+ [3] = {1951500, 0x10, 0x11, ERRATA_A78_1951500},
+ [4] = {1952683, 0x00, 0x00, ERRATA_A78_1952683},
+ [5] = {2132060, 0x00, 0x12, ERRATA_A78_2132060},
+ [6] = {2242635, 0x10, 0x12, ERRATA_A78_2242635},
+ [7] = {2376745, 0x00, 0x12, ERRATA_A78_2376745},
+ [8] = {2395406, 0x00, 0x12, ERRATA_A78_2395406},
+ [9] = {2712571, 0x00, 0x12, ERRATA_A78_2712571, \
+ ERRATA_NON_ARM_INTERCONNECT},
+ [10] = {2742426, 0x00, 0x12, ERRATA_A78_2742426},
+ [11] = {2772019, 0x00, 0x12, ERRATA_A78_2772019},
+ [12] = {2779479, 0x00, 0x12, ERRATA_A78_2779479},
+ [13 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A78_H_INC */
+
+#if CORTEX_A78_AE_H_INC
+{
+ .cpu_partnumber = CORTEX_A78_AE_MIDR,
+ .cpu_errata_list = {
+ [0] = {1941500, 0x00, 0x01, ERRATA_A78_AE_1941500},
+ [1] = {1951502, 0x00, 0x01, ERRATA_A78_AE_1951502},
+ [2] = {2376748, 0x00, 0x02, ERRATA_A78_AE_2376748},
+ [3] = {2395408, 0x00, 0x01, ERRATA_A78_AE_2395408},
+ [4] = {2712574, 0x00, 0x02, ERRATA_A78_AE_2712574, \
+ ERRATA_NON_ARM_INTERCONNECT},
+ [5 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A78_AE_H_INC */
+
+#if CORTEX_A78C_H_INC
+{
+ .cpu_partnumber = CORTEX_A78C_MIDR,
+ .cpu_errata_list = {
+ [0] = {1827430, 0x00, 0x00, ERRATA_A78C_1827430},
+ [1] = {1827440, 0x00, 0x00, ERRATA_A78C_1827440},
+ [2] = {2132064, 0x01, 0x02, ERRATA_A78C_2132064},
+ [3] = {2242638, 0x01, 0x02, ERRATA_A78C_2242638},
+ [4] = {2376749, 0x01, 0x02, ERRATA_A78C_2376749},
+ [5] = {2395411, 0x01, 0x02, ERRATA_A78C_2395411},
+ [6] = {2712575, 0x01, 0x02, ERRATA_A78C_2712575, \
+ ERRATA_NON_ARM_INTERCONNECT},
+ [7] = {2772121, 0x00, 0x02, ERRATA_A78C_2772121},
+ [8] = {2779484, 0x01, 0x02, ERRATA_A78C_2779484},
+ [9 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A78C_H_INC */
+
+#if CORTEX_X1_H_INC
+{
+ .cpu_partnumber = CORTEX_X1_MIDR,
+ .cpu_errata_list = {
+ [0] = {1688305, 0x00, 0x10, ERRATA_X1_1688305},
+ [1] = {1821534, 0x00, 0x10, ERRATA_X1_1821534},
+ [2] = {1827429, 0x00, 0x10, ERRATA_X1_1827429},
+ [3 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_X1_H_INC */
+
+#if NEOVERSE_N1_H_INC
+{
+ .cpu_partnumber = NEOVERSE_N1_MIDR,
+ .cpu_errata_list = {
+ [0] = {1043202, 0x00, 0x10, ERRATA_N1_1043202},
+ [1] = {1073348, 0x00, 0x10, ERRATA_N1_1073348},
+ [2] = {1130799, 0x00, 0x20, ERRATA_N1_1130799},
+ [3] = {1165347, 0x00, 0x20, ERRATA_N1_1165347},
+ [4] = {1207823, 0x00, 0x20, ERRATA_N1_1207823},
+ [5] = {1220197, 0x00, 0x20, ERRATA_N1_1220197},
+ [6] = {1257314, 0x00, 0x30, ERRATA_N1_1257314},
+ [7] = {1262606, 0x00, 0x30, ERRATA_N1_1262606},
+ [8] = {1262888, 0x00, 0x30, ERRATA_N1_1262888},
+ [9] = {1275112, 0x00, 0x30, ERRATA_N1_1275112},
+ [10] = {1315703, 0x00, 0x30, ERRATA_N1_1315703},
+ [11] = {1542419, 0x30, 0x40, ERRATA_N1_1542419},
+ [12] = {1868343, 0x00, 0x40, ERRATA_N1_1868343},
+ [13] = {1946160, 0x30, 0x41, ERRATA_N1_1946160},
+ [14] = {2743102, 0x00, 0x41, ERRATA_N1_2743102},
+ [15 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* NEOVERSE_N1_H_INC */
+
+#if NEOVERSE_V1_H_INC
+{
+ .cpu_partnumber = NEOVERSE_V1_MIDR,
+ .cpu_errata_list = {
+ [0] = {1618635, 0x00, 0x00, ERRATA_V1_1618635},
+ [1] = {1774420, 0x00, 0x10, ERRATA_V1_1774420},
+ [2] = {1791573, 0x00, 0x10, ERRATA_V1_1791573},
+ [3] = {1852267, 0x00, 0x10, ERRATA_V1_1852267},
+ [4] = {1925756, 0x00, 0x11, ERRATA_V1_1925756},
+ [5] = {1940577, 0x10, 0x11, ERRATA_V1_1940577},
+ [6] = {1966096, 0x10, 0x11, ERRATA_V1_1966096},
+ [7] = {2108267, 0x00, 0x12, ERRATA_V1_2108267},
+ [8] = {2139242, 0x00, 0x11, ERRATA_V1_2139242},
+ [9] = {2216392, 0x10, 0x11, ERRATA_V1_2216392},
+ [10] = {2294912, 0x00, 0x12, ERRATA_V1_2294912},
+ [11] = {2372203, 0x00, 0x11, ERRATA_V1_2372203},
+ [12] = {2701953, 0x00, 0x11, ERRATA_V1_2701953, \
+ ERRATA_NON_ARM_INTERCONNECT},
+ [13] = {2743093, 0x00, 0x12, ERRATA_V1_2743093},
+ [14] = {2743233, 0x00, 0x12, ERRATA_V1_2743233},
+ [15] = {2779461, 0x00, 0x12, ERRATA_V1_2779461},
+ [16 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* NEOVERSE_V1_H_INC */
+
+#if CORTEX_A710_H_INC
+{
+ .cpu_partnumber = CORTEX_A710_MIDR,
+ .cpu_errata_list = {
+ [0] = {1987031, 0x00, 0x20, ERRATA_A710_1987031},
+ [1] = {2008768, 0x00, 0x20, ERRATA_A710_2008768},
+ [2] = {2017096, 0x00, 0x20, ERRATA_A710_2017096},
+ [3] = {2055002, 0x10, 0x20, ERRATA_A710_2055002},
+ [4] = {2058056, 0x00, 0x21, ERRATA_A710_2058056},
+ [5] = {2081180, 0x00, 0x20, ERRATA_A710_2081180},
+ [6] = {2083908, 0x20, 0x20, ERRATA_A710_2083908},
+ [7] = {2136059, 0x00, 0x20, ERRATA_A710_2136059},
+ [8] = {2147715, 0x20, 0x20, ERRATA_A710_2147715},
+ [9] = {2216384, 0x00, 0x20, ERRATA_A710_2216384},
+ [10] = {2267065, 0x00, 0x20, ERRATA_A710_2267065},
+ [11] = {2282622, 0x00, 0x21, ERRATA_A710_2282622},
+ [12] = {2291219, 0x00, 0x20, ERRATA_A710_2291219},
+ [13] = {2371105, 0x00, 0x20, ERRATA_A710_2371105},
+ [14] = {2701952, 0x00, 0x21, ERRATA_A710_2701952, \
+ ERRATA_NON_ARM_INTERCONNECT},
+ [15] = {2742423, 0x00, 0x21, ERRATA_A710_2742423},
+ [16] = {2768515, 0x00, 0x21, ERRATA_A710_2768515},
+ [17 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A710_H_INC */
+
+#if NEOVERSE_N2_H_INC
+{
+ .cpu_partnumber = NEOVERSE_N2_MIDR,
+ .cpu_errata_list = {
+ [0] = {2002655, 0x00, 0x00, ERRATA_N2_2002655},
+ [1] = {2009478, 0x00, 0x00, ERRATA_N2_2009478},
+ [2] = {2025414, 0x00, 0x00, ERRATA_N2_2025414},
+ [3] = {2067956, 0x00, 0x00, ERRATA_N2_2067956},
+ [4] = {2138953, 0x00, 0x03, ERRATA_N2_2138953},
+ [5] = {2138956, 0x00, 0x00, ERRATA_N2_2138956},
+ [6] = {2138958, 0x00, 0x00, ERRATA_N2_2138958},
+ [7] = {2189731, 0x00, 0x00, ERRATA_N2_2189731},
+ [8] = {2242400, 0x00, 0x00, ERRATA_N2_2242400},
+ [9] = {2242415, 0x00, 0x00, ERRATA_N2_2242415},
+ [10] = {2280757, 0x00, 0x00, ERRATA_N2_2280757},
+ [11] = {2326639, 0x00, 0x00, ERRATA_N2_2326639},
+ [12] = {2340933, 0x00, 0x00, ERRATA_N2_2340933},
+ [13] = {2346952, 0x00, 0x02, ERRATA_N2_2346952},
+ [14] = {2376738, 0x00, 0x00, ERRATA_N2_2376738},
+ [15] = {2388450, 0x00, 0x00, ERRATA_N2_2388450},
+ [16] = {2728475, 0x00, 0x02, ERRATA_N2_2728475, \
+ ERRATA_NON_ARM_INTERCONNECT},
+ [17] = {2743014, 0x00, 0x02, ERRATA_N2_2743014},
+ [18] = {2743089, 0x00, 0x02, ERRATA_N2_2743089},
+ [19] = {2779511, 0x00, 0x02, ERRATA_N2_2779511},
+ [20 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* NEOVERSE_N2_H_INC */
+
+#if CORTEX_X2_H_INC
+{
+ .cpu_partnumber = CORTEX_X2_MIDR,
+ .cpu_errata_list = {
+ [0] = {2002765, 0x00, 0x20, ERRATA_X2_2002765},
+ [1] = {2017096, 0x00, 0x20, ERRATA_X2_2017096},
+ [2] = {2058056, 0x00, 0x21, ERRATA_X2_2058056},
+ [3] = {2081180, 0x00, 0x20, ERRATA_X2_2081180},
+ [4] = {2083908, 0x20, 0x20, ERRATA_X2_2083908},
+ [5] = {2147715, 0x20, 0x20, ERRATA_X2_2147715},
+ [6] = {2216384, 0x00, 0x20, ERRATA_X2_2216384},
+ [7] = {2282622, 0x00, 0x21, ERRATA_X2_2282622},
+ [8] = {2371105, 0x00, 0x20, ERRATA_X2_2371105},
+ [9] = {2701952, 0x00, 0x21, ERRATA_X2_2701952, \
+ ERRATA_NON_ARM_INTERCONNECT},
+ [10] = {2742423, 0x00, 0x21, ERRATA_X2_2742423},
+ [11] = {2768515, 0x00, 0x21, ERRATA_X2_2768515},
+ [12 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_X2_H_INC */
+
+#if CORTEX_A510_H_INC
+{
+ .cpu_partnumber = CORTEX_A510_MIDR,
+ .cpu_errata_list = {
+ [0] = {1922240, 0x00, 0x00, ERRATA_A510_1922240},
+ [1] = {2041909, 0x02, 0x02, ERRATA_A510_2041909},
+ [2] = {2042739, 0x00, 0x02, ERRATA_A510_2042739},
+ [3] = {2080326, 0x02, 0x02, ERRATA_A510_2080326},
+ [4] = {2172148, 0x00, 0x10, ERRATA_A510_2172148},
+ [5] = {2218950, 0x00, 0x10, ERRATA_A510_2218950},
+ [6] = {2250311, 0x00, 0x10, ERRATA_A510_2250311},
+ [7] = {2288014, 0x00, 0x10, ERRATA_A510_2288014},
+ [8] = {2347730, 0x00, 0x11, ERRATA_A510_2347730},
+ [9] = {2371937, 0x00, 0x11, ERRATA_A510_2371937},
+ [10] = {2666669, 0x00, 0x11, ERRATA_A510_2666669},
+ [11] = {2684597, 0x00, 0x12, ERRATA_A510_2684597},
+ [12 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A510_H_INC */
+
+#if NEOVERSE_V2_H_INC
+{
+ .cpu_partnumber = NEOVERSE_V2_MIDR,
+ .cpu_errata_list = {
+ [0] = {2331132, 0x00, 0x02, ERRATA_V2_2331132},
+ [1] = {2719103, 0x00, 0x01, ERRATA_V2_2719103, \
+ ERRATA_NON_ARM_INTERCONNECT},
+ [2] = {2719105, 0x00, 0x01, ERRATA_V2_2719105},
+ [3] = {2743011, 0x00, 0x01, ERRATA_V2_2743011},
+ [4] = {2779510, 0x00, 0x01, ERRATA_V2_2779510},
+ [5] = {2801372, 0x00, 0x01, ERRATA_V2_2801372},
+ [6 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* NEOVERSE_V2_H_INC */
+
+#if CORTEX_A715_H_INC
+{
+ .cpu_partnumber = CORTEX_A715_MIDR,
+ .cpu_errata_list = {
+ [0] = {2701951, 0x00, 0x11, ERRATA_A715_2701951, \
+ ERRATA_NON_ARM_INTERCONNECT},
+ [1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_A715_H_INC */
+
+#if CORTEX_X3_H_INC
+{
+ .cpu_partnumber = CORTEX_X3_MIDR,
+ .cpu_errata_list = {
+ [0] = {2070301, 0x00, 0x12, ERRATA_X3_2070301},
+ [1] = {2313909, 0x00, 0x10, ERRATA_X3_2313909},
+ [2] = {2615812, 0x00, 0x11, ERRATA_X3_2615812},
+ [3] = {2742421, 0x00, 0x11, ERRATA_X3_2742421},
+ [4 ... ERRATA_LIST_END] = UNDEF_ERRATA,
+ }
+},
+#endif /* CORTEX_X3_H_INC */
+};
+
+/*
+ * Function to do binary search and check for the specific errata ID
+ * in the array of structures specific to the cpu identified.
+ */
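+/*
+ * Note: each per-CPU errata list is sorted by ascending erratum ID and padded
+ * with UNDEF_ERRATA entries (ID UINT_MAX), which is what allows a binary
+ * search here. Possible return values are EM_HIGHER_EL_MITIGATION,
+ * EM_AFFECTED, EM_NOT_AFFECTED and EM_UNKNOWN_ERRATUM.
+ */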
+int32_t binary_search(struct em_cpu_list *ptr, uint32_t erratum_id, uint8_t rxpx_val)
+{
+ int low_index = 0U, mid_index = 0U;
+
+ int high_index = MAX_ERRATA_ENTRIES - 1;
+
+ assert(ptr != NULL);
+
+ /*
+ * Pointer to the errata list of the cpu that matches
+ * extracted partnumber in the cpu list
+ */
+ struct em_cpu *erratum_ptr = NULL;
+
+ while (low_index <= high_index) {
+ mid_index = (low_index + high_index) / 2;
+
+ erratum_ptr = &ptr->cpu_errata_list[mid_index];
+ assert(erratum_ptr != NULL);
+
+ if (erratum_id < erratum_ptr->em_errata_id) {
+ high_index = mid_index - 1;
+ } else if (erratum_id > erratum_ptr->em_errata_id) {
+ low_index = mid_index + 1;
+ } else if (erratum_id == erratum_ptr->em_errata_id) {
+ if (RXPX_RANGE(rxpx_val, erratum_ptr->em_rxpx_lo, \
+ erratum_ptr->em_rxpx_hi)) {
+ if ((erratum_ptr->errata_enabled) && \
+ (!(erratum_ptr->non_arm_interconnect))) {
+ return EM_HIGHER_EL_MITIGATION;
+ }
+ return EM_AFFECTED;
+ }
+ return EM_NOT_AFFECTED;
+ }
+ }
+ /* no matching errata ID */
+ return EM_UNKNOWN_ERRATUM;
+}
+
+/* Function to check if the errata exists for the specific CPU and rxpx */
+int32_t verify_errata_implemented(uint32_t errata_id, uint32_t forward_flag)
+{
+ /*
+ * Read MIDR value and extract the revision, variant and partnumber
+ */
+ static uint32_t midr_val, cpu_partnum;
+ static uint8_t cpu_rxpx_val;
+ int32_t ret_val = EM_UNKNOWN_ERRATUM;
+
+ /* Determine the number of cpus listed in the cpu list */
+ uint8_t size_cpulist = ARRAY_SIZE(cpu_list);
+
+ /* Read the midr reg to extract cpu, revision and variant info */
+ midr_val = read_midr();
+
+ /* Extract revision and variant from the MIDR register */
+ cpu_rxpx_val = cpu_get_rev_var();
+
+ /* Extract the cpu partnumber and check if the cpu is in the cpu list */
+ cpu_partnum = EXTRACT_PARTNUM(midr_val);
+
+ for (uint8_t i = 0; i < size_cpulist; i++) {
+ cpu_ptr = &cpu_list[i];
+ uint16_t partnum_extracted = EXTRACT_PARTNUM(cpu_ptr->cpu_partnumber);
+
+ if (partnum_extracted == cpu_partnum) {
+ /*
+ * If the midr value is in the cpu list, binary search
+ * for the errata ID and specific revision in the list.
+ */
+ ret_val = binary_search(cpu_ptr, errata_id, cpu_rxpx_val);
+ break;
+ }
+ }
+ return ret_val;
+}
+
+/* Predicate indicating that a function id is part of EM_ABI */
+bool is_errata_fid(uint32_t smc_fid)
+{
+ return ((smc_fid == ARM_EM_VERSION) ||
+ (smc_fid == ARM_EM_FEATURES) ||
+ (smc_fid == ARM_EM_CPU_ERRATUM_FEATURES));
+
+}
+
+bool validate_spsr_mode(void)
+{
+ /* In AArch64, if the caller is EL1, return true */
+
+ #if __aarch64__
+ if (GET_EL(read_spsr_el3()) == MODE_EL1) {
+ return true;
+ }
+ return false;
+ #else
+
+ /* In AArch32, if in system/svc mode, return true */
+ uint8_t read_el_state = GET_M32(read_spsr());
+
+ if ((read_el_state == (MODE32_svc)) || (read_el_state == MODE32_sys)) {
+ return true;
+ }
+ return false;
+ #endif /* __aarch64__ */
+}
+
+uintptr_t errata_abi_smc_handler(uint32_t smc_fid, u_register_t x1,
+ u_register_t x2, u_register_t x3, u_register_t x4,
+ void *cookie, void *handle, u_register_t flags)
+{
+ int32_t ret_id = EM_UNKNOWN_ERRATUM;
+
+ switch (smc_fid) {
+ case ARM_EM_VERSION:
+ SMC_RET1(handle, MAKE_SMCCC_VERSION(
+ EM_VERSION_MAJOR, EM_VERSION_MINOR
+ ));
+ break; /* unreachable */
+ case ARM_EM_FEATURES:
+ if (is_errata_fid((uint32_t)x1)) {
+ SMC_RET1(handle, EM_SUCCESS);
+ }
+
+ SMC_RET1(handle, EM_NOT_SUPPORTED);
+ break; /* unreachable */
+ case ARM_EM_CPU_ERRATUM_FEATURES:
+
+ /*
+ * If the forward flag is greater than zero and the calling EL
+ * is EL1 in AArch64 or in system mode or svc mode in case of AArch32,
+ * return Invalid Parameters.
+ */
+ if (((uint32_t)x2 != 0) && (validate_spsr_mode())) {
+ SMC_RET1(handle, EM_INVALID_PARAMETERS);
+ }
+ ret_id = verify_errata_implemented((uint32_t)x1, (uint32_t)x2);
+ SMC_RET1(handle, ret_id);
+ break; /* unreachable */
+ default:
+ {
+ WARN("Unimplemented Errata ABI Service Call: 0x%x\n", smc_fid);
+ SMC_RET1(handle, EM_UNKNOWN_ERRATUM);
+ break; /* unreachable */
+ }
+ }
+}
diff --git a/services/std_svc/pci_svc.c b/services/std_svc/pci_svc.c
new file mode 100644
index 0000000..a02b8a7
--- /dev/null
+++ b/services/std_svc/pci_svc.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdint.h>
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <services/pci_svc.h>
+#include <services/std_svc.h>
+#include <smccc_helpers.h>
+
+static uint64_t validate_rw_addr_sz(uint32_t addr, uint64_t off, uint64_t sz)
+{
+ uint32_t nseg;
+ uint32_t ret;
+ uint32_t start_end_bus;
+
+ ret = pci_get_bus_for_seg(PCI_ADDR_SEG(addr), &start_end_bus, &nseg);
+
+ if (ret != SMC_PCI_CALL_SUCCESS) {
+ return SMC_PCI_CALL_INVAL_PARAM;
+ }
+ switch (sz) {
+ case SMC_PCI_SZ_8BIT:
+ case SMC_PCI_SZ_16BIT:
+ case SMC_PCI_SZ_32BIT:
+ break;
+ default:
+ return SMC_PCI_CALL_INVAL_PARAM;
+ }
+ if ((off + sz) > (PCI_OFFSET_MASK + 1U)) {
+ return SMC_PCI_CALL_INVAL_PARAM;
+ }
+ return SMC_PCI_CALL_SUCCESS;
+}
+
+uint64_t pci_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ switch (smc_fid) {
+ case SMC_PCI_VERSION: {
+ pcie_version ver;
+
+ ver.major = 1U;
+ ver.minor = 0U;
+ SMC_RET4(handle, ver.val, 0U, 0U, 0U);
+ }
+ case SMC_PCI_FEATURES:
+ switch (x1) {
+ case SMC_PCI_VERSION:
+ case SMC_PCI_FEATURES:
+ case SMC_PCI_READ:
+ case SMC_PCI_WRITE:
+ case SMC_PCI_SEG_INFO:
+ SMC_RET1(handle, SMC_PCI_CALL_SUCCESS);
+ default:
+ SMC_RET1(handle, SMC_PCI_CALL_NOT_SUPPORTED);
+ }
+ break;
+ case SMC_PCI_READ: {
+ uint32_t ret;
+
+ if (validate_rw_addr_sz(x1, x2, x3) != SMC_PCI_CALL_SUCCESS) {
+ SMC_RET2(handle, SMC_PCI_CALL_INVAL_PARAM, 0U);
+ }
+ if (x4 != 0U) {
+ SMC_RET2(handle, SMC_PCI_CALL_INVAL_PARAM, 0U);
+ }
+ if (pci_read_config(x1, x2, x3, &ret) != 0U) {
+ SMC_RET2(handle, SMC_PCI_CALL_INVAL_PARAM, 0U);
+ } else {
+ SMC_RET2(handle, SMC_PCI_CALL_SUCCESS, ret);
+ }
+ break;
+ }
+ case SMC_PCI_WRITE: {
+ uint32_t ret;
+
+ if (validate_rw_addr_sz(x1, x2, x3) != SMC_PCI_CALL_SUCCESS) {
+ SMC_RET1(handle, SMC_PCI_CALL_INVAL_PARAM);
+ }
+ ret = pci_write_config(x1, x2, x3, x4);
+ SMC_RET1(handle, ret);
+ break;
+ }
+ case SMC_PCI_SEG_INFO: {
+ uint32_t nseg;
+ uint32_t ret;
+ uint32_t start_end_bus;
+
+ if ((x2 != 0U) || (x3 != 0U) || (x4 != 0U)) {
+ SMC_RET3(handle, SMC_PCI_CALL_INVAL_PARAM, 0U, 0U);
+ }
+ ret = pci_get_bus_for_seg(x1, &start_end_bus, &nseg);
+ SMC_RET3(handle, ret, start_end_bus, nseg);
+ break;
+ }
+ default:
+ /* should be unreachable */
+ WARN("Unimplemented PCI Service Call: 0x%x\n", smc_fid);
+ SMC_RET1(handle, SMC_PCI_CALL_NOT_SUPPORTED);
+ }
+}
diff --git a/services/std_svc/rmmd/aarch64/rmmd_helpers.S b/services/std_svc/rmmd/aarch64/rmmd_helpers.S
new file mode 100644
index 0000000..6229baf
--- /dev/null
+++ b/services/std_svc/rmmd/aarch64/rmmd_helpers.S
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "../rmmd_private.h"
+#include <asm_macros.S>
+
+ .global rmmd_rmm_enter
+ .global rmmd_rmm_exit
+
+ /* ---------------------------------------------------------------------
+ * This function is called with SP_EL0 as stack. Here we stash our EL3
+ * callee-saved registers onto the stack as a part of saving the C
+ * runtime and enter the RMM.
+ * 'x0' contains a pointer to the memory where the address of the C
+ * runtime context is to be saved.
+ * ---------------------------------------------------------------------
+ */
+func rmmd_rmm_enter
+ /* Make space for the registers that we're going to save */
+ mov x3, sp
+ str x3, [x0, #0]
+ sub sp, sp, #RMMD_C_RT_CTX_SIZE
+
+ /* Save callee-saved registers on to the stack */
+ stp x19, x20, [sp, #RMMD_C_RT_CTX_X19]
+ stp x21, x22, [sp, #RMMD_C_RT_CTX_X21]
+ stp x23, x24, [sp, #RMMD_C_RT_CTX_X23]
+ stp x25, x26, [sp, #RMMD_C_RT_CTX_X25]
+ stp x27, x28, [sp, #RMMD_C_RT_CTX_X27]
+ stp x29, x30, [sp, #RMMD_C_RT_CTX_X29]
+
+ /* ---------------------------------------------------------------------
+ * Everything is set up now. el3_exit() will use the realm context to
+ * restore the general purpose and EL3 system registers before the ERET
+ * into the RMM.
+ * ---------------------------------------------------------------------
+ */
+ b el3_exit
+endfunc rmmd_rmm_enter
+
+ /* ---------------------------------------------------------------------
+ * This function is called with 'x0' pointing to a C runtime context.
+ * It restores the saved registers and jumps to that runtime with 'x0'
+ * as the new SP register. This destroys the C runtime context that had
+ * been built on the stack below the saved context by the caller. Later
+ * the second parameter 'x1' is passed as a return value to the caller.
+ * ---------------------------------------------------------------------
+ */
+func rmmd_rmm_exit
+ /* Restore the previous stack */
+ mov sp, x0
+
+ /* Restore callee-saved registers from the stack */
+ ldp x19, x20, [x0, #(RMMD_C_RT_CTX_X19 - RMMD_C_RT_CTX_SIZE)]
+ ldp x21, x22, [x0, #(RMMD_C_RT_CTX_X21 - RMMD_C_RT_CTX_SIZE)]
+ ldp x23, x24, [x0, #(RMMD_C_RT_CTX_X23 - RMMD_C_RT_CTX_SIZE)]
+ ldp x25, x26, [x0, #(RMMD_C_RT_CTX_X25 - RMMD_C_RT_CTX_SIZE)]
+ ldp x27, x28, [x0, #(RMMD_C_RT_CTX_X27 - RMMD_C_RT_CTX_SIZE)]
+ ldp x29, x30, [x0, #(RMMD_C_RT_CTX_X29 - RMMD_C_RT_CTX_SIZE)]
+
+ /* ---------------------------------------------------------------------
+ * This should take us back to the instruction after the call to the
+ * last rmmd_rmm_enter(). Place the second parameter in x0
+ * so that the caller will see it as a return value from the original
+ * entry call.
+ * ---------------------------------------------------------------------
+ */
+ mov x0, x1
+ ret
+endfunc rmmd_rmm_exit
diff --git a/services/std_svc/rmmd/rmmd.mk b/services/std_svc/rmmd/rmmd.mk
new file mode 100644
index 0000000..bcf54e1
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd.mk
@@ -0,0 +1,19 @@
+#
+# Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH},aarch64)
+ $(error "Error: RMMD is only supported on aarch64.")
+endif
+
+include services/std_svc/rmmd/trp/trp.mk
+
+RMMD_SOURCES += $(addprefix services/std_svc/rmmd/, \
+ ${ARCH}/rmmd_helpers.S \
+ rmmd_main.c \
+ rmmd_attest.c)
+
+# Let the top-level Makefile know that we intend to include RMM image
+NEED_RMM := yes
diff --git a/services/std_svc/rmmd/rmmd_attest.c b/services/std_svc/rmmd/rmmd_attest.c
new file mode 100644
index 0000000..25adf50
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd_attest.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <stdint.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <lib/spinlock.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/platform.h>
+#include "rmmd_private.h"
+#include <services/rmmd_svc.h>
+
+static spinlock_t lock;
+
+/* For printing Realm attestation token hash */
+#define DIGITS_PER_BYTE 2UL
+#define LENGTH_OF_TERMINATING_ZERO_IN_BYTES 1UL
+#define BYTES_PER_LINE_BASE 4UL
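+
+/*
+ * With BYTES_PER_LINE_BASE of 4, each VERBOSE line printed below covers
+ * 1 << 4 = 16 bytes of the challenge, i.e. 32 hex digits plus the
+ * terminating NUL held in hash_text (33 characters in total).
+ */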
+
+static void print_challenge(uint8_t *hash, size_t hash_size)
+{
+ size_t leftover;
+ /*
+ * bytes_per_line is always a power of two, so it can be used to
+ * construct a mask when the remainder needs to be computed.
+ */
+ const size_t bytes_per_line = 1 << BYTES_PER_LINE_BASE;
+ char hash_text[(1 << BYTES_PER_LINE_BASE) * DIGITS_PER_BYTE +
+ LENGTH_OF_TERMINATING_ZERO_IN_BYTES];
+ const char hex_chars[] = {'0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
+ unsigned int i;
+
+ for (i = 0U; i < hash_size; ++i) {
+ hash_text[(i & (bytes_per_line - 1)) * DIGITS_PER_BYTE] =
+ hex_chars[hash[i] >> 4];
+ hash_text[(i & (bytes_per_line - 1)) * DIGITS_PER_BYTE + 1] =
+ hex_chars[hash[i] & 0x0f];
+ if (((i + 1) & (bytes_per_line - 1)) == 0U) {
+ hash_text[bytes_per_line * DIGITS_PER_BYTE] = '\0';
+ VERBOSE("hash part %u = %s\n",
+ (i >> BYTES_PER_LINE_BASE) + 1, hash_text);
+ }
+ }
+
+ leftover = (size_t)i & (bytes_per_line - 1);
+
+ if (leftover != 0UL) {
+ hash_text[leftover * DIGITS_PER_BYTE] = '\0';
+ VERBOSE("hash part %u = %s\n", (i >> BYTES_PER_LINE_BASE) + 1,
+ hash_text);
+ }
+}
+
+/*
+ * Helper function to validate that the buffer base and length are
+ * within range.
+ */
+static int validate_buffer_params(uint64_t buf_pa, uint64_t buf_len)
+{
+ unsigned long shared_buf_page;
+ uintptr_t shared_buf_base;
+
+ (void)plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);
+
+ shared_buf_page = shared_buf_base & ~PAGE_SIZE_MASK;
+
+ /* Validate the buffer pointer */
+ if ((buf_pa & ~PAGE_SIZE_MASK) != shared_buf_page) {
+ ERROR("Buffer PA out of range\n");
+ return E_RMM_BAD_ADDR;
+ }
+
+ /* Validate the size of the shared area */
+ if (((buf_pa + buf_len - 1UL) & ~PAGE_SIZE_MASK) != shared_buf_page) {
+ ERROR("Invalid buffer length\n");
+ return E_RMM_INVAL;
+ }
+
+ return 0; /* No error */
+}
+
+int rmmd_attest_get_platform_token(uint64_t buf_pa, uint64_t *buf_size,
+ uint64_t c_size)
+{
+ int err;
+ uint8_t temp_buf[SHA512_DIGEST_SIZE];
+
+ err = validate_buffer_params(buf_pa, *buf_size);
+ if (err != 0) {
+ return err;
+ }
+
+ if ((c_size != SHA256_DIGEST_SIZE) &&
+ (c_size != SHA384_DIGEST_SIZE) &&
+ (c_size != SHA512_DIGEST_SIZE)) {
+ ERROR("Invalid hash size: %lu\n", c_size);
+ return E_RMM_INVAL;
+ }
+
+ spin_lock(&lock);
+
+ (void)memcpy(temp_buf, (void *)buf_pa, c_size);
+
+ print_challenge((uint8_t *)temp_buf, c_size);
+
+ /* Get the platform token. */
+ err = plat_rmmd_get_cca_attest_token((uintptr_t)buf_pa,
+ buf_size, (uintptr_t)temp_buf, c_size);
+
+ if (err != 0) {
+ ERROR("Failed to get platform token: %d.\n", err);
+ err = E_RMM_UNK;
+ }
+
+ spin_unlock(&lock);
+
+ return err;
+}
+
+int rmmd_attest_get_signing_key(uint64_t buf_pa, uint64_t *buf_size,
+ uint64_t ecc_curve)
+{
+ int err;
+
+ err = validate_buffer_params(buf_pa, *buf_size);
+ if (err != 0) {
+ return err;
+ }
+
+ if (ecc_curve != ATTEST_KEY_CURVE_ECC_SECP384R1) {
+ ERROR("Invalid ECC curve specified\n");
+ return E_RMM_INVAL;
+ }
+
+ spin_lock(&lock);
+
+ /* Get the Realm attestation key. */
+ err = plat_rmmd_get_cca_realm_attest_key((uintptr_t)buf_pa, buf_size,
+ (unsigned int)ecc_curve);
+ if (err != 0) {
+ ERROR("Failed to get attestation key: %d.\n", err);
+ err = E_RMM_UNK;
+ }
+
+ spin_unlock(&lock);
+
+ return err;
+}
diff --git a/services/std_svc/rmmd/rmmd_initial_context.h b/services/std_svc/rmmd/rmmd_initial_context.h
new file mode 100644
index 0000000..d7a743d
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd_initial_context.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RMMD_INITIAL_CONTEXT_H
+#define RMMD_INITIAL_CONTEXT_H
+
+#include <arch.h>
+
+/*
+ * SPSR_EL2
+ * M=0x9 (0b1001 EL2h)
+ * M[4]=0
+ * DAIF=0xF Exceptions masked on entry.
+ * BTYPE=0 BTI not yet supported.
+ * SSBS=0 Not yet supported.
+ * IL=0 Not an illegal exception return.
+ * SS=0 Not single stepping.
+ * PAN=1 RMM shouldn't access realm memory.
+ * UAO=0
+ * DIT=0
+ * TCO=0
+ * NZCV=0
+ */
+#define REALM_SPSR_EL2 ( \
+ SPSR_M_EL2H | \
+ (0xF << SPSR_DAIF_SHIFT) | \
+ SPSR_PAN_BIT \
+ )
+
+#endif /* RMMD_INITIAL_CONTEXT_H */
diff --git a/services/std_svc/rmmd/rmmd_main.c b/services/std_svc/rmmd/rmmd_main.c
new file mode 100644
index 0000000..8b78b13
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd_main.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <arch_features.h>
+#include <bl31/bl31.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/el3_runtime/pubsub.h>
+#include <lib/extensions/pmuv3.h>
+#include <lib/extensions/sys_reg_trace.h>
+#include <lib/gpt_rme/gpt_rme.h>
+
+#include <lib/spinlock.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+#include <services/rmmd_svc.h>
+#include <smccc_helpers.h>
+#include <lib/extensions/sme.h>
+#include <lib/extensions/sve.h>
+#include "rmmd_initial_context.h"
+#include "rmmd_private.h"
+
+/*******************************************************************************
+ * RMM boot failure flag
+ ******************************************************************************/
+static bool rmm_boot_failed;
+
+/*******************************************************************************
+ * RMM context information.
+ ******************************************************************************/
+rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * RMM entry point information. Discovered on the primary core and reused
+ * on secondary cores.
+ ******************************************************************************/
+static entry_point_info_t *rmm_ep_info;
+
+/*******************************************************************************
+ * Static function declaration.
+ ******************************************************************************/
+static int32_t rmm_init(void);
+
+/*******************************************************************************
+ * This function takes an RMM context pointer and performs a synchronous entry
+ * into it.
+ ******************************************************************************/
+uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
+{
+ uint64_t rc;
+
+ assert(rmm_ctx != NULL);
+
+ cm_set_context(&(rmm_ctx->cpu_ctx), REALM);
+
+ /* Restore the realm context assigned above */
+ cm_el1_sysregs_context_restore(REALM);
+ cm_el2_sysregs_context_restore(REALM);
+ cm_set_next_eret_context(REALM);
+
+ /* Enter RMM */
+ rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);
+
+ /*
+ * Save realm context. EL1 and EL2 Non-secure
+ * contexts will be restored before exiting to
+ * Non-secure world, therefore there is no need
+ * to clear EL1 and EL2 context registers.
+ */
+ cm_el1_sysregs_context_save(REALM);
+ cm_el2_sysregs_context_save(REALM);
+
+ return rc;
+}
+
+/*******************************************************************************
+ * This function returns to the place where rmmd_rmm_sync_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
+{
+ rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];
+
+ /* Get context of the RMM in use by this CPU. */
+ assert(cm_get_context(REALM) == &(ctx->cpu_ctx));
+
+ /*
+ * The RMMD must have initiated the original request through a
+ * synchronous entry into RMM. Jump back to the original C runtime
+ * context with the value of rc in x0.
+ */
+ rmmd_rmm_exit(ctx->c_rt_ctx, rc);
+
+ panic();
+}
+
+static void rmm_el2_context_init(el2_sysregs_t *regs)
+{
+ regs->ctx_regs[CTX_SPSR_EL2 >> 3] = REALM_SPSR_EL2;
+ regs->ctx_regs[CTX_SCTLR_EL2 >> 3] = SCTLR_EL2_RES1;
+}
+
+/*******************************************************************************
+ * Enable architecture extensions on first entry to Realm world.
+ ******************************************************************************/
+
+static void manage_extensions_realm(cpu_context_t *ctx)
+{
+ pmuv3_enable(ctx);
+
+ /*
+ * Enable access to TPIDR2_EL0 if SME/SME2 is enabled for the Non-secure world.
+ */
+ if (is_feat_sme_supported()) {
+ sme_enable(ctx);
+ }
+}
+
+static void manage_extensions_realm_per_world(void)
+{
+ if (is_feat_sve_supported()) {
+ /*
+ * Enable SVE and FPU in realm context when it is enabled for NS.
+ * Realm manager must ensure that the SVE and FPU register
+ * contexts are properly managed.
+ */
+ sve_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
+ }
+
+ /* NS can access this but Realm shouldn't */
+ if (is_feat_sys_reg_trace_supported()) {
+ sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
+ }
+
+ /*
+ * If SME/SME2 is supported and enabled for NS world, then disable trapping
+ * of SME instructions for Realm world. RMM will save/restore required
+ * registers that are shared with SVE/FPU so that Realm can use FPU or SVE.
+ */
+ if (is_feat_sme_supported()) {
+ sme_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
+ }
+}
+
+/*******************************************************************************
+ * Jump to the RMM for the first time.
+ ******************************************************************************/
+static int32_t rmm_init(void)
+{
+ long rc;
+ rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];
+
+ INFO("RMM init start.\n");
+
+ /* Enable architecture extensions */
+ manage_extensions_realm(&ctx->cpu_ctx);
+
+ manage_extensions_realm_per_world();
+
+ /* Initialize RMM EL2 context. */
+ rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);
+
+ rc = rmmd_rmm_sync_entry(ctx);
+ if (rc != E_RMM_BOOT_SUCCESS) {
+ ERROR("RMM init failed: %ld\n", rc);
+ /* Mark the boot as failed for all the CPUs */
+ rmm_boot_failed = true;
+ return 0;
+ }
+
+ INFO("RMM init end.\n");
+
+ return 1;
+}
+
+/*******************************************************************************
+ * Load and read RMM manifest, setup RMM.
+ ******************************************************************************/
+int rmmd_setup(void)
+{
+ size_t shared_buf_size __unused;
+ uintptr_t shared_buf_base;
+ uint32_t ep_attr;
+ unsigned int linear_id = plat_my_core_pos();
+ rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];
+ struct rmm_manifest *manifest;
+ int rc;
+
+ /* Make sure RME is supported. */
+ assert(get_armv9_2_feat_rme_support() != 0U);
+
+ rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
+ if (rmm_ep_info == NULL) {
+ WARN("No RMM image provided by BL2 boot loader, Booting "
+ "device without RMM initialization. SMCs destined for "
+ "RMM will return SMC_UNK\n");
+ return -ENOENT;
+ }
+
+ /* Under no circumstances will this parameter be 0 */
+ assert(rmm_ep_info->pc == RMM_BASE);
+
+ /* Initialise an entrypoint to set up the CPU context */
+ ep_attr = EP_REALM;
+ if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
+ ep_attr |= EP_EE_BIG;
+ }
+
+ SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
+ rmm_ep_info->spsr = SPSR_64(MODE_EL2,
+ MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+
+ shared_buf_size =
+ plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);
+
+ assert((shared_buf_size == SZ_4K) &&
+ ((void *)shared_buf_base != NULL));
+
+ /* Load the boot manifest at the beginning of the shared area */
+ manifest = (struct rmm_manifest *)shared_buf_base;
+ rc = plat_rmmd_load_manifest(manifest);
+ if (rc != 0) {
+ ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
+ return rc;
+ }
+ flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);
+
+ /*
+ * Prepare coldboot arguments for RMM:
+ * arg0: This CPUID (primary processor).
+ * arg1: Version for this Boot Interface.
+ * arg2: PLATFORM_CORE_COUNT.
+ * arg3: Base address for the EL3 <-> RMM shared area. The boot
+ * manifest will be stored at the beginning of this area.
+ */
+ rmm_ep_info->args.arg0 = linear_id;
+ rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
+ rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
+ rmm_ep_info->args.arg3 = shared_buf_base;
+
+ /* Initialise RMM context with this entry point information */
+ cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);
+
+ INFO("RMM setup done.\n");
+
+ /* Register init function for deferred init. */
+ bl31_register_rmm_init(&rmm_init);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Forward SMC to the other security state
+ ******************************************************************************/
+static uint64_t rmmd_smc_forward(uint32_t src_sec_state,
+ uint32_t dst_sec_state, uint64_t x0,
+ uint64_t x1, uint64_t x2, uint64_t x3,
+ uint64_t x4, void *handle)
+{
+ cpu_context_t *ctx = cm_get_context(dst_sec_state);
+
+ /* Save incoming security state */
+ cm_el1_sysregs_context_save(src_sec_state);
+ cm_el2_sysregs_context_save(src_sec_state);
+
+ /* Restore outgoing security state */
+ cm_el1_sysregs_context_restore(dst_sec_state);
+ cm_el2_sysregs_context_restore(dst_sec_state);
+ cm_set_next_eret_context(dst_sec_state);
+
+ /*
+ * As per SMCCC v1.2, x4 to x7 must be preserved unless they are
+ * used as return arguments. Hence the onward and return paths are
+ * handled differently: up to 8 arguments are supported on the
+ * onward path and 4 on the return path. Register x4 will be
+ * preserved by RMM if it is not used on the return path.
+ */
+ if (src_sec_state == NON_SECURE) {
+ SMC_RET8(ctx, x0, x1, x2, x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+ }
+
+ SMC_RET5(ctx, x0, x1, x2, x3, x4);
+}
+
+/*******************************************************************************
+ * This function handles all SMCs in the range reserved for RMI. Each call is
+ * either forwarded to the other security state or handled by the RMM dispatcher
+ ******************************************************************************/
+uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+ uint64_t x3, uint64_t x4, void *cookie,
+ void *handle, uint64_t flags)
+{
+ uint32_t src_sec_state;
+
+ /* If RMM failed to boot, treat any RMI SMC as unknown */
+ if (rmm_boot_failed) {
+ WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ /* Determine which security state this SMC originated from */
+ src_sec_state = caller_sec_state(flags);
+
+ /* RMI must not be invoked by the Secure world */
+ if (src_sec_state == SMC_FROM_SECURE) {
+ WARN("RMMD: RMI invoked by secure world.\n");
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ /*
+ * Forward an RMI call from the Normal world to the Realm world as it
+ * is.
+ */
+ if (src_sec_state == SMC_FROM_NON_SECURE) {
+ /*
+ * If SVE hint bit is set in the flags then update the SMC
+ * function id and pass it on to the lower EL.
+ */
+ if (is_sve_hint_set(flags)) {
+ smc_fid |= (FUNCID_SVE_HINT_MASK <<
+ FUNCID_SVE_HINT_SHIFT);
+ }
+ VERBOSE("RMMD: RMI call from non-secure world.\n");
+ return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
+ x1, x2, x3, x4, handle);
+ }
+
+ if (src_sec_state != SMC_FROM_REALM) {
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ switch (smc_fid) {
+ case RMM_RMI_REQ_COMPLETE: {
+ uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+
+ return rmmd_smc_forward(REALM, NON_SECURE, x1,
+ x2, x3, x4, x5, handle);
+ }
+ default:
+ WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
+
+/*******************************************************************************
+ * This CPU has been turned on. Enter RMM to initialise R-EL2. Entry into RMM
+ * is done after initialising minimal architectural state that guarantees safe
+ * execution.
+ ******************************************************************************/
+static void *rmmd_cpu_on_finish_handler(const void *arg)
+{
+ long rc;
+ uint32_t linear_id = plat_my_core_pos();
+ rmmd_rmm_context_t *ctx = &rmm_context[linear_id];
+
+ if (rmm_boot_failed) {
+ /* RMM Boot failed on a previous CPU. Abort. */
+ ERROR("RMM Failed to initialize. Ignoring for CPU%d\n",
+ linear_id);
+ return NULL;
+ }
+
+ /*
+ * Prepare warmboot arguments for RMM:
+ * arg0: This CPUID.
+ * arg1 to arg3: Not used.
+ */
+ rmm_ep_info->args.arg0 = linear_id;
+ rmm_ep_info->args.arg1 = 0ULL;
+ rmm_ep_info->args.arg2 = 0ULL;
+ rmm_ep_info->args.arg3 = 0ULL;
+
+ /* Initialise RMM context with this entry point information */
+ cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);
+
+ /* Enable architecture extensions */
+ manage_extensions_realm(&ctx->cpu_ctx);
+
+ /* Initialize RMM EL2 context. */
+ rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);
+
+ rc = rmmd_rmm_sync_entry(ctx);
+
+ if (rc != E_RMM_BOOT_SUCCESS) {
+ ERROR("RMM init failed on CPU%d: %ld\n", linear_id, rc);
+ /* Mark the boot as failed for any other booting CPU */
+ rmm_boot_failed = true;
+ }
+
+ return NULL;
+}
+
+/* Subscribe to PSCI CPU on to initialize RMM on secondary */
+SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);
+
+/* Convert GPT lib error to RMMD GTS error */
+static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
+{
+ int ret;
+
+ if (error == 0) {
+ return E_RMM_OK;
+ }
+
+ if (error == -EINVAL) {
+ ret = E_RMM_BAD_ADDR;
+ } else {
+ /* This is the only other error code we expect */
+ assert(error == -EPERM);
+ ret = E_RMM_BAD_PAS;
+ }
+
+ ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n",
+ error, address, smc_fid);
+ return ret;
+}
+
+/*******************************************************************************
+ * This function handles RMM-EL3 interface SMCs
+ ******************************************************************************/
+uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+ uint64_t x3, uint64_t x4, void *cookie,
+ void *handle, uint64_t flags)
+{
+ uint32_t src_sec_state;
+ int ret;
+
+ /* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
+ if (rmm_boot_failed) {
+ WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ /* Determine which security state this SMC originated from */
+ src_sec_state = caller_sec_state(flags);
+
+ if (src_sec_state != SMC_FROM_REALM) {
+ WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ switch (smc_fid) {
+ case RMM_GTSI_DELEGATE:
+ ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
+ SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
+ case RMM_GTSI_UNDELEGATE:
+ ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
+ SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
+ case RMM_ATTEST_GET_PLAT_TOKEN:
+ ret = rmmd_attest_get_platform_token(x1, &x2, x3);
+ SMC_RET2(handle, ret, x2);
+ case RMM_ATTEST_GET_REALM_KEY:
+ ret = rmmd_attest_get_signing_key(x1, &x2, x3);
+ SMC_RET2(handle, ret, x2);
+
+ case RMM_BOOT_COMPLETE:
+ VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
+ rmmd_rmm_sync_exit(x1);
+
+ default:
+ WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
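
A minimal, standalone sketch of the GPT-to-RMMD error translation performed by gpt_to_gts_error() above. The E_RMM_* values below are placeholders chosen for illustration only, not the constants from rmmd_svc.h; only the shape of the mapping (0 -> OK, -EINVAL -> bad address, -EPERM -> bad PAS) is taken from the code.

#include <assert.h>
#include <errno.h>
#include <stdio.h>

/* Placeholder values, for illustration only. */
#define E_RMM_OK	 0
#define E_RMM_BAD_ADDR	-2
#define E_RMM_BAD_PAS	-3

/* Mirrors the mapping in gpt_to_gts_error(): 0 -> OK, -EINVAL -> bad
 * address, -EPERM -> bad PAS. Any other input is unexpected. */
static int map_gpt_error(int error)
{
	if (error == 0) {
		return E_RMM_OK;
	}
	if (error == -EINVAL) {
		return E_RMM_BAD_ADDR;
	}
	assert(error == -EPERM);
	return E_RMM_BAD_PAS;
}

int main(void)
{
	printf("0       -> %d\n", map_gpt_error(0));
	printf("-EINVAL -> %d\n", map_gpt_error(-EINVAL));
	printf("-EPERM  -> %d\n", map_gpt_error(-EPERM));
	return 0;
}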
diff --git a/services/std_svc/rmmd/rmmd_private.h b/services/std_svc/rmmd/rmmd_private.h
new file mode 100644
index 0000000..4954a43
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd_private.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RMMD_PRIVATE_H
+#define RMMD_PRIVATE_H
+
+#include <context.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define RMMD_C_RT_CTX_X19 0x0
+#define RMMD_C_RT_CTX_X20 0x8
+#define RMMD_C_RT_CTX_X21 0x10
+#define RMMD_C_RT_CTX_X22 0x18
+#define RMMD_C_RT_CTX_X23 0x20
+#define RMMD_C_RT_CTX_X24 0x28
+#define RMMD_C_RT_CTX_X25 0x30
+#define RMMD_C_RT_CTX_X26 0x38
+#define RMMD_C_RT_CTX_X27 0x40
+#define RMMD_C_RT_CTX_X28 0x48
+#define RMMD_C_RT_CTX_X29 0x50
+#define RMMD_C_RT_CTX_X30 0x58
+
+#define RMMD_C_RT_CTX_SIZE 0x60
+#define RMMD_C_RT_CTX_ENTRIES (RMMD_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+#include <stdint.h>
+
+/*
+ * Data structure used by the RMM dispatcher (RMMD) in EL3 to track context of
+ * the RMM at R-EL2.
+ */
+typedef struct rmmd_rmm_context {
+ uint64_t c_rt_ctx;
+ cpu_context_t cpu_ctx;
+} rmmd_rmm_context_t;
+
+/* Functions used to enter/exit the RMM synchronously */
+uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *ctx);
+__dead2 void rmmd_rmm_sync_exit(uint64_t rc);
+
+/* Functions implementing attestation utilities for RMM */
+int rmmd_attest_get_platform_token(uint64_t buf_pa, uint64_t *buf_size,
+ uint64_t c_size);
+int rmmd_attest_get_signing_key(uint64_t buf_pa, uint64_t *buf_size,
+ uint64_t ecc_curve);
+
+/* Assembly helpers */
+uint64_t rmmd_rmm_enter(uint64_t *c_rt_ctx);
+void __dead2 rmmd_rmm_exit(uint64_t c_rt_ctx, uint64_t ret);
+
+/* Reference to PM ops for the RMMD */
+extern const spd_pm_ops_t rmmd_pm;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* RMMD_PRIVATE_H */
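
The RMMD_C_RT_CTX_* offsets above must stay in lockstep with the save/restore layout used by rmmd_rmm_enter() and rmmd_rmm_exit() in assembly: twelve callee-saved registers (x19-x30), one 8-byte slot each. A compile-time consistency check of that arithmetic might look like the sketch below; the constants are copied locally purely for illustration.

#include <assert.h>

/* Copies of the header values, for illustration only. */
#define RMMD_C_RT_CTX_X19	0x0
#define RMMD_C_RT_CTX_X29	0x50
#define RMMD_C_RT_CTX_X30	0x58
#define RMMD_C_RT_CTX_SIZE	0x60

/* x19..x30 is 12 registers of 8 bytes each, i.e. 0x60 bytes in total. */
static_assert(RMMD_C_RT_CTX_X29 == RMMD_C_RT_CTX_X19 + (10 * 8),
	      "x29 slot out of place");
static_assert(RMMD_C_RT_CTX_X30 == RMMD_C_RT_CTX_X19 + (11 * 8),
	      "x30 slot out of place");
static_assert(RMMD_C_RT_CTX_SIZE == (12 * 8),
	      "save area size mismatch");

int main(void)
{
	return 0;
}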
diff --git a/services/std_svc/rmmd/trp/linker.ld.S b/services/std_svc/rmmd/trp/linker.ld.S
new file mode 100644
index 0000000..9895cf9
--- /dev/null
+++ b/services/std_svc/rmmd/trp/linker.ld.S
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/bl_common.ld.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+/* The image is mapped using 4K pages, so sections with different
+ * properties must be aligned at that granularity. */
+PAGE_SIZE_4K = 4096;
+
+OUTPUT_FORMAT("elf64-littleaarch64")
+OUTPUT_ARCH(aarch64)
+ENTRY(trp_head)
+
+MEMORY {
+ RAM (rwx): ORIGIN = RMM_BASE, LENGTH = RMM_LIMIT - RMM_BASE
+}
+
+
+SECTIONS
+{
+ . = RMM_BASE;
+
+ .text : {
+ *(.head.text)
+ . = ALIGN(8);
+ *(.text*)
+ } >RAM
+
+ . = ALIGN(PAGE_SIZE_4K);
+
+ .rodata : {
+ *(.rodata*)
+ } >RAM
+
+ . = ALIGN(PAGE_SIZE_4K);
+
+ __RW_START__ = . ;
+
+ .data : {
+ *(.data*)
+ } >RAM
+
+ .bss (NOLOAD) : {
+ __BSS_START__ = .;
+ *(.bss*)
+ __BSS_END__ = .;
+ } >RAM
+ __BSS_SIZE__ = SIZEOF(.bss);
+
+
+ STACK_SECTION >RAM
+
+
+ /*
+ * Define a linker symbol to mark the end of the RW memory area for this
+ * image.
+ */
+ __RW_END__ = .;
+ __RMM_END__ = .;
+
+
+ /DISCARD/ : { *(.dynstr*) }
+ /DISCARD/ : { *(.dynamic*) }
+ /DISCARD/ : { *(.plt*) }
+ /DISCARD/ : { *(.interp*) }
+ /DISCARD/ : { *(.gnu*) }
+ /DISCARD/ : { *(.note*) }
+}
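
Because .text, .rodata and the RW data are mapped with different attributes, the ALIGN(PAGE_SIZE_4K) statements above place every boundary between them on a 4K page boundary. A tiny sketch of the alignment check this implies; the addresses used are made up for illustration and are not real RMM addresses.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_4K	4096U

/* True if addr sits on a 4K page boundary. */
static int is_page_aligned(uint64_t addr)
{
	return (addr & (PAGE_SIZE_4K - 1U)) == 0U;
}

int main(void)
{
	/* Illustrative section boundaries only. */
	uint64_t rodata_start = 0x40001000ULL;
	uint64_t rw_start     = 0x40003000ULL;

	printf(".rodata aligned:  %d\n", is_page_aligned(rodata_start));
	printf("RW start aligned: %d\n", is_page_aligned(rw_start));
	return 0;
}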
diff --git a/services/std_svc/rmmd/trp/trp.mk b/services/std_svc/rmmd/trp/trp.mk
new file mode 100644
index 0000000..b7bd317
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp.mk
@@ -0,0 +1,27 @@
+#
+# Copyright (c) 2021-2023 Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+RMM_SOURCES += services/std_svc/rmmd/trp/trp_entry.S \
+ services/std_svc/rmmd/trp/trp_main.c \
+ services/std_svc/rmmd/trp/trp_helpers.c
+
+RMM_DEFAULT_LINKER_SCRIPT_SOURCE := services/std_svc/rmmd/trp/linker.ld.S
+
+ifneq ($(findstring gcc,$(notdir $(LD))),)
+ RMM_LDFLAGS += -Wl,--sort-section=alignment
+else ifneq ($(findstring ld,$(notdir $(LD))),)
+ RMM_LDFLAGS += --sort-section=alignment
+endif
+
+# Include the platform-specific TRP Makefile
+# If no platform-specific TRP Makefile exists, it means TRP is not supported
+# on this platform.
+TRP_PLAT_MAKEFILE := $(wildcard ${PLAT_DIR}/trp/trp-${PLAT}.mk)
+ifeq (,${TRP_PLAT_MAKEFILE})
+ $(error TRP is not supported on platform ${PLAT})
+else
+ include ${TRP_PLAT_MAKEFILE}
+endif
diff --git a/services/std_svc/rmmd/trp/trp_entry.S b/services/std_svc/rmmd/trp/trp_entry.S
new file mode 100644
index 0000000..3e1d8c9
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp_entry.S
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <services/rmmd_svc.h>
+
+#include <platform_def.h>
+#include "trp_private.h"
+
+.global trp_head
+.global trp_smc
+
+.section ".head.text", "ax"
+
+ /* ---------------------------------------------
+ * Populate the params in x0-x7 from the pointer
+ * to the smc args structure in x0.
+ * ---------------------------------------------
+ */
+ .macro restore_args_call_smc
+ ldp x6, x7, [x0, #TRP_ARG6]
+ ldp x4, x5, [x0, #TRP_ARG4]
+ ldp x2, x3, [x0, #TRP_ARG2]
+ ldp x0, x1, [x0, #TRP_ARG0]
+ smc #0
+ .endm
+
+ /* ---------------------------------------------
+ * Entry point for TRP
+ * ---------------------------------------------
+ */
+trp_head:
+ /*
+ * Stash arguments from previous boot stage
+ */
+ mov x20, x0
+ mov x21, x1
+ mov x22, x2
+ mov x23, x3
+
+ /*
+ * Validate CPUId before allocating a stack.
+ */
+ cmp x20, #PLATFORM_CORE_COUNT
+ b.lo 1f
+
+ mov_imm x0, RMM_BOOT_COMPLETE
+ mov_imm x1, E_RMM_BOOT_CPU_ID_OUT_OF_RANGE
+ smc #0
+
+ /* EL3 should never return back here, so panic if it does */
+ b trp_panic
+
+1:
+ bl plat_set_my_stack
+
+ /*
+ * Find out whether this is a cold or warm boot
+ */
+ ldr x1, cold_boot_flag
+ cbz x1, warm_boot
+
+ /*
+ * Update cold boot flag to indicate cold boot is done
+ */
+ adr x2, cold_boot_flag
+ str xzr, [x2]
+
+ /* ---------------------------------------------
+ * Zero out BSS section
+ * ---------------------------------------------
+ */
+ ldr x0, =__BSS_START__
+ ldr x1, =__BSS_SIZE__
+ bl zeromem
+
+ mov x0, x20
+ mov x1, x21
+ mov x2, x22
+ mov x3, x23
+ bl trp_setup
+ bl trp_main
+ b 1f
+
+warm_boot:
+ mov x0, x20
+ mov x1, x21
+ mov x2, x22
+ mov x3, x23
+ bl trp_validate_warmboot_args
+ cbnz x0, trp_panic /* Failed to validate warmboot args */
+
+1:
+ mov_imm x0, RMM_BOOT_COMPLETE
+ mov x1, xzr /* RMM_BOOT_SUCCESS */
+ smc #0
+ b trp_handler
+
+trp_panic:
+ no_ret plat_panic_handler
+
+ /*
+ * Flag to mark if it is a cold boot.
+ * 1: cold boot, 0: warm boot.
+ */
+.align 3
+cold_boot_flag:
+ .dword 1
+
+ /* ---------------------------------------------
+ * Direct SMC call to BL31 service provided by
+ * RMM Dispatcher
+ * ---------------------------------------------
+ */
+func trp_smc
+ restore_args_call_smc
+ ret
+endfunc trp_smc
+
+ /* ---------------------------------------------
+ * RMI call handler
+ * ---------------------------------------------
+ */
+func trp_handler
+ /*
+ * Save the link register and X4; as per SMCCC v1.2, X4 must be
+ * preserved unless it carries a result, as specified in the
+ * function definition.
+ */
+ stp x4, lr, [sp, #-16]!
+
+ /*
+ * Zero the space for X0-X3 in trp_smc_result structure
+ * and pass its address as the last argument.
+ */
+ stp xzr, xzr, [sp, #-16]!
+ stp xzr, xzr, [sp, #-16]!
+ mov x7, sp
+
+ bl trp_rmi_handler
+
+ ldp x1, x2, [sp], #16
+ ldp x3, x4, [sp], #16
+ ldp x5, lr, [sp], #16
+
+ ldr x0, =RMM_RMI_REQ_COMPLETE
+ smc #0
+
+ b trp_handler
+endfunc trp_handler
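
trp_head distinguishes cold boot from warm boot using the in-image cold_boot_flag: the flag starts at 1, the first (cold) boot clears it, and every later entry takes the warm-boot path. A minimal C model of that flag handling follows; it deliberately ignores the multi-CPU synchronisation concerns of the real entry path and is for illustration only.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors cold_boot_flag in trp_entry.S: 1 = cold boot, 0 = warm boot. */
static unsigned long cold_boot_flag = 1;

static bool take_cold_boot_path(void)
{
	if (cold_boot_flag != 0) {
		/* First entry: clear the flag so later entries go warm. */
		cold_boot_flag = 0;
		return true;
	}
	return false;
}

int main(void)
{
	printf("entry 0: %s boot\n", take_cold_boot_path() ? "cold" : "warm");
	printf("entry 1: %s boot\n", take_cold_boot_path() ? "cold" : "warm");
	return 0;
}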
diff --git a/services/std_svc/rmmd/trp/trp_helpers.c b/services/std_svc/rmmd/trp/trp_helpers.c
new file mode 100644
index 0000000..159f3a5
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp_helpers.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <plat/common/platform.h>
+#include <services/rmmd_svc.h>
+#include "trp_private.h"
+
+/*
+ * Per-CPU data structure used to populate SMC parameters in C code; assembler
+ * code uses a pointer to this structure to load x0-x7.
+ */
+static trp_args_t trp_smc_args[PLATFORM_CORE_COUNT];
+
+/*
+ * Set the arguments for SMC call
+ */
+trp_args_t *set_smc_args(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint32_t linear_id;
+ trp_args_t *pcpu_smc_args;
+
+ /*
+ * Return to the Secure Monitor by raising an SMC. The results of
+ * the service are passed as arguments to the SMC.
+ */
+ linear_id = plat_my_core_pos();
+ pcpu_smc_args = &trp_smc_args[linear_id];
+ write_trp_arg(pcpu_smc_args, TRP_ARG0, arg0);
+ write_trp_arg(pcpu_smc_args, TRP_ARG1, arg1);
+ write_trp_arg(pcpu_smc_args, TRP_ARG2, arg2);
+ write_trp_arg(pcpu_smc_args, TRP_ARG3, arg3);
+ write_trp_arg(pcpu_smc_args, TRP_ARG4, arg4);
+ write_trp_arg(pcpu_smc_args, TRP_ARG5, arg5);
+ write_trp_arg(pcpu_smc_args, TRP_ARG6, arg6);
+ write_trp_arg(pcpu_smc_args, TRP_ARG7, arg7);
+
+ return pcpu_smc_args;
+}
+
+/*
+ * Abort the boot process with the reason given in err.
+ */
+__dead2 void trp_boot_abort(uint64_t err)
+{
+ (void)trp_smc(set_smc_args(RMM_BOOT_COMPLETE, err, 0, 0, 0, 0, 0, 0));
+ panic();
+}
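
set_smc_args() fills the per-CPU argument block that restore_args_call_smc later loads into x0-x7. The host-side sketch below models that usage pattern: the register block is assumed to be eight consecutive 64-bit slots addressed by byte offset, trp_smc is stubbed out, and the function ID passed is a placeholder rather than a real RMM FID.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: eight 64-bit argument slots, indexed by byte offset. */
typedef struct {
	uint64_t regs[8];
} trp_args_t;

#define TRP_ARG0	0x0	/* illustrative offsets, 8 bytes apart */
#define write_trp_arg(args, offset, val) \
	((args)->regs[(offset) >> 3] = (val))

static trp_args_t smc_args;

static trp_args_t *set_smc_args(uint64_t a0, uint64_t a1)
{
	write_trp_arg(&smc_args, TRP_ARG0, a0);
	write_trp_arg(&smc_args, TRP_ARG0 + 8, a1);
	return &smc_args;
}

/* Stub standing in for the real SMC trap into EL3. */
static uint64_t trp_smc(trp_args_t *args)
{
	printf("SMC fid=0x%llx arg1=0x%llx\n",
	       (unsigned long long)args->regs[0],
	       (unsigned long long)args->regs[1]);
	return 0;
}

int main(void)
{
	/* e.g. report a status code back to EL3 (placeholder FID). */
	(void)trp_smc(set_smc_args(0x1234ULL, 0ULL));
	return 0;
}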
diff --git a/services/std_svc/rmmd/trp/trp_main.c b/services/std_svc/rmmd/trp/trp_main.c
new file mode 100644
index 0000000..33f2fb0
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp_main.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <plat/common/platform.h>
+#include <services/rmm_core_manifest.h>
+#include <services/rmmd_svc.h>
+#include <services/trp/platform_trp.h>
+#include <trp_helpers.h>
+#include "trp_private.h"
+
+#include <platform_def.h>
+
+/* Parameters received from the previous image */
+static unsigned int trp_boot_abi_version;
+static uintptr_t trp_shared_region_start;
+
+/* Parameters received from boot manifest */
+uint32_t trp_boot_manifest_version;
+
+/*******************************************************************************
+ * Setup function for TRP.
+ ******************************************************************************/
+void trp_setup(uint64_t x0,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3)
+{
+ /*
+ * Validate boot parameters
+ *
+ * According to the Boot Interface ABI v.0.1,
+ * the parameters received from EL3 are:
+ * x0: CPUID (verified earlier, so not used)
+ * x1: Boot Interface version
+ * x2: PLATFORM_CORE_COUNT
+ * x3: Pointer to the shared memory area.
+ */
+
+ (void)x0;
+
+ if (TRP_RMM_EL3_VERSION_GET_MAJOR(x1) != TRP_RMM_EL3_ABI_VERS_MAJOR) {
+ trp_boot_abort(E_RMM_BOOT_VERSION_MISMATCH);
+ }
+
+ if ((void *)x3 == NULL) {
+ trp_boot_abort(E_RMM_BOOT_INVALID_SHARED_BUFFER);
+ }
+
+ if (x2 > TRP_PLATFORM_CORE_COUNT) {
+ trp_boot_abort(E_RMM_BOOT_CPUS_OUT_OF_RANGE);
+ }
+
+ trp_boot_abi_version = x1;
+ trp_shared_region_start = x3;
+ flush_dcache_range((uintptr_t)&trp_boot_abi_version,
+ sizeof(trp_boot_abi_version));
+ flush_dcache_range((uintptr_t)&trp_shared_region_start,
+ sizeof(trp_shared_region_start));
+
+ /* Perform early platform-specific setup */
+ trp_early_platform_setup((struct rmm_manifest *)trp_shared_region_start);
+}
+
+int trp_validate_warmboot_args(uint64_t x0, uint64_t x1,
+ uint64_t x2, uint64_t x3)
+{
+ /*
+ * Validate boot parameters for warm boot
+ *
+ * According to the Boot Interface ABI v.0.1, the parameters
+ * received from EL3 during warm boot are:
+ *
+ * x0: CPUID (verified earlier so not used here)
+ * [x1:x3]: RES0
+ */
+
+ (void)x0;
+
+ return ((x1 | x2 | x3) == 0UL) ? 0 : E_RMM_BOOT_UNKNOWN;
+}
+
+/* Main function for TRP */
+void trp_main(void)
+{
+ NOTICE("TRP: %s\n", version_string);
+ NOTICE("TRP: %s\n", build_message);
+ NOTICE("TRP: Supported RMM-EL3 Interface ABI: v.%u.%u\n",
+ TRP_RMM_EL3_ABI_VERS_MAJOR, TRP_RMM_EL3_ABI_VERS_MINOR);
+ NOTICE("TRP: Boot Manifest Version: v.%u.%u\n",
+ RMMD_GET_MANIFEST_VERSION_MAJOR(trp_boot_manifest_version),
+ RMMD_GET_MANIFEST_VERSION_MINOR(trp_boot_manifest_version));
+ INFO("TRP: Memory base: 0x%lx\n", (unsigned long)RMM_BASE);
+ INFO("TRP: Shared region base address: 0x%lx\n",
+ (unsigned long)trp_shared_region_start);
+ INFO("TRP: Total size: 0x%lx bytes\n",
+ (unsigned long)(RMM_END - RMM_BASE));
+ INFO("TRP: RMM-EL3 Interface ABI reported by EL3: v.%u.%u\n",
+ TRP_RMM_EL3_VERSION_GET_MAJOR(trp_boot_abi_version),
+ TRP_RMM_EL3_VERSION_GET_MINOR(trp_boot_abi_version));
+}
+
+/*******************************************************************************
+ * Return the RMI version to the Normal world
+ ******************************************************************************/
+static void trp_ret_rmi_version(unsigned long long rmi_version,
+ struct trp_smc_result *smc_ret)
+{
+ if (rmi_version != RMI_ABI_VERSION) {
+ smc_ret->x[0] = RMI_ERROR_INPUT;
+ } else {
+ smc_ret->x[0] = RMI_SUCCESS;
+ }
+ VERBOSE("RMM version is %u.%u\n", RMI_ABI_VERSION_MAJOR,
+ RMI_ABI_VERSION_MINOR);
+ smc_ret->x[1] = RMI_ABI_VERSION;
+ smc_ret->x[2] = RMI_ABI_VERSION;
+}
+
+/*******************************************************************************
+ * Transitioning granule of NON-SECURE type to REALM type
+ ******************************************************************************/
+static void trp_asc_mark_realm(unsigned long long x1,
+ struct trp_smc_result *smc_ret)
+{
+ VERBOSE("Delegating granule 0x%llx\n", x1);
+ smc_ret->x[0] = trp_smc(set_smc_args(RMM_GTSI_DELEGATE, x1,
+ 0UL, 0UL, 0UL, 0UL, 0UL, 0UL));
+
+ if (smc_ret->x[0] != 0ULL) {
+ ERROR("Granule transition from NON-SECURE type to REALM type "
+ "failed 0x%llx\n", smc_ret->x[0]);
+ }
+}
+
+/*******************************************************************************
+ * Transitioning granule of REALM type to NON-SECURE type
+ ******************************************************************************/
+static void trp_asc_mark_nonsecure(unsigned long long x1,
+ struct trp_smc_result *smc_ret)
+{
+ VERBOSE("Undelegating granule 0x%llx\n", x1);
+ smc_ret->x[0] = trp_smc(set_smc_args(RMM_GTSI_UNDELEGATE, x1,
+ 0UL, 0UL, 0UL, 0UL, 0UL, 0UL));
+
+ if (smc_ret->x[0] != 0ULL) {
+ ERROR("Granule transition from REALM type to NON-SECURE type "
+ "failed 0x%llx\n", smc_ret->x[0]);
+ }
+}
+
+/*******************************************************************************
+ * Main RMI SMC handler function
+ ******************************************************************************/
+void trp_rmi_handler(unsigned long fid,
+ unsigned long long x1, unsigned long long x2,
+ unsigned long long x3, unsigned long long x4,
+ unsigned long long x5, unsigned long long x6,
+ struct trp_smc_result *smc_ret)
+{
+ /* Not used in the current implementation */
+ (void)x2;
+ (void)x3;
+ (void)x4;
+ (void)x5;
+ (void)x6;
+
+ switch (fid) {
+ case RMI_RMM_REQ_VERSION:
+ trp_ret_rmi_version(x1, smc_ret);
+ break;
+ case RMI_RMM_GRANULE_DELEGATE:
+ trp_asc_mark_realm(x1, smc_ret);
+ break;
+ case RMI_RMM_GRANULE_UNDELEGATE:
+ trp_asc_mark_nonsecure(x1, smc_ret);
+ break;
+ default:
+ ERROR("Invalid SMC code to %s, FID %lx\n", __func__, fid);
+ smc_ret->x[0] = SMC_UNK;
+ }
+}
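
trp_ret_rmi_version() compares the requested RMI version against RMI_ABI_VERSION, which packs major and minor into one word as ((major & 0x7FFF) << 16) | (minor & 0xFFFF) (see trp_private.h below). A standalone sketch of that encoding and comparison, using illustrative version numbers:

#include <stdint.h>
#include <stdio.h>

/* Same packing as RMI_ABI_VERSION in trp_private.h. */
#define RMI_VERSION(major, minor) \
	((((major) & 0x7FFFU) << 16) | ((minor) & 0xFFFFU))

int main(void)
{
	uint32_t supported = RMI_VERSION(0, 0);
	uint32_t requested = RMI_VERSION(1, 0);

	/* Mirrors trp_ret_rmi_version(): an exact match succeeds, anything
	 * else is reported as an input error, and the supported version is
	 * always returned to the caller. */
	if (requested == supported) {
		printf("RMI_SUCCESS, version %u.%u\n",
		       (unsigned)((supported >> 16) & 0x7FFFU),
		       (unsigned)(supported & 0xFFFFU));
	} else {
		printf("RMI_ERROR_INPUT, supported version %u.%u\n",
		       (unsigned)((supported >> 16) & 0x7FFFU),
		       (unsigned)(supported & 0xFFFFU));
	}
	return 0;
}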
diff --git a/services/std_svc/rmmd/trp/trp_private.h b/services/std_svc/rmmd/trp/trp_private.h
new file mode 100644
index 0000000..d8c6960
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp_private.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TRP_PRIVATE_H
+#define TRP_PRIVATE_H
+
+#include <services/rmmd_svc.h>
+#include <trp_helpers.h>
+
+/* Definitions for RMM-EL3 Interface ABI VERSION */
+#define TRP_RMM_EL3_ABI_VERS_MAJOR RMM_EL3_IFC_VERSION_MAJOR
+#define TRP_RMM_EL3_ABI_VERS_MINOR RMM_EL3_IFC_VERSION_MINOR
+#define TRP_RMM_EL3_ABI_VERS (((TRP_RMM_EL3_ABI_VERS_MAJOR & 0x7FFF) << 16) | \
+ (TRP_RMM_EL3_ABI_VERS_MINOR & 0xFFFF))
+
+#define TRP_PLATFORM_CORE_COUNT PLATFORM_CORE_COUNT
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#define write_trp_arg(args, offset, val) (((args)->regs[offset >> 3]) \
+ = val)
+/* RMI SMC64 FIDs handled by the TRP */
+#define RMI_RMM_REQ_VERSION SMC64_RMI_FID(U(0))
+#define RMI_RMM_GRANULE_DELEGATE SMC64_RMI_FID(U(1))
+#define RMI_RMM_GRANULE_UNDELEGATE SMC64_RMI_FID(U(2))
+
+/* Definitions for RMI VERSION */
+#define RMI_ABI_VERSION_MAJOR U(0x0)
+#define RMI_ABI_VERSION_MINOR U(0x0)
+#define RMI_ABI_VERSION (((RMI_ABI_VERSION_MAJOR & 0x7FFF) \
+ << 16) | \
+ (RMI_ABI_VERSION_MINOR & 0xFFFF))
+
+#define TRP_RMM_EL3_VERSION_GET_MAJOR(x) \
+ RMM_EL3_IFC_VERSION_GET_MAJOR((x))
+#define TRP_RMM_EL3_VERSION_GET_MINOR(x) \
+ RMM_EL3_IFC_VERSION_GET_MAJOR_MINOR((x))
+
+/* Helper to issue SMC calls to BL31 */
+uint64_t trp_smc(trp_args_t *);
+
+/* The main function, to be executed only by the primary CPU */
+void trp_main(void);
+
+/* Setup TRP. Executed only by Primary CPU */
+void trp_setup(uint64_t x0,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3);
+
+/* Validate arguments for warm boot only */
+int trp_validate_warmboot_args(uint64_t x0, uint64_t x1,
+ uint64_t x2, uint64_t x3);
+
+#endif /* __ASSEMBLER__ */
+#endif /* TRP_PRIVATE_H */
diff --git a/services/std_svc/sdei/sdei_dispatch.S b/services/std_svc/sdei/sdei_dispatch.S
new file mode 100644
index 0000000..8449e4b
--- /dev/null
+++ b/services/std_svc/sdei/sdei_dispatch.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl begin_sdei_synchronous_dispatch
+
+/*
+ * void begin_sdei_synchronous_dispatch(jmp_buf *buffer);
+ *
+ * Begin SDEI dispatch synchronously by setting up a jump point, and exiting
+ * EL3. This jump point is jumped to by the dispatcher after the event is
+ * completed by the client.
+ */
+func begin_sdei_synchronous_dispatch
+ stp x30, xzr, [sp, #-16]!
+ bl setjmp
+ cbz x0, 1f
+ ldp x30, xzr, [sp], #16
+ ret
+1:
+ b el3_exit
+endfunc begin_sdei_synchronous_dispatch
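
begin_sdei_synchronous_dispatch() relies on the classic setjmp/longjmp pattern: setjmp records a return point before EL3 is exited, and the dispatcher later longjmps back to it once the client completes the event (see end_sdei_synchronous_dispatch() in sdei_intr_mgmt.c). A plain C illustration of that control flow, with the event handling reduced to a stub:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf dispatch_jmp;

/* Stands in for the client handling the event and completing it. */
static void complete_event(void)
{
	printf("event handled, completing\n");
	longjmp(dispatch_jmp, 1);
}

int main(void)
{
	if (setjmp(dispatch_jmp) == 0) {
		/* First return from setjmp: "exit EL3" and dispatch. */
		printf("dispatching event\n");
		complete_event();
	} else {
		/* Second return, via longjmp: dispatch has completed. */
		printf("back at the jump point\n");
	}
	return 0;
}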
diff --git a/services/std_svc/sdei/sdei_event.c b/services/std_svc/sdei/sdei_event.c
new file mode 100644
index 0000000..e0c7971
--- /dev/null
+++ b/services/std_svc/sdei/sdei_event.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <lib/utils.h>
+
+#include "sdei_private.h"
+
+#define MAP_OFF(_map, _mapping) ((_map) - (_mapping)->map)
+
+/*
+ * Get SDEI entry with the given mapping: on success, returns pointer to SDEI
+ * entry. On error, returns NULL.
+ *
+ * Both shared and private maps are stored in a single-dimensional array.
+ * Private event entries are kept per PE, forming a 2D array.
+ */
+sdei_entry_t *get_event_entry(sdei_ev_map_t *map)
+{
+ const sdei_mapping_t *mapping;
+ sdei_entry_t *cpu_priv_base;
+ unsigned int base_idx;
+ long int idx;
+
+ if (is_event_private(map)) {
+ /*
+ * For a private map, find the index of the mapping in the
+ * array.
+ */
+ mapping = SDEI_PRIVATE_MAPPING();
+ idx = MAP_OFF(map, mapping);
+
+ /* Base of private mappings for this CPU */
+ base_idx = plat_my_core_pos() * ((unsigned int) mapping->num_maps);
+ cpu_priv_base = &sdei_private_event_table[base_idx];
+
+ /*
+ * Return the address of the entry at the same index in the
+ * per-CPU event entry.
+ */
+ return &cpu_priv_base[idx];
+ } else {
+ mapping = SDEI_SHARED_MAPPING();
+ idx = MAP_OFF(map, mapping);
+
+ return &sdei_shared_event_table[idx];
+ }
+}
+
+/*
+ * Find event mapping for a given interrupt number: On success, returns pointer
+ * to the event mapping. On error, returns NULL.
+ */
+sdei_ev_map_t *find_event_map_by_intr(unsigned int intr_num, bool shared)
+{
+ const sdei_mapping_t *mapping;
+ sdei_ev_map_t *map;
+ unsigned int i;
+
+ /*
+ * Look for a match in private and shared mappings, as requested. This
+ * is a linear search. However, if the mappings are required to be
+ * sorted, for large maps, we could consider binary search.
+ */
+ mapping = shared ? SDEI_SHARED_MAPPING() : SDEI_PRIVATE_MAPPING();
+ iterate_mapping(mapping, i, map) {
+ if (map->intr == intr_num)
+ return map;
+ }
+
+ return NULL;
+}
+
+/*
+ * Find event mapping for a given event number: On success returns pointer to
+ * the event mapping. On error, returns NULL.
+ */
+sdei_ev_map_t *find_event_map(int ev_num)
+{
+ const sdei_mapping_t *mapping;
+ sdei_ev_map_t *map;
+ unsigned int i, j;
+
+ /*
+ * Iterate through mappings to find a match. This is a linear search.
+ * However, if the mappings are required to be sorted, for large maps,
+ * we could consider binary search.
+ */
+ for_each_mapping_type(i, mapping) {
+ iterate_mapping(mapping, j, map) {
+ if (map->ev_num == ev_num)
+ return map;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Return the total number of currently registered SDEI events.
+ */
+int sdei_get_registered_event_count(void)
+{
+ const sdei_mapping_t *mapping;
+ sdei_ev_map_t *map;
+ unsigned int i;
+ unsigned int j;
+ int count = 0;
+
+ /* Add up reg counts for each mapping. */
+ for_each_mapping_type(i, mapping) {
+ iterate_mapping(mapping, j, map) {
+ count += map->reg_count;
+ }
+ }
+
+ return count;
+}
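
get_event_entry() above treats the private event table as a 2D array flattened into one dimension: each PE owns num_maps consecutive entries, so the entry for a given map on a given PE lives at core_pos * num_maps + map_index. A small sketch of that indexing with made-up table sizes:

#include <stdio.h>

#define CORE_COUNT	4U
#define NUM_PRIV_MAPS	3U

/* One entry per (PE, private map) pair, flattened row-major by PE. */
static int private_event_table[CORE_COUNT * NUM_PRIV_MAPS];

static int *priv_entry(unsigned int core_pos, unsigned int map_idx)
{
	unsigned int base_idx = core_pos * NUM_PRIV_MAPS;

	return &private_event_table[base_idx + map_idx];
}

int main(void)
{
	*priv_entry(2U, 1U) = 42;
	printf("entry(2,1) = %d (flat index %u)\n",
	       *priv_entry(2U, 1U), (2U * NUM_PRIV_MAPS) + 1U);
	return 0;
}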
diff --git a/services/std_svc/sdei/sdei_intr_mgmt.c b/services/std_svc/sdei/sdei_intr_mgmt.c
new file mode 100644
index 0000000..3bdf4a2
--- /dev/null
+++ b/services/std_svc/sdei/sdei_intr_mgmt.c
@@ -0,0 +1,774 @@
+/*
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <arch_features.h>
+#include <bl31/ehf.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/cassert.h>
+#include <services/sdei.h>
+
+#include "sdei_private.h"
+
+/* x0-x17 GPREGS context */
+#define SDEI_SAVED_GPREGS 18U
+
+/* Maximum preemption nesting levels: Critical priority and Normal priority */
+#define MAX_EVENT_NESTING 2U
+
+/* Per-CPU SDEI state access macro */
+#define sdei_get_this_pe_state() (&cpu_state[plat_my_core_pos()])
+
+/* Structure to store information about an outstanding dispatch */
+typedef struct sdei_dispatch_context {
+ sdei_ev_map_t *map;
+ uint64_t x[SDEI_SAVED_GPREGS];
+ jmp_buf *dispatch_jmp;
+
+ /* Exception state registers */
+ uint64_t elr_el3;
+ uint64_t spsr_el3;
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ /* CVE-2018-3639 mitigation state */
+ uint64_t disable_cve_2018_3639;
+#endif
+} sdei_dispatch_context_t;
+
+/* Per-CPU SDEI state data */
+typedef struct sdei_cpu_state {
+ sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
+ unsigned short stack_top; /* Empty ascending */
+ bool pe_masked;
+ bool pending_enables;
+} sdei_cpu_state_t;
+
+/* SDEI states for all cores in the system */
+static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];
+
+int64_t sdei_pe_mask(void)
+{
+ int64_t ret = 0;
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+
+ /*
+ * Return value indicates whether this call had any effect in the mask
+ * status of this PE.
+ */
+ if (!state->pe_masked) {
+ state->pe_masked = true;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+void sdei_pe_unmask(void)
+{
+ unsigned int i;
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+ uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
+
+ /*
+ * If there are pending enables, iterate through the private mappings
+ * and enable those bound maps that are in enabled state. Also, iterate
+ * through shared mappings and enable interrupts of events that are
+ * targeted to this PE.
+ */
+ if (state->pending_enables) {
+ for_each_private_map(i, map) {
+ se = get_event_entry(map);
+ if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
+ plat_ic_enable_interrupt(map->intr);
+ }
+
+ for_each_shared_map(i, map) {
+ se = get_event_entry(map);
+
+ sdei_map_lock(map);
+ if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
+ (se->reg_flags == SDEI_REGF_RM_PE) &&
+ (se->affinity == my_mpidr)) {
+ plat_ic_enable_interrupt(map->intr);
+ }
+ sdei_map_unlock(map);
+ }
+ }
+
+ state->pending_enables = false;
+ state->pe_masked = false;
+}
+
+/* Push a dispatch context to the dispatch stack */
+static sdei_dispatch_context_t *push_dispatch(void)
+{
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+ sdei_dispatch_context_t *disp_ctx;
+
+ /* Cannot have more than max events */
+ assert(state->stack_top < MAX_EVENT_NESTING);
+
+ disp_ctx = &state->dispatch_stack[state->stack_top];
+ state->stack_top++;
+
+ return disp_ctx;
+}
+
+/* Pop a dispatch context from the dispatch stack */
+static sdei_dispatch_context_t *pop_dispatch(void)
+{
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+
+ if (state->stack_top == 0U)
+ return NULL;
+
+ assert(state->stack_top <= MAX_EVENT_NESTING);
+
+ state->stack_top--;
+
+ return &state->dispatch_stack[state->stack_top];
+}
+
+/* Retrieve the context at the top of dispatch stack */
+static sdei_dispatch_context_t *get_outstanding_dispatch(void)
+{
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+
+ if (state->stack_top == 0U)
+ return NULL;
+
+ assert(state->stack_top <= MAX_EVENT_NESTING);
+
+ return &state->dispatch_stack[state->stack_top - 1U];
+}
+
+static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
+ void *tgt_ctx)
+{
+ sdei_dispatch_context_t *disp_ctx;
+ const gp_regs_t *tgt_gpregs;
+ const el3_state_t *tgt_el3;
+
+ assert(tgt_ctx != NULL);
+ tgt_gpregs = get_gpregs_ctx(tgt_ctx);
+ tgt_el3 = get_el3state_ctx(tgt_ctx);
+
+ disp_ctx = push_dispatch();
+ assert(disp_ctx != NULL);
+ disp_ctx->map = map;
+
+ /* Save general purpose and exception registers */
+ memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
+ disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
+ disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);
+
+ return disp_ctx;
+}
+
+static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
+{
+ gp_regs_t *tgt_gpregs;
+ el3_state_t *tgt_el3;
+
+ assert(tgt_ctx != NULL);
+ tgt_gpregs = get_gpregs_ctx(tgt_ctx);
+ tgt_el3 = get_el3state_ctx(tgt_ctx);
+
+ CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
+ foo);
+
+ /* Restore general purpose and exception registers */
+ memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
+ write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
+ write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ cve_2018_3639_t *tgt_cve_2018_3639;
+ tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);
+
+ /* Restore CVE-2018-3639 mitigation state */
+ write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
+ disp_ctx->disable_cve_2018_3639);
+#endif
+}
+
+static void save_secure_context(void)
+{
+ cm_el1_sysregs_context_save(SECURE);
+}
+
+/* Restore Secure context and arrange to resume it at the next ERET */
+static void restore_and_resume_secure_context(void)
+{
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+}
+
+/*
+ * Restore Non-secure context and arrange to resume it at the next ERET. Return
+ * pointer to the Non-secure context.
+ */
+static cpu_context_t *restore_and_resume_ns_context(void)
+{
+ cpu_context_t *ns_ctx;
+
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ ns_ctx = cm_get_context(NON_SECURE);
+ assert(ns_ctx != NULL);
+
+ return ns_ctx;
+}
+
+/*
+ * Prepare for ERET:
+ * - Set the ELR to the registered handler address
+ * - Set the SPSR register as described in the SDEI documentation and
+ * the AArch64.TakeException() pseudocode function in
+ * ARM DDI 0487F.c page J1-7635
+ */
+
+static void sdei_set_elr_spsr(sdei_entry_t *se, sdei_dispatch_context_t *disp_ctx)
+{
+ unsigned int client_el = sdei_client_el();
+ u_register_t sdei_spsr = SPSR_64(client_el, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+
+ u_register_t interrupted_pstate = disp_ctx->spsr_el3;
+
+ /* Check the SPAN bit in the client el SCTLR */
+ u_register_t client_el_sctlr;
+
+ if (client_el == MODE_EL2) {
+ client_el_sctlr = read_sctlr_el2();
+ } else {
+ client_el_sctlr = read_sctlr_el1();
+ }
+
+ /*
+ * Check whether to force the PAN bit or to use the value from the
+ * interrupted EL, according to the check described in
+ * TakeException. Since the client can only be Non-secure EL2 or
+ * EL1, some of the conditions in ElIsInHost() are known to always
+ * be true.
+ * When client_el is EL2, SCTLR_EL2 is known to have a SPAN bit,
+ * because the conditions HCR_EL2.E2H = 1 and HCR_EL2.TGE = 1 have
+ * already been checked.
+ */
+ u_register_t hcr_el2 = read_hcr();
+ bool el_is_in_host = (read_feat_vhe_id_field() != 0U) &&
+ (hcr_el2 & HCR_TGE_BIT) &&
+ (hcr_el2 & HCR_E2H_BIT);
+
+ if (is_feat_pan_supported() &&
+ ((client_el == MODE_EL1) ||
+ (client_el == MODE_EL2 && el_is_in_host)) &&
+ ((client_el_sctlr & SCTLR_SPAN_BIT) == 0U)) {
+ sdei_spsr |= SPSR_PAN_BIT;
+ } else {
+ sdei_spsr |= (interrupted_pstate & SPSR_PAN_BIT);
+ }
+
+ /* If SSBS is implemented, take the value from the client el SCTLR */
+ u_register_t ssbs_enabled = (read_id_aa64pfr1_el1()
+ >> ID_AA64PFR1_EL1_SSBS_SHIFT)
+ & ID_AA64PFR1_EL1_SSBS_MASK;
+ if (ssbs_enabled != SSBS_UNAVAILABLE) {
+ u_register_t ssbs_bit = ((client_el_sctlr & SCTLR_DSSBS_BIT)
+ >> SCTLR_DSSBS_SHIFT)
+ << SPSR_SSBS_SHIFT_AARCH64;
+ sdei_spsr |= ssbs_bit;
+ }
+
+ /* If MTE is implemented in the client el set the TCO bit */
+ if (get_armv8_5_mte_support() >= MTE_IMPLEMENTED_ELX) {
+ sdei_spsr |= SPSR_TCO_BIT_AARCH64;
+ }
+
+ /* Take the DIT field from the pstate of the interrupted el */
+ sdei_spsr |= (interrupted_pstate & SPSR_DIT_BIT);
+
+ cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep, sdei_spsr);
+}
+
+/*
+ * Populate the Non-secure context so that the next ERET will dispatch to the
+ * SDEI client.
+ */
+static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
+ cpu_context_t *ctx, jmp_buf *dispatch_jmp)
+{
+ sdei_dispatch_context_t *disp_ctx;
+
+ /* Push the event and context */
+ disp_ctx = save_event_ctx(map, ctx);
+
+ /*
+ * Setup handler arguments:
+ *
+ * - x0: Event number
+ * - x1: Handler argument supplied at the time of event registration
+ * - x2: Interrupted PC
+ * - x3: Interrupted SPSR
+ */
+ SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
+ SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
+ SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
+ SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);
+
+ /* Setup the elr and spsr register to prepare for ERET */
+ sdei_set_elr_spsr(se, disp_ctx);
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ cve_2018_3639_t *tgt_cve_2018_3639;
+ tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);
+
+ /* Save CVE-2018-3639 mitigation state */
+ disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
+ CTX_CVE_2018_3639_DISABLE);
+
+ /* Force SDEI handler to execute with mitigation enabled by default */
+ write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
+#endif
+
+ disp_ctx->dispatch_jmp = dispatch_jmp;
+}
+
+/* Handle a triggered SDEI interrupt while events were masked on this PE */
+static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
+ sdei_cpu_state_t *state, unsigned int intr_raw)
+{
+ uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
+ bool disable = false;
+
+ /* Nothing to do for event 0 */
+ if (map->ev_num == SDEI_EVENT_0)
+ return;
+
+ /*
+ * For a private event, or for a shared event specifically routed to
+ * this CPU, we disable interrupt, leave the interrupt pending, and do
+ * EOI.
+ */
+ if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
+ disable = true;
+
+ if (se->reg_flags == SDEI_REGF_RM_PE)
+ assert(se->affinity == my_mpidr);
+
+ if (disable) {
+ plat_ic_disable_interrupt(map->intr);
+ plat_ic_set_interrupt_pending(map->intr);
+ plat_ic_end_of_interrupt(intr_raw);
+ state->pending_enables = true;
+
+ return;
+ }
+
+ /*
+ * We just received a shared event with routing set to ANY PE. The
+ * interrupt can't be delegated on this PE as SDEI events are masked.
+ * However, because its routing mode is ANY, it is possible that the
+ * event can be delegated on any other PE that hasn't masked events.
+ * Therefore, we set the interrupt back pending so as to give other
+ * suitable PEs a chance of handling it.
+ */
+ assert(plat_ic_is_spi(map->intr) != 0);
+ plat_ic_set_interrupt_pending(map->intr);
+
+ /*
+ * Leaving the same interrupt pending also means that the same interrupt
+ * can target this PE again as soon as this PE leaves EL3. Whether and
+ * how often that happens depends on the implementation of GIC.
+ *
+ * We therefore call a platform handler to resolve this situation.
+ */
+ plat_sdei_handle_masked_trigger(my_mpidr, map->intr);
+
+ /* This PE is masked. We EOI the interrupt, as it can't be delegated */
+ plat_ic_end_of_interrupt(intr_raw);
+}
+
+/* SDEI main interrupt handler */
+int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
+ void *cookie)
+{
+ sdei_entry_t *se;
+ cpu_context_t *ctx;
+ sdei_ev_map_t *map;
+ const sdei_dispatch_context_t *disp_ctx;
+ unsigned int sec_state;
+ sdei_cpu_state_t *state;
+ uint32_t intr;
+ jmp_buf dispatch_jmp;
+ const uint64_t mpidr = read_mpidr_el1();
+
+ /*
+ * To handle an event, the following conditions must be true:
+ *
+ * 1. Event must be signalled
+ * 2. Event must be enabled
+ * 3. This PE must be a target PE for the event
+ * 4. PE must be unmasked for SDEI
+ * 5. If this is a normal event, no event must be running
+ * 6. If this is a critical event, no critical event must be running
+ *
+ * (1) and (2) are true when this function is running
+ * (3) is enforced in GIC by selecting the appropriate routing option
+ * (4) is satisfied by client calling PE_UNMASK
+ * (5) and (6) are enforced using interrupt priority (the RPR) in the GIC:
+ * - Normal SDEI events belong to Normal SDE priority class
+ * - Critical SDEI events belong to Critical CSDE priority class
+ *
+ * The interrupt has already been acknowledged, and therefore is active,
+ * so no other PE can handle this event while we are at it.
+ *
+ * Find if this is an SDEI interrupt. There must be an event mapped to
+ * this interrupt
+ */
+ intr = plat_ic_get_interrupt_id(intr_raw);
+ map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
+ if (map == NULL) {
+ ERROR("No SDEI map for interrupt %u\n", intr);
+ panic();
+ }
+
+ /*
+ * The received interrupt number must either correspond to event 0, or
+ * must be a bound interrupt.
+ */
+ assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));
+
+ se = get_event_entry(map);
+ state = sdei_get_this_pe_state();
+
+ if (state->pe_masked) {
+ /*
+ * Interrupts received while this PE was masked can't be
+ * dispatched.
+ */
+ SDEI_LOG("interrupt %u on %" PRIx64 " while PE masked\n",
+ map->intr, mpidr);
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ handle_masked_trigger(map, se, state, intr_raw);
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return 0;
+ }
+
+ /* Insert load barrier for signalled SDEI event */
+ if (map->ev_num == SDEI_EVENT_0)
+ dmbld();
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ /* Assert shared event routed to this PE had been configured so */
+ if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
+ assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
+ }
+
+ if (!can_sdei_state_trans(se, DO_DISPATCH)) {
+ SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
+ map->ev_num, se->state);
+
+ /*
+ * If the event is registered, leave the interrupt pending so
+ * that it's delivered when the event is enabled.
+ */
+ if (GET_EV_STATE(se, REGISTERED))
+ plat_ic_set_interrupt_pending(map->intr);
+
+ /*
+ * The interrupt was disabled or unregistered after the handler
+ * started to execute, which means now the interrupt is already
+ * disabled and we just need to EOI the interrupt.
+ */
+ plat_ic_end_of_interrupt(intr_raw);
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return 0;
+ }
+
+ disp_ctx = get_outstanding_dispatch();
+ if (is_event_critical(map)) {
+ /*
+ * If this event is Critical, and if there's an outstanding
+ * dispatch, assert the latter is a Normal dispatch. Critical
+ * events can preempt an outstanding Normal event dispatch.
+ */
+ if (disp_ctx != NULL)
+ assert(is_event_normal(disp_ctx->map));
+ } else {
+ /*
+ * If this event is Normal, assert that there are no outstanding
+ * dispatches. Normal events can't preempt any outstanding event
+ * dispatches.
+ */
+ assert(disp_ctx == NULL);
+ }
+
+ sec_state = get_interrupt_src_ss(flags);
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ SDEI_LOG("ACK %" PRIx64 ", ev:0x%x ss:%d spsr:%lx ELR:%lx\n",
+ mpidr, map->ev_num, sec_state, read_spsr_el3(), read_elr_el3());
+
+ ctx = handle;
+
+ /*
+ * Check if we interrupted secure state. Perform a context switch so
+ * that we can delegate to NS.
+ */
+ if (sec_state == SECURE) {
+ save_secure_context();
+ ctx = restore_and_resume_ns_context();
+ }
+
+ /* Synchronously dispatch event */
+ setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
+ begin_sdei_synchronous_dispatch(&dispatch_jmp);
+
+ /*
+ * We reach here when client completes the event.
+ *
+ * If the cause of dispatch originally interrupted the Secure world,
+ * resume Secure.
+ *
+ * No need to save the Non-secure context ahead of a world switch: the
+ * Non-secure context was fully saved before dispatch, and has been
+ * returned to its pre-dispatch state.
+ */
+ if (sec_state == SECURE)
+ restore_and_resume_secure_context();
+
+ /*
+ * The event was dispatched after receiving SDEI interrupt. With
+ * the event handling completed, EOI the corresponding
+ * interrupt.
+ */
+ if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
+ ERROR("Invalid SDEI mapping: ev=0x%x\n", map->ev_num);
+ panic();
+ }
+ plat_ic_end_of_interrupt(intr_raw);
+
+ return 0;
+}
+
+/*
+ * Explicitly dispatch the given SDEI event.
+ *
+ * When calling this API, the caller must be prepared for the SDEI dispatcher to
+ * restore the Non-secure context and make it active. This call returns only after
+ * the client has completed the dispatch. Then, the Non-secure context will be
+ * active, and the following ERET will return to Non-secure.
+ *
+ * Should the caller require re-entry to Secure, it must restore the Secure
+ * context and program registers for ERET.
+ */
+int sdei_dispatch_event(int ev_num)
+{
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+ cpu_context_t *ns_ctx;
+ sdei_dispatch_context_t *disp_ctx;
+ sdei_cpu_state_t *state;
+ jmp_buf dispatch_jmp;
+
+ /* Can't dispatch if events are masked on this PE */
+ state = sdei_get_this_pe_state();
+ if (state->pe_masked)
+ return -1;
+
+ /* Event 0 can't be dispatched */
+ if (ev_num == SDEI_EVENT_0)
+ return -1;
+
+ /* Locate mapping corresponding to this event */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return -1;
+
+ /* Only explicit events can be dispatched */
+ if (!is_map_explicit(map))
+ return -1;
+
+ /* Examine state of dispatch stack */
+ disp_ctx = get_outstanding_dispatch();
+ if (disp_ctx != NULL) {
+ /*
+ * There's an outstanding dispatch. If the outstanding dispatch
+ * is critical, no more dispatches are possible.
+ */
+ if (is_event_critical(disp_ctx->map))
+ return -1;
+
+ /*
+ * If the outstanding dispatch is Normal, only critical events
+ * can be dispatched.
+ */
+ if (is_event_normal(map))
+ return -1;
+ }
+
+ se = get_event_entry(map);
+ if (!can_sdei_state_trans(se, DO_DISPATCH))
+ return -1;
+
+ /*
+ * Prepare for NS dispatch by restoring the Non-secure context and
+ * marking that as active.
+ */
+ ns_ctx = restore_and_resume_ns_context();
+
+ /* Activate the priority corresponding to the event being dispatched */
+ ehf_activate_priority(sdei_event_priority(map));
+
+ /* Dispatch event synchronously */
+ setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
+ begin_sdei_synchronous_dispatch(&dispatch_jmp);
+
+ /*
+ * We reach here when client completes the event.
+ *
+ * Deactivate the priority level that was activated at the time of
+ * explicit dispatch.
+ */
+ ehf_deactivate_priority(sdei_event_priority(map));
+
+ return 0;
+}
+
+static void end_sdei_synchronous_dispatch(jmp_buf *buffer)
+{
+ longjmp(*buffer, 1);
+}
+
+int sdei_event_complete(bool resume, uint64_t pc)
+{
+ sdei_dispatch_context_t *disp_ctx;
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+ cpu_context_t *ctx;
+ sdei_action_t act;
+ unsigned int client_el = sdei_client_el();
+
+ /* Return error if called without an active event */
+ disp_ctx = get_outstanding_dispatch();
+ if (disp_ctx == NULL)
+ return SDEI_EDENY;
+
+ /* Validate resumption point */
+ if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
+ return SDEI_EDENY;
+
+ map = disp_ctx->map;
+ assert(map != NULL);
+ se = get_event_entry(map);
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
+ if (!can_sdei_state_trans(se, act)) {
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+ return SDEI_EDENY;
+ }
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ /* Having done sanity checks, pop dispatch */
+ (void) pop_dispatch();
+
+ SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
+ map->ev_num, read_spsr_el3(), read_elr_el3());
+
+ /*
+ * Restore Non-secure to how it was originally interrupted. Once done,
+ * it's up-to-date with the saved copy.
+ */
+ ctx = cm_get_context(NON_SECURE);
+ restore_event_ctx(disp_ctx, ctx);
+
+ if (resume) {
+ /*
+ * Complete-and-resume call. Prepare the Non-secure context
+ * (currently active) for complete and resume.
+ */
+ cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
+ MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));
+
+ /*
+ * Make it look as if a synchronous exception were taken at the
+ * supplied Non-secure resumption point. Populate SPSR and
+ * ELR_ELx so that an ERET from there works as expected.
+ *
+ * The assumption is that the client, if necessary, would have
+ * saved any live content in these registers before making this
+ * call.
+ */
+ if (client_el == MODE_EL2) {
+ write_elr_el2(disp_ctx->elr_el3);
+ write_spsr_el2(disp_ctx->spsr_el3);
+ } else {
+ /* EL1 */
+ write_elr_el1(disp_ctx->elr_el3);
+ write_spsr_el1(disp_ctx->spsr_el3);
+ }
+ }
+
+ /* End the outstanding dispatch */
+ end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);
+
+ return 0;
+}
+
+int64_t sdei_event_context(void *handle, unsigned int param)
+{
+ sdei_dispatch_context_t *disp_ctx;
+
+ if (param >= SDEI_SAVED_GPREGS)
+ return SDEI_EINVAL;
+
+ /* Get outstanding dispatch on this CPU */
+ disp_ctx = get_outstanding_dispatch();
+ if (disp_ctx == NULL)
+ return SDEI_EDENY;
+
+ assert(disp_ctx->map != NULL);
+
+ if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
+ return SDEI_EDENY;
+
+ /*
+ * No locking is required for the Running status as this is the only CPU
+ * which can complete the event
+ */
+
+ return (int64_t) disp_ctx->x[param];
+}
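
The per-CPU dispatch stack above allows at most two nested dispatches: a Critical event may preempt an outstanding Normal event, but never the other way round, and two events of the same class never nest. The following is a minimal model of push_dispatch()/get_outstanding_dispatch() and that nesting rule, reduced to the class bookkeeping only:

#include <stdbool.h>
#include <stdio.h>

#define MAX_EVENT_NESTING	2U

enum sdei_class { SDEI_NORMAL, SDEI_CRITICAL };

static enum sdei_class dispatch_stack[MAX_EVENT_NESTING];
static unsigned int stack_top;	/* empty ascending */

/* Returns true if an event of class 'c' may be dispatched now. */
static bool can_dispatch(enum sdei_class c)
{
	if (stack_top == 0U) {
		return true;
	}
	/* Only a Critical event may preempt an outstanding Normal one. */
	return (c == SDEI_CRITICAL) &&
	       (dispatch_stack[stack_top - 1U] == SDEI_NORMAL);
}

static void push_dispatch(enum sdei_class c)
{
	dispatch_stack[stack_top++] = c;
}

int main(void)
{
	printf("normal allowed:          %d\n", can_dispatch(SDEI_NORMAL));
	push_dispatch(SDEI_NORMAL);
	printf("nested normal allowed:   %d\n", can_dispatch(SDEI_NORMAL));
	printf("nested critical allowed: %d\n", can_dispatch(SDEI_CRITICAL));
	return 0;
}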
diff --git a/services/std_svc/sdei/sdei_main.c b/services/std_svc/sdei/sdei_main.c
new file mode 100644
index 0000000..59a1673
--- /dev/null
+++ b/services/std_svc/sdei/sdei_main.c
@@ -0,0 +1,1112 @@
+/*
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <bl31/bl31.h>
+#include <bl31/ehf.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <context.h>
+#include <lib/cassert.h>
+#include <lib/el3_runtime/pubsub.h>
+#include <lib/utils.h>
+#include <plat/common/platform.h>
+#include <services/sdei.h>
+
+#include "sdei_private.h"
+
+#define MAJOR_VERSION 1ULL
+#define MINOR_VERSION 0ULL
+#define VENDOR_VERSION 0ULL
+
+#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
+ ((((_major)) << 48ULL) | (((_minor)) << 32ULL) | (_vendor))
+
+#define LOWEST_INTR_PRIORITY 0xff
+
+CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
+ sdei_critical_must_have_higher_priority);
+
+static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;
+
+/* Initialise SDEI map entries */
+static void init_map(sdei_ev_map_t *map)
+{
+ map->reg_count = 0;
+}
+
+/* Convert mapping to SDEI class */
+static sdei_class_t map_to_class(sdei_ev_map_t *map)
+{
+ return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
+}
+
+/* Clear SDEI event entries except state */
+static void clear_event_entries(sdei_entry_t *se)
+{
+ se->ep = 0;
+ se->arg = 0;
+ se->affinity = 0;
+ se->reg_flags = 0;
+}
+
+/* Perform CPU-specific state initialisation */
+static void *sdei_cpu_on_init(const void *arg)
+{
+ unsigned int i;
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+
+ /* Initialize private mappings on this CPU */
+ for_each_private_map(i, map) {
+ se = get_event_entry(map);
+ clear_event_entries(se);
+ se->state = 0;
+ }
+
+ SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());
+
+ /* All PEs start with SDEI events masked */
+ (void) sdei_pe_mask();
+
+ return NULL;
+}
+
+/* CPU initialisation after wakeup from suspend */
+static void *sdei_cpu_wakeup_init(const void *arg)
+{
+ SDEI_LOG("Events masked on %lx\n", read_mpidr_el1());
+
+ /* All PEs wake up with SDEI events masked */
+ sdei_pe_mask();
+
+ return 0;
+}
+
+/* Initialise an SDEI class */
+static void sdei_class_init(sdei_class_t class)
+{
+ unsigned int i;
+ bool zero_found __unused = false;
+ int ev_num_so_far __unused;
+ sdei_ev_map_t *map;
+
+ /* Sanity check and configuration of shared events */
+ ev_num_so_far = -1;
+ for_each_shared_map(i, map) {
+#if ENABLE_ASSERTIONS
+ /* Ensure mappings are sorted */
+ assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));
+
+ ev_num_so_far = map->ev_num;
+
+ /* Event 0 must not be shared */
+ assert(map->ev_num != SDEI_EVENT_0);
+
+ /* Check for valid event */
+ assert(map->ev_num >= 0);
+
+ /* Make sure it's a shared event */
+ assert(is_event_shared(map));
+
+ /* No shared mapping should have signalable property */
+ assert(!is_event_signalable(map));
+
+ /* Shared mappings can't be explicit */
+ assert(!is_map_explicit(map));
+#endif
+
+ /* Skip initializing the wrong priority */
+ if (map_to_class(map) != class)
+ continue;
+
+ /* Platform events are always bound, so set the bound flag */
+ if (is_map_dynamic(map)) {
+ assert(map->intr == SDEI_DYN_IRQ);
+ assert(is_event_normal(map));
+ num_dyn_shrd_slots++;
+ } else {
+ /* Shared mappings must be bound to shared interrupt */
+ assert(plat_ic_is_spi(map->intr) != 0);
+ set_map_bound(map);
+ }
+
+ init_map(map);
+ }
+
+ /* Sanity check and configuration of private events for this CPU */
+ ev_num_so_far = -1;
+ for_each_private_map(i, map) {
+#if ENABLE_ASSERTIONS
+ /* Ensure mappings are sorted */
+ assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));
+
+ ev_num_so_far = map->ev_num;
+
+ if (map->ev_num == SDEI_EVENT_0) {
+ zero_found = true;
+
+ /* Event 0 must be a Secure SGI */
+ assert(is_secure_sgi(map->intr));
+
+ /*
+			 * Event 0 can only have the signalable flag set (apart
+			 * from being private).
+ */
+ assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
+ SDEI_MAPF_PRIVATE));
+ } else {
+ /* No other mapping should have signalable property */
+ assert(!is_event_signalable(map));
+ }
+
+ /* Check for valid event */
+ assert(map->ev_num >= 0);
+
+ /* Make sure it's a private event */
+ assert(is_event_private(map));
+
+ /*
+ * Other than priority, explicit events can only have explicit
+ * and private flags set.
+ */
+ if (is_map_explicit(map)) {
+ assert((map->map_flags | SDEI_MAPF_CRITICAL) ==
+ (SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE
+ | SDEI_MAPF_CRITICAL));
+ }
+#endif
+
+ /* Skip initializing the wrong priority */
+ if (map_to_class(map) != class)
+ continue;
+
+ /* Platform events are always bound, so set the bound flag */
+ if (map->ev_num != SDEI_EVENT_0) {
+ if (is_map_dynamic(map)) {
+ assert(map->intr == SDEI_DYN_IRQ);
+ assert(is_event_normal(map));
+ num_dyn_priv_slots++;
+ } else if (is_map_explicit(map)) {
+ /*
+ * Explicit mappings don't have a backing
+ * SDEI interrupt, but verify that anyway.
+ */
+ assert(map->intr == SDEI_DYN_IRQ);
+ } else {
+ /*
+ * Private mappings must be bound to private
+ * interrupt.
+ */
+ assert(plat_ic_is_ppi((unsigned) map->intr) != 0);
+ set_map_bound(map);
+ }
+ }
+
+ init_map(map);
+ }
+
+ /* Ensure event 0 is in the mapping */
+ assert(zero_found);
+
+ (void) sdei_cpu_on_init(NULL);
+}
+
+/* SDEI dispatcher initialisation */
+void sdei_init(void)
+{
+ plat_sdei_setup();
+ sdei_class_init(SDEI_CRITICAL);
+ sdei_class_init(SDEI_NORMAL);
+
+ /* Register priority level handlers */
+ ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
+ sdei_intr_handler);
+ ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
+ sdei_intr_handler);
+}
+
+/* Populate SDEI event entry */
+static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
+ unsigned int flags, uint64_t affinity)
+{
+ assert(se != NULL);
+
+ se->ep = ep;
+ se->arg = arg;
+ se->affinity = (affinity & MPIDR_AFFINITY_MASK);
+ se->reg_flags = flags;
+}
+
+static uint64_t sdei_version(void)
+{
+ return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
+}
+
+/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
+static int validate_flags(uint64_t flags, uint64_t mpidr)
+{
+ /* Validate flags */
+ switch (flags) {
+ case SDEI_REGF_RM_PE:
+ if (!is_valid_mpidr(mpidr))
+ return SDEI_EINVAL;
+ break;
+ case SDEI_REGF_RM_ANY:
+ break;
+ default:
+ /* Unknown flags */
+ return SDEI_EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set routing of an SDEI event */
+static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
+{
+ int ret;
+ unsigned int routing;
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+
+ ret = validate_flags(flags, mpidr);
+ if (ret != 0)
+ return ret;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ /* The event must not be private */
+ if (is_event_private(map))
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ sdei_map_lock(map);
+
+ if (!is_map_bound(map) || is_event_private(map)) {
+ ret = SDEI_EINVAL;
+ goto finish;
+ }
+
+ if (!can_sdei_state_trans(se, DO_ROUTING)) {
+ ret = SDEI_EDENY;
+ goto finish;
+ }
+
+ /* Choose appropriate routing */
+ routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
+ INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
+
+ /* Update event registration flag */
+ se->reg_flags = (unsigned int) flags;
+ if (flags == SDEI_REGF_RM_PE) {
+ se->affinity = (mpidr & MPIDR_AFFINITY_MASK);
+ }
+
+ /*
+ * ROUTING_SET is permissible only when event composite state is
+ * 'registered, disabled, and not running'. This means that the
+ * interrupt is currently disabled, and not active.
+ */
+ plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);
+
+finish:
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Register handler and argument for an SDEI event */
+static int64_t sdei_event_register(int ev_num,
+ uint64_t ep,
+ uint64_t arg,
+ uint64_t flags,
+ uint64_t mpidr)
+{
+ int ret;
+ unsigned int routing;
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+ sdei_state_t backup_state;
+
+ if ((ep == 0U) || (plat_sdei_validate_entry_point(
+ ep, sdei_client_el()) != 0)) {
+ return SDEI_EINVAL;
+ }
+
+ ret = validate_flags(flags, mpidr);
+ if (ret != 0)
+ return ret;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ /* Private events always target the PE */
+ if (is_event_private(map)) {
+ /*
+		 * SDEI internally handles private events in the same manner
+		 * as shared events with routing mode RM_PE. Since the routing
+		 * mode flag and affinity fields are not used when registering
+		 * a private event, set them here.
+ */
+ flags = SDEI_REGF_RM_PE;
+ /*
+		 * The kernel may pass 0 as mpidr. As flags is set to
+		 * SDEI_REGF_RM_PE, set mpidr to this PE as well.
+ */
+ mpidr = read_mpidr_el1();
+ }
+
+ se = get_event_entry(map);
+
+ /*
+	 * Even though the register operation is per-event (and, for private
+	 * events, additionally per-PE), it has to be
+ * serialised with respect to bind/release, which are global operations.
+ * So we hold the lock throughout, unconditionally.
+ */
+ sdei_map_lock(map);
+
+ backup_state = se->state;
+ if (!can_sdei_state_trans(se, DO_REGISTER))
+ goto fallback;
+
+ /*
+ * When registering for dynamic events, make sure it's been bound
+ * already. This has to be the case as, without binding, the client
+ * can't know about the event number to register for.
+ */
+ if (is_map_dynamic(map) && !is_map_bound(map))
+ goto fallback;
+
+ if (is_event_private(map)) {
+ /* Multiple calls to register are possible for private events */
+ assert(map->reg_count >= 0);
+ } else {
+		/* Only a single call to register is possible for shared events */
+ assert(map->reg_count == 0);
+ }
+
+ if (is_map_bound(map)) {
+ /* Meanwhile, did any PE ACK the interrupt? */
+ if (plat_ic_get_interrupt_active(map->intr) != 0U)
+ goto fallback;
+
+		/* The interrupt must currently be owned by Non-secure */
+ if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
+ goto fallback;
+
+ /*
+ * Disable forwarding of new interrupt triggers to CPU
+ * interface.
+ */
+ plat_ic_disable_interrupt(map->intr);
+
+ /*
+ * Any events that are triggered after register and before
+ * enable should remain pending. Clear any previous interrupt
+ * triggers which are pending (except for SGIs). This has no
+		 * effect on level-triggered interrupts.
+ */
+ if (ev_num != SDEI_EVENT_0)
+ plat_ic_clear_interrupt_pending(map->intr);
+
+ /* Map interrupt to EL3 and program the correct priority */
+ plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);
+
+ /* Program the appropriate interrupt priority */
+ plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));
+
+ /*
+ * Set the routing mode for shared event as requested. We
+ * already ensure that shared events get bound to SPIs.
+ */
+ if (is_event_shared(map)) {
+ routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
+ INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
+ plat_ic_set_spi_routing(map->intr, routing,
+ (u_register_t) mpidr);
+ }
+ }
+
+ /* Populate event entries */
+ set_sdei_entry(se, ep, arg, (unsigned int) flags, mpidr);
+
+ /* Increment register count */
+ map->reg_count++;
+
+ sdei_map_unlock(map);
+
+ return 0;
+
+fallback:
+ /* Reinstate previous state */
+ se->state = backup_state;
+
+ sdei_map_unlock(map);
+
+ return SDEI_EDENY;
+}
+
+/* Enable SDEI event */
+static int64_t sdei_event_enable(int ev_num)
+{
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+ int ret;
+ bool before, after;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+ ret = SDEI_EDENY;
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ before = GET_EV_STATE(se, ENABLED);
+ if (!can_sdei_state_trans(se, DO_ENABLE))
+ goto finish;
+ after = GET_EV_STATE(se, ENABLED);
+
+ /*
+ * Enable interrupt for bound events only if there's a change in enabled
+ * state.
+ */
+ if (is_map_bound(map) && (!before && after))
+ plat_ic_enable_interrupt(map->intr);
+
+ ret = 0;
+
+finish:
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Disable SDEI event */
+static int sdei_event_disable(int ev_num)
+{
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+ int ret;
+ bool before, after;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+ ret = SDEI_EDENY;
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ before = GET_EV_STATE(se, ENABLED);
+ if (!can_sdei_state_trans(se, DO_DISABLE))
+ goto finish;
+ after = GET_EV_STATE(se, ENABLED);
+
+ /*
+ * Disable interrupt for bound events only if there's a change in
+ * enabled state.
+ */
+ if (is_map_bound(map) && (before && !after))
+ plat_ic_disable_interrupt(map->intr);
+
+ ret = 0;
+
+finish:
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Query SDEI event information */
+static int64_t sdei_event_get_info(int ev_num, int info)
+{
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+
+ uint64_t flags;
+ bool registered;
+ uint64_t affinity;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ /* Sample state under lock */
+ registered = GET_EV_STATE(se, REGISTERED);
+ flags = se->reg_flags;
+ affinity = se->affinity;
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ switch (info) {
+ case SDEI_INFO_EV_TYPE:
+ return is_event_shared(map);
+
+ case SDEI_INFO_EV_NOT_SIGNALED:
+ return !is_event_signalable(map);
+
+ case SDEI_INFO_EV_PRIORITY:
+ return is_event_critical(map);
+
+ case SDEI_INFO_EV_ROUTING_MODE:
+ if (!is_event_shared(map))
+ return SDEI_EINVAL;
+ if (!registered)
+ return SDEI_EDENY;
+ return (flags == SDEI_REGF_RM_PE);
+
+ case SDEI_INFO_EV_ROUTING_AFF:
+ if (!is_event_shared(map))
+ return SDEI_EINVAL;
+ if (!registered)
+ return SDEI_EDENY;
+ if (flags != SDEI_REGF_RM_PE)
+ return SDEI_EINVAL;
+ return affinity;
+
+ default:
+ return SDEI_EINVAL;
+ }
+}
+
+/* Unregister an SDEI event */
+static int sdei_event_unregister(int ev_num)
+{
+ int ret = 0;
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ /*
+	 * Even though the unregister operation is per-event (and, for private
+	 * events, additionally per-PE), it has to
+ * be serialised with respect to bind/release, which are global
+ * operations. So we hold the lock throughout, unconditionally.
+ */
+ sdei_map_lock(map);
+
+ if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
+ /*
+		 * Even if the call is invalid, return the pending error code
+		 * when the handler is running (for example, it had already
+		 * unregistered from within its own running handler); otherwise,
+		 * return deny.
+ */
+ ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;
+
+ goto finish;
+ }
+
+ map->reg_count--;
+ if (is_event_private(map)) {
+ /* Multiple calls to register are possible for private events */
+ assert(map->reg_count >= 0);
+ } else {
+		/* Only a single call to register is possible for shared events */
+ assert(map->reg_count == 0);
+ }
+
+ if (is_map_bound(map)) {
+ plat_ic_disable_interrupt(map->intr);
+
+ /*
+ * Clear pending interrupt. Skip for SGIs as they may not be
+ * cleared on interrupt controllers.
+ */
+ if (ev_num != SDEI_EVENT_0)
+ plat_ic_clear_interrupt_pending(map->intr);
+
+ assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
+ plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
+ plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
+ }
+
+ clear_event_entries(se);
+
+ /*
+ * If the handler is running at the time of unregister, return the
+ * pending error code.
+ */
+ if (GET_EV_STATE(se, RUNNING))
+ ret = SDEI_EPEND;
+
+finish:
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Query status of an SDEI event */
+static int sdei_event_status(int ev_num)
+{
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+ sdei_state_t state;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ /* State value directly maps to the expected return format */
+ state = se->state;
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return (int) state;
+}
+
+/* Bind an SDEI event to an interrupt */
+static int sdei_interrupt_bind(unsigned int intr_num)
+{
+ sdei_ev_map_t *map;
+ bool retry = true, shared_mapping;
+
+ /* Interrupt must be either PPI or SPI */
+ if (!(plat_ic_is_ppi(intr_num) || plat_ic_is_spi(intr_num)))
+ return SDEI_EINVAL;
+
+ shared_mapping = (plat_ic_is_spi(intr_num) != 0);
+ do {
+ /*
+ * Bail out if there is already an event for this interrupt,
+ * either platform-defined or dynamic.
+ */
+ map = find_event_map_by_intr(intr_num, shared_mapping);
+ if (map != NULL) {
+ if (is_map_dynamic(map)) {
+ if (is_map_bound(map)) {
+ /*
+ * Dynamic event, already bound. Return
+ * event number.
+ */
+ return map->ev_num;
+ }
+ } else {
+ /* Binding non-dynamic event */
+ return SDEI_EINVAL;
+ }
+ }
+
+ /*
+ * The interrupt is not bound yet. Try to find a free slot to
+ * bind it. Free dynamic mappings have their interrupt set as
+ * SDEI_DYN_IRQ.
+ */
+ map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
+ if (map == NULL)
+ return SDEI_ENOMEM;
+
+ /* The returned mapping must be dynamic */
+ assert(is_map_dynamic(map));
+
+ /*
+ * We cannot assert for bound maps here, as we might be racing
+ * with another bind.
+ */
+
+ /* The requested interrupt must already belong to NS */
+ if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
+ return SDEI_EDENY;
+
+ /*
+ * Interrupt programming and ownership transfer are deferred
+ * until register.
+ */
+
+ sdei_map_lock(map);
+ if (!is_map_bound(map)) {
+ map->intr = intr_num;
+ set_map_bound(map);
+ retry = false;
+ }
+ sdei_map_unlock(map);
+ } while (retry);
+
+ return map->ev_num;
+}
+
+/* Release an SDEI event previously bound to an interrupt */
+static int sdei_interrupt_release(int ev_num)
+{
+ int ret = 0;
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ if (!is_map_dynamic(map))
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ sdei_map_lock(map);
+
+ /* Event must have been unregistered before release */
+ if (map->reg_count != 0) {
+ ret = SDEI_EDENY;
+ goto finish;
+ }
+
+ /*
+ * Interrupt release never causes the state to change. We only check
+ * whether it's permissible or not.
+ */
+ if (!can_sdei_state_trans(se, DO_RELEASE)) {
+ ret = SDEI_EDENY;
+ goto finish;
+ }
+
+ if (is_map_bound(map)) {
+ /*
+ * Deny release if the interrupt is active, which means it's
+ * probably being acknowledged and handled elsewhere.
+ */
+ if (plat_ic_get_interrupt_active(map->intr) != 0U) {
+ ret = SDEI_EDENY;
+ goto finish;
+ }
+
+ /*
+ * Interrupt programming and ownership transfer are already done
+ * during unregister.
+ */
+
+ map->intr = SDEI_DYN_IRQ;
+ clr_map_bound(map);
+ } else {
+ SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
+ map->reg_count);
+ ret = SDEI_EINVAL;
+ }
+
+finish:
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Perform reset of private SDEI events */
+static int sdei_private_reset(void)
+{
+ sdei_ev_map_t *map;
+ int ret = 0, final_ret = 0;
+ unsigned int i;
+
+ /* Unregister all private events */
+ for_each_private_map(i, map) {
+ /*
+		 * Unregistering an event that is not registered returns a deny,
+		 * which is tolerated here. But if the event is running or has an
+		 * unregister pending, the reset call fails.
+ */
+ ret = sdei_event_unregister(map->ev_num);
+ if ((ret == SDEI_EPEND) && (final_ret == 0))
+ final_ret = SDEI_EDENY;
+ }
+
+ return final_ret;
+}
+
+/* Perform reset of shared SDEI events */
+static int sdei_shared_reset(void)
+{
+ const sdei_mapping_t *mapping;
+ sdei_ev_map_t *map;
+ int ret = 0, final_ret = 0;
+ unsigned int i, j;
+
+ /* Unregister all shared events */
+ for_each_shared_map(i, map) {
+ /*
+		 * Unregistering an event that is not registered returns a deny,
+		 * which is tolerated here. But if the event is running or has an
+		 * unregister pending, the reset call fails.
+ */
+ ret = sdei_event_unregister(map->ev_num);
+ if ((ret == SDEI_EPEND) && (final_ret == 0))
+ final_ret = SDEI_EDENY;
+ }
+
+ if (final_ret != 0)
+ return final_ret;
+
+ /*
+ * Loop through both private and shared mappings, and release all
+ * bindings.
+ */
+ for_each_mapping_type(i, mapping) {
+ iterate_mapping(mapping, j, map) {
+ /*
+ * Release bindings for mappings that are dynamic and
+ * bound.
+ */
+ if (is_map_dynamic(map) && is_map_bound(map)) {
+ /*
+ * Any failure to release would mean there is at
+ * least a PE registered for the event.
+ */
+ ret = sdei_interrupt_release(map->ev_num);
+ if ((ret != 0) && (final_ret == 0))
+ final_ret = ret;
+ }
+ }
+ }
+
+ return final_ret;
+}
+
+/* Send a signal to another SDEI client PE */
+static int sdei_signal(int ev_num, uint64_t target_pe)
+{
+ sdei_ev_map_t *map;
+
+ /* Only event 0 can be signalled */
+ if (ev_num != SDEI_EVENT_0)
+ return SDEI_EINVAL;
+
+ /* Find mapping for event 0 */
+ map = find_event_map(SDEI_EVENT_0);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ /* The event must be signalable */
+ if (!is_event_signalable(map))
+ return SDEI_EINVAL;
+
+ /* Validate target */
+ if (!is_valid_mpidr(target_pe))
+ return SDEI_EINVAL;
+
+ /* Raise SGI. Platform will validate target_pe */
+ plat_ic_raise_el3_sgi((int) map->intr, (u_register_t) target_pe);
+
+ return 0;
+}
+
+/* Query SDEI dispatcher features */
+static uint64_t sdei_features(unsigned int feature)
+{
+ if (feature == SDEI_FEATURE_BIND_SLOTS) {
+ return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
+ num_dyn_shrd_slots);
+ }
+
+ return (uint64_t) SDEI_EINVAL;
+}
+
+/* SDEI top level handler for servicing SMCs */
+uint64_t sdei_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+
+ uint64_t x5;
+ unsigned int ss = (unsigned int) get_interrupt_src_ss(flags);
+ int64_t ret;
+ bool resume = false;
+ cpu_context_t *ctx = handle;
+ int ev_num = (int) x1;
+
+ if (ss != NON_SECURE)
+ SMC_RET1(ctx, SMC_UNK);
+
+ /* Verify the caller EL */
+ if (GET_EL(read_spsr_el3()) != sdei_client_el())
+ SMC_RET1(ctx, SMC_UNK);
+
+ switch (smc_fid) {
+ case SDEI_VERSION:
+ SDEI_LOG("> VER\n");
+ ret = (int64_t) sdei_version();
+ SDEI_LOG("< VER:%" PRIx64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_REGISTER:
+ x5 = SMC_GET_GP(ctx, CTX_GPREG_X5);
+ SDEI_LOG("> REG(n:%d e:%" PRIx64 " a:%" PRIx64 " f:%x m:%" PRIx64 "\n", ev_num,
+ x2, x3, (int) x4, x5);
+ ret = sdei_event_register(ev_num, x2, x3, x4, x5);
+ SDEI_LOG("< REG:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_ENABLE:
+ SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
+ ret = sdei_event_enable(ev_num);
+ SDEI_LOG("< ENABLE:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_DISABLE:
+ SDEI_LOG("> DISABLE(n:0x%x)\n", ev_num);
+ ret = sdei_event_disable(ev_num);
+ SDEI_LOG("< DISABLE:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_CONTEXT:
+ SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
+ ret = sdei_event_context(ctx, (unsigned int) x1);
+ SDEI_LOG("< CTX:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_COMPLETE_AND_RESUME:
+ resume = true;
+ /* Fallthrough */
+
+ case SDEI_EVENT_COMPLETE:
+ SDEI_LOG("> COMPLETE(r:%u sta/ep:%" PRIx64 "):%lx\n",
+ (unsigned int) resume, x1, read_mpidr_el1());
+ ret = sdei_event_complete(resume, x1);
+ SDEI_LOG("< COMPLETE:%" PRIx64 "\n", ret);
+
+ /*
+		 * Set the error code only if the call failed. If the call
+		 * succeeded, the dispatched context is discarded and the
+		 * interrupted context is restored to a pristine condition, so
+		 * it must not be modified. We don't return to the caller in
+		 * this case anyway.
+ */
+ if (ret != 0)
+ SMC_RET1(ctx, ret);
+
+ SMC_RET0(ctx);
+
+ case SDEI_EVENT_STATUS:
+ SDEI_LOG("> STAT(n:0x%x)\n", ev_num);
+ ret = sdei_event_status(ev_num);
+ SDEI_LOG("< STAT:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_GET_INFO:
+ SDEI_LOG("> INFO(n:0x%x, %d)\n", ev_num, (int) x2);
+ ret = sdei_event_get_info(ev_num, (int) x2);
+ SDEI_LOG("< INFO:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_UNREGISTER:
+ SDEI_LOG("> UNREG(n:0x%x)\n", ev_num);
+ ret = sdei_event_unregister(ev_num);
+ SDEI_LOG("< UNREG:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_PE_UNMASK:
+ SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
+ sdei_pe_unmask();
+ SDEI_LOG("< UNMASK:%d\n", 0);
+ SMC_RET1(ctx, 0);
+
+ case SDEI_PE_MASK:
+ SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
+ ret = sdei_pe_mask();
+ SDEI_LOG("< MASK:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_INTERRUPT_BIND:
+ SDEI_LOG("> BIND(%d)\n", (int) x1);
+ ret = sdei_interrupt_bind((unsigned int) x1);
+ SDEI_LOG("< BIND:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_INTERRUPT_RELEASE:
+ SDEI_LOG("> REL(0x%x)\n", ev_num);
+ ret = sdei_interrupt_release(ev_num);
+ SDEI_LOG("< REL:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_SHARED_RESET:
+ SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
+ ret = sdei_shared_reset();
+ SDEI_LOG("< S_RESET:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_PRIVATE_RESET:
+ SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
+ ret = sdei_private_reset();
+ SDEI_LOG("< P_RESET:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_ROUTING_SET:
+ SDEI_LOG("> ROUTE_SET(n:%d f:%" PRIx64 " aff:%" PRIx64 ")\n", ev_num, x2, x3);
+ ret = sdei_event_routing_set(ev_num, x2, x3);
+ SDEI_LOG("< ROUTE_SET:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_FEATURES:
+ SDEI_LOG("> FTRS(f:%" PRIx64 ")\n", x1);
+ ret = (int64_t) sdei_features((unsigned int) x1);
+ SDEI_LOG("< FTRS:%" PRIx64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_SIGNAL:
+ SDEI_LOG("> SIGNAL(e:%d t:%" PRIx64 ")\n", ev_num, x2);
+ ret = sdei_signal(ev_num, x2);
+ SDEI_LOG("< SIGNAL:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ default:
+ /* Do nothing in default case */
+ break;
+ }
+
+ WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
+ SMC_RET1(ctx, SMC_UNK);
+}
+
+/* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
+SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);
+
+/* Subscribe to PSCI CPU suspend finisher for per-CPU configuration */
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, sdei_cpu_wakeup_init);
diff --git a/services/std_svc/sdei/sdei_private.h b/services/std_svc/sdei/sdei_private.h
new file mode 100644
index 0000000..44a7301
--- /dev/null
+++ b/services/std_svc/sdei/sdei_private.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SDEI_PRIVATE_H
+#define SDEI_PRIVATE_H
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/debug.h>
+#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/spinlock.h>
+#include <lib/utils_def.h>
+#include <plat/common/platform.h>
+#include <services/sdei.h>
+#include <setjmp.h>
+
+#ifndef __aarch64__
+# error SDEI is implemented only for AArch64 systems
+#endif
+
+#ifndef PLAT_SDEI_CRITICAL_PRI
+# error Platform must define SDEI critical priority value
+#endif
+
+#ifndef PLAT_SDEI_NORMAL_PRI
+# error Platform must define SDEI normal priority value
+#endif
+
+/* Output SDEI logs as verbose */
+#define SDEI_LOG(...) VERBOSE("SDEI: " __VA_ARGS__)
+
+/* SDEI handler unregistered state. This is the default state. */
+#define SDEI_STATE_UNREGISTERED 0U
+
+/* SDEI event status values in bit position */
+#define SDEI_STATF_REGISTERED 0U
+#define SDEI_STATF_ENABLED 1U
+#define SDEI_STATF_RUNNING 2U
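+/*
+ * These bit positions map directly onto the value returned by
+ * SDEI_EVENT_STATUS; for example, a returned value of 0x3 denotes an event
+ * that is registered and enabled but not currently running.
+ */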
+
+/* SDEI SMC error codes */
+#define SDEI_EINVAL (-2)
+#define SDEI_EDENY (-3)
+#define SDEI_EPEND (-5)
+#define SDEI_ENOMEM (-10)
+
+/*
+ * 'info' parameter to SDEI_EVENT_GET_INFO SMC.
+ *
+ * Note that the SDEI v1.0 specification mistakenly enumerates the
+ * SDEI_INFO_EV_SIGNALED as SDEI_INFO_SIGNALED. This will be corrected in a
+ * future version.
+ */
+#define SDEI_INFO_EV_TYPE 0
+#define SDEI_INFO_EV_NOT_SIGNALED 1
+#define SDEI_INFO_EV_PRIORITY 2
+#define SDEI_INFO_EV_ROUTING_MODE 3
+#define SDEI_INFO_EV_ROUTING_AFF 4
+
+#define SDEI_PRIVATE_MAPPING() (&sdei_global_mappings[SDEI_MAP_IDX_PRIV_])
+#define SDEI_SHARED_MAPPING() (&sdei_global_mappings[SDEI_MAP_IDX_SHRD_])
+
+#define for_each_mapping_type(_i, _mapping) \
+ for ((_i) = 0, (_mapping) = &sdei_global_mappings[(_i)]; \
+ (_i) < SDEI_MAP_IDX_MAX_; \
+ (_i)++, (_mapping) = &sdei_global_mappings[(_i)])
+
+#define iterate_mapping(_mapping, _i, _map) \
+ for ((_map) = (_mapping)->map, (_i) = 0; \
+ (_i) < (_mapping)->num_maps; \
+ (_i)++, (_map)++)
+
+#define for_each_private_map(_i, _map) \
+ iterate_mapping(SDEI_PRIVATE_MAPPING(), _i, _map)
+
+#define for_each_shared_map(_i, _map) \
+ iterate_mapping(SDEI_SHARED_MAPPING(), _i, _map)
+
+/* SDEI_FEATURES */
+#define SDEI_FEATURE_BIND_SLOTS 0U
+#define BIND_SLOTS_MASK 0xffffU
+#define FEATURES_SHARED_SLOTS_SHIFT 16U
+#define FEATURES_PRIVATE_SLOTS_SHIFT 0U
+#define FEATURE_BIND_SLOTS(_priv, _shrd) \
+ (((((uint64_t) (_priv)) & BIND_SLOTS_MASK) << FEATURES_PRIVATE_SLOTS_SHIFT) | \
+ ((((uint64_t) (_shrd)) & BIND_SLOTS_MASK) << FEATURES_SHARED_SLOTS_SHIFT))
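+/*
+ * For example (illustrative values only): FEATURE_BIND_SLOTS(2, 3) packs the
+ * private slot count (2) into bits[15:0] and the shared slot count (3) into
+ * bits[31:16], giving 0x30002.
+ */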
+
+#define GET_EV_STATE(_e, _s) get_ev_state_bit(_e, SDEI_STATF_##_s)
+#define SET_EV_STATE(_e, _s) clr_ev_state_bit(_e->state, SDEI_STATF_##_s)
+
+static inline bool is_event_private(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT_32(SDEI_MAPF_PRIVATE_SHIFT_)) != 0U);
+}
+
+static inline bool is_event_shared(sdei_ev_map_t *map)
+{
+ return !is_event_private(map);
+}
+
+static inline bool is_event_critical(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT_32(SDEI_MAPF_CRITICAL_SHIFT_)) != 0U);
+}
+
+static inline bool is_event_normal(sdei_ev_map_t *map)
+{
+ return !is_event_critical(map);
+}
+
+static inline bool is_event_signalable(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT_32(SDEI_MAPF_SIGNALABLE_SHIFT_)) != 0U);
+}
+
+static inline bool is_map_dynamic(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT_32(SDEI_MAPF_DYNAMIC_SHIFT_)) != 0U);
+}
+
+/*
+ * Checks whether an event is associated with an interrupt. Static events always
+ * return true, and dynamic events return whether SDEI_INTERRUPT_BIND had been
+ * called on them. This can be used on both static and dynamic events to check
+ * for an associated interrupt.
+ */
+static inline bool is_map_bound(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT_32(SDEI_MAPF_BOUND_SHIFT_)) != 0U);
+}
+
+static inline void set_map_bound(sdei_ev_map_t *map)
+{
+ map->map_flags |= BIT_32(SDEI_MAPF_BOUND_SHIFT_);
+}
+
+static inline bool is_map_explicit(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT_32(SDEI_MAPF_EXPLICIT_SHIFT_)) != 0U);
+}
+
+static inline void clr_map_bound(sdei_ev_map_t *map)
+{
+ map->map_flags &= ~BIT_32(SDEI_MAPF_BOUND_SHIFT_);
+}
+
+static inline bool is_secure_sgi(unsigned int intr)
+{
+ return ((plat_ic_is_sgi(intr) != 0) &&
+ (plat_ic_get_interrupt_type(intr) == INTR_TYPE_EL3));
+}
+
+/*
+ * Determine the EL of the client. If SCR_EL3.HCE is set for the Non-secure
+ * context (i.e. EL2 is present and enabled), deem EL2; otherwise, deem EL1.
+ */
+static inline unsigned int sdei_client_el(void)
+{
+ cpu_context_t *ns_ctx = cm_get_context(NON_SECURE);
+ el3_state_t *el3_ctx = get_el3state_ctx(ns_ctx);
+
+ return ((read_ctx_reg(el3_ctx, CTX_SCR_EL3) & SCR_HCE_BIT) != 0U) ?
+ MODE_EL2 : MODE_EL1;
+}
+
+static inline unsigned int sdei_event_priority(sdei_ev_map_t *map)
+{
+ return (unsigned int) (is_event_critical(map) ? PLAT_SDEI_CRITICAL_PRI :
+ PLAT_SDEI_NORMAL_PRI);
+}
+
+static inline bool get_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
+{
+ return ((se->state & BIT_32(bit_no)) != 0U);
+}
+
+static inline void clr_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
+{
+ se->state &= ~BIT_32(bit_no);
+}
+
+/* SDEI actions for state transition */
+typedef enum {
+ /*
+ * Actions resulting from client requests. These directly map to SMC
+ * calls. Note that the state table columns are listed in this order
+ * too.
+ */
+ DO_REGISTER = 0,
+ DO_RELEASE = 1,
+ DO_ENABLE = 2,
+ DO_DISABLE = 3,
+ DO_UNREGISTER = 4,
+ DO_ROUTING = 5,
+ DO_CONTEXT = 6,
+ DO_COMPLETE = 7,
+ DO_COMPLETE_RESUME = 8,
+
+ /* Action for event dispatch */
+ DO_DISPATCH = 9,
+
+ DO_MAX,
+} sdei_action_t;
+
+typedef enum {
+ SDEI_NORMAL,
+ SDEI_CRITICAL
+} sdei_class_t;
+
+static inline void sdei_map_lock(sdei_ev_map_t *map)
+{
+ spin_lock(&map->lock);
+}
+
+static inline void sdei_map_unlock(sdei_ev_map_t *map)
+{
+ spin_unlock(&map->lock);
+}
+
+extern const sdei_mapping_t sdei_global_mappings[];
+extern sdei_entry_t sdei_private_event_table[];
+extern sdei_entry_t sdei_shared_event_table[];
+
+void init_sdei_state(void);
+
+sdei_ev_map_t *find_event_map_by_intr(unsigned int intr_num, bool shared);
+sdei_ev_map_t *find_event_map(int ev_num);
+sdei_entry_t *get_event_entry(sdei_ev_map_t *map);
+
+int64_t sdei_event_context(void *handle, unsigned int param);
+int sdei_event_complete(bool resume, uint64_t pc);
+
+void sdei_pe_unmask(void);
+int64_t sdei_pe_mask(void);
+
+int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
+ void *cookie);
+bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act);
+void begin_sdei_synchronous_dispatch(jmp_buf *buffer);
+
+#endif /* SDEI_PRIVATE_H */
diff --git a/services/std_svc/sdei/sdei_state.c b/services/std_svc/sdei/sdei_state.c
new file mode 100644
index 0000000..1b448e6
--- /dev/null
+++ b/services/std_svc/sdei/sdei_state.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <lib/cassert.h>
+
+#include "sdei_private.h"
+
+/* Aliases for SDEI handler states: 'R'unning, 'E'nabled, and re'G'istered */
+#define r_ 0U
+#define R_ (1u << SDEI_STATF_RUNNING)
+
+#define e_ 0U
+#define E_ (1u << SDEI_STATF_ENABLED)
+
+#define g_ 0U
+#define G_ (1u << SDEI_STATF_REGISTERED)
+
+/* All possible composite handler states */
+#define reg_ (r_ | e_ | g_)
+#define reG_ (r_ | e_ | G_)
+#define rEg_ (r_ | E_ | g_)
+#define rEG_ (r_ | E_ | G_)
+#define Reg_ (R_ | e_ | g_)
+#define ReG_ (R_ | e_ | G_)
+#define REg_ (R_ | E_ | g_)
+#define REG_ (R_ | E_ | G_)
+
+#define MAX_STATES (REG_ + 1u)
+
+/* Invalid state */
+#define SDEI_STATE_INVALID ((sdei_state_t) (-1))
+
+/* No change in state */
+#define SDEI_STATE_NOP ((sdei_state_t) (-2))
+
+#define X___ SDEI_STATE_INVALID
+#define NOP_ SDEI_STATE_NOP
+
+/* Ensure special states don't overlap with valid ones */
+CASSERT(X___ > REG_, sdei_state_overlap_invalid);
+CASSERT(NOP_ > REG_, sdei_state_overlap_nop);
+
+/*
+ * SDEI handler state machine: refer to sections 6.1 and 6.1.2 of the SDEI v1.0
+ * specification (ARM DEN0054A).
+ *
+ * Not all calls contribute to handler state transition. This table is also used
+ * to validate whether a call is permissible at a given handler state:
+ *
+ * - X___ denotes a forbidden transition;
+ * - NOP_ denotes a permitted transition, but there's no change in state;
+ * - Otherwise, XXX_ gives the new state.
+ *
+ * DISP[atch] is a transition added for the implementation, but is not mentioned
+ * in the spec.
+ *
+ * Calls that the spec says can be made at any time do not appear in this
+ * table.
+ */
+
+static const sdei_state_t sdei_state_table[MAX_STATES][DO_MAX] = {
+/*
+ * Action: REG REL ENA DISA UREG ROUT CTX COMP COMPR DISP
+ * Notes: [3] [1] [3] [3][4] [2]
+ */
+ /* Handler unregistered, disabled, and not running. This is the default state. */
+/* 0 */ [reg_] = { reG_, NOP_, X___, X___, X___, X___, X___, X___, X___, X___, },
+
+ /* Handler unregistered and running */
+/* 4 */ [Reg_] = { X___, X___, X___, X___, X___, X___, NOP_, reg_, reg_, X___, },
+
+ /* Handler registered */
+/* 1 */ [reG_] = { X___, X___, rEG_, NOP_, reg_, NOP_, X___, X___, X___, X___, },
+
+ /* Handler registered and running */
+/* 5 */ [ReG_] = { X___, X___, REG_, NOP_, Reg_, X___, NOP_, reG_, reG_, X___, },
+
+ /* Handler registered and enabled */
+/* 3 */ [rEG_] = { X___, X___, NOP_, reG_, reg_, X___, X___, X___, X___, REG_, },
+
+ /* Handler registered, enabled, and running */
+/* 7 */ [REG_] = { X___, X___, NOP_, ReG_, Reg_, X___, NOP_, rEG_, rEG_, X___, },
+
+ /*
+ * Invalid states: no valid transition would leave the handler in these
+ * states; and no transition from these states is possible either.
+ */
+
+ /*
+ * Handler can't be enabled without being registered. I.e., XEg is
+ * impossible.
+ */
+/* 2 */ [rEg_] = { X___, X___, X___, X___, X___, X___, X___, X___, X___, X___, },
+/* 6 */ [REg_] = { X___, X___, X___, X___, X___, X___, X___, X___, X___, X___, },
+};
+
+/*
+ * [1] Unregister will always also disable the event, so the new state will have
+ * Xeg.
+ * [2] Event is considered for dispatch only when it's both registered and
+ * enabled.
+ * [3] Never causes change in state.
+ * [4] Only allowed when running.
+ */
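+
+/*
+ * Example reading of the table above: from the registered state (reG_), a
+ * DO_ENABLE action moves the handler to rEG_ and DO_UNREGISTER moves it back
+ * to reg_, while DO_CONTEXT is forbidden (X___) as no handler is running.
+ */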
+
+/*
+ * Given an action, transition the state of an event by looking up the state
+ * table above:
+ *
+ * - Return false for invalid transition;
+ * - Return true for valid transition that causes no change in state;
+ * - Otherwise, update state and return true.
+ *
+ * This function assumes that the caller holds necessary locks. If the
+ * transition has constraints other than those the state table describes, the
+ * caller is expected to restore the previous state. See sdei_event_register()
+ * for an example.
+ */
+bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act)
+{
+ sdei_state_t next;
+
+ assert(act < DO_MAX);
+ if (se->state >= MAX_STATES) {
+ WARN(" event state invalid: %x\n", se->state);
+ return false;
+ }
+
+ next = sdei_state_table[se->state][act];
+ switch (next) {
+ case SDEI_STATE_INVALID:
+ return false;
+
+ case SDEI_STATE_NOP:
+ return true;
+
+ default:
+ /* Valid transition. Update state. */
+ SDEI_LOG(" event state 0x%x => 0x%x\n", se->state, next);
+ se->state = next;
+
+ return true;
+ }
+}
diff --git a/services/std_svc/spm/common/aarch64/spm_helpers.S b/services/std_svc/spm/common/aarch64/spm_helpers.S
new file mode 100644
index 0000000..95e69fb
--- /dev/null
+++ b/services/std_svc/spm/common/aarch64/spm_helpers.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "spm_common.h"
+
+ .global spm_secure_partition_enter
+ .global spm_secure_partition_exit
+
+ /* ---------------------------------------------------------------------
+ * This function is called with SP_EL0 as stack. Here we stash our EL3
+ * callee-saved registers on to the stack as a part of saving the C
+ * runtime and enter the secure payload.
+ * 'x0' contains a pointer to the memory where the address of the C
+ * runtime context is to be saved.
+ * ---------------------------------------------------------------------
+ */
+func spm_secure_partition_enter
+ /* Make space for the registers that we're going to save */
+ mov x3, sp
+ str x3, [x0, #0]
+ sub sp, sp, #SP_C_RT_CTX_SIZE
+
+ /* Save callee-saved registers on to the stack */
+ stp x19, x20, [sp, #SP_C_RT_CTX_X19]
+ stp x21, x22, [sp, #SP_C_RT_CTX_X21]
+ stp x23, x24, [sp, #SP_C_RT_CTX_X23]
+ stp x25, x26, [sp, #SP_C_RT_CTX_X25]
+ stp x27, x28, [sp, #SP_C_RT_CTX_X27]
+ stp x29, x30, [sp, #SP_C_RT_CTX_X29]
+
+ /* ---------------------------------------------------------------------
+	 * Everything is set up now. el3_exit() will use the secure context to
+	 * restore the general purpose and EL3 system registers and ERET
+	 * into the secure payload.
+ * ---------------------------------------------------------------------
+ */
+ b el3_exit
+endfunc spm_secure_partition_enter
+
+ /* ---------------------------------------------------------------------
+ * This function is called with 'x0' pointing to a C runtime context
+ * saved in spm_secure_partition_enter().
+ * It restores the saved registers and jumps to that runtime with 'x0'
+ * as the new SP register. This destroys the C runtime context that had
+ * been built on the stack below the saved context by the caller. Later
+ * the second parameter 'x1' is passed as a return value to the caller.
+ * ---------------------------------------------------------------------
+ */
+func spm_secure_partition_exit
+ /* Restore the previous stack */
+ mov sp, x0
+
+	/* Restore callee-saved registers from the stack */
+ ldp x19, x20, [x0, #(SP_C_RT_CTX_X19 - SP_C_RT_CTX_SIZE)]
+ ldp x21, x22, [x0, #(SP_C_RT_CTX_X21 - SP_C_RT_CTX_SIZE)]
+ ldp x23, x24, [x0, #(SP_C_RT_CTX_X23 - SP_C_RT_CTX_SIZE)]
+ ldp x25, x26, [x0, #(SP_C_RT_CTX_X25 - SP_C_RT_CTX_SIZE)]
+ ldp x27, x28, [x0, #(SP_C_RT_CTX_X27 - SP_C_RT_CTX_SIZE)]
+ ldp x29, x30, [x0, #(SP_C_RT_CTX_X29 - SP_C_RT_CTX_SIZE)]
+
+ /* ---------------------------------------------------------------------
+ * This should take us back to the instruction after the call to the
+	 * last spm_secure_partition_enter(). Place the second parameter in x0
+ * so that the caller will see it as a return value from the original
+ * entry call.
+ * ---------------------------------------------------------------------
+ */
+ mov x0, x1
+ ret
+endfunc spm_secure_partition_exit
diff --git a/services/std_svc/spm/common/aarch64/spm_shim_exceptions.S b/services/std_svc/spm/common/aarch64/spm_shim_exceptions.S
new file mode 100644
index 0000000..836f75c
--- /dev/null
+++ b/services/std_svc/spm/common/aarch64/spm_shim_exceptions.S
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <context.h>
+
+/* -----------------------------------------------------------------------------
+ * Very simple stackless exception handlers used by the spm shim layer.
+ * -----------------------------------------------------------------------------
+ */
+ .globl spm_shim_exceptions_ptr
+
+vector_base spm_shim_exceptions_ptr, .spm_shim_exceptions
+
+ /* -----------------------------------------------------
+ * Current EL with SP0 : 0x0 - 0x200
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionSP0, .spm_shim_exceptions
+ b .
+end_vector_entry SynchronousExceptionSP0
+
+vector_entry IrqSP0, .spm_shim_exceptions
+ b .
+end_vector_entry IrqSP0
+
+vector_entry FiqSP0, .spm_shim_exceptions
+ b .
+end_vector_entry FiqSP0
+
+vector_entry SErrorSP0, .spm_shim_exceptions
+ b .
+end_vector_entry SErrorSP0
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x400
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionSPx, .spm_shim_exceptions
+ b .
+end_vector_entry SynchronousExceptionSPx
+
+vector_entry IrqSPx, .spm_shim_exceptions
+ b .
+end_vector_entry IrqSPx
+
+vector_entry FiqSPx, .spm_shim_exceptions
+ b .
+end_vector_entry FiqSPx
+
+vector_entry SErrorSPx, .spm_shim_exceptions
+ b .
+end_vector_entry SErrorSPx
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600. No exceptions
+ * are handled since secure_partition does not implement
+ * a lower EL
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionA64, .spm_shim_exceptions
+ msr tpidr_el1, x30
+ mrs x30, esr_el1
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+ cmp x30, #EC_AARCH64_SVC
+ b.eq do_smc
+
+ cmp x30, #EC_AARCH32_SVC
+ b.eq do_smc
+
+ cmp x30, #EC_AARCH64_SYS
+ b.eq handle_sys_trap
+
+ /* Fail in all the other cases */
+ b panic
+
+ /* ---------------------------------------------
+ * Tell SPM that we are done initialising
+ * ---------------------------------------------
+ */
+do_smc:
+ mrs x30, tpidr_el1
+ smc #0
+ exception_return
+
+	/* AArch64 system instruction traps are handled as a panic for now */
+handle_sys_trap:
+panic:
+ b panic
+end_vector_entry SynchronousExceptionA64
+
+vector_entry IrqA64, .spm_shim_exceptions
+ b .
+end_vector_entry IrqA64
+
+vector_entry FiqA64, .spm_shim_exceptions
+ b .
+end_vector_entry FiqA64
+
+vector_entry SErrorA64, .spm_shim_exceptions
+ b .
+end_vector_entry SErrorA64
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionA32, .spm_shim_exceptions
+ b .
+end_vector_entry SynchronousExceptionA32
+
+vector_entry IrqA32, .spm_shim_exceptions
+ b .
+end_vector_entry IrqA32
+
+vector_entry FiqA32, .spm_shim_exceptions
+ b .
+end_vector_entry FiqA32
+
+vector_entry SErrorA32, .spm_shim_exceptions
+ b .
+end_vector_entry SErrorA32
diff --git a/services/std_svc/spm/common/include/spm_common.h b/services/std_svc/spm/common/include/spm_common.h
new file mode 100644
index 0000000..c736919
--- /dev/null
+++ b/services/std_svc/spm/common/include/spm_common.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_COMMON_H
+#define SPM_COMMON_H
+
+#include <context.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define SP_C_RT_CTX_X19 0x0
+#define SP_C_RT_CTX_X20 0x8
+#define SP_C_RT_CTX_X21 0x10
+#define SP_C_RT_CTX_X22 0x18
+#define SP_C_RT_CTX_X23 0x20
+#define SP_C_RT_CTX_X24 0x28
+#define SP_C_RT_CTX_X25 0x30
+#define SP_C_RT_CTX_X26 0x38
+#define SP_C_RT_CTX_X27 0x40
+#define SP_C_RT_CTX_X28 0x48
+#define SP_C_RT_CTX_X29 0x50
+#define SP_C_RT_CTX_X30 0x58
+
+#define SP_C_RT_CTX_SIZE 0x60
+#define SP_C_RT_CTX_ENTRIES (SP_C_RT_CTX_SIZE >> DWORD_SHIFT)
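+/*
+ * Note: these offsets mirror the stp/ldp pairs in spm_secure_partition_enter()
+ * and spm_secure_partition_exit() in spm_helpers.S: x19/x20 at the bottom of
+ * the frame up to x29/x30 at offsets 0x50/0x58, giving a 0x60-byte frame.
+ */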
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+/* Assembly helpers */
+uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
+void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
+
+/* Helper to obtain a reference to the SP's translation table context */
+xlat_ctx_t *spm_get_sp_xlat_context(void);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* SPM_COMMON_H */
diff --git a/services/std_svc/spm/common/include/spm_shim_private.h b/services/std_svc/spm/common/include/spm_shim_private.h
new file mode 100644
index 0000000..bcb1147
--- /dev/null
+++ b/services/std_svc/spm/common/include/spm_shim_private.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_SHIM_PRIVATE_H
+#define SPM_SHIM_PRIVATE_H
+
+#include <stdint.h>
+
+#include <lib/utils_def.h>
+
+/* Assembly source */
+IMPORT_SYM(uintptr_t, spm_shim_exceptions_ptr, SPM_SHIM_EXCEPTIONS_PTR);
+
+/* Linker symbols */
+IMPORT_SYM(uintptr_t, __SPM_SHIM_EXCEPTIONS_START__, SPM_SHIM_EXCEPTIONS_START);
+IMPORT_SYM(uintptr_t, __SPM_SHIM_EXCEPTIONS_END__, SPM_SHIM_EXCEPTIONS_END);
+
+/* Definitions */
+
+#define SPM_SHIM_EXCEPTIONS_SIZE \
+ (SPM_SHIM_EXCEPTIONS_END - SPM_SHIM_EXCEPTIONS_START)
+
+#endif /* SPM_SHIM_PRIVATE_H */
diff --git a/services/std_svc/spm/common/spm.mk b/services/std_svc/spm/common/spm.mk
new file mode 100644
index 0000000..65fd72a
--- /dev/null
+++ b/services/std_svc/spm/common/spm.mk
@@ -0,0 +1,23 @@
+#
+# Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH},aarch64)
+ $(error "Error: SPM is only supported on aarch64.")
+endif
+
+INCLUDES += -Iservices/std_svc/spm/common/include
+
+SPM_SOURCES := $(addprefix services/std_svc/spm/common/, \
+ ${ARCH}/spm_helpers.S \
+ ${ARCH}/spm_shim_exceptions.S)
+
+ifeq (1, $(filter 1, ${SPM_MM} ${SPMC_AT_EL3_SEL0_SP}))
+SPM_SOURCES += $(addprefix services/std_svc/spm/common/, \
+ spm_xlat_common.c)
+endif
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32 := yes
diff --git a/services/std_svc/spm/common/spm_xlat_common.c b/services/std_svc/spm/common/spm_xlat_common.c
new file mode 100644
index 0000000..a463c8b
--- /dev/null
+++ b/services/std_svc/spm/common/spm_xlat_common.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <platform_def.h>
+
+/* Place translation tables by default along with the ones used by BL31. */
+#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
+#define PLAT_SP_IMAGE_XLAT_SECTION_NAME ".xlat_table"
+#endif
+#ifndef PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME
+#define PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME ".bss"
+#endif
+
+/* Allocate and initialise the translation context for the secure partitions. */
+REGISTER_XLAT_CONTEXT2(sp,
+ PLAT_SP_IMAGE_MMAP_REGIONS,
+ PLAT_SP_IMAGE_MAX_XLAT_TABLES,
+ PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
+ EL1_EL0_REGIME, PLAT_SP_IMAGE_XLAT_SECTION_NAME,
+ PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME);
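+/*
+ * Note: the macro above instantiates the 'sp_xlat_ctx' translation context
+ * (returned by spm_get_sp_xlat_context() below) for the EL1&0 translation
+ * regime, sized by the PLAT_SP_IMAGE_* limits supplied by the platform.
+ */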
+
+/* Get handle of Secure Partition translation context */
+xlat_ctx_t *spm_get_sp_xlat_context(void)
+{
+ return &sp_xlat_ctx;
+}
diff --git a/services/std_svc/spm/el3_spmc/logical_sp.c b/services/std_svc/spm/el3_spmc/logical_sp.c
new file mode 100644
index 0000000..e080832
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/logical_sp.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <services/el3_spmc_logical_sp.h>
+#include <services/ffa_svc.h>
+#include "spmc.h"
+
+/*******************************************************************************
+ * Validate any logical partition descriptors before we initialise.
+ * Initialisation of said partitions will be taken care of during SPMC boot.
+ ******************************************************************************/
+int el3_sp_desc_validate(void)
+{
+ struct el3_lp_desc *lp_array;
+
+ /*
+	 * Assert that the number of descriptors does not exceed the maximum
+	 * allowed. This constant should be defined on a per-platform basis.
+ */
+ assert(EL3_LP_DESCS_COUNT <= MAX_EL3_LP_DESCS_COUNT);
+
+ /* Check the array bounds are valid. */
+ assert(EL3_LP_DESCS_END >= EL3_LP_DESCS_START);
+
+ /* If no logical partitions are implemented then simply bail out. */
+ if (EL3_LP_DESCS_COUNT == 0U) {
+ return 0;
+ }
+
+ lp_array = get_el3_lp_array();
+
+ for (unsigned int index = 0; index < EL3_LP_DESCS_COUNT; index++) {
+ struct el3_lp_desc *lp_desc = &lp_array[index];
+
+ /* Validate our logical partition descriptors. */
+ if (lp_desc == NULL) {
+ ERROR("Invalid Logical SP Descriptor\n");
+ return -EINVAL;
+ }
+
+ /*
+		 * Ensure the ID follows the convention to indicate it resides
+ * in the secure world.
+ */
+ if (!ffa_is_secure_world_id(lp_desc->sp_id)) {
+ ERROR("Invalid Logical SP ID (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure we don't conflict with the SPMC partition ID. */
+ if (lp_desc->sp_id == FFA_SPMC_ID) {
+ ERROR("Logical SP ID clashes with SPMC ID(0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure the UUID is not the NULL UUID. */
+ if (lp_desc->uuid[0] == 0 && lp_desc->uuid[1] == 0 &&
+ lp_desc->uuid[2] == 0 && lp_desc->uuid[3] == 0) {
+ ERROR("Invalid UUID for Logical SP (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure init function callback is registered. */
+ if (lp_desc->init == NULL) {
+ ERROR("Missing init function for Logical SP(0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure that LP only supports receiving direct requests. */
+ if (lp_desc->properties &
+ ~(FFA_PARTITION_DIRECT_REQ_RECV)) {
+ ERROR("Invalid partition properties (0x%x)\n",
+ lp_desc->properties);
+ return -EINVAL;
+ }
+
+ /* Ensure direct request function callback is registered. */
+ if (lp_desc->direct_req == NULL) {
+ ERROR("No Direct Req handler for Logical SP (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure that all partition IDs are unique. */
+ for (unsigned int inner_idx = index + 1;
+ inner_idx < EL3_LP_DESCS_COUNT; inner_idx++) {
+ if (lp_desc->sp_id == lp_array[inner_idx].sp_id) {
+ ERROR("Duplicate SP ID Detected (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
diff --git a/services/std_svc/spm/el3_spmc/spmc.h b/services/std_svc/spm/el3_spmc/spmc.h
new file mode 100644
index 0000000..48644ac
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMC_H
+#define SPMC_H
+
+#include <stdint.h>
+
+#include <common/bl_common.h>
+#include <lib/psci/psci.h>
+#include <lib/spinlock.h>
+#include <services/el3_spmc_logical_sp.h>
+#include "spm_common.h"
+
+/*
+ * Ranges of FF-A IDs for Normal world and Secure world components. The
+ * convention matches that used by other SPMCs i.e. Hafnium and OP-TEE.
+ */
+#define FFA_NWD_ID_BASE 0x0
+#define FFA_NWD_ID_LIMIT 0x7FFF
+#define FFA_SWD_ID_BASE 0x8000
+#define FFA_SWD_ID_LIMIT	(SPMD_DIRECT_MSG_ENDPOINT_ID - 1)
+#define FFA_SWD_ID_MASK 0x8000
+
+/* ID 0 is reserved for the normal world entity, (Hypervisor or OS Kernel). */
+#define FFA_NWD_ID U(0)
+/* First ID is reserved for the SPMC */
+#define FFA_SPMC_ID U(FFA_SWD_ID_BASE)
+/* SP IDs are allocated after the SPMC ID */
+#define FFA_SP_ID_BASE (FFA_SPMC_ID + 1)
+/* Align with Hafnium implementation */
+#define INV_SP_ID 0x7FFF
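+/*
+ * For example, with FFA_SWD_ID_BASE at 0x8000 the SPMC itself is assigned
+ * ID 0x8000 and the first SP is assigned 0x8001 (FFA_SP_ID_BASE).
+ */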
+
+/* FF-A Related helper macros. */
+#define FFA_ID_MASK U(0xFFFF)
+#define FFA_PARTITION_ID_SHIFT U(16)
+#define FFA_FEATURES_BIT31_MASK U(0x1u << 31)
+#define FFA_FEATURES_RET_REQ_NS_BIT U(0x1 << 1)
+
+#define FFA_RUN_EP_ID(ep_vcpu_ids) \
+ ((ep_vcpu_ids >> FFA_PARTITION_ID_SHIFT) & FFA_ID_MASK)
+#define FFA_RUN_VCPU_ID(ep_vcpu_ids) \
+ (ep_vcpu_ids & FFA_ID_MASK)
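+/*
+ * For example (illustrative value): for ep_vcpu_ids == 0x80010002,
+ * FFA_RUN_EP_ID() yields endpoint ID 0x8001 and FFA_RUN_VCPU_ID() yields
+ * vCPU ID 0x2.
+ */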
+
+#define FFA_PAGE_SIZE (4096)
+#define FFA_RXTX_PAGE_COUNT_MASK 0x1F
+
+/* Ensure that the page size used by TF-A is a multiple of the 4K FF-A page size. */
+CASSERT((PAGE_SIZE % FFA_PAGE_SIZE) == 0, assert_aligned_page_size);
+
+/*
+ * Defines to allow an SP to subscribe for power management messages
+ */
+#define FFA_PM_MSG_SUB_CPU_OFF U(1 << 0)
+#define FFA_PM_MSG_SUB_CPU_SUSPEND U(1 << 1)
+#define FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME U(1 << 2)
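+/*
+ * For example, an SP that subscribes to both CPU_OFF and CPU_SUSPEND messages
+ * would record 0x3 in its pwr_mgmt_msgs field.
+ */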
+
+/*
+ * Runtime states of an execution context as per the FF-A v1.1 specification.
+ */
+enum sp_runtime_states {
+ RT_STATE_WAITING,
+ RT_STATE_RUNNING,
+ RT_STATE_PREEMPTED,
+ RT_STATE_BLOCKED
+};
+
+/*
+ * Runtime model of an execution context as per the FF-A v1.1 specification. Its
+ * value is valid only if the execution context is not in the waiting state.
+ */
+enum sp_runtime_model {
+ RT_MODEL_DIR_REQ,
+ RT_MODEL_RUN,
+ RT_MODEL_INIT,
+ RT_MODEL_INTR
+};
+
+enum sp_runtime_el {
+ EL1 = 0,
+ S_EL0,
+ S_EL1
+};
+
+enum sp_execution_state {
+ SP_STATE_AARCH64 = 0,
+ SP_STATE_AARCH32
+};
+
+enum mailbox_state {
+ /* There is no message in the mailbox. */
+ MAILBOX_STATE_EMPTY,
+
+ /* There is a message that has been populated in the mailbox. */
+ MAILBOX_STATE_FULL,
+};
+
+struct mailbox {
+ enum mailbox_state state;
+
+ /* RX/TX Buffers. */
+ void *rx_buffer;
+ const void *tx_buffer;
+
+ /* Size of RX/TX Buffer. */
+ uint32_t rxtx_page_count;
+
+ /* Lock access to mailbox. */
+ spinlock_t lock;
+};
+
+/*
+ * Execution context members for an SP. This is a bit like struct
+ * vcpu in a hypervisor.
+ */
+struct sp_exec_ctx {
+ /*
+ * Store the stack address to restore C runtime context from after
+ * returning from a synchronous entry into the SP.
+ */
+ uint64_t c_rt_ctx;
+
+ /* Space to maintain the architectural state of an SP. */
+ cpu_context_t cpu_ctx;
+
+ /* Track the current runtime state of the SP. */
+ enum sp_runtime_states rt_state;
+
+ /* Track the current runtime model of the SP. */
+ enum sp_runtime_model rt_model;
+
+ /* Track the source partition ID to validate a direct response. */
+ uint16_t dir_req_origin_id;
+};
+
+/*
+ * Structure to describe the cumulative properties of an SP.
+ */
+struct secure_partition_desc {
+ /*
+	 * Execution contexts allocated to this endpoint. Ideally, we only
+	 * need as many contexts as there are physical CPUs for an S-EL1 SP
+	 * that is MP-pinned.
+ */
+ struct sp_exec_ctx ec[PLATFORM_CORE_COUNT];
+
+ /* ID of the Secure Partition. */
+ uint16_t sp_id;
+
+ /* Runtime EL. */
+ enum sp_runtime_el runtime_el;
+
+ /* Partition UUID. */
+ uint32_t uuid[4];
+
+ /* Partition Properties. */
+ uint32_t properties;
+
+ /* Supported FF-A Version. */
+ uint32_t ffa_version;
+
+ /* Execution State. */
+ enum sp_execution_state execution_state;
+
+ /* Mailbox tracking. */
+ struct mailbox mailbox;
+
+ /* Secondary entrypoint. Only valid for a S-EL1 SP. */
+ uintptr_t secondary_ep;
+
+ /*
+ * Store whether the SP has subscribed to any power management messages.
+ */
+ uint16_t pwr_mgmt_msgs;
+
+ /*
+ * Store whether the SP has requested the use of the NS bit for memory
+ * management transactions if it is using FF-A v1.0.
+ */
+ bool ns_bit_requested;
+};
+
+/*
+ * This define identifies the only SP that will be initialised and participate
+ * in FF-A communication. The implementation leaves the door open for more SPs
+ * to be managed in future but for now it is reasonable to assume that either a
+ * single S-EL0 or a single S-EL1 SP will be supported. This define will be used
+ * to identify which SP descriptor to initialise and manage during SP runtime.
+ */
+#define ACTIVE_SP_DESC_INDEX 0
+
+/*
+ * Structure to describe the cumulative properties of the Hypervisor and
+ * NS-Endpoints.
+ */
+struct ns_endpoint_desc {
+ /*
+ * ID of the NS-Endpoint or Hypervisor.
+ */
+ uint16_t ns_ep_id;
+
+ /*
+ * Mailbox tracking.
+ */
+ struct mailbox mailbox;
+
+ /*
+ * Supported FF-A Version
+ */
+ uint32_t ffa_version;
+};
+
+/* Reference to power management hooks */
+extern const spd_pm_ops_t spmc_pm;
+
+/* Setup Function for different SP types. */
+void spmc_sp_common_setup(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info,
+ int32_t boot_info_reg);
+void spmc_el1_sp_setup(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info);
+void spmc_sp_common_ep_commit(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info);
+
+/*
+ * Helper function to perform a synchronous entry into a SP.
+ */
+uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec);
+
+/*
+ * Helper function to obtain the descriptor of the current SP on a physical cpu.
+ */
+struct secure_partition_desc *spmc_get_current_sp_ctx(void);
+
+/*
+ * Helper function to obtain the execution context of an SP on a
+ * physical cpu.
+ */
+struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp);
+
+/*
+ * Helper function to obtain the index of the execution context of an SP on a
+ * physical cpu.
+ */
+unsigned int get_ec_index(struct secure_partition_desc *sp);
+
+uint64_t spmc_ffa_error_return(void *handle, int error_code);
+
+/*
+ * Ensure a partition ID does not clash and follows the secure world convention.
+ */
+bool is_ffa_secure_id_valid(uint16_t partition_id);
+
+/*
+ * Helper function to obtain the array storing the EL3
+ * Logical Partition descriptors.
+ */
+struct el3_lp_desc *get_el3_lp_array(void);
+
+/*
+ * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
+ * or OS kernel in the normal world or the last SP that was run.
+ */
+struct mailbox *spmc_get_mbox_desc(bool secure_origin);
+
+/*
+ * Helper function to obtain the context of an SP with a given partition ID.
+ */
+struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id);
+
+/*
+ * Helper function to obtain the FF-A version of the calling
+ * partition.
+ */
+uint32_t get_partition_ffa_version(bool secure_origin);
+
+
+#endif /* SPMC_H */
diff --git a/services/std_svc/spm/el3_spmc/spmc.mk b/services/std_svc/spm/el3_spmc/spmc.mk
new file mode 100644
index 0000000..6442af0
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc.mk
@@ -0,0 +1,46 @@
+#
+# Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH},aarch64)
+ $(error "Error: SPMC is only supported on aarch64.")
+endif
+
+SPMC_SOURCES := $(addprefix services/std_svc/spm/el3_spmc/, \
+ spmc_main.c \
+ spmc_setup.c \
+ logical_sp.c \
+ spmc_pm.c \
+ spmc_shared_mem.c)
+
+# Specify platform specific logical partition implementation.
+SPMC_LP_SOURCES := $(addprefix ${PLAT_DIR}/, \
+ ${PLAT}_el3_spmc_logical_sp.c)
+
+
+ifneq ($(wildcard $(SPMC_LP_SOURCES)),)
+SPMC_SOURCES += $(SPMC_LP_SOURCES)
+endif
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32 := yes
+
+ifndef BL32
+# The SPMC is paired with a Test Secure Payload source and we intend to
+# build the Test Secure Payload if no other image has been provided
+# for BL32.
+#
+# In cases where an associated Secure Payload lies outside this build
+# system/source tree, the dispatcher Makefile can either invoke an external
+# build command or assume it is pre-built.
+
+BL32_ROOT := bl32/tsp
+
+# Conditionally include SP's Makefile. The assumption is that the TSP's build
+# system is compatible with that of Trusted Firmware, and it'll add and populate
+# necessary build targets and variables.
+
+include ${BL32_ROOT}/tsp.mk
+endif
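+
+# Illustrative note: a prebuilt Secure Payload can be supplied instead of
+# building the TSP by passing its path on the build command line via the
+# standard BL32 build option, e.g. BL32=<path-to-prebuilt-sp-image>.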
diff --git a/services/std_svc/spm/el3_spmc/spmc_main.c b/services/std_svc/spm/el3_spmc/spmc_main.c
new file mode 100644
index 0000000..ada6f45
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_main.c
@@ -0,0 +1,2112 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <bl31/ehf.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <common/runtime_svc.h>
+#include <common/uuid.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/smccc.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <libfdt.h>
+#include <plat/common/platform.h>
+#include <services/el3_spmc_logical_sp.h>
+#include <services/ffa_svc.h>
+#include <services/spmc_svc.h>
+#include <services/spmd_svc.h>
+#include "spmc.h"
+#include "spmc_shared_mem.h"
+
+#include <platform_def.h>
+
+/* Declare the maximum number of SPs and EL3 LPs. */
+#define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
+
+/*
+ * Allocate a secure partition descriptor to describe each SP in the system that
+ * does not reside at EL3.
+ */
+static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
+
+/*
+ * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
+ * the system that interacts with an SP. It is used to track the Hypervisor
+ * buffer pair, version and ID for now. It could be extended to track VM
+ * properties when the SPMC supports indirect messaging.
+ */
+static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
+
+static uint64_t spmc_sp_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie);
+
+/*
+ * Helper function to obtain the array storing the EL3
+ * Logical Partition descriptors.
+ */
+struct el3_lp_desc *get_el3_lp_array(void)
+{
+ return (struct el3_lp_desc *) EL3_LP_DESCS_START;
+}
+
+/*
+ * Helper function to obtain the descriptor of the last SP to which control was
+ * handed on this physical cpu. Currently, we assume there is only one SP.
+ * TODO: Expand to track multiple partitions when required.
+ */
+struct secure_partition_desc *spmc_get_current_sp_ctx(void)
+{
+ return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
+}
+
+/*
+ * Helper function to obtain the execution context of an SP on the
+ * current physical cpu.
+ */
+struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
+{
+ return &(sp->ec[get_ec_index(sp)]);
+}
+
+/* Helper function to get pointer to SP context from its ID. */
+struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
+{
+ /* Check for Secure World Partitions. */
+ for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
+ if (sp_desc[i].sp_id == id) {
+ return &(sp_desc[i]);
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
+ * We assume that the first descriptor is reserved for this entity.
+ */
+struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
+{
+ return &(ns_ep_desc[0]);
+}
+
+/*
+ * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
+ * or OS kernel in the normal world or the last SP that was run.
+ */
+struct mailbox *spmc_get_mbox_desc(bool secure_origin)
+{
+ /* Obtain the RX/TX buffer pair descriptor. */
+ if (secure_origin) {
+ return &(spmc_get_current_sp_ctx()->mailbox);
+ } else {
+ return &(spmc_get_hyp_ctx()->mailbox);
+ }
+}
+
+/******************************************************************************
+ * This function returns to the place where spmc_sp_synchronous_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
+{
+ /*
+ * The SPM must have initiated the original request through a
+ * synchronous entry into the secure partition. Jump back to the
+ * original C runtime context with the value of rc in x0.
+ */
+ spm_secure_partition_exit(ec->c_rt_ctx, rc);
+
+ panic();
+}
+
+/*******************************************************************************
+ * Return FFA_ERROR with specified error code.
+ ******************************************************************************/
+uint64_t spmc_ffa_error_return(void *handle, int error_code)
+{
+ SMC_RET8(handle, FFA_ERROR,
+ FFA_TARGET_INFO_MBZ, error_code,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+}
+
+/******************************************************************************
+ * Helper function to validate a secure partition ID to ensure it does not
+ * conflict with any other FF-A component and follows the convention to
+ * indicate it resides within the secure world.
+ ******************************************************************************/
+bool is_ffa_secure_id_valid(uint16_t partition_id)
+{
+ struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
+
+ /* Ensure the ID is not the invalid partition ID. */
+ if (partition_id == INV_SP_ID) {
+ return false;
+ }
+
+ /* Ensure the ID is not the SPMD ID. */
+ if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
+ return false;
+ }
+
+ /*
+ * Ensure the ID follows the convention to indicate it resides
+ * in the secure world.
+ */
+ if (!ffa_is_secure_world_id(partition_id)) {
+ return false;
+ }
+
+ /* Ensure we don't conflict with the SPMC partition ID. */
+ if (partition_id == FFA_SPMC_ID) {
+ return false;
+ }
+
+ /* Ensure we do not already have an SP context with this ID. */
+ if (spmc_get_sp_ctx(partition_id)) {
+ return false;
+ }
+
+ /* Ensure we don't clash with any Logical SP's. */
+ for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
+ if (el3_lp_descs[i].sp_id == partition_id) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*******************************************************************************
+ * This function either forwards the request to the other world or returns
+ * with an ERET depending on the source of the call.
+ * We can assume that the destination is for an entity at a lower exception
+ * level as any messages destined for a logical SP resident in EL3 will have
+ * already been taken care of by the SPMC before entering this function.
+ ******************************************************************************/
+static uint64_t spmc_smc_return(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *handle,
+ void *cookie,
+ uint64_t flags,
+ uint16_t dst_id)
+{
+ /* If the destination is in the normal world always go via the SPMD. */
+ if (ffa_is_normal_world_id(dst_id)) {
+ return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
+ cookie, handle, flags);
+ }
+ /*
+ * If the caller is secure and we want to return to the secure world,
+ * ERET directly.
+ */
+ else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
+ SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
+ }
+ /* If we originated in the normal world then switch contexts. */
+ else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
+ return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
+ x3, x4, handle);
+ } else {
+ /* Unknown State. */
+ panic();
+ }
+
+ /* Shouldn't be Reached. */
+ return 0;
+}
+
+/*******************************************************************************
+ * FF-A ABI Handlers.
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Helper function to validate arg2 as part of a direct message.
+ ******************************************************************************/
+static inline bool direct_msg_validate_arg2(uint64_t x2)
+{
+ /* Check message type. */
+ if (x2 & FFA_FWK_MSG_BIT) {
+ /* We have a framework message, ensure it is a known message. */
+ if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
+ VERBOSE("Invalid message format 0x%lx.\n", x2);
+ return false;
+ }
+ } else {
+ /* We have a partition message; ensure x2 is not set. */
+ if (x2 != (uint64_t) 0) {
+ VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
+ x2);
+ return false;
+ }
+ }
+ return true;
+}
+
+/*******************************************************************************
+ * Helper function to validate the destination ID of a direct response.
+ ******************************************************************************/
+static bool direct_msg_validate_dst_id(uint16_t dst_id)
+{
+ struct secure_partition_desc *sp;
+
+ /* Check if we're targeting a normal world partition. */
+ if (ffa_is_normal_world_id(dst_id)) {
+ return true;
+ }
+
+ /* Or directed to the SPMC itself.*/
+ if (dst_id == FFA_SPMC_ID) {
+ return true;
+ }
+
+ /* Otherwise ensure the SP exists. */
+ sp = spmc_get_sp_ctx(dst_id);
+ if (sp != NULL) {
+ return true;
+ }
+
+ return false;
+}
+
+/*******************************************************************************
+ * Helper function to validate the response from a Logical Partition.
+ ******************************************************************************/
+static bool direct_msg_validate_lp_resp(uint16_t origin_id, uint16_t lp_id,
+ void *handle)
+{
+ /* Retrieve populated Direct Response Arguments. */
+ uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
+ uint64_t x2 = SMC_GET_GP(handle, CTX_GPREG_X2);
+ uint16_t src_id = ffa_endpoint_source(x1);
+ uint16_t dst_id = ffa_endpoint_destination(x1);
+
+ if (src_id != lp_id) {
+ ERROR("Invalid EL3 LP source ID (0x%x).\n", src_id);
+ return false;
+ }
+
+ /*
+ * Check the destination ID is valid and ensure the LP is responding to
+ * the original request.
+ */
+ if ((!direct_msg_validate_dst_id(dst_id)) || (dst_id != origin_id)) {
+ ERROR("Invalid EL3 LP destination ID (0x%x).\n", dst_id);
+ return false;
+ }
+
+ if (!direct_msg_validate_arg2(x2)) {
+ ERROR("Invalid EL3 LP message encoding.\n");
+ return false;
+ }
+ return true;
+}
+
+/*******************************************************************************
+ * Handle direct request messages and route to the appropriate destination.
+ ******************************************************************************/
+static uint64_t direct_req_smc_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ uint16_t src_id = ffa_endpoint_source(x1);
+ uint16_t dst_id = ffa_endpoint_destination(x1);
+ struct el3_lp_desc *el3_lp_descs;
+ struct secure_partition_desc *sp;
+ unsigned int idx;
+
+ /* Check if arg2 has been populated correctly based on message type. */
+ if (!direct_msg_validate_arg2(x2)) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Validate Sender is either the current SP or from the normal world. */
+ if ((secure_origin && src_id != spmc_get_current_sp_ctx()->sp_id) ||
+ (!secure_origin && !ffa_is_normal_world_id(src_id))) {
+ ERROR("Invalid direct request source ID (0x%x).\n", src_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ el3_lp_descs = get_el3_lp_array();
+
+ /* Check if the request is destined for a Logical Partition. */
+ for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
+ if (el3_lp_descs[i].sp_id == dst_id) {
+ uint64_t ret = el3_lp_descs[i].direct_req(
+ smc_fid, secure_origin, x1, x2,
+ x3, x4, cookie, handle, flags);
+ if (!direct_msg_validate_lp_resp(src_id, dst_id,
+ handle)) {
+ panic();
+ }
+
+ /* Message checks out. */
+ return ret;
+ }
+ }
+
+ /*
+ * If the request was not targeted to a LSP and from the secure world
+ * then it is invalid since a SP cannot call into the Normal world and
+ * there is no other SP to call into. If there are other SPs in future
+ * then the partition runtime model would need to be validated as well.
+ */
+ if (secure_origin) {
+ VERBOSE("Direct request not supported to the Normal World.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Check if the SP ID is valid. */
+ sp = spmc_get_sp_ctx(dst_id);
+ if (sp == NULL) {
+ VERBOSE("Direct request to unknown partition ID (0x%x).\n",
+ dst_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Check that the target execution context is in a waiting state before
+ * forwarding the direct request to it.
+ */
+ idx = get_ec_index(sp);
+ if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
+ VERBOSE("SP context on core%u is not waiting (%u).\n",
+ idx, sp->ec[idx].rt_model);
+ return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
+ }
+
+ /*
+ * Everything checks out so forward the request to the SP after updating
+ * its state and runtime model.
+ */
+ sp->ec[idx].rt_state = RT_STATE_RUNNING;
+ sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
+ sp->ec[idx].dir_req_origin_id = src_id;
+ return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
+ handle, cookie, flags, dst_id);
+}
+
+/*******************************************************************************
+ * Handle direct response messages and route to the appropriate destination.
+ ******************************************************************************/
+static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ uint16_t dst_id = ffa_endpoint_destination(x1);
+ struct secure_partition_desc *sp;
+ unsigned int idx;
+
+ /* Check if arg2 has been populated correctly based on message type. */
+ if (!direct_msg_validate_arg2(x2)) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Check that the response did not originate from the Normal world. */
+ if (!secure_origin) {
+ VERBOSE("Direct Response not supported from Normal World.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Check that the response is either targeted to the Normal world or the
+ * SPMC e.g. a PM response.
+ */
+ if (!direct_msg_validate_dst_id(dst_id)) {
+ VERBOSE("Direct response to invalid partition ID (0x%x).\n",
+ dst_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Obtain the SP descriptor and update its runtime state. */
+ sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
+ if (sp == NULL) {
+ VERBOSE("Direct response to unknown partition ID (0x%x).\n",
+ dst_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Sanity check state is being tracked correctly in the SPMC. */
+ idx = get_ec_index(sp);
+ assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
+
+ /* Ensure SP execution context was in the right runtime model. */
+ if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
+ VERBOSE("SP context on core%u not handling direct req (%u).\n",
+ idx, sp->ec[idx].rt_model);
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+
+ if (sp->ec[idx].dir_req_origin_id != dst_id) {
+ WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n",
+ dst_id, sp->ec[idx].dir_req_origin_id, idx);
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+
+ /* Update the state of the SP execution context. */
+ sp->ec[idx].rt_state = RT_STATE_WAITING;
+
+ /* Clear the ongoing direct request ID. */
+ sp->ec[idx].dir_req_origin_id = INV_SP_ID;
+
+ /*
+ * If the receiver is the SPMC then perform a synchronous exit back into
+ * the SPMC, otherwise forward the response to the Normal world.
+ */
+ if (dst_id == FFA_SPMC_ID) {
+ spmc_sp_synchronous_exit(&sp->ec[idx], x4);
+ /* Should not get here. */
+ panic();
+ }
+
+ return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
+ handle, cookie, flags, dst_id);
+}
+
+/*******************************************************************************
+ * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
+ * cycles.
+ ******************************************************************************/
+static uint64_t msg_wait_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct secure_partition_desc *sp;
+ unsigned int idx;
+
+ /*
+ * Check that the response did not originate from the Normal world as
+ * only the secure world can call this ABI.
+ */
+ if (!secure_origin) {
+ VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
+ sp = spmc_get_current_sp_ctx();
+ if (sp == NULL) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Get the execution context of the SP that invoked FFA_MSG_WAIT.
+ */
+ idx = get_ec_index(sp);
+
+ /* Ensure SP execution context was in the right runtime model. */
+ if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+
+ /* Sanity check the state is being tracked correctly in the SPMC. */
+ assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
+
+ /*
+ * Perform a synchronous exit if the partition was initialising. The
+ * state is updated after the exit.
+ */
+ if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
+ spmc_sp_synchronous_exit(&sp->ec[idx], x4);
+ /* Should not get here */
+ panic();
+ }
+
+ /* Update the state of the SP execution context. */
+ sp->ec[idx].rt_state = RT_STATE_WAITING;
+
+ /* Resume normal world if a secure interrupt was handled. */
+ if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
+ /* FFA_MSG_WAIT can only be called from the secure world. */
+ unsigned int secure_state_in = SECURE;
+ unsigned int secure_state_out = NON_SECURE;
+
+ cm_el1_sysregs_context_save(secure_state_in);
+ cm_el1_sysregs_context_restore(secure_state_out);
+ cm_set_next_eret_context(secure_state_out);
+ SMC_RET0(cm_get_context(secure_state_out));
+ }
+
+ /* Forward the response to the Normal world. */
+ return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
+ handle, cookie, flags, FFA_NWD_ID);
+}
+
+static uint64_t ffa_error_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct secure_partition_desc *sp;
+ unsigned int idx;
+
+ /* Check that the response did not originate from the Normal world. */
+ if (!secure_origin) {
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Get the descriptor of the SP that invoked FFA_ERROR. */
+ sp = spmc_get_current_sp_ctx();
+ if (sp == NULL) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Get the execution context of the SP that invoked FFA_ERROR. */
+ idx = get_ec_index(sp);
+
+ /*
+ * We only expect FFA_ERROR to be received during SP initialisation
+ * otherwise this is an invalid call.
+ */
+ if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
+ ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
+ spmc_sp_synchronous_exit(&sp->ec[idx], x2);
+ /* Should not get here. */
+ panic();
+ }
+
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+}
+
+static uint64_t ffa_version_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ uint32_t requested_version = x1 & FFA_VERSION_MASK;
+
+ if (requested_version & FFA_VERSION_BIT31_MASK) {
+ /* Invalid encoding, return an error. */
+ SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
+ /* Execution stops here. */
+ }
+
+ /* Determine the caller to store the requested version. */
+ if (secure_origin) {
+ /*
+ * Ensure that the SP is reporting the same version as
+ * specified in its manifest. If these do not match there is
+ * something wrong with the SP.
+ * TODO: Should we abort the SP? For now assert that this is not the
+ * case.
+ */
+ assert(requested_version ==
+ spmc_get_current_sp_ctx()->ffa_version);
+ } else {
+ /*
+ * If this is called by the normal world, record this
+ * information in its descriptor.
+ */
+ spmc_get_hyp_ctx()->ffa_version = requested_version;
+ }
+
+ SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
+ FFA_VERSION_MINOR));
+}
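+
+/*
+ * Note: version words are composed by MAKE_FFA_VERSION() as
+ * (major << 16) | minor with bit 31 clear; FF-A v1.1, for example, is
+ * reported as 0x10001. Requests with bit 31 set are rejected above as an
+ * invalid encoding.
+ */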
+
+/*******************************************************************************
+ * Helper function to obtain the FF-A version of the calling partition.
+ ******************************************************************************/
+uint32_t get_partition_ffa_version(bool secure_origin)
+{
+ if (secure_origin) {
+ return spmc_get_current_sp_ctx()->ffa_version;
+ } else {
+ return spmc_get_hyp_ctx()->ffa_version;
+ }
+}
+
+static uint64_t rxtx_map_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int ret;
+ uint32_t error_code;
+ uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
+ struct mailbox *mbox;
+ uintptr_t tx_address = x1;
+ uintptr_t rx_address = x2;
+ uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
+ uint32_t buf_size = page_count * FFA_PAGE_SIZE;
+
+ /*
+ * The SPMC does not support mapping of VM RX/TX pairs to facilitate
+ * indirect messaging with SPs. Check if the Hypervisor has invoked this
+ * ABI on behalf of a VM and reject it if this is the case.
+ */
+ if (tx_address == 0 || rx_address == 0) {
+ WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Ensure the specified buffers are not the same. */
+ if (tx_address == rx_address) {
+ WARN("TX Buffer must not be the same as RX Buffer.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Ensure the buffer size is not 0. */
+ if (buf_size == 0U) {
+ WARN("Buffer size must not be 0\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Ensure the buffer size is a multiple of the translation granule size
+ * in TF-A.
+ */
+ if (buf_size % PAGE_SIZE != 0U) {
+ WARN("Buffer size must be aligned to translation granule.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Obtain the RX/TX buffer pair descriptor. */
+ mbox = spmc_get_mbox_desc(secure_origin);
+
+ spin_lock(&mbox->lock);
+
+ /* Check if buffers have already been mapped. */
+ if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
+ WARN("RX/TX Buffers already mapped (%p/%p)\n",
+ (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
+ error_code = FFA_ERROR_DENIED;
+ goto err;
+ }
+
+ /* memmap the TX buffer as read only. */
+ ret = mmap_add_dynamic_region(tx_address, /* PA */
+ tx_address, /* VA */
+ buf_size, /* size */
+ mem_atts | MT_RO_DATA); /* attrs */
+ if (ret != 0) {
+ /* Return the correct error code. */
+ error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
+ FFA_ERROR_INVALID_PARAMETER;
+ WARN("Unable to map TX buffer: %d\n", error_code);
+ goto err;
+ }
+
+ /* memmap the RX buffer as read write. */
+ ret = mmap_add_dynamic_region(rx_address, /* PA */
+ rx_address, /* VA */
+ buf_size, /* size */
+ mem_atts | MT_RW_DATA); /* attrs */
+
+ if (ret != 0) {
+ error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
+ FFA_ERROR_INVALID_PARAMETER;
+ WARN("Unable to map RX buffer: %d\n", error_code);
+ /* Unmap the TX buffer again. */
+ mmap_remove_dynamic_region(tx_address, buf_size);
+ goto err;
+ }
+
+ mbox->tx_buffer = (void *) tx_address;
+ mbox->rx_buffer = (void *) rx_address;
+ mbox->rxtx_page_count = page_count;
+ spin_unlock(&mbox->lock);
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+ /* Execution stops here. */
+err:
+ spin_unlock(&mbox->lock);
+ return spmc_ffa_error_return(handle, error_code);
+}
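+
+/*
+ * Illustrative FFA_RXTX_MAP call, as handled above: the caller passes the TX
+ * buffer base in x1, the RX buffer base in x2 and the page count (bits [5:0])
+ * in x3. For example, two distinct 4K-aligned buffers with x3 = 1 map a single
+ * 4K page each; passing the same address in x1 and x2, or a page count of 0,
+ * returns FFA_ERROR_INVALID_PARAMETER.
+ */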
+
+static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+ uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+
+ /*
+ * The SPMC does not support mapping of VM RX/TX pairs to facilitate
+ * indirect messaging with SPs. Check if the Hypervisor has invoked this
+ * ABI on behalf of a VM and reject it if this is the case.
+ */
+ if (x1 != 0UL) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ spin_lock(&mbox->lock);
+
+ /* Check if buffers are currently mapped. */
+ if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
+ spin_unlock(&mbox->lock);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Unmap RX Buffer */
+ if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
+ buf_size) != 0) {
+ WARN("Unable to unmap RX buffer!\n");
+ }
+
+ mbox->rx_buffer = 0;
+
+ /* Unmap TX Buffer */
+ if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
+ buf_size) != 0) {
+ WARN("Unable to unmap TX buffer!\n");
+ }
+
+ mbox->tx_buffer = 0;
+ mbox->rxtx_page_count = 0;
+
+ spin_unlock(&mbox->lock);
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+}
+
+/*
+ * Helper function to populate the properties field of a Partition Info Get
+ * descriptor.
+ */
+static uint32_t
+partition_info_get_populate_properties(uint32_t sp_properties,
+ enum sp_execution_state sp_ec_state)
+{
+ uint32_t properties = sp_properties;
+ uint32_t ec_state;
+
+ /* Determine the execution state of the SP. */
+ ec_state = sp_ec_state == SP_STATE_AARCH64 ?
+ FFA_PARTITION_INFO_GET_AARCH64_STATE :
+ FFA_PARTITION_INFO_GET_AARCH32_STATE;
+
+ properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;
+
+ return properties;
+}
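+
+/*
+ * For example, an AArch64 S-EL1 SP that can receive direct requests ends up
+ * with properties = FFA_PARTITION_DIRECT_REQ_RECV |
+ * (FFA_PARTITION_INFO_GET_AARCH64_STATE <<
+ *  FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT).
+ */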
+
+/*
+ * Collate the partition information in the v1.1 partition information
+ * descriptor format; it will be converted later if required.
+ */
+static int partition_info_get_handler_v1_1(uint32_t *uuid,
+ struct ffa_partition_info_v1_1
+ *partitions,
+ uint32_t max_partitions,
+ uint32_t *partition_count)
+{
+ uint32_t index;
+ struct ffa_partition_info_v1_1 *desc;
+ bool null_uuid = is_null_uuid(uuid);
+ struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
+
+ /* Deal with Logical Partitions. */
+ for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
+ if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
+ /* Found a matching UUID, populate appropriately. */
+ if (*partition_count >= max_partitions) {
+ return FFA_ERROR_NO_MEMORY;
+ }
+
+ desc = &partitions[*partition_count];
+ desc->ep_id = el3_lp_descs[index].sp_id;
+ desc->execution_ctx_count = PLATFORM_CORE_COUNT;
+ /* LSPs must be AArch64. */
+ desc->properties =
+ partition_info_get_populate_properties(
+ el3_lp_descs[index].properties,
+ SP_STATE_AARCH64);
+
+ if (null_uuid) {
+ copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
+ }
+ (*partition_count)++;
+ }
+ }
+
+ /* Deal with physical SP's. */
+ for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
+ if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
+ /* Found a matching UUID, populate appropriately. */
+ if (*partition_count >= max_partitions) {
+ return FFA_ERROR_NO_MEMORY;
+ }
+
+ desc = &partitions[*partition_count];
+ desc->ep_id = sp_desc[index].sp_id;
+ /*
+ * Execution context count must match No. cores for
+ * S-EL1 SPs.
+ */
+ desc->execution_ctx_count = PLATFORM_CORE_COUNT;
+ desc->properties =
+ partition_info_get_populate_properties(
+ sp_desc[index].properties,
+ sp_desc[index].execution_state);
+
+ if (null_uuid) {
+ copy_uuid(desc->uuid, sp_desc[index].uuid);
+ }
+ (*partition_count)++;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Handle the case where the caller only wants the count of partitions
+ * matching a given UUID and does not want the corresponding descriptors
+ * populated.
+ */
+static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
+{
+ uint32_t index = 0;
+ uint32_t partition_count = 0;
+ bool null_uuid = is_null_uuid(uuid);
+ struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
+
+ /* Deal with Logical Partitions. */
+ for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
+ if (null_uuid ||
+ uuid_match(uuid, el3_lp_descs[index].uuid)) {
+ (partition_count)++;
+ }
+ }
+
+ /* Deal with physical SP's. */
+ for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
+ if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
+ (partition_count)++;
+ }
+ }
+ return partition_count;
+}
+
+/*
+ * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
+ * the corresponding descriptor format from the v1.1 descriptor array.
+ */
+static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
+ *partitions,
+ struct mailbox *mbox,
+ int partition_count)
+{
+ uint32_t index;
+ uint32_t buf_size;
+ uint32_t descriptor_size;
+ struct ffa_partition_info_v1_0 *v1_0_partitions =
+ (struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
+
+ buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+ descriptor_size = partition_count *
+ sizeof(struct ffa_partition_info_v1_0);
+
+ if (descriptor_size > buf_size) {
+ return FFA_ERROR_NO_MEMORY;
+ }
+
+ for (index = 0U; index < partition_count; index++) {
+ v1_0_partitions[index].ep_id = partitions[index].ep_id;
+ v1_0_partitions[index].execution_ctx_count =
+ partitions[index].execution_ctx_count;
+ /* Only report v1.0 properties. */
+ v1_0_partitions[index].properties =
+ (partitions[index].properties &
+ FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
+ }
+ return 0;
+}
+
+/*
+ * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
+ * v1.0 implementations.
+ */
+static uint64_t partition_info_get_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int ret;
+ uint32_t partition_count = 0;
+ uint32_t size = 0;
+ uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+ struct mailbox *mbox;
+ uint64_t info_get_flags;
+ bool count_only;
+ uint32_t uuid[4];
+
+ uuid[0] = x1;
+ uuid[1] = x2;
+ uuid[2] = x3;
+ uuid[3] = x4;
+
+ /* Determine if the Partition descriptors should be populated. */
+ info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
+ count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
+
+ /* Handle the case where we don't need to populate the descriptors. */
+ if (count_only) {
+ partition_count = partition_info_get_handler_count_only(uuid);
+ if (partition_count == 0) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+ } else {
+ struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
+
+ /*
+ * Handle the case where the partition descriptors are required,
+ * check we have the buffers available and populate the
+ * appropriate structure version.
+ */
+
+ /* Obtain the v1.1 format of the descriptors. */
+ ret = partition_info_get_handler_v1_1(uuid, partitions,
+ MAX_SP_LP_PARTITIONS,
+ &partition_count);
+
+ /* Check if an error occurred during discovery. */
+ if (ret != 0) {
+ goto err;
+ }
+
+ /* If we didn't find any matches the UUID is unknown. */
+ if (partition_count == 0) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err;
+ }
+
+ /* Obtain the partition mailbox RX/TX buffer pair descriptor. */
+ mbox = spmc_get_mbox_desc(secure_origin);
+
+ /*
+ * If the caller has not registered its RX/TX buffer pair then
+ * return an error code.
+ */
+ spin_lock(&mbox->lock);
+ if (mbox->rx_buffer == NULL) {
+ ret = FFA_ERROR_BUSY;
+ goto err_unlock;
+ }
+
+ /* Ensure the RX buffer is currently free. */
+ if (mbox->state != MAILBOX_STATE_EMPTY) {
+ ret = FFA_ERROR_BUSY;
+ goto err_unlock;
+ }
+
+ /* Zero the RX buffer before populating. */
+ (void)memset(mbox->rx_buffer, 0,
+ mbox->rxtx_page_count * FFA_PAGE_SIZE);
+
+ /*
+ * Depending on the FF-A version of the requesting partition we may
+ * need to convert to the v1.0 format; otherwise we can copy directly.
+ */
+ if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
+ ret = partition_info_populate_v1_0(partitions,
+ mbox,
+ partition_count);
+ if (ret != 0) {
+ goto err_unlock;
+ }
+ } else {
+ uint32_t buf_size = mbox->rxtx_page_count *
+ FFA_PAGE_SIZE;
+
+ /* Ensure the descriptor will fit in the buffer. */
+ size = sizeof(struct ffa_partition_info_v1_1);
+ if (partition_count * size > buf_size) {
+ ret = FFA_ERROR_NO_MEMORY;
+ goto err_unlock;
+ }
+ memcpy(mbox->rx_buffer, partitions,
+ partition_count * size);
+ }
+
+ mbox->state = MAILBOX_STATE_FULL;
+ spin_unlock(&mbox->lock);
+ }
+ SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
+
+err_unlock:
+ spin_unlock(&mbox->lock);
+err:
+ return spmc_ffa_error_return(handle, ret);
+}
+
+static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
+{
+ SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
+}
+
+static uint64_t ffa_features_retrieve_request(bool secure_origin,
+ uint32_t input_properties,
+ void *handle)
+{
+ /*
+ * If we're called by the normal world we don't support any
+ * additional features.
+ */
+ if (!secure_origin) {
+ if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ } else {
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ /*
+ * If v1.1 the NS bit must be set otherwise it is an invalid
+ * call. If v1.0 check and store whether the SP has requested
+ * the use of the NS bit.
+ */
+ if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
+ if ((input_properties &
+ FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+ return ffa_feature_success(handle,
+ FFA_FEATURES_RET_REQ_NS_BIT);
+ } else {
+ sp->ns_bit_requested = (input_properties &
+ FFA_FEATURES_RET_REQ_NS_BIT) !=
+ 0U;
+ }
+ if (sp->ns_bit_requested) {
+ return ffa_feature_success(handle,
+ FFA_FEATURES_RET_REQ_NS_BIT);
+ }
+ }
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+}
+
+static uint64_t ffa_features_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ uint32_t function_id = (uint32_t) x1;
+ uint32_t input_properties = (uint32_t) x2;
+
+ /* Check if a Feature ID was requested. */
+ if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
+ /* We currently don't support any additional features. */
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /*
+ * Handle the cases where we have separate handlers due to additional
+ * properties.
+ */
+ switch (function_id) {
+ case FFA_MEM_RETRIEVE_REQ_SMC32:
+ case FFA_MEM_RETRIEVE_REQ_SMC64:
+ return ffa_features_retrieve_request(secure_origin,
+ input_properties,
+ handle);
+ }
+
+ /*
+ * We don't currently support additional input properties for these
+ * other ABIs therefore ensure this value is set to 0.
+ */
+ if (input_properties != 0U) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Report if any other FF-A ABI is supported. */
+ switch (function_id) {
+ /* Supported features from both worlds. */
+ case FFA_ERROR:
+ case FFA_SUCCESS_SMC32:
+ case FFA_INTERRUPT:
+ case FFA_SPM_ID_GET:
+ case FFA_ID_GET:
+ case FFA_FEATURES:
+ case FFA_VERSION:
+ case FFA_RX_RELEASE:
+ case FFA_MSG_SEND_DIRECT_REQ_SMC32:
+ case FFA_MSG_SEND_DIRECT_REQ_SMC64:
+ case FFA_PARTITION_INFO_GET:
+ case FFA_RXTX_MAP_SMC32:
+ case FFA_RXTX_MAP_SMC64:
+ case FFA_RXTX_UNMAP:
+ case FFA_MEM_FRAG_TX:
+ case FFA_MSG_RUN:
+
+ /*
+ * We are relying on the fact that the other registers
+ * will be set to 0 as these values align with the
+ * currently implemented features of the SPMC. If this
+ * changes this function must be extended to handle
+ * reporting the additional functionality.
+ */
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+ /* Execution stops here. */
+
+ /* Supported ABIs only from the secure world. */
+ case FFA_SECONDARY_EP_REGISTER_SMC64:
+ case FFA_MSG_SEND_DIRECT_RESP_SMC32:
+ case FFA_MSG_SEND_DIRECT_RESP_SMC64:
+ case FFA_MEM_RELINQUISH:
+ case FFA_MSG_WAIT:
+
+ if (!secure_origin) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+ /* Execution stops here. */
+
+ /* Supported features only from the normal world. */
+ case FFA_MEM_SHARE_SMC32:
+ case FFA_MEM_SHARE_SMC64:
+ case FFA_MEM_LEND_SMC32:
+ case FFA_MEM_LEND_SMC64:
+ case FFA_MEM_RECLAIM:
+ case FFA_MEM_FRAG_RX:
+
+ if (secure_origin) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+ /* Execution stops here. */
+
+ default:
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+}
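+
+/*
+ * Illustrative FFA_FEATURES queries, as handled above: w1 carries an FF-A
+ * function ID (bit 31 set), e.g. w1 = FFA_RXTX_MAP_SMC64 with w2 = 0 returns
+ * FFA_SUCCESS_SMC32 from either world, whereas w1 = FFA_MSG_WAIT returns
+ * FFA_ERROR_NOT_SUPPORTED unless the caller is in the secure world. Querying
+ * a Feature ID (bit 31 clear in w1) is currently not supported.
+ */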
+
+static uint64_t ffa_id_get_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ if (secure_origin) {
+ SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
+ spmc_get_current_sp_ctx()->sp_id);
+ } else {
+ SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
+ spmc_get_hyp_ctx()->ns_ep_id);
+ }
+}
+
+/*
+ * Enable an SP to query the ID assigned to the SPMC.
+ */
+static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ assert(x1 == 0UL);
+ assert(x2 == 0UL);
+ assert(x3 == 0UL);
+ assert(x4 == 0UL);
+ assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
+ assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
+ assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
+
+ SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
+}
+
+static uint64_t ffa_run_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct secure_partition_desc *sp;
+ uint16_t target_id = FFA_RUN_EP_ID(x1);
+ uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
+ unsigned int idx;
+ unsigned int *rt_state;
+ unsigned int *rt_model;
+
+ /* Can only be called from the normal world. */
+ if (secure_origin) {
+ ERROR("FFA_RUN can only be called from NWd.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Cannot run a Normal world partition. */
+ if (ffa_is_normal_world_id(target_id)) {
+ ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Check that the target SP exists. */
+ sp = spmc_get_sp_ctx(target_id);
+ if (sp == NULL) {
+ ERROR("Unknown partition ID (0x%x).\n", target_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ idx = get_ec_index(sp);
+ if (idx != vcpu_id) {
+ ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+ rt_state = &((sp->ec[idx]).rt_state);
+ rt_model = &((sp->ec[idx]).rt_model);
+ if (*rt_state == RT_STATE_RUNNING) {
+ ERROR("Partition (0x%x) is already running.\n", target_id);
+ return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
+ }
+
+ /*
+ * Sanity check that if the execution context was not waiting then it
+ * was either in the direct request or the run partition runtime model.
+ */
+ if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
+ assert(*rt_model == RT_MODEL_RUN ||
+ *rt_model == RT_MODEL_DIR_REQ);
+ }
+
+ /*
+ * If the context was waiting then update the partition runtime model.
+ */
+ if (*rt_state == RT_STATE_WAITING) {
+ *rt_model = RT_MODEL_RUN;
+ }
+
+ /*
+ * Forward the request to the correct SP vCPU after updating
+ * its state.
+ */
+ *rt_state = RT_STATE_RUNNING;
+
+ return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
+ handle, cookie, flags, target_id);
+}
+
+static uint64_t rx_release_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+
+ spin_lock(&mbox->lock);
+
+ if (mbox->state != MAILBOX_STATE_FULL) {
+ spin_unlock(&mbox->lock);
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+
+ mbox->state = MAILBOX_STATE_EMPTY;
+ spin_unlock(&mbox->lock);
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+}
+
+/*
+ * Perform initial validation on the provided secondary entry point.
+ * For now ensure it does not lie within the BL31 Image or the SP's
+ * RX/TX buffers as these are mapped within EL3.
+ * TODO: perform validation for additional invalid memory regions.
+ */
+static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
+{
+ struct mailbox *mb;
+ uintptr_t buffer_size;
+ uintptr_t sp_rx_buffer;
+ uintptr_t sp_tx_buffer;
+ uintptr_t sp_rx_buffer_limit;
+ uintptr_t sp_tx_buffer_limit;
+
+ mb = &sp->mailbox;
+ buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
+ sp_rx_buffer = (uintptr_t) mb->rx_buffer;
+ sp_tx_buffer = (uintptr_t) mb->tx_buffer;
+ sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
+ sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
+
+ /*
+ * Check if the entry point lies within BL31, or the
+ * SP's RX or TX buffer.
+ */
+ if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
+ (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
+ (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*******************************************************************************
+ * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
+ * register an entry point for initialization during a secondary cold boot.
+ ******************************************************************************/
+static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct secure_partition_desc *sp;
+ struct sp_exec_ctx *sp_ctx;
+
+ /* This request cannot originate from the Normal world. */
+ if (!secure_origin) {
+ WARN("%s: Can only be called from SWd.\n", __func__);
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Get the context of the current SP. */
+ sp = spmc_get_current_sp_ctx();
+ if (sp == NULL) {
+ WARN("%s: Cannot find SP context.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Only an S-EL1 SP should be invoking this ABI. */
+ if (sp->runtime_el != S_EL1) {
+ WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+
+ /* Ensure the SP is in its initialization state. */
+ sp_ctx = spmc_get_sp_ec(sp);
+ if (sp_ctx->rt_model != RT_MODEL_INIT) {
+ WARN("%s: Can only be called during SP initialization.\n",
+ __func__);
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+
+ /* Perform initial validation of the secondary entry point. */
+ if (validate_secondary_ep(x1, sp)) {
+ WARN("%s: Invalid entry point provided (0x%lx).\n",
+ __func__, x1);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Update the secondary entrypoint in SP context.
+ * We don't need a lock here as during partition initialization there
+ * will only be a single core online.
+ */
+ sp->secondary_ep = x1;
+ VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+}
+
+/*******************************************************************************
+ * This function parses the Secure Partition manifest. It fetches the details
+ * required to prepare the Secure Partition image context and, if present, the
+ * Secure Partition image boot arguments.
+ ******************************************************************************/
+static int sp_manifest_parse(void *sp_manifest, int offset,
+ struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info,
+ int32_t *boot_info_reg)
+{
+ int32_t ret, node;
+ uint32_t config_32;
+
+ /*
+ * Look for the mandatory fields that are expected to be present in
+ * the SP manifests.
+ */
+ node = fdt_path_offset(sp_manifest, "/");
+ if (node < 0) {
+ ERROR("Did not find root node.\n");
+ return node;
+ }
+
+ ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
+ ARRAY_SIZE(sp->uuid), sp->uuid);
+ if (ret != 0) {
+ ERROR("Missing Secure Partition UUID.\n");
+ return ret;
+ }
+
+ ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
+ if (ret != 0) {
+ ERROR("Missing SP Exception Level information.\n");
+ return ret;
+ }
+
+ sp->runtime_el = config_32;
+
+ ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
+ if (ret != 0) {
+ ERROR("Missing Secure Partition FF-A Version.\n");
+ return ret;
+ }
+
+ sp->ffa_version = config_32;
+
+ ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
+ if (ret != 0) {
+ ERROR("Missing Secure Partition Execution State.\n");
+ return ret;
+ }
+
+ sp->execution_state = config_32;
+
+ ret = fdt_read_uint32(sp_manifest, node,
+ "messaging-method", &config_32);
+ if (ret != 0) {
+ ERROR("Missing Secure Partition messaging method.\n");
+ return ret;
+ }
+
+ /* Validate this entry, we currently only support direct messaging. */
+ if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
+ FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
+ WARN("Invalid Secure Partition messaging method (0x%x)\n",
+ config_32);
+ return -EINVAL;
+ }
+
+ sp->properties = config_32;
+
+ ret = fdt_read_uint32(sp_manifest, node,
+ "execution-ctx-count", &config_32);
+
+ if (ret != 0) {
+ ERROR("Missing SP Execution Context Count.\n");
+ return ret;
+ }
+
+ /*
+ * Ensure this field is set correctly in the manifest. However, since
+ * this is currently a hardcoded value for S-EL1 partitions, we don't
+ * need to save it here, only validate it.
+ */
+ if (config_32 != PLATFORM_CORE_COUNT) {
+ ERROR("SP Execution Context Count (%u) must be %u.\n",
+ config_32, PLATFORM_CORE_COUNT);
+ return -EINVAL;
+ }
+
+ /*
+ * Look for the optional fields that are expected to be present in
+ * an SP manifest.
+ */
+ ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
+ if (ret != 0) {
+ WARN("Missing Secure Partition ID.\n");
+ } else {
+ if (!is_ffa_secure_id_valid(config_32)) {
+ ERROR("Invalid Secure Partition ID (0x%x).\n",
+ config_32);
+ return -EINVAL;
+ }
+ sp->sp_id = config_32;
+ }
+
+ ret = fdt_read_uint32(sp_manifest, node,
+ "power-management-messages", &config_32);
+ if (ret != 0) {
+ WARN("Missing Power Management Messages entry.\n");
+ } else {
+ /*
+ * Ensure only the currently supported power messages have
+ * been requested.
+ */
+ if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
+ FFA_PM_MSG_SUB_CPU_SUSPEND |
+ FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
+ ERROR("Requested unsupported PM messages (%x)\n",
+ config_32);
+ return -EINVAL;
+ }
+ sp->pwr_mgmt_msgs = config_32;
+ }
+
+ ret = fdt_read_uint32(sp_manifest, node,
+ "gp-register-num", &config_32);
+ if (ret != 0) {
+ WARN("Missing boot information register.\n");
+ } else {
+ /* Check if a register number between 0-3 is specified. */
+ if (config_32 < 4) {
+ *boot_info_reg = config_32;
+ } else {
+ WARN("Incorrect boot information register (%u).\n",
+ config_32);
+ }
+ }
+
+ return 0;
+}
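+
+/*
+ * Illustrative manifest fragment consumed by sp_manifest_parse(). Values are
+ * placeholders; numeric encodings follow the enums in spmc.h (exception-level
+ * 2 for S-EL1, execution-state 0 for AArch64), execution-ctx-count must equal
+ * PLATFORM_CORE_COUNT, and messaging-method is assumed to encode only the
+ * direct request send/receive bits validated above.
+ *
+ *	compatible = "arm,ffa-manifest-1.0";
+ *	ffa-version = <0x00010001>;
+ *	uuid = <0x12345678 0x9abcdef0 0x12345678 0x9abcdef0>;
+ *	id = <0x8001>;
+ *	exception-level = <2>;
+ *	execution-state = <0>;
+ *	execution-ctx-count = <8>;
+ *	messaging-method = <3>;
+ *	gp-register-num = <0>;
+ */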
+
+/*******************************************************************************
+ * This function gets the Secure Partition Manifest base and maps the manifest
+ * region.
+ * Currently only one Secure Partition manifest is considered which is used to
+ * prepare the context for the single Secure Partition.
+ ******************************************************************************/
+static int find_and_prepare_sp_context(void)
+{
+ void *sp_manifest;
+ uintptr_t manifest_base;
+ uintptr_t manifest_base_align;
+ entry_point_info_t *next_image_ep_info;
+ int32_t ret, boot_info_reg = -1;
+ struct secure_partition_desc *sp;
+
+ next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (next_image_ep_info == NULL) {
+ WARN("No Secure Partition image provided by BL2.\n");
+ return -ENOENT;
+ }
+
+ sp_manifest = (void *)next_image_ep_info->args.arg0;
+ if (sp_manifest == NULL) {
+ WARN("Secure Partition manifest absent.\n");
+ return -ENOENT;
+ }
+
+ manifest_base = (uintptr_t)sp_manifest;
+ manifest_base_align = page_align(manifest_base, DOWN);
+
+ /*
+ * Map the secure partition manifest region in the EL3 translation
+ * regime.
+ * Map an area of (2 * PAGE_SIZE) for now. After aligning the manifest
+ * base down to a page boundary, a single PAGE_SIZE region may not
+ * completely accommodate the secure partition manifest.
+ */
+ ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
+ manifest_base_align,
+ PAGE_SIZE * 2,
+ MT_RO_DATA);
+ if (ret != 0) {
+ ERROR("Error while mapping SP manifest (%d).\n", ret);
+ return ret;
+ }
+
+ ret = fdt_node_offset_by_compatible(sp_manifest, -1,
+ "arm,ffa-manifest-1.0");
+ if (ret < 0) {
+ ERROR("Error happened in SP manifest reading.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Store the size of the manifest so that it can later be passed to the
+ * SP as boot information.
+ */
+ next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
+ INFO("Manifest size = %lu bytes.\n", next_image_ep_info->args.arg1);
+
+ /*
+ * Select an SP descriptor for initialising the partition's execution
+ * context on the primary CPU.
+ */
+ sp = spmc_get_current_sp_ctx();
+
+ /* Initialize entry point information for the SP */
+ SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
+ SECURE | EP_ST_ENABLE);
+
+ /* Parse the SP manifest. */
+ ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
+ &boot_info_reg);
+ if (ret != 0) {
+ ERROR("Error in Secure Partition manifest parsing.\n");
+ return ret;
+ }
+
+ /* Check that the runtime EL in the manifest was correct. */
+ if (sp->runtime_el != S_EL1) {
+ ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
+ return -EINVAL;
+ }
+
+ /* Perform any common initialisation. */
+ spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
+
+ /* Perform any initialisation specific to S-EL1 SPs. */
+ spmc_el1_sp_setup(sp, next_image_ep_info);
+
+ /* Initialize the SP context with the required ep info. */
+ spmc_sp_common_ep_commit(sp, next_image_ep_info);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * This function validates the EL3 Logical Partition descriptors and runs the
+ * init handler of each Logical Partition.
+ ******************************************************************************/
+static int32_t logical_sp_init(void)
+{
+ int32_t rc = 0;
+ struct el3_lp_desc *el3_lp_descs;
+
+ /* Perform initial validation of the Logical Partitions. */
+ rc = el3_sp_desc_validate();
+ if (rc != 0) {
+ ERROR("Logical Partition validation failed!\n");
+ return rc;
+ }
+
+ el3_lp_descs = get_el3_lp_array();
+
+ INFO("Logical Secure Partition init start.\n");
+ for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
+ rc = el3_lp_descs[i].init();
+ if (rc != 0) {
+ ERROR("Logical SP (0x%x) Failed to Initialize\n",
+ el3_lp_descs[i].sp_id);
+ return rc;
+ }
+ VERBOSE("Logical SP (0x%x) Initialized\n",
+ el3_lp_descs[i].sp_id);
+ }
+
+ INFO("Logical Secure Partition init completed.\n");
+
+ return rc;
+}
+
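+/*******************************************************************************
+ * This function takes an SP execution context and performs a synchronous entry
+ * into it on the current physical cpu.
+ ******************************************************************************/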
+uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
+{
+ uint64_t rc;
+
+ assert(ec != NULL);
+
+ /* Assign the context of the SP to this CPU */
+ cm_set_context(&(ec->cpu_ctx), SECURE);
+
+ /* Restore the context assigned above */
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ /* Invalidate TLBs at EL1. */
+ tlbivmalle1();
+ dsbish();
+
+ /* Enter Secure Partition */
+ rc = spm_secure_partition_enter(&ec->c_rt_ctx);
+
+ /* Save secure state */
+ cm_el1_sysregs_context_save(SECURE);
+
+ return rc;
+}
+
+/*******************************************************************************
+ * SPMC Helper Functions.
+ ******************************************************************************/
+static int32_t sp_init(void)
+{
+ uint64_t rc;
+ struct secure_partition_desc *sp;
+ struct sp_exec_ctx *ec;
+
+ sp = spmc_get_current_sp_ctx();
+ ec = spmc_get_sp_ec(sp);
+ ec->rt_model = RT_MODEL_INIT;
+ ec->rt_state = RT_STATE_RUNNING;
+
+ INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
+
+ rc = spmc_sp_synchronous_entry(ec);
+ if (rc != 0) {
+ /* Indicate SP init was not successful. */
+ ERROR("SP (0x%x) failed to initialize (%lu).\n",
+ sp->sp_id, rc);
+ return 0;
+ }
+
+ ec->rt_state = RT_STATE_WAITING;
+ INFO("Secure Partition initialized.\n");
+
+ return 1;
+}
+
+static void initialize_sp_descs(void)
+{
+ struct secure_partition_desc *sp;
+
+ for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
+ sp = &sp_desc[i];
+ sp->sp_id = INV_SP_ID;
+ sp->mailbox.rx_buffer = NULL;
+ sp->mailbox.tx_buffer = NULL;
+ sp->mailbox.state = MAILBOX_STATE_EMPTY;
+ sp->secondary_ep = 0;
+ }
+}
+
+static void initialize_ns_ep_descs(void)
+{
+ struct ns_endpoint_desc *ns_ep;
+
+ for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
+ ns_ep = &ns_ep_desc[i];
+ /*
+ * Clashes with the Hypervisor ID but will not be a
+ * problem in practice.
+ */
+ ns_ep->ns_ep_id = 0;
+ ns_ep->ffa_version = 0;
+ ns_ep->mailbox.rx_buffer = NULL;
+ ns_ep->mailbox.tx_buffer = NULL;
+ ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
+ }
+}
+
+/*******************************************************************************
+ * Initialize SPMC attributes for the SPMD.
+ ******************************************************************************/
+void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
+{
+ spmc_attrs->major_version = FFA_VERSION_MAJOR;
+ spmc_attrs->minor_version = FFA_VERSION_MINOR;
+ spmc_attrs->exec_state = MODE_RW_64;
+ spmc_attrs->spmc_id = FFA_SPMC_ID;
+}
+
+/*******************************************************************************
+ * Initialize contexts of all Secure Partitions.
+ ******************************************************************************/
+int32_t spmc_setup(void)
+{
+ int32_t ret;
+ uint32_t flags;
+
+ /* Initialize endpoint descriptors */
+ initialize_sp_descs();
+ initialize_ns_ep_descs();
+
+ /*
+ * Retrieve the information of the datastore for tracking shared memory
+ * requests allocated by platform code and zero the region if available.
+ */
+ ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
+ &spmc_shmem_obj_state.data_size);
+ if (ret != 0) {
+ ERROR("Failed to obtain memory descriptor backing store!\n");
+ return ret;
+ }
+ memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
+
+ /* Setup logical SPs. */
+ ret = logical_sp_init();
+ if (ret != 0) {
+ ERROR("Failed to initialize Logical Partitions.\n");
+ return ret;
+ }
+
+ /* Perform physical SP setup. */
+
+ /* Disable MMU at EL1 (initialized by BL2) */
+ disable_mmu_icache_el1();
+
+ /* Initialize context of the SP */
+ INFO("Secure Partition context setup start.\n");
+
+ ret = find_and_prepare_sp_context();
+ if (ret != 0) {
+ ERROR("Error in SP finding and context preparation.\n");
+ return ret;
+ }
+
+ /* Register power management hooks with PSCI */
+ psci_register_spd_pm_hook(&spmc_pm);
+
+ /*
+ * Register an interrupt handler for S-EL1 interrupts
+ * when generated during code executing in the
+ * non-secure state.
+ */
+ flags = 0;
+ set_interrupt_rm_flag(flags, NON_SECURE);
+ ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+ spmc_sp_interrupt_handler,
+ flags);
+ if (ret != 0) {
+ ERROR("Failed to register interrupt handler! (%d)\n", ret);
+ panic();
+ }
+
+ /* Register init function for deferred init. */
+ bl31_register_bl32_init(&sp_init);
+
+ INFO("Secure Partition setup done.\n");
+
+ return 0;
+}
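+
+/*
+ * Illustrative sketch only: one way a platform could back the shared memory
+ * datastore requested above, using a static carveout. The buffer name and the
+ * PLAT_SPMC_SHMEM_DATASTORE_SIZE macro are hypothetical; the signature is
+ * assumed from the call site in spmc_setup().
+ */
+#if 0
+static uint8_t plat_shmem_datastore[PLAT_SPMC_SHMEM_DATASTORE_SIZE]
+	__aligned(PAGE_SIZE);
+
+int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
+{
+	/* Hand the SPMC a pointer to the static buffer and its size. */
+	*datastore = plat_shmem_datastore;
+	*size = sizeof(plat_shmem_datastore);
+
+	return 0;
+}
+#endif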
+
+/*******************************************************************************
+ * Secure Partition Manager SMC handler.
+ ******************************************************************************/
+uint64_t spmc_smc_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ switch (smc_fid) {
+
+ case FFA_VERSION:
+ return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_SPM_ID_GET:
+ return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
+ x3, x4, cookie, handle, flags);
+
+ case FFA_ID_GET:
+ return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_FEATURES:
+ return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_SECONDARY_EP_REGISTER_SMC64:
+ return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
+ x2, x3, x4, cookie, handle,
+ flags);
+
+ case FFA_MSG_SEND_DIRECT_REQ_SMC32:
+ case FFA_MSG_SEND_DIRECT_REQ_SMC64:
+ return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
+ x3, x4, cookie, handle, flags);
+
+ case FFA_MSG_SEND_DIRECT_RESP_SMC32:
+ case FFA_MSG_SEND_DIRECT_RESP_SMC64:
+ return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
+ x3, x4, cookie, handle, flags);
+
+ case FFA_RXTX_MAP_SMC32:
+ case FFA_RXTX_MAP_SMC64:
+ return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+
+ case FFA_RXTX_UNMAP:
+ return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_PARTITION_INFO_GET:
+ return partition_info_get_handler(smc_fid, secure_origin, x1,
+ x2, x3, x4, cookie, handle,
+ flags);
+
+ case FFA_RX_RELEASE:
+ return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_MSG_WAIT:
+ return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+
+ case FFA_ERROR:
+ return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+
+ case FFA_MSG_RUN:
+ return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+
+ case FFA_MEM_SHARE_SMC32:
+ case FFA_MEM_SHARE_SMC64:
+ case FFA_MEM_LEND_SMC32:
+ case FFA_MEM_LEND_SMC64:
+ return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+
+ case FFA_MEM_FRAG_TX:
+ return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_MEM_FRAG_RX:
+ return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_MEM_RETRIEVE_REQ_SMC32:
+ case FFA_MEM_RETRIEVE_REQ_SMC64:
+ return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
+ x3, x4, cookie, handle, flags);
+
+ case FFA_MEM_RELINQUISH:
+ return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
+ x3, x4, cookie, handle, flags);
+
+ case FFA_MEM_RECLAIM:
+ return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ default:
+ WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
+ break;
+ }
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+}
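+
+/*
+ * Note on the default case above (an informative sketch of the expected
+ * behaviour, not a definitive statement of the implementation): per the FF-A
+ * specification an FFA_ERROR response carries the function ID in w0, target
+ * information in w1 and the status code in w2, so completing the SMC via
+ * spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED) is expected to
+ * return roughly:
+ *
+ *	w0 = FFA_ERROR
+ *	w1 = 0 (target information, MBZ here)
+ *	w2 = FFA_ERROR_NOT_SUPPORTED
+ */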
+
+/*******************************************************************************
+ * This function is the handler registered for S-EL1 interrupts by the SPMC. It
+ * validates the interrupt and upon success arranges entry into the SP for
+ * handling the interrupt.
+ ******************************************************************************/
+static uint64_t spmc_sp_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ struct sp_exec_ctx *ec;
+ uint32_t linear_id = plat_my_core_pos();
+
+ /* Sanity check for a NULL pointer dereference. */
+ assert(sp != NULL);
+
+ /* Check the security state when the exception was generated. */
+ assert(get_interrupt_src_ss(flags) == NON_SECURE);
+
+ /* Panic if not an S-EL1 Partition. */
+ if (sp->runtime_el != S_EL1) {
+ ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
+ linear_id);
+ panic();
+ }
+
+ /* Obtain a reference to the SP execution context. */
+ ec = spmc_get_sp_ec(sp);
+
+ /* Ensure that the execution context is in waiting state else panic. */
+ if (ec->rt_state != RT_STATE_WAITING) {
+ ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
+ linear_id, RT_STATE_WAITING, ec->rt_state);
+ panic();
+ }
+
+ /* Update the runtime model and state of the partition. */
+ ec->rt_model = RT_MODEL_INTR;
+ ec->rt_state = RT_STATE_RUNNING;
+
+ VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
+
+ /*
+ * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
+ * populated as the SP can determine this by itself.
+ */
+ return spmd_smc_switch_state(FFA_INTERRUPT, false,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ handle);
+}
diff --git a/services/std_svc/spm/el3_spmc/spmc_pm.c b/services/std_svc/spm/el3_spmc/spmc_pm.c
new file mode 100644
index 0000000..c7e864f
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_pm.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/spinlock.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <services/ffa_svc.h>
+#include "spmc.h"
+
+#include <platform_def.h>
+
+/*******************************************************************************
+ * spmc_build_pm_message
+ *
+ * Builds an SPMC to SP direct message request.
+ ******************************************************************************/
+static void spmc_build_pm_message(gp_regs_t *gpregs,
+ unsigned long long message,
+ uint8_t pm_msg_type,
+ uint16_t sp_id)
+{
+ write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
+ write_ctx_reg(gpregs, CTX_GPREG_X1,
+ (FFA_SPMC_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+ sp_id);
+ write_ctx_reg(gpregs, CTX_GPREG_X2, FFA_FWK_MSG_BIT |
+ (pm_msg_type & FFA_FWK_MSG_MASK));
+ write_ctx_reg(gpregs, CTX_GPREG_X3, message);
+}
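+
+/*
+ * Worked example (the SP ID 0x8001 is illustrative): for a PSCI CPU_OFF
+ * framework message, spmc_build_pm_message() above populates the SP's saved
+ * GP registers as:
+ *
+ *	x0 = FFA_MSG_SEND_DIRECT_REQ_SMC32
+ *	x1 = (FFA_SPMC_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) | 0x8001
+ *	x2 = FFA_FWK_MSG_BIT | FFA_FWK_MSG_PSCI
+ *	x3 = PSCI_CPU_OFF
+ */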
+
+/*******************************************************************************
+ * This CPU has been turned on. Enter the SP to initialise S-EL1.
+ ******************************************************************************/
+static void spmc_cpu_on_finish_handler(u_register_t unused)
+{
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ struct sp_exec_ctx *ec;
+ unsigned int linear_id = plat_my_core_pos();
+ entry_point_info_t sec_ec_ep_info = {0};
+ uint64_t rc;
+
+ /* Sanity check for a NULL pointer dereference. */
+ assert(sp != NULL);
+
+ /* Initialize entry point information for the SP. */
+ SET_PARAM_HEAD(&sec_ec_ep_info, PARAM_EP, VERSION_1,
+ SECURE | EP_ST_ENABLE);
+
+ /*
+ * Check if the primary execution context registered a secondary entry
+ * point, otherwise bail out early.
+ * TODO: Add support for a boot reason in the manifest to allow jumping
+ * to the entry point of the primary execution context.
+ */
+ if (sp->secondary_ep == 0) {
+ WARN("%s: No secondary ep on core%u\n", __func__, linear_id);
+ return;
+ }
+
+ sec_ec_ep_info.pc = sp->secondary_ep;
+
+ /*
+ * Set up and initialise the SP execution context on this physical CPU.
+ */
+ spmc_el1_sp_setup(sp, &sec_ec_ep_info);
+ spmc_sp_common_ep_commit(sp, &sec_ec_ep_info);
+
+ /* Obtain a reference to the SP execution context. */
+ ec = spmc_get_sp_ec(sp);
+
+ /*
+ * TODO: Should we do some PM related state tracking of the SP execution
+ * context here?
+ */
+
+ /* Update the runtime model and state of the partition. */
+ ec->rt_model = RT_MODEL_INIT;
+ ec->rt_state = RT_STATE_RUNNING;
+ ec->dir_req_origin_id = INV_SP_ID;
+
+ INFO("SP (0x%x) init start on core%u.\n", sp->sp_id, linear_id);
+
+ rc = spmc_sp_synchronous_entry(ec);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%lu) on CPU%u\n", __func__, rc, linear_id);
+ }
+
+ /* Update the runtime state of the partition. */
+ ec->rt_state = RT_STATE_WAITING;
+
+ VERBOSE("CPU %u on!\n", linear_id);
+}
+
+/*******************************************************************************
+ * Helper function to send a FF-A power management message to an SP.
+ ******************************************************************************/
+static int32_t spmc_send_pm_msg(uint8_t pm_msg_type,
+ unsigned long long psci_event)
+{
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ struct sp_exec_ctx *ec;
+ gp_regs_t *gpregs_ctx;
+ unsigned int linear_id = plat_my_core_pos();
+ u_register_t resp;
+ uint64_t rc;
+
+ /* Obtain a reference to the SP execution context. */
+ ec = spmc_get_sp_ec(sp);
+
+ /*
+ * TODO: Should we do some PM related state tracking of the SP execution
+ * context here?
+ */
+
+ /*
+ * Build an SPMC to SP direct message request.
+ * Note that x4-x6 should be populated with the original PSCI arguments.
+ */
+ spmc_build_pm_message(get_gpregs_ctx(&ec->cpu_ctx),
+ psci_event,
+ pm_msg_type,
+ sp->sp_id);
+
+ /* Sanity check partition state. */
+ assert(ec->rt_state == RT_STATE_WAITING);
+
+ /* Update the runtime model and state of the partition. */
+ ec->rt_model = RT_MODEL_DIR_REQ;
+ ec->rt_state = RT_STATE_RUNNING;
+ ec->dir_req_origin_id = FFA_SPMC_ID;
+
+ rc = spmc_sp_synchronous_entry(ec);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%lu) on CPU%u.\n", __func__, rc, linear_id);
+ assert(false);
+ return -EINVAL;
+ }
+
+ /*
+ * Validate we receive an expected response from the SP.
+ * TODO: We don't currently support aborting an SP in the scenario
+ * where it is misbehaving so assert these conditions are not
+ * met for now.
+ */
+ gpregs_ctx = get_gpregs_ctx(&ec->cpu_ctx);
+
+ /* Expect a direct message response from the SP. */
+ resp = read_ctx_reg(gpregs_ctx, CTX_GPREG_X0);
+ if (resp != FFA_MSG_SEND_DIRECT_RESP_SMC32) {
+ ERROR("%s invalid SP response (%lx).\n", __func__, resp);
+ assert(false);
+ return -EINVAL;
+ }
+
+ /* Ensure the sender and receiver are populated correctly. */
+ resp = read_ctx_reg(gpregs_ctx, CTX_GPREG_X1);
+ if (!(ffa_endpoint_source(resp) == sp->sp_id &&
+ ffa_endpoint_destination(resp) == FFA_SPMC_ID)) {
+ ERROR("%s invalid src/dst response (%lx).\n", __func__, resp);
+ assert(false);
+ return -EINVAL;
+ }
+
+ /* Expect a PM message response from the SP. */
+ resp = read_ctx_reg(gpregs_ctx, CTX_GPREG_X2);
+ if ((resp & FFA_FWK_MSG_BIT) == 0U ||
+ ((resp & FFA_FWK_MSG_MASK) != FFA_PM_MSG_PM_RESP)) {
+ ERROR("%s invalid PM response (%lx).\n", __func__, resp);
+ assert(false);
+ return -EINVAL;
+ }
+
+ /* Update the runtime state of the partition. */
+ ec->rt_state = RT_STATE_WAITING;
+
+ /* Return the status code returned by the SP */
+ return read_ctx_reg(gpregs_ctx, CTX_GPREG_X3);
+}
+
+/*******************************************************************************
+ * spmc_cpu_suspend_finish_handler
+ ******************************************************************************/
+static void spmc_cpu_suspend_finish_handler(u_register_t unused)
+{
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ unsigned int linear_id = plat_my_core_pos();
+ int32_t rc;
+
+ /* Sanity check for a NULL pointer dereference. */
+ assert(sp != NULL);
+
+ /*
+ * Check if the SP has subscribed for this power management message.
+ * If not then we don't have anything else to do here.
+ */
+ if ((sp->pwr_mgmt_msgs & FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME) == 0U) {
+ goto exit;
+ }
+
+ rc = spmc_send_pm_msg(FFA_PM_MSG_WB_REQ, FFA_WB_TYPE_NOTS2RAM);
+ if (rc < 0) {
+ ERROR("%s failed (%d) on CPU%u\n", __func__, rc, linear_id);
+ return;
+ }
+
+exit:
+ VERBOSE("CPU %u resumed!\n", linear_id);
+}
+
+/*******************************************************************************
+ * spmc_cpu_suspend_handler
+ ******************************************************************************/
+static void spmc_cpu_suspend_handler(u_register_t unused)
+{
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ unsigned int linear_id = plat_my_core_pos();
+ int32_t rc;
+
+ /* Sanity check for a NULL pointer dereference. */
+ assert(sp != NULL);
+
+ /*
+ * Check if the SP has subscribed for this power management message.
+ * If not then we don't have anything else to do here.
+ */
+ if ((sp->pwr_mgmt_msgs & FFA_PM_MSG_SUB_CPU_SUSPEND) == 0U) {
+ goto exit;
+ }
+
+ rc = spmc_send_pm_msg(FFA_FWK_MSG_PSCI, PSCI_CPU_SUSPEND_AARCH64);
+ if (rc < 0) {
+ ERROR("%s failed (%d) on CPU%u\n", __func__, rc, linear_id);
+ return;
+ }
+exit:
+ VERBOSE("CPU %u suspend!\n", linear_id);
+}
+
+/*******************************************************************************
+ * spmc_cpu_off_handler
+ ******************************************************************************/
+static int32_t spmc_cpu_off_handler(u_register_t unused)
+{
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ unsigned int linear_id = plat_my_core_pos();
+ int32_t ret = 0;
+
+ /* Sanity check for a NULL pointer dereference. */
+ assert(sp != NULL);
+
+ /*
+ * Check if the SP has subscribed for this power management message.
+ * If not then we don't have anything else to do here.
+ */
+ if ((sp->pwr_mgmt_msgs & FFA_PM_MSG_SUB_CPU_OFF) == 0U) {
+ goto exit;
+ }
+
+ ret = spmc_send_pm_msg(FFA_FWK_MSG_PSCI, PSCI_CPU_OFF);
+ if (ret < 0) {
+ ERROR("%s failed (%d) on CPU%u\n", __func__, ret, linear_id);
+ return ret;
+ }
+
+exit:
+ VERBOSE("CPU %u off!\n", linear_id);
+ return ret;
+}
+
+/*******************************************************************************
+ * Structure populated by the SPM Core to perform any bookkeeping before
+ * PSCI executes a power mgmt. operation.
+ ******************************************************************************/
+const spd_pm_ops_t spmc_pm = {
+ .svc_on_finish = spmc_cpu_on_finish_handler,
+ .svc_off = spmc_cpu_off_handler,
+ .svc_suspend = spmc_cpu_suspend_handler,
+ .svc_suspend_finish = spmc_cpu_suspend_finish_handler
+};
diff --git a/services/std_svc/spm/el3_spmc/spmc_setup.c b/services/std_svc/spm/el3_spmc/spmc_setup.c
new file mode 100644
index 0000000..6de25f6
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_setup.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <libfdt.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <services/ffa_svc.h>
+#include "spm_common.h"
+#include "spmc.h"
+#include <tools_share/firmware_image_package.h>
+
+#include <platform_def.h>
+
+/*
+ * Statically allocate a page of memory for passing boot information to an SP.
+ */
+static uint8_t ffa_boot_info_mem[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+/*
+ * This function creates an initialization descriptor in the memory reserved
+ * for passing boot information to an SP. It then copies the partition manifest
+ * into this region and ensures that its reference in the initialization
+ * descriptor is updated.
+ */
+static void spmc_create_boot_info(entry_point_info_t *ep_info,
+ struct secure_partition_desc *sp)
+{
+ struct ffa_boot_info_header *boot_header;
+ struct ffa_boot_info_desc *boot_descriptor;
+ uintptr_t manifest_addr;
+
+ /*
+ * Calculate the maximum size of the manifest that can be accommodated
+ * in the boot information memory region.
+ */
+ const unsigned int
+ max_manifest_sz = sizeof(ffa_boot_info_mem) -
+ (sizeof(struct ffa_boot_info_header) +
+ sizeof(struct ffa_boot_info_desc));
+
+ /*
+ * The current implementation only supports the FF-A v1.1
+ * implementation of the boot protocol, therefore check
+ * that a v1.0 SP has not requested use of the protocol.
+ */
+ if (sp->ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ ERROR("FF-A boot protocol not supported for v1.0 clients\n");
+ return;
+ }
+
+ /*
+ * Check if the manifest will fit into the boot info memory region else
+ * bail.
+ */
+ if (ep_info->args.arg1 > max_manifest_sz) {
+ WARN("Unable to copy manifest into boot information. ");
+ WARN("Max sz = %u bytes. Manifest sz = %lu bytes\n",
+ max_manifest_sz, ep_info->args.arg1);
+ return;
+ }
+
+ /* Zero the memory region before populating. */
+ memset(ffa_boot_info_mem, 0, PAGE_SIZE);
+
+ /*
+ * Populate the ffa_boot_info_header at the start of the boot info
+ * region.
+ */
+ boot_header = (struct ffa_boot_info_header *) ffa_boot_info_mem;
+
+ /* Position the ffa_boot_info_desc after the ffa_boot_info_header. */
+ boot_header->offset_boot_info_desc =
+ sizeof(struct ffa_boot_info_header);
+ boot_descriptor = (struct ffa_boot_info_desc *)
+ (ffa_boot_info_mem +
+ boot_header->offset_boot_info_desc);
+
+ /*
+ * We must use the FF-A version corresponding to the version implemented
+ * by the SP. Currently this can only be v1.1.
+ */
+ boot_header->version = sp->ffa_version;
+
+ /* Populate the boot information header. */
+ boot_header->size_boot_info_desc = sizeof(struct ffa_boot_info_desc);
+
+ /* Set the signature "0xFFA". */
+ boot_header->signature = FFA_INIT_DESC_SIGNATURE;
+
+ /* Set the count. Currently 1 since only the manifest is specified. */
+ boot_header->count_boot_info_desc = 1;
+
+ /* Populate the boot information descriptor for the manifest. */
+ boot_descriptor->type =
+ FFA_BOOT_INFO_TYPE(FFA_BOOT_INFO_TYPE_STD) |
+ FFA_BOOT_INFO_TYPE_ID(FFA_BOOT_INFO_TYPE_ID_FDT);
+
+ boot_descriptor->flags =
+ FFA_BOOT_INFO_FLAG_NAME(FFA_BOOT_INFO_FLAG_NAME_UUID) |
+ FFA_BOOT_INFO_FLAG_CONTENT(FFA_BOOT_INFO_FLAG_CONTENT_ADR);
+
+ /*
+ * Copy the manifest into boot info region after the boot information
+ * descriptor.
+ */
+ boot_descriptor->size_boot_info = (uint32_t) ep_info->args.arg1;
+
+ manifest_addr = (uintptr_t) (ffa_boot_info_mem +
+ boot_header->offset_boot_info_desc +
+ boot_header->size_boot_info_desc);
+
+ memcpy((void *) manifest_addr, (void *) ep_info->args.arg0,
+ boot_descriptor->size_boot_info);
+
+ boot_descriptor->content = manifest_addr;
+
+ /* Calculate the size of the total boot info blob. */
+ boot_header->size_boot_info_blob = boot_header->offset_boot_info_desc +
+ boot_descriptor->size_boot_info +
+ (boot_header->count_boot_info_desc *
+ boot_header->size_boot_info_desc);
+
+ INFO("SP boot info @ 0x%lx, size: %u bytes.\n",
+ (uintptr_t) ffa_boot_info_mem,
+ boot_header->size_boot_info_blob);
+ INFO("SP manifest @ 0x%lx, size: %u bytes.\n",
+ boot_descriptor->content,
+ boot_descriptor->size_boot_info);
+}
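+
+/*
+ * Resulting layout of ffa_boot_info_mem after spmc_create_boot_info(), as
+ * implied by the offsets computed above:
+ *
+ *	offset 0:			struct ffa_boot_info_header
+ *	offset sizeof(header):		struct ffa_boot_info_desc
+ *	offset sizeof(header) +
+ *	       sizeof(descriptor):	copy of the SP manifest, referenced by
+ *					the descriptor's content field
+ */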
+
+/*
+ * We assume that the index of the execution context used is the linear
+ * index of the current physical CPU.
+ */
+unsigned int get_ec_index(struct secure_partition_desc *sp)
+{
+ return plat_my_core_pos();
+}
+
+/* S-EL1 partition specific initialisation. */
+void spmc_el1_sp_setup(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info)
+{
+ /* Sanity check input arguments. */
+ assert(sp != NULL);
+ assert(ep_info != NULL);
+
+ /* Initialise the SPSR for S-EL1 SPs. */
+ ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+
+ /*
+ * TF-A Implementation defined behaviour to provide the linear
+ * core ID in the x4 register.
+ */
+ ep_info->args.arg4 = (uintptr_t) plat_my_core_pos();
+
+ /*
+ * Check whether setup is being performed for the primary or a secondary
+ * execution context. In the latter case, indicate to the SP that this
+ * is a warm boot.
+ * TODO: This check would need to be reworked if the same entry point is
+ * used for both primary and secondary initialisation.
+ */
+ if (sp->secondary_ep != 0U) {
+ /*
+ * Sanity check that the secondary entry point is still what was
+ * originally set.
+ */
+ assert(sp->secondary_ep == ep_info->pc);
+ ep_info->args.arg0 = FFA_WB_TYPE_S2RAM;
+ }
+}
+
+/* Common initialisation for all SPs. */
+void spmc_sp_common_setup(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info,
+ int32_t boot_info_reg)
+{
+ uint16_t sp_id;
+
+ /* Assign FF-A Partition ID if not already assigned. */
+ if (sp->sp_id == INV_SP_ID) {
+ sp_id = FFA_SP_ID_BASE + ACTIVE_SP_DESC_INDEX;
+ /*
+ * Ensure we don't clash with previously assigned partition
+ * IDs.
+ */
+ while (!is_ffa_secure_id_valid(sp_id)) {
+ sp_id++;
+
+ if (sp_id == FFA_SWD_ID_LIMIT) {
+ ERROR("Unable to determine valid SP ID.\n");
+ panic();
+ }
+ }
+ sp->sp_id = sp_id;
+ }
+
+ /*
+ * We currently only support S-EL1 partitions so ensure this is the
+ * case.
+ */
+ assert(sp->runtime_el == S_EL1);
+
+ /* Check if the SP wants to use the FF-A boot protocol. */
+ if (boot_info_reg >= 0) {
+ /*
+ * Create a boot information descriptor and copy the partition
+ * manifest into the reserved memory region for consumption by
+ * the SP.
+ */
+ spmc_create_boot_info(ep_info, sp);
+
+ /*
+ * We have consumed what we need from ep args so we can now
+ * zero them before we start populating with new information
+ * specifically for the SP.
+ */
+ zeromem(&ep_info->args, sizeof(ep_info->args));
+
+ /*
+ * Pass the address of the boot information in the
+ * boot_info_reg.
+ */
+ switch (boot_info_reg) {
+ case 0:
+ ep_info->args.arg0 = (uintptr_t) ffa_boot_info_mem;
+ break;
+ case 1:
+ ep_info->args.arg1 = (uintptr_t) ffa_boot_info_mem;
+ break;
+ case 2:
+ ep_info->args.arg2 = (uintptr_t) ffa_boot_info_mem;
+ break;
+ case 3:
+ ep_info->args.arg3 = (uintptr_t) ffa_boot_info_mem;
+ break;
+ default:
+ ERROR("Invalid value for \"gp-register-num\" %d.\n",
+ boot_info_reg);
+ }
+ } else {
+ /*
+ * We don't need any of the information that was populated
+ * in ep_args so we can clear them.
+ */
+ zeromem(&ep_info->args, sizeof(ep_info->args));
+ }
+}
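+
+/*
+ * Example of the resulting cold boot register state for an S-EL1 SP that
+ * selected the FF-A boot protocol with "gp-register-num" set to 0 (all other
+ * argument registers are zeroed by the zeromem() call above):
+ *
+ *	x0 = address of ffa_boot_info_mem
+ *	x4 = linear core ID (TF-A implementation defined behaviour, set in
+ *	     spmc_el1_sp_setup())
+ *	x1-x3, x5-x7 = 0
+ */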
+
+/*
+ * Initialise the SP context now that we have populated the common and
+ * EL-specific entry point information.
+ */
+void spmc_sp_common_ep_commit(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info)
+{
+ cpu_context_t *cpu_ctx;
+
+ cpu_ctx = &(spmc_get_sp_ec(sp)->cpu_ctx);
+ print_entry_point_info(ep_info);
+ cm_setup_context(cpu_ctx, ep_info);
+}
diff --git a/services/std_svc/spm/el3_spmc/spmc_shared_mem.c b/services/std_svc/spm/el3_spmc/spmc_shared_mem.c
new file mode 100644
index 0000000..5263c04
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_shared_mem.c
@@ -0,0 +1,1934 @@
+/*
+ * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/object_pool.h>
+#include <lib/spinlock.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <services/ffa_svc.h>
+#include "spmc.h"
+#include "spmc_shared_mem.h"
+
+#include <platform_def.h>
+
+/**
+ * struct spmc_shmem_obj - Shared memory object.
+ * @desc_size: Size of @desc.
+ * @desc_filled: Size of @desc already received.
+ * @in_use: Number of clients that have called ffa_mem_retrieve_req
+ * without a matching ffa_mem_relinquish call.
+ * @desc: FF-A memory region descriptor passed in ffa_mem_share.
+ */
+struct spmc_shmem_obj {
+ size_t desc_size;
+ size_t desc_filled;
+ size_t in_use;
+ struct ffa_mtd desc;
+};
+
+/*
+ * Declare our data structure to store the metadata of memory share requests.
+ * The main datastore is allocated on a per-platform basis to ensure enough
+ * storage can be made available.
+ * The address of the data store will be populated by the SPMC during its
+ * initialization.
+ */
+
+struct spmc_shmem_obj_state spmc_shmem_obj_state = {
+ /* Set start value for handle so top 32 bits are needed quickly. */
+ .next_handle = 0xffffffc0U,
+};
+
+/**
+ * spmc_shmem_obj_size - Convert from descriptor size to object size.
+ * @desc_size: Size of struct ffa_memory_region_descriptor object.
+ *
+ * Return: Size of struct spmc_shmem_obj object.
+ */
+static size_t spmc_shmem_obj_size(size_t desc_size)
+{
+ return desc_size + offsetof(struct spmc_shmem_obj, desc);
+}
+
+/**
+ * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
+ * @state: Global state.
+ * @desc_size: Size of struct ffa_memory_region_descriptor object that
+ * allocated object will hold.
+ *
+ * Return: Pointer to newly allocated object, or %NULL if there is not enough
+ *         space left. The returned pointer is only valid while @state is
+ *         locked; to use it again after unlocking @state,
+ *         spmc_shmem_obj_lookup must be called.
+ */
+static struct spmc_shmem_obj *
+spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
+{
+ struct spmc_shmem_obj *obj;
+ size_t free = state->data_size - state->allocated;
+ size_t obj_size;
+
+ if (state->data == NULL) {
+ ERROR("Missing shmem datastore!\n");
+ return NULL;
+ }
+
+ /* Ensure that descriptor size is aligned */
+ if (!is_aligned(desc_size, 16)) {
+ WARN("%s(0x%zx) desc_size not 16-byte aligned\n",
+ __func__, desc_size);
+ return NULL;
+ }
+
+ obj_size = spmc_shmem_obj_size(desc_size);
+
+ /* Ensure the obj size has not overflowed. */
+ if (obj_size < desc_size) {
+ WARN("%s(0x%zx) desc_size overflow\n",
+ __func__, desc_size);
+ return NULL;
+ }
+
+ if (obj_size > free) {
+ WARN("%s(0x%zx) failed, free 0x%zx\n",
+ __func__, desc_size, free);
+ return NULL;
+ }
+ obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
+ obj->desc = (struct ffa_mtd) {0};
+ obj->desc_size = desc_size;
+ obj->desc_filled = 0;
+ obj->in_use = 0;
+ state->allocated += obj_size;
+ return obj;
+}
+
+/**
+ * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
+ * @state: Global state.
+ * @obj: Object to free.
+ *
+ * Release memory used by @obj. Other objects may move, so on return all
+ * pointers to struct spmc_shmem_obj object should be considered invalid, not
+ * just @obj.
+ *
+ * The current implementation always compacts the remaining objects to simplify
+ * the allocator and to avoid fragmentation.
+ */
+
+static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
+ struct spmc_shmem_obj *obj)
+{
+ size_t free_size = spmc_shmem_obj_size(obj->desc_size);
+ uint8_t *shift_dest = (uint8_t *)obj;
+ uint8_t *shift_src = shift_dest + free_size;
+ size_t shift_size = state->allocated - (shift_src - state->data);
+
+ if (shift_size != 0U) {
+ memmove(shift_dest, shift_src, shift_size);
+ }
+ state->allocated -= free_size;
+}
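+
+/*
+ * Compaction example: with three objects laid out as | A | B | C | in
+ * state->data, freeing B moves C down over B's storage, leaving | A | C | and
+ * reducing state->allocated by B's object size. Any pointer previously held
+ * to B or C is therefore stale after the call.
+ */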
+
+/**
+ * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
+ * @state: Global state.
+ * @handle: Unique handle of object to return.
+ *
+ * Return: struct spmc_shmem_obj object with handle matching @handle.
+ *         %NULL, if no object in @state->data has a matching handle.
+ */
+static struct spmc_shmem_obj *
+spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
+{
+ uint8_t *curr = state->data;
+
+ while (curr - state->data < state->allocated) {
+ struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
+
+ if (obj->desc.handle == handle) {
+ return obj;
+ }
+ curr += spmc_shmem_obj_size(obj->desc_size);
+ }
+ return NULL;
+}
+
+/**
+ * spmc_shmem_obj_get_next - Get the next memory object from an offset.
+ * @state: Global state.
+ * @offset: Offset used to track which objects have previously been
+ * returned.
+ *
+ * Return: the next struct spmc_shmem_obj object from the provided
+ * offset.
+ * %NULL, if there are no more objects.
+ */
+static struct spmc_shmem_obj *
+spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
+{
+ uint8_t *curr = state->data + *offset;
+
+ if (curr - state->data < state->allocated) {
+ struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
+
+ *offset += spmc_shmem_obj_size(obj->desc_size);
+
+ return obj;
+ }
+ return NULL;
+}
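+
+/*
+ * Illustrative usage sketch (a hypothetical helper, not used elsewhere): walk
+ * every object currently held in the datastore. The caller is assumed to hold
+ * the lock protecting spmc_shmem_obj_state.
+ */
+#if 0
+static void spmc_shmem_obj_dump_all(struct spmc_shmem_obj_state *state)
+{
+	size_t offset = 0;
+	struct spmc_shmem_obj *obj;
+
+	while ((obj = spmc_shmem_obj_get_next(state, &offset)) != NULL) {
+		VERBOSE("shmem obj handle 0x%llx, desc size %zu, in use %zu\n",
+			(unsigned long long)obj->desc.handle,
+			obj->desc_size, obj->in_use);
+	}
+}
+#endif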
+
+/*******************************************************************************
+ * FF-A memory descriptor helper functions.
+ ******************************************************************************/
+/**
+ * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
+ * client's FF-A version.
+ * @desc: The memory transaction descriptor.
+ * @index: The index of the emad element to be accessed.
+ * @ffa_version: FF-A version of the provided structure.
+ * @emad_size: Will be populated with the size of the returned emad
+ * descriptor.
+ * Return: A pointer to the requested emad structure.
+ */
+static void *
+spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
+ uint32_t ffa_version, size_t *emad_size)
+{
+ uint8_t *emad;
+
+ assert(index < desc->emad_count);
+
+ /*
+ * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
+ * format, otherwise assume it is a v1.1 format.
+ */
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
+ *emad_size = sizeof(struct ffa_emad_v1_0);
+ } else {
+ assert(is_aligned(desc->emad_offset, 16));
+ emad = ((uint8_t *) desc + desc->emad_offset);
+ *emad_size = desc->emad_size;
+ }
+
+ assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
+ return (emad + (*emad_size * index));
+}
+
+/**
+ * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
+ * FF-A version of the descriptor.
+ * @obj: Object containing ffa_memory_region_descriptor.
+ *
+ * Return: struct ffa_comp_mrd object corresponding to the composite memory
+ * region descriptor.
+ */
+static struct ffa_comp_mrd *
+spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
+{
+ size_t emad_size;
+ /*
+ * The comp_mrd_offset field of the emad descriptor remains consistent
+ * between FF-A versions therefore we can use the v1.0 descriptor here
+ * in all cases.
+ */
+ struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
+ ffa_version,
+ &emad_size);
+
+ /* Ensure the composite descriptor offset is aligned. */
+ if (!is_aligned(emad->comp_mrd_offset, 8)) {
+ WARN("Unaligned composite memory region descriptor offset.\n");
+ return NULL;
+ }
+
+ return (struct ffa_comp_mrd *)
+ ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
+}
+
+/**
+ * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
+ * a given memory transaction.
+ * @sp_id: Partition ID to validate.
+ * @obj: The shared memory object containing the descriptor
+ * of the memory transaction.
+ * Return: true if ID is valid, else false.
+ */
+bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
+{
+ bool found = false;
+ struct ffa_mtd *desc = &obj->desc;
+ size_t desc_size = obj->desc_size;
+
+ /* Validate the partition is a valid participant. */
+ for (unsigned int i = 0U; i < desc->emad_count; i++) {
+ size_t emad_size;
+ struct ffa_emad_v1_0 *emad;
+
+ emad = spmc_shmem_obj_get_emad(desc, i,
+ MAKE_FFA_VERSION(1, 1),
+ &emad_size);
+ /*
+ * Validate the calculated emad address resides within the
+ * descriptor.
+ */
+ if ((emad == NULL) || (uintptr_t) emad >=
+ (uintptr_t)((uint8_t *) desc + desc_size)) {
+ VERBOSE("Invalid emad.\n");
+ break;
+ }
+ if (sp_id == emad->mapd.endpoint_id) {
+ found = true;
+ break;
+ }
+ }
+ return found;
+}
+
+/*
+ * Compare two memory regions to determine if any range overlaps with another
+ * ongoing memory transaction.
+ */
+static bool
+overlapping_memory_regions(struct ffa_comp_mrd *region1,
+ struct ffa_comp_mrd *region2)
+{
+ uint64_t region1_start;
+ uint64_t region1_size;
+ uint64_t region1_end;
+ uint64_t region2_start;
+ uint64_t region2_size;
+ uint64_t region2_end;
+
+ assert(region1 != NULL);
+ assert(region2 != NULL);
+
+ if (region1 == region2) {
+ return true;
+ }
+
+ /*
+ * Check each memory region in the request against existing
+ * transactions.
+ */
+ for (size_t i = 0; i < region1->address_range_count; i++) {
+
+ region1_start = region1->address_range_array[i].address;
+ region1_size =
+ region1->address_range_array[i].page_count *
+ PAGE_SIZE_4KB;
+ region1_end = region1_start + region1_size;
+
+ for (size_t j = 0; j < region2->address_range_count; j++) {
+
+ region2_start = region2->address_range_array[j].address;
+ region2_size =
+ region2->address_range_array[j].page_count *
+ PAGE_SIZE_4KB;
+ region2_end = region2_start + region2_size;
+
+ /* Check if regions are not overlapping. */
+ if (!((region2_end <= region1_start) ||
+ (region1_end <= region2_start))) {
+ WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
+ region1_start, region1_end,
+ region2_start, region2_end);
+ return true;
+ }
+ }
+ }
+ return false;
+}
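+
+/*
+ * Worked example: a constituent range in one transaction starting at
+ * 0x880000000 with a page count of 2 covers [0x880000000, 0x880002000), and a
+ * range in another transaction starting at 0x880001000 with a page count of 2
+ * covers [0x880001000, 0x880003000). Neither region ends at or before the
+ * start of the other, so the check above reports an overlap.
+ */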
+
+/*******************************************************************************
+ * FF-A v1.0 Memory Descriptor Conversion Helpers.
+ ******************************************************************************/
+/**
+ * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
+ * converted descriptor.
+ * @orig: The original v1.0 memory transaction descriptor.
+ * @desc_size: The size of the original v1.0 memory transaction descriptor.
+ *
+ * Return: the size required to store the descriptor in the v1.1 format.
+ */
+static uint64_t
+spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
+{
+ uint64_t size = 0;
+ struct ffa_comp_mrd *mrd;
+ struct ffa_emad_v1_0 *emad_array = orig->emad;
+
+ /* Get the size of the v1.1 descriptor. */
+ size += sizeof(struct ffa_mtd);
+
+ /* Add the size of the emad descriptors. */
+ size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
+
+ /* Add the size of the composite mrds. */
+ size += sizeof(struct ffa_comp_mrd);
+
+ /* Add the size of the constituent mrds. */
+ mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
+ emad_array[0].comp_mrd_offset);
+
+ /* Add the size of the memory region descriptors. */
+ size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
+
+ return size;
+}
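+
+/*
+ * Worked example (sizes shown symbolically): converting a v1.0 transaction
+ * with 2 endpoint descriptors and a composite region of 3 constituent ranges
+ * requires
+ *
+ *	sizeof(struct ffa_mtd) + 2 * sizeof(struct ffa_emad_v1_0) +
+ *	sizeof(struct ffa_comp_mrd) + 3 * sizeof(struct ffa_cons_mrd)
+ *
+ * bytes of datastore for the v1.1 copy.
+ */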
+
+/**
+ * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
+ * converted descriptor.
+ * @orig: The original v1.1 memory transaction descriptor.
+ * @desc_size: The size of the original v1.1 memory transaction descriptor.
+ *
+ * Return: the size required to store the descriptor in the v1.0 format.
+ */
+static size_t
+spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
+{
+ size_t size = 0;
+ struct ffa_comp_mrd *mrd;
+ struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
+ ((uint8_t *) orig +
+ orig->emad_offset);
+
+ /* Get the size of the v1.0 descriptor. */
+ size += sizeof(struct ffa_mtd_v1_0);
+
+ /* Add the size of the v1.0 emad descriptors. */
+ size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
+
+ /* Add the size of the composite mrds. */
+ size += sizeof(struct ffa_comp_mrd);
+
+ /* Add the size of the constituent mrds. */
+ mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
+ emad_array[0].comp_mrd_offset);
+
+ /* Check the calculated address is within the memory descriptor. */
+ if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
+ (uintptr_t)((uint8_t *) orig + desc_size)) {
+ return 0;
+ }
+ size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
+
+ return size;
+}
+
+/**
+ * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
+ * @out_obj: The shared memory object to populate the converted descriptor.
+ * @orig: The shared memory object containing the v1.0 descriptor.
+ *
+ * Return: true if the conversion is successful else false.
+ */
+static bool
+spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
+ struct spmc_shmem_obj *orig)
+{
+ struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
+ struct ffa_mtd *out = &out_obj->desc;
+ struct ffa_emad_v1_0 *emad_array_in;
+ struct ffa_emad_v1_0 *emad_array_out;
+ struct ffa_comp_mrd *mrd_in;
+ struct ffa_comp_mrd *mrd_out;
+
+ size_t mrd_in_offset;
+ size_t mrd_out_offset;
+ size_t mrd_size = 0;
+
+ /* Populate the new descriptor format from the v1.0 struct. */
+ out->sender_id = mtd_orig->sender_id;
+ out->memory_region_attributes = mtd_orig->memory_region_attributes;
+ out->flags = mtd_orig->flags;
+ out->handle = mtd_orig->handle;
+ out->tag = mtd_orig->tag;
+ out->emad_count = mtd_orig->emad_count;
+ out->emad_size = sizeof(struct ffa_emad_v1_0);
+
+ /*
+ * We will locate the emad descriptors directly after the ffa_mtd
+ * struct. This will be 8-byte aligned.
+ */
+ out->emad_offset = sizeof(struct ffa_mtd);
+
+ emad_array_in = mtd_orig->emad;
+ emad_array_out = (struct ffa_emad_v1_0 *)
+ ((uint8_t *) out + out->emad_offset);
+
+ /* Copy across the emad structs. */
+ for (unsigned int i = 0U; i < out->emad_count; i++) {
+ /* Bound check for emad array. */
+ if (((uint8_t *)emad_array_in + sizeof(struct ffa_emad_v1_0)) >
+ ((uint8_t *) mtd_orig + orig->desc_size)) {
+ VERBOSE("%s: Invalid mtd structure.\n", __func__);
+ return false;
+ }
+ memcpy(&emad_array_out[i], &emad_array_in[i],
+ sizeof(struct ffa_emad_v1_0));
+ }
+
+ /* Place the mrd descriptors after the end of the emad descriptors.*/
+ mrd_in_offset = emad_array_in->comp_mrd_offset;
+ mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
+ mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
+
+ /* Add the size of the composite memory region descriptor. */
+ mrd_size += sizeof(struct ffa_comp_mrd);
+
+ /* Find the mrd descriptor. */
+ mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
+
+ /* Add the size of the constituent memory region descriptors. */
+ mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
+
+ /*
+ * Update the offset in the emads by the delta between the input and
+ * output addresses.
+ */
+ for (unsigned int i = 0U; i < out->emad_count; i++) {
+ emad_array_out[i].comp_mrd_offset =
+ emad_array_in[i].comp_mrd_offset +
+ (mrd_out_offset - mrd_in_offset);
+ }
+
+ /* Verify that we stay within bound of the memory descriptors. */
+ if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
+ (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
+ ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
+ (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
+ ERROR("%s: Invalid mrd structure.\n", __func__);
+ return false;
+ }
+
+ /* Copy the mrd descriptors directly. */
+ memcpy(mrd_out, mrd_in, mrd_size);
+
+ return true;
+}
+
+/**
+ * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
+ * v1.0 memory object.
+ * @out_obj: The shared memory object to populate the v1.0 descriptor.
+ * @orig: The shared memory object containing the v1.1 descriptor.
+ *
+ * Return: true if the conversion is successful else false.
+ */
+static bool
+spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
+ struct spmc_shmem_obj *orig)
+{
+ struct ffa_mtd *mtd_orig = &orig->desc;
+ struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
+ struct ffa_emad_v1_0 *emad_in;
+ struct ffa_emad_v1_0 *emad_array_in;
+ struct ffa_emad_v1_0 *emad_array_out;
+ struct ffa_comp_mrd *mrd_in;
+ struct ffa_comp_mrd *mrd_out;
+
+ size_t mrd_in_offset;
+ size_t mrd_out_offset;
+ size_t emad_out_array_size;
+ size_t mrd_size = 0;
+ size_t orig_desc_size = orig->desc_size;
+
+ /* Populate the v1.0 descriptor format from the v1.1 struct. */
+ out->sender_id = mtd_orig->sender_id;
+ out->memory_region_attributes = mtd_orig->memory_region_attributes;
+ out->flags = mtd_orig->flags;
+ out->handle = mtd_orig->handle;
+ out->tag = mtd_orig->tag;
+ out->emad_count = mtd_orig->emad_count;
+
+ /* Determine the location of the emad array in both descriptors. */
+ emad_array_in = (struct ffa_emad_v1_0 *)
+ ((uint8_t *) mtd_orig + mtd_orig->emad_offset);
+ emad_array_out = out->emad;
+
+ /* Copy across the emad structs. */
+ emad_in = emad_array_in;
+ for (unsigned int i = 0U; i < out->emad_count; i++) {
+ /* Bound check for emad array. */
+ if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
+ ((uint8_t *) mtd_orig + orig_desc_size)) {
+ VERBOSE("%s: Invalid mtd structure.\n", __func__);
+ return false;
+ }
+ memcpy(&emad_array_out[i], emad_in,
+ sizeof(struct ffa_emad_v1_0));
+
+ emad_in += mtd_orig->emad_size;
+ }
+
+ /* Place the mrd descriptors after the end of the emad descriptors. */
+ emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;
+
+ mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
+ emad_out_array_size;
+
+ mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
+
+ mrd_in_offset = mtd_orig->emad_offset +
+ (mtd_orig->emad_size * mtd_orig->emad_count);
+
+ /* Add the size of the composite memory region descriptor. */
+ mrd_size += sizeof(struct ffa_comp_mrd);
+
+ /* Find the mrd descriptor. */
+ mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
+
+ /* Add the size of the constituent memory region descriptors. */
+ mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
+
+ /*
+ * Update the offset in the emads by the delta between the input and
+ * output addresses.
+ */
+ emad_in = emad_array_in;
+
+ for (unsigned int i = 0U; i < out->emad_count; i++) {
+ emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
+ (mrd_out_offset -
+ mrd_in_offset);
+ emad_in += mtd_orig->emad_size;
+ }
+
+ /* Verify that we stay within bound of the memory descriptors. */
+ if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
+ (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
+ ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
+ (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
+ ERROR("%s: Invalid mrd structure.\n", __func__);
+ return false;
+ }
+
+ /* Copy the mrd descriptors directly. */
+ memcpy(mrd_out, mrd_in, mrd_size);
+
+ return true;
+}
+
+/**
+ * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
+ * the v1.0 format and populates the
+ * provided buffer.
+ * @dst: Buffer to populate v1.0 ffa_memory_region_descriptor.
+ * @orig_obj: Object containing v1.1 ffa_memory_region_descriptor.
+ * @buf_size: Size of the buffer to populate.
+ * @offset: The offset of the converted descriptor to copy.
+ * @copy_size: Will be populated with the number of bytes copied.
+ * @v1_0_desc_size: Will be populated with the total size of the v1.0
+ * descriptor.
+ *
+ * Return: 0 if conversion and population succeeded.
+ * Note: This function invalidates the reference to @orig_obj, therefore
+ * `spmc_shmem_obj_lookup` must be called if further usage is required.
+ */
+static uint32_t
+spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
+ size_t buf_size, size_t offset,
+ size_t *copy_size, size_t *v1_0_desc_size)
+{
+ struct spmc_shmem_obj *v1_0_obj;
+
+ /* Calculate the size that the v1.0 descriptor will require. */
+ *v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
+ &orig_obj->desc, orig_obj->desc_size);
+
+ if (*v1_0_desc_size == 0) {
+ ERROR("%s: cannot determine size of descriptor.\n",
+ __func__);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ /* Get a new obj to store the v1.0 descriptor. */
+ v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
+ *v1_0_desc_size);
+
+ if (!v1_0_obj) {
+ return FFA_ERROR_NO_MEMORY;
+ }
+
+ /* Perform the conversion from v1.1 to v1.0. */
+ if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
+ spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ *copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
+ memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);
+
+ /*
+ * We're finished with the v1.0 descriptor for now so free it.
+ * Note that this will invalidate any references to the v1.1
+ * descriptor.
+ */
+ spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
+
+ return 0;
+}
+
+static int
+spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
+ size_t fragment_length, size_t total_length)
+{
+ unsigned long long emad_end;
+ unsigned long long emad_size;
+ unsigned long long emad_offset;
+ unsigned int min_desc_size;
+
+ /* Determine the appropriate minimum descriptor size. */
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ min_desc_size = sizeof(struct ffa_mtd_v1_0);
+ } else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
+ min_desc_size = sizeof(struct ffa_mtd);
+ } else {
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+ if (fragment_length < min_desc_size) {
+ WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
+ min_desc_size);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ if (desc->emad_count == 0U) {
+ WARN("%s: unsupported attribute desc count %u.\n",
+ __func__, desc->emad_count);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ /*
+ * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
+ * format, otherwise assume it is a v1.1 format.
+ */
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
+ } else {
+ if (!is_aligned(desc->emad_offset, 16)) {
+ WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
+ __func__, desc->emad_offset);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+ if (desc->emad_offset < sizeof(struct ffa_mtd)) {
+ WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
+ __func__, desc->emad_offset,
+ sizeof(struct ffa_mtd));
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+ emad_offset = desc->emad_offset;
+ if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
+ WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
+ desc->emad_size, sizeof(struct ffa_emad_v1_0));
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+ if (!is_aligned(desc->emad_size, 16)) {
+ WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
+ __func__, desc->emad_size);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+ emad_size = desc->emad_size;
+ }
+
+ /*
+ * Overflow is impossible: the arithmetic happens in at least 64-bit
+ * precision, but all of the operands are bounded by UINT32_MAX, and
+ * ((2^32 - 1) * (2^32 - 1) + (2^32 - 1) + (2^32 - 1))
+ * = ((2^32 - 1) * ((2^32 - 1) + 1 + 1))
+ * = ((2^32 - 1) * (2^32 + 1))
+ * = (2^64 - 1).
+ */
+ CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
+ emad_end = (desc->emad_count * (unsigned long long)emad_size) +
+ (unsigned long long)sizeof(struct ffa_comp_mrd) +
+ (unsigned long long)emad_offset;
+
+ if (emad_end > total_length) {
+ WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
+ __func__, emad_end, total_length);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ return 0;
+}
+
+static inline const struct ffa_emad_v1_0 *
+emad_advance(const struct ffa_emad_v1_0 *emad, size_t offset)
+{
+ return (const struct ffa_emad_v1_0 *)((const uint8_t *)emad + offset);
+}
+
+/**
+ * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
+ * @obj: Object containing ffa_memory_region_descriptor.
+ * @ffa_version: FF-A version of the provided descriptor.
+ *
+ * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if
+ * constituent_memory_region_descriptor offset or count is invalid.
+ */
+static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
+ uint32_t ffa_version)
+{
+ unsigned long long total_page_count;
+ const struct ffa_emad_v1_0 *first_emad;
+ const struct ffa_emad_v1_0 *end_emad;
+ size_t emad_size;
+ uint32_t comp_mrd_offset;
+ size_t header_emad_size;
+ size_t size;
+ size_t count;
+ size_t expected_size;
+ const struct ffa_comp_mrd *comp;
+
+ if (obj->desc_filled != obj->desc_size) {
+ ERROR("BUG: %s called on incomplete object (%zu != %zu)\n",
+ __func__, obj->desc_filled, obj->desc_size);
+ panic();
+ }
+
+ if (spmc_validate_mtd_start(&obj->desc, ffa_version,
+ obj->desc_filled, obj->desc_size)) {
+ ERROR("BUG: %s called on object with corrupt memory region descriptor\n",
+ __func__);
+ panic();
+ }
+
+ first_emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
+ ffa_version, &emad_size);
+ end_emad = emad_advance(first_emad, obj->desc.emad_count * emad_size);
+ comp_mrd_offset = first_emad->comp_mrd_offset;
+
+ /* Loop through the endpoint descriptors, validating each of them. */
+ for (const struct ffa_emad_v1_0 *emad = first_emad; emad < end_emad;) {
+ ffa_endpoint_id16_t ep_id;
+
+ /*
+ * If a partition ID resides in the secure world validate that
+ * the partition ID is for a known partition. Ignore any
+ * partition ID belonging to the normal world as it is assumed
+ * the Hypervisor will have validated these.
+ */
+ ep_id = emad->mapd.endpoint_id;
+ if (ffa_is_secure_world_id(ep_id)) {
+ if (spmc_get_sp_ctx(ep_id) == NULL) {
+ WARN("%s: Invalid receiver id 0x%x\n",
+ __func__, ep_id);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+ }
+
+ /*
+ * The offset provided to the composite memory region descriptor
+ * should be consistent across endpoint descriptors.
+ */
+ if (comp_mrd_offset != emad->comp_mrd_offset) {
+ ERROR("%s: mismatching offsets provided, %u != %u\n",
+ __func__, emad->comp_mrd_offset, comp_mrd_offset);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ /* Advance to the next endpoint descriptor */
+ emad = emad_advance(emad, emad_size);
+
+ /*
+ * Ensure neither this emad nor any subsequent emads have
+ * the same partition ID as the previous emad.
+ */
+ for (const struct ffa_emad_v1_0 *other_emad = emad;
+ other_emad < end_emad;
+ other_emad = emad_advance(other_emad, emad_size)) {
+ if (ep_id == other_emad->mapd.endpoint_id) {
+ WARN("%s: Duplicated endpoint id 0x%x\n",
+ __func__, emad->mapd.endpoint_id);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+ }
+ }
+
+ header_emad_size = (size_t)((const uint8_t *)end_emad -
+ (const uint8_t *)&obj->desc);
+
+ /*
+ * Check that the composite descriptor
+ * is after the endpoint descriptors.
+ */
+ if (comp_mrd_offset < header_emad_size) {
+ WARN("%s: invalid object, offset %u < header + emad %zu\n",
+ __func__, comp_mrd_offset, header_emad_size);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ /* Ensure the composite descriptor offset is aligned. */
+ if (!is_aligned(comp_mrd_offset, 16)) {
+ WARN("%s: invalid object, unaligned composite memory "
+ "region descriptor offset %u.\n",
+ __func__, comp_mrd_offset);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ size = obj->desc_size;
+
+ /* Check that the composite descriptor is in bounds. */
+ if (comp_mrd_offset > size) {
+ WARN("%s: invalid object, offset %u > total size %zu\n",
+ __func__, comp_mrd_offset, obj->desc_size);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+ size -= comp_mrd_offset;
+
+ /* Check that there is enough space for the composite descriptor. */
+ if (size < sizeof(struct ffa_comp_mrd)) {
+ WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
+ __func__, comp_mrd_offset, obj->desc_size);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+ size -= sizeof(*comp);
+
+ count = size / sizeof(struct ffa_cons_mrd);
+
+ comp = (const struct ffa_comp_mrd *)
+ ((const uint8_t *)(&obj->desc) + comp_mrd_offset);
+
+ if (comp->address_range_count != count) {
+ WARN("%s: invalid object, desc count %u != %zu\n",
+ __func__, comp->address_range_count, count);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ /* Ensure that the expected and actual sizes are equal. */
+ expected_size = comp_mrd_offset + sizeof(*comp) +
+ count * sizeof(struct ffa_cons_mrd);
+
+ if (expected_size != obj->desc_size) {
+ WARN("%s: invalid object, computed size %zu != size %zu\n",
+ __func__, expected_size, obj->desc_size);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ total_page_count = 0;
+
+ /*
+ * comp->address_range_count is 32-bit, so 'count' must fit in a
+ * uint32_t at this point.
+ */
+ for (size_t i = 0; i < count; i++) {
+ const struct ffa_cons_mrd *mrd = comp->address_range_array + i;
+
+ if (!is_aligned(mrd->address, PAGE_SIZE)) {
+ WARN("%s: invalid object, address in region descriptor "
+ "%zu not 4K aligned (got 0x%016llx)",
+ __func__, i, (unsigned long long)mrd->address);
+ }
+
+ /*
+ * No overflow possible: total_page_count can hold at
+ * least 2^64 - 1, but will have at most 2^32 - 1
+ * values added to it, each of which cannot exceed 2^32 - 1.
+ */
+ total_page_count += mrd->page_count;
+ }
+
+ if (comp->total_page_count != total_page_count) {
+ WARN("%s: invalid object, desc total_page_count %u != %llu\n",
+ __func__, comp->total_page_count, total_page_count);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ return 0;
+}
+
+/**
+ * spmc_shmem_check_state_obj - Check if the descriptor describes memory
+ * regions that are currently involved with an
+ * existing memory transaction. This implies that
+ * the memory is not in a valid state for lending.
+ * @obj: Object containing ffa_memory_region_descriptor.
+ *
+ * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if invalid memory
+ * state.
+ */
+static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
+ uint32_t ffa_version)
+{
+ size_t obj_offset = 0;
+ struct spmc_shmem_obj *inflight_obj;
+
+ struct ffa_comp_mrd *other_mrd;
+ struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
+ ffa_version);
+
+ if (requested_mrd == NULL) {
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
+ &obj_offset);
+
+ while (inflight_obj != NULL) {
+ /*
+ * Don't compare the transaction to itself or to partially
+ * transmitted descriptors.
+ */
+ if ((obj->desc.handle != inflight_obj->desc.handle) &&
+ (obj->desc_size == obj->desc_filled)) {
+ other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
+ FFA_VERSION_COMPILED);
+ if (other_mrd == NULL) {
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+ if (overlapping_memory_regions(requested_mrd,
+ other_mrd)) {
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+ }
+
+ inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
+ &obj_offset);
+ }
+ return 0;
+}
+
+static long spmc_ffa_fill_desc(struct mailbox *mbox,
+ struct spmc_shmem_obj *obj,
+ uint32_t fragment_length,
+ ffa_mtd_flag32_t mtd_flag,
+ uint32_t ffa_version,
+ void *smc_handle)
+{
+ int ret;
+ uint32_t handle_low;
+ uint32_t handle_high;
+
+ if (mbox->rxtx_page_count == 0U) {
+ WARN("%s: buffer pair not registered.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_arg;
+ }
+
+ CASSERT(sizeof(mbox->rxtx_page_count) == 4, assert_bogus_page_count);
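+ /* rxtx_page_count is 32-bit, so the 64-bit product below cannot overflow. */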
+ if (fragment_length > (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB) {
+ WARN("%s: bad fragment size %u > %" PRIu64 " buffer size\n", __func__,
+ fragment_length, (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_arg;
+ }
+
+ if (fragment_length > obj->desc_size - obj->desc_filled) {
+ WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
+ fragment_length, obj->desc_size - obj->desc_filled);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_arg;
+ }
+
+ memcpy((uint8_t *)&obj->desc + obj->desc_filled,
+ (uint8_t *) mbox->tx_buffer, fragment_length);
+
+ /* Ensure that the sender ID resides in the normal world. */
+ if (ffa_is_secure_world_id(obj->desc.sender_id)) {
+ WARN("%s: Invalid sender ID 0x%x.\n",
+ __func__, obj->desc.sender_id);
+ ret = FFA_ERROR_DENIED;
+ goto err_arg;
+ }
+
+ /* Ensure the NS bit is set to 0. */
+ if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
+ WARN("%s: NS mem attributes flags MBZ.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_arg;
+ }
+
+ /*
+ * We don't currently support any optional flags so ensure none are
+ * requested.
+ */
+ if (obj->desc.flags != 0U && mtd_flag != 0U &&
+ (obj->desc.flags != mtd_flag)) {
+ WARN("%s: invalid memory transaction flags %u != %u\n",
+ __func__, obj->desc.flags, mtd_flag);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_arg;
+ }
+
+ if (obj->desc_filled == 0U) {
+ /* First fragment, descriptor header has been copied */
+ ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
+ fragment_length, obj->desc_size);
+ if (ret != 0) {
+ goto err_bad_desc;
+ }
+
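+ /* Assign a unique handle and record the requested transaction type. */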
+ obj->desc.handle = spmc_shmem_obj_state.next_handle++;
+ obj->desc.flags |= mtd_flag;
+ }
+
+ obj->desc_filled += fragment_length;
+
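+ /* The 64-bit handle is returned split across two 32-bit registers. */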
+ handle_low = (uint32_t)obj->desc.handle;
+ handle_high = obj->desc.handle >> 32;
+
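+ /* More fragments are expected: request the next one from the sender. */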
+ if (obj->desc_filled != obj->desc_size) {
+ SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
+ handle_high, obj->desc_filled,
+ (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
+ }
+
+ /* The full descriptor has been received, perform any final checks. */
+
+ ret = spmc_shmem_check_obj(obj, ffa_version);
+ if (ret != 0) {
+ goto err_bad_desc;
+ }
+
+ ret = spmc_shmem_check_state_obj(obj, ffa_version);
+ if (ret) {
+ ERROR("%s: invalid memory region descriptor.\n", __func__);
+ goto err_bad_desc;
+ }
+
+ /*
+ * Everything checks out, if the sender was using FF-A v1.0, convert
+ * the descriptor format to use the v1.1 structures.
+ */
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ struct spmc_shmem_obj *v1_1_obj;
+ uint64_t mem_handle;
+
+ /* Calculate the size that the v1.1 descriptor will require. */
+ uint64_t v1_1_desc_size =
+ spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
+ obj->desc_size);
+
+ if (v1_1_desc_size > UINT32_MAX) {
+ ret = FFA_ERROR_NO_MEMORY;
+ goto err_arg;
+ }
+
+ /* Get a new obj to store the v1.1 descriptor. */
+ v1_1_obj =
+ spmc_shmem_obj_alloc(&spmc_shmem_obj_state, (size_t)v1_1_desc_size);
+
+ if (!v1_1_obj) {
+ ret = FFA_ERROR_NO_MEMORY;
+ goto err_arg;
+ }
+
+ /* Perform the conversion from v1.0 to v1.1. */
+ v1_1_obj->desc_size = (uint32_t)v1_1_desc_size;
+ v1_1_obj->desc_filled = (uint32_t)v1_1_desc_size;
+ if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
+ ERROR("%s: Could not convert mtd!\n", __func__);
+ spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
+ goto err_arg;
+ }
+
+ /*
+ * We're finished with the v1.0 descriptor so free it
+ * and continue our checks with the new v1.1 descriptor.
+ */
+ mem_handle = obj->desc.handle;
+ spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
+ obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+ if (obj == NULL) {
+ ERROR("%s: Failed to find converted descriptor.\n",
+ __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ return spmc_ffa_error_return(smc_handle, ret);
+ }
+ }
+
+ /* Allow for platform specific operations to be performed. */
+ ret = plat_spmc_shmem_begin(&obj->desc);
+ if (ret != 0) {
+ goto err_arg;
+ }
+
+ SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
+ 0, 0, 0);
+
+err_bad_desc:
+err_arg:
+ spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
+ return spmc_ffa_error_return(smc_handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
+ * @client: Client state.
+ * @total_length: Total length of shared memory descriptor.
+ * @fragment_length: Length of fragment of shared memory descriptor passed in
+ * this call.
+ * @address: Not supported, must be 0.
+ * @page_count: Not supported, must be 0.
+ * @smc_handle: Handle passed to smc call. Used to return
+ * FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
+ *
+ * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
+ * to share or lend memory from non-secure os to secure os (with no stream
+ * endpoints).
+ *
+ * Return: 0 on success, error code on failure.
+ */
+long spmc_ffa_mem_send(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t total_length,
+ uint32_t fragment_length,
+ uint64_t address,
+ uint32_t page_count,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+
+{
+ long ret;
+ struct spmc_shmem_obj *obj;
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+ ffa_mtd_flag32_t mtd_flag;
+ uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+ size_t min_desc_size;
+
+ if (address != 0U || page_count != 0U) {
+ WARN("%s: custom memory region for message not supported.\n",
+ __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ if (secure_origin) {
+ WARN("%s: unsupported share direction.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ min_desc_size = sizeof(struct ffa_mtd_v1_0);
+ } else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
+ min_desc_size = sizeof(struct ffa_mtd);
+ } else {
+ WARN("%s: bad FF-A version.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Check if the descriptor is too small for the FF-A version. */
+ if (fragment_length < min_desc_size) {
+ WARN("%s: bad first fragment size %u < %zu\n",
+ __func__, fragment_length, min_desc_size);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
+ mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
+ } else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
+ mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
+ } else {
+ WARN("%s: invalid memory management operation.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
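+ /* Reserve backing store for the full descriptor under the global lock. */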
+ spin_lock(&spmc_shmem_obj_state.lock);
+ obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
+ if (obj == NULL) {
+ ret = FFA_ERROR_NO_MEMORY;
+ goto err_unlock;
+ }
+
+ spin_lock(&mbox->lock);
+ ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
+ ffa_version, handle);
+ spin_unlock(&mbox->lock);
+
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ return ret;
+
+err_unlock:
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
+ * @client: Client state.
+ * @handle_low: Handle_low value returned from FFA_MEM_FRAG_RX.
+ * @handle_high: Handle_high value returned from FFA_MEM_FRAG_RX.
+ * @fragment_length: Length of fragments transmitted.
+ * @sender_id: Vmid of sender in bits [31:16]
+ * @smc_handle: Handle passed to smc call. Used to return
+ * FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
+ *
+ * Return: @smc_handle on success, error code on failure.
+ */
+long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t handle_low,
+ uint64_t handle_high,
+ uint32_t fragment_length,
+ uint32_t sender_id,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ long ret;
+ uint32_t desc_sender_id;
+ uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+
+ struct spmc_shmem_obj *obj;
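+ /* Reassemble the 64-bit memory handle from its two halves. */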
+ uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
+
+ spin_lock(&spmc_shmem_obj_state.lock);
+
+ obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+ if (obj == NULL) {
+ WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
+ __func__, mem_handle);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock;
+ }
+
+ desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
+ if (sender_id != desc_sender_id) {
+ WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
+ sender_id, desc_sender_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock;
+ }
+
+ if (obj->desc_filled == obj->desc_size) {
+ WARN("%s: object desc already filled, %zu\n", __func__,
+ obj->desc_filled);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock;
+ }
+
+ spin_lock(&mbox->lock);
+ ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
+ handle);
+ spin_unlock(&mbox->lock);
+
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ return ret;
+
+err_unlock:
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
+ * if the caller implements a version greater
+ * than FF-A 1.0 or if they have requested
+ * the functionality.
+ * TODO: We are assuming that the caller is
+ * an SP. To support retrieval from the
+ * normal world this function will need to be
+ * expanded accordingly.
+ * @resp: Descriptor populated in the caller's RX buffer.
+ * @sp_ctx: Context of the calling SP.
+ */
+void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
+ struct secure_partition_desc *sp_ctx)
+{
+ if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
+ sp_ctx->ns_bit_requested) {
+ /*
+ * Currently memory senders must reside in the normal
+ * world, and we do not have the functionality to change
+ * the state of memory dynamically. Therefore we can always set
+ * the NS bit to 1.
+ */
+ resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
+ }
+}
+
+/**
+ * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
+ * @smc_fid: FID of SMC
+ * @total_length: Total length of retrieve request descriptor if this is
+ * the first call. Otherwise (unsupported) must be 0.
+ * @fragment_length: Length of fragment of retrieve request descriptor passed
+ * in this call. Only @fragment_length == @total_length is
+ * supported by this implementation.
+ * @address: Not supported, must be 0.
+ * @page_count: Not supported, must be 0.
+ * @smc_handle: Handle passed to smc call. Used to return
+ * FFA_MEM_RETRIEVE_RESP.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
+ * Used by secure os to retrieve memory already shared by non-secure os.
+ * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
+ * the client must call FFA_MEM_FRAG_RX until the full response has been
+ * received.
+ *
+ * Return: @handle on success, error code on failure.
+ */
+long
+spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t total_length,
+ uint32_t fragment_length,
+ uint64_t address,
+ uint32_t page_count,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int ret;
+ size_t buf_size;
+ size_t copy_size = 0;
+ size_t min_desc_size;
+ size_t out_desc_size = 0;
+
+ /*
+ * Currently we are only accessing fields that are the same in both the
+ * v1.0 and v1.1 mtd structs, therefore we can use a v1.1 struct directly
+ * here. We only need to validate against the appropriate struct size.
+ */
+ struct ffa_mtd *resp;
+ const struct ffa_mtd *req;
+ struct spmc_shmem_obj *obj = NULL;
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+ uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+ struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
+
+ if (!secure_origin) {
+ WARN("%s: unsupported retrieve req direction.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ if (address != 0U || page_count != 0U) {
+ WARN("%s: custom memory region not supported.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ spin_lock(&mbox->lock);
+
+ req = mbox->tx_buffer;
+ resp = mbox->rx_buffer;
+ buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+
+ if (mbox->rxtx_page_count == 0U) {
+ WARN("%s: buffer pair not registered.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ if (mbox->state != MAILBOX_STATE_EMPTY) {
+ WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
+ ret = FFA_ERROR_DENIED;
+ goto err_unlock_mailbox;
+ }
+
+ if (fragment_length != total_length) {
+ WARN("%s: fragmented retrieve request not supported.\n",
+ __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ if (req->emad_count == 0U) {
+ WARN("%s: unsupported attribute desc count %u.\n",
+ __func__, req->emad_count);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ /* Determine the appropriate minimum descriptor size. */
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ min_desc_size = sizeof(struct ffa_mtd_v1_0);
+ } else {
+ min_desc_size = sizeof(struct ffa_mtd);
+ }
+ if (total_length < min_desc_size) {
+ WARN("%s: invalid length %u < %zu\n", __func__, total_length,
+ min_desc_size);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ spin_lock(&spmc_shmem_obj_state.lock);
+
+ obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
+ if (obj == NULL) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (obj->desc_filled != obj->desc_size) {
+ WARN("%s: incomplete object desc filled %zu < size %zu\n",
+ __func__, obj->desc_filled, obj->desc_size);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
+ WARN("%s: wrong sender id 0x%x != 0x%x\n",
+ __func__, req->sender_id, obj->desc.sender_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (req->emad_count != 0U && req->tag != obj->desc.tag) {
+ WARN("%s: wrong tag 0x%lx != 0x%lx\n",
+ __func__, req->tag, obj->desc.tag);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
+ WARN("%s: mistmatch of endpoint counts %u != %u\n",
+ __func__, req->emad_count, obj->desc.emad_count);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ /* Ensure the NS bit is set to 0 in the request. */
+ if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
+ WARN("%s: NS mem attributes flags MBZ.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (req->flags != 0U) {
+ if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
+ (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
+ /*
+ * If the retrieve request specifies the memory
+ * transaction type, ensure it matches what we expect.
+ */
+ WARN("%s: wrong mem transaction flags %x != %x\n",
+ __func__, req->flags, obj->desc.flags);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
+ req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
+ /*
+ * Current implementation does not support donate and
+ * it supports no other flags.
+ */
+ WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+ }
+
+ /* Validate the caller is a valid participant. */
+ if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
+ WARN("%s: Invalid endpoint ID (0x%x).\n",
+ __func__, sp_ctx->sp_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ /* Validate that the provided emad offset and structure are valid. */
+ for (size_t i = 0; i < req->emad_count; i++) {
+ size_t emad_size;
+ struct ffa_emad_v1_0 *emad;
+
+ emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
+ &emad_size);
+
+ if ((uintptr_t) emad >= (uintptr_t)
+ ((uint8_t *) req + total_length)) {
+ WARN("Invalid emad access.\n");
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+ }
+
+ /*
+ * Validate all the endpoints match in the case of multiple
+ * borrowers. We don't mandate that the order of the borrowers
+ * must match in the descriptors therefore check to see if the
+ * endpoints match in any order.
+ */
+ for (size_t i = 0; i < req->emad_count; i++) {
+ bool found = false;
+ size_t emad_size;
+ struct ffa_emad_v1_0 *emad;
+ struct ffa_emad_v1_0 *other_emad;
+
+ emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
+ &emad_size);
+
+ for (size_t j = 0; j < obj->desc.emad_count; j++) {
+ other_emad = spmc_shmem_obj_get_emad(
+ &obj->desc, j, MAKE_FFA_VERSION(1, 1),
+ &emad_size);
+
+ if (req->emad_count &&
+ emad->mapd.endpoint_id ==
+ other_emad->mapd.endpoint_id) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ WARN("%s: invalid receiver id (0x%x).\n",
+ __func__, emad->mapd.endpoint_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+ }
+
+ mbox->state = MAILBOX_STATE_FULL;
+
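+ /* Track this retrieval so the object cannot be reclaimed while in use. */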
+ if (req->emad_count != 0U) {
+ obj->in_use++;
+ }
+
+ /*
+ * If the caller is v1.0 convert the descriptor, otherwise copy
+ * directly.
+ */
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
+ &copy_size,
+ &out_desc_size);
+ if (ret != 0U) {
+ ERROR("%s: Failed to process descriptor.\n", __func__);
+ goto err_unlock_all;
+ }
+ } else {
+ copy_size = MIN(obj->desc_size, buf_size);
+ out_desc_size = obj->desc_size;
+
+ memcpy(resp, &obj->desc, copy_size);
+ }
+
+ /* Set the NS bit in the response if applicable. */
+ spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
+
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ spin_unlock(&mbox->lock);
+
+ SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
+ copy_size, 0, 0, 0, 0, 0);
+
+err_unlock_all:
+ spin_unlock(&spmc_shmem_obj_state.lock);
+err_unlock_mailbox:
+ spin_unlock(&mbox->lock);
+ return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
+ * @client: Client state.
+ * @handle_low: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
+ * @handle_high: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
+ * @fragment_offset: Byte offset in descriptor to resume at.
+ * @sender_id: Bit[31:16]: Endpoint id of sender if client is a
+ * hypervisor. 0 otherwise.
+ * @smc_handle: Handle passed to smc call. Used to return
+ * FFA_MEM_FRAG_TX.
+ *
+ * Return: @smc_handle on success, error code on failure.
+ */
+long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t handle_low,
+ uint32_t handle_high,
+ uint32_t fragment_offset,
+ uint32_t sender_id,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int ret;
+ void *src;
+ size_t buf_size;
+ size_t copy_size;
+ size_t full_copy_size;
+ uint32_t desc_sender_id;
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+ uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
+ struct spmc_shmem_obj *obj;
+ uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+
+ if (!secure_origin) {
+ WARN("%s: can only be called from swld.\n",
+ __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ spin_lock(&spmc_shmem_obj_state.lock);
+
+ obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+ if (obj == NULL) {
+ WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
+ __func__, mem_handle);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_shmem;
+ }
+
+ desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
+ if (sender_id != 0U && sender_id != desc_sender_id) {
+ WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
+ sender_id, desc_sender_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_shmem;
+ }
+
+ if (fragment_offset >= obj->desc_size) {
+ WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
+ __func__, fragment_offset, obj->desc_size);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_shmem;
+ }
+
+ spin_lock(&mbox->lock);
+
+ if (mbox->rxtx_page_count == 0U) {
+ WARN("%s: buffer pair not registered.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (mbox->state != MAILBOX_STATE_EMPTY) {
+ WARN("%s: RX Buffer is full!\n", __func__);
+ ret = FFA_ERROR_DENIED;
+ goto err_unlock_all;
+ }
+
+ buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+
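+ /* The response fragment will occupy the RX buffer, so mark it as full. */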
+ mbox->state = MAILBOX_STATE_FULL;
+
+ /*
+ * If the caller is v1.0 convert the descriptor, otherwise copy
+ * directly.
+ */
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ size_t out_desc_size;
+
+ ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
+ buf_size,
+ fragment_offset,
+ &copy_size,
+ &out_desc_size);
+ if (ret != 0U) {
+ ERROR("%s: Failed to process descriptor.\n", __func__);
+ goto err_unlock_all;
+ }
+ } else {
+ full_copy_size = obj->desc_size - fragment_offset;
+ copy_size = MIN(full_copy_size, buf_size);
+
+ src = &obj->desc;
+
+ memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
+ }
+
+ spin_unlock(&mbox->lock);
+ spin_unlock(&spmc_shmem_obj_state.lock);
+
+ SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
+ copy_size, sender_id, 0, 0, 0);
+
+err_unlock_all:
+ spin_unlock(&mbox->lock);
+err_unlock_shmem:
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
+ * @client: Client state.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
+ * Used by secure os to release previously shared memory to non-secure os.
+ *
+ * The handle to release must be in the client's (secure os's) transmit buffer.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int spmc_ffa_mem_relinquish(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t handle_low,
+ uint32_t handle_high,
+ uint32_t fragment_offset,
+ uint32_t sender_id,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int ret;
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+ struct spmc_shmem_obj *obj;
+ const struct ffa_mem_relinquish_descriptor *req;
+ struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
+
+ if (!secure_origin) {
+ WARN("%s: unsupported relinquish direction.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ spin_lock(&mbox->lock);
+
+ if (mbox->rxtx_page_count == 0U) {
+ WARN("%s: buffer pair not registered.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ req = mbox->tx_buffer;
+
+ if (req->flags != 0U) {
+ WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ if (req->endpoint_count == 0) {
+ WARN("%s: endpoint count cannot be 0.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ spin_lock(&spmc_shmem_obj_state.lock);
+
+ obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
+ if (obj == NULL) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ /*
+ * Validate the endpoint ID was populated correctly. We don't currently
+ * support proxy endpoints so the endpoint count should always be 1.
+ */
+ if (req->endpoint_count != 1U) {
+ WARN("%s: unsupported endpoint count %u != 1\n", __func__,
+ req->endpoint_count);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ /* Validate provided endpoint ID matches the partition ID. */
+ if (req->endpoint_array[0] != sp_ctx->sp_id) {
+ WARN("%s: invalid endpoint ID %u != %u\n", __func__,
+ req->endpoint_array[0], sp_ctx->sp_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ /* Validate the caller is a valid participant. */
+ if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
+ WARN("%s: Invalid endpoint ID (0x%x).\n",
+ __func__, req->endpoint_array[0]);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
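+ /* The object must have an outstanding retrieval before it can be relinquished. */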
+ if (obj->in_use == 0U) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+ obj->in_use--;
+
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ spin_unlock(&mbox->lock);
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+
+err_unlock_all:
+ spin_unlock(&spmc_shmem_obj_state.lock);
+err_unlock_mailbox:
+ spin_unlock(&mbox->lock);
+ return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
+ * @client: Client state.
+ * @handle_low: Unique handle of shared memory object to reclaim. Bit[31:0].
+ * @handle_high: Unique handle of shared memory object to reclaim.
+ * Bit[63:32].
+ * @flags: Unsupported, ignored.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
+ * Used by non-secure os to reclaim memory previously shared with secure os.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int spmc_ffa_mem_reclaim(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t handle_low,
+ uint32_t handle_high,
+ uint32_t mem_flags,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int ret;
+ struct spmc_shmem_obj *obj;
+ uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
+
+ if (secure_origin) {
+ WARN("%s: unsupported reclaim direction.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ if (mem_flags != 0U) {
+ WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ spin_lock(&spmc_shmem_obj_state.lock);
+
+ obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+ if (obj == NULL) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock;
+ }
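+ /* Memory cannot be reclaimed while a borrower still has it retrieved. */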
+ if (obj->in_use != 0U) {
+ ret = FFA_ERROR_DENIED;
+ goto err_unlock;
+ }
+
+ if (obj->desc_filled != obj->desc_size) {
+ WARN("%s: incomplete object desc filled %zu < size %zu\n",
+ __func__, obj->desc_filled, obj->desc_size);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock;
+ }
+
+ /* Allow for platform specific operations to be performed. */
+ ret = plat_spmc_shmem_reclaim(&obj->desc);
+ if (ret != 0) {
+ goto err_unlock;
+ }
+
+ spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
+ spin_unlock(&spmc_shmem_obj_state.lock);
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+
+err_unlock:
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ return spmc_ffa_error_return(handle, ret);
+}
diff --git a/services/std_svc/spm/el3_spmc/spmc_shared_mem.h b/services/std_svc/spm/el3_spmc/spmc_shared_mem.h
new file mode 100644
index 0000000..839f7a1
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_shared_mem.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMC_SHARED_MEM_H
+#define SPMC_SHARED_MEM_H
+
+#include <services/el3_spmc_ffa_memory.h>
+
+/**
+ * struct ffa_mem_relinquish_descriptor - Relinquish request descriptor.
+ * @handle:
+ * Id of shared memory object to relinquish.
+ * @flags:
+ * If bit 0 is set, clear memory after unmapping from the borrower. Must
+ * be 0 for share. Bit[1]: Time slicing. Not supported, must be 0. All
+ * other bits are reserved 0.
+ * @endpoint_count:
+ * Number of entries in @endpoint_array.
+ * @endpoint_array:
+ * Array of endpoint ids.
+ */
+struct ffa_mem_relinquish_descriptor {
+ uint64_t handle;
+ uint32_t flags;
+ uint32_t endpoint_count;
+ ffa_endpoint_id16_t endpoint_array[];
+};
+CASSERT(sizeof(struct ffa_mem_relinquish_descriptor) == 16,
+ assert_ffa_mem_relinquish_descriptor_size_mismatch);
+
+/**
+ * struct spmc_shmem_obj_state - Global state.
+ * @data: Backing store for spmc_shmem_obj objects.
+ * @data_size: The size allocated for the backing store.
+ * @allocated: Number of bytes allocated in @data.
+ * @next_handle: Handle used for next allocated object.
+ * @lock: Lock protecting all shared memory object state.
+ */
+struct spmc_shmem_obj_state {
+ uint8_t *data;
+ size_t data_size;
+ size_t allocated;
+ uint64_t next_handle;
+ spinlock_t lock;
+};
+
+extern struct spmc_shmem_obj_state spmc_shmem_obj_state;
+extern int plat_spmc_shmem_begin(struct ffa_mtd *desc);
+extern int plat_spmc_shmem_reclaim(struct ffa_mtd *desc);
+
+long spmc_ffa_mem_send(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t total_length,
+ uint32_t fragment_length,
+ uint64_t address,
+ uint32_t page_count,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t handle_low,
+ uint64_t handle_high,
+ uint32_t fragment_length,
+ uint32_t sender_id,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+long spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t total_length,
+ uint32_t fragment_length,
+ uint64_t address,
+ uint32_t page_count,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t handle_low,
+ uint32_t handle_high,
+ uint32_t fragment_offset,
+ uint32_t sender_id,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+
+int spmc_ffa_mem_relinquish(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t handle_low,
+ uint32_t handle_high,
+ uint32_t fragment_offset,
+ uint32_t sender_id,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+int spmc_ffa_mem_reclaim(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t handle_low,
+ uint32_t handle_high,
+ uint32_t mem_flags,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+#endif /* SPMC_SHARED_MEM_H */
diff --git a/services/std_svc/spm/spm_mm/spm_mm.mk b/services/std_svc/spm/spm_mm/spm_mm.mk
new file mode 100644
index 0000000..cbc7940
--- /dev/null
+++ b/services/std_svc/spm/spm_mm/spm_mm.mk
@@ -0,0 +1,33 @@
+#
+# Copyright (c) 2017-2023, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${SPD},none)
+ $(error "Error: SPD and SPM_MM are incompatible build options.")
+endif
+ifneq (${ARCH},aarch64)
+ $(error "Error: SPM_MM is only supported on aarch64.")
+endif
+ifneq (${ENABLE_SVE_FOR_NS},0)
+ $(error "Error: SPM_MM is not compatible with ENABLE_SVE_FOR_NS")
+endif
+ifneq (${ENABLE_SME_FOR_NS},0)
+ $(error "Error: SPM_MM is not compatible with ENABLE_SME_FOR_NS")
+endif
+ifeq (${CTX_INCLUDE_FPREGS},0)
+ $(warning "Warning: SPM_MM: CTX_INCLUDE_FPREGS is set to 0")
+endif
+
+SPM_MM_SOURCES := $(addprefix services/std_svc/spm/spm_mm/, \
+ spm_mm_main.c \
+ spm_mm_setup.c \
+ spm_mm_xlat.c)
+
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32 := yes
+
+# required so that SPM code executing at S-EL0 can access the timer registers
+NS_TIMER_SWITCH := 1
diff --git a/services/std_svc/spm/spm_mm/spm_mm_main.c b/services/std_svc/spm/spm_mm/spm_mm_main.c
new file mode 100644
index 0000000..1ff7bb7
--- /dev/null
+++ b/services/std_svc/spm/spm_mm/spm_mm_main.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <bl31/bl31.h>
+#include <bl31/ehf.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/smccc.h>
+#include <lib/spinlock.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/platform.h>
+#include <services/spm_mm_partition.h>
+#include <services/spm_mm_svc.h>
+#include <smccc_helpers.h>
+
+#include "spm_common.h"
+#include "spm_mm_private.h"
+
+/*******************************************************************************
+ * Secure Partition context information.
+ ******************************************************************************/
+static sp_context_t sp_ctx;
+
+/*******************************************************************************
+ * Set state of a Secure Partition context.
+ ******************************************************************************/
+void sp_state_set(sp_context_t *sp_ptr, sp_state_t state)
+{
+ spin_lock(&(sp_ptr->state_lock));
+ sp_ptr->state = state;
+ spin_unlock(&(sp_ptr->state_lock));
+}
+
+/*******************************************************************************
+ * Wait until the state of a Secure Partition is the specified one and change it
+ * to the desired state.
+ ******************************************************************************/
+void sp_state_wait_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
+{
+ int success = 0;
+
+ while (success == 0) {
+ spin_lock(&(sp_ptr->state_lock));
+
+ if (sp_ptr->state == from) {
+ sp_ptr->state = to;
+
+ success = 1;
+ }
+
+ spin_unlock(&(sp_ptr->state_lock));
+ }
+}
+
+/*******************************************************************************
+ * Check if the state of a Secure Partition is the specified one and, if so,
+ * change it to the desired state. Returns 0 on success, -1 on error.
+ ******************************************************************************/
+int sp_state_try_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
+{
+ int ret = -1;
+
+ spin_lock(&(sp_ptr->state_lock));
+
+ if (sp_ptr->state == from) {
+ sp_ptr->state = to;
+
+ ret = 0;
+ }
+
+ spin_unlock(&(sp_ptr->state_lock));
+
+ return ret;
+}
+
+/*******************************************************************************
+ * This function takes an SP context pointer and performs a synchronous entry
+ * into it.
+ ******************************************************************************/
+static uint64_t spm_sp_synchronous_entry(sp_context_t *ctx)
+{
+ uint64_t rc;
+
+ assert(ctx != NULL);
+
+ /* Assign the context of the SP to this CPU */
+ cm_set_context(&(ctx->cpu_ctx), SECURE);
+
+ /* Restore the context assigned above */
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ /* Invalidate TLBs at EL1. */
+ tlbivmalle1();
+ dsbish();
+
+ /* Enter Secure Partition */
+ rc = spm_secure_partition_enter(&ctx->c_rt_ctx);
+
+ /* Save secure state */
+ cm_el1_sysregs_context_save(SECURE);
+
+ return rc;
+}
+
+/*******************************************************************************
+ * This function returns to the place where spm_sp_synchronous_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 static void spm_sp_synchronous_exit(uint64_t rc)
+{
+ sp_context_t *ctx = &sp_ctx;
+
+ /*
+ * The SPM must have initiated the original request through a
+ * synchronous entry into the secure partition. Jump back to the
+ * original C runtime context with the value of rc in x0.
+ */
+ spm_secure_partition_exit(ctx->c_rt_ctx, rc);
+
+ panic();
+}
+
+/*******************************************************************************
+ * Jump to each Secure Partition for the first time.
+ ******************************************************************************/
+static int32_t spm_init(void)
+{
+ uint64_t rc;
+ sp_context_t *ctx;
+
+ INFO("Secure Partition init...\n");
+
+ ctx = &sp_ctx;
+
+ ctx->state = SP_STATE_RESET;
+
+ rc = spm_sp_synchronous_entry(ctx);
+ assert(rc == 0);
+
+ ctx->state = SP_STATE_IDLE;
+
+ INFO("Secure Partition initialized.\n");
+
+ return !rc;
+}
+
+/*******************************************************************************
+ * Initialize contexts of all Secure Partitions.
+ ******************************************************************************/
+int32_t spm_mm_setup(void)
+{
+ sp_context_t *ctx;
+
+ /* Disable MMU at EL1 (initialized by BL2) */
+ disable_mmu_icache_el1();
+
+ /* Initialize context of the SP */
+ INFO("Secure Partition context setup start...\n");
+
+ ctx = &sp_ctx;
+
+ /* Assign translation tables context. */
+ ctx->xlat_ctx_handle = spm_get_sp_xlat_context();
+
+ spm_sp_setup(ctx);
+
+ /* Register init function for deferred init. */
+ bl31_register_bl32_init(&spm_init);
+
+ INFO("Secure Partition setup done.\n");
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Function to perform a call to a Secure Partition.
+ ******************************************************************************/
+uint64_t spm_mm_sp_call(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3)
+{
+ uint64_t rc;
+ sp_context_t *sp_ptr = &sp_ctx;
+
+#if CTX_INCLUDE_FPREGS
+ /*
+ * SP runs to completion, no need to restore FP registers of secure context.
+ * Save FP registers only for non secure context.
+ */
+ fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
+#endif
+
+ /* Wait until the Secure Partition is idle and set it to busy. */
+ sp_state_wait_switch(sp_ptr, SP_STATE_IDLE, SP_STATE_BUSY);
+
+ /* Set values for registers on SP entry */
+ cpu_context_t *cpu_ctx = &(sp_ptr->cpu_ctx);
+
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X0, smc_fid);
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1, x1);
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X2, x2);
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3, x3);
+
+ /* Jump to the Secure Partition. */
+ rc = spm_sp_synchronous_entry(sp_ptr);
+
+ /* Flag Secure Partition as idle. */
+ assert(sp_ptr->state == SP_STATE_BUSY);
+ sp_state_set(sp_ptr, SP_STATE_IDLE);
+
+#if CTX_INCLUDE_FPREGS
+ /*
+ * SP runs to completion, no need to save FP registers of secure context.
+ * Restore only non secure world FP registers.
+ */
+ fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
+#endif
+
+ return rc;
+}
+
+/*******************************************************************************
+ * MM_COMMUNICATE handler
+ ******************************************************************************/
+static uint64_t mm_communicate(uint32_t smc_fid, uint64_t mm_cookie,
+ uint64_t comm_buffer_address,
+ uint64_t comm_size_address, void *handle)
+{
+ uint64_t rc;
+
+ /* Cookie. Reserved for future use. It must be zero. */
+ if (mm_cookie != 0U) {
+ ERROR("MM_COMMUNICATE: cookie is not zero\n");
+ SMC_RET1(handle, SPM_MM_INVALID_PARAMETER);
+ }
+
+ if (comm_buffer_address == 0U) {
+ ERROR("MM_COMMUNICATE: comm_buffer_address is zero\n");
+ SMC_RET1(handle, SPM_MM_INVALID_PARAMETER);
+ }
+
+ if (comm_size_address != 0U) {
+ VERBOSE("MM_COMMUNICATE: comm_size_address is not 0 as recommended.\n");
+ }
+
+ /*
+ * The current secure partition design mandates
+ * - at any point, only a single core can be
+ * executing in the secure partition.
+ * - a core cannot be preempted by an interrupt
+ * while executing in secure partition.
+ * Raise the running priority of the core to the
+ * interrupt level configured for secure partition
+ * so as to block any interrupt from preempting this
+ * core.
+ */
+ ehf_activate_priority(PLAT_SP_PRI);
+
+ /* Save the Normal world context */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ rc = spm_mm_sp_call(smc_fid, comm_buffer_address, comm_size_address,
+ plat_my_core_pos());
+
+ /* Restore non-secure state */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ /*
+ * Exited from secure partition. This core can take
+ * interrupts now.
+ */
+ ehf_deactivate_priority(PLAT_SP_PRI);
+
+ SMC_RET1(handle, rc);
+}
+
+/*******************************************************************************
+ * Secure Partition Manager SMC handler.
+ ******************************************************************************/
+uint64_t spm_mm_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ unsigned int ns;
+
+ /* Determine which security state this SMC originated from */
+ ns = is_caller_non_secure(flags);
+
+ if (ns == SMC_FROM_SECURE) {
+
+ /* Handle SMCs from Secure world. */
+
+ assert(handle == cm_get_context(SECURE));
+
+ /* Make next ERET jump to S-EL0 instead of S-EL1. */
+ cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());
+
+ switch (smc_fid) {
+
+ case SPM_MM_VERSION_AARCH32:
+ SMC_RET1(handle, SPM_MM_VERSION_COMPILED);
+
+ case MM_SP_EVENT_COMPLETE_AARCH64:
+ spm_sp_synchronous_exit(x1);
+
+ case MM_SP_MEMORY_ATTRIBUTES_GET_AARCH64:
+ INFO("Received MM_SP_MEMORY_ATTRIBUTES_GET_AARCH64 SMC\n");
+
+ if (sp_ctx.state != SP_STATE_RESET) {
+ WARN("MM_SP_MEMORY_ATTRIBUTES_GET_AARCH64 is available at boot time only\n");
+ SMC_RET1(handle, SPM_MM_NOT_SUPPORTED);
+ }
+ SMC_RET1(handle,
+ spm_memory_attributes_get_smc_handler(
+ &sp_ctx, x1));
+
+ case MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64:
+ INFO("Received MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC\n");
+
+ if (sp_ctx.state != SP_STATE_RESET) {
+ WARN("MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64 is available at boot time only\n");
+ SMC_RET1(handle, SPM_MM_NOT_SUPPORTED);
+ }
+ SMC_RET1(handle,
+ spm_memory_attributes_set_smc_handler(
+ &sp_ctx, x1, x2, x3));
+ default:
+ break;
+ }
+ } else {
+
+ /* Handle SMCs from Non-secure world. */
+
+ assert(handle == cm_get_context(NON_SECURE));
+
+ switch (smc_fid) {
+
+ case MM_VERSION_AARCH32:
+ SMC_RET1(handle, MM_VERSION_COMPILED);
+
+ case MM_COMMUNICATE_AARCH32:
+ case MM_COMMUNICATE_AARCH64:
+ return mm_communicate(smc_fid, x1, x2, x3, handle);
+
+ case MM_SP_MEMORY_ATTRIBUTES_GET_AARCH64:
+ case MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64:
+ /* SMC interfaces reserved for secure callers. */
+ SMC_RET1(handle, SPM_MM_NOT_SUPPORTED);
+
+ default:
+ break;
+ }
+ }
+
+ SMC_RET1(handle, SMC_UNK);
+}
diff --git a/services/std_svc/spm/spm_mm/spm_mm_private.h b/services/std_svc/spm/spm_mm/spm_mm_private.h
new file mode 100644
index 0000000..3a52a3e
--- /dev/null
+++ b/services/std_svc/spm/spm_mm/spm_mm_private.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2017-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_MM_PRIVATE_H
+#define SPM_MM_PRIVATE_H
+
+#include <context.h>
+#include "spm_common.h"
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define SP_C_RT_CTX_X19 0x0
+#define SP_C_RT_CTX_X20 0x8
+#define SP_C_RT_CTX_X21 0x10
+#define SP_C_RT_CTX_X22 0x18
+#define SP_C_RT_CTX_X23 0x20
+#define SP_C_RT_CTX_X24 0x28
+#define SP_C_RT_CTX_X25 0x30
+#define SP_C_RT_CTX_X26 0x38
+#define SP_C_RT_CTX_X27 0x40
+#define SP_C_RT_CTX_X28 0x48
+#define SP_C_RT_CTX_X29 0x50
+#define SP_C_RT_CTX_X30 0x58
+
+#define SP_C_RT_CTX_SIZE 0x60
+#define SP_C_RT_CTX_ENTRIES (SP_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <lib/spinlock.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+typedef enum sp_state {
+ SP_STATE_RESET = 0,
+ SP_STATE_IDLE,
+ SP_STATE_BUSY
+} sp_state_t;
+
+typedef struct sp_context {
+ uint64_t c_rt_ctx;
+ cpu_context_t cpu_ctx;
+ xlat_ctx_t *xlat_ctx_handle;
+
+ sp_state_t state;
+ spinlock_t state_lock;
+} sp_context_t;
+
+
+void spm_sp_setup(sp_context_t *sp_ctx);
+
+int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
+ uintptr_t base_va);
+int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
+ u_register_t page_address,
+ u_register_t pages_count,
+ u_register_t smc_attributes);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* SPM_MM_PRIVATE_H */
diff --git a/services/std_svc/spm/spm_mm/spm_mm_setup.c b/services/std_svc/spm/spm_mm/spm_mm_setup.c
new file mode 100644
index 0000000..4e65c9c
--- /dev/null
+++ b/services/std_svc/spm/spm_mm/spm_mm_setup.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2017-2023, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <context.h>
+#include <common/debug.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <platform_def.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <services/spm_mm_partition.h>
+
+#include "spm_common.h"
+#include "spm_mm_private.h"
+#include "spm_shim_private.h"
+
+/* Setup context of the Secure Partition */
+void spm_sp_setup(sp_context_t *sp_ctx)
+{
+ cpu_context_t *ctx = &(sp_ctx->cpu_ctx);
+
+ /* Pointer to the MP information from the platform port. */
+ const spm_mm_boot_info_t *sp_boot_info =
+ plat_get_secure_partition_boot_info(NULL);
+
+ /*
+ * Initialize CPU context
+ * ----------------------
+ */
+
+ entry_point_info_t ep_info = {0};
+
+ SET_PARAM_HEAD(&ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
+
+ /* Setup entrypoint and SPSR */
+ ep_info.pc = sp_boot_info->sp_image_base;
+ ep_info.spsr = SPSR_64(MODE_EL0, MODE_SP_EL0, DISABLE_ALL_EXCEPTIONS);
+
+ /*
+ * X0: Virtual address of a buffer shared between EL3 and Secure EL0.
+ * The buffer will be mapped in the Secure EL1 translation regime
+ * with Normal IS WBWA attributes and RO data and Execute Never
+ * instruction access permissions.
+ *
+ * X1: Size of the buffer in bytes
+ *
+ * X2: cookie value (Implementation Defined)
+ *
+ * X3: cookie value (Implementation Defined)
+ *
+ * X4 to X7 = 0
+ */
+ ep_info.args.arg0 = sp_boot_info->sp_shared_buf_base;
+ ep_info.args.arg1 = sp_boot_info->sp_shared_buf_size;
+ ep_info.args.arg2 = PLAT_SPM_COOKIE_0;
+ ep_info.args.arg3 = PLAT_SPM_COOKIE_1;
+
+ cm_setup_context(ctx, &ep_info);
+
+ /*
+ * SP_EL0: A non-zero value will indicate to the SP that the SPM has
+ * initialized the stack pointer for the current CPU through
+ * implementation defined means. The value will be 0 otherwise.
+ */
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_SP_EL0,
+ sp_boot_info->sp_stack_base + sp_boot_info->sp_pcpu_stack_size);
+
+ /*
+ * Setup translation tables
+ * ------------------------
+ */
+
+#if ENABLE_ASSERTIONS
+
+ /* Get max granularity supported by the platform. */
+ unsigned int max_granule = xlat_arch_get_max_supported_granule_size();
+
+ VERBOSE("Max translation granule size supported: %u KiB\n",
+ max_granule / 1024U);
+
+ unsigned int max_granule_mask = max_granule - 1U;
+
+ /* Base must be aligned to the max granularity */
+ assert((sp_boot_info->sp_ns_comm_buf_base & max_granule_mask) == 0);
+
+ /* Size must be a multiple of the max granularity */
+ assert((sp_boot_info->sp_ns_comm_buf_size & max_granule_mask) == 0);
+
+#endif /* ENABLE_ASSERTIONS */
+
+ /* This region contains the exception vectors used at S-EL1. */
+ const mmap_region_t sel1_exception_vectors =
+ MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
+ SPM_SHIM_EXCEPTIONS_SIZE,
+ MT_CODE | MT_SECURE | MT_PRIVILEGED);
+ mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
+ &sel1_exception_vectors);
+
+ mmap_add_ctx(sp_ctx->xlat_ctx_handle,
+ plat_get_secure_partition_mmap(NULL));
+
+ init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
+
+ /*
+ * MMU-related registers
+ * ---------------------
+ */
+ xlat_ctx_t *xlat_ctx = sp_ctx->xlat_ctx_handle;
+
+ uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, 0, xlat_ctx->base_table,
+ xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
+ EL1_EL0_REGIME);
+
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_MAIR_EL1,
+ mmu_cfg_params[MMU_CFG_MAIR]);
+
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TCR_EL1,
+ mmu_cfg_params[MMU_CFG_TCR]);
+
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TTBR0_EL1,
+ mmu_cfg_params[MMU_CFG_TTBR0]);
+
+ /* Setup SCTLR_EL1 */
+ u_register_t sctlr_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1);
+
+ sctlr_el1 |=
+ /*SCTLR_EL1_RES1 |*/
+ /* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
+ SCTLR_UCI_BIT |
+ /* RW regions at xlat regime EL1&0 are forced to be XN. */
+ SCTLR_WXN_BIT |
+ /* Don't trap to EL1 execution of WFI or WFE at EL0. */
+ SCTLR_NTWI_BIT | SCTLR_NTWE_BIT |
+ /* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
+ SCTLR_UCT_BIT |
+ /* Don't trap to EL1 execution of DZ ZVA at EL0. */
+ SCTLR_DZE_BIT |
+ /* Enable SP Alignment check for EL0 */
+ SCTLR_SA0_BIT |
+ /* Don't change PSTATE.PAN on taking an exception to EL1 */
+ SCTLR_SPAN_BIT |
+ /* Allow cacheable data and instr. accesses to normal memory. */
+ SCTLR_C_BIT | SCTLR_I_BIT |
+ /* Enable MMU. */
+ SCTLR_M_BIT
+ ;
+
+ sctlr_el1 &= ~(
+ /* Explicit data accesses at EL0 are little-endian. */
+ SCTLR_E0E_BIT |
+ /*
+ * Alignment fault checking disabled when at EL1 and EL0 as
+ * the UEFI spec permits unaligned accesses.
+ */
+ SCTLR_A_BIT |
+ /* Accesses to DAIF from EL0 are trapped to EL1. */
+ SCTLR_UMA_BIT
+ );
+
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
+
+ /*
+ * Setup other system registers
+ * ----------------------------
+ */
+
+ /* Shim Exception Vector Base Address */
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_VBAR_EL1,
+ SPM_SHIM_EXCEPTIONS_PTR);
+
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
+ EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);
+
+ /*
+ * FPEN: Allow the Secure Partition to access FP/SIMD registers.
+ * Note that SPM will not do any saving/restoring of these registers on
+ * behalf of the SP. This falls under the SP's responsibility.
+ * TTA: Enable access to trace registers.
+ * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
+ */
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CPACR_EL1,
+ CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
+
+ /*
+ * Prepare information in buffer shared between EL3 and S-EL0
+ * ----------------------------------------------------------
+ */
+
+ void *shared_buf_ptr = (void *) sp_boot_info->sp_shared_buf_base;
+
+ /* Copy the boot information into the shared buffer with the SP. */
+ assert((uintptr_t)shared_buf_ptr + sizeof(spm_mm_boot_info_t)
+ <= (sp_boot_info->sp_shared_buf_base + sp_boot_info->sp_shared_buf_size));
+
+ assert(sp_boot_info->sp_shared_buf_base <=
+ (UINTPTR_MAX - sp_boot_info->sp_shared_buf_size + 1));
+
+ assert(sp_boot_info != NULL);
+
+ memcpy((void *) shared_buf_ptr, (const void *) sp_boot_info,
+ sizeof(spm_mm_boot_info_t));
+
+ /* Pointer to the MP information from the platform port. */
+ spm_mm_mp_info_t *sp_mp_info =
+ ((spm_mm_boot_info_t *) shared_buf_ptr)->mp_info;
+
+ assert(sp_mp_info != NULL);
+
+ /*
+ * Point the shared buffer MP information pointer to where the info will
+ * be populated, just after the boot info.
+ */
+ ((spm_mm_boot_info_t *) shared_buf_ptr)->mp_info =
+ (spm_mm_mp_info_t *) ((uintptr_t)shared_buf_ptr
+ + sizeof(spm_mm_boot_info_t));
+
+ /*
+ * Update the shared buffer pointer to where the MP information for the
+ * payload will be populated
+ */
+ shared_buf_ptr = ((spm_mm_boot_info_t *) shared_buf_ptr)->mp_info;
+
+ /*
+ * Copy the cpu information into the shared buffer area after the boot
+ * information.
+ */
+ assert(sp_boot_info->num_cpus <= PLATFORM_CORE_COUNT);
+
+ assert((uintptr_t)shared_buf_ptr
+ <= (sp_boot_info->sp_shared_buf_base + sp_boot_info->sp_shared_buf_size -
+ (sp_boot_info->num_cpus * sizeof(*sp_mp_info))));
+
+ memcpy(shared_buf_ptr, (const void *) sp_mp_info,
+ sp_boot_info->num_cpus * sizeof(*sp_mp_info));
+
+ /*
+ * Calculate the linear indices of cores in boot information for the
+ * secure partition and flag the primary CPU
+ */
+ sp_mp_info = (spm_mm_mp_info_t *) shared_buf_ptr;
+
+ for (unsigned int index = 0; index < sp_boot_info->num_cpus; index++) {
+ u_register_t mpidr = sp_mp_info[index].mpidr;
+
+ sp_mp_info[index].linear_id = plat_core_pos_by_mpidr(mpidr);
+ if (plat_my_core_pos() == sp_mp_info[index].linear_id)
+ sp_mp_info[index].flags |= MP_INFO_FLAG_PRIMARY_CPU;
+ }
+}
diff --git a/services/std_svc/spm/spm_mm/spm_mm_xlat.c b/services/std_svc/spm/spm_mm/spm_mm_xlat.c
new file mode 100644
index 0000000..01d95c7
--- /dev/null
+++ b/services/std_svc/spm/spm_mm/spm_mm_xlat.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <errno.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <platform_def.h>
+#include <plat/common/platform.h>
+#include <services/spm_mm_partition.h>
+#include <services/spm_mm_svc.h>
+
+#include "spm_mm_private.h"
+#include "spm_shim_private.h"
+
+/* Lock used for SP_MEMORY_ATTRIBUTES_GET and SP_MEMORY_ATTRIBUTES_SET */
+static spinlock_t mem_attr_smc_lock;
+
+/*
+ * Attributes are encoded using a different format in the SMC interface than in
+ * the Trusted Firmware, where the mmap_attr_t enum type is used. This function
+ * converts an attributes value from the SMC format to the mmap_attr_t format by
+ * setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
+ * The other fields are left as 0 because they are ignored by the function
+ * xlat_change_mem_attributes_ctx().
+ */
+static unsigned int smc_attr_to_mmap_attr(unsigned int attributes)
+{
+ unsigned int tf_attr = 0U;
+
+ unsigned int access = (attributes & MM_SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
+ >> MM_SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;
+
+ if (access == MM_SP_MEMORY_ATTRIBUTES_ACCESS_RW) {
+ tf_attr |= MT_RW | MT_USER;
+ } else if (access == MM_SP_MEMORY_ATTRIBUTES_ACCESS_RO) {
+ tf_attr |= MT_RO | MT_USER;
+ } else {
+ /* Other values are reserved. */
+ assert(access == MM_SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS);
+ /* The only requirement is that there's no access from EL0 */
+ tf_attr |= MT_RO | MT_PRIVILEGED;
+ }
+
+ if ((attributes & MM_SP_MEMORY_ATTRIBUTES_NON_EXEC) == 0) {
+ tf_attr |= MT_EXECUTE;
+ } else {
+ tf_attr |= MT_EXECUTE_NEVER;
+ }
+
+ return tf_attr;
+}
+
+/*
+ * This function converts attributes from the Trusted Firmware format into the
+ * SMC interface format.
+ */
+static unsigned int smc_mmap_to_smc_attr(unsigned int attr)
+{
+ unsigned int smc_attr = 0U;
+
+ unsigned int data_access;
+
+ if ((attr & MT_USER) == 0) {
+ /* No access from EL0. */
+ data_access = MM_SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS;
+ } else {
+ if ((attr & MT_RW) != 0) {
+ assert(MT_TYPE(attr) != MT_DEVICE);
+ data_access = MM_SP_MEMORY_ATTRIBUTES_ACCESS_RW;
+ } else {
+ data_access = MM_SP_MEMORY_ATTRIBUTES_ACCESS_RO;
+ }
+ }
+
+ smc_attr |= (data_access & MM_SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
+ << MM_SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;
+
+ if ((attr & MT_EXECUTE_NEVER) != 0U) {
+ smc_attr |= MM_SP_MEMORY_ATTRIBUTES_NON_EXEC;
+ }
+
+ return smc_attr;
+}
+
+int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
+ uintptr_t base_va)
+{
+ uint32_t attributes;
+
+ spin_lock(&mem_attr_smc_lock);
+
+ int rc = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+ base_va, &attributes);
+
+ spin_unlock(&mem_attr_smc_lock);
+
+ /* Convert error codes of xlat_get_mem_attributes_ctx() into SPM ones. */
+ assert((rc == 0) || (rc == -EINVAL));
+
+ if (rc == 0) {
+ return (int32_t) smc_mmap_to_smc_attr(attributes);
+ } else {
+ return SPM_MM_INVALID_PARAMETER;
+ }
+}
+
+int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
+ u_register_t page_address,
+ u_register_t pages_count,
+ u_register_t smc_attributes)
+{
+ uintptr_t base_va = (uintptr_t) page_address;
+ size_t size = (size_t) (pages_count * PAGE_SIZE);
+ uint32_t attributes = (uint32_t) smc_attributes;
+
+ INFO(" Start address : 0x%lx\n", base_va);
+ INFO(" Number of pages: %i (%zi bytes)\n", (int) pages_count, size);
+ INFO(" Attributes : 0x%x\n", attributes);
+
+ spin_lock(&mem_attr_smc_lock);
+
+ int ret = xlat_change_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+ base_va, size,
+ smc_attr_to_mmap_attr(attributes));
+
+ spin_unlock(&mem_attr_smc_lock);
+
+ /* Convert error codes of xlat_change_mem_attributes_ctx() into SPM ones. */
+ assert((ret == 0) || (ret == -EINVAL));
+
+ return (ret == 0) ? SPM_MM_SUCCESS : SPM_MM_INVALID_PARAMETER;
+}
diff --git a/services/std_svc/spmd/aarch64/spmd_helpers.S b/services/std_svc/spmd/aarch64/spmd_helpers.S
new file mode 100644
index 0000000..d7bffca
--- /dev/null
+++ b/services/std_svc/spmd/aarch64/spmd_helpers.S
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "../spmd_private.h"
+
+ .global spmd_spm_core_enter
+ .global spmd_spm_core_exit
+
+ /* ---------------------------------------------------------------------
+ * This function is called with SP_EL0 as stack. Here we stash our EL3
+ * callee-saved registers on to the stack as a part of saving the C
+ * runtime and enter the secure payload.
+ * 'x0' contains a pointer to the memory where the address of the C
+ * runtime context is to be saved.
+ * ---------------------------------------------------------------------
+ */
+func spmd_spm_core_enter
+ /* Make space for the registers that we're going to save */
+ mov x3, sp
+ str x3, [x0, #0]
+ sub sp, sp, #SPMD_C_RT_CTX_SIZE
+
+ /* Save callee-saved registers on to the stack */
+ stp x19, x20, [sp, #SPMD_C_RT_CTX_X19]
+ stp x21, x22, [sp, #SPMD_C_RT_CTX_X21]
+ stp x23, x24, [sp, #SPMD_C_RT_CTX_X23]
+ stp x25, x26, [sp, #SPMD_C_RT_CTX_X25]
+ stp x27, x28, [sp, #SPMD_C_RT_CTX_X27]
+ stp x29, x30, [sp, #SPMD_C_RT_CTX_X29]
+
+	/* ---------------------------------------------------------------------
+	 * Everything is set up now. el3_exit() will use the secure context to
+	 * restore the general purpose and EL3 system registers before the
+	 * ERET into the secure payload.
+	 * ---------------------------------------------------------------------
+	 */
+ b el3_exit
+endfunc spmd_spm_core_enter
+
+	/* ---------------------------------------------------------------------
+	 * This function is called with 'x0' pointing to a C runtime context.
+	 * It restores the saved callee-saved registers and returns to that
+	 * runtime, using 'x0' as the new stack pointer. This destroys the C
+	 * runtime context that had been built on the stack below the saved
+	 * context by the caller. The second parameter 'x1' is then passed to
+	 * the caller as the return value.
+	 * ---------------------------------------------------------------------
+	 */
+func spmd_spm_core_exit
+ /* Restore the previous stack */
+ mov sp, x0
+
+	/* Restore callee-saved registers from the stack */
+ ldp x19, x20, [x0, #(SPMD_C_RT_CTX_X19 - SPMD_C_RT_CTX_SIZE)]
+ ldp x21, x22, [x0, #(SPMD_C_RT_CTX_X21 - SPMD_C_RT_CTX_SIZE)]
+ ldp x23, x24, [x0, #(SPMD_C_RT_CTX_X23 - SPMD_C_RT_CTX_SIZE)]
+ ldp x25, x26, [x0, #(SPMD_C_RT_CTX_X25 - SPMD_C_RT_CTX_SIZE)]
+ ldp x27, x28, [x0, #(SPMD_C_RT_CTX_X27 - SPMD_C_RT_CTX_SIZE)]
+ ldp x29, x30, [x0, #(SPMD_C_RT_CTX_X29 - SPMD_C_RT_CTX_SIZE)]
+
+	/* ---------------------------------------------------------------------
+	 * This should take us back to the instruction after the call to the
+	 * last spmd_spm_core_enter(). Place the second parameter in x0 so
+	 * that the caller sees it as the return value of the original entry
+	 * call.
+	 * ---------------------------------------------------------------------
+	 */
+ mov x0, x1
+ ret
+endfunc spmd_spm_core_exit
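+
+	/* ---------------------------------------------------------------------
+	 * Note on usage: spmd_spm_core_enter and spmd_spm_core_exit are used
+	 * as a pair. spmd_spm_core_sync_entry() in spmd_main.c stashes the C
+	 * runtime context via spmd_spm_core_enter() before entering the SPMC,
+	 * and spmd_spm_core_sync_exit() later unwinds it via
+	 * spmd_spm_core_exit() so that the original entry appears to simply
+	 * return with the SPMC's result in x0.
+	 * ---------------------------------------------------------------------
+	 */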
diff --git a/services/std_svc/spmd/spmd.mk b/services/std_svc/spmd/spmd.mk
new file mode 100644
index 0000000..e567b53
--- /dev/null
+++ b/services/std_svc/spmd/spmd.mk
@@ -0,0 +1,29 @@
+#
+# Copyright (c) 2021-2023, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+SPMD_SOURCES += $(addprefix services/std_svc/spmd/, \
+ ${ARCH}/spmd_helpers.S \
+ spmd_pm.c \
+ spmd_main.c \
+ spmd_logical_sp.c)
+
+# Specify platform specific SPMD logical partition implementation.
+SPMD_LP_SOURCES := $(wildcard $(addprefix ${PLAT_DIR}/, \
+ ${PLAT}_spmd_logical_sp*.c))
+
+ifeq (${ENABLE_SPMD_LP}, 1)
+ifneq ($(wildcard $(SPMD_LP_SOURCES)),)
+SPMD_SOURCES += $(SPMD_LP_SOURCES)
+endif
+endif
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32 := yes
+
+# Enable dynamic memory mapping
+# The SPMD component maps the SPMC DTB within BL31 virtual space.
+PLAT_XLAT_TABLES_DYNAMIC := 1
+$(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC))
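+
+# Build sketch (illustrative): the SPMD is typically selected with SPD=spmd on
+# the build command line, and platform SPMD logical partition sources are
+# compiled in by additionally setting ENABLE_SPMD_LP=1, e.g.
+#   make PLAT=<platform> SPD=spmd ENABLE_SPMD_LP=1 all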
diff --git a/services/std_svc/spmd/spmd_logical_sp.c b/services/std_svc/spmd/spmd_logical_sp.c
new file mode 100644
index 0000000..d992187
--- /dev/null
+++ b/services/std_svc/spmd/spmd_logical_sp.c
@@ -0,0 +1,742 @@
+/*
+ * Copyright (c) 2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+#include "spmd_private.h"
+
+#include <common/debug.h>
+#include <common/uuid.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <services/el3_spmd_logical_sp.h>
+#include <services/spmc_svc.h>
+#include <smccc_helpers.h>
+
+
+/*
+ * The maximum number of ffa_partition_info entries that can be returned by
+ * an invocation of FFA_PARTITION_INFO_GET_REGS_64 is the size in bytes of
+ * the available registers/args in struct ffa_value divided by the size of
+ * struct ffa_partition_info. For this ABI, arg3-arg17 in ffa_value can be
+ * used, i.e. 15 uint64_t fields. For FF-A v1.1, this value should be 5.
+ */
+#define MAX_INFO_REGS_ENTRIES_PER_CALL \
+ (uint8_t)((15 * sizeof(uint64_t)) / \
+ sizeof(struct ffa_partition_info_v1_1))
+CASSERT(MAX_INFO_REGS_ENTRIES_PER_CALL == 5, assert_too_many_info_reg_entries);
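+
+/*
+ * Worked example of the arithmetic above (assuming struct
+ * ffa_partition_info_v1_1 packs to 24 bytes: 16-bit endpoint ID, 16-bit
+ * execution context count, 32-bit properties and a 16-byte UUID):
+ * 15 registers * 8 bytes = 120 bytes, and 120 / 24 = 5 entries per call,
+ * which is what the CASSERT above verifies.
+ */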
+
+#if ENABLE_SPMD_LP
+static bool is_spmd_lp_inited;
+static bool is_spmc_inited;
+
+/*
+ * Helper function to obtain the array storing the EL3
+ * SPMD Logical Partition descriptors.
+ */
+static struct spmd_lp_desc *get_spmd_el3_lp_array(void)
+{
+ return (struct spmd_lp_desc *) SPMD_LP_DESCS_START;
+}
+
+/*******************************************************************************
+ * Validate any logical partition descriptors before we initialize.
+ * Initialization of said partitions will be taken care of during SPMD boot.
+ ******************************************************************************/
+static int el3_spmd_sp_desc_validate(struct spmd_lp_desc *lp_array)
+{
+ /* Check the array bounds are valid. */
+ assert(SPMD_LP_DESCS_END > SPMD_LP_DESCS_START);
+
+ /*
+ * No support for SPMD logical partitions when SPMC is at EL3.
+ */
+ assert(!is_spmc_at_el3());
+
+ /* If no SPMD logical partitions are implemented then simply bail out. */
+ if (SPMD_LP_DESCS_COUNT == 0U) {
+ return -1;
+ }
+
+ for (uint32_t index = 0U; index < SPMD_LP_DESCS_COUNT; index++) {
+ struct spmd_lp_desc *lp_desc = &lp_array[index];
+
+ /* Validate our logical partition descriptors. */
+ if (lp_desc == NULL) {
+ ERROR("Invalid SPMD Logical SP Descriptor\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure the ID follows the convention to indicate it resides
+ * in the secure world.
+ */
+ if (!ffa_is_secure_world_id(lp_desc->sp_id)) {
+ ERROR("Invalid SPMD Logical SP ID (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure SPMD logical partition is in valid range. */
+ if (!is_spmd_lp_id(lp_desc->sp_id)) {
+ ERROR("Invalid SPMD Logical Partition ID (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure the UUID is not the NULL UUID. */
+ if (lp_desc->uuid[0] == 0 && lp_desc->uuid[1] == 0 &&
+ lp_desc->uuid[2] == 0 && lp_desc->uuid[3] == 0) {
+ ERROR("Invalid UUID for SPMD Logical SP (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure init function callback is registered. */
+ if (lp_desc->init == NULL) {
+ ERROR("Missing init function for Logical SP(0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure that SPMD LP only supports sending direct requests. */
+ if (lp_desc->properties != FFA_PARTITION_DIRECT_REQ_SEND) {
+ ERROR("Invalid SPMD logical partition properties (0x%x)\n",
+ lp_desc->properties);
+ return -EINVAL;
+ }
+
+ /* Ensure that all partition IDs are unique. */
+ for (uint32_t inner_idx = index + 1;
+ inner_idx < SPMD_LP_DESCS_COUNT; inner_idx++) {
+ if (lp_desc->sp_id == lp_array[inner_idx].sp_id) {
+ ERROR("Duplicate SPMD logical SP ID Detected (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
+static void spmd_encode_ffa_error(struct ffa_value *retval, int32_t error_code)
+{
+ retval->func = FFA_ERROR;
+ retval->arg1 = FFA_TARGET_INFO_MBZ;
+ retval->arg2 = (uint32_t)error_code;
+ retval->arg3 = FFA_TARGET_INFO_MBZ;
+ retval->arg4 = FFA_TARGET_INFO_MBZ;
+ retval->arg5 = FFA_TARGET_INFO_MBZ;
+ retval->arg6 = FFA_TARGET_INFO_MBZ;
+ retval->arg7 = FFA_TARGET_INFO_MBZ;
+}
+
+static void spmd_build_direct_message_req(spmd_spm_core_context_t *ctx,
+ uint64_t x1, uint64_t x2,
+ uint64_t x3, uint64_t x4)
+{
+ gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
+
+ write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
+ write_ctx_reg(gpregs, CTX_GPREG_X1, x1);
+ write_ctx_reg(gpregs, CTX_GPREG_X2, x2);
+ write_ctx_reg(gpregs, CTX_GPREG_X3, x3);
+ write_ctx_reg(gpregs, CTX_GPREG_X4, x4);
+ write_ctx_reg(gpregs, CTX_GPREG_X5, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X6, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X7, 0U);
+}
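+
+/*
+ * For reference, the direct request built above follows the FF-A register
+ * convention used throughout this file: x0 carries
+ * FFA_MSG_SEND_DIRECT_REQ_SMC32, x1 carries the source endpoint ID in its
+ * upper halfword and the destination endpoint ID in its lower halfword,
+ * x2 carries the message flags and x3/x4 carry the payload. The caller is
+ * expected to have encoded x1 accordingly.
+ */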
+
+static void spmd_encode_ctx_to_ffa_value(spmd_spm_core_context_t *ctx,
+ struct ffa_value *retval)
+{
+ gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
+
+ retval->func = read_ctx_reg(gpregs, CTX_GPREG_X0);
+ retval->arg1 = read_ctx_reg(gpregs, CTX_GPREG_X1);
+ retval->arg2 = read_ctx_reg(gpregs, CTX_GPREG_X2);
+ retval->arg3 = read_ctx_reg(gpregs, CTX_GPREG_X3);
+ retval->arg4 = read_ctx_reg(gpregs, CTX_GPREG_X4);
+ retval->arg5 = read_ctx_reg(gpregs, CTX_GPREG_X5);
+ retval->arg6 = read_ctx_reg(gpregs, CTX_GPREG_X6);
+ retval->arg7 = read_ctx_reg(gpregs, CTX_GPREG_X7);
+ retval->arg8 = read_ctx_reg(gpregs, CTX_GPREG_X8);
+ retval->arg9 = read_ctx_reg(gpregs, CTX_GPREG_X9);
+ retval->arg10 = read_ctx_reg(gpregs, CTX_GPREG_X10);
+ retval->arg11 = read_ctx_reg(gpregs, CTX_GPREG_X11);
+ retval->arg12 = read_ctx_reg(gpregs, CTX_GPREG_X12);
+ retval->arg13 = read_ctx_reg(gpregs, CTX_GPREG_X13);
+ retval->arg14 = read_ctx_reg(gpregs, CTX_GPREG_X14);
+ retval->arg15 = read_ctx_reg(gpregs, CTX_GPREG_X15);
+ retval->arg16 = read_ctx_reg(gpregs, CTX_GPREG_X16);
+ retval->arg17 = read_ctx_reg(gpregs, CTX_GPREG_X17);
+}
+
+static void spmd_logical_sp_set_dir_req_ongoing(spmd_spm_core_context_t *ctx)
+{
+ ctx->spmd_lp_sync_req_ongoing |= SPMD_LP_FFA_DIR_REQ_ONGOING;
+}
+
+static void spmd_logical_sp_reset_dir_req_ongoing(spmd_spm_core_context_t *ctx)
+{
+ ctx->spmd_lp_sync_req_ongoing &= ~SPMD_LP_FFA_DIR_REQ_ONGOING;
+}
+
+static void spmd_build_ffa_info_get_regs(spmd_spm_core_context_t *ctx,
+ const uint32_t uuid[4],
+ const uint16_t start_index,
+ const uint16_t tag)
+{
+ gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
+
+ uint64_t arg1 = (uint64_t)uuid[1] << 32 | uuid[0];
+ uint64_t arg2 = (uint64_t)uuid[3] << 32 | uuid[2];
+ uint64_t arg3 = start_index | (uint64_t)tag << 16;
+
+ write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_PARTITION_INFO_GET_REGS_SMC64);
+ write_ctx_reg(gpregs, CTX_GPREG_X1, arg1);
+ write_ctx_reg(gpregs, CTX_GPREG_X2, arg2);
+ write_ctx_reg(gpregs, CTX_GPREG_X3, arg3);
+ write_ctx_reg(gpregs, CTX_GPREG_X4, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X5, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X6, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X7, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X8, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X9, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X10, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X11, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X12, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X13, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X14, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X15, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X16, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X17, 0U);
+}
+
+static void spmd_logical_sp_set_info_regs_ongoing(spmd_spm_core_context_t *ctx)
+{
+ ctx->spmd_lp_sync_req_ongoing |= SPMD_LP_FFA_INFO_GET_REG_ONGOING;
+}
+
+static void spmd_logical_sp_reset_info_regs_ongoing(
+ spmd_spm_core_context_t *ctx)
+{
+ ctx->spmd_lp_sync_req_ongoing &= ~SPMD_LP_FFA_INFO_GET_REG_ONGOING;
+}
+
+static void spmd_fill_lp_info_array(
+ struct ffa_partition_info_v1_1 (*partitions)[EL3_SPMD_MAX_NUM_LP],
+ uint32_t uuid[4], uint16_t *lp_count_out)
+{
+ uint16_t lp_count = 0;
+ struct spmd_lp_desc *lp_array;
+ bool uuid_is_null = is_null_uuid(uuid);
+
+ if (SPMD_LP_DESCS_COUNT == 0U) {
+ *lp_count_out = 0;
+ return;
+ }
+
+ lp_array = get_spmd_el3_lp_array();
+ for (uint16_t index = 0; index < SPMD_LP_DESCS_COUNT; ++index) {
+ struct spmd_lp_desc *lp = &lp_array[index];
+
+ if (uuid_is_null || uuid_match(uuid, lp->uuid)) {
+ uint16_t array_index = lp_count;
+
+ ++lp_count;
+
+ (*partitions)[array_index].ep_id = lp->sp_id;
+ (*partitions)[array_index].execution_ctx_count = 1;
+ (*partitions)[array_index].properties = lp->properties;
+ (*partitions)[array_index].properties |=
+ (FFA_PARTITION_INFO_GET_AARCH64_STATE <<
+ FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT);
+ if (uuid_is_null) {
+ memcpy(&((*partitions)[array_index].uuid),
+ &lp->uuid, sizeof(lp->uuid));
+ }
+ }
+ }
+
+ *lp_count_out = lp_count;
+}
+
+static inline void spmd_pack_lp_count_props(
+ uint64_t *xn, uint16_t ep_id, uint16_t vcpu_count,
+ uint32_t properties)
+{
+ *xn = (uint64_t)ep_id;
+ *xn |= (uint64_t)vcpu_count << 16;
+ *xn |= (uint64_t)properties << 32;
+}
+
+static inline void spmd_pack_lp_uuid(uint64_t *xn_1, uint64_t *xn_2,
+ uint32_t uuid[4])
+{
+ *xn_1 = (uint64_t)uuid[0];
+ *xn_1 |= (uint64_t)uuid[1] << 32;
+ *xn_2 = (uint64_t)uuid[2];
+ *xn_2 |= (uint64_t)uuid[3] << 32;
+}
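+
+/*
+ * Packed layout produced by the two helpers above, as consumed by
+ * FFA_PARTITION_INFO_GET_REGS: the first register holds the endpoint ID in
+ * bits [15:0], the execution context (vCPU) count in bits [31:16] and the
+ * partition properties in bits [63:32]; the next two registers hold the
+ * 128-bit UUID, with uuid[0] in the low word of the first of the pair.
+ */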
+#endif
+
+/*
+ * Initialize SPMD logical partitions. This function assumes that it is called
+ * only after the SPMC has successfully initialized.
+ */
+int32_t spmd_logical_sp_init(void)
+{
+#if ENABLE_SPMD_LP
+ int32_t rc = 0;
+ struct spmd_lp_desc *spmd_lp_descs;
+
+ assert(SPMD_LP_DESCS_COUNT <= EL3_SPMD_MAX_NUM_LP);
+
+ if (is_spmd_lp_inited == true) {
+ return 0;
+ }
+
+ if (is_spmc_inited == false) {
+ return -1;
+ }
+
+ spmd_lp_descs = get_spmd_el3_lp_array();
+
+ /* Perform initial validation of the SPMD Logical Partitions. */
+ rc = el3_spmd_sp_desc_validate(spmd_lp_descs);
+ if (rc != 0) {
+ ERROR("Logical SPMD Partition validation failed!\n");
+ return rc;
+ }
+
+ VERBOSE("SPMD Logical Secure Partition init start.\n");
+ for (unsigned int i = 0U; i < SPMD_LP_DESCS_COUNT; i++) {
+ rc = spmd_lp_descs[i].init();
+ if (rc != 0) {
+ ERROR("SPMD Logical SP (0x%x) failed to initialize\n",
+ spmd_lp_descs[i].sp_id);
+ return rc;
+ }
+ VERBOSE("SPMD Logical SP (0x%x) Initialized\n",
+ spmd_lp_descs[i].sp_id);
+ }
+
+ INFO("SPMD Logical Secure Partition init completed.\n");
+ if (rc == 0) {
+ is_spmd_lp_inited = true;
+ }
+ return rc;
+#else
+ return 0;
+#endif
+}
+
+void spmd_logical_sp_set_spmc_initialized(void)
+{
+#if ENABLE_SPMD_LP
+ is_spmc_inited = true;
+#endif
+}
+
+void spmd_logical_sp_set_spmc_failure(void)
+{
+#if ENABLE_SPMD_LP
+ is_spmc_inited = false;
+#endif
+}
+
+/*
+ * This function takes an ffa_value structure populated with partition
+ * information from an FFA_PARTITION_INFO_GET_REGS ABI call, extracts
+ * the values and writes it into a ffa_partition_info_v1_1 structure for
+ * other code to consume.
+ */
+bool ffa_partition_info_regs_get_part_info(
+ struct ffa_value *args, uint8_t idx,
+ struct ffa_partition_info_v1_1 *partition_info)
+{
+ uint64_t *arg_ptrs;
+ uint64_t info, uuid_lo, uuid_high;
+
+	/*
+	 * Each partition's information is encoded in 3 registers, so there
+	 * can be a maximum of 5 entries per call.
+	 */
+ if (idx >= 5 || partition_info == NULL) {
+ return false;
+ }
+
+	/*
+	 * Point at the args in the return value: arg0/func encodes the FF-A
+	 * function, arg1 is reserved, arg2 encodes the indices, and arg3
+	 * onwards carry the partition information.
+	 */
+ arg_ptrs = (uint64_t *)args + ((idx * 3) + 3);
+ info = *arg_ptrs;
+
+ arg_ptrs++;
+ uuid_lo = *arg_ptrs;
+
+ arg_ptrs++;
+ uuid_high = *arg_ptrs;
+
+ partition_info->ep_id = (uint16_t)(info & 0xFFFFU);
+ partition_info->execution_ctx_count = (uint16_t)((info >> 16) & 0xFFFFU);
+ partition_info->properties = (uint32_t)(info >> 32);
+ partition_info->uuid[0] = (uint32_t)(uuid_lo & 0xFFFFFFFFU);
+ partition_info->uuid[1] = (uint32_t)((uuid_lo >> 32) & 0xFFFFFFFFU);
+ partition_info->uuid[2] = (uint32_t)(uuid_high & 0xFFFFFFFFU);
+ partition_info->uuid[3] = (uint32_t)((uuid_high >> 32) & 0xFFFFFFFFU);
+
+ return true;
+}
+
+/*
+ * This function is called by the SPMD in response to
+ * an FFA_PARTITION_INFO_GET_REG ABI invocation by the SPMC. Secure partitions
+ * are allowed to discover the presence of EL3 SPMD logical partitions by
+ * invoking the aforementioned ABI and this function populates the required
+ * information about EL3 SPMD logical partitions.
+ */
+uint64_t spmd_el3_populate_logical_partition_info(void *handle, uint64_t x1,
+ uint64_t x2, uint64_t x3)
+{
+#if ENABLE_SPMD_LP
+ uint32_t target_uuid[4] = { 0 };
+ uint32_t w0;
+ uint32_t w1;
+ uint32_t w2;
+ uint32_t w3;
+ uint16_t start_index;
+ uint16_t tag;
+ static struct ffa_partition_info_v1_1 partitions[EL3_SPMD_MAX_NUM_LP];
+ uint16_t lp_count = 0;
+ uint16_t max_idx = 0;
+ uint16_t curr_idx = 0;
+ uint8_t num_entries_to_ret = 0;
+ struct ffa_value ret = { 0 };
+ uint64_t *arg_ptrs = (uint64_t *)&ret + 3;
+
+ w0 = (uint32_t)(x1 & 0xFFFFFFFFU);
+ w1 = (uint32_t)(x1 >> 32);
+ w2 = (uint32_t)(x2 & 0xFFFFFFFFU);
+ w3 = (uint32_t)(x2 >> 32);
+
+ target_uuid[0] = w0;
+ target_uuid[1] = w1;
+ target_uuid[2] = w2;
+ target_uuid[3] = w3;
+
+ start_index = (uint16_t)(x3 & 0xFFFFU);
+ tag = (uint16_t)((x3 >> 16) & 0xFFFFU);
+
+ assert(handle == cm_get_context(SECURE));
+
+ if (tag != 0) {
+ VERBOSE("Tag is not 0. Cannot return partition info.\n");
+ return spmd_ffa_error_return(handle, FFA_ERROR_RETRY);
+ }
+
+ memset(&partitions, 0, sizeof(partitions));
+
+ spmd_fill_lp_info_array(&partitions, target_uuid, &lp_count);
+
+ if (lp_count == 0) {
+		VERBOSE("No SPMD EL3 logical partitions exist.\n");
+ return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ if (start_index >= lp_count) {
+		VERBOSE("start_index = %d, lp_count = %d (start index must be"
+			" less than partition count).\n",
+			start_index, lp_count);
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ max_idx = lp_count - 1;
+ num_entries_to_ret = (max_idx - start_index) + 1;
+ num_entries_to_ret =
+ MIN(num_entries_to_ret, MAX_INFO_REGS_ENTRIES_PER_CALL);
+ curr_idx = start_index + num_entries_to_ret - 1;
+ assert(curr_idx <= max_idx);
+
+ ret.func = FFA_SUCCESS_SMC64;
+ ret.arg2 = (uint64_t)((sizeof(struct ffa_partition_info_v1_1) & 0xFFFFU) << 48);
+ ret.arg2 |= (uint64_t)(curr_idx << 16);
+ ret.arg2 |= (uint64_t)max_idx;
+
+ for (uint16_t idx = start_index; idx <= curr_idx; ++idx) {
+ spmd_pack_lp_count_props(arg_ptrs, partitions[idx].ep_id,
+ partitions[idx].execution_ctx_count,
+ partitions[idx].properties);
+ arg_ptrs++;
+ if (is_null_uuid(target_uuid)) {
+ spmd_pack_lp_uuid(arg_ptrs, (arg_ptrs + 1),
+ partitions[idx].uuid);
+ }
+ arg_ptrs += 2;
+ }
+
+ SMC_RET18(handle, ret.func, ret.arg1, ret.arg2, ret.arg3, ret.arg4,
+ ret.arg5, ret.arg6, ret.arg7, ret.arg8, ret.arg9, ret.arg10,
+ ret.arg11, ret.arg12, ret.arg13, ret.arg14, ret.arg15,
+ ret.arg16, ret.arg17);
+#else
+ return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+#endif
+}
+
+/* This function can be used by an SPMD logical partition to invoke the
+ * FFA_PARTITION_INFO_GET_REGS ABI to the SPMC, to discover the secure
+ * partitions in the system. The function takes a UUID, start index and
+ * tag and the partition information are returned in an ffa_value structure
+ * and can be consumed by using appropriate helper functions.
+ */
+bool spmd_el3_invoke_partition_info_get(
+ const uint32_t target_uuid[4],
+ const uint16_t start_index,
+ const uint16_t tag,
+ struct ffa_value *retval)
+{
+#if ENABLE_SPMD_LP
+ uint64_t rc = UINT64_MAX;
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+
+ if (retval == NULL) {
+ return false;
+ }
+
+ memset(retval, 0, sizeof(*retval));
+
+ if (!is_spmc_inited) {
+		VERBOSE("Cannot discover partitions before the"
+			" SPMC is initialized.\n");
+ spmd_encode_ffa_error(retval, FFA_ERROR_DENIED);
+ return true;
+ }
+
+ if (tag != 0) {
+		VERBOSE("Tag must be zero. Other tags are unsupported.\n");
+ spmd_encode_ffa_error(retval,
+ FFA_ERROR_INVALID_PARAMETER);
+ return true;
+ }
+
+ /* Save the non-secure context before entering SPMC */
+ cm_el1_sysregs_context_save(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_save(NON_SECURE);
+#endif
+
+ spmd_build_ffa_info_get_regs(ctx, target_uuid, start_index, tag);
+ spmd_logical_sp_set_info_regs_ongoing(ctx);
+
+ rc = spmd_spm_core_sync_entry(ctx);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%lx) on CPU%u\n", __func__, rc,
+ plat_my_core_pos());
+ panic();
+ }
+
+ spmd_logical_sp_reset_info_regs_ongoing(ctx);
+ spmd_encode_ctx_to_ffa_value(ctx, retval);
+
+ assert(is_ffa_error(retval) || is_ffa_success(retval));
+
+ cm_el1_sysregs_context_restore(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_restore(NON_SECURE);
+#endif
+ cm_set_next_eret_context(NON_SECURE);
+ return true;
+#else
+ return false;
+#endif
+}
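+
+/*
+ * Usage sketch (illustrative only): an EL3 SPMD logical partition can combine
+ * the helpers above to enumerate secure partitions, e.g.
+ *
+ *	struct ffa_value ret;
+ *	struct ffa_partition_info_v1_1 info;
+ *	uint32_t null_uuid[4] = { 0 };
+ *
+ *	if (spmd_el3_invoke_partition_info_get(null_uuid, 0, 0, &ret) &&
+ *	    is_ffa_success(&ret)) {
+ *		(void)ffa_partition_info_regs_get_part_info(&ret, 0, &info);
+ *	}
+ *
+ * Error handling and iteration over the remaining entries are omitted.
+ */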
+
+/*******************************************************************************
+ * This function sends an FF-A Direct Request from a partition in EL3 to a
+ * partition that may reside under an SPMC (only lower ELs supported). The main
+ * use of this API is for SPMD logical partitions.
+ * The API is expected to be used when there are platform specific SMCs that
+ * need to be routed to a secure partition that is FF-A compliant or when
+ * there are group 0 interrupts that need to be handled first in EL3 and then
+ * forwarded to an FF-A compliant secure partition. Therefore, it is expected
+ * that the handle to the context provided belongs to the non-secure context.
+ * This also means that interrupts/SMCs that trap to EL3 during secure execution
+ * cannot use this API.
+ * x1, x2, x3 and x4 are encoded as specified in the FF-A specification.
+ * retval is used to pass the direct response values to the caller.
+ * The function returns true if retval has valid values, and false otherwise.
+ ******************************************************************************/
+bool spmd_el3_ffa_msg_direct_req(uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *handle,
+ struct ffa_value *retval)
+{
+#if ENABLE_SPMD_LP
+
+ uint64_t rc = UINT64_MAX;
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+
+ if (retval == NULL) {
+ return false;
+ }
+
+ memset(retval, 0, sizeof(*retval));
+
+ if (!is_spmd_lp_inited || !is_spmc_inited) {
+		VERBOSE("Cannot send SPMD logical partition direct message:"
+			" partitions or SPMC not initialized.\n");
+ spmd_encode_ffa_error(retval, FFA_ERROR_DENIED);
+ return true;
+ }
+
+	/*
+	 * x2 must be zero, since there is no support for framework messages
+	 * via an SPMD logical partition. Strictly speaking the parameter
+	 * could be dropped, but keeping x2 allows this function to be
+	 * extended later with functionality based on x2 as the framework
+	 * evolves.
+	 */
+ if (x2 != 0) {
+ VERBOSE("x2 must be zero. Cannot send framework message.\n");
+ spmd_encode_ffa_error(retval, FFA_ERROR_DENIED);
+ return true;
+ }
+
+ /*
+ * Current context must be non-secure. API is expected to be used
+ * when entry into EL3 and the SPMD logical partition is via an
+ * interrupt that occurs when execution is in normal world and
+ * SMCs from normal world. FF-A compliant SPMCs are expected to
+ * trap interrupts during secure execution in lower ELs since they
+ * are usually not re-entrant and SMCs from secure world can be
+ * handled synchronously. There is no known use case for an SPMD
+ * logical partition to send a direct message to another partition
+ * in response to a secure interrupt or SMCs from secure world.
+ */
+ if (handle != cm_get_context(NON_SECURE)) {
+ VERBOSE("Handle must be for the non-secure context.\n");
+ spmd_encode_ffa_error(retval, FFA_ERROR_DENIED);
+ return true;
+ }
+
+ if (!is_spmd_lp_id(ffa_endpoint_source(x1))) {
+		VERBOSE("Source ID must be a valid SPMD logical partition"
+			" ID.\n");
+ spmd_encode_ffa_error(retval,
+ FFA_ERROR_INVALID_PARAMETER);
+ return true;
+ }
+
+ if (is_spmd_lp_id(ffa_endpoint_destination(x1))) {
+		VERBOSE("Destination ID must not be an SPMD logical partition"
+			" ID.\n");
+ spmd_encode_ffa_error(retval,
+ FFA_ERROR_INVALID_PARAMETER);
+ return true;
+ }
+
+ if (!ffa_is_secure_world_id(ffa_endpoint_destination(x1))) {
+ VERBOSE("Destination ID must be secure world ID.\n");
+ spmd_encode_ffa_error(retval,
+ FFA_ERROR_INVALID_PARAMETER);
+ return true;
+ }
+
+ if (ffa_endpoint_destination(x1) == SPMD_DIRECT_MSG_ENDPOINT_ID) {
+ VERBOSE("Destination ID must not be SPMD ID.\n");
+ spmd_encode_ffa_error(retval,
+ FFA_ERROR_INVALID_PARAMETER);
+ return true;
+ }
+
+ if (ffa_endpoint_destination(x1) == spmd_spmc_id_get()) {
+ VERBOSE("Destination ID must not be SPMC ID.\n");
+ spmd_encode_ffa_error(retval,
+ FFA_ERROR_INVALID_PARAMETER);
+ return true;
+ }
+
+ /* Save the non-secure context before entering SPMC */
+ cm_el1_sysregs_context_save(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_save(NON_SECURE);
+#endif
+
+	/*
+	 * Perform synchronous entry into the SPMC. Synchronous entry is
+	 * required because the spec requires that a direct message request
+	 * from an SPMD LP looks like a function call from its perspective.
+	 */
+ spmd_build_direct_message_req(ctx, x1, x2, x3, x4);
+ spmd_logical_sp_set_dir_req_ongoing(ctx);
+
+ rc = spmd_spm_core_sync_entry(ctx);
+
+ spmd_logical_sp_reset_dir_req_ongoing(ctx);
+
+ if (rc != 0ULL) {
+ ERROR("%s failed (%lx) on CPU%u\n", __func__, rc,
+ plat_my_core_pos());
+ panic();
+ } else {
+ spmd_encode_ctx_to_ffa_value(ctx, retval);
+
+		/*
+		 * Only expect an error or a direct response;
+		 * spmd_spm_core_sync_exit should not be called on other paths.
+		 * The checks are asserts since the SPMD LP can fail gracefully
+		 * if the source or destination IDs are not the same; panicking
+		 * would not provide any benefit.
+		 */
+ assert(is_ffa_error(retval) || is_ffa_direct_msg_resp(retval));
+ assert(is_ffa_error(retval) ||
+ (ffa_endpoint_destination(retval->arg1) ==
+ ffa_endpoint_source(x1)));
+ assert(is_ffa_error(retval) ||
+ (ffa_endpoint_source(retval->arg1) ==
+ ffa_endpoint_destination(x1)));
+ }
+
+ cm_el1_sysregs_context_restore(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_restore(NON_SECURE);
+#endif
+ cm_set_next_eret_context(NON_SECURE);
+
+ return true;
+#else
+ return false;
+#endif
+}
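+
+/*
+ * Usage sketch (illustrative only; SPMD_LP_ID, TARGET_SP_ID, msg0 and msg1
+ * are placeholders): an SPMD logical partition handling a Group0 interrupt
+ * in EL3 could forward work to an FF-A secure partition as follows.
+ *
+ *	struct ffa_value resp;
+ *	uint64_t x1 = ((uint64_t)SPMD_LP_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+ *		      TARGET_SP_ID;
+ *
+ *	if (spmd_el3_ffa_msg_direct_req(x1, 0, msg0, msg1,
+ *					cm_get_context(NON_SECURE), &resp)) {
+ *		resp now holds FFA_ERROR or the direct response.
+ *	}
+ */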
+
+bool is_spmd_logical_sp_info_regs_req_in_progress(
+ spmd_spm_core_context_t *ctx)
+{
+#if ENABLE_SPMD_LP
+ return ((ctx->spmd_lp_sync_req_ongoing & SPMD_LP_FFA_INFO_GET_REG_ONGOING)
+ == SPMD_LP_FFA_INFO_GET_REG_ONGOING);
+#else
+ return false;
+#endif
+}
+
+bool is_spmd_logical_sp_dir_req_in_progress(
+ spmd_spm_core_context_t *ctx)
+{
+#if ENABLE_SPMD_LP
+ return ((ctx->spmd_lp_sync_req_ongoing & SPMD_LP_FFA_DIR_REQ_ONGOING)
+ == SPMD_LP_FFA_DIR_REQ_ONGOING);
+#else
+ return false;
+#endif
+}
diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c
new file mode 100644
index 0000000..066571e
--- /dev/null
+++ b/services/std_svc/spmd/spmd_main.c
@@ -0,0 +1,1292 @@
+/*
+ * Copyright (c) 2020-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <arch/aarch64/arch_features.h>
+#include <bl31/bl31.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <common/tbbr/tbbr_img_def.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/fconf/fconf.h>
+#include <lib/fconf/fconf_dyn_cfg_getter.h>
+#include <lib/smccc.h>
+#include <lib/spinlock.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+#include <services/el3_spmd_logical_sp.h>
+#include <services/ffa_svc.h>
+#include <services/spmc_svc.h>
+#include <services/spmd_svc.h>
+#include <smccc_helpers.h>
+#include "spmd_private.h"
+
+/*******************************************************************************
+ * SPM Core context information.
+ ******************************************************************************/
+static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * SPM Core attribute information is read from its manifest if the SPMC is not
+ * at EL3. Else, it is populated from the SPMC directly.
+ ******************************************************************************/
+static spmc_manifest_attribute_t spmc_attrs;
+
+/*******************************************************************************
+ * SPM Core entry point information. Discovered on the primary core and reused
+ * on secondary cores.
+ ******************************************************************************/
+static entry_point_info_t *spmc_ep_info;
+
+/*******************************************************************************
+ * SPM Core context on CPU based on mpidr.
+ ******************************************************************************/
+spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
+{
+ int core_idx = plat_core_pos_by_mpidr(mpidr);
+
+ if (core_idx < 0) {
+ ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
+ panic();
+ }
+
+ return &spm_core_context[core_idx];
+}
+
+/*******************************************************************************
+ * SPM Core context on current CPU get helper.
+ ******************************************************************************/
+spmd_spm_core_context_t *spmd_get_context(void)
+{
+ return spmd_get_context_by_mpidr(read_mpidr());
+}
+
+/*******************************************************************************
+ * SPM Core ID getter.
+ ******************************************************************************/
+uint16_t spmd_spmc_id_get(void)
+{
+ return spmc_attrs.spmc_id;
+}
+
+/*******************************************************************************
+ * Static function declaration.
+ ******************************************************************************/
+static int32_t spmd_init(void);
+static int spmd_spmc_init(void *pm_addr);
+
+static uint64_t spmd_smc_forward(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+/******************************************************************************
+ * Builds an SPMD to SPMC direct message request.
+ *****************************************************************************/
+void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
+ unsigned long long message)
+{
+ write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
+ write_ctx_reg(gpregs, CTX_GPREG_X1,
+ (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+ spmd_spmc_id_get());
+ write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
+ write_ctx_reg(gpregs, CTX_GPREG_X3, message);
+
+ /* Zero out x4-x7 for the direct request emitted towards the SPMC. */
+ write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
+}
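+
+/*
+ * Note: in the request built above, x1 encodes the SPMD as the source
+ * endpoint (upper halfword) and the SPMC as the destination (lower
+ * halfword), while x2 sets bit 31 to mark the message as a framework
+ * message and places the framework function code in the low bits.
+ */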
+
+
+/*******************************************************************************
+ * This function takes an SPMC context pointer and performs a synchronous
+ * SPMC entry.
+ ******************************************************************************/
+uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
+{
+ uint64_t rc;
+
+ assert(spmc_ctx != NULL);
+
+ cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);
+
+ /* Restore the context assigned above */
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_restore(SECURE);
+#else
+ cm_el1_sysregs_context_restore(SECURE);
+#endif
+ cm_set_next_eret_context(SECURE);
+
+ /* Enter SPMC */
+ rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);
+
+ /* Save secure state */
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_save(SECURE);
+#else
+ cm_el1_sysregs_context_save(SECURE);
+#endif
+
+ return rc;
+}
+
+/*******************************************************************************
+ * This function returns to the place where spmd_spm_core_sync_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+
+ /* Get current CPU context from SPMC context */
+ assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));
+
+	/*
+	 * The SPMD must have initiated the original request through a
+	 * synchronous entry into the SPMC. Jump back to the original C
+	 * runtime context with the value of rc in x0.
+	 */
+ spmd_spm_core_exit(ctx->c_rt_ctx, rc);
+
+ panic();
+}
+
+/*******************************************************************************
+ * Jump to the SPM Core for the first time.
+ ******************************************************************************/
+static int32_t spmd_init(void)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ uint64_t rc;
+
+ VERBOSE("SPM Core init start.\n");
+
+ /* Primary boot core enters the SPMC for initialization. */
+ ctx->state = SPMC_STATE_ON_PENDING;
+
+ rc = spmd_spm_core_sync_entry(ctx);
+ if (rc != 0ULL) {
+ ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
+ return 0;
+ }
+
+ ctx->state = SPMC_STATE_ON;
+
+ VERBOSE("SPM Core init end.\n");
+
+ spmd_logical_sp_set_spmc_initialized();
+ rc = spmd_logical_sp_init();
+ if (rc != 0) {
+ WARN("SPMD Logical partitions failed init.\n");
+ }
+
+ return 1;
+}
+
+/*******************************************************************************
+ * spmd_secure_interrupt_handler
+ * Enter the SPMC for further handling of the secure interrupt by the SPMC
+ * itself or a Secure Partition.
+ ******************************************************************************/
+static uint64_t spmd_secure_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
+ unsigned int linear_id = plat_my_core_pos();
+ int64_t rc;
+
+ /* Sanity check the security state when the exception was generated */
+ assert(get_interrupt_src_ss(flags) == NON_SECURE);
+
+ /* Sanity check the pointer to this cpu's context */
+ assert(handle == cm_get_context(NON_SECURE));
+
+ /* Save the non-secure context before entering SPMC */
+ cm_el1_sysregs_context_save(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_save(NON_SECURE);
+#endif
+
+ /* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
+ write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
+ write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
+
+ /* Mark current core as handling a secure interrupt. */
+ ctx->secure_interrupt_ongoing = true;
+
+ rc = spmd_spm_core_sync_entry(ctx);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, linear_id);
+ }
+
+ ctx->secure_interrupt_ongoing = false;
+
+ cm_el1_sysregs_context_restore(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_restore(NON_SECURE);
+#endif
+ cm_set_next_eret_context(NON_SECURE);
+
+ SMC_RET0(&ctx->cpu_ctx);
+}
+
+#if (EL3_EXCEPTION_HANDLING == 0)
+/*******************************************************************************
+ * spmd_group0_interrupt_handler_nwd
+ * Group0 secure interrupts triggered while the normal world runs are trapped
+ * to EL3. Delegate the handling of the interrupt to the platform handler, and
+ * return only upon successfully handling the Group0 interrupt.
+ ******************************************************************************/
+static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ uint32_t intid;
+
+ /* Sanity check the security state when the exception was generated. */
+ assert(get_interrupt_src_ss(flags) == NON_SECURE);
+
+ /* Sanity check the pointer to this cpu's context. */
+ assert(handle == cm_get_context(NON_SECURE));
+
+ assert(id == INTR_ID_UNAVAILABLE);
+
+ assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);
+
+ intid = plat_ic_acknowledge_interrupt();
+
+ if (plat_spmd_handle_group0_interrupt(intid) < 0) {
+ ERROR("Group0 interrupt %u not handled\n", intid);
+ panic();
+ }
+
+ /* Deactivate the corresponding Group0 interrupt. */
+ plat_ic_end_of_interrupt(intid);
+
+ return 0U;
+}
+#endif
+
+/*******************************************************************************
+ * spmd_handle_group0_intr_swd
+ * SPMC delegates handling of Group0 secure interrupt to EL3 firmware using
+ * FFA_EL3_INTR_HANDLE SMC call. Further, SPMD delegates the handling of the
+ * interrupt to the platform handler, and returns only upon successfully
+ * handling the Group0 interrupt.
+ ******************************************************************************/
+static uint64_t spmd_handle_group0_intr_swd(void *handle)
+{
+ uint32_t intid;
+
+ /* Sanity check the pointer to this cpu's context */
+ assert(handle == cm_get_context(SECURE));
+
+ assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);
+
+ intid = plat_ic_acknowledge_interrupt();
+
+ /*
+ * TODO: Currently due to a limitation in SPMD implementation, the
+ * platform handler is expected to not delegate handling to NWd while
+ * processing Group0 secure interrupt.
+ */
+ if (plat_spmd_handle_group0_interrupt(intid) < 0) {
+ /* Group0 interrupt was not handled by the platform. */
+ ERROR("Group0 interrupt %u not handled\n", intid);
+ panic();
+ }
+
+ /* Deactivate the corresponding Group0 interrupt. */
+ plat_ic_end_of_interrupt(intid);
+
+ /* Return success. */
+ SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+}
+
+#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
+static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
+ unsigned int attr, uintptr_t *align_addr,
+ size_t *align_size)
+{
+ uintptr_t base_addr_align;
+ size_t mapped_size_align;
+ int rc;
+
+ /* Page aligned address and size if necessary */
+ base_addr_align = page_align(base_addr, DOWN);
+ mapped_size_align = page_align(size, UP);
+
+ if ((base_addr != base_addr_align) &&
+ (size == mapped_size_align)) {
+ mapped_size_align += PAGE_SIZE;
+ }
+
+ /*
+ * Map dynamically given region with its aligned base address and
+ * size
+ */
+ rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
+ base_addr_align,
+ mapped_size_align,
+ attr);
+ if (rc == 0) {
+ *align_addr = base_addr_align;
+ *align_size = mapped_size_align;
+ }
+
+ return rc;
+}
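+
+/*
+ * Worked example of the alignment fix-up above (illustrative, assuming 4KB
+ * pages): for base_addr = 0x1080 and size = 0x1000, base_addr_align becomes
+ * 0x1000 and mapped_size_align stays 0x1000, yet the region to cover now
+ * ends at 0x2080, so one extra page is added and 0x2000 bytes are mapped.
+ */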
+
+static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
+ size_t size)
+{
+ uintptr_t root_base_addr_align, sec_base_addr_align;
+ size_t root_mapped_size_align, sec_mapped_size_align;
+ int rc;
+
+ assert(root_base_addr != 0UL);
+ assert(sec_base_addr != 0UL);
+ assert(size != 0UL);
+
+ /* Map the memory with required attributes */
+ rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
+ &root_base_addr_align,
+ &root_mapped_size_align);
+ if (rc != 0) {
+ ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
+ root_base_addr, rc);
+ panic();
+ }
+
+ rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
+ &sec_base_addr_align, &sec_mapped_size_align);
+ if (rc != 0) {
+ ERROR("%s %s %lu (%d)\n", "Error while mapping",
+ "secure region", sec_base_addr, rc);
+ panic();
+ }
+
+ /* Do copy operation */
+ (void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);
+
+ /* Unmap root memory region */
+ rc = mmap_remove_dynamic_region(root_base_addr_align,
+ root_mapped_size_align);
+ if (rc != 0) {
+ ERROR("%s %s %lu (%d)\n", "Error while unmapping",
+ "root region", root_base_addr_align, rc);
+ panic();
+ }
+
+ /* Unmap secure memory region */
+ rc = mmap_remove_dynamic_region(sec_base_addr_align,
+ sec_mapped_size_align);
+ if (rc != 0) {
+ ERROR("%s %s %lu (%d)\n", "Error while unmapping",
+ "secure region", sec_base_addr_align, rc);
+ panic();
+ }
+}
+#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
+
+/*******************************************************************************
+ * Loads SPMC manifest and inits SPMC.
+ ******************************************************************************/
+static int spmd_spmc_init(void *pm_addr)
+{
+ cpu_context_t *cpu_ctx;
+ unsigned int core_id;
+ uint32_t ep_attr, flags;
+ int rc;
+ const struct dyn_cfg_dtb_info_t *image_info __unused;
+
+ /* Load the SPM Core manifest */
+ rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
+ if (rc != 0) {
+ WARN("No or invalid SPM Core manifest image provided by BL2\n");
+ return rc;
+ }
+
+ /*
+ * Ensure that the SPM Core version is compatible with the SPM
+ * Dispatcher version.
+ */
+ if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
+ (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
+ WARN("Unsupported FFA version (%u.%u)\n",
+ spmc_attrs.major_version, spmc_attrs.minor_version);
+ return -EINVAL;
+ }
+
+ VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
+ spmc_attrs.minor_version);
+
+ VERBOSE("SPM Core run time EL%x.\n",
+ SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);
+
+	/* Validate the SPMC ID; ensure the secure ID bit is set. */
+ if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
+ SPMC_SECURE_ID_MASK) == 0U) {
+ WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
+ return -EINVAL;
+ }
+
+ /* Validate the SPM Core execution state */
+ if ((spmc_attrs.exec_state != MODE_RW_64) &&
+ (spmc_attrs.exec_state != MODE_RW_32)) {
+ WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
+ spmc_attrs.exec_state);
+ return -EINVAL;
+ }
+
+ VERBOSE("%s%x.\n", "SPM Core execution state 0x",
+ spmc_attrs.exec_state);
+
+#if SPMD_SPM_AT_SEL2
+ /* Ensure manifest has not requested AArch32 state in S-EL2 */
+ if (spmc_attrs.exec_state == MODE_RW_32) {
+ WARN("AArch32 state at S-EL2 is not supported.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Check if S-EL2 is supported on this system if S-EL2
+ * is required for SPM
+ */
+ if (!is_feat_sel2_supported()) {
+ WARN("SPM Core run time S-EL2 is not supported.\n");
+ return -EINVAL;
+ }
+#endif /* SPMD_SPM_AT_SEL2 */
+
+ /* Initialise an entrypoint to set up the CPU context */
+ ep_attr = SECURE | EP_ST_ENABLE;
+ if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
+ ep_attr |= EP_EE_BIG;
+ }
+
+ SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
+
+ /*
+ * Populate SPSR for SPM Core based upon validated parameters from the
+ * manifest.
+ */
+ if (spmc_attrs.exec_state == MODE_RW_32) {
+ spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
+ SPSR_E_LITTLE,
+ DAIF_FIQ_BIT |
+ DAIF_IRQ_BIT |
+ DAIF_ABT_BIT);
+ } else {
+
+#if SPMD_SPM_AT_SEL2
+ static const uint32_t runtime_el = MODE_EL2;
+#else
+ static const uint32_t runtime_el = MODE_EL1;
+#endif
+ spmc_ep_info->spsr = SPSR_64(runtime_el,
+ MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+ }
+
+#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
+ image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
+ assert(image_info != NULL);
+
+ if ((image_info->config_addr == 0UL) ||
+ (image_info->secondary_config_addr == 0UL) ||
+ (image_info->config_max_size == 0UL)) {
+ return -EINVAL;
+ }
+
+ /* Copy manifest from root->secure region */
+ spmd_do_sec_cpy(image_info->config_addr,
+ image_info->secondary_config_addr,
+ image_info->config_max_size);
+
+ /* Update ep info of BL32 */
+ assert(spmc_ep_info != NULL);
+ spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
+#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
+
+ /* Set an initial SPMC context state for all cores. */
+ for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
+ spm_core_context[core_id].state = SPMC_STATE_OFF;
+
+ /* Setup an initial cpu context for the SPMC. */
+ cpu_ctx = &spm_core_context[core_id].cpu_ctx;
+ cm_setup_context(cpu_ctx, spmc_ep_info);
+
+		/*
+		 * Pass the core linear ID to the SPMC through x4
+		 * (TF-A implementation-defined behaviour to help a
+		 * legacy TOS migrate to FF-A).
+		 */
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
+ }
+
+ /* Register power management hooks with PSCI */
+ psci_register_spd_pm_hook(&spmd_pm);
+
+ /* Register init function for deferred init. */
+ bl31_register_bl32_init(&spmd_init);
+
+ INFO("SPM Core setup done.\n");
+
+ /*
+ * Register an interrupt handler routing secure interrupts to SPMD
+ * while the NWd is running.
+ */
+ flags = 0;
+ set_interrupt_rm_flag(flags, NON_SECURE);
+ rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+ spmd_secure_interrupt_handler,
+ flags);
+ if (rc != 0) {
+ panic();
+ }
+
+	/*
+	 * Permit configurations where the SPM resides at S-EL1/2. When a
+	 * Group0 interrupt triggers while the normal world runs, the
+	 * interrupt is routed either through the EHF or directly to the SPMD:
+	 *
+	 * EL3_EXCEPTION_HANDLING=0: the Group0 interrupt is routed to the SPMD
+	 * for handling by spmd_group0_interrupt_handler_nwd.
+	 *
+	 * EL3_EXCEPTION_HANDLING=1: the Group0 interrupt is routed to the EHF.
+	 */
+#if (EL3_EXCEPTION_HANDLING == 0)
+ /*
+ * Register an interrupt handler routing Group0 interrupts to SPMD
+ * while the NWd is running.
+ */
+ rc = register_interrupt_type_handler(INTR_TYPE_EL3,
+ spmd_group0_interrupt_handler_nwd,
+ flags);
+ if (rc != 0) {
+ panic();
+ }
+#endif
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Initialize context of SPM Core.
+ ******************************************************************************/
+int spmd_setup(void)
+{
+ int rc;
+ void *spmc_manifest;
+
+	/*
+	 * If the SPMC is at EL3, then just initialise it directly. The
+	 * additional setup needed when it resides at a lower EL does not
+	 * apply.
+	 */
+ if (is_spmc_at_el3()) {
+ /* Allow the SPMC to populate its attributes directly. */
+ spmc_populate_attrs(&spmc_attrs);
+
+ rc = spmc_setup();
+ if (rc != 0) {
+ WARN("SPMC initialisation failed 0x%x.\n", rc);
+ }
+ return 0;
+ }
+
+ spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (spmc_ep_info == NULL) {
+ WARN("No SPM Core image provided by BL2 boot loader.\n");
+ return 0;
+ }
+
+ /* Under no circumstances will this parameter be 0 */
+ assert(spmc_ep_info->pc != 0ULL);
+
+ /*
+ * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
+ * be used as a manifest for the SPM Core at the next lower EL/mode.
+ */
+ spmc_manifest = (void *)spmc_ep_info->args.arg0;
+ if (spmc_manifest == NULL) {
+ WARN("Invalid or absent SPM Core manifest.\n");
+ return 0;
+ }
+
+ /* Load manifest, init SPMC */
+ rc = spmd_spmc_init(spmc_manifest);
+ if (rc != 0) {
+ WARN("Booting device without SPM initialization.\n");
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Forward FF-A SMCs to the other security state.
+ ******************************************************************************/
+uint64_t spmd_smc_switch_state(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *handle)
+{
+ unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
+ unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
+
+ /* Save incoming security state */
+#if SPMD_SPM_AT_SEL2
+ if (secure_state_in == NON_SECURE) {
+ cm_el1_sysregs_context_save(secure_state_in);
+ }
+ cm_el2_sysregs_context_save(secure_state_in);
+#else
+ cm_el1_sysregs_context_save(secure_state_in);
+#endif
+
+ /* Restore outgoing security state */
+#if SPMD_SPM_AT_SEL2
+ if (secure_state_out == NON_SECURE) {
+ cm_el1_sysregs_context_restore(secure_state_out);
+ }
+ cm_el2_sysregs_context_restore(secure_state_out);
+#else
+ cm_el1_sysregs_context_restore(secure_state_out);
+#endif
+ cm_set_next_eret_context(secure_state_out);
+
+#if SPMD_SPM_AT_SEL2
+ /*
+ * If SPMC is at SEL2, save additional registers x8-x17, which may
+ * be used in FF-A calls such as FFA_PARTITION_INFO_GET_REGS.
+ * Note that technically, all SPMCs can support this, but this code is
+ * under ifdef to minimize breakage in case other SPMCs do not save
+ * and restore x8-x17.
+ * We also need to pass through these registers since not all FF-A ABIs
+ * modify x8-x17, in which case, SMCCC requires that these registers be
+ * preserved, so the SPMD passes through these registers and expects the
+ * SPMC to save and restore (potentially also modify) them.
+ */
+ SMC_RET18(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7),
+ SMC_GET_GP(handle, CTX_GPREG_X8),
+ SMC_GET_GP(handle, CTX_GPREG_X9),
+ SMC_GET_GP(handle, CTX_GPREG_X10),
+ SMC_GET_GP(handle, CTX_GPREG_X11),
+ SMC_GET_GP(handle, CTX_GPREG_X12),
+ SMC_GET_GP(handle, CTX_GPREG_X13),
+ SMC_GET_GP(handle, CTX_GPREG_X14),
+ SMC_GET_GP(handle, CTX_GPREG_X15),
+ SMC_GET_GP(handle, CTX_GPREG_X16),
+ SMC_GET_GP(handle, CTX_GPREG_X17)
+ );
+
+#else
+ SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+#endif
+}
+
+/*******************************************************************************
+ * Forward SMCs to the other security state.
+ ******************************************************************************/
+static uint64_t spmd_smc_forward(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ if (is_spmc_at_el3() && !secure_origin) {
+ return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+ }
+ return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
+ handle);
+
+}
+
+/*******************************************************************************
+ * Return FFA_ERROR with specified error code
+ ******************************************************************************/
+uint64_t spmd_ffa_error_return(void *handle, int error_code)
+{
+ SMC_RET8(handle, (uint32_t) FFA_ERROR,
+ FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+}
+
+/*******************************************************************************
+ * spmd_check_address_in_binary_image
+ ******************************************************************************/
+bool spmd_check_address_in_binary_image(uint64_t address)
+{
+ assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));
+
+ return ((address >= spmc_attrs.load_address) &&
+ (address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
+}
+
+/******************************************************************************
+ * spmd_is_spmc_message
+ *****************************************************************************/
+static bool spmd_is_spmc_message(unsigned int ep)
+{
+ if (is_spmc_at_el3()) {
+ return false;
+ }
+
+ return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
+ && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
+}
+
+/******************************************************************************
+ * spmd_handle_spmc_message
+ *****************************************************************************/
+static int spmd_handle_spmc_message(unsigned long long msg,
+ unsigned long long parm1, unsigned long long parm2,
+ unsigned long long parm3, unsigned long long parm4)
+{
+ VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
+ msg, parm1, parm2, parm3, parm4);
+
+ return -EINVAL;
+}
+
+/*******************************************************************************
+ * This function forwards FF-A SMCs to either the main SPMD handler or the
+ * SPMC at EL3, depending on the origin security state, if enabled.
+ ******************************************************************************/
+uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ if (is_spmc_at_el3()) {
+ /*
+ * If we have an SPMC at EL3 allow handling of the SMC first.
+ * The SPMC will call back through to SPMD handler if required.
+ */
+ if (is_caller_secure(flags)) {
+ return spmc_smc_handler(smc_fid,
+ is_caller_secure(flags),
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+ }
+ return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+ handle, flags);
+}
+
+/*******************************************************************************
+ * This function handles all SMCs in the range reserved for FFA. Each call is
+ * either forwarded to the other security state or handled by the SPM dispatcher
+ ******************************************************************************/
+uint64_t spmd_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ unsigned int linear_id = plat_my_core_pos();
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ bool secure_origin;
+ int32_t ret;
+ uint32_t input_version;
+
+ /* Determine which security state this SMC originated from */
+ secure_origin = is_caller_secure(flags);
+
+ VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
+ " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
+ linear_id, smc_fid, x1, x2, x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+
+	/*
+	 * If an FFA_PARTITION_INFO_GET_REGS request from an EL3 SPMD LP is
+	 * ongoing, return unconditionally; we do not expect any other FF-A
+	 * ABIs to be invoked between calls to FFA_PARTITION_INFO_GET_REGS.
+	 */
+ if (is_spmd_logical_sp_info_regs_req_in_progress(ctx)) {
+ assert(secure_origin);
+ spmd_spm_core_sync_exit(0ULL);
+ }
+
+ switch (smc_fid) {
+ case FFA_ERROR:
+ /*
+ * Check if this is the first invocation of this interface on
+ * this CPU. If so, then indicate that the SPM Core initialised
+ * unsuccessfully.
+ */
+ if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
+ spmd_spm_core_sync_exit(x2);
+ }
+
+		/*
+		 * If an SPMD logical partition direct request is ongoing,
+		 * return to the SPMD logical partition so the error can be
+		 * consumed.
+		 */
+ if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
+ assert(secure_origin);
+ spmd_spm_core_sync_exit(0ULL);
+ }
+
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ break; /* not reached */
+
+ case FFA_VERSION:
+ input_version = (uint32_t)(0xFFFFFFFF & x1);
+		/*
+		 * If the caller is secure and the SPMC was initialized,
+		 * return the FFA_VERSION of the SPMD.
+		 * If the caller is non-secure and the SPMC was initialized,
+		 * forward to the EL3 SPMC if enabled, otherwise return
+		 * the SPMC version implemented at a lower EL.
+		 * Sanity check "input_version".
+		 * If the EL3 SPMC is enabled, ignore the SPMC state as
+		 * it is not used.
+		 */
+ if ((input_version & FFA_VERSION_BIT31_MASK) ||
+ (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
+ ret = FFA_ERROR_NOT_SUPPORTED;
+ } else if (!secure_origin) {
+ if (is_spmc_at_el3()) {
+ /*
+ * Forward the call directly to the EL3 SPMC, if
+ * enabled, as we don't need to wrap the call in
+ * a direct request.
+ */
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+
+ gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
+ uint64_t rc;
+
+ if (spmc_attrs.major_version == 1 &&
+ spmc_attrs.minor_version == 0) {
+ ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
+ spmc_attrs.minor_version);
+ SMC_RET8(handle, (uint32_t)ret,
+ FFA_TARGET_INFO_MBZ,
+ FFA_TARGET_INFO_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ break;
+ }
+ /* Save non-secure system registers context */
+ cm_el1_sysregs_context_save(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_save(NON_SECURE);
+#endif
+
+ /*
+ * The incoming request has FFA_VERSION as X0 smc_fid
+ * and requested version in x1. Prepare a direct request
+ * from SPMD to SPMC with FFA_VERSION framework function
+ * identifier in X2 and requested version in X3.
+ */
+ spmd_build_spmc_message(gpregs,
+ SPMD_FWK_MSG_FFA_VERSION_REQ,
+ input_version);
+
+ /*
+ * Ensure x8-x17 NS GP register values are untouched when returning
+ * from the SPMC.
+ */
+ write_ctx_reg(gpregs, CTX_GPREG_X8, SMC_GET_GP(handle, CTX_GPREG_X8));
+ write_ctx_reg(gpregs, CTX_GPREG_X9, SMC_GET_GP(handle, CTX_GPREG_X9));
+ write_ctx_reg(gpregs, CTX_GPREG_X10, SMC_GET_GP(handle, CTX_GPREG_X10));
+ write_ctx_reg(gpregs, CTX_GPREG_X11, SMC_GET_GP(handle, CTX_GPREG_X11));
+ write_ctx_reg(gpregs, CTX_GPREG_X12, SMC_GET_GP(handle, CTX_GPREG_X12));
+ write_ctx_reg(gpregs, CTX_GPREG_X13, SMC_GET_GP(handle, CTX_GPREG_X13));
+ write_ctx_reg(gpregs, CTX_GPREG_X14, SMC_GET_GP(handle, CTX_GPREG_X14));
+ write_ctx_reg(gpregs, CTX_GPREG_X15, SMC_GET_GP(handle, CTX_GPREG_X15));
+ write_ctx_reg(gpregs, CTX_GPREG_X16, SMC_GET_GP(handle, CTX_GPREG_X16));
+ write_ctx_reg(gpregs, CTX_GPREG_X17, SMC_GET_GP(handle, CTX_GPREG_X17));
+
+ rc = spmd_spm_core_sync_entry(ctx);
+
+ if ((rc != 0ULL) ||
+ (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
+ FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
+ (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
+ (FFA_FWK_MSG_BIT |
+ SPMD_FWK_MSG_FFA_VERSION_RESP))) {
+ ERROR("Failed to forward FFA_VERSION\n");
+ ret = FFA_ERROR_NOT_SUPPORTED;
+ } else {
+ ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
+ }
+
+ /*
+ * x0-x4 are updated by spmd_smc_forward below.
+ * Zero out x5-x7 in the FFA_VERSION response.
+ */
+ write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
+
+ /*
+ * Return here after SPMC has handled FFA_VERSION.
+ * The returned SPMC version is held in X3.
+ * Forward this version in X0 to the non-secure caller.
+ */
+ return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, cookie, gpregs,
+ flags);
+ } else {
+ ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
+ FFA_VERSION_MINOR);
+ }
+
+ SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
+ FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+ break; /* not reached */
+
+ case FFA_FEATURES:
+ /*
+ * This is an optional interface. Do the minimal checks and
+ * forward to SPM Core which will handle it if implemented.
+ */
+
+ /* Forward SMC from Normal world to the SPM Core */
+ if (!secure_origin) {
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+
+ /*
+		 * Return success if the call was from the Secure world, i.e.
+		 * all FF-A functions are supported. This is essentially a
+		 * nop.
+ */
+ SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+
+ break; /* not reached */
+
+ case FFA_ID_GET:
+ /*
+ * Returns the ID of the calling FFA component.
+ */
+ if (!secure_origin) {
+ SMC_RET8(handle, FFA_SUCCESS_SMC32,
+ FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ }
+
+ SMC_RET8(handle, FFA_SUCCESS_SMC32,
+ FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+
+ break; /* not reached */
+
+ case FFA_SECONDARY_EP_REGISTER_SMC64:
+ if (secure_origin) {
+ ret = spmd_pm_secondary_ep_register(x1);
+
+ if (ret < 0) {
+ SMC_RET8(handle, FFA_ERROR_SMC64,
+ FFA_TARGET_INFO_MBZ, ret,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ } else {
+ SMC_RET8(handle, FFA_SUCCESS_SMC64,
+ FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ }
+ }
+
+ return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ break; /* Not reached */
+
+ case FFA_SPM_ID_GET:
+ if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+ /*
+ * Returns the ID of the SPMC or SPMD depending on the FF-A
+		 * instance where this function is invoked.
+ */
+ if (!secure_origin) {
+ SMC_RET8(handle, FFA_SUCCESS_SMC32,
+ FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ }
+ SMC_RET8(handle, FFA_SUCCESS_SMC32,
+ FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+
+ break; /* not reached */
+
+ case FFA_MSG_SEND_DIRECT_REQ_SMC32:
+ case FFA_MSG_SEND_DIRECT_REQ_SMC64:
+ /*
+ * Regardless of secure_origin, SPMD logical partitions cannot
+ * handle direct messages. They can only initiate direct
+ * messages and consume direct responses or errors.
+ */
+ if (is_spmd_lp_id(ffa_endpoint_source(x1)) ||
+ is_spmd_lp_id(ffa_endpoint_destination(x1))) {
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER
+ );
+ }
+
+ /*
+		 * When an SPMD logical partition direct request is ongoing,
+		 * another direct request cannot be accepted. Return an error
+		 * in this case. Panicking is an option, but it would not give
+		 * the caller the opportunity to abort based on error codes.
+ */
+ if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
+ assert(secure_origin);
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_DENIED);
+ }
+
+ if (!secure_origin) {
+ /* Validate source endpoint is non-secure for non-secure caller. */
+ if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+ }
+ if (secure_origin && spmd_is_spmc_message(x1)) {
+ ret = spmd_handle_spmc_message(x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+
+ SMC_RET8(handle, FFA_SUCCESS_SMC32,
+ FFA_TARGET_INFO_MBZ, ret,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ } else {
+ /* Forward direct message to the other world */
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+ break; /* Not reached */
+
+ case FFA_MSG_SEND_DIRECT_RESP_SMC32:
+ case FFA_MSG_SEND_DIRECT_RESP_SMC64:
+ if (secure_origin && (spmd_is_spmc_message(x1) ||
+ is_spmd_logical_sp_dir_req_in_progress(ctx))) {
+ spmd_spm_core_sync_exit(0ULL);
+ } else {
+ /* Forward direct message to the other world */
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+ break; /* Not reached */
+
+ case FFA_RX_RELEASE:
+ case FFA_RXTX_MAP_SMC32:
+ case FFA_RXTX_MAP_SMC64:
+ case FFA_RXTX_UNMAP:
+ case FFA_PARTITION_INFO_GET:
+#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
+ case FFA_NOTIFICATION_BITMAP_CREATE:
+ case FFA_NOTIFICATION_BITMAP_DESTROY:
+ case FFA_NOTIFICATION_BIND:
+ case FFA_NOTIFICATION_UNBIND:
+ case FFA_NOTIFICATION_SET:
+ case FFA_NOTIFICATION_GET:
+ case FFA_NOTIFICATION_INFO_GET:
+ case FFA_NOTIFICATION_INFO_GET_SMC64:
+ case FFA_MSG_SEND2:
+ case FFA_RX_ACQUIRE:
+#endif
+ case FFA_MSG_RUN:
+ /*
+		 * The above calls must be invoked only by the Normal world and
+		 * must not be forwarded from the Secure world to the Normal world.
+ */
+ if (secure_origin) {
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Forward the call to the other world */
+ /* fallthrough */
+ case FFA_MSG_SEND:
+ case FFA_MEM_DONATE_SMC32:
+ case FFA_MEM_DONATE_SMC64:
+ case FFA_MEM_LEND_SMC32:
+ case FFA_MEM_LEND_SMC64:
+ case FFA_MEM_SHARE_SMC32:
+ case FFA_MEM_SHARE_SMC64:
+ case FFA_MEM_RETRIEVE_REQ_SMC32:
+ case FFA_MEM_RETRIEVE_REQ_SMC64:
+ case FFA_MEM_RETRIEVE_RESP:
+ case FFA_MEM_RELINQUISH:
+ case FFA_MEM_RECLAIM:
+ case FFA_MEM_FRAG_TX:
+ case FFA_MEM_FRAG_RX:
+ case FFA_SUCCESS_SMC32:
+ case FFA_SUCCESS_SMC64:
+ /*
+ * If there is an ongoing direct request from an SPMD logical
+ * partition, return an error.
+ */
+ if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
+ assert(secure_origin);
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_DENIED);
+ }
+
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ break; /* not reached */
+
+ case FFA_MSG_WAIT:
+ /*
+ * Check if this is the first invocation of this interface on
+ * this CPU from the Secure world. If so, then indicate that the
+ * SPM Core initialised successfully.
+ */
+ if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
+ spmd_spm_core_sync_exit(0ULL);
+ }
+
+ /* Forward the call to the other world */
+ /* fallthrough */
+ case FFA_INTERRUPT:
+ case FFA_MSG_YIELD:
+		/* These interfaces must be invoked only by the Secure world */
+ if (!secure_origin) {
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
+ assert(secure_origin);
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_DENIED);
+ }
+
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ break; /* not reached */
+
+ case FFA_NORMAL_WORLD_RESUME:
+ if (secure_origin && ctx->secure_interrupt_ongoing) {
+ spmd_spm_core_sync_exit(0ULL);
+ } else {
+ return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+ break; /* Not reached */
+#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
+ case FFA_PARTITION_INFO_GET_REGS_SMC64:
+ if (secure_origin) {
+ return spmd_el3_populate_logical_partition_info(handle, x1,
+ x2, x3);
+ }
+
+ /* Call only supported with SMCCC 1.2+ */
+ if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) {
+ return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ break; /* Not reached */
+#endif
+ case FFA_EL3_INTR_HANDLE:
+ if (secure_origin) {
+ return spmd_handle_group0_intr_swd(handle);
+ } else {
+ return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+ default:
+ WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
+ return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+}
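
The FFA_VERSION arm above rejects requests with bit 31 set and builds its own
reply with MAKE_FFA_VERSION(). Below is a minimal stand-alone sketch of that
32-bit version encoding; the macros are local assumptions mirroring the FF-A
layout (major in bits 30:16, minor in bits 15:0), not the project's headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed local mirrors of the FF-A version word layout. */
#define FFA_VERSION_BIT31_MASK		(1U << 31)
#define MAKE_FFA_VERSION(maj, min)	((uint32_t)(((maj) << 16) | (min)))
#define FFA_VERSION_MAJOR(v)		(((v) >> 16) & 0x7FFFU)
#define FFA_VERSION_MINOR(v)		((v) & 0xFFFFU)

int main(void)
{
	uint32_t input_version = MAKE_FFA_VERSION(1, 1);

	/* Mirrors the handler's sanity check: bit 31 must be zero. */
	if ((input_version & FFA_VERSION_BIT31_MASK) != 0U) {
		printf("FFA_ERROR_NOT_SUPPORTED\n");
		return 1;
	}

	printf("requested FF-A v%u.%u\n",
	       FFA_VERSION_MAJOR(input_version),
	       FFA_VERSION_MINOR(input_version));
	return 0;
}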
diff --git a/services/std_svc/spmd/spmd_pm.c b/services/std_svc/spmd/spmd_pm.c
new file mode 100644
index 0000000..fd89c81
--- /dev/null
+++ b/services/std_svc/spmd/spmd_pm.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdint.h>
+
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/spinlock.h>
+#include "spmd_private.h"
+
+static struct {
+ bool secondary_ep_locked;
+ uintptr_t secondary_ep;
+ spinlock_t lock;
+} g_spmd_pm;
+
+/*******************************************************************************
+ * spmd_pm_secondary_ep_register
+ ******************************************************************************/
+int spmd_pm_secondary_ep_register(uintptr_t entry_point)
+{
+ int ret = FFA_ERROR_INVALID_PARAMETER;
+
+ spin_lock(&g_spmd_pm.lock);
+
+ if (g_spmd_pm.secondary_ep_locked == true) {
+ goto out;
+ }
+
+ /*
+	 * Check that the entry_point address is a PA within
+	 * load_address <= entry_point < load_address + binary_size.
+ */
+ if (!spmd_check_address_in_binary_image(entry_point)) {
+ ERROR("%s entry point is not within image boundaries\n",
+ __func__);
+ goto out;
+ }
+
+ g_spmd_pm.secondary_ep = entry_point;
+ g_spmd_pm.secondary_ep_locked = true;
+
+ VERBOSE("%s %lx\n", __func__, entry_point);
+
+ ret = 0;
+
+out:
+ spin_unlock(&g_spmd_pm.lock);
+
+ return ret;
+}
+
+/*******************************************************************************
+ * This CPU has been turned on. Enter the SPMC to initialise S-EL1 or S-EL2. As
+ * part of its initialization path, the SPMC will initialize any SPs that it
+ * manages. Entry into the SPMC is done after initialising the minimal
+ * architectural state that guarantees safe execution.
+ ******************************************************************************/
+static void spmd_cpu_on_finish_handler(u_register_t unused)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ unsigned int linear_id = plat_my_core_pos();
+ el3_state_t *el3_state;
+ uintptr_t entry_point;
+ uint64_t rc;
+
+ assert(ctx != NULL);
+ assert(ctx->state != SPMC_STATE_ON);
+
+ spin_lock(&g_spmd_pm.lock);
+
+ /*
+	 * Allow for the possibility that the SPMC did not call
+	 * FFA_SECONDARY_EP_REGISTER, in which case the primary core entry
+	 * point is re-used for booting secondary cores.
+ */
+ if (g_spmd_pm.secondary_ep_locked == true) {
+ /*
+ * The CPU context has already been initialized at boot time
+		 * (in spmd_spmc_init by a call to cm_setup_context). Adjust
+		 * the target core entry point below, based on the address
+		 * passed to FFA_SECONDARY_EP_REGISTER.
+ */
+ entry_point = g_spmd_pm.secondary_ep;
+ el3_state = get_el3state_ctx(&ctx->cpu_ctx);
+ write_ctx_reg(el3_state, CTX_ELR_EL3, entry_point);
+ }
+
+ spin_unlock(&g_spmd_pm.lock);
+
+ /* Mark CPU as initiating ON operation. */
+ ctx->state = SPMC_STATE_ON_PENDING;
+
+ rc = spmd_spm_core_sync_entry(ctx);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%" PRIu64 ") on CPU%u\n", __func__, rc,
+ linear_id);
+ ctx->state = SPMC_STATE_OFF;
+ return;
+ }
+
+ ctx->state = SPMC_STATE_ON;
+
+ VERBOSE("CPU %u on!\n", linear_id);
+}
+
+/*******************************************************************************
+ * spmd_cpu_off_handler
+ ******************************************************************************/
+static int32_t spmd_cpu_off_handler(u_register_t unused)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ unsigned int linear_id = plat_my_core_pos();
+ int64_t rc;
+
+ assert(ctx != NULL);
+ assert(ctx->state != SPMC_STATE_OFF);
+
+ /* Build an SPMD to SPMC direct message request. */
+ gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
+ spmd_build_spmc_message(gpregs, FFA_FWK_MSG_PSCI, PSCI_CPU_OFF);
+
+ /* Clear remaining x8 - x17 at EL3/SEL2 or EL3/SEL1 boundary. */
+ write_ctx_reg(gpregs, CTX_GPREG_X8, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X9, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X10, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X11, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X12, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X13, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X14, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X15, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X16, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X17, 0);
+
+ rc = spmd_spm_core_sync_entry(ctx);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%" PRIu64 ") on CPU%u\n", __func__, rc, linear_id);
+ }
+
+ /* Expect a direct message response from the SPMC. */
+ u_register_t ffa_resp_func = read_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx),
+ CTX_GPREG_X0);
+ if (ffa_resp_func != FFA_MSG_SEND_DIRECT_RESP_SMC32) {
+ ERROR("%s invalid SPMC response (%lx).\n",
+ __func__, ffa_resp_func);
+ return -EINVAL;
+ }
+
+ ctx->state = SPMC_STATE_OFF;
+
+ VERBOSE("CPU %u off!\n", linear_id);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Structure populated by the SPM Dispatcher to perform any bookkeeping before
+ * PSCI executes a power management operation.
+ ******************************************************************************/
+const spd_pm_ops_t spmd_pm = {
+ .svc_on_finish = spmd_cpu_on_finish_handler,
+ .svc_off = spmd_cpu_off_handler
+};
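
For context, a hedged sketch of the SPMC side that pairs with
spmd_pm_secondary_ep_register() above: the SPMC registers its secondary entry
point once at boot so that later PSCI CPU_ON events enter it at that address.
The smc64() conduit, the entry symbol and the function ID value used here are
assumptions for illustration only.

#include <stdint.h>

/* Assumed FF-A function ID for FFA_SECONDARY_EP_REGISTER (SMC64). */
#define FFA_SECONDARY_EP_REGISTER_SMC64	0xC4000087U

/* Hypothetical SMC conduit and SPMC secondary entry symbol. */
extern uint64_t smc64(uint32_t fid, uint64_t x1, uint64_t x2, uint64_t x3);
extern void spmc_secondary_entry(void);

void spmc_register_secondary_ep(void)
{
	/*
	 * Called once during SPMC boot. The SPMD validates the address
	 * against the SPMC image boundaries and stores it; CPU_ON then
	 * enters the SPMC at this address instead of the primary entry.
	 */
	(void)smc64(FFA_SECONDARY_EP_REGISTER_SMC64,
		    (uint64_t)(uintptr_t)&spmc_secondary_entry, 0U, 0U);
}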
diff --git a/services/std_svc/spmd/spmd_private.h b/services/std_svc/spmd/spmd_private.h
new file mode 100644
index 0000000..fef7ef6
--- /dev/null
+++ b/services/std_svc/spmd/spmd_private.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMD_PRIVATE_H
+#define SPMD_PRIVATE_H
+
+#include <common/bl_common.h>
+#include <context.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define SPMD_C_RT_CTX_X19 0x0
+#define SPMD_C_RT_CTX_X20 0x8
+#define SPMD_C_RT_CTX_X21 0x10
+#define SPMD_C_RT_CTX_X22 0x18
+#define SPMD_C_RT_CTX_X23 0x20
+#define SPMD_C_RT_CTX_X24 0x28
+#define SPMD_C_RT_CTX_X25 0x30
+#define SPMD_C_RT_CTX_X26 0x38
+#define SPMD_C_RT_CTX_X27 0x40
+#define SPMD_C_RT_CTX_X28 0x48
+#define SPMD_C_RT_CTX_X29 0x50
+#define SPMD_C_RT_CTX_X30 0x58
+
+#define SPMD_C_RT_CTX_SIZE 0x60
+#define SPMD_C_RT_CTX_ENTRIES (SPMD_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+#include <stdint.h>
+#include <lib/psci/psci_lib.h>
+#include <plat/common/platform.h>
+#include <services/ffa_svc.h>
+
+typedef enum spmc_state {
+ SPMC_STATE_RESET = 0,
+ SPMC_STATE_OFF,
+ SPMC_STATE_ON_PENDING,
+ SPMC_STATE_ON
+} spmc_state_t;
+
+/*
+ * Data structure used by the SPM dispatcher (SPMD) in EL3 to track context of
+ * the SPM core (SPMC) at the next lower EL.
+ */
+typedef struct spmd_spm_core_context {
+ uint64_t c_rt_ctx;
+ cpu_context_t cpu_ctx;
+ spmc_state_t state;
+ bool secure_interrupt_ongoing;
+#if ENABLE_SPMD_LP
+ uint8_t spmd_lp_sync_req_ongoing;
+#endif
+} spmd_spm_core_context_t;
+
+/* Flags to indicate ongoing requests for SPMD EL3 logical partitions */
+#define SPMD_LP_FFA_DIR_REQ_ONGOING U(0x1)
+#define SPMD_LP_FFA_INFO_GET_REG_ONGOING U(0x2)
+
+/*
+ * Reserved ID for the NS physical FF-A endpoint.
+ */
+#define FFA_NS_ENDPOINT_ID U(0)
+
+/* Define SPMD target function IDs for framework messages to the SPMC */
+#define SPMD_FWK_MSG_FFA_VERSION_REQ U(0x8)
+#define SPMD_FWK_MSG_FFA_VERSION_RESP U(0x9)
+
+/* Function to build SPMD to SPMC message */
+void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target,
+ unsigned long long message);
+
+/* Functions used to enter/exit SPMC synchronously */
+uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *ctx);
+__dead2 void spmd_spm_core_sync_exit(uint64_t rc);
+
+/* Assembly helpers */
+uint64_t spmd_spm_core_enter(uint64_t *c_rt_ctx);
+void __dead2 spmd_spm_core_exit(uint64_t c_rt_ctx, uint64_t ret);
+
+/* SPMD SPD power management handlers */
+extern const spd_pm_ops_t spmd_pm;
+
+/* SPMC entry point information helper */
+entry_point_info_t *spmd_spmc_ep_info_get(void);
+
+/* SPMC ID getter */
+uint16_t spmd_spmc_id_get(void);
+
+/* Get the SPMC context for the CPU identified by mpidr */
+spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr);
+
+/* Get the SPMC context for the current CPU */
+spmd_spm_core_context_t *spmd_get_context(void);
+
+int spmd_pm_secondary_ep_register(uintptr_t entry_point);
+bool spmd_check_address_in_binary_image(uint64_t address);
+
+/*
+ * Platform hook in EL3 firmware to handle a Group0 secure interrupt.
+ * Return values:
+ *  0 = success
+ *  a negative value otherwise
+ */
+int plat_spmd_handle_group0_interrupt(uint32_t id);
+
+uint64_t spmd_ffa_error_return(void *handle, int error_code);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* SPMD_PRIVATE_H */
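
A minimal sketch of the plat_spmd_handle_group0_interrupt() hook declared
above, following the documented return contract; the interrupt ID and the
handling body are placeholders a real platform port would replace.

#include <errno.h>
#include <stdint.h>

#define PLAT_G0_INTR_ID	9U	/* hypothetical platform-owned interrupt */

int plat_spmd_handle_group0_interrupt(uint32_t intid)
{
	if (intid != PLAT_G0_INTR_ID) {
		/* Unknown source: negative value per the contract above. */
		return -EINVAL;
	}

	/* Platform-specific acknowledgement/handling would go here. */
	return 0;
}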
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
new file mode 100644
index 0000000..e782d09
--- /dev/null
+++ b/services/std_svc/std_svc_setup.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdint.h>
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/pmf/pmf.h>
+#include <lib/psci/psci.h>
+#include <lib/runtime_instr.h>
+#include <services/drtm_svc.h>
+#include <services/errata_abi_svc.h>
+#include <services/pci_svc.h>
+#include <services/rmmd_svc.h>
+#include <services/sdei.h>
+#include <services/spm_mm_svc.h>
+#include <services/spmc_svc.h>
+#include <services/spmd_svc.h>
+#include <services/std_svc.h>
+#include <services/trng_svc.h>
+#include <smccc_helpers.h>
+#include <tools_share/uuid.h>
+
+/* Standard Service UUID */
+static uuid_t arm_svc_uid = {
+ {0x5b, 0x90, 0x8d, 0x10},
+ {0x63, 0xf8},
+ {0xe8, 0x47},
+ 0xae, 0x2d,
+ {0xc0, 0xfb, 0x56, 0x41, 0xf6, 0xe2}
+};
+
+/* Setup Standard Services */
+static int32_t std_svc_setup(void)
+{
+ uintptr_t svc_arg;
+ int ret = 0;
+
+ svc_arg = get_arm_std_svc_args(PSCI_FID_MASK);
+ assert(svc_arg);
+
+ /*
+ * PSCI is one of the specifications implemented as a Standard Service.
+ * The `psci_setup()` also does EL3 architectural setup.
+ */
+ if (psci_setup((const psci_lib_args_t *)svc_arg) != PSCI_E_SUCCESS) {
+ ret = 1;
+ }
+
+#if SPM_MM
+ if (spm_mm_setup() != 0) {
+ ret = 1;
+ }
+#endif
+
+#if defined(SPD_spmd)
+ if (spmd_setup() != 0) {
+ ret = 1;
+ }
+#endif
+
+#if ENABLE_RME
+ if (rmmd_setup() != 0) {
+ ret = 1;
+ }
+#endif
+
+#if SDEI_SUPPORT
+ /* SDEI initialisation */
+ sdei_init();
+#endif
+
+#if TRNG_SUPPORT
+ /* TRNG initialisation */
+ trng_setup();
+#endif /* TRNG_SUPPORT */
+
+#if DRTM_SUPPORT
+ if (drtm_setup() != 0) {
+ ret = 1;
+ }
+#endif /* DRTM_SUPPORT */
+
+ return ret;
+}
+
+/*
+ * Top-level Standard Service SMC handler. This handler will in turn dispatch
+ * calls to the PSCI SMC handler.
+ */
+static uintptr_t std_svc_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
+ /* 32-bit SMC function, clear top parameter bits */
+
+ x1 &= UINT32_MAX;
+ x2 &= UINT32_MAX;
+ x3 &= UINT32_MAX;
+ x4 &= UINT32_MAX;
+ }
+
+ /*
+ * Dispatch PSCI calls to PSCI SMC handler and return its return
+ * value
+ */
+ if (is_psci_fid(smc_fid)) {
+ uint64_t ret;
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+
+ /*
+ * Flush cache line so that even if CPU power down happens
+ * the timestamp update is reflected in memory.
+ */
+ PMF_WRITE_TIMESTAMP(rt_instr_svc,
+ RT_INSTR_ENTER_PSCI,
+ PMF_CACHE_MAINT,
+ get_cpu_data(cpu_data_pmf_ts[CPU_DATA_PMF_TS0_IDX]));
+#endif
+
+ ret = psci_smc_handler(smc_fid, x1, x2, x3, x4,
+ cookie, handle, flags);
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+ RT_INSTR_EXIT_PSCI,
+ PMF_NO_CACHE_MAINT);
+#endif
+
+ SMC_RET1(handle, ret);
+ }
+
+#if SPM_MM
+ /*
+ * Dispatch SPM calls to SPM SMC handler and return its return
+ * value
+ */
+ if (is_spm_mm_fid(smc_fid)) {
+ return spm_mm_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+#endif
+
+#if defined(SPD_spmd)
+ /*
+ * Dispatch FFA calls to the FFA SMC handler implemented by the SPM
+ * dispatcher and return its return value
+ */
+ if (is_ffa_fid(smc_fid)) {
+ return spmd_ffa_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+#endif
+
+#if SDEI_SUPPORT
+ if (is_sdei_fid(smc_fid)) {
+ return sdei_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
+ flags);
+ }
+#endif
+
+#if TRNG_SUPPORT
+ if (is_trng_fid(smc_fid)) {
+ return trng_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
+ flags);
+ }
+#endif /* TRNG_SUPPORT */
+
+#if ERRATA_ABI_SUPPORT
+ if (is_errata_fid(smc_fid)) {
+ return errata_abi_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+#endif /* ERRATA_ABI_SUPPORT */
+
+#if ENABLE_RME
+
+ if (is_rmmd_el3_fid(smc_fid)) {
+ return rmmd_rmm_el3_handler(smc_fid, x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+
+ if (is_rmi_fid(smc_fid)) {
+ return rmmd_rmi_handler(smc_fid, x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+#endif
+
+#if SMC_PCI_SUPPORT
+ if (is_pci_fid(smc_fid)) {
+ return pci_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
+ flags);
+ }
+#endif
+
+#if DRTM_SUPPORT
+ if (is_drtm_fid(smc_fid)) {
+ return drtm_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
+ flags);
+ }
+#endif /* DRTM_SUPPORT */
+
+ switch (smc_fid) {
+ case ARM_STD_SVC_CALL_COUNT:
+ /*
+ * Return the number of Standard Service Calls. PSCI is the only
+		 * standard service implemented, so return the number of PSCI calls.
+ */
+ SMC_RET1(handle, PSCI_NUM_CALLS);
+
+ case ARM_STD_SVC_UID:
+ /* Return UID to the caller */
+ SMC_UUID_RET(handle, arm_svc_uid);
+
+ case ARM_STD_SVC_VERSION:
+ /* Return the version of current implementation */
+ SMC_RET2(handle, STD_SVC_VERSION_MAJOR, STD_SVC_VERSION_MINOR);
+
+ default:
+		VERBOSE("Unimplemented Standard Service Call: 0x%x\n", smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
+
+/* Register Standard Service Calls as runtime service */
+DECLARE_RT_SVC(
+ std_svc,
+
+ OEN_STD_START,
+ OEN_STD_END,
+ SMC_TYPE_FAST,
+ std_svc_setup,
+ std_svc_smc_handler
+);
diff --git a/services/std_svc/trng/trng_entropy_pool.c b/services/std_svc/trng/trng_entropy_pool.c
new file mode 100644
index 0000000..dd08c5e
--- /dev/null
+++ b/services/std_svc/trng/trng_entropy_pool.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2021-2022, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <lib/spinlock.h>
+#include <plat/common/plat_trng.h>
+
+/*
+ * # Entropy pool
+ * Note that the TRNG firmware interface can request up to 192 bits of entropy
+ * in a single call, i.e. three 64-bit words per call. We keep 4 words in the
+ * pool so that when 1-63 bits are left in the pool and a request for 192 bits
+ * of entropy arrives, the leftover 1-63 bits of entropy do not have to be
+ * thrown away.
+ */
+#define WORDS_IN_POOL (4)
+static uint64_t entropy[WORDS_IN_POOL];
+/* index in bits of the first bit of usable entropy */
+static uint32_t entropy_bit_index;
+/* the number of valid bits in the entropy pool */
+static uint32_t entropy_bit_size;
+
+static spinlock_t trng_pool_lock;
+
+#define BITS_PER_WORD (sizeof(entropy[0]) * 8)
+#define BITS_IN_POOL (WORDS_IN_POOL * BITS_PER_WORD)
+#define ENTROPY_MIN_WORD (entropy_bit_index / BITS_PER_WORD)
+#define ENTROPY_FREE_BIT (entropy_bit_size + entropy_bit_index)
+#define _ENTROPY_FREE_WORD (ENTROPY_FREE_BIT / BITS_PER_WORD)
+#define ENTROPY_FREE_INDEX (_ENTROPY_FREE_WORD % WORDS_IN_POOL)
+/* ENTROPY_WORD_INDEX(0) includes leftover bits in the lower bits */
+#define ENTROPY_WORD_INDEX(i) ((ENTROPY_MIN_WORD + i) % WORDS_IN_POOL)
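+
+/*
+ * Illustrative worked example of the circular indexing above: with
+ * BITS_PER_WORD = 64, entropy_bit_index = 70 and entropy_bit_size = 122,
+ *   ENTROPY_MIN_WORD      = 70 / 64      = 1
+ *   ENTROPY_FREE_BIT      = 122 + 70     = 192
+ *   _ENTROPY_FREE_WORD    = 192 / 64     = 3
+ *   ENTROPY_FREE_INDEX    = 3 % 4        = 3
+ *   ENTROPY_WORD_INDEX(0) = (1 + 0) % 4  = 1
+ * i.e. the oldest usable entropy bit is bit 6 of entropy[1], and the next
+ * fill targets entropy[3].
+ */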
+
+/*
+ * Fill the entropy pool until we have at least as many bits as requested.
+ * Returns true after filling the pool, and false if the entropy source is out
+ * of entropy and the pool could not be filled.
+ * Assumes locks are taken.
+ */
+static bool trng_fill_entropy(uint32_t nbits)
+{
+ while (nbits > entropy_bit_size) {
+ bool valid = plat_get_entropy(&entropy[ENTROPY_FREE_INDEX]);
+
+ if (valid) {
+ entropy_bit_size += BITS_PER_WORD;
+ assert(entropy_bit_size <= BITS_IN_POOL);
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
+
+/*
+ * Pack entropy into the out buffer, filling and taking locks as needed.
+ * Returns true on success, false on failure.
+ *
+ * Note: out must have enough space for nbits of entropy
+ */
+bool trng_pack_entropy(uint32_t nbits, uint64_t *out)
+{
+ bool ret = true;
+ uint32_t bits_to_discard = nbits;
+ spin_lock(&trng_pool_lock);
+
+ if (!trng_fill_entropy(nbits)) {
+ ret = false;
+ goto out;
+ }
+
+ const unsigned int rshift = entropy_bit_index % BITS_PER_WORD;
+ const unsigned int lshift = BITS_PER_WORD - rshift;
+ const int to_fill = ((nbits + BITS_PER_WORD - 1) / BITS_PER_WORD);
+ int word_i;
+
+ for (word_i = 0; word_i < to_fill; word_i++) {
+ /*
+		 * Repack the entropy from the pool into the passed-in out
+		 * buffer. This takes the valid upper bits of word_i and the
+		 * remaining bits from the lower part of (word_i + 1).
+ *
+		 * The following diagram may help. Note: `e` represents
+		 * valid entropy, ` ` represents invalid bits (not entropy) and
+ * `x` represents valid entropy that must not end up in the
+ * packed word.
+ *
+ * |---------entropy pool----------|
+ * C var |--(word_i + 1)-|----word_i-----|
+ * bit idx |7 6 5 4 3 2 1 0|7 6 5 4 3 2 1 0|
+ * [x,x,e,e,e,e,e,e|e,e, , , , , , ]
+ * | [e,e,e,e,e,e,e,e] |
+ * | |--out[word_i]--| |
+ * lshift|---| |--rshift---|
+ *
+ * ==== Which is implemented as ====
+ *
+ * |---------entropy pool----------|
+ * C var |--(word_i + 1)-|----word_i-----|
+ * bit idx |7 6 5 4 3 2 1 0|7 6 5 4 3 2 1 0|
+ * [x,x,e,e,e,e,e,e|e,e, , , , , , ]
+ * C expr << lshift >> rshift
+ * bit idx 5 4 3 2 1 0 7 6
+ * [e,e,e,e,e,e,0,0|0,0,0,0,0,0,e,e]
+ * ==== bit-wise or ====
+ * 5 4 3 2 1 0 7 6
+ * [e,e,e,e,e,e,e,e]
+ */
+ out[word_i] |= entropy[ENTROPY_WORD_INDEX(word_i)] >> rshift;
+
+ /**
+ * Discarding the used/packed entropy bits from the respective
+ * words, (word_i) and (word_i+1) as applicable.
+ * In each iteration of the loop, we pack 64bits of entropy to
+ * the output buffer. The bits are picked linearly starting from
+ * 1st word (entropy[0]) till 4th word (entropy[3]) and then
+ * rolls back (entropy[0]). Discarding of bits is managed
+ * similarly.
+ *
+ * The following diagram illustrates the logic:
+ *
+ * |---------entropy pool----------|
+ * C var |--(word_i + 1)-|----word_i-----|
+ * bit idx |7 6 5 4 3 2 1 0|7 6 5 4 3 2 1 0|
+ * [e,e,e,e,e,e,e,e|e,e,0,0,0,0,0,0]
+ * | [e,e,e,e,e,e,e,e] |
+ * | |--out[word_i]--| |
+ * lshift|---| |--rshift---|
+ * |e,e|0,0,0,0,0,0,0,0|0,0,0,0,0,0|
+ * |<== || ==>|
+ * bits_to_discard (from these bytes)
+ *
+ * variable(bits_to_discard): Tracks the amount of bits to be
+ * discarded and is updated accordingly in each iteration.
+ *
+ * It monitors these packed bits from respective word_i and
+ * word_i+1 and overwrites them with zeros accordingly.
+ * It discards linearly from the lowest index and moves upwards
+ * until bits_to_discard variable becomes zero.
+ *
+		 * In the above diagram, for example, we pack 2 bits (bits 7
+		 * and 6 of word_i) and 6 bits (bits 0 through 5 of word_i+1),
+		 * combining them into one word of the output buffer,
+		 * out[word_i].
+		 * Depending on the number of bits requested, we discard some
+		 * of the packed bits by overwriting them with zeros.
+ */
+
+ /*
+		 * If the number of bits to be discarded is less than the
+		 * number of bits copied to the output buffer from word_i,
+		 * discard only that many bits.
+ */
+ if (bits_to_discard < (BITS_PER_WORD - rshift)) {
+ entropy[ENTROPY_WORD_INDEX(word_i)] &=
+ (~0ULL << ((bits_to_discard+rshift) % BITS_PER_WORD));
+ bits_to_discard = 0;
+ } else {
+ /*
+			 * If the number of bits to be discarded is at least
+			 * the number of valid upper bits of word_i that were
+			 * copied to the output buffer, set the entire word_i
+			 * to 0; the lower bits are already zero from previous
+			 * operations, and bits_to_discard is updated
+			 * accordingly.
+ */
+ entropy[ENTROPY_WORD_INDEX(word_i)] = 0;
+ bits_to_discard -= (BITS_PER_WORD - rshift);
+ }
+
+ /*
+ * Note that a shift of 64 bits is treated as a shift of 0 bits.
+ * When the shift amount is the same as the BITS_PER_WORD, we
+ * don't want to include the next word of entropy, so we skip
+ * the `|=` operation.
+ */
+ if (lshift != BITS_PER_WORD) {
+ out[word_i] |= entropy[ENTROPY_WORD_INDEX(word_i + 1)]
+ << lshift;
+ /**
+			 * Discard the remaining packed bits from the upper
+			 * word (word_i+1) that were copied to the output
+			 * buffer, by overwriting them with zeros.
+			 *
+			 * If the number of remaining bits to be discarded is
+			 * less than the number of bits copied from word_i+1
+			 * to the output buffer, overwrite only that many
+			 * bits.
+ */
+ if (bits_to_discard < (BITS_PER_WORD - lshift)) {
+ entropy[ENTROPY_WORD_INDEX(word_i+1)] &=
+ (~0ULL << ((bits_to_discard) % BITS_PER_WORD));
+ bits_to_discard = 0;
+ } else {
+ /*
+				 * If the number of bits to discard is at least
+				 * the number of bits from word_i+1 that were
+				 * packed into the output, discard all of those
+				 * copied bits.
+ *
+ * Note: we cannot set the entire word_i+1 to 0, as
+ * there are still some unused valid entropy bits at the
+ * upper end for future use.
+ */
+ entropy[ENTROPY_WORD_INDEX(word_i+1)] &=
+ (~0ULL << ((BITS_PER_WORD - lshift) % BITS_PER_WORD));
+ bits_to_discard -= (BITS_PER_WORD - lshift);
+ }
+
+ }
+ }
+	if ((nbits % BITS_PER_WORD) != 0U) {
+		/*
+		 * Mask off bits above nbits in the last output word. Guard the
+		 * shift: when nbits is a multiple of BITS_PER_WORD the shift
+		 * amount would equal BITS_PER_WORD, which is undefined, and no
+		 * masking is needed in that case anyway.
+		 */
+		const uint64_t mask =
+			~0ULL >> (BITS_PER_WORD - (nbits % BITS_PER_WORD));
+
+		out[to_fill - 1] &= mask;
+	}
+
+ entropy_bit_index = (entropy_bit_index + nbits) % BITS_IN_POOL;
+ entropy_bit_size -= nbits;
+
+out:
+ spin_unlock(&trng_pool_lock);
+
+ return ret;
+}
+
+void trng_entropy_pool_setup(void)
+{
+ int i;
+
+ for (i = 0; i < WORDS_IN_POOL; i++) {
+ entropy[i] = 0;
+ }
+ entropy_bit_index = 0;
+ entropy_bit_size = 0;
+}
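
A host-side usage sketch of the pool (not part of the patch), assuming a
deterministic stub for the plat_get_entropy() platform hook and ignoring the
spinlock dependency; it sets the pool up and packs a 96-bit request into two
zero-initialised output words.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#include "trng_entropy_pool.h"

/* Stub entropy source standing in for the platform TRNG hook. */
bool plat_get_entropy(uint64_t *out)
{
	static uint64_t counter = 0x1122334455667788ULL;

	*out = counter++;
	return true;
}

int main(void)
{
	uint64_t buf[2] = {0};	/* trng_pack_entropy() ORs into the buffer */

	trng_entropy_pool_setup();

	if (trng_pack_entropy(96U, buf)) {
		printf("96 bits of entropy: %016llx %016llx\n",
		       (unsigned long long)buf[1],
		       (unsigned long long)buf[0]);
	}
	return 0;
}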
diff --git a/services/std_svc/trng/trng_entropy_pool.h b/services/std_svc/trng/trng_entropy_pool.h
new file mode 100644
index 0000000..fab2367
--- /dev/null
+++ b/services/std_svc/trng/trng_entropy_pool.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TRNG_ENTROPY_POOL_H
+#define TRNG_ENTROPY_POOL_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+bool trng_pack_entropy(uint32_t nbits, uint64_t *out);
+void trng_entropy_pool_setup(void);
+
+#endif /* TRNG_ENTROPY_POOL_H */
diff --git a/services/std_svc/trng/trng_main.c b/services/std_svc/trng/trng_main.c
new file mode 100644
index 0000000..90098a8
--- /dev/null
+++ b/services/std_svc/trng/trng_main.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_features.h>
+#include <lib/smccc.h>
+#include <services/trng_svc.h>
+#include <smccc_helpers.h>
+
+#include <plat/common/plat_trng.h>
+
+#include "trng_entropy_pool.h"
+
+static const uuid_t uuid_null;
+
+/* handle the RND call in SMC 32 bit mode */
+static uintptr_t trng_rnd32(uint32_t nbits, void *handle)
+{
+ uint32_t mask = ~0U;
+ uint64_t ent[2] = {0};
+
+ if (nbits == 0U || nbits > TRNG_RND32_ENTROPY_MAXBITS) {
+ SMC_RET1(handle, TRNG_E_INVALID_PARAMS);
+ }
+
+ if (!trng_pack_entropy(nbits, &ent[0])) {
+ SMC_RET1(handle, TRNG_E_NO_ENTROPY);
+ }
+
+ if ((nbits % 32U) != 0U) {
+ mask >>= 32U - (nbits % 32U);
+ }
+
+ switch ((nbits - 1U) / 32U) {
+ case 0:
+ SMC_RET4(handle, TRNG_E_SUCCESS, 0, 0, ent[0] & mask);
+ break; /* unreachable */
+ case 1:
+ SMC_RET4(handle, TRNG_E_SUCCESS, 0, (ent[0] >> 32) & mask,
+ ent[0] & 0xFFFFFFFF);
+ break; /* unreachable */
+ case 2:
+ SMC_RET4(handle, TRNG_E_SUCCESS, ent[1] & mask,
+ (ent[0] >> 32) & 0xFFFFFFFF, ent[0] & 0xFFFFFFFF);
+ break; /* unreachable */
+ default:
+ SMC_RET1(handle, TRNG_E_INVALID_PARAMS);
+ break; /* unreachable */
+ }
+}
+
+/* handle the RND call in SMC 64 bit mode */
+static uintptr_t trng_rnd64(uint32_t nbits, void *handle)
+{
+ uint64_t mask = ~0ULL;
+ uint64_t ent[3] = {0};
+
+ if (nbits == 0U || nbits > TRNG_RND64_ENTROPY_MAXBITS) {
+ SMC_RET1(handle, TRNG_E_INVALID_PARAMS);
+ }
+
+ if (!trng_pack_entropy(nbits, &ent[0])) {
+ SMC_RET1(handle, TRNG_E_NO_ENTROPY);
+ }
+
+ /* Mask off higher bits if only part of register requested */
+ if ((nbits % 64U) != 0U) {
+ mask >>= 64U - (nbits % 64U);
+ }
+
+ switch ((nbits - 1U) / 64U) {
+ case 0:
+ SMC_RET4(handle, TRNG_E_SUCCESS, 0, 0, ent[0] & mask);
+ break; /* unreachable */
+ case 1:
+ SMC_RET4(handle, TRNG_E_SUCCESS, 0, ent[1] & mask, ent[0]);
+ break; /* unreachable */
+ case 2:
+ SMC_RET4(handle, TRNG_E_SUCCESS, ent[2] & mask, ent[1], ent[0]);
+ break; /* unreachable */
+ default:
+ SMC_RET1(handle, TRNG_E_INVALID_PARAMS);
+ break; /* unreachable */
+ }
+}
+
+void trng_setup(void)
+{
+ trng_entropy_pool_setup();
+ plat_entropy_setup();
+}
+
+/* Predicate indicating that a function id is part of TRNG */
+bool is_trng_fid(uint32_t smc_fid)
+{
+ return ((smc_fid == ARM_TRNG_VERSION) ||
+ (smc_fid == ARM_TRNG_FEATURES) ||
+ (smc_fid == ARM_TRNG_GET_UUID) ||
+ (smc_fid == ARM_TRNG_RND32) ||
+ (smc_fid == ARM_TRNG_RND64));
+}
+
+uintptr_t trng_smc_handler(uint32_t smc_fid, u_register_t x1, u_register_t x2,
+ u_register_t x3, u_register_t x4, void *cookie,
+ void *handle, u_register_t flags)
+{
+ if (!memcmp(&plat_trng_uuid, &uuid_null, sizeof(uuid_t))) {
+ SMC_RET1(handle, TRNG_E_NOT_IMPLEMENTED);
+ }
+
+ switch (smc_fid) {
+ case ARM_TRNG_VERSION:
+ SMC_RET1(handle, MAKE_SMCCC_VERSION(
+ TRNG_VERSION_MAJOR, TRNG_VERSION_MINOR));
+ break; /* unreachable */
+
+ case ARM_TRNG_FEATURES:
+ if (is_trng_fid((uint32_t)x1)) {
+ SMC_RET1(handle, TRNG_E_SUCCESS);
+ } else {
+ SMC_RET1(handle, TRNG_E_NOT_SUPPORTED);
+ }
+ break; /* unreachable */
+
+ case ARM_TRNG_GET_UUID:
+ SMC_UUID_RET(handle, plat_trng_uuid);
+ break; /* unreachable */
+
+ case ARM_TRNG_RND32:
+ return trng_rnd32((uint32_t)x1, handle);
+
+ case ARM_TRNG_RND64:
+ return trng_rnd64((uint32_t)x1, handle);
+
+ default:
+ WARN("Unimplemented TRNG Service Call: 0x%x\n", smc_fid);
+ SMC_RET1(handle, TRNG_E_NOT_IMPLEMENTED);
+ break; /* unreachable */
+ }
+}
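
A worked illustration of the TRNG_RND32 register packing above for a 40-bit
request: mask becomes 0xff, (nbits - 1) / 32 evaluates to 1, so the handler
returns x1 = 0, x2 = bits 39:32 of ent[0] and x3 = bits 31:0. The stand-alone
sketch below reproduces the same arithmetic with an arbitrary example value.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t nbits = 40U;
	const uint64_t ent0 = 0x0123456789ABCDEFULL;	/* example pool word */
	uint32_t mask = ~0U;

	if ((nbits % 32U) != 0U) {
		mask >>= 32U - (nbits % 32U);		/* 0xff for 40 bits */
	}

	printf("x1=0 x2=%08x x3=%08x\n",
	       (uint32_t)((ent0 >> 32) & mask),
	       (uint32_t)(ent0 & 0xFFFFFFFFU));
	return 0;
}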