authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-21 17:43:51 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-21 17:43:51 +0000
commitbe58c81aff4cd4c0ccf43dbd7998da4a6a08c03b (patch)
tree779c248fb61c83f65d1f0dc867f2053d76b4e03a /services/std_svc/spmd
parentInitial commit. (diff)
Adding upstream version 2.10.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'services/std_svc/spmd')
-rw-r--r--  services/std_svc/spmd/aarch64/spmd_helpers.S |   73
-rw-r--r--  services/std_svc/spmd/spmd.mk                |   29
-rw-r--r--  services/std_svc/spmd/spmd_logical_sp.c      |  742
-rw-r--r--  services/std_svc/spmd/spmd_main.c            | 1292
-rw-r--r--  services/std_svc/spmd/spmd_pm.c              |  168
-rw-r--r--  services/std_svc/spmd/spmd_private.h         |  115
6 files changed, 2419 insertions, 0 deletions
diff --git a/services/std_svc/spmd/aarch64/spmd_helpers.S b/services/std_svc/spmd/aarch64/spmd_helpers.S
new file mode 100644
index 0000000..d7bffca
--- /dev/null
+++ b/services/std_svc/spmd/aarch64/spmd_helpers.S
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "../spmd_private.h"
+
+ .global spmd_spm_core_enter
+ .global spmd_spm_core_exit
+
+	/* ---------------------------------------------------------------------
+	 * This function is called with SP_EL0 as the stack. Here we stash our
+	 * EL3 callee-saved registers onto the stack as part of saving the C
+	 * runtime and enter the secure payload.
+	 * 'x0' contains a pointer to the memory where the address of the C
+	 * runtime context is to be saved.
+	 * ---------------------------------------------------------------------
+	 */
+func spmd_spm_core_enter
+ /* Make space for the registers that we're going to save */
+ mov x3, sp
+ str x3, [x0, #0]
+ sub sp, sp, #SPMD_C_RT_CTX_SIZE
+
+ /* Save callee-saved registers on to the stack */
+ stp x19, x20, [sp, #SPMD_C_RT_CTX_X19]
+ stp x21, x22, [sp, #SPMD_C_RT_CTX_X21]
+ stp x23, x24, [sp, #SPMD_C_RT_CTX_X23]
+ stp x25, x26, [sp, #SPMD_C_RT_CTX_X25]
+ stp x27, x28, [sp, #SPMD_C_RT_CTX_X27]
+ stp x29, x30, [sp, #SPMD_C_RT_CTX_X29]
+
+	/* ---------------------------------------------------------------------
+	 * Everything is set up now. el3_exit() will use the secure context to
+	 * restore the general purpose and EL3 system registers and ERET into
+	 * the secure payload.
+	 * ---------------------------------------------------------------------
+	 */
+ b el3_exit
+endfunc spmd_spm_core_enter
+
+	/* ---------------------------------------------------------------------
+	 * This function is called with 'x0' pointing to a C runtime context.
+	 * It restores the saved registers and jumps to that runtime with 'x0'
+	 * as the new SP register. This destroys the C runtime context that had
+	 * been built on the stack below the saved context by the caller. The
+	 * second parameter 'x1' is then passed to the caller as a return value.
+	 * ---------------------------------------------------------------------
+	 */
+func spmd_spm_core_exit
+ /* Restore the previous stack */
+ mov sp, x0
+
+	/* Restore callee-saved registers from the stack */
+ ldp x19, x20, [x0, #(SPMD_C_RT_CTX_X19 - SPMD_C_RT_CTX_SIZE)]
+ ldp x21, x22, [x0, #(SPMD_C_RT_CTX_X21 - SPMD_C_RT_CTX_SIZE)]
+ ldp x23, x24, [x0, #(SPMD_C_RT_CTX_X23 - SPMD_C_RT_CTX_SIZE)]
+ ldp x25, x26, [x0, #(SPMD_C_RT_CTX_X25 - SPMD_C_RT_CTX_SIZE)]
+ ldp x27, x28, [x0, #(SPMD_C_RT_CTX_X27 - SPMD_C_RT_CTX_SIZE)]
+ ldp x29, x30, [x0, #(SPMD_C_RT_CTX_X29 - SPMD_C_RT_CTX_SIZE)]
+
+	/* ---------------------------------------------------------------------
+	 * This should take us back to the instruction after the call to the
+	 * last spmd_spm_core_enter(). Place the second parameter in x0
+	 * so that the caller will see it as a return value from the original
+	 * entry call.
+	 * ---------------------------------------------------------------------
+	 */
+ mov x0, x1
+ ret
+endfunc spmd_spm_core_exit
diff --git a/services/std_svc/spmd/spmd.mk b/services/std_svc/spmd/spmd.mk
new file mode 100644
index 0000000..e567b53
--- /dev/null
+++ b/services/std_svc/spmd/spmd.mk
@@ -0,0 +1,29 @@
+#
+# Copyright (c) 2021-2023, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+SPMD_SOURCES += $(addprefix services/std_svc/spmd/, \
+ ${ARCH}/spmd_helpers.S \
+ spmd_pm.c \
+ spmd_main.c \
+ spmd_logical_sp.c)
+
+# Specify platform specific SPMD logical partition implementation.
+SPMD_LP_SOURCES := $(wildcard $(addprefix ${PLAT_DIR}/, \
+ ${PLAT}_spmd_logical_sp*.c))
+
+ifeq (${ENABLE_SPMD_LP}, 1)
+ifneq ($(wildcard $(SPMD_LP_SOURCES)),)
+SPMD_SOURCES += $(SPMD_LP_SOURCES)
+endif
+endif
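For illustration, a platform would provide its SPMD logical partition
descriptor(s) in a ${PLAT}_spmd_logical_sp*.c file picked up by the wildcard
above. A minimal sketch, assuming only the spmd_lp_desc fields validated by
spmd_logical_sp.c further below; the section name, partition ID and UUID are
hypothetical placeholders:

    /* Hypothetical plat/<vendor>/<plat>/<plat>_spmd_logical_sp_example.c */
    #include <services/el3_spmd_logical_sp.h>
    #include <services/ffa_svc.h>

    static int32_t example_lp_init(void)
    {
            /* One-time setup; 0 reports success to spmd_logical_sp_init(). */
            return 0;
    }

    /*
     * Placed in the descriptor region scanned between SPMD_LP_DESCS_START
     * and SPMD_LP_DESCS_END. ".spmd_lp_descs" is an assumed section name.
     */
    static struct spmd_lp_desc example_lp __section(".spmd_lp_descs") __used = {
            .sp_id = 0xFFC1U,               /* Placeholder; must pass is_spmd_lp_id(). */
            .uuid = { 0x1U, 0x2U, 0x3U, 0x4U }, /* Must not be the NULL UUID. */
            .init = example_lp_init,            /* Required init callback. */
            .properties = FFA_PARTITION_DIRECT_REQ_SEND, /* Only allowed value. */
    };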
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32 := yes
+
+# Enable dynamic memory mapping
+# The SPMD component maps the SPMC DTB within BL31 virtual space.
+PLAT_XLAT_TABLES_DYNAMIC := 1
+$(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC))
diff --git a/services/std_svc/spmd/spmd_logical_sp.c b/services/std_svc/spmd/spmd_logical_sp.c
new file mode 100644
index 0000000..d992187
--- /dev/null
+++ b/services/std_svc/spmd/spmd_logical_sp.c
@@ -0,0 +1,742 @@
+/*
+ * Copyright (c) 2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+#include "spmd_private.h"
+
+#include <common/debug.h>
+#include <common/uuid.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <services/el3_spmd_logical_sp.h>
+#include <services/spmc_svc.h>
+#include <smccc_helpers.h>
+
+
+/*
+ * The maximum number of ffa_partition_info entries that can be returned by
+ * one invocation of FFA_PARTITION_INFO_GET_REGS_64 is the size in bytes of
+ * the available registers/args in struct ffa_value divided by the size of
+ * struct ffa_partition_info. For this ABI, arg3-arg17 in ffa_value can be
+ * used, i.e. 15 uint64_t fields. For FF-A v1.1, this value is 5.
+ */
+#define MAX_INFO_REGS_ENTRIES_PER_CALL \
+ (uint8_t)((15 * sizeof(uint64_t)) / \
+ sizeof(struct ffa_partition_info_v1_1))
+CASSERT(MAX_INFO_REGS_ENTRIES_PER_CALL == 5, assert_too_many_info_reg_entries);
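To make the CASSERT concrete: struct ffa_partition_info_v1_1 packs a 16-bit
endpoint ID, a 16-bit execution context count, a 32-bit properties field and a
4x32-bit UUID, i.e. 24 bytes per entry, so (15 * sizeof(uint64_t)) / 24 =
120 / 24 = 5 entries fit in a single call.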
+
+#if ENABLE_SPMD_LP
+static bool is_spmd_lp_inited;
+static bool is_spmc_inited;
+
+/*
+ * Helper function to obtain the array storing the EL3
+ * SPMD Logical Partition descriptors.
+ */
+static struct spmd_lp_desc *get_spmd_el3_lp_array(void)
+{
+ return (struct spmd_lp_desc *) SPMD_LP_DESCS_START;
+}
+
+/*******************************************************************************
+ * Validate any logical partition descriptors before we initialize.
+ * Initialization of said partitions will be taken care of during SPMD boot.
+ ******************************************************************************/
+static int el3_spmd_sp_desc_validate(struct spmd_lp_desc *lp_array)
+{
+ /* Check the array bounds are valid. */
+ assert(SPMD_LP_DESCS_END > SPMD_LP_DESCS_START);
+
+ /*
+ * No support for SPMD logical partitions when SPMC is at EL3.
+ */
+ assert(!is_spmc_at_el3());
+
+ /* If no SPMD logical partitions are implemented then simply bail out. */
+ if (SPMD_LP_DESCS_COUNT == 0U) {
+ return -1;
+ }
+
+ for (uint32_t index = 0U; index < SPMD_LP_DESCS_COUNT; index++) {
+ struct spmd_lp_desc *lp_desc = &lp_array[index];
+
+ /* Validate our logical partition descriptors. */
+ if (lp_desc == NULL) {
+ ERROR("Invalid SPMD Logical SP Descriptor\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure the ID follows the convention to indicate it resides
+ * in the secure world.
+ */
+ if (!ffa_is_secure_world_id(lp_desc->sp_id)) {
+ ERROR("Invalid SPMD Logical SP ID (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure SPMD logical partition is in valid range. */
+ if (!is_spmd_lp_id(lp_desc->sp_id)) {
+ ERROR("Invalid SPMD Logical Partition ID (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure the UUID is not the NULL UUID. */
+ if (lp_desc->uuid[0] == 0 && lp_desc->uuid[1] == 0 &&
+ lp_desc->uuid[2] == 0 && lp_desc->uuid[3] == 0) {
+ ERROR("Invalid UUID for SPMD Logical SP (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure init function callback is registered. */
+ if (lp_desc->init == NULL) {
+ ERROR("Missing init function for Logical SP(0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure that SPMD LP only supports sending direct requests. */
+ if (lp_desc->properties != FFA_PARTITION_DIRECT_REQ_SEND) {
+ ERROR("Invalid SPMD logical partition properties (0x%x)\n",
+ lp_desc->properties);
+ return -EINVAL;
+ }
+
+ /* Ensure that all partition IDs are unique. */
+ for (uint32_t inner_idx = index + 1;
+ inner_idx < SPMD_LP_DESCS_COUNT; inner_idx++) {
+ if (lp_desc->sp_id == lp_array[inner_idx].sp_id) {
+ ERROR("Duplicate SPMD logical SP ID Detected (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
+static void spmd_encode_ffa_error(struct ffa_value *retval, int32_t error_code)
+{
+ retval->func = FFA_ERROR;
+ retval->arg1 = FFA_TARGET_INFO_MBZ;
+ retval->arg2 = (uint32_t)error_code;
+ retval->arg3 = FFA_TARGET_INFO_MBZ;
+ retval->arg4 = FFA_TARGET_INFO_MBZ;
+ retval->arg5 = FFA_TARGET_INFO_MBZ;
+ retval->arg6 = FFA_TARGET_INFO_MBZ;
+ retval->arg7 = FFA_TARGET_INFO_MBZ;
+}
+
+static void spmd_build_direct_message_req(spmd_spm_core_context_t *ctx,
+ uint64_t x1, uint64_t x2,
+ uint64_t x3, uint64_t x4)
+{
+ gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
+
+ write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
+ write_ctx_reg(gpregs, CTX_GPREG_X1, x1);
+ write_ctx_reg(gpregs, CTX_GPREG_X2, x2);
+ write_ctx_reg(gpregs, CTX_GPREG_X3, x3);
+ write_ctx_reg(gpregs, CTX_GPREG_X4, x4);
+ write_ctx_reg(gpregs, CTX_GPREG_X5, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X6, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X7, 0U);
+}
+
+static void spmd_encode_ctx_to_ffa_value(spmd_spm_core_context_t *ctx,
+ struct ffa_value *retval)
+{
+ gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
+
+ retval->func = read_ctx_reg(gpregs, CTX_GPREG_X0);
+ retval->arg1 = read_ctx_reg(gpregs, CTX_GPREG_X1);
+ retval->arg2 = read_ctx_reg(gpregs, CTX_GPREG_X2);
+ retval->arg3 = read_ctx_reg(gpregs, CTX_GPREG_X3);
+ retval->arg4 = read_ctx_reg(gpregs, CTX_GPREG_X4);
+ retval->arg5 = read_ctx_reg(gpregs, CTX_GPREG_X5);
+ retval->arg6 = read_ctx_reg(gpregs, CTX_GPREG_X6);
+ retval->arg7 = read_ctx_reg(gpregs, CTX_GPREG_X7);
+ retval->arg8 = read_ctx_reg(gpregs, CTX_GPREG_X8);
+ retval->arg9 = read_ctx_reg(gpregs, CTX_GPREG_X9);
+ retval->arg10 = read_ctx_reg(gpregs, CTX_GPREG_X10);
+ retval->arg11 = read_ctx_reg(gpregs, CTX_GPREG_X11);
+ retval->arg12 = read_ctx_reg(gpregs, CTX_GPREG_X12);
+ retval->arg13 = read_ctx_reg(gpregs, CTX_GPREG_X13);
+ retval->arg14 = read_ctx_reg(gpregs, CTX_GPREG_X14);
+ retval->arg15 = read_ctx_reg(gpregs, CTX_GPREG_X15);
+ retval->arg16 = read_ctx_reg(gpregs, CTX_GPREG_X16);
+ retval->arg17 = read_ctx_reg(gpregs, CTX_GPREG_X17);
+}
+
+static void spmd_logical_sp_set_dir_req_ongoing(spmd_spm_core_context_t *ctx)
+{
+ ctx->spmd_lp_sync_req_ongoing |= SPMD_LP_FFA_DIR_REQ_ONGOING;
+}
+
+static void spmd_logical_sp_reset_dir_req_ongoing(spmd_spm_core_context_t *ctx)
+{
+ ctx->spmd_lp_sync_req_ongoing &= ~SPMD_LP_FFA_DIR_REQ_ONGOING;
+}
+
+static void spmd_build_ffa_info_get_regs(spmd_spm_core_context_t *ctx,
+ const uint32_t uuid[4],
+ const uint16_t start_index,
+ const uint16_t tag)
+{
+ gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
+
+ uint64_t arg1 = (uint64_t)uuid[1] << 32 | uuid[0];
+ uint64_t arg2 = (uint64_t)uuid[3] << 32 | uuid[2];
+ uint64_t arg3 = start_index | (uint64_t)tag << 16;
+
+ write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_PARTITION_INFO_GET_REGS_SMC64);
+ write_ctx_reg(gpregs, CTX_GPREG_X1, arg1);
+ write_ctx_reg(gpregs, CTX_GPREG_X2, arg2);
+ write_ctx_reg(gpregs, CTX_GPREG_X3, arg3);
+ write_ctx_reg(gpregs, CTX_GPREG_X4, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X5, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X6, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X7, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X8, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X9, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X10, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X11, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X12, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X13, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X14, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X15, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X16, 0U);
+ write_ctx_reg(gpregs, CTX_GPREG_X17, 0U);
+}
+
+static void spmd_logical_sp_set_info_regs_ongoing(spmd_spm_core_context_t *ctx)
+{
+ ctx->spmd_lp_sync_req_ongoing |= SPMD_LP_FFA_INFO_GET_REG_ONGOING;
+}
+
+static void spmd_logical_sp_reset_info_regs_ongoing(
+ spmd_spm_core_context_t *ctx)
+{
+ ctx->spmd_lp_sync_req_ongoing &= ~SPMD_LP_FFA_INFO_GET_REG_ONGOING;
+}
+
+static void spmd_fill_lp_info_array(
+ struct ffa_partition_info_v1_1 (*partitions)[EL3_SPMD_MAX_NUM_LP],
+ uint32_t uuid[4], uint16_t *lp_count_out)
+{
+ uint16_t lp_count = 0;
+ struct spmd_lp_desc *lp_array;
+ bool uuid_is_null = is_null_uuid(uuid);
+
+ if (SPMD_LP_DESCS_COUNT == 0U) {
+ *lp_count_out = 0;
+ return;
+ }
+
+ lp_array = get_spmd_el3_lp_array();
+ for (uint16_t index = 0; index < SPMD_LP_DESCS_COUNT; ++index) {
+ struct spmd_lp_desc *lp = &lp_array[index];
+
+ if (uuid_is_null || uuid_match(uuid, lp->uuid)) {
+ uint16_t array_index = lp_count;
+
+ ++lp_count;
+
+ (*partitions)[array_index].ep_id = lp->sp_id;
+ (*partitions)[array_index].execution_ctx_count = 1;
+ (*partitions)[array_index].properties = lp->properties;
+ (*partitions)[array_index].properties |=
+ (FFA_PARTITION_INFO_GET_AARCH64_STATE <<
+ FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT);
+ if (uuid_is_null) {
+ memcpy(&((*partitions)[array_index].uuid),
+ &lp->uuid, sizeof(lp->uuid));
+ }
+ }
+ }
+
+ *lp_count_out = lp_count;
+}
+
+static inline void spmd_pack_lp_count_props(
+ uint64_t *xn, uint16_t ep_id, uint16_t vcpu_count,
+ uint32_t properties)
+{
+ *xn = (uint64_t)ep_id;
+ *xn |= (uint64_t)vcpu_count << 16;
+ *xn |= (uint64_t)properties << 32;
+}
+
+static inline void spmd_pack_lp_uuid(uint64_t *xn_1, uint64_t *xn_2,
+ uint32_t uuid[4])
+{
+ *xn_1 = (uint64_t)uuid[0];
+ *xn_1 |= (uint64_t)uuid[1] << 32;
+ *xn_2 = (uint64_t)uuid[2];
+ *xn_2 |= (uint64_t)uuid[3] << 32;
+}
+#endif
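Taken together, the two packing helpers above produce the register layout of an
FFA_PARTITION_INFO_GET_REGS response: entry k (k = 0..4) occupies registers
x(3k+3) to x(3k+5). A sketch of that layout, derived from the shifts in the
helpers:

    x(3k+3) = ep_id | (vcpu_count << 16) | (properties << 32)
    x(3k+4) = uuid[0] | (uuid[1] << 32)   /* only when the request UUID is NULL */
    x(3k+5) = uuid[2] | (uuid[3] << 32)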
+
+/*
+ * Initialize SPMD logical partitions. This function assumes that it is called
+ * only after the SPMC has successfully initialized.
+ */
+int32_t spmd_logical_sp_init(void)
+{
+#if ENABLE_SPMD_LP
+ int32_t rc = 0;
+ struct spmd_lp_desc *spmd_lp_descs;
+
+ assert(SPMD_LP_DESCS_COUNT <= EL3_SPMD_MAX_NUM_LP);
+
+ if (is_spmd_lp_inited == true) {
+ return 0;
+ }
+
+ if (is_spmc_inited == false) {
+ return -1;
+ }
+
+ spmd_lp_descs = get_spmd_el3_lp_array();
+
+ /* Perform initial validation of the SPMD Logical Partitions. */
+ rc = el3_spmd_sp_desc_validate(spmd_lp_descs);
+ if (rc != 0) {
+ ERROR("Logical SPMD Partition validation failed!\n");
+ return rc;
+ }
+
+ VERBOSE("SPMD Logical Secure Partition init start.\n");
+ for (unsigned int i = 0U; i < SPMD_LP_DESCS_COUNT; i++) {
+ rc = spmd_lp_descs[i].init();
+ if (rc != 0) {
+ ERROR("SPMD Logical SP (0x%x) failed to initialize\n",
+ spmd_lp_descs[i].sp_id);
+ return rc;
+ }
+ VERBOSE("SPMD Logical SP (0x%x) Initialized\n",
+ spmd_lp_descs[i].sp_id);
+ }
+
+ INFO("SPMD Logical Secure Partition init completed.\n");
+ if (rc == 0) {
+ is_spmd_lp_inited = true;
+ }
+ return rc;
+#else
+ return 0;
+#endif
+}
+
+void spmd_logical_sp_set_spmc_initialized(void)
+{
+#if ENABLE_SPMD_LP
+ is_spmc_inited = true;
+#endif
+}
+
+void spmd_logical_sp_set_spmc_failure(void)
+{
+#if ENABLE_SPMD_LP
+ is_spmc_inited = false;
+#endif
+}
+
+/*
+ * This function takes an ffa_value structure populated with partition
+ * information from an FFA_PARTITION_INFO_GET_REGS ABI call, extracts
+ * the values and writes them into an ffa_partition_info_v1_1 structure
+ * for other code to consume.
+ */
+bool ffa_partition_info_regs_get_part_info(
+ struct ffa_value *args, uint8_t idx,
+ struct ffa_partition_info_v1_1 *partition_info)
+{
+ uint64_t *arg_ptrs;
+ uint64_t info, uuid_lo, uuid_high;
+
+	/*
+	 * Each partition's information is encoded in 3 registers, so a
+	 * maximum of 5 entries can be returned.
+	 */
+ if (idx >= 5 || partition_info == NULL) {
+ return false;
+ }
+
+	/*
+	 * Pointer to the args in the return value. arg0/func encodes the FF-A
+	 * function, arg1 is reserved, arg2 encodes indices. arg3 and onwards
+	 * carry the partition properties.
+	 */
+ arg_ptrs = (uint64_t *)args + ((idx * 3) + 3);
+ info = *arg_ptrs;
+
+ arg_ptrs++;
+ uuid_lo = *arg_ptrs;
+
+ arg_ptrs++;
+ uuid_high = *arg_ptrs;
+
+ partition_info->ep_id = (uint16_t)(info & 0xFFFFU);
+ partition_info->execution_ctx_count = (uint16_t)((info >> 16) & 0xFFFFU);
+ partition_info->properties = (uint32_t)(info >> 32);
+ partition_info->uuid[0] = (uint32_t)(uuid_lo & 0xFFFFFFFFU);
+ partition_info->uuid[1] = (uint32_t)((uuid_lo >> 32) & 0xFFFFFFFFU);
+ partition_info->uuid[2] = (uint32_t)(uuid_high & 0xFFFFFFFFU);
+ partition_info->uuid[3] = (uint32_t)((uuid_high >> 32) & 0xFFFFFFFFU);
+
+ return true;
+}
+
+/*
+ * This function is called by the SPMD in response to
+ * an FFA_PARTITION_INFO_GET_REGS ABI invocation by the SPMC. Secure
+ * partitions are allowed to discover the presence of EL3 SPMD logical
+ * partitions by invoking the aforementioned ABI, and this function populates
+ * the required information about EL3 SPMD logical partitions.
+ */
+uint64_t spmd_el3_populate_logical_partition_info(void *handle, uint64_t x1,
+ uint64_t x2, uint64_t x3)
+{
+#if ENABLE_SPMD_LP
+ uint32_t target_uuid[4] = { 0 };
+ uint32_t w0;
+ uint32_t w1;
+ uint32_t w2;
+ uint32_t w3;
+ uint16_t start_index;
+ uint16_t tag;
+ static struct ffa_partition_info_v1_1 partitions[EL3_SPMD_MAX_NUM_LP];
+ uint16_t lp_count = 0;
+ uint16_t max_idx = 0;
+ uint16_t curr_idx = 0;
+ uint8_t num_entries_to_ret = 0;
+ struct ffa_value ret = { 0 };
+ uint64_t *arg_ptrs = (uint64_t *)&ret + 3;
+
+ w0 = (uint32_t)(x1 & 0xFFFFFFFFU);
+ w1 = (uint32_t)(x1 >> 32);
+ w2 = (uint32_t)(x2 & 0xFFFFFFFFU);
+ w3 = (uint32_t)(x2 >> 32);
+
+ target_uuid[0] = w0;
+ target_uuid[1] = w1;
+ target_uuid[2] = w2;
+ target_uuid[3] = w3;
+
+ start_index = (uint16_t)(x3 & 0xFFFFU);
+ tag = (uint16_t)((x3 >> 16) & 0xFFFFU);
+
+ assert(handle == cm_get_context(SECURE));
+
+ if (tag != 0) {
+ VERBOSE("Tag is not 0. Cannot return partition info.\n");
+ return spmd_ffa_error_return(handle, FFA_ERROR_RETRY);
+ }
+
+ memset(&partitions, 0, sizeof(partitions));
+
+ spmd_fill_lp_info_array(&partitions, target_uuid, &lp_count);
+
+ if (lp_count == 0) {
+ VERBOSE("No SPDM EL3 logical partitions exist.\n");
+ return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ if (start_index >= lp_count) {
+ VERBOSE("start_index = %d, lp_count = %d (start index must be"
+ " less than partition count.\n",
+ start_index, lp_count);
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ max_idx = lp_count - 1;
+ num_entries_to_ret = (max_idx - start_index) + 1;
+ num_entries_to_ret =
+ MIN(num_entries_to_ret, MAX_INFO_REGS_ENTRIES_PER_CALL);
+ curr_idx = start_index + num_entries_to_ret - 1;
+ assert(curr_idx <= max_idx);
+
+ ret.func = FFA_SUCCESS_SMC64;
+ ret.arg2 = (uint64_t)((sizeof(struct ffa_partition_info_v1_1) & 0xFFFFU) << 48);
+ ret.arg2 |= (uint64_t)(curr_idx << 16);
+ ret.arg2 |= (uint64_t)max_idx;
+
+ for (uint16_t idx = start_index; idx <= curr_idx; ++idx) {
+ spmd_pack_lp_count_props(arg_ptrs, partitions[idx].ep_id,
+ partitions[idx].execution_ctx_count,
+ partitions[idx].properties);
+ arg_ptrs++;
+ if (is_null_uuid(target_uuid)) {
+ spmd_pack_lp_uuid(arg_ptrs, (arg_ptrs + 1),
+ partitions[idx].uuid);
+ }
+ arg_ptrs += 2;
+ }
+
+ SMC_RET18(handle, ret.func, ret.arg1, ret.arg2, ret.arg3, ret.arg4,
+ ret.arg5, ret.arg6, ret.arg7, ret.arg8, ret.arg9, ret.arg10,
+ ret.arg11, ret.arg12, ret.arg13, ret.arg14, ret.arg15,
+ ret.arg16, ret.arg17);
+#else
+ return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+#endif
+}
+
+/*
+ * This function can be used by an SPMD logical partition to invoke the
+ * FFA_PARTITION_INFO_GET_REGS ABI on the SPMC, to discover the secure
+ * partitions in the system. The function takes a UUID, start index and
+ * tag, and returns the partition information in an ffa_value structure,
+ * which can be consumed using the appropriate helper functions.
+ */
+bool spmd_el3_invoke_partition_info_get(
+ const uint32_t target_uuid[4],
+ const uint16_t start_index,
+ const uint16_t tag,
+ struct ffa_value *retval)
+{
+#if ENABLE_SPMD_LP
+ uint64_t rc = UINT64_MAX;
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+
+ if (retval == NULL) {
+ return false;
+ }
+
+ memset(retval, 0, sizeof(*retval));
+
+ if (!is_spmc_inited) {
+ VERBOSE("Cannot discover partition before,"
+ " SPMC is initialized.\n");
+ spmd_encode_ffa_error(retval, FFA_ERROR_DENIED);
+ return true;
+ }
+
+ if (tag != 0) {
+ VERBOSE("Tag must be zero. other tags unsupported\n");
+ spmd_encode_ffa_error(retval,
+ FFA_ERROR_INVALID_PARAMETER);
+ return true;
+ }
+
+ /* Save the non-secure context before entering SPMC */
+ cm_el1_sysregs_context_save(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_save(NON_SECURE);
+#endif
+
+ spmd_build_ffa_info_get_regs(ctx, target_uuid, start_index, tag);
+ spmd_logical_sp_set_info_regs_ongoing(ctx);
+
+ rc = spmd_spm_core_sync_entry(ctx);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%lx) on CPU%u\n", __func__, rc,
+ plat_my_core_pos());
+ panic();
+ }
+
+ spmd_logical_sp_reset_info_regs_ongoing(ctx);
+ spmd_encode_ctx_to_ffa_value(ctx, retval);
+
+ assert(is_ffa_error(retval) || is_ffa_success(retval));
+
+ cm_el1_sysregs_context_restore(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_restore(NON_SECURE);
+#endif
+ cm_set_next_eret_context(NON_SECURE);
+ return true;
+#else
+ return false;
+#endif
+}
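As a minimal usage sketch, a caller could pair the two helpers above to
enumerate secure partitions. This assumes ENABLE_SPMD_LP is set and the SPMC
has initialized; the NULL UUID matches all partitions:

    static void example_discover_partitions(void)
    {
            const uint32_t null_uuid[4] = { 0 };  /* NULL UUID: match any partition. */
            struct ffa_value ret;
            struct ffa_partition_info_v1_1 info;

            if (!spmd_el3_invoke_partition_info_get(null_uuid, 0, 0, &ret) ||
                is_ffa_error(&ret)) {
                    return;
            }

            /* Up to 5 entries are packed in x3-x17; extract the first one. */
            if (ffa_partition_info_regs_get_part_info(&ret, 0, &info)) {
                    INFO("SP 0x%x has %u execution context(s)\n",
                         info.ep_id, info.execution_ctx_count);
            }
    }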
+
+/*******************************************************************************
+ * This function sends an FF-A Direct Request from a partition in EL3 to a
+ * partition that may reside under an SPMC (only lower ELs supported). The main
+ * use of this API is for SPMD logical partitions.
+ * The API is expected to be used when there are platform specific SMCs that
+ * need to be routed to a secure partition that is FF-A compliant or when
+ * there are group 0 interrupts that need to be handled first in EL3 and then
+ * forwarded to an FF-A compliant secure partition. Therefore, it is expected
+ * that the handle to the context provided belongs to the non-secure context.
+ * This also means that interrupts/SMCs that trap to EL3 during secure execution
+ * cannot use this API.
+ * x1, x2, x3 and x4 are encoded as specified in the FF-A specification.
+ * retval is used to pass the direct response values to the caller.
+ * The function returns true if retval has valid values, and false otherwise.
+ ******************************************************************************/
+bool spmd_el3_ffa_msg_direct_req(uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *handle,
+ struct ffa_value *retval)
+{
+#if ENABLE_SPMD_LP
+
+ uint64_t rc = UINT64_MAX;
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+
+ if (retval == NULL) {
+ return false;
+ }
+
+ memset(retval, 0, sizeof(*retval));
+
+ if (!is_spmd_lp_inited || !is_spmc_inited) {
+ VERBOSE("Cannot send SPMD logical partition direct message,"
+ " Partitions not initialized or SPMC not initialized.\n");
+ spmd_encode_ffa_error(retval, FFA_ERROR_DENIED);
+ return true;
+ }
+
+	/*
+	 * x2 must be zero, since there is no support for framework messages
+	 * via an SPMD logical partition. The parameter is nevertheless kept
+	 * so that, as the framework evolves, this function can be extended
+	 * with x2-based functionality.
+	 */
+ if (x2 != 0) {
+ VERBOSE("x2 must be zero. Cannot send framework message.\n");
+ spmd_encode_ffa_error(retval, FFA_ERROR_DENIED);
+ return true;
+ }
+
+	/*
+	 * The current context must be non-secure. This API is expected to be
+	 * used when entry into EL3, and then into the SPMD logical partition,
+	 * is via an interrupt that occurs while execution is in the normal
+	 * world, or via an SMC from the normal world. FF-A compliant SPMCs
+	 * are expected to trap interrupts during secure execution in lower
+	 * ELs since they are usually not re-entrant, and SMCs from the secure
+	 * world can be handled synchronously. There is no known use case for
+	 * an SPMD logical partition to send a direct message to another
+	 * partition in response to a secure interrupt or an SMC from the
+	 * secure world.
+	 */
+ if (handle != cm_get_context(NON_SECURE)) {
+ VERBOSE("Handle must be for the non-secure context.\n");
+ spmd_encode_ffa_error(retval, FFA_ERROR_DENIED);
+ return true;
+ }
+
+ if (!is_spmd_lp_id(ffa_endpoint_source(x1))) {
+ VERBOSE("Source ID must be valid SPMD logical partition"
+ " ID.\n");
+ spmd_encode_ffa_error(retval,
+ FFA_ERROR_INVALID_PARAMETER);
+ return true;
+ }
+
+ if (is_spmd_lp_id(ffa_endpoint_destination(x1))) {
+ VERBOSE("Destination ID must not be SPMD logical partition"
+ " ID.\n");
+ spmd_encode_ffa_error(retval,
+ FFA_ERROR_INVALID_PARAMETER);
+ return true;
+ }
+
+ if (!ffa_is_secure_world_id(ffa_endpoint_destination(x1))) {
+ VERBOSE("Destination ID must be secure world ID.\n");
+ spmd_encode_ffa_error(retval,
+ FFA_ERROR_INVALID_PARAMETER);
+ return true;
+ }
+
+ if (ffa_endpoint_destination(x1) == SPMD_DIRECT_MSG_ENDPOINT_ID) {
+ VERBOSE("Destination ID must not be SPMD ID.\n");
+ spmd_encode_ffa_error(retval,
+ FFA_ERROR_INVALID_PARAMETER);
+ return true;
+ }
+
+ if (ffa_endpoint_destination(x1) == spmd_spmc_id_get()) {
+ VERBOSE("Destination ID must not be SPMC ID.\n");
+ spmd_encode_ffa_error(retval,
+ FFA_ERROR_INVALID_PARAMETER);
+ return true;
+ }
+
+ /* Save the non-secure context before entering SPMC */
+ cm_el1_sysregs_context_save(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_save(NON_SECURE);
+#endif
+
+	/*
+	 * Perform a synchronous entry into the SPMC. Synchronous entry is
+	 * required because the spec requires that a direct message request
+	 * from an SPMD LP look like a function call from its perspective.
+	 */
+ spmd_build_direct_message_req(ctx, x1, x2, x3, x4);
+ spmd_logical_sp_set_dir_req_ongoing(ctx);
+
+ rc = spmd_spm_core_sync_entry(ctx);
+
+ spmd_logical_sp_reset_dir_req_ongoing(ctx);
+
+ if (rc != 0ULL) {
+ ERROR("%s failed (%lx) on CPU%u\n", __func__, rc,
+ plat_my_core_pos());
+ panic();
+ } else {
+ spmd_encode_ctx_to_ffa_value(ctx, retval);
+
+		/*
+		 * Only an error or a direct response is expected here;
+		 * spmd_spm_core_sync_exit should not be called on other
+		 * paths. The checks are asserts since the SPMD LP can fail
+		 * gracefully if the source or destination IDs are not the
+		 * same; panicking would not provide any benefit.
+		 */
+ assert(is_ffa_error(retval) || is_ffa_direct_msg_resp(retval));
+ assert(is_ffa_error(retval) ||
+ (ffa_endpoint_destination(retval->arg1) ==
+ ffa_endpoint_source(x1)));
+ assert(is_ffa_error(retval) ||
+ (ffa_endpoint_source(retval->arg1) ==
+ ffa_endpoint_destination(x1)));
+ }
+
+ cm_el1_sysregs_context_restore(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_restore(NON_SECURE);
+#endif
+ cm_set_next_eret_context(NON_SECURE);
+
+ return true;
+#else
+ return false;
+#endif
+}
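For illustration, an SPMD logical partition could send a direct request from,
say, a Group0 interrupt path as sketched below. The endpoint IDs and payload
are placeholders, and 'ns_handle' must be the non-secure context pointer, per
the checks above:

    static void example_send_direct_req(void *ns_handle)
    {
            struct ffa_value ret;
            /* Hypothetical IDs: 0xFFC1 = this SPMD LP, 0x8001 = target SP. */
            uint64_t x1 = ((uint64_t)0xFFC1 << FFA_DIRECT_MSG_SOURCE_SHIFT) |
                          0x8001;

            if (spmd_el3_ffa_msg_direct_req(x1, 0, 0x1234, 0, ns_handle, &ret) &&
                is_ffa_direct_msg_resp(&ret)) {
                    INFO("Direct response received, x3 = 0x%llx\n",
                         (unsigned long long)ret.arg3);
            }
    }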
+
+bool is_spmd_logical_sp_info_regs_req_in_progress(
+ spmd_spm_core_context_t *ctx)
+{
+#if ENABLE_SPMD_LP
+ return ((ctx->spmd_lp_sync_req_ongoing & SPMD_LP_FFA_INFO_GET_REG_ONGOING)
+ == SPMD_LP_FFA_INFO_GET_REG_ONGOING);
+#else
+ return false;
+#endif
+}
+
+bool is_spmd_logical_sp_dir_req_in_progress(
+ spmd_spm_core_context_t *ctx)
+{
+#if ENABLE_SPMD_LP
+ return ((ctx->spmd_lp_sync_req_ongoing & SPMD_LP_FFA_DIR_REQ_ONGOING)
+ == SPMD_LP_FFA_DIR_REQ_ONGOING);
+#else
+ return false;
+#endif
+}
diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c
new file mode 100644
index 0000000..066571e
--- /dev/null
+++ b/services/std_svc/spmd/spmd_main.c
@@ -0,0 +1,1292 @@
+/*
+ * Copyright (c) 2020-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <arch/aarch64/arch_features.h>
+#include <bl31/bl31.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <common/tbbr/tbbr_img_def.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/fconf/fconf.h>
+#include <lib/fconf/fconf_dyn_cfg_getter.h>
+#include <lib/smccc.h>
+#include <lib/spinlock.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+#include <services/el3_spmd_logical_sp.h>
+#include <services/ffa_svc.h>
+#include <services/spmc_svc.h>
+#include <services/spmd_svc.h>
+#include <smccc_helpers.h>
+#include "spmd_private.h"
+
+/*******************************************************************************
+ * SPM Core context information.
+ ******************************************************************************/
+static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * SPM Core attribute information is read from its manifest if the SPMC is not
+ * at EL3. Else, it is populated from the SPMC directly.
+ ******************************************************************************/
+static spmc_manifest_attribute_t spmc_attrs;
+
+/*******************************************************************************
+ * SPM Core entry point information. Discovered on the primary core and reused
+ * on secondary cores.
+ ******************************************************************************/
+static entry_point_info_t *spmc_ep_info;
+
+/*******************************************************************************
+ * Return the SPM Core context of the CPU identified by mpidr.
+ ******************************************************************************/
+spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
+{
+ int core_idx = plat_core_pos_by_mpidr(mpidr);
+
+ if (core_idx < 0) {
+ ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
+ panic();
+ }
+
+ return &spm_core_context[core_idx];
+}
+
+/*******************************************************************************
+ * SPM Core context on current CPU get helper.
+ ******************************************************************************/
+spmd_spm_core_context_t *spmd_get_context(void)
+{
+ return spmd_get_context_by_mpidr(read_mpidr());
+}
+
+/*******************************************************************************
+ * SPM Core ID getter.
+ ******************************************************************************/
+uint16_t spmd_spmc_id_get(void)
+{
+ return spmc_attrs.spmc_id;
+}
+
+/*******************************************************************************
+ * Static function declaration.
+ ******************************************************************************/
+static int32_t spmd_init(void);
+static int spmd_spmc_init(void *pm_addr);
+
+static uint64_t spmd_smc_forward(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+/******************************************************************************
+ * Builds an SPMD to SPMC direct message request.
+ *****************************************************************************/
+void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
+ unsigned long long message)
+{
+ write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
+ write_ctx_reg(gpregs, CTX_GPREG_X1,
+ (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+ spmd_spmc_id_get());
+ write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
+ write_ctx_reg(gpregs, CTX_GPREG_X3, message);
+
+ /* Zero out x4-x7 for the direct request emitted towards the SPMC. */
+ write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
+}
+
+
+/*******************************************************************************
+ * This function takes an SPMC context pointer and performs a synchronous
+ * SPMC entry.
+ ******************************************************************************/
+uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
+{
+ uint64_t rc;
+
+ assert(spmc_ctx != NULL);
+
+ cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);
+
+ /* Restore the context assigned above */
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_restore(SECURE);
+#else
+ cm_el1_sysregs_context_restore(SECURE);
+#endif
+ cm_set_next_eret_context(SECURE);
+
+ /* Enter SPMC */
+ rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);
+
+ /* Save secure state */
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_save(SECURE);
+#else
+ cm_el1_sysregs_context_save(SECURE);
+#endif
+
+ return rc;
+}
+
+/*******************************************************************************
+ * This function returns to the place where spmd_spm_core_sync_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+
+ /* Get current CPU context from SPMC context */
+ assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));
+
+	/*
+	 * The SPMD must have initiated the original request through a
+	 * synchronous entry into the SPMC. Jump back to the original C
+	 * runtime context with the value of rc in x0.
+	 */
+ spmd_spm_core_exit(ctx->c_rt_ctx, rc);
+
+ panic();
+}
+
+/*******************************************************************************
+ * Jump to the SPM Core for the first time.
+ ******************************************************************************/
+static int32_t spmd_init(void)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ uint64_t rc;
+
+ VERBOSE("SPM Core init start.\n");
+
+ /* Primary boot core enters the SPMC for initialization. */
+ ctx->state = SPMC_STATE_ON_PENDING;
+
+ rc = spmd_spm_core_sync_entry(ctx);
+ if (rc != 0ULL) {
+ ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
+ return 0;
+ }
+
+ ctx->state = SPMC_STATE_ON;
+
+ VERBOSE("SPM Core init end.\n");
+
+ spmd_logical_sp_set_spmc_initialized();
+ rc = spmd_logical_sp_init();
+ if (rc != 0) {
+ WARN("SPMD Logical partitions failed init.\n");
+ }
+
+ return 1;
+}
+
+/*******************************************************************************
+ * spmd_secure_interrupt_handler
+ * Enter the SPMC for further handling of the secure interrupt by the SPMC
+ * itself or a Secure Partition.
+ ******************************************************************************/
+static uint64_t spmd_secure_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
+ unsigned int linear_id = plat_my_core_pos();
+ int64_t rc;
+
+ /* Sanity check the security state when the exception was generated */
+ assert(get_interrupt_src_ss(flags) == NON_SECURE);
+
+ /* Sanity check the pointer to this cpu's context */
+ assert(handle == cm_get_context(NON_SECURE));
+
+ /* Save the non-secure context before entering SPMC */
+ cm_el1_sysregs_context_save(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_save(NON_SECURE);
+#endif
+
+ /* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
+ write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
+ write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
+
+ /* Mark current core as handling a secure interrupt. */
+ ctx->secure_interrupt_ongoing = true;
+
+ rc = spmd_spm_core_sync_entry(ctx);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, linear_id);
+ }
+
+ ctx->secure_interrupt_ongoing = false;
+
+ cm_el1_sysregs_context_restore(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_restore(NON_SECURE);
+#endif
+ cm_set_next_eret_context(NON_SECURE);
+
+ SMC_RET0(&ctx->cpu_ctx);
+}
+
+#if (EL3_EXCEPTION_HANDLING == 0)
+/*******************************************************************************
+ * spmd_group0_interrupt_handler_nwd
+ * Group0 secure interrupts in the normal world are trapped to EL3. Delegate
+ * the handling of the interrupt to the platform handler, and return only upon
+ * successfully handling the Group0 interrupt.
+ ******************************************************************************/
+static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ uint32_t intid;
+
+ /* Sanity check the security state when the exception was generated. */
+ assert(get_interrupt_src_ss(flags) == NON_SECURE);
+
+ /* Sanity check the pointer to this cpu's context. */
+ assert(handle == cm_get_context(NON_SECURE));
+
+ assert(id == INTR_ID_UNAVAILABLE);
+
+ assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);
+
+ intid = plat_ic_acknowledge_interrupt();
+
+ if (plat_spmd_handle_group0_interrupt(intid) < 0) {
+ ERROR("Group0 interrupt %u not handled\n", intid);
+ panic();
+ }
+
+ /* Deactivate the corresponding Group0 interrupt. */
+ plat_ic_end_of_interrupt(intid);
+
+ return 0U;
+}
+#endif
+
+/*******************************************************************************
+ * spmd_handle_group0_intr_swd
+ * The SPMC delegates handling of a Group0 secure interrupt to EL3 firmware
+ * using the FFA_EL3_INTR_HANDLE SMC call. The SPMD in turn delegates the
+ * handling of the interrupt to the platform handler, and returns only upon
+ * successfully handling the Group0 interrupt.
+ ******************************************************************************/
+static uint64_t spmd_handle_group0_intr_swd(void *handle)
+{
+ uint32_t intid;
+
+ /* Sanity check the pointer to this cpu's context */
+ assert(handle == cm_get_context(SECURE));
+
+ assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);
+
+ intid = plat_ic_acknowledge_interrupt();
+
+ /*
+ * TODO: Currently due to a limitation in SPMD implementation, the
+ * platform handler is expected to not delegate handling to NWd while
+ * processing Group0 secure interrupt.
+ */
+ if (plat_spmd_handle_group0_interrupt(intid) < 0) {
+ /* Group0 interrupt was not handled by the platform. */
+ ERROR("Group0 interrupt %u not handled\n", intid);
+ panic();
+ }
+
+ /* Deactivate the corresponding Group0 interrupt. */
+ plat_ic_end_of_interrupt(intid);
+
+ /* Return success. */
+ SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+}
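Both Group0 paths funnel into plat_spmd_handle_group0_interrupt(). A minimal
sketch of a platform implementation, with a placeholder interrupt ID; per the
TODO above, the handler must not delegate handling to the normal world:

    /* Hypothetical platform code. */
    #define PLAT_EXAMPLE_G0_INTID	U(9)	/* Placeholder Group0 INTID. */

    int plat_spmd_handle_group0_interrupt(uint32_t intid)
    {
            if (intid == PLAT_EXAMPLE_G0_INTID) {
                    /* Service the interrupt entirely within EL3. */
                    return 0;
            }

            /* Unknown interrupt: a negative return makes the SPMD panic. */
            return -1;
    }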
+
+#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
+static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
+ unsigned int attr, uintptr_t *align_addr,
+ size_t *align_size)
+{
+ uintptr_t base_addr_align;
+ size_t mapped_size_align;
+ int rc;
+
+	/* Page-align the address and size if necessary */
+ base_addr_align = page_align(base_addr, DOWN);
+ mapped_size_align = page_align(size, UP);
+
+ if ((base_addr != base_addr_align) &&
+ (size == mapped_size_align)) {
+ mapped_size_align += PAGE_SIZE;
+ }
+
+ /*
+ * Map dynamically given region with its aligned base address and
+ * size
+ */
+ rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
+ base_addr_align,
+ mapped_size_align,
+ attr);
+ if (rc == 0) {
+ *align_addr = base_addr_align;
+ *align_size = mapped_size_align;
+ }
+
+ return rc;
+}
+
+static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
+ size_t size)
+{
+ uintptr_t root_base_addr_align, sec_base_addr_align;
+ size_t root_mapped_size_align, sec_mapped_size_align;
+ int rc;
+
+ assert(root_base_addr != 0UL);
+ assert(sec_base_addr != 0UL);
+ assert(size != 0UL);
+
+ /* Map the memory with required attributes */
+ rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
+ &root_base_addr_align,
+ &root_mapped_size_align);
+ if (rc != 0) {
+ ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
+ root_base_addr, rc);
+ panic();
+ }
+
+ rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
+ &sec_base_addr_align, &sec_mapped_size_align);
+ if (rc != 0) {
+ ERROR("%s %s %lu (%d)\n", "Error while mapping",
+ "secure region", sec_base_addr, rc);
+ panic();
+ }
+
+ /* Do copy operation */
+ (void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);
+
+ /* Unmap root memory region */
+ rc = mmap_remove_dynamic_region(root_base_addr_align,
+ root_mapped_size_align);
+ if (rc != 0) {
+ ERROR("%s %s %lu (%d)\n", "Error while unmapping",
+ "root region", root_base_addr_align, rc);
+ panic();
+ }
+
+ /* Unmap secure memory region */
+ rc = mmap_remove_dynamic_region(sec_base_addr_align,
+ sec_mapped_size_align);
+ if (rc != 0) {
+ ERROR("%s %s %lu (%d)\n", "Error while unmapping",
+ "secure region", sec_base_addr_align, rc);
+ panic();
+ }
+}
+#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
+
+/*******************************************************************************
+ * Loads SPMC manifest and inits SPMC.
+ ******************************************************************************/
+static int spmd_spmc_init(void *pm_addr)
+{
+ cpu_context_t *cpu_ctx;
+ unsigned int core_id;
+ uint32_t ep_attr, flags;
+ int rc;
+ const struct dyn_cfg_dtb_info_t *image_info __unused;
+
+ /* Load the SPM Core manifest */
+ rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
+ if (rc != 0) {
+ WARN("No or invalid SPM Core manifest image provided by BL2\n");
+ return rc;
+ }
+
+ /*
+ * Ensure that the SPM Core version is compatible with the SPM
+ * Dispatcher version.
+ */
+ if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
+ (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
+ WARN("Unsupported FFA version (%u.%u)\n",
+ spmc_attrs.major_version, spmc_attrs.minor_version);
+ return -EINVAL;
+ }
+
+ VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
+ spmc_attrs.minor_version);
+
+ VERBOSE("SPM Core run time EL%x.\n",
+ SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);
+
+	/* Validate the SPMC ID; ensure the high bit is set */
+ if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
+ SPMC_SECURE_ID_MASK) == 0U) {
+ WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
+ return -EINVAL;
+ }
+
+ /* Validate the SPM Core execution state */
+ if ((spmc_attrs.exec_state != MODE_RW_64) &&
+ (spmc_attrs.exec_state != MODE_RW_32)) {
+ WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
+ spmc_attrs.exec_state);
+ return -EINVAL;
+ }
+
+ VERBOSE("%s%x.\n", "SPM Core execution state 0x",
+ spmc_attrs.exec_state);
+
+#if SPMD_SPM_AT_SEL2
+ /* Ensure manifest has not requested AArch32 state in S-EL2 */
+ if (spmc_attrs.exec_state == MODE_RW_32) {
+ WARN("AArch32 state at S-EL2 is not supported.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Check if S-EL2 is supported on this system if S-EL2
+ * is required for SPM
+ */
+ if (!is_feat_sel2_supported()) {
+ WARN("SPM Core run time S-EL2 is not supported.\n");
+ return -EINVAL;
+ }
+#endif /* SPMD_SPM_AT_SEL2 */
+
+ /* Initialise an entrypoint to set up the CPU context */
+ ep_attr = SECURE | EP_ST_ENABLE;
+ if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
+ ep_attr |= EP_EE_BIG;
+ }
+
+ SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
+
+ /*
+ * Populate SPSR for SPM Core based upon validated parameters from the
+ * manifest.
+ */
+ if (spmc_attrs.exec_state == MODE_RW_32) {
+ spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
+ SPSR_E_LITTLE,
+ DAIF_FIQ_BIT |
+ DAIF_IRQ_BIT |
+ DAIF_ABT_BIT);
+ } else {
+
+#if SPMD_SPM_AT_SEL2
+ static const uint32_t runtime_el = MODE_EL2;
+#else
+ static const uint32_t runtime_el = MODE_EL1;
+#endif
+ spmc_ep_info->spsr = SPSR_64(runtime_el,
+ MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+ }
+
+#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
+ image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
+ assert(image_info != NULL);
+
+ if ((image_info->config_addr == 0UL) ||
+ (image_info->secondary_config_addr == 0UL) ||
+ (image_info->config_max_size == 0UL)) {
+ return -EINVAL;
+ }
+
+ /* Copy manifest from root->secure region */
+ spmd_do_sec_cpy(image_info->config_addr,
+ image_info->secondary_config_addr,
+ image_info->config_max_size);
+
+ /* Update ep info of BL32 */
+ assert(spmc_ep_info != NULL);
+ spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
+#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
+
+ /* Set an initial SPMC context state for all cores. */
+ for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
+ spm_core_context[core_id].state = SPMC_STATE_OFF;
+
+ /* Setup an initial cpu context for the SPMC. */
+ cpu_ctx = &spm_core_context[core_id].cpu_ctx;
+ cm_setup_context(cpu_ctx, spmc_ep_info);
+
+ /*
+ * Pass the core linear ID to the SPMC through x4.
+ * (TF-A implementation defined behavior helping
+ * a legacy TOS migration to adopt FF-A).
+ */
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
+ }
+
+ /* Register power management hooks with PSCI */
+ psci_register_spd_pm_hook(&spmd_pm);
+
+ /* Register init function for deferred init. */
+ bl31_register_bl32_init(&spmd_init);
+
+ INFO("SPM Core setup done.\n");
+
+ /*
+ * Register an interrupt handler routing secure interrupts to SPMD
+ * while the NWd is running.
+ */
+ flags = 0;
+ set_interrupt_rm_flag(flags, NON_SECURE);
+ rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+ spmd_secure_interrupt_handler,
+ flags);
+ if (rc != 0) {
+ panic();
+ }
+
+	/*
+	 * Permit configurations where the SPM resides at S-EL1/2. Upon a
+	 * Group0 interrupt triggering while the normal world runs, the
+	 * interrupt is routed either through the EHF or directly to the SPMD:
+	 *
+	 * EL3_EXCEPTION_HANDLING=0: the Group0 interrupt is routed to the SPMD
+	 * for handling by spmd_group0_interrupt_handler_nwd.
+	 *
+	 * EL3_EXCEPTION_HANDLING=1: the Group0 interrupt is routed to the EHF.
+	 */
+#if (EL3_EXCEPTION_HANDLING == 0)
+ /*
+ * Register an interrupt handler routing Group0 interrupts to SPMD
+ * while the NWd is running.
+ */
+ rc = register_interrupt_type_handler(INTR_TYPE_EL3,
+ spmd_group0_interrupt_handler_nwd,
+ flags);
+ if (rc != 0) {
+ panic();
+ }
+#endif
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Initialize context of SPM Core.
+ ******************************************************************************/
+int spmd_setup(void)
+{
+ int rc;
+ void *spmc_manifest;
+
+ /*
+ * If the SPMC is at EL3, then just initialise it directly. The
+ * shenanigans of when it is at a lower EL are not needed.
+ */
+ if (is_spmc_at_el3()) {
+ /* Allow the SPMC to populate its attributes directly. */
+ spmc_populate_attrs(&spmc_attrs);
+
+ rc = spmc_setup();
+ if (rc != 0) {
+ WARN("SPMC initialisation failed 0x%x.\n", rc);
+ }
+ return 0;
+ }
+
+ spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (spmc_ep_info == NULL) {
+ WARN("No SPM Core image provided by BL2 boot loader.\n");
+ return 0;
+ }
+
+ /* Under no circumstances will this parameter be 0 */
+ assert(spmc_ep_info->pc != 0ULL);
+
+ /*
+ * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
+ * be used as a manifest for the SPM Core at the next lower EL/mode.
+ */
+ spmc_manifest = (void *)spmc_ep_info->args.arg0;
+ if (spmc_manifest == NULL) {
+ WARN("Invalid or absent SPM Core manifest.\n");
+ return 0;
+ }
+
+ /* Load manifest, init SPMC */
+ rc = spmd_spmc_init(spmc_manifest);
+ if (rc != 0) {
+ WARN("Booting device without SPM initialization.\n");
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Forward FF-A SMCs to the other security state.
+ ******************************************************************************/
+uint64_t spmd_smc_switch_state(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *handle)
+{
+ unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
+ unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
+
+ /* Save incoming security state */
+#if SPMD_SPM_AT_SEL2
+ if (secure_state_in == NON_SECURE) {
+ cm_el1_sysregs_context_save(secure_state_in);
+ }
+ cm_el2_sysregs_context_save(secure_state_in);
+#else
+ cm_el1_sysregs_context_save(secure_state_in);
+#endif
+
+ /* Restore outgoing security state */
+#if SPMD_SPM_AT_SEL2
+ if (secure_state_out == NON_SECURE) {
+ cm_el1_sysregs_context_restore(secure_state_out);
+ }
+ cm_el2_sysregs_context_restore(secure_state_out);
+#else
+ cm_el1_sysregs_context_restore(secure_state_out);
+#endif
+ cm_set_next_eret_context(secure_state_out);
+
+#if SPMD_SPM_AT_SEL2
+	/*
+	 * If the SPMC is at S-EL2, save the additional registers x8-x17,
+	 * which may be used in FF-A calls such as FFA_PARTITION_INFO_GET_REGS.
+	 * Note that technically all SPMCs can support this, but this code is
+	 * under an ifdef to minimize breakage in case other SPMCs do not save
+	 * and restore x8-x17.
+	 * These registers also need to be passed through: not all FF-A ABIs
+	 * modify x8-x17, in which case SMCCC requires that they be preserved.
+	 * The SPMD therefore passes them through and expects the SPMC to save
+	 * and restore (and potentially also modify) them.
+	 */
+ SMC_RET18(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7),
+ SMC_GET_GP(handle, CTX_GPREG_X8),
+ SMC_GET_GP(handle, CTX_GPREG_X9),
+ SMC_GET_GP(handle, CTX_GPREG_X10),
+ SMC_GET_GP(handle, CTX_GPREG_X11),
+ SMC_GET_GP(handle, CTX_GPREG_X12),
+ SMC_GET_GP(handle, CTX_GPREG_X13),
+ SMC_GET_GP(handle, CTX_GPREG_X14),
+ SMC_GET_GP(handle, CTX_GPREG_X15),
+ SMC_GET_GP(handle, CTX_GPREG_X16),
+ SMC_GET_GP(handle, CTX_GPREG_X17)
+ );
+
+#else
+ SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+#endif
+}
+
+/*******************************************************************************
+ * Forward SMCs to the other security state.
+ ******************************************************************************/
+static uint64_t spmd_smc_forward(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ if (is_spmc_at_el3() && !secure_origin) {
+ return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+ }
+ return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
+ handle);
+
+}
+
+/*******************************************************************************
+ * Return FFA_ERROR with specified error code
+ ******************************************************************************/
+uint64_t spmd_ffa_error_return(void *handle, int error_code)
+{
+ SMC_RET8(handle, (uint32_t) FFA_ERROR,
+ FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+}
+
+/*******************************************************************************
+ * spmd_check_address_in_binary_image
+ ******************************************************************************/
+bool spmd_check_address_in_binary_image(uint64_t address)
+{
+ assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));
+
+ return ((address >= spmc_attrs.load_address) &&
+ (address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
+}
+
+/******************************************************************************
+ * spmd_is_spmc_message
+ *****************************************************************************/
+static bool spmd_is_spmc_message(unsigned int ep)
+{
+ if (is_spmc_at_el3()) {
+ return false;
+ }
+
+ return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
+ && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
+}
+
+/******************************************************************************
+ * spmd_handle_spmc_message
+ *****************************************************************************/
+static int spmd_handle_spmc_message(unsigned long long msg,
+ unsigned long long parm1, unsigned long long parm2,
+ unsigned long long parm3, unsigned long long parm4)
+{
+ VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
+ msg, parm1, parm2, parm3, parm4);
+
+ return -EINVAL;
+}
+
+/*******************************************************************************
+ * This function forwards FF-A SMCs to either the main SPMD handler or, if
+ * enabled, the SPMC at EL3, depending on the origin security state.
+ ******************************************************************************/
+uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ if (is_spmc_at_el3()) {
+ /*
+ * If we have an SPMC at EL3 allow handling of the SMC first.
+ * The SPMC will call back through to SPMD handler if required.
+ */
+ if (is_caller_secure(flags)) {
+ return spmc_smc_handler(smc_fid,
+ is_caller_secure(flags),
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+ }
+ return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+ handle, flags);
+}
+
+/*******************************************************************************
+ * This function handles all SMCs in the range reserved for FFA. Each call is
+ * either forwarded to the other security state or handled by the SPM dispatcher
+ ******************************************************************************/
+uint64_t spmd_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ unsigned int linear_id = plat_my_core_pos();
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ bool secure_origin;
+ int32_t ret;
+ uint32_t input_version;
+
+ /* Determine which security state this SMC originated from */
+ secure_origin = is_caller_secure(flags);
+
+ VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
+ " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
+ linear_id, smc_fid, x1, x2, x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+
+	/*
+	 * If an info-regs request from an EL3 SPMD LP is ongoing, return
+	 * unconditionally; we don't expect any other FF-A ABIs to be called
+	 * between calls to FFA_PARTITION_INFO_GET_REGS.
+	 */
+ if (is_spmd_logical_sp_info_regs_req_in_progress(ctx)) {
+ assert(secure_origin);
+ spmd_spm_core_sync_exit(0ULL);
+ }
+
+ switch (smc_fid) {
+ case FFA_ERROR:
+ /*
+ * Check if this is the first invocation of this interface on
+ * this CPU. If so, then indicate that the SPM Core initialised
+ * unsuccessfully.
+ */
+ if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
+ spmd_spm_core_sync_exit(x2);
+ }
+
+		/*
+		 * If an SPMD logical partition direct request was ongoing,
+		 * return to the SPMD logical partition so the error can be
+		 * consumed.
+		 */
+ if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
+ assert(secure_origin);
+ spmd_spm_core_sync_exit(0ULL);
+ }
+
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ break; /* not reached */
+
+ case FFA_VERSION:
+ input_version = (uint32_t)(0xFFFFFFFF & x1);
+		/*
+		 * If the caller is secure and the SPMC was initialized,
+		 * return the FFA_VERSION of the SPMD.
+		 * If the caller is non-secure and the SPMC was initialized,
+		 * forward to the EL3 SPMC if enabled, otherwise return
+		 * the SPMC version if implemented at a lower EL.
+		 * Sanity check 'input_version'.
+		 * If the EL3 SPMC is enabled, ignore the SPMC state as
+		 * this is not used.
+		 */
+ if ((input_version & FFA_VERSION_BIT31_MASK) ||
+ (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
+ ret = FFA_ERROR_NOT_SUPPORTED;
+ } else if (!secure_origin) {
+ if (is_spmc_at_el3()) {
+ /*
+ * Forward the call directly to the EL3 SPMC, if
+ * enabled, as we don't need to wrap the call in
+ * a direct request.
+ */
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+
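+			/*
+			 * Operate on the SPMC's saved GP register context
+			 * for this CPU when crafting the direct request.
+			 */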
+ gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
+ uint64_t rc;
+
+ if (spmc_attrs.major_version == 1 &&
+ spmc_attrs.minor_version == 0) {
+ ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
+ spmc_attrs.minor_version);
+ SMC_RET8(handle, (uint32_t)ret,
+ FFA_TARGET_INFO_MBZ,
+ FFA_TARGET_INFO_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ break;
+ }
+ /* Save non-secure system registers context */
+ cm_el1_sysregs_context_save(NON_SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_save(NON_SECURE);
+#endif
+
+ /*
+ * The incoming request has FFA_VERSION as X0 smc_fid
+ * and requested version in x1. Prepare a direct request
+ * from SPMD to SPMC with FFA_VERSION framework function
+ * identifier in X2 and requested version in X3.
+ */
+ spmd_build_spmc_message(gpregs,
+ SPMD_FWK_MSG_FFA_VERSION_REQ,
+ input_version);
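+
+			/*
+			 * Illustrative register layout of the resulting
+			 * direct request (see spmd_build_spmc_message):
+			 *   X2: FFA_FWK_MSG_BIT | SPMD_FWK_MSG_FFA_VERSION_REQ
+			 *   X3: input_version
+			 */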
+
+ /*
+ * Ensure x8-x17 NS GP register values are untouched when returning
+ * from the SPMC.
+ */
+ write_ctx_reg(gpregs, CTX_GPREG_X8, SMC_GET_GP(handle, CTX_GPREG_X8));
+ write_ctx_reg(gpregs, CTX_GPREG_X9, SMC_GET_GP(handle, CTX_GPREG_X9));
+ write_ctx_reg(gpregs, CTX_GPREG_X10, SMC_GET_GP(handle, CTX_GPREG_X10));
+ write_ctx_reg(gpregs, CTX_GPREG_X11, SMC_GET_GP(handle, CTX_GPREG_X11));
+ write_ctx_reg(gpregs, CTX_GPREG_X12, SMC_GET_GP(handle, CTX_GPREG_X12));
+ write_ctx_reg(gpregs, CTX_GPREG_X13, SMC_GET_GP(handle, CTX_GPREG_X13));
+ write_ctx_reg(gpregs, CTX_GPREG_X14, SMC_GET_GP(handle, CTX_GPREG_X14));
+ write_ctx_reg(gpregs, CTX_GPREG_X15, SMC_GET_GP(handle, CTX_GPREG_X15));
+ write_ctx_reg(gpregs, CTX_GPREG_X16, SMC_GET_GP(handle, CTX_GPREG_X16));
+ write_ctx_reg(gpregs, CTX_GPREG_X17, SMC_GET_GP(handle, CTX_GPREG_X17));
+
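+			/*
+			 * Enter the SPMC synchronously; execution resumes
+			 * here once the SPMC replies with a direct response.
+			 */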
+ rc = spmd_spm_core_sync_entry(ctx);
+
+ if ((rc != 0ULL) ||
+ (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
+ FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
+ (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
+ (FFA_FWK_MSG_BIT |
+ SPMD_FWK_MSG_FFA_VERSION_RESP))) {
+ ERROR("Failed to forward FFA_VERSION\n");
+ ret = FFA_ERROR_NOT_SUPPORTED;
+ } else {
+ ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
+ }
+
+ /*
+ * x0-x4 are updated by spmd_smc_forward below.
+ * Zero out x5-x7 in the FFA_VERSION response.
+ */
+ write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
+
+ /*
+			 * Return here after the SPMC has handled FFA_VERSION.
+ * The returned SPMC version is held in X3.
+ * Forward this version in X0 to the non-secure caller.
+ */
+ return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, cookie, gpregs,
+ flags);
+ } else {
+ ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
+ FFA_VERSION_MINOR);
+ }
+
+ SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
+ FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+ break; /* not reached */
+
+ case FFA_FEATURES:
+ /*
+ * This is an optional interface. Do the minimal checks and
+ * forward to SPM Core which will handle it if implemented.
+ */
+
+ /* Forward SMC from Normal world to the SPM Core */
+ if (!secure_origin) {
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+
+ /*
+		 * Return success if the call was from the secure world, i.e.
+		 * all FF-A functions are supported. This is essentially a
+		 * no-op.
+ */
+ SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+
+ break; /* not reached */
+
+ case FFA_ID_GET:
+ /*
+	 * Returns the ID of the calling FF-A component.
+ */
+ if (!secure_origin) {
+ SMC_RET8(handle, FFA_SUCCESS_SMC32,
+ FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ }
+
+ SMC_RET8(handle, FFA_SUCCESS_SMC32,
+ FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+
+ break; /* not reached */
+
+ case FFA_SECONDARY_EP_REGISTER_SMC64:
+ if (secure_origin) {
+ ret = spmd_pm_secondary_ep_register(x1);
+
+ if (ret < 0) {
+ SMC_RET8(handle, FFA_ERROR_SMC64,
+ FFA_TARGET_INFO_MBZ, ret,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ } else {
+ SMC_RET8(handle, FFA_SUCCESS_SMC64,
+ FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ }
+ }
+
+ return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+		break; /* not reached */
+
+ case FFA_SPM_ID_GET:
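+		/* FFA_SPM_ID_GET is only available from FF-A v1.1 onwards. */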
+ if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+ /*
+ * Returns the ID of the SPMC or SPMD depending on the FF-A
+		 * instance where this function is invoked.
+ */
+ if (!secure_origin) {
+ SMC_RET8(handle, FFA_SUCCESS_SMC32,
+ FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ }
+ SMC_RET8(handle, FFA_SUCCESS_SMC32,
+ FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+
+ break; /* not reached */
+
+ case FFA_MSG_SEND_DIRECT_REQ_SMC32:
+ case FFA_MSG_SEND_DIRECT_REQ_SMC64:
+ /*
+ * Regardless of secure_origin, SPMD logical partitions cannot
+ * handle direct messages. They can only initiate direct
+ * messages and consume direct responses or errors.
+ */
+ if (is_spmd_lp_id(ffa_endpoint_source(x1)) ||
+ is_spmd_lp_id(ffa_endpoint_destination(x1))) {
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER
+ );
+ }
+
+ /*
+		 * When an SPMD logical partition direct request is ongoing,
+		 * another direct request cannot be issued; return an error
+		 * in that case. Panicking is an option, but it would deny
+		 * the caller the opportunity to abort based on the error
+		 * code.
+ */
+ if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
+ assert(secure_origin);
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_DENIED);
+ }
+
+ if (!secure_origin) {
+ /* Validate source endpoint is non-secure for non-secure caller. */
+ if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+ }
+ if (secure_origin && spmd_is_spmc_message(x1)) {
+ ret = spmd_handle_spmc_message(x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+
+ SMC_RET8(handle, FFA_SUCCESS_SMC32,
+ FFA_TARGET_INFO_MBZ, ret,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ } else {
+ /* Forward direct message to the other world */
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+		break; /* not reached */
+
+ case FFA_MSG_SEND_DIRECT_RESP_SMC32:
+ case FFA_MSG_SEND_DIRECT_RESP_SMC64:
+ if (secure_origin && (spmd_is_spmc_message(x1) ||
+ is_spmd_logical_sp_dir_req_in_progress(ctx))) {
+ spmd_spm_core_sync_exit(0ULL);
+ } else {
+ /* Forward direct message to the other world */
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+		break; /* not reached */
+
+ case FFA_RX_RELEASE:
+ case FFA_RXTX_MAP_SMC32:
+ case FFA_RXTX_MAP_SMC64:
+ case FFA_RXTX_UNMAP:
+ case FFA_PARTITION_INFO_GET:
+#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
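+	/* The following ABIs were introduced in FF-A v1.1. */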
+ case FFA_NOTIFICATION_BITMAP_CREATE:
+ case FFA_NOTIFICATION_BITMAP_DESTROY:
+ case FFA_NOTIFICATION_BIND:
+ case FFA_NOTIFICATION_UNBIND:
+ case FFA_NOTIFICATION_SET:
+ case FFA_NOTIFICATION_GET:
+ case FFA_NOTIFICATION_INFO_GET:
+ case FFA_NOTIFICATION_INFO_GET_SMC64:
+ case FFA_MSG_SEND2:
+ case FFA_RX_ACQUIRE:
+#endif
+ case FFA_MSG_RUN:
+ /*
+		 * The above calls must be invoked only by the Normal world
+		 * and must not be forwarded from the Secure world to the
+		 * Normal world.
+ */
+ if (secure_origin) {
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Forward the call to the other world */
+ /* fallthrough */
+ case FFA_MSG_SEND:
+ case FFA_MEM_DONATE_SMC32:
+ case FFA_MEM_DONATE_SMC64:
+ case FFA_MEM_LEND_SMC32:
+ case FFA_MEM_LEND_SMC64:
+ case FFA_MEM_SHARE_SMC32:
+ case FFA_MEM_SHARE_SMC64:
+ case FFA_MEM_RETRIEVE_REQ_SMC32:
+ case FFA_MEM_RETRIEVE_REQ_SMC64:
+ case FFA_MEM_RETRIEVE_RESP:
+ case FFA_MEM_RELINQUISH:
+ case FFA_MEM_RECLAIM:
+ case FFA_MEM_FRAG_TX:
+ case FFA_MEM_FRAG_RX:
+ case FFA_SUCCESS_SMC32:
+ case FFA_SUCCESS_SMC64:
+ /*
+ * If there is an ongoing direct request from an SPMD logical
+ * partition, return an error.
+ */
+ if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
+ assert(secure_origin);
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_DENIED);
+ }
+
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ break; /* not reached */
+
+ case FFA_MSG_WAIT:
+ /*
+ * Check if this is the first invocation of this interface on
+ * this CPU from the Secure world. If so, then indicate that the
+ * SPM Core initialised successfully.
+ */
+ if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
+ spmd_spm_core_sync_exit(0ULL);
+ }
+
+ /* Forward the call to the other world */
+ /* fallthrough */
+ case FFA_INTERRUPT:
+ case FFA_MSG_YIELD:
+		/* These interfaces must be invoked only by the Secure world. */
+ if (!secure_origin) {
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
+ assert(secure_origin);
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_DENIED);
+ }
+
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+ break; /* not reached */
+
+ case FFA_NORMAL_WORLD_RESUME:
+ if (secure_origin && ctx->secure_interrupt_ongoing) {
+ spmd_spm_core_sync_exit(0ULL);
+ } else {
+ return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+		break; /* not reached */
+#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
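+	/* FFA_PARTITION_INFO_GET_REGS was introduced in FF-A v1.1. */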
+ case FFA_PARTITION_INFO_GET_REGS_SMC64:
+ if (secure_origin) {
+ return spmd_el3_populate_logical_partition_info(handle, x1,
+ x2, x3);
+ }
+
+ /* Call only supported with SMCCC 1.2+ */
+ if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) {
+ return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, cookie,
+ handle, flags);
+		break; /* not reached */
+#endif
+ case FFA_EL3_INTR_HANDLE:
+ if (secure_origin) {
+ return spmd_handle_group0_intr_swd(handle);
+ } else {
+ return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+ default:
+ WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
+ return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+}
diff --git a/services/std_svc/spmd/spmd_pm.c b/services/std_svc/spmd/spmd_pm.c
new file mode 100644
index 0000000..fd89c81
--- /dev/null
+++ b/services/std_svc/spmd/spmd_pm.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdint.h>
+
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/spinlock.h>
+#include "spmd_private.h"
+
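+/*
+ * Secondary core entry point registered by the SPMC via
+ * FFA_SECONDARY_EP_REGISTER. The spinlock serializes registration against
+ * concurrent power management operations on other cores.
+ */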
+static struct {
+ bool secondary_ep_locked;
+ uintptr_t secondary_ep;
+ spinlock_t lock;
+} g_spmd_pm;
+
+/*******************************************************************************
+ * spmd_pm_secondary_ep_register
+ ******************************************************************************/
+int spmd_pm_secondary_ep_register(uintptr_t entry_point)
+{
+ int ret = FFA_ERROR_INVALID_PARAMETER;
+
+ spin_lock(&g_spmd_pm.lock);
+
+ if (g_spmd_pm.secondary_ep_locked == true) {
+ goto out;
+ }
+
+ /*
+	 * Check that the entry_point address is a PA within
+	 * load_address <= entry_point < load_address + binary_size.
+ */
+ if (!spmd_check_address_in_binary_image(entry_point)) {
+ ERROR("%s entry point is not within image boundaries\n",
+ __func__);
+ goto out;
+ }
+
+ g_spmd_pm.secondary_ep = entry_point;
+ g_spmd_pm.secondary_ep_locked = true;
+
+ VERBOSE("%s %lx\n", __func__, entry_point);
+
+ ret = 0;
+
+out:
+ spin_unlock(&g_spmd_pm.lock);
+
+ return ret;
+}
+
+/*******************************************************************************
+ * This CPU has been turned on. Enter the SPMC to initialise S-EL1 or S-EL2. As
+ * part of its initialization path, the SPMC will initialize any SPs that it
+ * manages. Entry into the SPMC is done after initialising minimal
+ * architectural state that guarantees safe execution.
+ ******************************************************************************/
+static void spmd_cpu_on_finish_handler(u_register_t unused)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ unsigned int linear_id = plat_my_core_pos();
+ el3_state_t *el3_state;
+ uintptr_t entry_point;
+ uint64_t rc;
+
+ assert(ctx != NULL);
+ assert(ctx->state != SPMC_STATE_ON);
+
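+	/* Serialize against a concurrent FFA_SECONDARY_EP_REGISTER call. */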
+ spin_lock(&g_spmd_pm.lock);
+
+ /*
+	 * Allow for the possibility that the SPMC did not call
+	 * FFA_SECONDARY_EP_REGISTER, in which case the primary core
+	 * entry point address is re-used for booting secondary cores.
+ */
+ if (g_spmd_pm.secondary_ep_locked == true) {
+ /*
+ * The CPU context has already been initialized at boot time
+		 * (in spmd_spmc_init by a call to cm_setup_context). Adjust
+		 * the target core entry point below, based on the address
+		 * passed via FFA_SECONDARY_EP_REGISTER.
+ */
+ entry_point = g_spmd_pm.secondary_ep;
+ el3_state = get_el3state_ctx(&ctx->cpu_ctx);
+ write_ctx_reg(el3_state, CTX_ELR_EL3, entry_point);
+ }
+
+ spin_unlock(&g_spmd_pm.lock);
+
+ /* Mark CPU as initiating ON operation. */
+ ctx->state = SPMC_STATE_ON_PENDING;
+
+ rc = spmd_spm_core_sync_entry(ctx);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%" PRIu64 ") on CPU%u\n", __func__, rc,
+ linear_id);
+ ctx->state = SPMC_STATE_OFF;
+ return;
+ }
+
+ ctx->state = SPMC_STATE_ON;
+
+ VERBOSE("CPU %u on!\n", linear_id);
+}
+
+/*******************************************************************************
+ * spmd_cpu_off_handler
+ ******************************************************************************/
+static int32_t spmd_cpu_off_handler(u_register_t unused)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ unsigned int linear_id = plat_my_core_pos();
+ int64_t rc;
+
+ assert(ctx != NULL);
+ assert(ctx->state != SPMC_STATE_OFF);
+
+ /* Build an SPMD to SPMC direct message request. */
+ gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
+ spmd_build_spmc_message(gpregs, FFA_FWK_MSG_PSCI, PSCI_CPU_OFF);
+
+	/* Clear remaining x8-x17 at the EL3/S-EL2 or EL3/S-EL1 boundary. */
+ write_ctx_reg(gpregs, CTX_GPREG_X8, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X9, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X10, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X11, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X12, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X13, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X14, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X15, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X16, 0);
+ write_ctx_reg(gpregs, CTX_GPREG_X17, 0);
+
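+	/* Enter the SPMC synchronously to deliver the CPU_OFF message. */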
+ rc = spmd_spm_core_sync_entry(ctx);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%" PRIu64 ") on CPU%u\n", __func__, rc, linear_id);
+ }
+
+ /* Expect a direct message response from the SPMC. */
+ u_register_t ffa_resp_func = read_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx),
+ CTX_GPREG_X0);
+ if (ffa_resp_func != FFA_MSG_SEND_DIRECT_RESP_SMC32) {
+ ERROR("%s invalid SPMC response (%lx).\n",
+ __func__, ffa_resp_func);
+ return -EINVAL;
+ }
+
+ ctx->state = SPMC_STATE_OFF;
+
+ VERBOSE("CPU %u off!\n", linear_id);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Structure populated by the SPM Dispatcher to perform any bookkeeping before
+ * PSCI executes a power management operation.
+ ******************************************************************************/
+const spd_pm_ops_t spmd_pm = {
+ .svc_on_finish = spmd_cpu_on_finish_handler,
+ .svc_off = spmd_cpu_off_handler
+};
diff --git a/services/std_svc/spmd/spmd_private.h b/services/std_svc/spmd/spmd_private.h
new file mode 100644
index 0000000..fef7ef6
--- /dev/null
+++ b/services/std_svc/spmd/spmd_private.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMD_PRIVATE_H
+#define SPMD_PRIVATE_H
+
+#include <common/bl_common.h>
+#include <context.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define SPMD_C_RT_CTX_X19 0x0
+#define SPMD_C_RT_CTX_X20 0x8
+#define SPMD_C_RT_CTX_X21 0x10
+#define SPMD_C_RT_CTX_X22 0x18
+#define SPMD_C_RT_CTX_X23 0x20
+#define SPMD_C_RT_CTX_X24 0x28
+#define SPMD_C_RT_CTX_X25 0x30
+#define SPMD_C_RT_CTX_X26 0x38
+#define SPMD_C_RT_CTX_X27 0x40
+#define SPMD_C_RT_CTX_X28 0x48
+#define SPMD_C_RT_CTX_X29 0x50
+#define SPMD_C_RT_CTX_X30 0x58
+
+#define SPMD_C_RT_CTX_SIZE 0x60
+#define SPMD_C_RT_CTX_ENTRIES (SPMD_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+#include <stdint.h>
+#include <lib/psci/psci_lib.h>
+#include <plat/common/platform.h>
+#include <services/ffa_svc.h>
+
+typedef enum spmc_state {
+ SPMC_STATE_RESET = 0,
+ SPMC_STATE_OFF,
+ SPMC_STATE_ON_PENDING,
+ SPMC_STATE_ON
+} spmc_state_t;
+
+/*
+ * Data structure used by the SPM Dispatcher (SPMD) in EL3 to track the context
+ * of the SPM Core (SPMC) at the next lower EL.
+ */
+typedef struct spmd_spm_core_context {
+	uint64_t c_rt_ctx;		/* Stashed EL3 C runtime stack context */
+	cpu_context_t cpu_ctx;		/* Saved SPMC CPU context */
+	spmc_state_t state;		/* SPMC state on this core */
+	bool secure_interrupt_ongoing;	/* Secure interrupt being handled */
+#if ENABLE_SPMD_LP
+	uint8_t spmd_lp_sync_req_ongoing; /* Ongoing SPMD LP request flags */
+#endif
+} spmd_spm_core_context_t;
+
+/* Flags to indicate ongoing requests for SPMD EL3 logical partitions */
+#define SPMD_LP_FFA_DIR_REQ_ONGOING U(0x1)
+#define SPMD_LP_FFA_INFO_GET_REG_ONGOING U(0x2)
+
+/*
+ * Reserved ID for the NS physical FF-A endpoint.
+ */
+#define FFA_NS_ENDPOINT_ID U(0)
+
+/* Define SPMD target function IDs for framework messages to the SPMC */
+#define SPMD_FWK_MSG_FFA_VERSION_REQ U(0x8)
+#define SPMD_FWK_MSG_FFA_VERSION_RESP U(0x9)
+
+/* Function to build SPMD to SPMC message */
+void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target,
+ unsigned long long message);
+
+/* Functions used to enter/exit SPMC synchronously */
+uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *ctx);
+__dead2 void spmd_spm_core_sync_exit(uint64_t rc);
+
+/* Assembly helpers */
+uint64_t spmd_spm_core_enter(uint64_t *c_rt_ctx);
+void __dead2 spmd_spm_core_exit(uint64_t c_rt_ctx, uint64_t ret);
+
+/* SPMD SPD power management handlers */
+extern const spd_pm_ops_t spmd_pm;
+
+/* SPMC entry point information helper */
+entry_point_info_t *spmd_spmc_ep_info_get(void);
+
+/* SPMC ID getter */
+uint16_t spmd_spmc_id_get(void);
+
+/* Get the SPMC context on the CPU identified by mpidr */
+spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr);
+
+/* Helper to get the SPMC context on the current CPU */
+spmd_spm_core_context_t *spmd_get_context(void);
+
+int spmd_pm_secondary_ep_register(uintptr_t entry_point);
+bool spmd_check_address_in_binary_image(uint64_t address);
+
+/*
+ * Platform hook in EL3 firmware to handle a Group0 secure interrupt.
+ * Return values:
+ *  0 = success
+ *  negative value = failure
+ */
+int plat_spmd_handle_group0_interrupt(uint32_t id);
+
+uint64_t spmd_ffa_error_return(void *handle, int error_code);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* SPMD_PRIVATE_H */