Diffstat (limited to 'services/std_svc/spm')
-rw-r--r--  services/std_svc/spm/common/aarch64/spm_helpers.S                74
-rw-r--r--  services/std_svc/spm/common/include/spm_common.h                  42
-rw-r--r--  services/std_svc/spm/common/spm.mk                                17
-rw-r--r--  services/std_svc/spm/el3_spmc/logical_sp.c                       107
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc.h                             296
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc.mk                             44
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc_main.c                       1995
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc_pm.c                          283
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc_setup.c                       278
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc_shared_mem.c                 1861
-rw-r--r--  services/std_svc/spm/el3_spmc/spmc_shared_mem.h                  115
-rw-r--r--  services/std_svc/spm/spm_mm/aarch64/spm_mm_shim_exceptions.S     128
-rw-r--r--  services/std_svc/spm/spm_mm/spm_mm.mk                             34
-rw-r--r--  services/std_svc/spm/spm_mm/spm_mm_main.c                        370
-rw-r--r--  services/std_svc/spm/spm_mm/spm_mm_private.h                      69
-rw-r--r--  services/std_svc/spm/spm_mm/spm_mm_setup.c                       260
-rw-r--r--  services/std_svc/spm/spm_mm/spm_mm_shim_private.h                 26
-rw-r--r--  services/std_svc/spm/spm_mm/spm_mm_xlat.c                        159
18 files changed, 6158 insertions, 0 deletions
diff --git a/services/std_svc/spm/common/aarch64/spm_helpers.S b/services/std_svc/spm/common/aarch64/spm_helpers.S
new file mode 100644
index 0000000..95e69fb
--- /dev/null
+++ b/services/std_svc/spm/common/aarch64/spm_helpers.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "spm_common.h"
+
+ .global spm_secure_partition_enter
+ .global spm_secure_partition_exit
+
+ /* ---------------------------------------------------------------------
+	 * This function is called with SP_EL0 as the stack. Here we stash the
+	 * EL3 callee-saved registers onto the stack as part of saving the C
+	 * runtime context and enter the secure payload.
+ * 'x0' contains a pointer to the memory where the address of the C
+ * runtime context is to be saved.
+ * ---------------------------------------------------------------------
+ */
+func spm_secure_partition_enter
+ /* Make space for the registers that we're going to save */
+ mov x3, sp
+ str x3, [x0, #0]
+ sub sp, sp, #SP_C_RT_CTX_SIZE
+
+ /* Save callee-saved registers on to the stack */
+ stp x19, x20, [sp, #SP_C_RT_CTX_X19]
+ stp x21, x22, [sp, #SP_C_RT_CTX_X21]
+ stp x23, x24, [sp, #SP_C_RT_CTX_X23]
+ stp x25, x26, [sp, #SP_C_RT_CTX_X25]
+ stp x27, x28, [sp, #SP_C_RT_CTX_X27]
+ stp x29, x30, [sp, #SP_C_RT_CTX_X29]
+
+ /* ---------------------------------------------------------------------
+	 * Everything is set up now. el3_exit() will use the secure context to
+	 * restore the general purpose and EL3 system registers before the ERET
+	 * into the secure payload.
+ * ---------------------------------------------------------------------
+ */
+ b el3_exit
+endfunc spm_secure_partition_enter
+
+ /* ---------------------------------------------------------------------
+ * This function is called with 'x0' pointing to a C runtime context
+ * saved in spm_secure_partition_enter().
+ * It restores the saved registers and returns to that runtime with 'x0'
+ * as the new stack pointer. This destroys the C runtime context that had
+ * been built on the stack below the saved context by the caller. The
+ * second parameter 'x1' is then passed back to the caller as the return
+ * value.
+ * ---------------------------------------------------------------------
+ */
+func spm_secure_partition_exit
+ /* Restore the previous stack */
+ mov sp, x0
+
+	/* Restore the callee-saved registers from the stack */
+ ldp x19, x20, [x0, #(SP_C_RT_CTX_X19 - SP_C_RT_CTX_SIZE)]
+ ldp x21, x22, [x0, #(SP_C_RT_CTX_X21 - SP_C_RT_CTX_SIZE)]
+ ldp x23, x24, [x0, #(SP_C_RT_CTX_X23 - SP_C_RT_CTX_SIZE)]
+ ldp x25, x26, [x0, #(SP_C_RT_CTX_X25 - SP_C_RT_CTX_SIZE)]
+ ldp x27, x28, [x0, #(SP_C_RT_CTX_X27 - SP_C_RT_CTX_SIZE)]
+ ldp x29, x30, [x0, #(SP_C_RT_CTX_X29 - SP_C_RT_CTX_SIZE)]
+
+ /* ---------------------------------------------------------------------
+ * This should take us back to the instruction after the call to the
+	 * last spm_secure_partition_enter(). Place the second parameter in x0
+	 * so that the caller sees it as the return value of the original
+ * entry call.
+ * ---------------------------------------------------------------------
+ */
+ mov x0, x1
+ ret
+endfunc spm_secure_partition_exit
diff --git a/services/std_svc/spm/common/include/spm_common.h b/services/std_svc/spm/common/include/spm_common.h
new file mode 100644
index 0000000..68805fc
--- /dev/null
+++ b/services/std_svc/spm/common/include/spm_common.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_COMMON_H
+#define SPM_COMMON_H
+
+#include <context.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define SP_C_RT_CTX_X19 0x0
+#define SP_C_RT_CTX_X20 0x8
+#define SP_C_RT_CTX_X21 0x10
+#define SP_C_RT_CTX_X22 0x18
+#define SP_C_RT_CTX_X23 0x20
+#define SP_C_RT_CTX_X24 0x28
+#define SP_C_RT_CTX_X25 0x30
+#define SP_C_RT_CTX_X26 0x38
+#define SP_C_RT_CTX_X27 0x40
+#define SP_C_RT_CTX_X28 0x48
+#define SP_C_RT_CTX_X29 0x50
+#define SP_C_RT_CTX_X30 0x58
+
+#define SP_C_RT_CTX_SIZE 0x60
+#define SP_C_RT_CTX_ENTRIES (SP_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+/* Assembly helpers */
+uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
+void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* SPM_COMMON_H */
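
For reference, the two helpers declared above are used as a pair to implement a synchronous entry into, and exit from, a Secure Partition. The sketch below shows the expected pairing; it is an approximation of spmc_sp_synchronous_entry() (implemented in spmc_setup.c, which is part of this patch but not shown in this excerpt), so the cm_* context-management calls and their exact ordering here are assumptions rather than the upstream sequence.

#include <lib/el3_runtime/context_mgmt.h>

#include "spm_common.h"
#include "spmc.h"	/* struct sp_exec_ctx, defined later in this patch. */

uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
{
	uint64_t rc;

	/* Make the SP's saved CPU context the one restored on the next ERET. */
	cm_set_context(&ec->cpu_ctx, SECURE);
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/*
	 * Stash the EL3 C runtime state on the stack and enter the SP via
	 * el3_exit(). Control returns here once the SPMC later calls
	 * spm_secure_partition_exit() with ec->c_rt_ctx; 'rc' then holds the
	 * value passed as its second argument.
	 */
	rc = spm_secure_partition_enter(&ec->c_rt_ctx);

	/* Save the SP's EL1 state before handing the cpu back to the caller. */
	cm_el1_sysregs_context_save(SECURE);

	return rc;
}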
diff --git a/services/std_svc/spm/common/spm.mk b/services/std_svc/spm/common/spm.mk
new file mode 100644
index 0000000..9aa96be
--- /dev/null
+++ b/services/std_svc/spm/common/spm.mk
@@ -0,0 +1,17 @@
+#
+# Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH},aarch64)
+ $(error "Error: SPM is only supported on aarch64.")
+endif
+
+INCLUDES += -Iservices/std_svc/spm/common/include
+
+SPM_SOURCES := $(addprefix services/std_svc/spm/common/,\
+ ${ARCH}/spm_helpers.S)
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32 := yes
diff --git a/services/std_svc/spm/el3_spmc/logical_sp.c b/services/std_svc/spm/el3_spmc/logical_sp.c
new file mode 100644
index 0000000..e080832
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/logical_sp.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <services/el3_spmc_logical_sp.h>
+#include <services/ffa_svc.h>
+#include "spmc.h"
+
+/*******************************************************************************
+ * Validate any logical partition descriptors before we initialise them.
+ * Initialisation of the partitions themselves is taken care of during SPMC boot.
+ ******************************************************************************/
+int el3_sp_desc_validate(void)
+{
+ struct el3_lp_desc *lp_array;
+
+ /*
+	 * Assert that the number of descriptors does not exceed the maximum
+	 * allowed. This constant should be defined on a per-platform basis.
+ */
+ assert(EL3_LP_DESCS_COUNT <= MAX_EL3_LP_DESCS_COUNT);
+
+ /* Check the array bounds are valid. */
+ assert(EL3_LP_DESCS_END >= EL3_LP_DESCS_START);
+
+ /* If no logical partitions are implemented then simply bail out. */
+ if (EL3_LP_DESCS_COUNT == 0U) {
+ return 0;
+ }
+
+ lp_array = get_el3_lp_array();
+
+ for (unsigned int index = 0; index < EL3_LP_DESCS_COUNT; index++) {
+ struct el3_lp_desc *lp_desc = &lp_array[index];
+
+ /* Validate our logical partition descriptors. */
+ if (lp_desc == NULL) {
+ ERROR("Invalid Logical SP Descriptor\n");
+ return -EINVAL;
+ }
+
+ /*
+		 * Ensure the ID follows the convention to indicate it resides
+ * in the secure world.
+ */
+ if (!ffa_is_secure_world_id(lp_desc->sp_id)) {
+ ERROR("Invalid Logical SP ID (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure we don't conflict with the SPMC partition ID. */
+ if (lp_desc->sp_id == FFA_SPMC_ID) {
+ ERROR("Logical SP ID clashes with SPMC ID(0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure the UUID is not the NULL UUID. */
+ if (lp_desc->uuid[0] == 0 && lp_desc->uuid[1] == 0 &&
+ lp_desc->uuid[2] == 0 && lp_desc->uuid[3] == 0) {
+ ERROR("Invalid UUID for Logical SP (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure init function callback is registered. */
+ if (lp_desc->init == NULL) {
+ ERROR("Missing init function for Logical SP(0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure that LP only supports receiving direct requests. */
+ if (lp_desc->properties &
+ ~(FFA_PARTITION_DIRECT_REQ_RECV)) {
+ ERROR("Invalid partition properties (0x%x)\n",
+ lp_desc->properties);
+ return -EINVAL;
+ }
+
+ /* Ensure direct request function callback is registered. */
+ if (lp_desc->direct_req == NULL) {
+ ERROR("No Direct Req handler for Logical SP (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+
+ /* Ensure that all partition IDs are unique. */
+ for (unsigned int inner_idx = index + 1;
+ inner_idx < EL3_LP_DESCS_COUNT; inner_idx++) {
+ if (lp_desc->sp_id == lp_array[inner_idx].sp_id) {
+ ERROR("Duplicate SP ID Detected (0x%x)\n",
+ lp_desc->sp_id);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
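
el3_sp_desc_validate() above effectively defines the contract for an EL3 Logical Partition descriptor: a secure world sp_id that does not clash with the SPMC, a non-NULL UUID, an init callback, a direct request callback, and properties limited to FFA_PARTITION_DIRECT_REQ_RECV. The following is a hypothetical platform descriptor that satisfies those checks; the DECLARE_LOGICAL_PARTITION registration macro, its argument order and the callback prototypes are assumed from services/el3_spmc_logical_sp.h and may differ in detail.

#include <stdbool.h>
#include <stdint.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <services/el3_spmc_logical_sp.h>
#include <services/ffa_svc.h>

/* Example values only; the ID must follow the secure world convention. */
#define EXAMPLE_LP_ID		0xC001
#define EXAMPLE_LP_UUID		{0x1e67b5b4, 0xe14f904a, 0x13fb1fb8, 0xcbdae432}

static int32_t example_lp_init(void)
{
	INFO("Example EL3 Logical Partition initialised.\n");
	return 0;
}

static uint64_t example_lp_direct_req(uint32_t smc_fid, bool secure_origin,
				      uint64_t x1, uint64_t x2, uint64_t x3,
				      uint64_t x4, void *cookie, void *handle,
				      uint64_t flags)
{
	/* Respond with the same width (SMC32/SMC64) as the request. */
	uint64_t ret_fid = (smc_fid == FFA_MSG_SEND_DIRECT_REQ_SMC64) ?
			   FFA_MSG_SEND_DIRECT_RESP_SMC64 :
			   FFA_MSG_SEND_DIRECT_RESP_SMC32;

	/* Swap source/destination in x1 and echo x3/x4 back to the sender. */
	SMC_RET8(handle, ret_fid,
		 ((uint64_t)ffa_endpoint_destination(x1) << 16) |
		 ffa_endpoint_source(x1),
		 0, x3, x4, 0, 0, 0);
}

/* Registration macro and argument order assumed from el3_spmc_logical_sp.h. */
DECLARE_LOGICAL_PARTITION(
	example_lp,
	example_lp_init,		/* Init function. */
	EXAMPLE_LP_ID,			/* FF-A Partition ID. */
	EXAMPLE_LP_UUID,		/* UUID. */
	FFA_PARTITION_DIRECT_REQ_RECV,	/* Partition properties. */
	example_lp_direct_req		/* Direct request callback. */
);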
diff --git a/services/std_svc/spm/el3_spmc/spmc.h b/services/std_svc/spm/el3_spmc/spmc.h
new file mode 100644
index 0000000..5233650
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMC_H
+#define SPMC_H
+
+#include <stdint.h>
+
+#include <common/bl_common.h>
+#include <lib/psci/psci.h>
+#include <lib/spinlock.h>
+#include <services/el3_spmc_logical_sp.h>
+#include "spm_common.h"
+
+/*
+ * Ranges of FF-A IDs for Normal world and Secure world components. The
+ * convention matches that used by other SPMCs i.e. Hafnium and OP-TEE.
+ */
+#define FFA_NWD_ID_BASE 0x0
+#define FFA_NWD_ID_LIMIT 0x7FFF
+#define FFA_SWD_ID_BASE 0x8000
+#define FFA_SWD_ID_LIMIT (SPMD_DIRECT_MSG_ENDPOINT_ID - 1)
+#define FFA_SWD_ID_MASK 0x8000
+
+/* ID 0 is reserved for the normal world entity, (Hypervisor or OS Kernel). */
+#define FFA_NWD_ID U(0)
+/* First ID is reserved for the SPMC */
+#define FFA_SPMC_ID U(FFA_SWD_ID_BASE)
+/* SP IDs are allocated after the SPMC ID */
+#define FFA_SP_ID_BASE (FFA_SPMC_ID + 1)
+/* Align with Hafnium implementation */
+#define INV_SP_ID 0x7FFF
+
+/* FF-A Related helper macros. */
+#define FFA_ID_MASK U(0xFFFF)
+#define FFA_PARTITION_ID_SHIFT U(16)
+#define FFA_FEATURES_BIT31_MASK U(0x1u << 31)
+#define FFA_FEATURES_RET_REQ_NS_BIT U(0x1 << 1)
+
+#define FFA_RUN_EP_ID(ep_vcpu_ids) \
+ ((ep_vcpu_ids >> FFA_PARTITION_ID_SHIFT) & FFA_ID_MASK)
+#define FFA_RUN_VCPU_ID(ep_vcpu_ids) \
+ (ep_vcpu_ids & FFA_ID_MASK)
+
+#define FFA_PAGE_SIZE (4096)
+#define FFA_RXTX_PAGE_COUNT_MASK 0x1F
+
+/* Ensure that the page size used by TF-A is 4k aligned. */
+CASSERT((PAGE_SIZE % FFA_PAGE_SIZE) == 0, assert_aligned_page_size);
+
+/*
+ * Defines to allow an SP to subscribe to power management messages.
+ */
+#define FFA_PM_MSG_SUB_CPU_OFF U(1 << 0)
+#define FFA_PM_MSG_SUB_CPU_SUSPEND U(1 << 1)
+#define FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME U(1 << 2)
+
+/*
+ * Runtime states of an execution context as per the FF-A v1.1 specification.
+ */
+enum sp_runtime_states {
+ RT_STATE_WAITING,
+ RT_STATE_RUNNING,
+ RT_STATE_PREEMPTED,
+ RT_STATE_BLOCKED
+};
+
+/*
+ * Runtime model of an execution context as per the FF-A v1.1 specification. Its
+ * value is valid only if the execution context is not in the waiting state.
+ */
+enum sp_runtime_model {
+ RT_MODEL_DIR_REQ,
+ RT_MODEL_RUN,
+ RT_MODEL_INIT,
+ RT_MODEL_INTR
+};
+
+enum sp_runtime_el {
+ EL1 = 0,
+ S_EL0,
+ S_EL1
+};
+
+enum sp_execution_state {
+ SP_STATE_AARCH64 = 0,
+ SP_STATE_AARCH32
+};
+
+enum mailbox_state {
+ /* There is no message in the mailbox. */
+ MAILBOX_STATE_EMPTY,
+
+ /* There is a message that has been populated in the mailbox. */
+ MAILBOX_STATE_FULL,
+};
+
+struct mailbox {
+ enum mailbox_state state;
+
+ /* RX/TX Buffers. */
+ void *rx_buffer;
+ const void *tx_buffer;
+
+ /* Size of RX/TX Buffer. */
+ uint32_t rxtx_page_count;
+
+ /* Lock access to mailbox. */
+ spinlock_t lock;
+};
+
+/*
+ * Execution context members for an SP. This is a bit like struct
+ * vcpu in a hypervisor.
+ */
+struct sp_exec_ctx {
+ /*
+ * Store the stack address to restore C runtime context from after
+ * returning from a synchronous entry into the SP.
+ */
+ uint64_t c_rt_ctx;
+
+ /* Space to maintain the architectural state of an SP. */
+ cpu_context_t cpu_ctx;
+
+ /* Track the current runtime state of the SP. */
+ enum sp_runtime_states rt_state;
+
+ /* Track the current runtime model of the SP. */
+ enum sp_runtime_model rt_model;
+};
+
+/*
+ * Structure to describe the cumulative properties of an SP.
+ */
+struct secure_partition_desc {
+ /*
+ * Execution contexts allocated to this endpoint. Ideally,
+	 * we only need as many contexts as there are physical cpus
+	 * for an S-EL1 SP which is MP-pinned.
+ */
+ struct sp_exec_ctx ec[PLATFORM_CORE_COUNT];
+
+ /* ID of the Secure Partition. */
+ uint16_t sp_id;
+
+ /* Runtime EL. */
+ enum sp_runtime_el runtime_el;
+
+ /* Partition UUID. */
+ uint32_t uuid[4];
+
+ /* Partition Properties. */
+ uint32_t properties;
+
+ /* Supported FF-A Version. */
+ uint32_t ffa_version;
+
+ /* Execution State. */
+ enum sp_execution_state execution_state;
+
+ /* Mailbox tracking. */
+ struct mailbox mailbox;
+
+ /* Secondary entrypoint. Only valid for a S-EL1 SP. */
+ uintptr_t secondary_ep;
+
+ /*
+ * Store whether the SP has subscribed to any power management messages.
+ */
+ uint16_t pwr_mgmt_msgs;
+
+ /*
+ * Store whether the SP has requested the use of the NS bit for memory
+ * management transactions if it is using FF-A v1.0.
+ */
+ bool ns_bit_requested;
+};
+
+/*
+ * This define identifies the only SP that will be initialised and participate
+ * in FF-A communication. The implementation leaves the door open for more SPs
+ * to be managed in future but for now it is reasonable to assume that either a
+ * single S-EL0 or a single S-EL1 SP will be supported. This define will be used
+ * to identify which SP descriptor to initialise and manage during SP runtime.
+ */
+#define ACTIVE_SP_DESC_INDEX 0
+
+/*
+ * Structure to describe the cumulative properties of the Hypervisor and
+ * NS-Endpoints.
+ */
+struct ns_endpoint_desc {
+ /*
+ * ID of the NS-Endpoint or Hypervisor.
+ */
+ uint16_t ns_ep_id;
+
+ /*
+ * Mailbox tracking.
+ */
+ struct mailbox mailbox;
+
+ /*
+ * Supported FF-A Version
+ */
+ uint32_t ffa_version;
+};
+
+/**
+ * Holds information returned for each partition by the FFA_PARTITION_INFO_GET
+ * interface.
+ */
+struct ffa_partition_info_v1_0 {
+ uint16_t ep_id;
+ uint16_t execution_ctx_count;
+ uint32_t properties;
+};
+
+/* Extended structure for v1.1. */
+struct ffa_partition_info_v1_1 {
+ uint16_t ep_id;
+ uint16_t execution_ctx_count;
+ uint32_t properties;
+ uint32_t uuid[4];
+};
+
+/* Reference to power management hooks */
+extern const spd_pm_ops_t spmc_pm;
+
+/* Setup Function for different SP types. */
+void spmc_sp_common_setup(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info,
+ int32_t boot_info_reg);
+void spmc_el1_sp_setup(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info);
+void spmc_sp_common_ep_commit(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info);
+
+/*
+ * Helper function to perform a synchronous entry into a SP.
+ */
+uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec);
+
+/*
+ * Helper function to obtain the descriptor of the current SP on a physical cpu.
+ */
+struct secure_partition_desc *spmc_get_current_sp_ctx(void);
+
+/*
+ * Helper function to obtain the execution context of an SP on a
+ * physical cpu.
+ */
+struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp);
+
+/*
+ * Helper function to obtain the index of the execution context of an SP on a
+ * physical cpu.
+ */
+unsigned int get_ec_index(struct secure_partition_desc *sp);
+
+uint64_t spmc_ffa_error_return(void *handle, int error_code);
+
+/*
+ * Ensure a partition ID does not clash and follows the secure world convention.
+ */
+bool is_ffa_secure_id_valid(uint16_t partition_id);
+
+/*
+ * Helper function to obtain the array storing the EL3
+ * Logical Partition descriptors.
+ */
+struct el3_lp_desc *get_el3_lp_array(void);
+
+/*
+ * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
+ * or OS kernel in the normal world or the last SP that was run.
+ */
+struct mailbox *spmc_get_mbox_desc(bool secure_origin);
+
+/*
+ * Helper function to obtain the context of an SP with a given partition ID.
+ */
+struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id);
+
+/*
+ * Helper function to obtain the FF-A version of the calling
+ * partition.
+ */
+uint32_t get_partition_ffa_version(bool secure_origin);
+
+
+#endif /* SPMC_H */
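
As an aside, the FFA_RUN_EP_ID()/FFA_RUN_VCPU_ID() macros defined in this header unpack the single register argument of FFA_RUN, which carries the target endpoint ID in bits [31:16] and the vCPU ID in bits [15:0]. A standalone illustration (ffa_run_arg_example() is not part of the patch):

#include <stdint.h>

#include "spmc.h"

static void ffa_run_arg_example(void)
{
	/* Target the first SP after the SPMC (FFA_SP_ID_BASE, 0x8001) on vCPU 2. */
	uint64_t ep_vcpu_ids =
		((uint64_t)FFA_SP_ID_BASE << FFA_PARTITION_ID_SHIFT) | 2U;

	uint16_t target_id = FFA_RUN_EP_ID(ep_vcpu_ids);	/* 0x8001 */
	uint16_t vcpu_id = FFA_RUN_VCPU_ID(ep_vcpu_ids);	/* 2 */

	(void)target_id;
	(void)vcpu_id;
}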
diff --git a/services/std_svc/spm/el3_spmc/spmc.mk b/services/std_svc/spm/el3_spmc/spmc.mk
new file mode 100644
index 0000000..c674e71
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc.mk
@@ -0,0 +1,44 @@
+#
+# Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH},aarch64)
+ $(error "Error: SPMC is only supported on aarch64.")
+endif
+
+SPMC_SOURCES := $(addprefix services/std_svc/spm/el3_spmc/, \
+ spmc_main.c \
+ spmc_setup.c \
+ logical_sp.c \
+ spmc_pm.c \
+ spmc_shared_mem.c)
+
+# Specify the platform-specific logical partition implementation.
+SPMC_LP_SOURCES := $(addprefix ${PLAT_DIR}/, \
+ ${PLAT}_el3_spmc_logical_sp.c)
+
+
+SPMC_SOURCES += $(SPMC_LP_SOURCES)
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32 := yes
+
+ifndef BL32
+# The SPMC is paired with a Test Secure Payload source and we intend to
+# build the Test Secure Payload if no other image has been provided
+# for BL32.
+#
+# In cases where an associated Secure Payload lies outside this build
+# system/source tree, the dispatcher Makefile can either invoke an external
+# build command or assume it is pre-built.
+
+BL32_ROOT := bl32/tsp
+
+# Conditionally include the SP's Makefile. The assumption is that the TSP's
+# build system is compatible with that of Trusted Firmware, and that it will
+# add and populate the necessary build targets and variables.
+
+include ${BL32_ROOT}/tsp.mk
+endif
diff --git a/services/std_svc/spm/el3_spmc/spmc_main.c b/services/std_svc/spm/el3_spmc/spmc_main.c
new file mode 100644
index 0000000..9b8621a
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_main.c
@@ -0,0 +1,1995 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <arch_helpers.h>
+#include <bl31/bl31.h>
+#include <bl31/ehf.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <common/runtime_svc.h>
+#include <common/uuid.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/smccc.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <libfdt.h>
+#include <plat/common/platform.h>
+#include <services/el3_spmc_logical_sp.h>
+#include <services/ffa_svc.h>
+#include <services/spmc_svc.h>
+#include <services/spmd_svc.h>
+#include "spmc.h"
+#include "spmc_shared_mem.h"
+
+#include <platform_def.h>
+
+/* Declare the maximum number of SPs and EL3 LPs. */
+#define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
+
+/*
+ * Allocate a secure partition descriptor to describe each SP in the system that
+ * does not reside at EL3.
+ */
+static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
+
+/*
+ * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
+ * the system that interacts with a SP. It is used to track the Hypervisor
+ * buffer pair, version and ID for now. It could be extended to track VM
+ * properties when the SPMC supports indirect messaging.
+ */
+static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
+
+static uint64_t spmc_sp_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie);
+
+/*
+ * Helper function to obtain the array storing the EL3
+ * Logical Partition descriptors.
+ */
+struct el3_lp_desc *get_el3_lp_array(void)
+{
+ return (struct el3_lp_desc *) EL3_LP_DESCS_START;
+}
+
+/*
+ * Helper function to obtain the descriptor of the last SP to which control was
+ * handed on this physical cpu. Currently, we assume there is only one SP.
+ * TODO: Expand to track multiple partitions when required.
+ */
+struct secure_partition_desc *spmc_get_current_sp_ctx(void)
+{
+ return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
+}
+
+/*
+ * Helper function to obtain the execution context of an SP on the
+ * current physical cpu.
+ */
+struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
+{
+ return &(sp->ec[get_ec_index(sp)]);
+}
+
+/* Helper function to get pointer to SP context from its ID. */
+struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
+{
+ /* Check for Secure World Partitions. */
+ for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
+ if (sp_desc[i].sp_id == id) {
+ return &(sp_desc[i]);
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
+ * We assume that the first descriptor is reserved for this entity.
+ */
+struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
+{
+ return &(ns_ep_desc[0]);
+}
+
+/*
+ * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
+ * or OS kernel in the normal world or the last SP that was run.
+ */
+struct mailbox *spmc_get_mbox_desc(bool secure_origin)
+{
+ /* Obtain the RX/TX buffer pair descriptor. */
+ if (secure_origin) {
+ return &(spmc_get_current_sp_ctx()->mailbox);
+ } else {
+ return &(spmc_get_hyp_ctx()->mailbox);
+ }
+}
+
+/******************************************************************************
+ * This function returns to the place where spmc_sp_synchronous_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
+{
+ /*
+ * The SPM must have initiated the original request through a
+ * synchronous entry into the secure partition. Jump back to the
+	 * original C runtime context with the value of rc in x0.
+ */
+ spm_secure_partition_exit(ec->c_rt_ctx, rc);
+
+ panic();
+}
+
+/*******************************************************************************
+ * Return FFA_ERROR with specified error code.
+ ******************************************************************************/
+uint64_t spmc_ffa_error_return(void *handle, int error_code)
+{
+ SMC_RET8(handle, FFA_ERROR,
+ FFA_TARGET_INFO_MBZ, error_code,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+}
+
+/******************************************************************************
+ * Helper function to validate a secure partition ID to ensure it does not
+ * conflict with any other FF-A component and follows the convention to
+ * indicate it resides within the secure world.
+ ******************************************************************************/
+bool is_ffa_secure_id_valid(uint16_t partition_id)
+{
+ struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
+
+ /* Ensure the ID is not the invalid partition ID. */
+ if (partition_id == INV_SP_ID) {
+ return false;
+ }
+
+ /* Ensure the ID is not the SPMD ID. */
+ if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
+ return false;
+ }
+
+ /*
+ * Ensure the ID follows the convention to indicate it resides
+ * in the secure world.
+ */
+ if (!ffa_is_secure_world_id(partition_id)) {
+ return false;
+ }
+
+ /* Ensure we don't conflict with the SPMC partition ID. */
+ if (partition_id == FFA_SPMC_ID) {
+ return false;
+ }
+
+ /* Ensure we do not already have an SP context with this ID. */
+ if (spmc_get_sp_ctx(partition_id)) {
+ return false;
+ }
+
+	/* Ensure we don't clash with any Logical SPs. */
+ for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
+ if (el3_lp_descs[i].sp_id == partition_id) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*******************************************************************************
+ * This function either forwards the request to the other world or returns
+ * with an ERET depending on the source of the call.
+ * We can assume that the destination is for an entity at a lower exception
+ * level as any messages destined for a logical SP resident in EL3 will have
+ * already been taken care of by the SPMC before entering this function.
+ ******************************************************************************/
+static uint64_t spmc_smc_return(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *handle,
+ void *cookie,
+ uint64_t flags,
+ uint16_t dst_id)
+{
+ /* If the destination is in the normal world always go via the SPMD. */
+ if (ffa_is_normal_world_id(dst_id)) {
+ return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
+ cookie, handle, flags);
+ }
+ /*
+ * If the caller is secure and we want to return to the secure world,
+ * ERET directly.
+ */
+ else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
+ SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
+ }
+ /* If we originated in the normal world then switch contexts. */
+ else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
+ return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
+ x3, x4, handle);
+ } else {
+ /* Unknown State. */
+ panic();
+ }
+
+ /* Shouldn't be Reached. */
+ return 0;
+}
+
+/*******************************************************************************
+ * FF-A ABI Handlers.
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Helper function to validate arg2 as part of a direct message.
+ ******************************************************************************/
+static inline bool direct_msg_validate_arg2(uint64_t x2)
+{
+ /* Check message type. */
+ if (x2 & FFA_FWK_MSG_BIT) {
+ /* We have a framework message, ensure it is a known message. */
+ if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
+ VERBOSE("Invalid message format 0x%lx.\n", x2);
+ return false;
+ }
+ } else {
+		/* We have a partition message, ensure x2 is not set. */
+ if (x2 != (uint64_t) 0) {
+ VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
+ x2);
+ return false;
+ }
+ }
+ return true;
+}
+
+/*******************************************************************************
+ * Handle direct request messages and route to the appropriate destination.
+ ******************************************************************************/
+static uint64_t direct_req_smc_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ uint16_t dst_id = ffa_endpoint_destination(x1);
+ struct el3_lp_desc *el3_lp_descs;
+ struct secure_partition_desc *sp;
+ unsigned int idx;
+
+ /* Check if arg2 has been populated correctly based on message type. */
+ if (!direct_msg_validate_arg2(x2)) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ el3_lp_descs = get_el3_lp_array();
+
+ /* Check if the request is destined for a Logical Partition. */
+ for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
+ if (el3_lp_descs[i].sp_id == dst_id) {
+ return el3_lp_descs[i].direct_req(
+ smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+ }
+ }
+
+ /*
+	 * If the request was not targeted to a Logical SP and came from the
+	 * secure world then it is invalid, since an SP cannot call into the Normal world and
+ * there is no other SP to call into. If there are other SPs in future
+ * then the partition runtime model would need to be validated as well.
+ */
+ if (secure_origin) {
+ VERBOSE("Direct request not supported to the Normal World.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Check if the SP ID is valid. */
+ sp = spmc_get_sp_ctx(dst_id);
+ if (sp == NULL) {
+ VERBOSE("Direct request to unknown partition ID (0x%x).\n",
+ dst_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Check that the target execution context is in a waiting state before
+ * forwarding the direct request to it.
+ */
+ idx = get_ec_index(sp);
+ if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
+ VERBOSE("SP context on core%u is not waiting (%u).\n",
+			idx, sp->ec[idx].rt_state);
+ return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
+ }
+
+ /*
+ * Everything checks out so forward the request to the SP after updating
+ * its state and runtime model.
+ */
+ sp->ec[idx].rt_state = RT_STATE_RUNNING;
+ sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
+ return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
+ handle, cookie, flags, dst_id);
+}
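
For clarity, the routing information in x1 consumed by the handler above follows the FF-A direct messaging convention: the sender endpoint ID is carried in bits [31:16] and the receiver endpoint ID in bits [15:0], which is what ffa_endpoint_source()/ffa_endpoint_destination() from ffa_svc.h extract. The helper below is hypothetical and only documents that layout:

static inline uint64_t make_direct_msg_arg1(uint16_t sender, uint16_t receiver)
{
	/*
	 * Hypothetical helper: a normal-world sender (ID 0) targeting the
	 * first SP (0x8001) would produce x1 = 0x00008001.
	 */
	return ((uint64_t)sender << FFA_PARTITION_ID_SHIFT) | receiver;
}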
+
+/*******************************************************************************
+ * Handle direct response messages and route to the appropriate destination.
+ ******************************************************************************/
+static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ uint16_t dst_id = ffa_endpoint_destination(x1);
+ struct secure_partition_desc *sp;
+ unsigned int idx;
+
+ /* Check if arg2 has been populated correctly based on message type. */
+ if (!direct_msg_validate_arg2(x2)) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Check that the response did not originate from the Normal world. */
+ if (!secure_origin) {
+ VERBOSE("Direct Response not supported from Normal World.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Check that the response is either targeted to the Normal world or the
+ * SPMC e.g. a PM response.
+ */
+ if ((dst_id != FFA_SPMC_ID) && ffa_is_secure_world_id(dst_id)) {
+ VERBOSE("Direct response to invalid partition ID (0x%x).\n",
+ dst_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Obtain the SP descriptor and update its runtime state. */
+ sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
+ if (sp == NULL) {
+ VERBOSE("Direct response to unknown partition ID (0x%x).\n",
+ dst_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Sanity check state is being tracked correctly in the SPMC. */
+ idx = get_ec_index(sp);
+ assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
+
+ /* Ensure SP execution context was in the right runtime model. */
+ if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
+ VERBOSE("SP context on core%u not handling direct req (%u).\n",
+ idx, sp->ec[idx].rt_model);
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+
+ /* Update the state of the SP execution context. */
+ sp->ec[idx].rt_state = RT_STATE_WAITING;
+
+ /*
+	 * If the receiver is the SPMC then perform a synchronous exit back to
+	 * the context that entered the SP (e.g. for a PM framework message),
+	 * otherwise forward the response to the Normal world.
+ */
+ if (dst_id == FFA_SPMC_ID) {
+ spmc_sp_synchronous_exit(&sp->ec[idx], x4);
+ /* Should not get here. */
+ panic();
+ }
+
+ return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
+ handle, cookie, flags, dst_id);
+}
+
+/*******************************************************************************
+ * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
+ * cycles.
+ ******************************************************************************/
+static uint64_t msg_wait_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct secure_partition_desc *sp;
+ unsigned int idx;
+
+ /*
+	 * Check that the call did not originate from the Normal world as
+	 * only the secure world can invoke this ABI.
+ */
+ if (!secure_origin) {
+ VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
+ sp = spmc_get_current_sp_ctx();
+ if (sp == NULL) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Get the execution context of the SP that invoked FFA_MSG_WAIT.
+ */
+ idx = get_ec_index(sp);
+
+ /* Ensure SP execution context was in the right runtime model. */
+ if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+
+ /* Sanity check the state is being tracked correctly in the SPMC. */
+ assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
+
+ /*
+ * Perform a synchronous exit if the partition was initialising. The
+ * state is updated after the exit.
+ */
+ if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
+ spmc_sp_synchronous_exit(&sp->ec[idx], x4);
+ /* Should not get here */
+ panic();
+ }
+
+ /* Update the state of the SP execution context. */
+ sp->ec[idx].rt_state = RT_STATE_WAITING;
+
+ /* Resume normal world if a secure interrupt was handled. */
+ if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
+ /* FFA_MSG_WAIT can only be called from the secure world. */
+ unsigned int secure_state_in = SECURE;
+ unsigned int secure_state_out = NON_SECURE;
+
+ cm_el1_sysregs_context_save(secure_state_in);
+ cm_el1_sysregs_context_restore(secure_state_out);
+ cm_set_next_eret_context(secure_state_out);
+ SMC_RET0(cm_get_context(secure_state_out));
+ }
+
+ /* Forward the response to the Normal world. */
+ return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
+ handle, cookie, flags, FFA_NWD_ID);
+}
+
+static uint64_t ffa_error_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct secure_partition_desc *sp;
+ unsigned int idx;
+
+ /* Check that the response did not originate from the Normal world. */
+ if (!secure_origin) {
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Get the descriptor of the SP that invoked FFA_ERROR. */
+ sp = spmc_get_current_sp_ctx();
+ if (sp == NULL) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Get the execution context of the SP that invoked FFA_ERROR. */
+ idx = get_ec_index(sp);
+
+ /*
+ * We only expect FFA_ERROR to be received during SP initialisation
+ * otherwise this is an invalid call.
+ */
+ if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
+ ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
+ spmc_sp_synchronous_exit(&sp->ec[idx], x2);
+ /* Should not get here. */
+ panic();
+ }
+
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+}
+
+static uint64_t ffa_version_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ uint32_t requested_version = x1 & FFA_VERSION_MASK;
+
+ if (requested_version & FFA_VERSION_BIT31_MASK) {
+ /* Invalid encoding, return an error. */
+ SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
+ /* Execution stops here. */
+ }
+
+ /* Determine the caller to store the requested version. */
+ if (secure_origin) {
+ /*
+ * Ensure that the SP is reporting the same version as
+ * specified in its manifest. If these do not match there is
+ * something wrong with the SP.
+		 * TODO: Should we abort the SP? For now assert this is not
+		 * the case.
+ */
+ assert(requested_version ==
+ spmc_get_current_sp_ctx()->ffa_version);
+ } else {
+ /*
+ * If this is called by the normal world, record this
+ * information in its descriptor.
+ */
+ spmc_get_hyp_ctx()->ffa_version = requested_version;
+ }
+
+ SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
+ FFA_VERSION_MINOR));
+}
+
+/*******************************************************************************
+ * Helper function to obtain the FF-A version of the calling partition.
+ ******************************************************************************/
+uint32_t get_partition_ffa_version(bool secure_origin)
+{
+ if (secure_origin) {
+ return spmc_get_current_sp_ctx()->ffa_version;
+ } else {
+ return spmc_get_hyp_ctx()->ffa_version;
+ }
+}
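
The version word stored and compared above uses the FF-A encoding in which bit 31 must be zero, bits [30:16] carry the major version and bits [15:0] the minor version, so MAKE_FFA_VERSION(1, 0) is 0x00010000 and MAKE_FFA_VERSION(1, 1) is 0x00010001. The helper below is a hypothetical convenience mirroring the check made later by partition_info_get_handler():

static bool caller_uses_v1_0_descriptors(bool secure_origin)
{
	return get_partition_ffa_version(secure_origin) ==
	       MAKE_FFA_VERSION(U(1), U(0));
}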
+
+static uint64_t rxtx_map_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int ret;
+ uint32_t error_code;
+ uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
+ struct mailbox *mbox;
+ uintptr_t tx_address = x1;
+ uintptr_t rx_address = x2;
+ uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
+ uint32_t buf_size = page_count * FFA_PAGE_SIZE;
+
+ /*
+ * The SPMC does not support mapping of VM RX/TX pairs to facilitate
+ * indirect messaging with SPs. Check if the Hypervisor has invoked this
+ * ABI on behalf of a VM and reject it if this is the case.
+ */
+ if (tx_address == 0 || rx_address == 0) {
+ WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Ensure the specified buffers are not the same. */
+ if (tx_address == rx_address) {
+ WARN("TX Buffer must not be the same as RX Buffer.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Ensure the buffer size is not 0. */
+ if (buf_size == 0U) {
+ WARN("Buffer size must not be 0\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Ensure the buffer size is a multiple of the translation granule size
+ * in TF-A.
+ */
+ if (buf_size % PAGE_SIZE != 0U) {
+ WARN("Buffer size must be aligned to translation granule.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Obtain the RX/TX buffer pair descriptor. */
+ mbox = spmc_get_mbox_desc(secure_origin);
+
+ spin_lock(&mbox->lock);
+
+ /* Check if buffers have already been mapped. */
+ if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
+ WARN("RX/TX Buffers already mapped (%p/%p)\n",
+ (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
+ error_code = FFA_ERROR_DENIED;
+ goto err;
+ }
+
+ /* memmap the TX buffer as read only. */
+ ret = mmap_add_dynamic_region(tx_address, /* PA */
+ tx_address, /* VA */
+ buf_size, /* size */
+ mem_atts | MT_RO_DATA); /* attrs */
+ if (ret != 0) {
+ /* Return the correct error code. */
+ error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
+ FFA_ERROR_INVALID_PARAMETER;
+ WARN("Unable to map TX buffer: %d\n", error_code);
+ goto err;
+ }
+
+ /* memmap the RX buffer as read write. */
+ ret = mmap_add_dynamic_region(rx_address, /* PA */
+ rx_address, /* VA */
+ buf_size, /* size */
+ mem_atts | MT_RW_DATA); /* attrs */
+
+ if (ret != 0) {
+ error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
+ FFA_ERROR_INVALID_PARAMETER;
+ WARN("Unable to map RX buffer: %d\n", error_code);
+ /* Unmap the TX buffer again. */
+ mmap_remove_dynamic_region(tx_address, buf_size);
+ goto err;
+ }
+
+ mbox->tx_buffer = (void *) tx_address;
+ mbox->rx_buffer = (void *) rx_address;
+ mbox->rxtx_page_count = page_count;
+ spin_unlock(&mbox->lock);
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+ /* Execution stops here. */
+err:
+ spin_unlock(&mbox->lock);
+ return spmc_ffa_error_return(handle, error_code);
+}
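
From the caller's side, the register usage accepted by this handler is x1 = TX buffer base, x2 = RX buffer base and x3[5:0] = buffer size in 4KiB FF-A pages. The sketch below shows a hypothetical normal-world caller; ffa_call() and struct ffa_ret stand in for whatever SMC conduit that caller uses and are not TF-A APIs.

/* Hypothetical return structure and SMC conduit provided by the caller. */
struct ffa_ret {
	uint64_t fid;
	uint64_t arg1;
	uint64_t arg2;
};

struct ffa_ret ffa_call(uint64_t fid, uint64_t a1, uint64_t a2, uint64_t a3,
			uint64_t a4, uint64_t a5, uint64_t a6, uint64_t a7);

static int map_rxtx_pair(uintptr_t tx_base, uintptr_t rx_base,
			 uint32_t page_count)
{
	struct ffa_ret ret = ffa_call(FFA_RXTX_MAP_SMC64, tx_base, rx_base,
				      page_count & FFA_RXTX_PAGE_COUNT_MASK,
				      0, 0, 0, 0);

	return (ret.fid == FFA_SUCCESS_SMC32) ? 0 : -1;
}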
+
+static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+ uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+
+ /*
+ * The SPMC does not support mapping of VM RX/TX pairs to facilitate
+ * indirect messaging with SPs. Check if the Hypervisor has invoked this
+ * ABI on behalf of a VM and reject it if this is the case.
+ */
+ if (x1 != 0UL) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ spin_lock(&mbox->lock);
+
+ /* Check if buffers are currently mapped. */
+ if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
+ spin_unlock(&mbox->lock);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Unmap RX Buffer */
+ if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
+ buf_size) != 0) {
+ WARN("Unable to unmap RX buffer!\n");
+ }
+
+ mbox->rx_buffer = 0;
+
+ /* Unmap TX Buffer */
+ if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
+ buf_size) != 0) {
+ WARN("Unable to unmap TX buffer!\n");
+ }
+
+ mbox->tx_buffer = 0;
+ mbox->rxtx_page_count = 0;
+
+ spin_unlock(&mbox->lock);
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+}
+
+/*
+ * Collate the partition information in the v1.1 partition information
+ * descriptor format; this will be converted later if required.
+ */
+static int partition_info_get_handler_v1_1(uint32_t *uuid,
+ struct ffa_partition_info_v1_1
+ *partitions,
+ uint32_t max_partitions,
+ uint32_t *partition_count)
+{
+ uint32_t index;
+ struct ffa_partition_info_v1_1 *desc;
+ bool null_uuid = is_null_uuid(uuid);
+ struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
+
+ /* Deal with Logical Partitions. */
+ for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
+ if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
+ /* Found a matching UUID, populate appropriately. */
+ if (*partition_count >= max_partitions) {
+ return FFA_ERROR_NO_MEMORY;
+ }
+
+ desc = &partitions[*partition_count];
+ desc->ep_id = el3_lp_descs[index].sp_id;
+ desc->execution_ctx_count = PLATFORM_CORE_COUNT;
+ desc->properties = el3_lp_descs[index].properties;
+ if (null_uuid) {
+ copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
+ }
+ (*partition_count)++;
+ }
+ }
+
+ /* Deal with physical SP's. */
+ for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
+ if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
+ /* Found a matching UUID, populate appropriately. */
+ if (*partition_count >= max_partitions) {
+ return FFA_ERROR_NO_MEMORY;
+ }
+
+ desc = &partitions[*partition_count];
+ desc->ep_id = sp_desc[index].sp_id;
+ /*
+			 * Execution context count must match the number of
+			 * cores for S-EL1 SPs.
+ */
+ desc->execution_ctx_count = PLATFORM_CORE_COUNT;
+ desc->properties = sp_desc[index].properties;
+ if (null_uuid) {
+ copy_uuid(desc->uuid, sp_desc[index].uuid);
+ }
+ (*partition_count)++;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Handle the case where the caller only wants the count of partitions
+ * matching a given UUID and does not want the corresponding descriptors
+ * populated.
+ */
+static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
+{
+ uint32_t index = 0;
+ uint32_t partition_count = 0;
+ bool null_uuid = is_null_uuid(uuid);
+ struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
+
+ /* Deal with Logical Partitions. */
+ for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
+ if (null_uuid ||
+ uuid_match(uuid, el3_lp_descs[index].uuid)) {
+ (partition_count)++;
+ }
+ }
+
+ /* Deal with physical SP's. */
+ for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
+ if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
+ (partition_count)++;
+ }
+ }
+ return partition_count;
+}
+
+/*
+ * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
+ * the corresponding descriptor format from the v1.1 descriptor array.
+ */
+static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
+ *partitions,
+ struct mailbox *mbox,
+ int partition_count)
+{
+ uint32_t index;
+ uint32_t buf_size;
+ uint32_t descriptor_size;
+ struct ffa_partition_info_v1_0 *v1_0_partitions =
+ (struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
+
+ buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+ descriptor_size = partition_count *
+ sizeof(struct ffa_partition_info_v1_0);
+
+ if (descriptor_size > buf_size) {
+ return FFA_ERROR_NO_MEMORY;
+ }
+
+ for (index = 0U; index < partition_count; index++) {
+ v1_0_partitions[index].ep_id = partitions[index].ep_id;
+ v1_0_partitions[index].execution_ctx_count =
+ partitions[index].execution_ctx_count;
+ v1_0_partitions[index].properties =
+ partitions[index].properties;
+ }
+ return 0;
+}
+
+/*
+ * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
+ * v1.0 implementations.
+ */
+static uint64_t partition_info_get_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int ret;
+ uint32_t partition_count = 0;
+ uint32_t size = 0;
+ uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+ struct mailbox *mbox;
+ uint64_t info_get_flags;
+ bool count_only;
+ uint32_t uuid[4];
+
+ uuid[0] = x1;
+ uuid[1] = x2;
+ uuid[2] = x3;
+ uuid[3] = x4;
+
+ /* Determine if the Partition descriptors should be populated. */
+ info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
+ count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
+
+ /* Handle the case where we don't need to populate the descriptors. */
+ if (count_only) {
+ partition_count = partition_info_get_handler_count_only(uuid);
+ if (partition_count == 0) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+ } else {
+ struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
+
+ /*
+ * Handle the case where the partition descriptors are required,
+ * check we have the buffers available and populate the
+ * appropriate structure version.
+ */
+
+ /* Obtain the v1.1 format of the descriptors. */
+ ret = partition_info_get_handler_v1_1(uuid, partitions,
+ MAX_SP_LP_PARTITIONS,
+ &partition_count);
+
+ /* Check if an error occurred during discovery. */
+ if (ret != 0) {
+ goto err;
+ }
+
+ /* If we didn't find any matches the UUID is unknown. */
+ if (partition_count == 0) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err;
+ }
+
+ /* Obtain the partition mailbox RX/TX buffer pair descriptor. */
+ mbox = spmc_get_mbox_desc(secure_origin);
+
+ /*
+ * If the caller has not bothered registering its RX/TX pair
+ * then return an error code.
+ */
+ spin_lock(&mbox->lock);
+ if (mbox->rx_buffer == NULL) {
+ ret = FFA_ERROR_BUSY;
+ goto err_unlock;
+ }
+
+ /* Ensure the RX buffer is currently free. */
+ if (mbox->state != MAILBOX_STATE_EMPTY) {
+ ret = FFA_ERROR_BUSY;
+ goto err_unlock;
+ }
+
+ /* Zero the RX buffer before populating. */
+ (void)memset(mbox->rx_buffer, 0,
+ mbox->rxtx_page_count * FFA_PAGE_SIZE);
+
+ /*
+ * Depending on the FF-A version of the requesting partition
+ * we may need to convert to a v1.0 format otherwise we can copy
+ * directly.
+ */
+ if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
+ ret = partition_info_populate_v1_0(partitions,
+ mbox,
+ partition_count);
+ if (ret != 0) {
+ goto err_unlock;
+ }
+ } else {
+ uint32_t buf_size = mbox->rxtx_page_count *
+ FFA_PAGE_SIZE;
+
+ /* Ensure the descriptor will fit in the buffer. */
+ size = sizeof(struct ffa_partition_info_v1_1);
+ if (partition_count * size > buf_size) {
+ ret = FFA_ERROR_NO_MEMORY;
+ goto err_unlock;
+ }
+ memcpy(mbox->rx_buffer, partitions,
+ partition_count * size);
+ }
+
+ mbox->state = MAILBOX_STATE_FULL;
+ spin_unlock(&mbox->lock);
+ }
+ SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
+
+err_unlock:
+ spin_unlock(&mbox->lock);
+err:
+ return spmc_ffa_error_return(handle, ret);
+}
+
+static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
+{
+ SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
+}
+
+static uint64_t ffa_features_retrieve_request(bool secure_origin,
+ uint32_t input_properties,
+ void *handle)
+{
+ /*
+ * If we're called by the normal world we don't support any
+ * additional features.
+ */
+ if (!secure_origin) {
+ if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ } else {
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ /*
+ * If v1.1 the NS bit must be set otherwise it is an invalid
+ * call. If v1.0 check and store whether the SP has requested
+ * the use of the NS bit.
+ */
+ if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
+ if ((input_properties &
+ FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+ return ffa_feature_success(handle,
+ FFA_FEATURES_RET_REQ_NS_BIT);
+ } else {
+ sp->ns_bit_requested = (input_properties &
+ FFA_FEATURES_RET_REQ_NS_BIT) !=
+ 0U;
+ }
+ if (sp->ns_bit_requested) {
+ return ffa_feature_success(handle,
+ FFA_FEATURES_RET_REQ_NS_BIT);
+ }
+ }
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+}
+
+static uint64_t ffa_features_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ uint32_t function_id = (uint32_t) x1;
+ uint32_t input_properties = (uint32_t) x2;
+
+ /* Check if a Feature ID was requested. */
+ if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
+ /* We currently don't support any additional features. */
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /*
+ * Handle the cases where we have separate handlers due to additional
+ * properties.
+ */
+ switch (function_id) {
+ case FFA_MEM_RETRIEVE_REQ_SMC32:
+ case FFA_MEM_RETRIEVE_REQ_SMC64:
+ return ffa_features_retrieve_request(secure_origin,
+ input_properties,
+ handle);
+ }
+
+ /*
+ * We don't currently support additional input properties for these
+ * other ABIs therefore ensure this value is set to 0.
+ */
+ if (input_properties != 0U) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Report if any other FF-A ABI is supported. */
+ switch (function_id) {
+ /* Supported features from both worlds. */
+ case FFA_ERROR:
+ case FFA_SUCCESS_SMC32:
+ case FFA_INTERRUPT:
+ case FFA_SPM_ID_GET:
+ case FFA_ID_GET:
+ case FFA_FEATURES:
+ case FFA_VERSION:
+ case FFA_RX_RELEASE:
+ case FFA_MSG_SEND_DIRECT_REQ_SMC32:
+ case FFA_MSG_SEND_DIRECT_REQ_SMC64:
+ case FFA_PARTITION_INFO_GET:
+ case FFA_RXTX_MAP_SMC32:
+ case FFA_RXTX_MAP_SMC64:
+ case FFA_RXTX_UNMAP:
+ case FFA_MEM_FRAG_TX:
+ case FFA_MSG_RUN:
+
+ /*
+ * We are relying on the fact that the other registers
+ * will be set to 0 as these values align with the
+ * currently implemented features of the SPMC. If this
+ * changes this function must be extended to handle
+ * reporting the additional functionality.
+ */
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+ /* Execution stops here. */
+
+ /* Supported ABIs only from the secure world. */
+ case FFA_SECONDARY_EP_REGISTER_SMC64:
+ case FFA_MSG_SEND_DIRECT_RESP_SMC32:
+ case FFA_MSG_SEND_DIRECT_RESP_SMC64:
+ case FFA_MEM_RELINQUISH:
+ case FFA_MSG_WAIT:
+
+ if (!secure_origin) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+ /* Execution stops here. */
+
+ /* Supported features only from the normal world. */
+ case FFA_MEM_SHARE_SMC32:
+ case FFA_MEM_SHARE_SMC64:
+ case FFA_MEM_LEND_SMC32:
+ case FFA_MEM_LEND_SMC64:
+ case FFA_MEM_RECLAIM:
+ case FFA_MEM_FRAG_RX:
+
+ if (secure_origin) {
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+ /* Execution stops here. */
+
+ default:
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+}
+
+static uint64_t ffa_id_get_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ if (secure_origin) {
+ SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
+ spmc_get_current_sp_ctx()->sp_id);
+ } else {
+ SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
+ spmc_get_hyp_ctx()->ns_ep_id);
+ }
+}
+
+/*
+ * Enable an SP to query the ID assigned to the SPMC.
+ */
+static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ assert(x1 == 0UL);
+ assert(x2 == 0UL);
+ assert(x3 == 0UL);
+ assert(x4 == 0UL);
+ assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
+ assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
+ assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
+
+ SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
+}
+
+static uint64_t ffa_run_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct secure_partition_desc *sp;
+ uint16_t target_id = FFA_RUN_EP_ID(x1);
+ uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
+ unsigned int idx;
+ unsigned int *rt_state;
+ unsigned int *rt_model;
+
+ /* Can only be called from the normal world. */
+ if (secure_origin) {
+ ERROR("FFA_RUN can only be called from NWd.\n");
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Cannot run a Normal world partition. */
+ if (ffa_is_normal_world_id(target_id)) {
+ ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Check that the target SP exists. */
+	sp = spmc_get_sp_ctx(target_id);
+	if (sp == NULL) {
+		ERROR("Unknown partition ID (0x%x).\n", target_id);
+		return spmc_ffa_error_return(handle,
+				FFA_ERROR_INVALID_PARAMETER);
+	}
+
+ idx = get_ec_index(sp);
+ if (idx != vcpu_id) {
+ ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+ rt_state = &((sp->ec[idx]).rt_state);
+ rt_model = &((sp->ec[idx]).rt_model);
+ if (*rt_state == RT_STATE_RUNNING) {
+ ERROR("Partition (0x%x) is already running.\n", target_id);
+ return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
+ }
+
+ /*
+ * Sanity check that if the execution context was not waiting then it
+ * was either in the direct request or the run partition runtime model.
+ */
+ if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
+ assert(*rt_model == RT_MODEL_RUN ||
+ *rt_model == RT_MODEL_DIR_REQ);
+ }
+
+ /*
+ * If the context was waiting then update the partition runtime model.
+ */
+ if (*rt_state == RT_STATE_WAITING) {
+ *rt_model = RT_MODEL_RUN;
+ }
+
+ /*
+ * Forward the request to the correct SP vCPU after updating
+ * its state.
+ */
+ *rt_state = RT_STATE_RUNNING;
+
+ return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
+ handle, cookie, flags, target_id);
+}
+
+static uint64_t rx_release_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+
+ spin_lock(&mbox->lock);
+
+ if (mbox->state != MAILBOX_STATE_FULL) {
+ spin_unlock(&mbox->lock);
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+
+ mbox->state = MAILBOX_STATE_EMPTY;
+ spin_unlock(&mbox->lock);
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+}
+
+/*
+ * Perform initial validation on the provided secondary entry point.
+ * For now ensure it does not lie within the BL31 Image or the SP's
+ * RX/TX buffers as these are mapped within EL3.
+ * TODO: perform validation for additional invalid memory regions.
+ */
+static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
+{
+ struct mailbox *mb;
+ uintptr_t buffer_size;
+ uintptr_t sp_rx_buffer;
+ uintptr_t sp_tx_buffer;
+ uintptr_t sp_rx_buffer_limit;
+ uintptr_t sp_tx_buffer_limit;
+
+ mb = &sp->mailbox;
+ buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
+ sp_rx_buffer = (uintptr_t) mb->rx_buffer;
+ sp_tx_buffer = (uintptr_t) mb->tx_buffer;
+ sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
+ sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
+
+ /*
+ * Check if the entry point lies within BL31, or the
+ * SP's RX or TX buffer.
+ */
+ if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
+ (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
+ (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*******************************************************************************
+ * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
+ * register an entry point for initialization during a secondary cold boot.
+ ******************************************************************************/
+static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ struct secure_partition_desc *sp;
+ struct sp_exec_ctx *sp_ctx;
+
+ /* This request cannot originate from the Normal world. */
+ if (!secure_origin) {
+ WARN("%s: Can only be called from SWd.\n", __func__);
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Get the context of the current SP. */
+ sp = spmc_get_current_sp_ctx();
+ if (sp == NULL) {
+ WARN("%s: Cannot find SP context.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Only an S-EL1 SP should be invoking this ABI. */
+ if (sp->runtime_el != S_EL1) {
+ WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+
+ /* Ensure the SP is in its initialization state. */
+ sp_ctx = spmc_get_sp_ec(sp);
+ if (sp_ctx->rt_model != RT_MODEL_INIT) {
+ WARN("%s: Can only be called during SP initialization.\n",
+ __func__);
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+
+ /* Perform initial validation of the secondary entry point. */
+ if (validate_secondary_ep(x1, sp)) {
+ WARN("%s: Invalid entry point provided (0x%lx).\n",
+ __func__, x1);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Update the secondary entrypoint in SP context.
+ * We don't need a lock here as during partition initialization there
+ * will only be a single core online.
+ */
+ sp->secondary_ep = x1;
+ VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+}
+
+/*******************************************************************************
+ * This function parses the Secure Partition manifest. From the manifest, it
+ * fetches the details required to prepare the Secure Partition image context
+ * and the Secure Partition image boot arguments, if any.
+ ******************************************************************************/
+static int sp_manifest_parse(void *sp_manifest, int offset,
+ struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info,
+ int32_t *boot_info_reg)
+{
+ int32_t ret, node;
+ uint32_t config_32;
+
+ /*
+ * Look for the mandatory fields that are expected to be present in
+ * the SP manifests.
+ */
+ node = fdt_path_offset(sp_manifest, "/");
+ if (node < 0) {
+ ERROR("Did not find root node.\n");
+ return node;
+ }
+
+ ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
+ ARRAY_SIZE(sp->uuid), sp->uuid);
+ if (ret != 0) {
+ ERROR("Missing Secure Partition UUID.\n");
+ return ret;
+ }
+
+ ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
+ if (ret != 0) {
+ ERROR("Missing SP Exception Level information.\n");
+ return ret;
+ }
+
+ sp->runtime_el = config_32;
+
+ ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
+ if (ret != 0) {
+ ERROR("Missing Secure Partition FF-A Version.\n");
+ return ret;
+ }
+
+ sp->ffa_version = config_32;
+
+ ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
+ if (ret != 0) {
+ ERROR("Missing Secure Partition Execution State.\n");
+ return ret;
+ }
+
+ sp->execution_state = config_32;
+
+ ret = fdt_read_uint32(sp_manifest, node,
+ "messaging-method", &config_32);
+ if (ret != 0) {
+ ERROR("Missing Secure Partition messaging method.\n");
+ return ret;
+ }
+
+ /* Validate this entry, we currently only support direct messaging. */
+ if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
+ FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
+ WARN("Invalid Secure Partition messaging method (0x%x)\n",
+ config_32);
+ return -EINVAL;
+ }
+
+ sp->properties = config_32;
+
+ ret = fdt_read_uint32(sp_manifest, node,
+ "execution-ctx-count", &config_32);
+
+ if (ret != 0) {
+ ERROR("Missing SP Execution Context Count.\n");
+ return ret;
+ }
+
+ /*
+	 * Ensure this field is set correctly in the manifest. However, since
+	 * this is currently a hardcoded value for S-EL1 partitions, we don't
+	 * need to save it here; just validate it.
+ */
+ if (config_32 != PLATFORM_CORE_COUNT) {
+ ERROR("SP Execution Context Count (%u) must be %u.\n",
+ config_32, PLATFORM_CORE_COUNT);
+ return -EINVAL;
+ }
+
+ /*
+ * Look for the optional fields that are expected to be present in
+ * an SP manifest.
+ */
+ ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
+ if (ret != 0) {
+ WARN("Missing Secure Partition ID.\n");
+ } else {
+ if (!is_ffa_secure_id_valid(config_32)) {
+ ERROR("Invalid Secure Partition ID (0x%x).\n",
+ config_32);
+ return -EINVAL;
+ }
+ sp->sp_id = config_32;
+ }
+
+ ret = fdt_read_uint32(sp_manifest, node,
+ "power-management-messages", &config_32);
+ if (ret != 0) {
+ WARN("Missing Power Management Messages entry.\n");
+ } else {
+ /*
+ * Ensure only the currently supported power messages have
+ * been requested.
+ */
+ if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
+ FFA_PM_MSG_SUB_CPU_SUSPEND |
+ FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
+ ERROR("Requested unsupported PM messages (%x)\n",
+ config_32);
+ return -EINVAL;
+ }
+ sp->pwr_mgmt_msgs = config_32;
+ }
+
+ ret = fdt_read_uint32(sp_manifest, node,
+ "gp-register-num", &config_32);
+ if (ret != 0) {
+ WARN("Missing boot information register.\n");
+ } else {
+ /* Check if a register number between 0-3 is specified. */
+ if (config_32 < 4) {
+ *boot_info_reg = config_32;
+ } else {
+ WARN("Incorrect boot information register (%u).\n",
+ config_32);
+ }
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ * This function gets the Secure Partition Manifest base and maps the manifest
+ * region.
+ * Currently only one Secure Partition manifest is considered, which is used
+ * to prepare the context for the single Secure Partition.
+ ******************************************************************************/
+static int find_and_prepare_sp_context(void)
+{
+ void *sp_manifest;
+ uintptr_t manifest_base;
+ uintptr_t manifest_base_align;
+ entry_point_info_t *next_image_ep_info;
+ int32_t ret, boot_info_reg = -1;
+ struct secure_partition_desc *sp;
+
+ next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (next_image_ep_info == NULL) {
+ WARN("No Secure Partition image provided by BL2.\n");
+ return -ENOENT;
+ }
+
+ sp_manifest = (void *)next_image_ep_info->args.arg0;
+ if (sp_manifest == NULL) {
+ WARN("Secure Partition manifest absent.\n");
+ return -ENOENT;
+ }
+
+ manifest_base = (uintptr_t)sp_manifest;
+ manifest_base_align = page_align(manifest_base, DOWN);
+
+ /*
+ * Map the secure partition manifest region in the EL3 translation
+ * regime.
+	 * Map an area of (2 * PAGE_SIZE) for now. After aligning the manifest
+	 * base down to a page boundary, a single page starting at the aligned
+	 * base may not fully cover the manifest region.
+ */
+ ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
+ manifest_base_align,
+ PAGE_SIZE * 2,
+ MT_RO_DATA);
+ if (ret != 0) {
+ ERROR("Error while mapping SP manifest (%d).\n", ret);
+ return ret;
+ }
+
+ ret = fdt_node_offset_by_compatible(sp_manifest, -1,
+ "arm,ffa-manifest-1.0");
+ if (ret < 0) {
+ ERROR("Error happened in SP manifest reading.\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * Store the size of the manifest so that it can be used later when
+	 * passing the manifest as boot information.
+ */
+ next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
+ INFO("Manifest size = %lu bytes.\n", next_image_ep_info->args.arg1);
+
+ /*
+ * Select an SP descriptor for initialising the partition's execution
+ * context on the primary CPU.
+ */
+ sp = spmc_get_current_sp_ctx();
+
+ /* Initialize entry point information for the SP */
+ SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
+ SECURE | EP_ST_ENABLE);
+
+ /* Parse the SP manifest. */
+ ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
+ &boot_info_reg);
+ if (ret != 0) {
+ ERROR("Error in Secure Partition manifest parsing.\n");
+ return ret;
+ }
+
+ /* Check that the runtime EL in the manifest was correct. */
+ if (sp->runtime_el != S_EL1) {
+ ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
+ return -EINVAL;
+ }
+
+ /* Perform any common initialisation. */
+ spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
+
+ /* Perform any initialisation specific to S-EL1 SPs. */
+ spmc_el1_sp_setup(sp, next_image_ep_info);
+
+ /* Initialize the SP context with the required ep info. */
+ spmc_sp_common_ep_commit(sp, next_image_ep_info);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * This function validates the Logical Partition descriptors and then invokes
+ * the init function of each Logical Secure Partition.
+ ******************************************************************************/
+static int32_t logical_sp_init(void)
+{
+ int32_t rc = 0;
+ struct el3_lp_desc *el3_lp_descs;
+
+ /* Perform initial validation of the Logical Partitions. */
+ rc = el3_sp_desc_validate();
+ if (rc != 0) {
+ ERROR("Logical Partition validation failed!\n");
+ return rc;
+ }
+
+ el3_lp_descs = get_el3_lp_array();
+
+ INFO("Logical Secure Partition init start.\n");
+ for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
+ rc = el3_lp_descs[i].init();
+ if (rc != 0) {
+ ERROR("Logical SP (0x%x) Failed to Initialize\n",
+ el3_lp_descs[i].sp_id);
+ return rc;
+ }
+ VERBOSE("Logical SP (0x%x) Initialized\n",
+ el3_lp_descs[i].sp_id);
+ }
+
+ INFO("Logical Secure Partition init completed.\n");
+
+ return rc;
+}
+
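+/*******************************************************************************
+ * This function takes an SP execution context pointer and performs a
+ * synchronous entry into it on the current physical cpu.
+ ******************************************************************************/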
+uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
+{
+ uint64_t rc;
+
+ assert(ec != NULL);
+
+ /* Assign the context of the SP to this CPU */
+ cm_set_context(&(ec->cpu_ctx), SECURE);
+
+ /* Restore the context assigned above */
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ /* Invalidate TLBs at EL1. */
+ tlbivmalle1();
+ dsbish();
+
+ /* Enter Secure Partition */
+ rc = spm_secure_partition_enter(&ec->c_rt_ctx);
+
+ /* Save secure state */
+ cm_el1_sysregs_context_save(SECURE);
+
+ return rc;
+}
+
+/*******************************************************************************
+ * SPMC Helper Functions.
+ ******************************************************************************/
+static int32_t sp_init(void)
+{
+ uint64_t rc;
+ struct secure_partition_desc *sp;
+ struct sp_exec_ctx *ec;
+
+ sp = spmc_get_current_sp_ctx();
+ ec = spmc_get_sp_ec(sp);
+ ec->rt_model = RT_MODEL_INIT;
+ ec->rt_state = RT_STATE_RUNNING;
+
+ INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
+
+ rc = spmc_sp_synchronous_entry(ec);
+ if (rc != 0) {
+ /* Indicate SP init was not successful. */
+ ERROR("SP (0x%x) failed to initialize (%lu).\n",
+ sp->sp_id, rc);
+ return 0;
+ }
+
+ ec->rt_state = RT_STATE_WAITING;
+ INFO("Secure Partition initialized.\n");
+
+ return 1;
+}
+
+static void initialize_sp_descs(void)
+{
+ struct secure_partition_desc *sp;
+
+ for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
+ sp = &sp_desc[i];
+ sp->sp_id = INV_SP_ID;
+ sp->mailbox.rx_buffer = NULL;
+ sp->mailbox.tx_buffer = NULL;
+ sp->mailbox.state = MAILBOX_STATE_EMPTY;
+ sp->secondary_ep = 0;
+ }
+}
+
+static void initialize_ns_ep_descs(void)
+{
+ struct ns_endpoint_desc *ns_ep;
+
+ for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
+ ns_ep = &ns_ep_desc[i];
+ /*
+ * Clashes with the Hypervisor ID but will not be a
+ * problem in practice.
+ */
+ ns_ep->ns_ep_id = 0;
+ ns_ep->ffa_version = 0;
+ ns_ep->mailbox.rx_buffer = NULL;
+ ns_ep->mailbox.tx_buffer = NULL;
+ ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
+ }
+}
+
+/*******************************************************************************
+ * Initialize SPMC attributes for the SPMD.
+ ******************************************************************************/
+void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
+{
+ spmc_attrs->major_version = FFA_VERSION_MAJOR;
+ spmc_attrs->minor_version = FFA_VERSION_MINOR;
+ spmc_attrs->exec_state = MODE_RW_64;
+ spmc_attrs->spmc_id = FFA_SPMC_ID;
+}
+
+/*******************************************************************************
+ * Initialize contexts of all Secure Partitions.
+ ******************************************************************************/
+int32_t spmc_setup(void)
+{
+ int32_t ret;
+ uint32_t flags;
+
+ /* Initialize endpoint descriptors */
+	initialize_sp_descs();
+	initialize_ns_ep_descs();
+
+ /*
+	 * Retrieve the datastore allocated by platform code for tracking
+	 * shared memory requests and zero the region.
+ */
+ ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
+ &spmc_shmem_obj_state.data_size);
+ if (ret != 0) {
+ ERROR("Failed to obtain memory descriptor backing store!\n");
+ return ret;
+ }
+ memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
+
+ /* Setup logical SPs. */
+ ret = logical_sp_init();
+ if (ret != 0) {
+ ERROR("Failed to initialize Logical Partitions.\n");
+ return ret;
+ }
+
+ /* Perform physical SP setup. */
+
+ /* Disable MMU at EL1 (initialized by BL2) */
+ disable_mmu_icache_el1();
+
+ /* Initialize context of the SP */
+ INFO("Secure Partition context setup start.\n");
+
+ ret = find_and_prepare_sp_context();
+ if (ret != 0) {
+ ERROR("Error in SP finding and context preparation.\n");
+ return ret;
+ }
+
+ /* Register power management hooks with PSCI */
+ psci_register_spd_pm_hook(&spmc_pm);
+
+ /*
+	 * Register an interrupt handler for S-EL1 interrupts
+	 * generated while execution is in the non-secure
+	 * state.
+ */
+ flags = 0;
+ set_interrupt_rm_flag(flags, NON_SECURE);
+ ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+ spmc_sp_interrupt_handler,
+ flags);
+ if (ret != 0) {
+ ERROR("Failed to register interrupt handler! (%d)\n", ret);
+ panic();
+ }
+
+ /* Register init function for deferred init. */
+ bl31_register_bl32_init(&sp_init);
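+
+	/*
+	 * sp_init() will be invoked later by BL31 as part of the deferred
+	 * BL32 initialisation and performs the first synchronous entry into
+	 * the SP on the primary core.
+	 */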
+
+ INFO("Secure Partition setup done.\n");
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Secure Partition Manager SMC handler.
+ ******************************************************************************/
+uint64_t spmc_smc_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ switch (smc_fid) {
+
+ case FFA_VERSION:
+ return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_SPM_ID_GET:
+ return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
+ x3, x4, cookie, handle, flags);
+
+ case FFA_ID_GET:
+ return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_FEATURES:
+ return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_SECONDARY_EP_REGISTER_SMC64:
+ return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
+ x2, x3, x4, cookie, handle,
+ flags);
+
+ case FFA_MSG_SEND_DIRECT_REQ_SMC32:
+ case FFA_MSG_SEND_DIRECT_REQ_SMC64:
+ return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
+ x3, x4, cookie, handle, flags);
+
+ case FFA_MSG_SEND_DIRECT_RESP_SMC32:
+ case FFA_MSG_SEND_DIRECT_RESP_SMC64:
+ return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
+ x3, x4, cookie, handle, flags);
+
+ case FFA_RXTX_MAP_SMC32:
+ case FFA_RXTX_MAP_SMC64:
+ return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+
+ case FFA_RXTX_UNMAP:
+ return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_PARTITION_INFO_GET:
+ return partition_info_get_handler(smc_fid, secure_origin, x1,
+ x2, x3, x4, cookie, handle,
+ flags);
+
+ case FFA_RX_RELEASE:
+ return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_MSG_WAIT:
+ return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+
+ case FFA_ERROR:
+ return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+
+ case FFA_MSG_RUN:
+ return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+
+ case FFA_MEM_SHARE_SMC32:
+ case FFA_MEM_SHARE_SMC64:
+ case FFA_MEM_LEND_SMC32:
+ case FFA_MEM_LEND_SMC64:
+ return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
+ cookie, handle, flags);
+
+ case FFA_MEM_FRAG_TX:
+ return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_MEM_FRAG_RX:
+ return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ case FFA_MEM_RETRIEVE_REQ_SMC32:
+ case FFA_MEM_RETRIEVE_REQ_SMC64:
+ return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
+ x3, x4, cookie, handle, flags);
+
+ case FFA_MEM_RELINQUISH:
+ return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
+ x3, x4, cookie, handle, flags);
+
+ case FFA_MEM_RECLAIM:
+ return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
+ x4, cookie, handle, flags);
+
+ default:
+ WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
+ break;
+ }
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+}
+
+/*******************************************************************************
+ * This function is the handler registered for S-EL1 interrupts by the SPMC. It
+ * validates the interrupt and upon success arranges entry into the SP for
+ * handling the interrupt.
+ ******************************************************************************/
+static uint64_t spmc_sp_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ struct sp_exec_ctx *ec;
+ uint32_t linear_id = plat_my_core_pos();
+
+ /* Sanity check for a NULL pointer dereference. */
+ assert(sp != NULL);
+
+ /* Check the security state when the exception was generated. */
+ assert(get_interrupt_src_ss(flags) == NON_SECURE);
+
+ /* Panic if not an S-EL1 Partition. */
+ if (sp->runtime_el != S_EL1) {
+ ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
+ linear_id);
+ panic();
+ }
+
+ /* Obtain a reference to the SP execution context. */
+ ec = spmc_get_sp_ec(sp);
+
+ /* Ensure that the execution context is in waiting state else panic. */
+ if (ec->rt_state != RT_STATE_WAITING) {
+ ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
+ linear_id, RT_STATE_WAITING, ec->rt_state);
+ panic();
+ }
+
+ /* Update the runtime model and state of the partition. */
+ ec->rt_model = RT_MODEL_INTR;
+ ec->rt_state = RT_STATE_RUNNING;
+
+ VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
+
+ /*
+ * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
+ * populated as the SP can determine this by itself.
+ */
+ return spmd_smc_switch_state(FFA_INTERRUPT, false,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ handle);
+}
diff --git a/services/std_svc/spm/el3_spmc/spmc_pm.c b/services/std_svc/spm/el3_spmc/spmc_pm.c
new file mode 100644
index 0000000..d25344c
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_pm.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/spinlock.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <services/ffa_svc.h>
+#include "spmc.h"
+
+#include <platform_def.h>
+
+/*******************************************************************************
+ * spmc_build_pm_message
+ *
+ * Builds an SPMC to SP direct message request.
+ ******************************************************************************/
+static void spmc_build_pm_message(gp_regs_t *gpregs,
+ unsigned long long message,
+ uint8_t pm_msg_type,
+ uint16_t sp_id)
+{
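+	/*
+	 * The direct request registers are populated as follows:
+	 *   x0 - FFA_MSG_SEND_DIRECT_REQ_SMC32 function ID.
+	 *   x1 - SPMC ID in the source field, SP ID in the destination field.
+	 *   x2 - Framework message bit and the power management message type.
+	 *   x3 - The message payload, e.g. the PSCI function ID or the warm
+	 *        boot type.
+	 */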
+ write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
+ write_ctx_reg(gpregs, CTX_GPREG_X1,
+ (FFA_SPMC_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+ sp_id);
+ write_ctx_reg(gpregs, CTX_GPREG_X2, FFA_FWK_MSG_BIT |
+ (pm_msg_type & FFA_FWK_MSG_MASK));
+ write_ctx_reg(gpregs, CTX_GPREG_X3, message);
+}
+
+/*******************************************************************************
+ * This CPU has been turned on. Enter the SP to initialise S-EL1.
+ ******************************************************************************/
+static void spmc_cpu_on_finish_handler(u_register_t unused)
+{
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ struct sp_exec_ctx *ec;
+ unsigned int linear_id = plat_my_core_pos();
+ entry_point_info_t sec_ec_ep_info = {0};
+ uint64_t rc;
+
+ /* Sanity check for a NULL pointer dereference. */
+ assert(sp != NULL);
+
+ /* Initialize entry point information for the SP. */
+ SET_PARAM_HEAD(&sec_ec_ep_info, PARAM_EP, VERSION_1,
+ SECURE | EP_ST_ENABLE);
+
+ /*
+	 * Check if the primary execution context registered an entry point,
+	 * else bail out early.
+	 * TODO: Add support for a boot reason in the manifest to allow
+	 * jumping to the entrypoint of the primary execution context.
+ */
+ if (sp->secondary_ep == 0) {
+ WARN("%s: No secondary ep on core%u\n", __func__, linear_id);
+ return;
+ }
+
+ sec_ec_ep_info.pc = sp->secondary_ep;
+
+ /*
+ * Setup and initialise the SP execution context on this physical cpu.
+ */
+ spmc_el1_sp_setup(sp, &sec_ec_ep_info);
+ spmc_sp_common_ep_commit(sp, &sec_ec_ep_info);
+
+ /* Obtain a reference to the SP execution context. */
+ ec = spmc_get_sp_ec(sp);
+
+ /*
+ * TODO: Should we do some PM related state tracking of the SP execution
+ * context here?
+ */
+
+ /* Update the runtime model and state of the partition. */
+ ec->rt_model = RT_MODEL_INIT;
+ ec->rt_state = RT_STATE_RUNNING;
+
+ INFO("SP (0x%x) init start on core%u.\n", sp->sp_id, linear_id);
+
+ rc = spmc_sp_synchronous_entry(ec);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%lu) on CPU%u\n", __func__, rc, linear_id);
+ }
+
+ /* Update the runtime state of the partition. */
+ ec->rt_state = RT_STATE_WAITING;
+
+ VERBOSE("CPU %u on!\n", linear_id);
+}
+
+/*******************************************************************************
+ * Helper function to send an FF-A power management message to an SP.
+ ******************************************************************************/
+static int32_t spmc_send_pm_msg(uint8_t pm_msg_type,
+ unsigned long long psci_event)
+{
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ struct sp_exec_ctx *ec;
+ gp_regs_t *gpregs_ctx;
+ unsigned int linear_id = plat_my_core_pos();
+ u_register_t resp;
+ uint64_t rc;
+
+ /* Obtain a reference to the SP execution context. */
+ ec = spmc_get_sp_ec(sp);
+
+ /*
+ * TODO: Should we do some PM related state tracking of the SP execution
+ * context here?
+ */
+
+ /*
+ * Build an SPMC to SP direct message request.
+ * Note that x4-x6 should be populated with the original PSCI arguments.
+ */
+ spmc_build_pm_message(get_gpregs_ctx(&ec->cpu_ctx),
+ psci_event,
+ pm_msg_type,
+ sp->sp_id);
+
+ /* Sanity check partition state. */
+ assert(ec->rt_state == RT_STATE_WAITING);
+
+ /* Update the runtime model and state of the partition. */
+ ec->rt_model = RT_MODEL_DIR_REQ;
+ ec->rt_state = RT_STATE_RUNNING;
+
+ rc = spmc_sp_synchronous_entry(ec);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%lu) on CPU%u.\n", __func__, rc, linear_id);
+ assert(false);
+ return -EINVAL;
+ }
+
+ /*
+ * Validate we receive an expected response from the SP.
+ * TODO: We don't currently support aborting an SP in the scenario
+ * where it is misbehaving so assert these conditions are not
+ * met for now.
+ */
+ gpregs_ctx = get_gpregs_ctx(&ec->cpu_ctx);
+
+ /* Expect a direct message response from the SP. */
+ resp = read_ctx_reg(gpregs_ctx, CTX_GPREG_X0);
+ if (resp != FFA_MSG_SEND_DIRECT_RESP_SMC32) {
+ ERROR("%s invalid SP response (%lx).\n", __func__, resp);
+ assert(false);
+ return -EINVAL;
+ }
+
+ /* Ensure the sender and receiver are populated correctly. */
+ resp = read_ctx_reg(gpregs_ctx, CTX_GPREG_X1);
+ if (!(ffa_endpoint_source(resp) == sp->sp_id &&
+ ffa_endpoint_destination(resp) == FFA_SPMC_ID)) {
+ ERROR("%s invalid src/dst response (%lx).\n", __func__, resp);
+ assert(false);
+ return -EINVAL;
+ }
+
+ /* Expect a PM message response from the SP. */
+ resp = read_ctx_reg(gpregs_ctx, CTX_GPREG_X2);
+ if ((resp & FFA_FWK_MSG_BIT) == 0U ||
+ ((resp & FFA_FWK_MSG_MASK) != FFA_PM_MSG_PM_RESP)) {
+ ERROR("%s invalid PM response (%lx).\n", __func__, resp);
+ assert(false);
+ return -EINVAL;
+ }
+
+ /* Update the runtime state of the partition. */
+ ec->rt_state = RT_STATE_WAITING;
+
+ /* Return the status code returned by the SP */
+ return read_ctx_reg(gpregs_ctx, CTX_GPREG_X3);
+}
+
+/*******************************************************************************
+ * spmc_cpu_suspend_finish_handler
+ ******************************************************************************/
+static void spmc_cpu_suspend_finish_handler(u_register_t unused)
+{
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ unsigned int linear_id = plat_my_core_pos();
+ int32_t rc;
+
+ /* Sanity check for a NULL pointer dereference. */
+ assert(sp != NULL);
+
+ /*
+ * Check if the SP has subscribed for this power management message.
+ * If not then we don't have anything else to do here.
+ */
+ if ((sp->pwr_mgmt_msgs & FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME) == 0U) {
+ goto exit;
+ }
+
+ rc = spmc_send_pm_msg(FFA_PM_MSG_WB_REQ, FFA_WB_TYPE_NOTS2RAM);
+ if (rc < 0) {
+ ERROR("%s failed (%d) on CPU%u\n", __func__, rc, linear_id);
+ return;
+ }
+
+exit:
+ VERBOSE("CPU %u resumed!\n", linear_id);
+}
+
+/*******************************************************************************
+ * spmc_cpu_suspend_handler
+ ******************************************************************************/
+static void spmc_cpu_suspend_handler(u_register_t unused)
+{
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ unsigned int linear_id = plat_my_core_pos();
+ int32_t rc;
+
+ /* Sanity check for a NULL pointer dereference. */
+ assert(sp != NULL);
+
+ /*
+ * Check if the SP has subscribed for this power management message.
+ * If not then we don't have anything else to do here.
+ */
+ if ((sp->pwr_mgmt_msgs & FFA_PM_MSG_SUB_CPU_SUSPEND) == 0U) {
+ goto exit;
+ }
+
+ rc = spmc_send_pm_msg(FFA_FWK_MSG_PSCI, PSCI_CPU_SUSPEND_AARCH64);
+ if (rc < 0) {
+ ERROR("%s failed (%d) on CPU%u\n", __func__, rc, linear_id);
+ return;
+	}
+
+exit:
+ VERBOSE("CPU %u suspend!\n", linear_id);
+}
+
+/*******************************************************************************
+ * spmc_cpu_off_handler
+ ******************************************************************************/
+static int32_t spmc_cpu_off_handler(u_register_t unused)
+{
+ struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
+ unsigned int linear_id = plat_my_core_pos();
+ int32_t ret = 0;
+
+ /* Sanity check for a NULL pointer dereference. */
+ assert(sp != NULL);
+
+ /*
+ * Check if the SP has subscribed for this power management message.
+ * If not then we don't have anything else to do here.
+ */
+ if ((sp->pwr_mgmt_msgs & FFA_PM_MSG_SUB_CPU_OFF) == 0U) {
+ goto exit;
+ }
+
+ ret = spmc_send_pm_msg(FFA_FWK_MSG_PSCI, PSCI_CPU_OFF);
+ if (ret < 0) {
+ ERROR("%s failed (%d) on CPU%u\n", __func__, ret, linear_id);
+ return ret;
+ }
+
+exit:
+ VERBOSE("CPU %u off!\n", linear_id);
+ return ret;
+}
+
+/*******************************************************************************
+ * Structure populated by the SPM Core to perform any bookkeeping before
+ * PSCI executes a power mgmt. operation.
+ ******************************************************************************/
+const spd_pm_ops_t spmc_pm = {
+ .svc_on_finish = spmc_cpu_on_finish_handler,
+ .svc_off = spmc_cpu_off_handler,
+ .svc_suspend = spmc_cpu_suspend_handler,
+ .svc_suspend_finish = spmc_cpu_suspend_finish_handler
+};
diff --git a/services/std_svc/spm/el3_spmc/spmc_setup.c b/services/std_svc/spm/el3_spmc/spmc_setup.c
new file mode 100644
index 0000000..8ebae28
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_setup.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <libfdt.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <services/ffa_svc.h>
+#include "spm_common.h"
+#include "spmc.h"
+#include <tools_share/firmware_image_package.h>
+
+#include <platform_def.h>
+
+/*
+ * Statically allocate a page of memory for passing boot information to an SP.
+ */
+static uint8_t ffa_boot_info_mem[PAGE_SIZE] __aligned(PAGE_SIZE);
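+
+/*
+ * spmc_create_boot_info() lays this region out as:
+ *   [ffa_boot_info_header][ffa_boot_info_desc][copy of the SP manifest]
+ * with the manifest copied immediately after the single boot information
+ * descriptor.
+ */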
+
+/*
+ * This function creates an initialization descriptor in the memory reserved
+ * for passing boot information to an SP. It then copies the partition manifest
+ * into this region and ensures that its reference in the initialization
+ * descriptor is updated.
+ */
+static void spmc_create_boot_info(entry_point_info_t *ep_info,
+ struct secure_partition_desc *sp)
+{
+ struct ffa_boot_info_header *boot_header;
+ struct ffa_boot_info_desc *boot_descriptor;
+ uintptr_t manifest_addr;
+
+ /*
+ * Calculate the maximum size of the manifest that can be accommodated
+ * in the boot information memory region.
+ */
+ const unsigned int
+ max_manifest_sz = sizeof(ffa_boot_info_mem) -
+ (sizeof(struct ffa_boot_info_header) +
+ sizeof(struct ffa_boot_info_desc));
+
+ /*
+ * The current implementation only supports the FF-A v1.1
+ * implementation of the boot protocol, therefore check
+ * that a v1.0 SP has not requested use of the protocol.
+ */
+ if (sp->ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ ERROR("FF-A boot protocol not supported for v1.0 clients\n");
+ return;
+ }
+
+ /*
+ * Check if the manifest will fit into the boot info memory region else
+ * bail.
+ */
+ if (ep_info->args.arg1 > max_manifest_sz) {
+ WARN("Unable to copy manifest into boot information. ");
+ WARN("Max sz = %u bytes. Manifest sz = %lu bytes\n",
+ max_manifest_sz, ep_info->args.arg1);
+ return;
+ }
+
+ /* Zero the memory region before populating. */
+ memset(ffa_boot_info_mem, 0, PAGE_SIZE);
+
+ /*
+ * Populate the ffa_boot_info_header at the start of the boot info
+ * region.
+ */
+ boot_header = (struct ffa_boot_info_header *) ffa_boot_info_mem;
+
+ /* Position the ffa_boot_info_desc after the ffa_boot_info_header. */
+ boot_header->offset_boot_info_desc =
+ sizeof(struct ffa_boot_info_header);
+ boot_descriptor = (struct ffa_boot_info_desc *)
+ (ffa_boot_info_mem +
+ boot_header->offset_boot_info_desc);
+
+ /*
+	 * We must use the FF-A version corresponding to the version implemented
+ * by the SP. Currently this can only be v1.1.
+ */
+ boot_header->version = sp->ffa_version;
+
+ /* Populate the boot information header. */
+ boot_header->size_boot_info_desc = sizeof(struct ffa_boot_info_desc);
+
+ /* Set the signature "0xFFA". */
+ boot_header->signature = FFA_INIT_DESC_SIGNATURE;
+
+ /* Set the count. Currently 1 since only the manifest is specified. */
+ boot_header->count_boot_info_desc = 1;
+
+ /* Populate the boot information descriptor for the manifest. */
+ boot_descriptor->type =
+ FFA_BOOT_INFO_TYPE(FFA_BOOT_INFO_TYPE_STD) |
+ FFA_BOOT_INFO_TYPE_ID(FFA_BOOT_INFO_TYPE_ID_FDT);
+
+ boot_descriptor->flags =
+ FFA_BOOT_INFO_FLAG_NAME(FFA_BOOT_INFO_FLAG_NAME_UUID) |
+ FFA_BOOT_INFO_FLAG_CONTENT(FFA_BOOT_INFO_FLAG_CONTENT_ADR);
+
+ /*
+ * Copy the manifest into boot info region after the boot information
+ * descriptor.
+ */
+ boot_descriptor->size_boot_info = (uint32_t) ep_info->args.arg1;
+
+ manifest_addr = (uintptr_t) (ffa_boot_info_mem +
+ boot_header->offset_boot_info_desc +
+ boot_header->size_boot_info_desc);
+
+ memcpy((void *) manifest_addr, (void *) ep_info->args.arg0,
+ boot_descriptor->size_boot_info);
+
+ boot_descriptor->content = manifest_addr;
+
+ /* Calculate the size of the total boot info blob. */
+ boot_header->size_boot_info_blob = boot_header->offset_boot_info_desc +
+ boot_descriptor->size_boot_info +
+ (boot_header->count_boot_info_desc *
+ boot_header->size_boot_info_desc);
+
+ INFO("SP boot info @ 0x%lx, size: %u bytes.\n",
+ (uintptr_t) ffa_boot_info_mem,
+ boot_header->size_boot_info_blob);
+ INFO("SP manifest @ 0x%lx, size: %u bytes.\n",
+ boot_descriptor->content,
+ boot_descriptor->size_boot_info);
+}
+
+/*
+ * We are assuming that the index of the execution
+ * context used is the linear index of the current physical cpu.
+ */
+unsigned int get_ec_index(struct secure_partition_desc *sp)
+{
+ return plat_my_core_pos();
+}
+
+/* S-EL1 partition specific initialisation. */
+void spmc_el1_sp_setup(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info)
+{
+ /* Sanity check input arguments. */
+ assert(sp != NULL);
+ assert(ep_info != NULL);
+
+ /* Initialise the SPSR for S-EL1 SPs. */
+ ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+
+ /*
+	 * TF-A implementation-defined behaviour: provide the linear
+	 * core ID in the x4 register.
+ */
+ ep_info->args.arg4 = (uintptr_t) plat_my_core_pos();
+
+ /*
+ * Check whether setup is being performed for the primary or a secondary
+ * execution context. In the latter case, indicate to the SP that this
+ * is a warm boot.
+ * TODO: This check would need to be reworked if the same entry point is
+ * used for both primary and secondary initialisation.
+ */
+ if (sp->secondary_ep != 0U) {
+ /*
+ * Sanity check that the secondary entry point is still what was
+ * originally set.
+ */
+ assert(sp->secondary_ep == ep_info->pc);
+ ep_info->args.arg0 = FFA_WB_TYPE_S2RAM;
+ }
+}
+
+/* Common initialisation for all SPs. */
+void spmc_sp_common_setup(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info,
+ int32_t boot_info_reg)
+{
+ uint16_t sp_id;
+
+ /* Assign FF-A Partition ID if not already assigned. */
+ if (sp->sp_id == INV_SP_ID) {
+ sp_id = FFA_SP_ID_BASE + ACTIVE_SP_DESC_INDEX;
+ /*
+ * Ensure we don't clash with previously assigned partition
+ * IDs.
+ */
+ while (!is_ffa_secure_id_valid(sp_id)) {
+ sp_id++;
+
+ if (sp_id == FFA_SWD_ID_LIMIT) {
+ ERROR("Unable to determine valid SP ID.\n");
+ panic();
+ }
+ }
+ sp->sp_id = sp_id;
+ }
+
+ /*
+ * We currently only support S-EL1 partitions so ensure this is the
+ * case.
+ */
+ assert(sp->runtime_el == S_EL1);
+
+ /* Check if the SP wants to use the FF-A boot protocol. */
+ if (boot_info_reg >= 0) {
+ /*
+ * Create a boot information descriptor and copy the partition
+ * manifest into the reserved memory region for consumption by
+ * the SP.
+ */
+ spmc_create_boot_info(ep_info, sp);
+
+ /*
+ * We have consumed what we need from ep args so we can now
+ * zero them before we start populating with new information
+ * specifically for the SP.
+ */
+ zeromem(&ep_info->args, sizeof(ep_info->args));
+
+ /*
+ * Pass the address of the boot information in the
+ * boot_info_reg.
+ */
+ switch (boot_info_reg) {
+ case 0:
+ ep_info->args.arg0 = (uintptr_t) ffa_boot_info_mem;
+ break;
+ case 1:
+ ep_info->args.arg1 = (uintptr_t) ffa_boot_info_mem;
+ break;
+ case 2:
+ ep_info->args.arg2 = (uintptr_t) ffa_boot_info_mem;
+ break;
+ case 3:
+ ep_info->args.arg3 = (uintptr_t) ffa_boot_info_mem;
+ break;
+ default:
+ ERROR("Invalid value for \"gp-register-num\" %d.\n",
+ boot_info_reg);
+ }
+ } else {
+ /*
+ * We don't need any of the information that was populated
+ * in ep_args so we can clear them.
+ */
+ zeromem(&ep_info->args, sizeof(ep_info->args));
+ }
+}
+
+/*
+ * Initialise the SP context now that we have populated the common and EL
+ * specific entrypoint information.
+ */
+void spmc_sp_common_ep_commit(struct secure_partition_desc *sp,
+ entry_point_info_t *ep_info)
+{
+ cpu_context_t *cpu_ctx;
+
+ cpu_ctx = &(spmc_get_sp_ec(sp)->cpu_ctx);
+ print_entry_point_info(ep_info);
+ cm_setup_context(cpu_ctx, ep_info);
+}
diff --git a/services/std_svc/spm/el3_spmc/spmc_shared_mem.c b/services/std_svc/spm/el3_spmc/spmc_shared_mem.c
new file mode 100644
index 0000000..89d7b31
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_shared_mem.c
@@ -0,0 +1,1861 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <errno.h>
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/object_pool.h>
+#include <lib/spinlock.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <services/ffa_svc.h>
+#include "spmc.h"
+#include "spmc_shared_mem.h"
+
+#include <platform_def.h>
+
+/**
+ * struct spmc_shmem_obj - Shared memory object.
+ * @desc_size: Size of @desc.
+ * @desc_filled: Size of @desc already received.
+ * @in_use: Number of clients that have called ffa_mem_retrieve_req
+ * without a matching ffa_mem_relinquish call.
+ * @desc: FF-A memory region descriptor passed in ffa_mem_share.
+ */
+struct spmc_shmem_obj {
+ size_t desc_size;
+ size_t desc_filled;
+ size_t in_use;
+ struct ffa_mtd desc;
+};
+
+/*
+ * Declare our data structure to store the metadata of memory share requests.
+ * The main datastore is allocated on a per platform basis to ensure enough
+ * storage can be made available.
+ * The address of the data store will be populated by the SPMC during its
+ * initialization.
+ */
+
+struct spmc_shmem_obj_state spmc_shmem_obj_state = {
+ /* Set start value for handle so top 32 bits are needed quickly. */
+ .next_handle = 0xffffffc0U,
+};
+
+/**
+ * spmc_shmem_obj_size - Convert from descriptor size to object size.
+ * @desc_size: Size of struct ffa_memory_region_descriptor object.
+ *
+ * Return: Size of struct spmc_shmem_obj object.
+ */
+static size_t spmc_shmem_obj_size(size_t desc_size)
+{
+ return desc_size + offsetof(struct spmc_shmem_obj, desc);
+}
+
+/**
+ * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
+ * @state: Global state.
+ * @desc_size: Size of struct ffa_memory_region_descriptor object that
+ * allocated object will hold.
+ *
+ * Return: Pointer to newly allocated object, or %NULL if there is not enough
+ *         space left. The returned pointer is only valid while @state is
+ *         locked; to use it again after unlocking @state,
+ *         spmc_shmem_obj_lookup must be called.
+ */
+static struct spmc_shmem_obj *
+spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
+{
+ struct spmc_shmem_obj *obj;
+ size_t free = state->data_size - state->allocated;
+ size_t obj_size;
+
+ if (state->data == NULL) {
+ ERROR("Missing shmem datastore!\n");
+ return NULL;
+ }
+
+ obj_size = spmc_shmem_obj_size(desc_size);
+
+ /* Ensure the obj size has not overflowed. */
+ if (obj_size < desc_size) {
+ WARN("%s(0x%zx) desc_size overflow\n",
+ __func__, desc_size);
+ return NULL;
+ }
+
+ if (obj_size > free) {
+ WARN("%s(0x%zx) failed, free 0x%zx\n",
+ __func__, desc_size, free);
+ return NULL;
+ }
+ obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
+ obj->desc = (struct ffa_mtd) {0};
+ obj->desc_size = desc_size;
+ obj->desc_filled = 0;
+ obj->in_use = 0;
+ state->allocated += obj_size;
+ return obj;
+}
+
+/**
+ * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
+ * @state: Global state.
+ * @obj: Object to free.
+ *
+ * Release memory used by @obj. Other objects may move, so on return all
+ * pointers to struct spmc_shmem_obj object should be considered invalid, not
+ * just @obj.
+ *
+ * The current implementation always compacts the remaining objects to simplify
+ * the allocator and to avoid fragmentation.
+ */
+
+static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
+ struct spmc_shmem_obj *obj)
+{
+ size_t free_size = spmc_shmem_obj_size(obj->desc_size);
+ uint8_t *shift_dest = (uint8_t *)obj;
+ uint8_t *shift_src = shift_dest + free_size;
+ size_t shift_size = state->allocated - (shift_src - state->data);
+
+ if (shift_size != 0U) {
+ memmove(shift_dest, shift_src, shift_size);
+ }
+ state->allocated -= free_size;
+}
+
+/**
+ * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
+ * @state: Global state.
+ * @handle: Unique handle of object to return.
+ *
+ * Return: struct spmc_shmem_obj object with handle matching @handle.
+ *         %NULL, if no object in @state->data has a matching handle.
+ */
+static struct spmc_shmem_obj *
+spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
+{
+ uint8_t *curr = state->data;
+
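+	/*
+	 * Objects are stored contiguously in the datastore (allocation always
+	 * appends and freeing compacts), so walk the allocated region object
+	 * by object until a matching handle is found.
+	 */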
+ while (curr - state->data < state->allocated) {
+ struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
+
+ if (obj->desc.handle == handle) {
+ return obj;
+ }
+ curr += spmc_shmem_obj_size(obj->desc_size);
+ }
+ return NULL;
+}
+
+/**
+ * spmc_shmem_obj_get_next - Get the next memory object from an offset.
+ * @state:      Global state.
+ * @offset:     Offset used to track which objects have previously been
+ *              returned.
+ *
+ * Return: the next struct spmc_shmem_obj object from the provided
+ *         offset.
+ * %NULL, if there are no more objects.
+ */
+static struct spmc_shmem_obj *
+spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
+{
+ uint8_t *curr = state->data + *offset;
+
+ if (curr - state->data < state->allocated) {
+ struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
+
+ *offset += spmc_shmem_obj_size(obj->desc_size);
+
+ return obj;
+ }
+ return NULL;
+}
+
+/*******************************************************************************
+ * FF-A memory descriptor helper functions.
+ ******************************************************************************/
+/**
+ * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
+ *                           client's FF-A version.
+ * @desc: The memory transaction descriptor.
+ * @index: The index of the emad element to be accessed.
+ * @ffa_version: FF-A version of the provided structure.
+ * @emad_size: Will be populated with the size of the returned emad
+ * descriptor.
+ * Return: A pointer to the requested emad structure.
+ */
+static void *
+spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
+ uint32_t ffa_version, size_t *emad_size)
+{
+ uint8_t *emad;
+ /*
+ * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
+ * format, otherwise assume it is a v1.1 format.
+ */
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ /* Cast our descriptor to the v1.0 format. */
+ struct ffa_mtd_v1_0 *mtd_v1_0 =
+ (struct ffa_mtd_v1_0 *) desc;
+ emad = (uint8_t *) &(mtd_v1_0->emad);
+ *emad_size = sizeof(struct ffa_emad_v1_0);
+ } else {
+ if (!is_aligned(desc->emad_offset, 16)) {
+ WARN("Emad offset is not aligned.\n");
+ return NULL;
+ }
+ emad = ((uint8_t *) desc + desc->emad_offset);
+ *emad_size = desc->emad_size;
+ }
+ return (emad + (*emad_size * index));
+}
+
+/**
+ * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
+ * FF-A version of the descriptor.
+ * @obj: Object containing ffa_memory_region_descriptor.
+ *
+ * Return: struct ffa_comp_mrd object corresponding to the composite memory
+ * region descriptor.
+ */
+static struct ffa_comp_mrd *
+spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
+{
+ size_t emad_size;
+ /*
+ * The comp_mrd_offset field of the emad descriptor remains consistent
+	 * between FF-A versions, therefore we can use the v1.0 descriptor here
+ * in all cases.
+ */
+ struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
+ ffa_version,
+ &emad_size);
+ /* Ensure the emad array was found. */
+ if (emad == NULL) {
+ return NULL;
+ }
+
+ /* Ensure the composite descriptor offset is aligned. */
+ if (!is_aligned(emad->comp_mrd_offset, 8)) {
+ WARN("Unaligned composite memory region descriptor offset.\n");
+ return NULL;
+ }
+
+ return (struct ffa_comp_mrd *)
+ ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
+}
+
+/**
+ * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
+ * @obj: Object containing ffa_memory_region_descriptor.
+ *
+ * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
+ */
+static size_t
+spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj,
+ uint32_t ffa_version)
+{
+ struct ffa_comp_mrd *comp_mrd;
+
+ comp_mrd = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
+ if (comp_mrd == NULL) {
+ return 0;
+ }
+ return comp_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
+}
+
+/**
+ * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
+ * a given memory transaction.
+ * @sp_id: Partition ID to validate.
+ * @desc: Descriptor of the memory transaction.
+ *
+ * Return: true if ID is valid, else false.
+ */
+bool spmc_shmem_obj_validate_id(const struct ffa_mtd *desc, uint16_t sp_id)
+{
+ bool found = false;
+
+ /* Validate the partition is a valid participant. */
+ for (unsigned int i = 0U; i < desc->emad_count; i++) {
+ size_t emad_size;
+ struct ffa_emad_v1_0 *emad;
+
+ emad = spmc_shmem_obj_get_emad(desc, i,
+ MAKE_FFA_VERSION(1, 1),
+ &emad_size);
+ if (sp_id == emad->mapd.endpoint_id) {
+ found = true;
+ break;
+ }
+ }
+ return found;
+}
+
+/*
+ * Compare two memory regions to determine if any range overlaps with another
+ * ongoing memory transaction.
+ */
+static bool
+overlapping_memory_regions(struct ffa_comp_mrd *region1,
+ struct ffa_comp_mrd *region2)
+{
+ uint64_t region1_start;
+ uint64_t region1_size;
+ uint64_t region1_end;
+ uint64_t region2_start;
+ uint64_t region2_size;
+ uint64_t region2_end;
+
+ assert(region1 != NULL);
+ assert(region2 != NULL);
+
+ if (region1 == region2) {
+ return true;
+ }
+
+ /*
+ * Check each memory region in the request against existing
+ * transactions.
+ */
+ for (size_t i = 0; i < region1->address_range_count; i++) {
+
+ region1_start = region1->address_range_array[i].address;
+ region1_size =
+ region1->address_range_array[i].page_count *
+ PAGE_SIZE_4KB;
+ region1_end = region1_start + region1_size;
+
+ for (size_t j = 0; j < region2->address_range_count; j++) {
+
+ region2_start = region2->address_range_array[j].address;
+ region2_size =
+ region2->address_range_array[j].page_count *
+ PAGE_SIZE_4KB;
+ region2_end = region2_start + region2_size;
+
+ /* Check if regions are not overlapping. */
+ if (!((region2_end <= region1_start) ||
+ (region1_end <= region2_start))) {
+ WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
+ region1_start, region1_end,
+ region2_start, region2_end);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+/*******************************************************************************
+ * FF-A v1.0 Memory Descriptor Conversion Helpers.
+ ******************************************************************************/
+/**
+ * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
+ * converted descriptor.
+ * @orig: The original v1.0 memory transaction descriptor.
+ * @desc_size: The size of the original v1.0 memory transaction descriptor.
+ *
+ * Return: the size required to store the descriptor in the v1.1 format.
+ */
+static size_t
+spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
+{
+ size_t size = 0;
+ struct ffa_comp_mrd *mrd;
+ struct ffa_emad_v1_0 *emad_array = orig->emad;
+
+ /* Get the size of the v1.1 descriptor. */
+ size += sizeof(struct ffa_mtd);
+
+ /* Add the size of the emad descriptors. */
+ size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
+
+ /* Add the size of the composite mrds. */
+ size += sizeof(struct ffa_comp_mrd);
+
+ /* Add the size of the constituent mrds. */
+ mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
+ emad_array[0].comp_mrd_offset);
+
+ /* Check the calculated address is within the memory descriptor. */
+ if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
+ return 0;
+ }
+ size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
+
+ return size;
+}
+
+/**
+ * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
+ * converted descriptor.
+ * @orig: The original v1.1 memory transaction descriptor.
+ * @desc_size: The size of the original v1.1 memory transaction descriptor.
+ *
+ * Return: the size required to store the descriptor in the v1.0 format.
+ */
+static size_t
+spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
+{
+ size_t size = 0;
+ struct ffa_comp_mrd *mrd;
+ struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
+ ((uint8_t *) orig +
+ orig->emad_offset);
+
+ /* Get the size of the v1.0 descriptor. */
+ size += sizeof(struct ffa_mtd_v1_0);
+
+ /* Add the size of the v1.0 emad descriptors. */
+ size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
+
+ /* Add the size of the composite mrds. */
+ size += sizeof(struct ffa_comp_mrd);
+
+ /* Add the size of the constituent mrds. */
+ mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
+ emad_array[0].comp_mrd_offset);
+
+ /* Check the calculated address is within the memory descriptor. */
+ if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
+ return 0;
+ }
+ size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
+
+ return size;
+}
+
+/**
+ * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
+ * @out_obj: The shared memory object to populate the converted descriptor.
+ * @orig: The shared memory object containing the v1.0 descriptor.
+ *
+ * Return: true if the conversion is successful else false.
+ */
+static bool
+spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
+ struct spmc_shmem_obj *orig)
+{
+ struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
+ struct ffa_mtd *out = &out_obj->desc;
+ struct ffa_emad_v1_0 *emad_array_in;
+ struct ffa_emad_v1_0 *emad_array_out;
+ struct ffa_comp_mrd *mrd_in;
+ struct ffa_comp_mrd *mrd_out;
+
+ size_t mrd_in_offset;
+ size_t mrd_out_offset;
+ size_t mrd_size = 0;
+
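+	/*
+	 * The converted v1.1 descriptor is laid out as:
+	 *   [ffa_mtd][emad array][ffa_comp_mrd + constituent array]
+	 * with the emad array placed directly after the ffa_mtd structure and
+	 * the composite descriptor placed directly after the emad array.
+	 */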
+ /* Populate the new descriptor format from the v1.0 struct. */
+ out->sender_id = mtd_orig->sender_id;
+ out->memory_region_attributes = mtd_orig->memory_region_attributes;
+ out->flags = mtd_orig->flags;
+ out->handle = mtd_orig->handle;
+ out->tag = mtd_orig->tag;
+ out->emad_count = mtd_orig->emad_count;
+ out->emad_size = sizeof(struct ffa_emad_v1_0);
+
+ /*
+ * We will locate the emad descriptors directly after the ffa_mtd
+ * struct. This will be 8-byte aligned.
+ */
+ out->emad_offset = sizeof(struct ffa_mtd);
+
+ emad_array_in = mtd_orig->emad;
+ emad_array_out = (struct ffa_emad_v1_0 *)
+ ((uint8_t *) out + out->emad_offset);
+
+ /* Copy across the emad structs. */
+ for (unsigned int i = 0U; i < out->emad_count; i++) {
+ memcpy(&emad_array_out[i], &emad_array_in[i],
+ sizeof(struct ffa_emad_v1_0));
+ }
+
+ /* Place the mrd descriptors after the end of the emad descriptors.*/
+ mrd_in_offset = emad_array_in->comp_mrd_offset;
+ mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
+ mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
+
+ /* Add the size of the composite memory region descriptor. */
+ mrd_size += sizeof(struct ffa_comp_mrd);
+
+ /* Find the mrd descriptor. */
+ mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
+
+ /* Add the size of the constituent memory region descriptors. */
+ mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
+
+ /*
+ * Update the offset in the emads by the delta between the input and
+ * output addresses.
+ */
+ for (unsigned int i = 0U; i < out->emad_count; i++) {
+ emad_array_out[i].comp_mrd_offset =
+ emad_array_in[i].comp_mrd_offset +
+ (mrd_out_offset - mrd_in_offset);
+ }
+
+ /* Verify that we stay within bound of the memory descriptors. */
+ if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
+ (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
+ ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
+ (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
+ ERROR("%s: Invalid mrd structure.\n", __func__);
+ return false;
+ }
+
+ /* Copy the mrd descriptors directly. */
+ memcpy(mrd_out, mrd_in, mrd_size);
+
+ return true;
+}
+
+/**
+ * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
+ * v1.0 memory object.
+ * @out_obj: The shared memory object to populate the v1.0 descriptor.
+ * @orig: The shared memory object containing the v1.1 descriptor.
+ *
+ * Return: true if the conversion is successful else false.
+ */
+static bool
+spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
+ struct spmc_shmem_obj *orig)
+{
+ struct ffa_mtd *mtd_orig = &orig->desc;
+ struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
+ struct ffa_emad_v1_0 *emad_in;
+ struct ffa_emad_v1_0 *emad_array_in;
+ struct ffa_emad_v1_0 *emad_array_out;
+ struct ffa_comp_mrd *mrd_in;
+ struct ffa_comp_mrd *mrd_out;
+
+ size_t mrd_in_offset;
+ size_t mrd_out_offset;
+ size_t emad_out_array_size;
+ size_t mrd_size = 0;
+
+ /* Populate the v1.0 descriptor format from the v1.1 struct. */
+ out->sender_id = mtd_orig->sender_id;
+ out->memory_region_attributes = mtd_orig->memory_region_attributes;
+ out->flags = mtd_orig->flags;
+ out->handle = mtd_orig->handle;
+ out->tag = mtd_orig->tag;
+ out->emad_count = mtd_orig->emad_count;
+
+ /* Determine the location of the emad array in both descriptors. */
+ emad_array_in = (struct ffa_emad_v1_0 *)
+ ((uint8_t *) mtd_orig + mtd_orig->emad_offset);
+ emad_array_out = out->emad;
+
+ /* Copy across the emad structs. */
+ emad_in = emad_array_in;
+ for (unsigned int i = 0U; i < out->emad_count; i++) {
+ memcpy(&emad_array_out[i], emad_in,
+ sizeof(struct ffa_emad_v1_0));
+
+		emad_in = (struct ffa_emad_v1_0 *)((uint8_t *) emad_in +
+						   mtd_orig->emad_size);
+ }
+
+ /* Place the mrd descriptors after the end of the emad descriptors. */
+ emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;
+
+ mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
+ emad_out_array_size;
+
+ mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
+
+ mrd_in_offset = mtd_orig->emad_offset +
+ (mtd_orig->emad_size * mtd_orig->emad_count);
+
+ /* Add the size of the composite memory region descriptor. */
+ mrd_size += sizeof(struct ffa_comp_mrd);
+
+ /* Find the mrd descriptor. */
+ mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
+
+ /* Add the size of the constituent memory region descriptors. */
+ mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
+
+ /*
+ * Update the offset in the emads by the delta between the input and
+ * output addresses.
+ */
+ emad_in = emad_array_in;
+
+ for (unsigned int i = 0U; i < out->emad_count; i++) {
+ emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
+ (mrd_out_offset -
+ mrd_in_offset);
+		emad_in = (struct ffa_emad_v1_0 *)((uint8_t *) emad_in +
+						   mtd_orig->emad_size);
+ }
+
+ /* Verify that we stay within bound of the memory descriptors. */
+ if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
+ (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
+ ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
+ (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
+ ERROR("%s: Invalid mrd structure.\n", __func__);
+ return false;
+ }
+
+ /* Copy the mrd descriptors directly. */
+ memcpy(mrd_out, mrd_in, mrd_size);
+
+ return true;
+}
+
+/**
+ * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
+ * the v1.0 format and populates the
+ * provided buffer.
+ * @dst: Buffer to populate v1.0 ffa_memory_region_descriptor.
+ * @orig_obj: Object containing v1.1 ffa_memory_region_descriptor.
+ * @buf_size: Size of the buffer to populate.
+ * @offset: The offset of the converted descriptor to copy.
+ * @copy_size: Will be populated with the number of bytes copied.
+ * @v1_0_desc_size:  Will be populated with the total size of the v1.0
+ * descriptor.
+ *
+ * Return: 0 if conversion and population succeeded.
+ * Note: This function invalidates the reference to @orig_obj, therefore
+ * `spmc_shmem_obj_lookup` must be called if further usage is required.
+ */
+static uint32_t
+spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
+ size_t buf_size, size_t offset,
+ size_t *copy_size, size_t *v1_0_desc_size)
+{
+ struct spmc_shmem_obj *v1_0_obj;
+
+ /* Calculate the size that the v1.0 descriptor will require. */
+ *v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
+ &orig_obj->desc, orig_obj->desc_size);
+
+ if (*v1_0_desc_size == 0) {
+ ERROR("%s: cannot determine size of descriptor.\n",
+ __func__);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ /* Get a new obj to store the v1.0 descriptor. */
+ v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
+ *v1_0_desc_size);
+
+ if (!v1_0_obj) {
+ return FFA_ERROR_NO_MEMORY;
+ }
+
+ /* Perform the conversion from v1.1 to v1.0. */
+ if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
+ spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
+ return FFA_ERROR_INVALID_PARAMETER;
+ }
+
+ *copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
+ memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);
+
+ /*
+ * We're finished with the v1.0 descriptor for now so free it.
+ * Note that this will invalidate any references to the v1.1
+ * descriptor.
+ */
+ spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
+
+ return 0;
+}
+
+/**
+ * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
+ * @obj: Object containing ffa_memory_region_descriptor.
+ * @ffa_version: FF-A version of the provided descriptor.
+ *
+ * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
+ * offset or count is invalid.
+ */
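+/*
+ * Informal size relation enforced below (a sketch derived from the checks in
+ * this function, not a statement of the FF-A specification text):
+ *
+ *   obj->desc_size == emad->comp_mrd_offset
+ *                     + sizeof(struct ffa_comp_mrd)
+ *                     + comp->address_range_count * sizeof(struct ffa_cons_mrd)
+ *
+ * with comp_mrd_offset >= sizeof(mtd) + emad_count * emad_size and the same
+ * comp_mrd_offset used by every endpoint descriptor.
+ */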
+static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
+ uint32_t ffa_version)
+{
+ uint32_t comp_mrd_offset = 0;
+
+ if (obj->desc.emad_count == 0U) {
+ WARN("%s: unsupported attribute desc count %u.\n",
+ __func__, obj->desc.emad_count);
+ return -EINVAL;
+ }
+
+ for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
+ size_t size;
+ size_t count;
+ size_t expected_size;
+ size_t total_page_count;
+ size_t emad_size;
+ size_t desc_size;
+ size_t header_emad_size;
+ uint32_t offset;
+ struct ffa_comp_mrd *comp;
+ struct ffa_emad_v1_0 *emad;
+
+ emad = spmc_shmem_obj_get_emad(&obj->desc, emad_num,
+ ffa_version, &emad_size);
+ if (emad == NULL) {
+ WARN("%s: invalid emad structure.\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Validate the calculated emad address resides within the
+ * descriptor.
+ */
+ if ((uintptr_t) emad >=
+ (uintptr_t)((uint8_t *) &obj->desc + obj->desc_size)) {
+ WARN("Invalid emad access.\n");
+ return -EINVAL;
+ }
+
+ offset = emad->comp_mrd_offset;
+
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ desc_size = sizeof(struct ffa_mtd_v1_0);
+ } else {
+ desc_size = sizeof(struct ffa_mtd);
+ }
+
+ header_emad_size = desc_size +
+ (obj->desc.emad_count * emad_size);
+
+ if (offset < header_emad_size) {
+ WARN("%s: invalid object, offset %u < header + emad %zu\n",
+ __func__, offset, header_emad_size);
+ return -EINVAL;
+ }
+
+ size = obj->desc_size;
+
+ if (offset > size) {
+ WARN("%s: invalid object, offset %u > total size %zu\n",
+ __func__, offset, obj->desc_size);
+ return -EINVAL;
+ }
+ size -= offset;
+
+ if (size < sizeof(struct ffa_comp_mrd)) {
+ WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
+ __func__, offset, obj->desc_size);
+ return -EINVAL;
+ }
+ size -= sizeof(struct ffa_comp_mrd);
+
+ count = size / sizeof(struct ffa_cons_mrd);
+
+ comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
+
+ if (comp == NULL) {
+ WARN("%s: invalid comp_mrd offset\n", __func__);
+ return -EINVAL;
+ }
+
+ if (comp->address_range_count != count) {
+ WARN("%s: invalid object, desc count %u != %zu\n",
+ __func__, comp->address_range_count, count);
+ return -EINVAL;
+ }
+
+ expected_size = offset + sizeof(*comp) +
+ spmc_shmem_obj_ffa_constituent_size(obj,
+ ffa_version);
+
+ if (expected_size != obj->desc_size) {
+ WARN("%s: invalid object, computed size %zu != size %zu\n",
+ __func__, expected_size, obj->desc_size);
+ return -EINVAL;
+ }
+
+ if (obj->desc_filled < obj->desc_size) {
+ /*
+ * The whole descriptor has not yet been received.
+ * Skip final checks.
+ */
+ return 0;
+ }
+
+ /*
+ * The offset to the composite memory region descriptor should be
+ * consistent across all endpoint descriptors. Store the first entry
+ * and compare against subsequent entries.
+ */
+ if (comp_mrd_offset == 0) {
+ comp_mrd_offset = offset;
+ } else {
+ if (comp_mrd_offset != offset) {
+ ERROR("%s: mismatching offsets provided, %u != %u\n",
+ __func__, offset, comp_mrd_offset);
+ return -EINVAL;
+ }
+ }
+
+ total_page_count = 0;
+
+ for (size_t i = 0; i < count; i++) {
+ total_page_count +=
+ comp->address_range_array[i].page_count;
+ }
+ if (comp->total_page_count != total_page_count) {
+ WARN("%s: invalid object, desc total_page_count %u != %zu\n",
+ __func__, comp->total_page_count,
+ total_page_count);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/**
+ * spmc_shmem_check_state_obj - Check if the descriptor describes memory
+ * regions that are currently involved in an
+ * existing memory transaction. This implies that
+ * the memory is not in a valid state for lending.
+ * @obj: Object containing ffa_memory_region_descriptor.
+ * @ffa_version: FF-A version of the provided descriptor.
+ *
+ * Return: 0 if object is valid, -EINVAL if invalid memory state.
+ */
+static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
+ uint32_t ffa_version)
+{
+ size_t obj_offset = 0;
+ struct spmc_shmem_obj *inflight_obj;
+
+ struct ffa_comp_mrd *other_mrd;
+ struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
+ ffa_version);
+
+ if (requested_mrd == NULL) {
+ return -EINVAL;
+ }
+
+ inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
+ &obj_offset);
+
+ while (inflight_obj != NULL) {
+ /*
+ * Don't compare the transaction to itself or to partially
+ * transmitted descriptors.
+ */
+ if ((obj->desc.handle != inflight_obj->desc.handle) &&
+ (obj->desc_size == obj->desc_filled)) {
+ other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
+ FFA_VERSION_COMPILED);
+ if (other_mrd == NULL) {
+ return -EINVAL;
+ }
+ if (overlapping_memory_regions(requested_mrd,
+ other_mrd)) {
+ return -EINVAL;
+ }
+ }
+
+ inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
+ &obj_offset);
+ }
+ return 0;
+}
+
+static long spmc_ffa_fill_desc(struct mailbox *mbox,
+ struct spmc_shmem_obj *obj,
+ uint32_t fragment_length,
+ ffa_mtd_flag32_t mtd_flag,
+ uint32_t ffa_version,
+ void *smc_handle)
+{
+ int ret;
+ size_t emad_size;
+ uint32_t handle_low;
+ uint32_t handle_high;
+ struct ffa_emad_v1_0 *emad;
+ struct ffa_emad_v1_0 *other_emad;
+
+ if (mbox->rxtx_page_count == 0U) {
+ WARN("%s: buffer pair not registered.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_arg;
+ }
+
+ if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
+ WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
+ fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_arg;
+ }
+
+ if (fragment_length > obj->desc_size - obj->desc_filled) {
+ WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
+ fragment_length, obj->desc_size - obj->desc_filled);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_arg;
+ }
+
+ memcpy((uint8_t *)&obj->desc + obj->desc_filled,
+ (uint8_t *) mbox->tx_buffer, fragment_length);
+
+ /* Ensure that the sender ID resides in the normal world. */
+ if (ffa_is_secure_world_id(obj->desc.sender_id)) {
+ WARN("%s: Invalid sender ID 0x%x.\n",
+ __func__, obj->desc.sender_id);
+ ret = FFA_ERROR_DENIED;
+ goto err_arg;
+ }
+
+ /* Ensure the NS bit is set to 0. */
+ if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
+ WARN("%s: NS mem attributes flags MBZ.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_arg;
+ }
+
+ /*
+ * We don't currently support any optional flags so ensure none are
+ * requested.
+ */
+ if (obj->desc.flags != 0U && mtd_flag != 0U &&
+ (obj->desc.flags != mtd_flag)) {
+ WARN("%s: invalid memory transaction flags %u != %u\n",
+ __func__, obj->desc.flags, mtd_flag);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_arg;
+ }
+
+ if (obj->desc_filled == 0U) {
+ /* First fragment, descriptor header has been copied */
+ obj->desc.handle = spmc_shmem_obj_state.next_handle++;
+ obj->desc.flags |= mtd_flag;
+ }
+
+ obj->desc_filled += fragment_length;
+ ret = spmc_shmem_check_obj(obj, ffa_version);
+ if (ret != 0) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_bad_desc;
+ }
+
+ handle_low = (uint32_t)obj->desc.handle;
+ handle_high = obj->desc.handle >> 32;
+
+ if (obj->desc_filled != obj->desc_size) {
+ SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
+ handle_high, obj->desc_filled,
+ (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
+ }
+
+ /* The full descriptor has been received, perform any final checks. */
+
+ /*
+ * If a partition ID resides in the secure world validate that the
+ * partition ID is for a known partition. Ignore any partition ID
+ * belonging to the normal world as it is assumed the Hypervisor will
+ * have validated these.
+ */
+ for (size_t i = 0; i < obj->desc.emad_count; i++) {
+ emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
+ &emad_size);
+ if (emad == NULL) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_bad_desc;
+ }
+
+ ffa_endpoint_id16_t ep_id = emad->mapd.endpoint_id;
+
+ if (ffa_is_secure_world_id(ep_id)) {
+ if (spmc_get_sp_ctx(ep_id) == NULL) {
+ WARN("%s: Invalid receiver id 0x%x\n",
+ __func__, ep_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_bad_desc;
+ }
+ }
+ }
+
+ /* Ensure partition IDs are not duplicated. */
+ for (size_t i = 0; i < obj->desc.emad_count; i++) {
+ emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
+ &emad_size);
+ if (emad == NULL) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_bad_desc;
+ }
+ for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
+ other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
+ ffa_version,
+ &emad_size);
+ if (other_emad == NULL) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_bad_desc;
+ }
+
+ if (emad->mapd.endpoint_id ==
+ other_emad->mapd.endpoint_id) {
+ WARN("%s: Duplicated endpoint id 0x%x\n",
+ __func__, emad->mapd.endpoint_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_bad_desc;
+ }
+ }
+ }
+
+ ret = spmc_shmem_check_state_obj(obj, ffa_version);
+ if (ret) {
+ ERROR("%s: invalid memory region descriptor.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_bad_desc;
+ }
+
+ /*
+ * Everything checks out, if the sender was using FF-A v1.0, convert
+ * the descriptor format to use the v1.1 structures.
+ */
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ struct spmc_shmem_obj *v1_1_obj;
+ uint64_t mem_handle;
+
+ /* Calculate the size that the v1.1 descriptor will require. */
+ size_t v1_1_desc_size =
+ spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
+ obj->desc_size);
+
+ if (v1_1_desc_size == 0U) {
+ ERROR("%s: cannot determine size of descriptor.\n",
+ __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_arg;
+ }
+
+ /* Get a new obj to store the v1.1 descriptor. */
+ v1_1_obj =
+ spmc_shmem_obj_alloc(&spmc_shmem_obj_state, v1_1_desc_size);
+
+ if (!v1_1_obj) {
+ ret = FFA_ERROR_NO_MEMORY;
+ goto err_arg;
+ }
+
+ /* Perform the conversion from v1.0 to v1.1. */
+ v1_1_obj->desc_size = v1_1_desc_size;
+ v1_1_obj->desc_filled = v1_1_desc_size;
+ if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
+ ERROR("%s: Could not convert mtd!\n", __func__);
+ spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_arg;
+ }
+
+ /*
+ * We're finished with the v1.0 descriptor so free it
+ * and continue our checks with the new v1.1 descriptor.
+ */
+ mem_handle = obj->desc.handle;
+ spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
+ obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+ if (obj == NULL) {
+ ERROR("%s: Failed to find converted descriptor.\n",
+ __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ return spmc_ffa_error_return(smc_handle, ret);
+ }
+ }
+
+ /* Allow for platform specific operations to be performed. */
+ ret = plat_spmc_shmem_begin(&obj->desc);
+ if (ret != 0) {
+ goto err_arg;
+ }
+
+ SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
+ 0, 0, 0);
+
+err_bad_desc:
+err_arg:
+ spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
+ return spmc_ffa_error_return(smc_handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
+ * @client: Client state.
+ * @total_length: Total length of shared memory descriptor.
+ * @fragment_length: Length of fragment of shared memory descriptor passed in
+ * this call.
+ * @address: Not supported, must be 0.
+ * @page_count: Not supported, must be 0.
+ * @smc_handle: Handle passed to smc call. Used to return
+ * FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
+ *
+ * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
+ * to share or lend memory from non-secure os to secure os (with no stream
+ * endpoints).
+ *
+ * Return: 0 on success, error code on failure.
+ */
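+/*
+ * Illustrative call sequence (a sketch of the expected FF-A ABI usage, not
+ * code introduced by this patch): the NWd sender places the memory
+ * transaction descriptor in its TX buffer and issues:
+ *
+ *   w0 = FFA_MEM_SHARE or FFA_MEM_LEND function ID
+ *   w1 = total_length      total size of the descriptor in bytes
+ *   w2 = fragment_length   bytes carried by this call (== w1 if unfragmented)
+ *   x3 = 0, w4 = 0         address/page_count unsupported, TX buffer is used
+ *
+ * On success w0 holds FFA_SUCCESS with the 64-bit handle in w2/w3; if
+ * fragment_length < total_length, FFA_MEM_FRAG_RX is returned instead and the
+ * remaining bytes follow via FFA_MEM_FRAG_TX.
+ */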
+long spmc_ffa_mem_send(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t total_length,
+ uint32_t fragment_length,
+ uint64_t address,
+ uint32_t page_count,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+
+{
+ long ret;
+ struct spmc_shmem_obj *obj;
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+ ffa_mtd_flag32_t mtd_flag;
+ uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+
+ if (address != 0U || page_count != 0U) {
+ WARN("%s: custom memory region for message not supported.\n",
+ __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ if (secure_origin) {
+ WARN("%s: unsupported share direction.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Check if the descriptor is smaller than the v1.0 descriptor. The
+ * descriptor cannot be smaller than this structure.
+ */
+ if (fragment_length < sizeof(struct ffa_mtd_v1_0)) {
+ WARN("%s: bad first fragment size %u < %zu\n",
+ __func__, fragment_length, sizeof(struct ffa_mtd_v1_0));
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
+ mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
+ } else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
+ mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
+ } else {
+ WARN("%s: invalid memory management operation.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ spin_lock(&spmc_shmem_obj_state.lock);
+ obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
+ if (obj == NULL) {
+ ret = FFA_ERROR_NO_MEMORY;
+ goto err_unlock;
+ }
+
+ spin_lock(&mbox->lock);
+ ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
+ ffa_version, handle);
+ spin_unlock(&mbox->lock);
+
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ return ret;
+
+err_unlock:
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
+ * @client: Client state.
+ * @handle_low: Handle_low value returned from FFA_MEM_FRAG_RX.
+ * @handle_high: Handle_high value returned from FFA_MEM_FRAG_RX.
+ * @fragment_length: Length of fragments transmitted.
+ * @sender_id: Vmid of sender in bits [31:16]
+ * @smc_handle: Handle passed to smc call. Used to return
+ * FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
+ *
+ * Return: @smc_handle on success, error code on failure.
+ */
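+/*
+ * Illustrative flow (sketch of assumed usage, not new behaviour): when the
+ * first FFA_MEM_SHARE/LEND call answered with FFA_MEM_FRAG_RX, the sender
+ * copies the next chunk of the descriptor into its TX buffer and calls:
+ *
+ *   w0 = FFA_MEM_FRAG_TX function ID
+ *   w1/w2 = handle low/high words returned by FFA_MEM_FRAG_RX
+ *   w3 = fragment_length of this chunk
+ *   w4 = endpoint ID of the sender in bits [31:16]
+ *
+ * The SPMC answers with another FFA_MEM_FRAG_RX until the descriptor is
+ * complete, then with FFA_SUCCESS.
+ */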
+long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t handle_low,
+ uint64_t handle_high,
+ uint32_t fragment_length,
+ uint32_t sender_id,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ long ret;
+ uint32_t desc_sender_id;
+ uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+
+ struct spmc_shmem_obj *obj;
+ uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
+
+ spin_lock(&spmc_shmem_obj_state.lock);
+
+ obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+ if (obj == NULL) {
+ WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
+ __func__, mem_handle);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock;
+ }
+
+ desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
+ if (sender_id != desc_sender_id) {
+ WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
+ sender_id, desc_sender_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock;
+ }
+
+ if (obj->desc_filled == obj->desc_size) {
+ WARN("%s: object desc already filled, %zu\n", __func__,
+ obj->desc_filled);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock;
+ }
+
+ spin_lock(&mbox->lock);
+ ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
+ handle);
+ spin_unlock(&mbox->lock);
+
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ return ret;
+
+err_unlock:
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
+ * if the caller implements a version greater
+ * than FF-A 1.0 or if they have requested
+ * the functionality.
+ * TODO: We are assuming that the caller is
+ * an SP. To support retrieval from the
+ * normal world this function will need to be
+ * expanded accordingly.
+ * @resp: Descriptor populated in caller's RX buffer.
+ * @sp_ctx: Context of the calling SP.
+ */
+void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
+ struct secure_partition_desc *sp_ctx)
+{
+ if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
+ sp_ctx->ns_bit_requested) {
+ /*
+ * Currently memory senders must reside in the normal
+ * world, and we do not have the functionality to change
+ * the state of memory dynamically. Therefore we can always set
+ * the NS bit to 1.
+ */
+ resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
+ }
+}
+
+/**
+ * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
+ * @smc_fid: FID of SMC
+ * @total_length: Total length of retrieve request descriptor if this is
+ * the first call. Otherwise (unsupported) must be 0.
+ * @fragment_length: Length of fragment of retrieve request descriptor passed
+ * in this call. Only @fragment_length == @total_length is
+ * supported by this implementation.
+ * @address: Not supported, must be 0.
+ * @page_count: Not supported, must be 0.
+ * @smc_handle: Handle passed to smc call. Used to return
+ * FFA_MEM_RETRIEVE_RESP.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
+ * Used by secure os to retrieve memory already shared by non-secure os.
+ * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
+ * the client must call FFA_MEM_FRAG_RX until the full response has been
+ * received.
+ *
+ * Return: @handle on success, error code on failure.
+ */
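+/*
+ * Illustrative sketch (assumed caller behaviour): an SP that has been given a
+ * memory handle copies a retrieve request descriptor into its TX buffer (an
+ * ffa_mtd carrying that handle, the original sender ID and tag, and the SP's
+ * own endpoint descriptor), then issues FFA_MEM_RETRIEVE_REQ with
+ * w1 = w2 = the descriptor length. The (possibly converted) descriptor is
+ * written to the SP's RX buffer and FFA_MEM_RETRIEVE_RESP returns the total
+ * and copied lengths in w1 and w2.
+ */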
+long
+spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t total_length,
+ uint32_t fragment_length,
+ uint64_t address,
+ uint32_t page_count,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int ret;
+ size_t buf_size;
+ size_t copy_size = 0;
+ size_t min_desc_size;
+ size_t out_desc_size = 0;
+
+ /*
+ * Currently we are only accessing fields that are the same in both the
+ * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
+ * here. We only need to validate against the appropriate struct size.
+ */
+ struct ffa_mtd *resp;
+ const struct ffa_mtd *req;
+ struct spmc_shmem_obj *obj = NULL;
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+ uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+ struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
+
+ if (!secure_origin) {
+ WARN("%s: unsupported retrieve req direction.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ if (address != 0U || page_count != 0U) {
+ WARN("%s: custom memory region not supported.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ spin_lock(&mbox->lock);
+
+ req = mbox->tx_buffer;
+ resp = mbox->rx_buffer;
+ buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+
+ if (mbox->rxtx_page_count == 0U) {
+ WARN("%s: buffer pair not registered.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ if (mbox->state != MAILBOX_STATE_EMPTY) {
+ WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
+ ret = FFA_ERROR_DENIED;
+ goto err_unlock_mailbox;
+ }
+
+ if (fragment_length != total_length) {
+ WARN("%s: fragmented retrieve request not supported.\n",
+ __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ if (req->emad_count == 0U) {
+ WARN("%s: unsupported attribute desc count %u.\n",
+ __func__, req->emad_count);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ /* Determine the appropriate minimum descriptor size. */
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ min_desc_size = sizeof(struct ffa_mtd_v1_0);
+ } else {
+ min_desc_size = sizeof(struct ffa_mtd);
+ }
+ if (total_length < min_desc_size) {
+ WARN("%s: invalid length %u < %zu\n", __func__, total_length,
+ min_desc_size);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ spin_lock(&spmc_shmem_obj_state.lock);
+
+ obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
+ if (obj == NULL) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (obj->desc_filled != obj->desc_size) {
+ WARN("%s: incomplete object desc filled %zu < size %zu\n",
+ __func__, obj->desc_filled, obj->desc_size);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
+ WARN("%s: wrong sender id 0x%x != 0x%x\n",
+ __func__, req->sender_id, obj->desc.sender_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (req->emad_count != 0U && req->tag != obj->desc.tag) {
+ WARN("%s: wrong tag 0x%lx != 0x%lx\n",
+ __func__, req->tag, obj->desc.tag);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
+ WARN("%s: mistmatch of endpoint counts %u != %u\n",
+ __func__, req->emad_count, obj->desc.emad_count);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ /* Ensure the NS bit is set to 0 in the request. */
+ if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
+ WARN("%s: NS mem attributes flags MBZ.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (req->flags != 0U) {
+ if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
+ (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
+ /*
+ * If the retrieve request specifies the memory
+ * transaction ensure it matches what we expect.
+ */
+ WARN("%s: wrong mem transaction flags %x != %x\n",
+ __func__, req->flags, obj->desc.flags);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
+ req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
+ /*
+ * Current implementation does not support donate and
+ * it supports no other flags.
+ */
+ WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+ }
+
+ /* Validate the caller is a valid participant. */
+ if (!spmc_shmem_obj_validate_id(&obj->desc, sp_ctx->sp_id)) {
+ WARN("%s: Invalid endpoint ID (0x%x).\n",
+ __func__, sp_ctx->sp_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ /* Validate that the provided emad offset and structure are valid. */
+ for (size_t i = 0; i < req->emad_count; i++) {
+ size_t emad_size;
+ struct ffa_emad_v1_0 *emad;
+
+ emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
+ &emad_size);
+ if (emad == NULL) {
+ WARN("%s: invalid emad structure.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if ((uintptr_t) emad >= (uintptr_t)
+ ((uint8_t *) req + total_length)) {
+ WARN("Invalid emad access.\n");
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+ }
+
+ /*
+ * Validate all the endpoints match in the case of multiple
+ * borrowers. We don't mandate that the order of the borrowers
+ * must match in the descriptors therefore check to see if the
+ * endpoints match in any order.
+ */
+ for (size_t i = 0; i < req->emad_count; i++) {
+ bool found = false;
+ size_t emad_size;
+ struct ffa_emad_v1_0 *emad;
+ struct ffa_emad_v1_0 *other_emad;
+
+ emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
+ &emad_size);
+ if (emad == NULL) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ for (size_t j = 0; j < obj->desc.emad_count; j++) {
+ other_emad = spmc_shmem_obj_get_emad(
+ &obj->desc, j, MAKE_FFA_VERSION(1, 1),
+ &emad_size);
+
+ if (other_emad == NULL) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (req->emad_count &&
+ emad->mapd.endpoint_id ==
+ other_emad->mapd.endpoint_id) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ WARN("%s: invalid receiver id (0x%x).\n",
+ __func__, emad->mapd.endpoint_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+ }
+
+ mbox->state = MAILBOX_STATE_FULL;
+
+ if (req->emad_count != 0U) {
+ obj->in_use++;
+ }
+
+ /*
+ * If the caller is v1.0 convert the descriptor, otherwise copy
+ * directly.
+ */
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
+ &copy_size,
+ &out_desc_size);
+ if (ret != 0U) {
+ ERROR("%s: Failed to process descriptor.\n", __func__);
+ goto err_unlock_all;
+ }
+ } else {
+ copy_size = MIN(obj->desc_size, buf_size);
+ out_desc_size = obj->desc_size;
+
+ memcpy(resp, &obj->desc, copy_size);
+ }
+
+ /* Set the NS bit in the response if applicable. */
+ spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
+
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ spin_unlock(&mbox->lock);
+
+ SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
+ copy_size, 0, 0, 0, 0, 0);
+
+err_unlock_all:
+ spin_unlock(&spmc_shmem_obj_state.lock);
+err_unlock_mailbox:
+ spin_unlock(&mbox->lock);
+ return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
+ * @client: Client state.
+ * @handle_low: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
+ * @handle_high: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
+ * @fragment_offset: Byte offset in descriptor to resume at.
+ * @sender_id: Bit[31:16]: Endpoint id of sender if client is a
+ * hypervisor. 0 otherwise.
+ * @smc_handle: Handle passed to smc call. Used to return
+ * FFA_MEM_FRAG_TX.
+ *
+ * Return: @smc_handle on success, error code on failure.
+ */
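+/*
+ * Illustrative sketch (assumed usage): if FFA_MEM_RETRIEVE_RESP indicated a
+ * descriptor larger than the RX buffer (w1 > w2), the SP releases its RX
+ * buffer and repeatedly calls FFA_MEM_FRAG_RX with w1/w2 = handle low/high
+ * and w3 = the byte offset already received; each call returns
+ * FFA_MEM_FRAG_TX with the size of the newly copied fragment in w3.
+ */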
+long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t handle_low,
+ uint32_t handle_high,
+ uint32_t fragment_offset,
+ uint32_t sender_id,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int ret;
+ void *src;
+ size_t buf_size;
+ size_t copy_size;
+ size_t full_copy_size;
+ uint32_t desc_sender_id;
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+ uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
+ struct spmc_shmem_obj *obj;
+ uint32_t ffa_version = get_partition_ffa_version(secure_origin);
+
+ if (!secure_origin) {
+ WARN("%s: can only be called from swld.\n",
+ __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ spin_lock(&spmc_shmem_obj_state.lock);
+
+ obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+ if (obj == NULL) {
+ WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
+ __func__, mem_handle);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_shmem;
+ }
+
+ desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
+ if (sender_id != 0U && sender_id != desc_sender_id) {
+ WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
+ sender_id, desc_sender_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_shmem;
+ }
+
+ if (fragment_offset >= obj->desc_size) {
+ WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
+ __func__, fragment_offset, obj->desc_size);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_shmem;
+ }
+
+ spin_lock(&mbox->lock);
+
+ if (mbox->rxtx_page_count == 0U) {
+ WARN("%s: buffer pair not registered.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (mbox->state != MAILBOX_STATE_EMPTY) {
+ WARN("%s: RX Buffer is full!\n", __func__);
+ ret = FFA_ERROR_DENIED;
+ goto err_unlock_all;
+ }
+
+ buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
+
+ mbox->state = MAILBOX_STATE_FULL;
+
+ /*
+ * If the caller is v1.0 convert the descriptor, otherwise copy
+ * directly.
+ */
+ if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
+ size_t out_desc_size;
+
+ ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
+ buf_size,
+ fragment_offset,
+ &copy_size,
+ &out_desc_size);
+ if (ret != 0U) {
+ ERROR("%s: Failed to process descriptor.\n", __func__);
+ goto err_unlock_all;
+ }
+ } else {
+ full_copy_size = obj->desc_size - fragment_offset;
+ copy_size = MIN(full_copy_size, buf_size);
+
+ src = &obj->desc;
+
+ memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
+ }
+
+ spin_unlock(&mbox->lock);
+ spin_unlock(&spmc_shmem_obj_state.lock);
+
+ SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
+ copy_size, sender_id, 0, 0, 0);
+
+err_unlock_all:
+ spin_unlock(&mbox->lock);
+err_unlock_shmem:
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
+ * @client: Client state.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
+ * Used by secure os to release previously shared memory to non-secure os.
+ *
+ * The handle to release must be in the client's (secure os's) transmit buffer.
+ *
+ * Return: 0 on success, error code on failure.
+ */
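+/*
+ * Illustrative sketch (assumed caller behaviour): before invoking
+ * FFA_MEM_RELINQUISH the SP fills its TX buffer with a relinquish
+ * descriptor, for example:
+ *
+ *   struct ffa_mem_relinquish_descriptor *rel = mbox->tx_buffer;
+ *
+ *   rel->handle = shared_mem_handle;  // handle from the retrieve response
+ *   rel->flags = 0;                   // no clear/time-slicing support
+ *   rel->endpoint_count = 1;
+ *   rel->endpoint_array[0] = own_sp_id;
+ *
+ * shared_mem_handle and own_sp_id are hypothetical names for values the SP
+ * obtained earlier.
+ */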
+int spmc_ffa_mem_relinquish(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t handle_low,
+ uint32_t handle_high,
+ uint32_t fragment_offset,
+ uint32_t sender_id,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int ret;
+ struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
+ struct spmc_shmem_obj *obj;
+ const struct ffa_mem_relinquish_descriptor *req;
+ struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
+
+ if (!secure_origin) {
+ WARN("%s: unsupported relinquish direction.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ spin_lock(&mbox->lock);
+
+ if (mbox->rxtx_page_count == 0U) {
+ WARN("%s: buffer pair not registered.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ req = mbox->tx_buffer;
+
+ if (req->flags != 0U) {
+ WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ if (req->endpoint_count == 0) {
+ WARN("%s: endpoint count cannot be 0.\n", __func__);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_mailbox;
+ }
+
+ spin_lock(&spmc_shmem_obj_state.lock);
+
+ obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
+ if (obj == NULL) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ /*
+ * Validate the endpoint ID was populated correctly. We don't currently
+ * support proxy endpoints so the endpoint count should always be 1.
+ */
+ if (req->endpoint_count != 1U) {
+ WARN("%s: unsupported endpoint count %u != 1\n", __func__,
+ req->endpoint_count);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ /* Validate provided endpoint ID matches the partition ID. */
+ if (req->endpoint_array[0] != sp_ctx->sp_id) {
+ WARN("%s: invalid endpoint ID %u != %u\n", __func__,
+ req->endpoint_array[0], sp_ctx->sp_id);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ /* Validate the caller is a valid participant. */
+ if (!spmc_shmem_obj_validate_id(&obj->desc, sp_ctx->sp_id)) {
+ WARN("%s: Invalid endpoint ID (0x%x).\n",
+ __func__, req->endpoint_array[0]);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+
+ if (obj->in_use == 0U) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock_all;
+ }
+ obj->in_use--;
+
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ spin_unlock(&mbox->lock);
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+
+err_unlock_all:
+ spin_unlock(&spmc_shmem_obj_state.lock);
+err_unlock_mailbox:
+ spin_unlock(&mbox->lock);
+ return spmc_ffa_error_return(handle, ret);
+}
+
+/**
+ * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
+ * @client: Client state.
+ * @handle_low: Unique handle of shared memory object to reclaim. Bit[31:0].
+ * @handle_high: Unique handle of shared memory object to reclaim.
+ * Bit[63:32].
+ * @flags: Unsupported, ignored.
+ *
+ * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
+ * Used by non-secure os to reclaim memory previously shared with secure os.
+ *
+ * Return: 0 on success, error code on failure.
+ */
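+/*
+ * Illustrative sketch (assumed caller behaviour): once every borrower has
+ * relinquished the region, the NWd owner reclaims it with
+ * w0 = FFA_MEM_RECLAIM, w1/w2 = handle low/high and w3 = 0 (flags), after
+ * which the handle becomes invalid and the backing object is freed below.
+ */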
+int spmc_ffa_mem_reclaim(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t handle_low,
+ uint32_t handle_high,
+ uint32_t mem_flags,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int ret;
+ struct spmc_shmem_obj *obj;
+ uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
+
+ if (secure_origin) {
+ WARN("%s: unsupported reclaim direction.\n", __func__);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ if (mem_flags != 0U) {
+ WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
+ return spmc_ffa_error_return(handle,
+ FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ spin_lock(&spmc_shmem_obj_state.lock);
+
+ obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
+ if (obj == NULL) {
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock;
+ }
+ if (obj->in_use != 0U) {
+ ret = FFA_ERROR_DENIED;
+ goto err_unlock;
+ }
+
+ if (obj->desc_filled != obj->desc_size) {
+ WARN("%s: incomplete object desc filled %zu < size %zu\n",
+ __func__, obj->desc_filled, obj->desc_size);
+ ret = FFA_ERROR_INVALID_PARAMETER;
+ goto err_unlock;
+ }
+
+ /* Allow for platform specific operations to be performed. */
+ ret = plat_spmc_shmem_reclaim(&obj->desc);
+ if (ret != 0) {
+ goto err_unlock;
+ }
+
+ spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
+ spin_unlock(&spmc_shmem_obj_state.lock);
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+
+err_unlock:
+ spin_unlock(&spmc_shmem_obj_state.lock);
+ return spmc_ffa_error_return(handle, ret);
+}
diff --git a/services/std_svc/spm/el3_spmc/spmc_shared_mem.h b/services/std_svc/spm/el3_spmc/spmc_shared_mem.h
new file mode 100644
index 0000000..839f7a1
--- /dev/null
+++ b/services/std_svc/spm/el3_spmc/spmc_shared_mem.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMC_SHARED_MEM_H
+#define SPMC_SHARED_MEM_H
+
+#include <services/el3_spmc_ffa_memory.h>
+
+/**
+ * struct ffa_mem_relinquish_descriptor - Relinquish request descriptor.
+ * @handle:
+ * Id of shared memory object to relinquish.
+ * @flags:
+ * If bit 0 is set, clear memory after unmapping from the borrower. Must
+ * be 0 for share. Bit[1]: Time slicing. Not supported, must be 0. All
+ * other bits are reserved and must be 0.
+ * @endpoint_count:
+ * Number of entries in @endpoint_array.
+ * @endpoint_array:
+ * Array of endpoint ids.
+ */
+struct ffa_mem_relinquish_descriptor {
+ uint64_t handle;
+ uint32_t flags;
+ uint32_t endpoint_count;
+ ffa_endpoint_id16_t endpoint_array[];
+};
+CASSERT(sizeof(struct ffa_mem_relinquish_descriptor) == 16,
+ assert_ffa_mem_relinquish_descriptor_size_mismatch);
+
+/**
+ * struct spmc_shmem_obj_state - Global state.
+ * @data: Backing store for spmc_shmem_obj objects.
+ * @data_size: The size allocated for the backing store.
+ * @allocated: Number of bytes allocated in @data.
+ * @next_handle: Handle used for next allocated object.
+ * @lock: Lock protecting all state in this file.
+ */
+struct spmc_shmem_obj_state {
+ uint8_t *data;
+ size_t data_size;
+ size_t allocated;
+ uint64_t next_handle;
+ spinlock_t lock;
+};
+
+extern struct spmc_shmem_obj_state spmc_shmem_obj_state;
+extern int plat_spmc_shmem_begin(struct ffa_mtd *desc);
+extern int plat_spmc_shmem_reclaim(struct ffa_mtd *desc);
+
+long spmc_ffa_mem_send(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t total_length,
+ uint32_t fragment_length,
+ uint64_t address,
+ uint32_t page_count,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t handle_low,
+ uint64_t handle_high,
+ uint32_t fragment_length,
+ uint32_t sender_id,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+long spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t total_length,
+ uint32_t fragment_length,
+ uint64_t address,
+ uint32_t page_count,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t handle_low,
+ uint32_t handle_high,
+ uint32_t fragment_offset,
+ uint32_t sender_id,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+
+int spmc_ffa_mem_relinquish(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t handle_low,
+ uint32_t handle_high,
+ uint32_t fragment_offset,
+ uint32_t sender_id,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+int spmc_ffa_mem_reclaim(uint32_t smc_fid,
+ bool secure_origin,
+ uint32_t handle_low,
+ uint32_t handle_high,
+ uint32_t mem_flags,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags);
+
+#endif /* SPMC_SHARED_MEM_H */
diff --git a/services/std_svc/spm/spm_mm/aarch64/spm_mm_shim_exceptions.S b/services/std_svc/spm/spm_mm/aarch64/spm_mm_shim_exceptions.S
new file mode 100644
index 0000000..836f75c
--- /dev/null
+++ b/services/std_svc/spm/spm_mm/aarch64/spm_mm_shim_exceptions.S
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <context.h>
+
+/* -----------------------------------------------------------------------------
+ * Very simple stackless exception handlers used by the spm shim layer.
+ * -----------------------------------------------------------------------------
+ */
+ .globl spm_shim_exceptions_ptr
+
+vector_base spm_shim_exceptions_ptr, .spm_shim_exceptions
+
+ /* -----------------------------------------------------
+ * Current EL with SP0 : 0x0 - 0x200
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionSP0, .spm_shim_exceptions
+ b .
+end_vector_entry SynchronousExceptionSP0
+
+vector_entry IrqSP0, .spm_shim_exceptions
+ b .
+end_vector_entry IrqSP0
+
+vector_entry FiqSP0, .spm_shim_exceptions
+ b .
+end_vector_entry FiqSP0
+
+vector_entry SErrorSP0, .spm_shim_exceptions
+ b .
+end_vector_entry SErrorSP0
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x400
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionSPx, .spm_shim_exceptions
+ b .
+end_vector_entry SynchronousExceptionSPx
+
+vector_entry IrqSPx, .spm_shim_exceptions
+ b .
+end_vector_entry IrqSPx
+
+vector_entry FiqSPx, .spm_shim_exceptions
+ b .
+end_vector_entry FiqSPx
+
+vector_entry SErrorSPx, .spm_shim_exceptions
+ b .
+end_vector_entry SErrorSPx
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600. No exceptions
+ * are handled since secure_partition does not implement
+ * a lower EL
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionA64, .spm_shim_exceptions
+ msr tpidr_el1, x30
+ mrs x30, esr_el1
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+ cmp x30, #EC_AARCH64_SVC
+ b.eq do_smc
+
+ cmp x30, #EC_AARCH32_SVC
+ b.eq do_smc
+
+ cmp x30, #EC_AARCH64_SYS
+ b.eq handle_sys_trap
+
+ /* Fail in all the other cases */
+ b panic
+
+ /* ---------------------------------------------
+ * Tell SPM that we are done initialising
+ * ---------------------------------------------
+ */
+do_smc:
+ mrs x30, tpidr_el1
+ smc #0
+ exception_return
+
+ /* AArch64 system instruction traps are handled as a panic for now */
+handle_sys_trap:
+panic:
+ b panic
+end_vector_entry SynchronousExceptionA64
+
+vector_entry IrqA64, .spm_shim_exceptions
+ b .
+end_vector_entry IrqA64
+
+vector_entry FiqA64, .spm_shim_exceptions
+ b .
+end_vector_entry FiqA64
+
+vector_entry SErrorA64, .spm_shim_exceptions
+ b .
+end_vector_entry SErrorA64
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionA32, .spm_shim_exceptions
+ b .
+end_vector_entry SynchronousExceptionA32
+
+vector_entry IrqA32, .spm_shim_exceptions
+ b .
+end_vector_entry IrqA32
+
+vector_entry FiqA32, .spm_shim_exceptions
+ b .
+end_vector_entry FiqA32
+
+vector_entry SErrorA32, .spm_shim_exceptions
+ b .
+end_vector_entry SErrorA32
diff --git a/services/std_svc/spm/spm_mm/spm_mm.mk b/services/std_svc/spm/spm_mm/spm_mm.mk
new file mode 100644
index 0000000..f6691c3
--- /dev/null
+++ b/services/std_svc/spm/spm_mm/spm_mm.mk
@@ -0,0 +1,34 @@
+#
+# Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${SPD},none)
+ $(error "Error: SPD and SPM_MM are incompatible build options.")
+endif
+ifneq (${ARCH},aarch64)
+ $(error "Error: SPM_MM is only supported on aarch64.")
+endif
+ifeq (${ENABLE_SVE_FOR_NS},1)
+ $(error "Error: SPM_MM is not compatible with ENABLE_SVE_FOR_NS")
+endif
+ifeq (${ENABLE_SME_FOR_NS},1)
+ $(error "Error: SPM_MM is not compatible with ENABLE_SME_FOR_NS")
+endif
+ifeq (${CTX_INCLUDE_FPREGS},0)
+ $(warning "Warning: SPM_MM: CTX_INCLUDE_FPREGS is set to 0")
+endif
+
+SPM_MM_SOURCES := $(addprefix services/std_svc/spm/spm_mm/, \
+ ${ARCH}/spm_mm_shim_exceptions.S \
+ spm_mm_main.c \
+ spm_mm_setup.c \
+ spm_mm_xlat.c)
+
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32 := yes
+
+# required so that SPM code executing at S-EL0 can access the timer registers
+NS_TIMER_SWITCH := 1
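+
+# Example build invocation (illustrative only; the platform, image name and
+# exact option set vary, and EL3_EXCEPTION_HANDLING is assumed here because
+# the MM_COMMUNICATE path relies on the EHF priority mechanism):
+#   make PLAT=fvp SPM_MM=1 EL3_EXCEPTION_HANDLING=1 ARM_BL31_IN_DRAM=1 \
+#        BL32=<path-to-secure-partition-image> all fip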
diff --git a/services/std_svc/spm/spm_mm/spm_mm_main.c b/services/std_svc/spm/spm_mm/spm_mm_main.c
new file mode 100644
index 0000000..8525cd2
--- /dev/null
+++ b/services/std_svc/spm/spm_mm/spm_mm_main.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <bl31/bl31.h>
+#include <bl31/ehf.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/smccc.h>
+#include <lib/spinlock.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/platform.h>
+#include <services/spm_mm_partition.h>
+#include <services/spm_mm_svc.h>
+#include <smccc_helpers.h>
+
+#include "spm_common.h"
+#include "spm_mm_private.h"
+
+/*******************************************************************************
+ * Secure Partition context information.
+ ******************************************************************************/
+static sp_context_t sp_ctx;
+
+/*******************************************************************************
+ * Set state of a Secure Partition context.
+ ******************************************************************************/
+void sp_state_set(sp_context_t *sp_ptr, sp_state_t state)
+{
+ spin_lock(&(sp_ptr->state_lock));
+ sp_ptr->state = state;
+ spin_unlock(&(sp_ptr->state_lock));
+}
+
+/*******************************************************************************
+ * Wait until the state of a Secure Partition is the specified one and change it
+ * to the desired state.
+ ******************************************************************************/
+void sp_state_wait_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
+{
+ int success = 0;
+
+ while (success == 0) {
+ spin_lock(&(sp_ptr->state_lock));
+
+ if (sp_ptr->state == from) {
+ sp_ptr->state = to;
+
+ success = 1;
+ }
+
+ spin_unlock(&(sp_ptr->state_lock));
+ }
+}
+
+/*******************************************************************************
+ * Check if the state of a Secure Partition is the specified one and, if so,
+ * change it to the desired state. Returns 0 on success, -1 on error.
+ ******************************************************************************/
+int sp_state_try_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
+{
+ int ret = -1;
+
+ spin_lock(&(sp_ptr->state_lock));
+
+ if (sp_ptr->state == from) {
+ sp_ptr->state = to;
+
+ ret = 0;
+ }
+
+ spin_unlock(&(sp_ptr->state_lock));
+
+ return ret;
+}
+
+/*******************************************************************************
+ * This function takes an SP context pointer and performs a synchronous entry
+ * into it.
+ ******************************************************************************/
+static uint64_t spm_sp_synchronous_entry(sp_context_t *ctx)
+{
+ uint64_t rc;
+
+ assert(ctx != NULL);
+
+ /* Assign the context of the SP to this CPU */
+ cm_set_context(&(ctx->cpu_ctx), SECURE);
+
+ /* Restore the context assigned above */
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ /* Invalidate TLBs at EL1. */
+ tlbivmalle1();
+ dsbish();
+
+ /* Enter Secure Partition */
+ rc = spm_secure_partition_enter(&ctx->c_rt_ctx);
+
+ /* Save secure state */
+ cm_el1_sysregs_context_save(SECURE);
+
+ return rc;
+}
+
+/*******************************************************************************
+ * This function returns to the place where spm_sp_synchronous_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 static void spm_sp_synchronous_exit(uint64_t rc)
+{
+ sp_context_t *ctx = &sp_ctx;
+
+ /*
+ * The SPM must have initiated the original request through a
+ * synchronous entry into the secure partition. Jump back to the
+ * original C runtime context with the value of rc in x0;
+ */
+ spm_secure_partition_exit(ctx->c_rt_ctx, rc);
+
+ panic();
+}
+
+/*******************************************************************************
+ * Jump to each Secure Partition for the first time.
+ ******************************************************************************/
+static int32_t spm_init(void)
+{
+ uint64_t rc;
+ sp_context_t *ctx;
+
+ INFO("Secure Partition init...\n");
+
+ ctx = &sp_ctx;
+
+ ctx->state = SP_STATE_RESET;
+
+ rc = spm_sp_synchronous_entry(ctx);
+ assert(rc == 0);
+
+ ctx->state = SP_STATE_IDLE;
+
+ INFO("Secure Partition initialized.\n");
+
+ return !rc;
+}
+
+/*******************************************************************************
+ * Initialize contexts of all Secure Partitions.
+ ******************************************************************************/
+int32_t spm_mm_setup(void)
+{
+ sp_context_t *ctx;
+
+ /* Disable MMU at EL1 (initialized by BL2) */
+ disable_mmu_icache_el1();
+
+ /* Initialize context of the SP */
+ INFO("Secure Partition context setup start...\n");
+
+ ctx = &sp_ctx;
+
+ /* Assign translation tables context. */
+ ctx->xlat_ctx_handle = spm_get_sp_xlat_context();
+
+ spm_sp_setup(ctx);
+
+ /* Register init function for deferred init. */
+ bl31_register_bl32_init(&spm_init);
+
+ INFO("Secure Partition setup done.\n");
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Function to perform a call to a Secure Partition.
+ ******************************************************************************/
+uint64_t spm_mm_sp_call(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3)
+{
+ uint64_t rc;
+ sp_context_t *sp_ptr = &sp_ctx;
+
+#if CTX_INCLUDE_FPREGS
+ /*
+ * SP runs to completion, no need to restore FP registers of secure context.
+ * Save FP registers only for non secure context.
+ */
+ fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
+#endif
+
+ /* Wait until the Secure Partition is idle and set it to busy. */
+ sp_state_wait_switch(sp_ptr, SP_STATE_IDLE, SP_STATE_BUSY);
+
+ /* Set values for registers on SP entry */
+ cpu_context_t *cpu_ctx = &(sp_ptr->cpu_ctx);
+
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X0, smc_fid);
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1, x1);
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X2, x2);
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3, x3);
+
+ /* Jump to the Secure Partition. */
+ rc = spm_sp_synchronous_entry(sp_ptr);
+
+ /* Flag Secure Partition as idle. */
+ assert(sp_ptr->state == SP_STATE_BUSY);
+ sp_state_set(sp_ptr, SP_STATE_IDLE);
+
+#if CTX_INCLUDE_FPREGS
+ /*
+ * SP runs to completion, no need to save FP registers of secure context.
+ * Restore only non secure world FP registers.
+ */
+ fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
+#endif
+
+ return rc;
+}
+
+/*******************************************************************************
+ * MM_COMMUNICATE handler
+ ******************************************************************************/
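+/*
+ * Illustrative sketch (assumed normal-world usage, e.g. from a UEFI MM
+ * driver); register names below follow the handler arguments:
+ *
+ *   w0/x0 = MM_COMMUNICATE_AARCH64 (or _AARCH32)
+ *   x1    = 0 (cookie, reserved)
+ *   x2    = address of the shared communication buffer
+ *   x3    = comm_size_address (0 recommended; a non-zero value only
+ *           triggers the VERBOSE note below)
+ *
+ * The value returned by the secure partition is handed back in x0.
+ */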
+static uint64_t mm_communicate(uint32_t smc_fid, uint64_t mm_cookie,
+ uint64_t comm_buffer_address,
+ uint64_t comm_size_address, void *handle)
+{
+ uint64_t rc;
+
+ /* Cookie. Reserved for future use. It must be zero. */
+ if (mm_cookie != 0U) {
+ ERROR("MM_COMMUNICATE: cookie is not zero\n");
+ SMC_RET1(handle, SPM_MM_INVALID_PARAMETER);
+ }
+
+ if (comm_buffer_address == 0U) {
+ ERROR("MM_COMMUNICATE: comm_buffer_address is zero\n");
+ SMC_RET1(handle, SPM_MM_INVALID_PARAMETER);
+ }
+
+ if (comm_size_address != 0U) {
+ VERBOSE("MM_COMMUNICATE: comm_size_address is not 0 as recommended.\n");
+ }
+
+ /*
+ * The current secure partition design mandates
+ * - at any point, only a single core can be
+ * executing in the secure partition.
+ * - a core cannot be preempted by an interrupt
+ * while executing in secure partition.
+ * Raise the running priority of the core to the
+ * interrupt level configured for secure partition
+ * so as to block any interrupt from preempting this
+ * core.
+ */
+ ehf_activate_priority(PLAT_SP_PRI);
+
+ /* Save the Normal world context */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ rc = spm_mm_sp_call(smc_fid, comm_buffer_address, comm_size_address,
+ plat_my_core_pos());
+
+ /* Restore non-secure state */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ /*
+ * Exited from secure partition. This core can take
+ * interrupts now.
+ */
+ ehf_deactivate_priority(PLAT_SP_PRI);
+
+ SMC_RET1(handle, rc);
+}
+
+/*******************************************************************************
+ * Secure Partition Manager SMC handler.
+ ******************************************************************************/
+uint64_t spm_mm_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ unsigned int ns;
+
+ /* Determine which security state this SMC originated from */
+ ns = is_caller_non_secure(flags);
+
+ if (ns == SMC_FROM_SECURE) {
+
+ /* Handle SMCs from Secure world. */
+
+ assert(handle == cm_get_context(SECURE));
+
+ /* Make next ERET jump to S-EL0 instead of S-EL1. */
+ cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());
+
+ switch (smc_fid) {
+
+ case SPM_MM_VERSION_AARCH32:
+ SMC_RET1(handle, SPM_MM_VERSION_COMPILED);
+
+ case MM_SP_EVENT_COMPLETE_AARCH64:
+ spm_sp_synchronous_exit(x1);
+
+ case MM_SP_MEMORY_ATTRIBUTES_GET_AARCH64:
+ INFO("Received MM_SP_MEMORY_ATTRIBUTES_GET_AARCH64 SMC\n");
+
+ if (sp_ctx.state != SP_STATE_RESET) {
+ WARN("MM_SP_MEMORY_ATTRIBUTES_GET_AARCH64 is available at boot time only\n");
+ SMC_RET1(handle, SPM_MM_NOT_SUPPORTED);
+ }
+ SMC_RET1(handle,
+ spm_memory_attributes_get_smc_handler(
+ &sp_ctx, x1));
+
+ case MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64:
+ INFO("Received MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC\n");
+
+ if (sp_ctx.state != SP_STATE_RESET) {
+ WARN("MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64 is available at boot time only\n");
+ SMC_RET1(handle, SPM_MM_NOT_SUPPORTED);
+ }
+ SMC_RET1(handle,
+ spm_memory_attributes_set_smc_handler(
+ &sp_ctx, x1, x2, x3));
+ default:
+ break;
+ }
+ } else {
+
+ /* Handle SMCs from Non-secure world. */
+
+ assert(handle == cm_get_context(NON_SECURE));
+
+ switch (smc_fid) {
+
+ case MM_VERSION_AARCH32:
+ SMC_RET1(handle, MM_VERSION_COMPILED);
+
+ case MM_COMMUNICATE_AARCH32:
+ case MM_COMMUNICATE_AARCH64:
+ return mm_communicate(smc_fid, x1, x2, x3, handle);
+
+ case MM_SP_MEMORY_ATTRIBUTES_GET_AARCH64:
+ case MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64:
+ /* SMC interfaces reserved for secure callers. */
+ SMC_RET1(handle, SPM_MM_NOT_SUPPORTED);
+
+ default:
+ break;
+ }
+ }
+
+ SMC_RET1(handle, SMC_UNK);
+}
diff --git a/services/std_svc/spm/spm_mm/spm_mm_private.h b/services/std_svc/spm/spm_mm/spm_mm_private.h
new file mode 100644
index 0000000..0eff1c0
--- /dev/null
+++ b/services/std_svc/spm/spm_mm/spm_mm_private.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_MM_PRIVATE_H
+#define SPM_MM_PRIVATE_H
+
+#include <context.h>
+#include "spm_common.h"
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define SP_C_RT_CTX_X19 0x0
+#define SP_C_RT_CTX_X20 0x8
+#define SP_C_RT_CTX_X21 0x10
+#define SP_C_RT_CTX_X22 0x18
+#define SP_C_RT_CTX_X23 0x20
+#define SP_C_RT_CTX_X24 0x28
+#define SP_C_RT_CTX_X25 0x30
+#define SP_C_RT_CTX_X26 0x38
+#define SP_C_RT_CTX_X27 0x40
+#define SP_C_RT_CTX_X28 0x48
+#define SP_C_RT_CTX_X29 0x50
+#define SP_C_RT_CTX_X30 0x58
+
+#define SP_C_RT_CTX_SIZE 0x60
+#define SP_C_RT_CTX_ENTRIES (SP_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <lib/spinlock.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+typedef enum sp_state {
+ SP_STATE_RESET = 0,
+ SP_STATE_IDLE,
+ SP_STATE_BUSY
+} sp_state_t;
+
+typedef struct sp_context {
+ uint64_t c_rt_ctx;
+ cpu_context_t cpu_ctx;
+ xlat_ctx_t *xlat_ctx_handle;
+
+ sp_state_t state;
+ spinlock_t state_lock;
+} sp_context_t;
+
+
+void spm_sp_setup(sp_context_t *sp_ctx);
+
+xlat_ctx_t *spm_get_sp_xlat_context(void);
+
+int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
+ uintptr_t base_va);
+int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
+ u_register_t page_address,
+ u_register_t pages_count,
+ u_register_t smc_attributes);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* SPM_MM_PRIVATE_H */
diff --git a/services/std_svc/spm/spm_mm/spm_mm_setup.c b/services/std_svc/spm/spm_mm/spm_mm_setup.c
new file mode 100644
index 0000000..04dc212
--- /dev/null
+++ b/services/std_svc/spm/spm_mm/spm_mm_setup.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <context.h>
+#include <common/debug.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <platform_def.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <services/spm_mm_partition.h>
+
+#include "spm_common.h"
+#include "spm_mm_private.h"
+#include "spm_mm_shim_private.h"
+
+/* Setup context of the Secure Partition */
+void spm_sp_setup(sp_context_t *sp_ctx)
+{
+ cpu_context_t *ctx = &(sp_ctx->cpu_ctx);
+
+ /* Pointer to the MP information from the platform port. */
+ const spm_mm_boot_info_t *sp_boot_info =
+ plat_get_secure_partition_boot_info(NULL);
+
+ /*
+ * Initialize CPU context
+ * ----------------------
+ */
+
+ entry_point_info_t ep_info = {0};
+
+ SET_PARAM_HEAD(&ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
+
+ /* Setup entrypoint and SPSR */
+ ep_info.pc = sp_boot_info->sp_image_base;
+ ep_info.spsr = SPSR_64(MODE_EL0, MODE_SP_EL0, DISABLE_ALL_EXCEPTIONS);
+
+ /*
+ * X0: Virtual address of a buffer shared between EL3 and Secure EL0.
+ * The buffer will be mapped in the Secure EL1 translation regime
+ * with Normal IS WBWA attributes and RO data and Execute Never
+ * instruction access permissions.
+ *
+ * X1: Size of the buffer in bytes
+ *
+ * X2: cookie value (Implementation Defined)
+ *
+ * X3: cookie value (Implementation Defined)
+ *
+ * X4 to X7 = 0
+ */
+ ep_info.args.arg0 = sp_boot_info->sp_shared_buf_base;
+ ep_info.args.arg1 = sp_boot_info->sp_shared_buf_size;
+ ep_info.args.arg2 = PLAT_SPM_COOKIE_0;
+ ep_info.args.arg3 = PLAT_SPM_COOKIE_1;
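+	/* arg4 to arg7 remain zero from the {0} initialisation of ep_info. */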
+
+ cm_setup_context(ctx, &ep_info);
+
+ /*
+ * SP_EL0: A non-zero value will indicate to the SP that the SPM has
+ * initialized the stack pointer for the current CPU through
+ * implementation defined means. The value will be 0 otherwise.
+ */
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_SP_EL0,
+ sp_boot_info->sp_stack_base + sp_boot_info->sp_pcpu_stack_size);
+
+ /*
+ * Setup translation tables
+ * ------------------------
+ */
+
+#if ENABLE_ASSERTIONS
+
+ /* Get max granularity supported by the platform. */
+ unsigned int max_granule = xlat_arch_get_max_supported_granule_size();
+
+ VERBOSE("Max translation granule size supported: %u KiB\n",
+ max_granule / 1024U);
+
+ unsigned int max_granule_mask = max_granule - 1U;
+
+ /* Base must be aligned to the max granularity */
+ assert((sp_boot_info->sp_ns_comm_buf_base & max_granule_mask) == 0);
+
+ /* Size must be a multiple of the max granularity */
+ assert((sp_boot_info->sp_ns_comm_buf_size & max_granule_mask) == 0);
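+
+	/*
+	 * Worked example (illustrative): with a 64 KiB maximum granule,
+	 * max_granule_mask is 0xFFFF, so both checks require the NS
+	 * communication buffer base and size to be 64 KiB multiples.
+	 */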
+
+#endif /* ENABLE_ASSERTIONS */
+
+ /* This region contains the exception vectors used at S-EL1. */
+ const mmap_region_t sel1_exception_vectors =
+ MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
+ SPM_SHIM_EXCEPTIONS_SIZE,
+ MT_CODE | MT_SECURE | MT_PRIVILEGED);
+ mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
+ &sel1_exception_vectors);
+
+ mmap_add_ctx(sp_ctx->xlat_ctx_handle,
+ plat_get_secure_partition_mmap(NULL));
+
+ init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
+
+ /*
+ * MMU-related registers
+ * ---------------------
+ */
+ xlat_ctx_t *xlat_ctx = sp_ctx->xlat_ctx_handle;
+
+ uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, 0, xlat_ctx->base_table,
+ xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
+ EL1_EL0_REGIME);
+
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_MAIR_EL1,
+ mmu_cfg_params[MMU_CFG_MAIR]);
+
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TCR_EL1,
+ mmu_cfg_params[MMU_CFG_TCR]);
+
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TTBR0_EL1,
+ mmu_cfg_params[MMU_CFG_TTBR0]);
+
+ /* Setup SCTLR_EL1 */
+ u_register_t sctlr_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1);
+
+ sctlr_el1 |=
+ /*SCTLR_EL1_RES1 |*/
+ /* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
+ SCTLR_UCI_BIT |
+ /* RW regions at xlat regime EL1&0 are forced to be XN. */
+ SCTLR_WXN_BIT |
+ /* Don't trap to EL1 execution of WFI or WFE at EL0. */
+ SCTLR_NTWI_BIT | SCTLR_NTWE_BIT |
+ /* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
+ SCTLR_UCT_BIT |
+ /* Don't trap to EL1 execution of DZ ZVA at EL0. */
+ SCTLR_DZE_BIT |
+ /* Enable SP Alignment check for EL0 */
+ SCTLR_SA0_BIT |
+ /* Don't change PSTATE.PAN on taking an exception to EL1 */
+ SCTLR_SPAN_BIT |
+ /* Allow cacheable data and instr. accesses to normal memory. */
+ SCTLR_C_BIT | SCTLR_I_BIT |
+ /* Enable MMU. */
+ SCTLR_M_BIT
+ ;
+
+ sctlr_el1 &= ~(
+ /* Explicit data accesses at EL0 are little-endian. */
+ SCTLR_E0E_BIT |
+ /*
+ * Alignment fault checking disabled when at EL1 and EL0 as
+ * the UEFI spec permits unaligned accesses.
+ */
+ SCTLR_A_BIT |
+ /* Accesses to DAIF from EL0 are trapped to EL1. */
+ SCTLR_UMA_BIT
+ );
+
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
+
+ /*
+ * Setup other system registers
+ * ----------------------------
+ */
+
+ /* Shim Exception Vector Base Address */
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_VBAR_EL1,
+ SPM_SHIM_EXCEPTIONS_PTR);
+
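+	/*
+	 * Descriptive note: setting EL0PTEN, EL0VTEN, EL0PCTEN and EL0VCTEN in
+	 * CNTKCTL_EL1 lets the S-EL0 partition access the physical and virtual
+	 * timer and counter registers without trapping to S-EL1.
+	 */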
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
+ EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);
+
+	/*
+	 * CPACR_EL1 is programmed below with only FPEN set; TTA and ZEN are
+	 * left at zero:
+	 * FPEN: Allow the Secure Partition to access FP/SIMD registers.
+	 * Note that SPM will not do any saving/restoring of these registers on
+	 * behalf of the SP. This falls under the SP's responsibility.
+	 * TTA = 0: accesses to trace registers are not trapped by CPACR_EL1.
+	 * ZEN = 0 (Armv8.2-SVE): SVE instructions and accesses to SVE
+	 * registers are trapped.
+	 */
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CPACR_EL1,
+ CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
+
+ /*
+ * Prepare information in buffer shared between EL3 and S-EL0
+ * ----------------------------------------------------------
+ */
+
+ void *shared_buf_ptr = (void *) sp_boot_info->sp_shared_buf_base;
+
+ /* Copy the boot information into the shared buffer with the SP. */
+ assert((uintptr_t)shared_buf_ptr + sizeof(spm_mm_boot_info_t)
+ <= (sp_boot_info->sp_shared_buf_base + sp_boot_info->sp_shared_buf_size));
+
+ assert(sp_boot_info->sp_shared_buf_base <=
+ (UINTPTR_MAX - sp_boot_info->sp_shared_buf_size + 1));
+
+ memcpy((void *) shared_buf_ptr, (const void *) sp_boot_info,
+ sizeof(spm_mm_boot_info_t));
+
+ /* Pointer to the MP information from the platform port. */
+ spm_mm_mp_info_t *sp_mp_info =
+ ((spm_mm_boot_info_t *) shared_buf_ptr)->mp_info;
+
+ assert(sp_mp_info != NULL);
+
+ /*
+ * Point the shared buffer MP information pointer to where the info will
+ * be populated, just after the boot info.
+ */
+ ((spm_mm_boot_info_t *) shared_buf_ptr)->mp_info =
+ (spm_mm_mp_info_t *) ((uintptr_t)shared_buf_ptr
+ + sizeof(spm_mm_boot_info_t));
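+
+	/*
+	 * Shared buffer layout being built here (illustrative):
+	 *
+	 *	sp_shared_buf_base                     -> spm_mm_boot_info_t
+	 *	sp_shared_buf_base + sizeof(boot info) -> spm_mm_mp_info_t[num_cpus]
+	 *
+	 * The mp_info field of the copied boot info now points at the second
+	 * region, which is filled in below.
+	 */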
+
+ /*
+ * Update the shared buffer pointer to where the MP information for the
+ * payload will be populated
+ */
+ shared_buf_ptr = ((spm_mm_boot_info_t *) shared_buf_ptr)->mp_info;
+
+ /*
+ * Copy the cpu information into the shared buffer area after the boot
+ * information.
+ */
+ assert(sp_boot_info->num_cpus <= PLATFORM_CORE_COUNT);
+
+ assert((uintptr_t)shared_buf_ptr
+ <= (sp_boot_info->sp_shared_buf_base + sp_boot_info->sp_shared_buf_size -
+ (sp_boot_info->num_cpus * sizeof(*sp_mp_info))));
+
+ memcpy(shared_buf_ptr, (const void *) sp_mp_info,
+ sp_boot_info->num_cpus * sizeof(*sp_mp_info));
+
+ /*
+ * Calculate the linear indices of cores in boot information for the
+ * secure partition and flag the primary CPU
+ */
+ sp_mp_info = (spm_mm_mp_info_t *) shared_buf_ptr;
+
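+	/*
+	 * Note (assumption about the platform port): plat_core_pos_by_mpidr()
+	 * returns a negative value for an MPIDR it does not recognise, so the
+	 * MP information supplied by the platform is expected to contain only
+	 * valid MPIDRs.
+	 */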
+ for (unsigned int index = 0; index < sp_boot_info->num_cpus; index++) {
+ u_register_t mpidr = sp_mp_info[index].mpidr;
+
+ sp_mp_info[index].linear_id = plat_core_pos_by_mpidr(mpidr);
+		if (plat_my_core_pos() == sp_mp_info[index].linear_id) {
+			sp_mp_info[index].flags |= MP_INFO_FLAG_PRIMARY_CPU;
+		}
+ }
+}
diff --git a/services/std_svc/spm/spm_mm/spm_mm_shim_private.h b/services/std_svc/spm/spm_mm/spm_mm_shim_private.h
new file mode 100644
index 0000000..f69c748
--- /dev/null
+++ b/services/std_svc/spm/spm_mm/spm_mm_shim_private.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_MM_SHIM_PRIVATE_H
+#define SPM_MM_SHIM_PRIVATE_H
+
+#include <stdint.h>
+
+#include <lib/utils_def.h>
+
+/* Assembly source */
+IMPORT_SYM(uintptr_t, spm_shim_exceptions_ptr, SPM_SHIM_EXCEPTIONS_PTR);
+
+/* Linker symbols */
+IMPORT_SYM(uintptr_t, __SPM_SHIM_EXCEPTIONS_START__, SPM_SHIM_EXCEPTIONS_START);
+IMPORT_SYM(uintptr_t, __SPM_SHIM_EXCEPTIONS_END__, SPM_SHIM_EXCEPTIONS_END);
+
+/* Definitions */
+
+#define SPM_SHIM_EXCEPTIONS_SIZE \
+ (SPM_SHIM_EXCEPTIONS_END - SPM_SHIM_EXCEPTIONS_START)
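+
+/*
+ * Illustrative note: IMPORT_SYM() exposes each of the symbols above as a C
+ * constant, so SPM_SHIM_EXCEPTIONS_SIZE is simply the distance in bytes
+ * between the end and start linker symbols of the shim exception vectors.
+ */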
+
+#endif /* SPM_MM_SHIM_PRIVATE_H */
diff --git a/services/std_svc/spm/spm_mm/spm_mm_xlat.c b/services/std_svc/spm/spm_mm/spm_mm_xlat.c
new file mode 100644
index 0000000..6261016
--- /dev/null
+++ b/services/std_svc/spm/spm_mm/spm_mm_xlat.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <errno.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <platform_def.h>
+#include <plat/common/platform.h>
+#include <services/spm_mm_partition.h>
+#include <services/spm_mm_svc.h>
+
+#include "spm_mm_private.h"
+#include "spm_mm_shim_private.h"
+
+/* Place translation tables by default along with the ones used by BL31. */
+#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
+#define PLAT_SP_IMAGE_XLAT_SECTION_NAME "xlat_table"
+#endif
+#ifndef PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME
+#define PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME ".bss"
+#endif
+
+/* Allocate and initialise the translation context for the secure partitions. */
+REGISTER_XLAT_CONTEXT2(sp,
+ PLAT_SP_IMAGE_MMAP_REGIONS,
+ PLAT_SP_IMAGE_MAX_XLAT_TABLES,
+ PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
+ EL1_EL0_REGIME, PLAT_SP_IMAGE_XLAT_SECTION_NAME,
+ PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME);
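+
+/*
+ * Illustrative note: REGISTER_XLAT_CONTEXT2(sp, ...) is expected to
+ * instantiate a static translation context named sp_xlat_ctx together with
+ * its table memory; spm_get_sp_xlat_context() below returns a pointer to it.
+ */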
+
+/* Lock used for SP_MEMORY_ATTRIBUTES_GET and SP_MEMORY_ATTRIBUTES_SET */
+static spinlock_t mem_attr_smc_lock;
+
+/* Get handle of Secure Partition translation context */
+xlat_ctx_t *spm_get_sp_xlat_context(void)
+{
+ return &sp_xlat_ctx;
+}
+
+/*
+ * Attributes are encoded using a different format in the SMC interface than in
+ * the Trusted Firmware, where the mmap_attr_t enum type is used. This function
+ * converts an attributes value from the SMC format to the mmap_attr_t format by
+ * setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
+ * The other fields are left as 0 because they are ignored by the function
+ * xlat_change_mem_attributes_ctx().
+ */
+static unsigned int smc_attr_to_mmap_attr(unsigned int attributes)
+{
+ unsigned int tf_attr = 0U;
+
+ unsigned int access = (attributes & MM_SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
+ >> MM_SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;
+
+ if (access == MM_SP_MEMORY_ATTRIBUTES_ACCESS_RW) {
+ tf_attr |= MT_RW | MT_USER;
+ } else if (access == MM_SP_MEMORY_ATTRIBUTES_ACCESS_RO) {
+ tf_attr |= MT_RO | MT_USER;
+ } else {
+ /* Other values are reserved. */
+ assert(access == MM_SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS);
+ /* The only requirement is that there's no access from EL0 */
+ tf_attr |= MT_RO | MT_PRIVILEGED;
+ }
+
+ if ((attributes & MM_SP_MEMORY_ATTRIBUTES_NON_EXEC) == 0) {
+ tf_attr |= MT_EXECUTE;
+ } else {
+ tf_attr |= MT_EXECUTE_NEVER;
+ }
+
+ return tf_attr;
+}
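+
+/*
+ * Worked example (illustrative, not part of the interface): an input of
+ *
+ *	(MM_SP_MEMORY_ATTRIBUTES_ACCESS_RW
+ *	 << MM_SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT) |
+ *	MM_SP_MEMORY_ATTRIBUTES_NON_EXEC
+ *
+ * yields MT_RW | MT_USER | MT_EXECUTE_NEVER: a writeable, unprivileged,
+ * never-executable mapping.
+ */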
+
+/*
+ * This function converts attributes from the Trusted Firmware format into the
+ * SMC interface format.
+ */
+static unsigned int smc_mmap_to_smc_attr(unsigned int attr)
+{
+ unsigned int smc_attr = 0U;
+
+ unsigned int data_access;
+
+ if ((attr & MT_USER) == 0) {
+ /* No access from EL0. */
+ data_access = MM_SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS;
+ } else {
+ if ((attr & MT_RW) != 0) {
+ assert(MT_TYPE(attr) != MT_DEVICE);
+ data_access = MM_SP_MEMORY_ATTRIBUTES_ACCESS_RW;
+ } else {
+ data_access = MM_SP_MEMORY_ATTRIBUTES_ACCESS_RO;
+ }
+ }
+
+ smc_attr |= (data_access & MM_SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
+ << MM_SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;
+
+ if ((attr & MT_EXECUTE_NEVER) != 0U) {
+ smc_attr |= MM_SP_MEMORY_ATTRIBUTES_NON_EXEC;
+ }
+
+ return smc_attr;
+}
+
+int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
+ uintptr_t base_va)
+{
+ uint32_t attributes;
+
+ spin_lock(&mem_attr_smc_lock);
+
+ int rc = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+ base_va, &attributes);
+
+ spin_unlock(&mem_attr_smc_lock);
+
+	/* Convert xlat_get_mem_attributes_ctx() error codes into SPM ones. */
+ assert((rc == 0) || (rc == -EINVAL));
+
+ if (rc == 0) {
+ return (int32_t) smc_mmap_to_smc_attr(attributes);
+ } else {
+ return SPM_MM_INVALID_PARAMETER;
+ }
+}
+
+int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
+ u_register_t page_address,
+ u_register_t pages_count,
+ u_register_t smc_attributes)
+{
+ uintptr_t base_va = (uintptr_t) page_address;
+ size_t size = (size_t) (pages_count * PAGE_SIZE);
+ uint32_t attributes = (uint32_t) smc_attributes;
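+
+	/*
+	 * Worked example (illustrative): with a 4 KiB PAGE_SIZE, a pages_count
+	 * of 3 covers 3 * 4096 = 12288 bytes starting at base_va.
+	 */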
+
+ INFO(" Start address : 0x%lx\n", base_va);
+	INFO(" Number of pages: %i (%zu bytes)\n", (int) pages_count, size);
+ INFO(" Attributes : 0x%x\n", attributes);
+
+ spin_lock(&mem_attr_smc_lock);
+
+ int ret = xlat_change_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+ base_va, size,
+ smc_attr_to_mmap_attr(attributes));
+
+ spin_unlock(&mem_attr_smc_lock);
+
+	/* Convert xlat_change_mem_attributes_ctx() error codes into SPM ones. */
+ assert((ret == 0) || (ret == -EINVAL));
+
+ return (ret == 0) ? SPM_MM_SUCCESS : SPM_MM_INVALID_PARAMETER;
+}