Diffstat (limited to '')
 services/std_svc/rmmd/aarch64/rmmd_helpers.S |  73
 services/std_svc/rmmd/rmmd.mk                |  19
 services/std_svc/rmmd/rmmd_attest.c          | 153
 services/std_svc/rmmd/rmmd_initial_context.h |  33
 services/std_svc/rmmd/rmmd_main.c            | 449
 services/std_svc/rmmd/rmmd_private.h         |  63
 services/std_svc/rmmd/trp/linker.lds         |  71
 services/std_svc/rmmd/trp/trp.mk             |  21
 services/std_svc/rmmd/trp/trp_entry.S        | 121
 services/std_svc/rmmd/trp/trp_helpers.c      |  58
 services/std_svc/rmmd/trp/trp_main.c         | 150
 services/std_svc/rmmd/trp/trp_private.h      |  57
 12 files changed, 1268 insertions(+), 0 deletions(-)
diff --git a/services/std_svc/rmmd/aarch64/rmmd_helpers.S b/services/std_svc/rmmd/aarch64/rmmd_helpers.S
new file mode 100644
index 0000000..6229baf
--- /dev/null
+++ b/services/std_svc/rmmd/aarch64/rmmd_helpers.S
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "../rmmd_private.h"
+#include <asm_macros.S>
+
+ .global rmmd_rmm_enter
+ .global rmmd_rmm_exit
+
+ /* ---------------------------------------------------------------------
+	 * This function is called with SP_EL0 as the stack. Here we stash our
+	 * EL3 callee-saved registers onto the stack as part of saving the C
+	 * runtime, and then enter the realm payload (RMM).
+ * 'x0' contains a pointer to the memory where the address of the C
+ * runtime context is to be saved.
+ * ---------------------------------------------------------------------
+ */
+func rmmd_rmm_enter
+ /* Make space for the registers that we're going to save */
+ mov x3, sp
+ str x3, [x0, #0]
+ sub sp, sp, #RMMD_C_RT_CTX_SIZE
+
+ /* Save callee-saved registers on to the stack */
+ stp x19, x20, [sp, #RMMD_C_RT_CTX_X19]
+ stp x21, x22, [sp, #RMMD_C_RT_CTX_X21]
+ stp x23, x24, [sp, #RMMD_C_RT_CTX_X23]
+ stp x25, x26, [sp, #RMMD_C_RT_CTX_X25]
+ stp x27, x28, [sp, #RMMD_C_RT_CTX_X27]
+ stp x29, x30, [sp, #RMMD_C_RT_CTX_X29]
+
+ /* ---------------------------------------------------------------------
+	 * Everything is set up now. el3_exit() will use the realm context to
+	 * restore the general purpose and EL3 system registers and ERET
+	 * into the realm payload.
+ * ---------------------------------------------------------------------
+ */
+ b el3_exit
+endfunc rmmd_rmm_enter
+
+ /* ---------------------------------------------------------------------
+	 * This function is called with 'x0' pointing to a C runtime context
+	 * saved by rmmd_rmm_enter(). It restores the callee-saved registers
+	 * and resumes that runtime with 'x0' as the new stack pointer. This
+	 * discards the C runtime context built on the stack below the saved
+	 * one. The second parameter 'x1' is returned to the caller.
+ * ---------------------------------------------------------------------
+ */
+func rmmd_rmm_exit
+ /* Restore the previous stack */
+ mov sp, x0
+
+	/* Restore callee-saved registers from the stack */
+ ldp x19, x20, [x0, #(RMMD_C_RT_CTX_X19 - RMMD_C_RT_CTX_SIZE)]
+ ldp x21, x22, [x0, #(RMMD_C_RT_CTX_X21 - RMMD_C_RT_CTX_SIZE)]
+ ldp x23, x24, [x0, #(RMMD_C_RT_CTX_X23 - RMMD_C_RT_CTX_SIZE)]
+ ldp x25, x26, [x0, #(RMMD_C_RT_CTX_X25 - RMMD_C_RT_CTX_SIZE)]
+ ldp x27, x28, [x0, #(RMMD_C_RT_CTX_X27 - RMMD_C_RT_CTX_SIZE)]
+ ldp x29, x30, [x0, #(RMMD_C_RT_CTX_X29 - RMMD_C_RT_CTX_SIZE)]
+
+ /* ---------------------------------------------------------------------
+	 * This should take us back to the instruction after the call to the
+	 * last rmmd_rmm_enter(). Place the second parameter in x0
+	 * so that the caller will see it as the return value from the
+	 * original entry call.
+ * ---------------------------------------------------------------------
+ */
+ mov x0, x1
+ ret
+endfunc rmmd_rmm_exit
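The enter/exit pair above is easiest to read as a setjmp()/longjmp() analogue: rmmd_rmm_enter() records the EL3 C runtime (stack pointer plus callee-saved registers) and effectively "returns twice", while rmmd_rmm_exit() resumes that saved runtime with a chosen return value. The standalone sketch below mirrors only that control flow; it is plain ISO C with made-up names, not TF-A code.

#include <setjmp.h>
#include <stdio.h>

static jmp_buf c_rt_ctx;		/* stands in for rmm_ctx->c_rt_ctx */

static void fake_rmm_exit(int rc)	/* stands in for rmmd_rmm_exit() */
{
	longjmp(c_rt_ctx, rc);		/* resume the saved runtime, "returning" rc */
}

static int fake_rmm_enter(void)		/* stands in for rmmd_rmm_enter() */
{
	int rc = setjmp(c_rt_ctx);	/* 0 on the first, direct return */

	if (rc == 0) {
		/* The real code ERETs into the RMM here via el3_exit(). */
		fake_rmm_exit(42);	/* The RMM later calls back into EL3. */
	}
	return rc;			/* second return, carrying the RMM's result */
}

int main(void)
{
	printf("sync entry returned %d\n", fake_rmm_enter());	/* prints 42 */
	return 0;
}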
diff --git a/services/std_svc/rmmd/rmmd.mk b/services/std_svc/rmmd/rmmd.mk
new file mode 100644
index 0000000..bcf54e1
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd.mk
@@ -0,0 +1,19 @@
+#
+# Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH},aarch64)
+ $(error "Error: RMMD is only supported on aarch64.")
+endif
+
+include services/std_svc/rmmd/trp/trp.mk
+
+RMMD_SOURCES += $(addprefix services/std_svc/rmmd/, \
+ ${ARCH}/rmmd_helpers.S \
+ rmmd_main.c \
+ rmmd_attest.c)
+
+# Let the top-level Makefile know that we intend to include the RMM image
+NEED_RMM := yes
diff --git a/services/std_svc/rmmd/rmmd_attest.c b/services/std_svc/rmmd/rmmd_attest.c
new file mode 100644
index 0000000..25adf50
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd_attest.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <stdint.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <lib/spinlock.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/platform.h>
+#include "rmmd_private.h"
+#include <services/rmmd_svc.h>
+
+static spinlock_t lock;
+
+/* For printing Realm attestation token hash */
+#define DIGITS_PER_BYTE 2UL
+#define LENGTH_OF_TERMINATING_ZERO_IN_BYTES 1UL
+#define BYTES_PER_LINE_BASE 4UL
+
+static void print_challenge(uint8_t *hash, size_t hash_size)
+{
+ size_t leftover;
+ /*
+	 * bytes_per_line is always a power of two, so it can be used to
+	 * construct a mask for computing the remainder when one is
+	 * needed.
+	 */
+ const size_t bytes_per_line = 1 << BYTES_PER_LINE_BASE;
+ char hash_text[(1 << BYTES_PER_LINE_BASE) * DIGITS_PER_BYTE +
+ LENGTH_OF_TERMINATING_ZERO_IN_BYTES];
+ const char hex_chars[] = {'0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
+ unsigned int i;
+
+ for (i = 0U; i < hash_size; ++i) {
+ hash_text[(i & (bytes_per_line - 1)) * DIGITS_PER_BYTE] =
+ hex_chars[hash[i] >> 4];
+ hash_text[(i & (bytes_per_line - 1)) * DIGITS_PER_BYTE + 1] =
+ hex_chars[hash[i] & 0x0f];
+ if (((i + 1) & (bytes_per_line - 1)) == 0U) {
+ hash_text[bytes_per_line * DIGITS_PER_BYTE] = '\0';
+ VERBOSE("hash part %u = %s\n",
+ (i >> BYTES_PER_LINE_BASE) + 1, hash_text);
+ }
+ }
+
+ leftover = (size_t)i & (bytes_per_line - 1);
+
+ if (leftover != 0UL) {
+ hash_text[leftover * DIGITS_PER_BYTE] = '\0';
+ VERBOSE("hash part %u = %s\n", (i >> BYTES_PER_LINE_BASE) + 1,
+ hash_text);
+ }
+}
+
+/*
+ * Helper function to validate that the buffer base and length are
+ * within range.
+ */
+static int validate_buffer_params(uint64_t buf_pa, uint64_t buf_len)
+{
+ unsigned long shared_buf_page;
+ uintptr_t shared_buf_base;
+
+ (void)plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);
+
+ shared_buf_page = shared_buf_base & ~PAGE_SIZE_MASK;
+
+ /* Validate the buffer pointer */
+ if ((buf_pa & ~PAGE_SIZE_MASK) != shared_buf_page) {
+ ERROR("Buffer PA out of range\n");
+ return E_RMM_BAD_ADDR;
+ }
+
+ /* Validate the size of the shared area */
+ if (((buf_pa + buf_len - 1UL) & ~PAGE_SIZE_MASK) != shared_buf_page) {
+ ERROR("Invalid buffer length\n");
+ return E_RMM_INVAL;
+ }
+
+ return 0; /* No error */
+}
+
+int rmmd_attest_get_platform_token(uint64_t buf_pa, uint64_t *buf_size,
+ uint64_t c_size)
+{
+ int err;
+ uint8_t temp_buf[SHA512_DIGEST_SIZE];
+
+ err = validate_buffer_params(buf_pa, *buf_size);
+ if (err != 0) {
+ return err;
+ }
+
+ if ((c_size != SHA256_DIGEST_SIZE) &&
+ (c_size != SHA384_DIGEST_SIZE) &&
+ (c_size != SHA512_DIGEST_SIZE)) {
+ ERROR("Invalid hash size: %lu\n", c_size);
+ return E_RMM_INVAL;
+ }
+
+ spin_lock(&lock);
+
+ (void)memcpy(temp_buf, (void *)buf_pa, c_size);
+
+ print_challenge((uint8_t *)temp_buf, c_size);
+
+ /* Get the platform token. */
+ err = plat_rmmd_get_cca_attest_token((uintptr_t)buf_pa,
+ buf_size, (uintptr_t)temp_buf, c_size);
+
+ if (err != 0) {
+ ERROR("Failed to get platform token: %d.\n", err);
+ err = E_RMM_UNK;
+ }
+
+ spin_unlock(&lock);
+
+ return err;
+}
+
+int rmmd_attest_get_signing_key(uint64_t buf_pa, uint64_t *buf_size,
+ uint64_t ecc_curve)
+{
+ int err;
+
+ err = validate_buffer_params(buf_pa, *buf_size);
+ if (err != 0) {
+ return err;
+ }
+
+ if (ecc_curve != ATTEST_KEY_CURVE_ECC_SECP384R1) {
+ ERROR("Invalid ECC curve specified\n");
+ return E_RMM_INVAL;
+ }
+
+ spin_lock(&lock);
+
+ /* Get the Realm attestation key. */
+ err = plat_rmmd_get_cca_realm_attest_key((uintptr_t)buf_pa, buf_size,
+ (unsigned int)ecc_curve);
+ if (err != 0) {
+ ERROR("Failed to get attestation key: %d.\n", err);
+ err = E_RMM_UNK;
+ }
+
+ spin_unlock(&lock);
+
+ return err;
+}
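validate_buffer_params() above accepts a buffer only if both its first and its last byte land in the same 4KB page as the EL3 <-> RMM shared area, so nothing can spill outside the shared page. A standalone sketch of that check, with a locally defined 4K mask and made-up example addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_MASK	0xfffULL	/* 4K granule, as assumed by the patch */

static bool buf_in_shared_page(uint64_t shared_base, uint64_t pa, uint64_t len)
{
	uint64_t page = shared_base & ~PAGE_SIZE_MASK;

	/* The base must be inside the shared page... */
	if ((pa & ~PAGE_SIZE_MASK) != page) {
		return false;
	}
	/* ...and so must the last byte, so the buffer cannot cross out of it. */
	return ((pa + len - 1ULL) & ~PAGE_SIZE_MASK) == page;
}

int main(void)
{
	uint64_t shared = 0xFFAA1000ULL;	/* made-up shared-area base */

	printf("%d\n", buf_in_shared_page(shared, 0xFFAA1040ULL, 0x40));	/* 1 */
	printf("%d\n", buf_in_shared_page(shared, 0xFFAA1FC0ULL, 0x80));	/* 0: crosses the page end */
	printf("%d\n", buf_in_shared_page(shared, 0xFFAA3000ULL, 0x40));	/* 0: wrong page */
	return 0;
}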
diff --git a/services/std_svc/rmmd/rmmd_initial_context.h b/services/std_svc/rmmd/rmmd_initial_context.h
new file mode 100644
index 0000000..d7a743d
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd_initial_context.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RMMD_INITIAL_CONTEXT_H
+#define RMMD_INITIAL_CONTEXT_H
+
+#include <arch.h>
+
+/*
+ * SPSR_EL2
+ * M=0x9 (0b1001 EL2h)
+ * M[4]=0
+ * DAIF=0xF Exceptions masked on entry.
+ * BTYPE=0 BTI not yet supported.
+ * SSBS=0 Not yet supported.
+ * IL=0 Not an illegal exception return.
+ * SS=0 Not single stepping.
+ * PAN=1 RMM shouldn't access realm memory.
+ * UAO=0
+ * DIT=0
+ * TCO=0
+ * NZCV=0
+ */
+#define REALM_SPSR_EL2 ( \
+ SPSR_M_EL2H | \
+ (0xF << SPSR_DAIF_SHIFT) | \
+ SPSR_PAN_BIT \
+ )
+
+#endif /* RMMD_INITIAL_CONTEXT_H */
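The value that REALM_SPSR_EL2 expands to can be worked out by hand, assuming the architectural SPSR layout (M[3:0] = 0b1001 for EL2h, DAIF at bits [9:6], PAN at bit 22). The standalone sketch below spells those bit positions out locally rather than pulling them from arch.h:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t m_el2h = 0x9ULL;	/* M[3:0] = 0b1001: EL2 using SP_EL2 */
	uint64_t daif = 0xFULL << 6;	/* D, A, I and F all masked on entry */
	uint64_t pan = 1ULL << 22;	/* PSTATE.PAN set */
	uint64_t spsr = m_el2h | daif | pan;

	printf("REALM_SPSR_EL2 = 0x%" PRIx64 "\n", spsr);	/* 0x4003c9 */
	return 0;
}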
diff --git a/services/std_svc/rmmd/rmmd_main.c b/services/std_svc/rmmd/rmmd_main.c
new file mode 100644
index 0000000..6bd9fdf
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd_main.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <arch_features.h>
+#include <bl31/bl31.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/el3_runtime/pubsub.h>
+#include <lib/gpt_rme/gpt_rme.h>
+
+#include <lib/spinlock.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+#include <services/rmmd_svc.h>
+#include <smccc_helpers.h>
+#include <lib/extensions/sve.h>
+#include "rmmd_initial_context.h"
+#include "rmmd_private.h"
+
+/*******************************************************************************
+ * RMM boot failure flag
+ ******************************************************************************/
+static bool rmm_boot_failed;
+
+/*******************************************************************************
+ * RMM context information.
+ ******************************************************************************/
+rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * RMM entry point information. Discovered on the primary core and reused
+ * on secondary cores.
+ ******************************************************************************/
+static entry_point_info_t *rmm_ep_info;
+
+/*******************************************************************************
+ * Static function declaration.
+ ******************************************************************************/
+static int32_t rmm_init(void);
+
+/*******************************************************************************
+ * This function takes an RMM context pointer and performs a synchronous entry
+ * into it.
+ ******************************************************************************/
+uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
+{
+ uint64_t rc;
+
+ assert(rmm_ctx != NULL);
+
+ cm_set_context(&(rmm_ctx->cpu_ctx), REALM);
+
+ /* Restore the realm context assigned above */
+ cm_el1_sysregs_context_restore(REALM);
+ cm_el2_sysregs_context_restore(REALM);
+ cm_set_next_eret_context(REALM);
+
+ /* Enter RMM */
+ rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);
+
+ /*
+ * Save realm context. EL1 and EL2 Non-secure
+ * contexts will be restored before exiting to
+ * Non-secure world, therefore there is no need
+ * to clear EL1 and EL2 context registers.
+ */
+ cm_el1_sysregs_context_save(REALM);
+ cm_el2_sysregs_context_save(REALM);
+
+ return rc;
+}
+
+/*******************************************************************************
+ * This function returns to the place where rmmd_rmm_sync_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
+{
+ rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];
+
+ /* Get context of the RMM in use by this CPU. */
+ assert(cm_get_context(REALM) == &(ctx->cpu_ctx));
+
+ /*
+ * The RMMD must have initiated the original request through a
+ * synchronous entry into RMM. Jump back to the original C runtime
+	 * context with the value of rc in x0.
+ */
+ rmmd_rmm_exit(ctx->c_rt_ctx, rc);
+
+ panic();
+}
+
+static void rmm_el2_context_init(el2_sysregs_t *regs)
+{
+ regs->ctx_regs[CTX_SPSR_EL2 >> 3] = REALM_SPSR_EL2;
+ regs->ctx_regs[CTX_SCTLR_EL2 >> 3] = SCTLR_EL2_RES1;
+}
+
+/*******************************************************************************
+ * Enable architecture extensions on first entry to Realm world.
+ ******************************************************************************/
+static void manage_extensions_realm(cpu_context_t *ctx)
+{
+#if ENABLE_SVE_FOR_NS
+ /*
+ * Enable SVE and FPU in realm context when it is enabled for NS.
+ * Realm manager must ensure that the SVE and FPU register
+ * contexts are properly managed.
+ */
+ sve_enable(ctx);
+#else
+ /*
+ * Disable SVE and FPU in realm context when it is disabled for NS.
+ */
+ sve_disable(ctx);
+#endif /* ENABLE_SVE_FOR_NS */
+}
+
+/*******************************************************************************
+ * Jump to the RMM for the first time.
+ ******************************************************************************/
+static int32_t rmm_init(void)
+{
+ long rc;
+ rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];
+
+ INFO("RMM init start.\n");
+
+ /* Enable architecture extensions */
+ manage_extensions_realm(&ctx->cpu_ctx);
+
+ /* Initialize RMM EL2 context. */
+ rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);
+
+ rc = rmmd_rmm_sync_entry(ctx);
+ if (rc != E_RMM_BOOT_SUCCESS) {
+ ERROR("RMM init failed: %ld\n", rc);
+ /* Mark the boot as failed for all the CPUs */
+ rmm_boot_failed = true;
+ return 0;
+ }
+
+ INFO("RMM init end.\n");
+
+ return 1;
+}
+
+/*******************************************************************************
+ * Load and read RMM manifest, setup RMM.
+ ******************************************************************************/
+int rmmd_setup(void)
+{
+ size_t shared_buf_size __unused;
+ uintptr_t shared_buf_base;
+ uint32_t ep_attr;
+ unsigned int linear_id = plat_my_core_pos();
+ rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];
+ rmm_manifest_t *manifest;
+ int rc;
+
+ /* Make sure RME is supported. */
+ assert(get_armv9_2_feat_rme_support() != 0U);
+
+ rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
+ if (rmm_ep_info == NULL) {
+		WARN("No RMM image provided by BL2 boot loader. Booting "
+ "device without RMM initialization. SMCs destined for "
+ "RMM will return SMC_UNK\n");
+ return -ENOENT;
+ }
+
+ /* Under no circumstances will this parameter be 0 */
+ assert(rmm_ep_info->pc == RMM_BASE);
+
+ /* Initialise an entrypoint to set up the CPU context */
+ ep_attr = EP_REALM;
+ if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
+ ep_attr |= EP_EE_BIG;
+ }
+
+ SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
+ rmm_ep_info->spsr = SPSR_64(MODE_EL2,
+ MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+
+ shared_buf_size =
+ plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);
+
+ assert((shared_buf_size == SZ_4K) &&
+ ((void *)shared_buf_base != NULL));
+
+ /* Load the boot manifest at the beginning of the shared area */
+ manifest = (rmm_manifest_t *)shared_buf_base;
+ rc = plat_rmmd_load_manifest(manifest);
+ if (rc != 0) {
+ ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
+ return rc;
+ }
+ flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);
+
+ /*
+ * Prepare coldboot arguments for RMM:
+ * arg0: This CPUID (primary processor).
+ * arg1: Version for this Boot Interface.
+ * arg2: PLATFORM_CORE_COUNT.
+ * arg3: Base address for the EL3 <-> RMM shared area. The boot
+ * manifest will be stored at the beginning of this area.
+ */
+ rmm_ep_info->args.arg0 = linear_id;
+ rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
+ rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
+ rmm_ep_info->args.arg3 = shared_buf_base;
+
+ /* Initialise RMM context with this entry point information */
+ cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);
+
+ INFO("RMM setup done.\n");
+
+ /* Register init function for deferred init. */
+ bl31_register_rmm_init(&rmm_init);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Forward SMC to the other security state
+ ******************************************************************************/
+static uint64_t rmmd_smc_forward(uint32_t src_sec_state,
+ uint32_t dst_sec_state, uint64_t x0,
+ uint64_t x1, uint64_t x2, uint64_t x3,
+ uint64_t x4, void *handle)
+{
+ cpu_context_t *ctx = cm_get_context(dst_sec_state);
+
+ /* Save incoming security state */
+ cm_el1_sysregs_context_save(src_sec_state);
+ cm_el2_sysregs_context_save(src_sec_state);
+
+ /* Restore outgoing security state */
+ cm_el1_sysregs_context_restore(dst_sec_state);
+ cm_el2_sysregs_context_restore(dst_sec_state);
+ cm_set_next_eret_context(dst_sec_state);
+
+ /*
+	 * As per SMCCC v1.2, registers x4 to x7 must be preserved unless
+	 * they are used as return arguments. Hence the onward and return
+	 * paths are handled differently: up to 8 arguments are supported
+	 * on the onward path and 4 on the return path.
+	 * Register x4 will be preserved by the RMM if it is not used as a
+	 * return argument.
+ */
+ if (src_sec_state == NON_SECURE) {
+ SMC_RET8(ctx, x0, x1, x2, x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+ }
+
+ SMC_RET5(ctx, x0, x1, x2, x3, x4);
+}
+
+/*******************************************************************************
+ * This function handles all SMCs in the range reserved for RMI. Each call is
+ * either forwarded to the other security state or handled by the RMM dispatcher
+ ******************************************************************************/
+uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+ uint64_t x3, uint64_t x4, void *cookie,
+ void *handle, uint64_t flags)
+{
+ uint32_t src_sec_state;
+
+ /* If RMM failed to boot, treat any RMI SMC as unknown */
+ if (rmm_boot_failed) {
+ WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ /* Determine which security state this SMC originated from */
+ src_sec_state = caller_sec_state(flags);
+
+ /* RMI must not be invoked by the Secure world */
+ if (src_sec_state == SMC_FROM_SECURE) {
+ WARN("RMMD: RMI invoked by secure world.\n");
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ /*
+ * Forward an RMI call from the Normal world to the Realm world as it
+ * is.
+ */
+ if (src_sec_state == SMC_FROM_NON_SECURE) {
+ VERBOSE("RMMD: RMI call from non-secure world.\n");
+ return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
+ x1, x2, x3, x4, handle);
+ }
+
+ if (src_sec_state != SMC_FROM_REALM) {
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ switch (smc_fid) {
+ case RMM_RMI_REQ_COMPLETE: {
+ uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+
+ return rmmd_smc_forward(REALM, NON_SECURE, x1,
+ x2, x3, x4, x5, handle);
+ }
+ default:
+ WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
+
+/*******************************************************************************
+ * This CPU has been turned on. Enter RMM to initialise R-EL2. Entry into RMM
+ * is done after initialising minimal architectural state that guarantees safe
+ * execution.
+ ******************************************************************************/
+static void *rmmd_cpu_on_finish_handler(const void *arg)
+{
+ long rc;
+ uint32_t linear_id = plat_my_core_pos();
+ rmmd_rmm_context_t *ctx = &rmm_context[linear_id];
+
+ if (rmm_boot_failed) {
+ /* RMM Boot failed on a previous CPU. Abort. */
+ ERROR("RMM Failed to initialize. Ignoring for CPU%d\n",
+ linear_id);
+ return NULL;
+ }
+
+ /*
+ * Prepare warmboot arguments for RMM:
+ * arg0: This CPUID.
+ * arg1 to arg3: Not used.
+ */
+ rmm_ep_info->args.arg0 = linear_id;
+ rmm_ep_info->args.arg1 = 0ULL;
+ rmm_ep_info->args.arg2 = 0ULL;
+ rmm_ep_info->args.arg3 = 0ULL;
+
+ /* Initialise RMM context with this entry point information */
+ cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);
+
+ /* Enable architecture extensions */
+ manage_extensions_realm(&ctx->cpu_ctx);
+
+ /* Initialize RMM EL2 context. */
+ rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);
+
+ rc = rmmd_rmm_sync_entry(ctx);
+
+ if (rc != E_RMM_BOOT_SUCCESS) {
+ ERROR("RMM init failed on CPU%d: %ld\n", linear_id, rc);
+ /* Mark the boot as failed for any other booting CPU */
+ rmm_boot_failed = true;
+ }
+
+ return NULL;
+}
+
+/* Subscribe to PSCI CPU on to initialize RMM on secondary */
+SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);
+
+/* Convert GPT lib error to RMMD GTS error */
+static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
+{
+ int ret;
+
+ if (error == 0) {
+ return E_RMM_OK;
+ }
+
+ if (error == -EINVAL) {
+ ret = E_RMM_BAD_ADDR;
+ } else {
+ /* This is the only other error code we expect */
+ assert(error == -EPERM);
+ ret = E_RMM_BAD_PAS;
+ }
+
+ ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n",
+ error, address, smc_fid);
+ return ret;
+}
+
+/*******************************************************************************
+ * This function handles RMM-EL3 interface SMCs
+ ******************************************************************************/
+uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+ uint64_t x3, uint64_t x4, void *cookie,
+ void *handle, uint64_t flags)
+{
+ uint32_t src_sec_state;
+ int ret;
+
+ /* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
+ if (rmm_boot_failed) {
+ WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ /* Determine which security state this SMC originated from */
+ src_sec_state = caller_sec_state(flags);
+
+ if (src_sec_state != SMC_FROM_REALM) {
+ WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ switch (smc_fid) {
+ case RMM_GTSI_DELEGATE:
+ ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
+ SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
+ case RMM_GTSI_UNDELEGATE:
+ ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
+ SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
+ case RMM_ATTEST_GET_PLAT_TOKEN:
+ ret = rmmd_attest_get_platform_token(x1, &x2, x3);
+ SMC_RET2(handle, ret, x2);
+ case RMM_ATTEST_GET_REALM_KEY:
+ ret = rmmd_attest_get_signing_key(x1, &x2, x3);
+ SMC_RET2(handle, ret, x2);
+
+ case RMM_BOOT_COMPLETE:
+ VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
+ rmmd_rmm_sync_exit(x1);
+
+ default:
+ WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
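rmmd_setup() above relies on two platform hooks: plat_rmmd_get_el3_rmm_shared_mem(), which must report a 4KB shared area, and plat_rmmd_load_manifest(), which fills the boot manifest placed at the start of that area. The standalone mock below only mirrors the calling contract visible in this file; the rmm_manifest_t fields, the static buffer and the version value are illustrative stand-ins, not any real platform port.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {			/* stand-in for the real rmm_manifest_t */
	uint32_t version;
	uintptr_t plat_data;
} rmm_manifest_t;

/* Hypothetical 4KB shared area; a real platform carves this out of EL3 memory. */
static uint8_t rmm_shared[4096] __attribute__((aligned(4096)));

/* rmmd_setup() asserts that the returned size is 4KB and the base is non-NULL. */
size_t plat_rmmd_get_el3_rmm_shared_mem(uintptr_t *base)
{
	*base = (uintptr_t)rmm_shared;
	return sizeof(rmm_shared);
}

/*
 * Called with the manifest placed at the start of the shared area;
 * a non-zero return value makes rmmd_setup() fail.
 */
int plat_rmmd_load_manifest(rmm_manifest_t *manifest)
{
	manifest->version = 3U;		/* illustrative value only */
	manifest->plat_data = 0UL;
	return 0;
}

int main(void)
{
	uintptr_t base;
	size_t size = plat_rmmd_get_el3_rmm_shared_mem(&base);

	printf("shared area %p, %zu bytes, manifest rc %d\n", (void *)base,
	       size, plat_rmmd_load_manifest((rmm_manifest_t *)base));
	return 0;
}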
diff --git a/services/std_svc/rmmd/rmmd_private.h b/services/std_svc/rmmd/rmmd_private.h
new file mode 100644
index 0000000..4954a43
--- /dev/null
+++ b/services/std_svc/rmmd/rmmd_private.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RMMD_PRIVATE_H
+#define RMMD_PRIVATE_H
+
+#include <context.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define RMMD_C_RT_CTX_X19 0x0
+#define RMMD_C_RT_CTX_X20 0x8
+#define RMMD_C_RT_CTX_X21 0x10
+#define RMMD_C_RT_CTX_X22 0x18
+#define RMMD_C_RT_CTX_X23 0x20
+#define RMMD_C_RT_CTX_X24 0x28
+#define RMMD_C_RT_CTX_X25 0x30
+#define RMMD_C_RT_CTX_X26 0x38
+#define RMMD_C_RT_CTX_X27 0x40
+#define RMMD_C_RT_CTX_X28 0x48
+#define RMMD_C_RT_CTX_X29 0x50
+#define RMMD_C_RT_CTX_X30 0x58
+
+#define RMMD_C_RT_CTX_SIZE 0x60
+#define RMMD_C_RT_CTX_ENTRIES (RMMD_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+#include <stdint.h>
+
+/*
+ * Data structure used by the RMM dispatcher (RMMD) in EL3 to track context of
+ * the RMM at R-EL2.
+ */
+typedef struct rmmd_rmm_context {
+ uint64_t c_rt_ctx;
+ cpu_context_t cpu_ctx;
+} rmmd_rmm_context_t;
+
+/* Functions used to enter/exit the RMM synchronously */
+uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *ctx);
+__dead2 void rmmd_rmm_sync_exit(uint64_t rc);
+
+/* Functions implementing attestation utilities for RMM */
+int rmmd_attest_get_platform_token(uint64_t buf_pa, uint64_t *buf_size,
+ uint64_t c_size);
+int rmmd_attest_get_signing_key(uint64_t buf_pa, uint64_t *buf_size,
+ uint64_t ecc_curve);
+
+/* Assembly helpers */
+uint64_t rmmd_rmm_enter(uint64_t *c_rt_ctx);
+void __dead2 rmmd_rmm_exit(uint64_t c_rt_ctx, uint64_t ret);
+
+/* Reference to PM ops for the RMMD */
+extern const spd_pm_ops_t rmmd_pm;
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* RMMD_PRIVATE_H */
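The RMMD_C_RT_CTX_* offsets above describe the frame that rmmd_rmm_enter() builds on the EL3 stack. The standalone sketch below mirrors that layout as a C struct purely to sanity-check the arithmetic; the struct itself is illustrative and does not appear in the patch.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Mirror of the callee-saved frame saved by rmmd_rmm_enter(). */
struct rmmd_c_rt_frame {
	uint64_t x19, x20, x21, x22, x23, x24;
	uint64_t x25, x26, x27, x28, x29, x30;
};

/* These re-derive RMMD_C_RT_CTX_X19/X21/X29 and RMMD_C_RT_CTX_SIZE. */
static_assert(offsetof(struct rmmd_c_rt_frame, x19) == 0x00, "X19 offset");
static_assert(offsetof(struct rmmd_c_rt_frame, x21) == 0x10, "X21 offset");
static_assert(offsetof(struct rmmd_c_rt_frame, x29) == 0x50, "X29 offset");
static_assert(sizeof(struct rmmd_c_rt_frame) == 0x60, "frame size");

int main(void)
{
	return 0;	/* compiling cleanly is the whole check */
}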
diff --git a/services/std_svc/rmmd/trp/linker.lds b/services/std_svc/rmmd/trp/linker.lds
new file mode 100644
index 0000000..2b7f383
--- /dev/null
+++ b/services/std_svc/rmmd/trp/linker.lds
@@ -0,0 +1,71 @@
+/*
+ * (C) COPYRIGHT 2021 Arm Limited or its affiliates.
+ * ALL RIGHTS RESERVED
+ */
+
+#include <common/bl_common.ld.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+/* The image is mapped using 4K pages, so sections with different memory
+ * properties must be aligned at that granularity. */
+PAGE_SIZE_4K = 4096;
+
+OUTPUT_FORMAT("elf64-littleaarch64")
+OUTPUT_ARCH(aarch64)
+ENTRY(trp_head)
+
+MEMORY {
+ RAM (rwx): ORIGIN = RMM_BASE, LENGTH = RMM_LIMIT - RMM_BASE
+}
+
+
+SECTIONS
+{
+ . = RMM_BASE;
+
+ .text : {
+ *(.head.text)
+ . = ALIGN(8);
+ *(.text*)
+ } >RAM
+
+ . = ALIGN(PAGE_SIZE_4K);
+
+ .rodata : {
+ *(.rodata*)
+ } >RAM
+
+ . = ALIGN(PAGE_SIZE_4K);
+
+ __RW_START__ = . ;
+
+ .data : {
+ *(.data*)
+ } >RAM
+
+ .bss (NOLOAD) : {
+ __BSS_START__ = .;
+ *(.bss*)
+ __BSS_END__ = .;
+ } >RAM
+ __BSS_SIZE__ = SIZEOF(.bss);
+
+
+ STACK_SECTION >RAM
+
+
+ /*
+ * Define a linker symbol to mark the end of the RW memory area for this
+ * image.
+ */
+ __RW_END__ = .;
+ __RMM_END__ = .;
+
+
+ /DISCARD/ : { *(.dynstr*) }
+ /DISCARD/ : { *(.dynamic*) }
+ /DISCARD/ : { *(.plt*) }
+ /DISCARD/ : { *(.interp*) }
+ /DISCARD/ : { *(.gnu*) }
+ /DISCARD/ : { *(.note*) }
+}
diff --git a/services/std_svc/rmmd/trp/trp.mk b/services/std_svc/rmmd/trp/trp.mk
new file mode 100644
index 0000000..44bbf22
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp.mk
@@ -0,0 +1,21 @@
+#
+# Copyright (c) 2021-2022 Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+RMM_SOURCES += services/std_svc/rmmd/trp/trp_entry.S \
+ services/std_svc/rmmd/trp/trp_main.c \
+ services/std_svc/rmmd/trp/trp_helpers.c
+
+RMM_LINKERFILE := services/std_svc/rmmd/trp/linker.lds
+
+# Include the platform-specific TRP Makefile
+# If no platform-specific TRP Makefile exists, it means TRP is not supported
+# on this platform.
+TRP_PLAT_MAKEFILE := $(wildcard ${PLAT_DIR}/trp/trp-${PLAT}.mk)
+ifeq (,${TRP_PLAT_MAKEFILE})
+ $(error TRP is not supported on platform ${PLAT})
+else
+ include ${TRP_PLAT_MAKEFILE}
+endif
diff --git a/services/std_svc/rmmd/trp/trp_entry.S b/services/std_svc/rmmd/trp/trp_entry.S
new file mode 100644
index 0000000..47c1df1
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp_entry.S
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <services/rmmd_svc.h>
+
+#include <platform_def.h>
+#include "trp_private.h"
+
+.global trp_head
+.global trp_smc
+
+.section ".head.text", "ax"
+
+ /* ---------------------------------------------
+ * Populate the params in x0-x7 from the pointer
+ * to the smc args structure in x0.
+ * ---------------------------------------------
+ */
+ .macro restore_args_call_smc
+ ldp x6, x7, [x0, #TRP_ARG6]
+ ldp x4, x5, [x0, #TRP_ARG4]
+ ldp x2, x3, [x0, #TRP_ARG2]
+ ldp x0, x1, [x0, #TRP_ARG0]
+ smc #0
+ .endm
+
+ /* ---------------------------------------------
+ * Entry point for TRP
+ * ---------------------------------------------
+ */
+trp_head:
+ /*
+ * Stash arguments from previous boot stage
+ */
+ mov x20, x0
+ mov x21, x1
+ mov x22, x2
+ mov x23, x3
+
+ /*
+ * Validate CPUId before allocating a stack.
+ */
+ cmp x20, #PLATFORM_CORE_COUNT
+ b.lo 1f
+
+ mov_imm x0, RMM_BOOT_COMPLETE
+ mov_imm x1, E_RMM_BOOT_CPU_ID_OUT_OF_RANGE
+ smc #0
+
+	/* EL3 should never return here, so panic if it does */
+ b trp_panic
+
+1:
+ bl plat_set_my_stack
+
+ /*
+ * Find out whether this is a cold or warm boot
+ */
+ ldr x1, cold_boot_flag
+ cbz x1, warm_boot
+
+ /*
+ * Update cold boot flag to indicate cold boot is done
+ */
+ adr x2, cold_boot_flag
+ str xzr, [x2]
+
+ /* ---------------------------------------------
+ * Zero out BSS section
+ * ---------------------------------------------
+ */
+ ldr x0, =__BSS_START__
+ ldr x1, =__BSS_SIZE__
+ bl zeromem
+
+ mov x0, x20
+ mov x1, x21
+ mov x2, x22
+ mov x3, x23
+ bl trp_setup
+ bl trp_main
+warm_boot:
+ mov_imm x0, RMM_BOOT_COMPLETE
+ mov x1, xzr /* RMM_BOOT_SUCCESS */
+ smc #0
+ b trp_handler
+
+trp_panic:
+ no_ret plat_panic_handler
+
+ /*
+ * Flag to mark if it is a cold boot.
+	 * 1: cold boot, 0: warm boot.
+ */
+.align 3
+cold_boot_flag:
+ .dword 1
+
+ /* ---------------------------------------------
+ * Direct SMC call to BL31 service provided by
+ * RMM Dispatcher
+ * ---------------------------------------------
+ */
+func trp_smc
+ restore_args_call_smc
+ ret
+endfunc trp_smc
+
+ /* ---------------------------------------------
+ * RMI call handler
+ * ---------------------------------------------
+ */
+func trp_handler
+ bl trp_rmi_handler
+ restore_args_call_smc
+ b trp_handler
+endfunc trp_handler
diff --git a/services/std_svc/rmmd/trp/trp_helpers.c b/services/std_svc/rmmd/trp/trp_helpers.c
new file mode 100644
index 0000000..159f3a5
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp_helpers.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <plat/common/platform.h>
+#include <services/rmmd_svc.h>
+#include "trp_private.h"
+
+/*
+ * Per-CPU data structure used to populate the parameters for an SMC in C
+ * code; assembler code then uses a pointer to this structure to load x0-x7.
+ */
+static trp_args_t trp_smc_args[PLATFORM_CORE_COUNT];
+
+/*
+ * Set the arguments for SMC call
+ */
+trp_args_t *set_smc_args(uint64_t arg0,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ uint32_t linear_id;
+ trp_args_t *pcpu_smc_args;
+
+ /*
+	 * Return to the Secure Monitor by raising an SMC. The results of the
+	 * service are passed as arguments to the SMC.
+ */
+ linear_id = plat_my_core_pos();
+ pcpu_smc_args = &trp_smc_args[linear_id];
+ write_trp_arg(pcpu_smc_args, TRP_ARG0, arg0);
+ write_trp_arg(pcpu_smc_args, TRP_ARG1, arg1);
+ write_trp_arg(pcpu_smc_args, TRP_ARG2, arg2);
+ write_trp_arg(pcpu_smc_args, TRP_ARG3, arg3);
+ write_trp_arg(pcpu_smc_args, TRP_ARG4, arg4);
+ write_trp_arg(pcpu_smc_args, TRP_ARG5, arg5);
+ write_trp_arg(pcpu_smc_args, TRP_ARG6, arg6);
+ write_trp_arg(pcpu_smc_args, TRP_ARG7, arg7);
+
+ return pcpu_smc_args;
+}
+
+/*
+ * Abort the boot process with the reason given in err.
+ */
+__dead2 void trp_boot_abort(uint64_t err)
+{
+ (void)trp_smc(set_smc_args(RMM_BOOT_COMPLETE, err, 0, 0, 0, 0, 0, 0));
+ panic();
+}
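The TRP reaches EL3 only through the pattern used by trp_boot_abort() above (and by trp_main.c below): fill the per-CPU argument block with set_smc_args() and hand its address to the trp_smc() assembly shim, which loads x0-x7 from it and issues SMC #0. The runnable sketch below mocks both helpers just to show the shape of a call; the FID and granule address are made up.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t regs[8]; } trp_args_t;	/* mirrors trp_helpers.c */

static trp_args_t args;					/* single-CPU stand-in */

static trp_args_t *mock_set_smc_args(uint64_t fid, uint64_t arg1)
{
	args.regs[0] = fid;	/* becomes x0: the function ID */
	args.regs[1] = arg1;	/* becomes x1: the first argument */
	return &args;
}

/* The real trp_smc() executes "smc #0"; this mock prints what would go out. */
static uint64_t mock_trp_smc(trp_args_t *a)
{
	printf("SMC x0=0x%llx x1=0x%llx\n",
	       (unsigned long long)a->regs[0], (unsigned long long)a->regs[1]);
	return 0ULL;	/* pretend EL3 reported success */
}

int main(void)
{
	/* Shape of a delegate-style request: made-up FID and granule address. */
	return (int)mock_trp_smc(mock_set_smc_args(0x1234ULL, 0x88200000ULL));
}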
diff --git a/services/std_svc/rmmd/trp/trp_main.c b/services/std_svc/rmmd/trp/trp_main.c
new file mode 100644
index 0000000..5a56af0
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp_main.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <common/debug.h>
+#include <plat/common/platform.h>
+#include <services/rmm_core_manifest.h>
+#include <services/rmmd_svc.h>
+#include <services/trp/platform_trp.h>
+#include <trp_helpers.h>
+#include "trp_private.h"
+
+#include <platform_def.h>
+
+/* Parameters received from the previous image */
+static unsigned int trp_boot_abi_version;
+static uintptr_t trp_shared_region_start;
+
+/* Parameters received from boot manifest */
+uint32_t trp_boot_manifest_version;
+
+/*******************************************************************************
+ * Setup function for TRP.
+ ******************************************************************************/
+void trp_setup(uint64_t x0,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3)
+{
+ /*
+ * Validate boot parameters.
+ *
+ * According to the Boot Interface ABI v.0.1, the
+	 * parameters received from EL3 are:
+ * x0: CPUID (verified earlier so not used)
+ * x1: Boot Interface version
+ * x2: PLATFORM_CORE_COUNT
+ * x3: Pointer to the shared memory area.
+ */
+
+ (void)x0;
+
+ if (TRP_RMM_EL3_VERSION_GET_MAJOR(x1) != TRP_RMM_EL3_ABI_VERS_MAJOR) {
+ trp_boot_abort(E_RMM_BOOT_VERSION_MISMATCH);
+ }
+
+ if ((void *)x3 == NULL) {
+ trp_boot_abort(E_RMM_BOOT_INVALID_SHARED_BUFFER);
+ }
+
+ if (x2 > TRP_PLATFORM_CORE_COUNT) {
+ trp_boot_abort(E_RMM_BOOT_CPUS_OUT_OF_RANGE);
+ }
+
+ trp_boot_abi_version = x1;
+ trp_shared_region_start = x3;
+ flush_dcache_range((uintptr_t)&trp_boot_abi_version,
+ sizeof(trp_boot_abi_version));
+ flush_dcache_range((uintptr_t)&trp_shared_region_start,
+ sizeof(trp_shared_region_start));
+
+ /* Perform early platform-specific setup */
+ trp_early_platform_setup((rmm_manifest_t *)trp_shared_region_start);
+}
+
+/* Main function for TRP */
+void trp_main(void)
+{
+ NOTICE("TRP: %s\n", version_string);
+ NOTICE("TRP: %s\n", build_message);
+ NOTICE("TRP: Supported RMM-EL3 Interface ABI: v.%u.%u\n",
+ TRP_RMM_EL3_ABI_VERS_MAJOR, TRP_RMM_EL3_ABI_VERS_MINOR);
+ NOTICE("TRP: Boot Manifest Version : v.%u.%u\n",
+ RMMD_GET_MANIFEST_VERSION_MAJOR(trp_boot_manifest_version),
+ RMMD_GET_MANIFEST_VERSION_MINOR(trp_boot_manifest_version));
+ INFO("TRP: Memory base : 0x%lx\n", (unsigned long)RMM_BASE);
+ INFO("TRP: Base address for the shared region : 0x%lx\n",
+ (unsigned long)trp_shared_region_start);
+ INFO("TRP: Total size : 0x%lx bytes\n", (unsigned long)(RMM_END
+ - RMM_BASE));
+ INFO("TRP: RMM-EL3 Interface ABI reported by EL3: v.%u.%u\n",
+ TRP_RMM_EL3_VERSION_GET_MAJOR(trp_boot_abi_version),
+ TRP_RMM_EL3_VERSION_GET_MINOR(trp_boot_abi_version));
+}
+
+/*******************************************************************************
+ * Returning RMI version back to Normal World
+ ******************************************************************************/
+static trp_args_t *trp_ret_rmi_version(void)
+{
+ VERBOSE("RMM version is %u.%u\n", RMI_ABI_VERSION_MAJOR,
+ RMI_ABI_VERSION_MINOR);
+ return set_smc_args(RMM_RMI_REQ_COMPLETE, RMI_ABI_VERSION,
+ 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * Transitioning granule of NON-SECURE type to REALM type
+ ******************************************************************************/
+static trp_args_t *trp_asc_mark_realm(unsigned long long x1)
+{
+ unsigned long long ret;
+
+ VERBOSE("Delegating granule 0x%llx\n", x1);
+ ret = trp_smc(set_smc_args(RMM_GTSI_DELEGATE, x1, 0, 0, 0, 0, 0, 0));
+
+ if (ret != 0ULL) {
+ ERROR("Granule transition from NON-SECURE type to REALM type "
+ "failed 0x%llx\n", ret);
+ }
+ return set_smc_args(RMM_RMI_REQ_COMPLETE, ret, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * Transitioning granule of REALM type to NON-SECURE type
+ ******************************************************************************/
+static trp_args_t *trp_asc_mark_nonsecure(unsigned long long x1)
+{
+ unsigned long long ret;
+
+ VERBOSE("Undelegating granule 0x%llx\n", x1);
+ ret = trp_smc(set_smc_args(RMM_GTSI_UNDELEGATE, x1, 0, 0, 0, 0, 0, 0));
+
+ if (ret != 0ULL) {
+ ERROR("Granule transition from REALM type to NON-SECURE type "
+ "failed 0x%llx\n", ret);
+ }
+ return set_smc_args(RMM_RMI_REQ_COMPLETE, ret, 0, 0, 0, 0, 0, 0);
+}
+
+/*******************************************************************************
+ * Main RMI SMC handler function
+ ******************************************************************************/
+trp_args_t *trp_rmi_handler(unsigned long fid, unsigned long long x1)
+{
+ switch (fid) {
+ case RMI_RMM_REQ_VERSION:
+ return trp_ret_rmi_version();
+ case RMI_RMM_GRANULE_DELEGATE:
+ return trp_asc_mark_realm(x1);
+ case RMI_RMM_GRANULE_UNDELEGATE:
+ return trp_asc_mark_nonsecure(x1);
+ default:
+ ERROR("Invalid SMC code to %s, FID %lu\n", __func__, fid);
+ }
+ return set_smc_args(SMC_UNK, 0, 0, 0, 0, 0, 0, 0);
+}
diff --git a/services/std_svc/rmmd/trp/trp_private.h b/services/std_svc/rmmd/trp/trp_private.h
new file mode 100644
index 0000000..945ae1c
--- /dev/null
+++ b/services/std_svc/rmmd/trp/trp_private.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TRP_PRIVATE_H
+#define TRP_PRIVATE_H
+
+#include <services/rmmd_svc.h>
+#include <trp_helpers.h>
+
+/* Definitions for RMM-EL3 Interface ABI VERSION */
+#define TRP_RMM_EL3_ABI_VERS_MAJOR RMM_EL3_IFC_VERSION_MAJOR
+#define TRP_RMM_EL3_ABI_VERS_MINOR RMM_EL3_IFC_VERSION_MINOR
+#define TRP_RMM_EL3_ABI_VERS (((TRP_RMM_EL3_ABI_VERS_MAJOR & 0x7FFF) << 16) | \
+ (TRP_RMM_EL3_ABI_VERS_MINOR & 0xFFFF))
+
+#define TRP_PLATFORM_CORE_COUNT PLATFORM_CORE_COUNT
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#define write_trp_arg(args, offset, val) (((args)->regs[offset >> 3]) \
+ = val)
+/* RMI SMC64 FIDs handled by the TRP */
+#define RMI_RMM_REQ_VERSION SMC64_RMI_FID(U(0))
+#define RMI_RMM_GRANULE_DELEGATE SMC64_RMI_FID(U(1))
+#define RMI_RMM_GRANULE_UNDELEGATE SMC64_RMI_FID(U(2))
+
+/* Definitions for RMI VERSION */
+#define RMI_ABI_VERSION_MAJOR U(0x0)
+#define RMI_ABI_VERSION_MINOR U(0x0)
+#define RMI_ABI_VERSION (((RMI_ABI_VERSION_MAJOR & 0x7FFF) \
+ << 16) | \
+ (RMI_ABI_VERSION_MINOR & 0xFFFF))
+
+#define TRP_RMM_EL3_VERSION_GET_MAJOR(x) \
+ RMM_EL3_IFC_VERSION_GET_MAJOR((x))
+#define TRP_RMM_EL3_VERSION_GET_MINOR(x) \
+ RMM_EL3_IFC_VERSION_GET_MAJOR_MINOR((x))
+
+/* Helper to issue SMC calls to BL31 */
+uint64_t trp_smc(trp_args_t *);
+
+/* The main function, executed only by the primary CPU */
+void trp_main(void);
+
+/* Set up the TRP. Executed only by the primary CPU */
+void trp_setup(uint64_t x0,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3);
+
+#endif /* __ASSEMBLER__ */
+#endif /* TRP_PRIVATE_H */
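Both RMI_ABI_VERSION and TRP_RMM_EL3_ABI_VERS above pack a 15-bit major number into bits [30:16] and a 16-bit minor number into bits [15:0]. A standalone round-trip of that encoding (the v1.2 example is illustrative only):

#include <inttypes.h>
#include <stdio.h>

static uint32_t vers_encode(uint32_t major, uint32_t minor)
{
	return ((major & 0x7FFFU) << 16) | (minor & 0xFFFFU);
}

int main(void)
{
	uint32_t v = vers_encode(0U, 0U);	/* RMI v0.0, as defined above */

	printf("encoded 0x%08" PRIx32 ": major %" PRIu32 ", minor %" PRIu32 "\n",
	       v, (v >> 16) & 0x7FFFU, v & 0xFFFFU);

	/* An illustrative v1.2 would encode as 0x00010002. */
	printf("v1.2 -> 0x%08" PRIx32 "\n", vers_encode(1U, 2U));
	return 0;
}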