Diffstat (limited to 'plat/common')
-rw-r--r--  plat/common/aarch32/crash_console_helpers.S  |  68
-rw-r--r--  plat/common/aarch32/plat_common.c             |  21
-rw-r--r--  plat/common/aarch32/plat_sp_min_common.c      |  25
-rw-r--r--  plat/common/aarch32/platform_helpers.S        |  89
-rw-r--r--  plat/common/aarch32/platform_mp_stack.S       |  47
-rw-r--r--  plat/common/aarch32/platform_up_stack.S       |  47
-rw-r--r--  plat/common/aarch64/crash_console_helpers.S   | 187
-rw-r--r--  plat/common/aarch64/plat_common.c             | 104
-rw-r--r--  plat/common/aarch64/plat_ehf.c                |  37
-rw-r--r--  plat/common/aarch64/platform_helpers.S        | 122
-rw-r--r--  plat/common/aarch64/platform_mp_stack.S       |  61
-rw-r--r--  plat/common/aarch64/platform_up_stack.S       |  50
-rw-r--r--  plat/common/plat_bl1_common.c                 | 117
-rw-r--r--  plat/common/plat_bl_common.c                  | 137
-rw-r--r--  plat/common/plat_gicv2.c                      | 336
-rw-r--r--  plat/common/plat_gicv3.c                      | 370
-rw-r--r--  plat/common/plat_log_common.c                 |  29
-rw-r--r--  plat/common/plat_psci_common.c                | 167
-rw-r--r--  plat/common/plat_spmd_manifest.c              | 194
-rw-r--r--  plat/common/tbbr/plat_tbbr.c                  |  52
-rw-r--r--  plat/common/ubsan.c                           | 220
21 files changed, 2480 insertions, 0 deletions
diff --git a/plat/common/aarch32/crash_console_helpers.S b/plat/common/aarch32/crash_console_helpers.S
new file mode 100644
index 0000000..ea04f56
--- /dev/null
+++ b/plat/common/aarch32/crash_console_helpers.S
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * If a platform wishes to use the functions in this file, it must add this
+ * file to its own Makefile; the file is not included in the common Makefile.
+ */
+
+#include <asm_macros.S>
+#include <drivers/console.h>
+
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl plat_crash_console_flush
+
+ /* -----------------------------------------------------
+ * int plat_crash_console_init(void)
+ * Use normal console by default. Switch it to crash
+ * mode so serial consoles become active again.
+ * NOTE: This default implementation will only work for
+ * crashes that occur after a normal console (marked
+ * valid for the crash state) has been registered with
+ * the console framework. To debug crashes that occur
+ * earlier, the platform has to override these functions
+ * with an implementation that initializes a console
+ * driver with hardcoded parameters. See
+ * docs/porting-guide.rst for more information.
+ * -----------------------------------------------------
+ */
+func plat_crash_console_init
+#if defined(IMAGE_BL1)
+ /*
+ * BL1 code can possibly crash so early that the data segment is not yet
+ * accessible. Don't risk undefined behavior by trying to run the normal
+ * console framework. Platforms that want to debug BL1 will need to
+ * override this with custom functions that can run from registers only.
+ */
+ mov r0, #0
+ bx lr
+#else /* IMAGE_BL1 */
+ mov r3, lr
+ mov r0, #CONSOLE_FLAG_CRASH
+ bl console_switch_state
+ mov r0, #1
+ bx r3
+#endif
+endfunc plat_crash_console_init
+
+ /* -----------------------------------------------------
+ * void plat_crash_console_putc(int character)
+ * Output through the normal console by default.
+ * -----------------------------------------------------
+ */
+func plat_crash_console_putc
+ b console_putc
+endfunc plat_crash_console_putc
+
+ /* -----------------------------------------------------
+ * void plat_crash_console_flush(void)
+ * Flush normal console by default.
+ * -----------------------------------------------------
+ */
+func plat_crash_console_flush
+ b console_flush
+endfunc plat_crash_console_flush
diff --git a/plat/common/aarch32/plat_common.c b/plat/common/aarch32/plat_common.c
new file mode 100644
index 0000000..2c1a8fa
--- /dev/null
+++ b/plat/common/aarch32/plat_common.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/xlat_tables/xlat_mmu_helpers.h>
+#include <plat/common/platform.h>
+
+/*
+ * The following platform setup functions are weakly defined. They
+ * provide typical implementations that may be re-used by multiple
+ * platforms but may also be overridden by a platform if required.
+ */
+#pragma weak bl32_plat_enable_mmu
+
+
+void bl32_plat_enable_mmu(uint32_t flags)
+{
+ enable_mmu_svc_mon(flags);
+}
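
bl32_plat_enable_mmu() above is weak on purpose: a platform can supply its own strong definition when it needs different MMU flags. Below is a minimal sketch of such an override, assuming the DISABLE_DCACHE flag exposed by the xlat tables library; the file name and scenario are illustrative, not part of this patch.

/* Hypothetical plat/<vendor>/aarch32/bl32_plat_setup.c */
#include <stdint.h>

#include <lib/xlat_tables/xlat_mmu_helpers.h>
#include <plat/common/platform.h>

/* Strong definition overriding the weak default above. */
void bl32_plat_enable_mmu(uint32_t flags)
{
	/* Example: keep the data cache disabled while enabling the MMU. */
	enable_mmu_svc_mon(flags | DISABLE_DCACHE);
}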
diff --git a/plat/common/aarch32/plat_sp_min_common.c b/plat/common/aarch32/plat_sp_min_common.c
new file mode 100644
index 0000000..9493587
--- /dev/null
+++ b/plat/common/aarch32/plat_sp_min_common.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/console.h>
+#include <plat/common/platform.h>
+#include <platform_sp_min.h>
+
+/*
+ * The following platform setup functions are weakly defined. They
+ * provide typical implementations that may be re-used by multiple
+ * platforms but may also be overridden by a platform if required.
+ */
+#pragma weak sp_min_plat_runtime_setup
+
+void sp_min_plat_runtime_setup(void)
+{
+ /*
+ * Finish the use of console driver in SP_MIN so that any runtime logs
+ * from SP_MIN will be suppressed.
+ */
+ console_switch_state(CONSOLE_FLAG_RUNTIME);
+}
diff --git a/plat/common/aarch32/platform_helpers.S b/plat/common/aarch32/platform_helpers.S
new file mode 100644
index 0000000..75cc456
--- /dev/null
+++ b/plat/common/aarch32/platform_helpers.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .weak plat_report_exception
+ .weak plat_report_prefetch_abort
+ .weak plat_report_data_abort
+ .weak plat_reset_handler
+ .weak plat_disable_acp
+ .weak bl1_plat_prepare_exit
+ .weak platform_mem_init
+ .weak plat_panic_handler
+
+ /* -----------------------------------------------------
+ * Placeholder function which should be redefined by
+ * each platform.
+ * -----------------------------------------------------
+ */
+func plat_report_exception
+ bx lr
+endfunc plat_report_exception
+
+ /* -----------------------------------------------------
+ * Placeholder function which should be redefined by
+ * each platform.
+ * -----------------------------------------------------
+ */
+func plat_report_prefetch_abort
+ bx lr
+endfunc plat_report_prefetch_abort
+
+ /* -----------------------------------------------------
+ * Placeholder function which should be redefined by
+ * each platform.
+ * -----------------------------------------------------
+ */
+func plat_report_data_abort
+ bx lr
+endfunc plat_report_data_abort
+
+ /* -----------------------------------------------------
+ * Placeholder function which should be redefined by
+ * each platform.
+ * -----------------------------------------------------
+ */
+func plat_reset_handler
+ bx lr
+endfunc plat_reset_handler
+
+ /* -----------------------------------------------------
+ * Placeholder function which should be redefined by
+ * each platform.
+ * -----------------------------------------------------
+ */
+func plat_disable_acp
+ bx lr
+endfunc plat_disable_acp
+
+ /* ---------------------------------------------------------------------
+ * Placeholder function which should be redefined by
+ * each platform.
+ * ---------------------------------------------------------------------
+ */
+func platform_mem_init
+ bx lr
+endfunc platform_mem_init
+
+ /* -----------------------------------------------------
+ * void bl1_plat_prepare_exit(entry_point_info_t *ep_info);
+ * Called before exiting BL1. Default: do nothing
+ * -----------------------------------------------------
+ */
+func bl1_plat_prepare_exit
+ bx lr
+endfunc bl1_plat_prepare_exit
+
+ /* -----------------------------------------------------
+ * void plat_panic_handler(void) __dead2;
+ * Endless loop by default.
+ * -----------------------------------------------------
+ */
+func plat_panic_handler
+ b plat_panic_handler
+endfunc plat_panic_handler
diff --git a/plat/common/aarch32/platform_mp_stack.S b/plat/common/aarch32/platform_mp_stack.S
new file mode 100644
index 0000000..6c3d08d
--- /dev/null
+++ b/plat/common/aarch32/platform_mp_stack.S
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+ .weak plat_get_my_stack
+ .weak plat_set_my_stack
+
+ /* -----------------------------------------------------
+ * uintptr_t plat_get_my_stack (u_register_t mpidr)
+ *
+ * For a given CPU, this function returns the stack
+ * pointer for a stack allocated in device memory.
+ * -----------------------------------------------------
+ */
+func plat_get_my_stack
+ push {r4, lr}
+ get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ pop {r4, pc}
+endfunc plat_get_my_stack
+
+ /* -----------------------------------------------------
+ * void plat_set_my_stack ()
+ *
+ * For the current CPU, this function sets the stack
+ * pointer to a stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func plat_set_my_stack
+ mov r4, lr
+ get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ mov sp, r0
+ bx r4
+endfunc plat_set_my_stack
+
+ /* -----------------------------------------------------
+ * Per-cpu stacks in normal memory. Each cpu gets a
+ * stack of PLATFORM_STACK_SIZE bytes.
+ * -----------------------------------------------------
+ */
+declare_stack platform_normal_stacks, tzfw_normal_stacks, \
+ PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT
diff --git a/plat/common/aarch32/platform_up_stack.S b/plat/common/aarch32/platform_up_stack.S
new file mode 100644
index 0000000..836c13a
--- /dev/null
+++ b/plat/common/aarch32/platform_up_stack.S
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+ .weak plat_get_my_stack
+ .weak plat_set_my_stack
+
+ /* -----------------------------------------------------
+ * unsigned long plat_get_my_stack ()
+ *
+ * For cold-boot BL images, only the primary CPU needs
+ * a stack. This function returns the stack pointer for
+ * a stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func plat_get_my_stack
+ get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ bx lr
+endfunc plat_get_my_stack
+
+ /* -----------------------------------------------------
+ * void plat_set_my_stack ()
+ *
+ * For cold-boot BL images, only the primary CPU needs
+ * a stack. This function sets the stack pointer to a
+ * stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func plat_set_my_stack
+ get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ mov sp, r0
+ bx lr
+endfunc plat_set_my_stack
+
+ /* -----------------------------------------------------
+ * Per-cpu stacks in normal memory. Each cpu gets a
+ * stack of PLATFORM_STACK_SIZE bytes.
+ * -----------------------------------------------------
+ */
+declare_stack platform_normal_stacks, tzfw_normal_stacks, \
+ PLATFORM_STACK_SIZE, 1, CACHE_WRITEBACK_GRANULE
diff --git a/plat/common/aarch64/crash_console_helpers.S b/plat/common/aarch64/crash_console_helpers.S
new file mode 100644
index 0000000..e2950f5
--- /dev/null
+++ b/plat/common/aarch64/crash_console_helpers.S
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * If a platform wishes to use the functions in this file, it must add this
+ * file to its own Makefile; the file is not included in the common Makefile.
+ */
+
+#include <asm_macros.S>
+#include <drivers/console.h>
+
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl plat_crash_console_flush
+
+ /*
+	 * Spinlock to synchronize access to crash_console_triggered. We cannot
+ * acquire spinlocks when the cache is disabled, so in some cases (like
+ * late during CPU suspend) some risk remains.
+ */
+.section .data.crash_console_spinlock
+ define_asm_spinlock crash_console_spinlock
+
+ /*
+ * Flag to make sure that only one CPU can write a crash dump even if
+ * multiple crash at the same time. Interleaving crash dumps on the same
+ * console would just make the output unreadable, so it's better to only
+ * get a single but uncorrupted dump. This also means that we don't have
+ * to duplicate the reg_stash below for each CPU.
+ */
+.section .data.crash_console_triggered
+ crash_console_triggered: .byte 0
+
+ /*
+ * Space to stash away some register values while we're calling into
+ * console drivers and don't have a real stack available. We need x14,
+ * x15 and x30 for bookkeeping within the plat_crash_console functions
+ * themselves, and some console drivers use x16 and x17 as additional
+ * scratch space that is not preserved by the main crash reporting
+ * framework. (Note that x16 and x17 should really never be expected to
+ * retain their values across any function call, even between carefully
+ * designed assembly functions, since the linker is always free to
+ * insert a function call veneer that uses these registers as scratch
+ * space at any time. The current crash reporting framework doesn't
+ * really respect that, but since TF is usually linked as a single
+ * contiguous binary of less than 128MB, it seems to work in practice.)
+ */
+.section .data.crash_console_reg_stash
+ .align 3
+ crash_console_reg_stash: .quad 0, 0, 0, 0, 0
+
+ /* --------------------------------------------------------------------
+ * int plat_crash_console_init(void)
+ * Takes the crash console spinlock (if possible) and checks the trigger
+ * flag to make sure we're the first CPU to dump. If not, return an
+ * error (so crash dumping will fail but the CPU will still call
+ * plat_panic_handler() which may do important platform-specific tasks
+ * that may be needed on all crashing CPUs). In either case, the lock
+ * will be released so other CPUs can make forward progress on this.
+ * Clobbers: x0 - x4, x30
+ * --------------------------------------------------------------------
+ */
+func plat_crash_console_init
+#if defined(IMAGE_BL31)
+ mov x4, x30 /* x3 and x4 are not clobbered by spin_lock() */
+ mov x3, #0 /* return value */
+
+ mrs x1, sctlr_el3
+ tst x1, #SCTLR_C_BIT
+ beq skip_spinlock /* can't synchronize when cache disabled */
+
+ adrp x0, crash_console_spinlock
+ add x0, x0, :lo12:crash_console_spinlock
+ bl spin_lock
+
+skip_spinlock:
+ adrp x1, crash_console_triggered
+ add x1, x1, :lo12:crash_console_triggered
+ ldarb w2, [x1]
+ cmp w2, #0
+ bne init_error
+
+ mov x3, #1 /* set return value to success */
+ stlrb w3, [x1]
+
+init_error:
+ bl spin_unlock /* harmless if we didn't acquire the lock */
+ mov x0, x3
+ ret x4
+#else /* Only one CPU in BL1/BL2, no need to synchronize anything */
+ mov x0, #1
+ ret
+#endif
+endfunc plat_crash_console_init
+
+ /* --------------------------------------------------------------------
+ * int plat_crash_console_putc(char c)
+ * Prints the character on all consoles registered with the console
+ * framework that have CONSOLE_FLAG_CRASH set. Note that this is only
+ * helpful for crashes that occur after the platform initialization code
+ * has registered a console. Platforms using this implementation need to
+ * ensure that all console drivers they use that have the CRASH flag set
+ * support this (i.e. are written in assembly and comply with the register
+ * clobber requirements of plat_crash_console_putc()).
+ * --------------------------------------------------------------------
+ */
+func plat_crash_console_putc
+ adrp x1, crash_console_reg_stash
+ add x1, x1, :lo12:crash_console_reg_stash
+ stp x14, x15, [x1]
+ stp x16, x17, [x1, #16]
+ str x30, [x1, #32]
+
+ mov w14, w0 /* W14 = character to print */
+ adrp x15, console_list
+ ldr x15, [x15, :lo12:console_list] /* X15 = first console struct */
+
+putc_loop:
+ cbz x15, putc_done
+ ldr w1, [x15, #CONSOLE_T_FLAGS]
+ tst w1, #CONSOLE_FLAG_CRASH
+ b.eq putc_continue
+ ldr x2, [x15, #CONSOLE_T_PUTC]
+ cbz x2, putc_continue
+ cmp w14, #'\n'
+ b.ne putc
+ tst w1, #CONSOLE_FLAG_TRANSLATE_CRLF
+ b.eq putc
+ mov x1, x15
+ mov w0, #'\r'
+ blr x2
+ ldr x2, [x15, #CONSOLE_T_PUTC]
+putc:
+ mov x1, x15
+ mov w0, w14
+ blr x2
+putc_continue:
+ ldr x15, [x15] /* X15 = next struct */
+ b putc_loop
+
+putc_done:
+ adrp x1, crash_console_reg_stash
+ add x1, x1, :lo12:crash_console_reg_stash
+ ldp x14, x15, [x1]
+ ldp x16, x17, [x1, #16]
+ ldr x30, [x1, #32]
+ ret
+endfunc plat_crash_console_putc
+
+ /* --------------------------------------------------------------------
+	 * int plat_crash_console_flush(void)
+ * Flushes all consoles registered with the console framework that have
+ * CONSOLE_FLAG_CRASH set. Same requirements as putc().
+ * --------------------------------------------------------------------
+ */
+func plat_crash_console_flush
+ adrp x1, crash_console_reg_stash
+ add x1, x1, :lo12:crash_console_reg_stash
+ stp x30, x15, [x1]
+ stp x16, x17, [x1, #16]
+
+ adrp x15, console_list
+ ldr x15, [x15, :lo12:console_list] /* X15 = first console struct */
+
+flush_loop:
+ cbz x15, flush_done
+ ldr w1, [x15, #CONSOLE_T_FLAGS]
+ tst w1, #CONSOLE_FLAG_CRASH
+ b.eq flush_continue
+ ldr x2, [x15, #CONSOLE_T_FLUSH]
+ cbz x2, flush_continue
+ mov x0, x15
+ blr x2
+flush_continue:
+ ldr x15, [x15] /* X15 = next struct */
+ b flush_loop
+
+flush_done:
+ adrp x1, crash_console_reg_stash
+ add x1, x1, :lo12:crash_console_reg_stash
+ ldp x30, x15, [x1]
+ ldp x16, x17, [x1, #16]
+ ret
+endfunc plat_crash_console_flush
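
For readers less familiar with the assembly above, here is a hedged C rendering of the BL31 plat_crash_console_init() locking logic. It is a sketch for explanation only: the real code must run without a stack during crash reporting and therefore stays in assembly.

#include <stdint.h>

#include <arch.h>
#include <arch_helpers.h>
#include <lib/spinlock.h>

static spinlock_t lock;		/* mirrors crash_console_spinlock */
static uint8_t triggered;	/* mirrors crash_console_triggered */

static int crash_console_init_sketch(void)
{
	int ret = 0;

	/*
	 * Only take the lock when the data cache is on; otherwise the
	 * spinlock itself could misbehave, as the comment above notes.
	 */
	if ((read_sctlr_el3() & SCTLR_C_BIT) != 0U)
		spin_lock(&lock);

	if (triggered == 0U) {
		triggered = 1U;	/* we are the first (and only) dumper */
		ret = 1;
	}

	spin_unlock(&lock);	/* harmless if the lock was never taken */
	return ret;
}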
diff --git a/plat/common/aarch64/plat_common.c b/plat/common/aarch64/plat_common.c
new file mode 100644
index 0000000..8ce1d6c
--- /dev/null
+++ b/plat/common/aarch64/plat_common.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2014-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/console.h>
+#if RAS_EXTENSION
+#include <lib/extensions/ras.h>
+#endif
+#include <lib/xlat_tables/xlat_mmu_helpers.h>
+#include <plat/common/platform.h>
+
+/*
+ * The following platform setup functions are weakly defined. They
+ * provide typical implementations that may be re-used by multiple
+ * platforms but may also be overridden by a platform if required.
+ */
+#pragma weak bl31_plat_runtime_setup
+
+#if SDEI_SUPPORT
+#pragma weak plat_sdei_handle_masked_trigger
+#pragma weak plat_sdei_validate_entry_point
+#endif
+
+#pragma weak plat_ea_handler = plat_default_ea_handler
+
+void bl31_plat_runtime_setup(void)
+{
+ console_switch_state(CONSOLE_FLAG_RUNTIME);
+}
+
+/*
+ * Helper function for platform_get_core_pos() when platform compatibility is
+ * disabled. This is to enable SPDs using the older platform API to continue
+ * to work.
+ */
+unsigned int platform_core_pos_helper(unsigned long mpidr)
+{
+ int idx = plat_core_pos_by_mpidr(mpidr);
+ assert(idx >= 0);
+ return idx;
+}
+
+#if SDEI_SUPPORT
+/*
+ * Function that handles spurious SDEI interrupts while events are masked.
+ */
+void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr)
+{
+ WARN("Spurious SDEI interrupt %u on masked PE %" PRIx64 "\n", intr, mpidr);
+}
+
+/*
+ * Default function to validate the SDEI entry point; it always returns success.
+ * Platforms may override this with their own validation mechanism.
+ */
+int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode)
+{
+ return 0;
+}
+#endif
+
+const char *get_el_str(unsigned int el)
+{
+ if (el == MODE_EL3) {
+ return "EL3";
+ } else if (el == MODE_EL2) {
+ return "EL2";
+ }
+ return "EL1";
+}
+
+/* RAS functions common to AArch64 ARM platforms */
+void plat_default_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
+ void *handle, uint64_t flags)
+{
+#if RAS_EXTENSION
+ /* Call RAS EA handler */
+ int handled = ras_ea_handler(ea_reason, syndrome, cookie, handle, flags);
+ if (handled != 0)
+ return;
+#endif
+ unsigned int level = (unsigned int)GET_EL(read_spsr_el3());
+
+ ERROR_NL();
+ ERROR("Unhandled External Abort received on 0x%lx from %s\n",
+ read_mpidr_el1(), get_el_str(level));
+ ERROR("exception reason=%u syndrome=0x%" PRIx64 "\n", ea_reason, syndrome);
+#if HANDLE_EA_EL3_FIRST_NS
+ /* Skip backtrace for lower EL */
+ if (level != MODE_EL3) {
+ console_flush();
+ do_panic();
+ }
+#endif
+ panic();
+}
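
plat_sdei_validate_entry_point() above deliberately accepts everything; a platform that enables SDEI normally replaces it with a range check on the client-provided entry point. A hedged sketch follows, where the Non-secure DRAM window macros are assumptions for illustration only.

#include <errno.h>
#include <stdint.h>

/* Illustrative limits; a real platform would take these from platform_def.h. */
#define PLAT_NS_DRAM_BASE	0x80000000ULL
#define PLAT_NS_DRAM_SIZE	0x40000000ULL

int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode)
{
	(void)client_mode;	/* mode-specific checks omitted in this sketch */

	/* Only accept entry points inside the Non-secure DRAM window. */
	if ((ep < PLAT_NS_DRAM_BASE) ||
	    (ep >= (PLAT_NS_DRAM_BASE + PLAT_NS_DRAM_SIZE)))
		return -EINVAL;

	return 0;
}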
diff --git a/plat/common/aarch64/plat_ehf.c b/plat/common/aarch64/plat_ehf.c
new file mode 100644
index 0000000..da76884
--- /dev/null
+++ b/plat/common/aarch64/plat_ehf.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, Broadcom
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl31/ehf.h>
+
+#include <platform_def.h>
+
+/*
+ * Enumeration of priority levels on ARM platforms.
+ */
+ehf_pri_desc_t plat_exceptions[] = {
+#if RAS_EXTENSION
+ /* RAS Priority */
+ EHF_PRI_DESC(PLAT_PRI_BITS, PLAT_RAS_PRI),
+#endif
+
+#if SDEI_SUPPORT
+ /* Critical priority SDEI */
+ EHF_PRI_DESC(PLAT_PRI_BITS, PLAT_SDEI_CRITICAL_PRI),
+
+ /* Normal priority SDEI */
+ EHF_PRI_DESC(PLAT_PRI_BITS, PLAT_SDEI_NORMAL_PRI),
+#endif
+#if SPM_MM
+ EHF_PRI_DESC(PLAT_PRI_BITS, PLAT_SP_PRI),
+#endif
+	/* Platform-specific exception descriptions */
+#ifdef PLAT_EHF_DESC
+ PLAT_EHF_DESC,
+#endif
+};
+
+/* Plug in ARM exceptions to Exception Handling Framework. */
+EHF_REGISTER_PRIORITIES(plat_exceptions, ARRAY_SIZE(plat_exceptions), PLAT_PRI_BITS);
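
The descriptor array above expects the platform to define its secure interrupt priorities and PLAT_PRI_BITS in platform_def.h, with numerically lower values meaning higher priority; PLAT_EHF_DESC is an optional hook for extra platform entries. The fragment below is purely illustrative, with assumed values rather than anything mandated by this patch.

/* Illustrative platform_def.h fragment (values are example assumptions). */
#define PLAT_PRI_BITS			3
#define PLAT_RAS_PRI			0x10	/* highest secure priority */
#define PLAT_SDEI_CRITICAL_PRI		0x60
#define PLAT_SDEI_NORMAL_PRI		0x70	/* lowest of the three */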
diff --git a/plat/common/aarch64/platform_helpers.S b/plat/common/aarch64/platform_helpers.S
new file mode 100644
index 0000000..bc650c9
--- /dev/null
+++ b/plat/common/aarch64/platform_helpers.S
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <drivers/console.h>
+#include <platform_def.h>
+
+ .weak plat_report_exception
+ .weak plat_reset_handler
+ .weak plat_disable_acp
+ .weak bl1_plat_prepare_exit
+ .weak plat_panic_handler
+ .weak bl31_plat_enable_mmu
+ .weak bl32_plat_enable_mmu
+
+ .weak plat_handle_uncontainable_ea
+ .weak plat_handle_double_fault
+ .weak plat_handle_el3_ea
+
+#define MPIDR_RES_BIT_MASK 0xff000000
+
+ /* -----------------------------------------------------
+ * Placeholder function which should be redefined by
+ * each platform.
+ * -----------------------------------------------------
+ */
+func plat_report_exception
+ ret
+endfunc plat_report_exception
+
+ /* -----------------------------------------------------
+ * Placeholder function which should be redefined by
+ * each platform. This function should preserve x19 - x29.
+ * -----------------------------------------------------
+ */
+func plat_reset_handler
+ ret
+endfunc plat_reset_handler
+
+ /* -----------------------------------------------------
+ * Placeholder function which should be redefined by
+ * each platform. This function is allowed to use
+ * registers x0 - x17.
+ * -----------------------------------------------------
+ */
+func plat_disable_acp
+ ret
+endfunc plat_disable_acp
+
+ /* -----------------------------------------------------
+ * void bl1_plat_prepare_exit(entry_point_info_t *ep_info);
+ * Called before exiting BL1. Default: do nothing
+ * -----------------------------------------------------
+ */
+func bl1_plat_prepare_exit
+ ret
+endfunc bl1_plat_prepare_exit
+
+ /* -----------------------------------------------------
+ * void plat_panic_handler(void) __dead2;
+ * Endless loop by default.
+ * -----------------------------------------------------
+ */
+func plat_panic_handler
+ wfi
+ b plat_panic_handler
+endfunc plat_panic_handler
+
+ /* -----------------------------------------------------
+ * void bl31_plat_enable_mmu(uint32_t flags);
+ *
+ * Enable MMU in BL31.
+ * -----------------------------------------------------
+ */
+func bl31_plat_enable_mmu
+ b enable_mmu_direct_el3
+endfunc bl31_plat_enable_mmu
+
+ /* -----------------------------------------------------
+ * void bl32_plat_enable_mmu(uint32_t flags);
+ *
+ * Enable MMU in BL32.
+ * -----------------------------------------------------
+ */
+func bl32_plat_enable_mmu
+ b enable_mmu_direct_el1
+endfunc bl32_plat_enable_mmu
+
+
+ /* -----------------------------------------------------
+ * Platform handler for Uncontainable External Abort.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ * -----------------------------------------------------
+ */
+func plat_handle_uncontainable_ea
+ b report_unhandled_exception
+endfunc plat_handle_uncontainable_ea
+
+ /* -----------------------------------------------------
+ * Platform handler for Double Fault.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ * -----------------------------------------------------
+ */
+func plat_handle_double_fault
+ b report_unhandled_exception
+endfunc plat_handle_double_fault
+
+ /* -----------------------------------------------------
+ * Platform handler for EL3 External Abort.
+ * -----------------------------------------------------
+ */
+func plat_handle_el3_ea
+ b report_unhandled_exception
+endfunc plat_handle_el3_ea
diff --git a/plat/common/aarch64/platform_mp_stack.S b/plat/common/aarch64/platform_mp_stack.S
new file mode 100644
index 0000000..c0668ea
--- /dev/null
+++ b/plat/common/aarch64/platform_mp_stack.S
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <platform_def.h>
+
+ .local platform_normal_stacks
+ .weak plat_get_my_stack
+ .weak plat_set_my_stack
+
+ /* ---------------------------------------------------------------------
+ * When the compatibility layer is disabled, the platform APIs
+ * plat_get_my_stack() and plat_set_my_stack() are supported by the
+ * platform and the previous APIs platform_get_stack() and
+ * platform_set_stack() are defined in terms of new APIs making use of
+ * the fact that they are only ever invoked for the current CPU. This
+ * is to enable components of Trusted Firmware like SPDs using the old
+ * platform APIs to continue to work.
+ * --------------------------------------------------------------------
+ */
+
+ /* -----------------------------------------------------
+ * uintptr_t plat_get_my_stack ()
+ *
+ * For the current CPU, this function returns the stack
+ * pointer for a stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func plat_get_my_stack
+ mov x10, x30
+ get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ ret x10
+endfunc plat_get_my_stack
+
+ /* -----------------------------------------------------
+ * void plat_set_my_stack ()
+ *
+ * For the current CPU, this function sets the stack
+ * pointer to a stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func plat_set_my_stack
+ mov x9, x30
+ bl plat_get_my_stack
+ mov sp, x0
+ ret x9
+endfunc plat_set_my_stack
+
+ /* -----------------------------------------------------
+ * Per-CPU stacks in normal memory. Each CPU gets a
+ * stack of PLATFORM_STACK_SIZE bytes.
+ * -----------------------------------------------------
+ */
+declare_stack platform_normal_stacks, tzfw_normal_stacks, \
+ PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT, \
+ CACHE_WRITEBACK_GRANULE
diff --git a/plat/common/aarch64/platform_up_stack.S b/plat/common/aarch64/platform_up_stack.S
new file mode 100644
index 0000000..c6e5e2d
--- /dev/null
+++ b/plat/common/aarch64/platform_up_stack.S
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+
+ .local platform_normal_stacks
+ .weak plat_set_my_stack
+ .weak plat_get_my_stack
+
+ /* -----------------------------------------------------
+ * uintptr_t plat_get_my_stack ()
+ *
+ * For cold-boot BL images, only the primary CPU needs a
+ * stack. This function returns the stack pointer for a
+ * stack allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func plat_get_my_stack
+ get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ ret
+endfunc plat_get_my_stack
+
+ /* -----------------------------------------------------
+ * void plat_set_my_stack ()
+ *
+ * For cold-boot BL images, only the primary CPU needs a
+ * stack. This function sets the stack pointer to a stack
+ * allocated in normal memory.
+ * -----------------------------------------------------
+ */
+func plat_set_my_stack
+ get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
+ mov sp, x0
+ ret
+endfunc plat_set_my_stack
+
+ /* -----------------------------------------------------
+ * Single cpu stack in normal memory.
+ * Used for C code during boot, PLATFORM_STACK_SIZE bytes
+ * are allocated
+ * -----------------------------------------------------
+ */
+declare_stack platform_normal_stacks, tzfw_normal_stacks, \
+ PLATFORM_STACK_SIZE, 1, CACHE_WRITEBACK_GRANULE
diff --git a/plat/common/plat_bl1_common.c b/plat/common/plat_bl1_common.c
new file mode 100644
index 0000000..bcf9f89
--- /dev/null
+++ b/plat/common/plat_bl1_common.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015-2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+
+#include <platform_def.h>
+
+#include <arch_helpers.h>
+#include <bl1/bl1.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <plat/common/platform.h>
+
+/*
+ * The following platform functions are weakly defined. They
+ * are default implementations that allow BL1 to compile in
+ * absence of real definitions. The Platforms may override
+ * with more complex definitions.
+ */
+#pragma weak bl1_plat_get_next_image_id
+#pragma weak bl1_plat_set_ep_info
+#pragma weak bl1_plat_get_image_desc
+#pragma weak bl1_plat_fwu_done
+#pragma weak bl1_plat_handle_pre_image_load
+#pragma weak bl1_plat_handle_post_image_load
+
+unsigned int bl1_plat_get_next_image_id(void)
+{
+ /* BL2 load will be done by default. */
+ return BL2_IMAGE_ID;
+}
+
+void bl1_plat_set_ep_info(unsigned int image_id,
+ struct entry_point_info *ep_info)
+{
+
+}
+
+int bl1_plat_handle_pre_image_load(unsigned int image_id)
+{
+ return 0;
+}
+
+/*
+ * The following is the default definition, which always
+ * returns the BL2 image details.
+ */
+struct image_desc *bl1_plat_get_image_desc(unsigned int image_id)
+{
+ static image_desc_t bl2_img_desc = BL2_IMAGE_DESC;
+ return &bl2_img_desc;
+}
+
+__dead2 void bl1_plat_fwu_done(void *client_cookie, void *reserved)
+{
+ while (true)
+ wfi();
+}
+
+/*
+ * Platforms must override this with a real definition.
+ */
+#pragma weak bl1_plat_mem_check
+
+int bl1_plat_mem_check(uintptr_t mem_base, unsigned int mem_size,
+ unsigned int flags)
+{
+ assert(0);
+ return -ENOMEM;
+}
+
+/*
+ * Default implementation for bl1_plat_handle_post_image_load(). This function
+ * populates the default arguments to BL2. The BL2 memory layout structure
+ * is allocated, filled with the calculated layout and passed to BL2 via arg1.
+ */
+int bl1_plat_handle_post_image_load(unsigned int image_id)
+{
+ meminfo_t *bl2_secram_layout;
+ meminfo_t *bl1_secram_layout;
+ image_desc_t *image_desc;
+ entry_point_info_t *ep_info;
+
+ if (image_id != BL2_IMAGE_ID)
+ return 0;
+
+ /* Get the image descriptor */
+ image_desc = bl1_plat_get_image_desc(BL2_IMAGE_ID);
+ assert(image_desc != NULL);
+
+ /* Get the entry point info */
+ ep_info = &image_desc->ep_info;
+
+ /* Find out how much free trusted ram remains after BL1 load */
+ bl1_secram_layout = bl1_plat_sec_mem_layout();
+
+ /*
+ * Create a new layout of memory for BL2 as seen by BL1 i.e.
+ * tell it the amount of total and free memory available.
+ * This layout is created at the first free address visible
+ * to BL2. BL2 will read the memory layout before using its
+ * memory for other purposes.
+ */
+ bl2_secram_layout = (meminfo_t *) bl1_secram_layout->total_base;
+
+ bl1_calc_bl2_mem_layout(bl1_secram_layout, bl2_secram_layout);
+
+ ep_info->args.arg1 = (uintptr_t)bl2_secram_layout;
+
+ VERBOSE("BL1: BL2 memory layout address = %p\n",
+ (void *) bl2_secram_layout);
+ return 0;
+}
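
Because the weak bl1_plat_mem_check() above just asserts, any platform that enables the BL1 FWU SMC path must provide a real bounds check on the memory ranges passed in by the Non-secure world. A hedged sketch with an assumed staging-buffer window:

#include <errno.h>
#include <stdint.h>

/* Illustrative staging-buffer window; real values belong in platform_def.h. */
#define PLAT_FWU_BUF_BASE	0x88000000ULL
#define PLAT_FWU_BUF_SIZE	0x00400000ULL

int bl1_plat_mem_check(uintptr_t mem_base, unsigned int mem_size,
		       unsigned int flags)
{
	(void)flags;

	/*
	 * Reject wrap-around, then require the range to sit wholly inside
	 * the staging buffer reserved for FWU images.
	 */
	if ((mem_base + mem_size) < mem_base)
		return -ENOMEM;
	if ((mem_base < PLAT_FWU_BUF_BASE) ||
	    ((mem_base + mem_size) > (PLAT_FWU_BUF_BASE + PLAT_FWU_BUF_SIZE)))
		return -ENOMEM;

	return 0;
}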
diff --git a/plat/common/plat_bl_common.c b/plat/common/plat_bl_common.c
new file mode 100644
index 0000000..89b77ba
--- /dev/null
+++ b/plat/common/plat_bl_common.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/xlat_tables/xlat_tables_compat.h>
+#include <plat/common/platform.h>
+#include <services/arm_arch_svc.h>
+#include <smccc_helpers.h>
+#include <tools_share/firmware_encrypted.h>
+
+/*
+ * The following platform functions are weakly defined. Platforms
+ * may redefine them with strong definitions.
+ */
+#pragma weak bl2_el3_plat_prepare_exit
+#pragma weak plat_error_handler
+#pragma weak bl2_plat_preload_setup
+#pragma weak bl2_plat_handle_pre_image_load
+#pragma weak bl2_plat_handle_post_image_load
+#pragma weak plat_try_next_boot_source
+#pragma weak plat_get_enc_key_info
+#pragma weak plat_is_smccc_feature_available
+#pragma weak plat_get_soc_version
+#pragma weak plat_get_soc_revision
+
+int32_t plat_get_soc_version(void)
+{
+ return SMC_ARCH_CALL_NOT_SUPPORTED;
+}
+
+int32_t plat_get_soc_revision(void)
+{
+ return SMC_ARCH_CALL_NOT_SUPPORTED;
+}
+
+int32_t plat_is_smccc_feature_available(u_register_t fid __unused)
+{
+ return SMC_ARCH_CALL_NOT_SUPPORTED;
+}
+
+void bl2_el3_plat_prepare_exit(void)
+{
+}
+
+void __dead2 plat_error_handler(int err)
+{
+ while (1)
+ wfi();
+}
+
+void bl2_plat_preload_setup(void)
+{
+}
+
+int bl2_plat_handle_pre_image_load(unsigned int image_id)
+{
+ return 0;
+}
+
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+ return 0;
+}
+
+int plat_try_next_boot_source(void)
+{
+ return 0;
+}
+
+/*
+ * Weak implementation that provides a dummy decryption key for test purposes
+ * only; platforms must override this API for any real-world firmware
+ * encryption use case.
+ */
+int plat_get_enc_key_info(enum fw_enc_status_t fw_enc_status, uint8_t *key,
+ size_t *key_len, unsigned int *flags,
+ const uint8_t *img_id, size_t img_id_len)
+{
+#define DUMMY_FIP_ENC_KEY { 0x12, 0x34, 0x56, 0x78, 0x90, 0xab, 0xcd, 0xef, \
+ 0x12, 0x34, 0x56, 0x78, 0x90, 0xab, 0xcd, 0xef, \
+ 0x12, 0x34, 0x56, 0x78, 0x90, 0xab, 0xcd, 0xef, \
+ 0x12, 0x34, 0x56, 0x78, 0x90, 0xab, 0xcd, 0xef }
+
+ const uint8_t dummy_key[] = DUMMY_FIP_ENC_KEY;
+
+ assert(*key_len >= sizeof(dummy_key));
+
+ *key_len = sizeof(dummy_key);
+ memcpy(key, dummy_key, *key_len);
+ *flags = 0;
+
+ return 0;
+}
+
+/*
+ * Set up the page tables for the generic and platform-specific memory regions.
+ * The size of the Trusted SRAM seen by the BL image must be specified as well
+ * as an array specifying the generic memory regions, which can be:
+ * - Code section;
+ * - Read-only data section;
+ * - Init code section, if applicable;
+ * - Coherent memory region, if applicable.
+ */
+
+void __init setup_page_tables(const mmap_region_t *bl_regions,
+ const mmap_region_t *plat_regions)
+{
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+ const mmap_region_t *regions = bl_regions;
+
+ while (regions->size != 0U) {
+ VERBOSE("Region: 0x%lx - 0x%lx has attributes 0x%x\n",
+ regions->base_va,
+ regions->base_va + regions->size,
+ regions->attr);
+ regions++;
+ }
+#endif
+ /*
+ * Map the Trusted SRAM with appropriate memory attributes.
+ * Subsequent mappings will adjust the attributes for specific regions.
+ */
+ mmap_add(bl_regions);
+
+ /* Now (re-)map the platform-specific memory regions */
+ mmap_add(plat_regions);
+
+ /* Create the page tables to reflect the above mappings */
+ init_xlat_tables();
+}
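
For context, this is roughly how a BL image feeds setup_page_tables() above: a generic region array describing its own code and data plus a platform-specific array. This is a hedged sketch; the BL31_*/BL_* symbols are the usual linker-exported ones and the surrounding function is illustrative.

#include <common/bl_common.h>
#include <lib/xlat_tables/xlat_tables_compat.h>
#include <platform_def.h>

/* Illustrative caller; plat_regions is the platform's own mmap array. */
void plat_setup_xlat_example(const mmap_region_t *plat_regions)
{
	const mmap_region_t bl_regions[] = {
		/* Whole image as secure normal read-write memory... */
		MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE,
				MT_MEMORY | MT_RW | MT_SECURE),
		/* ...then tighten code and read-only data. */
		MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
				MT_CODE | MT_SECURE),
		MAP_REGION_FLAT(BL_RO_DATA_BASE,
				BL_RO_DATA_END - BL_RO_DATA_BASE,
				MT_RO_DATA | MT_SECURE),
		{0}	/* zero-size terminator expected by mmap_add() */
	};

	setup_page_tables(bl_regions, plat_regions);
}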
diff --git a/plat/common/plat_gicv2.c b/plat/common/plat_gicv2.c
new file mode 100644
index 0000000..0f988dc
--- /dev/null
+++ b/plat/common/plat_gicv2.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Portions copyright (c) 2021-2022, ProvenRun S.A.S. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <bl31/interrupt_mgmt.h>
+#include <drivers/arm/gic_common.h>
+#include <drivers/arm/gicv2.h>
+#include <plat/common/platform.h>
+
+/*
+ * The following platform GIC functions are weakly defined. They
+ * provide typical implementations that may be re-used by multiple
+ * platforms but may also be overridden by a platform if required.
+ */
+#pragma weak plat_ic_get_pending_interrupt_id
+#pragma weak plat_ic_get_pending_interrupt_type
+#pragma weak plat_ic_acknowledge_interrupt
+#pragma weak plat_ic_get_interrupt_type
+#pragma weak plat_ic_end_of_interrupt
+#pragma weak plat_interrupt_type_to_line
+
+#pragma weak plat_ic_get_running_priority
+#pragma weak plat_ic_is_spi
+#pragma weak plat_ic_is_ppi
+#pragma weak plat_ic_is_sgi
+#pragma weak plat_ic_get_interrupt_active
+#pragma weak plat_ic_enable_interrupt
+#pragma weak plat_ic_disable_interrupt
+#pragma weak plat_ic_set_interrupt_priority
+#pragma weak plat_ic_set_interrupt_type
+#pragma weak plat_ic_raise_el3_sgi
+#pragma weak plat_ic_raise_ns_sgi
+#pragma weak plat_ic_raise_s_el1_sgi
+#pragma weak plat_ic_set_spi_routing
+
+/*
+ * This function returns the highest priority pending interrupt at
+ * the Interrupt controller
+ */
+uint32_t plat_ic_get_pending_interrupt_id(void)
+{
+ unsigned int id;
+
+ id = gicv2_get_pending_interrupt_id();
+ if (id == GIC_SPURIOUS_INTERRUPT)
+ return INTR_ID_UNAVAILABLE;
+
+ return id;
+}
+
+/*
+ * This function returns the type of the highest priority pending interrupt
+ * at the Interrupt controller. In the case of GICv2, the Highest Priority
+ * Pending interrupt register (`GICC_HPPIR`) is read to determine the id of
+ * the pending interrupt. The type of interrupt depends upon the id value
+ * as follows.
+ * 1. id < PENDING_G1_INTID (1022) is reported as an S-EL1 interrupt.
+ * 2. id = PENDING_G1_INTID (1022) is reported as a Non-secure interrupt.
+ * 3. id = GIC_SPURIOUS_INTERRUPT (1023) is reported as an invalid interrupt
+ * type.
+ */
+uint32_t plat_ic_get_pending_interrupt_type(void)
+{
+ unsigned int id;
+
+ id = gicv2_get_pending_interrupt_type();
+
+ /* Assume that all secure interrupts are S-EL1 interrupts */
+ if (id < PENDING_G1_INTID) {
+#if GICV2_G0_FOR_EL3
+ return INTR_TYPE_EL3;
+#else
+ return INTR_TYPE_S_EL1;
+#endif
+ }
+
+ if (id == GIC_SPURIOUS_INTERRUPT)
+ return INTR_TYPE_INVAL;
+
+ return INTR_TYPE_NS;
+}
+
+/*
+ * This function returns the highest priority pending interrupt at
+ * the Interrupt controller and indicates to the Interrupt controller
+ * that the interrupt processing has started.
+ */
+uint32_t plat_ic_acknowledge_interrupt(void)
+{
+ return gicv2_acknowledge_interrupt();
+}
+
+/*
+ * This function returns the type of the interrupt `id`, depending on how
+ * the interrupt has been configured in the interrupt controller
+ */
+uint32_t plat_ic_get_interrupt_type(uint32_t id)
+{
+ unsigned int type;
+
+ type = gicv2_get_interrupt_group(id);
+
+ /* Assume that all secure interrupts are S-EL1 interrupts */
+ return (type == GICV2_INTR_GROUP1) ? INTR_TYPE_NS :
+#if GICV2_G0_FOR_EL3
+ INTR_TYPE_EL3;
+#else
+ INTR_TYPE_S_EL1;
+#endif
+}
+
+/*
+ * This function is used to indicate to the interrupt controller that
+ * the processing of the interrupt corresponding to the `id` has
+ * finished.
+ */
+void plat_ic_end_of_interrupt(uint32_t id)
+{
+ gicv2_end_of_interrupt(id);
+}
+
+/*
+ * An ARM processor signals interrupt exceptions through the IRQ and FIQ pins.
+ * The interrupt controller knows which pin/line it uses to signal a type of
+ * interrupt. It lets the interrupt management framework determine,
+ * for a given interrupt type and security state, which line should be used in the
+ * SCR_EL3 to control its routing to EL3. The interrupt line is represented
+ * as the bit position of the IRQ or FIQ bit in the SCR_EL3.
+ */
+uint32_t plat_interrupt_type_to_line(uint32_t type,
+ uint32_t security_state)
+{
+ assert((type == INTR_TYPE_S_EL1) || (type == INTR_TYPE_EL3) ||
+ (type == INTR_TYPE_NS));
+
+ assert(sec_state_is_valid(security_state));
+
+ /* Non-secure interrupts are signaled on the IRQ line always */
+ if (type == INTR_TYPE_NS)
+ return __builtin_ctz(SCR_IRQ_BIT);
+
+ /*
+	 * Secure interrupts are signaled using the FIQ line if FIQ signaling
+	 * is enabled in the GIC, and using the IRQ line otherwise.
+ */
+ return ((gicv2_is_fiq_enabled() != 0U) ? __builtin_ctz(SCR_FIQ_BIT) :
+ __builtin_ctz(SCR_IRQ_BIT));
+}
+
+unsigned int plat_ic_get_running_priority(void)
+{
+ return gicv2_get_running_priority();
+}
+
+int plat_ic_is_spi(unsigned int id)
+{
+ return (id >= MIN_SPI_ID) && (id <= MAX_SPI_ID);
+}
+
+int plat_ic_is_ppi(unsigned int id)
+{
+ return (id >= MIN_PPI_ID) && (id < MIN_SPI_ID);
+}
+
+int plat_ic_is_sgi(unsigned int id)
+{
+ return (id >= MIN_SGI_ID) && (id < MIN_PPI_ID);
+}
+
+unsigned int plat_ic_get_interrupt_active(unsigned int id)
+{
+ return gicv2_get_interrupt_active(id);
+}
+
+void plat_ic_enable_interrupt(unsigned int id)
+{
+ gicv2_enable_interrupt(id);
+}
+
+void plat_ic_disable_interrupt(unsigned int id)
+{
+ gicv2_disable_interrupt(id);
+}
+
+void plat_ic_set_interrupt_priority(unsigned int id, unsigned int priority)
+{
+ gicv2_set_interrupt_priority(id, priority);
+}
+
+int plat_ic_has_interrupt_type(unsigned int type)
+{
+ int has_interrupt_type = 0;
+
+ switch (type) {
+#if GICV2_G0_FOR_EL3
+ case INTR_TYPE_EL3:
+#else
+ case INTR_TYPE_S_EL1:
+#endif
+ case INTR_TYPE_NS:
+ has_interrupt_type = 1;
+ break;
+ default:
+ /* Do nothing in default case */
+ break;
+ }
+
+ return has_interrupt_type;
+}
+
+void plat_ic_set_interrupt_type(unsigned int id, unsigned int type)
+{
+ unsigned int gicv2_type = 0U;
+
+ /* Map canonical interrupt type to GICv2 type */
+ switch (type) {
+#if GICV2_G0_FOR_EL3
+ case INTR_TYPE_EL3:
+#else
+ case INTR_TYPE_S_EL1:
+#endif
+ gicv2_type = GICV2_INTR_GROUP0;
+ break;
+ case INTR_TYPE_NS:
+ gicv2_type = GICV2_INTR_GROUP1;
+ break;
+ default:
+ assert(0); /* Unreachable */
+ break;
+ }
+
+ gicv2_set_interrupt_type(id, gicv2_type);
+}
+
+void plat_ic_raise_el3_sgi(int sgi_num, u_register_t target)
+{
+#if GICV2_G0_FOR_EL3
+ int id;
+
+ /* Target must be a valid MPIDR in the system */
+ id = plat_core_pos_by_mpidr(target);
+ assert(id >= 0);
+
+ /* Verify that this is a secure SGI */
+ assert(plat_ic_get_interrupt_type(sgi_num) == INTR_TYPE_EL3);
+
+ gicv2_raise_sgi(sgi_num, false, id);
+#else
+ assert(false);
+#endif
+}
+
+void plat_ic_raise_ns_sgi(int sgi_num, u_register_t target)
+{
+ int id;
+
+ /* Target must be a valid MPIDR in the system */
+ id = plat_core_pos_by_mpidr(target);
+ assert(id >= 0);
+
+ /* Verify that this is a non-secure SGI */
+ assert(plat_ic_get_interrupt_type(sgi_num) == INTR_TYPE_NS);
+
+ gicv2_raise_sgi(sgi_num, true, id);
+}
+
+void plat_ic_raise_s_el1_sgi(int sgi_num, u_register_t target)
+{
+#if GICV2_G0_FOR_EL3
+ assert(false);
+#else
+ int id;
+
+ /* Target must be a valid MPIDR in the system */
+ id = plat_core_pos_by_mpidr(target);
+ assert(id >= 0);
+
+ /* Verify that this is a secure EL1 SGI */
+ assert(plat_ic_get_interrupt_type(sgi_num) == INTR_TYPE_S_EL1);
+
+ gicv2_raise_sgi(sgi_num, false, id);
+#endif
+}
+
+void plat_ic_set_spi_routing(unsigned int id, unsigned int routing_mode,
+ u_register_t mpidr)
+{
+ int proc_num = 0;
+
+ switch (routing_mode) {
+ case INTR_ROUTING_MODE_PE:
+ proc_num = plat_core_pos_by_mpidr(mpidr);
+ assert(proc_num >= 0);
+ break;
+ case INTR_ROUTING_MODE_ANY:
+ /* Bit mask selecting all 8 CPUs as candidates */
+ proc_num = -1;
+ break;
+ default:
+ assert(0); /* Unreachable */
+ break;
+ }
+
+ gicv2_set_spi_routing(id, proc_num);
+}
+
+void plat_ic_set_interrupt_pending(unsigned int id)
+{
+ gicv2_set_interrupt_pending(id);
+}
+
+void plat_ic_clear_interrupt_pending(unsigned int id)
+{
+ gicv2_clear_interrupt_pending(id);
+}
+
+unsigned int plat_ic_set_priority_mask(unsigned int mask)
+{
+ return gicv2_set_pmr(mask);
+}
+
+unsigned int plat_ic_get_interrupt_id(unsigned int raw)
+{
+ unsigned int id = (raw & INT_ID_MASK);
+
+ if (id == GIC_SPURIOUS_INTERRUPT)
+ id = INTR_ID_UNAVAILABLE;
+
+ return id;
+}
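
To show how the GICv2 wrappers above are typically combined, here is a hedged boot-time snippet that configures one Non-secure SPI and routes it to a single core. The interrupt ID, priority and calling convention are example assumptions, not part of this patch.

#include <bl31/interrupt_mgmt.h>
#include <plat/common/platform.h>

#define EXAMPLE_SPI_ID		40U	/* assumed SPI number */
#define EXAMPLE_SPI_PRI		0xA0U	/* assumed priority */

static void plat_route_example_spi(u_register_t target_mpidr)
{
	plat_ic_set_interrupt_type(EXAMPLE_SPI_ID, INTR_TYPE_NS);
	plat_ic_set_interrupt_priority(EXAMPLE_SPI_ID, EXAMPLE_SPI_PRI);
	/* Deliver this SPI only to the given core. */
	plat_ic_set_spi_routing(EXAMPLE_SPI_ID, INTR_ROUTING_MODE_PE,
				target_mpidr);
	plat_ic_enable_interrupt(EXAMPLE_SPI_ID);
}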
diff --git a/plat/common/plat_gicv3.c b/plat/common/plat_gicv3.c
new file mode 100644
index 0000000..e1420bb
--- /dev/null
+++ b/plat/common/plat_gicv3.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Portions copyright (c) 2021-2022, ProvenRun S.A.S. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <bl31/interrupt_mgmt.h>
+#include <drivers/arm/gic_common.h>
+#include <drivers/arm/gicv3.h>
+#include <lib/cassert.h>
+#include <plat/common/platform.h>
+
+#ifdef IMAGE_BL31
+
+/*
+ * The following platform GIC functions are weakly defined. They
+ * provide typical implementations that may be re-used by multiple
+ * platforms but may also be overridden by a platform if required.
+ */
+#pragma weak plat_ic_get_pending_interrupt_id
+#pragma weak plat_ic_get_pending_interrupt_type
+#pragma weak plat_ic_acknowledge_interrupt
+#pragma weak plat_ic_get_interrupt_type
+#pragma weak plat_ic_end_of_interrupt
+#pragma weak plat_interrupt_type_to_line
+
+#pragma weak plat_ic_get_running_priority
+#pragma weak plat_ic_is_spi
+#pragma weak plat_ic_is_ppi
+#pragma weak plat_ic_is_sgi
+#pragma weak plat_ic_get_interrupt_active
+#pragma weak plat_ic_enable_interrupt
+#pragma weak plat_ic_disable_interrupt
+#pragma weak plat_ic_set_interrupt_priority
+#pragma weak plat_ic_set_interrupt_type
+#pragma weak plat_ic_raise_el3_sgi
+#pragma weak plat_ic_raise_ns_sgi
+#pragma weak plat_ic_raise_s_el1_sgi
+#pragma weak plat_ic_set_spi_routing
+#pragma weak plat_ic_set_interrupt_pending
+#pragma weak plat_ic_clear_interrupt_pending
+
+CASSERT((INTR_TYPE_S_EL1 == INTR_GROUP1S) &&
+ (INTR_TYPE_NS == INTR_GROUP1NS) &&
+ (INTR_TYPE_EL3 == INTR_GROUP0), assert_interrupt_type_mismatch);
+
+/*
+ * This function returns the highest priority pending interrupt at
+ * the Interrupt controller
+ */
+uint32_t plat_ic_get_pending_interrupt_id(void)
+{
+ unsigned int irqnr;
+
+ assert(IS_IN_EL3());
+ irqnr = gicv3_get_pending_interrupt_id();
+ return gicv3_is_intr_id_special_identifier(irqnr) ?
+ INTR_ID_UNAVAILABLE : irqnr;
+}
+
+/*
+ * This function returns the type of the highest priority pending interrupt
+ * at the Interrupt controller. In the case of GICv3, the Highest Priority
+ * Pending interrupt system register (`ICC_HPPIR0_EL1`) is read to determine
+ * the id of the pending interrupt. The type of interrupt depends upon the
+ * id value as follows.
+ * 1. id = PENDING_G1S_INTID (1020) is reported as an S-EL1 interrupt.
+ * 2. id = PENDING_G1NS_INTID (1021) is reported as a Non-secure interrupt.
+ * 3. id = GIC_SPURIOUS_INTERRUPT (1023) is reported as an invalid interrupt
+ * type.
+ * 4. All other interrupt IDs are reported as EL3 interrupts.
+ */
+uint32_t plat_ic_get_pending_interrupt_type(void)
+{
+ unsigned int irqnr;
+ uint32_t type;
+
+ assert(IS_IN_EL3());
+ irqnr = gicv3_get_pending_interrupt_type();
+
+ switch (irqnr) {
+ case PENDING_G1S_INTID:
+ type = INTR_TYPE_S_EL1;
+ break;
+ case PENDING_G1NS_INTID:
+ type = INTR_TYPE_NS;
+ break;
+ case GIC_SPURIOUS_INTERRUPT:
+ type = INTR_TYPE_INVAL;
+ break;
+ default:
+ type = INTR_TYPE_EL3;
+ break;
+ }
+
+ return type;
+}
+
+/*
+ * This function returns the highest priority pending interrupt at
+ * the Interrupt controller and indicates to the Interrupt controller
+ * that the interrupt processing has started.
+ */
+uint32_t plat_ic_acknowledge_interrupt(void)
+{
+ assert(IS_IN_EL3());
+ return gicv3_acknowledge_interrupt();
+}
+
+/*
+ * This function returns the type of the interrupt `id`, depending on how
+ * the interrupt has been configured in the interrupt controller
+ */
+uint32_t plat_ic_get_interrupt_type(uint32_t id)
+{
+ assert(IS_IN_EL3());
+ return gicv3_get_interrupt_type(id, plat_my_core_pos());
+}
+
+/*
+ * This function is used to indicate to the interrupt controller that
+ * the processing of the interrupt corresponding to the `id` has
+ * finished.
+ */
+void plat_ic_end_of_interrupt(uint32_t id)
+{
+ assert(IS_IN_EL3());
+ gicv3_end_of_interrupt(id);
+}
+
+/*
+ * An ARM processor signals interrupt exceptions through the IRQ and FIQ pins.
+ * The interrupt controller knows which pin/line it uses to signal a type of
+ * interrupt. It lets the interrupt management framework determine, for a given
+ * interrupt type and security state, which line should be used in the SCR_EL3 to
+ * control its routing to EL3. The interrupt line is represented as the bit
+ * position of the IRQ or FIQ bit in the SCR_EL3.
+ */
+uint32_t plat_interrupt_type_to_line(uint32_t type,
+ uint32_t security_state)
+{
+ assert((type == INTR_TYPE_S_EL1) ||
+ (type == INTR_TYPE_EL3) ||
+ (type == INTR_TYPE_NS));
+
+ assert(sec_state_is_valid(security_state));
+ assert(IS_IN_EL3());
+
+ switch (type) {
+ case INTR_TYPE_S_EL1:
+ /*
+ * The S-EL1 interrupts are signaled as IRQ in S-EL0/1 contexts
+ * and as FIQ in the NS-EL0/1/2 contexts
+ */
+ if (security_state == SECURE)
+ return __builtin_ctz(SCR_IRQ_BIT);
+ else
+ return __builtin_ctz(SCR_FIQ_BIT);
+ assert(0); /* Unreachable */
+ case INTR_TYPE_NS:
+ /*
+		 * The Non-secure interrupts will be signaled as FIQ in S-EL0/1
+ * contexts and as IRQ in the NS-EL0/1/2 contexts.
+ */
+ if (security_state == SECURE)
+ return __builtin_ctz(SCR_FIQ_BIT);
+ else
+ return __builtin_ctz(SCR_IRQ_BIT);
+ assert(0); /* Unreachable */
+ case INTR_TYPE_EL3:
+ /*
+ * The EL3 interrupts are signaled as FIQ in both S-EL0/1 and
+ * NS-EL0/1/2 contexts
+ */
+ return __builtin_ctz(SCR_FIQ_BIT);
+ default:
+ panic();
+ }
+}
+
+unsigned int plat_ic_get_running_priority(void)
+{
+ return gicv3_get_running_priority();
+}
+
+int plat_ic_is_spi(unsigned int id)
+{
+ return (id >= MIN_SPI_ID) && (id <= MAX_SPI_ID);
+}
+
+int plat_ic_is_ppi(unsigned int id)
+{
+ return (id >= MIN_PPI_ID) && (id < MIN_SPI_ID);
+}
+
+int plat_ic_is_sgi(unsigned int id)
+{
+ return (id >= MIN_SGI_ID) && (id < MIN_PPI_ID);
+}
+
+unsigned int plat_ic_get_interrupt_active(unsigned int id)
+{
+ return gicv3_get_interrupt_active(id, plat_my_core_pos());
+}
+
+void plat_ic_enable_interrupt(unsigned int id)
+{
+ gicv3_enable_interrupt(id, plat_my_core_pos());
+}
+
+void plat_ic_disable_interrupt(unsigned int id)
+{
+ gicv3_disable_interrupt(id, plat_my_core_pos());
+}
+
+void plat_ic_set_interrupt_priority(unsigned int id, unsigned int priority)
+{
+ gicv3_set_interrupt_priority(id, plat_my_core_pos(), priority);
+}
+
+int plat_ic_has_interrupt_type(unsigned int type)
+{
+ assert((type == INTR_TYPE_EL3) || (type == INTR_TYPE_S_EL1) ||
+ (type == INTR_TYPE_NS));
+ return 1;
+}
+
+void plat_ic_set_interrupt_type(unsigned int id, unsigned int type)
+{
+ gicv3_set_interrupt_type(id, plat_my_core_pos(), type);
+}
+
+void plat_ic_raise_el3_sgi(int sgi_num, u_register_t target)
+{
+ /* Target must be a valid MPIDR in the system */
+ assert(plat_core_pos_by_mpidr(target) >= 0);
+
+ /* Verify that this is a secure EL3 SGI */
+ assert(plat_ic_get_interrupt_type((unsigned int)sgi_num) ==
+ INTR_TYPE_EL3);
+
+ gicv3_raise_sgi((unsigned int)sgi_num, GICV3_G0, target);
+}
+
+void plat_ic_raise_ns_sgi(int sgi_num, u_register_t target)
+{
+ /* Target must be a valid MPIDR in the system */
+ assert(plat_core_pos_by_mpidr(target) >= 0);
+
+ /* Verify that this is a non-secure SGI */
+ assert(plat_ic_get_interrupt_type((unsigned int)sgi_num) ==
+ INTR_TYPE_NS);
+
+ gicv3_raise_sgi((unsigned int)sgi_num, GICV3_G1NS, target);
+}
+
+void plat_ic_raise_s_el1_sgi(int sgi_num, u_register_t target)
+{
+ /* Target must be a valid MPIDR in the system */
+ assert(plat_core_pos_by_mpidr(target) >= 0);
+
+ /* Verify that this is a secure EL1 SGI */
+ assert(plat_ic_get_interrupt_type((unsigned int)sgi_num) ==
+ INTR_TYPE_S_EL1);
+
+ gicv3_raise_sgi((unsigned int)sgi_num, GICV3_G1S, target);
+}
+
+void plat_ic_set_spi_routing(unsigned int id, unsigned int routing_mode,
+ u_register_t mpidr)
+{
+ unsigned int irm = 0;
+
+ switch (routing_mode) {
+ case INTR_ROUTING_MODE_PE:
+ assert(plat_core_pos_by_mpidr(mpidr) >= 0);
+ irm = GICV3_IRM_PE;
+ break;
+ case INTR_ROUTING_MODE_ANY:
+ irm = GICV3_IRM_ANY;
+ break;
+ default:
+ assert(0); /* Unreachable */
+ break;
+ }
+
+ gicv3_set_spi_routing(id, irm, mpidr);
+}
+
+void plat_ic_set_interrupt_pending(unsigned int id)
+{
+ /* Disallow setting SGIs pending */
+ assert(id >= MIN_PPI_ID);
+ gicv3_set_interrupt_pending(id, plat_my_core_pos());
+}
+
+void plat_ic_clear_interrupt_pending(unsigned int id)
+{
+	/* Disallow clearing SGIs pending */
+ assert(id >= MIN_PPI_ID);
+ gicv3_clear_interrupt_pending(id, plat_my_core_pos());
+}
+
+unsigned int plat_ic_set_priority_mask(unsigned int mask)
+{
+ return gicv3_set_pmr(mask);
+}
+
+unsigned int plat_ic_get_interrupt_id(unsigned int raw)
+{
+ unsigned int id = raw & INT_ID_MASK;
+
+ return gicv3_is_intr_id_special_identifier(id) ?
+ INTR_ID_UNAVAILABLE : id;
+}
+#endif
+#ifdef IMAGE_BL32
+
+#pragma weak plat_ic_get_pending_interrupt_id
+#pragma weak plat_ic_acknowledge_interrupt
+#pragma weak plat_ic_end_of_interrupt
+
+/* In AArch32, the secure group1 interrupts are targeted to Secure PL1 */
+#ifndef __aarch64__
+#define IS_IN_EL1() IS_IN_SECURE()
+#endif
+
+/*
+ * This function returns the highest priority pending interrupt at
+ * the Interrupt controller
+ */
+uint32_t plat_ic_get_pending_interrupt_id(void)
+{
+ unsigned int irqnr;
+
+ assert(IS_IN_EL1());
+ irqnr = gicv3_get_pending_interrupt_id_sel1();
+ return (irqnr == GIC_SPURIOUS_INTERRUPT) ?
+ INTR_ID_UNAVAILABLE : irqnr;
+}
+
+/*
+ * This function returns the highest priority pending interrupt at
+ * the Interrupt controller and indicates to the Interrupt controller
+ * that the interrupt processing has started.
+ */
+uint32_t plat_ic_acknowledge_interrupt(void)
+{
+ assert(IS_IN_EL1());
+ return gicv3_acknowledge_interrupt_sel1();
+}
+
+/*
+ * This function is used to indicate to the interrupt controller that
+ * the processing of the interrupt corresponding to the `id` has
+ * finished.
+ */
+void plat_ic_end_of_interrupt(uint32_t id)
+{
+ assert(IS_IN_EL1());
+ gicv3_end_of_interrupt_sel1(id);
+}
+#endif
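
As a small worked example of the line mapping described above, the value returned by plat_interrupt_type_to_line() is the bit position to set in SCR_EL3; with GICv3, S-EL1 interrupts taken while the Non-secure world runs map to the FIQ line. A hedged sketch, assuming the generic interrupt-management and entry-point-info headers provide INTR_TYPE_S_EL1 and NON_SECURE:

#include <stdint.h>

#include <arch.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <plat/common/platform.h>

/*
 * Returns the SCR_EL3 bit that routes S-EL1 interrupts to EL3 while the
 * Non-secure world runs; with GICv3 this equals SCR_FIQ_BIT.
 */
static uint64_t example_scr_routing_bit(void)
{
	uint32_t line = plat_interrupt_type_to_line(INTR_TYPE_S_EL1,
						    NON_SECURE);

	return UINT64_C(1) << line;
}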
diff --git a/plat/common/plat_log_common.c b/plat/common/plat_log_common.c
new file mode 100644
index 0000000..66b9758
--- /dev/null
+++ b/plat/common/plat_log_common.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/debug.h>
+#include <plat/common/platform.h>
+
+/* Allow platforms to override the log prefix string */
+#pragma weak plat_log_get_prefix
+
+static const char *plat_prefix_str[] = {
+ "ERROR: ", "NOTICE: ", "WARNING: ", "INFO: ", "VERBOSE: "};
+
+const char *plat_log_get_prefix(unsigned int log_level)
+{
+ unsigned int level;
+
+ if (log_level < LOG_LEVEL_ERROR) {
+ level = LOG_LEVEL_ERROR;
+ } else if (log_level > LOG_LEVEL_VERBOSE) {
+ level = LOG_LEVEL_VERBOSE;
+ } else {
+ level = log_level;
+ }
+
+ return plat_prefix_str[(level / 10U) - 1U];
+}
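
The prefix table above is indexed by the log level divided by ten: LOG_LEVEL_ERROR is 10 and LOG_LEVEL_VERBOSE is 50, so the clamping keeps the index between 0 and 4. A tiny usage sketch (the surrounding function is illustrative):

#include <common/debug.h>
#include <plat/common/platform.h>

static void log_prefix_example(void)
{
	/* LOG_LEVEL_INFO is 40, so (40 / 10) - 1 == 3 selects "INFO: ". */
	const char *prefix = plat_log_get_prefix(LOG_LEVEL_INFO);

	(void)prefix;
}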
diff --git a/plat/common/plat_psci_common.c b/plat/common/plat_psci_common.c
new file mode 100644
index 0000000..c32e59f
--- /dev/null
+++ b/plat/common/plat_psci_common.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch.h>
+#include <lib/pmf/pmf.h>
+#include <lib/psci/psci.h>
+#include <lib/utils_def.h>
+#include <plat/common/platform.h>
+
+#if ENABLE_PSCI_STAT && ENABLE_PMF
+#pragma weak plat_psci_stat_accounting_start
+#pragma weak plat_psci_stat_accounting_stop
+#pragma weak plat_psci_stat_get_residency
+
+/* Maximum time-stamp value read from architectural counters */
+#ifdef __aarch64__
+#define MAX_TS UINT64_MAX
+#else
+#define MAX_TS UINT32_MAX
+#endif
+
+/* The following are used as IDs to capture time-stamps */
+#define PSCI_STAT_ID_ENTER_LOW_PWR 0
+#define PSCI_STAT_ID_EXIT_LOW_PWR 1
+#define PSCI_STAT_TOTAL_IDS 2
+
+PMF_DECLARE_CAPTURE_TIMESTAMP(psci_svc)
+PMF_DECLARE_GET_TIMESTAMP(psci_svc)
+PMF_REGISTER_SERVICE(psci_svc, PMF_PSCI_STAT_SVC_ID, PSCI_STAT_TOTAL_IDS,
+ PMF_STORE_ENABLE)
+
+/*
+ * This function calculates the power state residency in microseconds,
+ * taking into account the wrap-around of the timestamp counter.
+ */
+static u_register_t calc_stat_residency(unsigned long long pwrupts,
+ unsigned long long pwrdnts)
+{
+ /* The divisor to use to convert raw timestamp into microseconds. */
+ u_register_t residency_div;
+ u_register_t res;
+
+ /*
+ * Calculate divisor so that it can be directly used to
+ * convert time-stamp into microseconds.
+ */
+ residency_div = read_cntfrq_el0() / MHZ_TICKS_PER_SEC;
+ assert(residency_div > 0U);
+
+ if (pwrupts < pwrdnts)
+ res = MAX_TS - pwrdnts + pwrupts;
+ else
+ res = pwrupts - pwrdnts;
+
+ return res / residency_div;
+}
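+
+/*
+ * Worked example (illustrative, assuming a 100 MHz architectural counter so
+ * that residency_div == 100): on AArch32, with MAX_TS == UINT32_MAX, a
+ * power-down timestamp of 0xFFFFFF00 followed by a power-up timestamp of
+ * 0x00000200 means the counter wrapped, so the residency is computed as
+ * (0xFFFFFFFF - 0xFFFFFF00 + 0x200) = 0x2FF ticks, i.e. roughly 7 us.
+ */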
+
+/*
+ * Capture timestamp before entering a low power state.
+ * Cache maintenance may be needed when reading these timestamps.
+ */
+void plat_psci_stat_accounting_start(
+ __unused const psci_power_state_t *state_info)
+{
+ assert(state_info != NULL);
+ PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+ PMF_CACHE_MAINT);
+}
+
+/*
+ * Capture timestamp after exiting a low power state.
+ * Cache maintenance may be needed when reading these timestamps.
+ */
+void plat_psci_stat_accounting_stop(
+ __unused const psci_power_state_t *state_info)
+{
+ assert(state_info != NULL);
+ PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
+ PMF_CACHE_MAINT);
+}
+
+/*
+ * Calculate the residency for the given level and power state
+ * information.
+ */
+u_register_t plat_psci_stat_get_residency(unsigned int lvl,
+ const psci_power_state_t *state_info,
+ unsigned int last_cpu_idx)
+{
+ plat_local_state_t state;
+ unsigned long long pwrup_ts = 0, pwrdn_ts = 0;
+ unsigned int pmf_flags;
+
+ assert((lvl >= PSCI_CPU_PWR_LVL) && (lvl <= PLAT_MAX_PWR_LVL));
+ assert(state_info != NULL);
+ assert(last_cpu_idx <= PLATFORM_CORE_COUNT);
+
+ if (lvl == PSCI_CPU_PWR_LVL)
+ assert(last_cpu_idx == plat_my_core_pos());
+
+ /*
+ * If power down is requested, then timestamp capture will
+ * be with caches OFF. Hence we have to do cache maintenance
+ * when reading the timestamp.
+ */
+ state = state_info->pwr_domain_state[PSCI_CPU_PWR_LVL];
+ if (is_local_state_off(state) != 0) {
+ pmf_flags = PMF_CACHE_MAINT;
+ } else {
+ assert(is_local_state_retn(state) == 1);
+ pmf_flags = PMF_NO_CACHE_MAINT;
+ }
+
+ PMF_GET_TIMESTAMP_BY_INDEX(psci_svc,
+ PSCI_STAT_ID_ENTER_LOW_PWR,
+ last_cpu_idx,
+ pmf_flags,
+ pwrdn_ts);
+
+ PMF_GET_TIMESTAMP_BY_INDEX(psci_svc,
+ PSCI_STAT_ID_EXIT_LOW_PWR,
+ plat_my_core_pos(),
+ pmf_flags,
+ pwrup_ts);
+
+ return calc_stat_residency(pwrup_ts, pwrdn_ts);
+}
+#endif /* ENABLE_PSCI_STAT && ENABLE_PMF */
+
+/*
+ * The PSCI generic code uses this API to let the platform participate in state
+ * coordination during a power management operation. It compares the platform
+ * specific local power states requested by each cpu for a given power domain
+ * and returns the coordinated target power state that the domain should
+ * enter. A platform assigns a number to a local power state. This default
+ * implementation assumes that the platform assigns these numbers in order of
+ * increasing depth of the power state i.e. for two power states X & Y, if X < Y
+ * then X represents a shallower power state than Y. As a result, the
+ * coordinated target local power state for a power domain will be the minimum
+ * of the requested local power states.
+ */
+plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
+ const plat_local_state_t *states,
+ unsigned int ncpu)
+{
+ plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
+ const plat_local_state_t *st = states;
+ unsigned int n = ncpu;
+
+ assert(ncpu > 0U);
+
+ do {
+ temp = *st;
+ st++;
+ if (temp < target)
+ target = temp;
+ n--;
+ } while (n > 0U);
+
+ return target;
+}
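+
+/*
+ * Worked example (illustrative, assuming the usual numbering where
+ * 0 = RUN, PLAT_MAX_RET_STATE = retention and PLAT_MAX_OFF_STATE = off):
+ * if three CPUs sharing a cluster request { PLAT_MAX_OFF_STATE,
+ * PLAT_MAX_RET_STATE, 0 } for the cluster level, the coordinated target
+ * returned here is 0, i.e. the cluster must remain on because one CPU
+ * still requires it to run.
+ */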
diff --git a/plat/common/plat_spmd_manifest.c b/plat/common/plat_spmd_manifest.c
new file mode 100644
index 0000000..b1fc13c
--- /dev/null
+++ b/plat/common/plat_spmd_manifest.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <libfdt.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <platform_def.h>
+#include <services/spm_core_manifest.h>
+
+#define ATTRIBUTE_ROOT_NODE_STR "attribute"
+
+/*******************************************************************************
+ * SPMC attribute node parser
+ ******************************************************************************/
+static int manifest_parse_attribute(spmc_manifest_attribute_t *attr,
+ const void *fdt,
+ int node)
+{
+ uint32_t val32;
+ int rc;
+
+ assert((attr != NULL) && (fdt != NULL));
+
+ rc = fdt_read_uint32(fdt, node, "maj_ver", &attr->major_version);
+ if (rc != 0) {
+ ERROR("Missing FFA %s version in SPM Core manifest.\n",
+ "major");
+ return rc;
+ }
+
+ rc = fdt_read_uint32(fdt, node, "min_ver", &attr->minor_version);
+ if (rc != 0) {
+ ERROR("Missing FFA %s version in SPM Core manifest.\n",
+ "minor");
+ return rc;
+ }
+
+ rc = fdt_read_uint32(fdt, node, "spmc_id", &val32);
+ if (rc != 0) {
+ ERROR("Missing SPMC ID in manifest.\n");
+ return rc;
+ }
+
+ attr->spmc_id = val32 & 0xffff;
+
+ rc = fdt_read_uint32(fdt, node, "exec_state", &attr->exec_state);
+ if (rc != 0) {
+ NOTICE("%s not specified in SPM Core manifest.\n",
+ "Execution state");
+ }
+
+ rc = fdt_read_uint32(fdt, node, "binary_size", &attr->binary_size);
+ if (rc != 0) {
+ NOTICE("%s not specified in SPM Core manifest.\n",
+ "Binary size");
+ }
+
+ rc = fdt_read_uint64(fdt, node, "load_address", &attr->load_address);
+ if (rc != 0) {
+ NOTICE("%s not specified in SPM Core manifest.\n",
+ "Load address");
+ }
+
+ rc = fdt_read_uint64(fdt, node, "entrypoint", &attr->entrypoint);
+ if (rc != 0) {
+ NOTICE("%s not specified in SPM Core manifest.\n",
+ "Entry point");
+ }
+
+ VERBOSE("SPM Core manifest attribute section:\n");
+ VERBOSE(" version: %u.%u\n", attr->major_version, attr->minor_version);
+ VERBOSE(" spmc_id: 0x%x\n", attr->spmc_id);
+ VERBOSE(" binary_size: 0x%x\n", attr->binary_size);
+ VERBOSE(" load_address: 0x%" PRIx64 "\n", attr->load_address);
+ VERBOSE(" entrypoint: 0x%" PRIx64 "\n", attr->entrypoint);
+
+ return 0;
+}
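+
+/*
+ * For illustration, an "attribute" node that this parser accepts could look
+ * as follows in the SPMC manifest device tree source (all values below are
+ * hypothetical):
+ *
+ *	attribute {
+ *		maj_ver = <0x1>;
+ *		min_ver = <0x0>;
+ *		spmc_id = <0x8000>;
+ *		exec_state = <0x0>;
+ *		binary_size = <0x80000>;
+ *		load_address = <0x0 0x6000000>;
+ *		entrypoint = <0x0 0x6000000>;
+ *	};
+ */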
+
+/*******************************************************************************
+ * Root node handler
+ ******************************************************************************/
+static int manifest_parse_root(spmc_manifest_attribute_t *manifest,
+ const void *fdt,
+ int root)
+{
+ int node;
+
+ assert(manifest != NULL);
+
+ node = fdt_subnode_offset_namelen(fdt, root, ATTRIBUTE_ROOT_NODE_STR,
+ sizeof(ATTRIBUTE_ROOT_NODE_STR) - 1);
+ if (node < 0) {
+ ERROR("Root node doesn't contain subnode '%s'\n",
+ ATTRIBUTE_ROOT_NODE_STR);
+ return node;
+ }
+
+ return manifest_parse_attribute(manifest, fdt, node);
+}
+
+/*******************************************************************************
+ * Platform handler to parse an SPM Core manifest.
+ ******************************************************************************/
+int plat_spm_core_manifest_load(spmc_manifest_attribute_t *manifest,
+ const void *pm_addr)
+{
+ int rc, unmap_ret;
+ uintptr_t pm_base, pm_base_align;
+ size_t mapped_size;
+
+ assert(manifest != NULL);
+ assert(pm_addr != NULL);
+
+ /*
+ * The TOS_FW_CONFIG blob is not necessarily aligned to a page
+ * boundary, so calculate the remaining space between the SPMC
+ * manifest start address and the upper page limit.
+ */
+ pm_base = (uintptr_t)pm_addr;
+ pm_base_align = page_align(pm_base, UP);
+
+ if (pm_base == pm_base_align) {
+ /* Page aligned */
+ mapped_size = PAGE_SIZE;
+ } else {
+ mapped_size = pm_base_align - pm_base;
+ }
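+
+ /*
+ * Worked example (illustrative): with 4 KiB pages, a manifest at
+ * pm_base = 0x04001300 gives pm_base_align = 0x04002000 and
+ * mapped_size = 0xd00 bytes up to the page boundary.
+ */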
+
+ /* Check that the space left in the page at least covers the FDT header */
+ if (mapped_size < sizeof(struct fdt_header)) {
+ ERROR("Error while mapping SPM Core manifest.\n");
+ return -EINVAL;
+ }
+
+ /* Map first SPMC manifest page in the SPMD translation regime */
+ pm_base_align = page_align(pm_base, DOWN);
+ rc = mmap_add_dynamic_region((unsigned long long)pm_base_align,
+ pm_base_align,
+ PAGE_SIZE,
+ MT_RO_DATA);
+ if (rc != 0) {
+ ERROR("Error while mapping SPM Core manifest (%d).\n", rc);
+ return rc;
+ }
+
+ rc = fdt_check_header(pm_addr);
+ if (rc != 0) {
+ ERROR("Wrong format for SPM Core manifest (%d).\n", rc);
+ goto exit_unmap;
+ }
+
+ /* Check SPMC manifest fits within the upper mapped page boundary */
+ if (mapped_size < fdt_totalsize(pm_addr)) {
+ ERROR("SPM Core manifest too large.\n");
+ rc = -EINVAL;
+ goto exit_unmap;
+ }
+
+ VERBOSE("Reading SPM Core manifest at address %p\n", pm_addr);
+
+ rc = fdt_node_offset_by_compatible(pm_addr, -1,
+ "arm,ffa-core-manifest-1.0");
+ if (rc < 0) {
+ ERROR("Unrecognized SPM Core manifest\n");
+ goto exit_unmap;
+ }
+
+ rc = manifest_parse_root(manifest, pm_addr, rc);
+
+exit_unmap:
+ unmap_ret = mmap_remove_dynamic_region(pm_base_align, PAGE_SIZE);
+ if (unmap_ret != 0) {
+ ERROR("Error while unmapping SPM Core manifest (%d).\n",
+ unmap_ret);
+ if (rc == 0) {
+ rc = unmap_ret;
+ }
+ }
+
+ return rc;
+}
diff --git a/plat/common/tbbr/plat_tbbr.c b/plat/common/tbbr/plat_tbbr.c
new file mode 100644
index 0000000..12ab0a9
--- /dev/null
+++ b/plat/common/tbbr/plat_tbbr.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <drivers/auth/auth_mod.h>
+#include <plat/common/platform.h>
+#if USE_TBBR_DEFS
+#include <tools_share/tbbr_oid.h>
+#else
+#include <platform_oid.h>
+#endif
+
+/*
+ * Store a new non-volatile counter value. This implementation
+ * only allows updating of the platform's Trusted NV counter when a
+ * certificate protected by the Trusted NV counter is signed with
+ * the ROT key. This prevents a compromised secondary certificate from
+ * updating the platform's Trusted NV counter, which could render the
+ * platform unusable. The function is suitable for all TBBR-compliant
+ * platforms.
+ *
+ * Return: 0 = success, Otherwise = error
+ */
+int plat_set_nv_ctr2(void *cookie, const auth_img_desc_t *img_desc,
+ unsigned int nv_ctr)
+{
+ int trusted_nv_ctr;
+
+ assert(cookie != NULL);
+ assert(img_desc != NULL);
+
+ trusted_nv_ctr = strcmp(cookie, TRUSTED_FW_NVCOUNTER_OID) == 0;
+
+ /*
+ * Only update the Trusted NV Counter if the certificate
+ * has been signed with the ROT key. Non Trusted NV counter
+ * updates are unconditional.
+ */
+ if (!trusted_nv_ctr || img_desc->parent == NULL)
+ return plat_set_nv_ctr(cookie, nv_ctr);
+
+ /*
+ * Trusted certificates not signed with the ROT key are not
+ * allowed to update the Trusted NV Counter.
+ */
+ return 1;
+}
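+
+/*
+ * For context, a minimal sketch of the plat_set_nv_ctr() backend that this
+ * function delegates to; the register names are hypothetical and purely
+ * illustrative of a fuse- or register-backed counter:
+ *
+ *	int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
+ *	{
+ *		if (strcmp(cookie, TRUSTED_FW_NVCOUNTER_OID) == 0)
+ *			mmio_write_32(PLAT_TRUSTED_NV_CTR_REG, nv_ctr);
+ *		else
+ *			mmio_write_32(PLAT_NON_TRUSTED_NV_CTR_REG, nv_ctr);
+ *		return 0;
+ *	}
+ */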
diff --git a/plat/common/ubsan.c b/plat/common/ubsan.c
new file mode 100644
index 0000000..45b0f7c
--- /dev/null
+++ b/plat/common/ubsan.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2019, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <arch_helpers.h>
+#include <context.h>
+#include <common/debug.h>
+#include <plat/common/platform.h>
+
+struct source_location {
+ const char *file_name;
+ uint32_t line;
+ uint32_t column;
+};
+
+struct type_descriptor {
+ uint16_t type_kind;
+ uint16_t type_info;
+ char type_name[1];
+};
+
+struct type_mismatch_data {
+ struct source_location loc;
+ struct type_descriptor *type;
+ unsigned long alignment;
+ unsigned char type_check_kind;
+};
+
+struct overflow_data {
+ struct source_location loc;
+ struct type_descriptor *type;
+};
+
+struct shift_out_of_bounds_data {
+ struct source_location loc;
+ struct type_descriptor *lhs_type;
+ struct type_descriptor *rhs_type;
+};
+
+struct out_of_bounds_data {
+ struct source_location loc;
+ struct type_descriptor *array_type;
+ struct type_descriptor *index_type;
+};
+
+struct unreachable_data {
+ struct source_location loc;
+};
+
+struct vla_bound_data {
+ struct source_location loc;
+ struct type_descriptor *type;
+};
+
+struct invalid_value_data {
+ struct source_location loc;
+ struct type_descriptor *type;
+};
+
+struct nonnull_arg_data {
+ struct source_location loc;
+};
+
+/*
+ * When compiling with -fsanitize=undefined the compiler expects functions
+ * with the following signatures. These functions are never called directly;
+ * the compiler emits calls to them when undefined behavior is detected in
+ * instrumented code.
+ */
+void __ubsan_handle_type_mismatch_abort(struct type_mismatch_data *data,
+ unsigned long ptr);
+void __ubsan_handle_type_mismatch_v1_abort(struct type_mismatch_data *data,
+ unsigned long ptr);
+void __ubsan_handle_add_overflow_abort(struct overflow_data *data,
+ unsigned long lhs, unsigned long rhs);
+void __ubsan_handle_sub_overflow_abort(struct overflow_data *data,
+ unsigned long lhs, unsigned long rhs);
+void __ubsan_handle_mul_overflow_abort(struct overflow_data *data,
+ unsigned long lhs, unsigned long rhs);
+void __ubsan_handle_negate_overflow_abort(struct overflow_data *data,
+ unsigned long old_val);
+void __ubsan_handle_pointer_overflow_abort(struct overflow_data *data,
+ unsigned long old_val);
+void __ubsan_handle_divrem_overflow_abort(struct overflow_data *data,
+ unsigned long lhs, unsigned long rhs);
+void __ubsan_handle_shift_out_of_bounds_abort(struct shift_out_of_bounds_data *data,
+ unsigned long lhs, unsigned long rhs);
+void __ubsan_handle_out_of_bounds_abort(struct out_of_bounds_data *data,
+ unsigned long idx);
+void __ubsan_handle_unreachable_abort(struct unreachable_data *data);
+void __ubsan_handle_missing_return_abort(struct unreachable_data *data);
+void __ubsan_handle_vla_bound_not_positive_abort(struct vla_bound_data *data,
+ unsigned long bound);
+void __ubsan_handle_load_invalid_value_abort(struct invalid_value_data *data,
+ unsigned long val);
+void __ubsan_handle_nonnull_arg_abort(struct nonnull_arg_data *data
+#if __GCC_VERSION < 60000
+ , size_t arg_no
+#endif
+ );
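+
+/*
+ * For illustration: code such as
+ *
+ *	int32_t x = INT32_MAX;
+ *	x += 1;			// signed integer overflow
+ *
+ * compiled with -fsanitize=undefined and recovery disabled (so that the
+ * *_abort variants are emitted) is instrumented so that the overflow lands
+ * in __ubsan_handle_add_overflow_abort() below, which prints the source
+ * location and then calls plat_panic_handler().
+ */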
+
+static void print_loc(const char *func, struct source_location *loc)
+{
+ ERROR("Undefined behavior at %s:%d col %d (%s)",
+ loc->file_name, loc->line, loc->column, func);
+}
+
+
+void __ubsan_handle_type_mismatch_abort(struct type_mismatch_data *data,
+ unsigned long ptr __unused)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_type_mismatch_v1_abort(struct type_mismatch_data *data,
+ unsigned long ptr __unused)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_add_overflow_abort(struct overflow_data *data,
+ unsigned long lhs __unused,
+ unsigned long rhs __unused)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_sub_overflow_abort(struct overflow_data *data,
+ unsigned long lhs __unused,
+ unsigned long rhs __unused)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_mul_overflow_abort(struct overflow_data *data,
+ unsigned long lhs __unused,
+ unsigned long rhs __unused)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_negate_overflow_abort(struct overflow_data *data,
+ unsigned long old_val __unused)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_pointer_overflow_abort(struct overflow_data *data,
+ unsigned long old_val __unused)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_divrem_overflow_abort(struct overflow_data *data,
+ unsigned long lhs __unused,
+ unsigned long rhs __unused)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_shift_out_of_bounds_abort(struct shift_out_of_bounds_data *data,
+ unsigned long lhs __unused,
+ unsigned long rhs __unused)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_out_of_bounds_abort(struct out_of_bounds_data *data,
+ unsigned long idx __unused)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_unreachable_abort(struct unreachable_data *data)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_missing_return_abort(struct unreachable_data *data)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_vla_bound_not_positive_abort(struct vla_bound_data *data,
+ unsigned long bound __unused)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_load_invalid_value_abort(struct invalid_value_data *data,
+ unsigned long val __unused)
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}
+
+void __ubsan_handle_nonnull_arg_abort(struct nonnull_arg_data *data
+#if __GCC_VERSION < 60000
+ , size_t arg_no __unused
+#endif
+ )
+{
+ print_loc(__func__, &data->loc);
+ plat_panic_handler();
+}