Diffstat (limited to 'bl32/sp_min')
-rw-r--r--  bl32/sp_min/aarch32/entrypoint.S            382
-rw-r--r--  bl32/sp_min/sp_min.ld.S                     150
-rw-r--r--  bl32/sp_min/sp_min.mk                        77
-rw-r--r--  bl32/sp_min/sp_min_main.c                   249
-rw-r--r--  bl32/sp_min/sp_min_private.h                 14
-rw-r--r--  bl32/sp_min/wa_cve_2017_5715_bpiall.S        74
-rw-r--r--  bl32/sp_min/wa_cve_2017_5715_icache_inv.S    75
7 files changed, 1021 insertions, 0 deletions
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
new file mode 100644
index 0000000..f102967
--- /dev/null
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <common/runtime_svc.h>
+#include <context.h>
+#include <el3_common_macros.S>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/pmf/aarch32/pmf_asm_macros.S>
+#include <lib/runtime_instr.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <smccc_helpers.h>
+#include <smccc_macros.S>
+
+ .globl sp_min_vector_table
+ .globl sp_min_entrypoint
+ .globl sp_min_warm_entrypoint
+ .globl sp_min_handle_smc
+ .globl sp_min_handle_fiq
+
+#define FIXUP_SIZE ((BL32_LIMIT) - (BL32_BASE))
+
+ .macro route_fiq_to_sp_min reg
+ /* -----------------------------------------------------
+ * FIQs are secure interrupts trapped by the Monitor; the
+ * Non-secure world is not allowed to mask them.
+ * -----------------------------------------------------
+ */
+ ldcopr \reg, SCR
+ orr \reg, \reg, #SCR_FIQ_BIT
+ bic \reg, \reg, #SCR_FW_BIT
+ stcopr \reg, SCR
+ .endm
+
+ .macro clrex_on_monitor_entry
+#if (ARM_ARCH_MAJOR == 7)
+ /*
+ * On ARMv7, the local exclusive monitor must be cleared when
+ * entering Monitor mode.
+ */
+ clrex
+#endif
+ .endm
+
+vector_base sp_min_vector_table
+ b sp_min_entrypoint
+ b plat_panic_handler /* Undef */
+ b sp_min_handle_smc /* Syscall */
+ b report_prefetch_abort /* Prefetch abort */
+ b report_data_abort /* Data abort */
+ b plat_panic_handler /* Reserved */
+ b plat_panic_handler /* IRQ */
+ b sp_min_handle_fiq /* FIQ */
+
+
+/*
+ * The Cold boot/Reset entrypoint for SP_MIN
+ */
+func sp_min_entrypoint
+#if !RESET_TO_SP_MIN
+ /* ---------------------------------------------------------------
+ * The preceding bootloader has populated r0 with a pointer to a
+ * 'bl_params_t' structure and r1 with a pointer to a platform
+ * specific structure. Preserve r0-r3 in r9-r12 so the arguments
+ * survive the early initialisation code below.
+ * ---------------------------------------------------------------
+ */
+ mov r9, r0
+ mov r10, r1
+ mov r11, r2
+ mov r12, r3
+
+ /* ---------------------------------------------------------------------
+ * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
+ * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
+ * and primary/secondary CPU logic should not be executed in this case.
+ *
+ * Also, assume that the previous bootloader has already initialised the
+ * SCTLR, including the CPU endianness, and has initialised the memory.
+ * ---------------------------------------------------------------------
+ */
+ el3_entrypoint_common \
+ _init_sctlr=0 \
+ _warm_boot_mailbox=0 \
+ _secondary_cold_boot=0 \
+ _init_memory=0 \
+ _init_c_runtime=1 \
+ _exception_vectors=sp_min_vector_table \
+ _pie_fixup_size=FIXUP_SIZE
+
+ /* ---------------------------------------------------------------------
+ * Relay the previous bootloader's arguments to the platform layer
+ * ---------------------------------------------------------------------
+ */
+#else
+ /* ---------------------------------------------------------------------
+ * For RESET_TO_SP_MIN systems which have a programmable reset address,
+ * sp_min_entrypoint() is executed only on the cold boot path so we can
+ * skip the warm boot mailbox mechanism.
+ * ---------------------------------------------------------------------
+ */
+ el3_entrypoint_common \
+ _init_sctlr=1 \
+ _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \
+ _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \
+ _init_memory=1 \
+ _init_c_runtime=1 \
+ _exception_vectors=sp_min_vector_table \
+ _pie_fixup_size=FIXUP_SIZE
+
+ /* ---------------------------------------------------------------------
+ * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
+ * to run so there's no argument to relay from a previous bootloader.
+ * Zero the arguments passed to the platform layer to reflect that.
+ * ---------------------------------------------------------------------
+ */
+ mov r9, #0
+ mov r10, #0
+ mov r11, #0
+ mov r12, #0
+
+#endif /* RESET_TO_SP_MIN */
+
+#if SP_MIN_WITH_SECURE_FIQ
+ route_fiq_to_sp_min r4
+#endif
+
+ mov r0, r9
+ mov r1, r10
+ mov r2, r11
+ mov r3, r12
+ bl sp_min_early_platform_setup2
+ bl sp_min_plat_arch_setup
+
+ /* Jump to the main function */
+ bl sp_min_main
+
+ /* -------------------------------------------------------------
+ * Clean the .data & .bss sections to main memory. This ensures
+ * that any global data which was initialised by the primary CPU
+ * is visible to secondary CPUs before they enable their data
+ * caches and participate in coherency.
+ * -------------------------------------------------------------
+ */
+ ldr r0, =__DATA_START__
+ ldr r1, =__DATA_END__
+ sub r1, r1, r0
+ bl clean_dcache_range
+
+ ldr r0, =__BSS_START__
+ ldr r1, =__BSS_END__
+ sub r1, r1, r0
+ bl clean_dcache_range
+
+ bl smc_get_next_ctx
+
+ /* r0 points to `smc_ctx_t` */
+ /* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
+ b sp_min_exit
+endfunc sp_min_entrypoint
+
+
+/*
+ * SMC handling function for SP_MIN.
+ */
+func sp_min_handle_smc
+ /* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
+ str lr, [sp, #SMC_CTX_LR_MON]
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ /*
+ * Read the timestamp value and store it on top of the C runtime stack.
+ * The value will be saved to the per-cpu data once the C stack is
+ * available, as a valid stack is needed to call _cpu_data().
+ */
+ strd r0, r1, [sp, #SMC_CTX_GPREG_R0]
+ ldcopr16 r0, r1, CNTPCT_64
+ ldr lr, [sp, #SMC_CTX_SP_MON]
+ strd r0, r1, [lr, #-8]!
+ str lr, [sp, #SMC_CTX_SP_MON]
+ ldrd r0, r1, [sp, #SMC_CTX_GPREG_R0]
+#endif
+
+ smccc_save_gp_mode_regs
+
+ clrex_on_monitor_entry
+
+ /*
+ * `sp` still points to `smc_ctx_t`. Save it to a register
+ * and restore the C runtime stack pointer to `sp`.
+ */
+ mov r2, sp /* handle */
+ ldr sp, [r2, #SMC_CTX_SP_MON]
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ /* Save handle to a callee saved register */
+ mov r6, r2
+
+ /*
+ * Restore the timestamp value and store it in per-cpu data. The value
+ * will be extracted from per-cpu data by the C level SMC handler and
+ * saved to the PMF timestamp region.
+ */
+ ldrd r4, r5, [sp], #8
+ bl _cpu_data
+ strd r4, r5, [r0, #CPU_DATA_PMF_TS0_OFFSET]
+
+ /* Restore handle */
+ mov r2, r6
+#endif
+
+ ldr r0, [r2, #SMC_CTX_SCR]
+ and r3, r0, #SCR_NS_BIT /* flags */
+
+ /* Switch to Secure Mode */
+ bic r0, #SCR_NS_BIT
+ stcopr r0, SCR
+ isb
+
+ ldr r0, [r2, #SMC_CTX_GPREG_R0] /* smc_fid */
+ /* Check whether an SMC64 was issued */
+ tst r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
+ beq 1f
+ /* An SMC64 was issued; only SMC32 is supported. Return an error to the caller */
+ mov r0, #SMC_UNK
+ str r0, [r2, #SMC_CTX_GPREG_R0]
+ mov r0, r2
+ b sp_min_exit
+1:
+ /* SMC32 is detected */
+ mov r1, #0 /* cookie */
+ bl handle_runtime_svc
+
+ /* `r0` points to `smc_ctx_t` */
+ b sp_min_exit
+endfunc sp_min_handle_smc
+
+/*
+ * Secure Interrupts handling function for SP_MIN.
+ */
+func sp_min_handle_fiq
+#if !SP_MIN_WITH_SECURE_FIQ
+ b plat_panic_handler
+#else
+ /* On FIQ entry, lr points 4 bytes past the preferred return address */
+ sub lr, lr, #4
+ /* On Monitor entry, `sp` points to `smc_ctx_t`. Save `lr`. */
+ str lr, [sp, #SMC_CTX_LR_MON]
+
+ smccc_save_gp_mode_regs
+
+ clrex_on_monitor_entry
+
+ /* load run-time stack */
+ mov r2, sp
+ ldr sp, [r2, #SMC_CTX_SP_MON]
+
+ /* Switch to Secure Mode */
+ ldr r0, [r2, #SMC_CTX_SCR]
+ bic r0, #SCR_NS_BIT
+ stcopr r0, SCR
+ isb
+
+ push {r2, r3}
+ bl sp_min_fiq
+ pop {r0, r3}
+
+ b sp_min_exit
+#endif
+endfunc sp_min_handle_fiq
+
+/*
+ * The Warm boot entrypoint for SP_MIN.
+ */
+func sp_min_warm_entrypoint
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ /*
+ * This timestamp update happens with cache off. The next
+ * timestamp collection will need to do cache maintenance prior
+ * to timestamp update.
+ */
+ pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
+ ldcopr16 r2, r3, CNTPCT_64
+ strd r2, r3, [r0]
+#endif
+ /*
+ * On the warm boot path, most of the EL3 initialisations performed by
+ * 'el3_entrypoint_common' must be skipped:
+ *
+ * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
+ *   programming the reset address do we need to initialise the SCTLR.
+ *   In other cases, we assume this has been taken care of by the
+ *   entrypoint code.
+ *
+ * - No need to determine the type of boot, we know it is a warm boot.
+ *
+ * - Do not try to distinguish between primary and secondary CPUs, this
+ * notion only exists for a cold boot.
+ *
+ * - No need to initialise the memory or the C runtime environment,
+ * it has been done once and for all on the cold boot path.
+ */
+ el3_entrypoint_common \
+ _init_sctlr=PROGRAMMABLE_RESET_ADDRESS \
+ _warm_boot_mailbox=0 \
+ _secondary_cold_boot=0 \
+ _init_memory=0 \
+ _init_c_runtime=0 \
+ _exception_vectors=sp_min_vector_table \
+ _pie_fixup_size=0
+
+ /*
+ * We're about to enable MMU and participate in PSCI state coordination.
+ *
+ * The PSCI implementation invokes platform routines that enable CPUs to
+ * participate in coherency. On a system where CPUs are not
+ * cache-coherent without appropriate platform specific programming,
+ * having caches enabled until such time might lead to coherency issues
+ * (resulting from stale data getting speculatively fetched, among
+ * others). Therefore we keep data caches disabled even after enabling
+ * the MMU for such platforms.
+ *
+ * On systems with hardware-assisted coherency, or on single cluster
+ * platforms, such platform specific programming is not required to
+ * enter coherency (as CPUs already are); and there's no reason to have
+ * caches disabled either.
+ */
+#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
+ mov r0, #0
+#else
+ mov r0, #DISABLE_DCACHE
+#endif
+ bl bl32_plat_enable_mmu
+
+#if SP_MIN_WITH_SECURE_FIQ
+ route_fiq_to_sp_min r0
+#endif
+
+ bl sp_min_warm_boot
+ bl smc_get_next_ctx
+ /* r0 points to `smc_ctx_t` */
+ /* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ /* Save smc_ctx_t */
+ mov r5, r0
+
+ pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
+ mov r4, r0
+
+ /*
+ * Invalidate before updating timestamp to ensure previous timestamp
+ * updates on the same cache line with caches disabled are properly
+ * seen by the same core. Without the cache invalidate, the core might
+ * write into a stale cache line.
+ */
+ mov r1, #PMF_TS_SIZE
+ bl inv_dcache_range
+
+ ldcopr16 r0, r1, CNTPCT_64
+ strd r0, r1, [r4]
+
+ /* Restore smc_ctx_t */
+ mov r0, r5
+#endif
+
+ b sp_min_exit
+endfunc sp_min_warm_entrypoint
+
+/*
+ * Restore the registers from the SMC context and return to the mode
+ * specified in the saved SPSR.
+ *
+ * Arguments: r0 must point to the SMC context to restore from.
+ */
+func sp_min_exit
+ monitor_exit
+endfunc sp_min_exit
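
The SMC32/SMC64 check in sp_min_handle_smc above tests the SMCCC
calling-convention flag of the function ID. Below is a minimal C sketch of
the same test, assuming the standard SMCCC function ID layout
(calling-convention flag in bit 30); the macro names mirror those used in
the assembly:

    #include <stdint.h>

    #define FUNCID_CC_SHIFT  30U  /* SMCCC calling-convention flag */
    #define FUNCID_CC_MASK   0x1U

    /*
     * Returns 1 for an SMC64 function ID, 0 for SMC32. sp_min_handle_smc
     * rejects SMC64 calls with SMC_UNK because SP_MIN is AArch32-only.
     */
    static inline int smc_fid_is_smc64(uint32_t smc_fid)
    {
            return (int)((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK);
    }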
diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S
new file mode 100644
index 0000000..475affa
--- /dev/null
+++ b/bl32/sp_min/sp_min.ld.S
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/bl_common.ld.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+OUTPUT_FORMAT(elf32-littlearm)
+OUTPUT_ARCH(arm)
+ENTRY(sp_min_vector_table)
+
+MEMORY {
+ RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
+}
+
+#ifdef PLAT_SP_MIN_EXTRA_LD_SCRIPT
+#include <plat_sp_min.ld.S>
+#endif
+
+SECTIONS
+{
+ . = BL32_BASE;
+ ASSERT(. == ALIGN(PAGE_SIZE),
+ "BL32_BASE address is not aligned on a page boundary.")
+
+#if SEPARATE_CODE_AND_RODATA
+ .text . : {
+ __TEXT_START__ = .;
+ *entrypoint.o(.text*)
+ *(SORT_BY_ALIGNMENT(.text*))
+ *(.vectors)
+ . = ALIGN(PAGE_SIZE);
+ __TEXT_END__ = .;
+ } >RAM
+
+ /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
+ .ARM.extab . : {
+ *(.ARM.extab* .gnu.linkonce.armextab.*)
+ } >RAM
+
+ .ARM.exidx . : {
+ *(.ARM.exidx* .gnu.linkonce.armexidx.*)
+ } >RAM
+
+ .rodata . : {
+ __RODATA_START__ = .;
+ *(SORT_BY_ALIGNMENT(.rodata*))
+
+ RODATA_COMMON
+
+ /* Place pubsub sections for events */
+ . = ALIGN(8);
+#include <lib/el3_runtime/pubsub_events.h>
+
+ . = ALIGN(PAGE_SIZE);
+ __RODATA_END__ = .;
+ } >RAM
+#else
+ ro . : {
+ __RO_START__ = .;
+ *entrypoint.o(.text*)
+ *(SORT_BY_ALIGNMENT(.text*))
+ *(SORT_BY_ALIGNMENT(.rodata*))
+
+ RODATA_COMMON
+
+ /* Place pubsub sections for events */
+ . = ALIGN(8);
+#include <lib/el3_runtime/pubsub_events.h>
+
+ *(.vectors)
+ __RO_END_UNALIGNED__ = .;
+
+ /*
+ * Memory page(s) mapped to this section will be marked as
+ * read-only, executable. No RW data from the next section must
+ * creep in. Ensure the rest of the current memory page is unused.
+ */
+ . = ALIGN(PAGE_SIZE);
+ __RO_END__ = .;
+ } >RAM
+#endif
+
+ ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
+ "cpu_ops not defined for this platform.")
+ /*
+ * Define a linker symbol to mark start of the RW memory area for this
+ * image.
+ */
+ __RW_START__ = . ;
+
+ DATA_SECTION >RAM
+ RELA_SECTION >RAM
+
+#ifdef BL32_PROGBITS_LIMIT
+ ASSERT(. <= BL32_PROGBITS_LIMIT, "BL32 progbits has exceeded its limit.")
+#endif
+
+ STACK_SECTION >RAM
+ BSS_SECTION >RAM
+ XLAT_TABLE_SECTION >RAM
+
+ __BSS_SIZE__ = SIZEOF(.bss);
+
+#if USE_COHERENT_MEM
+ /*
+ * The base address of the coherent memory section must be page-aligned (4K)
+ * to guarantee that the coherent data are stored on their own pages and
+ * are not mixed with normal data. This is required to set up the correct
+ * memory attributes for the coherent data page tables.
+ */
+ coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+ __COHERENT_RAM_START__ = .;
+ /*
+ * Bakery locks are stored in coherent memory
+ *
+ * Each lock's data is contiguous and fully allocated by the compiler
+ */
+ *(bakery_lock)
+ *(tzfw_coherent_mem)
+ __COHERENT_RAM_END_UNALIGNED__ = .;
+ /*
+ * Memory page(s) mapped to this section will be marked
+ * as device memory. No other unexpected data must creep in.
+ * Ensure the rest of the current memory page is unused.
+ */
+ . = ALIGN(PAGE_SIZE);
+ __COHERENT_RAM_END__ = .;
+ } >RAM
+
+ __COHERENT_RAM_UNALIGNED_SIZE__ =
+ __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
+#endif
+
+ /*
+ * Define a linker symbol to mark the end of the RW memory area for this
+ * image.
+ */
+ __RW_END__ = .;
+
+ __BL32_END__ = .;
+
+ /DISCARD/ : {
+ *(.dynsym .dynstr .hash .gnu.hash)
+ }
+
+ ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.")
+}
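
The PLAT_SP_MIN_EXTRA_LD_SCRIPT hook near the top of this script lets a
platform inject linker definitions before the SECTIONS command. A
hypothetical plat_sp_min.ld.S sketch follows; the region name, origin and
length are illustrative, not taken from any real platform:

    /*
     * Hypothetical plat_sp_min.ld.S: declare an extra memory region that
     * platform-specific output sections can be placed into. GNU ld merges
     * multiple MEMORY commands, so this adds a region alongside RAM.
     */
    MEMORY {
        PLAT_SRAM (rw): ORIGIN = 0x02000000, LENGTH = 0x00010000
    }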
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
new file mode 100644
index 0000000..ab1287d
--- /dev/null
+++ b/bl32/sp_min/sp_min.mk
@@ -0,0 +1,77 @@
+#
+# Copyright (c) 2016-2022, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH}, aarch32)
+ $(error SP_MIN is only supported on AArch32 platforms)
+endif
+
+include lib/extensions/amu/amu.mk
+include lib/psci/psci_lib.mk
+
+INCLUDES += -Iinclude/bl32/sp_min
+
+BL32_SOURCES += bl32/sp_min/sp_min_main.c \
+ bl32/sp_min/aarch32/entrypoint.S \
+ common/runtime_svc.c \
+ plat/common/aarch32/plat_sp_min_common.c \
+ services/std_svc/std_svc_setup.c \
+ ${PSCI_LIB_SOURCES}
+
+ifeq (${DISABLE_MTPMU},1)
+BL32_SOURCES += lib/extensions/mtpmu/aarch32/mtpmu.S
+endif
+
+ifeq (${ENABLE_PMF}, 1)
+BL32_SOURCES += lib/pmf/pmf_main.c
+endif
+
+ifeq (${ENABLE_AMU},1)
+BL32_SOURCES += ${AMU_SOURCES}
+endif
+
+ifeq (${WORKAROUND_CVE_2017_5715},1)
+BL32_SOURCES += bl32/sp_min/wa_cve_2017_5715_bpiall.S \
+ bl32/sp_min/wa_cve_2017_5715_icache_inv.S
+else
+ifeq (${WORKAROUND_CVE_2022_23960},1)
+BL32_SOURCES += bl32/sp_min/wa_cve_2017_5715_icache_inv.S
+endif
+endif
+
+ifeq (${TRNG_SUPPORT},1)
+BL32_SOURCES += services/std_svc/trng/trng_main.c \
+ services/std_svc/trng/trng_entropy_pool.c
+endif
+
+ifeq (${ENABLE_SYS_REG_TRACE_FOR_NS},1)
+BL32_SOURCES += lib/extensions/sys_reg_trace/aarch32/sys_reg_trace.c
+endif
+
+ifeq (${ENABLE_TRF_FOR_NS},1)
+BL32_SOURCES += lib/extensions/trf/aarch32/trf.c
+endif
+
+BL32_LINKERFILE := bl32/sp_min/sp_min.ld.S
+
+# Include the platform-specific SP_MIN Makefile
+# If no platform-specific SP_MIN Makefile exists, it means SP_MIN is not supported
+# on this platform.
+SP_MIN_PLAT_MAKEFILE := $(wildcard ${PLAT_DIR}/sp_min/sp_min-${PLAT}.mk)
+ifeq (,${SP_MIN_PLAT_MAKEFILE})
+ $(error SP_MIN is not supported on platform ${PLAT})
+else
+ include ${SP_MIN_PLAT_MAKEFILE}
+endif
+
+RESET_TO_SP_MIN := 0
+$(eval $(call add_define,RESET_TO_SP_MIN))
+$(eval $(call assert_boolean,RESET_TO_SP_MIN))
+
+# Flag to allow SP_MIN to handle FIQ interrupts in monitor mode. The platform
+# port is free to override this value. It is disabled by default.
+SP_MIN_WITH_SECURE_FIQ ?= 0
+$(eval $(call add_define,SP_MIN_WITH_SECURE_FIQ))
+$(eval $(call assert_boolean,SP_MIN_WITH_SECURE_FIQ))
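
The include above expects a platform makefile at
${PLAT_DIR}/sp_min/sp_min-${PLAT}.mk. A hypothetical minimal hook is
sketched below; the platform name and source paths are illustrative:

    #
    # Hypothetical plat/acme/sp_min/sp_min-acme.mk
    #
    BL32_SOURCES += plat/acme/sp_min/acme_sp_min_setup.c

    # Handle secure FIQs in Monitor mode. This assignment sticks because
    # sp_min.mk only assigns SP_MIN_WITH_SECURE_FIQ with ?= after this
    # include.
    SP_MIN_WITH_SECURE_FIQ := 1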
diff --git a/bl32/sp_min/sp_min_main.c b/bl32/sp_min/sp_min_main.c
new file mode 100644
index 0000000..f050160
--- /dev/null
+++ b/bl32/sp_min/sp_min_main.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <context.h>
+#include <drivers/console.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/pmf/pmf.h>
+#include <lib/psci/psci.h>
+#include <lib/runtime_instr.h>
+#include <lib/utils.h>
+#include <plat/common/platform.h>
+#include <platform_sp_min.h>
+#include <services/std_svc.h>
+#include <smccc_helpers.h>
+
+#include "sp_min_private.h"
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+PMF_REGISTER_SERVICE_SMC(rt_instr_svc, PMF_RT_INSTR_SVC_ID,
+ RT_INSTR_TOTAL_IDS, PMF_STORE_ENABLE)
+#endif
+
+/* Pointers to per-core cpu contexts */
+static void *sp_min_cpu_ctx_ptr[PLATFORM_CORE_COUNT];
+
+/* SP_MIN stores only the non-secure SMC context */
+static smc_ctx_t sp_min_smc_context[PLATFORM_CORE_COUNT];
+
+/******************************************************************************
+ * Define the smccc helper library APIs
+ *****************************************************************************/
+void *smc_get_ctx(unsigned int security_state)
+{
+ assert(security_state == NON_SECURE);
+ return &sp_min_smc_context[plat_my_core_pos()];
+}
+
+void smc_set_next_ctx(unsigned int security_state)
+{
+ assert(security_state == NON_SECURE);
+ /* SP_MIN stores only the non-secure SMC context. Nothing to do here */
+}
+
+void *smc_get_next_ctx(void)
+{
+ return &sp_min_smc_context[plat_my_core_pos()];
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the calling CPU that was set as the context for the specified security
+ * state. NULL is returned if no such structure has been specified.
+ ******************************************************************************/
+void *cm_get_context(uint32_t security_state)
+{
+ assert(security_state == NON_SECURE);
+ return sp_min_cpu_ctx_ptr[plat_my_core_pos()];
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the calling CPU
+ ******************************************************************************/
+void cm_set_context(void *context, uint32_t security_state)
+{
+ assert(security_state == NON_SECURE);
+ sp_min_cpu_ctx_ptr[plat_my_core_pos()] = context;
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the CPU identified by `cpu_idx` that was set as the context for the
+ * specified security state. NULL is returned if no such structure has been
+ * specified.
+ ******************************************************************************/
+void *cm_get_context_by_index(unsigned int cpu_idx,
+ unsigned int security_state)
+{
+ assert(security_state == NON_SECURE);
+ return sp_min_cpu_ctx_ptr[cpu_idx];
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the CPU identified by CPU index.
+ ******************************************************************************/
+void cm_set_context_by_index(unsigned int cpu_idx, void *context,
+ unsigned int security_state)
+{
+ assert(security_state == NON_SECURE);
+ sp_min_cpu_ctx_ptr[cpu_idx] = context;
+}
+
+static void copy_cpu_ctx_to_smc_stx(const regs_t *cpu_reg_ctx,
+ smc_ctx_t *next_smc_ctx)
+{
+ next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
+ next_smc_ctx->r1 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R1);
+ next_smc_ctx->r2 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R2);
+ next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
+ next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
+ next_smc_ctx->scr = read_ctx_reg(cpu_reg_ctx, CTX_SCR);
+}
+
+/*******************************************************************************
+ * This function invokes the PSCI library interface to initialize the
+ * non-secure cpu context and copies the relevant cpu context register
+ * values to the smc context. These registers will get programmed during
+ * `smc_exit`.
+ ******************************************************************************/
+static void sp_min_prepare_next_image_entry(void)
+{
+ entry_point_info_t *next_image_info;
+ cpu_context_t *ctx = cm_get_context(NON_SECURE);
+ u_register_t ns_sctlr;
+
+ /* Program system registers to proceed to non-secure */
+ next_image_info = sp_min_plat_get_bl33_ep_info();
+ assert(next_image_info);
+ assert(NON_SECURE == GET_SECURITY_STATE(next_image_info->h.attr));
+
+ INFO("SP_MIN: Preparing exit to normal world\n");
+
+ psci_prepare_next_non_secure_ctx(next_image_info);
+ smc_set_next_ctx(NON_SECURE);
+
+ /* Copy r0-r2, lr_mon, spsr_mon and scr from the cpu context to the SMC context */
+ copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
+ smc_get_next_ctx());
+
+ /* Temporarily set the NS bit to access NS SCTLR */
+ write_scr(read_scr() | SCR_NS_BIT);
+ isb();
+ ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
+ write_sctlr(ns_sctlr);
+ isb();
+
+ write_scr(read_scr() & ~SCR_NS_BIT);
+ isb();
+}
+
+/******************************************************************************
+ * Implement the ARM Standard Service function to get arguments for a
+ * particular service.
+ *****************************************************************************/
+uintptr_t get_arm_std_svc_args(unsigned int svc_mask)
+{
+ /* Setup the arguments for PSCI Library */
+ DEFINE_STATIC_PSCI_LIB_ARGS_V1(psci_args, sp_min_warm_entrypoint);
+
+ /* PSCI is the only ARM Standard Service implemented */
+ assert(svc_mask == PSCI_FID_MASK);
+
+ return (uintptr_t)&psci_args;
+}
+
+/******************************************************************************
+ * The SP_MIN main function. Do the platform and PSCI Library setup. Also
+ * initialize the runtime service framework.
+ *****************************************************************************/
+void sp_min_main(void)
+{
+ NOTICE("SP_MIN: %s\n", version_string);
+ NOTICE("SP_MIN: %s\n", build_message);
+
+ /* Perform the SP_MIN platform setup */
+ sp_min_platform_setup();
+
+ /* Initialize the runtime services e.g. psci */
+ INFO("SP_MIN: Initializing runtime services\n");
+ runtime_svc_init();
+
+ /*
+ * We are ready to enter the next EL. Prepare entry into the image
+ * corresponding to the desired security state after the next ERET.
+ */
+ sp_min_prepare_next_image_entry();
+
+ /*
+ * Perform any platform specific runtime setup prior to cold boot exit
+ * from SP_MIN.
+ */
+ sp_min_plat_runtime_setup();
+
+ console_flush();
+}
+
+/******************************************************************************
+ * This function is invoked during warm boot. Invoke the PSCI library
+ * warm boot entry point, which takes care of the architectural and
+ * platform setup/restore. Copy the relevant cpu_context register values
+ * to the smc context, which will get programmed during `smc_exit`.
+ *****************************************************************************/
+void sp_min_warm_boot(void)
+{
+ smc_ctx_t *next_smc_ctx;
+ cpu_context_t *ctx = cm_get_context(NON_SECURE);
+ u_register_t ns_sctlr;
+
+ psci_warmboot_entrypoint();
+
+ smc_set_next_ctx(NON_SECURE);
+
+ next_smc_ctx = smc_get_next_ctx();
+ zeromem(next_smc_ctx, sizeof(smc_ctx_t));
+
+ copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
+ next_smc_ctx);
+
+ /* Temporarily set the NS bit to access NS SCTLR */
+ write_scr(read_scr() | SCR_NS_BIT);
+ isb();
+ ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
+ write_sctlr(ns_sctlr);
+ isb();
+
+ write_scr(read_scr() & ~SCR_NS_BIT);
+ isb();
+}
+
+#if SP_MIN_WITH_SECURE_FIQ
+/******************************************************************************
+ * This function is invoked on secure interrupts. By construction,
+ * SP_MIN can only handle secure interrupts while the core is executing
+ * in the non-secure state.
+ *****************************************************************************/
+void sp_min_fiq(void)
+{
+ uint32_t id;
+
+ id = plat_ic_acknowledge_interrupt();
+ sp_min_plat_fiq_handler(id);
+ plat_ic_end_of_interrupt(id);
+}
+#endif /* SP_MIN_WITH_SECURE_FIQ */
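
sp_min_fiq() above delegates to the platform's sp_min_plat_fiq_handler().
A hypothetical implementation is sketched below; the interrupt ID (29, a
secure timer PPI) is illustrative only:

    #include <stdint.h>

    #include <common/debug.h>

    /* Hypothetical platform handler for secure FIQs acknowledged by
     * sp_min_fiq(). */
    void sp_min_plat_fiq_handler(uint32_t id)
    {
            switch (id) {
            case 29:
                    /* Platform-specific secure timer handling goes here. */
                    break;
            default:
                    WARN("SP_MIN: unexpected FIQ %u\n", id);
                    break;
            }
    }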
diff --git a/bl32/sp_min/sp_min_private.h b/bl32/sp_min/sp_min_private.h
new file mode 100644
index 0000000..628581a
--- /dev/null
+++ b/bl32/sp_min/sp_min_private.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SP_MIN_PRIVATE_H
+#define SP_MIN_PRIVATE_H
+
+void sp_min_main(void);
+void sp_min_warm_boot(void);
+void sp_min_fiq(void);
+
+#endif /* SP_MIN_PRIVATE_H */
diff --git a/bl32/sp_min/wa_cve_2017_5715_bpiall.S b/bl32/sp_min/wa_cve_2017_5715_bpiall.S
new file mode 100644
index 0000000..385f3d4
--- /dev/null
+++ b/bl32/sp_min/wa_cve_2017_5715_bpiall.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl wa_cve_2017_5715_bpiall_vbar
+
+vector_base wa_cve_2017_5715_bpiall_vbar
+ /* We encode the exception entry in the bottom 3 bits of SP */
+ add sp, sp, #1 /* Reset: 0b111 */
+ add sp, sp, #1 /* Undef: 0b110 */
+ add sp, sp, #1 /* Syscall: 0b101 */
+ add sp, sp, #1 /* Prefetch abort: 0b100 */
+ add sp, sp, #1 /* Data abort: 0b011 */
+ add sp, sp, #1 /* Reserved: 0b010 */
+ add sp, sp, #1 /* IRQ: 0b001 */
+ nop /* FIQ: 0b000 */
+
+ /*
+ * Invalidate the branch predictor; `r0` is a dummy register
+ * whose value is ignored.
+ */
+ stcopr r0, BPIALL
+ isb
+
+ /*
+ * As we cannot use any temporary registers and cannot
+ * clobber SP, we can decode the exception entry using
+ * an unrolled binary search.
+ *
+ * Note, if this code is re-used by other secure payloads,
+ * the below exception entry vectors must be changed to
+ * the vectors specific to that secure payload.
+ */
+
+ tst sp, #4
+ bne 1f
+
+ tst sp, #2
+ bne 3f
+
+ /* Expected encoding: 0x1 and 0x0 */
+ tst sp, #1
+ /* Restore original value of SP by clearing the bottom 3 bits */
+ bic sp, sp, #0x7
+ bne plat_panic_handler /* IRQ */
+ b sp_min_handle_fiq /* FIQ */
+
+1:
+ tst sp, #2
+ bne 2f
+
+ /* Expected encoding: 0x4 and 0x5 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne sp_min_handle_smc /* Syscall */
+ b plat_panic_handler /* Prefetch abort */
+
+2:
+ /* Expected encoding: 0x7 and 0x6 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne sp_min_entrypoint /* Reset */
+ b plat_panic_handler /* Undef */
+
+3:
+ /* Expected encoding: 0x2 and 0x3 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne plat_panic_handler /* Data abort */
+ b plat_panic_handler /* Reserved */
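
A worked trace of the SP-encoding trick above: with the Monitor SP 8-byte
aligned on entry (bottom three bits clear), an SMC arrives at vector
slot 2 (offset 0x8) and falls through the remaining slots:

    add sp, sp, #1    /* slot 2: Syscall */
    add sp, sp, #1    /* slot 3: Prefetch abort */
    add sp, sp, #1    /* slot 4: Data abort */
    add sp, sp, #1    /* slot 5: Reserved */
    add sp, sp, #1    /* slot 6: IRQ */
    nop               /* slot 7: FIQ */

SP's bottom bits are now 0b101, matching the "Syscall: 0b101" annotation.
The decode then takes `tst sp, #4` (bit set, branch to 1f), finds bit 1
clear and bit 0 set, and lands in sp_min_handle_smc.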
diff --git a/bl32/sp_min/wa_cve_2017_5715_icache_inv.S b/bl32/sp_min/wa_cve_2017_5715_icache_inv.S
new file mode 100644
index 0000000..d0a4625
--- /dev/null
+++ b/bl32/sp_min/wa_cve_2017_5715_icache_inv.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl wa_cve_2017_5715_icache_inv_vbar
+
+vector_base wa_cve_2017_5715_icache_inv_vbar
+ /* We encode the exception entry in the bottom 3 bits of SP */
+ add sp, sp, #1 /* Reset: 0b111 */
+ add sp, sp, #1 /* Undef: 0b110 */
+ add sp, sp, #1 /* Syscall: 0b101 */
+ add sp, sp, #1 /* Prefetch abort: 0b100 */
+ add sp, sp, #1 /* Data abort: 0b011 */
+ add sp, sp, #1 /* Reserved: 0b010 */
+ add sp, sp, #1 /* IRQ: 0b001 */
+ nop /* FIQ: 0b000 */
+
+ /*
+ * Invalidate the instruction cache, which we assume also
+ * invalidates the branch predictor. This may depend on
+ * other CPU specific changes (e.g. an ACTLR setting).
+ */
+ stcopr r0, ICIALLU
+ isb
+
+ /*
+ * As we cannot use any temporary registers and cannot
+ * clobber SP, we can decode the exception entry using
+ * an unrolled binary search.
+ *
+ * Note, if this code is re-used by other secure payloads,
+ * the below exception entry vectors must be changed to
+ * the vectors specific to that secure payload.
+ */
+
+ tst sp, #4
+ bne 1f
+
+ tst sp, #2
+ bne 3f
+
+ /* Expected encoding: 0x1 and 0x0 */
+ tst sp, #1
+ /* Restore original value of SP by clearing the bottom 3 bits */
+ bic sp, sp, #0x7
+ bne plat_panic_handler /* IRQ */
+ b sp_min_handle_fiq /* FIQ */
+
+1:
+ tst sp, #2
+ bne 2f
+
+ /* Expected encoding: 0x4 and 0x5 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne sp_min_handle_smc /* Syscall */
+ b plat_panic_handler /* Prefetch abort */
+
+2:
+ /* Expected encoding: 0x7 and 0x6 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne sp_min_entrypoint /* Reset */
+ b plat_panic_handler /* Undef */
+
+3:
+ /* Expected encoding: 0x2 and 0x3 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne plat_panic_handler /* Data abort */
+ b plat_panic_handler /* Reserved */