From 102b0d2daa97dae68d3eed54d8fe37a9cc38a892 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 28 Apr 2024 11:13:47 +0200 Subject: Adding upstream version 2.8.0+dfsg. Signed-off-by: Daniel Baumann --- plat/arm/common/aarch32/arm_bl2_mem_params_desc.c | 91 +++++ plat/arm/common/aarch32/arm_helpers.S | 77 ++++ plat/arm/common/aarch64/arm_bl2_mem_params_desc.c | 227 +++++++++++ plat/arm/common/aarch64/arm_helpers.S | 136 +++++++ plat/arm/common/aarch64/arm_pauth.c | 28 ++ plat/arm/common/aarch64/arm_sdei.c | 66 ++++ plat/arm/common/aarch64/execution_state_switch.c | 180 +++++++++ plat/arm/common/arm_bl1_fwu.c | 102 +++++ plat/arm/common/arm_bl1_setup.c | 258 ++++++++++++ plat/arm/common/arm_bl2_el3_setup.c | 100 +++++ plat/arm/common/arm_bl2_setup.c | 322 +++++++++++++++ plat/arm/common/arm_bl2u_setup.c | 97 +++++ plat/arm/common/arm_bl31_setup.c | 435 +++++++++++++++++++++ plat/arm/common/arm_cci.c | 50 +++ plat/arm/common/arm_ccn.c | 57 +++ plat/arm/common/arm_common.c | 243 ++++++++++++ plat/arm/common/arm_common.mk | 456 ++++++++++++++++++++++ plat/arm/common/arm_console.c | 71 ++++ plat/arm/common/arm_dyn_cfg.c | 231 +++++++++++ plat/arm/common/arm_dyn_cfg_helpers.c | 369 +++++++++++++++++ plat/arm/common/arm_err.c | 20 + plat/arm/common/arm_gicv2.c | 114 ++++++ plat/arm/common/arm_gicv3.c | 247 ++++++++++++ plat/arm/common/arm_image_load.c | 141 +++++++ plat/arm/common/arm_io_storage.c | 250 ++++++++++++ plat/arm/common/arm_nor_psci_mem_protect.c | 138 +++++++ plat/arm/common/arm_pm.c | 210 ++++++++++ plat/arm/common/arm_sip_svc.c | 143 +++++++ plat/arm/common/arm_topology.c | 58 +++ plat/arm/common/arm_tzc400.c | 79 ++++ plat/arm/common/arm_tzc_dmc500.c | 79 ++++ plat/arm/common/fconf/arm_fconf_io.c | 381 ++++++++++++++++++ plat/arm/common/fconf/arm_fconf_sp.c | 165 ++++++++ plat/arm/common/fconf/fconf_ethosn_getter.c | 354 +++++++++++++++++ plat/arm/common/fconf/fconf_nv_cntr_getter.c | 62 +++ plat/arm/common/fconf/fconf_sdei_getter.c | 103 +++++ plat/arm/common/fconf/fconf_sec_intr_config.c | 131 +++++++ plat/arm/common/sp_min/arm_sp_min.mk | 20 + plat/arm/common/sp_min/arm_sp_min_setup.c | 241 ++++++++++++ plat/arm/common/trp/arm_trp.mk | 12 + plat/arm/common/trp/arm_trp_setup.c | 72 ++++ plat/arm/common/tsp/arm_tsp.mk | 10 + plat/arm/common/tsp/arm_tsp_setup.c | 86 ++++ 43 files changed, 6712 insertions(+) create mode 100644 plat/arm/common/aarch32/arm_bl2_mem_params_desc.c create mode 100644 plat/arm/common/aarch32/arm_helpers.S create mode 100644 plat/arm/common/aarch64/arm_bl2_mem_params_desc.c create mode 100644 plat/arm/common/aarch64/arm_helpers.S create mode 100644 plat/arm/common/aarch64/arm_pauth.c create mode 100644 plat/arm/common/aarch64/arm_sdei.c create mode 100644 plat/arm/common/aarch64/execution_state_switch.c create mode 100644 plat/arm/common/arm_bl1_fwu.c create mode 100644 plat/arm/common/arm_bl1_setup.c create mode 100644 plat/arm/common/arm_bl2_el3_setup.c create mode 100644 plat/arm/common/arm_bl2_setup.c create mode 100644 plat/arm/common/arm_bl2u_setup.c create mode 100644 plat/arm/common/arm_bl31_setup.c create mode 100644 plat/arm/common/arm_cci.c create mode 100644 plat/arm/common/arm_ccn.c create mode 100644 plat/arm/common/arm_common.c create mode 100644 plat/arm/common/arm_common.mk create mode 100644 plat/arm/common/arm_console.c create mode 100644 plat/arm/common/arm_dyn_cfg.c create mode 100644 plat/arm/common/arm_dyn_cfg_helpers.c create mode 100644 plat/arm/common/arm_err.c create mode 100644 plat/arm/common/arm_gicv2.c create mode 
100644 plat/arm/common/arm_gicv3.c create mode 100644 plat/arm/common/arm_image_load.c create mode 100644 plat/arm/common/arm_io_storage.c create mode 100644 plat/arm/common/arm_nor_psci_mem_protect.c create mode 100644 plat/arm/common/arm_pm.c create mode 100644 plat/arm/common/arm_sip_svc.c create mode 100644 plat/arm/common/arm_topology.c create mode 100644 plat/arm/common/arm_tzc400.c create mode 100644 plat/arm/common/arm_tzc_dmc500.c create mode 100644 plat/arm/common/fconf/arm_fconf_io.c create mode 100644 plat/arm/common/fconf/arm_fconf_sp.c create mode 100644 plat/arm/common/fconf/fconf_ethosn_getter.c create mode 100644 plat/arm/common/fconf/fconf_nv_cntr_getter.c create mode 100644 plat/arm/common/fconf/fconf_sdei_getter.c create mode 100644 plat/arm/common/fconf/fconf_sec_intr_config.c create mode 100644 plat/arm/common/sp_min/arm_sp_min.mk create mode 100644 plat/arm/common/sp_min/arm_sp_min_setup.c create mode 100644 plat/arm/common/trp/arm_trp.mk create mode 100644 plat/arm/common/trp/arm_trp_setup.c create mode 100644 plat/arm/common/tsp/arm_tsp.mk create mode 100644 plat/arm/common/tsp/arm_tsp_setup.c (limited to 'plat/arm/common') diff --git a/plat/arm/common/aarch32/arm_bl2_mem_params_desc.c b/plat/arm/common/aarch32/arm_bl2_mem_params_desc.c new file mode 100644 index 0000000..18f1a37 --- /dev/null +++ b/plat/arm/common/aarch32/arm_bl2_mem_params_desc.c @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include + +/******************************************************************************* + * Following descriptor provides BL image/ep information that gets used + * by BL2 to load the images and also subset of this information is + * passed to next BL image. The image loading sequence is managed by + * populating the images in required loading order. The image execution + * sequence is managed by populating the `next_handoff_image_id` with + * the next executable image id. 
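The scheme this comment describes means that array order gives the load order, while the `next_handoff_image_id` links give the execution order. A stand-alone sketch of how such a chain can be walked, using a reduced, made-up descriptor type (demo_desc, DEMO_INVALID_ID) rather than TF-A's bl_mem_params_node_t or its loader:

#include <stddef.h>
#include <stdio.h>

/* Reduced, illustrative stand-in for a BL image descriptor (assumption). */
struct demo_desc {
    unsigned int image_id;
    unsigned int next_handoff_image_id;
};

#define DEMO_INVALID_ID 0xFFFFFFFFU

/* Array order = load order; next_handoff_image_id = execution order. */
static const struct demo_desc demo_descs[] = {
    { .image_id = 4 /* e.g. BL32 */, .next_handoff_image_id = 5 },
    { .image_id = 5 /* e.g. BL33 */, .next_handoff_image_id = DEMO_INVALID_ID },
};

int main(void)
{
    unsigned int id = demo_descs[0].image_id;

    /* Follow the hand-off chain until the INVALID terminator is reached. */
    while (id != DEMO_INVALID_ID) {
        printf("execute image %u\n", id);

        unsigned int next = DEMO_INVALID_ID;
        for (size_t i = 0; i < sizeof(demo_descs) / sizeof(demo_descs[0]); i++) {
            if (demo_descs[i].image_id == id) {
                next = demo_descs[i].next_handoff_image_id;
                break;
            }
        }
        id = next;
    }
    return 0;
}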
+ ******************************************************************************/ +static bl_mem_params_node_t bl2_mem_params_descs[] = { +#ifdef SCP_BL2_BASE + /* Fill SCP_BL2 related information if it exists */ + { + .image_id = SCP_BL2_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY, + VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY, + VERSION_2, image_info_t, 0), + .image_info.image_base = SCP_BL2_BASE, + .image_info.image_max_size = PLAT_CSS_MAX_SCP_BL2_SIZE, + + .next_handoff_image_id = INVALID_IMAGE_ID, + }, +#endif /* SCP_BL2_BASE */ + + /* Fill BL32 related information */ + { + .image_id = BL32_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, + SECURE | EXECUTABLE | EP_FIRST_EXE), + .ep_info.pc = BL32_BASE, + .ep_info.spsr = SPSR_MODE32(MODE32_mon, SPSR_T_ARM, + SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP), + .image_info.image_base = BL32_BASE, + .image_info.image_max_size = BL32_LIMIT - BL32_BASE, + + .next_handoff_image_id = BL33_IMAGE_ID, + }, + /* Fill HW_CONFIG related information if it exists */ + { + .image_id = HW_CONFIG_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY, + VERSION_2, entry_point_info_t, + NON_SECURE | NON_EXECUTABLE), + SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY, + VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING), + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + /* Fill BL33 related information */ + { + .image_id = BL33_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE), +#ifdef PRELOADED_BL33_BASE + .ep_info.pc = PRELOADED_BL33_BASE, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING), +#else + .ep_info.pc = PLAT_ARM_NS_IMAGE_BASE, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, 0), + .image_info.image_base = PLAT_ARM_NS_IMAGE_BASE, + .image_info.image_max_size = ARM_DRAM1_BASE + ARM_DRAM1_SIZE + - PLAT_ARM_NS_IMAGE_BASE, +#endif /* PRELOADED_BL33_BASE */ + + .next_handoff_image_id = INVALID_IMAGE_ID, + } +}; + +REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs) diff --git a/plat/arm/common/aarch32/arm_helpers.S b/plat/arm/common/aarch32/arm_helpers.S new file mode 100644 index 0000000..1da2d4c --- /dev/null +++ b/plat/arm/common/aarch32/arm_helpers.S @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + + .weak plat_arm_calc_core_pos + .weak plat_my_core_pos + .globl plat_crash_console_init + .globl plat_crash_console_putc + .globl plat_crash_console_flush + + /* ----------------------------------------------------- + * unsigned int plat_my_core_pos(void) + * This function uses the plat_arm_calc_core_pos() + * definition to get the index of the calling CPU. + * ----------------------------------------------------- + */ +func plat_my_core_pos + ldcopr r0, MPIDR + b plat_arm_calc_core_pos +endfunc plat_my_core_pos + + /* ----------------------------------------------------- + * unsigned int plat_arm_calc_core_pos(uint64_t mpidr) + * Helper function to calculate the core position. 
+ * With this function: CorePos = (ClusterId * 4) + + * CoreId + * ----------------------------------------------------- + */ +func plat_arm_calc_core_pos + and r1, r0, #MPIDR_CPU_MASK + and r0, r0, #MPIDR_CLUSTER_MASK + add r0, r1, r0, LSR #6 + bx lr +endfunc plat_arm_calc_core_pos + + /* --------------------------------------------- + * int plat_crash_console_init(void) + * Function to initialize the crash console + * without a C Runtime to print crash report. + * Clobber list : r0 - r3 + * --------------------------------------------- + */ +func plat_crash_console_init + ldr r0, =PLAT_ARM_CRASH_UART_BASE + ldr r1, =PLAT_ARM_CRASH_UART_CLK_IN_HZ + ldr r2, =ARM_CONSOLE_BAUDRATE + b console_pl011_core_init +endfunc plat_crash_console_init + + /* --------------------------------------------- + * int plat_crash_console_putc(int c) + * Function to print a character on the crash + * console without a C Runtime. + * Clobber list : r1 - r2 + * --------------------------------------------- + */ +func plat_crash_console_putc + ldr r1, =PLAT_ARM_CRASH_UART_BASE + b console_pl011_core_putc +endfunc plat_crash_console_putc + + /* --------------------------------------------- + * void plat_crash_console_flush() + * Function to force a write of all buffered + * data that hasn't been output. + * Out : void. + * Clobber list : r0 + * --------------------------------------------- + */ +func plat_crash_console_flush + ldr r0, =PLAT_ARM_CRASH_UART_BASE + b console_pl011_core_flush +endfunc plat_crash_console_flush diff --git a/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c b/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c new file mode 100644 index 0000000..3d7b361 --- /dev/null +++ b/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include + +/******************************************************************************* + * Following descriptor provides BL image/ep information that gets used + * by BL2 to load the images and also subset of this information is + * passed to next BL image. The image loading sequence is managed by + * populating the images in required loading order. The image execution + * sequence is managed by populating the `next_handoff_image_id` with + * the next executable image id. 
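Each descriptor below fills its common param_header_t via SET_STATIC_PARAM_HEAD. A reduced, self-contained approximation of that pattern is sketched here; the demo_* names and the macro body are illustrative stand-ins rather than the definitions from TF-A's headers, though the header fields (type, version, size, attr) follow the exported param_header_t layout.

#include <stdint.h>
#include <stdio.h>

/* Common header carried by every versioned parameter structure. */
typedef struct param_header {
    uint8_t  type;      /* type of the structure */
    uint8_t  version;   /* version of this structure */
    uint16_t size;      /* size of this structure in bytes */
    uint32_t attr;      /* attributes */
} param_header_t;

typedef struct demo_image_info {
    param_header_t h;
    uintptr_t image_base;
    uint32_t image_max_size;
} demo_image_info_t;

/* Illustrative equivalent of a static header initialiser (assumption). */
#define DEMO_SET_STATIC_PARAM_HEAD(_p, _type, _ver, _p_type, _attr) \
    ._p.h.type = (uint8_t)(_type), \
    ._p.h.version = (uint8_t)(_ver), \
    ._p.h.size = (uint16_t)sizeof(_p_type), \
    ._p.h.attr = (uint32_t)(_attr)

struct demo_node {
    demo_image_info_t image_info;
};

static const struct demo_node node = {
    DEMO_SET_STATIC_PARAM_HEAD(image_info, 0x01, 0x02, demo_image_info_t, 0),
    .image_info.image_base = 0x04001000UL,   /* made-up address */
    .image_info.image_max_size = 0x1000U,
};

int main(void)
{
    printf("type=%u version=%u size=%u attr=0x%x\n",
           (unsigned)node.image_info.h.type,
           (unsigned)node.image_info.h.version,
           (unsigned)node.image_info.h.size,
           (unsigned)node.image_info.h.attr);
    return 0;
}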
+ ******************************************************************************/ +static bl_mem_params_node_t bl2_mem_params_descs[] = { +#ifdef SCP_BL2_BASE + /* Fill SCP_BL2 related information if it exists */ + { + .image_id = SCP_BL2_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY, + VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY, + VERSION_2, image_info_t, 0), + .image_info.image_base = SCP_BL2_BASE, + .image_info.image_max_size = PLAT_CSS_MAX_SCP_BL2_SIZE, + + .next_handoff_image_id = INVALID_IMAGE_ID, + }, +#endif /* SCP_BL2_BASE */ + +#ifdef EL3_PAYLOAD_BASE + /* Fill EL3 payload related information (BL31 is EL3 payload)*/ + { + .image_id = BL31_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, + SECURE | EXECUTABLE | EP_FIRST_EXE), + .ep_info.pc = EL3_PAYLOAD_BASE, + .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, + IMAGE_ATTRIB_PLAT_SETUP | IMAGE_ATTRIB_SKIP_LOADING), + + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + +#else /* EL3_PAYLOAD_BASE */ + + /* Fill BL31 related information */ + { + .image_id = BL31_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, + SECURE | EXECUTABLE | EP_FIRST_EXE), + .ep_info.pc = BL31_BASE, + .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS), +#if DEBUG + .ep_info.args.arg3 = ARM_BL31_PLAT_PARAM_VAL, +#endif + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP), + .image_info.image_base = BL31_BASE, + .image_info.image_max_size = BL31_LIMIT - BL31_BASE, + +# if defined(BL32_BASE) + .next_handoff_image_id = BL32_IMAGE_ID, +# elif ENABLE_RME + .next_handoff_image_id = RMM_IMAGE_ID, +# else + .next_handoff_image_id = BL33_IMAGE_ID, +# endif + }, + /* Fill HW_CONFIG related information */ + { + .image_id = HW_CONFIG_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY, + VERSION_2, entry_point_info_t, + NON_SECURE | NON_EXECUTABLE), + SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY, + VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING), + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + /* Fill SOC_FW_CONFIG related information */ + { + .image_id = SOC_FW_CONFIG_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY, + VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE), + SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY, + VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING), + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + +# if ENABLE_RME + /* Fill RMM related information */ + { + .image_id = RMM_IMAGE_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, EP_REALM | EXECUTABLE), + .ep_info.pc = RMM_BASE, + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, 0), + .image_info.image_base = RMM_BASE, + .image_info.image_max_size = RMM_LIMIT - RMM_BASE, + .next_handoff_image_id = BL33_IMAGE_ID, + }, +# endif + +# ifdef BL32_BASE + /* Fill BL32 related information */ + { + .image_id = BL32_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, SECURE | EXECUTABLE), + .ep_info.pc = BL32_BASE, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, 0), + .image_info.image_base = BL32_BASE, + .image_info.image_max_size = BL32_LIMIT - BL32_BASE, + +# if ENABLE_RME + .next_handoff_image_id = RMM_IMAGE_ID, +# else + 
.next_handoff_image_id = BL33_IMAGE_ID, +# endif + }, + + /* + * Fill BL32 external 1 related information. + * A typical use for extra1 image is with OP-TEE where it is the pager + * image. + */ + { + .image_id = BL32_EXTRA1_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING), + .image_info.image_base = BL32_BASE, + .image_info.image_max_size = BL32_LIMIT - BL32_BASE, + + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + + /* + * Fill BL32 external 2 related information. + * A typical use for extra2 image is with OP-TEE where it is the paged + * image. + */ + { + .image_id = BL32_EXTRA2_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE), + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING), +#ifdef SPD_opteed + .image_info.image_base = ARM_OPTEE_PAGEABLE_LOAD_BASE, + .image_info.image_max_size = ARM_OPTEE_PAGEABLE_LOAD_SIZE, +#endif + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + + /* Fill TOS_FW_CONFIG related information */ + { + .image_id = TOS_FW_CONFIG_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY, + VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE), + SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY, + VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING), + .next_handoff_image_id = INVALID_IMAGE_ID, + }, +# endif /* BL32_BASE */ + + /* Fill BL33 related information */ + { + .image_id = BL33_IMAGE_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE), +# ifdef PRELOADED_BL33_BASE + .ep_info.pc = PRELOADED_BL33_BASE, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING), +# else + .ep_info.pc = PLAT_ARM_NS_IMAGE_BASE, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, 0), + .image_info.image_base = PLAT_ARM_NS_IMAGE_BASE, + .image_info.image_max_size = ARM_DRAM1_BASE + ARM_DRAM1_SIZE + - PLAT_ARM_NS_IMAGE_BASE, +# endif /* PRELOADED_BL33_BASE */ + + .next_handoff_image_id = INVALID_IMAGE_ID, + }, + /* Fill NT_FW_CONFIG related information */ + { + .image_id = NT_FW_CONFIG_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY, + VERSION_2, entry_point_info_t, + NON_SECURE | NON_EXECUTABLE), + SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY, + VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING), + .next_handoff_image_id = INVALID_IMAGE_ID, + } +#endif /* EL3_PAYLOAD_BASE */ +}; + +REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs) diff --git a/plat/arm/common/aarch64/arm_helpers.S b/plat/arm/common/aarch64/arm_helpers.S new file mode 100644 index 0000000..b470781 --- /dev/null +++ b/plat/arm/common/aarch64/arm_helpers.S @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include +#include + + .weak plat_arm_calc_core_pos + .weak plat_my_core_pos + .globl plat_crash_console_init + .globl plat_crash_console_putc + .globl plat_crash_console_flush + .globl platform_mem_init + + + /* ----------------------------------------------------- + * unsigned int plat_my_core_pos(void) + * This function uses the plat_arm_calc_core_pos() + * definition to get the index of the calling CPU. 
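The arithmetic behind these helpers is the same on both architectures: CorePos = (ClusterId * 4) + CoreId, obtained by shifting the MPIDR Aff1 field right by 6 rather than 8 so it stays pre-multiplied by 4. A plain-C rendering with a worked value; demo_calc_core_pos is an illustrative name, while the two masks carry the values the assembly helpers use.

#include <stdint.h>
#include <stdio.h>

/* MPIDR affinity-level masks as used by the helpers above. */
#define MPIDR_CPU_MASK      0xFFU       /* Aff0: CPU id within the cluster */
#define MPIDR_CLUSTER_MASK  0xFF00U     /* Aff1: cluster id, bits [15:8] */

/*
 * C equivalent of plat_arm_calc_core_pos():
 * shifting the Aff1 field right by 6 keeps it multiplied by 4,
 * matching "add r0/x0, r1/x1, r0/x0, LSR #6" in the assembly.
 */
static unsigned int demo_calc_core_pos(uint64_t mpidr)
{
    return (unsigned int)((mpidr & MPIDR_CPU_MASK) +
                          ((mpidr & MPIDR_CLUSTER_MASK) >> 6));
}

int main(void)
{
    /* Cluster 1, CPU 2 -> position (1 * 4) + 2 = 6. */
    printf("core pos = %u\n", demo_calc_core_pos(0x0102U));
    return 0;
}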
+ * ----------------------------------------------------- + */ +func plat_my_core_pos + mrs x0, mpidr_el1 + b plat_arm_calc_core_pos +endfunc plat_my_core_pos + + /* ----------------------------------------------------- + * unsigned int plat_arm_calc_core_pos(u_register_t mpidr) + * Helper function to calculate the core position. + * With this function: CorePos = (ClusterId * 4) + + * CoreId + * ----------------------------------------------------- + */ +func plat_arm_calc_core_pos + and x1, x0, #MPIDR_CPU_MASK + and x0, x0, #MPIDR_CLUSTER_MASK + add x0, x1, x0, LSR #6 + ret +endfunc plat_arm_calc_core_pos + + /* --------------------------------------------- + * int plat_crash_console_init(void) + * Function to initialize the crash console + * without a C Runtime to print crash report. + * Clobber list : x0 - x4 + * --------------------------------------------- + */ +func plat_crash_console_init + mov_imm x0, PLAT_ARM_CRASH_UART_BASE + mov_imm x1, PLAT_ARM_CRASH_UART_CLK_IN_HZ + mov_imm x2, ARM_CONSOLE_BAUDRATE + b console_pl011_core_init +endfunc plat_crash_console_init + + /* --------------------------------------------- + * int plat_crash_console_putc(int c) + * Function to print a character on the crash + * console without a C Runtime. + * Clobber list : x1, x2 + * --------------------------------------------- + */ +func plat_crash_console_putc + mov_imm x1, PLAT_ARM_CRASH_UART_BASE + b console_pl011_core_putc +endfunc plat_crash_console_putc + + /* --------------------------------------------- + * void plat_crash_console_flush() + * Function to force a write of all buffered + * data that hasn't been output. + * Out : void. + * Clobber list : r0 + * --------------------------------------------- + */ +func plat_crash_console_flush + mov_imm x0, PLAT_ARM_CRASH_UART_BASE + b console_pl011_core_flush +endfunc plat_crash_console_flush + + /* --------------------------------------------------------------------- + * We don't need to carry out any memory initialization on ARM + * platforms. The Secure RAM is accessible straight away. + * --------------------------------------------------------------------- + */ +func platform_mem_init + ret +endfunc platform_mem_init + +/* + * Need to use coherent stack when ARM Cryptocell is used to autheticate images + * since Cryptocell uses DMA to transfer data and it is not coherent with the + * AP CPU. + */ +#if ARM_CRYPTOCELL_INTEG +#if defined(IMAGE_BL1) || defined(IMAGE_BL2) + .globl plat_get_my_stack + .globl plat_set_my_stack + .local platform_coherent_stacks + + /* ------------------------------------------------------- + * uintptr_t plat_get_my_stack () + * + * For cold-boot BL images, only the primary CPU needs a + * stack. This function returns the stack pointer for a + * stack allocated in coherent memory. + * ------------------------------------------------------- + */ +func plat_get_my_stack + get_up_stack platform_coherent_stacks, PLATFORM_STACK_SIZE + ret +endfunc plat_get_my_stack + + /* ------------------------------------------------------- + * void plat_set_my_stack () + * + * For cold-boot BL images, only the primary CPU needs a + * stack. This function sets the stack pointer to a stack + * allocated in coherent memory. + * ------------------------------------------------------- + */ +func plat_set_my_stack + get_up_stack platform_coherent_stacks, PLATFORM_STACK_SIZE + mov sp, x0 + ret +endfunc plat_set_my_stack + + /* ---------------------------------------------------- + * Single cpu stack in coherent memory. 
+ * ---------------------------------------------------- + */ +declare_stack platform_coherent_stacks, tzfw_coherent_mem, \ + PLATFORM_STACK_SIZE, 1, CACHE_WRITEBACK_GRANULE + +#endif /* defined(IMAGE_BL1) || defined(IMAGE_BL2) */ +#endif /* ARM_CRYPTOCELL_INTEG */ diff --git a/plat/arm/common/aarch64/arm_pauth.c b/plat/arm/common/aarch64/arm_pauth.c new file mode 100644 index 0000000..7cea8a0 --- /dev/null +++ b/plat/arm/common/aarch64/arm_pauth.c @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2019, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include + +/* + * This is only a toy implementation to generate a seemingly random + * 128-bit key from sp, x30 and cntpct_el0 values. + * A production system must re-implement this function to generate + * keys from a reliable randomness source. + */ +uint128_t plat_init_apkey(void) +{ + uint64_t return_addr = (uint64_t)__builtin_return_address(0U); + uint64_t frame_addr = (uint64_t)__builtin_frame_address(0U); + uint64_t cntpct = read_cntpct_el0(); + + /* Generate 128-bit key */ + uint64_t key_lo = (return_addr << 13) ^ frame_addr ^ cntpct; + uint64_t key_hi = (frame_addr << 15) ^ return_addr ^ cntpct; + + return ((uint128_t)(key_hi) << 64) | key_lo; +} diff --git a/plat/arm/common/aarch64/arm_sdei.c b/plat/arm/common/aarch64/arm_sdei.c new file mode 100644 index 0000000..3c74a46 --- /dev/null +++ b/plat/arm/common/aarch64/arm_sdei.c @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/* SDEI configuration for ARM platforms */ + +#include +#include +#include + +#if SDEI_IN_FCONF +#include +#endif +#include +#include + + +#if SDEI_IN_FCONF +/* Private event mappings */ +static sdei_ev_map_t arm_sdei_private[PLAT_SDEI_DP_EVENT_MAX_CNT + 1] = { 0 }; + +/* Shared event mappings */ +static sdei_ev_map_t arm_sdei_shared[PLAT_SDEI_DS_EVENT_MAX_CNT] = { 0 }; + +void plat_sdei_setup(void) +{ + uint32_t i; + + arm_sdei_private[0] = (sdei_ev_map_t)SDEI_DEFINE_EVENT_0(ARM_SDEI_SGI); + + for (i = 0; i < FCONF_GET_PROPERTY(sdei, dyn_config, private_ev_cnt); i++) { + arm_sdei_private[i + 1] = (sdei_ev_map_t)SDEI_PRIVATE_EVENT( + FCONF_GET_PROPERTY(sdei, dyn_config, private_ev_nums[i]), + FCONF_GET_PROPERTY(sdei, dyn_config, private_ev_intrs[i]), + FCONF_GET_PROPERTY(sdei, dyn_config, private_ev_flags[i])); + } + + for (i = 0; i < FCONF_GET_PROPERTY(sdei, dyn_config, shared_ev_cnt); i++) { + arm_sdei_shared[i] = (sdei_ev_map_t)SDEI_SHARED_EVENT( \ + FCONF_GET_PROPERTY(sdei, dyn_config, shared_ev_nums[i]), + FCONF_GET_PROPERTY(sdei, dyn_config, shared_ev_intrs[i]), + FCONF_GET_PROPERTY(sdei, dyn_config, shared_ev_flags[i])); + } + INFO("FCONF: SDEI platform setup\n"); +} +#else +/* Private event mappings */ +static sdei_ev_map_t arm_sdei_private[] = { + PLAT_ARM_PRIVATE_SDEI_EVENTS +}; + +/* Shared event mappings */ +static sdei_ev_map_t arm_sdei_shared[] = { + PLAT_ARM_SHARED_SDEI_EVENTS +}; + +void plat_sdei_setup(void) +{ + INFO("SDEI platform setup\n"); +} +#endif /* SDEI_IN_FCONF */ + +/* Export ARM SDEI events */ +REGISTER_SDEI_MAP(arm_sdei_private, arm_sdei_shared); diff --git a/plat/arm/common/aarch64/execution_state_switch.c b/plat/arm/common/aarch64/execution_state_switch.c new file mode 100644 index 0000000..2353e6a --- /dev/null +++ b/plat/arm/common/aarch64/execution_state_switch.c @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * Handle SMC from a lower exception level to switch its execution state + * (either from AArch64 to AArch32, or vice versa). + * + * smc_fid: + * SMC function ID - either ARM_SIP_SVC_STATE_SWITCH_64 or + * ARM_SIP_SVC_STATE_SWITCH_32. + * pc_hi, pc_lo: + * PC upon re-entry to the calling exception level; width dependent on the + * calling exception level. + * cookie_hi, cookie_lo: + * Opaque pointer pairs received from the caller to pass it back, upon + * re-entry. + * handle: + * Handle to saved context. + */ +int arm_execution_state_switch(unsigned int smc_fid, + uint32_t pc_hi, + uint32_t pc_lo, + uint32_t cookie_hi, + uint32_t cookie_lo, + void *handle) +{ + bool caller_64, thumb = false, from_el2; + unsigned int el, endianness; + u_register_t spsr, pc, scr, sctlr; + entry_point_info_t ep; + cpu_context_t *ctx = (cpu_context_t *) handle; + el3_state_t *el3_ctx = get_el3state_ctx(ctx); + + /* Validate supplied entry point */ + pc = (u_register_t) (((uint64_t) pc_hi << 32) | pc_lo); + if (arm_validate_ns_entrypoint(pc) != 0) + goto invalid_param; + + /* That the SMC originated from NS is already validated by the caller */ + + /* + * Disallow state switch if any of the secondaries have been brought up. + */ + if (psci_secondaries_brought_up() != 0) + goto exec_denied; + + spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3); + caller_64 = (GET_RW(spsr) == MODE_RW_64); + + if (caller_64) { + /* + * If the call originated from AArch64, expect 32-bit pointers when + * switching to AArch32. + */ + if ((pc_hi != 0U) || (cookie_hi != 0U)) + goto invalid_param; + + pc = pc_lo; + + /* Instruction state when entering AArch32 */ + thumb = (pc & 1U) != 0U; + } else { + /* Construct AArch64 PC */ + pc = (((u_register_t) pc_hi) << 32) | pc_lo; + } + + /* Make sure PC is 4-byte aligned, except for Thumb */ + if (((pc & 0x3U) != 0U) && !thumb) + goto invalid_param; + + /* + * EL3 controls register width of the immediate lower EL only. Expect + * this request from EL2/Hyp unless: + * + * - EL2 is not implemented; + * - EL2 is implemented, but was disabled. This can be inferred from + * SCR_EL3.HCE. + */ + from_el2 = caller_64 ? (GET_EL(spsr) == MODE_EL2) : + (GET_M32(spsr) == MODE32_hyp); + scr = read_ctx_reg(el3_ctx, CTX_SCR_EL3); + if (!from_el2) { + /* The call is from NS privilege level other than HYP */ + + /* + * Disallow switching state if there's a Hypervisor in place; + * this request must be taken up with the Hypervisor instead. + */ + if ((scr & SCR_HCE_BIT) != 0U) + goto exec_denied; + } + + /* + * Return to the caller using the same endianness. Extract + * endianness bit from the respective system control register + * directly. + */ + sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1(); + endianness = ((sctlr & SCTLR_EE_BIT) != 0U) ? 1U : 0U; + + /* Construct SPSR for the exception state we're about to switch to */ + if (caller_64) { + unsigned long long impl; + + /* + * Switching from AArch64 to AArch32. Ensure this CPU implements + * the target EL in AArch32. + */ + impl = from_el2 ? el_implemented(2) : el_implemented(1); + if (impl != EL_IMPL_A64_A32) + goto exec_denied; + + /* Return to the equivalent AArch32 privilege level */ + el = from_el2 ? MODE32_hyp : MODE32_svc; + spsr = SPSR_MODE32((u_register_t) el, + thumb ? SPSR_T_THUMB : SPSR_T_ARM, + endianness, DISABLE_ALL_EXCEPTIONS); + } else { + /* + * Switching from AArch32 to AArch64. 
Since it's not possible to + * implement an EL as AArch32-only (from which this call was + * raised), it's safe to assume AArch64 is also implemented. + */ + el = from_el2 ? MODE_EL2 : MODE_EL1; + spsr = SPSR_64((u_register_t) el, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS); + } + + /* + * Use the context management library to re-initialize the existing + * context with the execution state flipped. Since the library takes + * entry_point_info_t pointer as the argument, construct a dummy one + * with PC, state width, endianness, security etc. appropriately set. + * Other entries in the entry point structure are irrelevant for + * purpose. + */ + zeromem(&ep, sizeof(ep)); + ep.pc = pc; + ep.spsr = (uint32_t) spsr; + SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, + ((unsigned int) ((endianness != 0U) ? EP_EE_BIG : + EP_EE_LITTLE) + | NON_SECURE | EP_ST_DISABLE)); + + /* + * Re-initialize the system register context, and exit EL3 as if for the + * first time. State switch is effectively a soft reset of the + * calling EL. + */ + cm_init_my_context(&ep); + cm_prepare_el3_exit_ns(); + + /* + * State switch success. The caller of SMC wouldn't see the SMC + * returning. Instead, execution starts at the supplied entry point, + * with context pointers populated in registers 0 and 1. + */ + SMC_RET2(handle, cookie_hi, cookie_lo); + +invalid_param: + SMC_RET1(handle, STATE_SW_E_PARAM); + +exec_denied: + /* State switch denied */ + SMC_RET1(handle, STATE_SW_E_DENIED); +} diff --git a/plat/arm/common/arm_bl1_fwu.c b/plat/arm/common/arm_bl1_fwu.c new file mode 100644 index 0000000..ce2c356 --- /dev/null +++ b/plat/arm/common/arm_bl1_fwu.c @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#pragma weak bl1_plat_get_image_desc + +/* Struct to keep track of usable memory */ +typedef struct bl1_mem_info { + uintptr_t mem_base; + unsigned int mem_size; +} bl1_mem_info_t; + +static bl1_mem_info_t fwu_addr_map_secure[] = { + { + .mem_base = ARM_SHARED_RAM_BASE, + .mem_size = ARM_SHARED_RAM_SIZE + }, + { + .mem_size = 0 + } +}; + +static bl1_mem_info_t fwu_addr_map_non_secure[] = { + { + .mem_base = ARM_NS_DRAM1_BASE, + .mem_size = ARM_NS_DRAM1_SIZE + }, + { + .mem_base = PLAT_ARM_NVM_BASE, + .mem_size = PLAT_ARM_NVM_SIZE + }, + { + .mem_size = 0 + } +}; + +int bl1_plat_mem_check(uintptr_t mem_base, + unsigned int mem_size, + unsigned int flags) +{ + unsigned int index = 0; + bl1_mem_info_t *mmap; + + assert(mem_base); + assert(mem_size); + /* + * The caller of this function is responsible for checking upfront that + * the end address doesn't overflow. We double-check this in debug + * builds. + */ + assert(!check_uptr_overflow(mem_base, mem_size - 1)); + + /* + * Check the given image source and size. + */ + if (GET_SECURITY_STATE(flags) == SECURE) + mmap = fwu_addr_map_secure; + else + mmap = fwu_addr_map_non_secure; + + while (mmap[index].mem_size) { + if ((mem_base >= mmap[index].mem_base) && + ((mem_base + mem_size) + <= (mmap[index].mem_base + + mmap[index].mem_size))) + return 0; + + index++; + } + + return -ENOMEM; +} + +/******************************************************************************* + * This function does linear search for image_id and returns image_desc. 
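arm_execution_state_switch(), added above, is reached from the normal world through the Arm SiP service. Roughly, a non-secure AArch64 caller issues an SMC with the function ID in x0 and the split PC/cookie values in x1-x4; the sketch below only shows that marshalling. demo_state_switch_smc and the fid argument are illustrative placeholders (the actual ID macro is the one named in the comment above), not TF-A or SMCCC library code.

#include <stdint.h>

/*
 * Illustrative non-secure caller (AArch64, GCC/Clang inline asm):
 * request a state switch to 'pc', passing an opaque 64-bit cookie split
 * into two halves. On failure x0 holds STATE_SW_E_PARAM/STATE_SW_E_DENIED;
 * on success the SMC does not return here and the two cookie words arrive
 * in registers 0 and 1 at the new entry point.
 */
static inline uint64_t demo_state_switch_smc(uint64_t fid, uint32_t pc_hi,
                                             uint32_t pc_lo, uint32_t cookie_hi,
                                             uint32_t cookie_lo)
{
    register uint64_t x0 __asm__("x0") = fid;
    register uint64_t x1 __asm__("x1") = pc_hi;
    register uint64_t x2 __asm__("x2") = pc_lo;
    register uint64_t x3 __asm__("x3") = cookie_hi;
    register uint64_t x4 __asm__("x4") = cookie_lo;

    __asm__ volatile("smc #0"
                     : "+r"(x0)
                     : "r"(x1), "r"(x2), "r"(x3), "r"(x4)
                     : "memory");
    return x0;  /* error code if the switch was refused */
}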
+ ******************************************************************************/ +image_desc_t *bl1_plat_get_image_desc(unsigned int image_id) +{ + unsigned int index = 0; + + while (bl1_tbbr_image_descs[index].image_id != INVALID_IMAGE_ID) { + if (bl1_tbbr_image_descs[index].image_id == image_id) + return &bl1_tbbr_image_descs[index]; + index++; + } + + return NULL; +} diff --git a/plat/arm/common/arm_bl1_setup.c b/plat/arm/common/arm_bl1_setup.c new file mode 100644 index 0000000..7000236 --- /dev/null +++ b/plat/arm/common/arm_bl1_setup.c @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2015-2022, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Weak definitions may be overridden in specific ARM standard platform */ +#pragma weak bl1_early_platform_setup +#pragma weak bl1_plat_arch_setup +#pragma weak bl1_plat_sec_mem_layout +#pragma weak arm_bl1_early_platform_setup +#pragma weak bl1_plat_prepare_exit +#pragma weak bl1_plat_get_next_image_id +#pragma weak plat_arm_bl1_fwu_needed +#pragma weak arm_bl1_plat_arch_setup +#pragma weak arm_bl1_platform_setup + +#define MAP_BL1_TOTAL MAP_REGION_FLAT( \ + bl1_tzram_layout.total_base, \ + bl1_tzram_layout.total_size, \ + MT_MEMORY | MT_RW | EL3_PAS) +/* + * If SEPARATE_CODE_AND_RODATA=1 we define a region for each section + * otherwise one region is defined containing both + */ +#if SEPARATE_CODE_AND_RODATA +#define MAP_BL1_RO MAP_REGION_FLAT( \ + BL_CODE_BASE, \ + BL1_CODE_END - BL_CODE_BASE, \ + MT_CODE | EL3_PAS), \ + MAP_REGION_FLAT( \ + BL1_RO_DATA_BASE, \ + BL1_RO_DATA_END \ + - BL_RO_DATA_BASE, \ + MT_RO_DATA | EL3_PAS) +#else +#define MAP_BL1_RO MAP_REGION_FLAT( \ + BL_CODE_BASE, \ + BL1_CODE_END - BL_CODE_BASE, \ + MT_CODE | EL3_PAS) +#endif + +/* Data structure which holds the extents of the trusted SRAM for BL1*/ +static meminfo_t bl1_tzram_layout; + +/* Boolean variable to hold condition whether firmware update needed or not */ +static bool is_fwu_needed; + +struct meminfo *bl1_plat_sec_mem_layout(void) +{ + return &bl1_tzram_layout; +} + +/******************************************************************************* + * BL1 specific platform actions shared between ARM standard platforms. + ******************************************************************************/ +void arm_bl1_early_platform_setup(void) +{ + +#if !ARM_DISABLE_TRUSTED_WDOG + /* Enable watchdog */ + plat_arm_secure_wdt_start(); +#endif + + /* Initialize the console to provide early debug support */ + arm_console_boot_init(); + + /* Allow BL1 to see the whole Trusted RAM */ + bl1_tzram_layout.total_base = ARM_BL_RAM_BASE; + bl1_tzram_layout.total_size = ARM_BL_RAM_SIZE; +} + +void bl1_early_platform_setup(void) +{ + arm_bl1_early_platform_setup(); + + /* + * Initialize Interconnect for this cluster during cold boot. + * No need for locks as no other CPU is active. + */ + plat_arm_interconnect_init(); + /* + * Enable Interconnect coherency for the primary CPU's cluster. + */ + plat_arm_interconnect_enter_coherency(); +} + +/****************************************************************************** + * Perform the very early platform specific architecture setup shared between + * ARM standard platforms. This only does basic initialization. Later + * architectural setup (bl1_arch_setup()) does not do anything platform + * specific. 
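Most of the hooks in this file are published as weak symbols (see the #pragma weak list near the top) precisely so a platform port can supply its own definition and have it win at link time. A minimal sketch of that pattern, with a made-up hook name:

/* common/weak_hooks.c -- default, weak implementation (illustrative name) */
#pragma weak demo_plat_hook

int demo_plat_hook(void)
{
    return 0;   /* generic default used when no platform override exists */
}

/* plat/myboard/override.c -- platform-specific override, hypothetical file */
int demo_plat_hook(void)
{
    return 42;  /* strong definition replaces the weak one at link time */
}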
+ *****************************************************************************/ +void arm_bl1_plat_arch_setup(void) +{ +#if USE_COHERENT_MEM && !ARM_CRYPTOCELL_INTEG + /* + * Ensure ARM platforms don't use coherent memory in BL1 unless + * cryptocell integration is enabled. + */ + assert((BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE) == 0U); +#endif + + const mmap_region_t bl_regions[] = { + MAP_BL1_TOTAL, + MAP_BL1_RO, +#if USE_ROMLIB + ARM_MAP_ROMLIB_CODE, + ARM_MAP_ROMLIB_DATA, +#endif +#if ARM_CRYPTOCELL_INTEG + ARM_MAP_BL_COHERENT_RAM, +#endif + {0} + }; + + setup_page_tables(bl_regions, plat_arm_get_mmap()); +#ifdef __aarch64__ + enable_mmu_el3(0); +#else + enable_mmu_svc_mon(0); +#endif /* __aarch64__ */ + + arm_setup_romlib(); +} + +void bl1_plat_arch_setup(void) +{ + arm_bl1_plat_arch_setup(); +} + +/* + * Perform the platform specific architecture setup shared between + * ARM standard platforms. + */ +void arm_bl1_platform_setup(void) +{ + const struct dyn_cfg_dtb_info_t *fw_config_info; + image_desc_t *desc; + uint32_t fw_config_max_size; + int err = -1; + + /* Initialise the IO layer and register platform IO devices */ + plat_arm_io_setup(); + + /* Check if we need FWU before further processing */ + is_fwu_needed = plat_arm_bl1_fwu_needed(); + if (is_fwu_needed) { + ERROR("Skip platform setup as FWU detected\n"); + return; + } + + /* Set global DTB info for fixed fw_config information */ + fw_config_max_size = ARM_FW_CONFIG_LIMIT - ARM_FW_CONFIG_BASE; + set_config_info(ARM_FW_CONFIG_BASE, ~0UL, fw_config_max_size, FW_CONFIG_ID); + + /* Fill the device tree information struct with the info from the config dtb */ + err = fconf_load_config(FW_CONFIG_ID); + if (err < 0) { + ERROR("Loading of FW_CONFIG failed %d\n", err); + plat_error_handler(err); + } + + /* + * FW_CONFIG loaded successfully. If FW_CONFIG device tree parsing + * is successful then load TB_FW_CONFIG device tree. + */ + fw_config_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, FW_CONFIG_ID); + if (fw_config_info != NULL) { + err = fconf_populate_dtb_registry(fw_config_info->config_addr); + if (err < 0) { + ERROR("Parsing of FW_CONFIG failed %d\n", err); + plat_error_handler(err); + } + /* load TB_FW_CONFIG */ + err = fconf_load_config(TB_FW_CONFIG_ID); + if (err < 0) { + ERROR("Loading of TB_FW_CONFIG failed %d\n", err); + plat_error_handler(err); + } + } else { + ERROR("Invalid FW_CONFIG address\n"); + plat_error_handler(err); + } + + /* The BL2 ep_info arg0 is modified to point to FW_CONFIG */ + desc = bl1_plat_get_image_desc(BL2_IMAGE_ID); + assert(desc != NULL); + desc->ep_info.args.arg0 = fw_config_info->config_addr; + +#if CRYPTO_SUPPORT + /* Share the Mbed TLS heap info with other images */ + arm_bl1_set_mbedtls_heap(); +#endif /* CRYPTO_SUPPORT */ + + /* + * Allow access to the System counter timer module and program + * counter frequency for non secure images during FWU + */ +#ifdef ARM_SYS_TIMCTL_BASE + arm_configure_sys_timer(); +#endif +#if (ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_GENERIC_TIMER) + write_cntfrq_el0(plat_get_syscnt_freq2()); +#endif +} + +void bl1_plat_prepare_exit(entry_point_info_t *ep_info) +{ +#if !ARM_DISABLE_TRUSTED_WDOG + /* Disable watchdog before leaving BL1 */ + plat_arm_secure_wdt_stop(); +#endif + +#ifdef EL3_PAYLOAD_BASE + /* + * Program the EL3 payload's entry point address into the CPUs mailbox + * in order to release secondary CPUs from their holding pen and make + * them jump there. 
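The comment above describes one half of the secondary-CPU release handshake: the primary publishes the entry point in a mailbox, makes it visible, and issues sev(). The other half is a holding-pen loop on each secondary. A simplified sketch, with an assumed mailbox address and layout (the real ones are platform-specific):

#include <stdint.h>

/* Assumed: a single shared mailbox word, cleared to 0 before release. */
static volatile uint64_t *const demo_mailbox = (uint64_t *)0x04000000UL;

/* Secondary CPU side: sleep until the primary publishes an entry point. */
static void demo_holding_pen(void)
{
    uint64_t entrypoint;

    while ((entrypoint = *demo_mailbox) == 0UL) {
        __asm__ volatile("wfe");    /* woken by the primary's sev() */
    }

    ((void (*)(void))entrypoint)(); /* jump to the released address */
}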
+ */ + plat_arm_program_trusted_mailbox(ep_info->pc); + dsbsy(); + sev(); +#endif +} + +/* + * On Arm platforms, the FWU process is triggered when the FIP image has + * been tampered with. + */ +bool plat_arm_bl1_fwu_needed(void) +{ + return !arm_io_is_toc_valid(); +} + +/******************************************************************************* + * The following function checks if Firmware update is needed, + * by checking if TOC in FIP image is valid or not. + ******************************************************************************/ +unsigned int bl1_plat_get_next_image_id(void) +{ + return is_fwu_needed ? NS_BL1U_IMAGE_ID : BL2_IMAGE_ID; +} diff --git a/plat/arm/common/arm_bl2_el3_setup.c b/plat/arm/common/arm_bl2_el3_setup.c new file mode 100644 index 0000000..97b5a88 --- /dev/null +++ b/plat/arm/common/arm_bl2_el3_setup.c @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include +#include +#include + +#pragma weak bl2_el3_early_platform_setup +#pragma weak bl2_el3_plat_arch_setup +#pragma weak bl2_el3_plat_prepare_exit + +#define MAP_BL2_EL3_TOTAL MAP_REGION_FLAT( \ + bl2_el3_tzram_layout.total_base, \ + bl2_el3_tzram_layout.total_size, \ + MT_MEMORY | MT_RW | MT_SECURE) + +static meminfo_t bl2_el3_tzram_layout; + +/* + * Perform arm specific early platform setup. At this moment we only initialize + * the console and the memory layout. + */ +void arm_bl2_el3_early_platform_setup(void) +{ + /* Initialize the console to provide early debug support */ + arm_console_boot_init(); + + /* + * Allow BL2 to see the whole Trusted RAM. This is determined + * statically since we cannot rely on BL1 passing this information + * in the BL2_AT_EL3 case. + */ + bl2_el3_tzram_layout.total_base = ARM_BL_RAM_BASE; + bl2_el3_tzram_layout.total_size = ARM_BL_RAM_SIZE; + + /* Initialise the IO layer and register platform IO devices */ + plat_arm_io_setup(); +} + +void bl2_el3_early_platform_setup(u_register_t arg0 __unused, + u_register_t arg1 __unused, + u_register_t arg2 __unused, + u_register_t arg3 __unused) +{ + arm_bl2_el3_early_platform_setup(); + + /* + * Initialize Interconnect for this cluster during cold boot. + * No need for locks as no other CPU is active. + */ + plat_arm_interconnect_init(); + /* + * Enable Interconnect coherency for the primary CPU's cluster. + */ + plat_arm_interconnect_enter_coherency(); + + generic_delay_timer_init(); +} + +/******************************************************************************* + * Perform the very early platform specific architectural setup here. At the + * moment this is only initializes the mmu in a quick and dirty way. 
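MAP_BL2_EL3_TOTAL above is built with MAP_REGION_FLAT, i.e. an identity (VA == PA) mapping of the whole BL2 Trusted RAM with the given memory attributes, which is the natural choice for firmware that runs with virtual addresses equal to physical ones. A reduced model of what such a flat region descriptor carries is sketched below; the demo_region struct and DEMO_* macros are illustrations, not the xlat library's mmap_region_t.

#include <stdint.h>
#include <stddef.h>

/* Reduced stand-in for a "flat" translation region descriptor (assumption). */
struct demo_region {
    uintptr_t base_pa;
    uintptr_t base_va;
    size_t size;
    unsigned int attr;
};

/* Flat mapping: virtual address equals physical address. */
#define DEMO_MAP_REGION_FLAT(_adr, _sz, _attr) \
    { .base_pa = (_adr), .base_va = (_adr), .size = (_sz), .attr = (_attr) }

#define DEMO_MT_MEMORY  (1U << 0)
#define DEMO_MT_RW      (1U << 1)
#define DEMO_MT_SECURE  (1U << 2)

/* Equivalent in spirit to MAP_BL2_EL3_TOTAL, with made-up numbers. */
static const struct demo_region demo_bl2_total =
    DEMO_MAP_REGION_FLAT(0x04001000UL, 0x0003F000UL,
                         DEMO_MT_MEMORY | DEMO_MT_RW | DEMO_MT_SECURE);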
+ ******************************************************************************/ +void arm_bl2_el3_plat_arch_setup(void) +{ + +#if USE_COHERENT_MEM + /* Ensure ARM platforms dont use coherent memory in BL2_AT_EL3 */ + assert(BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE == 0U); +#endif + + const mmap_region_t bl_regions[] = { + MAP_BL2_EL3_TOTAL, + ARM_MAP_BL_RO, + {0} + }; + + setup_page_tables(bl_regions, plat_arm_get_mmap()); + +#ifdef __aarch64__ + enable_mmu_el3(0); +#else + enable_mmu_svc_mon(0); +#endif +} + +void bl2_el3_plat_arch_setup(void) +{ + arm_bl2_el3_plat_arch_setup(); +} + +void bl2_el3_plat_prepare_exit(void) +{ +} diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c new file mode 100644 index 0000000..08c014d --- /dev/null +++ b/plat/arm/common/arm_bl2_setup.c @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if ENABLE_RME +#include +#endif /* ENABLE_RME */ +#ifdef SPD_opteed +#include +#endif +#include +#if ENABLE_RME +#include +#endif /* ENABLE_RME */ +#include +#include + +/* Data structure which holds the extents of the trusted SRAM for BL2 */ +static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE); + +/* Base address of fw_config received from BL1 */ +static uintptr_t config_base; + +/* + * Check that BL2_BASE is above ARM_FW_CONFIG_LIMIT. This reserved page is + * for `meminfo_t` data structure and fw_configs passed from BL1. + */ +CASSERT(BL2_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl2_base_overflows); + +/* Weak definitions may be overridden in specific ARM standard platform */ +#pragma weak bl2_early_platform_setup2 +#pragma weak bl2_platform_setup +#pragma weak bl2_plat_arch_setup +#pragma weak bl2_plat_sec_mem_layout + +#if ENABLE_RME +#define MAP_BL2_TOTAL MAP_REGION_FLAT( \ + bl2_tzram_layout.total_base, \ + bl2_tzram_layout.total_size, \ + MT_MEMORY | MT_RW | MT_ROOT) +#else +#define MAP_BL2_TOTAL MAP_REGION_FLAT( \ + bl2_tzram_layout.total_base, \ + bl2_tzram_layout.total_size, \ + MT_MEMORY | MT_RW | MT_SECURE) +#endif /* ENABLE_RME */ + +#pragma weak arm_bl2_plat_handle_post_image_load + +/******************************************************************************* + * BL1 has passed the extents of the trusted SRAM that should be visible to BL2 + * in x0. This memory layout is sitting at the base of the free trusted SRAM. + * Copy it to a safe location before its reclaimed by later BL2 functionality. + ******************************************************************************/ +void arm_bl2_early_platform_setup(uintptr_t fw_config, + struct meminfo *mem_layout) +{ + /* Initialize the console to provide early debug support */ + arm_console_boot_init(); + + /* Setup the BL2 memory layout */ + bl2_tzram_layout = *mem_layout; + + config_base = fw_config; + + /* Initialise the IO layer and register platform IO devices */ + plat_arm_io_setup(); + + /* Load partition table */ +#if ARM_GPT_SUPPORT + partition_init(GPT_IMAGE_ID); +#endif /* ARM_GPT_SUPPORT */ + +} + +void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1, u_register_t arg2, u_register_t arg3) +{ + arm_bl2_early_platform_setup((uintptr_t)arg0, (meminfo_t *)arg1); + + generic_delay_timer_init(); +} + +/* + * Perform BL2 preload setup. Currently we initialise the dynamic + * configuration here. 
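The CASSERT(BL2_BASE >= ARM_FW_CONFIG_LIMIT, ...) check earlier in this file turns the layout requirement into a build failure rather than a runtime surprise. The same effect can be obtained with C11 _Static_assert, or with the classic negative-array-size trick that CASSERT-style macros are usually built on; the DEMO_CASSERT definition below is a sketch of that idiom, not TF-A's macro.

/* C11: preferred when available. */
_Static_assert(sizeof(void *) == 8, "64-bit build expected");

/* Pre-C11 sketch of a CASSERT-style check (illustrative definition). */
#define DEMO_CASSERT(cond, msg) \
    typedef char msg[(cond) ? 1 : -1]

DEMO_CASSERT(sizeof(int) >= 4, demo_int_is_at_least_32_bit);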
+ */ +void bl2_plat_preload_setup(void) +{ + arm_bl2_dyn_cfg_init(); + +#if ARM_GPT_SUPPORT && !PSA_FWU_SUPPORT + /* Always use the FIP from bank 0 */ + arm_set_fip_addr(0U); +#endif /* ARM_GPT_SUPPORT && !PSA_FWU_SUPPORT */ +} + +/* + * Perform ARM standard platform setup. + */ +void arm_bl2_platform_setup(void) +{ +#if !ENABLE_RME + /* Initialize the secure environment */ + plat_arm_security_setup(); +#endif + +#if defined(PLAT_ARM_MEM_PROT_ADDR) + arm_nor_psci_do_static_mem_protect(); +#endif +} + +void bl2_platform_setup(void) +{ + arm_bl2_platform_setup(); +} + +#if ENABLE_RME + +static void arm_bl2_plat_gpt_setup(void) +{ + /* + * The GPT library might modify the gpt regions structure to optimize + * the layout, so the array cannot be constant. + */ + pas_region_t pas_regions[] = { + ARM_PAS_KERNEL, + ARM_PAS_SECURE, + ARM_PAS_REALM, + ARM_PAS_EL3_DRAM, + ARM_PAS_GPTS + }; + + /* Initialize entire protected space to GPT_GPI_ANY. */ + if (gpt_init_l0_tables(GPCCR_PPS_4GB, ARM_L0_GPT_ADDR_BASE, + ARM_L0_GPT_SIZE) < 0) { + ERROR("gpt_init_l0_tables() failed!\n"); + panic(); + } + + /* Carve out defined PAS ranges. */ + if (gpt_init_pas_l1_tables(GPCCR_PGS_4K, + ARM_L1_GPT_ADDR_BASE, + ARM_L1_GPT_SIZE, + pas_regions, + (unsigned int)(sizeof(pas_regions) / + sizeof(pas_region_t))) < 0) { + ERROR("gpt_init_pas_l1_tables() failed!\n"); + panic(); + } + + INFO("Enabling Granule Protection Checks\n"); + if (gpt_enable() < 0) { + ERROR("gpt_enable() failed!\n"); + panic(); + } +} + +#endif /* ENABLE_RME */ + +/******************************************************************************* + * Perform the very early platform specific architectural setup here. + * When RME is enabled the secure environment is initialised before + * initialising and enabling Granule Protection. + * This function initialises the MMU in a quick and dirty way. + ******************************************************************************/ +void arm_bl2_plat_arch_setup(void) +{ +#if USE_COHERENT_MEM && !ARM_CRYPTOCELL_INTEG + /* + * Ensure ARM platforms don't use coherent memory in BL2 unless + * cryptocell integration is enabled. + */ + assert((BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE) == 0U); +#endif + + const mmap_region_t bl_regions[] = { + MAP_BL2_TOTAL, + ARM_MAP_BL_RO, +#if USE_ROMLIB + ARM_MAP_ROMLIB_CODE, + ARM_MAP_ROMLIB_DATA, +#endif +#if ARM_CRYPTOCELL_INTEG + ARM_MAP_BL_COHERENT_RAM, +#endif + ARM_MAP_BL_CONFIG_REGION, +#if ENABLE_RME + ARM_MAP_L0_GPT_REGION, +#endif + {0} + }; + +#if ENABLE_RME + /* Initialise the secure environment */ + plat_arm_security_setup(); +#endif + setup_page_tables(bl_regions, plat_arm_get_mmap()); + +#ifdef __aarch64__ +#if ENABLE_RME + /* BL2 runs in EL3 when RME enabled. */ + assert(get_armv9_2_feat_rme_support() != 0U); + enable_mmu_el3(0); + + /* Initialise and enable granule protection after MMU. 
*/ + arm_bl2_plat_gpt_setup(); +#else + enable_mmu_el1(0); +#endif +#else + enable_mmu_svc_mon(0); +#endif + + arm_setup_romlib(); +} + +void bl2_plat_arch_setup(void) +{ + const struct dyn_cfg_dtb_info_t *tb_fw_config_info; + + arm_bl2_plat_arch_setup(); + + /* Fill the properties struct with the info from the config dtb */ + fconf_populate("FW_CONFIG", config_base); + + /* TB_FW_CONFIG was also loaded by BL1 */ + tb_fw_config_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TB_FW_CONFIG_ID); + assert(tb_fw_config_info != NULL); + + fconf_populate("TB_FW", tb_fw_config_info->config_addr); +} + +int arm_bl2_handle_post_image_load(unsigned int image_id) +{ + int err = 0; + bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id); +#ifdef SPD_opteed + bl_mem_params_node_t *pager_mem_params = NULL; + bl_mem_params_node_t *paged_mem_params = NULL; +#endif + assert(bl_mem_params != NULL); + + switch (image_id) { +#ifdef __aarch64__ + case BL32_IMAGE_ID: +#ifdef SPD_opteed + pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID); + assert(pager_mem_params); + + paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID); + assert(paged_mem_params); + + err = parse_optee_header(&bl_mem_params->ep_info, + &pager_mem_params->image_info, + &paged_mem_params->image_info); + if (err != 0) { + WARN("OPTEE header parse error.\n"); + } +#endif + bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl32_entry(); + break; +#endif + + case BL33_IMAGE_ID: + /* BL33 expects to receive the primary CPU MPID (through r0) */ + bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr(); + bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl33_entry(); + break; + +#ifdef SCP_BL2_BASE + case SCP_BL2_IMAGE_ID: + /* The subsequent handling of SCP_BL2 is platform specific */ + err = plat_arm_bl2_handle_scp_bl2(&bl_mem_params->image_info); + if (err) { + WARN("Failure in platform-specific handling of SCP_BL2 image.\n"); + } + break; +#endif + default: + /* Do nothing in default case */ + break; + } + + return err; +} + +/******************************************************************************* + * This function can be used by the platforms to update/use image + * information for given `image_id`. + ******************************************************************************/ +int arm_bl2_plat_handle_post_image_load(unsigned int image_id) +{ +#if defined(SPD_spmd) && BL2_ENABLE_SP_LOAD + /* For Secure Partitions we don't need post processing */ + if ((image_id >= (MAX_NUMBER_IDS - MAX_SP_IDS)) && + (image_id < MAX_NUMBER_IDS)) { + return 0; + } +#endif + return arm_bl2_handle_post_image_load(image_id); +} + +int bl2_plat_handle_post_image_load(unsigned int image_id) +{ + return arm_bl2_plat_handle_post_image_load(image_id); +} diff --git a/plat/arm/common/arm_bl2u_setup.c b/plat/arm/common/arm_bl2u_setup.c new file mode 100644 index 0000000..3614c7d --- /dev/null +++ b/plat/arm/common/arm_bl2u_setup.c @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include + +#include + +#include +#include +#include +#include +#include + +/* Weak definitions may be overridden in specific ARM standard platform */ +#pragma weak bl2u_platform_setup +#pragma weak bl2u_early_platform_setup +#pragma weak bl2u_plat_arch_setup + +#define MAP_BL2U_TOTAL MAP_REGION_FLAT( \ + BL2U_BASE, \ + BL2U_LIMIT - BL2U_BASE, \ + MT_MEMORY | MT_RW | MT_SECURE) + +/* + * Perform ARM standard platform setup for BL2U + */ +void arm_bl2u_platform_setup(void) +{ + /* Initialize the secure environment */ + plat_arm_security_setup(); +} + +void bl2u_platform_setup(void) +{ + arm_bl2u_platform_setup(); +} + +void arm_bl2u_early_platform_setup(struct meminfo *mem_layout, void *plat_info) +{ + /* Initialize the console to provide early debug support */ + arm_console_boot_init(); + + generic_delay_timer_init(); +} + +/******************************************************************************* + * BL1 can pass platform dependent information to BL2U in x1. + * In case of ARM CSS platforms x1 contains SCP_BL2U image info. + * In case of ARM FVP platforms x1 is not used. + * In both cases, x0 contains the extents of the memory available to BL2U + ******************************************************************************/ +void bl2u_early_platform_setup(struct meminfo *mem_layout, void *plat_info) +{ + arm_bl2u_early_platform_setup(mem_layout, plat_info); +} + +/******************************************************************************* + * Perform the very early platform specific architectural setup here. At the + * moment this is only initializes the mmu in a quick and dirty way. + * The memory that is used by BL2U is only mapped. + ******************************************************************************/ +void arm_bl2u_plat_arch_setup(void) +{ + +#if USE_COHERENT_MEM + /* Ensure ARM platforms dont use coherent memory in BL2U */ + assert((BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE) == 0U); +#endif + + const mmap_region_t bl_regions[] = { + MAP_BL2U_TOTAL, + ARM_MAP_BL_RO, +#if USE_ROMLIB + ARM_MAP_ROMLIB_CODE, + ARM_MAP_ROMLIB_DATA, +#endif + {0} + }; + + setup_page_tables(bl_regions, plat_arm_get_mmap()); + +#ifdef __aarch64__ + enable_mmu_el1(0); +#else + enable_mmu_svc_mon(0); +#endif + arm_setup_romlib(); +} + +void bl2u_plat_arch_setup(void) +{ + arm_bl2u_plat_arch_setup(); +} diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c new file mode 100644 index 0000000..cf403b1 --- /dev/null +++ b/plat/arm/common/arm_bl31_setup.c @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2015-2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#if ENABLE_RME +#include +#endif +#include +#include +#include +#include +#include + +/* + * Placeholder variables for copying the arguments that have been passed to + * BL31 from BL2. + */ +static entry_point_info_t bl32_image_ep_info; +static entry_point_info_t bl33_image_ep_info; +#if ENABLE_RME +static entry_point_info_t rmm_image_ep_info; +#endif + +#if !RESET_TO_BL31 +/* + * Check that BL31_BASE is above ARM_FW_CONFIG_LIMIT. The reserved page + * is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2. 
+ */ +CASSERT(BL31_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl31_base_overflows); +#endif + +/* Weak definitions may be overridden in specific ARM standard platform */ +#pragma weak bl31_early_platform_setup2 +#pragma weak bl31_platform_setup +#pragma weak bl31_plat_arch_setup +#pragma weak bl31_plat_get_next_image_ep_info + +#define MAP_BL31_TOTAL MAP_REGION_FLAT( \ + BL31_START, \ + BL31_END - BL31_START, \ + MT_MEMORY | MT_RW | EL3_PAS) +#if RECLAIM_INIT_CODE +IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE); +IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED); +IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED); + +#define BL_INIT_CODE_END ((BL_CODE_END_UNALIGNED + PAGE_SIZE - 1) & \ + ~(PAGE_SIZE - 1)) +#define BL_STACKS_END ((BL_STACKS_END_UNALIGNED + PAGE_SIZE - 1) & \ + ~(PAGE_SIZE - 1)) + +#define MAP_BL_INIT_CODE MAP_REGION_FLAT( \ + BL_INIT_CODE_BASE, \ + BL_INIT_CODE_END \ + - BL_INIT_CODE_BASE, \ + MT_CODE | EL3_PAS) +#endif + +#if SEPARATE_NOBITS_REGION +#define MAP_BL31_NOBITS MAP_REGION_FLAT( \ + BL31_NOBITS_BASE, \ + BL31_NOBITS_LIMIT \ + - BL31_NOBITS_BASE, \ + MT_MEMORY | MT_RW | EL3_PAS) + +#endif +/******************************************************************************* + * Return a pointer to the 'entry_point_info' structure of the next image for the + * security state specified. BL33 corresponds to the non-secure image type + * while BL32 corresponds to the secure image type. A NULL pointer is returned + * if the image does not exist. + ******************************************************************************/ +struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type) +{ + entry_point_info_t *next_image_info; + + assert(sec_state_is_valid(type)); + if (type == NON_SECURE) { + next_image_info = &bl33_image_ep_info; + } +#if ENABLE_RME + else if (type == REALM) { + next_image_info = &rmm_image_ep_info; + } +#endif + else { + next_image_info = &bl32_image_ep_info; + } + + /* + * None of the images on the ARM development platforms can have 0x0 + * as the entrypoint + */ + if (next_image_info->pc) + return next_image_info; + else + return NULL; +} + +/******************************************************************************* + * Perform any BL31 early platform setup common to ARM standard platforms. + * Here is an opportunity to copy parameters passed by the calling EL (S-EL1 + * in BL2 & EL3 in BL1) before they are lost (potentially). This needs to be + * done before the MMU is initialized so that the memory layout can be used + * while creating page tables. BL2 has flushed this information to memory, so + * we are guaranteed to pick up good data. 
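BL_INIT_CODE_END and BL_STACKS_END above use the usual round-up-to-boundary idiom, (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1), which relies on PAGE_SIZE being a power of two. A small worked check; demo_page_align_up and the 4 KiB page size are illustrative.

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

/* Round x up to the next DEMO_PAGE_SIZE boundary (power-of-two sizes only). */
static uint64_t demo_page_align_up(uint64_t x)
{
    return (x + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1);
}

int main(void)
{
    /* 0x40001234 -> 0x40002000 */
    printf("0x%llx\n", (unsigned long long)demo_page_align_up(0x40001234ULL));
    /* already aligned: 0x40002000 -> 0x40002000 */
    printf("0x%llx\n", (unsigned long long)demo_page_align_up(0x40002000ULL));
    return 0;
}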
+ ******************************************************************************/ +void __init arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_config, + uintptr_t hw_config, void *plat_params_from_bl2) +{ + /* Initialize the console to provide early debug support */ + arm_console_boot_init(); + +#if RESET_TO_BL31 + /* There are no parameters from BL2 if BL31 is a reset vector */ + assert(from_bl2 == NULL); + assert(plat_params_from_bl2 == NULL); + +# ifdef BL32_BASE + /* Populate entry point information for BL32 */ + SET_PARAM_HEAD(&bl32_image_ep_info, + PARAM_EP, + VERSION_1, + 0); + SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE); + bl32_image_ep_info.pc = BL32_BASE; + bl32_image_ep_info.spsr = arm_get_spsr_for_bl32_entry(); + +#if defined(SPD_spmd) + /* SPM (hafnium in secure world) expects SPM Core manifest base address + * in x0, which in !RESET_TO_BL31 case loaded after base of non shared + * SRAM(after 4KB offset of SRAM). But in RESET_TO_BL31 case all non + * shared SRAM is allocated to BL31, so to avoid overwriting of manifest + * keep it in the last page. + */ + bl32_image_ep_info.args.arg0 = ARM_TRUSTED_SRAM_BASE + + PLAT_ARM_TRUSTED_SRAM_SIZE - PAGE_SIZE; +#endif + +# endif /* BL32_BASE */ + + /* Populate entry point information for BL33 */ + SET_PARAM_HEAD(&bl33_image_ep_info, + PARAM_EP, + VERSION_1, + 0); + /* + * Tell BL31 where the non-trusted software image + * is located and the entry state information + */ + bl33_image_ep_info.pc = plat_get_ns_image_entrypoint(); + + bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry(); + SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); + +#if ENABLE_RME + /* + * Populate entry point information for RMM. + * Only PC needs to be set as other fields are determined by RMMD. + */ + rmm_image_ep_info.pc = RMM_BASE; +#endif /* ENABLE_RME */ + +#else /* RESET_TO_BL31 */ + + /* + * In debug builds, we pass a special value in 'plat_params_from_bl2' + * to verify platform parameters from BL2 to BL31. + * In release builds, it's not used. + */ + assert(((unsigned long long)plat_params_from_bl2) == + ARM_BL31_PLAT_PARAM_VAL); + + /* + * Check params passed from BL2 should not be NULL, + */ + bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2; + assert(params_from_bl2 != NULL); + assert(params_from_bl2->h.type == PARAM_BL_PARAMS); + assert(params_from_bl2->h.version >= VERSION_2); + + bl_params_node_t *bl_params = params_from_bl2->head; + + /* + * Copy BL33, BL32 and RMM (if present), entry point information. + * They are stored in Secure RAM, in BL2's address space. + */ + while (bl_params != NULL) { + if (bl_params->image_id == BL32_IMAGE_ID) { + bl32_image_ep_info = *bl_params->ep_info; + } +#if ENABLE_RME + else if (bl_params->image_id == RMM_IMAGE_ID) { + rmm_image_ep_info = *bl_params->ep_info; + } +#endif + else if (bl_params->image_id == BL33_IMAGE_ID) { + bl33_image_ep_info = *bl_params->ep_info; + } + + bl_params = bl_params->next_params_info; + } + + if (bl33_image_ep_info.pc == 0U) + panic(); +#if ENABLE_RME + if (rmm_image_ep_info.pc == 0U) + panic(); +#endif +#endif /* RESET_TO_BL31 */ + +# if ARM_LINUX_KERNEL_AS_BL33 + /* + * According to the file ``Documentation/arm64/booting.txt`` of the + * Linux kernel tree, Linux expects the physical address of the device + * tree blob (DTB) in x0, while x1-x3 are reserved for future use and + * must be 0. + * Repurpose the option to load Hafnium hypervisor in the normal world. + * It expects its manifest address in x0. 
This is essentially the linux + * dts (passed to the primary VM) by adding 'hypervisor' and chosen + * nodes specifying the Hypervisor configuration. + */ +#if RESET_TO_BL31 + bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE; +#else + bl33_image_ep_info.args.arg0 = (u_register_t)hw_config; +#endif + bl33_image_ep_info.args.arg1 = 0U; + bl33_image_ep_info.args.arg2 = 0U; + bl33_image_ep_info.args.arg3 = 0U; +# endif +} + +void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + arm_bl31_early_platform_setup((void *)arg0, arg1, arg2, (void *)arg3); + + /* + * Initialize Interconnect for this cluster during cold boot. + * No need for locks as no other CPU is active. + */ + plat_arm_interconnect_init(); + + /* + * Enable Interconnect coherency for the primary CPU's cluster. + * Earlier bootloader stages might already do this (e.g. Trusted + * Firmware's BL1 does it) but we can't assume so. There is no harm in + * executing this code twice anyway. + * Platform specific PSCI code will enable coherency for other + * clusters. + */ + plat_arm_interconnect_enter_coherency(); +} + +/******************************************************************************* + * Perform any BL31 platform setup common to ARM standard platforms + ******************************************************************************/ +void arm_bl31_platform_setup(void) +{ + /* Initialize the GIC driver, cpu and distributor interfaces */ + plat_arm_gic_driver_init(); + plat_arm_gic_init(); + +#if RESET_TO_BL31 + /* + * Do initial security configuration to allow DRAM/device access + * (if earlier BL has not already done so). + */ + plat_arm_security_setup(); + +#if defined(PLAT_ARM_MEM_PROT_ADDR) + arm_nor_psci_do_dyn_mem_protect(); +#endif /* PLAT_ARM_MEM_PROT_ADDR */ + +#endif /* RESET_TO_BL31 */ + + /* Enable and initialize the System level generic timer */ + mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF, + CNTCR_FCREQ(0U) | CNTCR_EN); + + /* Allow access to the System counter timer module */ + arm_configure_sys_timer(); + + /* Initialize power controller before setting up topology */ + plat_arm_pwrc_setup(); + +#if RAS_EXTENSION + ras_init(); +#endif + +#if USE_DEBUGFS + debugfs_init(); +#endif /* USE_DEBUGFS */ +} + +/******************************************************************************* + * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM + * standard platforms + * Perform BL31 platform setup + ******************************************************************************/ +void arm_bl31_plat_runtime_setup(void) +{ + console_switch_state(CONSOLE_FLAG_RUNTIME); + + /* Initialize the runtime console */ + arm_console_runtime_init(); + +#if RECLAIM_INIT_CODE + arm_free_init_memory(); +#endif + +#if PLAT_RO_XLAT_TABLES + arm_xlat_make_tables_readonly(); +#endif +} + +#if RECLAIM_INIT_CODE +/* + * Make memory for image boot time code RW to reclaim it as stack for the + * secondary cores, or RO where it cannot be reclaimed: + * + * |-------- INIT SECTION --------| + * ----------------------------------------- + * | CORE 0 | CORE 1 | CORE 2 | EXTRA | + * | STACK | STACK | STACK | SPACE | + * ----------------------------------------- + * <-------------------> <------> + * MAKE RW AND XN MAKE + * FOR STACKS RO AND XN + */ +void arm_free_init_memory(void) +{ + int ret = 0; + + if (BL_STACKS_END < BL_INIT_CODE_END) { + /* Reclaim some of the init section as stack if possible. 
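For clarity, the BL_INIT_CODE_END and BL_STACKS_END definitions above use the standard align-up idiom. A worked example with a made-up address and PAGE_SIZE = 0x1000:

/* (0x0401f834 + 0xfff) & ~0xfff == 0x04020000, i.e. the unaligned end of the
 * init code is rounded up to the next page boundary before being remapped. */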
*/ + if (BL_INIT_CODE_BASE < BL_STACKS_END) { + ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE, + BL_STACKS_END - BL_INIT_CODE_BASE, + MT_RW_DATA); + } + /* Make the rest of the init section read-only. */ + ret |= xlat_change_mem_attributes(BL_STACKS_END, + BL_INIT_CODE_END - BL_STACKS_END, + MT_RO_DATA); + } else { + /* The stacks cover the init section, so reclaim it all. */ + ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE, + BL_INIT_CODE_END - BL_INIT_CODE_BASE, + MT_RW_DATA); + } + + if (ret != 0) { + ERROR("Could not reclaim initialization code"); + panic(); + } +} +#endif + +void __init bl31_platform_setup(void) +{ + arm_bl31_platform_setup(); +} + +void bl31_plat_runtime_setup(void) +{ + arm_bl31_plat_runtime_setup(); +} + +/******************************************************************************* + * Perform the very early platform specific architectural setup shared between + * ARM standard platforms. This only does basic initialization. Later + * architectural setup (bl31_arch_setup()) does not do anything platform + * specific. + ******************************************************************************/ +void __init arm_bl31_plat_arch_setup(void) +{ + const mmap_region_t bl_regions[] = { + MAP_BL31_TOTAL, +#if ENABLE_RME + ARM_MAP_L0_GPT_REGION, +#endif +#if RECLAIM_INIT_CODE + MAP_BL_INIT_CODE, +#endif +#if SEPARATE_NOBITS_REGION + MAP_BL31_NOBITS, +#endif + ARM_MAP_BL_RO, +#if USE_ROMLIB + ARM_MAP_ROMLIB_CODE, + ARM_MAP_ROMLIB_DATA, +#endif +#if USE_COHERENT_MEM + ARM_MAP_BL_COHERENT_RAM, +#endif + {0} + }; + + setup_page_tables(bl_regions, plat_arm_get_mmap()); + + enable_mmu_el3(0); + +#if ENABLE_RME + /* + * Initialise Granule Protection library and enable GPC for the primary + * processor. The tables have already been initialized by a previous BL + * stage, so there is no need to provide any PAS here. This function + * sets up pointers to those tables. + */ + if (gpt_runtime_init() < 0) { + ERROR("gpt_runtime_init() failed!\n"); + panic(); + } +#endif /* ENABLE_RME */ + + arm_setup_romlib(); +} + +void __init bl31_plat_arch_setup(void) +{ + arm_bl31_plat_arch_setup(); +} diff --git a/plat/arm/common/arm_cci.c b/plat/arm/common/arm_cci.c new file mode 100644 index 0000000..3795fc5 --- /dev/null +++ b/plat/arm/common/arm_cci.c @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include +#include +#include + +static const int cci_map[] = { + PLAT_ARM_CCI_CLUSTER0_SL_IFACE_IX, + PLAT_ARM_CCI_CLUSTER1_SL_IFACE_IX +}; + +/****************************************************************************** + * The following functions are defined as weak to allow a platform to override + * the way ARM CCI driver is initialised and used. + *****************************************************************************/ +#pragma weak plat_arm_interconnect_init +#pragma weak plat_arm_interconnect_enter_coherency +#pragma weak plat_arm_interconnect_exit_coherency + + +/****************************************************************************** + * Helper function to initialize ARM CCI driver. 
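The slave-interface indices in cci_map[] come from the platform port. A hypothetical platform_def.h fragment with example values (not taken from this patch):

/* Hypothetical two-cluster CCI-400 topology. */
#define PLAT_ARM_CCI_BASE			0x2c090000
#define PLAT_ARM_CCI_CLUSTER0_SL_IFACE_IX	3	/* slave i/f of cluster 0 */
#define PLAT_ARM_CCI_CLUSTER1_SL_IFACE_IX	4	/* slave i/f of cluster 1 */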
+ *****************************************************************************/ +void __init plat_arm_interconnect_init(void) +{ + cci_init(PLAT_ARM_CCI_BASE, cci_map, ARRAY_SIZE(cci_map)); +} + +/****************************************************************************** + * Helper function to place current master into coherency + *****************************************************************************/ +void plat_arm_interconnect_enter_coherency(void) +{ + cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1())); +} + +/****************************************************************************** + * Helper function to remove current master from coherency + *****************************************************************************/ +void plat_arm_interconnect_exit_coherency(void) +{ + cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1())); +} diff --git a/plat/arm/common/arm_ccn.c b/plat/arm/common/arm_ccn.c new file mode 100644 index 0000000..2e681ca --- /dev/null +++ b/plat/arm/common/arm_ccn.c @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include +#include + +static const unsigned char master_to_rn_id_map[] = { + PLAT_ARM_CLUSTER_TO_CCN_ID_MAP +}; + +static const ccn_desc_t arm_ccn_desc = { + .periphbase = PLAT_ARM_CCN_BASE, + .num_masters = ARRAY_SIZE(master_to_rn_id_map), + .master_to_rn_id_map = master_to_rn_id_map +}; + +CASSERT(PLAT_ARM_CLUSTER_COUNT == ARRAY_SIZE(master_to_rn_id_map), + assert_invalid_cluster_count_for_ccn_variant); + +/****************************************************************************** + * The following functions are defined as weak to allow a platform to override + * the way ARM CCN driver is initialised and used. + *****************************************************************************/ +#pragma weak plat_arm_interconnect_init +#pragma weak plat_arm_interconnect_enter_coherency +#pragma weak plat_arm_interconnect_exit_coherency + + +/****************************************************************************** + * Helper function to initialize ARM CCN driver. + *****************************************************************************/ +void __init plat_arm_interconnect_init(void) +{ + ccn_init(&arm_ccn_desc); +} + +/****************************************************************************** + * Helper function to place current master into coherency + *****************************************************************************/ +void plat_arm_interconnect_enter_coherency(void) +{ + ccn_enter_snoop_dvm_domain(1 << MPIDR_AFFLVL1_VAL(read_mpidr_el1())); +} + +/****************************************************************************** + * Helper function to remove current master from coherency + *****************************************************************************/ +void plat_arm_interconnect_exit_coherency(void) +{ + ccn_exit_snoop_dvm_domain(1 << MPIDR_AFFLVL1_VAL(read_mpidr_el1())); +} diff --git a/plat/arm/common/arm_common.c b/plat/arm/common/arm_common.c new file mode 100644 index 0000000..fc68114 --- /dev/null +++ b/plat/arm/common/arm_common.c @@ -0,0 +1,243 @@ +/* + * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Weak definitions may be overridden in specific ARM standard platform */ +#pragma weak plat_get_ns_image_entrypoint +#pragma weak plat_arm_get_mmap + +/* Conditionally provide a weak definition of plat_get_syscnt_freq2 to avoid + * conflicts with the definition in plat/common. */ +#pragma weak plat_get_syscnt_freq2 + +/* Get ARM SOC-ID */ +#pragma weak plat_arm_get_soc_id + +/******************************************************************************* + * Changes the memory attributes for the region of mapped memory where the BL + * image's translation tables are located such that the tables will have + * read-only permissions. + ******************************************************************************/ +#if PLAT_RO_XLAT_TABLES +void arm_xlat_make_tables_readonly(void) +{ + int rc = xlat_make_tables_readonly(); + + if (rc != 0) { + ERROR("Failed to make translation tables read-only at EL%u.\n", + get_current_el()); + panic(); + } + + INFO("Translation tables are now read-only at EL%u.\n", + get_current_el()); +} +#endif + +void arm_setup_romlib(void) +{ +#if USE_ROMLIB + if (!rom_lib_init(ROMLIB_VERSION)) + panic(); +#endif +} + +uintptr_t plat_get_ns_image_entrypoint(void) +{ +#ifdef PRELOADED_BL33_BASE + return PRELOADED_BL33_BASE; +#else + return PLAT_ARM_NS_IMAGE_BASE; +#endif +} + +/******************************************************************************* + * Gets SPSR for BL32 entry + ******************************************************************************/ +uint32_t arm_get_spsr_for_bl32_entry(void) +{ + /* + * The Secure Payload Dispatcher service is responsible for + * setting the SPSR prior to entry into the BL32 image. + */ + return 0; +} + +/******************************************************************************* + * Gets SPSR for BL33 entry + ******************************************************************************/ +#ifdef __aarch64__ +uint32_t arm_get_spsr_for_bl33_entry(void) +{ + unsigned int mode; + uint32_t spsr; + + /* Figure out what mode we enter the non-secure world in */ + mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1; + + /* + * TODO: Consider the possibility of specifying the SPSR in + * the FIP ToC and allowing the platform to have a say as + * well. + */ + spsr = SPSR_64((uint64_t)mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + return spsr; +} +#else +/******************************************************************************* + * Gets SPSR for BL33 entry + ******************************************************************************/ +uint32_t arm_get_spsr_for_bl33_entry(void) +{ + unsigned int hyp_status, mode, spsr; + + hyp_status = GET_VIRT_EXT(read_id_pfr1()); + + mode = (hyp_status) ? MODE32_hyp : MODE32_svc; + + /* + * TODO: Consider the possibility of specifying the SPSR in + * the FIP ToC and allowing the platform to have a say as + * well. + */ + spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1, + SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS); + return spsr; +} +#endif /* __aarch64__ */ + +/******************************************************************************* + * Configures access to the system counter timer module. 
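As a worked value for arm_get_spsr_for_bl33_entry() on AArch64: when EL2 is implemented, SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS) selects AArch64 state, EL2h and a fully masked DAIF field, which with TF-A's encodings evaluates to:

/* (MODE_RW_64 << 4) | (MODE_EL2 << 2) | MODE_SP_ELX | (0xf << 6) == 0x3c9 */
uint32_t spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);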
+ ******************************************************************************/ +#ifdef ARM_SYS_TIMCTL_BASE +void arm_configure_sys_timer(void) +{ + unsigned int reg_val; + + /* Read the frequency of the system counter */ + unsigned int freq_val = plat_get_syscnt_freq2(); + +#if ARM_CONFIG_CNTACR + reg_val = (1U << CNTACR_RPCT_SHIFT) | (1U << CNTACR_RVCT_SHIFT); + reg_val |= (1U << CNTACR_RFRQ_SHIFT) | (1U << CNTACR_RVOFF_SHIFT); + reg_val |= (1U << CNTACR_RWVT_SHIFT) | (1U << CNTACR_RWPT_SHIFT); + mmio_write_32(ARM_SYS_TIMCTL_BASE + CNTACR_BASE(PLAT_ARM_NSTIMER_FRAME_ID), reg_val); +#endif /* ARM_CONFIG_CNTACR */ + + reg_val = (1U << CNTNSAR_NS_SHIFT(PLAT_ARM_NSTIMER_FRAME_ID)); + mmio_write_32(ARM_SYS_TIMCTL_BASE + CNTNSAR, reg_val); + + /* + * Initialize CNTFRQ register in CNTCTLBase frame. The CNTFRQ + * system register initialized during psci_arch_setup() is different + * from this and has to be updated independently. + */ + mmio_write_32(ARM_SYS_TIMCTL_BASE + CNTCTLBASE_CNTFRQ, freq_val); + +#if defined(PLAT_juno) || defined(PLAT_n1sdp) || defined(PLAT_morello) + /* + * Initialize CNTFRQ register in Non-secure CNTBase frame. + * This is required for Juno, N1SDP and Morello because they do not + * follow ARM ARM in that the value updated in CNTFRQ is not + * reflected in CNTBASEN_CNTFRQ. Hence update the value manually. + */ + mmio_write_32(ARM_SYS_CNT_BASE_NS + CNTBASEN_CNTFRQ, freq_val); +#endif +} +#endif /* ARM_SYS_TIMCTL_BASE */ + +/******************************************************************************* + * Returns ARM platform specific memory map regions. + ******************************************************************************/ +const mmap_region_t *plat_arm_get_mmap(void) +{ + return plat_arm_mmap; +} + +#ifdef ARM_SYS_CNTCTL_BASE + +unsigned int plat_get_syscnt_freq2(void) +{ + unsigned int counter_base_frequency; + + /* Read the frequency from Frequency modes table */ + counter_base_frequency = mmio_read_32(ARM_SYS_CNTCTL_BASE + CNTFID_OFF); + + /* The first entry of the frequency modes table must not be 0 */ + if (counter_base_frequency == 0U) + panic(); + + return counter_base_frequency; +} + +#endif /* ARM_SYS_CNTCTL_BASE */ + +#if SDEI_SUPPORT +/* + * Translate SDEI entry point to PA, and perform standard ARM entry point + * validation on it. + */ +int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode) +{ + uint64_t par, pa; + u_register_t scr_el3; + + /* Doing Non-secure address translation requires SCR_EL3.NS set */ + scr_el3 = read_scr_el3(); + write_scr_el3(scr_el3 | SCR_NS_BIT); + isb(); + + assert((client_mode == MODE_EL2) || (client_mode == MODE_EL1)); + if (client_mode == MODE_EL2) { + /* + * Translate entry point to Physical Address using the EL2 + * translation regime. + */ + ats1e2r(ep); + } else { + /* + * Translate entry point to Physical Address using the EL1&0 + * translation regime, including stage 2. 
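A quick worked value for the CNTACR programming above: RPCT, RVCT, RFRQ, RVOFF, RWVT and RWPT are bits 0 to 5, so the selected timer frame is granted full access with:

/* reg_val == 0x3f: read access to the counters and frequency register plus
 * write access to the virtual and physical timer registers of the frame. */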
+ */ + AT(ats12e1r, ep); + } + isb(); + par = read_par_el1(); + + /* Restore original SCRL_EL3 */ + write_scr_el3(scr_el3); + isb(); + + /* If the translation resulted in fault, return failure */ + if ((par & PAR_F_MASK) != 0) + return -1; + + /* Extract Physical Address from PAR */ + pa = (par & (PAR_ADDR_MASK << PAR_ADDR_SHIFT)); + + /* Perform NS entry point validation on the physical address */ + return arm_validate_ns_entrypoint(pa); +} +#endif + +const mmap_region_t *plat_get_addr_mmap(void) +{ + return plat_arm_mmap; +} diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk new file mode 100644 index 0000000..7162ce9 --- /dev/null +++ b/plat/arm/common/arm_common.mk @@ -0,0 +1,456 @@ +# +# Copyright (c) 2015-2022, Arm Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +include common/fdt_wrappers.mk + +ifeq (${ARCH}, aarch64) + # On ARM standard platorms, the TSP can execute from Trusted SRAM, Trusted + # DRAM (if available) or the TZC secured area of DRAM. + # TZC secured DRAM is the default. + + ARM_TSP_RAM_LOCATION ?= dram + + ifeq (${ARM_TSP_RAM_LOCATION}, tsram) + ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_SRAM_ID + else ifeq (${ARM_TSP_RAM_LOCATION}, tdram) + ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_DRAM_ID + else ifeq (${ARM_TSP_RAM_LOCATION}, dram) + ARM_TSP_RAM_LOCATION_ID = ARM_DRAM_ID + else + $(error "Unsupported ARM_TSP_RAM_LOCATION value") + endif + + # Process flags + # Process ARM_BL31_IN_DRAM flag + ARM_BL31_IN_DRAM := 0 + $(eval $(call assert_boolean,ARM_BL31_IN_DRAM)) + $(eval $(call add_define,ARM_BL31_IN_DRAM)) +else + ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_SRAM_ID +endif + +$(eval $(call add_define,ARM_TSP_RAM_LOCATION_ID)) + + +# For the original power-state parameter format, the State-ID can be encoded +# according to the recommended encoding or zero. This flag determines which +# State-ID encoding to be parsed. +ARM_RECOM_STATE_ID_ENC := 0 + +# If the PSCI_EXTENDED_STATE_ID is set, then ARM_RECOM_STATE_ID_ENC need to +# be set. Else throw a build error. +ifeq (${PSCI_EXTENDED_STATE_ID}, 1) + ifeq (${ARM_RECOM_STATE_ID_ENC}, 0) + $(error Build option ARM_RECOM_STATE_ID_ENC needs to be set if \ + PSCI_EXTENDED_STATE_ID is set for ARM platforms) + endif +endif + +# Process ARM_RECOM_STATE_ID_ENC flag +$(eval $(call assert_boolean,ARM_RECOM_STATE_ID_ENC)) +$(eval $(call add_define,ARM_RECOM_STATE_ID_ENC)) + +# Process ARM_DISABLE_TRUSTED_WDOG flag +# By default, Trusted Watchdog is always enabled unless +# SPIN_ON_BL1_EXIT or ENABLE_RME is set +ARM_DISABLE_TRUSTED_WDOG := 0 +ifneq ($(filter 1,${SPIN_ON_BL1_EXIT} ${ENABLE_RME}),) +ARM_DISABLE_TRUSTED_WDOG := 1 +endif +$(eval $(call assert_boolean,ARM_DISABLE_TRUSTED_WDOG)) +$(eval $(call add_define,ARM_DISABLE_TRUSTED_WDOG)) + +# Process ARM_CONFIG_CNTACR +ARM_CONFIG_CNTACR := 1 +$(eval $(call assert_boolean,ARM_CONFIG_CNTACR)) +$(eval $(call add_define,ARM_CONFIG_CNTACR)) + +# Process ARM_BL31_IN_DRAM flag +ARM_BL31_IN_DRAM := 0 +$(eval $(call assert_boolean,ARM_BL31_IN_DRAM)) +$(eval $(call add_define,ARM_BL31_IN_DRAM)) + +# As per CCA security model, all root firmware must execute from on-chip secure +# memory. This means we must not run BL31 from TZC-protected DRAM. +ifeq (${ARM_BL31_IN_DRAM},1) + ifeq (${ENABLE_RME},1) + $(error "BL31 must not run from DRAM on RME-systems. 
Please set ARM_BL31_IN_DRAM to 0") + endif +endif + +# Process ARM_PLAT_MT flag +ARM_PLAT_MT := 0 +$(eval $(call assert_boolean,ARM_PLAT_MT)) +$(eval $(call add_define,ARM_PLAT_MT)) + +# Use translation tables library v2 by default +ARM_XLAT_TABLES_LIB_V1 := 0 +$(eval $(call assert_boolean,ARM_XLAT_TABLES_LIB_V1)) +$(eval $(call add_define,ARM_XLAT_TABLES_LIB_V1)) + +# Don't have the Linux kernel as a BL33 image by default +ARM_LINUX_KERNEL_AS_BL33 := 0 +$(eval $(call assert_boolean,ARM_LINUX_KERNEL_AS_BL33)) +$(eval $(call add_define,ARM_LINUX_KERNEL_AS_BL33)) + +ifeq (${ARM_LINUX_KERNEL_AS_BL33},1) + ifneq (${ARCH},aarch64) + ifneq (${RESET_TO_SP_MIN},1) + $(error "ARM_LINUX_KERNEL_AS_BL33 is only available if RESET_TO_SP_MIN=1.") + endif + endif + ifndef PRELOADED_BL33_BASE + $(error "PRELOADED_BL33_BASE must be set if ARM_LINUX_KERNEL_AS_BL33 is used.") + endif + ifeq (${RESET_TO_BL31},1) + ifndef ARM_PRELOADED_DTB_BASE + $(error "ARM_PRELOADED_DTB_BASE must be set if ARM_LINUX_KERNEL_AS_BL33 is + used with RESET_TO_BL31.") + endif + $(eval $(call add_define,ARM_PRELOADED_DTB_BASE)) + endif +endif + +# Arm(R) Ethos(TM)-N NPU SiP service +ARM_ETHOSN_NPU_DRIVER := 0 +$(eval $(call assert_boolean,ARM_ETHOSN_NPU_DRIVER)) +$(eval $(call add_define,ARM_ETHOSN_NPU_DRIVER)) + +# Use an implementation of SHA-256 with a smaller memory footprint but reduced +# speed. +$(eval $(call add_define,MBEDTLS_SHA256_SMALLER)) + +# Add the build options to pack Trusted OS Extra1 and Trusted OS Extra2 images +# in the FIP if the platform requires. +ifneq ($(BL32_EXTRA1),) +$(eval $(call TOOL_ADD_IMG,bl32_extra1,--tos-fw-extra1)) +endif +ifneq ($(BL32_EXTRA2),) +$(eval $(call TOOL_ADD_IMG,bl32_extra2,--tos-fw-extra2)) +endif + +# Enable PSCI_STAT_COUNT/RESIDENCY APIs on ARM platforms +ENABLE_PSCI_STAT := 1 +ENABLE_PMF := 1 + +# Override the standard libc with optimised libc_asm +OVERRIDE_LIBC := 1 +ifeq (${OVERRIDE_LIBC},1) + include lib/libc/libc_asm.mk +endif + +# On ARM platforms, separate the code and read-only data sections to allow +# mapping the former as executable and the latter as execute-never. +SEPARATE_CODE_AND_RODATA := 1 + +# On ARM platforms, disable SEPARATE_NOBITS_REGION by default. Both PROGBITS +# and NOBITS sections of BL31 image are adjacent to each other and loaded +# into Trusted SRAM. +SEPARATE_NOBITS_REGION := 0 + +# In order to support SEPARATE_NOBITS_REGION for Arm platforms, we need to load +# BL31 PROGBITS into secure DRAM space and BL31 NOBITS into SRAM. Hence mandate +# the build to require that ARM_BL31_IN_DRAM is enabled as well. 
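Each $(eval $(call add_define,...)) above exposes the corresponding make variable to the C sources as a 0/1 preprocessor symbol; that is how the C files earlier in this patch test the build options. An illustrative consumer, mirroring arm_bl31_setup.c:

/* Illustrative only: consuming a flag surfaced via add_define. */
#if ARM_LINUX_KERNEL_AS_BL33
	/* Linux as BL33 expects the DTB address in x0. */
	bl33_image_ep_info.args.arg0 = (u_register_t)hw_config;
#endif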
+ifeq ($(SEPARATE_NOBITS_REGION),1) + ifneq ($(ARM_BL31_IN_DRAM),1) + $(error For SEPARATE_NOBITS_REGION, ARM_BL31_IN_DRAM must be enabled) + endif + ifneq ($(RECLAIM_INIT_CODE),0) + $(error For SEPARATE_NOBITS_REGION, RECLAIM_INIT_CODE cannot be supported) + endif +endif + +# Disable ARM Cryptocell by default +ARM_CRYPTOCELL_INTEG := 0 +$(eval $(call assert_boolean,ARM_CRYPTOCELL_INTEG)) +$(eval $(call add_define,ARM_CRYPTOCELL_INTEG)) + +# Enable PIE support for RESET_TO_BL31/RESET_TO_SP_MIN case +ifneq ($(filter 1,${RESET_TO_BL31} ${RESET_TO_SP_MIN}),) + ENABLE_PIE := 1 +endif + +# CryptoCell integration relies on coherent buffers for passing data from +# the AP CPU to the CryptoCell +ifeq (${ARM_CRYPTOCELL_INTEG},1) + ifeq (${USE_COHERENT_MEM},0) + $(error "ARM_CRYPTOCELL_INTEG needs USE_COHERENT_MEM to be set.") + endif +endif + +# Disable GPT parser support, use FIP image by default +ARM_GPT_SUPPORT := 0 +$(eval $(call assert_boolean,ARM_GPT_SUPPORT)) +$(eval $(call add_define,ARM_GPT_SUPPORT)) + +# Include necessary sources to parse GPT image +ifeq (${ARM_GPT_SUPPORT}, 1) + BL2_SOURCES += drivers/partition/gpt.c \ + drivers/partition/partition.c +endif + +# Enable CRC instructions via extension for ARMv8-A CPUs. +# For ARMv8.1-A, and onwards CRC instructions are default enabled. +# Enable HW computed CRC support unconditionally in BL2 component. +ifeq (${ARM_ARCH_MAJOR},8) + ifeq (${ARM_ARCH_MINOR},0) + BL2_CPPFLAGS += -march=armv8-a+crc + endif +endif + +ifeq ($(PSA_FWU_SUPPORT),1) + # GPT support is recommended as per PSA FWU specification hence + # PSA FWU implementation is tightly coupled with GPT support, + # and it does not support other formats. + ifneq ($(ARM_GPT_SUPPORT),1) + $(error For PSA_FWU_SUPPORT, ARM_GPT_SUPPORT must be enabled) + endif + FWU_MK := drivers/fwu/fwu.mk + $(info Including ${FWU_MK}) + include ${FWU_MK} +endif + +ifeq (${ARCH}, aarch64) +PLAT_INCLUDES += -Iinclude/plat/arm/common/aarch64 +endif + +PLAT_BL_COMMON_SOURCES += plat/arm/common/${ARCH}/arm_helpers.S \ + plat/arm/common/arm_common.c \ + plat/arm/common/arm_console.c + +ifeq (${ARM_XLAT_TABLES_LIB_V1}, 1) +PLAT_BL_COMMON_SOURCES += lib/xlat_tables/xlat_tables_common.c \ + lib/xlat_tables/${ARCH}/xlat_tables.c +else +ifeq (${XLAT_MPU_LIB_V1}, 1) +include lib/xlat_mpu/xlat_mpu.mk +PLAT_BL_COMMON_SOURCES += ${XLAT_MPU_LIB_V1_SRCS} +else +include lib/xlat_tables_v2/xlat_tables.mk +PLAT_BL_COMMON_SOURCES += ${XLAT_TABLES_LIB_SRCS} +endif +endif + +ARM_IO_SOURCES += plat/arm/common/arm_io_storage.c \ + plat/arm/common/fconf/arm_fconf_io.c +ifeq (${SPD},spmd) + ifeq (${BL2_ENABLE_SP_LOAD},1) + ARM_IO_SOURCES += plat/arm/common/fconf/arm_fconf_sp.c + endif +endif + +BL1_SOURCES += drivers/io/io_fip.c \ + drivers/io/io_memmap.c \ + drivers/io/io_storage.c \ + plat/arm/common/arm_bl1_setup.c \ + plat/arm/common/arm_err.c \ + ${ARM_IO_SOURCES} + +ifdef EL3_PAYLOAD_BASE +# Need the plat_arm_program_trusted_mailbox() function to release secondary CPUs from +# their holding pen +BL1_SOURCES += plat/arm/common/arm_pm.c +endif + +BL2_SOURCES += drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + drivers/io/io_fip.c \ + drivers/io/io_memmap.c \ + drivers/io/io_storage.c \ + plat/arm/common/arm_bl2_setup.c \ + plat/arm/common/arm_err.c \ + common/tf_crc32.c \ + ${ARM_IO_SOURCES} + +# Firmware Configuration Framework sources +include lib/fconf/fconf.mk + +BL1_SOURCES += ${FCONF_SOURCES} ${FCONF_DYN_SOURCES} +BL2_SOURCES += ${FCONF_SOURCES} ${FCONF_DYN_SOURCES} + +# Add `libfdt` and 
Arm common helpers required for Dynamic Config +include lib/libfdt/libfdt.mk + +DYN_CFG_SOURCES += plat/arm/common/arm_dyn_cfg.c \ + plat/arm/common/arm_dyn_cfg_helpers.c \ + common/uuid.c + +DYN_CFG_SOURCES += ${FDT_WRAPPERS_SOURCES} + +BL1_SOURCES += ${DYN_CFG_SOURCES} +BL2_SOURCES += ${DYN_CFG_SOURCES} + +ifeq (${BL2_AT_EL3},1) +BL2_SOURCES += plat/arm/common/arm_bl2_el3_setup.c +endif + +# Because BL1/BL2 execute in AArch64 mode but BL32 in AArch32 we need to use +# the AArch32 descriptors. +ifeq (${JUNO_AARCH32_EL3_RUNTIME},1) +BL2_SOURCES += plat/arm/common/aarch32/arm_bl2_mem_params_desc.c +else +ifneq (${PLAT}, corstone1000) +BL2_SOURCES += plat/arm/common/${ARCH}/arm_bl2_mem_params_desc.c +endif +endif +BL2_SOURCES += plat/arm/common/arm_image_load.c \ + common/desc_image_load.c +ifeq (${SPD},opteed) +BL2_SOURCES += lib/optee/optee_utils.c +endif + +BL2U_SOURCES += drivers/delay_timer/delay_timer.c \ + drivers/delay_timer/generic_delay_timer.c \ + plat/arm/common/arm_bl2u_setup.c + +BL31_SOURCES += plat/arm/common/arm_bl31_setup.c \ + plat/arm/common/arm_pm.c \ + plat/arm/common/arm_topology.c \ + plat/common/plat_psci_common.c + +ifneq ($(filter 1,${ENABLE_PMF} ${ARM_ETHOSN_NPU_DRIVER}),) +ARM_SVC_HANDLER_SRCS := + +ifeq (${ENABLE_PMF},1) +ARM_SVC_HANDLER_SRCS += lib/pmf/pmf_smc.c +endif + +ifeq (${ARM_ETHOSN_NPU_DRIVER},1) +ARM_SVC_HANDLER_SRCS += plat/arm/common/fconf/fconf_ethosn_getter.c \ + drivers/delay_timer/delay_timer.c \ + drivers/arm/ethosn/ethosn_smc.c +endif + +ifeq (${ARCH}, aarch64) +BL31_SOURCES += plat/arm/common/aarch64/execution_state_switch.c\ + plat/arm/common/arm_sip_svc.c \ + ${ARM_SVC_HANDLER_SRCS} +else +BL32_SOURCES += plat/arm/common/arm_sip_svc.c \ + ${ARM_SVC_HANDLER_SRCS} +endif +endif + +ifeq (${EL3_EXCEPTION_HANDLING},1) +BL31_SOURCES += plat/common/aarch64/plat_ehf.c +endif + +ifeq (${SDEI_SUPPORT},1) +BL31_SOURCES += plat/arm/common/aarch64/arm_sdei.c +ifeq (${SDEI_IN_FCONF},1) +BL31_SOURCES += plat/arm/common/fconf/fconf_sdei_getter.c +endif +endif + +# RAS sources +ifeq (${RAS_EXTENSION},1) +BL31_SOURCES += lib/extensions/ras/std_err_record.c \ + lib/extensions/ras/ras_common.c +endif + +# Pointer Authentication sources +ifeq (${ENABLE_PAUTH}, 1) +PLAT_BL_COMMON_SOURCES += plat/arm/common/aarch64/arm_pauth.c \ + lib/extensions/pauth/pauth_helpers.S +endif + +ifeq (${SPD},spmd) +BL31_SOURCES += plat/common/plat_spmd_manifest.c \ + common/uuid.c \ + ${LIBFDT_SRCS} + +BL31_SOURCES += ${FDT_WRAPPERS_SOURCES} +endif + +ifeq (${DRTM_SUPPORT},1) +BL31_SOURCES += plat/arm/common/arm_err.c +endif + +ifneq (${TRUSTED_BOARD_BOOT},0) + + # Include common TBB sources + AUTH_SOURCES := drivers/auth/auth_mod.c \ + drivers/auth/img_parser_mod.c + + # Include the selected chain of trust sources. 
+ ifeq (${COT},tbbr) + BL1_SOURCES += drivers/auth/tbbr/tbbr_cot_common.c \ + drivers/auth/tbbr/tbbr_cot_bl1.c + ifneq (${COT_DESC_IN_DTB},0) + BL2_SOURCES += lib/fconf/fconf_cot_getter.c + else + BL2_SOURCES += drivers/auth/tbbr/tbbr_cot_common.c \ + drivers/auth/tbbr/tbbr_cot_bl2.c + endif + else ifeq (${COT},dualroot) + AUTH_SOURCES += drivers/auth/dualroot/cot.c + else ifeq (${COT},cca) + AUTH_SOURCES += drivers/auth/cca/cot.c + else + $(error Unknown chain of trust ${COT}) + endif + + BL1_SOURCES += ${AUTH_SOURCES} \ + bl1/tbbr/tbbr_img_desc.c \ + plat/arm/common/arm_bl1_fwu.c \ + plat/common/tbbr/plat_tbbr.c + + BL2_SOURCES += ${AUTH_SOURCES} \ + plat/common/tbbr/plat_tbbr.c + + $(eval $(call TOOL_ADD_IMG,ns_bl2u,--fwu,FWU_)) + + IMG_PARSER_LIB_MK := drivers/auth/mbedtls/mbedtls_x509.mk + + $(info Including ${IMG_PARSER_LIB_MK}) + include ${IMG_PARSER_LIB_MK} +endif + +# Include Measured Boot makefile before any Crypto library makefile. +# Crypto library makefile may need default definitions of Measured Boot build +# flags present in Measured Boot makefile. +ifneq ($(filter 1,${MEASURED_BOOT} ${DRTM_SUPPORT}),) + MEASURED_BOOT_MK := drivers/measured_boot/event_log/event_log.mk + $(info Including ${MEASURED_BOOT_MK}) + include ${MEASURED_BOOT_MK} + + ifneq (${MBOOT_EL_HASH_ALG}, sha256) + $(eval $(call add_define,TF_MBEDTLS_MBOOT_USE_SHA512)) + endif + + ifeq (${MEASURED_BOOT},1) + BL1_SOURCES += ${EVENT_LOG_SOURCES} + BL2_SOURCES += ${EVENT_LOG_SOURCES} + endif + + ifeq (${DRTM_SUPPORT},1) + BL31_SOURCES += ${EVENT_LOG_SOURCES} + endif +endif + +ifneq ($(filter 1,${MEASURED_BOOT} ${TRUSTED_BOARD_BOOT} ${DRTM_SUPPORT}),) + CRYPTO_SOURCES := drivers/auth/crypto_mod.c \ + lib/fconf/fconf_tbbr_getter.c + BL1_SOURCES += ${CRYPTO_SOURCES} + BL2_SOURCES += ${CRYPTO_SOURCES} + BL31_SOURCES += drivers/auth/crypto_mod.c + + # We expect to locate the *.mk files under the directories specified below + ifeq (${ARM_CRYPTOCELL_INTEG},0) + CRYPTO_LIB_MK := drivers/auth/mbedtls/mbedtls_crypto.mk + else + CRYPTO_LIB_MK := drivers/auth/cryptocell/cryptocell_crypto.mk + endif + + $(info Including ${CRYPTO_LIB_MK}) + include ${CRYPTO_LIB_MK} +endif + +ifeq (${RECLAIM_INIT_CODE}, 1) + ifeq (${ARM_XLAT_TABLES_LIB_V1}, 1) + $(error "To reclaim init code xlat tables v2 must be used") + endif +endif diff --git a/plat/arm/common/arm_console.c b/plat/arm/common/arm_console.c new file mode 100644 index 0000000..51830c9 --- /dev/null +++ b/plat/arm/common/arm_console.c @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include + +#include +#include +#include +#include + +#pragma weak arm_console_runtime_init +#pragma weak arm_console_runtime_end + +/******************************************************************************* + * Functions that set up the console + ******************************************************************************/ +static console_t arm_boot_console; +static console_t arm_runtime_console; + +/* Initialize the console to provide early debug support */ +void __init arm_console_boot_init(void) +{ + /* If the console was initialized already, don't initialize again */ + if (arm_boot_console.base == PLAT_ARM_BOOT_UART_BASE) { + return; + } + + int rc = console_pl011_register(PLAT_ARM_BOOT_UART_BASE, + PLAT_ARM_BOOT_UART_CLK_IN_HZ, + ARM_CONSOLE_BAUDRATE, + &arm_boot_console); + if (rc == 0) { + /* + * The crash console doesn't use the multi console API, it uses + * the core console functions directly. It is safe to call panic + * and let it print debug information. + */ + panic(); + } + + console_set_scope(&arm_boot_console, CONSOLE_FLAG_BOOT); +} + +void arm_console_boot_end(void) +{ + console_flush(); + (void)console_unregister(&arm_boot_console); +} + +/* Initialize the runtime console */ +void arm_console_runtime_init(void) +{ + int rc = console_pl011_register(PLAT_ARM_RUN_UART_BASE, + PLAT_ARM_RUN_UART_CLK_IN_HZ, + ARM_CONSOLE_BAUDRATE, + &arm_runtime_console); + if (rc == 0) + panic(); + + console_set_scope(&arm_runtime_console, CONSOLE_FLAG_RUNTIME); +} + +void arm_console_runtime_end(void) +{ + console_flush(); +} diff --git a/plat/arm/common/arm_dyn_cfg.c b/plat/arm/common/arm_dyn_cfg.c new file mode 100644 index 0000000..c88621e --- /dev/null +++ b/plat/arm/common/arm_dyn_cfg.c @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include + +#include + +#include +#include +#include +#if CRYPTO_SUPPORT +#include MBEDTLS_CONFIG_FILE +#endif /* CRYPTO_SUPPORT */ +#include +#include +#include + +#include +#include + +#if CRYPTO_SUPPORT + +static void *mbedtls_heap_addr; +static size_t mbedtls_heap_size; + +/* + * This function is the implementation of the shared Mbed TLS heap between + * BL1 and BL2 for Arm platforms. The shared heap address is passed from BL1 + * to BL2 with a pointer. This pointer resides inside the TB_FW_CONFIG file + * which is a DTB. + * + * This function is placed inside an #if directive for the below reasons: + * - To allocate space for the Mbed TLS heap --only if-- Trusted Board Boot + * is enabled. + * - This implementation requires the DTB to be present so that BL1 has a + * mechanism to pass the pointer to BL2. + */ +int arm_get_mbedtls_heap(void **heap_addr, size_t *heap_size) +{ + assert(heap_addr != NULL); + assert(heap_size != NULL); + +#if defined(IMAGE_BL1) || BL2_AT_EL3 || defined(IMAGE_BL31) + + /* If in BL1 or BL2_AT_EL3 define a heap */ + static unsigned char heap[TF_MBEDTLS_HEAP_SIZE]; + + *heap_addr = heap; + *heap_size = sizeof(heap); + mbedtls_heap_addr = heap; + mbedtls_heap_size = sizeof(heap); + +#elif defined(IMAGE_BL2) + + /* If in BL2, retrieve the already allocated heap's info from DTB */ + *heap_addr = FCONF_GET_PROPERTY(tbbr, dyn_config, mbedtls_heap_addr); + *heap_size = FCONF_GET_PROPERTY(tbbr, dyn_config, mbedtls_heap_size); + +#endif + + return 0; +} + +/* + * Puts the shared Mbed TLS heap information to the DTB. 
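Arm platforms typically route the generic plat_get_mbedtls_heap() hook straight to the helper above; a minimal sketch of that wiring (assumed, not shown in this patch):

/* Sketch: forward the generic Mbed TLS heap hook to the Arm common helper. */
int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size)
{
	return arm_get_mbedtls_heap(heap_addr, heap_size);
}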
+ * Executed only from BL1. + */ +void arm_bl1_set_mbedtls_heap(void) +{ + int err; + uintptr_t tb_fw_cfg_dtb; + const struct dyn_cfg_dtb_info_t *tb_fw_config_info; + + /* + * If tb_fw_cfg_dtb==NULL then DTB is not present for the current + * platform. As such, we don't attempt to write to the DTB at all. + * + * If mbedtls_heap_addr==NULL, then it means we are using the default + * heap implementation. As such, BL2 will have its own heap for sure + * and hence there is no need to pass any information to the DTB. + * + * In the latter case, if we still wanted to write in the DTB the heap + * information, we would need to call plat_get_mbedtls_heap to retrieve + * the default heap's address and size. + */ + + tb_fw_config_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TB_FW_CONFIG_ID); + assert(tb_fw_config_info != NULL); + + tb_fw_cfg_dtb = tb_fw_config_info->config_addr; + + if ((tb_fw_cfg_dtb != 0UL) && (mbedtls_heap_addr != NULL)) { + /* As libfdt uses void *, we can't avoid this cast */ + void *dtb = (void *)tb_fw_cfg_dtb; + + err = arm_set_dtb_mbedtls_heap_info(dtb, + mbedtls_heap_addr, mbedtls_heap_size); + if (err < 0) { + ERROR("%swrite shared Mbed TLS heap information%s", + "BL1: unable to ", " to DTB\n"); + panic(); + } +#if !MEASURED_BOOT + /* + * Ensure that the info written to the DTB is visible to other + * images. It's critical because BL2 won't be able to proceed + * without the heap info. + * + * In MEASURED_BOOT case flushing is done in a function which + * is called after heap information is written in the DTB. + */ + flush_dcache_range(tb_fw_cfg_dtb, fdt_totalsize(dtb)); +#endif /* !MEASURED_BOOT */ + } +} +#endif /* CRYPTO_SUPPORT */ + +/* + * BL2 utility function to initialize dynamic configuration specified by + * FW_CONFIG. Populate the bl_mem_params_node_t of other FW_CONFIGs if + * specified in FW_CONFIG. + */ +void arm_bl2_dyn_cfg_init(void) +{ + unsigned int i; + bl_mem_params_node_t *cfg_mem_params = NULL; + uintptr_t image_base; + uint32_t image_size; + unsigned int error_config_id = MAX_IMAGE_IDS; + const unsigned int config_ids[] = { + HW_CONFIG_ID, + SOC_FW_CONFIG_ID, + NT_FW_CONFIG_ID, + TOS_FW_CONFIG_ID + }; + + const struct dyn_cfg_dtb_info_t *dtb_info; + + /* Iterate through all the fw config IDs */ + for (i = 0; i < ARRAY_SIZE(config_ids); i++) { + /* Get the config load address and size */ + cfg_mem_params = get_bl_mem_params_node(config_ids[i]); + if (cfg_mem_params == NULL) { + VERBOSE("%sconfig_id = %d in bl_mem_params_node\n", + "Couldn't find ", config_ids[i]); + continue; + } + + dtb_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, config_ids[i]); + if (dtb_info == NULL) { + VERBOSE("%sconfig_id %d load info in FW_CONFIG\n", + "Couldn't find ", config_ids[i]); + continue; + } + + image_base = dtb_info->config_addr; + image_size = dtb_info->config_max_size; + + /* + * Do some runtime checks on the load addresses of soc_fw_config, + * tos_fw_config, nt_fw_config. This is not a comprehensive check + * of all invalid addresses but to prevent trivial porting errors. 
+ */ + if (config_ids[i] != HW_CONFIG_ID) { + + if (check_uptr_overflow(image_base, image_size)) { + VERBOSE("%s=%d as its %s is overflowing uptr\n", + "skip loading of firmware config", + config_ids[i], + "load-address"); + error_config_id = config_ids[i]; + continue; + } +#ifdef BL31_BASE + /* Ensure the configs don't overlap with BL31 */ + if ((image_base >= BL31_BASE) && + (image_base <= BL31_LIMIT)) { + VERBOSE("%s=%d as its %s is overlapping BL31\n", + "skip loading of firmware config", + config_ids[i], + "load-address"); + error_config_id = config_ids[i]; + continue; + } +#endif + /* Ensure the configs are loaded in a valid address */ + if (image_base < ARM_BL_RAM_BASE) { + VERBOSE("%s=%d as its %s is invalid\n", + "skip loading of firmware config", + config_ids[i], + "load-address"); + error_config_id = config_ids[i]; + continue; + } +#ifdef BL32_BASE + /* + * If BL32 is present, ensure that the configs don't + * overlap with it. + */ + if ((image_base >= BL32_BASE) && + (image_base <= BL32_LIMIT)) { + VERBOSE("%s=%d as its %s is overlapping BL32\n", + "skip loading of firmware config", + config_ids[i], + "load-address"); + error_config_id = config_ids[i]; + continue; + } +#endif + } + + cfg_mem_params->image_info.image_base = image_base; + cfg_mem_params->image_info.image_max_size = (uint32_t)image_size; + + /* + * Remove the IMAGE_ATTRIB_SKIP_LOADING attribute from + * HW_CONFIG or FW_CONFIG nodes + */ + cfg_mem_params->image_info.h.attr &= ~IMAGE_ATTRIB_SKIP_LOADING; + } + + if (error_config_id != MAX_IMAGE_IDS) { + ERROR("Invalid config file %u\n", error_config_id); + panic(); + } +} diff --git a/plat/arm/common/arm_dyn_cfg_helpers.c b/plat/arm/common/arm_dyn_cfg_helpers.c new file mode 100644 index 0000000..e88ea65 --- /dev/null +++ b/plat/arm/common/arm_dyn_cfg_helpers.c @@ -0,0 +1,369 @@ +/* + * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#if MEASURED_BOOT +#include +#endif +#include + +#include +#include +#include + +#include +#include + +#define DTB_PROP_MBEDTLS_HEAP_ADDR "mbedtls_heap_addr" +#define DTB_PROP_MBEDTLS_HEAP_SIZE "mbedtls_heap_size" + +#if MEASURED_BOOT +#ifdef SPD_opteed +/* + * Currently OP-TEE does not support reading DTBs from Secure memory + * and this property should be removed when this feature is supported. + */ +#define DTB_PROP_HW_SM_LOG_ADDR "tpm_event_log_sm_addr" +#endif /* SPD_opteed */ +#define DTB_PROP_HW_LOG_ADDR "tpm_event_log_addr" +#define DTB_PROP_HW_LOG_SIZE "tpm_event_log_size" +#endif /* MEASURED_BOOT */ + +/******************************************************************************* + * Validate the tb_fw_config is a valid DTB file and returns the node offset + * to "arm,tb_fw" property. + * Arguments: + * void *dtb - pointer to the TB_FW_CONFIG in memory + * int *node - Returns the node offset to "arm,tb_fw" property if found. + * + * Returns 0 on success and -1 on error. 
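An illustrative caller of this validation helper (sketch only; dtb is assumed to point at a TB_FW_CONFIG blob already loaded into memory):

int node;

if (arm_dyn_tb_fw_cfg_init(dtb, &node) != 0) {
	/* Not a usable TB_FW_CONFIG: header invalid or no "arm,tb_fw" node. */
	panic();
}
/* 'node' can now be passed to the fdt/fdtw helpers to read properties. */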
+ ******************************************************************************/ +int arm_dyn_tb_fw_cfg_init(void *dtb, int *node) +{ + assert(dtb != NULL); + assert(node != NULL); + + /* Check if the pointer to DT is correct */ + if (fdt_check_header(dtb) != 0) { + WARN("Invalid DTB file passed as%s\n", " TB_FW_CONFIG"); + return -1; + } + + /* Assert the node offset point to "arm,tb_fw" compatible property */ + *node = fdt_node_offset_by_compatible(dtb, -1, "arm,tb_fw"); + if (*node < 0) { + WARN("The compatible property '%s' not%s", "arm,tb_fw", + " found in the config\n"); + return -1; + } + + VERBOSE("Dyn cfg: '%s'%s", "arm,tb_fw", " found in the config\n"); + return 0; +} + +/* + * This function writes the Mbed TLS heap address and size in the DTB. When it + * is called, it is guaranteed that a DTB is available. However it is not + * guaranteed that the shared Mbed TLS heap implementation is used. Thus we + * return error code from here and it's the responsibility of the caller to + * determine the action upon error. + * + * This function is supposed to be called only by BL1. + * + * Returns: + * 0 = success + * -1 = error + */ +int arm_set_dtb_mbedtls_heap_info(void *dtb, void *heap_addr, size_t heap_size) +{ + int dtb_root; + + /* + * Verify that the DTB is valid, before attempting to write to it, + * and get the DTB root node. + */ + int err = arm_dyn_tb_fw_cfg_init(dtb, &dtb_root); + if (err < 0) { + ERROR("Invalid%s loaded. Unable to get root node\n", + " TB_FW_CONFIG"); + return -1; + } + + /* + * Write the heap address and size in the DTB. + * + * NOTE: The variables heap_addr and heap_size are corrupted + * by the "fdtw_write_inplace_cells" function. After the + * function calls they must NOT be reused. + */ + err = fdtw_write_inplace_cells(dtb, dtb_root, + DTB_PROP_MBEDTLS_HEAP_ADDR, 2, &heap_addr); + if (err < 0) { + ERROR("%sDTB property '%s'\n", + "Unable to write ", DTB_PROP_MBEDTLS_HEAP_ADDR); + return -1; + } + + err = fdtw_write_inplace_cells(dtb, dtb_root, + DTB_PROP_MBEDTLS_HEAP_SIZE, 1, &heap_size); + if (err < 0) { + ERROR("%sDTB property '%s'\n", + "Unable to write ", DTB_PROP_MBEDTLS_HEAP_SIZE); + return -1; + } + + return 0; +} + +#if MEASURED_BOOT +/* + * Write the Event Log address and its size in the DTB. + * + * Returns: + * 0 = success + * < 0 = error + */ +static int arm_set_event_log_info(uintptr_t config_base, +#ifdef SPD_opteed + uintptr_t sm_log_addr, +#endif + uintptr_t log_addr, size_t log_size) +{ + /* As libfdt uses void *, we can't avoid this cast */ + void *dtb = (void *)config_base; + const char *compatible = "arm,tpm_event_log"; + int err, node; + + /* + * Verify that the DTB is valid, before attempting to write to it, + * and get the DTB root node. 
+ */ + + /* Check if the pointer to DT is correct */ + err = fdt_check_header(dtb); + if (err < 0) { + WARN("Invalid DTB file passed\n"); + return err; + } + + /* Assert the node offset point to compatible property */ + node = fdt_node_offset_by_compatible(dtb, -1, compatible); + if (node < 0) { + WARN("The compatible property '%s' not%s", compatible, + " found in the config\n"); + return node; + } + + VERBOSE("Dyn cfg: '%s'%s", compatible, " found in the config\n"); + +#ifdef SPD_opteed + if (sm_log_addr != 0UL) { + err = fdtw_write_inplace_cells(dtb, node, + DTB_PROP_HW_SM_LOG_ADDR, 2, &sm_log_addr); + if (err < 0) { + ERROR("%sDTB property '%s'\n", + "Unable to write ", DTB_PROP_HW_SM_LOG_ADDR); + return err; + } + } +#endif + err = fdtw_write_inplace_cells(dtb, node, + DTB_PROP_HW_LOG_ADDR, 2, &log_addr); + if (err < 0) { + ERROR("%sDTB property '%s'\n", + "Unable to write ", DTB_PROP_HW_LOG_ADDR); + return err; + } + + err = fdtw_write_inplace_cells(dtb, node, + DTB_PROP_HW_LOG_SIZE, 1, &log_size); + if (err < 0) { + ERROR("%sDTB property '%s'\n", + "Unable to write ", DTB_PROP_HW_LOG_SIZE); + } else { + /* + * Ensure that the info written to the DTB is visible + * to other images. + */ + flush_dcache_range(config_base, fdt_totalsize(dtb)); + } + + return err; +} + +/* + * This function writes the Event Log address and its size + * in the TOS_FW_CONFIG DTB. + * + * This function is supposed to be called only by BL2. + * + * Returns: + * 0 = success + * < 0 = error + */ +int arm_set_tos_fw_info(uintptr_t log_addr, size_t log_size) +{ + uintptr_t config_base; + const bl_mem_params_node_t *cfg_mem_params; + int err; + + assert(log_addr != 0UL); + + /* Get the config load address and size of TOS_FW_CONFIG */ + cfg_mem_params = get_bl_mem_params_node(TOS_FW_CONFIG_ID); + assert(cfg_mem_params != NULL); + + config_base = cfg_mem_params->image_info.image_base; + + /* Write the Event Log address and its size in the DTB */ + err = arm_set_event_log_info(config_base, +#ifdef SPD_opteed + 0UL, +#endif + log_addr, log_size); + if (err < 0) { + ERROR("%sEvent Log data to TOS_FW_CONFIG\n", + "Unable to write "); + } + + return err; +} + +/* + * This function writes the Event Log address and its size + * in the NT_FW_CONFIG DTB. + * + * This function is supposed to be called only by BL2. + * + * Returns: + * 0 = success + * < 0 = error + */ +int arm_set_nt_fw_info( +#ifdef SPD_opteed + uintptr_t log_addr, +#endif + size_t log_size, uintptr_t *ns_log_addr) +{ + uintptr_t config_base; + uintptr_t ns_addr; + const bl_mem_params_node_t *cfg_mem_params; + int err; + + assert(ns_log_addr != NULL); + + /* Get the config load address and size from NT_FW_CONFIG */ + cfg_mem_params = get_bl_mem_params_node(NT_FW_CONFIG_ID); + assert(cfg_mem_params != NULL); + + config_base = cfg_mem_params->image_info.image_base; + + /* Calculate Event Log address in Non-secure memory */ + ns_addr = cfg_mem_params->image_info.image_base + + cfg_mem_params->image_info.image_max_size; + + /* Check for memory space */ + if ((uint64_t)(ns_addr + log_size) > ARM_NS_DRAM1_END) { + return -1; + } + + /* Write the Event Log address and its size in the DTB */ + err = arm_set_event_log_info(config_base, +#ifdef SPD_opteed + log_addr, +#endif + ns_addr, log_size); + + /* Return Event Log address in Non-secure memory */ + *ns_log_addr = (err < 0) ? 0UL : ns_addr; + return err; +} + +/* + * This function writes the Event Log address and its size + * in the TB_FW_CONFIG DTB. + * + * This function is supposed to be called only by BL1. 
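A sketch of how a BL2 measured-boot hook might publish the event log to the normal world with arm_set_nt_fw_info() (non-OP-TEE build; event_log_size is a hypothetical variable holding the log length):

uintptr_t ns_log_addr;

if (arm_set_nt_fw_info(event_log_size, &ns_log_addr) < 0) {
	ERROR("Failed to advertise the Event Log in NT_FW_CONFIG\n");
	panic();
}
/* The log itself is then expected to be copied to ns_log_addr. */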
+ * + * Returns: + * 0 = success + * < 0 = error + */ +int arm_set_tb_fw_info(uintptr_t log_addr, size_t log_size) +{ + /* + * Read tb_fw_config device tree for Event Log properties + * and write the Event Log address and its size in the DTB + */ + const struct dyn_cfg_dtb_info_t *tb_fw_config_info; + uintptr_t tb_fw_cfg_dtb; + int err; + + tb_fw_config_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TB_FW_CONFIG_ID); + assert(tb_fw_config_info != NULL); + + tb_fw_cfg_dtb = tb_fw_config_info->config_addr; + + err = arm_set_event_log_info(tb_fw_cfg_dtb, +#ifdef SPD_opteed + 0UL, +#endif + log_addr, log_size); + return err; +} + +/* + * This function reads the Event Log address and its size + * properties present in TB_FW_CONFIG DTB. + * + * This function is supposed to be called only by BL2. + * + * Returns: + * 0 = success + * < 0 = error + * Alongside returns Event Log address and its size. + */ + +int arm_get_tb_fw_info(uint64_t *log_addr, size_t *log_size) +{ + /* As libfdt uses void *, we can't avoid this cast */ + const struct dyn_cfg_dtb_info_t *tb_fw_config_info; + int node, rc; + + tb_fw_config_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TB_FW_CONFIG_ID); + assert(tb_fw_config_info != NULL); + + void *dtb = (void *)tb_fw_config_info->config_addr; + const char *compatible = "arm,tpm_event_log"; + + /* Assert the node offset point to compatible property */ + node = fdt_node_offset_by_compatible(dtb, -1, compatible); + if (node < 0) { + WARN("The compatible property '%s'%s", compatible, + " not specified in TB_FW config.\n"); + return node; + } + + VERBOSE("Dyn cfg: '%s'%s", compatible, " found in the config\n"); + + rc = fdt_read_uint64(dtb, node, DTB_PROP_HW_LOG_ADDR, log_addr); + if (rc != 0) { + ERROR("%s%s", DTB_PROP_HW_LOG_ADDR, + " not specified in TB_FW config.\n"); + return rc; + } + + rc = fdt_read_uint32(dtb, node, DTB_PROP_HW_LOG_SIZE, (uint32_t *)log_size); + if (rc != 0) { + ERROR("%s%s", DTB_PROP_HW_LOG_SIZE, + " not specified in TB_FW config.\n"); + } + + return rc; +} +#endif /* MEASURED_BOOT */ diff --git a/plat/arm/common/arm_err.c b/plat/arm/common/arm_err.c new file mode 100644 index 0000000..fa36e8d --- /dev/null +++ b/plat/arm/common/arm_err.c @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include + +void __dead2 plat_error_handler(int err) +{ + plat_arm_error_handler(err); +} + +void __dead2 plat_system_reset(void) +{ + plat_arm_system_reset(); +} diff --git a/plat/arm/common/arm_gicv2.c b/plat/arm/common/arm_gicv2.c new file mode 100644 index 0000000..80a845f --- /dev/null +++ b/plat/arm/common/arm_gicv2.c @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include +#include + +/****************************************************************************** + * The following functions are defined as weak to allow a platform to override + * the way the GICv2 driver is initialised and used. 
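The plat_error_handler() and plat_system_reset() wrappers above delegate to plat_arm_* hooks that each platform provides. A representative, hypothetical implementation of the error hook:

/* Hypothetical platform implementation backing plat_error_handler() above. */
void __dead2 plat_arm_error_handler(int err)
{
	ERROR("Boot failed with error %d\n", err);
	console_flush();

	/* Park the CPU; any recovery action is platform policy. */
	for (;;) {
		wfi();
	}
}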
+ *****************************************************************************/ +#pragma weak plat_arm_gic_driver_init +#pragma weak plat_arm_gic_init +#pragma weak plat_arm_gic_cpuif_enable +#pragma weak plat_arm_gic_cpuif_disable +#pragma weak plat_arm_gic_pcpu_init + +/****************************************************************************** + * On a GICv2 system, the Group 1 secure interrupts are treated as Group 0 + * interrupts. + *****************************************************************************/ +static const interrupt_prop_t arm_interrupt_props[] = { + PLAT_ARM_G1S_IRQ_PROPS(GICV2_INTR_GROUP0), + PLAT_ARM_G0_IRQ_PROPS(GICV2_INTR_GROUP0) +}; + +static unsigned int target_mask_array[PLATFORM_CORE_COUNT]; + +static const gicv2_driver_data_t arm_gic_data = { + .gicd_base = PLAT_ARM_GICD_BASE, + .gicc_base = PLAT_ARM_GICC_BASE, + .interrupt_props = arm_interrupt_props, + .interrupt_props_num = ARRAY_SIZE(arm_interrupt_props), + .target_masks = target_mask_array, + .target_masks_num = ARRAY_SIZE(target_mask_array), +}; + +/****************************************************************************** + * ARM common helper to initialize the GICv2 only driver. + *****************************************************************************/ +void plat_arm_gic_driver_init(void) +{ + gicv2_driver_init(&arm_gic_data); +} + +void plat_arm_gic_init(void) +{ + gicv2_distif_init(); + gicv2_pcpu_distif_init(); + gicv2_set_pe_target_mask(plat_my_core_pos()); + gicv2_cpuif_enable(); +} + +/****************************************************************************** + * ARM common helper to enable the GICv2 CPU interface + *****************************************************************************/ +void plat_arm_gic_cpuif_enable(void) +{ + gicv2_cpuif_enable(); +} + +/****************************************************************************** + * ARM common helper to disable the GICv2 CPU interface + *****************************************************************************/ +void plat_arm_gic_cpuif_disable(void) +{ + gicv2_cpuif_disable(); +} + +/****************************************************************************** + * ARM common helper to initialize the per cpu distributor interface in GICv2 + *****************************************************************************/ +void plat_arm_gic_pcpu_init(void) +{ + gicv2_pcpu_distif_init(); + gicv2_set_pe_target_mask(plat_my_core_pos()); +} + +/****************************************************************************** + * Stubs for Redistributor power management. Although GICv2 doesn't have + * Redistributor interface, these are provided for the sake of uniform GIC API + *****************************************************************************/ +void plat_arm_gic_redistif_on(void) +{ + return; +} + +void plat_arm_gic_redistif_off(void) +{ + return; +} + + +/****************************************************************************** + * ARM common helper to save & restore the GICv3 on resume from system suspend. + * The normal world currently takes care of saving and restoring the GICv2 + * registers due to legacy reasons. Hence we just initialize the Distributor + * on resume from system suspend. 
+ *****************************************************************************/ +void plat_arm_gic_save(void) +{ + return; +} + +void plat_arm_gic_resume(void) +{ + gicv2_distif_init(); + gicv2_pcpu_distif_init(); +} diff --git a/plat/arm/common/arm_gicv3.c b/plat/arm/common/arm_gicv3.c new file mode 100644 index 0000000..469e22a --- /dev/null +++ b/plat/arm/common/arm_gicv3.c @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include + +#include +#include +#include +#include +#include +#include + +/****************************************************************************** + * The following functions are defined as weak to allow a platform to override + * the way the GICv3 driver is initialised and used. + *****************************************************************************/ +#pragma weak plat_arm_gic_driver_init +#pragma weak plat_arm_gic_init +#pragma weak plat_arm_gic_cpuif_enable +#pragma weak plat_arm_gic_cpuif_disable +#pragma weak plat_arm_gic_pcpu_init +#pragma weak plat_arm_gic_redistif_on +#pragma weak plat_arm_gic_redistif_off + +/* The GICv3 driver only needs to be initialized in EL3 */ +static uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT]; + +/* Default GICR base address to be used for GICR probe. */ +static const uintptr_t gicr_base_addrs[2] = { + PLAT_ARM_GICR_BASE, /* GICR Base address of the primary CPU */ + 0U /* Zero Termination */ +}; + +/* List of zero terminated GICR frame addresses which CPUs will probe */ +static const uintptr_t *gicr_frames = gicr_base_addrs; + +static const interrupt_prop_t arm_interrupt_props[] = { + PLAT_ARM_G1S_IRQ_PROPS(INTR_GROUP1S), + PLAT_ARM_G0_IRQ_PROPS(INTR_GROUP0) +}; + +/* + * We save and restore the GICv3 context on system suspend. Allocate the + * data in the designated EL3 Secure carve-out memory. The `used` attribute + * is used to prevent the compiler from removing the gicv3 contexts. + */ +static gicv3_redist_ctx_t rdist_ctx __section("arm_el3_tzc_dram") __used; +static gicv3_dist_ctx_t dist_ctx __section("arm_el3_tzc_dram") __used; + +/* Define accessor function to get reference to the GICv3 context */ +DEFINE_LOAD_SYM_ADDR(rdist_ctx) +DEFINE_LOAD_SYM_ADDR(dist_ctx) + +/* + * MPIDR hashing function for translating MPIDRs read from GICR_TYPER register + * to core position. + * + * Calculating core position is dependent on MPIDR_EL1.MT bit. However, affinity + * values read from GICR_TYPER don't have an MT field. To reuse the same + * translation used for CPUs, we insert MT bit read from the PE's MPIDR into + * that read from GICR_TYPER. + * + * Assumptions: + * + * - All CPUs implemented in the system have MPIDR_EL1.MT bit set; + * - No CPUs implemented in the system use affinity level 3. + */ +static unsigned int arm_gicv3_mpidr_hash(u_register_t mpidr) +{ + mpidr |= (read_mpidr_el1() & MPIDR_MT_MASK); + return plat_arm_calc_core_pos(mpidr); +} + +static const gicv3_driver_data_t arm_gic_data __unused = { + .gicd_base = PLAT_ARM_GICD_BASE, + .gicr_base = 0U, + .interrupt_props = arm_interrupt_props, + .interrupt_props_num = ARRAY_SIZE(arm_interrupt_props), + .rdistif_num = PLATFORM_CORE_COUNT, + .rdistif_base_addrs = rdistif_base_addrs, + .mpidr_to_core_pos = arm_gicv3_mpidr_hash +}; + +/* + * By default, gicr_frames will be pointing to gicr_base_addrs. 
If + * the platform supports a non-contiguous GICR frames (GICR frames located + * at uneven offset), plat_arm_override_gicr_frames function can be used by + * such platform to override the gicr_frames. + */ +void plat_arm_override_gicr_frames(const uintptr_t *plat_gicr_frames) +{ + assert(plat_gicr_frames != NULL); + gicr_frames = plat_gicr_frames; +} + +void __init plat_arm_gic_driver_init(void) +{ + /* + * The GICv3 driver is initialized in EL3 and does not need + * to be initialized again in SEL1. This is because the S-EL1 + * can use GIC system registers to manage interrupts and does + * not need GIC interface base addresses to be configured. + */ +#if (!defined(__aarch64__) && defined(IMAGE_BL32)) || \ + (defined(__aarch64__) && defined(IMAGE_BL31)) + gicv3_driver_init(&arm_gic_data); + + if (gicv3_rdistif_probe(gicr_base_addrs[0]) == -1) { + ERROR("No GICR base frame found for Primary CPU\n"); + panic(); + } +#endif +} + +/****************************************************************************** + * ARM common helper to initialize the GIC. Only invoked by BL31 + *****************************************************************************/ +void __init plat_arm_gic_init(void) +{ + gicv3_distif_init(); + gicv3_rdistif_init(plat_my_core_pos()); + gicv3_cpuif_enable(plat_my_core_pos()); +} + +/****************************************************************************** + * ARM common helper to enable the GIC CPU interface + *****************************************************************************/ +void plat_arm_gic_cpuif_enable(void) +{ + gicv3_cpuif_enable(plat_my_core_pos()); +} + +/****************************************************************************** + * ARM common helper to disable the GIC CPU interface + *****************************************************************************/ +void plat_arm_gic_cpuif_disable(void) +{ + gicv3_cpuif_disable(plat_my_core_pos()); +} + +/****************************************************************************** + * ARM common helper function to iterate over all GICR frames and discover the + * corresponding per-cpu redistributor frame as well as initialize the + * corresponding interface in GICv3. 
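+ * The probe walks the zero-terminated gicr_frames list and panics if no
+ * frame matches the calling CPU.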
+ *****************************************************************************/ +void plat_arm_gic_pcpu_init(void) +{ + int result; + const uintptr_t *plat_gicr_frames = gicr_frames; + + do { + result = gicv3_rdistif_probe(*plat_gicr_frames); + + /* If the probe is successful, no need to proceed further */ + if (result == 0) + break; + + plat_gicr_frames++; + } while (*plat_gicr_frames != 0U); + + if (result == -1) { + ERROR("No GICR base frame found for CPU 0x%lx\n", read_mpidr()); + panic(); + } + gicv3_rdistif_init(plat_my_core_pos()); +} + +/****************************************************************************** + * ARM common helpers to power GIC redistributor interface + *****************************************************************************/ +void plat_arm_gic_redistif_on(void) +{ + gicv3_rdistif_on(plat_my_core_pos()); +} + +void plat_arm_gic_redistif_off(void) +{ + gicv3_rdistif_off(plat_my_core_pos()); +} + +/****************************************************************************** + * ARM common helper to save & restore the GICv3 on resume from system suspend + *****************************************************************************/ +void plat_arm_gic_save(void) +{ + gicv3_redist_ctx_t * const rdist_context = + (gicv3_redist_ctx_t *)LOAD_ADDR_OF(rdist_ctx); + gicv3_dist_ctx_t * const dist_context = + (gicv3_dist_ctx_t *)LOAD_ADDR_OF(dist_ctx); + + /* + * If an ITS is available, save its context before + * the Redistributor using: + * gicv3_its_save_disable(gits_base, &its_ctx[i]) + * Additionally, an implementation-defined sequence may + * be required to save the whole ITS state. + */ + + /* + * Save the GIC Redistributors and ITS contexts before the + * Distributor context. As we only handle SYSTEM SUSPEND API, + * we only need to save the context of the CPU that is issuing + * the SYSTEM SUSPEND call, i.e. the current CPU. + */ + gicv3_rdistif_save(plat_my_core_pos(), rdist_context); + + /* Save the GIC Distributor context */ + gicv3_distif_save(dist_context); + + /* + * From here, all the components of the GIC can be safely powered down + * as long as there is an alternate way to handle wakeup interrupt + * sources. + */ +} + +void plat_arm_gic_resume(void) +{ + const gicv3_redist_ctx_t *rdist_context = + (gicv3_redist_ctx_t *)LOAD_ADDR_OF(rdist_ctx); + const gicv3_dist_ctx_t *dist_context = + (gicv3_dist_ctx_t *)LOAD_ADDR_OF(dist_ctx); + + /* Restore the GIC Distributor context */ + gicv3_distif_init_restore(dist_context); + + /* + * Restore the GIC Redistributor and ITS contexts after the + * Distributor context. As we only handle SYSTEM SUSPEND API, + * we only need to restore the context of the CPU that issued + * the SYSTEM SUSPEND call. + */ + gicv3_rdistif_init_restore(plat_my_core_pos(), rdist_context); + + /* + * If an ITS is available, restore its context after + * the Redistributor using: + * gicv3_its_restore(gits_base, &its_ctx[i]) + * An implementation-defined sequence may be required to + * restore the whole ITS state. The ITS must also be + * re-enabled after this sequence has been executed. + */ +} diff --git a/plat/arm/common/arm_image_load.c b/plat/arm/common/arm_image_load.c new file mode 100644 index 0000000..c411c6c --- /dev/null +++ b/plat/arm/common/arm_image_load.c @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#if defined(SPD_spmd) +#include +#endif +#include +#include + +#pragma weak plat_flush_next_bl_params +#pragma weak plat_get_bl_image_load_info +#pragma weak plat_get_next_bl_params + +static bl_params_t *next_bl_params_cpy_ptr; + +/******************************************************************************* + * This function flushes the data structures so that they are visible + * in memory for the next BL image. + ******************************************************************************/ +void plat_flush_next_bl_params(void) +{ + assert(next_bl_params_cpy_ptr != NULL); + + flush_bl_params_desc_args(bl_mem_params_desc_ptr, + bl_mem_params_desc_num, + next_bl_params_cpy_ptr); +} + +#if defined(SPD_spmd) && BL2_ENABLE_SP_LOAD +/******************************************************************************* + * This function appends Secure Partitions to list of loadable images. + ******************************************************************************/ +static void plat_add_sp_images_load_info(struct bl_load_info *load_info) +{ + bl_load_info_node_t *curr_node = load_info->head; + bl_load_info_node_t *prev_node; + + /* Shortcut for empty SP list */ + if (sp_mem_params_descs[0].image_id == 0) { + ERROR("No Secure Partition Image available\n"); + return; + } + + /* Traverse through the bl images list */ + do { + curr_node = curr_node->next_load_info; + } while (curr_node->next_load_info != NULL); + + prev_node = curr_node; + + for (unsigned int index = 0; index < MAX_SP_IDS; index++) { + if (sp_mem_params_descs[index].image_id == 0) { + return; + } + curr_node = &sp_mem_params_descs[index].load_node_mem; + /* Populate the image information */ + curr_node->image_id = sp_mem_params_descs[index].image_id; + curr_node->image_info = &sp_mem_params_descs[index].image_info; + + prev_node->next_load_info = curr_node; + prev_node = curr_node; + } + + INFO("Reached Max number of SPs\n"); +} +#endif + +/******************************************************************************* + * This function returns the list of loadable images. + ******************************************************************************/ +struct bl_load_info *plat_get_bl_image_load_info(void) +{ +#if defined(SPD_spmd) && BL2_ENABLE_SP_LOAD + bl_load_info_t *bl_load_info; + + bl_load_info = get_bl_load_info_from_mem_params_desc(); + plat_add_sp_images_load_info(bl_load_info); + + return bl_load_info; +#else + return get_bl_load_info_from_mem_params_desc(); +#endif +} + +/******************************************************************************* + * ARM helper function to return the list of executable images.Since the default + * descriptors are allocated within BL2 RW memory, this prevents BL31/BL32 + * overlay of BL2 memory. Hence this function also copies the descriptors to a + * pre-allocated memory indicated by ARM_BL2_MEM_DESC_BASE. + ******************************************************************************/ +struct bl_params *arm_get_next_bl_params(void) +{ + bl_mem_params_node_t *bl2_mem_params_descs_cpy + = (bl_mem_params_node_t *)ARM_BL2_MEM_DESC_BASE; + const bl_params_t *next_bl_params; + + next_bl_params_cpy_ptr = + (bl_params_t *)(ARM_BL2_MEM_DESC_BASE + + (bl_mem_params_desc_num * sizeof(bl_mem_params_node_t))); + + /* + * Copy the memory descriptors to ARM_BL2_MEM_DESC_BASE area. 
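+ * The copy lives outside BL2 RW memory, so loading BL31/BL32 over BL2
+ * does not corrupt the descriptors.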
+ */ + (void) memcpy(bl2_mem_params_descs_cpy, bl_mem_params_desc_ptr, + (bl_mem_params_desc_num * sizeof(bl_mem_params_node_t))); + + /* + * Modify the global 'bl_mem_params_desc_ptr' to point to the + * copied location. + */ + bl_mem_params_desc_ptr = bl2_mem_params_descs_cpy; + + next_bl_params = get_next_bl_params_from_mem_params_desc(); + assert(next_bl_params != NULL); + + /* + * Copy 'next_bl_params' to the reserved location after the copied + * memory descriptors. + */ + (void) memcpy(next_bl_params_cpy_ptr, next_bl_params, + (sizeof(bl_params_t))); + + populate_next_bl_params_config(next_bl_params_cpy_ptr); + + return next_bl_params_cpy_ptr; +} + +/******************************************************************************* + * This function returns the list of executable images + ******************************************************************************/ +struct bl_params *plat_get_next_bl_params(void) +{ + return arm_get_next_bl_params(); +} + diff --git a/plat/arm/common/arm_io_storage.c b/plat/arm/common/arm_io_storage.c new file mode 100644 index 0000000..19ee1b0 --- /dev/null +++ b/plat/arm/common/arm_io_storage.c @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2015-2021, ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* IO devices */ +static const io_dev_connector_t *fip_dev_con; +uintptr_t fip_dev_handle; +static const io_dev_connector_t *memmap_dev_con; +uintptr_t memmap_dev_handle; + +#if ARM_GPT_SUPPORT +/* fip partition names */ +static const char * const fip_part_names[] = {"FIP_A", "FIP_B"}; +CASSERT(sizeof(fip_part_names)/sizeof(char *) == NR_OF_FW_BANKS, + assert_fip_partition_names_missing); +#endif /* ARM_GPT_SUPPORT */ + +/* Weak definitions may be overridden in specific ARM standard platform */ +#pragma weak plat_arm_io_setup +#pragma weak plat_arm_get_alt_image_source + +int open_fip(const uintptr_t spec) +{ + int result; + uintptr_t local_image_handle; + + /* See if a Firmware Image Package is available */ + result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID); + if (result == 0) { + result = io_open(fip_dev_handle, spec, &local_image_handle); + if (result == 0) { + VERBOSE("Using FIP\n"); + io_close(local_image_handle); + } + } + return result; +} + +int open_memmap(const uintptr_t spec) +{ + int result; + uintptr_t local_image_handle; + + result = io_dev_init(memmap_dev_handle, (uintptr_t)NULL); + if (result == 0) { + result = io_open(memmap_dev_handle, spec, &local_image_handle); + if (result == 0) { + VERBOSE("Using Memmap\n"); + io_close(local_image_handle); + } + } + return result; +} + +int arm_io_setup(void) +{ + int io_result; + + io_result = register_io_dev_fip(&fip_dev_con); + if (io_result < 0) { + return io_result; + } + + io_result = register_io_dev_memmap(&memmap_dev_con); + if (io_result < 0) { + return io_result; + } + + /* Open connections to devices and cache the handles */ + io_result = io_dev_open(fip_dev_con, (uintptr_t)NULL, + &fip_dev_handle); + if (io_result < 0) { + return io_result; + } + + io_result = io_dev_open(memmap_dev_con, (uintptr_t)NULL, + &memmap_dev_handle); + + return io_result; +} + +void plat_arm_io_setup(void) +{ + int err; + + err = arm_io_setup(); + if (err < 0) { + panic(); + } +} + +int plat_arm_get_alt_image_source( + unsigned int image_id __unused, + uintptr_t *dev_handle __unused, + uintptr_t *image_spec __unused) +{ + /* By 
default do not try an alternative */ + return -ENOENT; +} + +/* Return an IO device handle and specification which can be used to access + * an image. Use this to enforce platform load policy */ +int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle, + uintptr_t *image_spec) +{ + int result; + const struct plat_io_policy *policy; + + policy = FCONF_GET_PROPERTY(arm, io_policies, image_id); + result = policy->check(policy->image_spec); + if (result == 0) { + *image_spec = policy->image_spec; + *dev_handle = *(policy->dev_handle); + } else { + VERBOSE("Trying alternative IO\n"); + result = plat_arm_get_alt_image_source(image_id, dev_handle, + image_spec); + } + + return result; +} + +/* + * See if a Firmware Image Package is available, + * by checking if TOC is valid or not. + */ +bool arm_io_is_toc_valid(void) +{ + return (io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID) == 0); +} + +#if ARM_GPT_SUPPORT +/****************************************************************************** + * Retrieve partition entry details such as offset and length, and set these + * details in the I/O policy of the requested image. + * + * @image_id: image id whose I/O policy to be updated + * + * @part_name: partition name whose details to be retrieved + * + * Returns 0 on success, error otherwise + * Alongside, returns device handle and image specification of requested + * image. + ******************************************************************************/ +int arm_set_image_source(unsigned int image_id, const char *part_name, + uintptr_t *dev_handle, uintptr_t *image_spec) +{ + const partition_entry_t *entry = get_partition_entry(part_name); + + if (entry == NULL) { + ERROR("Unable to find the %s partition\n", part_name); + return -ENOENT; + } + + struct plat_io_policy *policy = FCONF_GET_PROPERTY(arm, + io_policies, + image_id); + + assert(policy != NULL); + assert(policy->image_spec != 0UL); + + io_block_spec_t *spec = (io_block_spec_t *)policy->image_spec; + /* set offset and length of the image */ + spec->offset = PLAT_ARM_FLASH_IMAGE_BASE + entry->start; + spec->length = entry->length; + + *dev_handle = *(policy->dev_handle); + *image_spec = policy->image_spec; + + return 0; +} + +/******************************************************************************* + * Set the source offset and length of the FIP image in its I/O policy. + * + * @active_fw_bank_idx: active firmware bank index gathered from FWU metadata. + ******************************************************************************/ +void arm_set_fip_addr(uint32_t active_fw_bank_idx) +{ + uintptr_t dev_handle __unused; + uintptr_t image_spec __unused; + + assert(active_fw_bank_idx < NR_OF_FW_BANKS); + + INFO("Booting with partition %s\n", fip_part_names[active_fw_bank_idx]); + + int result = arm_set_image_source(FIP_IMAGE_ID, + fip_part_names[active_fw_bank_idx], + &dev_handle, + &image_spec); + if (result != 0) { + panic(); + } +} +#endif /* ARM_GPT_SUPPORT */ + +#if PSA_FWU_SUPPORT +/******************************************************************************* + * Read the FIP partition of the GPT image corresponding to the active firmware + * bank to get its offset and length, and update these details in the I/O policy + * of the FIP image. 
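+ * The active bank index is taken from the FWU metadata supplied by the caller.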
+ ******************************************************************************/ +void plat_fwu_set_images_source(const struct fwu_metadata *metadata) +{ + arm_set_fip_addr(metadata->active_index); +} + +/******************************************************************************* + * Read the requested FWU metadata partition of the GPT image to get its offset + * and length, and update these details in the I/O policy of the requested FWU + * metadata image. + ******************************************************************************/ +int plat_fwu_set_metadata_image_source(unsigned int image_id, + uintptr_t *dev_handle, + uintptr_t *image_spec) +{ + int result = -1; + + if (image_id == FWU_METADATA_IMAGE_ID) { + result = arm_set_image_source(FWU_METADATA_IMAGE_ID, + "FWU-Metadata", + dev_handle, + image_spec); + } else if (image_id == BKUP_FWU_METADATA_IMAGE_ID) { + result = arm_set_image_source(BKUP_FWU_METADATA_IMAGE_ID, + "Bkup-FWU-Metadata", + dev_handle, + image_spec); + } + + return result; +} +#endif /* PSA_FWU_SUPPORT */ diff --git a/plat/arm/common/arm_nor_psci_mem_protect.c b/plat/arm/common/arm_nor_psci_mem_protect.c new file mode 100644 index 0000000..1fa234d --- /dev/null +++ b/plat/arm/common/arm_nor_psci_mem_protect.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include +#include +#include +#include + +/* + * DRAM1 is used also to load the NS boot loader. For this reason we + * cannot clear the full DRAM1, because in that case we would clear + * the NS images (especially for RESET_TO_BL31 and RESET_TO_SPMIN cases). + * For this reason we reserve 64 MB for the NS images and protect the RAM + * until the end of DRAM1. + * We limit the size of DRAM2 to 1 GB to avoid big delays while booting + */ +#define DRAM1_NS_IMAGE_LIMIT (PLAT_ARM_NS_IMAGE_BASE + (32 << TWO_MB_SHIFT)) +#define DRAM1_PROTECTED_SIZE (ARM_NS_DRAM1_END+1u - DRAM1_NS_IMAGE_LIMIT) + +static mem_region_t arm_ram_ranges[] = { + {DRAM1_NS_IMAGE_LIMIT, DRAM1_PROTECTED_SIZE}, +#ifdef __aarch64__ + {ARM_DRAM2_BASE, 1u << ONE_GB_SHIFT}, +#endif +}; + +/******************************************************************************* + * Function that reads the content of the memory protect variable that + * enables clearing of non secure memory when system boots. This variable + * should be stored in a secure NVRAM. + ******************************************************************************/ +int arm_psci_read_mem_protect(int *enabled) +{ + int tmp; + + tmp = *(int *) PLAT_ARM_MEM_PROT_ADDR; + *enabled = (tmp == 1) ? 1 : 0; + return 0; +} + +/******************************************************************************* + * Function that writes the content of the memory protect variable that + * enables overwritten of non secure memory when system boots. + ******************************************************************************/ +int arm_nor_psci_write_mem_protect(int val) +{ + unsigned long enable = (val != 0) ? 
1UL : 0UL; + + if (nor_unlock(PLAT_ARM_MEM_PROT_ADDR) != 0) { + ERROR("unlocking memory protect variable\n"); + return -1; + } + + if (enable == 1UL) { + /* + * If we want to write a value different than 0 + * then we have to erase the full block because + * otherwise we cannot ensure that the value programmed + * into the flash is going to be the same than the value + * requested by the caller + */ + if (nor_erase(PLAT_ARM_MEM_PROT_ADDR) != 0) { + ERROR("erasing block containing memory protect variable\n"); + return -1; + } + } + + if (nor_word_program(PLAT_ARM_MEM_PROT_ADDR, enable) != 0) { + ERROR("programming memory protection variable\n"); + return -1; + } + return 0; +} + +/******************************************************************************* + * Function used for required psci operations performed when + * system boots + ******************************************************************************/ +/* + * PLAT_MEM_PROTECT_VA_FRAME is a address specifically + * selected in a way that is not needed an additional + * translation table for memprotect. It happens because + * we use a chunk of size 2MB and it means that it can + * be mapped in a level 2 table and the level 2 table + * for 0xc0000000 is already used and the entry for + * 0xc0000000 is not used. + */ +#if defined(PLAT_XLAT_TABLES_DYNAMIC) +void arm_nor_psci_do_dyn_mem_protect(void) +{ + int enable; + + arm_psci_read_mem_protect(&enable); + if (enable == 0) + return; + + INFO("PSCI: Overwriting non secure memory\n"); + clear_map_dyn_mem_regions(arm_ram_ranges, + ARRAY_SIZE(arm_ram_ranges), + PLAT_ARM_MEM_PROTEC_VA_FRAME, + 1 << TWO_MB_SHIFT); +} +#endif + +/******************************************************************************* + * Function used for required psci operations performed when + * system boots and dynamic memory is not used. + ******************************************************************************/ +void arm_nor_psci_do_static_mem_protect(void) +{ + int enable; + + (void) arm_psci_read_mem_protect(&enable); + if (enable == 0) + return; + + INFO("PSCI: Overwriting non secure memory\n"); + clear_mem_regions(arm_ram_ranges, + ARRAY_SIZE(arm_ram_ranges)); + (void) arm_nor_psci_write_mem_protect(0); +} + +/******************************************************************************* + * Function that checks if a region is protected by the memory protect + * mechanism + ******************************************************************************/ +int arm_psci_mem_protect_chk(uintptr_t base, u_register_t length) +{ + return mem_region_in_array_chk(arm_ram_ranges, + ARRAY_SIZE(arm_ram_ranges), + base, length); +} diff --git a/plat/arm/common/arm_pm.c b/plat/arm/common/arm_pm.c new file mode 100644 index 0000000..5434c94 --- /dev/null +++ b/plat/arm/common/arm_pm.c @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include + +#include +#include +#include +#include + +/* Allow ARM Standard platforms to override these functions */ +#pragma weak plat_arm_program_trusted_mailbox + +#if !ARM_RECOM_STATE_ID_ENC +/******************************************************************************* + * ARM standard platform handler called to check the validity of the power state + * parameter. 
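+ * Only a retention request at power level 0 or a power-down request is
+ * accepted, and the state id field must be zero.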
+ ******************************************************************************/ +int arm_validate_power_state(unsigned int power_state, + psci_power_state_t *req_state) +{ + unsigned int pstate = psci_get_pstate_type(power_state); + unsigned int pwr_lvl = psci_get_pstate_pwrlvl(power_state); + unsigned int i; + + assert(req_state != NULL); + + if (pwr_lvl > PLAT_MAX_PWR_LVL) + return PSCI_E_INVALID_PARAMS; + + /* Sanity check the requested state */ + if (pstate == PSTATE_TYPE_STANDBY) { + /* + * It's possible to enter standby only on power level 0 + * Ignore any other power level. + */ + if (pwr_lvl != ARM_PWR_LVL0) + return PSCI_E_INVALID_PARAMS; + + req_state->pwr_domain_state[ARM_PWR_LVL0] = + ARM_LOCAL_STATE_RET; + } else { + for (i = ARM_PWR_LVL0; i <= pwr_lvl; i++) + req_state->pwr_domain_state[i] = + ARM_LOCAL_STATE_OFF; + } + + /* + * We expect the 'state id' to be zero. + */ + if (psci_get_pstate_id(power_state) != 0U) + return PSCI_E_INVALID_PARAMS; + + return PSCI_E_SUCCESS; +} + +#else +/******************************************************************************* + * ARM standard platform handler called to check the validity of the power + * state parameter. The power state parameter has to be a composite power + * state. + ******************************************************************************/ +int arm_validate_power_state(unsigned int power_state, + psci_power_state_t *req_state) +{ + unsigned int state_id; + int i; + + assert(req_state != NULL); + + /* + * Currently we are using a linear search for finding the matching + * entry in the idle power state array. This can be made a binary + * search if the number of entries justify the additional complexity. + */ + for (i = 0; !!arm_pm_idle_states[i]; i++) { + if (power_state == arm_pm_idle_states[i]) + break; + } + + /* Return error if entry not found in the idle state array */ + if (!arm_pm_idle_states[i]) + return PSCI_E_INVALID_PARAMS; + + i = 0; + state_id = psci_get_pstate_id(power_state); + + /* Parse the State ID and populate the state info parameter */ + while (state_id) { + req_state->pwr_domain_state[i++] = state_id & + ARM_LOCAL_PSTATE_MASK; + state_id >>= ARM_LOCAL_PSTATE_WIDTH; + } + + return PSCI_E_SUCCESS; +} +#endif /* __ARM_RECOM_STATE_ID_ENC__ */ + +/******************************************************************************* + * ARM standard platform handler called to check the validity of the non secure + * entrypoint. Returns 0 if the entrypoint is valid, or -1 otherwise. + ******************************************************************************/ +int arm_validate_ns_entrypoint(uintptr_t entrypoint) +{ + /* + * Check if the non secure entrypoint lies within the non + * secure DRAM. + */ + if ((entrypoint >= ARM_NS_DRAM1_BASE) && (entrypoint < + (ARM_NS_DRAM1_BASE + ARM_NS_DRAM1_SIZE))) { + return 0; + } +#ifdef __aarch64__ + if ((entrypoint >= ARM_DRAM2_BASE) && (entrypoint < + (ARM_DRAM2_BASE + ARM_DRAM2_SIZE))) { + return 0; + } +#endif + + return -1; +} + +int arm_validate_psci_entrypoint(uintptr_t entrypoint) +{ + return (arm_validate_ns_entrypoint(entrypoint) == 0) ? PSCI_E_SUCCESS : + PSCI_E_INVALID_ADDRESS; +} + +/****************************************************************************** + * Helper function to save the platform state before a system suspend. Save the + * state of the system components which are not in the Always ON power domain. 
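+ * Currently this saves the GIC context and unregisters the runtime console.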
+ *****************************************************************************/ +void arm_system_pwr_domain_save(void) +{ + /* Assert system power domain is available on the platform */ + assert(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL2); + + plat_arm_gic_save(); + + /* + * Unregister console now so that it is not registered for a second + * time during resume. + */ + arm_console_runtime_end(); + + /* + * All the other peripheral which are configured by ARM TF are + * re-initialized on resume from system suspend. Hence we + * don't save their state here. + */ +} + +/****************************************************************************** + * Helper function to resume the platform from system suspend. Reinitialize + * the system components which are not in the Always ON power domain. + * TODO: Unify the platform setup when waking up from cold boot and system + * resume in arm_bl31_platform_setup(). + *****************************************************************************/ +void arm_system_pwr_domain_resume(void) +{ + /* Initialize the console */ + arm_console_runtime_init(); + + /* Assert system power domain is available on the platform */ + assert(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL2); + + plat_arm_gic_resume(); + + plat_arm_security_setup(); + arm_configure_sys_timer(); +} + +/******************************************************************************* + * ARM platform function to program the mailbox for a cpu before it is released + * from reset. This function assumes that the Trusted mail box base is within + * the ARM_SHARED_RAM region + ******************************************************************************/ +void plat_arm_program_trusted_mailbox(uintptr_t address) +{ + uintptr_t *mailbox = (void *) PLAT_ARM_TRUSTED_MAILBOX_BASE; + + *mailbox = address; + + /* + * Ensure that the PLAT_ARM_TRUSTED_MAILBOX_BASE is within + * ARM_SHARED_RAM region. + */ + assert((PLAT_ARM_TRUSTED_MAILBOX_BASE >= ARM_SHARED_RAM_BASE) && + ((PLAT_ARM_TRUSTED_MAILBOX_BASE + sizeof(*mailbox)) <= \ + (ARM_SHARED_RAM_BASE + ARM_SHARED_RAM_SIZE))); +} + +/******************************************************************************* + * The ARM Standard platform definition of platform porting API + * `plat_setup_psci_ops`. + ******************************************************************************/ +int __init plat_setup_psci_ops(uintptr_t sec_entrypoint, + const plat_psci_ops_t **psci_ops) +{ + *psci_ops = plat_arm_psci_override_pm_ops(&plat_arm_psci_pm_ops); + + /* Setup mailbox with entry point. */ + plat_arm_program_trusted_mailbox(sec_entrypoint); + return 0; +} diff --git a/plat/arm/common/arm_sip_svc.c b/plat/arm/common/arm_sip_svc.c new file mode 100644 index 0000000..6456c78 --- /dev/null +++ b/plat/arm/common/arm_sip_svc.c @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2016-2019,2021, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* ARM SiP Service UUID */ +DEFINE_SVC_UUID2(arm_sip_svc_uid, + 0x556d75e2, 0x6033, 0xb54b, 0xb5, 0x75, + 0x62, 0x79, 0xfd, 0x11, 0x37, 0xff); + +static int arm_sip_setup(void) +{ + if (pmf_setup() != 0) { + return 1; + } + +#if USE_DEBUGFS + + if (debugfs_smc_setup() != 0) { + return 1; + } + +#endif /* USE_DEBUGFS */ + + return 0; +} + +/* + * This function handles ARM defined SiP Calls + */ +static uintptr_t arm_sip_handler(unsigned int smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + int call_count = 0; + +#if ENABLE_PMF + + /* + * Dispatch PMF calls to PMF SMC handler and return its return + * value + */ + if (is_pmf_fid(smc_fid)) { + return pmf_smc_handler(smc_fid, x1, x2, x3, x4, cookie, + handle, flags); + } + +#endif /* ENABLE_PMF */ + +#if USE_DEBUGFS + + if (is_debugfs_fid(smc_fid)) { + return debugfs_smc_handler(smc_fid, x1, x2, x3, x4, cookie, + handle, flags); + } + +#endif /* USE_DEBUGFS */ + +#if ARM_ETHOSN_NPU_DRIVER + + if (is_ethosn_fid(smc_fid)) { + return ethosn_smc_handler(smc_fid, x1, x2, x3, x4, cookie, + handle, flags); + } + +#endif /* ARM_ETHOSN_NPU_DRIVER */ + + switch (smc_fid) { + case ARM_SIP_SVC_EXE_STATE_SWITCH: { + /* Execution state can be switched only if EL3 is AArch64 */ +#ifdef __aarch64__ + /* Allow calls from non-secure only */ + if (!is_caller_non_secure(flags)) + SMC_RET1(handle, STATE_SW_E_DENIED); + + /* + * Pointers used in execution state switch are all 32 bits wide + */ + return (uintptr_t) arm_execution_state_switch(smc_fid, + (uint32_t) x1, (uint32_t) x2, (uint32_t) x3, + (uint32_t) x4, handle); +#else + /* State switch denied */ + SMC_RET1(handle, STATE_SW_E_DENIED); +#endif /* __aarch64__ */ + } + + case ARM_SIP_SVC_CALL_COUNT: + /* PMF calls */ + call_count += PMF_NUM_SMC_CALLS; + +#if ARM_ETHOSN_NPU_DRIVER + /* ETHOSN calls */ + call_count += ETHOSN_NUM_SMC_CALLS; +#endif /* ARM_ETHOSN_NPU_DRIVER */ + + /* State switch call */ + call_count += 1; + + SMC_RET1(handle, call_count); + + case ARM_SIP_SVC_UID: + /* Return UID to the caller */ + SMC_UUID_RET(handle, arm_sip_svc_uid); + + case ARM_SIP_SVC_VERSION: + /* Return the version of current implementation */ + SMC_RET2(handle, ARM_SIP_SVC_VERSION_MAJOR, ARM_SIP_SVC_VERSION_MINOR); + + default: + WARN("Unimplemented ARM SiP Service Call: 0x%x \n", smc_fid); + SMC_RET1(handle, SMC_UNK); + } + +} + + +/* Define a runtime service descriptor for fast SMC calls */ +DECLARE_RT_SVC( + arm_sip_svc, + OEN_SIP_START, + OEN_SIP_END, + SMC_TYPE_FAST, + arm_sip_setup, + arm_sip_handler +); diff --git a/plat/arm/common/arm_topology.c b/plat/arm/common/arm_topology.c new file mode 100644 index 0000000..c9993a7 --- /dev/null +++ b/plat/arm/common/arm_topology.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include + +/******************************************************************************* + * This function validates an MPIDR by checking whether it falls within the + * acceptable bounds. An error code (-1) is returned if an incorrect mpidr + * is passed. 
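+ * The affinity fields are checked against the platform cluster, CPU and,
+ * when ARM_PLAT_MT is enabled, PE counts.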
+ ******************************************************************************/ +int arm_check_mpidr(u_register_t mpidr) +{ + unsigned int cluster_id, cpu_id; + uint64_t valid_mask; + +#if ARM_PLAT_MT + unsigned int pe_id; + + valid_mask = ~(MPIDR_AFFLVL_MASK | + (MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT) | + (MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT) | + (MPIDR_AFFLVL_MASK << MPIDR_AFF3_SHIFT)); + cluster_id = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK; + cpu_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK; + pe_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK; +#else + valid_mask = ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK); + cluster_id = (unsigned int) ((mpidr >> MPIDR_AFF1_SHIFT) & + MPIDR_AFFLVL_MASK); + cpu_id = (unsigned int) ((mpidr >> MPIDR_AFF0_SHIFT) & + MPIDR_AFFLVL_MASK); +#endif /* ARM_PLAT_MT */ + + mpidr &= MPIDR_AFFINITY_MASK; + if ((mpidr & valid_mask) != 0U) + return -1; + + if (cluster_id >= PLAT_ARM_CLUSTER_COUNT) + return -1; + + /* Validate cpu_id by checking whether it represents a CPU in + one of the two clusters present on the platform. */ + if (cpu_id >= plat_arm_get_cluster_core_count(mpidr)) + return -1; + +#if ARM_PLAT_MT + if (pe_id >= plat_arm_get_cpu_pe_count(mpidr)) + return -1; +#endif /* ARM_PLAT_MT */ + + return 0; +} diff --git a/plat/arm/common/arm_tzc400.c b/plat/arm/common/arm_tzc400.c new file mode 100644 index 0000000..370ef0a --- /dev/null +++ b/plat/arm/common/arm_tzc400.c @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include +#include + +/* Weak definitions may be overridden in specific ARM standard platform */ +#pragma weak plat_arm_security_setup + + +/******************************************************************************* + * Initialize the TrustZone Controller for ARM standard platforms. + * When booting an EL3 payload, this is simplified: we configure region 0 with + * secure access only and do not enable any other region. + ******************************************************************************/ +void arm_tzc400_setup(uintptr_t tzc_base, + const arm_tzc_regions_info_t *tzc_regions) +{ +#ifndef EL3_PAYLOAD_BASE + unsigned int region_index = 1U; + const arm_tzc_regions_info_t *p; + const arm_tzc_regions_info_t init_tzc_regions[] = { + ARM_TZC_REGIONS_DEF, + {0} + }; +#endif + + INFO("Configuring TrustZone Controller\n"); + + tzc400_init(tzc_base); + + /* Disable filters. */ + tzc400_disable_filters(); + +#ifndef EL3_PAYLOAD_BASE + if (tzc_regions == NULL) + p = init_tzc_regions; + else + p = tzc_regions; + + /* Region 0 set to no access by default */ + tzc400_configure_region0(TZC_REGION_S_NONE, 0); + + /* Rest Regions set according to tzc_regions array */ + for (; p->base != 0ULL; p++) { + tzc400_configure_region(PLAT_ARM_TZC_FILTERS, region_index, + p->base, p->end, p->sec_attr, p->nsaid_permissions); + region_index++; + } + + INFO("Total %u regions set.\n", region_index); + +#else /* if defined(EL3_PAYLOAD_BASE) */ + + /* Allow Secure and Non-secure access to DRAM for EL3 payloads */ + tzc400_configure_region0(TZC_REGION_S_RDWR, PLAT_ARM_TZC_NS_DEV_ACCESS); + +#endif /* EL3_PAYLOAD_BASE */ + + /* + * Raise an exception if a NS device tries to access secure memory + * TODO: Add interrupt handling support. + */ + tzc400_set_action(TZC_ACTION_ERR); + + /* Enable filters. 
*/ + tzc400_enable_filters(); +} + +void plat_arm_security_setup(void) +{ + arm_tzc400_setup(PLAT_ARM_TZC_BASE, NULL); +} diff --git a/plat/arm/common/arm_tzc_dmc500.c b/plat/arm/common/arm_tzc_dmc500.c new file mode 100644 index 0000000..e9f897f --- /dev/null +++ b/plat/arm/common/arm_tzc_dmc500.c @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include + +#include +#include +#include + +/******************************************************************************* + * Initialize the DMC500-TrustZone Controller for ARM standard platforms. + * When booting an EL3 payload, this is simplified: we configure region 0 with + * secure access only and do not enable any other region. + ******************************************************************************/ +void arm_tzc_dmc500_setup(tzc_dmc500_driver_data_t *plat_driver_data, + const arm_tzc_regions_info_t *tzc_regions) +{ +#ifndef EL3_PAYLOAD_BASE + unsigned int region_index = 1U; + const arm_tzc_regions_info_t *p; + const arm_tzc_regions_info_t init_tzc_regions[] = { + ARM_TZC_REGIONS_DEF, + {0} + }; +#endif + + assert(plat_driver_data); + + INFO("Configuring DMC-500 TZ Settings\n"); + + tzc_dmc500_driver_init(plat_driver_data); + +#ifndef EL3_PAYLOAD_BASE + if (tzc_regions == NULL) + p = init_tzc_regions; + else + p = tzc_regions; + + /* Region 0 set to no access by default */ + tzc_dmc500_configure_region0(TZC_REGION_S_NONE, 0); + + /* Rest Regions set according to tzc_regions array */ + for (; p->base != 0ULL; p++) { + tzc_dmc500_configure_region(region_index, p->base, p->end, + p->sec_attr, p->nsaid_permissions); + region_index++; + } + + INFO("Total %u regions set.\n", region_index); + +#else + /* Allow secure access only to DRAM for EL3 payloads */ + tzc_dmc500_configure_region0(TZC_REGION_S_RDWR, 0); +#endif + /* + * Raise an exception if a NS device tries to access secure memory + * TODO: Add interrupt handling support. + */ + tzc_dmc500_set_action(TZC_ACTION_RV_LOWERR); + + /* + * Flush the configuration settings to have an affect. Validate + * flush by checking FILTER_EN is set on region 1 attributes + * register. + */ + tzc_dmc500_config_complete(); + + /* + * Wait for the flush to complete. + * TODO: Have a timeout for this loop + */ + while (tzc_dmc500_verify_complete()) + ; +} diff --git a/plat/arm/common/fconf/arm_fconf_io.c b/plat/arm/common/fconf/arm_fconf_io.c new file mode 100644 index 0000000..6c32331 --- /dev/null +++ b/plat/arm/common/fconf/arm_fconf_io.c @@ -0,0 +1,381 @@ +/* + * Copyright (c) 2019-2022, ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#if PSA_FWU_SUPPORT +/* metadata entry details */ +static io_block_spec_t fwu_metadata_spec; +#endif /* PSA_FWU_SUPPORT */ + +io_block_spec_t fip_block_spec = { +/* + * This is fixed FIP address used by BL1, BL2 loads partition table + * to get FIP address. 
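+ * With ARM_GPT_SUPPORT, the FIP offset within the GPT image is added to
+ * the flash base.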
+ */ +#if ARM_GPT_SUPPORT + .offset = PLAT_ARM_FLASH_IMAGE_BASE + PLAT_ARM_FIP_OFFSET_IN_GPT, +#else + .offset = PLAT_ARM_FLASH_IMAGE_BASE, +#endif /* ARM_GPT_SUPPORT */ + .length = PLAT_ARM_FLASH_IMAGE_MAX_SIZE +}; + +#if ARM_GPT_SUPPORT +static const io_block_spec_t gpt_spec = { + .offset = PLAT_ARM_FLASH_IMAGE_BASE, + /* + * PLAT_PARTITION_BLOCK_SIZE = 512 + * PLAT_PARTITION_MAX_ENTRIES = 128 + * each sector has 4 partition entries, and there are + * 2 reserved sectors i.e. protective MBR and primary + * GPT header hence length gets calculated as, + * length = 512 * (128/4 + 2) + */ + .length = PLAT_PARTITION_BLOCK_SIZE * + (PLAT_PARTITION_MAX_ENTRIES / 4 + 2), +}; +#endif /* ARM_GPT_SUPPORT */ + +const io_uuid_spec_t arm_uuid_spec[MAX_NUMBER_IDS] = { + [BL2_IMAGE_ID] = {UUID_TRUSTED_BOOT_FIRMWARE_BL2}, + [TB_FW_CONFIG_ID] = {UUID_TB_FW_CONFIG}, + [FW_CONFIG_ID] = {UUID_FW_CONFIG}, +#if !ARM_IO_IN_DTB + [SCP_BL2_IMAGE_ID] = {UUID_SCP_FIRMWARE_SCP_BL2}, + [BL31_IMAGE_ID] = {UUID_EL3_RUNTIME_FIRMWARE_BL31}, + [BL32_IMAGE_ID] = {UUID_SECURE_PAYLOAD_BL32}, + [BL32_EXTRA1_IMAGE_ID] = {UUID_SECURE_PAYLOAD_BL32_EXTRA1}, + [BL32_EXTRA2_IMAGE_ID] = {UUID_SECURE_PAYLOAD_BL32_EXTRA2}, + [BL33_IMAGE_ID] = {UUID_NON_TRUSTED_FIRMWARE_BL33}, + [HW_CONFIG_ID] = {UUID_HW_CONFIG}, + [SOC_FW_CONFIG_ID] = {UUID_SOC_FW_CONFIG}, + [TOS_FW_CONFIG_ID] = {UUID_TOS_FW_CONFIG}, + [NT_FW_CONFIG_ID] = {UUID_NT_FW_CONFIG}, + [RMM_IMAGE_ID] = {UUID_REALM_MONITOR_MGMT_FIRMWARE}, +#endif /* ARM_IO_IN_DTB */ +#if TRUSTED_BOARD_BOOT + [TRUSTED_BOOT_FW_CERT_ID] = {UUID_TRUSTED_BOOT_FW_CERT}, +#if !ARM_IO_IN_DTB + [CCA_CONTENT_CERT_ID] = {UUID_CCA_CONTENT_CERT}, + [CORE_SWD_KEY_CERT_ID] = {UUID_CORE_SWD_KEY_CERT}, + [PLAT_KEY_CERT_ID] = {UUID_PLAT_KEY_CERT}, + [TRUSTED_KEY_CERT_ID] = {UUID_TRUSTED_KEY_CERT}, + [SCP_FW_KEY_CERT_ID] = {UUID_SCP_FW_KEY_CERT}, + [SOC_FW_KEY_CERT_ID] = {UUID_SOC_FW_KEY_CERT}, + [TRUSTED_OS_FW_KEY_CERT_ID] = {UUID_TRUSTED_OS_FW_KEY_CERT}, + [NON_TRUSTED_FW_KEY_CERT_ID] = {UUID_NON_TRUSTED_FW_KEY_CERT}, + [SCP_FW_CONTENT_CERT_ID] = {UUID_SCP_FW_CONTENT_CERT}, + [SOC_FW_CONTENT_CERT_ID] = {UUID_SOC_FW_CONTENT_CERT}, + [TRUSTED_OS_FW_CONTENT_CERT_ID] = {UUID_TRUSTED_OS_FW_CONTENT_CERT}, + [NON_TRUSTED_FW_CONTENT_CERT_ID] = {UUID_NON_TRUSTED_FW_CONTENT_CERT}, +#if defined(SPD_spmd) + [SIP_SP_CONTENT_CERT_ID] = {UUID_SIP_SECURE_PARTITION_CONTENT_CERT}, + [PLAT_SP_CONTENT_CERT_ID] = {UUID_PLAT_SECURE_PARTITION_CONTENT_CERT}, +#endif +#endif /* ARM_IO_IN_DTB */ +#endif /* TRUSTED_BOARD_BOOT */ +}; + +/* By default, ARM platforms load images from the FIP */ +struct plat_io_policy policies[MAX_NUMBER_IDS] = { +#if ARM_GPT_SUPPORT + [GPT_IMAGE_ID] = { + &memmap_dev_handle, + (uintptr_t)&gpt_spec, + open_memmap + }, +#endif /* ARM_GPT_SUPPORT */ +#if PSA_FWU_SUPPORT + [FWU_METADATA_IMAGE_ID] = { + &memmap_dev_handle, + /* filled runtime from partition information */ + (uintptr_t)&fwu_metadata_spec, + open_memmap + }, + [BKUP_FWU_METADATA_IMAGE_ID] = { + &memmap_dev_handle, + /* filled runtime from partition information */ + (uintptr_t)&fwu_metadata_spec, + open_memmap + }, +#endif /* PSA_FWU_SUPPORT */ + [FIP_IMAGE_ID] = { + &memmap_dev_handle, + (uintptr_t)&fip_block_spec, + open_memmap + }, + [BL2_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[BL2_IMAGE_ID], + open_fip + }, + [TB_FW_CONFIG_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[TB_FW_CONFIG_ID], + open_fip + }, + [FW_CONFIG_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[FW_CONFIG_ID], + open_fip + }, +#if 
!ARM_IO_IN_DTB + [SCP_BL2_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[SCP_BL2_IMAGE_ID], + open_fip + }, + [BL31_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[BL31_IMAGE_ID], + open_fip + }, + [BL32_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[BL32_IMAGE_ID], + open_fip + }, + [BL32_EXTRA1_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[BL32_EXTRA1_IMAGE_ID], + open_fip + }, + [BL32_EXTRA2_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[BL32_EXTRA2_IMAGE_ID], + open_fip + }, + [BL33_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[BL33_IMAGE_ID], + open_fip + }, + [RMM_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[RMM_IMAGE_ID], + open_fip + }, + [HW_CONFIG_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[HW_CONFIG_ID], + open_fip + }, + [SOC_FW_CONFIG_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[SOC_FW_CONFIG_ID], + open_fip + }, + [TOS_FW_CONFIG_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[TOS_FW_CONFIG_ID], + open_fip + }, + [NT_FW_CONFIG_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[NT_FW_CONFIG_ID], + open_fip + }, +#endif /* ARM_IO_IN_DTB */ +#if TRUSTED_BOARD_BOOT + [TRUSTED_BOOT_FW_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[TRUSTED_BOOT_FW_CERT_ID], + open_fip + }, +#if !ARM_IO_IN_DTB + [CCA_CONTENT_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[CCA_CONTENT_CERT_ID], + open_fip + }, + [CORE_SWD_KEY_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[CORE_SWD_KEY_CERT_ID], + open_fip + }, + [PLAT_KEY_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[PLAT_KEY_CERT_ID], + open_fip + }, + [TRUSTED_KEY_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[TRUSTED_KEY_CERT_ID], + open_fip + }, + [SCP_FW_KEY_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[SCP_FW_KEY_CERT_ID], + open_fip + }, + [SOC_FW_KEY_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[SOC_FW_KEY_CERT_ID], + open_fip + }, + [TRUSTED_OS_FW_KEY_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[TRUSTED_OS_FW_KEY_CERT_ID], + open_fip + }, + [NON_TRUSTED_FW_KEY_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[NON_TRUSTED_FW_KEY_CERT_ID], + open_fip + }, + [SCP_FW_CONTENT_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[SCP_FW_CONTENT_CERT_ID], + open_fip + }, + [SOC_FW_CONTENT_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[SOC_FW_CONTENT_CERT_ID], + open_fip + }, + [TRUSTED_OS_FW_CONTENT_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[TRUSTED_OS_FW_CONTENT_CERT_ID], + open_fip + }, + [NON_TRUSTED_FW_CONTENT_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[NON_TRUSTED_FW_CONTENT_CERT_ID], + open_fip + }, +#if defined(SPD_spmd) + [SIP_SP_CONTENT_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[SIP_SP_CONTENT_CERT_ID], + open_fip + }, + [PLAT_SP_CONTENT_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[PLAT_SP_CONTENT_CERT_ID], + open_fip + }, +#endif +#endif /* ARM_IO_IN_DTB */ +#endif /* TRUSTED_BOARD_BOOT */ +}; + +#ifdef IMAGE_BL2 + +#if TRUSTED_BOARD_BOOT +#define FCONF_ARM_IO_UUID_NUMBER U(24) +#else +#define FCONF_ARM_IO_UUID_NUMBER U(10) +#endif + +static io_uuid_spec_t fconf_arm_uuids[FCONF_ARM_IO_UUID_NUMBER]; +static OBJECT_POOL_ARRAY(fconf_arm_uuids_pool, fconf_arm_uuids); + +struct policies_load_info { + unsigned int image_id; + const char *name; +}; + +/* image id to property name table */ +static const struct 
policies_load_info load_info[FCONF_ARM_IO_UUID_NUMBER] = { + {SCP_BL2_IMAGE_ID, "scp_bl2_uuid"}, + {BL31_IMAGE_ID, "bl31_uuid"}, + {BL32_IMAGE_ID, "bl32_uuid"}, + {BL32_EXTRA1_IMAGE_ID, "bl32_extra1_uuid"}, + {BL32_EXTRA2_IMAGE_ID, "bl32_extra2_uuid"}, + {BL33_IMAGE_ID, "bl33_uuid"}, + {HW_CONFIG_ID, "hw_cfg_uuid"}, + {SOC_FW_CONFIG_ID, "soc_fw_cfg_uuid"}, + {TOS_FW_CONFIG_ID, "tos_fw_cfg_uuid"}, + {NT_FW_CONFIG_ID, "nt_fw_cfg_uuid"}, +#if TRUSTED_BOARD_BOOT + {CCA_CONTENT_CERT_ID, "cca_cert_uuid"}, + {CORE_SWD_KEY_CERT_ID, "core_swd_cert_uuid"}, + {PLAT_KEY_CERT_ID, "plat_cert_uuid"}, + {TRUSTED_KEY_CERT_ID, "t_key_cert_uuid"}, + {SCP_FW_KEY_CERT_ID, "scp_fw_key_uuid"}, + {SOC_FW_KEY_CERT_ID, "soc_fw_key_uuid"}, + {TRUSTED_OS_FW_KEY_CERT_ID, "tos_fw_key_cert_uuid"}, + {NON_TRUSTED_FW_KEY_CERT_ID, "nt_fw_key_cert_uuid"}, + {SCP_FW_CONTENT_CERT_ID, "scp_fw_content_cert_uuid"}, + {SOC_FW_CONTENT_CERT_ID, "soc_fw_content_cert_uuid"}, + {TRUSTED_OS_FW_CONTENT_CERT_ID, "tos_fw_content_cert_uuid"}, + {NON_TRUSTED_FW_CONTENT_CERT_ID, "nt_fw_content_cert_uuid"}, +#if defined(SPD_spmd) + {SIP_SP_CONTENT_CERT_ID, "sip_sp_content_cert_uuid"}, + {PLAT_SP_CONTENT_CERT_ID, "plat_sp_content_cert_uuid"}, +#endif +#endif /* TRUSTED_BOARD_BOOT */ +}; + +int fconf_populate_arm_io_policies(uintptr_t config) +{ + int err, node; + unsigned int i; + + union uuid_helper_t uuid_helper; + io_uuid_spec_t *uuid_ptr; + + /* As libfdt uses void *, we can't avoid this cast */ + const void *dtb = (void *)config; + + /* Assert the node offset point to "arm,io-fip-handle" compatible property */ + const char *compatible_str = "arm,io-fip-handle"; + node = fdt_node_offset_by_compatible(dtb, -1, compatible_str); + if (node < 0) { + ERROR("FCONF: Can't find %s compatible in dtb\n", compatible_str); + return node; + } + + /* Locate the uuid cells and read the value for all the load info uuid */ + for (i = 0; i < FCONF_ARM_IO_UUID_NUMBER; i++) { + uuid_ptr = pool_alloc(&fconf_arm_uuids_pool); + err = fdtw_read_uuid(dtb, node, load_info[i].name, 16, + (uint8_t *)&uuid_helper); + if (err < 0) { + WARN("FCONF: Read cell failed for %s\n", load_info[i].name); + return err; + } + + VERBOSE("FCONF: arm-io_policies.%s cell found with value = " + "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n", + load_info[i].name, + uuid_helper.uuid_struct.time_low[0], uuid_helper.uuid_struct.time_low[1], + uuid_helper.uuid_struct.time_low[2], uuid_helper.uuid_struct.time_low[3], + uuid_helper.uuid_struct.time_mid[0], uuid_helper.uuid_struct.time_mid[1], + uuid_helper.uuid_struct.time_hi_and_version[0], + uuid_helper.uuid_struct.time_hi_and_version[1], + uuid_helper.uuid_struct.clock_seq_hi_and_reserved, + uuid_helper.uuid_struct.clock_seq_low, + uuid_helper.uuid_struct.node[0], uuid_helper.uuid_struct.node[1], + uuid_helper.uuid_struct.node[2], uuid_helper.uuid_struct.node[3], + uuid_helper.uuid_struct.node[4], uuid_helper.uuid_struct.node[5]); + + uuid_ptr->uuid = uuid_helper.uuid_struct; + policies[load_info[i].image_id].image_spec = (uintptr_t)uuid_ptr; + policies[load_info[i].image_id].dev_handle = &fip_dev_handle; + policies[load_info[i].image_id].check = open_fip; + } + return 0; +} + +#if ARM_IO_IN_DTB +FCONF_REGISTER_POPULATOR(TB_FW, arm_io, fconf_populate_arm_io_policies); +#endif /* ARM_IO_IN_DTB */ + +#endif /* IMAGE_BL2 */ diff --git a/plat/arm/common/fconf/arm_fconf_sp.c b/plat/arm/common/fconf/arm_fconf_sp.c new file mode 100644 index 0000000..18c83c7 --- /dev/null +++ b/plat/arm/common/fconf/arm_fconf_sp.c @@ 
-0,0 +1,165 @@ +/* + * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef IMAGE_BL2 + +bl_mem_params_node_t sp_mem_params_descs[MAX_SP_IDS]; + +struct arm_sp_t arm_sp; + +int fconf_populate_arm_sp(uintptr_t config) +{ + int sp_node, node, err; + union uuid_helper_t uuid_helper; + unsigned int index = 0; + uint32_t val32; + const unsigned int sip_start = SP_PKG1_ID; + unsigned int sip_index = sip_start; +#if defined(ARM_COT_dualroot) + const unsigned int sip_end = sip_start + MAX_SP_IDS / 2; + /* Allocating index range for platform SPs */ + const unsigned int plat_start = SP_PKG5_ID; + unsigned int plat_index = plat_start; + const unsigned int plat_end = plat_start + MAX_SP_IDS / 2; + bool is_plat_owned = false; +#endif /* ARM_COT_dualroot */ + + /* As libfdt use void *, we can't avoid this cast */ + const void *dtb = (void *)config; + + /* Assert the node offset point to "arm,sp" compatible property */ + const char *compatible_str = "arm,sp"; + + node = fdt_node_offset_by_compatible(dtb, -1, compatible_str); + if (node < 0) { + ERROR("FCONF: Can't find %s in dtb\n", compatible_str); + return node; + } + + fdt_for_each_subnode(sp_node, dtb, node) { + if (index == MAX_SP_IDS) { + ERROR("FCONF: Reached max number of SPs\n"); + return -1; + } + +#if defined(ARM_COT_dualroot) + if ((sip_index == sip_end) || (plat_index == plat_end)) { + ERROR("FCONF: Reached max number of plat/SiP SPs\n"); + return -1; + } +#endif /* ARM_COT_dualroot */ + + /* Read UUID */ + err = fdtw_read_uuid(dtb, sp_node, "uuid", 16, + (uint8_t *)&uuid_helper); + if (err < 0) { + ERROR("FCONF: cannot read SP uuid\n"); + return -1; + } + + arm_sp.uuids[index] = uuid_helper; + + /* Read Load address */ + err = fdt_read_uint32(dtb, sp_node, "load-address", &val32); + if (err < 0) { + ERROR("FCONF: cannot read SP load address\n"); + return -1; + } + arm_sp.load_addr[index] = val32; + + VERBOSE("FCONF: %s UUID" + " %02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" + " load_addr=%lx\n", + __func__, + uuid_helper.uuid_struct.time_low[0], uuid_helper.uuid_struct.time_low[1], + uuid_helper.uuid_struct.time_low[2], uuid_helper.uuid_struct.time_low[3], + uuid_helper.uuid_struct.time_mid[0], uuid_helper.uuid_struct.time_mid[1], + uuid_helper.uuid_struct.time_hi_and_version[0], + uuid_helper.uuid_struct.time_hi_and_version[1], + uuid_helper.uuid_struct.clock_seq_hi_and_reserved, + uuid_helper.uuid_struct.clock_seq_low, + uuid_helper.uuid_struct.node[0], uuid_helper.uuid_struct.node[1], + uuid_helper.uuid_struct.node[2], uuid_helper.uuid_struct.node[3], + uuid_helper.uuid_struct.node[4], uuid_helper.uuid_struct.node[5], + arm_sp.load_addr[index]); + + /* Read owner field only for dualroot CoT */ +#if defined(ARM_COT_dualroot) + /* Owner is an optional field, no need to catch error */ + fdtw_read_string(dtb, sp_node, "owner", + arm_sp.owner[index], ARM_SP_OWNER_NAME_LEN); + + /* If owner is empty mark it as SiP owned */ + if ((strncmp(arm_sp.owner[index], "SiP", + ARM_SP_OWNER_NAME_LEN) == 0) || + (strncmp(arm_sp.owner[index], "", + ARM_SP_OWNER_NAME_LEN) == 0)) { + is_plat_owned = false; + } else if (strcmp(arm_sp.owner[index], "Plat") == 0) { + is_plat_owned = true; + } else { + ERROR("FCONF: %s is not a valid SP owner\n", + arm_sp.owner[index]); + return -1; + } + /* + * Add SP information in mem param 
descriptor and IO policies + * structure. + */ + if (is_plat_owned) { + sp_mem_params_descs[index].image_id = plat_index; + policies[plat_index].image_spec = + (uintptr_t)&arm_sp.uuids[index]; + policies[plat_index].dev_handle = &fip_dev_handle; + policies[plat_index].check = open_fip; + plat_index++; + } else +#endif /* ARM_COT_dualroot */ + { + sp_mem_params_descs[index].image_id = sip_index; + policies[sip_index].image_spec = + (uintptr_t)&arm_sp.uuids[index]; + policies[sip_index].dev_handle = &fip_dev_handle; + policies[sip_index].check = open_fip; + sip_index++; + } + SET_PARAM_HEAD(&sp_mem_params_descs[index].image_info, + PARAM_IMAGE_BINARY, VERSION_2, 0); + sp_mem_params_descs[index].image_info.image_max_size = + ARM_SP_MAX_SIZE; + sp_mem_params_descs[index].next_handoff_image_id = + INVALID_IMAGE_ID; + sp_mem_params_descs[index].image_info.image_base = + arm_sp.load_addr[index]; + index++; + } + + if ((sp_node < 0) && (sp_node != -FDT_ERR_NOTFOUND)) { + ERROR("%u: fdt_for_each_subnode(): %d\n", __LINE__, node); + return sp_node; + } + + arm_sp.number_of_sp = index; + return 0; +} + +FCONF_REGISTER_POPULATOR(TB_FW, arm_sp, fconf_populate_arm_sp); + +#endif /* IMAGE_BL2 */ diff --git a/plat/arm/common/fconf/fconf_ethosn_getter.c b/plat/arm/common/fconf/fconf_ethosn_getter.c new file mode 100644 index 0000000..0b48a98 --- /dev/null +++ b/plat/arm/common/fconf/fconf_ethosn_getter.c @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2021-2022, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include + +#include +#include +#include +#include + +struct ethosn_config_t ethosn_config = {0}; + +struct ethosn_sub_allocator_t { + const char *name; + size_t name_len; + uint32_t stream_id; +}; + +static bool fdt_node_is_enabled(const void *fdt, int node) +{ + int len; + const char *node_status; + + node_status = fdt_getprop(fdt, node, "status", &len); + if (node_status == NULL || + (len == 5 && /* Includes null character */ + strncmp(node_status, "okay", 4U) == 0)) { + return true; + } + + return false; +} + +static bool fdt_node_has_reserved_memory(const void *fdt, int dev_node) +{ + return fdt_get_property(fdt, dev_node, "memory-region", NULL) != NULL; +} + +static int fdt_node_get_iommus_stream_id(const void *fdt, int node, uint32_t *stream_id) +{ + int err; + uint32_t iommus_array[2] = {0U}; + + err = fdt_read_uint32_array(fdt, node, "iommus", 2U, iommus_array); + if (err) { + return err; + } + + *stream_id = iommus_array[1]; + return 0; +} + +static int fdt_node_populate_sub_allocators(const void *fdt, + int alloc_node, + struct ethosn_sub_allocator_t *sub_allocators, + size_t num_allocs) +{ + int sub_node; + size_t i; + int err = -FDT_ERR_NOTFOUND; + uint32_t found_sub_allocators = 0U; + + fdt_for_each_subnode(sub_node, fdt, alloc_node) { + const char *node_name; + + if (!fdt_node_is_enabled(fdt, sub_node)) { + /* Ignore disabled node */ + continue; + } + + if (fdt_node_check_compatible(fdt, sub_node, "ethosn-memory") != 0) { + continue; + } + + node_name = fdt_get_name(fdt, sub_node, NULL); + for (i = 0U; i < num_allocs; ++i) { + if (strncmp(node_name, sub_allocators[i].name, + sub_allocators[i].name_len) != 0) { + continue; + } + + err = fdt_node_get_iommus_stream_id(fdt, sub_node, + &sub_allocators[i].stream_id); + if (err) { + ERROR("FCONF: Failed to get stream ID from sub-allocator %s\n", + node_name); + return err; + } + + ++found_sub_allocators; + /* Nothing more to do for this node */ + break; + } + + /* Check that at least one of the 
sub-allocators matched */ + if (i == num_allocs) { + ERROR("FCONF: Unknown sub-allocator %s\n", node_name); + return -FDT_ERR_BADSTRUCTURE; + } + } + + if ((sub_node < 0) && (sub_node != -FDT_ERR_NOTFOUND)) { + ERROR("FCONF: Failed to parse sub-allocators\n"); + return -FDT_ERR_BADSTRUCTURE; + } + + if (err == -FDT_ERR_NOTFOUND) { + ERROR("FCONF: No matching sub-allocator found\n"); + return err; + } + + if (found_sub_allocators != num_allocs) { + ERROR("FCONF: Not all sub-allocators were found\n"); + return -FDT_ERR_BADSTRUCTURE; + } + + return 0; +} + +static int fdt_node_populate_main_allocator(const void *fdt, + int alloc_node, + struct ethosn_main_allocator_t *allocator) +{ + int err; + struct ethosn_sub_allocator_t sub_allocators[] = { + {.name = "firmware", .name_len = 8U}, + {.name = "working_data", .name_len = 12U} + }; + + err = fdt_node_populate_sub_allocators(fdt, alloc_node, sub_allocators, + ARRAY_SIZE(sub_allocators)); + if (err) { + return err; + } + + allocator->firmware.stream_id = sub_allocators[0].stream_id; + allocator->working_data.stream_id = sub_allocators[1].stream_id; + + return 0; +} + +static int fdt_node_populate_asset_allocator(const void *fdt, + int alloc_node, + struct ethosn_asset_allocator_t *allocator) +{ + int err; + struct ethosn_sub_allocator_t sub_allocators[] = { + {.name = "command_stream", .name_len = 14U}, + {.name = "weight_data", .name_len = 11U}, + {.name = "buffer_data", .name_len = 11U}, + {.name = "intermediate_data", .name_len = 17U} + }; + + err = fdt_node_populate_sub_allocators(fdt, alloc_node, sub_allocators, + ARRAY_SIZE(sub_allocators)); + if (err) { + return err; + } + + + allocator->command_stream.stream_id = sub_allocators[0].stream_id; + allocator->weight_data.stream_id = sub_allocators[1].stream_id; + allocator->buffer_data.stream_id = sub_allocators[2].stream_id; + allocator->intermediate_data.stream_id = sub_allocators[3].stream_id; + return 0; +} + +static int fdt_node_populate_core(const void *fdt, + int device_node, + int core_node, + bool has_reserved_memory, + uint32_t core_index, + struct ethosn_core_t *core) +{ + int err; + int sub_node; + uintptr_t core_addr; + + err = fdt_get_reg_props_by_index(fdt, device_node, core_index, + &core_addr, NULL); + if (err < 0) { + ERROR("FCONF: Failed to read reg property for NPU core %u\n", + core_index); + return err; + } + + err = -FDT_ERR_NOTFOUND; + fdt_for_each_subnode(sub_node, fdt, core_node) { + + if (!fdt_node_is_enabled(fdt, sub_node)) { + continue; + } + + if (fdt_node_check_compatible(fdt, + sub_node, + "ethosn-main_allocator") != 0) { + continue; + } + + if (has_reserved_memory) { + ERROR("FCONF: Main allocator not supported when using reserved memory\n"); + return -FDT_ERR_BADSTRUCTURE; + } + + if (err != -FDT_ERR_NOTFOUND) { + ERROR("FCONF: NPU core 0x%lx has more than one main allocator\n", + core_addr); + return -FDT_ERR_BADSTRUCTURE; + } + + err = fdt_node_populate_main_allocator(fdt, sub_node, &core->main_allocator); + if (err) { + ERROR("FCONF: Failed to parse main allocator for NPU core 0x%lx\n", + core_addr); + return err; + } + } + + if ((sub_node < 0) && (sub_node != -FDT_ERR_NOTFOUND)) { + ERROR("FCONF: Failed to parse core sub nodes\n"); + return -FDT_ERR_BADSTRUCTURE; + } + + if (!has_reserved_memory && err) { + ERROR("FCONF: Main allocator not found for NPU core 0x%lx\n", + core_addr); + return err; + } + + core->addr = core_addr; + + return 0; +} + +int fconf_populate_ethosn_config(uintptr_t config) +{ + int ethosn_node; + uint32_t dev_count = 0U; + const 
void *hw_conf_dtb = (const void *)config; + + INFO("Probing Arm(R) Ethos(TM)-N NPU\n"); + + fdt_for_each_compatible_node(hw_conf_dtb, ethosn_node, "ethosn") { + struct ethosn_device_t *dev = ðosn_config.devices[dev_count]; + uint32_t dev_asset_alloc_count = 0U; + uint32_t dev_core_count = 0U; + bool has_reserved_memory; + int sub_node; + + if (!fdt_node_is_enabled(hw_conf_dtb, ethosn_node)) { + continue; + } + + if (dev_count >= ETHOSN_DEV_NUM_MAX) { + ERROR("FCONF: Reached max number of NPUs\n"); + return -FDT_ERR_BADSTRUCTURE; + } + + has_reserved_memory = fdt_node_has_reserved_memory(hw_conf_dtb, ethosn_node); + fdt_for_each_subnode(sub_node, hw_conf_dtb, ethosn_node) { + int err; + + if (!fdt_node_is_enabled(hw_conf_dtb, sub_node)) { + /* Ignore disabled sub node */ + continue; + } + + if (fdt_node_check_compatible(hw_conf_dtb, + sub_node, + "ethosn-core") == 0) { + + if (dev_core_count >= ETHOSN_DEV_CORE_NUM_MAX) { + ERROR("FCONF: Reached max number of NPU cores for NPU %u\n", + dev_count); + return -FDT_ERR_BADSTRUCTURE; + } + + err = fdt_node_populate_core(hw_conf_dtb, + ethosn_node, + sub_node, + has_reserved_memory, + dev_core_count, + &(dev->cores[dev_core_count])); + if (err) { + return err; + } + ++dev_core_count; + } else if (fdt_node_check_compatible(hw_conf_dtb, + sub_node, + "ethosn-asset_allocator") == 0) { + + if (dev_asset_alloc_count >= + ETHOSN_DEV_ASSET_ALLOCATOR_NUM_MAX) { + ERROR("FCONF: Reached max number of asset allocators for NPU %u\n", + dev_count); + return -FDT_ERR_BADSTRUCTURE; + } + + if (has_reserved_memory) { + ERROR("FCONF: Asset allocator not supported when using reserved memory\n"); + return -FDT_ERR_BADSTRUCTURE; + } + + err = fdt_node_populate_asset_allocator(hw_conf_dtb, + sub_node, + &(dev->asset_allocators[dev_asset_alloc_count])); + if (err) { + ERROR("FCONF: Failed to parse asset allocator for NPU %u\n", + dev_count); + return err; + } + ++dev_asset_alloc_count; + } + } + + if ((sub_node < 0) && (sub_node != -FDT_ERR_NOTFOUND)) { + ERROR("FCONF: Failed to parse sub nodes for NPU %u\n", + dev_count); + return -FDT_ERR_BADSTRUCTURE; + } + + if (dev_core_count == 0U) { + ERROR("FCONF: NPU %u must have at least one enabled core\n", + dev_count); + return -FDT_ERR_BADSTRUCTURE; + } + + if (!has_reserved_memory && dev_asset_alloc_count == 0U) { + ERROR("FCONF: NPU %u must have at least one asset allocator\n", + dev_count); + return -FDT_ERR_BADSTRUCTURE; + } + + dev->num_cores = dev_core_count; + dev->num_allocators = dev_asset_alloc_count; + dev->has_reserved_memory = has_reserved_memory; + ++dev_count; + } + + if (dev_count == 0U) { + ERROR("FCONF: Can't find 'ethosn' compatible node in dtb\n"); + return -FDT_ERR_BADSTRUCTURE; + } + + ethosn_config.num_devices = dev_count; + + return 0; +} + +FCONF_REGISTER_POPULATOR(HW_CONFIG, ethosn_config, fconf_populate_ethosn_config); diff --git a/plat/arm/common/fconf/fconf_nv_cntr_getter.c b/plat/arm/common/fconf/fconf_nv_cntr_getter.c new file mode 100644 index 0000000..8d645ef --- /dev/null +++ b/plat/arm/common/fconf/fconf_nv_cntr_getter.c @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2020, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include + +#include + +#include + +/******************************************************************************* + * fconf_populate_cot_descs() - Populate available nv-counters and update global + * structure. 
+ * @config[in]: Pointer to the device tree blob in memory + * + * Return 0 on success or an error value otherwise. + ******************************************************************************/ +static int fconf_populate_nv_cntrs(uintptr_t config) +{ + int rc, node, child; + uint32_t id; + uintptr_t reg; + + /* As libfdt uses void *, we can't avoid this cast */ + const void *dtb = (void *)config; + const char *compatible_str = "arm, non-volatile-counter"; + + node = fdt_node_offset_by_compatible(dtb, -1, compatible_str); + if (node < 0) { + ERROR("FCONF: Can't find %s compatible in node\n", + compatible_str); + return node; + } + + fdt_for_each_subnode(child, dtb, node) { + + rc = fdt_read_uint32(dtb, child, "id", &id); + if (rc < 0) { + ERROR("FCONF: Can't find %s property in node\n", "id"); + return rc; + } + + assert(id < MAX_NV_CTR_IDS); + + rc = fdt_get_reg_props_by_index(dtb, child, 0, ®, NULL); + if (rc < 0) { + ERROR("FCONF: Can't find %s property in node\n", "reg"); + return rc; + } + + nv_cntr_base_addr[id] = reg; + } + + return 0; +} + +FCONF_REGISTER_POPULATOR(TB_FW, nv_cntrs, fconf_populate_nv_cntrs); diff --git a/plat/arm/common/fconf/fconf_sdei_getter.c b/plat/arm/common/fconf/fconf_sdei_getter.c new file mode 100644 index 0000000..c26e316 --- /dev/null +++ b/plat/arm/common/fconf/fconf_sdei_getter.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include + +#include +#include +#include +#include + +#define PRIVATE_EVENT_NUM(i) private_events[3 * (i)] +#define PRIVATE_EVENT_INTR(i) private_events[3 * (i) + 1] +#define PRIVATE_EVENT_FLAGS(i) private_events[3 * (i) + 2] + +#define SHARED_EVENT_NUM(i) shared_events[3 * (i)] +#define SHARED_EVENT_INTR(i) shared_events[3 * (i) + 1] +#define SHARED_EVENT_FLAGS(i) shared_events[3 * (i) + 2] + +struct sdei_dyn_config_t sdei_dyn_config; + +int fconf_populate_sdei_dyn_config(uintptr_t config) +{ + uint32_t i; + int node, err; + uint32_t private_events[PLAT_SDEI_DP_EVENT_MAX_CNT * 3]; + uint32_t shared_events[PLAT_SDEI_DS_EVENT_MAX_CNT * 3]; + + const void *dtb = (void *)config; + + /* Check that the node offset points to compatible property */ + node = fdt_node_offset_by_compatible(dtb, -1, "arm,sdei-1.0"); + if (node < 0) { + ERROR("FCONF: Can't find 'arm,sdei-1.0' compatible node in dtb\n"); + return node; + } + + /* Read number of private mappings */ + err = fdt_read_uint32(dtb, node, "private_event_count", + &sdei_dyn_config.private_ev_cnt); + if (err < 0) { + ERROR("FCONF: Read cell failed for 'private_event_count': %u\n", + sdei_dyn_config.private_ev_cnt); + return err; + } + + /* Check if the value is in range */ + if (sdei_dyn_config.private_ev_cnt > PLAT_SDEI_DP_EVENT_MAX_CNT) { + ERROR("FCONF: Invalid value for 'private_event_count': %u\n", + sdei_dyn_config.private_ev_cnt); + return -1; + } + + /* Read private mappings */ + err = fdt_read_uint32_array(dtb, node, "private_events", + sdei_dyn_config.private_ev_cnt * 3, private_events); + if (err < 0) { + ERROR("FCONF: Read cell failed for 'private_events': %d\n", err); + return err; + } + + /* Move data to fconf struct */ + for (i = 0; i < sdei_dyn_config.private_ev_cnt; i++) { + sdei_dyn_config.private_ev_nums[i] = PRIVATE_EVENT_NUM(i); + sdei_dyn_config.private_ev_intrs[i] = PRIVATE_EVENT_INTR(i); + sdei_dyn_config.private_ev_flags[i] = PRIVATE_EVENT_FLAGS(i); + } + + /* Read number of shared mappings */ + err = fdt_read_uint32(dtb, node, "shared_event_count", + 
&sdei_dyn_config.shared_ev_cnt); + if (err < 0) { + ERROR("FCONF: Read cell failed for 'shared_event_count'\n"); + return err; + } + + /* Check if the value is in range */ + if (sdei_dyn_config.shared_ev_cnt > PLAT_SDEI_DS_EVENT_MAX_CNT) { + ERROR("FCONF: Invalid value for 'shared_event_count': %u\n", + sdei_dyn_config.shared_ev_cnt); + return -1; + } + + /* Read shared mappings */ + err = fdt_read_uint32_array(dtb, node, "shared_events", + sdei_dyn_config.shared_ev_cnt * 3, shared_events); + if (err < 0) { + ERROR("FCONF: Read cell failed for 'shared_events': %d\n", err); + return err; + } + + /* Move data to fconf struct */ + for (i = 0; i < sdei_dyn_config.shared_ev_cnt; i++) { + sdei_dyn_config.shared_ev_nums[i] = SHARED_EVENT_NUM(i); + sdei_dyn_config.shared_ev_intrs[i] = SHARED_EVENT_INTR(i); + sdei_dyn_config.shared_ev_flags[i] = SHARED_EVENT_FLAGS(i); + } + + return 0; +} + +FCONF_REGISTER_POPULATOR(HW_CONFIG, sdei, fconf_populate_sdei_dyn_config); diff --git a/plat/arm/common/fconf/fconf_sec_intr_config.c b/plat/arm/common/fconf/fconf_sec_intr_config.c new file mode 100644 index 0000000..f28be24 --- /dev/null +++ b/plat/arm/common/fconf/fconf_sec_intr_config.c @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#include + +#include +#include +#include +#include + +#define G0_INTR_NUM(i) g0_intr_prop[3U * (i)] +#define G0_INTR_PRIORITY(i) g0_intr_prop[3U * (i) + 1] +#define G0_INTR_CONFIG(i) g0_intr_prop[3U * (i) + 2] + +#define G1S_INTR_NUM(i) g1s_intr_prop[3U * (i)] +#define G1S_INTR_PRIORITY(i) g1s_intr_prop[3U * (i) + 1] +#define G1S_INTR_CONFIG(i) g1s_intr_prop[3U * (i) + 2] + +struct sec_intr_prop_t sec_intr_prop; + +static void print_intr_prop(interrupt_prop_t prop) +{ + VERBOSE("FCONF: Secure Interrupt NUM: %d, PRI: %d, TYPE: %d\n", + prop.intr_num, prop.intr_pri, prop.intr_cfg); +} + +int fconf_populate_sec_intr_config(uintptr_t config) +{ + int node, err; + uint32_t g0_intr_count, g1s_intr_count; + uint32_t g0_intr_prop[SEC_INT_COUNT_MAX * 3]; + uint32_t g1s_intr_prop[SEC_INT_COUNT_MAX * 3]; + + /* Necessary to work with libfdt APIs */ + const void *hw_config_dtb = (const void *)config; + + node = fdt_node_offset_by_compatible(hw_config_dtb, -1, + "arm,secure_interrupt_desc"); + if (node < 0) { + ERROR("FCONF: Unable to locate node with %s compatible property\n", + "arm,secure_interrupt_desc"); + return node; + } + + /* Read number of Group 0 interrupts specified by platform */ + err = fdt_read_uint32(hw_config_dtb, node, "g0_intr_cnt", &g0_intr_count); + if (err < 0) { + ERROR("FCONF: Could not locate g0s_intr_cnt property\n"); + return err; + } + + /* At least 1 Group 0 interrupt description has to be provided*/ + if (g0_intr_count < 1U) { + ERROR("FCONF: Invalid number of Group 0 interrupts count specified\n"); + return -1; + } + + /* Read number of Group 1 secure interrupts specified by platform */ + err = fdt_read_uint32(hw_config_dtb, node, "g1s_intr_cnt", + &g1s_intr_count); + if (err < 0) { + ERROR("FCONF: Could not locate g1s_intr_cnt property\n"); + return err; + } + + /* At least one Group 1 interrupt description has to be provided*/ + if (g1s_intr_count < 1U) { + ERROR("FCONF: Invalid number of Group 1 secure interrupts count specified\n"); + return -1; + } + + /* + * Check if the total number of secure interrupts described are within + * the limit defined statically by the platform. 
+ */ + if ((g0_intr_count + g1s_intr_count) > SEC_INT_COUNT_MAX) { + ERROR("FCONF: Total number of secure interrupts exceed limit the of %d\n", + SEC_INT_COUNT_MAX); + return -1; + } + + sec_intr_prop.count = g0_intr_count + g1s_intr_count; + + /* Read the Group 0 interrupt descriptors */ + err = fdt_read_uint32_array(hw_config_dtb, node, "g0_intr_desc", + g0_intr_count * 3, g0_intr_prop); + if (err < 0) { + ERROR("FCONF: Read cell failed for 'g0s_intr_desc': %d\n", err); + return err; + } + + /* Read the Group 1 secure interrupt descriptors */ + err = fdt_read_uint32_array(hw_config_dtb, node, "g1s_intr_desc", + g1s_intr_count * 3, g1s_intr_prop); + if (err < 0) { + ERROR("FCONF: Read cell failed for 'g1s_intr_desc': %d\n", err); + return err; + } + + /* Populate Group 0 interrupt descriptors into fconf based C struct */ + for (uint32_t i = 0; i < g0_intr_count; i++) { + interrupt_prop_t sec_intr_property; + + /* Secure Interrupt Group: INTR_GROUP0 i.e., 0x1 */ + sec_intr_property.intr_grp = 1; + sec_intr_property.intr_num = G0_INTR_NUM(i); + sec_intr_property.intr_pri = G0_INTR_PRIORITY(i); + sec_intr_property.intr_cfg = G0_INTR_CONFIG(i); + sec_intr_prop.descriptor[i] = sec_intr_property; + print_intr_prop(sec_intr_property); + } + + /* Populate G1 secure interrupt descriptors into fconf based C struct */ + for (uint32_t i = 0; i < g1s_intr_count; i++) { + interrupt_prop_t sec_intr_property; + + /* Secure Interrupt Group: INTR_GROUP1S i.e., 0x0 */ + sec_intr_property.intr_grp = 0; + sec_intr_property.intr_num = G1S_INTR_NUM(i); + sec_intr_property.intr_pri = G1S_INTR_PRIORITY(i); + sec_intr_property.intr_cfg = G1S_INTR_CONFIG(i); + sec_intr_prop.descriptor[i + g0_intr_count] = sec_intr_property; + print_intr_prop(sec_intr_property); + } + + return 0; +} + +FCONF_REGISTER_POPULATOR(HW_CONFIG, sec_intr_prop, fconf_populate_sec_intr_config); diff --git a/plat/arm/common/sp_min/arm_sp_min.mk b/plat/arm/common/sp_min/arm_sp_min.mk new file mode 100644 index 0000000..dbd451c --- /dev/null +++ b/plat/arm/common/sp_min/arm_sp_min.mk @@ -0,0 +1,20 @@ +# +# Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +# SP MIN source files common to ARM standard platforms + +# Skip building BL1, BL2 and BL2U if RESET_TO_SP_MIN flag is set. +ifeq (${RESET_TO_SP_MIN},1) + BL1_SOURCES = + BL2_SOURCES = + BL2U_SOURCES = +endif + +BL32_SOURCES += plat/arm/common/arm_pm.c \ + plat/arm/common/arm_topology.c \ + plat/arm/common/sp_min/arm_sp_min_setup.c \ + plat/common/aarch32/platform_mp_stack.S \ + plat/common/plat_psci_common.c diff --git a/plat/arm/common/sp_min/arm_sp_min_setup.c b/plat/arm/common/sp_min/arm_sp_min_setup.c new file mode 100644 index 0000000..f15c137 --- /dev/null +++ b/plat/arm/common/sp_min/arm_sp_min_setup.c @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +static entry_point_info_t bl33_image_ep_info; + +/* Weak definitions may be overridden in specific ARM standard platform */ +#pragma weak sp_min_platform_setup +#pragma weak sp_min_plat_arch_setup +#pragma weak plat_arm_sp_min_early_platform_setup + +#define MAP_BL_SP_MIN_TOTAL MAP_REGION_FLAT( \ + BL32_BASE, \ + BL32_END - BL32_BASE, \ + MT_MEMORY | MT_RW | MT_SECURE) + +/* + * Check that BL32_BASE is above ARM_FW_CONFIG_LIMIT. 
The reserved page + * is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2. + */ +#if !RESET_TO_SP_MIN +CASSERT(BL32_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl32_base_overflows); +#endif + +/******************************************************************************* + * Return a pointer to the 'entry_point_info' structure of the next image for the + * security state specified. BL33 corresponds to the non-secure image type + * while BL32 corresponds to the secure image type. A NULL pointer is returned + * if the image does not exist. + ******************************************************************************/ +entry_point_info_t *sp_min_plat_get_bl33_ep_info(void) +{ + entry_point_info_t *next_image_info; + + next_image_info = &bl33_image_ep_info; + + /* + * None of the images on the ARM development platforms can have 0x0 + * as the entrypoint + */ + if (next_image_info->pc) + return next_image_info; + else + return NULL; +} + +/******************************************************************************* + * Utility function to perform early platform setup. + ******************************************************************************/ +void arm_sp_min_early_platform_setup(void *from_bl2, uintptr_t tos_fw_config, + uintptr_t hw_config, void *plat_params_from_bl2) +{ + /* Initialize the console to provide early debug support */ + arm_console_boot_init(); + +#if RESET_TO_SP_MIN + /* There are no parameters from BL2 if SP_MIN is a reset vector */ + assert(from_bl2 == NULL); + assert(plat_params_from_bl2 == NULL); + + /* Populate entry point information for BL33 */ + SET_PARAM_HEAD(&bl33_image_ep_info, + PARAM_EP, + VERSION_1, + 0); + /* + * Tell SP_MIN where the non-trusted software image + * is located and the entry state information + */ + bl33_image_ep_info.pc = plat_get_ns_image_entrypoint(); + bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry(); + SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); + +# if ARM_LINUX_KERNEL_AS_BL33 + /* + * According to the file ``Documentation/arm/Booting`` of the Linux + * kernel tree, Linux expects: + * r0 = 0 + * r1 = machine type number, optional in DT-only platforms (~0 if so) + * r2 = Physical address of the device tree blob + */ + bl33_image_ep_info.args.arg0 = 0U; + bl33_image_ep_info.args.arg1 = ~0U; + bl33_image_ep_info.args.arg2 = (u_register_t)ARM_PRELOADED_DTB_BASE; +# endif + +#else /* RESET_TO_SP_MIN */ + + /* + * Check params passed from BL2 should not be NULL, + */ + bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2; + assert(params_from_bl2 != NULL); + assert(params_from_bl2->h.type == PARAM_BL_PARAMS); + assert(params_from_bl2->h.version >= VERSION_2); + + bl_params_node_t *bl_params = params_from_bl2->head; + + /* + * Copy BL33 entry point information. + * They are stored in Secure RAM, in BL2's address space. 
+ */ + while (bl_params) { + if (bl_params->image_id == BL33_IMAGE_ID) { + bl33_image_ep_info = *bl_params->ep_info; + break; + } + + bl_params = bl_params->next_params_info; + } + + if (bl33_image_ep_info.pc == 0) + panic(); + +#endif /* RESET_TO_SP_MIN */ + +} + +/******************************************************************************* + * Default implementation for sp_min_platform_setup2() for ARM platforms + ******************************************************************************/ +void plat_arm_sp_min_early_platform_setup(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + arm_sp_min_early_platform_setup((void *)arg0, arg1, arg2, (void *)arg3); + + /* + * Initialize Interconnect for this cluster during cold boot. + * No need for locks as no other CPU is active. + */ + plat_arm_interconnect_init(); + + /* + * Enable Interconnect coherency for the primary CPU's cluster. + * Earlier bootloader stages might already do this (e.g. Trusted + * Firmware's BL1 does it) but we can't assume so. There is no harm in + * executing this code twice anyway. + * Platform specific PSCI code will enable coherency for other + * clusters. + */ + plat_arm_interconnect_enter_coherency(); +} + +void sp_min_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + plat_arm_sp_min_early_platform_setup(arg0, arg1, arg2, arg3); +} + +/******************************************************************************* + * Perform any SP_MIN platform runtime setup prior to SP_MIN exit. + * Common to ARM standard platforms. + ******************************************************************************/ +void arm_sp_min_plat_runtime_setup(void) +{ + /* Initialize the runtime console */ + arm_console_runtime_init(); + +#if PLAT_RO_XLAT_TABLES + arm_xlat_make_tables_readonly(); +#endif +} + +/******************************************************************************* + * Perform platform specific setup for SP_MIN + ******************************************************************************/ +void sp_min_platform_setup(void) +{ + /* Initialize the GIC driver, cpu and distributor interfaces */ + plat_arm_gic_driver_init(); + plat_arm_gic_init(); + + /* + * Do initial security configuration to allow DRAM/device access + * (if earlier BL has not already done so). + */ +#if RESET_TO_SP_MIN && !JUNO_AARCH32_EL3_RUNTIME + plat_arm_security_setup(); + +#if defined(PLAT_ARM_MEM_PROT_ADDR) + arm_nor_psci_do_dyn_mem_protect(); +#endif /* PLAT_ARM_MEM_PROT_ADDR */ + +#endif + + /* Enable and initialize the System level generic timer */ +#ifdef ARM_SYS_CNTCTL_BASE + mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF, + CNTCR_FCREQ(0U) | CNTCR_EN); +#endif +#ifdef ARM_SYS_TIMCTL_BASE + /* Allow access to the System counter timer module */ + arm_configure_sys_timer(); +#endif + /* Initialize power controller before setting up topology */ + plat_arm_pwrc_setup(); +} + +void sp_min_plat_runtime_setup(void) +{ + arm_sp_min_plat_runtime_setup(); +} + +/******************************************************************************* + * Perform the very early platform specific architectural setup here. 
At the + * moment this only initializes the MMU + ******************************************************************************/ +void arm_sp_min_plat_arch_setup(void) +{ + const mmap_region_t bl_regions[] = { + MAP_BL_SP_MIN_TOTAL, + ARM_MAP_BL_RO, +#if USE_COHERENT_MEM + ARM_MAP_BL_COHERENT_RAM, +#endif + {0} + }; + + setup_page_tables(bl_regions, plat_arm_get_mmap()); + + enable_mmu_svc_mon(0); +} + +void sp_min_plat_arch_setup(void) +{ + arm_sp_min_plat_arch_setup(); +} diff --git a/plat/arm/common/trp/arm_trp.mk b/plat/arm/common/trp/arm_trp.mk new file mode 100644 index 0000000..204c14a --- /dev/null +++ b/plat/arm/common/trp/arm_trp.mk @@ -0,0 +1,12 @@ +# +# Copyright (c) 2021-2022, Arm Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +# TRP source files common to ARM standard platforms +RMM_SOURCES += plat/arm/common/trp/arm_trp_setup.c \ + plat/arm/common/arm_topology.c \ + plat/common/aarch64/platform_mp_stack.S + +INCLUDES += -Iinclude/services/trp diff --git a/plat/arm/common/trp/arm_trp_setup.c b/plat/arm/common/trp/arm_trp_setup.c new file mode 100644 index 0000000..59b4c06 --- /dev/null +++ b/plat/arm/common/trp/arm_trp_setup.c @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2021-2022, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/******************************************************************************* + * Received from boot manifest and populated here + ******************************************************************************/ +extern uint32_t trp_boot_manifest_version; + +/******************************************************************************* + * Initialize the UART + ******************************************************************************/ +static console_t arm_trp_runtime_console; + +static int arm_trp_process_manifest(rmm_manifest_t *manifest) +{ + /* Verify the Boot Manifest Version. Only the Major is considered */ + if (RMMD_MANIFEST_VERSION_MAJOR != + RMMD_GET_MANIFEST_VERSION_MAJOR(manifest->version)) { + return E_RMM_BOOT_MANIFEST_VERSION_NOT_SUPPORTED; + } + + trp_boot_manifest_version = manifest->version; + flush_dcache_range((uintptr_t)manifest, sizeof(rmm_manifest_t)); + + return 0; +} + +void arm_trp_early_platform_setup(rmm_manifest_t *manifest) +{ + int rc; + + rc = arm_trp_process_manifest(manifest); + if (rc != 0) { + trp_boot_abort(rc); + } + + /* + * Initialize a different console than already in use to display + * messages from trp + */ + rc = console_pl011_register(PLAT_ARM_TRP_UART_BASE, + PLAT_ARM_TRP_UART_CLK_IN_HZ, + ARM_CONSOLE_BAUDRATE, + &arm_trp_runtime_console); + if (rc == 0) { + panic(); + } + + console_set_scope(&arm_trp_runtime_console, + CONSOLE_FLAG_BOOT | CONSOLE_FLAG_RUNTIME); + +} + +void trp_early_platform_setup(rmm_manifest_t *manifest) +{ + arm_trp_early_platform_setup(manifest); +} diff --git a/plat/arm/common/tsp/arm_tsp.mk b/plat/arm/common/tsp/arm_tsp.mk new file mode 100644 index 0000000..4ad77c6 --- /dev/null +++ b/plat/arm/common/tsp/arm_tsp.mk @@ -0,0 +1,10 @@ +# +# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. 
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# TSP source files common to ARM standard platforms
+BL32_SOURCES		+=	plat/arm/common/arm_topology.c		\
+				plat/arm/common/tsp/arm_tsp_setup.c	\
+				plat/common/aarch64/platform_mp_stack.S
diff --git a/plat/arm/common/tsp/arm_tsp_setup.c b/plat/arm/common/tsp/arm_tsp_setup.c
new file mode 100644
index 0000000..a4da8c3
--- /dev/null
+++ b/plat/arm/common/tsp/arm_tsp_setup.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak tsp_early_platform_setup
+#pragma weak tsp_platform_setup
+#pragma weak tsp_plat_arch_setup
+
+#define MAP_BL_TSP_TOTAL	MAP_REGION_FLAT(			\
+					BL32_BASE,			\
+					BL32_END - BL32_BASE,		\
+					MT_MEMORY | MT_RW | MT_SECURE)
+
+/*******************************************************************************
+ * Initialize the UART
+ ******************************************************************************/
+static console_t arm_tsp_runtime_console;
+
+void arm_tsp_early_platform_setup(void)
+{
+	/*
+	 * Initialize a different console than already in use to display
+	 * messages from TSP
+	 */
+	int rc = console_pl011_register(PLAT_ARM_TSP_UART_BASE,
+					PLAT_ARM_TSP_UART_CLK_IN_HZ,
+					ARM_CONSOLE_BAUDRATE,
+					&arm_tsp_runtime_console);
+	if (rc == 0)
+		panic();
+
+	console_set_scope(&arm_tsp_runtime_console,
+			  CONSOLE_FLAG_BOOT | CONSOLE_FLAG_RUNTIME);
+}
+
+void tsp_early_platform_setup(void)
+{
+	arm_tsp_early_platform_setup();
+}
+
+/*******************************************************************************
+ * Perform platform specific setup placeholder
+ ******************************************************************************/
+void tsp_platform_setup(void)
+{
+	plat_arm_gic_driver_init();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU
+ ******************************************************************************/
+void tsp_plat_arch_setup(void)
+{
+#if USE_COHERENT_MEM
+	/* Ensure ARM platforms don't use coherent memory in TSP */
+	assert((BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE) == 0U);
+#endif
+
+	const mmap_region_t bl_regions[] = {
+		MAP_BL_TSP_TOTAL,
+		ARM_MAP_BL_RO,
+		{0}
+	};
+
+	setup_page_tables(bl_regions, plat_arm_get_mmap());
+	enable_mmu_el1(0);
+
+#if PLAT_RO_XLAT_TABLES
+	arm_xlat_make_tables_readonly();
+#endif
+}
-- cgit v1.2.3
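
For reference, the fconf getters added above (arm_fconf_sp.c, fconf_ethosn_getter.c, fconf_nv_cntr_getter.c, fconf_sdei_getter.c and fconf_sec_intr_config.c) all follow the same populate-and-register pattern: locate a compatible node in the configuration DTB handed over as a uintptr_t, read its properties into a global C structure, and register the populator with FCONF_REGISTER_POPULATOR() so that fconf_populate() runs it when the named configuration (TB_FW or HW_CONFIG) is parsed. The sketch below is a minimal illustration of that pattern, not part of the patch; the "arm,example" compatible string, the my_config structure and the "value" property are made up for the example, and the include paths assume the standard TF-A tree layout.

#include <stdint.h>

#include <common/debug.h>
#include <common/fdt_wrappers.h>
#include <lib/fconf/fconf.h>
#include <libfdt.h>

/* Hypothetical configuration structure filled from the DTB. */
static struct my_config_t {
	uint32_t value;
} my_config;

static int fconf_populate_my_config(uintptr_t config)
{
	int node, err;
	/* fconf passes the config DTB address as a uintptr_t. */
	const void *dtb = (const void *)config;

	/* Locate the (hypothetical) "arm,example" compatible node. */
	node = fdt_node_offset_by_compatible(dtb, -1, "arm,example");
	if (node < 0) {
		ERROR("FCONF: Can't find 'arm,example' node in dtb\n");
		return node;
	}

	/* Read a 32-bit property into the global structure. */
	err = fdt_read_uint32(dtb, node, "value", &my_config.value);
	if (err < 0) {
		ERROR("FCONF: Can't read 'value' property\n");
		return err;
	}

	return 0;
}

/* Run this populator whenever the HW_CONFIG DTB is parsed. */
FCONF_REGISTER_POPULATOR(HW_CONFIG, my_config, fconf_populate_my_config);

Like the getters in this patch, the parsed values live in a statically allocated structure, since the populators run during early boot stages where no dynamic allocation is available.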