Diffstat (limited to 'plat/arm/css')
29 files changed, 2845 insertions, 0 deletions
diff --git a/plat/arm/css/common/aarch32/css_helpers.S b/plat/arm/css/common/aarch32/css_helpers.S new file mode 100644 index 0000000..d47e13d --- /dev/null +++ b/plat/arm/css/common/aarch32/css_helpers.S @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <cpu_macros.S> +#include <platform_def.h> + + .weak plat_secondary_cold_boot_setup + .weak plat_get_my_entrypoint + .globl css_calc_core_pos_swap_cluster + .weak plat_is_my_cpu_primary + + /* --------------------------------------------------------------------- + * void plat_secondary_cold_boot_setup(void); + * In the normal boot flow, cold-booting secondary + * CPUs is not yet implemented and they panic. + * --------------------------------------------------------------------- + */ +func plat_secondary_cold_boot_setup + /* TODO: Implement secondary CPU cold boot setup on CSS platforms */ +cb_panic: + b cb_panic +endfunc plat_secondary_cold_boot_setup + + /* --------------------------------------------------------------------- + * uintptr_t plat_get_my_entrypoint (void); + * + * Main job of this routine is to distinguish between a cold and a warm + * boot. On CSS platforms, this distinction is based on the contents of + * the Trusted Mailbox. It is initialised to zero by the SCP before the + * AP cores are released from reset. Therefore, a zero mailbox means + * it's a cold reset. + * + * This functions returns the contents of the mailbox, i.e.: + * - 0 for a cold boot; + * - the warm boot entrypoint for a warm boot. + * --------------------------------------------------------------------- + */ +func plat_get_my_entrypoint + ldr r0, =PLAT_ARM_TRUSTED_MAILBOX_BASE + ldr r0, [r0] + bx lr +endfunc plat_get_my_entrypoint + + /* ----------------------------------------------------------- + * unsigned int css_calc_core_pos_swap_cluster(u_register_t mpidr) + * Utility function to calculate the core position by + * swapping the cluster order. This is necessary in order to + * match the format of the boot information passed by the SCP + * and read in plat_is_my_cpu_primary below. 
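For reference, a C sketch of the core-position calculation that css_calc_core_pos_swap_cluster performs (illustrative only, not part of the patch; it assumes the usual arch.h values MPIDR_CPU_MASK == 0xff, MPIDR_CLUSTER_MASK == 0xff00 and MPIDR_AFFINITY_BITS == 8, i.e. up to four CPUs per cluster and two clusters whose order is swapped):

    static unsigned int calc_core_pos_swap_cluster(unsigned long mpidr)
    {
            unsigned long cpu_id  = mpidr & MPIDR_CPU_MASK;     /* AFF0 */
            unsigned long cluster = mpidr & MPIDR_CLUSTER_MASK; /* AFF1, still shifted left by 8 */

            /* Flip bit 8 so that cluster 0 and cluster 1 swap places */
            cluster ^= (1UL << MPIDR_AFFINITY_BITS);

            /* cluster >> 6 == cluster id * 4, i.e. four core slots per cluster */
            return (unsigned int)(cpu_id + (cluster >> 6));
    }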
+ * ----------------------------------------------------------- + */ +func css_calc_core_pos_swap_cluster + and r1, r0, #MPIDR_CPU_MASK + and r0, r0, #MPIDR_CLUSTER_MASK + eor r0, r0, #(1 << MPIDR_AFFINITY_BITS) // swap cluster order + add r0, r1, r0, LSR #6 + bx lr +endfunc css_calc_core_pos_swap_cluster + + /* ----------------------------------------------------- + * unsigned int plat_is_my_cpu_primary (void); + * + * Find out whether the current cpu is the primary + * cpu (applicable ony after a cold boot) + * ----------------------------------------------------- + */ +#if CSS_USE_SCMI_SDS_DRIVER +func plat_is_my_cpu_primary + mov r10, lr + bl plat_my_core_pos + mov r4, r0 + bl sds_get_primary_cpu_id + /* Check for error */ + mov r1, #0xffffffff + cmp r0, r1 + beq 1f + cmp r0, r4 + moveq r0, #1 + movne r0, #0 + bx r10 +1: + no_ret plat_panic_handler +endfunc plat_is_my_cpu_primary +#else +func plat_is_my_cpu_primary + mov r10, lr + bl plat_my_core_pos + ldr r1, =SCP_BOOT_CFG_ADDR + ldr r1, [r1] + ubfx r1, r1, #PLAT_CSS_PRIMARY_CPU_SHIFT, \ + #PLAT_CSS_PRIMARY_CPU_BIT_WIDTH + cmp r0, r1 + moveq r0, #1 + movne r0, #0 + bx r10 +endfunc plat_is_my_cpu_primary +#endif diff --git a/plat/arm/css/common/aarch64/css_helpers.S b/plat/arm/css/common/aarch64/css_helpers.S new file mode 100644 index 0000000..01669be --- /dev/null +++ b/plat/arm/css/common/aarch64/css_helpers.S @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <cpu_macros.S> +#include <platform_def.h> + + .weak plat_secondary_cold_boot_setup + .weak plat_get_my_entrypoint + .globl css_calc_core_pos_swap_cluster + .weak plat_is_my_cpu_primary + + /* --------------------------------------------------------------------- + * void plat_secondary_cold_boot_setup(void); + * + * In the normal boot flow, cold-booting secondary CPUs is not yet + * implemented and they panic. + * + * When booting an EL3 payload, secondary CPUs are placed in a holding + * pen, waiting for their mailbox to be populated. Note that all CPUs + * share the same mailbox ; therefore, populating it will release all + * CPUs from their holding pen. If finer-grained control is needed then + * this should be handled in the code that secondary CPUs jump to. + * --------------------------------------------------------------------- + */ +func plat_secondary_cold_boot_setup +#ifndef EL3_PAYLOAD_BASE + /* TODO: Implement secondary CPU cold boot setup on CSS platforms */ +cb_panic: + b cb_panic +#else + mov_imm x0, PLAT_ARM_TRUSTED_MAILBOX_BASE + + /* Wait until the mailbox gets populated */ +poll_mailbox: + ldr x1, [x0] + cbz x1, 1f + br x1 +1: + wfe + b poll_mailbox +#endif /* EL3_PAYLOAD_BASE */ +endfunc plat_secondary_cold_boot_setup + + /* --------------------------------------------------------------------- + * uintptr_t plat_get_my_entrypoint (void); + * + * Main job of this routine is to distinguish between a cold and a warm + * boot. On CSS platforms, this distinction is based on the contents of + * the Trusted Mailbox. It is initialised to zero by the SCP before the + * AP cores are released from reset. Therefore, a zero mailbox means + * it's a cold reset. + * + * This functions returns the contents of the mailbox, i.e.: + * - 0 for a cold boot; + * - the warm boot entrypoint for a warm boot. 
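A minimal C sketch of how the Trusted Mailbox contents are interpreted (illustrative only, not part of the patch; PLAT_ARM_TRUSTED_MAILBOX_BASE is the platform-defined mailbox address read by the assembly here, and <stdint.h>/<stdbool.h> are assumed):

    static inline bool css_is_cold_boot(void)
    {
            /*
             * The SCP zeroes the Trusted Mailbox before releasing the AP
             * cores from reset, so a zero value means no warm-boot
             * entrypoint has been programmed yet.
             */
            return *(volatile uintptr_t *)PLAT_ARM_TRUSTED_MAILBOX_BASE == 0U;
    }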
+ * --------------------------------------------------------------------- + */ +func plat_get_my_entrypoint + mov_imm x0, PLAT_ARM_TRUSTED_MAILBOX_BASE + ldr x0, [x0] + ret +endfunc plat_get_my_entrypoint + + /* ----------------------------------------------------------- + * unsigned int css_calc_core_pos_swap_cluster(u_register_t mpidr) + * Utility function to calculate the core position by + * swapping the cluster order. This is necessary in order to + * match the format of the boot information passed by the SCP + * and read in plat_is_my_cpu_primary below. + * ----------------------------------------------------------- + */ +func css_calc_core_pos_swap_cluster + and x1, x0, #MPIDR_CPU_MASK + and x0, x0, #MPIDR_CLUSTER_MASK + eor x0, x0, #(1 << MPIDR_AFFINITY_BITS) // swap cluster order + add x0, x1, x0, LSR #6 + ret +endfunc css_calc_core_pos_swap_cluster + + /* ----------------------------------------------------- + * unsigned int plat_is_my_cpu_primary (void); + * + * Find out whether the current cpu is the primary + * cpu (applicable ony after a cold boot) + * ----------------------------------------------------- + */ +#if CSS_USE_SCMI_SDS_DRIVER +func plat_is_my_cpu_primary + mov x9, x30 + bl plat_my_core_pos + mov x4, x0 + bl sds_get_primary_cpu_id + /* Check for error */ + mov x1, #0xffffffff + cmp x0, x1 + b.eq 1f + cmp x0, x4 + cset w0, eq + ret x9 +1: + no_ret plat_panic_handler +endfunc plat_is_my_cpu_primary +#else +func plat_is_my_cpu_primary + mov x9, x30 + bl plat_my_core_pos + mov_imm x1, SCP_BOOT_CFG_ADDR + ldr x1, [x1] + ubfx x1, x1, #PLAT_CSS_PRIMARY_CPU_SHIFT, \ + #PLAT_CSS_PRIMARY_CPU_BIT_WIDTH + cmp x0, x1 + cset w0, eq + ret x9 +endfunc plat_is_my_cpu_primary +#endif diff --git a/plat/arm/css/common/css_bl1_setup.c b/plat/arm/css/common/css_bl1_setup.c new file mode 100644 index 0000000..596cc3d --- /dev/null +++ b/plat/arm/css/common/css_bl1_setup.c @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <common/bl_common.h> +#include <common/debug.h> +#include <plat/arm/common/plat_arm.h> +#include <plat/arm/soc/common/soc_css.h> +#include <plat/common/platform.h> + +void bl1_platform_setup(void) +{ + arm_bl1_platform_setup(); + /* + * Do ARM CSS SoC security setup. + * BL1 needs to enable normal world access to memory. + */ + soc_css_security_setup(); +} + diff --git a/plat/arm/css/common/css_bl2_setup.c b/plat/arm/css/common/css_bl2_setup.c new file mode 100644 index 0000000..002c6eb --- /dev/null +++ b/plat/arm/css/common/css_bl2_setup.c @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <string.h> + +#include <common/bl_common.h> +#include <common/debug.h> +#include <drivers/arm/css/css_scp.h> +#include <lib/mmio.h> +#include <lib/utils.h> +#include <plat/arm/common/plat_arm.h> +#include <platform_def.h> + +/* Weak definition may be overridden in specific CSS based platform */ +#pragma weak plat_arm_bl2_handle_scp_bl2 + +/******************************************************************************* + * Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol. + * Return 0 on success, -1 otherwise. 
+ ******************************************************************************/ +int plat_arm_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info) +{ + int ret; + + INFO("BL2: Initiating SCP_BL2 transfer to SCP\n"); + + ret = css_scp_boot_image_xfer((void *)scp_bl2_image_info->image_base, + scp_bl2_image_info->image_size); + + if (ret == 0) + ret = css_scp_boot_ready(); + + if (ret == 0) + INFO("BL2: SCP_BL2 transferred to SCP\n"); + else + ERROR("BL2: SCP_BL2 transfer failure\n"); + + return ret; +} + +#if !CSS_USE_SCMI_SDS_DRIVER +# if defined(EL3_PAYLOAD_BASE) || JUNO_AARCH32_EL3_RUNTIME + +/* + * We need to override some of the platform functions when booting an EL3 + * payload or SP_MIN on Juno AArch32. This needs to be done only for + * SCPI/BOM SCP systems as in case of SDS, the structures remain in memory and + * don't need to be overwritten. + */ + +static unsigned int scp_boot_config; + +void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + arm_bl2_early_platform_setup((uintptr_t)arg0, (meminfo_t *)arg1); + + /* Save SCP Boot config before it gets overwritten by SCP_BL2 loading */ + scp_boot_config = mmio_read_32(SCP_BOOT_CFG_ADDR); + VERBOSE("BL2: Saved SCP Boot config = 0x%x\n", scp_boot_config); +} + +void bl2_platform_setup(void) +{ + arm_bl2_platform_setup(); + + /* + * Before releasing the AP cores out of reset, the SCP writes some data + * at the beginning of the Trusted SRAM. It is is overwritten before + * reaching this function. We need to restore this data, as if the + * target had just come out of reset. This implies: + * - zeroing the first 128 bytes of Trusted SRAM using zeromem instead + * of zero_normalmem since this is device memory. + * - restoring the SCP boot configuration. + */ + VERBOSE("BL2: Restoring SCP reset data in Trusted SRAM\n"); + zeromem((void *) ARM_SHARED_RAM_BASE, 128); + mmio_write_32(SCP_BOOT_CFG_ADDR, scp_boot_config); +} + +# endif /* EL3_PAYLOAD_BASE */ + +#endif /* CSS_USE_SCMI_SDS_DRIVER */ diff --git a/plat/arm/css/common/css_bl2u_setup.c b/plat/arm/css/common/css_bl2u_setup.c new file mode 100644 index 0000000..15cf4f6 --- /dev/null +++ b/plat/arm/css/common/css_bl2u_setup.c @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <common/bl_common.h> +#include <common/debug.h> +#include <drivers/arm/css/css_scp.h> +#include <plat/arm/common/plat_arm.h> +#include <plat/common/platform.h> + +/* Weak definition may be overridden in specific CSS based platform */ +#pragma weak bl2u_plat_handle_scp_bl2u + +/* Data structure which holds the SCP_BL2U image info for BL2U */ +static image_info_t scp_bl2u_image_info; + +/******************************************************************************* + * BL1 can pass platform dependent information to BL2U in x1. + * In case of ARM CSS platforms x1 contains SCP_BL2U image info. + * In case of ARM FVP platforms x1 is not used. 
+ * In both cases, x0 contains the extents of the memory available to BL2U + ******************************************************************************/ +void bl2u_early_platform_setup(meminfo_t *mem_layout, void *plat_info) +{ + if (!plat_info) + panic(); + + arm_bl2u_early_platform_setup(mem_layout, plat_info); + + scp_bl2u_image_info = *(image_info_t *)plat_info; +} + +/******************************************************************************* + * Transfer SCP_BL2U from Trusted RAM using the SCP Download protocol. + ******************************************************************************/ +int bl2u_plat_handle_scp_bl2u(void) +{ + int ret; + + INFO("BL2U: Initiating SCP_BL2U transfer to SCP\n"); + + ret = css_scp_boot_image_xfer((void *)scp_bl2u_image_info.image_base, + scp_bl2u_image_info.image_size); + + if (ret == 0) + ret = css_scp_boot_ready(); + + if (ret == 0) + INFO("BL2U: SCP_BL2U transferred to SCP\n"); + else + ERROR("BL2U: SCP_BL2U transfer failure\n"); + + return ret; +} diff --git a/plat/arm/css/common/css_common.mk b/plat/arm/css/common/css_common.mk new file mode 100644 index 0000000..1e4851c --- /dev/null +++ b/plat/arm/css/common/css_common.mk @@ -0,0 +1,97 @@ +# +# Copyright (c) 2015-2022, Arm Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + + +# By default, SCP images are needed by CSS platforms. +CSS_LOAD_SCP_IMAGES ?= 1 + +# By default, SCMI driver is disabled for CSS platforms +CSS_USE_SCMI_SDS_DRIVER ?= 0 + +PLAT_INCLUDES += -Iinclude/plat/arm/css/common/aarch64 + + +PLAT_BL_COMMON_SOURCES += plat/arm/css/common/${ARCH}/css_helpers.S + +BL1_SOURCES += plat/arm/css/common/css_bl1_setup.c + +BL2_SOURCES += plat/arm/css/common/css_bl2_setup.c + +BL2U_SOURCES += plat/arm/css/common/css_bl2u_setup.c + +BL31_SOURCES += plat/arm/css/common/css_pm.c \ + plat/arm/css/common/css_topology.c + +ifeq (${CSS_USE_SCMI_SDS_DRIVER},0) +BL31_SOURCES += drivers/arm/css/mhu/css_mhu.c \ + drivers/arm/css/scp/css_pm_scpi.c \ + drivers/arm/css/scpi/css_scpi.c +else +BL31_SOURCES += drivers/arm/css/mhu/css_mhu_doorbell.c \ + drivers/arm/css/scmi/scmi_ap_core_proto.c \ + drivers/arm/css/scmi/scmi_common.c \ + drivers/arm/css/scmi/scmi_pwr_dmn_proto.c \ + drivers/arm/css/scmi/scmi_sys_pwr_proto.c \ + drivers/delay_timer/delay_timer.c \ + drivers/arm/css/scp/css_pm_scmi.c +endif + +# Process CSS_LOAD_SCP_IMAGES flag +$(eval $(call assert_boolean,CSS_LOAD_SCP_IMAGES)) +$(eval $(call add_define,CSS_LOAD_SCP_IMAGES)) + +ifeq (${CSS_LOAD_SCP_IMAGES},1) + NEED_SCP_BL2 := yes + ifneq (${TRUSTED_BOARD_BOOT},0) + $(eval $(call TOOL_ADD_IMG,scp_bl2u,--scp-fwu-cfg,FWU_)) + endif + + ifeq (${CSS_USE_SCMI_SDS_DRIVER},1) + BL2U_SOURCES += drivers/arm/css/scp/css_sds.c \ + drivers/arm/css/sds/sds.c + + BL2_SOURCES += drivers/arm/css/scp/css_sds.c \ + drivers/arm/css/sds/sds.c + else + BL2U_SOURCES += drivers/arm/css/mhu/css_mhu.c \ + drivers/arm/css/scp/css_bom_bootloader.c \ + drivers/arm/css/scpi/css_scpi.c + + BL2_SOURCES += drivers/arm/css/mhu/css_mhu.c \ + drivers/arm/css/scp/css_bom_bootloader.c \ + drivers/arm/css/scpi/css_scpi.c + # Enable option to detect whether the SCP ROM firmware in use predates version + # 1.7.0 and therefore, is incompatible. 
+ CSS_DETECT_PRE_1_7_0_SCP := 1 + + # Process CSS_DETECT_PRE_1_7_0_SCP flag + $(eval $(call assert_boolean,CSS_DETECT_PRE_1_7_0_SCP)) + $(eval $(call add_define,CSS_DETECT_PRE_1_7_0_SCP)) + endif +endif + +ifeq (${CSS_USE_SCMI_SDS_DRIVER},1) + PLAT_BL_COMMON_SOURCES += drivers/arm/css/sds/${ARCH}/sds_helpers.S +endif + +# Process CSS_USE_SCMI_SDS_DRIVER flag +$(eval $(call assert_boolean,CSS_USE_SCMI_SDS_DRIVER)) +$(eval $(call add_define,CSS_USE_SCMI_SDS_DRIVER)) + +# Process CSS_NON_SECURE_UART flag +# This undocumented build option is only to enable debug access to the UART +# from non secure code, which is useful on some platforms. +# Default (obviously) is off. +CSS_NON_SECURE_UART := 0 +$(eval $(call assert_boolean,CSS_NON_SECURE_UART)) +$(eval $(call add_define,CSS_NON_SECURE_UART)) + +# Process CSS_SYSTEM_GRACEFUL_RESET flag +# This build option can be used on CSS platforms that require all the CPUs +# to execute the CPU specific power down sequence to complete a warm reboot +# sequence in which only the CPUs are power cycled. +CSS_SYSTEM_GRACEFUL_RESET := 0 +$(eval $(call add_define,CSS_SYSTEM_GRACEFUL_RESET)) diff --git a/plat/arm/css/common/css_pm.c b/plat/arm/css/common/css_pm.c new file mode 100644 index 0000000..9b2639c --- /dev/null +++ b/plat/arm/css/common/css_pm.c @@ -0,0 +1,415 @@ +/* + * Copyright (c) 2015-2022, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <bl31/interrupt_mgmt.h> +#include <common/debug.h> +#include <drivers/arm/css/css_scp.h> +#include <lib/cassert.h> +#include <plat/arm/common/plat_arm.h> + +#include <plat/common/platform.h> + +#include <plat/arm/css/common/css_pm.h> + +/* Allow CSS platforms to override `plat_arm_psci_pm_ops` */ +#pragma weak plat_arm_psci_pm_ops + +#if ARM_RECOM_STATE_ID_ENC +/* + * The table storing the valid idle power states. Ensure that the + * array entries are populated in ascending order of state-id to + * enable us to use binary search during power state validation. + * The table must be terminated by a NULL entry. + */ +const unsigned int arm_pm_idle_states[] = { + /* State-id - 0x001 */ + arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN, + ARM_LOCAL_STATE_RET, ARM_PWR_LVL0, PSTATE_TYPE_STANDBY), + /* State-id - 0x002 */ + arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN, + ARM_LOCAL_STATE_OFF, ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN), + /* State-id - 0x022 */ + arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF, + ARM_LOCAL_STATE_OFF, ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN), +#if PLAT_MAX_PWR_LVL > ARM_PWR_LVL1 + /* State-id - 0x222 */ + arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF, + ARM_LOCAL_STATE_OFF, ARM_PWR_LVL2, PSTATE_TYPE_POWERDOWN), +#endif + 0, +}; +#endif /* __ARM_RECOM_STATE_ID_ENC__ */ + +/* + * All the power management helpers in this file assume at least cluster power + * level is supported. + */ +CASSERT(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL1, + assert_max_pwr_lvl_supported_mismatch); + +/* + * Ensure that the PLAT_MAX_PWR_LVL is not greater than CSS_SYSTEM_PWR_DMN_LVL + * assumed by the CSS layer. + */ +CASSERT(PLAT_MAX_PWR_LVL <= CSS_SYSTEM_PWR_DMN_LVL, + assert_max_pwr_lvl_higher_than_css_sys_lvl); + +/******************************************************************************* + * Handler called when a power domain is about to be turned on. The + * level and mpidr determine the affinity instance. 
+ ******************************************************************************/ +int css_pwr_domain_on(u_register_t mpidr) +{ + css_scp_on(mpidr); + + return PSCI_E_SUCCESS; +} + +static void css_pwr_domain_on_finisher_common( + const psci_power_state_t *target_state) +{ + assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF); + + /* + * Perform the common cluster specific operations i.e enable coherency + * if this cluster was off. + */ + if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) + plat_arm_interconnect_enter_coherency(); +} + +/******************************************************************************* + * Handler called when a power level has just been powered on after + * being turned off earlier. The target_state encodes the low power state that + * each level has woken up from. This handler would never be invoked with + * the system power domain uninitialized as either the primary would have taken + * care of it as part of cold boot or the first core awakened from system + * suspend would have already initialized it. + ******************************************************************************/ +void css_pwr_domain_on_finish(const psci_power_state_t *target_state) +{ + /* Assert that the system power domain need not be initialized */ + assert(css_system_pwr_state(target_state) == ARM_LOCAL_STATE_RUN); + + css_pwr_domain_on_finisher_common(target_state); +} + +/******************************************************************************* + * Handler called when a power domain has just been powered on and the cpu + * and its cluster are fully participating in coherent transaction on the + * interconnect. Data cache must be enabled for CPU at this point. + ******************************************************************************/ +void css_pwr_domain_on_finish_late(const psci_power_state_t *target_state) +{ + /* Program the gic per-cpu distributor or re-distributor interface */ + plat_arm_gic_pcpu_init(); + + /* Enable the gic cpu interface */ + plat_arm_gic_cpuif_enable(); + + /* Setup the CPU power down request interrupt for secondary core(s) */ + css_setup_cpu_pwr_down_intr(); +} + +/******************************************************************************* + * Common function called while turning a cpu off or suspending it. It is called + * from css_off() or css_suspend() when these functions in turn are called for + * power domain at the highest power level which will be powered down. It + * performs the actions common to the OFF and SUSPEND calls. + ******************************************************************************/ +static void css_power_down_common(const psci_power_state_t *target_state) +{ + /* Prevent interrupts from spuriously waking up this cpu */ + plat_arm_gic_cpuif_disable(); + + /* Turn redistributor off */ + plat_arm_gic_redistif_off(); + + /* Cluster is to be turned off, so disable coherency */ + if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { + plat_arm_interconnect_exit_coherency(); + +#if HW_ASSISTED_COHERENCY + uint32_t reg; + + /* + * If we have determined this core to be the last man standing and we + * intend to power down the cluster proactively, we provide a hint to + * the power controller that cluster power is not required when all + * cores are powered down. + * Note that this is only an advisory to power controller and is supported + * by SoCs with DynamIQ Shared Units only. 
+ */ + reg = read_clusterpwrdn(); + + /* Clear and set bit 0 : Cluster power not required */ + reg &= ~DSU_CLUSTER_PWR_MASK; + reg |= DSU_CLUSTER_PWR_OFF; + write_clusterpwrdn(reg); +#endif + } +} + +/******************************************************************************* + * Handler called when a power domain is about to be turned off. The + * target_state encodes the power state that each level should transition to. + ******************************************************************************/ +void css_pwr_domain_off(const psci_power_state_t *target_state) +{ + assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF); + css_power_down_common(target_state); + css_scp_off(target_state); +} + +/******************************************************************************* + * Handler called when a power domain is about to be suspended. The + * target_state encodes the power state that each level should transition to. + ******************************************************************************/ +void css_pwr_domain_suspend(const psci_power_state_t *target_state) +{ + /* + * CSS currently supports retention only at cpu level. Just return + * as nothing is to be done for retention. + */ + if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET) + return; + + + assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF); + css_power_down_common(target_state); + + /* Perform system domain state saving if issuing system suspend */ + if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) { + arm_system_pwr_domain_save(); + + /* Power off the Redistributor after having saved its context */ + plat_arm_gic_redistif_off(); + } + + css_scp_suspend(target_state); +} + +/******************************************************************************* + * Handler called when a power domain has just been powered on after + * having been suspended earlier. The target_state encodes the low power state + * that each level has woken up from. + * TODO: At the moment we reuse the on finisher and reinitialize the secure + * context. Need to implement a separate suspend finisher. + ******************************************************************************/ +void css_pwr_domain_suspend_finish( + const psci_power_state_t *target_state) +{ + /* Return as nothing is to be done on waking up from retention. */ + if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET) + return; + + /* Perform system domain restore if woken up from system suspend */ + if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) + /* + * At this point, the Distributor must be powered on to be ready + * to have its state restored. The Redistributor will be powered + * on as part of gicv3_rdistif_init_restore. + */ + arm_system_pwr_domain_resume(); + + css_pwr_domain_on_finisher_common(target_state); + + /* Enable the gic cpu interface */ + plat_arm_gic_cpuif_enable(); +} + +/******************************************************************************* + * Handlers to shutdown/reboot the system + ******************************************************************************/ +void __dead2 css_system_off(void) +{ + css_scp_sys_shutdown(); +} + +void __dead2 css_system_reset(void) +{ + css_scp_sys_reboot(); +} + +/******************************************************************************* + * Handler called when the CPU power domain is about to enter standby. 
+ ******************************************************************************/ +void css_cpu_standby(plat_local_state_t cpu_state) +{ + unsigned int scr; + + assert(cpu_state == ARM_LOCAL_STATE_RET); + + scr = read_scr_el3(); + /* + * Enable the Non secure interrupt to wake the CPU. + * In GICv3 affinity routing mode, the non secure group1 interrupts use + * the PhysicalFIQ at EL3 whereas in GICv2, it uses the PhysicalIRQ. + * Enabling both the bits works for both GICv2 mode and GICv3 affinity + * routing mode. + */ + write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT); + isb(); + dsb(); + wfi(); + + /* + * Restore SCR to the original value, synchronisation of scr_el3 is + * done by eret while el3_exit to save some execution cycles. + */ + write_scr_el3(scr); +} + +/******************************************************************************* + * Handler called to return the 'req_state' for system suspend. + ******************************************************************************/ +void css_get_sys_suspend_power_state(psci_power_state_t *req_state) +{ + unsigned int i; + + /* + * System Suspend is supported only if the system power domain node + * is implemented. + */ + assert(PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL); + + for (i = ARM_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++) + req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF; +} + +/******************************************************************************* + * Handler to query CPU/cluster power states from SCP + ******************************************************************************/ +int css_node_hw_state(u_register_t mpidr, unsigned int power_level) +{ + return css_scp_get_power_state(mpidr, power_level); +} + +/* + * The system power domain suspend is only supported only via + * PSCI SYSTEM_SUSPEND API. PSCI CPU_SUSPEND request to system power domain + * will be downgraded to the lower level. + */ +static int css_validate_power_state(unsigned int power_state, + psci_power_state_t *req_state) +{ + int rc; + rc = arm_validate_power_state(power_state, req_state); + + /* + * Ensure that we don't overrun the pwr_domain_state array in the case + * where the platform supported max power level is less than the system + * power level + */ + +#if (PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL) + + /* + * Ensure that the system power domain level is never suspended + * via PSCI CPU SUSPEND API. Currently system suspend is only + * supported via PSCI SYSTEM SUSPEND API. + */ + + req_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] = + ARM_LOCAL_STATE_RUN; +#endif + + return rc; +} + +/* + * Custom `translate_power_state_by_mpidr` handler for CSS. Unlike in the + * `css_validate_power_state`, we do not downgrade the system power + * domain level request in `power_state` as it will be used to query the + * PSCI_STAT_COUNT/RESIDENCY at the system power domain level. + */ +static int css_translate_power_state_by_mpidr(u_register_t mpidr, + unsigned int power_state, + psci_power_state_t *output_state) +{ + return arm_validate_power_state(power_state, output_state); +} + +/* + * Setup the SGI interrupt that will be used trigger the execution of power + * down sequence for all the secondary cores. This interrupt is setup to be + * handled in EL3 context at a priority defined by the platform. 
+ */ +void css_setup_cpu_pwr_down_intr(void) +{ +#if CSS_SYSTEM_GRACEFUL_RESET + plat_ic_set_interrupt_type(CSS_CPU_PWR_DOWN_REQ_INTR, INTR_TYPE_EL3); + plat_ic_set_interrupt_priority(CSS_CPU_PWR_DOWN_REQ_INTR, + PLAT_REBOOT_PRI); + plat_ic_enable_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR); +#endif +} + +/* + * For a graceful shutdown/reboot, each CPU in the system should do their power + * down sequence. On a PSCI shutdown/reboot request, only one CPU gets an + * opportunity to do the powerdown sequence. To achieve graceful reset, of all + * cores in the system, the CPU gets the opportunity raise warm reboot SGI to + * rest of the CPUs which are online. Add handler for the reboot SGI where the + * rest of the CPU execute the powerdown sequence. + */ +int css_reboot_interrupt_handler(uint32_t intr_raw, uint32_t flags, + void *handle, void *cookie) +{ + assert(intr_raw == CSS_CPU_PWR_DOWN_REQ_INTR); + + /* Deactivate warm reboot SGI */ + plat_ic_end_of_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR); + + /* + * Disable GIC CPU interface to prevent pending interrupt from waking + * up the AP from WFI. + */ + plat_arm_gic_cpuif_disable(); + plat_arm_gic_redistif_off(); + + psci_pwrdown_cpu(PLAT_MAX_PWR_LVL); + + dmbsy(); + + wfi(); + return 0; +} + +/******************************************************************************* + * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard + * platform will take care of registering the handlers with PSCI. + ******************************************************************************/ +plat_psci_ops_t plat_arm_psci_pm_ops = { + .pwr_domain_on = css_pwr_domain_on, + .pwr_domain_on_finish = css_pwr_domain_on_finish, + .pwr_domain_on_finish_late = css_pwr_domain_on_finish_late, + .pwr_domain_off = css_pwr_domain_off, + .cpu_standby = css_cpu_standby, + .pwr_domain_suspend = css_pwr_domain_suspend, + .pwr_domain_suspend_finish = css_pwr_domain_suspend_finish, + .system_off = css_system_off, + .system_reset = css_system_reset, + .validate_power_state = css_validate_power_state, + .validate_ns_entrypoint = arm_validate_psci_entrypoint, + .translate_power_state_by_mpidr = css_translate_power_state_by_mpidr, + .get_node_hw_state = css_node_hw_state, + .get_sys_suspend_power_state = css_get_sys_suspend_power_state, + +#if defined(PLAT_ARM_MEM_PROT_ADDR) + .mem_protect_chk = arm_psci_mem_protect_chk, + .read_mem_protect = arm_psci_read_mem_protect, + .write_mem_protect = arm_nor_psci_write_mem_protect, +#endif +#if CSS_USE_SCMI_SDS_DRIVER + .system_reset2 = css_system_reset2, +#endif +}; diff --git a/plat/arm/css/common/css_topology.c b/plat/arm/css/common/css_topology.c new file mode 100644 index 0000000..8aca744 --- /dev/null +++ b/plat/arm/css/common/css_topology.c @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <plat/arm/common/plat_arm.h> +#include <plat/common/platform.h> + +#if ARM_PLAT_MT +#pragma weak plat_arm_get_cpu_pe_count +#endif + +/****************************************************************************** + * This function implements a part of the critical interface between the psci + * generic layer and the platform that allows the former to query the platform + * to convert an MPIDR to a unique linear index. An error code (-1) is + * returned in case the MPIDR is invalid. 
+ *****************************************************************************/ +int plat_core_pos_by_mpidr(u_register_t mpidr) +{ + if (arm_check_mpidr(mpidr) == 0) { +#if ARM_PLAT_MT + assert((read_mpidr_el1() & MPIDR_MT_MASK) != 0); + + /* + * The DTB files don't provide the MT bit in the mpidr argument + * so set it manually before calculating core position + */ + mpidr |= MPIDR_MT_MASK; +#endif + return plat_arm_calc_core_pos(mpidr); + } + return -1; +} + +#if ARM_PLAT_MT +/****************************************************************************** + * This function returns the PE count within the physical cpu corresponding to + * `mpidr`. Now one cpu only have one thread, so just return 1. + *****************************************************************************/ +unsigned int plat_arm_get_cpu_pe_count(u_register_t mpidr) +{ + return 1; +} +#endif /* ARM_PLAT_MT */ diff --git a/plat/arm/css/common/sp_min/css_sp_min.mk b/plat/arm/css/common/sp_min/css_sp_min.mk new file mode 100644 index 0000000..ae489fd --- /dev/null +++ b/plat/arm/css/common/sp_min/css_sp_min.mk @@ -0,0 +1,22 @@ +# +# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +# SP MIN source files common to CSS platforms +BL32_SOURCES += plat/arm/css/common/css_pm.c \ + plat/arm/css/common/css_topology.c + +ifeq (${CSS_USE_SCMI_SDS_DRIVER},0) +BL32_SOURCES += drivers/arm/css/mhu/css_mhu.c \ + drivers/arm/css/scp/css_pm_scpi.c \ + drivers/arm/css/scpi/css_scpi.c +else +BL32_SOURCES += drivers/arm/css/mhu/css_mhu_doorbell.c \ + drivers/arm/css/scp/css_pm_scmi.c \ + drivers/delay_timer/delay_timer.c \ + drivers/arm/css/scmi/scmi_common.c \ + drivers/arm/css/scmi/scmi_pwr_dmn_proto.c \ + drivers/arm/css/scmi/scmi_sys_pwr_proto.c +endif diff --git a/plat/arm/css/sgi/aarch64/sgi_helper.S b/plat/arm/css/sgi/aarch64/sgi_helper.S new file mode 100644 index 0000000..ced59e8 --- /dev/null +++ b/plat/arm/css/sgi/aarch64/sgi_helper.S @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch.h> +#include <asm_macros.S> +#include <platform_def.h> +#include <cortex_a75.h> +#include <neoverse_n1.h> +#include <neoverse_v1.h> +#include <neoverse_n2.h> +#include <cpu_macros.S> + + .globl plat_arm_calc_core_pos + .globl plat_reset_handler + + /* ----------------------------------------------------- + * unsigned int plat_arm_calc_core_pos(u_register_t mpidr) + * + * Helper function to calculate the core position. + * (ChipId * PLAT_ARM_CLUSTER_COUNT * + * CSS_SGI_MAX_CPUS_PER_CLUSTER * CSS_SGI_MAX_PE_PER_CPU) + + * (ClusterId * CSS_SGI_MAX_CPUS_PER_CLUSTER * CSS_SGI_MAX_PE_PER_CPU) + + * (CPUId * CSS_SGI_MAX_PE_PER_CPU) + + * ThreadId + * + * which can be simplified as: + * + * ((((ChipId * PLAT_ARM_CLUSTER_COUNT) + ClusterId) * + * CSS_SGI_MAX_CPUS_PER_CLUSTER) + CPUId) * CSS_SGI_MAX_PE_PER_CPU + + * ThreadId + * ------------------------------------------------------ + */ + +func plat_arm_calc_core_pos + mov x4, x0 + + /* + * The MT bit in MPIDR is always set for SGI platforms + * and the affinity level 0 corresponds to thread affinity level. 
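For reference, a C rendering of the linear index formula given above for plat_arm_calc_core_pos (illustrative only, not part of the patch; it assumes the four affinity fields have already been extracted from the MPIDR, with AFF3 = chip, AFF2 = cluster, AFF1 = CPU and AFF0 = thread, since the MT bit is set on these platforms):

    static unsigned int sgi_calc_core_pos(unsigned int chip, unsigned int cluster,
                                          unsigned int cpu, unsigned int thread)
    {
            /* ((((ChipId * clusters) + ClusterId) * cpus) + CpuId) * threads + ThreadId */
            return ((((chip * PLAT_ARM_CLUSTER_COUNT) + cluster) *
                     CSS_SGI_MAX_CPUS_PER_CLUSTER) + cpu) *
                    CSS_SGI_MAX_PE_PER_CPU + thread;
    }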
+ */ + + /* Extract individual affinity fields from MPIDR */ + ubfx x0, x4, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS + ubfx x1, x4, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS + ubfx x2, x4, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS + ubfx x3, x4, #MPIDR_AFF3_SHIFT, #MPIDR_AFFINITY_BITS + + /* Compute linear position */ + mov x4, #PLAT_ARM_CLUSTER_COUNT + madd x2, x3, x4, x2 + mov x4, #CSS_SGI_MAX_CPUS_PER_CLUSTER + madd x1, x2, x4, x1 + mov x4, #CSS_SGI_MAX_PE_PER_CPU + madd x0, x1, x4, x0 + ret +endfunc plat_arm_calc_core_pos + + /* ----------------------------------------------------- + * void plat_reset_handler(void); + * + * Determine the CPU MIDR and disable power down bit for + * that CPU. + * ----------------------------------------------------- + */ +func plat_reset_handler + jump_if_cpu_midr CORTEX_A75_MIDR, A75 + jump_if_cpu_midr NEOVERSE_N1_MIDR, N1 + jump_if_cpu_midr NEOVERSE_V1_MIDR, V1 + jump_if_cpu_midr NEOVERSE_N2_MIDR, N2 + ret + + /* ----------------------------------------------------- + * Disable CPU power down bit in power control register + * ----------------------------------------------------- + */ +A75: + mrs x0, CORTEX_A75_CPUPWRCTLR_EL1 + bic x0, x0, #CORTEX_A75_CORE_PWRDN_EN_MASK + msr CORTEX_A75_CPUPWRCTLR_EL1, x0 + isb + ret + +N1: + mrs x0, NEOVERSE_N1_CPUPWRCTLR_EL1 + bic x0, x0, #NEOVERSE_N1_CORE_PWRDN_EN_MASK + msr NEOVERSE_N1_CPUPWRCTLR_EL1, x0 + isb + ret + +V1: + mrs x0, NEOVERSE_V1_CPUPWRCTLR_EL1 + bic x0, x0, #NEOVERSE_V1_CPUPWRCTLR_EL1_CORE_PWRDN_BIT + msr NEOVERSE_V1_CPUPWRCTLR_EL1, x0 + isb + ret + +N2: + mrs x0, NEOVERSE_N2_CPUPWRCTLR_EL1 + bic x0, x0, #NEOVERSE_N2_CORE_PWRDN_EN_BIT + msr NEOVERSE_N2_CPUPWRCTLR_EL1, x0 + isb + ret +endfunc plat_reset_handler diff --git a/plat/arm/css/sgi/include/plat_macros.S b/plat/arm/css/sgi/include/plat_macros.S new file mode 100644 index 0000000..521bcc3 --- /dev/null +++ b/plat/arm/css/sgi/include/plat_macros.S @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef PLAT_MACROS_S +#define PLAT_MACROS_S + +#include <css_macros.S> + +/* --------------------------------------------- + * The below required platform porting macro + * prints out relevant platform registers + * whenever an unhandled exception is taken in + * BL31. + * + * There are currently no platform specific regs + * to print. + * --------------------------------------------- + */ + .macro plat_crash_print_regs + .endm + +#endif /* PLAT_MACROS_S */ diff --git a/plat/arm/css/sgi/include/sgi_base_platform_def.h b/plat/arm/css/sgi/include/sgi_base_platform_def.h new file mode 100644 index 0000000..c1fadc6 --- /dev/null +++ b/plat/arm/css/sgi/include/sgi_base_platform_def.h @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SGI_BASE_PLATFORM_DEF_H +#define SGI_BASE_PLATFORM_DEF_H + +#include <lib/utils_def.h> +#include <lib/xlat_tables/xlat_tables_defs.h> +#include <plat/arm/common/arm_def.h> +#include <plat/arm/common/arm_spm_def.h> +#include <plat/arm/css/common/css_def.h> +#include <plat/common/common_def.h> + +#define PLATFORM_CORE_COUNT (CSS_SGI_CHIP_COUNT * \ + PLAT_ARM_CLUSTER_COUNT * \ + CSS_SGI_MAX_CPUS_PER_CLUSTER * \ + CSS_SGI_MAX_PE_PER_CPU) + +#define PLAT_ARM_TRUSTED_SRAM_SIZE 0x00080000 /* 512 KB */ + +/* Remote chip address offset */ +#define CSS_SGI_REMOTE_CHIP_MEM_OFFSET(n) \ + ((ULL(1) << CSS_SGI_ADDR_BITS_PER_CHIP) * (n)) + +/* + * PLAT_ARM_MMAP_ENTRIES depends on the number of entries in the + * plat_arm_mmap array defined for each BL stage. In addition to that, on + * multi-chip platforms, address regions on each of the remote chips are + * also mapped. In BL31, for instance, three address regions on the remote + * chips are accessed - secure ram, css device and soc device regions. + */ +#if defined(IMAGE_BL31) +# if SPM_MM +# define PLAT_ARM_MMAP_ENTRIES (9 + ((CSS_SGI_CHIP_COUNT - 1) * 3)) +# define MAX_XLAT_TABLES (7 + ((CSS_SGI_CHIP_COUNT - 1) * 3)) +# define PLAT_SP_IMAGE_MMAP_REGIONS 10 +# define PLAT_SP_IMAGE_MAX_XLAT_TABLES 12 +# else +# define PLAT_ARM_MMAP_ENTRIES (5 + ((CSS_SGI_CHIP_COUNT - 1) * 3)) +# define MAX_XLAT_TABLES (6 + ((CSS_SGI_CHIP_COUNT - 1) * 3)) +# endif +#elif defined(IMAGE_BL32) +# define PLAT_ARM_MMAP_ENTRIES 8 +# define MAX_XLAT_TABLES 5 +#elif defined(IMAGE_BL2) +# define PLAT_ARM_MMAP_ENTRIES (11 + (CSS_SGI_CHIP_COUNT - 1)) + +/* + * MAX_XLAT_TABLES entries need to be doubled because when the address width + * exceeds 40 bits an additional level of translation is required. In case of + * multichip platforms peripherals also fall into address space with width + * > 40 bits + * + */ +# define MAX_XLAT_TABLES (7 + ((CSS_SGI_CHIP_COUNT - 1) * 2)) +#elif !USE_ROMLIB +# define PLAT_ARM_MMAP_ENTRIES 11 +# define MAX_XLAT_TABLES 7 +#else +# define PLAT_ARM_MMAP_ENTRIES 12 +# define MAX_XLAT_TABLES 6 +#endif + +/* + * PLAT_ARM_MAX_BL1_RW_SIZE is calculated using the current BL1 RW debug size + * plus a little space for growth. + */ +#define PLAT_ARM_MAX_BL1_RW_SIZE (64 * 1024) /* 64 KB */ + +/* + * PLAT_ARM_MAX_ROMLIB_RW_SIZE is define to use a full page + */ + +#if USE_ROMLIB +#define PLAT_ARM_MAX_ROMLIB_RW_SIZE 0x1000 +#define PLAT_ARM_MAX_ROMLIB_RO_SIZE 0xe000 +#else +#define PLAT_ARM_MAX_ROMLIB_RW_SIZE 0 +#define PLAT_ARM_MAX_ROMLIB_RO_SIZE 0 +#endif + +/* + * PLAT_ARM_MAX_BL2_SIZE is calculated using the current BL2 debug size plus a + * little space for growth. Additional 8KiB space is added per chip in + * order to accommodate the additional level of translation required for "TZC" + * peripheral access which lies in >4TB address space. + * + */ +#if TRUSTED_BOARD_BOOT +# define PLAT_ARM_MAX_BL2_SIZE (0x20000 + ((CSS_SGI_CHIP_COUNT - 1) * \ + 0x2000)) +#else +# define PLAT_ARM_MAX_BL2_SIZE (0x14000 + ((CSS_SGI_CHIP_COUNT - 1) * \ + 0x2000)) +#endif + +/* + * Since BL31 NOBITS overlays BL2 and BL1-RW, PLAT_ARM_MAX_BL31_SIZE is + * calculated using the current BL31 PROGBITS debug size plus the sizes of BL2 + * and BL1-RW. CSS_SGI_BL31_SIZE - is tuned with respect to the actual BL31 + * PROGBITS size which is around 64-68KB at the time this change is being made. + * A buffer of ~35KB is added to account for future expansion of the image, + * making it a total of 100KB. 
+ */ +#define CSS_SGI_BL31_SIZE (100 * 1024) /* 100 KB */ +#define PLAT_ARM_MAX_BL31_SIZE (CSS_SGI_BL31_SIZE + \ + PLAT_ARM_MAX_BL2_SIZE + \ + PLAT_ARM_MAX_BL1_RW_SIZE) + +/* + * Size of cacheable stacks + */ +#if defined(IMAGE_BL1) +# if TRUSTED_BOARD_BOOT +# define PLATFORM_STACK_SIZE 0x1000 +# else +# define PLATFORM_STACK_SIZE 0x440 +# endif +#elif defined(IMAGE_BL2) +# if TRUSTED_BOARD_BOOT +# define PLATFORM_STACK_SIZE 0x1000 +# else +# define PLATFORM_STACK_SIZE 0x400 +# endif +#elif defined(IMAGE_BL2U) +# define PLATFORM_STACK_SIZE 0x400 +#elif defined(IMAGE_BL31) +# if SPM_MM +# define PLATFORM_STACK_SIZE 0x500 +# else +# define PLATFORM_STACK_SIZE 0x400 +# endif +#elif defined(IMAGE_BL32) +# define PLATFORM_STACK_SIZE 0x440 +#endif + +/* PL011 UART related constants */ +#define SOC_CSS_SEC_UART_BASE UL(0x2A410000) +#define SOC_CSS_NSEC_UART_BASE UL(0x2A400000) +#define SOC_CSS_UART_SIZE UL(0x10000) +#define SOC_CSS_UART_CLK_IN_HZ UL(7372800) + +/* UART related constants */ +#define PLAT_ARM_BOOT_UART_BASE SOC_CSS_SEC_UART_BASE +#define PLAT_ARM_BOOT_UART_CLK_IN_HZ SOC_CSS_UART_CLK_IN_HZ + +#define PLAT_ARM_RUN_UART_BASE SOC_CSS_SEC_UART_BASE +#define PLAT_ARM_RUN_UART_CLK_IN_HZ SOC_CSS_UART_CLK_IN_HZ + +#define PLAT_ARM_CRASH_UART_BASE SOC_CSS_SEC_UART_BASE +#define PLAT_ARM_CRASH_UART_CLK_IN_HZ SOC_CSS_UART_CLK_IN_HZ + +#define PLAT_ARM_NSTIMER_FRAME_ID 0 + +#define PLAT_ARM_TRUSTED_ROM_BASE 0x0 +#define PLAT_ARM_TRUSTED_ROM_SIZE 0x00080000 /* 512KB */ + +#define PLAT_ARM_NSRAM_BASE 0x06000000 +#define PLAT_ARM_NSRAM_SIZE 0x00080000 /* 512KB */ + +#define PLAT_ARM_DRAM2_BASE ULL(0x8080000000) +#define PLAT_ARM_DRAM2_SIZE ULL(0x180000000) + +#define PLAT_ARM_G1S_IRQ_PROPS(grp) CSS_G1S_IRQ_PROPS(grp) +#define PLAT_ARM_G0_IRQ_PROPS(grp) ARM_G0_IRQ_PROPS(grp) + +#define CSS_SGI_DEVICE_BASE (0x20000000) +#define CSS_SGI_DEVICE_SIZE (0x20000000) +#define CSS_SGI_MAP_DEVICE MAP_REGION_FLAT( \ + CSS_SGI_DEVICE_BASE, \ + CSS_SGI_DEVICE_SIZE, \ + MT_DEVICE | MT_RW | MT_SECURE) + +#define ARM_MAP_SHARED_RAM_REMOTE_CHIP(n) \ + MAP_REGION_FLAT( \ + CSS_SGI_REMOTE_CHIP_MEM_OFFSET(n) + \ + ARM_SHARED_RAM_BASE, \ + ARM_SHARED_RAM_SIZE, \ + MT_NON_CACHEABLE | MT_RW | MT_SECURE \ + ) + +#define CSS_SGI_MAP_DEVICE_REMOTE_CHIP(n) \ + MAP_REGION_FLAT( \ + CSS_SGI_REMOTE_CHIP_MEM_OFFSET(n) + \ + CSS_SGI_DEVICE_BASE, \ + CSS_SGI_DEVICE_SIZE, \ + MT_DEVICE | MT_RW | MT_SECURE \ + ) + +#define SOC_CSS_MAP_DEVICE_REMOTE_CHIP(n) \ + MAP_REGION_FLAT( \ + CSS_SGI_REMOTE_CHIP_MEM_OFFSET(n) + \ + SOC_CSS_DEVICE_BASE, \ + SOC_CSS_DEVICE_SIZE, \ + MT_DEVICE | MT_RW | MT_SECURE \ + ) + +/* Map the secure region for access from S-EL0 */ +#define PLAT_ARM_SECURE_MAP_DEVICE MAP_REGION_FLAT( \ + SOC_CSS_DEVICE_BASE, \ + SOC_CSS_DEVICE_SIZE, \ + MT_DEVICE | MT_RW | MT_SECURE | MT_USER) + +#define PLAT_SP_PRI PLAT_RAS_PRI + +#if SPM_MM && RAS_EXTENSION +/* + * CPER buffer memory of 128KB is reserved and it is placed adjacent to the + * memory shared between EL3 and S-EL0. + */ +#define CSS_SGI_SP_CPER_BUF_BASE (PLAT_SP_IMAGE_NS_BUF_BASE + \ + PLAT_SP_IMAGE_NS_BUF_SIZE) +#define CSS_SGI_SP_CPER_BUF_SIZE ULL(0x20000) +#define CSS_SGI_SP_CPER_BUF_MMAP MAP_REGION2( \ + CSS_SGI_SP_CPER_BUF_BASE, \ + CSS_SGI_SP_CPER_BUF_BASE, \ + CSS_SGI_SP_CPER_BUF_SIZE, \ + MT_RW_DATA | MT_NS | MT_USER, \ + PAGE_SIZE) + +/* + * Secure partition stack follows right after the memory space reserved for + * CPER buffer memory. 
+ */ +#define PLAT_ARM_SP_IMAGE_STACK_BASE (PLAT_SP_IMAGE_NS_BUF_BASE + \ + PLAT_SP_IMAGE_NS_BUF_SIZE + \ + CSS_SGI_SP_CPER_BUF_SIZE) +#elif SPM_MM +/* + * Secure partition stack follows right after the memory region that is shared + * between EL3 and S-EL0. + */ +#define PLAT_ARM_SP_IMAGE_STACK_BASE (PLAT_SP_IMAGE_NS_BUF_BASE + \ + PLAT_SP_IMAGE_NS_BUF_SIZE) +#endif /* SPM_MM && RAS_EXTENSION */ + +/* Platform ID address */ +#define SSC_VERSION (SSC_REG_BASE + SSC_VERSION_OFFSET) +#ifndef __ASSEMBLER__ +/* SSC_VERSION related accessors */ +/* Returns the part number of the platform */ +#define GET_SGI_PART_NUM \ + GET_SSC_VERSION_PART_NUM(mmio_read_32(SSC_VERSION)) +/* Returns the configuration number of the platform */ +#define GET_SGI_CONFIG_NUM \ + GET_SSC_VERSION_CONFIG(mmio_read_32(SSC_VERSION)) +#endif /* __ASSEMBLER__ */ + +/******************************************************************************* + * Memprotect definitions + ******************************************************************************/ +/* PSCI memory protect definitions: + * This variable is stored in a non-secure flash because some ARM reference + * platforms do not have secure NVRAM. Real systems that provided MEM_PROTECT + * support must use a secure NVRAM to store the PSCI MEM_PROTECT definitions. + */ +#define PLAT_ARM_MEM_PROT_ADDR (V2M_FLASH0_BASE + \ + V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE) + +/*Secure Watchdog Constants */ +#define SBSA_SECURE_WDOG_BASE UL(0x2A480000) +#define SBSA_SECURE_WDOG_TIMEOUT UL(100) + +/* Number of SCMI channels on the platform */ +#define PLAT_ARM_SCMI_CHANNEL_COUNT CSS_SGI_CHIP_COUNT + +/* + * Mapping definition of the TrustZone Controller for ARM SGI/RD platforms + * where both the DRAM regions are marked for non-secure access. This applies + * to multi-chip platforms. + */ +#define SGI_PLAT_TZC_NS_REMOTE_REGIONS_DEF(n) \ + {CSS_SGI_REMOTE_CHIP_MEM_OFFSET(n) + ARM_DRAM1_BASE, \ + CSS_SGI_REMOTE_CHIP_MEM_OFFSET(n) + ARM_DRAM1_END, \ + ARM_TZC_NS_DRAM_S_ACCESS, PLAT_ARM_TZC_NS_DEV_ACCESS}, \ + {CSS_SGI_REMOTE_CHIP_MEM_OFFSET(n) + ARM_DRAM2_BASE, \ + CSS_SGI_REMOTE_CHIP_MEM_OFFSET(n) + ARM_DRAM2_END, \ + ARM_TZC_NS_DRAM_S_ACCESS, PLAT_ARM_TZC_NS_DEV_ACCESS} + +#if SPM_MM + +/* + * Stand-alone MM logs would be routed via secure UART. Define page table + * entry for secure UART which would be common to all platforms. + */ +#define SOC_PLATFORM_SECURE_UART MAP_REGION_FLAT( \ + SOC_CSS_SEC_UART_BASE, \ + SOC_CSS_UART_SIZE, \ + MT_DEVICE | MT_RW | \ + MT_SECURE | MT_USER) + +#endif + +/* SDS ID for unusable CPU MPID list structure */ +#define SDS_ISOLATED_CPU_LIST_ID U(128) + +#endif /* SGI_BASE_PLATFORM_DEF_H */ diff --git a/plat/arm/css/sgi/include/sgi_dmc620_tzc_regions.h b/plat/arm/css/sgi/include/sgi_dmc620_tzc_regions.h new file mode 100644 index 0000000..e939163 --- /dev/null +++ b/plat/arm/css/sgi/include/sgi_dmc620_tzc_regions.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SGI_DMC620_TZC_REGIONS_H +#define SGI_DMC620_TZC_REGIONS_H + +#include <drivers/arm/tzc_dmc620.h> + +#if SPM_MM +#define CSS_SGI_DMC620_TZC_REGIONS_DEF \ + { \ + .region_base = ARM_AP_TZC_DRAM1_BASE, \ + .region_top = PLAT_SP_IMAGE_NS_BUF_BASE - 1, \ + .sec_attr = TZC_DMC620_REGION_S_RDWR \ + }, { \ + .region_base = PLAT_SP_IMAGE_NS_BUF_BASE, \ + .region_top = PLAT_ARM_SP_IMAGE_STACK_BASE - 1, \ + .sec_attr = TZC_DMC620_REGION_S_NS_RDWR \ + }, { \ + .region_base = PLAT_ARM_SP_IMAGE_STACK_BASE, \ + .region_top = ARM_AP_TZC_DRAM1_END, \ + .sec_attr = TZC_DMC620_REGION_S_RDWR \ + } +#else +#define CSS_SGI_DMC620_TZC_REGIONS_DEF \ + { \ + .region_base = ARM_AP_TZC_DRAM1_BASE, \ + .region_top = ARM_AP_TZC_DRAM1_END, \ + .sec_attr = TZC_DMC620_REGION_S_RDWR \ + } +#endif /* SPM_MM */ + +#endif /* SGI_DMC620_TZC_REGIONS_H */ diff --git a/plat/arm/css/sgi/include/sgi_plat.h b/plat/arm/css/sgi/include/sgi_plat.h new file mode 100644 index 0000000..a5fbded --- /dev/null +++ b/plat/arm/css/sgi/include/sgi_plat.h @@ -0,0 +1,13 @@ +/* + * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SGI_PLAT_H +#define SGI_PLAT_H + +/* BL31 platform setup common to all SGI based platforms */ +void sgi_bl31_common_platform_setup(void); + +#endif /* SGI_PLAT_H */ diff --git a/plat/arm/css/sgi/include/sgi_ras.h b/plat/arm/css/sgi/include/sgi_ras.h new file mode 100644 index 0000000..e69a684 --- /dev/null +++ b/plat/arm/css/sgi/include/sgi_ras.h @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SGI_RAS_H +#define SGI_RAS_H + +/* + * Mapping the RAS interrupt with SDEI event number and the event + * id used with Standalone MM code + */ +struct sgi_ras_ev_map { + int sdei_ev_num; /* SDEI Event number */ + int intr; /* Physical intr number */ +}; + +int sgi_ras_intr_handler_setup(void); + +#endif /* SGI_RAS_H */ diff --git a/plat/arm/css/sgi/include/sgi_sdei.h b/plat/arm/css/sgi/include/sgi_sdei.h new file mode 100644 index 0000000..f380122 --- /dev/null +++ b/plat/arm/css/sgi/include/sgi_sdei.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SGI_SDEI_H +#define SGI_SDEI_H + +#if SDEI_SUPPORT + +/* ARM SDEI dynamic shared event numbers */ +#define SGI_SDEI_DS_EVENT_0 U(804) +#define SGI_SDEI_DS_EVENT_1 U(805) + +#define PLAT_ARM_PRIVATE_SDEI_EVENTS \ + SDEI_DEFINE_EVENT_0(ARM_SDEI_SGI), \ + SDEI_EXPLICIT_EVENT(SGI_SDEI_DS_EVENT_0, SDEI_MAPF_CRITICAL), \ + SDEI_EXPLICIT_EVENT(SGI_SDEI_DS_EVENT_1, SDEI_MAPF_CRITICAL), + +#define PLAT_ARM_SHARED_SDEI_EVENTS + +#endif /* SDEI_SUPPORT */ + +#endif /* SGI_SDEI_H */ diff --git a/plat/arm/css/sgi/include/sgi_soc_css_def.h b/plat/arm/css/sgi/include/sgi_soc_css_def.h new file mode 100644 index 0000000..f78b45a --- /dev/null +++ b/plat/arm/css/sgi/include/sgi_soc_css_def.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SGI_SOC_CSS_DEF_H +#define SGI_SOC_CSS_DEF_H + +#include <lib/utils_def.h> +#include <plat/arm/board/common/v2m_def.h> +#include <plat/arm/soc/common/soc_css_def.h> +#include <plat/common/common_def.h> + +/* + * Definitions common to all ARM CSSv1-based development platforms + */ + +/* Platform ID address */ +#define BOARD_CSS_PLAT_ID_REG_ADDR UL(0x7ffe00e0) + +/* Platform ID related accessors */ +#define BOARD_CSS_PLAT_ID_REG_ID_MASK 0x0f +#define BOARD_CSS_PLAT_ID_REG_ID_SHIFT 0x0 +#define BOARD_CSS_PLAT_TYPE_EMULATOR 0x02 + +#ifndef __ASSEMBLER__ + +#include <lib/mmio.h> + +#define BOARD_CSS_GET_PLAT_TYPE(addr) \ + ((mmio_read_32(addr) & BOARD_CSS_PLAT_ID_REG_ID_MASK) \ + >> BOARD_CSS_PLAT_ID_REG_ID_SHIFT) + +#endif /* __ASSEMBLER__ */ + +#define MAX_IO_DEVICES 3 +#define MAX_IO_HANDLES 4 + +/* Reserve the last block of flash for PSCI MEM PROTECT flag */ +#define PLAT_ARM_FLASH_IMAGE_BASE V2M_FLASH0_BASE +#define PLAT_ARM_FLASH_IMAGE_MAX_SIZE (V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE) + +#define PLAT_ARM_NVM_BASE V2M_FLASH0_BASE +#define PLAT_ARM_NVM_SIZE (V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE) + +#endif /* SGI_SOC_CSS_DEF_H */ diff --git a/plat/arm/css/sgi/include/sgi_soc_css_def_v2.h b/plat/arm/css/sgi/include/sgi_soc_css_def_v2.h new file mode 100644 index 0000000..acf31eb --- /dev/null +++ b/plat/arm/css/sgi/include/sgi_soc_css_def_v2.h @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SGI_SOC_CSS_DEF_V2_H +#define SGI_SOC_CSS_DEF_V2_H + +#include <lib/utils_def.h> +#include <plat/common/common_def.h> + +/* + * Definitions common to all ARM CSS SoCs + */ + +/* Following covers ARM CSS SoC Peripherals */ + +#define SOC_SYSTEM_PERIPH_BASE UL(0x0C000000) +#define SOC_SYSTEM_PERIPH_SIZE UL(0x02000000) + +#define SOC_PLATFORM_PERIPH_BASE UL(0x0E000000) +#define SOC_PLATFORM_PERIPH_SIZE UL(0x02000000) + +#define SOC_CSS_PCIE_CONTROL_BASE UL(0x0ef20000) + +/* Memory controller */ +#define SOC_MEMCNTRL_BASE UL(0x10000000) +#define SOC_MEMCNTRL_SIZE UL(0x10000000) + +/* SoC NIC-400 Global Programmers View (GPV) */ +#define SOC_CSS_NIC400_BASE UL(0x0ED00000) + +#define SOC_CSS_NIC400_USB_EHCI U(0) +#define SOC_CSS_NIC400_TLX_MASTER U(1) +#define SOC_CSS_NIC400_USB_OHCI U(2) +#define SOC_CSS_NIC400_PL354_SMC U(3) +/* + * The apb4_bridge controls access to: + * - the PCIe configuration registers + * - the MMU units for USB, HDLCD and DMA + */ +#define SOC_CSS_NIC400_APB4_BRIDGE U(4) + +/* Non-volatile counters */ +#define SOC_TRUSTED_NVCTR_BASE UL(0x0EE70000) +#define TFW_NVCTR_BASE (SOC_TRUSTED_NVCTR_BASE + 0x0000) +#define TFW_NVCTR_SIZE U(4) +#define NTFW_CTR_BASE (SOC_TRUSTED_NVCTR_BASE + 0x0004) +#define NTFW_CTR_SIZE U(4) + +/* Keys */ +#define SOC_KEYS_BASE UL(0x0EE80000) +#define TZ_PUB_KEY_HASH_BASE (SOC_KEYS_BASE + 0x0000) +#define TZ_PUB_KEY_HASH_SIZE U(32) +#define HU_KEY_BASE (SOC_KEYS_BASE + 0x0020) +#define HU_KEY_SIZE U(16) +#define END_KEY_BASE (SOC_KEYS_BASE + 0x0044) +#define END_KEY_SIZE U(32) + +#define SOC_PLATFORM_PERIPH_MAP_DEVICE MAP_REGION_FLAT( \ + SOC_PLATFORM_PERIPH_BASE, \ + SOC_PLATFORM_PERIPH_SIZE, \ + MT_DEVICE | MT_RW | MT_SECURE) + +#if SPM_MM +/* + * Memory map definition for the platform peripheral memory region that is + * accessible from S-EL0 (with secure user mode access). 
+ */ +#define SOC_PLATFORM_PERIPH_MAP_DEVICE_USER \ + MAP_REGION_FLAT( \ + SOC_PLATFORM_PERIPH_BASE, \ + SOC_PLATFORM_PERIPH_SIZE, \ + MT_DEVICE | MT_RW | MT_SECURE | MT_USER) +#endif + +#define SOC_SYSTEM_PERIPH_MAP_DEVICE MAP_REGION_FLAT( \ + SOC_SYSTEM_PERIPH_BASE, \ + SOC_SYSTEM_PERIPH_SIZE, \ + MT_DEVICE | MT_RW | MT_SECURE) + +#define SOC_MEMCNTRL_MAP_DEVICE MAP_REGION_FLAT( \ + SOC_MEMCNTRL_BASE, \ + SOC_MEMCNTRL_SIZE, \ + MT_DEVICE | MT_RW | MT_SECURE) + +#define SOC_MEMCNTRL_MAP_DEVICE_REMOTE_CHIP(n) \ + MAP_REGION_FLAT( \ + CSS_SGI_REMOTE_CHIP_MEM_OFFSET(n) + SOC_MEMCNTRL_BASE, \ + SOC_MEMCNTRL_SIZE, \ + MT_DEVICE | MT_RW | MT_SECURE) + +/* + * The bootsec_bridge controls access to a bunch of peripherals, e.g. the UARTs. + */ +#define SOC_CSS_NIC400_BOOTSEC_BRIDGE U(5) +#define SOC_CSS_NIC400_BOOTSEC_BRIDGE_UART1 UL(1 << 12) + +/* + * Required platform porting definitions common to all ARM CSS SoCs + */ +/* 2MB used for SCP DDR retraining */ +#define PLAT_ARM_SCP_TZC_DRAM1_SIZE UL(0x00200000) + +/* V2M motherboard system registers & offsets */ +#define V2M_SYSREGS_BASE UL(0x0C010000) +#define V2M_SYS_LED U(0x8) + +/* + * V2M sysled bit definitions. The values written to this + * register are defined in arch.h & runtime_svc.h. Only + * used by the primary cpu to diagnose any cold boot issues. + * + * SYS_LED[0] - Security state (S=0/NS=1) + * SYS_LED[2:1] - Exception Level (EL3-EL0) + * SYS_LED[7:3] - Exception Class (Sync/Async & origin) + * + */ +#define V2M_SYS_LED_SS_SHIFT U(0) +#define V2M_SYS_LED_EL_SHIFT U(1) +#define V2M_SYS_LED_EC_SHIFT U(3) + +#define V2M_SYS_LED_SS_MASK U(0x01) +#define V2M_SYS_LED_EL_MASK U(0x03) +#define V2M_SYS_LED_EC_MASK U(0x1f) + +/* NOR Flash */ +#define V2M_FLASH0_BASE UL(0x08000000) +#define V2M_FLASH0_SIZE UL(0x04000000) +#define V2M_FLASH_BLOCK_SIZE UL(0x00040000) /* 256 KB */ + +/* + * The flash can be mapped either as read-only or read-write. + * + * If it is read-write then it should also be mapped as device memory because + * NOR flash programming involves sending a fixed, ordered sequence of commands. + * + * If it is read-only then it should also be mapped as: + * - Normal memory, because reading from NOR flash is transparent, it is like + * reading from RAM. + * - Non-executable by default. If some parts of the flash need to be executable + * then platform code is responsible for re-mapping the appropriate portion + * of it as executable. 
+ */ +#define V2M_MAP_FLASH0_RW MAP_REGION_FLAT(V2M_FLASH0_BASE,\ + V2M_FLASH0_SIZE, \ + MT_DEVICE | MT_RW | MT_SECURE) + +#define V2M_MAP_FLASH0_RO MAP_REGION_FLAT(V2M_FLASH0_BASE,\ + V2M_FLASH0_SIZE, \ + MT_RO_DATA | MT_SECURE) + +#define SGI_MAP_FLASH0_RO MAP_REGION_FLAT(V2M_FLASH0_BASE,\ + V2M_FLASH0_SIZE, \ + MT_DEVICE | MT_RO | MT_SECURE) + +/* Platform ID address */ +#define BOARD_CSS_PLAT_ID_REG_ADDR UL(0x0EFE00E0) + +/* Platform ID related accessors */ +#define BOARD_CSS_PLAT_ID_REG_ID_MASK U(0x0F) +#define BOARD_CSS_PLAT_ID_REG_ID_SHIFT U(0x00) +#define BOARD_CSS_PLAT_ID_REG_VERSION_MASK U(0xF00) +#define BOARD_CSS_PLAT_ID_REG_VERSION_SHIFT U(0x08) +#define BOARD_CSS_PLAT_TYPE_RTL U(0x00) +#define BOARD_CSS_PLAT_TYPE_FPGA U(0x01) +#define BOARD_CSS_PLAT_TYPE_EMULATOR U(0x02) +#define BOARD_CSS_PLAT_TYPE_FVP U(0x03) + +#ifndef __ASSEMBLER__ + +#include <lib/mmio.h> + +#define BOARD_CSS_GET_PLAT_TYPE(addr) \ + ((mmio_read_32(addr) & BOARD_CSS_PLAT_ID_REG_ID_MASK) \ + >> BOARD_CSS_PLAT_ID_REG_ID_SHIFT) + +#endif /* __ASSEMBLER__ */ + + +#define MAX_IO_DEVICES U(3) +#define MAX_IO_HANDLES U(4) + +/* Reserve the last block of flash for PSCI MEM PROTECT flag */ +#define PLAT_ARM_FLASH_IMAGE_BASE V2M_FLASH0_BASE +#define PLAT_ARM_FLASH_IMAGE_MAX_SIZE (V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE) + +#if ARM_GPT_SUPPORT +/* + * Offset of the FIP in the GPT image. BL1 component uses this option + * as it does not load the partition table to get the FIP base + * address. At sector 34 by default (i.e. after reserved sectors 0-33) + * Offset = 34 * 512(sector size) = 17408 i.e. 0x4400 + */ +#define PLAT_ARM_FIP_OFFSET_IN_GPT 0x4400 +#endif /* ARM_GPT_SUPPORT */ + +#define PLAT_ARM_NVM_BASE V2M_FLASH0_BASE +#define PLAT_ARM_NVM_SIZE (V2M_FLASH0_SIZE - V2M_FLASH_BLOCK_SIZE) + +#endif /* SGI_SOC_CSS_DEF_V2_H */ diff --git a/plat/arm/css/sgi/include/sgi_soc_platform_def.h b/plat/arm/css/sgi/include/sgi_soc_platform_def.h new file mode 100644 index 0000000..3b8d9c6 --- /dev/null +++ b/plat/arm/css/sgi/include/sgi_soc_platform_def.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SGI_SOC_PLATFORM_DEF_H +#define SGI_SOC_PLATFORM_DEF_H + +#include <plat/arm/board/common/v2m_def.h> +#include <plat/arm/soc/common/soc_css_def.h> +#include <sgi_base_platform_def.h> +#include <sgi_soc_css_def.h> + +/* Map the System registers to access from S-EL0 */ +#define CSS_SYSTEMREG_DEVICE_BASE (0x1C010000) +#define CSS_SYSTEMREG_DEVICE_SIZE (0x00010000) +#define PLAT_ARM_SECURE_MAP_SYSTEMREG MAP_REGION_FLAT( \ + CSS_SYSTEMREG_DEVICE_BASE, \ + CSS_SYSTEMREG_DEVICE_SIZE, \ + (MT_DEVICE | MT_RW | \ + MT_SECURE | MT_USER)) + +/* Map the NOR2 Flash to access from S-EL0 */ +#define CSS_NOR2_FLASH_DEVICE_BASE (0x10000000) +#define CSS_NOR2_FLASH_DEVICE_SIZE (0x04000000) +#define PLAT_ARM_SECURE_MAP_NOR2 MAP_REGION_FLAT( \ + CSS_NOR2_FLASH_DEVICE_BASE, \ + CSS_NOR2_FLASH_DEVICE_SIZE, \ + (MT_DEVICE | MT_RW | \ + MT_SECURE | MT_USER)) + +#endif /* SGI_SOC_PLATFORM_DEF_H */ diff --git a/plat/arm/css/sgi/include/sgi_soc_platform_def_v2.h b/plat/arm/css/sgi/include/sgi_soc_platform_def_v2.h new file mode 100644 index 0000000..20dd682 --- /dev/null +++ b/plat/arm/css/sgi/include/sgi_soc_platform_def_v2.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SGI_SOC_PLATFORM_DEF_V2_H +#define SGI_SOC_PLATFORM_DEF_V2_H + +#include <sgi_base_platform_def.h> +#include <sgi_soc_css_def_v2.h> + +/* Map the System registers to access from S-EL0 */ +#define CSS_SYSTEMREG_DEVICE_BASE (0x0C010000) +#define CSS_SYSTEMREG_DEVICE_SIZE (0x00010000) +#define PLAT_ARM_SECURE_MAP_SYSTEMREG MAP_REGION_FLAT( \ + CSS_SYSTEMREG_DEVICE_BASE, \ + CSS_SYSTEMREG_DEVICE_SIZE, \ + (MT_DEVICE | MT_RW | \ + MT_SECURE | MT_USER)) + +/* Map the NOR2 Flash to access from S-EL0 */ +#define CSS_NOR2_FLASH_DEVICE_BASE (0x001054000000) +#define CSS_NOR2_FLASH_DEVICE_SIZE (0x000004000000) +#define PLAT_ARM_SECURE_MAP_NOR2 MAP_REGION_FLAT( \ + CSS_NOR2_FLASH_DEVICE_BASE, \ + CSS_NOR2_FLASH_DEVICE_SIZE, \ + (MT_DEVICE | MT_RW | \ + MT_SECURE | MT_USER)) + +#endif /* SGI_SOC_PLATFORM_DEF_V2_H */ diff --git a/plat/arm/css/sgi/include/sgi_variant.h b/plat/arm/css/sgi/include/sgi_variant.h new file mode 100644 index 0000000..223ac3e --- /dev/null +++ b/plat/arm/css/sgi/include/sgi_variant.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SGI_VARIANT_H +#define SGI_VARIANT_H + +/* SSC_VERSION values for SGI575 */ +#define SGI575_SSC_VER_PART_NUM 0x0783 + +/* SID Version values for RD-N1E1-Edge */ +#define RD_N1E1_EDGE_SID_VER_PART_NUM 0x0786 +#define RD_E1_EDGE_CONFIG_ID 0x2 + +/* SID Version values for RD-V1 */ +#define RD_V1_SID_VER_PART_NUM 0x078a + +/* SID Version values for RD-N2 */ +#define RD_N2_SID_VER_PART_NUM 0x07B7 + +/* SID Version values for RD-N2 variants */ +#define RD_N2_CFG1_SID_VER_PART_NUM 0x07B6 + +/* SID Version values for RD-V2 */ +#define RD_V2_SID_VER_PART_NUM 0x07F2 +#define RD_V2_CONFIG_ID 0x1 + +/* Structure containing SGI platform variant information */ +typedef struct sgi_platform_info { + unsigned int platform_id; /* Part Number of the platform */ + unsigned int config_id; /* Config Id of the platform */ + unsigned int chip_id; /* Chip Id or Node number */ + unsigned int multi_chip_mode; /* Multi-chip mode availability */ +} sgi_platform_info_t; + +extern sgi_platform_info_t sgi_plat_info; + +/* returns the part number of the platform*/ +unsigned int plat_arm_sgi_get_platform_id(void); + +/* returns the configuration id of the platform */ +unsigned int plat_arm_sgi_get_config_id(void); + +/* returns true if operating in multi-chip configuration */ +unsigned int plat_arm_sgi_get_multi_chip_mode(void); + +#endif /* SGI_VARIANT_H */ diff --git a/plat/arm/css/sgi/sgi-common.mk b/plat/arm/css/sgi/sgi-common.mk new file mode 100644 index 0000000..282a5f0 --- /dev/null +++ b/plat/arm/css/sgi/sgi-common.mk @@ -0,0 +1,86 @@ +# +# Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +CSS_USE_SCMI_SDS_DRIVER := 1 + +CSS_ENT_BASE := plat/arm/css/sgi + +RAS_EXTENSION := 0 + +SDEI_SUPPORT := 0 + +EL3_EXCEPTION_HANDLING := 0 + +HANDLE_EA_EL3_FIRST_NS := 0 + +CSS_SGI_CHIP_COUNT := 1 + +CSS_SGI_PLATFORM_VARIANT := 0 + +# Do not enable SVE +ENABLE_SVE_FOR_NS := 0 + +CTX_INCLUDE_FPREGS := 1 + +INTERCONNECT_SOURCES := ${CSS_ENT_BASE}/sgi_interconnect.c + +PLAT_INCLUDES += -I${CSS_ENT_BASE}/include + +# GIC-600 configuration +GICV3_SUPPORT_GIC600 := 1 + +# Include GICv3 driver files +include drivers/arm/gic/v3/gicv3.mk + +ENT_GIC_SOURCES := ${GICV3_SOURCES} \ + plat/common/plat_gicv3.c \ + plat/arm/common/arm_gicv3.c + +PLAT_BL_COMMON_SOURCES += ${CSS_ENT_BASE}/aarch64/sgi_helper.S + +BL1_SOURCES += ${INTERCONNECT_SOURCES} \ + drivers/arm/sbsa/sbsa.c + +BL2_SOURCES += ${CSS_ENT_BASE}/sgi_image_load.c \ + drivers/arm/css/sds/sds.c + +BL31_SOURCES += ${INTERCONNECT_SOURCES} \ + ${ENT_GIC_SOURCES} \ + ${CSS_ENT_BASE}/sgi_bl31_setup.c \ + ${CSS_ENT_BASE}/sgi_topology.c + +ifeq (${RAS_EXTENSION},1) +BL31_SOURCES += ${CSS_ENT_BASE}/sgi_ras.c +endif + +ifneq (${RESET_TO_BL31},0) + $(error "Using BL31 as the reset vector is not supported on ${PLAT} platform. \ + Please set RESET_TO_BL31 to 0.") +endif + +$(eval $(call add_define,SGI_PLAT)) + +$(eval $(call add_define,CSS_SGI_CHIP_COUNT)) + +$(eval $(call add_define,CSS_SGI_PLATFORM_VARIANT)) + +override CSS_LOAD_SCP_IMAGES := 0 +override NEED_BL2U := no +override ARM_PLAT_MT := 1 +override PSCI_EXTENDED_STATE_ID := 1 +override ARM_RECOM_STATE_ID_ENC := 1 + +# System coherency is managed in hardware +HW_ASSISTED_COHERENCY := 1 + +# When building for systems with hardware-assisted coherency, there's no need to +# use USE_COHERENT_MEM. Require that USE_COHERENT_MEM must be set to 0 too. +USE_COHERENT_MEM := 0 + +include plat/arm/common/arm_common.mk +include plat/arm/css/common/css_common.mk +include plat/arm/soc/common/soc_css.mk +include plat/arm/board/common/board_common.mk diff --git a/plat/arm/css/sgi/sgi_bl31_setup.c b/plat/arm/css/sgi/sgi_bl31_setup.c new file mode 100644 index 0000000..27cf183 --- /dev/null +++ b/plat/arm/css/sgi/sgi_bl31_setup.c @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <libfdt.h> + +#include <common/bl_common.h> +#include <common/debug.h> +#include <drivers/arm/css/css_mhu_doorbell.h> +#include <drivers/arm/css/scmi.h> +#include <plat/arm/common/plat_arm.h> + +#include <plat/common/platform.h> + +#include <plat/arm/css/common/css_pm.h> + +#include <sgi_ras.h> +#include <sgi_variant.h> + +sgi_platform_info_t sgi_plat_info; + +static scmi_channel_plat_info_t sgi575_scmi_plat_info = { + .scmi_mbx_mem = CSS_SCMI_PAYLOAD_BASE, + .db_reg_addr = PLAT_CSS_MHU_BASE + CSS_SCMI_MHU_DB_REG_OFF, + .db_preserve_mask = 0xfffffffe, + .db_modify_mask = 0x1, + .ring_doorbell = &mhu_ring_doorbell, +}; + +static scmi_channel_plat_info_t plat_rd_scmi_info[] = { + { + .scmi_mbx_mem = CSS_SCMI_PAYLOAD_BASE, + .db_reg_addr = PLAT_CSS_MHU_BASE + SENDER_REG_SET(0), + .db_preserve_mask = 0xfffffffe, + .db_modify_mask = 0x1, + .ring_doorbell = &mhuv2_ring_doorbell, + }, + #if (CSS_SGI_CHIP_COUNT > 1) + { + .scmi_mbx_mem = CSS_SCMI_PAYLOAD_BASE + + CSS_SGI_REMOTE_CHIP_MEM_OFFSET(1), + .db_reg_addr = PLAT_CSS_MHU_BASE + + CSS_SGI_REMOTE_CHIP_MEM_OFFSET(1) + SENDER_REG_SET(0), + .db_preserve_mask = 0xfffffffe, + .db_modify_mask = 0x1, + .ring_doorbell = &mhuv2_ring_doorbell, + }, + #endif + #if (CSS_SGI_CHIP_COUNT > 2) + { + .scmi_mbx_mem = CSS_SCMI_PAYLOAD_BASE + + CSS_SGI_REMOTE_CHIP_MEM_OFFSET(2), + .db_reg_addr = PLAT_CSS_MHU_BASE + + CSS_SGI_REMOTE_CHIP_MEM_OFFSET(2) + SENDER_REG_SET(0), + .db_preserve_mask = 0xfffffffe, + .db_modify_mask = 0x1, + .ring_doorbell = &mhuv2_ring_doorbell, + }, + #endif + #if (CSS_SGI_CHIP_COUNT > 3) + { + .scmi_mbx_mem = CSS_SCMI_PAYLOAD_BASE + + CSS_SGI_REMOTE_CHIP_MEM_OFFSET(3), + .db_reg_addr = PLAT_CSS_MHU_BASE + + CSS_SGI_REMOTE_CHIP_MEM_OFFSET(3) + SENDER_REG_SET(0), + .db_preserve_mask = 0xfffffffe, + .db_modify_mask = 0x1, + .ring_doorbell = &mhuv2_ring_doorbell, + }, + #endif +}; + +scmi_channel_plat_info_t *plat_css_get_scmi_info(int channel_id) +{ + if (sgi_plat_info.platform_id == RD_N1E1_EDGE_SID_VER_PART_NUM || + sgi_plat_info.platform_id == RD_V1_SID_VER_PART_NUM || + sgi_plat_info.platform_id == RD_N2_SID_VER_PART_NUM || + sgi_plat_info.platform_id == RD_V2_SID_VER_PART_NUM || + sgi_plat_info.platform_id == RD_N2_CFG1_SID_VER_PART_NUM) { + if (channel_id >= ARRAY_SIZE(plat_rd_scmi_info)) + panic(); + return &plat_rd_scmi_info[channel_id]; + } + else if (sgi_plat_info.platform_id == SGI575_SSC_VER_PART_NUM) + return &sgi575_scmi_plat_info; + else + panic(); +} + +void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ + sgi_plat_info.platform_id = plat_arm_sgi_get_platform_id(); + sgi_plat_info.config_id = plat_arm_sgi_get_config_id(); + sgi_plat_info.multi_chip_mode = plat_arm_sgi_get_multi_chip_mode(); + + arm_bl31_early_platform_setup((void *)arg0, arg1, arg2, (void *)arg3); +} + +void sgi_bl31_common_platform_setup(void) +{ + arm_bl31_platform_setup(); + +#if RAS_EXTENSION + sgi_ras_intr_handler_setup(); +#endif + + /* Configure the warm reboot SGI for primary core */ + css_setup_cpu_pwr_down_intr(); + +#if CSS_SYSTEM_GRACEFUL_RESET + /* Register priority level handlers for reboot */ + ehf_register_priority_handler(PLAT_REBOOT_PRI, + css_reboot_interrupt_handler); +#endif +} + +const plat_psci_ops_t *plat_arm_psci_override_pm_ops(plat_psci_ops_t *ops) +{ + /* + * For RD-E1-Edge, only CPU power ON/OFF, PSCI platform callbacks are + * supported. 
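+ * Clearing the remaining hooks below makes the generic PSCI layer drop the
+ * corresponding calls from the capabilities it advertises, so callers get
+ * NOT_SUPPORTED for them instead of reaching a platform handler.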
+ */ + if (((sgi_plat_info.platform_id == RD_N1E1_EDGE_SID_VER_PART_NUM) && + (sgi_plat_info.config_id == RD_E1_EDGE_CONFIG_ID))) { + ops->cpu_standby = NULL; + ops->system_off = NULL; + ops->system_reset = NULL; + ops->get_sys_suspend_power_state = NULL; + ops->pwr_domain_suspend = NULL; + ops->pwr_domain_suspend_finish = NULL; + } + + return css_scmi_override_pm_ops(ops); +} diff --git a/plat/arm/css/sgi/sgi_image_load.c b/plat/arm/css/sgi/sgi_image_load.c new file mode 100644 index 0000000..ac4bfd2 --- /dev/null +++ b/plat/arm/css/sgi/sgi_image_load.c @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <libfdt.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <common/desc_image_load.h> +#include <drivers/arm/css/sds.h> +#include <plat/arm/common/plat_arm.h> +#include <plat/common/platform.h> + +#include <platform_def.h> +#include <sgi_base_platform_def.h> +#include <sgi_variant.h> + +/* + * Information about the isolated CPUs obtained from SDS. + */ +struct isolated_cpu_mpid_list { + uint64_t num_entries; /* Number of entries in the list */ + uint64_t mpid_list[PLATFORM_CORE_COUNT]; /* List of isolated CPU MPIDs */ +}; + +/* Function to read isolated CPU MPID list from SDS. */ +void plat_arm_sgi_get_isolated_cpu_list(struct isolated_cpu_mpid_list *list) +{ + int ret; + + ret = sds_init(); + if (ret != SDS_OK) { + ERROR("SDS initialization failed, error: %d\n", ret); + panic(); + } + + ret = sds_struct_read(SDS_ISOLATED_CPU_LIST_ID, 0, &list->num_entries, + sizeof(list->num_entries), SDS_ACCESS_MODE_CACHED); + if (ret != SDS_OK) { + INFO("SDS CPU num elements read failed, error: %d\n", ret); + list->num_entries = 0; + return; + } + + if (list->num_entries > PLATFORM_CORE_COUNT) { + ERROR("Isolated CPU list count %ld greater than max" + " number supported %d\n", + list->num_entries, PLATFORM_CORE_COUNT); + panic(); + } else if (list->num_entries == 0) { + INFO("SDS isolated CPU list is empty\n"); + return; + } + + ret = sds_struct_read(SDS_ISOLATED_CPU_LIST_ID, + sizeof(list->num_entries), + &list->mpid_list, + sizeof(list->mpid_list[0]) * list->num_entries, + SDS_ACCESS_MODE_CACHED); + if (ret != SDS_OK) { + ERROR("SDS CPU list read failed. 
error: %d\n", ret); + panic(); + } +} + +/******************************************************************************* + * This function inserts Platform information via device tree nodes as, + * system-id { + * platform-id = <0>; + * config-id = <0>; + * isolated-cpu-list = <0> + * } + ******************************************************************************/ +static int plat_sgi_append_config_node(void) +{ + bl_mem_params_node_t *mem_params; + void *fdt; + int nodeoffset, err; + unsigned int platid = 0, platcfg = 0; + struct isolated_cpu_mpid_list cpu_mpid_list = {0}; + + mem_params = get_bl_mem_params_node(NT_FW_CONFIG_ID); + if (mem_params == NULL) { + ERROR("NT_FW CONFIG base address is NULL"); + return -1; + } + + fdt = (void *)(mem_params->image_info.image_base); + + /* Check the validity of the fdt */ + if (fdt_check_header(fdt) != 0) { + ERROR("Invalid NT_FW_CONFIG DTB passed\n"); + return -1; + } + + nodeoffset = fdt_subnode_offset(fdt, 0, "system-id"); + if (nodeoffset < 0) { + ERROR("Failed to get system-id node offset\n"); + return -1; + } + + platid = plat_arm_sgi_get_platform_id(); + err = fdt_setprop_u32(fdt, nodeoffset, "platform-id", platid); + if (err < 0) { + ERROR("Failed to set platform-id\n"); + return -1; + } + + platcfg = plat_arm_sgi_get_config_id(); + err = fdt_setprop_u32(fdt, nodeoffset, "config-id", platcfg); + if (err < 0) { + ERROR("Failed to set config-id\n"); + return -1; + } + + platcfg = plat_arm_sgi_get_multi_chip_mode(); + err = fdt_setprop_u32(fdt, nodeoffset, "multi-chip-mode", platcfg); + if (err < 0) { + ERROR("Failed to set multi-chip-mode\n"); + return -1; + } + + plat_arm_sgi_get_isolated_cpu_list(&cpu_mpid_list); + if (cpu_mpid_list.num_entries > 0) { + err = fdt_setprop(fdt, nodeoffset, "isolated-cpu-list", + &cpu_mpid_list, + (sizeof(cpu_mpid_list.num_entries) * + (cpu_mpid_list.num_entries + 1))); + if (err < 0) { + ERROR("Failed to set isolated-cpu-list, error: %d\n", + err); + } + } + + flush_dcache_range((uintptr_t)fdt, mem_params->image_info.image_size); + + return 0; +} + +/******************************************************************************* + * This function returns the list of executable images. + ******************************************************************************/ +bl_params_t *plat_get_next_bl_params(void) +{ + int ret; + + ret = plat_sgi_append_config_node(); + if (ret != 0) + panic(); + + return arm_get_next_bl_params(); +} + diff --git a/plat/arm/css/sgi/sgi_interconnect.c b/plat/arm/css/sgi/sgi_interconnect.c new file mode 100644 index 0000000..e9cd812 --- /dev/null +++ b/plat/arm/css/sgi/sgi_interconnect.c @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch_helpers.h> +#include <common/debug.h> +#include <plat/arm/common/plat_arm.h> + +/* + * For SGI575 which support FCM (with automatic interconnect enter/exit), + * we should not do anything in these interface functions. + * They are used to override the weak functions in cci drivers. + */ + +/****************************************************************************** + * Helper function to initialize ARM interconnect driver. 
+ *****************************************************************************/ +void __init plat_arm_interconnect_init(void) +{ +} + +/****************************************************************************** + * Helper function to place current master into coherency + *****************************************************************************/ +void plat_arm_interconnect_enter_coherency(void) +{ +} + +/****************************************************************************** + * Helper function to remove current master from coherency + *****************************************************************************/ +void plat_arm_interconnect_exit_coherency(void) +{ +} diff --git a/plat/arm/css/sgi/sgi_plat.c b/plat/arm/css/sgi/sgi_plat.c new file mode 100644 index 0000000..a0199c3 --- /dev/null +++ b/plat/arm/css/sgi/sgi_plat.c @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <platform_def.h> + +#include <common/bl_common.h> +#include <common/debug.h> +#include <drivers/arm/ccn.h> +#include <plat/arm/common/plat_arm.h> +#include <plat/common/platform.h> +#include <drivers/arm/sbsa.h> +#include <sgi_base_platform_def.h> + +#if SPM_MM +#include <services/spm_mm_partition.h> +#endif + +#define SGI_MAP_FLASH0_RO MAP_REGION_FLAT(V2M_FLASH0_BASE,\ + V2M_FLASH0_SIZE, \ + MT_DEVICE | MT_RO | MT_SECURE) +/* + * Table of regions for different BL stages to map using the MMU. + * This doesn't include Trusted RAM as the 'mem_layout' argument passed to + * arm_configure_mmu_elx() will give the available subset of that. + * + * Replace or extend the below regions as required + */ +#if IMAGE_BL1 +const mmap_region_t plat_arm_mmap[] = { + ARM_MAP_SHARED_RAM, + SGI_MAP_FLASH0_RO, + CSS_SGI_MAP_DEVICE, + SOC_CSS_MAP_DEVICE, + {0} +}; +#endif +#if IMAGE_BL2 +const mmap_region_t plat_arm_mmap[] = { + ARM_MAP_SHARED_RAM, + SGI_MAP_FLASH0_RO, +#ifdef PLAT_ARM_MEM_PROT_ADDR + ARM_V2M_MAP_MEM_PROTECT, +#endif + CSS_SGI_MAP_DEVICE, + SOC_CSS_MAP_DEVICE, + ARM_MAP_NS_DRAM1, +#if CSS_SGI_CHIP_COUNT > 1 + CSS_SGI_MAP_DEVICE_REMOTE_CHIP(1), +#endif +#if CSS_SGI_CHIP_COUNT > 2 + CSS_SGI_MAP_DEVICE_REMOTE_CHIP(2), +#endif +#if CSS_SGI_CHIP_COUNT > 3 + CSS_SGI_MAP_DEVICE_REMOTE_CHIP(3), +#endif +#if ARM_BL31_IN_DRAM + ARM_MAP_BL31_SEC_DRAM, +#endif +#if SPM_MM + ARM_SP_IMAGE_MMAP, +#endif +#if TRUSTED_BOARD_BOOT && !BL2_AT_EL3 + ARM_MAP_BL1_RW, +#endif + {0} +}; +#endif +#if IMAGE_BL31 +const mmap_region_t plat_arm_mmap[] = { + ARM_MAP_SHARED_RAM, + V2M_MAP_IOFPGA, + CSS_SGI_MAP_DEVICE, +#ifdef PLAT_ARM_MEM_PROT_ADDR + ARM_V2M_MAP_MEM_PROTECT, +#endif + SOC_CSS_MAP_DEVICE, +#if SPM_MM + ARM_SPM_BUF_EL3_MMAP, +#endif + {0} +}; + +#if SPM_MM && defined(IMAGE_BL31) +const mmap_region_t plat_arm_secure_partition_mmap[] = { + PLAT_ARM_SECURE_MAP_SYSTEMREG, + PLAT_ARM_SECURE_MAP_NOR2, + SOC_PLATFORM_SECURE_UART, + PLAT_ARM_SECURE_MAP_DEVICE, + ARM_SP_IMAGE_MMAP, + ARM_SP_IMAGE_NS_BUF_MMAP, +#if RAS_EXTENSION + CSS_SGI_SP_CPER_BUF_MMAP, +#endif + ARM_SP_IMAGE_RW_MMAP, + ARM_SPM_BUF_EL0_MMAP, + {0} +}; +#endif /* SPM_MM && defined(IMAGE_BL31) */ +#endif + +ARM_CASSERT_MMAP + +#if SPM_MM && defined(IMAGE_BL31) +/* + * Boot information passed to a secure partition during initialisation. Linear + * indices in MP information will be filled at runtime. 
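+ * Each entry below is an MPIDR in multi-threaded (MT) format: bit[31] is
+ * RES1, bit[24] (MT) is set, and Aff2/Aff1/Aff0 give the cluster, core and
+ * thread, e.g. 0x81010200 is cluster 1, core 2, thread 0.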
+ */ +static spm_mm_mp_info_t sp_mp_info[] = { + [0] = {0x81000000, 0}, + [1] = {0x81000100, 0}, + [2] = {0x81000200, 0}, + [3] = {0x81000300, 0}, + [4] = {0x81010000, 0}, + [5] = {0x81010100, 0}, + [6] = {0x81010200, 0}, + [7] = {0x81010300, 0}, +}; + +const spm_mm_boot_info_t plat_arm_secure_partition_boot_info = { + .h.type = PARAM_SP_IMAGE_BOOT_INFO, + .h.version = VERSION_1, + .h.size = sizeof(spm_mm_boot_info_t), + .h.attr = 0, + .sp_mem_base = ARM_SP_IMAGE_BASE, + .sp_mem_limit = ARM_SP_IMAGE_LIMIT, + .sp_image_base = ARM_SP_IMAGE_BASE, + .sp_stack_base = PLAT_SP_IMAGE_STACK_BASE, + .sp_heap_base = ARM_SP_IMAGE_HEAP_BASE, + .sp_ns_comm_buf_base = PLAT_SP_IMAGE_NS_BUF_BASE, + .sp_shared_buf_base = PLAT_SPM_BUF_BASE, + .sp_image_size = ARM_SP_IMAGE_SIZE, + .sp_pcpu_stack_size = PLAT_SP_IMAGE_STACK_PCPU_SIZE, + .sp_heap_size = ARM_SP_IMAGE_HEAP_SIZE, + .sp_ns_comm_buf_size = PLAT_SP_IMAGE_NS_BUF_SIZE, + .sp_shared_buf_size = PLAT_SPM_BUF_SIZE, + .num_sp_mem_regions = ARM_SP_IMAGE_NUM_MEM_REGIONS, + .num_cpus = PLATFORM_CORE_COUNT, + .mp_info = &sp_mp_info[0], +}; + +const struct mmap_region *plat_get_secure_partition_mmap(void *cookie) +{ + return plat_arm_secure_partition_mmap; +} + +const struct spm_mm_boot_info *plat_get_secure_partition_boot_info( + void *cookie) +{ + return &plat_arm_secure_partition_boot_info; +} +#endif /* SPM_MM && defined(IMAGE_BL31) */ + +#if TRUSTED_BOARD_BOOT +int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size) +{ + assert(heap_addr != NULL); + assert(heap_size != NULL); + + return arm_get_mbedtls_heap(heap_addr, heap_size); +} +#endif + +void plat_arm_secure_wdt_start(void) +{ + sbsa_wdog_start(SBSA_SECURE_WDOG_BASE, SBSA_SECURE_WDOG_TIMEOUT); +} + +void plat_arm_secure_wdt_stop(void) +{ + sbsa_wdog_stop(SBSA_SECURE_WDOG_BASE); +} diff --git a/plat/arm/css/sgi/sgi_plat_v2.c b/plat/arm/css/sgi/sgi_plat_v2.c new file mode 100644 index 0000000..cef5345 --- /dev/null +++ b/plat/arm/css/sgi/sgi_plat_v2.c @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> + +#include <platform_def.h> + +#include <plat/arm/common/plat_arm.h> +#include <plat/common/platform.h> +#include <drivers/arm/sbsa.h> + +#if SPM_MM +#include <services/spm_mm_partition.h> +#endif + +/* + * Table of regions for different BL stages to map using the MMU. 
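+ * All entries use MAP_REGION_FLAT(), i.e. an identity mapping (VA == PA)
+ * of the given size with the attributes in the last argument.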
+ */ +#if IMAGE_BL1 +const mmap_region_t plat_arm_mmap[] = { + ARM_MAP_SHARED_RAM, + SGI_MAP_FLASH0_RO, + CSS_SGI_MAP_DEVICE, + SOC_PLATFORM_PERIPH_MAP_DEVICE, + SOC_SYSTEM_PERIPH_MAP_DEVICE, + {0} +}; +#endif + +#if IMAGE_BL2 +const mmap_region_t plat_arm_mmap[] = { + ARM_MAP_SHARED_RAM, + SGI_MAP_FLASH0_RO, +#ifdef PLAT_ARM_MEM_PROT_ADDR + ARM_V2M_MAP_MEM_PROTECT, +#endif + CSS_SGI_MAP_DEVICE, + SOC_MEMCNTRL_MAP_DEVICE, + SOC_PLATFORM_PERIPH_MAP_DEVICE, + SOC_SYSTEM_PERIPH_MAP_DEVICE, + ARM_MAP_NS_DRAM1, +#if CSS_SGI_CHIP_COUNT > 1 + SOC_MEMCNTRL_MAP_DEVICE_REMOTE_CHIP(1), +#endif +#if CSS_SGI_CHIP_COUNT > 2 + SOC_MEMCNTRL_MAP_DEVICE_REMOTE_CHIP(2), +#endif +#if CSS_SGI_CHIP_COUNT > 3 + SOC_MEMCNTRL_MAP_DEVICE_REMOTE_CHIP(3), +#endif +#if ARM_BL31_IN_DRAM + ARM_MAP_BL31_SEC_DRAM, +#endif +#if SPM_MM + ARM_SP_IMAGE_MMAP, +#endif +#if TRUSTED_BOARD_BOOT && !BL2_AT_EL3 + ARM_MAP_BL1_RW, +#endif + {0} +}; +#endif + +#if IMAGE_BL31 +const mmap_region_t plat_arm_mmap[] = { + ARM_MAP_SHARED_RAM, +#ifdef PLAT_ARM_MEM_PROT_ADDR + ARM_V2M_MAP_MEM_PROTECT, +#endif + CSS_SGI_MAP_DEVICE, + SOC_PLATFORM_PERIPH_MAP_DEVICE, + SOC_SYSTEM_PERIPH_MAP_DEVICE, +#if SPM_MM + ARM_SPM_BUF_EL3_MMAP, +#endif + {0} +}; + +#if SPM_MM && defined(IMAGE_BL31) +const mmap_region_t plat_arm_secure_partition_mmap[] = { + PLAT_ARM_SECURE_MAP_SYSTEMREG, + PLAT_ARM_SECURE_MAP_NOR2, + SOC_PLATFORM_SECURE_UART, + SOC_PLATFORM_PERIPH_MAP_DEVICE_USER, + ARM_SP_IMAGE_MMAP, + ARM_SP_IMAGE_NS_BUF_MMAP, + ARM_SP_IMAGE_RW_MMAP, + ARM_SPM_BUF_EL0_MMAP, + {0} +}; +#endif /* SPM_MM && defined(IMAGE_BL31) */ +#endif + +ARM_CASSERT_MMAP + +#if SPM_MM && defined(IMAGE_BL31) +/* + * Boot information passed to a secure partition during initialisation. Linear + * indices in MP information will be filled at runtime. 
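+ * As in sgi_plat.c, every entry is an MPIDR in MT format; here the sixteen
+ * entries differ only in Aff2 (0x810n0000 for n = 0..15), i.e. one core per
+ * cluster position, with the core and thread fields at zero.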
+ */ +static spm_mm_mp_info_t sp_mp_info[] = { + [0] = {0x81000000, 0}, + [1] = {0x81010000, 0}, + [2] = {0x81020000, 0}, + [3] = {0x81030000, 0}, + [4] = {0x81040000, 0}, + [5] = {0x81050000, 0}, + [6] = {0x81060000, 0}, + [7] = {0x81070000, 0}, + [8] = {0x81080000, 0}, + [9] = {0x81090000, 0}, + [10] = {0x810a0000, 0}, + [11] = {0x810b0000, 0}, + [12] = {0x810c0000, 0}, + [13] = {0x810d0000, 0}, + [14] = {0x810e0000, 0}, + [15] = {0x810f0000, 0}, +}; + +const spm_mm_boot_info_t plat_arm_secure_partition_boot_info = { + .h.type = PARAM_SP_IMAGE_BOOT_INFO, + .h.version = VERSION_1, + .h.size = sizeof(spm_mm_boot_info_t), + .h.attr = 0, + .sp_mem_base = ARM_SP_IMAGE_BASE, + .sp_mem_limit = ARM_SP_IMAGE_LIMIT, + .sp_image_base = ARM_SP_IMAGE_BASE, + .sp_stack_base = PLAT_SP_IMAGE_STACK_BASE, + .sp_heap_base = ARM_SP_IMAGE_HEAP_BASE, + .sp_ns_comm_buf_base = PLAT_SP_IMAGE_NS_BUF_BASE, + .sp_shared_buf_base = PLAT_SPM_BUF_BASE, + .sp_image_size = ARM_SP_IMAGE_SIZE, + .sp_pcpu_stack_size = PLAT_SP_IMAGE_STACK_PCPU_SIZE, + .sp_heap_size = ARM_SP_IMAGE_HEAP_SIZE, + .sp_ns_comm_buf_size = PLAT_SP_IMAGE_NS_BUF_SIZE, + .sp_shared_buf_size = PLAT_SPM_BUF_SIZE, + .num_sp_mem_regions = ARM_SP_IMAGE_NUM_MEM_REGIONS, + .num_cpus = PLATFORM_CORE_COUNT, + .mp_info = &sp_mp_info[0], +}; + +const struct mmap_region *plat_get_secure_partition_mmap(void *cookie) +{ + return plat_arm_secure_partition_mmap; +} + +const struct spm_mm_boot_info *plat_get_secure_partition_boot_info( + void *cookie) +{ + return &plat_arm_secure_partition_boot_info; +} +#endif /* SPM_MM && defined(IMAGE_BL31) */ + +#if TRUSTED_BOARD_BOOT +int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size) +{ + assert(heap_addr != NULL); + assert(heap_size != NULL); + + return arm_get_mbedtls_heap(heap_addr, heap_size); +} +#endif + +void plat_arm_secure_wdt_start(void) +{ + sbsa_wdog_start(SBSA_SECURE_WDOG_BASE, SBSA_SECURE_WDOG_TIMEOUT); +} + +void plat_arm_secure_wdt_stop(void) +{ + sbsa_wdog_stop(SBSA_SECURE_WDOG_BASE); +} diff --git a/plat/arm/css/sgi/sgi_ras.c b/plat/arm/css/sgi/sgi_ras.c new file mode 100644 index 0000000..4f03ac4 --- /dev/null +++ b/plat/arm/css/sgi/sgi_ras.c @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <string.h> + +#include <bl31/interrupt_mgmt.h> +#include <lib/el3_runtime/context_mgmt.h> +#include <lib/extensions/ras.h> +#include <plat/arm/common/arm_spm_def.h> +#include <plat/common/platform.h> +#include <services/sdei.h> +#include <services/spm_mm_svc.h> + +#include <sgi_ras.h> + +static int sgi_ras_intr_handler(const struct err_record_info *err_rec, + int probe_data, + const struct err_handler_data *const data); +typedef struct mm_communicate_header { + struct efi_guid header_guid; + size_t message_len; + uint8_t data[8]; +} mm_communicate_header_t; + +/* + * GUID to indicate that the MM communication message is intended for DMC-620 + * MM driver. 
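+ * The initialiser below is the GUID 5ef0afd5-e01a-4c30-8619-454626918098
+ * split into the usual 32/16/16-bit fields followed by eight bytes, matching
+ * the layout of struct efi_guid.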
+ */ +const struct efi_guid dmc620_ecc_event_guid = { + 0x5ef0afd5, 0xe01a, 0x4c30, + {0x86, 0x19, 0x45, 0x46, 0x26, 0x91, 0x80, 0x98} +}; + +struct sgi_ras_ev_map sgi575_ras_map[] = { + + /* DMC 0 error ECC error interrupt*/ + {SGI_SDEI_DS_EVENT_0, 35}, + + /* DMC 1 error ECC error interrupt*/ + {SGI_SDEI_DS_EVENT_1, 39}, +}; + +#define SGI575_RAS_MAP_SIZE ARRAY_SIZE(sgi575_ras_map) + +struct err_record_info sgi_err_records[] = { + { + /* DMC 0 error record info */ + .handler = &sgi_ras_intr_handler, + .aux_data = (void *)0, + }, { + /* DMC 1 error record info */ + .handler = &sgi_ras_intr_handler, + .aux_data = (void *)1, + }, +}; + +struct ras_interrupt sgi_ras_interrupts[] = { + { + .intr_number = 35, + .err_record = &sgi_err_records[0], + }, { + .intr_number = 39, + .err_record = &sgi_err_records[1], + } +}; + +REGISTER_ERR_RECORD_INFO(sgi_err_records); +REGISTER_RAS_INTERRUPTS(sgi_ras_interrupts); + +static struct sgi_ras_ev_map *plat_sgi_get_ras_ev_map(void) +{ + return sgi575_ras_map; +} + +static int plat_sgi_get_ras_ev_map_size(void) +{ + return SGI575_RAS_MAP_SIZE; +} + +/* + * Find event mapping for a given interrupt number: On success, returns pointer + * to the event mapping. On error, returns NULL. + */ +static struct sgi_ras_ev_map *find_ras_event_map_by_intr(uint32_t intr_num) +{ + struct sgi_ras_ev_map *map = plat_sgi_get_ras_ev_map(); + int i; + int size = plat_sgi_get_ras_ev_map_size(); + + for (i = 0; i < size; i++) { + if (map->intr == intr_num) + return map; + + map++; + } + + return NULL; +} + +static void sgi_ras_intr_configure(int intr) +{ + plat_ic_set_interrupt_type(intr, INTR_TYPE_EL3); + plat_ic_set_interrupt_priority(intr, PLAT_RAS_PRI); + plat_ic_clear_interrupt_pending(intr); + plat_ic_set_spi_routing(intr, INTR_ROUTING_MODE_ANY, + (u_register_t)read_mpidr_el1()); + plat_ic_enable_interrupt(intr); +} + +static int sgi_ras_intr_handler(const struct err_record_info *err_rec, + int probe_data, + const struct err_handler_data *const data) +{ + struct sgi_ras_ev_map *ras_map; + mm_communicate_header_t *header; + uint32_t intr; + int ret; + + cm_el1_sysregs_context_save(NON_SECURE); + intr = data->interrupt; + + /* + * Find if this is a RAS interrupt. There must be an event against + * this interrupt + */ + ras_map = find_ras_event_map_by_intr(intr); + assert(ras_map != NULL); + + /* + * Populate the MM_COMMUNICATE payload to share the + * event info with StandaloneMM code. This allows us to use + * MM_COMMUNICATE as a common entry mechanism into S-EL0. The + * header data will be parsed in StandaloneMM to process the + * corresponding event. + * + * TBD - Currently, the buffer allocated by SPM for communication + * between EL3 and S-EL0 is being used(PLAT_SPM_BUF_BASE). But this + * should happen via a dynamic mem allocation, which should be + * managed by SPM -- the individual platforms then call the mem + * alloc api to get memory for the payload. + */ + header = (void *) PLAT_SPM_BUF_BASE; + memset(header, 0, sizeof(*header)); + memcpy(&header->data, &err_rec->aux_data, sizeof(err_rec->aux_data)); + header->message_len = sizeof(err_rec->aux_data); + memcpy(&header->header_guid, (void *) &dmc620_ecc_event_guid, + sizeof(const struct efi_guid)); + + spm_mm_sp_call(MM_COMMUNICATE_AARCH64, (uint64_t)header, 0, + plat_my_core_pos()); + + /* + * Do an EOI of the RAS interrupt. This allows the + * sdei event to be dispatched at the SDEI event's + * priority. 
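+ * Without the EOI the RAS interrupt would remain active and the running
+ * priority would stay at the (higher) RAS priority, blocking dispatch of
+ * the lower-priority SDEI event.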
+ */
+ plat_ic_end_of_interrupt(intr);
+
+ /* Dispatch the event to the SDEI client */
+ ret = sdei_dispatch_event(ras_map->sdei_ev_num);
+ if (ret != 0) {
+ /*
+ * sdei_dispatch_event() may return a failing result in some cases,
+ * for example if the kernel has not registered a handler or if the
+ * RAS event occurs early during boot. We restore the NS context
+ * when sdei_dispatch_event() fails.
+ */
+ ERROR("SDEI dispatch failed: %d\n", ret);
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+ }
+
+ return ret;
+}
+
+int sgi_ras_intr_handler_setup(void)
+{
+ int i;
+ struct sgi_ras_ev_map *map = plat_sgi_get_ras_ev_map();
+ int size = plat_sgi_get_ras_ev_map_size();
+
+ for (i = 0; i < size; i++) {
+ sgi_ras_intr_configure(map->intr);
+ map++;
+ }
+
+ INFO("SGI: RAS Interrupt Handler successfully registered\n");
+
+ return 0;
+} diff --git a/plat/arm/css/sgi/sgi_topology.c b/plat/arm/css/sgi/sgi_topology.c new file mode 100644 index 0000000..1c3b5bf --- /dev/null +++ b/plat/arm/css/sgi/sgi_topology.c @@ -0,0 +1,29 @@ +/*
+ * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat/arm/common/plat_arm.h>
+
+/*
+ * Common topology-related methods for SGI and RD based platforms
+ */
+/*******************************************************************************
+ * This function returns the core count within the cluster corresponding to
+ * `mpidr`.
+ ******************************************************************************/
+unsigned int plat_arm_get_cluster_core_count(u_register_t mpidr)
+{
+ return CSS_SGI_MAX_CPUS_PER_CLUSTER;
+}
+
+#if ARM_PLAT_MT
+/******************************************************************************
+ * Return the number of PEs supported by the CPU.
+ *****************************************************************************/
+unsigned int plat_arm_get_cpu_pe_count(u_register_t mpidr)
+{
+ return CSS_SGI_MAX_PE_PER_CPU;
+}
+#endif