Diffstat
-rw-r--r--  lib/psci/aarch32/psci_helpers.S   |  149
-rw-r--r--  lib/psci/aarch64/psci_helpers.S   |  134
-rw-r--r--  lib/psci/aarch64/runtime_errata.S |   27
-rw-r--r--  lib/psci/psci_common.c            | 1285
-rw-r--r--  lib/psci/psci_lib.mk              |   36
-rw-r--r--  lib/psci/psci_main.c              |  606
-rw-r--r--  lib/psci/psci_mem_protect.c       |   41
-rw-r--r--  lib/psci/psci_off.c               |  198
-rw-r--r--  lib/psci/psci_on.c                |  220
-rw-r--r--  lib/psci/psci_private.h           |  376
-rw-r--r--  lib/psci/psci_setup.c             |  318
-rw-r--r--  lib/psci/psci_stat.c              |  258
-rw-r--r--  lib/psci/psci_suspend.c           |  370
-rw-r--r--  lib/psci/psci_system_off.c        |   85
14 files changed, 4103 insertions, 0 deletions
diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S new file mode 100644 index 0000000..4e1013c --- /dev/null +++ b/lib/psci/aarch32/psci_helpers.S @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <asm_macros.S> +#include <lib/psci/psci.h> +#include <platform_def.h> + + .globl psci_do_pwrdown_cache_maintenance + .globl psci_do_pwrup_cache_maintenance + .globl psci_power_down_wfi + +/* ----------------------------------------------------------------------- + * void psci_do_pwrdown_cache_maintenance(unsigned int power level); + * + * This function performs cache maintenance for the specified power + * level. The levels of cache affected are determined by the power + * level which is passed as the argument i.e. level 0 results + * in a flush of the L1 cache. Both the L1 and L2 caches are flushed + * for a higher power level. + * + * Additionally, this function also ensures that stack memory is correctly + * flushed out to avoid coherency issues due to a change in its memory + * attributes after the data cache is disabled. + * ----------------------------------------------------------------------- + */ +func psci_do_pwrdown_cache_maintenance + push {r4, lr} + + /* ---------------------------------------------- + * Turn OFF cache and do stack maintenance + * prior to cpu operations . This sequence is + * different from AArch64 because in AArch32 the + * assembler routines for cpu operations utilize + * the stack whereas in AArch64 it doesn't. + * ---------------------------------------------- + */ + mov r4, r0 + bl do_stack_maintenance + + /* --------------------------------------------- + * Invoke CPU-specifc power down operations for + * the appropriate level + * --------------------------------------------- + */ + mov r0, r4 + pop {r4, lr} + b prepare_cpu_pwr_dwn +endfunc psci_do_pwrdown_cache_maintenance + + +/* ----------------------------------------------------------------------- + * void psci_do_pwrup_cache_maintenance(void); + * + * This function performs cache maintenance after this cpu is powered up. + * Currently, this involves managing the used stack memory before turning + * on the data cache. + * ----------------------------------------------------------------------- + */ +func psci_do_pwrup_cache_maintenance + /* r12 is pushed to meet the 8 byte stack alignment requirement */ + push {r12, lr} + + /* --------------------------------------------- + * Ensure any inflight stack writes have made it + * to main memory. + * --------------------------------------------- + */ + dmb st + + /* --------------------------------------------- + * Calculate and store the size of the used + * stack memory in r1. Calculate and store the + * stack base address in r0. + * --------------------------------------------- + */ + bl plat_get_my_stack + mov r1, sp + sub r1, r0, r1 + mov r0, sp + bl inv_dcache_range + + /* --------------------------------------------- + * Enable the data cache. + * --------------------------------------------- + */ + ldcopr r0, SCTLR + orr r0, r0, #SCTLR_C_BIT + stcopr r0, SCTLR + isb + + pop {r12, pc} +endfunc psci_do_pwrup_cache_maintenance + + /* --------------------------------------------- + * void do_stack_maintenance(void) + * Do stack maintenance by flushing the used + * stack to the main memory and invalidating the + * remainder. 
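/*
 * [Editor's illustration -- not part of this patch] In C terms, the
 * flush/invalidate split described above works out as below, assuming a
 * full, descending stack whose top-of-stack address is what
 * plat_get_my_stack() returns. The function name is hypothetical.
 */
static void stack_maintenance_sketch(uintptr_t stack_top, uintptr_t sp)
{
	uintptr_t stack_limit = stack_top - PLATFORM_STACK_SIZE;

	/* Write the live frames, [sp, stack_top), back to main memory */
	flush_dcache_range(sp, stack_top - sp);

	/* Discard stale lines covering the unused part, [stack_limit, sp) */
	inv_dcache_range(stack_limit, sp - stack_limit);
}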
+ * --------------------------------------------- + */ +func do_stack_maintenance + push {r4, lr} + bl plat_get_my_stack + + /* Turn off the D-cache */ + ldcopr r1, SCTLR + bic r1, #SCTLR_C_BIT + stcopr r1, SCTLR + isb + + /* --------------------------------------------- + * Calculate and store the size of the used + * stack memory in r1. + * --------------------------------------------- + */ + mov r4, r0 + mov r1, sp + sub r1, r0, r1 + mov r0, sp + bl flush_dcache_range + + /* --------------------------------------------- + * Calculate and store the size of the unused + * stack memory in r1. Calculate and store the + * stack base address in r0. + * --------------------------------------------- + */ + sub r0, r4, #PLATFORM_STACK_SIZE + sub r1, sp, r0 + bl inv_dcache_range + + pop {r4, pc} +endfunc do_stack_maintenance + +/* ----------------------------------------------------------------------- + * This function is called to indicate to the power controller that it + * is safe to power down this cpu. It should not exit the wfi and will + * be released from reset upon power up. + * ----------------------------------------------------------------------- + */ +func psci_power_down_wfi + dsb sy // ensure write buffer empty +1: + wfi + b 1b +endfunc psci_power_down_wfi diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S new file mode 100644 index 0000000..3b77ab2 --- /dev/null +++ b/lib/psci/aarch64/psci_helpers.S @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <asm_macros.S> +#include <assert_macros.S> +#include <lib/psci/psci.h> +#include <platform_def.h> + + .globl psci_do_pwrdown_cache_maintenance + .globl psci_do_pwrup_cache_maintenance + .globl psci_power_down_wfi + +/* ----------------------------------------------------------------------- + * void psci_do_pwrdown_cache_maintenance(unsigned int power level); + * + * This function performs cache maintenance for the specified power + * level. The levels of cache affected are determined by the power + * level which is passed as the argument i.e. level 0 results + * in a flush of the L1 cache. Both the L1 and L2 caches are flushed + * for a higher power level. + * + * Additionally, this function also ensures that stack memory is correctly + * flushed out to avoid coherency issues due to a change in its memory + * attributes after the data cache is disabled. + * ----------------------------------------------------------------------- + */ +func psci_do_pwrdown_cache_maintenance + stp x29, x30, [sp,#-16]! + stp x19, x20, [sp,#-16]! + + /* --------------------------------------------- + * Invoke CPU-specific power down operations for + * the appropriate level + * --------------------------------------------- + */ + bl prepare_cpu_pwr_dwn + + /* --------------------------------------------- + * Do stack maintenance by flushing the used + * stack to the main memory and invalidating the + * remainder. + * --------------------------------------------- + */ + bl plat_get_my_stack + + /* --------------------------------------------- + * Calculate and store the size of the used + * stack memory in x1. + * --------------------------------------------- + */ + mov x19, x0 + mov x1, sp + sub x1, x0, x1 + mov x0, sp + bl flush_dcache_range + + /* --------------------------------------------- + * Calculate and store the size of the unused + * stack memory in x1. Calculate and store the + * stack base address in x0. 
+ * --------------------------------------------- + */ + sub x0, x19, #PLATFORM_STACK_SIZE + sub x1, sp, x0 + bl inv_dcache_range + + ldp x19, x20, [sp], #16 + ldp x29, x30, [sp], #16 + ret +endfunc psci_do_pwrdown_cache_maintenance + + +/* ----------------------------------------------------------------------- + * void psci_do_pwrup_cache_maintenance(void); + * + * This function performs cache maintenance after this cpu is powered up. + * Currently, this involves managing the used stack memory before turning + * on the data cache. + * ----------------------------------------------------------------------- + */ +func psci_do_pwrup_cache_maintenance + stp x29, x30, [sp,#-16]! + + /* --------------------------------------------- + * Ensure any inflight stack writes have made it + * to main memory. + * --------------------------------------------- + */ + dmb st + + /* --------------------------------------------- + * Calculate and store the size of the used + * stack memory in x1. Calculate and store the + * stack base address in x0. + * --------------------------------------------- + */ + bl plat_get_my_stack + mov x1, sp + sub x1, x0, x1 + mov x0, sp + bl inv_dcache_range + + /* --------------------------------------------- + * Enable the data cache. + * --------------------------------------------- + */ + mrs x0, sctlr_el3 + orr x0, x0, #SCTLR_C_BIT + msr sctlr_el3, x0 + isb + + ldp x29, x30, [sp], #16 + ret +endfunc psci_do_pwrup_cache_maintenance + +/* ----------------------------------------------------------------------- + * void psci_power_down_wfi(void); + * This function is called to indicate to the power controller that it + * is safe to power down this cpu. It should not exit the wfi and will + * be released from reset upon power up. + * ----------------------------------------------------------------------- + */ +func psci_power_down_wfi +#if ERRATA_A510_2684597 + bl apply_cpu_pwr_dwn_errata +#endif + dsb sy // ensure write buffer empty +1: + wfi + b 1b +endfunc psci_power_down_wfi diff --git a/lib/psci/aarch64/runtime_errata.S b/lib/psci/aarch64/runtime_errata.S new file mode 100644 index 0000000..89e3e12 --- /dev/null +++ b/lib/psci/aarch64/runtime_errata.S @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2023, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <asm_macros.S> +#include <cortex_a510.h> +#include <cpu_macros.S> + +/* + * void apply_cpu_pwr_dwn_errata(void); + * + * This function applies various CPU errata during power down. + */ + .globl apply_cpu_pwr_dwn_errata +func apply_cpu_pwr_dwn_errata + mov x19, x30 + bl cpu_get_rev_var + mov x18, x0 + +#if ERRATA_A510_2684597 + bl erratum_cortex_a510_2684597_wa +#endif + + ret x19 +endfunc apply_cpu_pwr_dwn_errata diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c new file mode 100644 index 0000000..f9de432 --- /dev/null +++ b/lib/psci/psci_common.c @@ -0,0 +1,1285 @@ +/* + * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <string.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <context.h> +#include <drivers/delay_timer.h> +#include <lib/el3_runtime/context_mgmt.h> +#include <lib/utils.h> +#include <plat/common/platform.h> + +#include "psci_private.h" + +/* + * SPD power management operations, expected to be supplied by the registered + * SPD on successful SP initialization + */ +const spd_pm_ops_t *psci_spd_pm; + +/* + * PSCI requested local power state map. This array is used to store the local + * power states requested by a CPU for power levels from level 1 to + * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power + * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a + * CPU are the same. + * + * During state coordination, the platform is passed an array containing the + * local states requested for a particular non cpu power domain by each cpu + * within the domain. + * + * TODO: Dense packing of the requested states will cause cache thrashing + * when multiple power domains write to it. If we allocate the requested + * states at each power level in a cache-line aligned per-domain memory, + * the cache thrashing can be avoided. + */ +static plat_local_state_t + psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT]; + +unsigned int psci_plat_core_count; + +/******************************************************************************* + * Arrays that hold the platform's power domain tree information for state + * management of power domains. + * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain + * which is an ancestor of a CPU power domain. + * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain + ******************************************************************************/ +non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS] +#if USE_COHERENT_MEM +__section(".tzfw_coherent_mem") +#endif +; + +/* Lock for PSCI state coordination */ +DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]); + +cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT]; + +/******************************************************************************* + * Pointer to functions exported by the platform to complete power mgmt. ops + ******************************************************************************/ +const plat_psci_ops_t *psci_plat_pm_ops; + +/****************************************************************************** + * Check that the maximum power level supported by the platform makes sense + *****************************************************************************/ +CASSERT((PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL) && + (PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL), + assert_platform_max_pwrlvl_check); + +#if PSCI_OS_INIT_MODE +/******************************************************************************* + * The power state coordination mode used in CPU_SUSPEND. + * Defaults to platform-coordinated mode. + ******************************************************************************/ +suspend_mode_t psci_suspend_mode = PLAT_COORD; +#endif + +/* + * The plat_local_state used by the platform is one of these types: RUN, + * RETENTION and OFF. The platform can define further sub-states for each type + * apart from RUN. 
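/*
 * [Editor's illustration -- not part of this patch] One shape the cache
 * thrashing TODO earlier in this file could take: give each power domain its
 * own cache-line-aligned block of requested states so that concurrent
 * writers never share a line. CACHE_WRITEBACK_GRANULE is the usual TF-A
 * constant for the writeback granule; the type name is hypothetical.
 */
typedef struct aligned_req_states {
	plat_local_state_t states[PLATFORM_CORE_COUNT];
} __aligned(CACHE_WRITEBACK_GRANULE) aligned_req_states_t;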
This categorization is done to verify the sanity of the + * psci_power_state passed by the platform and to print debug information. The + * categorization is done on the basis of the following conditions: + * + * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN. + * + * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is + * STATE_TYPE_RETN. + * + * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is + * STATE_TYPE_OFF. + */ +typedef enum plat_local_state_type { + STATE_TYPE_RUN = 0, + STATE_TYPE_RETN, + STATE_TYPE_OFF +} plat_local_state_type_t; + +/* Function used to categorize plat_local_state. */ +static plat_local_state_type_t find_local_state_type(plat_local_state_t state) +{ + if (state != 0U) { + if (state > PLAT_MAX_RET_STATE) { + return STATE_TYPE_OFF; + } else { + return STATE_TYPE_RETN; + } + } else { + return STATE_TYPE_RUN; + } +} + +/****************************************************************************** + * Check that the maximum retention level supported by the platform is less + * than the maximum off level. + *****************************************************************************/ +CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, + assert_platform_max_off_and_retn_state_check); + +/****************************************************************************** + * This function ensures that the power state parameter in a CPU_SUSPEND request + * is valid. If so, it returns the requested states for each power level. + *****************************************************************************/ +int psci_validate_power_state(unsigned int power_state, + psci_power_state_t *state_info) +{ + /* Check SBZ bits in power state are zero */ + if (psci_check_power_state(power_state) != 0U) + return PSCI_E_INVALID_PARAMS; + + assert(psci_plat_pm_ops->validate_power_state != NULL); + + /* Validate the power_state using platform pm_ops */ + return psci_plat_pm_ops->validate_power_state(power_state, state_info); +} + +/****************************************************************************** + * This function retrieves the `psci_power_state_t` for system suspend from + * the platform. + *****************************************************************************/ +void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info) +{ + /* + * Assert that the required pm_ops hook is implemented to ensure that + * the capability detected during psci_setup() is valid. + */ + assert(psci_plat_pm_ops->get_sys_suspend_power_state != NULL); + + /* + * Query the platform for the power_state required for system suspend + */ + psci_plat_pm_ops->get_sys_suspend_power_state(state_info); +} + +#if PSCI_OS_INIT_MODE +/******************************************************************************* + * This function verifies that all the other cores at the 'end_pwrlvl' have been + * idled and the current CPU is the last running CPU at the 'end_pwrlvl'. + * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false) + * otherwise. 
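/*
 * [Worked example -- not part of this patch] Take a hypothetical two-cluster
 * system with four CPUs per cluster and PLAT_MAX_PWR_LVL == 2. A CPU calling
 * this check with end_pwrlvl == 1 walks up to its own cluster node, so the
 * scan below covers only that cluster's four CPUs; with end_pwrlvl == 2 the
 * walk reaches the system node and every CPU in the system is scanned.
 */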
+ ******************************************************************************/ +static bool psci_is_last_cpu_to_idle_at_pwrlvl(unsigned int end_pwrlvl) +{ + unsigned int my_idx, lvl, parent_idx; + unsigned int cpu_start_idx, ncpus, cpu_idx; + plat_local_state_t local_state; + + if (end_pwrlvl == PSCI_CPU_PWR_LVL) { + return true; + } + + my_idx = plat_my_core_pos(); + + /* Walk up the tree to the ancestor node at 'end_pwrlvl'. */ + parent_idx = psci_cpu_pd_nodes[my_idx].parent_node; + for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl < end_pwrlvl; lvl++) { + parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; + } + + cpu_start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx; + ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus; + + for (cpu_idx = cpu_start_idx; cpu_idx < cpu_start_idx + ncpus; + cpu_idx++) { + local_state = psci_get_cpu_local_state_by_idx(cpu_idx); + if (cpu_idx == my_idx) { + assert(is_local_state_run(local_state) != 0); + continue; + } + + if (is_local_state_run(local_state) != 0) { + return false; + } + } + + return true; +} +#endif + +/******************************************************************************* + * This function verifies that all the other cores in the system have been + * turned OFF and the current CPU is the last running CPU in the system. + * Returns true if the current CPU is the last ON CPU, false otherwise. + ******************************************************************************/ +bool psci_is_last_on_cpu(void) +{ + unsigned int cpu_idx, my_idx = plat_my_core_pos(); + + for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) { + if (cpu_idx == my_idx) { + assert(psci_get_aff_info_state() == AFF_STATE_ON); + continue; + } + + if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF) { + VERBOSE("core=%u other than current core=%u %s\n", + cpu_idx, my_idx, "running in the system"); + return false; + } + } + + return true; +} + +/******************************************************************************* + * This function verifies that all cores in the system have been turned ON. + * Returns true if all CPUs are ON, false otherwise. + ******************************************************************************/ +static bool psci_are_all_cpus_on(void) +{ + unsigned int cpu_idx; + + for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) { + if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_OFF) { + return false; + } + } + + return true; +} + +/******************************************************************************* + * Routine to return the maximum power level to traverse to after a cpu has + * been physically powered up. It is expected to be called immediately after + * reset from assembler code. + ******************************************************************************/ +static unsigned int get_power_on_target_pwrlvl(void) +{ + unsigned int pwrlvl; + + /* + * Assume that this cpu was suspended and retrieve its target power + * level. If it is invalid then it could only have been turned off + * earlier. PLAT_MAX_PWR_LVL will be the highest power level a + * cpu can be turned off to. + */ + pwrlvl = psci_get_suspend_pwrlvl(); + if (pwrlvl == PSCI_INVALID_PWR_LVL) + pwrlvl = PLAT_MAX_PWR_LVL; + assert(pwrlvl < PSCI_INVALID_PWR_LVL); + return pwrlvl; +} + +/****************************************************************************** + * Helper function to update the requested local power state array. This array + * does not store the requested state for the CPU power level. Hence an + * assertion is added to prevent us from accessing the CPU power level. 
+ *****************************************************************************/ +static void psci_set_req_local_pwr_state(unsigned int pwrlvl, + unsigned int cpu_idx, + plat_local_state_t req_pwr_state) +{ + assert(pwrlvl > PSCI_CPU_PWR_LVL); + if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) && + (cpu_idx < psci_plat_core_count)) { + psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state; + } +} + +/****************************************************************************** + * This function initializes the psci_req_local_pwr_states. + *****************************************************************************/ +void __init psci_init_req_local_pwr_states(void) +{ + /* Initialize the requested state of all non CPU power domains as OFF */ + unsigned int pwrlvl; + unsigned int core; + + for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) { + for (core = 0; core < psci_plat_core_count; core++) { + psci_req_local_pwr_states[pwrlvl][core] = + PLAT_MAX_OFF_STATE; + } + } +} + +/****************************************************************************** + * Helper function to return a reference to an array containing the local power + * states requested by each cpu for a power domain at 'pwrlvl'. The size of the + * array will be the number of cpu power domains of which this power domain is + * an ancestor. These requested states will be used to determine a suitable + * target state for this power domain during psci state coordination. An + * assertion is added to prevent us from accessing the CPU power level. + *****************************************************************************/ +static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl, + unsigned int cpu_idx) +{ + assert(pwrlvl > PSCI_CPU_PWR_LVL); + + if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) && + (cpu_idx < psci_plat_core_count)) { + return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx]; + } else + return NULL; +} + +#if PSCI_OS_INIT_MODE +/****************************************************************************** + * Helper function to save a copy of the psci_req_local_pwr_states (prev) for a + * CPU (cpu_idx), and update psci_req_local_pwr_states with the new requested + * local power states (state_info). + *****************************************************************************/ +void psci_update_req_local_pwr_states(unsigned int end_pwrlvl, + unsigned int cpu_idx, + psci_power_state_t *state_info, + plat_local_state_t *prev) +{ + unsigned int lvl; +#ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL + unsigned int max_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL; +#else + unsigned int max_pwrlvl = PLAT_MAX_PWR_LVL; +#endif + plat_local_state_t req_state; + + for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= max_pwrlvl; lvl++) { + /* Save the previous requested local power state */ + prev[lvl - 1U] = *psci_get_req_local_pwr_states(lvl, cpu_idx); + + /* Update the new requested local power state */ + if (lvl <= end_pwrlvl) { + req_state = state_info->pwr_domain_state[lvl]; + } else { + req_state = state_info->pwr_domain_state[end_pwrlvl]; + } + psci_set_req_local_pwr_state(lvl, cpu_idx, req_state); + } +} + +/****************************************************************************** + * Helper function to restore the previously saved requested local power states + * (prev) for a CPU (cpu_idx) to psci_req_local_pwr_states. 
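/*
 * [Usage sketch -- not part of this patch] Together, the update/restore pair
 * makes an OS-initiated request transactional: the caller saves the old
 * requests, lets the platform coordinate, and rolls back on failure, roughly:
 *
 *	plat_local_state_t prev[PLAT_MAX_PWR_LVL];
 *
 *	psci_update_req_local_pwr_states(end_pwrlvl, cpu_idx, state_info, prev);
 *	if (coordination_failed)	// hypothetical condition
 *		psci_restore_req_local_pwr_states(cpu_idx, prev);
 *
 * psci_validate_state_coordination() further down implements exactly this flow.
 */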
+ *****************************************************************************/ +void psci_restore_req_local_pwr_states(unsigned int cpu_idx, + plat_local_state_t *prev) +{ + unsigned int lvl; +#ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL + unsigned int max_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL; +#else + unsigned int max_pwrlvl = PLAT_MAX_PWR_LVL; +#endif + + for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= max_pwrlvl; lvl++) { + /* Restore the previous requested local power state */ + psci_set_req_local_pwr_state(lvl, cpu_idx, prev[lvl - 1U]); + } +} +#endif + +/* + * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent + * memory. + * + * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory, + * it's accessed by both cached and non-cached participants. To serve the common + * minimum, perform a cache flush before read and after write so that non-cached + * participants operate on latest data in main memory. + * + * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent + * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent. + * In both cases, no cache operations are required. + */ + +/* + * Retrieve local state of non-CPU power domain node from a non-cached CPU, + * after any required cache maintenance operation. + */ +static plat_local_state_t get_non_cpu_pd_node_local_state( + unsigned int parent_idx) +{ +#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + flush_dcache_range( + (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx], + sizeof(psci_non_cpu_pd_nodes[parent_idx])); +#endif + return psci_non_cpu_pd_nodes[parent_idx].local_state; +} + +/* + * Update local state of non-CPU power domain node from a cached CPU; perform + * any required cache maintenance operation afterwards. + */ +static void set_non_cpu_pd_node_local_state(unsigned int parent_idx, + plat_local_state_t state) +{ + psci_non_cpu_pd_nodes[parent_idx].local_state = state; +#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + flush_dcache_range( + (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx], + sizeof(psci_non_cpu_pd_nodes[parent_idx])); +#endif +} + +/****************************************************************************** + * Helper function to return the current local power state of each power domain + * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This + * function will be called after a cpu is powered on to find the local state + * each power domain has emerged from. 
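/*
 * [Worked example -- not part of this patch] A CPU waking from a
 * cluster-level suspend with end_pwrlvl == 2 might read back
 * { level0: OFF, level1: OFF, level2: RUN }: the core and its cluster were
 * powered down while the system level stayed on. The exact values depend on
 * the platform's plat_local_state encodings.
 */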
+ *****************************************************************************/ +void psci_get_target_local_pwr_states(unsigned int end_pwrlvl, + psci_power_state_t *target_state) +{ + unsigned int parent_idx, lvl; + plat_local_state_t *pd_state = target_state->pwr_domain_state; + + pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state(); + parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node; + + /* Copy the local power state from node to state_info */ + for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) { + pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx); + parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; + } + + /* Set the the higher levels to RUN */ + for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) + target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN; +} + +/****************************************************************************** + * Helper function to set the target local power state that each power domain + * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will + * enter. This function will be called after coordination of requested power + * states has been done for each power level. + *****************************************************************************/ +void psci_set_target_local_pwr_states(unsigned int end_pwrlvl, + const psci_power_state_t *target_state) +{ + unsigned int parent_idx, lvl; + const plat_local_state_t *pd_state = target_state->pwr_domain_state; + + psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]); + + /* + * Need to flush as local_state might be accessed with Data Cache + * disabled during power on + */ + psci_flush_cpu_data(psci_svc_cpu_data.local_state); + + parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node; + + /* Copy the local_state from state_info */ + for (lvl = 1U; lvl <= end_pwrlvl; lvl++) { + set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]); + parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; + } +} + +/******************************************************************************* + * PSCI helper function to get the parent nodes corresponding to a cpu_index. + ******************************************************************************/ +void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx, + unsigned int end_lvl, + unsigned int *node_index) +{ + unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node; + unsigned int i; + unsigned int *node = node_index; + + for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) { + *node = parent_node; + node++; + parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node; + } +} + +/****************************************************************************** + * This function is invoked post CPU power up and initialization. It sets the + * affinity info state, target power state and requested power state for the + * current CPU and all its ancestor power domains to RUN. + *****************************************************************************/ +void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl) +{ + unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl; + parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; + + /* Reset the local_state to RUN for the non cpu power domains. 
*/ + for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) { + set_non_cpu_pd_node_local_state(parent_idx, + PSCI_LOCAL_STATE_RUN); + psci_set_req_local_pwr_state(lvl, + cpu_idx, + PSCI_LOCAL_STATE_RUN); + parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; + } + + /* Set the affinity info state to ON */ + psci_set_aff_info_state(AFF_STATE_ON); + + psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN); + psci_flush_cpu_data(psci_svc_cpu_data); +} + +/****************************************************************************** + * This function is used in platform-coordinated mode. + * + * This function is passed the local power states requested for each power + * domain (state_info) between the current CPU domain and its ancestors until + * the target power level (end_pwrlvl). It updates the array of requested power + * states with this information. + * + * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it + * retrieves the states requested by all the cpus of which the power domain at + * that level is an ancestor. It passes this information to the platform to + * coordinate and return the target power state. If the target state for a level + * is RUN then subsequent levels are not considered. At the CPU level, state + * coordination is not required. Hence, the requested and the target states are + * the same. + * + * The 'state_info' is updated with the target state for each level between the + * CPU and the 'end_pwrlvl' and returned to the caller. + * + * This function will only be invoked with data cache enabled and while + * powering down a core. + *****************************************************************************/ +void psci_do_state_coordination(unsigned int end_pwrlvl, + psci_power_state_t *state_info) +{ + unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos(); + unsigned int start_idx; + unsigned int ncpus; + plat_local_state_t target_state, *req_states; + + assert(end_pwrlvl <= PLAT_MAX_PWR_LVL); + parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; + + /* For level 0, the requested state will be equivalent + to target state */ + for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) { + + /* First update the requested power state */ + psci_set_req_local_pwr_state(lvl, cpu_idx, + state_info->pwr_domain_state[lvl]); + + /* Get the requested power states for this power level */ + start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx; + req_states = psci_get_req_local_pwr_states(lvl, start_idx); + + /* + * Let the platform coordinate amongst the requested states at + * this power level and return the target local power state. + */ + ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus; + target_state = plat_get_target_pwr_state(lvl, + req_states, + ncpus); + + state_info->pwr_domain_state[lvl] = target_state; + + /* Break early if the negotiated target power state is RUN */ + if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0) + break; + + parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; + } + + /* + * This is for cases when we break out of the above loop early because + * the target power state is RUN at a power level < end_pwlvl. + * We update the requested power state from state_info and then + * set the target state as RUN. 
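/*
 * [Worked example -- not part of this patch] Suppose four CPUs share one
 * cluster, and CPU0 requests OFF at every level up to end_pwrlvl == 2 while
 * CPUs 1-3 still request RUN at level 1. plat_get_target_pwr_state() then
 * returns RUN for level 1, the coordination loop above breaks there, and the
 * loop below records CPU0's level-2 request while forcing the level-2 target
 * in state_info back to RUN.
 */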
+ */ + for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) { + psci_set_req_local_pwr_state(lvl, cpu_idx, + state_info->pwr_domain_state[lvl]); + state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN; + + } +} + +#if PSCI_OS_INIT_MODE +/****************************************************************************** + * This function is used in OS-initiated mode. + * + * This function is passed the local power states requested for each power + * domain (state_info) between the current CPU domain and its ancestors until + * the target power level (end_pwrlvl), and ensures the requested power states + * are valid. It updates the array of requested power states with this + * information. + * + * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it + * retrieves the states requested by all the cpus of which the power domain at + * that level is an ancestor. It passes this information to the platform to + * coordinate and return the target power state. If the requested state does + * not match the target state, the request is denied. + * + * The 'state_info' is not modified. + * + * This function will only be invoked with data cache enabled and while + * powering down a core. + *****************************************************************************/ +int psci_validate_state_coordination(unsigned int end_pwrlvl, + psci_power_state_t *state_info) +{ + int rc = PSCI_E_SUCCESS; + unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos(); + unsigned int start_idx; + unsigned int ncpus; + plat_local_state_t target_state, *req_states; + plat_local_state_t prev[PLAT_MAX_PWR_LVL]; + + assert(end_pwrlvl <= PLAT_MAX_PWR_LVL); + parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; + + /* + * Save a copy of the previous requested local power states and update + * the new requested local power states. + */ + psci_update_req_local_pwr_states(end_pwrlvl, cpu_idx, state_info, prev); + + for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) { + /* Get the requested power states for this power level */ + start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx; + req_states = psci_get_req_local_pwr_states(lvl, start_idx); + + /* + * Let the platform coordinate amongst the requested states at + * this power level and return the target local power state. + */ + ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus; + target_state = plat_get_target_pwr_state(lvl, + req_states, + ncpus); + + /* + * Verify that the requested power state matches the target + * local power state. + */ + if (state_info->pwr_domain_state[lvl] != target_state) { + if (target_state == PSCI_LOCAL_STATE_RUN) { + rc = PSCI_E_DENIED; + } else { + rc = PSCI_E_INVALID_PARAMS; + } + goto exit; + } + } + + /* + * Verify that the current core is the last running core at the + * specified power level. + */ + lvl = state_info->last_at_pwrlvl; + if (!psci_is_last_cpu_to_idle_at_pwrlvl(lvl)) { + rc = PSCI_E_DENIED; + } + +exit: + if (rc != PSCI_E_SUCCESS) { + /* Restore the previous requested local power states. */ + psci_restore_req_local_pwr_states(cpu_idx, prev); + return rc; + } + + return rc; +} +#endif + +/****************************************************************************** + * This function validates a suspend request by making sure that if a standby + * state is requested then no power level is turned off and the highest power + * level is placed in a standby/retention state. + * + * It also ensures that the state level X will enter is not shallower than the + * state level X + 1 will enter. 
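/*
 * [Worked example -- not part of this patch] On a platform with
 * PLAT_MAX_RET_STATE == 1 and PLAT_MAX_OFF_STATE == 2 (the values used by
 * Arm's FVP port), requesting { CPU: 2 (OFF), cluster: 1 (RETN) } passes this
 * check, since the lower level is at least as deep as its parent. Requesting
 * { CPU: 1 (RETN), cluster: 2 (OFF) } fails: a cluster cannot be OFF while a
 * CPU inside it is only retaining state.
 */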
+ * + * This validation will be enabled only for DEBUG builds as the platform is + * expected to perform these validations as well. + *****************************************************************************/ +int psci_validate_suspend_req(const psci_power_state_t *state_info, + unsigned int is_power_down_state) +{ + unsigned int max_off_lvl, target_lvl, max_retn_lvl; + plat_local_state_t state; + plat_local_state_type_t req_state_type, deepest_state_type; + int i; + + /* Find the target suspend power level */ + target_lvl = psci_find_target_suspend_lvl(state_info); + if (target_lvl == PSCI_INVALID_PWR_LVL) + return PSCI_E_INVALID_PARAMS; + + /* All power domain levels are in a RUN state to begin with */ + deepest_state_type = STATE_TYPE_RUN; + + for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) { + state = state_info->pwr_domain_state[i]; + req_state_type = find_local_state_type(state); + + /* + * While traversing from the highest power level to the lowest, + * the state requested for lower levels has to be the same or + * deeper i.e. equal to or greater than the state at the higher + * levels. If this condition is true, then the requested state + * becomes the deepest state encountered so far. + */ + if (req_state_type < deepest_state_type) + return PSCI_E_INVALID_PARAMS; + deepest_state_type = req_state_type; + } + + /* Find the highest off power level */ + max_off_lvl = psci_find_max_off_lvl(state_info); + + /* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */ + max_retn_lvl = PSCI_INVALID_PWR_LVL; + if (target_lvl != max_off_lvl) + max_retn_lvl = target_lvl; + + /* + * If this is not a request for a power down state then max off level + * has to be invalid and max retention level has to be a valid power + * level. + */ + if ((is_power_down_state == 0U) && + ((max_off_lvl != PSCI_INVALID_PWR_LVL) || + (max_retn_lvl == PSCI_INVALID_PWR_LVL))) + return PSCI_E_INVALID_PARAMS; + + return PSCI_E_SUCCESS; +} + +/****************************************************************************** + * This function finds the highest power level which will be powered down + * amongst all the power levels specified in the 'state_info' structure + *****************************************************************************/ +unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info) +{ + int i; + + for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) { + if (is_local_state_off(state_info->pwr_domain_state[i]) != 0) + return (unsigned int) i; + } + + return PSCI_INVALID_PWR_LVL; +} + +/****************************************************************************** + * This functions finds the level of the highest power domain which will be + * placed in a low power state during a suspend operation. + *****************************************************************************/ +unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info) +{ + int i; + + for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) { + if (is_local_state_run(state_info->pwr_domain_state[i]) == 0) + return (unsigned int) i; + } + + return PSCI_INVALID_PWR_LVL; +} + +/******************************************************************************* + * This function is passed the highest level in the topology tree that the + * operation should be applied to and a list of node indexes. It picks up locks + * from the node index list in order of increasing power domain level in the + * range specified. 
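/*
 * [Usage sketch -- not part of this patch] Acquiring locks bottom-up here and
 * releasing them top-down in psci_release_pwr_domain_locks() gives every CPU
 * the same global lock order, which is what keeps concurrent power operations
 * deadlock-free. Callers in this file follow the pattern:
 *
 *	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
 *
 *	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
 *	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
 *	// ... inspect/update topology state ...
 *	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
 */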
+ ******************************************************************************/ +void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl, + const unsigned int *parent_nodes) +{ + unsigned int parent_idx; + unsigned int level; + + /* No locking required for level 0. Hence start locking from level 1 */ + for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) { + parent_idx = parent_nodes[level - 1U]; + psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]); + } +} + +/******************************************************************************* + * This function is passed the highest level in the topology tree that the + * operation should be applied to and a list of node indexes. It releases the + * locks in order of decreasing power domain level in the range specified. + ******************************************************************************/ +void psci_release_pwr_domain_locks(unsigned int end_pwrlvl, + const unsigned int *parent_nodes) +{ + unsigned int parent_idx; + unsigned int level; + + /* Unlock top down. No unlocking required for level 0. */ + for (level = end_pwrlvl; level >= (PSCI_CPU_PWR_LVL + 1U); level--) { + parent_idx = parent_nodes[level - 1U]; + psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]); + } +} + +/******************************************************************************* + * This function determines the full entrypoint information for the requested + * PSCI entrypoint on power on/resume and returns it. + ******************************************************************************/ +#ifdef __aarch64__ +static int psci_get_ns_ep_info(entry_point_info_t *ep, + uintptr_t entrypoint, + u_register_t context_id) +{ + u_register_t ep_attr, sctlr; + unsigned int daif, ee, mode; + u_register_t ns_scr_el3 = read_scr_el3(); + u_register_t ns_sctlr_el1 = read_sctlr_el1(); + + sctlr = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? + read_sctlr_el2() : ns_sctlr_el1; + ee = 0; + + ep_attr = NON_SECURE | EP_ST_DISABLE; + if ((sctlr & SCTLR_EE_BIT) != 0U) { + ep_attr |= EP_EE_BIG; + ee = 1; + } + SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr); + + ep->pc = entrypoint; + zeromem(&ep->args, sizeof(ep->args)); + ep->args.arg0 = context_id; + + /* + * Figure out whether the cpu enters the non-secure address space + * in aarch32 or aarch64 + */ + if ((ns_scr_el3 & SCR_RW_BIT) != 0U) { + + /* + * Check whether a Thumb entry point has been provided for an + * aarch64 EL + */ + if ((entrypoint & 0x1UL) != 0UL) + return PSCI_E_INVALID_ADDRESS; + + mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1; + + ep->spsr = SPSR_64((uint64_t)mode, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS); + } else { + + mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? + MODE32_hyp : MODE32_svc; + + /* + * TODO: Choose async. exception bits if HYP mode is not + * implemented according to the values of SCR.{AW, FW} bits + */ + daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT; + + ep->spsr = SPSR_MODE32((uint64_t)mode, entrypoint & 0x1, ee, + daif); + } + + return PSCI_E_SUCCESS; +} +#else /* !__aarch64__ */ +static int psci_get_ns_ep_info(entry_point_info_t *ep, + uintptr_t entrypoint, + u_register_t context_id) +{ + u_register_t ep_attr; + unsigned int aif, ee, mode; + u_register_t scr = read_scr(); + u_register_t ns_sctlr, sctlr; + + /* Switch to non secure state */ + write_scr(scr | SCR_NS_BIT); + isb(); + ns_sctlr = read_sctlr(); + + sctlr = scr & SCR_HCE_BIT ? 
read_hsctlr() : ns_sctlr; + + /* Return to original state */ + write_scr(scr); + isb(); + ee = 0; + + ep_attr = NON_SECURE | EP_ST_DISABLE; + if (sctlr & SCTLR_EE_BIT) { + ep_attr |= EP_EE_BIG; + ee = 1; + } + SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr); + + ep->pc = entrypoint; + zeromem(&ep->args, sizeof(ep->args)); + ep->args.arg0 = context_id; + + mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc; + + /* + * TODO: Choose async. exception bits if HYP mode is not + * implemented according to the values of SCR.{AW, FW} bits + */ + aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT; + + ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif); + + return PSCI_E_SUCCESS; +} + +#endif /* __aarch64__ */ + +/******************************************************************************* + * This function validates the entrypoint with the platform layer if the + * appropriate pm_ops hook is exported by the platform and returns the + * 'entry_point_info'. + ******************************************************************************/ +int psci_validate_entry_point(entry_point_info_t *ep, + uintptr_t entrypoint, + u_register_t context_id) +{ + int rc; + + /* Validate the entrypoint using platform psci_ops */ + if (psci_plat_pm_ops->validate_ns_entrypoint != NULL) { + rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint); + if (rc != PSCI_E_SUCCESS) + return PSCI_E_INVALID_ADDRESS; + } + + /* + * Verify and derive the re-entry information for + * the non-secure world from the non-secure state from + * where this call originated. + */ + rc = psci_get_ns_ep_info(ep, entrypoint, context_id); + return rc; +} + +/******************************************************************************* + * Generic handler which is called when a cpu is physically powered on. It + * traverses the node information and finds the highest power level powered + * off and performs generic, architectural, platform setup and state management + * to power on that power level and power levels below it. + * e.g. For a cpu that's been powered on, it will call the platform specific + * code to enable the gic cpu interface and for a cluster it will enable + * coherency at the interconnect level in addition to gic cpu interface. + ******************************************************************************/ +void psci_warmboot_entrypoint(void) +{ + unsigned int end_pwrlvl; + unsigned int cpu_idx = plat_my_core_pos(); + unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0}; + psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} }; + + /* Init registers that never change for the lifetime of TF-A */ + cm_manage_extensions_el3(); + + /* + * Verify that we have been explicitly turned ON or resumed from + * suspend. + */ + if (psci_get_aff_info_state() == AFF_STATE_OFF) { + ERROR("Unexpected affinity info state.\n"); + panic(); + } + + /* + * Get the maximum power domain level to traverse to after this cpu + * has been physically powered up. + */ + end_pwrlvl = get_power_on_target_pwrlvl(); + + /* Get the parent nodes */ + psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes); + + /* + * This function acquires the lock corresponding to each power level so + * that by the time all locks are taken, the system topology is snapshot + * and state management can be done safely. 
+ */ + psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes); + + psci_get_target_local_pwr_states(end_pwrlvl, &state_info); + +#if ENABLE_PSCI_STAT + plat_psci_stat_accounting_stop(&state_info); +#endif + + /* + * This CPU could be resuming from suspend or it could have just been + * turned on. To distinguish between these 2 cases, we examine the + * affinity state of the CPU: + * - If the affinity state is ON_PENDING then it has just been + * turned on. + * - Else it is resuming from suspend. + * + * Depending on the type of warm reset identified, choose the right set + * of power management handler and perform the generic, architecture + * and platform specific handling. + */ + if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING) + psci_cpu_on_finish(cpu_idx, &state_info); + else + psci_cpu_suspend_finish(cpu_idx, &state_info); + + /* + * Generic management: Now we just need to retrieve the + * information that we had stashed away during the cpu_on + * call to set this cpu on its way. + */ + cm_prepare_el3_exit_ns(); + + /* + * Set the requested and target state of this CPU and all the higher + * power domains which are ancestors of this CPU to run. + */ + psci_set_pwr_domains_to_run(end_pwrlvl); + +#if ENABLE_PSCI_STAT + /* + * Update PSCI stats. + * Caches are off when writing stats data on the power down path. + * Since caches are now enabled, it's necessary to do cache + * maintenance before reading that same data. + */ + psci_stats_update_pwr_up(end_pwrlvl, &state_info); +#endif + + /* + * This loop releases the lock corresponding to each power level + * in the reverse order to which they were acquired. + */ + psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes); +} + +/******************************************************************************* + * This function initializes the set of hooks that PSCI invokes as part of power + * management operation. The power management hooks are expected to be provided + * by the SPD, after it finishes all its initialization + ******************************************************************************/ +void psci_register_spd_pm_hook(const spd_pm_ops_t *pm) +{ + assert(pm != NULL); + psci_spd_pm = pm; + + if (pm->svc_migrate != NULL) + psci_caps |= define_psci_cap(PSCI_MIG_AARCH64); + + if (pm->svc_migrate_info != NULL) + psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) + | define_psci_cap(PSCI_MIG_INFO_TYPE); +} + +/******************************************************************************* + * This function invokes the migrate info hook in the spd_pm_ops. It performs + * the necessary return value validation. If the Secure Payload is UP and + * migrate capable, it returns the mpidr of the CPU on which the Secure payload + * is resident through the mpidr parameter. Else the value of the parameter on + * return is undefined. 
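/*
 * [Usage sketch -- not part of this patch] A caller distinguishing a
 * uniprocessor, migrate-capable Trusted OS would do roughly:
 *
 *	u_register_t resident_mpidr;
 *	int type = psci_spd_migrate_info(&resident_mpidr);
 *
 *	if (type == PSCI_TOS_UP_MIG_CAP) {
 *		// resident_mpidr is valid: it names the core hosting the SP
 *	}
 */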
+ ******************************************************************************/ +int psci_spd_migrate_info(u_register_t *mpidr) +{ + int rc; + + if ((psci_spd_pm == NULL) || (psci_spd_pm->svc_migrate_info == NULL)) + return PSCI_E_NOT_SUPPORTED; + + rc = psci_spd_pm->svc_migrate_info(mpidr); + + assert((rc == PSCI_TOS_UP_MIG_CAP) || (rc == PSCI_TOS_NOT_UP_MIG_CAP) || + (rc == PSCI_TOS_NOT_PRESENT_MP) || (rc == PSCI_E_NOT_SUPPORTED)); + + return rc; +} + + +/******************************************************************************* + * This function prints the state of all power domains present in the + * system. + ******************************************************************************/ +void psci_print_power_domain_map(void) +{ +#if LOG_LEVEL >= LOG_LEVEL_INFO + unsigned int idx; + plat_local_state_t state; + plat_local_state_type_t state_type; + + /* This array maps to the PSCI_STATE_X definitions in psci.h */ + static const char * const psci_state_type_str[] = { + "ON", + "RETENTION", + "OFF", + }; + + INFO("PSCI Power Domain Map:\n"); + for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - psci_plat_core_count); + idx++) { + state_type = find_local_state_type( + psci_non_cpu_pd_nodes[idx].local_state); + INFO(" Domain Node : Level %u, parent_node %u," + " State %s (0x%x)\n", + psci_non_cpu_pd_nodes[idx].level, + psci_non_cpu_pd_nodes[idx].parent_node, + psci_state_type_str[state_type], + psci_non_cpu_pd_nodes[idx].local_state); + } + + for (idx = 0; idx < psci_plat_core_count; idx++) { + state = psci_get_cpu_local_state_by_idx(idx); + state_type = find_local_state_type(state); + INFO(" CPU Node : MPID 0x%llx, parent_node %u," + " State %s (0x%x)\n", + (unsigned long long)psci_cpu_pd_nodes[idx].mpidr, + psci_cpu_pd_nodes[idx].parent_node, + psci_state_type_str[state_type], + psci_get_cpu_local_state_by_idx(idx)); + } +#endif +} + +/****************************************************************************** + * Return whether any secondaries were powered up with the CPU_ON call. A CPU + * that has ever been powered up would have set its MPIDR value to something + * other than PSCI_INVALID_MPIDR. Note that the MPIDR isn't reset back to + * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is + * meaningful only when called on the primary CPU during early boot. + *****************************************************************************/ +int psci_secondaries_brought_up(void) +{ + unsigned int idx, n_valid = 0U; + + for (idx = 0U; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) { + if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR) + n_valid++; + } + + assert(n_valid > 0U); + + return (n_valid > 1U) ? 1 : 0; +} + +/******************************************************************************* + * Initiate the power down sequence by calling the power down operations + * registered for this CPU. + ******************************************************************************/ +void psci_pwrdown_cpu(unsigned int power_level) +{ +#if HW_ASSISTED_COHERENCY + /* + * With hardware-assisted coherency, the CPU drivers only initiate the + * power down sequence, without performing cache-maintenance operations + * in software. Data caches are enabled both before and after this call. + */ + prepare_cpu_pwr_dwn(power_level); +#else + /* + * Without hardware-assisted coherency, the CPU drivers disable data + * caches, then perform cache-maintenance operations in software. 
+ * + * This also calls prepare_cpu_pwr_dwn() to initiate power down + * sequence, but that function will return with data caches disabled. + * We must ensure that the stack memory is flushed out to memory before + * we start popping from it again. + */ + psci_do_pwrdown_cache_maintenance(power_level); +#endif +} + +/******************************************************************************* + * This function invokes the callback 'stop_func()' with the 'mpidr' of each + * online PE. Caller can pass suitable method to stop a remote core. + * + * 'wait_ms' is the timeout value in milliseconds for the other cores to + * transition to power down state. Passing '0' makes it non-blocking. + * + * The function returns 'PSCI_E_DENIED' if some cores failed to stop within the + * given timeout. + ******************************************************************************/ +int psci_stop_other_cores(unsigned int wait_ms, + void (*stop_func)(u_register_t mpidr)) +{ + unsigned int idx, this_cpu_idx; + + this_cpu_idx = plat_my_core_pos(); + + /* Invoke stop_func for each core */ + for (idx = 0U; idx < psci_plat_core_count; idx++) { + /* skip current CPU */ + if (idx == this_cpu_idx) { + continue; + } + + /* Check if the CPU is ON */ + if (psci_get_aff_info_state_by_idx(idx) == AFF_STATE_ON) { + (*stop_func)(psci_cpu_pd_nodes[idx].mpidr); + } + } + + /* Need to wait for other cores to shutdown */ + if (wait_ms != 0U) { + while ((wait_ms-- != 0U) && (!psci_is_last_on_cpu())) { + mdelay(1U); + } + + if (!psci_is_last_on_cpu()) { + WARN("Failed to stop all cores!\n"); + psci_print_power_domain_map(); + return PSCI_E_DENIED; + } + } + + return PSCI_E_SUCCESS; +} + +/******************************************************************************* + * This function verifies that all the other cores in the system have been + * turned OFF and the current CPU is the last running CPU in the system. + * Returns true if the current CPU is the last ON CPU or false otherwise. + * + * This API has following differences with psci_is_last_on_cpu + * 1. PSCI states are locked + ******************************************************************************/ +bool psci_is_last_on_cpu_safe(void) +{ + unsigned int this_core = plat_my_core_pos(); + unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0}; + + psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes); + + psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes); + + if (!psci_is_last_on_cpu()) { + psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes); + return false; + } + + psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes); + + return true; +} + +/******************************************************************************* + * This function verifies that all cores in the system have been turned ON. + * Returns true, if all CPUs are ON or false otherwise. + * + * This API has following differences with psci_are_all_cpus_on + * 1. 
PSCI states are locked + ******************************************************************************/ +bool psci_are_all_cpus_on_safe(void) +{ + unsigned int this_core = plat_my_core_pos(); + unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0}; + + psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes); + + psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes); + + if (!psci_are_all_cpus_on()) { + psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes); + return false; + } + + psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes); + + return true; +} diff --git a/lib/psci/psci_lib.mk b/lib/psci/psci_lib.mk new file mode 100644 index 0000000..c71580f --- /dev/null +++ b/lib/psci/psci_lib.mk @@ -0,0 +1,36 @@ +# +# Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \ + lib/el3_runtime/${ARCH}/cpu_data.S \ + lib/el3_runtime/${ARCH}/context_mgmt.c \ + lib/cpus/${ARCH}/cpu_helpers.S \ + lib/cpus/errata_report.c \ + lib/locks/exclusive/${ARCH}/spinlock.S \ + lib/psci/psci_off.c \ + lib/psci/psci_on.c \ + lib/psci/psci_suspend.c \ + lib/psci/psci_common.c \ + lib/psci/psci_main.c \ + lib/psci/psci_setup.c \ + lib/psci/psci_system_off.c \ + lib/psci/psci_mem_protect.c \ + lib/psci/${ARCH}/psci_helpers.S + +ifeq (${ARCH}, aarch64) +PSCI_LIB_SOURCES += lib/el3_runtime/aarch64/context.S \ + lib/psci/aarch64/runtime_errata.S +endif + +ifeq (${USE_COHERENT_MEM}, 1) +PSCI_LIB_SOURCES += lib/locks/bakery/bakery_lock_coherent.c +else +PSCI_LIB_SOURCES += lib/locks/bakery/bakery_lock_normal.c +endif + +ifeq (${ENABLE_PSCI_STAT}, 1) +PSCI_LIB_SOURCES += lib/psci/psci_stat.c +endif diff --git a/lib/psci/psci_main.c b/lib/psci/psci_main.c new file mode 100644 index 0000000..a015531 --- /dev/null +++ b/lib/psci/psci_main.c @@ -0,0 +1,606 @@ +/* + * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <string.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <common/debug.h> +#include <lib/pmf/pmf.h> +#include <lib/runtime_instr.h> +#include <lib/smccc.h> +#include <plat/common/platform.h> +#include <services/arm_arch_svc.h> + +#include "psci_private.h" + +/******************************************************************************* + * PSCI frontend api for servicing SMCs. Described in the PSCI spec. 
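/*
 * [Worked example -- not part of this patch] In the original (non-extended)
 * power_state format from the PSCI spec, bits[15:0] hold a platform-defined
 * StateID, bit[16] selects standby (0) versus powerdown (1), and bits[25:24]
 * give the target power level. For instance, 0x0 requests CPU-level standby,
 * while 0x01010022 requests a powerdown targeting power level 1 (the StateID
 * 0x22 here is arbitrary). Platforms using the extended StateID format encode
 * these fields differently; psci_validate_power_state() defers the decoding
 * to the platform's validate_power_state hook.
 */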
+ ******************************************************************************/ +int psci_cpu_on(u_register_t target_cpu, + uintptr_t entrypoint, + u_register_t context_id) + +{ + int rc; + entry_point_info_t ep; + + /* Validate the target CPU */ + if (!is_valid_mpidr(target_cpu)) + return PSCI_E_INVALID_PARAMS; + + /* Validate the entry point and get the entry_point_info */ + rc = psci_validate_entry_point(&ep, entrypoint, context_id); + if (rc != PSCI_E_SUCCESS) + return rc; + + /* + * To turn this cpu on, specify which power + * levels need to be turned on + */ + return psci_cpu_on_start(target_cpu, &ep); +} + +unsigned int psci_version(void) +{ + return PSCI_MAJOR_VER | PSCI_MINOR_VER; +} + +int psci_cpu_suspend(unsigned int power_state, + uintptr_t entrypoint, + u_register_t context_id) +{ + int rc; + unsigned int target_pwrlvl, is_power_down_state; + entry_point_info_t ep; + psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} }; + plat_local_state_t cpu_pd_state; +#if PSCI_OS_INIT_MODE + unsigned int cpu_idx = plat_my_core_pos(); + plat_local_state_t prev[PLAT_MAX_PWR_LVL]; +#endif + + /* Validate the power_state parameter */ + rc = psci_validate_power_state(power_state, &state_info); + if (rc != PSCI_E_SUCCESS) { + assert(rc == PSCI_E_INVALID_PARAMS); + return rc; + } + + /* + * Get the value of the state type bit from the power state parameter. + */ + is_power_down_state = psci_get_pstate_type(power_state); + + /* Sanity check the requested suspend levels */ + assert(psci_validate_suspend_req(&state_info, is_power_down_state) + == PSCI_E_SUCCESS); + + target_pwrlvl = psci_find_target_suspend_lvl(&state_info); + if (target_pwrlvl == PSCI_INVALID_PWR_LVL) { + ERROR("Invalid target power level for suspend operation\n"); + panic(); + } + + /* Fast path for CPU standby.*/ + if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) { + if (psci_plat_pm_ops->cpu_standby == NULL) + return PSCI_E_INVALID_PARAMS; + + /* + * Set the state of the CPU power domain to the platform + * specific retention state and enter the standby state. + */ + cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL]; + psci_set_cpu_local_state(cpu_pd_state); + +#if PSCI_OS_INIT_MODE + /* + * If in OS-initiated mode, save a copy of the previous + * requested local power states and update the new requested + * local power states for this CPU. + */ + if (psci_suspend_mode == OS_INIT) { + psci_update_req_local_pwr_states(target_pwrlvl, cpu_idx, + &state_info, prev); + } +#endif + +#if ENABLE_PSCI_STAT + plat_psci_stat_accounting_start(&state_info); +#endif + +#if ENABLE_RUNTIME_INSTRUMENTATION + PMF_CAPTURE_TIMESTAMP(rt_instr_svc, + RT_INSTR_ENTER_HW_LOW_PWR, + PMF_NO_CACHE_MAINT); +#endif + + psci_plat_pm_ops->cpu_standby(cpu_pd_state); + + /* Upon exit from standby, set the state back to RUN. */ + psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN); + +#if PSCI_OS_INIT_MODE + /* + * If in OS-initiated mode, restore the previous requested + * local power states for this CPU. 
+		 */
+		if (psci_suspend_mode == OS_INIT) {
+			psci_restore_req_local_pwr_states(cpu_idx, prev);
+		}
+#endif
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+			RT_INSTR_EXIT_HW_LOW_PWR,
+			PMF_NO_CACHE_MAINT);
+#endif
+
+#if ENABLE_PSCI_STAT
+		plat_psci_stat_accounting_stop(&state_info);
+
+		/* Update PSCI stats */
+		psci_stats_update_pwr_up(PSCI_CPU_PWR_LVL, &state_info);
+#endif
+
+		return PSCI_E_SUCCESS;
+	}
+
+	/*
+	 * If a power down state has been requested, we need to verify the
+	 * entry point and program entry information.
+	 */
+	if (is_power_down_state != 0U) {
+		rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+		if (rc != PSCI_E_SUCCESS)
+			return rc;
+	}
+
+	/*
+	 * Do what is needed to enter the power down state. Upon success,
+	 * enter the final wfi which will power down this CPU. This function
+	 * might return if the power down was abandoned for any reason, e.g.
+	 * arrival of an interrupt.
+	 */
+	rc = psci_cpu_suspend_start(&ep,
+				    target_pwrlvl,
+				    &state_info,
+				    is_power_down_state);
+
+	return rc;
+}
+
+
+int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
+{
+	int rc;
+	psci_power_state_t state_info;
+	entry_point_info_t ep;
+
+	/* Check if the current CPU is the last ON CPU in the system */
+	if (!psci_is_last_on_cpu())
+		return PSCI_E_DENIED;
+
+	/* Validate the entry point and get the entry_point_info */
+	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+	if (rc != PSCI_E_SUCCESS)
+		return rc;
+
+	/* Query the psci_power_state for system suspend */
+	psci_query_sys_suspend_pwrstate(&state_info);
+
+	/*
+	 * Check if the platform allows suspend to the highest power level
+	 * (system level).
+	 */
+	if (psci_find_target_suspend_lvl(&state_info) < PLAT_MAX_PWR_LVL)
+		return PSCI_E_DENIED;
+
+	/* Ensure that the psci_power_state makes sense */
+	assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN)
+						== PSCI_E_SUCCESS);
+	assert(is_local_state_off(
+			state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]) != 0);
+
+	/*
+	 * Do what is needed to enter the system suspend state. This function
+	 * might return if the power down was abandoned for any reason, e.g.
+	 * arrival of an interrupt.
+	 */
+	rc = psci_cpu_suspend_start(&ep,
+				    PLAT_MAX_PWR_LVL,
+				    &state_info,
+				    PSTATE_TYPE_POWERDOWN);
+
+	return rc;
+}
+
+int psci_cpu_off(void)
+{
+	int rc;
+	unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL;
+
+	/*
+	 * Do what is needed to power off this CPU and possibly higher power
+	 * levels if it is able to do so. Upon success, enter the final wfi
+	 * which will power down this CPU.
+	 */
+	rc = psci_do_cpu_off(target_pwrlvl);
+
+	/*
+	 * The only error cpu_off can return is E_DENIED. So check if that's
+	 * indeed the case.
+	 */
+	assert(rc == PSCI_E_DENIED);
+
+	return rc;
+}
+
+int psci_affinity_info(u_register_t target_affinity,
+		       unsigned int lowest_affinity_level)
+{
+	unsigned int target_idx;
+
+	/* Validate the target affinity */
+	if (!is_valid_mpidr(target_affinity))
+		return PSCI_E_INVALID_PARAMS;
+
+	/* We don't support levels higher than PSCI_CPU_PWR_LVL */
+	if (lowest_affinity_level > PSCI_CPU_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Calculate the cpu index of the target */
+	target_idx = (unsigned int) plat_core_pos_by_mpidr(target_affinity);
+
+	/*
+	 * Generic management:
+	 * Perform cache maintenance ahead of reading the target CPU state to
+	 * ensure that the data is not stale.
+	 * There is a theoretical edge case where the cache may contain stale
+	 * data for the target CPU - this can occur under the following
+	 * conditions:
+	 * - the target CPU is in another cluster from the current
+	 * - the target CPU was the last CPU to shut down on its cluster
+	 * - the cluster was removed from coherency as part of the CPU shutdown
+	 *
+	 * In this case the cache maintenance that was performed as part of the
+	 * target CPU's shutdown was not seen by the current CPU's cluster. And
+	 * so the cache may contain stale data for the target CPU.
+	 */
+	flush_cpu_data_by_index(target_idx,
+				psci_svc_cpu_data.aff_info_state);
+
+	return psci_get_aff_info_state_by_idx(target_idx);
+}
+
+int psci_migrate(u_register_t target_cpu)
+{
+	int rc;
+	u_register_t resident_cpu_mpidr;
+
+	/* Validate the target cpu */
+	if (!is_valid_mpidr(target_cpu))
+		return PSCI_E_INVALID_PARAMS;
+
+	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
+	if (rc != PSCI_TOS_UP_MIG_CAP)
+		return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
+			  PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;
+
+	/*
+	 * Migrate should only be invoked on the CPU where
+	 * the Secure OS is resident. Note that target_cpu has already been
+	 * validated above, so the check is not repeated here.
+	 */
+	if (resident_cpu_mpidr != read_mpidr_el1())
+		return PSCI_E_NOT_PRESENT;
+
+	assert((psci_spd_pm != NULL) && (psci_spd_pm->svc_migrate != NULL));
+
+	rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
+	assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_INTERN_FAIL));
+
+	return rc;
+}
+
+int psci_migrate_info_type(void)
+{
+	u_register_t resident_cpu_mpidr;
+
+	return psci_spd_migrate_info(&resident_cpu_mpidr);
+}
+
+u_register_t psci_migrate_info_up_cpu(void)
+{
+	u_register_t resident_cpu_mpidr;
+	int rc;
+
+	/*
+	 * The return value of this depends upon what
+	 * psci_spd_migrate_info() returns.
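+	 *
+	 * (Editorial note, not part of the original patch: only the two
+	 * "single-core Trusted OS" answers make the resident MPIDR
+	 * meaningful, i.e. roughly:
+	 *
+	 *	rc = psci_spd_migrate_info(&mpidr);
+	 *	mpidr_valid = (rc == PSCI_TOS_UP_MIG_CAP) ||
+	 *		      (rc == PSCI_TOS_NOT_UP_MIG_CAP);
+	 *
+	 * anything else is folded into an error return below.)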
+ */ + rc = psci_spd_migrate_info(&resident_cpu_mpidr); + if ((rc != PSCI_TOS_NOT_UP_MIG_CAP) && (rc != PSCI_TOS_UP_MIG_CAP)) + return (u_register_t)(register_t) PSCI_E_INVALID_PARAMS; + + return resident_cpu_mpidr; +} + +int psci_node_hw_state(u_register_t target_cpu, + unsigned int power_level) +{ + int rc; + + /* Validate target_cpu */ + if (!is_valid_mpidr(target_cpu)) + return PSCI_E_INVALID_PARAMS; + + /* Validate power_level against PLAT_MAX_PWR_LVL */ + if (power_level > PLAT_MAX_PWR_LVL) + return PSCI_E_INVALID_PARAMS; + + /* + * Dispatch this call to platform to query power controller, and pass on + * to the caller what it returns + */ + assert(psci_plat_pm_ops->get_node_hw_state != NULL); + rc = psci_plat_pm_ops->get_node_hw_state(target_cpu, power_level); + assert(((rc >= HW_ON) && (rc <= HW_STANDBY)) + || (rc == PSCI_E_NOT_SUPPORTED) + || (rc == PSCI_E_INVALID_PARAMS)); + return rc; +} + +int psci_features(unsigned int psci_fid) +{ + unsigned int local_caps = psci_caps; + + if (psci_fid == SMCCC_VERSION) + return PSCI_E_SUCCESS; + + /* Check if it is a 64 bit function */ + if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64) + local_caps &= PSCI_CAP_64BIT_MASK; + + /* Check for invalid fid */ + if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid) + && is_psci_fid(psci_fid))) + return PSCI_E_NOT_SUPPORTED; + + + /* Check if the psci fid is supported or not */ + if ((local_caps & define_psci_cap(psci_fid)) == 0U) + return PSCI_E_NOT_SUPPORTED; + + /* Format the feature flags */ + if ((psci_fid == PSCI_CPU_SUSPEND_AARCH32) || + (psci_fid == PSCI_CPU_SUSPEND_AARCH64)) { + unsigned int ret = ((FF_PSTATE << FF_PSTATE_SHIFT) | + (FF_SUPPORTS_OS_INIT_MODE << FF_MODE_SUPPORT_SHIFT)); + return (int)ret; + } + + /* Return 0 for all other fid's */ + return PSCI_E_SUCCESS; +} + +#if PSCI_OS_INIT_MODE +int psci_set_suspend_mode(unsigned int mode) +{ + if (psci_suspend_mode == mode) { + return PSCI_E_SUCCESS; + } + + if (mode == PLAT_COORD) { + /* Check if the current CPU is the last ON CPU in the system */ + if (!psci_is_last_on_cpu_safe()) { + return PSCI_E_DENIED; + } + } + + if (mode == OS_INIT) { + /* + * Check if all CPUs in the system are ON or if the current + * CPU is the last ON CPU in the system. + */ + if (!(psci_are_all_cpus_on_safe() || + psci_is_last_on_cpu_safe())) { + return PSCI_E_DENIED; + } + } + + psci_suspend_mode = mode; + psci_flush_dcache_range((uintptr_t)&psci_suspend_mode, + sizeof(psci_suspend_mode)); + + return PSCI_E_SUCCESS; +} +#endif + +/******************************************************************************* + * PSCI top level handler for servicing SMCs. 
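+ *
+ * (Editorial sketch, not part of the original patch: how a runtime service
+ * dispatcher might route PSCI function IDs here, loosely modelled on TF-A's
+ * standard service handler; SMC_RET1 and is_psci_fid() are existing TF-A
+ * macros/helpers, also used elsewhere in this patch.)
+ *
+ *	if (is_psci_fid(smc_fid)) {
+ *		SMC_RET1(handle,
+ *			 psci_smc_handler(smc_fid, x1, x2, x3, x4,
+ *					  cookie, handle, flags));
+ *	}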
+ ******************************************************************************/ +u_register_t psci_smc_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + u_register_t ret; + + if (is_caller_secure(flags)) + return (u_register_t)SMC_UNK; + + /* Check the fid against the capabilities */ + if ((psci_caps & define_psci_cap(smc_fid)) == 0U) + return (u_register_t)SMC_UNK; + + if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) { + /* 32-bit PSCI function, clear top parameter bits */ + + uint32_t r1 = (uint32_t)x1; + uint32_t r2 = (uint32_t)x2; + uint32_t r3 = (uint32_t)x3; + + switch (smc_fid) { + case PSCI_VERSION: + ret = (u_register_t)psci_version(); + break; + + case PSCI_CPU_OFF: + ret = (u_register_t)psci_cpu_off(); + break; + + case PSCI_CPU_SUSPEND_AARCH32: + ret = (u_register_t)psci_cpu_suspend(r1, r2, r3); + break; + + case PSCI_CPU_ON_AARCH32: + ret = (u_register_t)psci_cpu_on(r1, r2, r3); + break; + + case PSCI_AFFINITY_INFO_AARCH32: + ret = (u_register_t)psci_affinity_info(r1, r2); + break; + + case PSCI_MIG_AARCH32: + ret = (u_register_t)psci_migrate(r1); + break; + + case PSCI_MIG_INFO_TYPE: + ret = (u_register_t)psci_migrate_info_type(); + break; + + case PSCI_MIG_INFO_UP_CPU_AARCH32: + ret = psci_migrate_info_up_cpu(); + break; + + case PSCI_NODE_HW_STATE_AARCH32: + ret = (u_register_t)psci_node_hw_state(r1, r2); + break; + + case PSCI_SYSTEM_SUSPEND_AARCH32: + ret = (u_register_t)psci_system_suspend(r1, r2); + break; + + case PSCI_SYSTEM_OFF: + psci_system_off(); + /* We should never return from psci_system_off() */ + break; + + case PSCI_SYSTEM_RESET: + psci_system_reset(); + /* We should never return from psci_system_reset() */ + break; + + case PSCI_FEATURES: + ret = (u_register_t)psci_features(r1); + break; + +#if PSCI_OS_INIT_MODE + case PSCI_SET_SUSPEND_MODE: + ret = (u_register_t)psci_set_suspend_mode(r1); + break; +#endif + +#if ENABLE_PSCI_STAT + case PSCI_STAT_RESIDENCY_AARCH32: + ret = psci_stat_residency(r1, r2); + break; + + case PSCI_STAT_COUNT_AARCH32: + ret = psci_stat_count(r1, r2); + break; +#endif + case PSCI_MEM_PROTECT: + ret = psci_mem_protect(r1); + break; + + case PSCI_MEM_CHK_RANGE_AARCH32: + ret = psci_mem_chk_range(r1, r2); + break; + + case PSCI_SYSTEM_RESET2_AARCH32: + /* We should never return from psci_system_reset2() */ + ret = psci_system_reset2(r1, r2); + break; + + default: + WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid); + ret = (u_register_t)SMC_UNK; + break; + } + } else { + /* 64-bit PSCI function */ + + switch (smc_fid) { + case PSCI_CPU_SUSPEND_AARCH64: + ret = (u_register_t) + psci_cpu_suspend((unsigned int)x1, x2, x3); + break; + + case PSCI_CPU_ON_AARCH64: + ret = (u_register_t)psci_cpu_on(x1, x2, x3); + break; + + case PSCI_AFFINITY_INFO_AARCH64: + ret = (u_register_t) + psci_affinity_info(x1, (unsigned int)x2); + break; + + case PSCI_MIG_AARCH64: + ret = (u_register_t)psci_migrate(x1); + break; + + case PSCI_MIG_INFO_UP_CPU_AARCH64: + ret = psci_migrate_info_up_cpu(); + break; + + case PSCI_NODE_HW_STATE_AARCH64: + ret = (u_register_t)psci_node_hw_state( + x1, (unsigned int) x2); + break; + + case PSCI_SYSTEM_SUSPEND_AARCH64: + ret = (u_register_t)psci_system_suspend(x1, x2); + break; + +#if ENABLE_PSCI_STAT + case PSCI_STAT_RESIDENCY_AARCH64: + ret = psci_stat_residency(x1, (unsigned int) x2); + break; + + case PSCI_STAT_COUNT_AARCH64: + ret = psci_stat_count(x1, (unsigned int) x2); + break; +#endif + + 
+		case PSCI_MEM_CHK_RANGE_AARCH64:
+			ret = psci_mem_chk_range(x1, x2);
+			break;
+
+		case PSCI_SYSTEM_RESET2_AARCH64:
+			/* We should never return from psci_system_reset2() */
+			ret = psci_system_reset2((uint32_t) x1, x2);
+			break;
+
+		default:
+			WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
+			ret = (u_register_t)SMC_UNK;
+			break;
+		}
+	}
+
+	return ret;
+}
diff --git a/lib/psci/psci_mem_protect.c b/lib/psci/psci_mem_protect.c
new file mode 100644
index 0000000..385dcd2
--- /dev/null
+++ b/lib/psci/psci_mem_protect.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017-2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <limits.h>
+
+#include <lib/utils.h>
+
+#include "psci_private.h"
+
+u_register_t psci_mem_protect(unsigned int enable)
+{
+	int val;
+
+	assert(psci_plat_pm_ops->read_mem_protect != NULL);
+	assert(psci_plat_pm_ops->write_mem_protect != NULL);
+
+	if (psci_plat_pm_ops->read_mem_protect(&val) < 0)
+		return (u_register_t) PSCI_E_NOT_SUPPORTED;
+	if (psci_plat_pm_ops->write_mem_protect(enable) < 0)
+		return (u_register_t) PSCI_E_NOT_SUPPORTED;
+
+	return (val != 0) ? 1U : 0U;
+}
+
+u_register_t psci_mem_chk_range(uintptr_t base, u_register_t length)
+{
+	int ret;
+
+	assert(psci_plat_pm_ops->mem_protect_chk != NULL);
+
+	if ((length == 0U) || check_uptr_overflow(base, length - 1U))
+		return (u_register_t) PSCI_E_DENIED;
+
+	ret = psci_plat_pm_ops->mem_protect_chk(base, length);
+	return (ret < 0) ?
+	       (u_register_t) PSCI_E_DENIED : (u_register_t) PSCI_E_SUCCESS;
+}
diff --git a/lib/psci/psci_off.c b/lib/psci/psci_off.c
new file mode 100644
index 0000000..f83753f
--- /dev/null
+++ b/lib/psci/psci_off.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/pmf/pmf.h>
+#include <lib/runtime_instr.h>
+#include <plat/common/platform.h>
+
+#include "psci_private.h"
+
+/******************************************************************************
+ * Construct the psci_power_state to request power OFF at all power levels.
+ ******************************************************************************/
+static void psci_set_power_off_state(psci_power_state_t *state_info)
+{
+	unsigned int lvl;
+
+	for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+		state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
+}
+
+/******************************************************************************
+ * Top level handler which is called when a cpu wants to power itself down.
+ * It's assumed that along with turning the cpu power domain off, power
+ * domains at higher levels will be turned off as far as possible. It finds
+ * the highest level where a domain has to be powered off by traversing the
+ * node information and then performs the generic, architectural and platform
+ * setup and state management required to turn OFF that power domain and the
+ * domains below it. For example, for a cpu that's to be powered OFF, it
+ * could mean programming the power controller, whereas for a cluster that's
+ * to be powered off, it will call the platform specific code which will
+ * disable coherency at the interconnect level if the cpu is the last in the
+ * cluster, and also program the power controller.
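+ *
+ * (Editorial sketch, not part of the original patch: the rough shape of a
+ * platform's pwr_domain_off hook that this handler ends up calling. Only
+ * is_local_state_off() and MPIDR_AFFLVL1 are real; the plat_* helper names
+ * are illustrative.)
+ *
+ *	static void plat_pwr_domain_off(const psci_power_state_t *target)
+ *	{
+ *		plat_gic_cpuif_disable();	// illustrative
+ *		if (is_local_state_off(
+ *				target->pwr_domain_state[MPIDR_AFFLVL1]) != 0)
+ *			plat_cluster_coherency_exit();	// illustrative
+ *	}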
+ ******************************************************************************/
+int psci_do_cpu_off(unsigned int end_pwrlvl)
+{
+	int rc = PSCI_E_SUCCESS;
+	unsigned int idx = plat_my_core_pos();
+	psci_power_state_t state_info;
+	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
+
+	/*
+	 * This function must only be called on platforms where the
+	 * CPU_OFF platform hooks have been implemented.
+	 */
+	assert(psci_plat_pm_ops->pwr_domain_off != NULL);
+
+	/* Construct the psci_power_state for CPU_OFF */
+	psci_set_power_off_state(&state_info);
+
+	/*
+	 * Call the platform provided early CPU_OFF handler to allow
+	 * platforms to perform any housekeeping activities before
+	 * actually powering the CPU off. PSCI_E_DENIED indicates that
+	 * the CPU off sequence should be aborted at this time.
+	 */
+	if (psci_plat_pm_ops->pwr_domain_off_early) {
+		rc = psci_plat_pm_ops->pwr_domain_off_early(&state_info);
+		if (rc == PSCI_E_DENIED) {
+			return rc;
+		}
+	}
+
+	/*
+	 * Get the parent nodes here; this is important to do before we
+	 * initiate the power down sequence, as after that point the core may
+	 * have exited coherency and its cache may be disabled. Any access to
+	 * shared memory after that (such as the parent node lookup in
+	 * psci_cpu_pd_nodes) can cause coherency issues on some platforms.
+	 */
+	psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes);
+
+	/*
+	 * This function acquires the lock corresponding to each power
+	 * level so that by the time all locks are taken, a snapshot of the
+	 * system topology has been captured and state management can be done
+	 * safely.
+	 */
+	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
+
+	/*
+	 * Call the cpu off handler registered by the Secure Payload Dispatcher
+	 * to let it do any bookkeeping. Assume that the SPD always reports an
+	 * E_DENIED error if the SP refuses to power down.
+	 */
+	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_off != NULL)) {
+		rc = psci_spd_pm->svc_off(0);
+		if (rc != 0)
+			goto exit;
+	}
+
+	/*
+	 * This function is passed the requested state info and
+	 * it returns the negotiated state info for each power level up to
+	 * the end level specified.
+	 */
+	psci_do_state_coordination(end_pwrlvl, &state_info);
+
+	/* Update the target state in the power domain nodes */
+	psci_set_target_local_pwr_states(end_pwrlvl, &state_info);
+
+#if ENABLE_PSCI_STAT
+	/* Update the last cpu for each level till end_pwrlvl */
+	psci_stats_update_pwr_down(end_pwrlvl, &state_info);
+#endif
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+
+	/*
+	 * Flush the cache line so that even if CPU power down happens,
+	 * the timestamp update is reflected in memory.
+	 */
+	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+		RT_INSTR_ENTER_CFLUSH,
+		PMF_CACHE_MAINT);
+#endif
+
+	/*
+	 * Arch. management. Initiate the power down sequence.
+	 */
+	psci_pwrdown_cpu(psci_find_max_off_lvl(&state_info));
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+		RT_INSTR_EXIT_CFLUSH,
+		PMF_NO_CACHE_MAINT);
+#endif
+
+	/*
+	 * Plat. management: Perform platform specific actions to turn this
+	 * cpu off e.g. exit cpu coherency, program the power controller etc.
+	 */
+	psci_plat_pm_ops->pwr_domain_off(&state_info);
+
+#if ENABLE_PSCI_STAT
+	plat_psci_stat_accounting_start(&state_info);
+#endif
+
+exit:
+	/*
+	 * Release the locks corresponding to each power level in the
+	 * reverse order to which they were acquired.
+	 */
+	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
+
+	/*
+	 * Check if all actions needed to safely power down this cpu have
+	 * successfully completed.
+	 */
+	if (rc == PSCI_E_SUCCESS) {
+		/*
+		 * Set the affinity info state to OFF. When caches are disabled,
+		 * this writes directly to main memory, so cache maintenance is
+		 * required to ensure that later cached reads of aff_info_state
+		 * return AFF_STATE_OFF. A dsbish() ensures ordering of the
+		 * update to the affinity info state prior to cache line
+		 * invalidation.
+		 */
+		psci_flush_cpu_data(psci_svc_cpu_data.aff_info_state);
+		psci_set_aff_info_state(AFF_STATE_OFF);
+		psci_dsbish();
+		psci_inv_cpu_data(psci_svc_cpu_data.aff_info_state);
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+
+		/*
+		 * Update the timestamp with cache off. We assume this
+		 * timestamp can only be read from the current CPU and the
+		 * timestamp cache line will be flushed before return to
+		 * normal world on wakeup.
+		 */
+		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+			RT_INSTR_ENTER_HW_LOW_PWR,
+			PMF_NO_CACHE_MAINT);
+#endif
+
+		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL) {
+			/* This function must not return */
+			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
+		} else {
+			/*
+			 * Enter a wfi loop which will allow the power
+			 * controller to physically power down this cpu.
+			 */
+			psci_power_down_wfi();
+		}
+	}
+
+	return rc;
+}
diff --git a/lib/psci/psci_on.c b/lib/psci/psci_on.c
new file mode 100644
index 0000000..b279774
--- /dev/null
+++ b/lib/psci/psci_on.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/el3_runtime/pubsub_events.h>
+#include <plat/common/platform.h>
+
+#include "psci_private.h"
+
+/*
+ * Helper functions for the CPU level spinlocks
+ */
+static inline void psci_spin_lock_cpu(unsigned int idx)
+{
+	spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock);
+}
+
+static inline void psci_spin_unlock_cpu(unsigned int idx)
+{
+	spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock);
+}
+
+/*******************************************************************************
+ * This function checks whether a cpu which has been requested to be turned on
+ * is OFF to begin with.
+ ******************************************************************************/
+static int cpu_on_validate_state(aff_info_state_t aff_state)
+{
+	if (aff_state == AFF_STATE_ON)
+		return PSCI_E_ALREADY_ON;
+
+	if (aff_state == AFF_STATE_ON_PENDING)
+		return PSCI_E_ON_PENDING;
+
+	assert(aff_state == AFF_STATE_OFF);
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Generic handler which is called to physically power on a cpu identified by
+ * its mpidr. It performs the generic, architectural, platform setup and state
+ * management to power on the target cpu e.g. it will ensure that
+ * enough information is stashed for it to resume execution in the non-secure
+ * security state.
+ *
+ * The state of all the relevant power domains is changed after calling the
+ * platform handler, as it can return an error.
+ ******************************************************************************/
+int psci_cpu_on_start(u_register_t target_cpu,
+		      const entry_point_info_t *ep)
+{
+	int rc;
+	aff_info_state_t target_aff_state;
+	unsigned int target_idx = (unsigned int)plat_core_pos_by_mpidr(target_cpu);
+
+	/*
+	 * This function must only be called on platforms where the
+	 * CPU_ON platform hooks have been implemented.
+	 */
+	assert((psci_plat_pm_ops->pwr_domain_on != NULL) &&
+	       (psci_plat_pm_ops->pwr_domain_on_finish != NULL));
+
+	/* Protect against multiple CPUs trying to turn ON the same target CPU */
+	psci_spin_lock_cpu(target_idx);
+
+	/*
+	 * Generic management: Ensure that the cpu is OFF before trying to
+	 * turn it on.
+	 * Perform cache maintenance ahead of reading the target CPU state to
+	 * ensure that the data is not stale.
+	 * There is a theoretical edge case where the cache may contain stale
+	 * data for the target CPU - this can occur under the following
+	 * conditions:
+	 * - the target CPU is in another cluster from the current
+	 * - the target CPU was the last CPU to shut down on its cluster
+	 * - the cluster was removed from coherency as part of the CPU shutdown
+	 *
+	 * In this case the cache maintenance that was performed as part of the
+	 * target CPU's shutdown was not seen by the current CPU's cluster. And
+	 * so the cache may contain stale data for the target CPU.
+	 */
+	flush_cpu_data_by_index(target_idx,
+				psci_svc_cpu_data.aff_info_state);
+	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
+	if (rc != PSCI_E_SUCCESS)
+		goto exit;
+
+	/*
+	 * Call the cpu on handler registered by the Secure Payload Dispatcher
+	 * to let it do any bookkeeping. If the handler encounters an error,
+	 * it's expected to assert within.
+	 */
+	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on != NULL))
+		psci_spd_pm->svc_on(target_cpu);
+
+	/*
+	 * Set the Affinity info state of the target cpu to ON_PENDING.
+	 * Flush aff_info_state as it will be accessed with caches
+	 * turned OFF.
+	 */
+	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
+	flush_cpu_data_by_index(target_idx,
+				psci_svc_cpu_data.aff_info_state);
+
+	/*
+	 * The cache line invalidation by the target CPU after setting the
+	 * state to OFF (see psci_do_cpu_off()) could cause the update to
+	 * aff_info_state to be invalidated. Retry the update if the target
+	 * CPU aff_info_state is not ON_PENDING.
+	 */
+	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
+	if (target_aff_state != AFF_STATE_ON_PENDING) {
+		assert(target_aff_state == AFF_STATE_OFF);
+		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
+		flush_cpu_data_by_index(target_idx,
+					psci_svc_cpu_data.aff_info_state);
+
+		assert(psci_get_aff_info_state_by_idx(target_idx) ==
+		       AFF_STATE_ON_PENDING);
+	}
+
+	/*
+	 * Perform generic, architecture and platform specific handling.
+	 */
+	/*
+	 * Plat. management: Give the platform the current state
+	 * of the target cpu to allow it to perform the necessary
+	 * steps to power on.
+	 */
+	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
+	assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_INTERN_FAIL));
+
+	if (rc == PSCI_E_SUCCESS)
+		/* Store the re-entry information for the non-secure world. */
+		cm_init_context_by_index(target_idx, ep);
+	else {
+		/* Restore the state on error. */
+		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
+		flush_cpu_data_by_index(target_idx,
+					psci_svc_cpu_data.aff_info_state);
+	}
+
+exit:
+	psci_spin_unlock_cpu(target_idx);
+	return rc;
+}
+
+/*******************************************************************************
+ * The following function finishes an earlier power on request. It is called
+ * by the common finisher routine in psci_common.c. The `state_info` is the
+ * psci_power_state from which this CPU has woken up.
+ ******************************************************************************/
+void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info)
+{
+	/*
+	 * Plat. management: Perform the platform specific actions
+	 * for this cpu e.g. enabling the gic or zeroing the mailbox
+	 * register. The actual state of this cpu has already been
+	 * changed.
+	 */
+	psci_plat_pm_ops->pwr_domain_on_finish(state_info);
+
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+	/*
+	 * Arch. management: Enable the data cache and manage stack memory.
+	 */
+	psci_do_pwrup_cache_maintenance();
+#endif
+
+	/*
+	 * Plat. management: Perform any platform specific actions which
+	 * can only be done with the cpu and the cluster guaranteed to
+	 * be coherent.
+	 */
+	if (psci_plat_pm_ops->pwr_domain_on_finish_late != NULL)
+		psci_plat_pm_ops->pwr_domain_on_finish_late(state_info);
+
+	/*
+	 * All the platform specific actions for turning this cpu
+	 * on have completed. Perform enough arch. initialization
+	 * to run in the non-secure address space.
+	 */
+	psci_arch_setup();
+
+	/*
+	 * Lock the CPU spin lock to make sure that the context initialization
+	 * is done. Since the lock is only used in this function to create
+	 * a synchronization point with cpu_on_start(), it can be released
+	 * immediately.
+	 */
+	psci_spin_lock_cpu(cpu_idx);
+	psci_spin_unlock_cpu(cpu_idx);
+
+	/* Ensure we have been explicitly woken up by another cpu */
+	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);
+
+	/*
+	 * Call the cpu on finish handler registered by the Secure Payload
+	 * Dispatcher to let it do any bookkeeping. If the handler encounters
+	 * an error, it's expected to assert within.
+	 */
+	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on_finish != NULL))
+		psci_spd_pm->svc_on_finish(0);
+
+	PUBLISH_EVENT(psci_cpu_on_finish);
+
+	/*
+	 * Populate the mpidr field within the cpu node array.
+	 * This needs to be done only once.
+	 */
+	psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
+}
diff --git a/lib/psci/psci_private.h b/lib/psci/psci_private.h
new file mode 100644
index 0000000..2eb4a9b
--- /dev/null
+++ b/lib/psci/psci_private.h
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PSCI_PRIVATE_H
+#define PSCI_PRIVATE_H
+
+#include <stdbool.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <lib/bakery_lock.h>
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/psci/psci.h>
+#include <lib/spinlock.h>
+
+/*
+ * The PSCI capabilities which are provided by the generic code and do not
+ * depend on the platform or SPD capabilities.
+ */
+#define PSCI_GENERIC_CAP	\
+			(define_psci_cap(PSCI_VERSION) |	\
+			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
+			define_psci_cap(PSCI_FEATURES))
+
+/*
+ * The PSCI capabilities mask for 64-bit functions.
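+ *
+ * (Editorial note, not part of the original patch: define_psci_cap() maps a
+ * function ID to a single bit, so psci_caps can be tested with one AND; in
+ * TF-A it is essentially, give or take the exact header definition:
+ *
+ *	#define define_psci_cap(x)	(1U << ((x) & 0x1fU))
+ *
+ * which is why each function ID below contributes one bit to the mask.)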
+ */
+#define PSCI_CAP_64BIT_MASK	\
+			(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
+			define_psci_cap(PSCI_CPU_ON_AARCH64) |	\
+			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
+			define_psci_cap(PSCI_MIG_AARCH64) |	\
+			define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
+			define_psci_cap(PSCI_NODE_HW_STATE_AARCH64) |	\
+			define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |	\
+			define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |	\
+			define_psci_cap(PSCI_STAT_COUNT_AARCH64) |	\
+			define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64) |	\
+			define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64))
+
+/*
+ * Internally PSCI uses a uint16_t for various cpu indexes so
+ * define a limit on the number of CPUs that can be initialised.
+ */
+#define PSCI_MAX_CPUS_INDEX	0xFFFFU
+
+/* Invalid parent */
+#define PSCI_PARENT_NODE_INVALID	0xFFFFFFFFU
+
+/*
+ * Helper functions to get/set the fields of PSCI per-cpu data.
+ */
+static inline void psci_set_aff_info_state(aff_info_state_t aff_state)
+{
+	set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state);
+}
+
+static inline aff_info_state_t psci_get_aff_info_state(void)
+{
+	return get_cpu_data(psci_svc_cpu_data.aff_info_state);
+}
+
+static inline aff_info_state_t psci_get_aff_info_state_by_idx(unsigned int idx)
+{
+	return get_cpu_data_by_index(idx,
+				     psci_svc_cpu_data.aff_info_state);
+}
+
+static inline void psci_set_aff_info_state_by_idx(unsigned int idx,
+						  aff_info_state_t aff_state)
+{
+	set_cpu_data_by_index(idx,
+			      psci_svc_cpu_data.aff_info_state, aff_state);
+}
+
+static inline unsigned int psci_get_suspend_pwrlvl(void)
+{
+	return get_cpu_data(psci_svc_cpu_data.target_pwrlvl);
+}
+
+static inline void psci_set_suspend_pwrlvl(unsigned int target_lvl)
+{
+	set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl);
+}
+
+static inline void psci_set_cpu_local_state(plat_local_state_t state)
+{
+	set_cpu_data(psci_svc_cpu_data.local_state, state);
+}
+
+static inline plat_local_state_t psci_get_cpu_local_state(void)
+{
+	return get_cpu_data(psci_svc_cpu_data.local_state);
+}
+
+static inline plat_local_state_t psci_get_cpu_local_state_by_idx(
+		unsigned int idx)
+{
+	return get_cpu_data_by_index(idx,
+				     psci_svc_cpu_data.local_state);
+}
+
+/* Helper function to identify a CPU standby request in PSCI Suspend call */
+static inline bool is_cpu_standby_req(unsigned int is_power_down_state,
+				      unsigned int retn_lvl)
+{
+	return (is_power_down_state == 0U) && (retn_lvl == 0U);
+}
+
+/*******************************************************************************
+ * The following two data structures implement the power domain tree. The tree
+ * is used to track the state of all the nodes i.e. power domain instances
+ * described by the platform. The tree consists of nodes that describe CPU power
+ * domains i.e. leaf nodes and all other power domains which are parents of a
+ * CPU power domain i.e. non-leaf nodes.
+ ******************************************************************************/
+typedef struct non_cpu_pwr_domain_node {
+	/*
+	 * Index of the first CPU power domain node level 0 which has this node
+	 * as its parent.
+	 */
+	unsigned int cpu_start_idx;
+
+	/*
+	 * Number of CPU power domains which are siblings of the domain indexed
+	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
+	 * -> cpu_start_idx + ncpus' have this node as their parent.
+	 */
+	unsigned int ncpus;
+
+	/*
+	 * Index of the parent power domain node.
+	 * TODO: Figure out whether using a pointer is more efficient.
+	 */
+	unsigned int parent_node;
+
+	plat_local_state_t local_state;
+
+	unsigned char level;
+
+	/* For indexing the psci_lock array */
+	uint16_t lock_index;
+} non_cpu_pd_node_t;
+
+typedef struct cpu_pwr_domain_node {
+	u_register_t mpidr;
+
+	/*
+	 * Index of the parent power domain node.
+	 * TODO: Figure out whether using a pointer is more efficient.
+	 */
+	unsigned int parent_node;
+
+	/*
+	 * A CPU power domain does not require state coordination like its
+	 * parent power domains. Hence this node does not include a bakery
+	 * lock. A spinlock is required by the CPU_ON handler to prevent a race
+	 * when multiple CPUs try to turn ON the same target CPU.
+	 */
+	spinlock_t cpu_lock;
+} cpu_pd_node_t;
+
+#if PSCI_OS_INIT_MODE
+/*******************************************************************************
+ * The supported power state coordination modes that can be used in CPU_SUSPEND.
+ ******************************************************************************/
+typedef enum suspend_mode {
+	PLAT_COORD = 0,
+	OS_INIT = 1
+} suspend_mode_t;
+#endif
+
+/*******************************************************************************
+ * The following are helpers and declarations of locks.
+ ******************************************************************************/
+#if HW_ASSISTED_COHERENCY
+/*
+ * On systems where participant CPUs are cache-coherent, we can use spinlocks
+ * instead of bakery locks.
+ */
+#define DEFINE_PSCI_LOCK(_name)		spinlock_t _name
+#define DECLARE_PSCI_LOCK(_name)	extern DEFINE_PSCI_LOCK(_name)
+
+/* One lock is required per non-CPU power domain node */
+DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+
+/*
+ * On systems with hardware-assisted coherency, make PSCI cache operations
+ * no-ops, as PSCI participants are cache-coherent, and there's no need for
+ * explicit cache maintenance operations or barriers to coordinate their state.
+ */
+static inline void psci_flush_dcache_range(uintptr_t __unused addr,
+					   size_t __unused size)
+{
+	/* Empty */
+}
+
+#define psci_flush_cpu_data(member)
+#define psci_inv_cpu_data(member)
+
+static inline void psci_dsbish(void)
+{
+	/* Empty */
+}
+
+static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
+{
+	spin_lock(&psci_locks[non_cpu_pd_node->lock_index]);
+}
+
+static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
+{
+	spin_unlock(&psci_locks[non_cpu_pd_node->lock_index]);
+}
+
+#else /* if HW_ASSISTED_COHERENCY == 0 */
+/*
+ * Use bakery locks for state coordination as not all PSCI participants are
+ * cache coherent.
+ */
+#define DEFINE_PSCI_LOCK(_name)		DEFINE_BAKERY_LOCK(_name)
+#define DECLARE_PSCI_LOCK(_name)	DECLARE_BAKERY_LOCK(_name)
+
+/* One lock is required per non-CPU power domain node */
+DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+
+/*
+ * If not all PSCI participants are cache-coherent, perform cache maintenance
+ * and issue barriers wherever required to coordinate state.
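+ *
+ * (Editorial note, not part of the original patch: the canonical pairing on
+ * such systems appears in psci_do_cpu_off() earlier in this patch:
+ *
+ *	psci_flush_cpu_data(psci_svc_cpu_data.aff_info_state);
+ *	psci_set_aff_info_state(AFF_STATE_OFF);
+ *	psci_dsbish();
+ *	psci_inv_cpu_data(psci_svc_cpu_data.aff_info_state);
+ *
+ * on HW_ASSISTED_COHERENCY builds all of these compile to nothing.)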
+ */ +static inline void psci_flush_dcache_range(uintptr_t addr, size_t size) +{ + flush_dcache_range(addr, size); +} + +#define psci_flush_cpu_data(member) flush_cpu_data(member) +#define psci_inv_cpu_data(member) inv_cpu_data(member) + +static inline void psci_dsbish(void) +{ + dsbish(); +} + +static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node) +{ + bakery_lock_get(&psci_locks[non_cpu_pd_node->lock_index]); +} + +static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node) +{ + bakery_lock_release(&psci_locks[non_cpu_pd_node->lock_index]); +} + +#endif /* HW_ASSISTED_COHERENCY */ + +static inline void psci_lock_init(non_cpu_pd_node_t *non_cpu_pd_node, + uint16_t idx) +{ + non_cpu_pd_node[idx].lock_index = idx; +} + +/******************************************************************************* + * Data prototypes + ******************************************************************************/ +extern const plat_psci_ops_t *psci_plat_pm_ops; +extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]; +extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT]; +extern unsigned int psci_caps; +extern unsigned int psci_plat_core_count; +#if PSCI_OS_INIT_MODE +extern suspend_mode_t psci_suspend_mode; +#endif + +/******************************************************************************* + * SPD's power management hooks registered with PSCI + ******************************************************************************/ +extern const spd_pm_ops_t *psci_spd_pm; + +/******************************************************************************* + * Function prototypes + ******************************************************************************/ +/* Private exported functions from psci_common.c */ +int psci_validate_power_state(unsigned int power_state, + psci_power_state_t *state_info); +void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info); +void psci_init_req_local_pwr_states(void); +#if PSCI_OS_INIT_MODE +void psci_update_req_local_pwr_states(unsigned int end_pwrlvl, + unsigned int cpu_idx, + psci_power_state_t *state_info, + plat_local_state_t *prev); +void psci_restore_req_local_pwr_states(unsigned int cpu_idx, + plat_local_state_t *prev); +#endif +void psci_get_target_local_pwr_states(unsigned int end_pwrlvl, + psci_power_state_t *target_state); +void psci_set_target_local_pwr_states(unsigned int end_pwrlvl, + const psci_power_state_t *target_state); +int psci_validate_entry_point(entry_point_info_t *ep, + uintptr_t entrypoint, u_register_t context_id); +void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx, + unsigned int end_lvl, + unsigned int *node_index); +void psci_do_state_coordination(unsigned int end_pwrlvl, + psci_power_state_t *state_info); +#if PSCI_OS_INIT_MODE +int psci_validate_state_coordination(unsigned int end_pwrlvl, + psci_power_state_t *state_info); +#endif +void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl, + const unsigned int *parent_nodes); +void psci_release_pwr_domain_locks(unsigned int end_pwrlvl, + const unsigned int *parent_nodes); +int psci_validate_suspend_req(const psci_power_state_t *state_info, + unsigned int is_power_down_state); +unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info); +unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info); +void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl); +void psci_print_power_domain_map(void); +bool psci_is_last_on_cpu(void); +int psci_spd_migrate_info(u_register_t *mpidr); + 
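+/*
+ * (Editorial note, not part of the original patch: the usual pattern for the
+ * power domain lock helpers declared above, as used by
+ * psci_is_last_on_cpu_safe() and psci_do_cpu_off() in this patch.)
+ *
+ *	unsigned int nodes[PLAT_MAX_PWR_LVL] = {0};
+ *
+ *	psci_get_parent_pwr_domain_nodes(plat_my_core_pos(),
+ *					 PLAT_MAX_PWR_LVL, nodes);
+ *	psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, nodes);
+ *	... inspect or update topology state ...
+ *	psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, nodes);
+ */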
+/* + * CPU power down is directly called only when HW_ASSISTED_COHERENCY is + * available. Otherwise, this needs post-call stack maintenance, which is + * handled in assembly. + */ +void prepare_cpu_pwr_dwn(unsigned int power_level); + +/* This function applies various CPU errata during power down. */ +void apply_cpu_pwr_dwn_errata(void); + +/* Private exported functions from psci_on.c */ +int psci_cpu_on_start(u_register_t target_cpu, + const entry_point_info_t *ep); + +void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info); + +/* Private exported functions from psci_off.c */ +int psci_do_cpu_off(unsigned int end_pwrlvl); + +/* Private exported functions from psci_suspend.c */ +int psci_cpu_suspend_start(const entry_point_info_t *ep, + unsigned int end_pwrlvl, + psci_power_state_t *state_info, + unsigned int is_power_down_state); + +void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *state_info); + +/* Private exported functions from psci_helpers.S */ +void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level); +void psci_do_pwrup_cache_maintenance(void); + +/* Private exported functions from psci_system_off.c */ +void __dead2 psci_system_off(void); +void __dead2 psci_system_reset(void); +u_register_t psci_system_reset2(uint32_t reset_type, u_register_t cookie); + +/* Private exported functions from psci_stat.c */ +void psci_stats_update_pwr_down(unsigned int end_pwrlvl, + const psci_power_state_t *state_info); +void psci_stats_update_pwr_up(unsigned int end_pwrlvl, + const psci_power_state_t *state_info); +u_register_t psci_stat_residency(u_register_t target_cpu, + unsigned int power_state); +u_register_t psci_stat_count(u_register_t target_cpu, + unsigned int power_state); + +/* Private exported functions from psci_mem_protect.c */ +u_register_t psci_mem_protect(unsigned int enable); +u_register_t psci_mem_chk_range(uintptr_t base, u_register_t length); + +#endif /* PSCI_PRIVATE_H */ diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c new file mode 100644 index 0000000..6bf1ff4 --- /dev/null +++ b/lib/psci/psci_setup.c @@ -0,0 +1,318 @@ +/* + * Copyright (c) 2013-2020, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <stddef.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <context.h> +#include <lib/cpus/errata.h> +#include <lib/el3_runtime/context_mgmt.h> +#include <plat/common/platform.h> + +#include "psci_private.h" + +/* + * Check that PLATFORM_CORE_COUNT fits into the number of cores + * that can be represented by PSCI_MAX_CPUS_INDEX. + */ +CASSERT(PLATFORM_CORE_COUNT <= (PSCI_MAX_CPUS_INDEX + 1U), assert_psci_cores_overflow); + +/******************************************************************************* + * Per cpu non-secure contexts used to program the architectural state prior + * return to the normal world. + * TODO: Use the memory allocator to set aside memory for the contexts instead + * of relying on platform defined constants. + ******************************************************************************/ +static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT]; + +/****************************************************************************** + * Define the psci capability variable. 
+ *****************************************************************************/
+unsigned int psci_caps;
+
+/*******************************************************************************
+ * Function which initializes the 'psci_non_cpu_pd_nodes' or the
+ * 'psci_cpu_pd_nodes' corresponding to the power level.
+ ******************************************************************************/
+static void __init psci_init_pwr_domain_node(uint16_t node_idx,
+					     unsigned int parent_idx,
+					     unsigned char level)
+{
+	if (level > PSCI_CPU_PWR_LVL) {
+		assert(node_idx < PSCI_NUM_NON_CPU_PWR_DOMAINS);
+
+		psci_non_cpu_pd_nodes[node_idx].level = level;
+		psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
+		psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
+		psci_non_cpu_pd_nodes[node_idx].local_state =
+							PLAT_MAX_OFF_STATE;
+	} else {
+		psci_cpu_data_t *svc_cpu_data;
+
+		assert(node_idx < PLATFORM_CORE_COUNT);
+
+		psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
+
+		/* Initialize with an invalid mpidr */
+		psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
+
+		svc_cpu_data =
+			&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
+
+		/* Set the Affinity Info for the cores as OFF */
+		svc_cpu_data->aff_info_state = AFF_STATE_OFF;
+
+		/* Invalidate the suspend level for the cpu */
+		svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
+
+		/* Set the power state to OFF state */
+		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
+
+		psci_flush_dcache_range((uintptr_t)svc_cpu_data,
+					sizeof(*svc_cpu_data));
+
+		cm_set_context_by_index(node_idx,
+					(void *) &psci_ns_context[node_idx],
+					NON_SECURE);
+	}
+}
+
+/*******************************************************************************
+ * This function updates the cpu_start_idx and ncpus fields for each node in
+ * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each
+ * of the CPUs and checking whether they match the parent of the previous
+ * CPU. The basic assumption for this to work is that children of the same
+ * parent are allocated adjacent indices. The platform should ensure this
+ * through proper mapping of the CPUs to indices via plat_core_pos_by_mpidr()
+ * and plat_my_core_pos() APIs.
+ *******************************************************************************/
+static void __init psci_update_pwrlvl_limits(void)
+{
+	unsigned int cpu_idx;
+	int j;
+	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
+	unsigned int temp_index[PLAT_MAX_PWR_LVL];
+
+	for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) {
+		psci_get_parent_pwr_domain_nodes(cpu_idx,
+						 PLAT_MAX_PWR_LVL,
+						 temp_index);
+		for (j = (int)PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
+			if (temp_index[j] != nodes_idx[j]) {
+				nodes_idx[j] = temp_index[j];
+				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
+					= cpu_idx;
+			}
+			psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
+		}
+	}
+}
+
+/*******************************************************************************
+ * Core routine to populate the power domain tree. The tree descriptor passed
+ * by the platform is traversed breadth-first and the first entry in the map
+ * informs the number of root power domains. The parent nodes of the root
+ * nodes will point to an invalid entry (-1).
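+ *
+ * (Editorial example, not part of the original patch: for the two-cluster,
+ * two-CPUs-per-cluster, three-level system described in the psci_setup()
+ * comment below, a platform would export a descriptor along these lines;
+ * exact values are platform policy.)
+ *
+ *	static const unsigned char topology[] = {
+ *		1,	// one root (system) power domain
+ *		2,	// ...with two cluster children
+ *		2, 2	// ...each cluster with two CPUs
+ *	};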
+ ******************************************************************************/ +static unsigned int __init populate_power_domain_tree(const unsigned char + *topology) +{ + unsigned int i, j = 0U, num_nodes_at_lvl = 1U, num_nodes_at_next_lvl; + unsigned int node_index = 0U, num_children; + unsigned int parent_node_index = 0U; + int level = (int)PLAT_MAX_PWR_LVL; + + /* + * For each level the inputs are: + * - number of nodes at this level in plat_array i.e. num_nodes_at_level + * This is the sum of values of nodes at the parent level. + * - Index of first entry at this level in the plat_array i.e. + * parent_node_index. + * - Index of first free entry in psci_non_cpu_pd_nodes[] or + * psci_cpu_pd_nodes[] i.e. node_index depending upon the level. + */ + while (level >= (int) PSCI_CPU_PWR_LVL) { + num_nodes_at_next_lvl = 0U; + /* + * For each entry (parent node) at this level in the plat_array: + * - Find the number of children + * - Allocate a node in a power domain array for each child + * - Set the parent of the child to the parent_node_index - 1 + * - Increment parent_node_index to point to the next parent + * - Accumulate the number of children at next level. + */ + for (i = 0U; i < num_nodes_at_lvl; i++) { + assert(parent_node_index <= + PSCI_NUM_NON_CPU_PWR_DOMAINS); + num_children = topology[parent_node_index]; + + for (j = node_index; + j < (node_index + num_children); j++) + psci_init_pwr_domain_node((uint16_t)j, + parent_node_index - 1U, + (unsigned char)level); + + node_index = j; + num_nodes_at_next_lvl += num_children; + parent_node_index++; + } + + num_nodes_at_lvl = num_nodes_at_next_lvl; + level--; + + /* Reset the index for the cpu power domain array */ + if (level == (int) PSCI_CPU_PWR_LVL) + node_index = 0; + } + + /* Validate the sanity of array exported by the platform */ + assert(j <= PLATFORM_CORE_COUNT); + return j; +} + +/******************************************************************************* + * This function does the architectural setup and takes the warm boot + * entry-point `mailbox_ep` as an argument. The function also initializes the + * power domain topology tree by querying the platform. The power domain nodes + * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and + * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform + * exports its static topology map through the + * populate_power_domain_topology_tree() API. The algorithm populates the + * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this + * topology map. 
On a platform that implements two clusters of 2 cpus each, + * and supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would + * look like this: + * + * --------------------------------------------------- + * | system node | cluster 0 node | cluster 1 node | + * --------------------------------------------------- + * + * And populated psci_cpu_pd_nodes would look like this : + * <- cpus cluster0 -><- cpus cluster1 -> + * ------------------------------------------------ + * | CPU 0 | CPU 1 | CPU 2 | CPU 3 | + * ------------------------------------------------ + ******************************************************************************/ +int __init psci_setup(const psci_lib_args_t *lib_args) +{ + const unsigned char *topology_tree; + + assert(VERIFY_PSCI_LIB_ARGS_V1(lib_args)); + + /* Do the Architectural initialization */ + psci_arch_setup(); + + /* Query the topology map from the platform */ + topology_tree = plat_get_power_domain_tree_desc(); + + /* Populate the power domain arrays using the platform topology map */ + psci_plat_core_count = populate_power_domain_tree(topology_tree); + + /* Update the CPU limits for each node in psci_non_cpu_pd_nodes */ + psci_update_pwrlvl_limits(); + + /* Populate the mpidr field of cpu node for this CPU */ + psci_cpu_pd_nodes[plat_my_core_pos()].mpidr = + read_mpidr() & MPIDR_AFFINITY_MASK; + + psci_init_req_local_pwr_states(); + + /* + * Set the requested and target state of this CPU and all the higher + * power domain levels for this CPU to run. + */ + psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL); + + (void) plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep, + &psci_plat_pm_ops); + assert(psci_plat_pm_ops != NULL); + + /* + * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs + * during warm boot, possibly before data cache is enabled. 
+ */ + psci_flush_dcache_range((uintptr_t)&psci_plat_pm_ops, + sizeof(psci_plat_pm_ops)); + + /* Initialize the psci capability */ + psci_caps = PSCI_GENERIC_CAP; + + if (psci_plat_pm_ops->pwr_domain_off != NULL) + psci_caps |= define_psci_cap(PSCI_CPU_OFF); + if ((psci_plat_pm_ops->pwr_domain_on != NULL) && + (psci_plat_pm_ops->pwr_domain_on_finish != NULL)) + psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64); + if ((psci_plat_pm_ops->pwr_domain_suspend != NULL) && + (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL)) { + if (psci_plat_pm_ops->validate_power_state != NULL) + psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64); + if (psci_plat_pm_ops->get_sys_suspend_power_state != NULL) + psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64); +#if PSCI_OS_INIT_MODE + psci_caps |= define_psci_cap(PSCI_SET_SUSPEND_MODE); +#endif + } + if (psci_plat_pm_ops->system_off != NULL) + psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF); + if (psci_plat_pm_ops->system_reset != NULL) + psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET); + if (psci_plat_pm_ops->get_node_hw_state != NULL) + psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64); + if ((psci_plat_pm_ops->read_mem_protect != NULL) && + (psci_plat_pm_ops->write_mem_protect != NULL)) + psci_caps |= define_psci_cap(PSCI_MEM_PROTECT); + if (psci_plat_pm_ops->mem_protect_chk != NULL) + psci_caps |= define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64); + if (psci_plat_pm_ops->system_reset2 != NULL) + psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64); + +#if ENABLE_PSCI_STAT + psci_caps |= define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64); + psci_caps |= define_psci_cap(PSCI_STAT_COUNT_AARCH64); +#endif + + return 0; +} + +/******************************************************************************* + * This duplicates what the primary cpu did after a cold boot in BL1. The same + * needs to be done when a cpu is hotplugged in. This function could also over- + * ride any EL3 setup done by BL1 as this code resides in rw memory. + ******************************************************************************/ +void psci_arch_setup(void) +{ +#if (ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_GENERIC_TIMER) + /* Program the counter frequency */ + write_cntfrq_el0(plat_get_syscnt_freq2()); +#endif + + /* Initialize the cpu_ops pointer. */ + init_cpu_ops(); + + /* Having initialized cpu_ops, we can now print errata status */ + print_errata_status(); + +#if ENABLE_PAUTH + /* Store APIAKey_EL1 key */ + set_cpu_data(apiakey[0], read_apiakeylo_el1()); + set_cpu_data(apiakey[1], read_apiakeyhi_el1()); +#endif /* ENABLE_PAUTH */ +} + +/****************************************************************************** + * PSCI Library interface to initialize the cpu context for the next non + * secure image during cold boot. The relevant registers in the cpu context + * need to be retrieved and programmed on return from this interface. + *****************************************************************************/ +void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info) +{ + assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE); + cm_init_my_context(next_image_info); + cm_prepare_el3_exit(NON_SECURE); +} diff --git a/lib/psci/psci_stat.c b/lib/psci/psci_stat.c new file mode 100644 index 0000000..bedb816 --- /dev/null +++ b/lib/psci/psci_stat.c @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2016-2019, Arm Limited and Contributors. All rights reserved. 
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <platform_def.h>
+
+#include <common/debug.h>
+#include <plat/common/platform.h>
+
+#include "psci_private.h"
+
+#ifndef PLAT_MAX_PWR_LVL_STATES
+#define PLAT_MAX_PWR_LVL_STATES	2U
+#endif
+
+/* The following structure is used for PSCI STAT */
+typedef struct psci_stat {
+	u_register_t residency;
+	u_register_t count;
+} psci_stat_t;
+
+/*
+ * The following is used to keep track of the last cpu
+ * to power down in non-CPU power domains.
+ */
+static int last_cpu_in_non_cpu_pd[PSCI_NUM_NON_CPU_PWR_DOMAINS] = {
+		[0 ... PSCI_NUM_NON_CPU_PWR_DOMAINS - 1U] = -1};
+
+/*
+ * The following are used to store PSCI STAT values for
+ * CPU and non-CPU power domains.
+ */
+static psci_stat_t psci_cpu_stat[PLATFORM_CORE_COUNT]
+				[PLAT_MAX_PWR_LVL_STATES];
+static psci_stat_t psci_non_cpu_stat[PSCI_NUM_NON_CPU_PWR_DOMAINS]
+				[PLAT_MAX_PWR_LVL_STATES];
+
+/*
+ * This function returns the index into the `psci_stat_t` array given the
+ * local power state and power domain level. If the platform implements the
+ * `get_pwr_lvl_state_idx` pm hook, then that will be used to return the index.
+ */
+static int get_stat_idx(plat_local_state_t local_state, unsigned int pwr_lvl)
+{
+	int idx;
+
+	if (psci_plat_pm_ops->get_pwr_lvl_state_idx == NULL) {
+		assert(PLAT_MAX_PWR_LVL_STATES == 2U);
+		if (is_local_state_retn(local_state) != 0)
+			return 0;
+
+		assert(is_local_state_off(local_state) != 0);
+		return 1;
+	}
+
+	idx = psci_plat_pm_ops->get_pwr_lvl_state_idx(local_state, pwr_lvl);
+	assert((idx >= 0) && (idx < (int) PLAT_MAX_PWR_LVL_STATES));
+	return idx;
+}
+
+/*******************************************************************************
+ * This function is passed the target local power states for each power
+ * domain (state_info) between the current CPU domain and its ancestors until
+ * the target power level (end_pwrlvl).
+ *
+ * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
+ * updates the `last_cpu_in_non_cpu_pd[]` with the id of the last CPU to power
+ * down.
+ *
+ * This function will only be invoked with the data cache enabled and while
+ * powering down a core.
+ ******************************************************************************/
+void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
+				const psci_power_state_t *state_info)
+{
+	unsigned int lvl, parent_idx;
+	unsigned int cpu_idx = plat_my_core_pos();
+
+	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
+	assert(state_info != NULL);
+
+	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
+
+		/* Break early if the target power state is RUN */
+		if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0)
+			break;
+
+		/*
+		 * The power domain is entering a low power state, so this is
+		 * the last CPU for this power domain.
+		 */
+		last_cpu_in_non_cpu_pd[parent_idx] = (int)cpu_idx;
+
+		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+	}
+
+}
+
+/*******************************************************************************
+ * This function updates the PSCI STATs (residency time and count) for CPU
+ * and non-CPU power domains.
+
+/*******************************************************************************
+ * This function updates the PSCI STATS (residency time and count) for CPU
+ * and non-CPU power domains.
+ * It is called with caches enabled and locks acquired (for non-CPU domains).
+ ******************************************************************************/
+void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
+                        const psci_power_state_t *state_info)
+{
+        unsigned int lvl, parent_idx;
+        unsigned int cpu_idx = plat_my_core_pos();
+        int stat_idx;
+        plat_local_state_t local_state;
+        u_register_t residency;
+
+        assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
+        assert(state_info != NULL);
+
+        /* Get the index into the stats array */
+        local_state = state_info->pwr_domain_state[PSCI_CPU_PWR_LVL];
+        stat_idx = get_stat_idx(local_state, PSCI_CPU_PWR_LVL);
+
+        /* Call into platform interface to calculate residency. */
+        residency = plat_psci_stat_get_residency(PSCI_CPU_PWR_LVL,
+                                                 state_info, cpu_idx);
+
+        /* Update CPU stats. */
+        psci_cpu_stat[cpu_idx][stat_idx].residency += residency;
+        psci_cpu_stat[cpu_idx][stat_idx].count++;
+
+        /*
+         * Check what power domains above CPU were off
+         * prior to this CPU powering on.
+         */
+        parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+        /* Return early if this is the first power up. */
+        if (last_cpu_in_non_cpu_pd[parent_idx] == -1)
+                return;
+
+        for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
+                local_state = state_info->pwr_domain_state[lvl];
+                if (is_local_state_run(local_state) != 0) {
+                        /* Break early */
+                        break;
+                }
+
+                assert(last_cpu_in_non_cpu_pd[parent_idx] != -1);
+
+                /* Call into platform interface to calculate residency. */
+                residency = plat_psci_stat_get_residency(lvl, state_info,
+                        (unsigned int)last_cpu_in_non_cpu_pd[parent_idx]);
+
+                /* Initialize back to reset value */
+                last_cpu_in_non_cpu_pd[parent_idx] = -1;
+
+                /* Get the index into the stats array */
+                stat_idx = get_stat_idx(local_state, lvl);
+
+                /* Update non-CPU stats */
+                psci_non_cpu_stat[parent_idx][stat_idx].residency += residency;
+                psci_non_cpu_stat[parent_idx][stat_idx].count++;
+
+                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+        }
+
+}
+
+/*******************************************************************************
+ * This function returns the appropriate count and residency time of the
+ * local state for the highest power level expressed in the `power_state`
+ * for the node represented by `target_cpu`.
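+ *
+ * For example, a `power_state` that resolves to a cluster-level state is
+ * accounted against that cluster's psci_non_cpu_stat[] entry, while a
+ * core-only state reads back the core's psci_cpu_stat[] entry.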
+ ******************************************************************************/ +static int psci_get_stat(u_register_t target_cpu, unsigned int power_state, + psci_stat_t *psci_stat) +{ + int rc; + unsigned int pwrlvl, lvl, parent_idx, target_idx; + int stat_idx; + psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} }; + plat_local_state_t local_state; + + /* Determine the cpu index */ + target_idx = (unsigned int) plat_core_pos_by_mpidr(target_cpu); + + /* Validate the power_state parameter */ + if (psci_plat_pm_ops->translate_power_state_by_mpidr == NULL) + rc = psci_validate_power_state(power_state, &state_info); + else + rc = psci_plat_pm_ops->translate_power_state_by_mpidr( + target_cpu, power_state, &state_info); + + if (rc != PSCI_E_SUCCESS) + return PSCI_E_INVALID_PARAMS; + + /* Find the highest power level */ + pwrlvl = psci_find_target_suspend_lvl(&state_info); + if (pwrlvl == PSCI_INVALID_PWR_LVL) { + ERROR("Invalid target power level for PSCI statistics operation\n"); + panic(); + } + + /* Get the index into the stats array */ + local_state = state_info.pwr_domain_state[pwrlvl]; + stat_idx = get_stat_idx(local_state, pwrlvl); + + if (pwrlvl > PSCI_CPU_PWR_LVL) { + /* Get the power domain index */ + parent_idx = SPECULATION_SAFE_VALUE(psci_cpu_pd_nodes[target_idx].parent_node); + for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl < pwrlvl; lvl++) + parent_idx = SPECULATION_SAFE_VALUE(psci_non_cpu_pd_nodes[parent_idx].parent_node); + + /* Get the non cpu power domain stats */ + *psci_stat = psci_non_cpu_stat[parent_idx][stat_idx]; + } else { + /* Get the cpu power domain stats */ + *psci_stat = psci_cpu_stat[target_idx][stat_idx]; + } + + return PSCI_E_SUCCESS; +} + +/* This is the top level function for PSCI_STAT_RESIDENCY SMC. */ +u_register_t psci_stat_residency(u_register_t target_cpu, + unsigned int power_state) +{ + psci_stat_t psci_stat; + + /* Validate the target cpu */ + if (!is_valid_mpidr(target_cpu)) + return 0; + + int rc = psci_get_stat(target_cpu, power_state, &psci_stat); + + if (rc == PSCI_E_SUCCESS) + return psci_stat.residency; + else + return 0; +} + +/* This is the top level function for PSCI_STAT_COUNT SMC. */ +u_register_t psci_stat_count(u_register_t target_cpu, + unsigned int power_state) +{ + psci_stat_t psci_stat; + + /* Validate the target cpu */ + if (!is_valid_mpidr(target_cpu)) + return 0; + + int rc = psci_get_stat(target_cpu, power_state, &psci_stat); + + if (rc == PSCI_E_SUCCESS) + return psci_stat.count; + else + return 0; +} diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c new file mode 100644 index 0000000..cb12b83 --- /dev/null +++ b/lib/psci/psci_suspend.c @@ -0,0 +1,370 @@ +/* + * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <stddef.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <common/bl_common.h> +#include <common/debug.h> +#include <context.h> +#include <lib/el3_runtime/context_mgmt.h> +#include <lib/el3_runtime/cpu_data.h> +#include <lib/el3_runtime/pubsub_events.h> +#include <lib/pmf/pmf.h> +#include <lib/runtime_instr.h> +#include <plat/common/platform.h> + +#include "psci_private.h" + +/******************************************************************************* + * This function does generic and platform specific operations after a wake-up + * from standby/retention states at multiple power levels. 
+ ******************************************************************************/ +static void psci_suspend_to_standby_finisher(unsigned int cpu_idx, + unsigned int end_pwrlvl) +{ + unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0}; + psci_power_state_t state_info; + + /* Get the parent nodes */ + psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes); + + psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes); + + /* + * Find out which retention states this CPU has exited from until the + * 'end_pwrlvl'. The exit retention state could be deeper than the entry + * state as a result of state coordination amongst other CPUs post wfi. + */ + psci_get_target_local_pwr_states(end_pwrlvl, &state_info); + +#if ENABLE_PSCI_STAT + plat_psci_stat_accounting_stop(&state_info); + psci_stats_update_pwr_up(end_pwrlvl, &state_info); +#endif + + /* + * Plat. management: Allow the platform to do operations + * on waking up from retention. + */ + psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info); + + /* + * Set the requested and target state of this CPU and all the higher + * power domain levels for this CPU to run. + */ + psci_set_pwr_domains_to_run(end_pwrlvl); + + psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes); +} + +/******************************************************************************* + * This function does generic and platform specific suspend to power down + * operations. + ******************************************************************************/ +static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl, + const entry_point_info_t *ep, + const psci_power_state_t *state_info) +{ + unsigned int max_off_lvl = psci_find_max_off_lvl(state_info); + + PUBLISH_EVENT(psci_suspend_pwrdown_start); + +#if PSCI_OS_INIT_MODE +#ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL + end_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL; +#else + end_pwrlvl = PLAT_MAX_PWR_LVL; +#endif +#endif + + /* Save PSCI target power level for the suspend finisher handler */ + psci_set_suspend_pwrlvl(end_pwrlvl); + + /* + * Flush the target power level as it might be accessed on power up with + * Data cache disabled. + */ + psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl); + + /* + * Call the cpu suspend handler registered by the Secure Payload + * Dispatcher to let it do any book-keeping. If the handler encounters an + * error, it's expected to assert within + */ + if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend != NULL)) + psci_spd_pm->svc_suspend(max_off_lvl); + +#if !HW_ASSISTED_COHERENCY + /* + * Plat. management: Allow the platform to perform any early + * actions required to power down the CPU. This might be useful for + * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these + * actions with data caches enabled. + */ + if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early != NULL) + psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info); +#endif + + /* + * Store the re-entry information for the non-secure world. + */ + cm_init_my_context(ep); + +#if ENABLE_RUNTIME_INSTRUMENTATION + + /* + * Flush cache line so that even if CPU power down happens + * the timestamp update is reflected in memory. + */ + PMF_CAPTURE_TIMESTAMP(rt_instr_svc, + RT_INSTR_ENTER_CFLUSH, + PMF_CACHE_MAINT); +#endif + + /* + * Arch. management. Initiate power down sequence. + * TODO : Introduce a mechanism to query the cache level to flush + * and the cpu-ops power down to perform from the platform. 
+ */ + psci_pwrdown_cpu(max_off_lvl); + +#if ENABLE_RUNTIME_INSTRUMENTATION + PMF_CAPTURE_TIMESTAMP(rt_instr_svc, + RT_INSTR_EXIT_CFLUSH, + PMF_NO_CACHE_MAINT); +#endif +} + +/******************************************************************************* + * Top level handler which is called when a cpu wants to suspend its execution. + * It is assumed that along with suspending the cpu power domain, power domains + * at higher levels until the target power level will be suspended as well. It + * coordinates with the platform to negotiate the target state for each of + * the power domain level till the target power domain level. It then performs + * generic, architectural, platform setup and state management required to + * suspend that power domain level and power domain levels below it. + * e.g. For a cpu that's to be suspended, it could mean programming the + * power controller whereas for a cluster that's to be suspended, it will call + * the platform specific code which will disable coherency at the interconnect + * level if the cpu is the last in the cluster and also the program the power + * controller. + * + * All the required parameter checks are performed at the beginning and after + * the state transition has been done, no further error is expected and it is + * not possible to undo any of the actions taken beyond that point. + ******************************************************************************/ +int psci_cpu_suspend_start(const entry_point_info_t *ep, + unsigned int end_pwrlvl, + psci_power_state_t *state_info, + unsigned int is_power_down_state) +{ + int rc = PSCI_E_SUCCESS; + bool skip_wfi = false; + unsigned int idx = plat_my_core_pos(); + unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0}; + + /* + * This function must only be called on platforms where the + * CPU_SUSPEND platform hooks have been implemented. + */ + assert((psci_plat_pm_ops->pwr_domain_suspend != NULL) && + (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL)); + + /* Get the parent nodes */ + psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes); + + /* + * This function acquires the lock corresponding to each power + * level so that by the time all locks are taken, the system topology + * is snapshot and state management can be done safely. + */ + psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes); + + /* + * We check if there are any pending interrupts after the delay + * introduced by lock contention to increase the chances of early + * detection that a wake-up interrupt has fired. + */ + if (read_isr_el1() != 0U) { + skip_wfi = true; + goto exit; + } + +#if PSCI_OS_INIT_MODE + if (psci_suspend_mode == OS_INIT) { + /* + * This function validates the requested state info for + * OS-initiated mode. + */ + rc = psci_validate_state_coordination(end_pwrlvl, state_info); + if (rc != PSCI_E_SUCCESS) { + skip_wfi = true; + goto exit; + } + } else { +#endif + /* + * This function is passed the requested state info and + * it returns the negotiated state info for each power level upto + * the end level specified. 
+ */ + psci_do_state_coordination(end_pwrlvl, state_info); +#if PSCI_OS_INIT_MODE + } +#endif + +#if PSCI_OS_INIT_MODE + if (psci_plat_pm_ops->pwr_domain_validate_suspend != NULL) { + rc = psci_plat_pm_ops->pwr_domain_validate_suspend(state_info); + if (rc != PSCI_E_SUCCESS) { + skip_wfi = true; + goto exit; + } + } +#endif + + /* Update the target state in the power domain nodes */ + psci_set_target_local_pwr_states(end_pwrlvl, state_info); + +#if ENABLE_PSCI_STAT + /* Update the last cpu for each level till end_pwrlvl */ + psci_stats_update_pwr_down(end_pwrlvl, state_info); +#endif + + if (is_power_down_state != 0U) + psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info); + + /* + * Plat. management: Allow the platform to perform the + * necessary actions to turn off this cpu e.g. set the + * platform defined mailbox with the psci entrypoint, + * program the power controller etc. + */ + + psci_plat_pm_ops->pwr_domain_suspend(state_info); + +#if ENABLE_PSCI_STAT + plat_psci_stat_accounting_start(state_info); +#endif + +exit: + /* + * Release the locks corresponding to each power level in the + * reverse order to which they were acquired. + */ + psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes); + + if (skip_wfi) { + return rc; + } + + if (is_power_down_state != 0U) { +#if ENABLE_RUNTIME_INSTRUMENTATION + + /* + * Update the timestamp with cache off. We assume this + * timestamp can only be read from the current CPU and the + * timestamp cache line will be flushed before return to + * normal world on wakeup. + */ + PMF_CAPTURE_TIMESTAMP(rt_instr_svc, + RT_INSTR_ENTER_HW_LOW_PWR, + PMF_NO_CACHE_MAINT); +#endif + + /* The function calls below must not return */ + if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL) + psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info); + else + psci_power_down_wfi(); + } + +#if ENABLE_RUNTIME_INSTRUMENTATION + PMF_CAPTURE_TIMESTAMP(rt_instr_svc, + RT_INSTR_ENTER_HW_LOW_PWR, + PMF_NO_CACHE_MAINT); +#endif + + /* + * We will reach here if only retention/standby states have been + * requested at multiple power levels. This means that the cpu + * context will be preserved. + */ + wfi(); + +#if ENABLE_RUNTIME_INSTRUMENTATION + PMF_CAPTURE_TIMESTAMP(rt_instr_svc, + RT_INSTR_EXIT_HW_LOW_PWR, + PMF_NO_CACHE_MAINT); +#endif + + /* + * After we wake up from context retaining suspend, call the + * context retaining suspend finisher. + */ + psci_suspend_to_standby_finisher(idx, end_pwrlvl); + + return rc; +} + +/******************************************************************************* + * The following functions finish an earlier suspend request. They + * are called by the common finisher routine in psci_common.c. The `state_info` + * is the psci_power_state from which this CPU has woken up from. + ******************************************************************************/ +void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *state_info) +{ + unsigned int counter_freq; + unsigned int max_off_lvl; + + /* Ensure we have been woken up from a suspended state */ + assert((psci_get_aff_info_state() == AFF_STATE_ON) && + (is_local_state_off( + state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]) != 0)); + + /* + * Plat. management: Perform the platform specific actions + * before we change the state of the cpu e.g. enabling the + * gic or zeroing the mailbox register. If anything goes + * wrong then assert as there is no way to recover from this + * situation. 
+ */ + psci_plat_pm_ops->pwr_domain_suspend_finish(state_info); + +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + /* Arch. management: Enable the data cache, stack memory maintenance. */ + psci_do_pwrup_cache_maintenance(); +#endif + + /* Re-init the cntfrq_el0 register */ + counter_freq = plat_get_syscnt_freq2(); + write_cntfrq_el0(counter_freq); + +#if ENABLE_PAUTH + /* Store APIAKey_EL1 key */ + set_cpu_data(apiakey[0], read_apiakeylo_el1()); + set_cpu_data(apiakey[1], read_apiakeyhi_el1()); +#endif /* ENABLE_PAUTH */ + + /* + * Call the cpu suspend finish handler registered by the Secure Payload + * Dispatcher to let it do any bookeeping. If the handler encounters an + * error, it's expected to assert within + */ + if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend_finish != NULL)) { + max_off_lvl = psci_find_max_off_lvl(state_info); + assert(max_off_lvl != PSCI_INVALID_PWR_LVL); + psci_spd_pm->svc_suspend_finish(max_off_lvl); + } + + /* Invalidate the suspend level for the cpu */ + psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL); + + PUBLISH_EVENT(psci_suspend_pwrdown_finish); +} diff --git a/lib/psci/psci_system_off.c b/lib/psci/psci_system_off.c new file mode 100644 index 0000000..002392c --- /dev/null +++ b/lib/psci/psci_system_off.c @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <stddef.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <drivers/console.h> +#include <plat/common/platform.h> + +#include "psci_private.h" + +void __dead2 psci_system_off(void) +{ + psci_print_power_domain_map(); + + assert(psci_plat_pm_ops->system_off != NULL); + + /* Notify the Secure Payload Dispatcher */ + if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_system_off != NULL)) { + psci_spd_pm->svc_system_off(); + } + + console_flush(); + + /* Call the platform specific hook */ + psci_plat_pm_ops->system_off(); + + /* This function does not return. We should never get here */ +} + +void __dead2 psci_system_reset(void) +{ + psci_print_power_domain_map(); + + assert(psci_plat_pm_ops->system_reset != NULL); + + /* Notify the Secure Payload Dispatcher */ + if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_system_reset != NULL)) { + psci_spd_pm->svc_system_reset(); + } + + console_flush(); + + /* Call the platform specific hook */ + psci_plat_pm_ops->system_reset(); + + /* This function does not return. We should never get here */ +} + +u_register_t psci_system_reset2(uint32_t reset_type, u_register_t cookie) +{ + unsigned int is_vendor; + + psci_print_power_domain_map(); + + assert(psci_plat_pm_ops->system_reset2 != NULL); + + is_vendor = (reset_type >> PSCI_RESET2_TYPE_VENDOR_SHIFT) & 1U; + if (is_vendor == 0U) { + /* + * Only WARM_RESET is allowed for architectural type resets. + */ + if (reset_type != PSCI_RESET2_SYSTEM_WARM_RESET) + return (u_register_t) PSCI_E_INVALID_PARAMS; + if ((psci_plat_pm_ops->write_mem_protect != NULL) && + (psci_plat_pm_ops->write_mem_protect(0) < 0)) { + return (u_register_t) PSCI_E_NOT_SUPPORTED; + } + } + + /* Notify the Secure Payload Dispatcher */ + if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_system_reset != NULL)) { + psci_spd_pm->svc_system_reset(); + } + console_flush(); + + return (u_register_t) + psci_plat_pm_ops->system_reset2((int) is_vendor, reset_type, + cookie); +} |