Diffstat (limited to 'drivers/firmware/psci')
-rw-r--r--  drivers/firmware/psci/Kconfig        |  14
-rw-r--r--  drivers/firmware/psci/Makefile       |   4
-rw-r--r--  drivers/firmware/psci/psci.c         | 779
-rw-r--r--  drivers/firmware/psci/psci_checker.c | 492
4 files changed, 1289 insertions(+), 0 deletions(-)
diff --git a/drivers/firmware/psci/Kconfig b/drivers/firmware/psci/Kconfig
new file mode 100644
index 000000000..97944168b
--- /dev/null
+++ b/drivers/firmware/psci/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ARM_PSCI_FW
+	bool
+
+config ARM_PSCI_CHECKER
+	bool "ARM PSCI checker"
+	depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
+	help
+	  Run the PSCI checker during startup. This checks that hotplug and
+	  suspend operations work correctly when using PSCI.
+
+	  The torture tests may interfere with the PSCI checker by turning CPUs
+	  on and off through hotplug, so for now torture tests and PSCI checker
+	  are mutually exclusive.
diff --git a/drivers/firmware/psci/Makefile b/drivers/firmware/psci/Makefile
new file mode 100644
index 000000000..1956b8824
--- /dev/null
+++ b/drivers/firmware/psci/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_ARM_PSCI_FW)	+= psci.o
+obj-$(CONFIG_ARM_PSCI_CHECKER)	+= psci_checker.o
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
new file mode 100644
index 000000000..f78249fe2
--- /dev/null
+++ b/drivers/firmware/psci/psci.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/cpuidle.h>
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/psci.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+
+#include <uapi/linux/psci.h>
+
+#include <asm/cpuidle.h>
+#include <asm/cputype.h>
+#include <asm/hypervisor.h>
+#include <asm/system_misc.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+
+/*
+ * While a 64-bit OS can make calls with SMC32 calling conventions, for some
+ * calls it is necessary to use SMC64 to pass or return 64-bit values.
+ * For such calls PSCI_FN_NATIVE(version, name) will choose the appropriate
+ * (native-width) function ID.
+ */
+#ifdef CONFIG_64BIT
+#define PSCI_FN_NATIVE(version, name)	PSCI_##version##_FN64_##name
+#else
+#define PSCI_FN_NATIVE(version, name)	PSCI_##version##_FN_##name
+#endif
+
+/*
+ * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
+ * calls to its resident CPU, so we must avoid issuing those. We never migrate
+ * a Trusted OS even if it claims to be capable of migration -- doing so will
+ * require cooperation with a Trusted OS driver.
+ */ +static int resident_cpu = -1; +struct psci_operations psci_ops; +static enum arm_smccc_conduit psci_conduit = SMCCC_CONDUIT_NONE; + +bool psci_tos_resident_on(int cpu) +{ + return cpu == resident_cpu; +} + +typedef unsigned long (psci_fn)(unsigned long, unsigned long, + unsigned long, unsigned long); +static psci_fn *invoke_psci_fn; + +static struct psci_0_1_function_ids psci_0_1_function_ids; + +struct psci_0_1_function_ids get_psci_0_1_function_ids(void) +{ + return psci_0_1_function_ids; +} + +#define PSCI_0_2_POWER_STATE_MASK \ + (PSCI_0_2_POWER_STATE_ID_MASK | \ + PSCI_0_2_POWER_STATE_TYPE_MASK | \ + PSCI_0_2_POWER_STATE_AFFL_MASK) + +#define PSCI_1_0_EXT_POWER_STATE_MASK \ + (PSCI_1_0_EXT_POWER_STATE_ID_MASK | \ + PSCI_1_0_EXT_POWER_STATE_TYPE_MASK) + +static u32 psci_cpu_suspend_feature; +static bool psci_system_reset2_supported; + +static inline bool psci_has_ext_power_state(void) +{ + return psci_cpu_suspend_feature & + PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK; +} + +bool psci_has_osi_support(void) +{ + return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED; +} + +static inline bool psci_power_state_loses_context(u32 state) +{ + const u32 mask = psci_has_ext_power_state() ? + PSCI_1_0_EXT_POWER_STATE_TYPE_MASK : + PSCI_0_2_POWER_STATE_TYPE_MASK; + + return state & mask; +} + +bool psci_power_state_is_valid(u32 state) +{ + const u32 valid_mask = psci_has_ext_power_state() ? + PSCI_1_0_EXT_POWER_STATE_MASK : + PSCI_0_2_POWER_STATE_MASK; + + return !(state & ~valid_mask); +} + +static __always_inline unsigned long +__invoke_psci_fn_hvc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + +static __always_inline unsigned long +__invoke_psci_fn_smc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + +static __always_inline int psci_to_linux_errno(int errno) +{ + switch (errno) { + case PSCI_RET_SUCCESS: + return 0; + case PSCI_RET_NOT_SUPPORTED: + return -EOPNOTSUPP; + case PSCI_RET_INVALID_PARAMS: + case PSCI_RET_INVALID_ADDRESS: + return -EINVAL; + case PSCI_RET_DENIED: + return -EPERM; + } + + return -EINVAL; +} + +static u32 psci_0_1_get_version(void) +{ + return PSCI_VERSION(0, 1); +} + +static u32 psci_0_2_get_version(void) +{ + return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0); +} + +int psci_set_osi_mode(bool enable) +{ + unsigned long suspend_mode; + int err; + + suspend_mode = enable ? PSCI_1_0_SUSPEND_MODE_OSI : + PSCI_1_0_SUSPEND_MODE_PC; + + err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, suspend_mode, 0, 0); + if (err < 0) + pr_warn("failed to set %s mode: %d\n", enable ? 
"OSI" : "PC", err); + return psci_to_linux_errno(err); +} + +static __always_inline int +__psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point) +{ + int err; + + err = invoke_psci_fn(fn, state, entry_point, 0); + return psci_to_linux_errno(err); +} + +static __always_inline int +psci_0_1_cpu_suspend(u32 state, unsigned long entry_point) +{ + return __psci_cpu_suspend(psci_0_1_function_ids.cpu_suspend, + state, entry_point); +} + +static __always_inline int +psci_0_2_cpu_suspend(u32 state, unsigned long entry_point) +{ + return __psci_cpu_suspend(PSCI_FN_NATIVE(0_2, CPU_SUSPEND), + state, entry_point); +} + +static int __psci_cpu_off(u32 fn, u32 state) +{ + int err; + + err = invoke_psci_fn(fn, state, 0, 0); + return psci_to_linux_errno(err); +} + +static int psci_0_1_cpu_off(u32 state) +{ + return __psci_cpu_off(psci_0_1_function_ids.cpu_off, state); +} + +static int psci_0_2_cpu_off(u32 state) +{ + return __psci_cpu_off(PSCI_0_2_FN_CPU_OFF, state); +} + +static int __psci_cpu_on(u32 fn, unsigned long cpuid, unsigned long entry_point) +{ + int err; + + err = invoke_psci_fn(fn, cpuid, entry_point, 0); + return psci_to_linux_errno(err); +} + +static int psci_0_1_cpu_on(unsigned long cpuid, unsigned long entry_point) +{ + return __psci_cpu_on(psci_0_1_function_ids.cpu_on, cpuid, entry_point); +} + +static int psci_0_2_cpu_on(unsigned long cpuid, unsigned long entry_point) +{ + return __psci_cpu_on(PSCI_FN_NATIVE(0_2, CPU_ON), cpuid, entry_point); +} + +static int __psci_migrate(u32 fn, unsigned long cpuid) +{ + int err; + + err = invoke_psci_fn(fn, cpuid, 0, 0); + return psci_to_linux_errno(err); +} + +static int psci_0_1_migrate(unsigned long cpuid) +{ + return __psci_migrate(psci_0_1_function_ids.migrate, cpuid); +} + +static int psci_0_2_migrate(unsigned long cpuid) +{ + return __psci_migrate(PSCI_FN_NATIVE(0_2, MIGRATE), cpuid); +} + +static int psci_affinity_info(unsigned long target_affinity, + unsigned long lowest_affinity_level) +{ + return invoke_psci_fn(PSCI_FN_NATIVE(0_2, AFFINITY_INFO), + target_affinity, lowest_affinity_level, 0); +} + +static int psci_migrate_info_type(void) +{ + return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0); +} + +static unsigned long psci_migrate_info_up_cpu(void) +{ + return invoke_psci_fn(PSCI_FN_NATIVE(0_2, MIGRATE_INFO_UP_CPU), + 0, 0, 0); +} + +static void set_conduit(enum arm_smccc_conduit conduit) +{ + switch (conduit) { + case SMCCC_CONDUIT_HVC: + invoke_psci_fn = __invoke_psci_fn_hvc; + break; + case SMCCC_CONDUIT_SMC: + invoke_psci_fn = __invoke_psci_fn_smc; + break; + default: + WARN(1, "Unexpected PSCI conduit %d\n", conduit); + } + + psci_conduit = conduit; +} + +static int get_set_conduit_method(const struct device_node *np) +{ + const char *method; + + pr_info("probing for conduit method from DT.\n"); + + if (of_property_read_string(np, "method", &method)) { + pr_warn("missing \"method\" property\n"); + return -ENXIO; + } + + if (!strcmp("hvc", method)) { + set_conduit(SMCCC_CONDUIT_HVC); + } else if (!strcmp("smc", method)) { + set_conduit(SMCCC_CONDUIT_SMC); + } else { + pr_warn("invalid \"method\" property: %s\n", method); + return -EINVAL; + } + return 0; +} + +static int psci_sys_reset(struct notifier_block *nb, unsigned long action, + void *data) +{ + if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) && + psci_system_reset2_supported) { + /* + * reset_type[31] = 0 (architectural) + * reset_type[30:0] = 0 (SYSTEM_WARM_RESET) + * cookie = 0 (ignored by the implementation) + */ + 
invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), 0, 0, 0); + } else { + invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0); + } + + return NOTIFY_DONE; +} + +static struct notifier_block psci_sys_reset_nb = { + .notifier_call = psci_sys_reset, + .priority = 129, +}; + +static void psci_sys_poweroff(void) +{ + invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); +} + +static int psci_features(u32 psci_func_id) +{ + return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES, + psci_func_id, 0, 0); +} + +#ifdef CONFIG_DEBUG_FS + +#define PSCI_ID(ver, _name) \ + { .fn = PSCI_##ver##_FN_##_name, .name = #_name, } +#define PSCI_ID_NATIVE(ver, _name) \ + { .fn = PSCI_FN_NATIVE(ver, _name), .name = #_name, } + +/* A table of all optional functions */ +static const struct { + u32 fn; + const char *name; +} psci_fn_ids[] = { + PSCI_ID_NATIVE(0_2, MIGRATE), + PSCI_ID(0_2, MIGRATE_INFO_TYPE), + PSCI_ID_NATIVE(0_2, MIGRATE_INFO_UP_CPU), + PSCI_ID(1_0, CPU_FREEZE), + PSCI_ID_NATIVE(1_0, CPU_DEFAULT_SUSPEND), + PSCI_ID_NATIVE(1_0, NODE_HW_STATE), + PSCI_ID_NATIVE(1_0, SYSTEM_SUSPEND), + PSCI_ID(1_0, SET_SUSPEND_MODE), + PSCI_ID_NATIVE(1_0, STAT_RESIDENCY), + PSCI_ID_NATIVE(1_0, STAT_COUNT), + PSCI_ID_NATIVE(1_1, SYSTEM_RESET2), + PSCI_ID(1_1, MEM_PROTECT), + PSCI_ID_NATIVE(1_1, MEM_PROTECT_CHECK_RANGE), +}; + +static int psci_debugfs_read(struct seq_file *s, void *data) +{ + int feature, type, i; + u32 ver; + + ver = psci_ops.get_version(); + seq_printf(s, "PSCIv%d.%d\n", + PSCI_VERSION_MAJOR(ver), + PSCI_VERSION_MINOR(ver)); + + /* PSCI_FEATURES is available only starting from 1.0 */ + if (PSCI_VERSION_MAJOR(ver) < 1) + return 0; + + feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID); + if (feature != PSCI_RET_NOT_SUPPORTED) { + ver = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0); + seq_printf(s, "SMC Calling Convention v%d.%d\n", + PSCI_VERSION_MAJOR(ver), + PSCI_VERSION_MINOR(ver)); + } else { + seq_puts(s, "SMC Calling Convention v1.0 is assumed\n"); + } + + feature = psci_features(PSCI_FN_NATIVE(0_2, CPU_SUSPEND)); + if (feature < 0) { + seq_printf(s, "PSCI_FEATURES(CPU_SUSPEND) error (%d)\n", feature); + } else { + seq_printf(s, "OSI is %ssupported\n", + (feature & BIT(0)) ? "" : "not "); + seq_printf(s, "%s StateID format is used\n", + (feature & BIT(1)) ? "Extended" : "Original"); + } + + type = psci_ops.migrate_info_type(); + if (type == PSCI_0_2_TOS_UP_MIGRATE || + type == PSCI_0_2_TOS_UP_NO_MIGRATE) { + unsigned long cpuid; + + seq_printf(s, "Trusted OS %smigrate capable\n", + type == PSCI_0_2_TOS_UP_NO_MIGRATE ? 
"not " : ""); + cpuid = psci_migrate_info_up_cpu(); + seq_printf(s, "Trusted OS resident on physical CPU 0x%lx (#%d)\n", + cpuid, resident_cpu); + } else if (type == PSCI_0_2_TOS_MP) { + seq_puts(s, "Trusted OS migration not required\n"); + } else { + if (type != PSCI_RET_NOT_SUPPORTED) + seq_printf(s, "MIGRATE_INFO_TYPE returned unknown type (%d)\n", type); + } + + for (i = 0; i < ARRAY_SIZE(psci_fn_ids); i++) { + feature = psci_features(psci_fn_ids[i].fn); + if (feature == PSCI_RET_NOT_SUPPORTED) + continue; + if (feature < 0) + seq_printf(s, "PSCI_FEATURES(%s) error (%d)\n", + psci_fn_ids[i].name, feature); + else + seq_printf(s, "%s is supported\n", psci_fn_ids[i].name); + } + + return 0; +} + +static int psci_debugfs_open(struct inode *inode, struct file *f) +{ + return single_open(f, psci_debugfs_read, NULL); +} + +static const struct file_operations psci_debugfs_ops = { + .owner = THIS_MODULE, + .open = psci_debugfs_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek +}; + +static int __init psci_debugfs_init(void) +{ + if (!invoke_psci_fn || !psci_ops.get_version) + return 0; + + return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL, + &psci_debugfs_ops)); +} +late_initcall(psci_debugfs_init) +#endif + +#ifdef CONFIG_CPU_IDLE +static noinstr int psci_suspend_finisher(unsigned long state) +{ + u32 power_state = state; + phys_addr_t pa_cpu_resume; + + pa_cpu_resume = __pa_symbol_nodebug((unsigned long)cpu_resume); + + return psci_ops.cpu_suspend(power_state, pa_cpu_resume); +} + +int psci_cpu_suspend_enter(u32 state) +{ + int ret; + + if (!psci_power_state_loses_context(state)) { + struct arm_cpuidle_irq_context context; + + arm_cpuidle_save_irq_context(&context); + ret = psci_ops.cpu_suspend(state, 0); + arm_cpuidle_restore_irq_context(&context); + } else { + ret = cpu_suspend(state, psci_suspend_finisher); + } + + return ret; +} +#endif + +static int psci_system_suspend(unsigned long unused) +{ + phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume); + + return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), + pa_cpu_resume, 0, 0); +} + +static int psci_system_suspend_enter(suspend_state_t state) +{ + return cpu_suspend(0, psci_system_suspend); +} + +static const struct platform_suspend_ops psci_suspend_ops = { + .valid = suspend_valid_only_mem, + .enter = psci_system_suspend_enter, +}; + +static void __init psci_init_system_reset2(void) +{ + int ret; + + ret = psci_features(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2)); + + if (ret != PSCI_RET_NOT_SUPPORTED) + psci_system_reset2_supported = true; +} + +static void __init psci_init_system_suspend(void) +{ + int ret; + + if (!IS_ENABLED(CONFIG_SUSPEND)) + return; + + ret = psci_features(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND)); + + if (ret != PSCI_RET_NOT_SUPPORTED) + suspend_set_ops(&psci_suspend_ops); +} + +static void __init psci_init_cpu_suspend(void) +{ + int feature = psci_features(PSCI_FN_NATIVE(0_2, CPU_SUSPEND)); + + if (feature != PSCI_RET_NOT_SUPPORTED) + psci_cpu_suspend_feature = feature; +} + +/* + * Detect the presence of a resident Trusted OS which may cause CPU_OFF to + * return DENIED (which would be fatal). 
+ */ +static void __init psci_init_migrate(void) +{ + unsigned long cpuid; + int type, cpu = -1; + + type = psci_ops.migrate_info_type(); + + if (type == PSCI_0_2_TOS_MP) { + pr_info("Trusted OS migration not required\n"); + return; + } + + if (type == PSCI_RET_NOT_SUPPORTED) { + pr_info("MIGRATE_INFO_TYPE not supported.\n"); + return; + } + + if (type != PSCI_0_2_TOS_UP_MIGRATE && + type != PSCI_0_2_TOS_UP_NO_MIGRATE) { + pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type); + return; + } + + cpuid = psci_migrate_info_up_cpu(); + if (cpuid & ~MPIDR_HWID_BITMASK) { + pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n", + cpuid); + return; + } + + cpu = get_logical_index(cpuid); + resident_cpu = cpu >= 0 ? cpu : -1; + + pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid); +} + +static void __init psci_init_smccc(void) +{ + u32 ver = ARM_SMCCC_VERSION_1_0; + int feature; + + feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID); + + if (feature != PSCI_RET_NOT_SUPPORTED) { + u32 ret; + ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0); + if (ret >= ARM_SMCCC_VERSION_1_1) { + arm_smccc_version_init(ret, psci_conduit); + ver = ret; + } + } + + /* + * Conveniently, the SMCCC and PSCI versions are encoded the + * same way. No, this isn't accidental. + */ + pr_info("SMC Calling Convention v%d.%d\n", + PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver)); + +} + +static void __init psci_0_2_set_functions(void) +{ + pr_info("Using standard PSCI v0.2 function IDs\n"); + + psci_ops = (struct psci_operations){ + .get_version = psci_0_2_get_version, + .cpu_suspend = psci_0_2_cpu_suspend, + .cpu_off = psci_0_2_cpu_off, + .cpu_on = psci_0_2_cpu_on, + .migrate = psci_0_2_migrate, + .affinity_info = psci_affinity_info, + .migrate_info_type = psci_migrate_info_type, + }; + + register_restart_handler(&psci_sys_reset_nb); + + pm_power_off = psci_sys_poweroff; +} + +/* + * Probe function for PSCI firmware versions >= 0.2 + */ +static int __init psci_probe(void) +{ + u32 ver = psci_0_2_get_version(); + + pr_info("PSCIv%d.%d detected in firmware.\n", + PSCI_VERSION_MAJOR(ver), + PSCI_VERSION_MINOR(ver)); + + if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) { + pr_err("Conflicting PSCI version detected.\n"); + return -EINVAL; + } + + psci_0_2_set_functions(); + + psci_init_migrate(); + + if (PSCI_VERSION_MAJOR(ver) >= 1) { + psci_init_smccc(); + psci_init_cpu_suspend(); + psci_init_system_suspend(); + psci_init_system_reset2(); + kvm_init_hyp_services(); + } + + return 0; +} + +typedef int (*psci_initcall_t)(const struct device_node *); + +/* + * PSCI init function for PSCI versions >=0.2 + * + * Probe based on PSCI PSCI_VERSION function + */ +static int __init psci_0_2_init(const struct device_node *np) +{ + int err; + + err = get_set_conduit_method(np); + if (err) + return err; + + /* + * Starting with v0.2, the PSCI specification introduced a call + * (PSCI_VERSION) that allows probing the firmware version, so + * that PSCI function IDs and version specific initialization + * can be carried out according to the specific version reported + * by firmware + */ + return psci_probe(); +} + +/* + * PSCI < v0.2 get PSCI Function IDs via DT. 
+ */ +static int __init psci_0_1_init(const struct device_node *np) +{ + u32 id; + int err; + + err = get_set_conduit_method(np); + if (err) + return err; + + pr_info("Using PSCI v0.1 Function IDs from DT\n"); + + psci_ops.get_version = psci_0_1_get_version; + + if (!of_property_read_u32(np, "cpu_suspend", &id)) { + psci_0_1_function_ids.cpu_suspend = id; + psci_ops.cpu_suspend = psci_0_1_cpu_suspend; + } + + if (!of_property_read_u32(np, "cpu_off", &id)) { + psci_0_1_function_ids.cpu_off = id; + psci_ops.cpu_off = psci_0_1_cpu_off; + } + + if (!of_property_read_u32(np, "cpu_on", &id)) { + psci_0_1_function_ids.cpu_on = id; + psci_ops.cpu_on = psci_0_1_cpu_on; + } + + if (!of_property_read_u32(np, "migrate", &id)) { + psci_0_1_function_ids.migrate = id; + psci_ops.migrate = psci_0_1_migrate; + } + + return 0; +} + +static int __init psci_1_0_init(const struct device_node *np) +{ + int err; + + err = psci_0_2_init(np); + if (err) + return err; + + if (psci_has_osi_support()) { + pr_info("OSI mode supported.\n"); + + /* Default to PC mode. */ + psci_set_osi_mode(false); + } + + return 0; +} + +static const struct of_device_id psci_of_match[] __initconst = { + { .compatible = "arm,psci", .data = psci_0_1_init}, + { .compatible = "arm,psci-0.2", .data = psci_0_2_init}, + { .compatible = "arm,psci-1.0", .data = psci_1_0_init}, + {}, +}; + +int __init psci_dt_init(void) +{ + struct device_node *np; + const struct of_device_id *matched_np; + psci_initcall_t init_fn; + int ret; + + np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); + + if (!np || !of_device_is_available(np)) + return -ENODEV; + + init_fn = (psci_initcall_t)matched_np->data; + ret = init_fn(np); + + of_node_put(np); + return ret; +} + +#ifdef CONFIG_ACPI +/* + * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's + * explicitly clarified in SBBR + */ +int __init psci_acpi_init(void) +{ + if (!acpi_psci_present()) { + pr_info("is not implemented in ACPI.\n"); + return -EOPNOTSUPP; + } + + pr_info("probing for conduit method from ACPI.\n"); + + if (acpi_psci_use_hvc()) + set_conduit(SMCCC_CONDUIT_HVC); + else + set_conduit(SMCCC_CONDUIT_SMC); + + return psci_probe(); +} +#endif diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c new file mode 100644 index 000000000..116eb465c --- /dev/null +++ b/drivers/firmware/psci/psci_checker.c @@ -0,0 +1,492 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * + * Copyright (C) 2016 ARM Limited + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/atomic.h> +#include <linux/completion.h> +#include <linux/cpu.h> +#include <linux/cpuidle.h> +#include <linux/cpu_pm.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <uapi/linux/sched/types.h> +#include <linux/module.h> +#include <linux/preempt.h> +#include <linux/psci.h> +#include <linux/slab.h> +#include <linux/tick.h> +#include <linux/topology.h> + +#include <asm/cpuidle.h> + +#include <uapi/linux/psci.h> + +#define NUM_SUSPEND_CYCLE (10) + +static unsigned int nb_available_cpus; +static int tos_resident_cpu = -1; + +static atomic_t nb_active_threads; +static struct completion suspend_threads_started = + COMPLETION_INITIALIZER(suspend_threads_started); +static struct completion suspend_threads_done = + COMPLETION_INITIALIZER(suspend_threads_done); + +/* + * We assume that PSCI operations are used if they are available. 
This is not + * necessarily true on arm64, since the decision is based on the + * "enable-method" property of each CPU in the DT, but given that there is no + * arch-specific way to check this, we assume that the DT is sensible. + */ +static int psci_ops_check(void) +{ + int migrate_type = -1; + int cpu; + + if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) { + pr_warn("Missing PSCI operations, aborting tests\n"); + return -EOPNOTSUPP; + } + + if (psci_ops.migrate_info_type) + migrate_type = psci_ops.migrate_info_type(); + + if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE || + migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) { + /* There is a UP Trusted OS, find on which core it resides. */ + for_each_online_cpu(cpu) + if (psci_tos_resident_on(cpu)) { + tos_resident_cpu = cpu; + break; + } + if (tos_resident_cpu == -1) + pr_warn("UP Trusted OS resides on no online CPU\n"); + } + + return 0; +} + +/* + * offlined_cpus is a temporary array but passing it as an argument avoids + * multiple allocations. + */ +static unsigned int down_and_up_cpus(const struct cpumask *cpus, + struct cpumask *offlined_cpus) +{ + int cpu; + int err = 0; + + cpumask_clear(offlined_cpus); + + /* Try to power down all CPUs in the mask. */ + for_each_cpu(cpu, cpus) { + int ret = remove_cpu(cpu); + + /* + * cpu_down() checks the number of online CPUs before the TOS + * resident CPU. + */ + if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) { + if (ret != -EBUSY) { + pr_err("Unexpected return code %d while trying " + "to power down last online CPU %d\n", + ret, cpu); + ++err; + } + } else if (cpu == tos_resident_cpu) { + if (ret != -EPERM) { + pr_err("Unexpected return code %d while trying " + "to power down TOS resident CPU %d\n", + ret, cpu); + ++err; + } + } else if (ret != 0) { + pr_err("Error occurred (%d) while trying " + "to power down CPU %d\n", ret, cpu); + ++err; + } + + if (ret == 0) + cpumask_set_cpu(cpu, offlined_cpus); + } + + /* Try to power up all the CPUs that have been offlined. */ + for_each_cpu(cpu, offlined_cpus) { + int ret = add_cpu(cpu); + + if (ret != 0) { + pr_err("Error occurred (%d) while trying " + "to power up CPU %d\n", ret, cpu); + ++err; + } else { + cpumask_clear_cpu(cpu, offlined_cpus); + } + } + + /* + * Something went bad at some point and some CPUs could not be turned + * back on. 
+ */ + WARN_ON(!cpumask_empty(offlined_cpus) || + num_online_cpus() != nb_available_cpus); + + return err; +} + +static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups) +{ + int i; + cpumask_var_t *cpu_groups = *pcpu_groups; + + for (i = 0; i < num; ++i) + free_cpumask_var(cpu_groups[i]); + kfree(cpu_groups); +} + +static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups) +{ + int num_groups = 0; + cpumask_var_t tmp, *cpu_groups; + + if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) + return -ENOMEM; + + cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups), + GFP_KERNEL); + if (!cpu_groups) { + free_cpumask_var(tmp); + return -ENOMEM; + } + + cpumask_copy(tmp, cpu_online_mask); + + while (!cpumask_empty(tmp)) { + const struct cpumask *cpu_group = + topology_core_cpumask(cpumask_any(tmp)); + + if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) { + free_cpumask_var(tmp); + free_cpu_groups(num_groups, &cpu_groups); + return -ENOMEM; + } + cpumask_copy(cpu_groups[num_groups++], cpu_group); + cpumask_andnot(tmp, tmp, cpu_group); + } + + free_cpumask_var(tmp); + *pcpu_groups = cpu_groups; + + return num_groups; +} + +static int hotplug_tests(void) +{ + int i, nb_cpu_group, err = -ENOMEM; + cpumask_var_t offlined_cpus, *cpu_groups; + char *page_buf; + + if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL)) + return err; + + nb_cpu_group = alloc_init_cpu_groups(&cpu_groups); + if (nb_cpu_group < 0) + goto out_free_cpus; + page_buf = (char *)__get_free_page(GFP_KERNEL); + if (!page_buf) + goto out_free_cpu_groups; + + /* + * Of course the last CPU cannot be powered down and cpu_down() should + * refuse doing that. + */ + pr_info("Trying to turn off and on again all CPUs\n"); + err = down_and_up_cpus(cpu_online_mask, offlined_cpus); + + /* + * Take down CPUs by cpu group this time. When the last CPU is turned + * off, the cpu group itself should shut down. + */ + for (i = 0; i < nb_cpu_group; ++i) { + ssize_t len = cpumap_print_to_pagebuf(true, page_buf, + cpu_groups[i]); + /* Remove trailing newline. */ + page_buf[len - 1] = '\0'; + pr_info("Trying to turn off and on again group %d (CPUs %s)\n", + i, page_buf); + err += down_and_up_cpus(cpu_groups[i], offlined_cpus); + } + + free_page((unsigned long)page_buf); +out_free_cpu_groups: + free_cpu_groups(nb_cpu_group, &cpu_groups); +out_free_cpus: + free_cpumask_var(offlined_cpus); + return err; +} + +static void dummy_callback(struct timer_list *unused) {} + +static int suspend_cpu(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + struct cpuidle_state *state = &drv->states[index]; + bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP; + int ret; + + arch_cpu_idle_enter(); + + if (broadcast) { + /* + * The local timer will be shut down, we need to enter tick + * broadcast. + */ + ret = tick_broadcast_enter(); + if (ret) { + /* + * In the absence of hardware broadcast mechanism, + * this CPU might be used to broadcast wakeups, which + * may be why entering tick broadcast has failed. + * There is little the kernel can do to work around + * that, so enter WFI instead (idle state 0). 
+ */ + cpu_do_idle(); + ret = 0; + goto out_arch_exit; + } + } + + ret = state->enter(dev, drv, index); + + if (broadcast) + tick_broadcast_exit(); + +out_arch_exit: + arch_cpu_idle_exit(); + + return ret; +} + +static int suspend_test_thread(void *arg) +{ + int cpu = (long)arg; + int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0; + struct cpuidle_device *dev; + struct cpuidle_driver *drv; + /* No need for an actual callback, we just want to wake up the CPU. */ + struct timer_list wakeup_timer; + + /* Wait for the main thread to give the start signal. */ + wait_for_completion(&suspend_threads_started); + + /* Set maximum priority to preempt all other threads on this CPU. */ + sched_set_fifo(current); + + dev = this_cpu_read(cpuidle_devices); + drv = cpuidle_get_cpu_driver(dev); + + pr_info("CPU %d entering suspend cycles, states 1 through %d\n", + cpu, drv->state_count - 1); + + timer_setup_on_stack(&wakeup_timer, dummy_callback, 0); + for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) { + int index; + /* + * Test all possible states, except 0 (which is usually WFI and + * doesn't use PSCI). + */ + for (index = 1; index < drv->state_count; ++index) { + int ret; + struct cpuidle_state *state = &drv->states[index]; + + /* + * Set the timer to wake this CPU up in some time (which + * should be largely sufficient for entering suspend). + * If the local tick is disabled when entering suspend, + * suspend_cpu() takes care of switching to a broadcast + * tick, so the timer will still wake us up. + */ + mod_timer(&wakeup_timer, jiffies + + usecs_to_jiffies(state->target_residency)); + + /* IRQs must be disabled during suspend operations. */ + local_irq_disable(); + + ret = suspend_cpu(dev, drv, index); + + /* + * We have woken up. Re-enable IRQs to handle any + * pending interrupt, do not wait until the end of the + * loop. + */ + local_irq_enable(); + + if (ret == index) { + ++nb_suspend; + } else if (ret >= 0) { + /* We did not enter the expected state. */ + ++nb_shallow_sleep; + } else { + pr_err("Failed to suspend CPU %d: error %d " + "(requested state %d, cycle %d)\n", + cpu, ret, index, i); + ++nb_err; + } + } + } + + /* + * Disable the timer to make sure that the timer will not trigger + * later. + */ + del_timer(&wakeup_timer); + destroy_timer_on_stack(&wakeup_timer); + + if (atomic_dec_return_relaxed(&nb_active_threads) == 0) + complete(&suspend_threads_done); + + for (;;) { + /* Needs to be set first to avoid missing a wakeup. */ + set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_park()) + break; + schedule(); + } + + pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n", + cpu, nb_suspend, nb_shallow_sleep, nb_err); + + kthread_parkme(); + + return nb_err; +} + +static int suspend_tests(void) +{ + int i, cpu, err = 0; + struct task_struct **threads; + int nb_threads = 0; + + threads = kmalloc_array(nb_available_cpus, sizeof(*threads), + GFP_KERNEL); + if (!threads) + return -ENOMEM; + + /* + * Stop cpuidle to prevent the idle tasks from entering a deep sleep + * mode, as it might interfere with the suspend threads on other CPUs. + * This does not prevent the suspend threads from using cpuidle (only + * the idle tasks check this status). Take the idle lock so that + * the cpuidle driver and device look-up can be carried out safely. + */ + cpuidle_pause_and_lock(); + + for_each_online_cpu(cpu) { + struct task_struct *thread; + /* Check that cpuidle is available on that CPU. 
*/ + struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); + + if (!dev || !drv) { + pr_warn("cpuidle not available on CPU %d, ignoring\n", + cpu); + continue; + } + + thread = kthread_create_on_cpu(suspend_test_thread, + (void *)(long)cpu, cpu, + "psci_suspend_test"); + if (IS_ERR(thread)) + pr_err("Failed to create kthread on CPU %d\n", cpu); + else + threads[nb_threads++] = thread; + } + + if (nb_threads < 1) { + err = -ENODEV; + goto out; + } + + atomic_set(&nb_active_threads, nb_threads); + + /* + * Wake up the suspend threads. To avoid the main thread being preempted + * before all the threads have been unparked, the suspend threads will + * wait for the completion of suspend_threads_started. + */ + for (i = 0; i < nb_threads; ++i) + wake_up_process(threads[i]); + complete_all(&suspend_threads_started); + + wait_for_completion(&suspend_threads_done); + + + /* Stop and destroy all threads, get return status. */ + for (i = 0; i < nb_threads; ++i) { + err += kthread_park(threads[i]); + err += kthread_stop(threads[i]); + } + out: + cpuidle_resume_and_unlock(); + kfree(threads); + return err; +} + +static int __init psci_checker(void) +{ + int ret; + + /* + * Since we're in an initcall, we assume that all the CPUs that all + * CPUs that can be onlined have been onlined. + * + * The tests assume that hotplug is enabled but nobody else is using it, + * otherwise the results will be unpredictable. However, since there + * is no userspace yet in initcalls, that should be fine, as long as + * no torture test is running at the same time (see Kconfig). + */ + nb_available_cpus = num_online_cpus(); + + /* Check PSCI operations are set up and working. */ + ret = psci_ops_check(); + if (ret) + return ret; + + pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus); + + pr_info("Starting hotplug tests\n"); + ret = hotplug_tests(); + if (ret == 0) + pr_info("Hotplug tests passed OK\n"); + else if (ret > 0) + pr_err("%d error(s) encountered in hotplug tests\n", ret); + else { + pr_err("Out of memory\n"); + return ret; + } + + pr_info("Starting suspend tests (%d cycles per state)\n", + NUM_SUSPEND_CYCLE); + ret = suspend_tests(); + if (ret == 0) + pr_info("Suspend tests passed OK\n"); + else if (ret > 0) + pr_err("%d error(s) encountered in suspend tests\n", ret); + else { + switch (ret) { + case -ENOMEM: + pr_err("Out of memory\n"); + break; + case -ENODEV: + pr_warn("Could not start suspend tests on any CPU\n"); + break; + } + } + + pr_info("PSCI checker completed\n"); + return ret < 0 ? ret : 0; +} +late_initcall(psci_checker); |