| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 17:39:57 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 17:39:57 +0000 |
| commit | dc50eab76b709d68175a358d6e23a5a3890764d3 (patch) | |
| tree | c754d0390db060af0213ff994f0ac310e4cfd6e9 /drivers/cpufreq | |
| parent | Adding debian version 6.6.15-2. (diff) | |
| download | linux-dc50eab76b709d68175a358d6e23a5a3890764d3.tar.xz linux-dc50eab76b709d68175a358d6e23a5a3890764d3.zip | |
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/cpufreq')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | drivers/cpufreq/Kconfig | 11 |
| -rw-r--r-- | drivers/cpufreq/Kconfig.arm | 6 |
| -rw-r--r-- | drivers/cpufreq/Makefile | 1 |
| -rw-r--r-- | drivers/cpufreq/cpufreq-dt-platdev.c | 7 |
| -rw-r--r-- | drivers/cpufreq/cpufreq.c | 7 |
| -rw-r--r-- | drivers/cpufreq/cpufreq_conservative.c | 3 |
| -rw-r--r-- | drivers/cpufreq/cpufreq_userspace.c | 76 |
| -rw-r--r-- | drivers/cpufreq/ia64-acpi-cpufreq.c | 353 |
| -rw-r--r-- | drivers/cpufreq/intel_pstate.c | 6 |
| -rw-r--r-- | drivers/cpufreq/pmac32-cpufreq.c | 7 |
| -rw-r--r-- | drivers/cpufreq/qcom-cpufreq-nvmem.c | 337 |
| -rw-r--r-- | drivers/cpufreq/scmi-cpufreq.c | 52 |
| -rw-r--r-- | drivers/cpufreq/tegra194-cpufreq.c | 151 |
| -rw-r--r-- | drivers/cpufreq/ti-cpufreq.c | 1 |
14 files changed, 506 insertions, 512 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index f429b9b37b..35efb53d54 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -239,17 +239,6 @@ if PPC32 || PPC64
 source "drivers/cpufreq/Kconfig.powerpc"
 endif
 
-if IA64
-config IA64_ACPI_CPUFREQ
-	tristate "ACPI Processor P-States driver"
-	depends on ACPI_PROCESSOR
-	help
-	  This driver adds a CPUFreq driver which utilizes the ACPI
-	  Processor Performance States.
-
-	  If in doubt, say N.
-endif
-
 if MIPS
 config BMIPS_CPUFREQ
 	tristate "BMIPS CPUfreq Driver"
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 123b4bbfcf..f911606897 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -90,7 +90,7 @@ config ARM_VEXPRESS_SPC_CPUFREQ
 
 config ARM_BRCMSTB_AVS_CPUFREQ
 	tristate "Broadcom STB AVS CPUfreq driver"
-	depends on ARCH_BRCMSTB || COMPILE_TEST
+	depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST
 	default y
 	help
 	  Some Broadcom STB SoCs use a co-processor running proprietary firmware
@@ -124,8 +124,8 @@ config ARM_IMX_CPUFREQ_DT
 	tristate "Freescale i.MX8M cpufreq support"
 	depends on ARCH_MXC && CPUFREQ_DT
 	help
-	  This adds cpufreq driver support for Freescale i.MX8M series SoCs,
-	  based on cpufreq-dt.
+	  This adds cpufreq driver support for Freescale i.MX7/i.MX8M
+	  series SoCs, based on cpufreq-dt.
 
 	  If in doubt, say N.
 
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index ef85107749..8d141c71b0 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -102,7 +102,6 @@ obj-$(CONFIG_POWERNV_CPUFREQ)	+= powernv-cpufreq.o
 ##################################################################################
 # Other platform drivers
 obj-$(CONFIG_BMIPS_CPUFREQ)		+= bmips-cpufreq.o
-obj-$(CONFIG_IA64_ACPI_CPUFREQ)	+= ia64-acpi-cpufreq.o
 obj-$(CONFIG_LOONGSON2_CPUFREQ)	+= loongson2_cpufreq.o
 obj-$(CONFIG_SH_CPU_FREQ)		+= sh-cpufreq.o
 obj-$(CONFIG_SPARC_US2E_CPUFREQ)	+= sparc-us2e-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index fb2875ce1f..bd1e1357ce 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -142,9 +142,11 @@ static const struct of_device_id blocklist[] __initconst = {
 	{ .compatible = "nvidia,tegra234", },
 
 	{ .compatible = "qcom,apq8096", },
+	{ .compatible = "qcom,msm8909", },
 	{ .compatible = "qcom,msm8996", },
 	{ .compatible = "qcom,msm8998", },
 	{ .compatible = "qcom,qcm2290", },
+	{ .compatible = "qcom,qcm6490", },
 	{ .compatible = "qcom,qcs404", },
 	{ .compatible = "qcom,qdu1000", },
 	{ .compatible = "qcom,sa8155p" },
@@ -176,8 +178,13 @@ static const struct of_device_id blocklist[] __initconst = {
 	{ .compatible = "ti,omap3", },
 	{ .compatible = "ti,am625", },
 	{ .compatible = "ti,am62a7", },
+	{ .compatible = "ti,am62p5", },
 
+	{ .compatible = "qcom,ipq5332", },
+	{ .compatible = "qcom,ipq6018", },
 	{ .compatible = "qcom,ipq8064", },
+	{ .compatible = "qcom,ipq8074", },
+	{ .compatible = "qcom,ipq9574", },
 	{ .compatible = "qcom,apq8064", },
 	{ .compatible = "qcom,msm8974", },
 	{ .compatible = "qcom,msm8960", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 60ed89000e..934d35f570 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1544,7 +1544,7 @@ static int cpufreq_online(unsigned int cpu)
 
 	/*
 	 * Register with the energy model before
-	 * sched_cpufreq_governor_change() is called, which will result
+	 * sugov_eas_rebuild_sd() is called, which will result
 	 * in rebuilding of the sched
domains, which should only be done * once the energy model is properly initialized for the policy * first. @@ -1650,7 +1650,7 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy) } if (has_target()) - strncpy(policy->last_governor, policy->governor->name, + strscpy(policy->last_governor, policy->governor->name, CPUFREQ_NAME_LEN); else policy->last_policy = policy->policy; @@ -2652,7 +2652,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, ret = cpufreq_start_governor(policy); if (!ret) { pr_debug("governor change\n"); - sched_cpufreq_governor_change(policy, old_gov); return 0; } cpufreq_exit_governor(policy); @@ -2996,7 +2995,7 @@ static int __init cpufreq_core_init(void) BUG_ON(!cpufreq_global_kobject); if (!strlen(default_governor)) - strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN); + strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN); return 0; } diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index b6bd0ff353..56500b25d7 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -187,8 +187,7 @@ static ssize_t down_threshold_store(struct gov_attr_set *attr_set, ret = sscanf(buf, "%u", &input); /* cannot be lower than 1 otherwise freq will not fall */ - if (ret != 1 || input < 1 || input > 100 || - input >= dbs_data->up_threshold) + if (ret != 1 || input < 1 || input >= dbs_data->up_threshold) return -EINVAL; cs_tuners->down_threshold = input; diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 50a4d78465..2c42fee76d 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -15,8 +15,11 @@ #include <linux/mutex.h> #include <linux/slab.h> -static DEFINE_PER_CPU(unsigned int, cpu_is_managed); -static DEFINE_MUTEX(userspace_mutex); +struct userspace_policy { + unsigned int is_managed; + unsigned int setspeed; + struct mutex mutex; +}; /** * cpufreq_set - set the CPU frequency @@ -28,19 +31,19 @@ static DEFINE_MUTEX(userspace_mutex); static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) { int ret = -EINVAL; - unsigned int *setspeed = policy->governor_data; + struct userspace_policy *userspace = policy->governor_data; pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); - mutex_lock(&userspace_mutex); - if (!per_cpu(cpu_is_managed, policy->cpu)) + mutex_lock(&userspace->mutex); + if (!userspace->is_managed) goto err; - *setspeed = freq; + userspace->setspeed = freq; ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L); err: - mutex_unlock(&userspace_mutex); + mutex_unlock(&userspace->mutex); return ret; } @@ -51,67 +54,74 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf) static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy) { - unsigned int *setspeed; + struct userspace_policy *userspace; - setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL); - if (!setspeed) + userspace = kzalloc(sizeof(*userspace), GFP_KERNEL); + if (!userspace) return -ENOMEM; - policy->governor_data = setspeed; + mutex_init(&userspace->mutex); + + policy->governor_data = userspace; return 0; } +/* + * Any routine that writes to the policy struct will hold the "rwsem" of + * policy struct that means it is free to free "governor_data" here. 
+ */ static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy) { - mutex_lock(&userspace_mutex); kfree(policy->governor_data); policy->governor_data = NULL; - mutex_unlock(&userspace_mutex); } static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy) { - unsigned int *setspeed = policy->governor_data; + struct userspace_policy *userspace = policy->governor_data; BUG_ON(!policy->cur); pr_debug("started managing cpu %u\n", policy->cpu); - mutex_lock(&userspace_mutex); - per_cpu(cpu_is_managed, policy->cpu) = 1; - *setspeed = policy->cur; - mutex_unlock(&userspace_mutex); + mutex_lock(&userspace->mutex); + userspace->is_managed = 1; + userspace->setspeed = policy->cur; + mutex_unlock(&userspace->mutex); return 0; } static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy) { - unsigned int *setspeed = policy->governor_data; + struct userspace_policy *userspace = policy->governor_data; pr_debug("managing cpu %u stopped\n", policy->cpu); - mutex_lock(&userspace_mutex); - per_cpu(cpu_is_managed, policy->cpu) = 0; - *setspeed = 0; - mutex_unlock(&userspace_mutex); + mutex_lock(&userspace->mutex); + userspace->is_managed = 0; + userspace->setspeed = 0; + mutex_unlock(&userspace->mutex); } static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy) { - unsigned int *setspeed = policy->governor_data; + struct userspace_policy *userspace = policy->governor_data; - mutex_lock(&userspace_mutex); + mutex_lock(&userspace->mutex); pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n", - policy->cpu, policy->min, policy->max, policy->cur, *setspeed); - - if (policy->max < *setspeed) - __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); - else if (policy->min > *setspeed) - __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); + policy->cpu, policy->min, policy->max, policy->cur, userspace->setspeed); + + if (policy->max < userspace->setspeed) + __cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_H); + else if (policy->min > userspace->setspeed) + __cpufreq_driver_target(policy, policy->min, + CPUFREQ_RELATION_L); else - __cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, userspace->setspeed, + CPUFREQ_RELATION_L); - mutex_unlock(&userspace_mutex); + mutex_unlock(&userspace->mutex); } static struct cpufreq_governor cpufreq_gov_userspace = { diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c deleted file mode 100644 index c6bdc45551..0000000000 --- a/drivers/cpufreq/ia64-acpi-cpufreq.c +++ /dev/null @@ -1,353 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * This file provides the ACPI based P-state support. This - * module works with generic cpufreq infrastructure. 
Most of - * the code is based on i386 version - * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c) - * - * Copyright (C) 2005 Intel Corp - * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/cpufreq.h> -#include <linux/proc_fs.h> -#include <asm/io.h> -#include <linux/uaccess.h> -#include <asm/pal.h> - -#include <linux/acpi.h> -#include <acpi/processor.h> - -MODULE_AUTHOR("Venkatesh Pallipadi"); -MODULE_DESCRIPTION("ACPI Processor P-States Driver"); -MODULE_LICENSE("GPL"); - -struct cpufreq_acpi_io { - struct acpi_processor_performance acpi_data; - unsigned int resume; -}; - -struct cpufreq_acpi_req { - unsigned int cpu; - unsigned int state; -}; - -static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS]; - -static struct cpufreq_driver acpi_cpufreq_driver; - - -static int -processor_set_pstate ( - u32 value) -{ - s64 retval; - - pr_debug("processor_set_pstate\n"); - - retval = ia64_pal_set_pstate((u64)value); - - if (retval) { - pr_debug("Failed to set freq to 0x%x, with error 0x%llx\n", - value, retval); - return -ENODEV; - } - return (int)retval; -} - - -static int -processor_get_pstate ( - u32 *value) -{ - u64 pstate_index = 0; - s64 retval; - - pr_debug("processor_get_pstate\n"); - - retval = ia64_pal_get_pstate(&pstate_index, - PAL_GET_PSTATE_TYPE_INSTANT); - *value = (u32) pstate_index; - - if (retval) - pr_debug("Failed to get current freq with " - "error 0x%llx, idx 0x%x\n", retval, *value); - - return (int)retval; -} - - -/* To be used only after data->acpi_data is initialized */ -static unsigned -extract_clock ( - struct cpufreq_acpi_io *data, - unsigned value) -{ - unsigned long i; - - pr_debug("extract_clock\n"); - - for (i = 0; i < data->acpi_data.state_count; i++) { - if (value == data->acpi_data.states[i].status) - return data->acpi_data.states[i].core_frequency; - } - return data->acpi_data.states[i-1].core_frequency; -} - - -static long -processor_get_freq ( - void *arg) -{ - struct cpufreq_acpi_req *req = arg; - unsigned int cpu = req->cpu; - struct cpufreq_acpi_io *data = acpi_io_data[cpu]; - u32 value; - int ret; - - pr_debug("processor_get_freq\n"); - if (smp_processor_id() != cpu) - return -EAGAIN; - - /* processor_get_pstate gets the instantaneous frequency */ - ret = processor_get_pstate(&value); - if (ret) { - pr_warn("get performance failed with error %d\n", ret); - return ret; - } - return 1000 * extract_clock(data, value); -} - - -static long -processor_set_freq ( - void *arg) -{ - struct cpufreq_acpi_req *req = arg; - unsigned int cpu = req->cpu; - struct cpufreq_acpi_io *data = acpi_io_data[cpu]; - int ret, state = req->state; - u32 value; - - pr_debug("processor_set_freq\n"); - if (smp_processor_id() != cpu) - return -EAGAIN; - - if (state == data->acpi_data.state) { - if (unlikely(data->resume)) { - pr_debug("Called after resume, resetting to P%d\n", state); - data->resume = 0; - } else { - pr_debug("Already at target state (P%d)\n", state); - return 0; - } - } - - pr_debug("Transitioning from P%d to P%d\n", - data->acpi_data.state, state); - - /* - * First we write the target state's 'control' value to the - * control_register. 
- */ - value = (u32) data->acpi_data.states[state].control; - - pr_debug("Transitioning to state: 0x%08x\n", value); - - ret = processor_set_pstate(value); - if (ret) { - pr_warn("Transition failed with error %d\n", ret); - return -ENODEV; - } - - data->acpi_data.state = state; - return 0; -} - - -static unsigned int -acpi_cpufreq_get ( - unsigned int cpu) -{ - struct cpufreq_acpi_req req; - long ret; - - req.cpu = cpu; - ret = work_on_cpu(cpu, processor_get_freq, &req); - - return ret > 0 ? (unsigned int) ret : 0; -} - - -static int -acpi_cpufreq_target ( - struct cpufreq_policy *policy, - unsigned int index) -{ - struct cpufreq_acpi_req req; - - req.cpu = policy->cpu; - req.state = index; - - return work_on_cpu(req.cpu, processor_set_freq, &req); -} - -static int -acpi_cpufreq_cpu_init ( - struct cpufreq_policy *policy) -{ - unsigned int i; - unsigned int cpu = policy->cpu; - struct cpufreq_acpi_io *data; - unsigned int result = 0; - struct cpufreq_frequency_table *freq_table; - - pr_debug("acpi_cpufreq_cpu_init\n"); - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return (-ENOMEM); - - acpi_io_data[cpu] = data; - - result = acpi_processor_register_performance(&data->acpi_data, cpu); - - if (result) - goto err_free; - - /* capability check */ - if (data->acpi_data.state_count <= 1) { - pr_debug("No P-States\n"); - result = -ENODEV; - goto err_unreg; - } - - if ((data->acpi_data.control_register.space_id != - ACPI_ADR_SPACE_FIXED_HARDWARE) || - (data->acpi_data.status_register.space_id != - ACPI_ADR_SPACE_FIXED_HARDWARE)) { - pr_debug("Unsupported address space [%d, %d]\n", - (u32) (data->acpi_data.control_register.space_id), - (u32) (data->acpi_data.status_register.space_id)); - result = -ENODEV; - goto err_unreg; - } - - /* alloc freq_table */ - freq_table = kcalloc(data->acpi_data.state_count + 1, - sizeof(*freq_table), - GFP_KERNEL); - if (!freq_table) { - result = -ENOMEM; - goto err_unreg; - } - - /* detect transition latency */ - policy->cpuinfo.transition_latency = 0; - for (i=0; i<data->acpi_data.state_count; i++) { - if ((data->acpi_data.states[i].transition_latency * 1000) > - policy->cpuinfo.transition_latency) { - policy->cpuinfo.transition_latency = - data->acpi_data.states[i].transition_latency * 1000; - } - } - - /* table init */ - for (i = 0; i <= data->acpi_data.state_count; i++) - { - if (i < data->acpi_data.state_count) { - freq_table[i].frequency = - data->acpi_data.states[i].core_frequency * 1000; - } else { - freq_table[i].frequency = CPUFREQ_TABLE_END; - } - } - - policy->freq_table = freq_table; - - /* notify BIOS that we exist */ - acpi_processor_notify_smm(THIS_MODULE); - - pr_info("CPU%u - ACPI performance management activated\n", cpu); - - for (i = 0; i < data->acpi_data.state_count; i++) - pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", - (i == data->acpi_data.state?'*':' '), i, - (u32) data->acpi_data.states[i].core_frequency, - (u32) data->acpi_data.states[i].power, - (u32) data->acpi_data.states[i].transition_latency, - (u32) data->acpi_data.states[i].bus_master_latency, - (u32) data->acpi_data.states[i].status, - (u32) data->acpi_data.states[i].control); - - /* the first call to ->target() should result in us actually - * writing something to the appropriate registers. 
*/ - data->resume = 1; - - return (result); - - err_unreg: - acpi_processor_unregister_performance(cpu); - err_free: - kfree(data); - acpi_io_data[cpu] = NULL; - - return (result); -} - - -static int -acpi_cpufreq_cpu_exit ( - struct cpufreq_policy *policy) -{ - struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - - pr_debug("acpi_cpufreq_cpu_exit\n"); - - if (data) { - acpi_io_data[policy->cpu] = NULL; - acpi_processor_unregister_performance(policy->cpu); - kfree(policy->freq_table); - kfree(data); - } - - return (0); -} - - -static struct cpufreq_driver acpi_cpufreq_driver = { - .verify = cpufreq_generic_frequency_table_verify, - .target_index = acpi_cpufreq_target, - .get = acpi_cpufreq_get, - .init = acpi_cpufreq_cpu_init, - .exit = acpi_cpufreq_cpu_exit, - .name = "acpi-cpufreq", - .attr = cpufreq_generic_attr, -}; - - -static int __init -acpi_cpufreq_init (void) -{ - pr_debug("acpi_cpufreq_init\n"); - - return cpufreq_register_driver(&acpi_cpufreq_driver); -} - - -static void __exit -acpi_cpufreq_exit (void) -{ - pr_debug("acpi_cpufreq_exit\n"); - - cpufreq_unregister_driver(&acpi_cpufreq_driver); -} - -late_initcall(acpi_cpufreq_init); -module_exit(acpi_cpufreq_exit); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index c352a593e5..f5c69fa230 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -596,13 +596,9 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) static inline void update_turbo_state(void) { u64 misc_en; - struct cpudata *cpu; - cpu = all_cpu_data[0]; rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); - global.turbo_disabled = - (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || - cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); + global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE; } static int min_perf_pct_min(void) diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index ec75e79659..df3567c1e9 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c @@ -24,6 +24,7 @@ #include <linux/device.h> #include <linux/hardirq.h> #include <linux/of.h> +#include <linux/of_address.h> #include <asm/machdep.h> #include <asm/irq.h> @@ -378,10 +379,9 @@ static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) static u32 read_gpio(struct device_node *np) { - const u32 *reg = of_get_property(np, "reg", NULL); - u32 offset; + u64 offset; - if (reg == NULL) + if (of_property_read_reg(np, 0, &offset, NULL) < 0) return 0; /* That works for all keylargos but shall be fixed properly * some day... The problem is that it seems we can't rely @@ -389,7 +389,6 @@ static u32 read_gpio(struct device_node *np) * relative to the base of KeyLargo or to the base of the * GPIO space, and the device-tree doesn't help. 
*/ - offset = *reg; if (offset < KEYLARGO_GPIO_LEVELS0) offset += KEYLARGO_GPIO_LEVELS0; return offset; diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index 84d7033e5e..ea05d9d674 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -23,13 +23,28 @@ #include <linux/nvmem-consumer.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/pm.h> #include <linux/pm_domain.h> #include <linux/pm_opp.h> +#include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/soc/qcom/smem.h> #include <dt-bindings/arm/qcom,ids.h> +enum ipq806x_versions { + IPQ8062_VERSION = 0, + IPQ8064_VERSION, + IPQ8065_VERSION, +}; + +#define IPQ6000_VERSION BIT(2) + +enum ipq8074_versions { + IPQ8074_HAWKEYE_VERSION = 0, + IPQ8074_ACORN_VERSION, +}; + struct qcom_cpufreq_drv; struct qcom_cpufreq_match_data { @@ -40,16 +55,39 @@ struct qcom_cpufreq_match_data { const char **genpd_names; }; +struct qcom_cpufreq_drv_cpu { + int opp_token; + struct device **virt_devs; +}; + struct qcom_cpufreq_drv { - int *opp_tokens; u32 versions; const struct qcom_cpufreq_match_data *data; + struct qcom_cpufreq_drv_cpu cpus[]; }; static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev; +static int qcom_cpufreq_simple_get_version(struct device *cpu_dev, + struct nvmem_cell *speedbin_nvmem, + char **pvs_name, + struct qcom_cpufreq_drv *drv) +{ + u8 *speedbin; + + *pvs_name = NULL; + speedbin = nvmem_cell_read(speedbin_nvmem, NULL); + if (IS_ERR(speedbin)) + return PTR_ERR(speedbin); + + dev_dbg(cpu_dev, "speedbin: %d\n", *speedbin); + drv->versions = 1 << *speedbin; + kfree(speedbin); + return 0; +} + static void get_krait_bin_format_a(struct device *cpu_dev, - int *speed, int *pvs, int *pvs_ver, + int *speed, int *pvs, u8 *buf) { u32 pte_efuse; @@ -148,6 +186,16 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev, switch (msm_id) { case QCOM_ID_MSM8996: case QCOM_ID_APQ8096: + case QCOM_ID_IPQ5332: + case QCOM_ID_IPQ5322: + case QCOM_ID_IPQ5312: + case QCOM_ID_IPQ5302: + case QCOM_ID_IPQ5300: + case QCOM_ID_IPQ9514: + case QCOM_ID_IPQ9550: + case QCOM_ID_IPQ9554: + case QCOM_ID_IPQ9570: + case QCOM_ID_IPQ9574: drv->versions = 1 << (unsigned int)(*speedbin); break; case QCOM_ID_MSM8996SG: @@ -180,8 +228,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, switch (len) { case 4: - get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver, - speedbin); + get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin); break; case 8: get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver, @@ -203,6 +250,152 @@ len_error: return ret; } +static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev, + struct nvmem_cell *speedbin_nvmem, + char **pvs_name, + struct qcom_cpufreq_drv *drv) +{ + int speed = 0, pvs = 0; + int msm_id, ret = 0; + u8 *speedbin; + size_t len; + + speedbin = nvmem_cell_read(speedbin_nvmem, &len); + if (IS_ERR(speedbin)) + return PTR_ERR(speedbin); + + if (len != 4) { + dev_err(cpu_dev, "Unable to read nvmem data. 
Defaulting to 0!\n"); + ret = -ENODEV; + goto exit; + } + + get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin); + + ret = qcom_smem_get_soc_id(&msm_id); + if (ret) + goto exit; + + switch (msm_id) { + case QCOM_ID_IPQ8062: + drv->versions = BIT(IPQ8062_VERSION); + break; + case QCOM_ID_IPQ8064: + case QCOM_ID_IPQ8066: + case QCOM_ID_IPQ8068: + drv->versions = BIT(IPQ8064_VERSION); + break; + case QCOM_ID_IPQ8065: + case QCOM_ID_IPQ8069: + drv->versions = BIT(IPQ8065_VERSION); + break; + default: + dev_err(cpu_dev, + "SoC ID %u is not part of IPQ8064 family, limiting to 1.0GHz!\n", + msm_id); + drv->versions = BIT(IPQ8062_VERSION); + break; + } + + /* IPQ8064 speed is never fused. Only pvs values are fused. */ + snprintf(*pvs_name, sizeof("speed0-pvsXX"), "speed0-pvs%d", pvs); + +exit: + kfree(speedbin); + return ret; +} + +static int qcom_cpufreq_ipq6018_name_version(struct device *cpu_dev, + struct nvmem_cell *speedbin_nvmem, + char **pvs_name, + struct qcom_cpufreq_drv *drv) +{ + u32 msm_id; + int ret; + u8 *speedbin; + *pvs_name = NULL; + + ret = qcom_smem_get_soc_id(&msm_id); + if (ret) + return ret; + + speedbin = nvmem_cell_read(speedbin_nvmem, NULL); + if (IS_ERR(speedbin)) + return PTR_ERR(speedbin); + + switch (msm_id) { + case QCOM_ID_IPQ6005: + case QCOM_ID_IPQ6010: + case QCOM_ID_IPQ6018: + case QCOM_ID_IPQ6028: + /* Fuse Value Freq BIT to set + * --------------------------------- + * 2’b0 No Limit BIT(0) + * 2’b1 1.5 GHz BIT(1) + */ + drv->versions = 1 << (unsigned int)(*speedbin); + break; + case QCOM_ID_IPQ6000: + /* + * IPQ6018 family only has one bit to advertise the CPU + * speed-bin, but that is not enough for IPQ6000 which + * is only rated up to 1.2GHz. + * So for IPQ6000 manually set BIT(2) based on SMEM ID. + */ + drv->versions = IPQ6000_VERSION; + break; + default: + dev_err(cpu_dev, + "SoC ID %u is not part of IPQ6018 family, limiting to 1.2GHz!\n", + msm_id); + drv->versions = IPQ6000_VERSION; + break; + } + + kfree(speedbin); + return 0; +} + +static int qcom_cpufreq_ipq8074_name_version(struct device *cpu_dev, + struct nvmem_cell *speedbin_nvmem, + char **pvs_name, + struct qcom_cpufreq_drv *drv) +{ + u32 msm_id; + int ret; + *pvs_name = NULL; + + ret = qcom_smem_get_soc_id(&msm_id); + if (ret) + return ret; + + switch (msm_id) { + case QCOM_ID_IPQ8070A: + case QCOM_ID_IPQ8071A: + case QCOM_ID_IPQ8172: + case QCOM_ID_IPQ8173: + case QCOM_ID_IPQ8174: + drv->versions = BIT(IPQ8074_ACORN_VERSION); + break; + case QCOM_ID_IPQ8072A: + case QCOM_ID_IPQ8074A: + case QCOM_ID_IPQ8076A: + case QCOM_ID_IPQ8078A: + drv->versions = BIT(IPQ8074_HAWKEYE_VERSION); + break; + default: + dev_err(cpu_dev, + "SoC ID %u is not part of IPQ8074 family, limiting to 1.4GHz!\n", + msm_id); + drv->versions = BIT(IPQ8074_ACORN_VERSION); + break; + } + + return 0; +} + +static const char *generic_genpd_names[] = { "perf", NULL }; + static const struct qcom_cpufreq_match_data match_data_kryo = { .get_version = qcom_cpufreq_kryo_name_version, }; @@ -211,12 +404,53 @@ static const struct qcom_cpufreq_match_data match_data_krait = { .get_version = qcom_cpufreq_krait_name_version, }; +static const struct qcom_cpufreq_match_data match_data_msm8909 = { + .get_version = qcom_cpufreq_simple_get_version, + .genpd_names = generic_genpd_names, +}; + static const char *qcs404_genpd_names[] = { "cpr", NULL }; static const struct qcom_cpufreq_match_data match_data_qcs404 = { .genpd_names = qcs404_genpd_names, }; +static const struct qcom_cpufreq_match_data match_data_ipq6018 = { + .get_version = 
qcom_cpufreq_ipq6018_name_version, +}; + +static const struct qcom_cpufreq_match_data match_data_ipq8064 = { + .get_version = qcom_cpufreq_ipq8064_name_version, +}; + +static const struct qcom_cpufreq_match_data match_data_ipq8074 = { + .get_version = qcom_cpufreq_ipq8074_name_version, +}; + +static void qcom_cpufreq_suspend_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu) +{ + const char * const *name = drv->data->genpd_names; + int i; + + if (!drv->cpus[cpu].virt_devs) + return; + + for (i = 0; *name; i++, name++) + device_set_awake_path(drv->cpus[cpu].virt_devs[i]); +} + +static void qcom_cpufreq_put_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu) +{ + const char * const *name = drv->data->genpd_names; + int i; + + if (!drv->cpus[cpu].virt_devs) + return; + + for (i = 0; *name; i++, name++) + pm_runtime_put(drv->cpus[cpu].virt_devs[i]); +} + static int qcom_cpufreq_probe(struct platform_device *pdev) { struct qcom_cpufreq_drv *drv; @@ -237,49 +471,41 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) if (!np) return -ENOENT; - ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu"); + ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu") || + of_device_is_compatible(np, "operating-points-v2-krait-cpu"); if (!ret) { of_node_put(np); return -ENOENT; } - drv = kzalloc(sizeof(*drv), GFP_KERNEL); + drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()), + GFP_KERNEL); if (!drv) return -ENOMEM; match = pdev->dev.platform_data; drv->data = match->data; - if (!drv->data) { - ret = -ENODEV; - goto free_drv; - } + if (!drv->data) + return -ENODEV; if (drv->data->get_version) { speedbin_nvmem = of_nvmem_cell_get(np, NULL); - if (IS_ERR(speedbin_nvmem)) { - ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), - "Could not get nvmem cell\n"); - goto free_drv; - } + if (IS_ERR(speedbin_nvmem)) + return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), + "Could not get nvmem cell\n"); ret = drv->data->get_version(cpu_dev, speedbin_nvmem, &pvs_name, drv); if (ret) { nvmem_cell_put(speedbin_nvmem); - goto free_drv; + return ret; } nvmem_cell_put(speedbin_nvmem); } of_node_put(np); - drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens), - GFP_KERNEL); - if (!drv->opp_tokens) { - ret = -ENOMEM; - goto free_drv; - } - for_each_possible_cpu(cpu) { + struct device **virt_devs = NULL; struct dev_pm_opp_config config = { .supported_hw = NULL, }; @@ -300,17 +526,38 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) if (drv->data->genpd_names) { config.genpd_names = drv->data->genpd_names; - config.virt_devs = NULL; + config.virt_devs = &virt_devs; } if (config.supported_hw || config.genpd_names) { - drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config); - if (drv->opp_tokens[cpu] < 0) { - ret = drv->opp_tokens[cpu]; + drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config); + if (drv->cpus[cpu].opp_token < 0) { + ret = drv->cpus[cpu].opp_token; dev_err(cpu_dev, "Failed to set OPP config\n"); goto free_opp; } } + + if (virt_devs) { + const char * const *name = config.genpd_names; + int i, j; + + for (i = 0; *name; i++, name++) { + ret = pm_runtime_resume_and_get(virt_devs[i]); + if (ret) { + dev_err(cpu_dev, "failed to resume %s: %d\n", + *name, ret); + + /* Rollback previous PM runtime calls */ + name = config.genpd_names; + for (j = 0; *name && j < i; j++, name++) + pm_runtime_put(virt_devs[j]); + + goto free_opp; + } + } + drv->cpus[cpu].virt_devs = virt_devs; + } } cpufreq_dt_pdev = 
platform_device_register_simple("cpufreq-dt", -1, @@ -324,12 +571,10 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) dev_err(cpu_dev, "Failed to register platform device\n"); free_opp: - for_each_possible_cpu(cpu) - dev_pm_opp_clear_config(drv->opp_tokens[cpu]); - kfree(drv->opp_tokens); -free_drv: - kfree(drv); - + for_each_possible_cpu(cpu) { + qcom_cpufreq_put_virt_devs(drv, cpu); + dev_pm_opp_clear_config(drv->cpus[cpu].opp_token); + } return ret; } @@ -340,27 +585,45 @@ static void qcom_cpufreq_remove(struct platform_device *pdev) platform_device_unregister(cpufreq_dt_pdev); + for_each_possible_cpu(cpu) { + qcom_cpufreq_put_virt_devs(drv, cpu); + dev_pm_opp_clear_config(drv->cpus[cpu].opp_token); + } +} + +static int qcom_cpufreq_suspend(struct device *dev) +{ + struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev); + unsigned int cpu; + for_each_possible_cpu(cpu) - dev_pm_opp_clear_config(drv->opp_tokens[cpu]); + qcom_cpufreq_suspend_virt_devs(drv, cpu); - kfree(drv->opp_tokens); - kfree(drv); + return 0; } +static DEFINE_SIMPLE_DEV_PM_OPS(qcom_cpufreq_pm_ops, qcom_cpufreq_suspend, NULL); + static struct platform_driver qcom_cpufreq_driver = { .probe = qcom_cpufreq_probe, .remove_new = qcom_cpufreq_remove, .driver = { .name = "qcom-cpufreq-nvmem", + .pm = pm_sleep_ptr(&qcom_cpufreq_pm_ops), }, }; static const struct of_device_id qcom_cpufreq_match_list[] __initconst = { { .compatible = "qcom,apq8096", .data = &match_data_kryo }, + { .compatible = "qcom,msm8909", .data = &match_data_msm8909 }, { .compatible = "qcom,msm8996", .data = &match_data_kryo }, { .compatible = "qcom,qcs404", .data = &match_data_qcs404 }, - { .compatible = "qcom,ipq8064", .data = &match_data_krait }, + { .compatible = "qcom,ipq5332", .data = &match_data_kryo }, + { .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 }, + { .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 }, + { .compatible = "qcom,ipq8074", .data = &match_data_ipq8074 }, { .compatible = "qcom,apq8064", .data = &match_data_krait }, + { .compatible = "qcom,ipq9574", .data = &match_data_kryo }, { .compatible = "qcom,msm8974", .data = &match_data_krait }, { .compatible = "qcom,msm8960", .data = &match_data_krait }, {}, diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 028df8a5f5..4ee23f4ebf 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -70,16 +70,36 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy, return 0; } +static int scmi_cpu_domain_id(struct device *cpu_dev) +{ + struct device_node *np = cpu_dev->of_node; + struct of_phandle_args domain_id; + int index; + + if (of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0, + &domain_id)) { + /* Find the corresponding index for power-domain "perf". 
*/ + index = of_property_match_string(np, "power-domain-names", + "perf"); + if (index < 0) + return -EINVAL; + + if (of_parse_phandle_with_args(np, "power-domains", + "#power-domain-cells", index, + &domain_id)) + return -EINVAL; + } + + return domain_id.args[0]; +} + static int -scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) +scmi_get_sharing_cpus(struct device *cpu_dev, int domain, + struct cpumask *cpumask) { - int cpu, domain, tdomain; + int cpu, tdomain; struct device *tcpu_dev; - domain = perf_ops->device_domain_id(cpu_dev); - if (domain < 0) - return domain; - for_each_possible_cpu(cpu) { if (cpu == cpu_dev->id) continue; @@ -88,7 +108,7 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) if (!tcpu_dev) continue; - tdomain = perf_ops->device_domain_id(tcpu_dev); + tdomain = scmi_cpu_domain_id(tcpu_dev); if (tdomain == domain) cpumask_set_cpu(cpu, cpumask); } @@ -104,7 +124,7 @@ scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power, unsigned long Hz; int ret, domain; - domain = perf_ops->device_domain_id(cpu_dev); + domain = scmi_cpu_domain_id(cpu_dev); if (domain < 0) return domain; @@ -126,7 +146,7 @@ scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power, static int scmi_cpufreq_init(struct cpufreq_policy *policy) { - int ret, nr_opp; + int ret, nr_opp, domain; unsigned int latency; struct device *cpu_dev; struct scmi_data *priv; @@ -138,6 +158,10 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) return -ENODEV; } + domain = scmi_cpu_domain_id(cpu_dev); + if (domain < 0) + return domain; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -148,7 +172,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) } /* Obtain CPUs that share SCMI performance controls */ - ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus); + ret = scmi_get_sharing_cpus(cpu_dev, domain, policy->cpus); if (ret) { dev_warn(cpu_dev, "failed to get sharing cpumask\n"); goto out_free_cpumask; @@ -176,7 +200,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) */ nr_opp = dev_pm_opp_get_opp_count(cpu_dev); if (nr_opp <= 0) { - ret = perf_ops->device_opps_add(ph, cpu_dev); + ret = perf_ops->device_opps_add(ph, cpu_dev, domain); if (ret) { dev_warn(cpu_dev, "failed to add opps to the device\n"); goto out_free_cpumask; @@ -209,7 +233,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) } priv->cpu_dev = cpu_dev; - priv->domain_id = perf_ops->device_domain_id(cpu_dev); + priv->domain_id = domain; policy->driver_data = priv; policy->freq_table = freq_table; @@ -217,14 +241,14 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) /* SCMI allows DVFS request for any domain from any CPU */ policy->dvfs_possible_from_any_cpu = true; - latency = perf_ops->transition_latency_get(ph, cpu_dev); + latency = perf_ops->transition_latency_get(ph, domain); if (!latency) latency = CPUFREQ_ETERNAL; policy->cpuinfo.transition_latency = latency; policy->fast_switch_possible = - perf_ops->fast_switch_possible(ph, cpu_dev); + perf_ops->fast_switch_possible(ph, domain); return 0; diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index 386aed3637..59865ea455 100644 --- a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -5,7 +5,6 @@ #include <linux/cpu.h> #include <linux/cpufreq.h> -#include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/of.h> @@ -21,10 +20,11 @@ #define KHZ 1000 #define 
REF_CLK_MHZ 408 /* 408 MHz */ -#define US_DELAY 500 #define CPUFREQ_TBL_STEP_HZ (50 * KHZ * KHZ) #define MAX_CNT ~0U +#define MAX_DELTA_KHZ 115200 + #define NDIV_MASK 0x1FF #define CORE_OFFSET(cpu) (cpu * 8) @@ -39,6 +39,12 @@ /* cpufreq transisition latency */ #define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */ +struct tegra_cpu_data { + u32 cpuid; + u32 clusterid; + void __iomem *freq_core_reg; +}; + struct tegra_cpu_ctr { u32 cpu; u32 coreclk_cnt, last_coreclk_cnt; @@ -62,6 +68,7 @@ struct tegra_cpufreq_soc { int maxcpus_per_cluster; unsigned int num_clusters; phys_addr_t actmon_cntr_base; + u32 refclk_delta_min; }; struct tegra194_cpufreq_data { @@ -69,6 +76,7 @@ struct tegra194_cpufreq_data { struct cpufreq_frequency_table **bpmp_luts; const struct tegra_cpufreq_soc *soc; bool icc_dram_bw_scaling; + struct tegra_cpu_data *cpu_data; }; static struct workqueue_struct *read_counters_wq; @@ -116,14 +124,8 @@ static void tegra234_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid) static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv) { struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); - void __iomem *freq_core_reg; - u64 mpidr_id; - /* use physical id to get address of per core frequency register */ - mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid; - freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id); - - *ndiv = readl(freq_core_reg) & NDIV_MASK; + *ndiv = readl(data->cpu_data[cpu].freq_core_reg) & NDIV_MASK; return 0; } @@ -131,19 +133,10 @@ static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv) static void tegra234_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv) { struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); - void __iomem *freq_core_reg; - u32 cpu, cpuid, clusterid; - u64 mpidr_id; - - for_each_cpu_and(cpu, policy->cpus, cpu_online_mask) { - data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid); - - /* use physical id to get address of per core frequency register */ - mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid; - freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id); + u32 cpu; - writel(ndiv, freq_core_reg); - } + for_each_cpu(cpu, policy->cpus) + writel(ndiv, data->cpu_data[cpu].freq_core_reg); } /* @@ -157,19 +150,35 @@ static void tegra234_read_counters(struct tegra_cpu_ctr *c) { struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); void __iomem *actmon_reg; - u32 cpuid, clusterid; + u32 delta_refcnt; + int cnt = 0; u64 val; - data->soc->ops->get_cpu_cluster_id(c->cpu, &cpuid, &clusterid); - actmon_reg = CORE_ACTMON_CNTR_REG(data, clusterid, cpuid); + actmon_reg = CORE_ACTMON_CNTR_REG(data, data->cpu_data[c->cpu].clusterid, + data->cpu_data[c->cpu].cpuid); val = readq(actmon_reg); c->last_refclk_cnt = upper_32_bits(val); c->last_coreclk_cnt = lower_32_bits(val); - udelay(US_DELAY); - val = readq(actmon_reg); - c->refclk_cnt = upper_32_bits(val); - c->coreclk_cnt = lower_32_bits(val); + + /* + * The sampling window is based on the minimum number of reference + * clock cycles which is known to give a stable value of CPU frequency. 
+ */ + do { + val = readq(actmon_reg); + c->refclk_cnt = upper_32_bits(val); + c->coreclk_cnt = lower_32_bits(val); + if (c->refclk_cnt < c->last_refclk_cnt) + delta_refcnt = c->refclk_cnt + (MAX_CNT - c->last_refclk_cnt); + else + delta_refcnt = c->refclk_cnt - c->last_refclk_cnt; + if (++cnt >= 0xFFFF) { + pr_warn("cpufreq: problem with refclk on cpu:%d, delta_refcnt:%u, cnt:%d\n", + c->cpu, delta_refcnt, cnt); + break; + } + } while (delta_refcnt < data->soc->refclk_delta_min); } static struct tegra_cpufreq_ops tegra234_cpufreq_ops = { @@ -184,6 +193,7 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = { .actmon_cntr_base = 0x9000, .maxcpus_per_cluster = 4, .num_clusters = 3, + .refclk_delta_min = 16000, }; static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = { @@ -191,6 +201,7 @@ static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = { .actmon_cntr_base = 0x4000, .maxcpus_per_cluster = 8, .num_clusters = 1, + .refclk_delta_min = 16000, }; static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid) @@ -231,15 +242,33 @@ static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response static void tegra194_read_counters(struct tegra_cpu_ctr *c) { + struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); + u32 delta_refcnt; + int cnt = 0; u64 val; val = read_freq_feedback(); c->last_refclk_cnt = lower_32_bits(val); c->last_coreclk_cnt = upper_32_bits(val); - udelay(US_DELAY); - val = read_freq_feedback(); - c->refclk_cnt = lower_32_bits(val); - c->coreclk_cnt = upper_32_bits(val); + + /* + * The sampling window is based on the minimum number of reference + * clock cycles which is known to give a stable value of CPU frequency. + */ + do { + val = read_freq_feedback(); + c->refclk_cnt = lower_32_bits(val); + c->coreclk_cnt = upper_32_bits(val); + if (c->refclk_cnt < c->last_refclk_cnt) + delta_refcnt = c->refclk_cnt + (MAX_CNT - c->last_refclk_cnt); + else + delta_refcnt = c->refclk_cnt - c->last_refclk_cnt; + if (++cnt >= 0xFFFF) { + pr_warn("cpufreq: problem with refclk on cpu:%d, delta_refcnt:%u, cnt:%d\n", + c->cpu, delta_refcnt, cnt); + break; + } + } while (delta_refcnt < data->soc->refclk_delta_min); } static void tegra_read_counters(struct work_struct *work) @@ -297,9 +326,8 @@ static unsigned int tegra194_calculate_speed(u32 cpu) u32 rate_mhz; /* - * udelay() is required to reconstruct cpu frequency over an - * observation window. Using workqueue to call udelay() with - * interrupts enabled. + * Reconstruct cpu frequency over an observation/sampling window. + * Using workqueue to keep interrupts enabled during the interval. 
*/ read_counters_work.c.cpu = cpu; INIT_WORK_ONSTACK(&read_counters_work.work, tegra_read_counters); @@ -357,19 +385,17 @@ static void tegra194_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv) static unsigned int tegra194_get_speed(u32 cpu) { struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); + u32 clusterid = data->cpu_data[cpu].clusterid; struct cpufreq_frequency_table *pos; - u32 cpuid, clusterid; unsigned int rate; u64 ndiv; int ret; - data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid); - /* reconstruct actual cpu freq using counters */ rate = tegra194_calculate_speed(cpu); /* get last written ndiv value */ - ret = data->soc->ops->get_cpu_ndiv(cpu, cpuid, clusterid, &ndiv); + ret = data->soc->ops->get_cpu_ndiv(cpu, data->cpu_data[cpu].cpuid, clusterid, &ndiv); if (WARN_ON_ONCE(ret)) return rate; @@ -383,9 +409,9 @@ static unsigned int tegra194_get_speed(u32 cpu) if (pos->driver_data != ndiv) continue; - if (abs(pos->frequency - rate) > 115200) { - pr_warn("cpufreq: cpu%d,cur:%u,set:%u,set ndiv:%llu\n", - cpu, rate, pos->frequency, ndiv); + if (abs(pos->frequency - rate) > MAX_DELTA_KHZ) { + pr_warn("cpufreq: cpu%d,cur:%u,set:%u,delta:%d,set ndiv:%llu\n", + cpu, rate, pos->frequency, abs(rate - pos->frequency), ndiv); } else { rate = pos->frequency; } @@ -475,13 +501,12 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy) { struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); int maxcpus_per_cluster = data->soc->maxcpus_per_cluster; + u32 clusterid = data->cpu_data[policy->cpu].clusterid; struct cpufreq_frequency_table *freq_table; struct cpufreq_frequency_table *bpmp_lut; u32 start_cpu, cpu; - u32 clusterid; int ret; - data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid); if (clusterid >= data->soc->num_clusters || !data->bpmp_luts[clusterid]) return -EINVAL; @@ -580,6 +605,7 @@ static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = { .ops = &tegra194_cpufreq_ops, .maxcpus_per_cluster = 2, .num_clusters = 4, + .refclk_delta_min = 16000, }; static void tegra194_cpufreq_free_resources(void) @@ -659,6 +685,28 @@ tegra_cpufreq_bpmp_read_lut(struct platform_device *pdev, struct tegra_bpmp *bpm return freq_table; } +static int tegra194_cpufreq_store_physids(unsigned int cpu, struct tegra194_cpufreq_data *data) +{ + int num_cpus = data->soc->maxcpus_per_cluster * data->soc->num_clusters; + u32 cpuid, clusterid; + u64 mpidr_id; + + if (cpu > (num_cpus - 1)) { + pr_err("cpufreq: wrong num of cpus or clusters in soc data\n"); + return -EINVAL; + } + + data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid); + + mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid; + + data->cpu_data[cpu].cpuid = cpuid; + data->cpu_data[cpu].clusterid = clusterid; + data->cpu_data[cpu].freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id); + + return 0; +} + static int tegra194_cpufreq_probe(struct platform_device *pdev) { const struct tegra_cpufreq_soc *soc; @@ -666,6 +714,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev) struct tegra_bpmp *bpmp; struct device *cpu_dev; int err, i; + u32 cpu; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -673,7 +722,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev) soc = of_device_get_match_data(&pdev->dev); - if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) { + if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters && soc->refclk_delta_min) { data->soc = soc; } else { dev_err(&pdev->dev, "soc data 
missing\n"); @@ -692,6 +741,12 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev) return PTR_ERR(data->regs); } + data->cpu_data = devm_kcalloc(&pdev->dev, data->soc->num_clusters * + data->soc->maxcpus_per_cluster, + sizeof(*data->cpu_data), GFP_KERNEL); + if (!data->cpu_data) + return -ENOMEM; + platform_set_drvdata(pdev, data); bpmp = tegra_bpmp_get(&pdev->dev); @@ -713,6 +768,12 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev) } } + for_each_possible_cpu(cpu) { + err = tegra194_cpufreq_store_physids(cpu, data); + if (err) + goto err_free_res; + } + tegra194_cpufreq_driver.driver_data = data; /* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */ diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 3c37d78996..46c41e2ca7 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -338,6 +338,7 @@ static const struct of_device_id ti_cpufreq_of_match[] = { { .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, }, { .compatible = "ti,am625", .data = &am625_soc_data, }, { .compatible = "ti,am62a7", .data = &am625_soc_data, }, + { .compatible = "ti,am62p5", .data = &am625_soc_data, }, /* legacy */ { .compatible = "ti,omap3430", .data = &omap34xx_soc_data, }, { .compatible = "ti,omap3630", .data = &omap36xx_soc_data, }, |