author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 18:50:12 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 18:50:12 +0000
commit	8665bd53f2f2e27e5511d90428cb3f60e6d0ce15 (patch)
tree	8d58900dc0ebd4a3011f92c128d2fe45bc7c4bf2 /drivers/cpufreq
parent	Adding debian version 6.7.12-1. (diff)
download	linux-8665bd53f2f2e27e5511d90428cb3f60e6d0ce15.tar.xz
	linux-8665bd53f2f2e27e5511d90428cb3f60e6d0ce15.zip
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/armada-8k-cpufreq.c	4
-rw-r--r--	drivers/cpufreq/cppc_cpufreq.c	139
-rw-r--r--	drivers/cpufreq/cpufreq.c	21
-rw-r--r--	drivers/cpufreq/intel_pstate.c	36
4 files changed, 58 insertions(+), 142 deletions(-)
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 8afefdea4d..ce5a5641b6 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -57,7 +57,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
continue;
}
- clk = clk_get(cpu_dev, 0);
+ clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
pr_warn("Cannot get clock for CPU %d\n", cpu);
} else {
@@ -165,7 +165,7 @@ static int __init armada_8k_cpufreq_init(void)
continue;
}
- clk = clk_get(cpu_dev, 0);
+ clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
pr_err("Cannot get clock for CPU %d\n", cpu);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index fe08ca419b..64420d9cfd 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -16,7 +16,6 @@
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
-#include <linux/dmi.h>
#include <linux/irq_work.h>
#include <linux/kthread.h>
#include <linux/time.h>
@@ -27,12 +26,6 @@
#include <acpi/cppc_acpi.h>
-/* Minimum struct length needed for the DMI processor entry we want */
-#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
-
-/* Offset in the DMI processor structure for the max frequency */
-#define DMI_PROCESSOR_MAX_SPEED 0x14
-
/*
* This list contains information parsed from per CPU ACPI _CPC and _PSD
* structures: e.g. the highest and lowest supported performance, capabilities,
@@ -291,97 +284,9 @@ static inline void cppc_freq_invariance_exit(void)
}
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
-/* Callback function used to retrieve the max frequency from DMI */
-static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
-{
- const u8 *dmi_data = (const u8 *)dm;
- u16 *mhz = (u16 *)private;
-
- if (dm->type == DMI_ENTRY_PROCESSOR &&
- dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
- u16 val = (u16)get_unaligned((const u16 *)
- (dmi_data + DMI_PROCESSOR_MAX_SPEED));
- *mhz = val > *mhz ? val : *mhz;
- }
-}
-
-/* Look up the max frequency in DMI */
-static u64 cppc_get_dmi_max_khz(void)
-{
- u16 mhz = 0;
-
- dmi_walk(cppc_find_dmi_mhz, &mhz);
-
- /*
- * Real stupid fallback value, just in case there is no
- * actual value set.
- */
- mhz = mhz ? mhz : 1;
-
- return (1000 * mhz);
-}
-
-/*
- * If CPPC lowest_freq and nominal_freq registers are exposed then we can
- * use them to convert perf to freq and vice versa. The conversion is
- * extrapolated as an affine function passing through the 2 points:
- * - (Low perf, Low freq)
- * - (Nominal perf, Nominal freq)
- */
-static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
- unsigned int perf)
-{
- struct cppc_perf_caps *caps = &cpu_data->perf_caps;
- s64 retval, offset = 0;
- static u64 max_khz;
- u64 mul, div;
-
- if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_freq - caps->lowest_freq;
- div = caps->nominal_perf - caps->lowest_perf;
- offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
- } else {
- if (!max_khz)
- max_khz = cppc_get_dmi_max_khz();
- mul = max_khz;
- div = caps->highest_perf;
- }
-
- retval = offset + div64_u64(perf * mul, div);
- if (retval >= 0)
- return retval;
- return 0;
-}
-
-static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
- unsigned int freq)
-{
- struct cppc_perf_caps *caps = &cpu_data->perf_caps;
- s64 retval, offset = 0;
- static u64 max_khz;
- u64 mul, div;
-
- if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_perf - caps->lowest_perf;
- div = caps->nominal_freq - caps->lowest_freq;
- offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
- } else {
- if (!max_khz)
- max_khz = cppc_get_dmi_max_khz();
- mul = caps->highest_perf;
- div = max_khz;
- }
-
- retval = offset + div64_u64(freq * mul, div);
- if (retval >= 0)
- return retval;
- return 0;
-}
-
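The deleted DMI walk and conversion helpers are not lost: equivalent
conversions now live in the ACPI CPPC core as cppc_perf_to_khz() and
cppc_khz_to_perf(), so every CPPC consumer shares one implementation. A
runnable userspace sketch of the affine mapping being removed here, with
made-up capability values (the line passes through (lowest_perf,
lowest_freq) and (nominal_perf, nominal_freq)):

	#include <stdint.h>
	#include <stdio.h>

	struct caps {
		uint64_t lowest_perf, nominal_perf;
		uint64_t lowest_freq_khz, nominal_freq_khz;
	};

	static uint64_t perf_to_khz(const struct caps *c, uint64_t perf)
	{
		uint64_t mul = c->nominal_freq_khz - c->lowest_freq_khz;
		uint64_t div = c->nominal_perf - c->lowest_perf;
		/* offset chosen so the line passes through the nominal point */
		int64_t off = (int64_t)c->nominal_freq_khz -
			      (int64_t)(c->nominal_perf * mul / div);

		return off + perf * mul / div;
	}

	int main(void)
	{
		/* illustrative: perf 10..100 spans 0.5..2.5 GHz */
		struct caps c = { 10, 100, 500000, 2500000 };

		printf("perf 50 -> %llu kHz\n",
		       (unsigned long long)perf_to_khz(&c, 50));
		return 0;
	}
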
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
-
{
struct cppc_cpudata *cpu_data = policy->driver_data;
unsigned int cpu = policy->cpu;
@@ -389,7 +294,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
u32 desired_perf;
int ret = 0;
- desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
+ desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
/* Return if it is exactly the same perf */
if (desired_perf == cpu_data->perf_ctrls.desired_perf)
return ret;
@@ -417,7 +322,7 @@ static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
u32 desired_perf;
int ret;
- desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
+ desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
cpu_data->perf_ctrls.desired_perf = desired_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
@@ -530,7 +435,7 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
min_step = min_cap / CPPC_EM_CAP_STEP;
max_step = max_cap / CPPC_EM_CAP_STEP;
- perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ perf_prev = cppc_khz_to_perf(perf_caps, *KHz);
step = perf_prev / perf_step;
if (step > max_step)
@@ -550,8 +455,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
perf = step * perf_step;
}
- *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
- perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ *KHz = cppc_perf_to_khz(perf_caps, perf);
+ perf_check = cppc_khz_to_perf(perf_caps, *KHz);
step_check = perf_check / perf_step;
/*
@@ -561,8 +466,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
*/
while ((*KHz == prev_freq) || (step_check != step)) {
perf++;
- *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
- perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ *KHz = cppc_perf_to_khz(perf_caps, perf);
+ perf_check = cppc_khz_to_perf(perf_caps, *KHz);
step_check = perf_check / perf_step;
}
@@ -591,7 +496,7 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
- perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
+ perf_prev = cppc_khz_to_perf(perf_caps, KHz);
perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
step = perf_prev / perf_step;
@@ -679,10 +584,6 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
goto free_mask;
}
- /* Convert the lowest and nominal freq from MHz to KHz */
- cpu_data->perf_caps.lowest_freq *= 1000;
- cpu_data->perf_caps.nominal_freq *= 1000;
-
list_add(&cpu_data->node, &cpu_data_list);
return cpu_data;
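The MHz-to-kHz fix-up is dropped because the shared ACPI helpers now scale
the _CPC frequency values internally; keeping the driver-side multiply as
well would double-convert. A short illustration of the bug class this
avoids (hypothetical values):

	uint64_t nominal_freq = 2500;			/* MHz, from _CPC */
	uint64_t nominal_khz  = nominal_freq * 1000;	/* convert exactly once */
	/* multiplying again would report 2500000000 kHz, i.e. 2.5 THz */
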
@@ -724,20 +625,16 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* Set min to lowest nonlinear perf to avoid any efficiency penalty (see
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
- policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->lowest_nonlinear_perf);
- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->nominal_perf);
+ policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
+ policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
* available if userspace wants to use any perf between lowest & lowest
* nonlinear perf
*/
- policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->lowest_perf);
- policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->nominal_perf);
+ policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
+ policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf);
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
policy->shared_type = cpu_data->shared_type;
@@ -773,7 +670,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
boost_supported = true;
/* Set policy->cur to max now. The governors will adjust later. */
- policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
+ policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
@@ -863,7 +760,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
&fb_ctrs_t1);
- return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
+ return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
}
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
@@ -878,11 +775,9 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
}
if (state)
- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->highest_perf);
+ policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
else
- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->nominal_perf);
+ policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
policy->cpuinfo.max_freq = policy->max;
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
@@ -937,7 +832,7 @@ static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
if (ret < 0)
return -EIO;
- return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
+ return cppc_perf_to_khz(&cpu_data->perf_caps, desired_perf);
}
static void cppc_check_hisi_workaround(void)
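Both get_rate paths above now end in the same shared perf-to-kHz
conversion. For context, the delivered performance they convert is derived
from two snapshots of the CPPC feedback counters; a simplified sketch of
that computation (cf. cppc_perf_from_fbctrs(); wraparound handling and the
exact fallback value omitted):

	#include <stdint.h>

	/* delivered_perf = reference_perf * d(delivered) / d(reference) */
	static uint64_t perf_from_fbctrs(uint64_t reference_perf,
					 uint64_t ref0, uint64_t del0,
					 uint64_t ref1, uint64_t del1)
	{
		uint64_t delta_ref = ref1 - ref0;
		uint64_t delta_del = del1 - del0;

		/* counters did not advance: avoid a divide by zero */
		if (!delta_ref || !delta_del)
			return reference_perf;

		return reference_perf * delta_del / delta_ref;
	}
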
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5104f853a9..3c2c955fbb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -454,7 +454,7 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
arch_set_freq_scale(policy->related_cpus,
policy->cur,
- policy->cpuinfo.max_freq);
+ arch_scale_freq_ref(policy->cpu));
spin_lock(&policy->transition_lock);
policy->transition_ongoing = false;
@@ -1576,7 +1576,8 @@ static int cpufreq_online(unsigned int cpu)
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
- if (cpufreq_thermal_control_enabled(cpufreq_driver))
+ /* Register cpufreq cooling only for a new policy */
+ if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
pr_debug("initialization complete\n");
@@ -1660,11 +1661,6 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
else
policy->last_policy = policy->policy;
- if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
- cpufreq_cooling_unregister(policy->cdev);
- policy->cdev = NULL;
- }
-
if (has_target())
cpufreq_exit_governor(policy);
@@ -1725,6 +1721,15 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
return;
}
+ /*
+ * Unregister cpufreq cooling once all the CPUs of the policy are
+ * removed.
+ */
+ if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+ cpufreq_cooling_unregister(policy->cdev);
+ policy->cdev = NULL;
+ }
+
/* We did light-weight exit earlier, do full tear down now */
if (cpufreq_driver->offline)
cpufreq_driver->exit(policy);
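Together with the cpufreq_online() hunk above, this ties the cooling
device to the lifetime of the policy: it is registered once, when the
policy is first created, and unregistered once, when the policy's last CPU
is removed, instead of being torn down on every CPU offline. A condensed
view of the resulting pairing (not a literal extract):

	/* policy creation, only when new_policy is true */
	policy->cdev = of_cpufreq_cooling_register(policy);

	/* policy teardown, only after the last CPU of the policy is gone */
	cpufreq_cooling_unregister(policy->cdev);
	policy->cdev = NULL;
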
@@ -2179,7 +2184,7 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
policy->cur = freq;
arch_set_freq_scale(policy->related_cpus, freq,
- policy->cpuinfo.max_freq);
+ arch_scale_freq_ref(policy->cpu));
cpufreq_stats_record_transition(policy, freq);
if (trace_cpu_frequency_enabled()) {
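The two arch_set_freq_scale() hunks change the denominator of the
frequency-invariance scale from policy->cpuinfo.max_freq, which moves when
boost is toggled, to arch_scale_freq_ref(), a fixed per-CPU reference. A
runnable sketch of the quantity being maintained (SCHED_CAPACITY_SHIFT is
10 in the kernel; the values are illustrative):

	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10

	static unsigned long freq_scale(unsigned long cur_khz,
					unsigned long ref_khz)
	{
		/* 1024 means "running at the reference frequency" */
		return (cur_khz << SCHED_CAPACITY_SHIFT) / ref_khz;
	}

	int main(void)
	{
		/* 1.4 GHz against a 2.8 GHz reference -> scale of 512 */
		printf("%lu\n", freq_scale(1400000, 2800000));
		return 0;
	}
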
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 357e01a272..79619227ea 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -302,7 +302,10 @@ static bool hwp_forced __read_mostly;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
-#define HYBRID_SCALING_FACTOR 78741
+#define HYBRID_SCALING_FACTOR 78741
+#define HYBRID_SCALING_FACTOR_MTL 80000
+
+static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
static inline int core_get_scaling(void)
{
@@ -422,7 +425,7 @@ static int intel_pstate_cppc_get_scaling(int cpu)
*/
if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq &&
cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq)
- return HYBRID_SCALING_FACTOR;
+ return hybrid_scaling_factor;
return core_get_scaling();
}
@@ -1717,13 +1720,6 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
/*
- * If this CPU gen doesn't call for change in balance_perf
- * EPP return.
- */
- if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
- return;
-
- /*
* If the EPP is set by firmware, which means that firmware enabled HWP
* - Is equal or less than 0x80 (default balance_perf EPP)
* - But less performance oriented than performance EPP
@@ -1736,6 +1732,13 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
}
/*
+ * If this CPU gen doesn't call for change in balance_perf
+ * EPP return.
+ */
+ if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
+ return;
+
+ /*
* Use hard coded value per gen to update the balance_perf
* and default EPP.
*/
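The two hunks above reorder intel_pstate_update_epp_defaults() so that the
firmware-programmed EPP check runs before the per-generation early return;
previously, generations that keep the default balance_perf value returned
before a firmware-set EPP could be adopted. A condensed sketch of the
corrected control flow (simplified from the real function):

	/* a sane firmware-set EPP wins: adopt it as balance_perf */
	if (hwp_forced && epp_default <= HWP_EPP_BALANCE_PERFORMANCE &&
	    epp_default > HWP_EPP_PERFORMANCE) {
		epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = epp_default;
		return;
	}

	/* this generation keeps the default balance_perf: nothing to do */
	if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
		return;

	/* otherwise apply the hard-coded per-generation value */
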
@@ -1993,7 +1996,7 @@ static int hwp_get_cpu_scaling(int cpu)
smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
/* P-cores have a smaller perf level-to-frequency scaling factor. */
if (cpu_type == 0x40)
- return HYBRID_SCALING_FACTOR;
+ return hybrid_scaling_factor;
/* Use default core scaling for E-cores */
if (cpu_type == 0x20)
@@ -2431,6 +2434,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
X86_MATCH(ICELAKE_X, core_funcs),
X86_MATCH(TIGERLAKE, core_funcs),
X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
+ X86_MATCH(EMERALDRAPIDS_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -3414,6 +3418,11 @@ static const struct x86_cpu_id intel_epp_balance_perf[] = {
{}
};
+static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
+ {}
+};
+
static int __init intel_pstate_init(void)
{
static struct cpudata **_all_cpu_data;
@@ -3504,9 +3513,16 @@ hwp_cpu_matched:
if (hwp_active) {
const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
+ const struct x86_cpu_id *hybrid_id = x86_match_cpu(intel_hybrid_scaling_factor);
if (id)
epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
+
+ if (hybrid_id) {
+ hybrid_scaling_factor = hybrid_id->driver_data;
+ pr_debug("hybrid scaling factor: %d\n", hybrid_scaling_factor);
+ }
+
}
mutex_lock(&intel_pstate_driver_lock);
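A note on what the new match table changes: intel_pstate converts a HWP
performance level to a frequency as freq_khz = perf * scaling, with the
scaling factor expressed in kHz per performance unit. The generic hybrid
P-core factor of 78741 means roughly 78.7 MHz per unit; the Meteor Lake
entry overrides that to 80000, i.e. exactly 80 MHz per unit. A minimal
illustration (helper name hypothetical):

	static unsigned int hwp_perf_to_khz(unsigned int perf,
					    unsigned int scaling)
	{
		return perf * scaling;	/* result in kHz */
	}

	/* e.g. perf level 40 on Meteor Lake: 40 * 80000 = 3.2 GHz */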