Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/amd-pstate-ut.c        |  12
-rw-r--r--  drivers/cpufreq/amd-pstate.c           | 138
-rw-r--r--  drivers/cpufreq/amd-pstate.h           |  14
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c   |  10
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c           |  21
-rw-r--r--  drivers/cpufreq/cpufreq.c              |  36
-rw-r--r--  drivers/cpufreq/freq_table.c           |  12
-rw-r--r--  drivers/cpufreq/intel_pstate.c         | 183
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c     |  10
-rw-r--r--  drivers/cpufreq/pmac32-cpufreq.c       |   8
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c      |   4
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c   |  13
-rw-r--r--  drivers/cpufreq/sun50i-cpufreq-nvmem.c | 209
-rw-r--r--  drivers/cpufreq/tegra124-cpufreq.c     |  19
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c           |   6
15 files changed, 425 insertions(+), 270 deletions(-)
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index fc275d41d5..66b73c308c 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -202,6 +202,7 @@ static void amd_pstate_ut_check_freq(u32 index)
int cpu = 0;
struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata = NULL;
+ u32 nominal_freq_khz;
for_each_possible_cpu(cpu) {
policy = cpufreq_cpu_get(cpu);
@@ -209,13 +210,14 @@ static void amd_pstate_ut_check_freq(u32 index)
break;
cpudata = policy->driver_data;
- if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
- (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
+ nominal_freq_khz = cpudata->nominal_freq * 1000;
+ if (!((cpudata->max_freq >= nominal_freq_khz) &&
+ (nominal_freq_khz > cpudata->lowest_nonlinear_freq) &&
(cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
(cpudata->min_freq > 0))) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
- __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
+ __func__, cpu, cpudata->max_freq, nominal_freq_khz,
cpudata->lowest_nonlinear_freq, cpudata->min_freq);
goto skip_test;
}
@@ -229,13 +231,13 @@ static void amd_pstate_ut_check_freq(u32 index)
if (cpudata->boost_supported) {
if ((policy->max == cpudata->max_freq) ||
- (policy->max == cpudata->nominal_freq))
+ (policy->max == nominal_freq_khz))
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
else {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
__func__, cpu, policy->max, cpudata->max_freq,
- cpudata->nominal_freq);
+ nominal_freq_khz);
goto skip_test;
}
} else {
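The unit-test change above works because cpudata->nominal_freq is kept in MHz while max_freq, lowest_nonlinear_freq and min_freq are kHz, so the test has to normalize units before comparing. A minimal standalone sketch of the same ordering check (field names mirror the driver; the MHz-to-kHz assumption is taken from the hunk above, and the sample values are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the invariant checked in amd_pstate_ut_check_freq(). */
static bool freq_order_ok(uint32_t max_khz, uint32_t nominal_mhz,
			  uint32_t lowest_nonlinear_khz, uint32_t min_khz)
{
	uint32_t nominal_khz = nominal_mhz * 1000;	/* normalize MHz -> kHz */

	return max_khz >= nominal_khz &&
	       nominal_khz > lowest_nonlinear_khz &&
	       lowest_nonlinear_khz > min_khz &&
	       min_khz > 0;
}

int main(void)
{
	/* Hypothetical values for a 400 MHz .. 4.2 GHz part. */
	printf("%d\n", freq_order_ok(4200000, 3400, 1700000, 400000));	/* 1 */
	printf("%d\n", freq_order_ok(4200000, 5000, 1700000, 400000));	/* 0: nominal above max */
	return 0;
}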
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 6af175e6c0..a092b13ffb 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -247,6 +247,26 @@ static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
return index;
}
+static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, bool fast_switch)
+{
+ if (fast_switch)
+ wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
+ else
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+ READ_ONCE(cpudata->cppc_req_cached));
+}
+
+DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+
+static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
+ u32 min_perf, u32 des_perf,
+ u32 max_perf, bool fast_switch)
+{
+ static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+ max_perf, fast_switch);
+}
+
static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
int ret;
@@ -263,6 +283,9 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
if (!ret)
cpudata->epp_cached = epp;
} else {
+ amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+ cpudata->max_limit_perf, false);
+
perf_ctrls.energy_perf = epp;
ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
if (ret) {
@@ -452,16 +475,6 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
return static_call(amd_pstate_init_perf)(cpudata);
}
-static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch)
-{
- if (fast_switch)
- wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
- else
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- READ_ONCE(cpudata->cppc_req_cached));
-}
-
static void cppc_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
u32 max_perf, bool fast_switch)
@@ -475,16 +488,6 @@ static void cppc_update_perf(struct amd_cpudata *cpudata,
cppc_set_perf(cpudata->cpu, &perf_ctrls);
}
-DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
-
-static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
-{
- static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
-}
-
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
{
u64 aperf, mperf, tsc;
@@ -688,26 +691,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
cpufreq_cpu_put(policy);
}
-static int amd_get_min_freq(struct amd_cpudata *cpudata)
-{
- return READ_ONCE(cpudata->min_freq);
-}
-
-static int amd_get_max_freq(struct amd_cpudata *cpudata)
-{
- return READ_ONCE(cpudata->max_freq);
-}
-
-static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
-{
- return READ_ONCE(cpudata->nominal_freq);
-}
-
-static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
-{
- return READ_ONCE(cpudata->lowest_nonlinear_freq);
-}
-
static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
struct amd_cpudata *cpudata = policy->driver_data;
@@ -860,7 +843,37 @@ free_cpufreq_put:
mutex_unlock(&amd_pstate_driver_lock);
}
-/**
+/*
+ * Get the P-state transition delay time from the ACPI tables that the
+ * firmware populated, instead of using a hardcoded value.
+ */
+static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
+{
+ u32 transition_delay_ns;
+
+ transition_delay_ns = cppc_get_transition_latency(cpu);
+ if (transition_delay_ns == CPUFREQ_ETERNAL)
+ return AMD_PSTATE_TRANSITION_DELAY;
+
+ return transition_delay_ns / NSEC_PER_USEC;
+}
+
+/*
+ * Get the P-state transition latency value from the ACPI tables that the
+ * firmware populated, instead of using a hardcoded value.
+ */
+static u32 amd_pstate_get_transition_latency(unsigned int cpu)
+{
+ u32 transition_latency;
+
+ transition_latency = cppc_get_transition_latency(cpu);
+ if (transition_latency == CPUFREQ_ETERNAL)
+ return AMD_PSTATE_TRANSITION_LATENCY;
+
+ return transition_latency;
+}
+
+/*
* amd_pstate_init_freq: Initialize the max_freq, min_freq,
* nominal_freq and lowest_nonlinear_freq for
* the @cpudata object.
@@ -881,7 +894,6 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
u32 boost_ratio, lowest_nonlinear_ratio;
struct cppc_perf_caps cppc_perf;
-
ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
@@ -917,7 +929,7 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ int min_freq, max_freq, nominal_freq, ret;
struct device *dev;
struct amd_cpudata *cpudata;
@@ -946,20 +958,21 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = amd_get_min_freq(cpudata);
- max_freq = amd_get_max_freq(cpudata);
- nominal_freq = amd_get_nominal_freq(cpudata);
- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
+ min_freq = READ_ONCE(cpudata->min_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
- min_freq, max_freq);
+ if (min_freq <= 0 || max_freq <= 0 ||
+ nominal_freq <= 0 || min_freq > max_freq) {
+ dev_err(dev,
+ "min_freq(%d) or max_freq(%d) or nominal_freq (%d) value is incorrect, check _CPC in ACPI tables\n",
+ min_freq, max_freq, nominal_freq);
ret = -EINVAL;
goto free_cpudata1;
}
- policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
- policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;
+ policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
+ policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
policy->min = min_freq;
policy->max = max_freq;
@@ -1052,7 +1065,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
int max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
- max_freq = amd_get_max_freq(cpudata);
+ max_freq = READ_ONCE(cpudata->max_freq);
if (max_freq < 0)
return max_freq;
@@ -1065,7 +1078,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
int freq;
struct amd_cpudata *cpudata = policy->driver_data;
- freq = amd_get_lowest_nonlinear_freq(cpudata);
+ freq = READ_ONCE(cpudata->lowest_nonlinear_freq);
if (freq < 0)
return freq;
@@ -1376,7 +1389,7 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ int min_freq, max_freq, nominal_freq, ret;
struct amd_cpudata *cpudata;
struct device *dev;
u64 value;
@@ -1407,13 +1420,14 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = amd_get_min_freq(cpudata);
- max_freq = amd_get_max_freq(cpudata);
- nominal_freq = amd_get_nominal_freq(cpudata);
- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
- min_freq, max_freq);
+ min_freq = READ_ONCE(cpudata->min_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
+ if (min_freq <= 0 || max_freq <= 0 ||
+ nominal_freq <= 0 || min_freq > max_freq) {
+ dev_err(dev,
+ "min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n",
+ min_freq, max_freq, nominal_freq);
ret = -EINVAL;
goto free_cpudata1;
}
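The code movement above exists so that amd_pstate_set_epp() can call amd_pstate_update_perf() before that wrapper's old definition point; the wrapper is a static call that defaults to the MSR path and is retargeted to the shared-memory CPPC path at init. Static calls are kernel-only infrastructure; as a rough userspace analogue, the same "default implementation, swapped once at setup" dispatch can be sketched with a plain function pointer:

#include <stdint.h>
#include <stdio.h>

struct cpudata { int cpu; };

/* Default backend: stands in for the MSR-based pstate_update_perf(). */
static void msr_update_perf(struct cpudata *cd, uint32_t min, uint32_t des,
			    uint32_t max, int fast_switch)
{
	printf("cpu%d: MSR path min=%u des=%u max=%u fast=%d\n",
	       cd->cpu, min, des, max, fast_switch);
}

/* Alternate backend: stands in for cppc_update_perf() on shared-memory systems. */
static void shmem_update_perf(struct cpudata *cd, uint32_t min, uint32_t des,
			      uint32_t max, int fast_switch)
{
	printf("cpu%d: CPPC path min=%u des=%u max=%u\n", cd->cpu, min, des, max);
}

/* Analogue of DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf). */
static void (*update_perf)(struct cpudata *, uint32_t, uint32_t, uint32_t, int) =
	msr_update_perf;

int main(void)
{
	struct cpudata cd = { .cpu = 0 };

	update_perf(&cd, 10, 0, 255, 0);	/* default (MSR) backend */
	update_perf = shmem_update_perf;	/* one-time retarget at init */
	update_perf(&cd, 10, 0, 255, 0);
	return 0;
}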
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index bc341f3590..e6a28e7f4d 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -42,13 +42,17 @@ struct amd_aperf_mperf {
* @lowest_perf: the absolute lowest performance level of the processor
* @prefcore_ranking: the preferred core ranking, the higher value indicates a higher
* priority.
- * @max_freq: the frequency that mapped to highest_perf
- * @min_freq: the frequency that mapped to lowest_perf
- * @nominal_freq: the frequency that mapped to nominal_perf
- * @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf
+ * @min_limit_perf: Cached value of the performance corresponding to policy->min
+ * @max_limit_perf: Cached value of the performance corresponding to policy->max
+ * @min_limit_freq: Cached value of policy->min (in khz)
+ * @max_limit_freq: Cached value of policy->max (in khz)
+ * @max_freq: the frequency (in khz) that maps to highest_perf
+ * @min_freq: the frequency (in khz) that maps to lowest_perf
+ * @nominal_freq: the frequency (in khz) that maps to nominal_perf
+ * @lowest_nonlinear_freq: the frequency (in khz) that maps to lowest_nonlinear_perf
* @cur: Difference of Aperf/Mperf/tsc count between last and current sample
* @prev: Last Aperf/Mperf/tsc count value read from register
- * @freq: current cpu frequency value
+ * @freq: current cpu frequency value (in khz)
* @boost_supported: check whether the Processor or SBIOS supports boost mode
* @hw_prefcore: check whether HW supports preferred core feature.
* Only when hw_prefcore and early prefcore param are true,
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index b993a49808..c74dd1e01e 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -104,6 +104,9 @@ static const struct of_device_id allowlist[] __initconst = {
*/
static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "allwinner,sun50i-h6", },
+ { .compatible = "allwinner,sun50i-h616", },
+ { .compatible = "allwinner,sun50i-h618", },
+ { .compatible = "allwinner,sun50i-h700", },
{ .compatible = "apple,arm-platform", },
@@ -195,19 +198,18 @@ static const struct of_device_id blocklist[] __initconst = {
static bool __init cpu0_node_has_opp_v2_prop(void)
{
- struct device_node *np = of_cpu_device_node_get(0);
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
bool ret = false;
if (of_property_present(np, "operating-points-v2"))
ret = true;
- of_node_put(np);
return ret;
}
static int __init cpufreq_dt_platdev_init(void)
{
- struct device_node *np = of_find_node_by_path("/");
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
const struct of_device_id *match;
const void *data = NULL;
@@ -223,11 +225,9 @@ static int __init cpufreq_dt_platdev_init(void)
if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np))
goto create_pdev;
- of_node_put(np);
return -ENODEV;
create_pdev:
- of_node_put(np);
return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
-1, data,
sizeof(struct cpufreq_dt_platform_data)));
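The cpufreq-dt-platdev.c conversion above relies on the kernel's __free(device_node) annotation, which drops the of_node reference automatically when the variable goes out of scope, so every manual of_node_put() on the exit paths can go away. That machinery is built on the compiler's cleanup attribute; a self-contained userspace analogue (free_str/__free_str are hypothetical names invented for this sketch, with FILE-less strdup() as the refcounted resource):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace analogue of the kernel's DEFINE_FREE()/__free() machinery. */
static void free_str(char **p)
{
	free(*p);
}
#define __free_str __attribute__((cleanup(free_str)))

static int has_root_prefix(const char *s)
{
	char *copy __free_str = strdup(s);	/* released on every return path */

	if (!copy)
		return 0;
	if (copy[0] != '/')
		return 0;			/* no manual free() needed here */
	return 1;
}

int main(void)
{
	printf("%d %d\n", has_root_prefix("/cpus"), has_root_prefix("cpus"));
	return 0;
}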
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 2d83bbc65d..907e22632f 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -68,12 +68,9 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
*/
static const char *find_supply_name(struct device *dev)
{
- struct device_node *np;
+ struct device_node *np __free(device_node) = of_node_get(dev->of_node);
struct property *pp;
int cpu = dev->id;
- const char *name = NULL;
-
- np = of_node_get(dev->of_node);
/* This must be valid for sure */
if (WARN_ON(!np))
@@ -82,22 +79,16 @@ static const char *find_supply_name(struct device *dev)
/* Try "cpu0" for older DTs */
if (!cpu) {
pp = of_find_property(np, "cpu0-supply", NULL);
- if (pp) {
- name = "cpu0";
- goto node_put;
- }
+ if (pp)
+ return "cpu0";
}
pp = of_find_property(np, "cpu-supply", NULL);
- if (pp) {
- name = "cpu";
- goto node_put;
- }
+ if (pp)
+ return "cpu";
dev_dbg(dev, "no regulator for cpu%d\n", cpu);
-node_put:
- of_node_put(np);
- return name;
+ return NULL;
}
static int cpufreq_init(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d7630d9cdb..9e5060b278 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2586,6 +2586,40 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_get_policy);
+DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
+
+/**
+ * cpufreq_update_pressure() - Update cpufreq pressure for CPUs
+ * @policy: cpufreq policy of the CPUs.
+ *
+ * Update the value of cpufreq pressure for all @cpus in the policy.
+ */
+static void cpufreq_update_pressure(struct cpufreq_policy *policy)
+{
+ unsigned long max_capacity, capped_freq, pressure;
+ u32 max_freq;
+ int cpu;
+
+ cpu = cpumask_first(policy->related_cpus);
+ max_freq = arch_scale_freq_ref(cpu);
+ capped_freq = policy->max;
+
+ /*
+ * Properly handle boost frequencies, which should simply clear
+ * the cpufreq pressure value.
+ */
+ if (max_freq <= capped_freq) {
+ pressure = 0;
+ } else {
+ max_capacity = arch_scale_cpu_capacity(cpu);
+ pressure = max_capacity -
+ mult_frac(max_capacity, capped_freq, max_freq);
+ }
+
+ for_each_cpu(cpu, policy->related_cpus)
+ WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
+}
+
/**
* cpufreq_set_policy - Modify cpufreq policy parameters.
* @policy: Policy object to modify.
@@ -2641,6 +2675,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
trace_cpu_frequency_limits(policy);
+ cpufreq_update_pressure(policy);
+
policy->cached_target_freq = UINT_MAX;
pr_debug("new min and max freqs are %u - %u kHz\n",
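The new cpufreq_update_pressure() computes the capacity lost to a frequency cap: the capped capacity is the CPU's maximum capacity scaled by capped_freq/max_freq, and the pressure is the remainder. The arithmetic, runnable standalone with hypothetical numbers (the macro here is a simplified stand-in; the kernel's mult_frac() splits the division to avoid overflow in x * numer):

#include <stdio.h>

/* Simplified stand-in for the kernel's mult_frac(). */
#define mult_frac(x, numer, denom)	(((x) * (numer)) / (denom))

int main(void)
{
	unsigned long max_capacity = 1024;	/* arch_scale_cpu_capacity() */
	unsigned long max_freq = 3000000;	/* arch_scale_freq_ref(), kHz */
	unsigned long capped_freq = 1800000;	/* policy->max, kHz */
	unsigned long pressure;

	if (max_freq <= capped_freq)		/* boost range: no pressure */
		pressure = 0;
	else
		pressure = max_capacity -
			   mult_frac(max_capacity, capped_freq, max_freq);

	printf("pressure = %lu\n", pressure);	/* 1024 - 614 = 410 */
	return 0;
}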
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index c17dc51a5a..10e80d912b 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -70,7 +70,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
- unsigned int freq, next_larger = ~0;
+ unsigned int freq, prev_smaller = 0;
bool found = false;
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
@@ -86,12 +86,12 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
break;
}
- if ((next_larger > freq) && (freq > policy->max))
- next_larger = freq;
+ if ((prev_smaller < freq) && (freq <= policy->max))
+ prev_smaller = freq;
}
if (!found) {
- policy->max = next_larger;
+ policy->max = prev_smaller;
cpufreq_verify_within_cpu_limits(policy);
}
@@ -194,7 +194,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
}
if (optimal.driver_data > i) {
if (suboptimal.driver_data > i) {
- WARN(1, "Invalid frequency table: %d\n", policy->cpu);
+ WARN(1, "Invalid frequency table: %u\n", policy->cpu);
return 0;
}
@@ -254,7 +254,7 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- count += sprintf(&buf[count], "%d ", pos->frequency);
+ count += sprintf(&buf[count], "%u ", pos->frequency);
}
count += sprintf(&buf[count], "\n");
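The freq_table.c change flips the fallback direction: when no table entry lies within the requested [min, max] window, the old code raised policy->max to the next larger supported frequency, while the new code lowers it to the closest supported frequency below the requested max, so the cap is never exceeded. A toy version of the reworked clamping (simplified from cpufreq_frequency_table_verify(); the table values are made up):

#include <stdio.h>

static unsigned int clamp_max(const unsigned int *table, int n,
			      unsigned int min, unsigned int max)
{
	unsigned int prev_smaller = 0;
	int i;

	for (i = 0; i < n; i++) {
		unsigned int freq = table[i];

		if (freq >= min && freq <= max)
			return max;			/* a valid entry exists: keep max */
		if (prev_smaller < freq && freq <= max)
			prev_smaller = freq;
	}
	return prev_smaller;				/* no fit: lower max instead of raising it */
}

int main(void)
{
	unsigned int table[] = { 400000, 800000, 1200000, 1600000 };

	/* The 900..1100 MHz window contains no entry: fall back to 800000 kHz. */
	printf("%u\n", clamp_max(table, 4, 900000, 1100000));
	return 0;
}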
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 3405bf69b1..c31914a987 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -173,7 +173,6 @@ struct vid_data {
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
- * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
* @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
* P-state capacity.
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
@@ -182,7 +181,6 @@ struct vid_data {
struct global_params {
bool no_turbo;
bool turbo_disabled;
- bool turbo_disabled_mf;
int max_perf_pct;
int min_perf_pct;
};
@@ -213,7 +211,7 @@ struct global_params {
* @epp_policy: Last saved policy used to set EPP/EPB
* @epp_default: Power on default HWP energy performance
* preference/bias
- * @epp_cached Cached HWP energy-performance preference value
+ * @epp_cached: Cached HWP energy-performance preference value
* @hwp_req_cached: Cached value of the last HWP Request MSR
* @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
* @last_io_update: Last time when IO wake flag was set
@@ -292,11 +290,11 @@ struct pstate_funcs {
static struct pstate_funcs pstate_funcs __read_mostly;
-static int hwp_active __read_mostly;
-static int hwp_mode_bdw __read_mostly;
-static bool per_cpu_limits __read_mostly;
+static bool hwp_active __ro_after_init;
+static int hwp_mode_bdw __ro_after_init;
+static bool per_cpu_limits __ro_after_init;
+static bool hwp_forced __ro_after_init;
static bool hwp_boost __read_mostly;
-static bool hwp_forced __read_mostly;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
@@ -593,12 +591,13 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
}
-static inline void update_turbo_state(void)
+static bool turbo_is_disabled(void)
{
u64 misc_en;
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
- global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+
+ return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}
static int min_perf_pct_min(void)
@@ -1153,12 +1152,16 @@ static void intel_pstate_update_policies(void)
static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
struct cpufreq_policy *policy)
{
- policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+ if (hwp_active)
+ intel_pstate_get_hwp_cap(cpudata);
+
+ policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
refresh_frequency_limits(policy);
}
-static void intel_pstate_update_max_freq(unsigned int cpu)
+static void intel_pstate_update_limits(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
@@ -1170,25 +1173,12 @@ static void intel_pstate_update_max_freq(unsigned int cpu)
cpufreq_cpu_release(policy);
}
-static void intel_pstate_update_limits(unsigned int cpu)
+static void intel_pstate_update_limits_for_all(void)
{
- mutex_lock(&intel_pstate_driver_lock);
-
- update_turbo_state();
- /*
- * If turbo has been turned on or off globally, policy limits for
- * all CPUs need to be updated to reflect that.
- */
- if (global.turbo_disabled_mf != global.turbo_disabled) {
- global.turbo_disabled_mf = global.turbo_disabled;
- arch_set_max_freq_ratio(global.turbo_disabled);
- for_each_possible_cpu(cpu)
- intel_pstate_update_max_freq(cpu);
- } else {
- cpufreq_update_policy(cpu);
- }
+ int cpu;
- mutex_unlock(&intel_pstate_driver_lock);
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_limits(cpu);
}
/************************** sysfs begin ************************/
@@ -1286,11 +1276,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
return -EAGAIN;
}
- update_turbo_state();
- if (global.turbo_disabled)
- ret = sprintf(buf, "%u\n", global.turbo_disabled);
- else
- ret = sprintf(buf, "%u\n", global.no_turbo);
+ ret = sprintf(buf, "%u\n", global.no_turbo);
mutex_unlock(&intel_pstate_driver_lock);
@@ -1301,32 +1287,39 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
- int ret;
+ bool no_turbo;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
+ if (sscanf(buf, "%u", &input) != 1)
return -EINVAL;
mutex_lock(&intel_pstate_driver_lock);
if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
- return -EAGAIN;
+ count = -EAGAIN;
+ goto unlock_driver;
}
- mutex_lock(&intel_pstate_limits_lock);
+ no_turbo = !!clamp_t(int, input, 0, 1);
- update_turbo_state();
- if (global.turbo_disabled) {
- pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
- mutex_unlock(&intel_pstate_limits_lock);
- mutex_unlock(&intel_pstate_driver_lock);
- return -EPERM;
+ WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
+ if (global.turbo_disabled && !no_turbo) {
+ pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
+ count = -EPERM;
+ if (global.no_turbo)
+ goto unlock_driver;
+ else
+ no_turbo = 1;
+ }
+
+ if (no_turbo == global.no_turbo) {
+ goto unlock_driver;
}
- global.no_turbo = clamp_t(int, input, 0, 1);
+ WRITE_ONCE(global.no_turbo, no_turbo);
+
+ mutex_lock(&intel_pstate_limits_lock);
- if (global.no_turbo) {
+ if (no_turbo) {
struct cpudata *cpu = all_cpu_data[0];
int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
@@ -1337,9 +1330,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
mutex_unlock(&intel_pstate_limits_lock);
- intel_pstate_update_policies();
- arch_set_max_freq_ratio(global.no_turbo);
+ intel_pstate_update_limits_for_all();
+ arch_set_max_freq_ratio(no_turbo);
+unlock_driver:
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1620,7 +1614,6 @@ static void intel_pstate_notify_work(struct work_struct *work)
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
if (policy) {
- intel_pstate_get_hwp_cap(cpudata);
__intel_pstate_update_max_freq(cpudata, policy);
cpufreq_cpu_release(policy);
@@ -1635,11 +1628,10 @@ static cpumask_t hwp_intr_enable_mask;
void notify_hwp_interrupt(void)
{
unsigned int this_cpu = smp_processor_id();
- struct cpudata *cpudata;
unsigned long flags;
u64 value;
- if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
return;
rdmsrl_safe(MSR_HWP_STATUS, &value);
@@ -1651,24 +1643,8 @@ void notify_hwp_interrupt(void)
if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
goto ack_intr;
- /*
- * Currently we never free all_cpu_data. And we can't reach here
- * without this allocated. But for safety for future changes, added
- * check.
- */
- if (unlikely(!READ_ONCE(all_cpu_data)))
- goto ack_intr;
-
- /*
- * The free is done during cleanup, when cpufreq registry is failed.
- * We wouldn't be here if it fails on init or switch status. But for
- * future changes, added check.
- */
- cpudata = READ_ONCE(all_cpu_data[this_cpu]);
- if (unlikely(!cpudata))
- goto ack_intr;
-
- schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
+ schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
+ msecs_to_jiffies(10));
spin_unlock_irqrestore(&hwp_notify_lock, flags);
@@ -1681,7 +1657,7 @@ ack_intr:
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
{
- unsigned long flags;
+ bool cancel_work;
if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
return;
@@ -1689,22 +1665,22 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
- spin_lock_irqsave(&hwp_notify_lock, flags);
- if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
- cancel_delayed_work(&cpudata->hwp_notify_work);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ spin_lock_irq(&hwp_notify_lock);
+ cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+ spin_unlock_irq(&hwp_notify_lock);
+
+ if (cancel_work)
+ cancel_delayed_work_sync(&cpudata->hwp_notify_work);
}
static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{
/* Enable HWP notification interrupt for guaranteed performance change */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
- unsigned long flags;
-
- spin_lock_irqsave(&hwp_notify_lock, flags);
+ spin_lock_irq(&hwp_notify_lock);
INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ spin_unlock_irq(&hwp_notify_lock);
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
@@ -1790,7 +1766,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
u32 vid;
val = (u64)pstate << 8;
- if (global.no_turbo && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
val |= (u64)1 << 32;
vid_fp = cpudata->vid.min + mul_fp(
@@ -1955,7 +1931,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
u64 val;
val = (u64)pstate << 8;
- if (global.no_turbo && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
val |= (u64)1 << 32;
return val;
@@ -2028,14 +2004,6 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
-static void intel_pstate_max_within_limits(struct cpudata *cpu)
-{
- int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
-
- update_turbo_state();
- intel_pstate_set_pstate(cpu, pstate);
-}
-
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
@@ -2261,7 +2229,7 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)
sample->busy_scaled = busy_frac * 100;
- target = global.no_turbo || global.turbo_disabled ?
+ target = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
target += target >> 2;
target = mul_fp(target, busy_frac);
@@ -2305,8 +2273,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu)
struct sample *sample;
int target_pstate;
- update_turbo_state();
-
target_pstate = get_target_pstate(cpu);
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
@@ -2436,6 +2402,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+#ifdef CONFIG_ACPI
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(BROADWELL_D, core_funcs),
X86_MATCH(BROADWELL_X, core_funcs),
@@ -2444,6 +2411,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
{}
};
+#endif
static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
X86_MATCH(KABYLAKE, core_funcs),
@@ -2525,7 +2493,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
static int intel_pstate_get_max_freq(struct cpudata *cpu)
{
- return global.turbo_disabled || global.no_turbo ?
+ return READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}
@@ -2610,12 +2578,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
+
/*
* NOHZ_FULL CPUs need this as the governor callback may not
* be invoked on them.
*/
intel_pstate_clear_update_util_hook(policy->cpu);
- intel_pstate_max_within_limits(cpu);
+ intel_pstate_set_pstate(cpu, pstate);
} else {
intel_pstate_set_update_util_hook(policy->cpu);
}
@@ -2658,10 +2628,9 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
{
int max_freq;
- update_turbo_state();
if (hwp_active) {
intel_pstate_get_hwp_cap(cpu);
- max_freq = global.no_turbo || global.turbo_disabled ?
+ max_freq = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
} else {
max_freq = intel_pstate_get_max_freq(cpu);
@@ -2755,9 +2724,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_freq;
- update_turbo_state();
- global.turbo_disabled_mf = global.turbo_disabled;
- policy->cpuinfo.max_freq = global.turbo_disabled ?
+ policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
policy->min = policy->cpuinfo.min_freq;
@@ -2922,8 +2889,6 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
struct cpufreq_freqs freqs;
int target_pstate;
- update_turbo_state();
-
freqs.old = policy->cur;
freqs.new = target_freq;
@@ -2945,8 +2910,6 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
struct cpudata *cpu = all_cpu_data[policy->cpu];
int target_pstate;
- update_turbo_state();
-
target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
@@ -2964,9 +2927,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
int old_pstate = cpu->pstate.current_pstate;
int cap_pstate, min_pstate, max_pstate, target_pstate;
- update_turbo_state();
- cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
- HWP_HIGHEST_PERF(hwp_cap);
+ cap_pstate = READ_ONCE(global.no_turbo) ?
+ HWP_GUARANTEED_PERF(hwp_cap) :
+ HWP_HIGHEST_PERF(hwp_cap);
/* Optimization: Avoid unnecessary divisions. */
@@ -3134,10 +3097,8 @@ static void intel_pstate_driver_cleanup(void)
if (intel_pstate_driver == &intel_pstate)
intel_pstate_clear_update_util_hook(cpu);
- spin_lock(&hwp_notify_lock);
kfree(all_cpu_data[cpu]);
WRITE_ONCE(all_cpu_data[cpu], NULL);
- spin_unlock(&hwp_notify_lock);
}
}
cpus_read_unlock();
@@ -3154,6 +3115,10 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
memset(&global, 0, sizeof(global));
global.max_perf_pct = 100;
+ global.turbo_disabled = turbo_is_disabled();
+ global.no_turbo = global.turbo_disabled;
+
+ arch_set_max_freq_ratio(global.turbo_disabled);
intel_pstate_driver = driver;
ret = cpufreq_register_driver(intel_pstate_driver);
@@ -3465,7 +3430,7 @@ static int __init intel_pstate_init(void)
* deal with it.
*/
if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
- WRITE_ONCE(hwp_active, 1);
+ hwp_active = true;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
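Most of the intel_pstate churn above removes the update_turbo_state() calls from hot paths and replaces them with a no_turbo flag that is written under the driver lock in store_no_turbo() and read locklessly via READ_ONCE()/WRITE_ONCE() everywhere else. A rough C11 analogue of that publish/read pattern, with relaxed atomics playing the role of the kernel's ONCE accessors:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for global.no_turbo: written by the sysfs store path,
 * read from hot paths without holding a lock. */
static _Atomic bool no_turbo;

static unsigned int get_max_freq(unsigned int max_freq, unsigned int turbo_freq)
{
	/* Analogue of: READ_ONCE(global.no_turbo) ? max_freq : turbo_freq */
	return atomic_load_explicit(&no_turbo, memory_order_relaxed) ?
	       max_freq : turbo_freq;
}

int main(void)
{
	printf("%u\n", get_max_freq(3000000, 4200000));	/* 4200000 */

	/* Analogue of: WRITE_ONCE(global.no_turbo, true) in store_no_turbo() */
	atomic_store_explicit(&no_turbo, true, memory_order_relaxed);
	printf("%u\n", get_max_freq(3000000, 4200000));	/* 3000000 */
	return 0;
}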
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index a0a61919bc..518606adf1 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -707,6 +707,15 @@ static const struct mtk_cpufreq_platform_data mt7623_platform_data = {
.ccifreq_supported = false,
};
+static const struct mtk_cpufreq_platform_data mt7988_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 900000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1150000,
+ .ccifreq_supported = true,
+};
+
static const struct mtk_cpufreq_platform_data mt8183_platform_data = {
.min_volt_shift = 100000,
.max_volt_shift = 200000,
@@ -740,6 +749,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
{ .compatible = "mediatek,mt7623", .data = &mt7623_platform_data },
+ { .compatible = "mediatek,mt7988a", .data = &mt7988_platform_data },
{ .compatible = "mediatek,mt8167", .data = &mt8516_platform_data },
{ .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index df3567c1e9..6c9f0888a2 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -120,9 +120,9 @@ static int cpu_750fx_cpu_speed(int low_speed)
/* tweak L2 for high voltage */
if (has_cpu_l2lve) {
- hid2 = mfspr(SPRN_HID2);
+ hid2 = mfspr(SPRN_HID2_750FX);
hid2 &= ~0x2000;
- mtspr(SPRN_HID2, hid2);
+ mtspr(SPRN_HID2_750FX, hid2);
}
}
#ifdef CONFIG_PPC_BOOK3S_32
@@ -131,9 +131,9 @@ static int cpu_750fx_cpu_speed(int low_speed)
if (low_speed == 1) {
/* tweak L2 for low voltage */
if (has_cpu_l2lve) {
- hid2 = mfspr(SPRN_HID2);
+ hid2 = mfspr(SPRN_HID2_750FX);
hid2 |= 0x2000;
- mtspr(SPRN_HID2, hid2);
+ mtspr(SPRN_HID2_750FX, hid2);
}
/* ramping down, set voltage last */
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 70b0f21968..ec8df5496a 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -347,8 +347,8 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
throttled_freq = freq_hz / HZ_PER_KHZ;
- /* Update thermal pressure (the boost frequencies are accepted) */
- arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+ /* Update HW pressure (the boost frequencies are accepted) */
+ arch_update_hw_pressure(policy->related_cpus, throttled_freq);
/*
* In the unlikely case policy is unregistered do not enable
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index ea05d9d674..5004e1dbc7 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -480,23 +480,30 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()),
GFP_KERNEL);
- if (!drv)
+ if (!drv) {
+ of_node_put(np);
return -ENOMEM;
+ }
match = pdev->dev.platform_data;
drv->data = match->data;
- if (!drv->data)
+ if (!drv->data) {
+ of_node_put(np);
return -ENODEV;
+ }
if (drv->data->get_version) {
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
- if (IS_ERR(speedbin_nvmem))
+ if (IS_ERR(speedbin_nvmem)) {
+ of_node_put(np);
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
+ }
ret = drv->data->get_version(cpu_dev,
speedbin_nvmem, &pvs_name, drv);
if (ret) {
+ of_node_put(np);
nvmem_cell_put(speedbin_nvmem);
return ret;
}
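The qcom-cpufreq-nvmem fix above is purely about reference balancing: np is acquired earlier in probe, so every newly added early return must drop it with of_node_put(). The same discipline in a standalone C shape, using a FILE * as the refcounted resource; the goto-based single exit shown here is the usual kernel alternative to repeating the put on each path, as this patch does:

#include <stdio.h>

static int process(const char *path)
{
	FILE *f = fopen(path, "r");	/* analogue of of_find_node_by_path() */
	int ret = 0;

	if (!f)
		return -1;

	if (fgetc(f) == EOF) {		/* some failing check mid-function */
		ret = -2;
		goto put;		/* single exit keeps the release paired */
	}

put:
	fclose(f);			/* analogue of of_node_put() */
	return ret;
}

int main(void)
{
	printf("%d\n", process("/etc/hostname"));
	return 0;
}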
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 32a9c88f8f..ef83e4bf26 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/arm-smccc.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
@@ -18,26 +19,155 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
-#define MAX_NAME_LEN 7
-
#define NVMEM_MASK 0x7
#define NVMEM_SHIFT 5
static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
+struct sunxi_cpufreq_data {
+ u32 (*efuse_xlate)(u32 speedbin);
+};
+
+static u32 sun50i_h6_efuse_xlate(u32 speedbin)
+{
+ u32 efuse_value;
+
+ efuse_value = (speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
+
+ /*
+ * We treat unexpected efuse values as if the SoC was from
+ * the slowest bin. Expected efuse values are 1-3, slowest
+ * to fastest.
+ */
+ if (efuse_value >= 1 && efuse_value <= 3)
+ return efuse_value - 1;
+ else
+ return 0;
+}
+
+static int get_soc_id_revision(void)
+{
+#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
+ return arm_smccc_get_soc_id_revision();
+#else
+ return SMCCC_RET_NOT_SUPPORTED;
+#endif
+}
+
+/*
+ * Judging by the OPP tables in the vendor BSP, the quality order of the
+ * returned speedbin index is 4 -> 0/2 -> 3 -> 1, from worst to best.
+ * 0 and 2 seem identical from the OPP tables' point of view.
+ */
+static u32 sun50i_h616_efuse_xlate(u32 speedbin)
+{
+ int ver_bits = get_soc_id_revision();
+ u32 value = 0;
+
+ switch (speedbin & 0xffff) {
+ case 0x2000:
+ value = 0;
+ break;
+ case 0x2400:
+ case 0x7400:
+ case 0x2c00:
+ case 0x7c00:
+ if (ver_bits != SMCCC_RET_NOT_SUPPORTED && ver_bits <= 1) {
+ /* ic version A/B */
+ value = 1;
+ } else {
+ /* ic version C and later version */
+ value = 2;
+ }
+ break;
+ case 0x5000:
+ case 0x5400:
+ case 0x6000:
+ value = 3;
+ break;
+ case 0x5c00:
+ value = 4;
+ break;
+ case 0x5d00:
+ value = 0;
+ break;
+ default:
+ pr_warn("sun50i-cpufreq-nvmem: unknown speed bin 0x%x, using default bin 0\n",
+ speedbin & 0xffff);
+ value = 0;
+ break;
+ }
+
+ return value;
+}
+
+static struct sunxi_cpufreq_data sun50i_h6_cpufreq_data = {
+ .efuse_xlate = sun50i_h6_efuse_xlate,
+};
+
+static struct sunxi_cpufreq_data sun50i_h616_cpufreq_data = {
+ .efuse_xlate = sun50i_h616_efuse_xlate,
+};
+
+static const struct of_device_id cpu_opp_match_list[] = {
+ { .compatible = "allwinner,sun50i-h6-operating-points",
+ .data = &sun50i_h6_cpufreq_data,
+ },
+ { .compatible = "allwinner,sun50i-h616-operating-points",
+ .data = &sun50i_h616_cpufreq_data,
+ },
+ {}
+};
+
+/**
+ * dt_has_supported_hw() - Check if any OPPs use opp-supported-hw
+ *
+ * If we ask the cpufreq framework to use the opp-supported-hw feature, it
+ * will ignore every OPP node without that DT property. If none of the OPPs
+ * have it, the driver will fail probing due to the lack of OPPs.
+ *
+ * Returns true if we have at least one OPP with the opp-supported-hw property.
+ */
+static bool dt_has_supported_hw(void)
+{
+ bool has_opp_supported_hw = false;
+ struct device_node *np;
+ struct device *cpu_dev;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return false;
+
+ np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!np)
+ return false;
+
+ for_each_child_of_node_scoped(np, opp) {
+ if (of_find_property(opp, "opp-supported-hw", NULL)) {
+ has_opp_supported_hw = true;
+ break;
+ }
+ }
+
+ of_node_put(np);
+
+ return has_opp_supported_hw;
+}
+
/**
* sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value
- * @versions: Set to the value parsed from efuse
*
- * Returns 0 if success.
+ * Returns a non-negative speed bin index on success, or a negative
+ * error value otherwise.
*/
-static int sun50i_cpufreq_get_efuse(u32 *versions)
+static int sun50i_cpufreq_get_efuse(void)
{
+ const struct sunxi_cpufreq_data *opp_data;
struct nvmem_cell *speedbin_nvmem;
+ const struct of_device_id *match;
struct device_node *np;
struct device *cpu_dev;
- u32 *speedbin, efuse_value;
- size_t len;
+ u32 *speedbin;
int ret;
cpu_dev = get_cpu_device(0);
@@ -48,12 +178,12 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
if (!np)
return -ENOENT;
- ret = of_device_is_compatible(np,
- "allwinner,sun50i-h6-operating-points");
- if (!ret) {
+ match = of_match_node(cpu_opp_match_list, np);
+ if (!match) {
of_node_put(np);
return -ENOENT;
}
+ opp_data = match->data;
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
of_node_put(np);
@@ -61,33 +191,25 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
- speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+ speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
nvmem_cell_put(speedbin_nvmem);
if (IS_ERR(speedbin))
return PTR_ERR(speedbin);
- efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
-
- /*
- * We treat unexpected efuse values as if the SoC was from
- * the slowest bin. Expected efuse values are 1-3, slowest
- * to fastest.
- */
- if (efuse_value >= 1 && efuse_value <= 3)
- *versions = efuse_value - 1;
- else
- *versions = 0;
+ ret = opp_data->efuse_xlate(*speedbin);
kfree(speedbin);
- return 0;
+
+ return ret;
};
static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
{
int *opp_tokens;
- char name[MAX_NAME_LEN];
- unsigned int cpu;
- u32 speed = 0;
+ char name[] = "speedXXXXXXXXXXX"; /* Integers can take 11 chars max */
+ unsigned int cpu, supported_hw;
+ struct dev_pm_opp_config config = {};
+ int speed;
int ret;
opp_tokens = kcalloc(num_possible_cpus(), sizeof(*opp_tokens),
@@ -95,13 +217,24 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
if (!opp_tokens)
return -ENOMEM;
- ret = sun50i_cpufreq_get_efuse(&speed);
- if (ret) {
+ speed = sun50i_cpufreq_get_efuse();
+ if (speed < 0) {
kfree(opp_tokens);
- return ret;
+ return speed;
}
- snprintf(name, MAX_NAME_LEN, "speed%d", speed);
+ /*
+ * We need at least one OPP with the "opp-supported-hw" property,
+ * or else the upper layers will ignore every OPP and will bail out.
+ */
+ if (dt_has_supported_hw()) {
+ supported_hw = 1U << speed;
+ config.supported_hw = &supported_hw;
+ config.supported_hw_count = 1;
+ }
+
+ snprintf(name, sizeof(name), "speed%d", speed);
+ config.prop_name = name;
for_each_possible_cpu(cpu) {
struct device *cpu_dev = get_cpu_device(cpu);
@@ -111,12 +244,11 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
goto free_opp;
}
- opp_tokens[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
- if (opp_tokens[cpu] < 0) {
- ret = opp_tokens[cpu];
- pr_err("Failed to set prop name\n");
+ ret = dev_pm_opp_set_config(cpu_dev, &config);
+ if (ret < 0)
goto free_opp;
- }
+
+ opp_tokens[cpu] = ret;
}
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
@@ -131,7 +263,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
free_opp:
for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
return ret;
@@ -145,7 +277,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
}
@@ -160,6 +292,9 @@ static struct platform_driver sun50i_cpufreq_driver = {
static const struct of_device_id sun50i_cpufreq_match_list[] = {
{ .compatible = "allwinner,sun50i-h6" },
+ { .compatible = "allwinner,sun50i-h616" },
+ { .compatible = "allwinner,sun50i-h618" },
+ { .compatible = "allwinner,sun50i-h700" },
{}
};
MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list);
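On H616-family SoCs the translated speed bin is fed into dev_pm_opp_set_config() as a one-bit supported_hw mask, and the OPP core then keeps only those OPP nodes whose opp-supported-hw value ANDs with that mask, which is also why dt_has_supported_hw() must check that at least one OPP carries the property. A toy version of that selection logic (the table values are hypothetical, not taken from any real DT):

#include <stdio.h>

struct opp {
	unsigned int freq_khz;
	unsigned int supported_hw;	/* bitmask from opp-supported-hw in DT */
};

int main(void)
{
	/* Hypothetical OPP table: each entry lists which speed bins may use it. */
	struct opp table[] = {
		{ 1008000, 0x1f },	/* all bins */
		{ 1512000, 0x1e },	/* bins 1-4 */
		{ 1800000, 0x0c },	/* bins 2-3 only */
	};
	unsigned int speed = 0;			/* slowest bin, as returned by efuse_xlate */
	unsigned int supported_hw = 1U << speed;	/* as in the probe path above */

	for (int i = 0; i < 3; i++)
		if (table[i].supported_hw & supported_hw)
			printf("enable %u kHz\n", table[i].freq_khz);
	return 0;
}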
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index aae951d4e7..514146d98b 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -52,12 +52,15 @@ out:
static int tegra124_cpufreq_probe(struct platform_device *pdev)
{
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
struct tegra124_cpufreq_priv *priv;
- struct device_node *np;
struct device *cpu_dev;
struct platform_device_info cpufreq_dt_devinfo = {};
int ret;
+ if (!np)
+ return -ENODEV;
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -66,15 +69,9 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
if (!cpu_dev)
return -ENODEV;
- np = of_cpu_device_node_get(0);
- if (!np)
- return -ENODEV;
-
priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
- if (IS_ERR(priv->cpu_clk)) {
- ret = PTR_ERR(priv->cpu_clk);
- goto out_put_np;
- }
+ if (IS_ERR(priv->cpu_clk))
+ return PTR_ERR(priv->cpu_clk);
priv->dfll_clk = of_clk_get_by_name(np, "dfll");
if (IS_ERR(priv->dfll_clk)) {
@@ -110,8 +107,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- of_node_put(np);
-
return 0;
out_put_pllp_clk:
@@ -122,8 +117,6 @@ out_put_dfll_clk:
clk_put(priv->dfll_clk);
out_put_cpu_clk:
clk_put(priv->cpu_clk);
-out_put_np:
- of_node_put(np);
return ret;
}
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 46c41e2ca7..5af85c4cba 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -347,12 +347,10 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
static const struct of_device_id *ti_cpufreq_match_node(void)
{
- struct device_node *np;
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
const struct of_device_id *match;
- np = of_find_node_by_path("/");
match = of_match_node(ti_cpufreq_of_match, np);
- of_node_put(np);
return match;
}
@@ -419,7 +417,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config);
if (ret < 0) {
- dev_err(opp_data->cpu_dev, "Failed to set OPP config\n");
+ dev_err_probe(opp_data->cpu_dev, ret, "Failed to set OPP config\n");
goto fail_put_node;
}