Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig                    |  29
-rw-r--r--  drivers/cpufreq/Kconfig.arm                |  27
-rw-r--r--  drivers/cpufreq/amd-pstate.c               | 475
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c      |   7
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c             |  14
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c       |  11
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c               |  23
-rw-r--r--  drivers/cpufreq/cpufreq.c                  |  97
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c         |   1
-rw-r--r--  drivers/cpufreq/freq_table.c               |  14
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c            |  45
-rw-r--r--  drivers/cpufreq/intel_pstate.c             | 219
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq-hw.c      |  19
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c         |  10
-rw-r--r--  drivers/cpufreq/pmac32-cpufreq.c           |   8
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c          |   4
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c             |  46
-rw-r--r--  drivers/cpufreq/sun50i-cpufreq-nvmem.c     | 209
-rw-r--r--  drivers/cpufreq/tegra124-cpufreq.c         |  19
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c               |   4
20 files changed, 887 insertions(+), 394 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 35efb53d5492..94e55c40970a 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -302,4 +302,33 @@ config QORIQ_CPUFREQ
which are capable of changing the CPU's frequency dynamically.
endif
+
+config ACPI_CPPC_CPUFREQ
+ tristate "CPUFreq driver based on the ACPI CPPC spec"
+ depends on ACPI_PROCESSOR
+ depends on ARM || ARM64 || RISCV
+ select ACPI_CPPC_LIB
+ help
+ This adds a CPUFreq driver which uses CPPC methods
+ as described in the ACPIv5.1 spec. CPPC stands for
+ Collaborative Processor Performance Controls. It
+ is based on an abstract continuous scale of CPU
+ performance values which allows the remote power
+ processor to flexibly optimize for power and
+ performance. CPPC relies on power management firmware
+ support for its operation.
+
+ If in doubt, say N.
+
+config ACPI_CPPC_CPUFREQ_FIE
+ bool "Frequency Invariance support for CPPC cpufreq driver"
+ depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
+ depends on ARM || ARM64 || RISCV
+ default y
+ help
+ This extends frequency invariance support in the CPPC cpufreq driver,
+ by using CPPC delivered and reference performance counters.
+
+ If in doubt, say N.
+
endmenu
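
The ACPI_CPPC_CPUFREQ_FIE option derives the scheduler's frequency-invariance scale factor from the CPPC delivered and reference performance counters. A minimal standalone sketch of that arithmetic, with illustrative counter values (not the driver's actual code):

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10  /* scheduler fixed-point shift */

    /* A scale of ~1024 means the CPU ran at reference (nominal)
     * performance across the window; lower means a slower clock. */
    static uint64_t fie_scale(uint64_t delivered, uint64_t reference)
    {
            return (delivered << SCHED_CAPACITY_SHIFT) / reference;
    }

    int main(void)
    {
            /* Counters show the CPU ran at 80% of reference performance */
            printf("%llu\n", (unsigned long long)fie_scale(800, 1000)); /* 819 */
            return 0;
    }
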
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index f911606897b8..96b404ce829f 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -3,32 +3,6 @@
# ARM CPU Frequency scaling drivers
#
-config ACPI_CPPC_CPUFREQ
- tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI_PROCESSOR
- select ACPI_CPPC_LIB
- help
- This adds a CPUFreq driver which uses CPPC methods
- as described in the ACPIv5.1 spec. CPPC stands for
- Collaborative Processor Performance Controls. It
- is based on an abstract continuous scale of CPU
- performance values which allows the remote power
- processor to flexibly optimize for power and
- performance. CPPC relies on power management firmware
- support for its operation.
-
- If in doubt, say N.
-
-config ACPI_CPPC_CPUFREQ_FIE
- bool "Frequency Invariance support for CPPC cpufreq driver"
- depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
- default y
- help
- This extends frequency invariance support in the CPPC cpufreq driver,
- by using CPPC delivered and reference performance counters.
-
- If in doubt, say N.
-
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
depends on ARCH_SUNXI
@@ -173,6 +147,7 @@ config ARM_QCOM_CPUFREQ_NVMEM
config ARM_QCOM_CPUFREQ_HW
tristate "QCOM CPUFreq HW driver"
depends on ARCH_QCOM || COMPILE_TEST
+ depends on COMMON_CLK
help
Support for the CPUFreq HW driver.
Some QCOM chipsets have a HW engine to offload the steps
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 1791d37fbc53..1b7e82a0ad2e 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -37,6 +37,7 @@
#include <linux/uaccess.h>
#include <linux/static_call.h>
#include <linux/amd-pstate.h>
+#include <linux/topology.h>
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
@@ -49,6 +50,8 @@
#define AMD_PSTATE_TRANSITION_LATENCY 20000
#define AMD_PSTATE_TRANSITION_DELAY 1000
+#define CPPC_HIGHEST_PERF_PERFORMANCE 196
+#define CPPC_HIGHEST_PERF_DEFAULT 166
/*
* TODO: We need more time to fine tune processors with shared memory solution
@@ -64,6 +67,8 @@ static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
static bool cppc_enabled;
+static bool amd_pstate_prefcore = true;
+static struct quirk_entry *quirks;
/*
* AMD Energy Preference Performance (EPP)
@@ -108,6 +113,41 @@ static unsigned int epp_values[] = {
typedef int (*cppc_mode_transition_fn)(int);
+static struct quirk_entry quirk_amd_7k62 = {
+ .nominal_freq = 2600,
+ .lowest_freq = 550,
+};
+
+static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
+{
+ /*
+ * Match the broken BIOS for Family 17h processors supporting CPPC V2:
+ * such BIOSes lack the nominal_freq and lowest_freq capability
+ * definitions in the ACPI tables.
+ */
+ if (boot_cpu_has(X86_FEATURE_ZEN2)) {
+ quirks = dmi->driver_data;
+ pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
+ return 1;
+ }
+
+ return 0;
+}
+
+static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = {
+ {
+ .callback = dmi_matched_7k62_bios_bug,
+ .ident = "AMD EPYC 7K62",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VERSION, "5.14"),
+ DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"),
+ },
+ .driver_data = &quirk_amd_7k62,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table);
+
static inline int get_mode_idx_from_str(const char *str, size_t size)
{
int i;
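
For reference, dmi_check_system() walks a table like the one above and invokes each matching entry's callback; a non-zero return from the callback counts the entry as matched, and here the callback additionally gates the quirk on X86_FEATURE_ZEN2. A kernel-style sketch of the pattern with a hypothetical quirk (assumes <linux/dmi.h>):

    static int example_quirk_cb(const struct dmi_system_id *dmi)
    {
            pr_info("Applying quirk for %s\n", dmi->ident);
            return 1;                       /* non-zero: count as matched */
    }

    static const struct dmi_system_id example_quirks[] __initconst = {
            {
                    .callback = example_quirk_cb,
                    .ident = "Example board",
                    .matches = {
                            DMI_MATCH(DMI_BIOS_VERSION, "1.0"),
                    },
            },
            {}                              /* terminator */
    };

    /* dmi_check_system(example_quirks) returns the number of matches. */
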
@@ -287,6 +327,21 @@ static inline int amd_pstate_enable(bool enable)
return static_call(amd_pstate_enable)(enable);
}
+static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ /*
+ * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
+ * the highest performance level is set to 196.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=218759
+ */
+ if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f))
+ return CPPC_HIGHEST_PERF_PERFORMANCE;
+
+ return CPPC_HIGHEST_PERF_DEFAULT;
+}
+
static int pstate_init_perf(struct amd_cpudata *cpudata)
{
u64 cap1;
@@ -297,13 +352,14 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
- /*
- * TODO: Introduce AMD specific power feature.
- *
- * CPPC entry doesn't indicate the highest performance in some ASICs.
+ /* For platforms that do not support the preferred core feature,
+ * highest_perf may be configured as 166 or 255. To avoid the max
+ * frequency being calculated wrongly, take the AMD_CPPC_HIGHEST_PERF(cap1)
+ * value as the default max perf.
*/
- highest_perf = amd_get_highest_perf();
- if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
+ if (cpudata->hw_prefcore)
+ highest_perf = amd_pstate_highest_perf_set(cpudata);
+ else
highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
WRITE_ONCE(cpudata->highest_perf, highest_perf);
@@ -311,6 +367,7 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
+ WRITE_ONCE(cpudata->prefcore_ranking, AMD_CPPC_HIGHEST_PERF(cap1));
WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
return 0;
}
@@ -324,8 +381,9 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
- highest_perf = amd_get_highest_perf();
- if (highest_perf > cppc_perf.highest_perf)
+ if (cpudata->hw_prefcore)
+ highest_perf = amd_pstate_highest_perf_set(cpudata);
+ else
highest_perf = cppc_perf.highest_perf;
WRITE_ONCE(cpudata->highest_perf, highest_perf);
@@ -334,6 +392,7 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
cppc_perf.lowest_nonlinear_perf);
WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+ WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);
WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
if (cppc_state == AMD_PSTATE_ACTIVE)
@@ -477,12 +536,19 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- u32 max_limit_perf, min_limit_perf;
+ u32 max_limit_perf, min_limit_perf, lowest_perf;
struct amd_cpudata *cpudata = policy->driver_data;
max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+ lowest_perf = READ_ONCE(cpudata->lowest_perf);
+ if (min_limit_perf < lowest_perf)
+ min_limit_perf = lowest_perf;
+
+ if (max_limit_perf < min_limit_perf)
+ max_limit_perf = min_limit_perf;
+
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
WRITE_ONCE(cpudata->max_limit_freq, policy->max);
@@ -570,7 +636,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (target_perf < capacity)
des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
- min_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
if (_min_perf < capacity)
min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
@@ -590,125 +656,245 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
cpufreq_cpu_put(policy);
}
-static int amd_get_min_freq(struct amd_cpudata *cpudata)
+static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
- struct cppc_perf_caps cppc_perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
+ if (!cpudata->boost_supported) {
+ pr_err("Boost mode is not supported by this processor or SBIOS\n");
+ return -EINVAL;
+ }
+
+ if (state)
+ policy->cpuinfo.max_freq = cpudata->max_freq;
+ else
+ policy->cpuinfo.max_freq = cpudata->nominal_freq;
+
+ policy->max = policy->cpuinfo.max_freq;
+
+ ret = freq_qos_update_request(&cpudata->req[1],
+ policy->cpuinfo.max_freq);
+ if (ret < 0)
return ret;
- /* Switch to khz */
- return cppc_perf.lowest_freq * 1000;
+ return 0;
}
-static int amd_get_max_freq(struct amd_cpudata *cpudata)
+static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
{
- struct cppc_perf_caps cppc_perf;
- u32 max_perf, max_freq, nominal_freq, nominal_perf;
- u64 boost_ratio;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
+ u32 highest_perf, nominal_perf;
- nominal_freq = cppc_perf.nominal_freq;
+ highest_perf = READ_ONCE(cpudata->highest_perf);
nominal_perf = READ_ONCE(cpudata->nominal_perf);
- max_perf = READ_ONCE(cpudata->highest_perf);
- boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
+ if (highest_perf <= nominal_perf)
+ return;
- max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;
+ cpudata->boost_supported = true;
+ current_pstate_driver->boost_enabled = true;
+}
+
+static void amd_perf_ctl_reset(unsigned int cpu)
+{
+ wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+}
- /* Switch to khz */
- return max_freq * 1000;
+/*
+ * Enabling amd-pstate preferred core support can't be done directly from
+ * cpufreq callbacks due to locking, so queue the work for later.
+ */
+static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
+{
+ sched_set_itmt_support();
}
+static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
-static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
+/*
+ * Get the highest performance register value.
+ * @cpu: CPU from which to get highest performance.
+ * @highest_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
{
- struct cppc_perf_caps cppc_perf;
+ int ret;
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ u64 cap1;
+
+ ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+ if (ret)
+ return ret;
+ WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
+ } else {
+ u64 cppc_highest_perf;
+
+ ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
+ if (ret)
+ return ret;
+ WRITE_ONCE(*highest_perf, cppc_highest_perf);
+ }
- /* Switch to khz */
- return cppc_perf.nominal_freq * 1000;
+ return ret;
}
-static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
+#define CPPC_MAX_PERF U8_MAX
+
+static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
{
- struct cppc_perf_caps cppc_perf;
- u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
- nominal_freq, nominal_perf;
- u64 lowest_nonlinear_ratio;
+ int ret, prio;
+ u32 highest_perf;
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
if (ret)
- return ret;
-
- nominal_freq = cppc_perf.nominal_freq;
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ return;
- lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+ cpudata->hw_prefcore = true;
+ /* Check whether the CPPC preferred core feature is enabled */
+ if (highest_perf < CPPC_MAX_PERF)
+ prio = (int)highest_perf;
+ else {
+ pr_debug("AMD CPPC preferred core is unsupported!\n");
+ cpudata->hw_prefcore = false;
+ return;
+ }
- lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
+ if (!amd_pstate_prefcore)
+ return;
- lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;
+ /*
+ * The priorities can be set regardless of whether or not
+ * sched_set_itmt_support(true) has been called and it is valid to
+ * update them at any time after it has been called.
+ */
+ sched_set_itmt_core_prio(prio, cpudata->cpu);
- /* Switch to khz */
- return lowest_nonlinear_freq * 1000;
+ schedule_work(&sched_prefcore_work);
}
-static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+static void amd_pstate_update_limits(unsigned int cpu)
{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata = policy->driver_data;
+ u32 prev_high = 0, cur_high = 0;
int ret;
+ bool highest_perf_changed = false;
- if (!cpudata->boost_supported) {
- pr_err("Boost mode is not supported by this processor or SBIOS\n");
- return -EINVAL;
- }
+ mutex_lock(&amd_pstate_driver_lock);
+ if ((!amd_pstate_prefcore) || (!cpudata->hw_prefcore))
+ goto free_cpufreq_put;
- if (state)
- policy->cpuinfo.max_freq = cpudata->max_freq;
- else
- policy->cpuinfo.max_freq = cpudata->nominal_freq;
+ ret = amd_pstate_get_highest_perf(cpu, &cur_high);
+ if (ret)
+ goto free_cpufreq_put;
- policy->max = policy->cpuinfo.max_freq;
+ prev_high = READ_ONCE(cpudata->prefcore_ranking);
+ if (prev_high != cur_high) {
+ highest_perf_changed = true;
+ WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
- ret = freq_qos_update_request(&cpudata->req[1],
- policy->cpuinfo.max_freq);
- if (ret < 0)
- return ret;
+ if (cur_high < CPPC_MAX_PERF)
+ sched_set_itmt_core_prio((int)cur_high, cpu);
+ }
- return 0;
+free_cpufreq_put:
+ cpufreq_cpu_put(policy);
+
+ if (!highest_perf_changed)
+ cpufreq_update_policy(cpu);
+
+ mutex_unlock(&amd_pstate_driver_lock);
}
-static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
+/*
+ * Get the P-state transition delay time from the ACPI tables set by
+ * firmware, instead of using a hardcoded value directly.
+ */
+static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
{
- u32 highest_perf, nominal_perf;
+ u32 transition_delay_ns;
- highest_perf = READ_ONCE(cpudata->highest_perf);
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ transition_delay_ns = cppc_get_transition_latency(cpu);
+ if (transition_delay_ns == CPUFREQ_ETERNAL)
+ return AMD_PSTATE_TRANSITION_DELAY;
- if (highest_perf <= nominal_perf)
- return;
+ return transition_delay_ns / NSEC_PER_USEC;
+}
- cpudata->boost_supported = true;
- current_pstate_driver->boost_enabled = true;
+/*
+ * Get the P-state transition latency value from the ACPI tables set by
+ * firmware, instead of using a hardcoded value directly.
+ */
+static u32 amd_pstate_get_transition_latency(unsigned int cpu)
+{
+ u32 transition_latency;
+
+ transition_latency = cppc_get_transition_latency(cpu);
+ if (transition_latency == CPUFREQ_ETERNAL)
+ return AMD_PSTATE_TRANSITION_LATENCY;
+
+ return transition_latency;
}
-static void amd_perf_ctl_reset(unsigned int cpu)
+/*
+ * amd_pstate_init_freq: Initialize the max_freq, min_freq,
+ * nominal_freq and lowest_nonlinear_freq for
+ * the @cpudata object.
+ *
+ * Requires: highest_perf, lowest_perf, nominal_perf and
+ * lowest_nonlinear_perf members of @cpudata to be
+ * initialized.
+ *
+ * Returns 0 on success, non-zero value on failure.
+ */
+static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
- wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+ int ret;
+ u32 min_freq;
+ u32 highest_perf, max_freq;
+ u32 nominal_perf, nominal_freq;
+ u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
+ u32 boost_ratio, lowest_nonlinear_ratio;
+ struct cppc_perf_caps cppc_perf;
+
+ ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ if (quirks && quirks->lowest_freq)
+ min_freq = quirks->lowest_freq * 1000;
+ else
+ min_freq = cppc_perf.lowest_freq * 1000;
+
+ if (quirks && quirks->nominal_freq)
+ nominal_freq = quirks->nominal_freq;
+ else
+ nominal_freq = cppc_perf.nominal_freq;
+
+ nominal_perf = READ_ONCE(cpudata->nominal_perf);
+
+ highest_perf = READ_ONCE(cpudata->highest_perf);
+ boost_ratio = div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+ max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+
+ lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
+ nominal_perf);
+ lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+
+ WRITE_ONCE(cpudata->min_freq, min_freq);
+ WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
+ WRITE_ONCE(cpudata->max_freq, max_freq);
+
+ return 0;
}
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ int min_freq, max_freq, nominal_freq, ret;
struct device *dev;
struct amd_cpudata *cpudata;
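
The frequency derivation in amd_pstate_init_freq() above is fixed-point arithmetic: the perf ratio is scaled up by SCHED_CAPACITY_SHIFT, multiplied by nominal_freq, shifted back down, then converted to kHz. A standalone sketch using the 7K62 quirk's nominal frequency and illustrative CPPC perf values:

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10

    int main(void)
    {
            uint32_t nominal_freq = 2600;   /* MHz, from the 7K62 quirk */
            uint32_t nominal_perf = 166;    /* illustrative CPPC values */
            uint32_t highest_perf = 255;

            uint64_t boost_ratio = ((uint64_t)highest_perf <<
                                    SCHED_CAPACITY_SHIFT) / nominal_perf;
            uint32_t max_freq = (nominal_freq * boost_ratio >>
                                 SCHED_CAPACITY_SHIFT) * 1000;  /* kHz */

            printf("max_freq = %u kHz\n", max_freq);  /* 3993000 kHz */
            return 0;
    }
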
@@ -727,24 +913,31 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
+ amd_pstate_init_prefcore(cpudata);
+
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
- min_freq = amd_get_min_freq(cpudata);
- max_freq = amd_get_max_freq(cpudata);
- nominal_freq = amd_get_nominal_freq(cpudata);
- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
+ ret = amd_pstate_init_freq(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ min_freq = READ_ONCE(cpudata->min_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
- min_freq, max_freq);
+ if (min_freq <= 0 || max_freq <= 0 ||
+ nominal_freq <= 0 || min_freq > max_freq) {
+ dev_err(dev,
+ "min_freq(%d) or max_freq(%d) or nominal_freq (%d) value is incorrect, check _CPC in ACPI tables\n",
+ min_freq, max_freq, nominal_freq);
ret = -EINVAL;
goto free_cpudata1;
}
- policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
- policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;
+ policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
+ policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
policy->min = min_freq;
policy->max = max_freq;
@@ -772,13 +965,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
goto free_cpudata2;
}
- /* Initial processor data capability frequencies */
- cpudata->max_freq = max_freq;
- cpudata->min_freq = min_freq;
cpudata->max_limit_freq = max_freq;
cpudata->min_limit_freq = min_freq;
- cpudata->nominal_freq = nominal_freq;
- cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
policy->driver_data = cpudata;
@@ -842,7 +1030,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
int max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
- max_freq = amd_get_max_freq(cpudata);
+ max_freq = READ_ONCE(cpudata->max_freq);
if (max_freq < 0)
return max_freq;
@@ -855,7 +1043,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
int freq;
struct amd_cpudata *cpudata = policy->driver_data;
- freq = amd_get_lowest_nonlinear_freq(cpudata);
+ freq = READ_ONCE(cpudata->lowest_nonlinear_freq);
if (freq < 0)
return freq;
@@ -877,6 +1065,28 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
return sysfs_emit(buf, "%u\n", perf);
}
+static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
+ char *buf)
+{
+ u32 perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ perf = READ_ONCE(cpudata->prefcore_ranking);
+
+ return sysfs_emit(buf, "%u\n", perf);
+}
+
+static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
+ char *buf)
+{
+ bool hw_prefcore;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
+
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore));
+}
+
static ssize_t show_energy_performance_available_preferences(
struct cpufreq_policy *policy, char *buf)
{
@@ -1074,18 +1284,29 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
return ret < 0 ? ret : count;
}
+static ssize_t prefcore_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
+}
+
cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking);
+cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
cpufreq_freq_attr_rw(energy_performance_preference);
cpufreq_freq_attr_ro(energy_performance_available_preferences);
static DEVICE_ATTR_RW(status);
+static DEVICE_ATTR_RO(prefcore);
static struct freq_attr *amd_pstate_attr[] = {
&amd_pstate_max_freq,
&amd_pstate_lowest_nonlinear_freq,
&amd_pstate_highest_perf,
+ &amd_pstate_prefcore_ranking,
+ &amd_pstate_hw_prefcore,
NULL,
};
@@ -1093,6 +1314,8 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
&amd_pstate_max_freq,
&amd_pstate_lowest_nonlinear_freq,
&amd_pstate_highest_perf,
+ &amd_pstate_prefcore_ranking,
+ &amd_pstate_hw_prefcore,
&energy_performance_preference,
&energy_performance_available_preferences,
NULL,
@@ -1100,6 +1323,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
static struct attribute *pstate_global_attributes[] = {
&dev_attr_status.attr,
+ &dev_attr_prefcore.attr,
NULL
};
@@ -1130,7 +1354,7 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ int min_freq, max_freq, nominal_freq, ret;
struct amd_cpudata *cpudata;
struct device *dev;
u64 value;
@@ -1151,17 +1375,24 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
cpudata->epp_policy = 0;
+ amd_pstate_init_prefcore(cpudata);
+
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
- min_freq = amd_get_min_freq(cpudata);
- max_freq = amd_get_max_freq(cpudata);
- nominal_freq = amd_get_nominal_freq(cpudata);
- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
- min_freq, max_freq);
+ ret = amd_pstate_init_freq(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ min_freq = READ_ONCE(cpudata->min_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
+ if (min_freq <= 0 || max_freq <= 0 ||
+ nominal_freq <= 0 || min_freq > max_freq) {
+ dev_err(dev,
+ "min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n",
+ min_freq, max_freq, nominal_freq);
ret = -EINVAL;
goto free_cpudata1;
}
@@ -1171,12 +1402,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
- /* Initial processor data capability frequencies */
- cpudata->max_freq = max_freq;
- cpudata->min_freq = min_freq;
- cpudata->nominal_freq = nominal_freq;
- cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
-
policy->driver_data = cpudata;
cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
@@ -1216,6 +1441,13 @@ free_cpudata1:
static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (cpudata) {
+ kfree(cpudata);
+ policy->driver_data = NULL;
+ }
+
pr_debug("CPU %d exiting\n", policy->cpu);
return 0;
}
@@ -1232,6 +1464,12 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+ if (min_limit_perf < min_perf)
+ min_limit_perf = min_perf;
+
+ if (max_limit_perf < min_limit_perf)
+ max_limit_perf = min_limit_perf;
+
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
@@ -1432,6 +1670,7 @@ static struct cpufreq_driver amd_pstate_driver = {
.suspend = amd_pstate_cpu_suspend,
.resume = amd_pstate_cpu_resume,
.set_boost = amd_pstate_set_boost,
+ .update_limits = amd_pstate_update_limits,
.name = "amd-pstate",
.attr = amd_pstate_attr,
};
@@ -1446,6 +1685,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.online = amd_pstate_epp_cpu_online,
.suspend = amd_pstate_epp_suspend,
.resume = amd_pstate_epp_resume,
+ .update_limits = amd_pstate_update_limits,
.name = "amd-pstate-epp",
.attr = amd_pstate_epp_attr,
};
@@ -1486,6 +1726,11 @@ static int __init amd_pstate_init(void)
if (cpufreq_get_current_driver())
return -EEXIST;
+ quirks = NULL;
+
+ /* Check whether this machine needs CPPC quirks */
+ dmi_check_system(amd_pstate_quirks_table);
+
switch (cppc_state) {
case AMD_PSTATE_UNDEFINED:
/* Disable on the following configs by default:
@@ -1567,7 +1812,17 @@ static int __init amd_pstate_param(char *str)
return amd_pstate_set_driver(mode_idx);
}
+
+static int __init amd_prefcore_param(char *str)
+{
+ if (!strcmp(str, "disable"))
+ amd_pstate_prefcore = false;
+
+ return 0;
+}
+
early_param("amd_pstate", amd_pstate_param);
+early_param("amd_prefcore", amd_prefcore_param);
MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
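
Preferred-core support defaults to on (amd_pstate_prefcore = true); per the handler above, the only recognized override is the literal string "disable" on the kernel command line:

    amd_prefcore=disable
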
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 35fb3a559ea9..ea8438550b49 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -481,7 +481,12 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct private_data *priv = policy->driver_data;
+ struct private_data *priv;
+
+ if (!policy)
+ return 0;
+
+ priv = policy->driver_data;
cpufreq_cpu_put(policy);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 64420d9cfd1e..15f1d41920a3 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -741,10 +741,15 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct cppc_cpudata *cpu_data = policy->driver_data;
+ struct cppc_cpudata *cpu_data;
u64 delivered_perf;
int ret;
+ if (!policy)
+ return -ENODEV;
+
+ cpu_data = policy->driver_data;
+
cpufreq_cpu_put(policy);
ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
@@ -822,10 +827,15 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct cppc_cpudata *cpu_data = policy->driver_data;
+ struct cppc_cpudata *cpu_data;
u64 desired_perf;
int ret;
+ if (!policy)
+ return -ENODEV;
+
+ cpu_data = policy->driver_data;
+
cpufreq_cpu_put(policy);
ret = cppc_get_desired_perf(cpu, &desired_perf);
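
Both ->get() fixes in this file follow the same pattern as the brcmstb change above: cpufreq_cpu_get() can return NULL while a policy is being created or torn down, so the result must be checked before dereferencing and the reference dropped with cpufreq_cpu_put(). A kernel-style sketch of the idiom (hypothetical callback, assumes <linux/cpufreq.h>):

    static unsigned int example_get_rate(unsigned int cpu)
    {
            struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
            unsigned int freq;

            if (!policy)            /* policy can vanish across hotplug */
                    return 0;

            freq = policy->cur;     /* or read hardware via driver_data */
            cpufreq_cpu_put(policy);        /* drop the _get() reference */

            return freq;
    }
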
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index bd1e1357cef8..c74dd1e01e0d 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -104,6 +104,9 @@ static const struct of_device_id allowlist[] __initconst = {
*/
static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "allwinner,sun50i-h6", },
+ { .compatible = "allwinner,sun50i-h616", },
+ { .compatible = "allwinner,sun50i-h618", },
+ { .compatible = "allwinner,sun50i-h700", },
{ .compatible = "apple,arm-platform", },
@@ -156,6 +159,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sc7280", },
{ .compatible = "qcom,sc8180x", },
{ .compatible = "qcom,sc8280xp", },
+ { .compatible = "qcom,sdm670", },
{ .compatible = "qcom,sdm845", },
{ .compatible = "qcom,sdx75", },
{ .compatible = "qcom,sm6115", },
@@ -194,19 +198,18 @@ static const struct of_device_id blocklist[] __initconst = {
static bool __init cpu0_node_has_opp_v2_prop(void)
{
- struct device_node *np = of_cpu_device_node_get(0);
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
bool ret = false;
if (of_property_present(np, "operating-points-v2"))
ret = true;
- of_node_put(np);
return ret;
}
static int __init cpufreq_dt_platdev_init(void)
{
- struct device_node *np = of_find_node_by_path("/");
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
const struct of_device_id *match;
const void *data = NULL;
@@ -222,11 +225,9 @@ static int __init cpufreq_dt_platdev_init(void)
if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np))
goto create_pdev;
- of_node_put(np);
return -ENODEV;
create_pdev:
- of_node_put(np);
return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
-1, data,
sizeof(struct cpufreq_dt_platform_data)));
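
The __free(device_node) annotations come from the kernel's scope-based cleanup helpers (<linux/cleanup.h>): of_node_put() runs automatically when the pointer goes out of scope, which is what lets the explicit of_node_put() calls and the goto unwinding be deleted. A kernel-style sketch:

    static bool has_opp_v2(void)
    {
            struct device_node *np __free(device_node) =
                    of_cpu_device_node_get(0);

            /* of_property_present() tolerates a NULL node */
            return of_property_present(np, "operating-points-v2");
            /* of_node_put(np) is invoked automatically on return */
    }
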
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 8bd6e5e8f121..907e22632fda 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -68,12 +68,9 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
*/
static const char *find_supply_name(struct device *dev)
{
- struct device_node *np;
+ struct device_node *np __free(device_node) = of_node_get(dev->of_node);
struct property *pp;
int cpu = dev->id;
- const char *name = NULL;
-
- np = of_node_get(dev->of_node);
/* This must be valid for sure */
if (WARN_ON(!np))
@@ -82,22 +79,16 @@ static const char *find_supply_name(struct device *dev)
/* Try "cpu0" for older DTs */
if (!cpu) {
pp = of_find_property(np, "cpu0-supply", NULL);
- if (pp) {
- name = "cpu0";
- goto node_put;
- }
+ if (pp)
+ return "cpu0";
}
pp = of_find_property(np, "cpu-supply", NULL);
- if (pp) {
- name = "cpu";
- goto node_put;
- }
+ if (pp)
+ return "cpu";
dev_dbg(dev, "no regulator for cpu%d\n", cpu);
-node_put:
- of_node_put(np);
- return name;
+ return NULL;
}
static int cpufreq_init(struct cpufreq_policy *policy)
@@ -208,7 +199,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
if (!priv)
return -ENOMEM;
- if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
return -ENOMEM;
cpumask_set_cpu(cpu, priv->cpus);
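
The switch to zalloc_cpumask_var() matters because alloc_cpumask_var() does not zero the mask when CONFIG_CPUMASK_OFFSTACK=y, so bits beyond the one set here could hold stale data. A kernel-style sketch of the intended pattern (assumes <linux/cpumask.h>):

    cpumask_var_t cpus;

    if (!zalloc_cpumask_var(&cpus, GFP_KERNEL))     /* zeroed on success */
            return -ENOMEM;
    cpumask_set_cpu(cpu, cpus);     /* exactly one bit is now set */
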
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 44db4f59c4cc..a45aac17c20f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -576,17 +576,26 @@ unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
if (latency) {
+ unsigned int max_delay_us = 2 * MSEC_PER_SEC;
+
+ /*
+ * If the platform already has high transition_latency, use it
+ * as-is.
+ */
+ if (latency > max_delay_us)
+ return latency;
+
/*
- * For platforms that can change the frequency very fast (< 10
+ * For platforms that can change the frequency very fast (< 2
* us), the above formula gives a decent transition delay. But
* for platforms where transition_latency is in milliseconds, it
* ends up giving unrealistic values.
*
- * Cap the default transition delay to 10 ms, which seems to be
+ * Cap the default transition delay to 2 ms, which seems to be
* a reasonable amount of time after which we should reevaluate
* the frequency.
*/
- return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
+ return min(latency * LATENCY_MULTIPLIER, max_delay_us);
}
return LATENCY_MULTIPLIER;
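
With LATENCY_MULTIPLIER at 1000 (its value at the time of this change), the new logic behaves as in this standalone sketch:

    #include <stdio.h>

    #define LATENCY_MULTIPLIER 1000
    #define MAX_DELAY_US (2 * 1000)         /* 2 * MSEC_PER_SEC, in us */

    static unsigned int transition_delay_us(unsigned int latency_us)
    {
            if (!latency_us)
                    return LATENCY_MULTIPLIER;
            if (latency_us > MAX_DELAY_US)  /* slow platform: use as-is */
                    return latency_us;
            /* fast platform: scale up, but cap at 2 ms */
            return latency_us * LATENCY_MULTIPLIER < MAX_DELAY_US ?
                   latency_us * LATENCY_MULTIPLIER : MAX_DELAY_US;
    }

    int main(void)
    {
            printf("%u\n", transition_delay_us(1));    /* 1000 us */
            printf("%u\n", transition_delay_us(50));   /* 2000 us (capped) */
            printf("%u\n", transition_delay_us(3000)); /* 3000 us (as-is) */
            return 0;
    }
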
@@ -644,14 +653,16 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
if (policy->boost_enabled == enable)
return count;
+ policy->boost_enabled = enable;
+
cpus_read_lock();
ret = cpufreq_driver->set_boost(policy, enable);
cpus_read_unlock();
- if (ret)
+ if (ret) {
+ policy->boost_enabled = !policy->boost_enabled;
return ret;
-
- policy->boost_enabled = enable;
+ }
return count;
}
@@ -1419,6 +1430,9 @@ static int cpufreq_online(unsigned int cpu)
goto out_free_policy;
}
+ /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+ policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
+
/*
* The initialization has succeeded and the policy is online.
* If there is a problem with its frequency table, take it
@@ -1571,7 +1585,8 @@ static int cpufreq_online(unsigned int cpu)
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
- if (cpufreq_thermal_control_enabled(cpufreq_driver))
+ /* Register cpufreq cooling only for a new policy */
+ if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
pr_debug("initialization complete\n");
@@ -1655,11 +1670,6 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
else
policy->last_policy = policy->policy;
- if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
- cpufreq_cooling_unregister(policy->cdev);
- policy->cdev = NULL;
- }
-
if (has_target())
cpufreq_exit_governor(policy);
@@ -1669,10 +1679,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
*/
if (cpufreq_driver->offline) {
cpufreq_driver->offline(policy);
- } else if (cpufreq_driver->exit) {
- cpufreq_driver->exit(policy);
- policy->freq_table = NULL;
+ return;
}
+
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+
+ policy->freq_table = NULL;
}
static int cpufreq_offline(unsigned int cpu)
@@ -1720,8 +1733,17 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
return;
}
+ /*
+ * Unregister cpufreq cooling once all the CPUs of the policy are
+ * removed.
+ */
+ if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+ cpufreq_cooling_unregister(policy->cdev);
+ policy->cdev = NULL;
+ }
+
/* We did light-weight exit earlier, do full tear down now */
- if (cpufreq_driver->offline)
+ if (cpufreq_driver->offline && cpufreq_driver->exit)
cpufreq_driver->exit(policy);
up_write(&policy->rwsem);
@@ -2563,6 +2585,40 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_get_policy);
+DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
+
+/**
+ * cpufreq_update_pressure() - Update cpufreq pressure for CPUs
+ * @policy: cpufreq policy of the CPUs.
+ *
+ * Update the value of cpufreq pressure for all CPUs in the policy.
+ */
+static void cpufreq_update_pressure(struct cpufreq_policy *policy)
+{
+ unsigned long max_capacity, capped_freq, pressure;
+ u32 max_freq;
+ int cpu;
+
+ cpu = cpumask_first(policy->related_cpus);
+ max_freq = arch_scale_freq_ref(cpu);
+ capped_freq = policy->max;
+
+ /*
+ * Handle boost frequencies properly: they should simply clear
+ * the cpufreq pressure value.
+ */
+ if (max_freq <= capped_freq) {
+ pressure = 0;
+ } else {
+ max_capacity = arch_scale_cpu_capacity(cpu);
+ pressure = max_capacity -
+ mult_frac(max_capacity, capped_freq, max_freq);
+ }
+
+ for_each_cpu(cpu, policy->related_cpus)
+ WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
+}
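
The pressure value is the share of maximum capacity lost to the frequency cap, expressed on the scheduler's 0..1024 capacity scale. A standalone sketch of the same computation with illustrative numbers:

    #include <stdio.h>

    /* pressure = max_capacity - max_capacity * capped_freq / max_freq */
    static unsigned long pressure(unsigned long max_capacity,
                                  unsigned long capped_freq,
                                  unsigned long max_freq)
    {
            if (max_freq <= capped_freq)    /* boost range: no pressure */
                    return 0;
            return max_capacity - (max_capacity * capped_freq) / max_freq;
    }

    int main(void)
    {
            /* Capping a 3 GHz CPU (capacity 1024) at 1.5 GHz halves it */
            printf("%lu\n", pressure(1024, 1500000, 3000000));  /* 512 */
            return 0;
    }
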
+
/**
* cpufreq_set_policy - Modify cpufreq policy parameters.
* @policy: Policy object to modify.
@@ -2618,6 +2674,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
trace_cpu_frequency_limits(policy);
+ cpufreq_update_pressure(policy);
+
policy->cached_target_freq = UINT_MAX;
pr_debug("new min and max freqs are %u - %u kHz\n",
@@ -2755,11 +2813,12 @@ int cpufreq_boost_trigger_state(int state)
cpus_read_lock();
for_each_active_policy(policy) {
+ policy->boost_enabled = state;
ret = cpufreq_driver->set_boost(policy, state);
- if (ret)
+ if (ret) {
+ policy->boost_enabled = !policy->boost_enabled;
goto err_reset_state;
-
- policy->boost_enabled = state;
+ }
}
cpus_read_unlock();
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c52d19d67557..a7c38b8b3e78 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -22,7 +22,6 @@
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (100000)
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
-#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
#define MIN_FREQUENCY_UP_THRESHOLD (1)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index c4d4643b6ca6..10e80d912b8d 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -40,7 +40,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
cpufreq_for_each_valid_entry(pos, table) {
freq = pos->frequency;
- if (!cpufreq_boost_enabled()
+ if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
@@ -70,7 +70,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
- unsigned int freq, next_larger = ~0;
+ unsigned int freq, prev_smaller = 0;
bool found = false;
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
@@ -86,12 +86,12 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
break;
}
- if ((next_larger > freq) && (freq > policy->max))
- next_larger = freq;
+ if ((prev_smaller < freq) && (freq <= policy->max))
+ prev_smaller = freq;
}
if (!found) {
- policy->max = next_larger;
+ policy->max = prev_smaller;
cpufreq_verify_within_cpu_limits(policy);
}
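
Worked example of the behavioral change above: for a table of {500000, 1000000, 2000000} kHz and a requested policy range of 1200000-1700000 kHz, no entry falls inside the range; the old code raised policy->max to the next larger frequency (2000000), while the new code lowers it to the closest supported frequency at or below the requested max (1000000).
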
@@ -194,7 +194,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
}
if (optimal.driver_data > i) {
if (suboptimal.driver_data > i) {
- WARN(1, "Invalid frequency table: %d\n", policy->cpu);
+ WARN(1, "Invalid frequency table: %u\n", policy->cpu);
return 0;
}
@@ -254,7 +254,7 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- count += sprintf(&buf[count], "%d ", pos->frequency);
+ count += sprintf(&buf[count], "%u ", pos->frequency);
}
count += sprintf(&buf[count], "\n");
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 33728c242f66..c20d3ecc5a81 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -14,6 +14,8 @@
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define PU_SOC_VOLTAGE_NORMAL 1250000
#define PU_SOC_VOLTAGE_HIGH 1275000
@@ -225,8 +227,6 @@ static void imx6x_disable_freq_in_opp(struct device *dev, unsigned long freq)
static int imx6q_opp_check_speed_grading(struct device *dev)
{
- struct device_node *np;
- void __iomem *base;
u32 val;
int ret;
@@ -235,16 +235,11 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
if (ret)
return ret;
} else {
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
- if (!np)
- return -ENOENT;
+ struct regmap *ocotp;
- base = of_iomap(np, 0);
- of_node_put(np);
- if (!base) {
- dev_err(dev, "failed to map ocotp\n");
- return -EFAULT;
- }
+ ocotp = syscon_regmap_lookup_by_compatible("fsl,imx6q-ocotp");
+ if (IS_ERR(ocotp))
+ return -ENOENT;
/*
* SPEED_GRADING[1:0] defines the max speed of ARM:
@@ -254,8 +249,7 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
* 2b'00: 792000000Hz;
* We need to set the max speed of ARM according to fuse map.
*/
- val = readl_relaxed(base + OCOTP_CFG3);
- iounmap(base);
+ regmap_read(ocotp, OCOTP_CFG3, &val);
}
val >>= OCOTP_CFG3_SPEED_SHIFT;
@@ -290,25 +284,16 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
if (ret)
return ret;
} else {
- struct device_node *np;
- void __iomem *base;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
- if (!np)
- np = of_find_compatible_node(NULL, NULL,
- "fsl,imx6ull-ocotp");
- if (!np)
- return -ENOENT;
+ struct regmap *ocotp;
- base = of_iomap(np, 0);
- of_node_put(np);
- if (!base) {
- dev_err(dev, "failed to map ocotp\n");
- return -EFAULT;
- }
+ ocotp = syscon_regmap_lookup_by_compatible("fsl,imx6ul-ocotp");
+ if (IS_ERR(ocotp))
+ ocotp = syscon_regmap_lookup_by_compatible("fsl,imx6ull-ocotp");
+
+ if (IS_ERR(ocotp))
+ return -ENOENT;
- val = readl_relaxed(base + OCOTP_CFG3);
- iounmap(base);
+ regmap_read(ocotp, OCOTP_CFG3, &val);
}
/*
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 79619227ea51..4b986c044741 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -25,6 +25,7 @@
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
+#include <linux/bitfield.h>
#include <trace/events/power.h>
#include <asm/cpu.h>
@@ -172,7 +173,6 @@ struct vid_data {
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
- * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
* @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
* P-state capacity.
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
@@ -181,7 +181,6 @@ struct vid_data {
struct global_params {
bool no_turbo;
bool turbo_disabled;
- bool turbo_disabled_mf;
int max_perf_pct;
int min_perf_pct;
};
@@ -201,8 +200,6 @@ struct global_params {
* @prev_aperf: Last APERF value read from APERF MSR
* @prev_mperf: Last MPERF value read from MPERF MSR
* @prev_tsc: Last timestamp counter (TSC) value
- * @prev_cummulative_iowait: IO Wait time difference from last and
- * current sample
* @sample: Storage for storing last Sample data
* @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios
* @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios
@@ -214,7 +211,7 @@ struct global_params {
* @epp_policy: Last saved policy used to set EPP/EPB
* @epp_default: Power on default HWP energy performance
* preference/bias
- * @epp_cached Cached HWP energy-performance preference value
+ * @epp_cached: Cached HWP energy-performance preference value
* @hwp_req_cached: Cached value of the last HWP Request MSR
* @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
* @last_io_update: Last time when IO wake flag was set
@@ -241,7 +238,6 @@ struct cpudata {
u64 prev_aperf;
u64 prev_mperf;
u64 prev_tsc;
- u64 prev_cummulative_iowait;
struct sample sample;
int32_t min_perf_ratio;
int32_t max_perf_ratio;
@@ -294,11 +290,11 @@ struct pstate_funcs {
static struct pstate_funcs pstate_funcs __read_mostly;
-static int hwp_active __read_mostly;
-static int hwp_mode_bdw __read_mostly;
-static bool per_cpu_limits __read_mostly;
+static bool hwp_active __ro_after_init;
+static int hwp_mode_bdw __ro_after_init;
+static bool per_cpu_limits __ro_after_init;
+static bool hwp_forced __ro_after_init;
static bool hwp_boost __read_mostly;
-static bool hwp_forced __read_mostly;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
@@ -596,12 +592,13 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
}
-static inline void update_turbo_state(void)
+static bool turbo_is_disabled(void)
{
u64 misc_en;
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
- global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+
+ return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}
static int min_perf_pct_min(void)
@@ -1156,12 +1153,15 @@ static void intel_pstate_update_policies(void)
static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
struct cpufreq_policy *policy)
{
- policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+ intel_pstate_get_hwp_cap(cpudata);
+
+ policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
refresh_frequency_limits(policy);
}
-static void intel_pstate_update_max_freq(unsigned int cpu)
+static void intel_pstate_update_limits(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
@@ -1173,25 +1173,12 @@ static void intel_pstate_update_max_freq(unsigned int cpu)
cpufreq_cpu_release(policy);
}
-static void intel_pstate_update_limits(unsigned int cpu)
+static void intel_pstate_update_limits_for_all(void)
{
- mutex_lock(&intel_pstate_driver_lock);
-
- update_turbo_state();
- /*
- * If turbo has been turned on or off globally, policy limits for
- * all CPUs need to be updated to reflect that.
- */
- if (global.turbo_disabled_mf != global.turbo_disabled) {
- global.turbo_disabled_mf = global.turbo_disabled;
- arch_set_max_freq_ratio(global.turbo_disabled);
- for_each_possible_cpu(cpu)
- intel_pstate_update_max_freq(cpu);
- } else {
- cpufreq_update_policy(cpu);
- }
+ int cpu;
- mutex_unlock(&intel_pstate_driver_lock);
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_limits(cpu);
}
/************************** sysfs begin ************************/
@@ -1289,11 +1276,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
return -EAGAIN;
}
- update_turbo_state();
- if (global.turbo_disabled)
- ret = sprintf(buf, "%u\n", global.turbo_disabled);
- else
- ret = sprintf(buf, "%u\n", global.no_turbo);
+ ret = sprintf(buf, "%u\n", global.no_turbo);
mutex_unlock(&intel_pstate_driver_lock);
@@ -1304,32 +1287,34 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
- int ret;
+ bool no_turbo;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
+ if (sscanf(buf, "%u", &input) != 1)
return -EINVAL;
mutex_lock(&intel_pstate_driver_lock);
if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
- return -EAGAIN;
+ count = -EAGAIN;
+ goto unlock_driver;
}
- mutex_lock(&intel_pstate_limits_lock);
+ no_turbo = !!clamp_t(int, input, 0, 1);
+
+ if (no_turbo == global.no_turbo)
+ goto unlock_driver;
- update_turbo_state();
if (global.turbo_disabled) {
pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
- mutex_unlock(&intel_pstate_limits_lock);
- mutex_unlock(&intel_pstate_driver_lock);
- return -EPERM;
+ count = -EPERM;
+ goto unlock_driver;
}
- global.no_turbo = clamp_t(int, input, 0, 1);
+ WRITE_ONCE(global.no_turbo, no_turbo);
- if (global.no_turbo) {
+ mutex_lock(&intel_pstate_limits_lock);
+
+ if (no_turbo) {
struct cpudata *cpu = all_cpu_data[0];
int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
@@ -1340,9 +1325,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
mutex_unlock(&intel_pstate_limits_lock);
- intel_pstate_update_policies();
- arch_set_max_freq_ratio(global.no_turbo);
+ intel_pstate_update_limits_for_all();
+ arch_set_max_freq_ratio(no_turbo);
+unlock_driver:
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1623,7 +1609,6 @@ static void intel_pstate_notify_work(struct work_struct *work)
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
if (policy) {
- intel_pstate_get_hwp_cap(cpudata);
__intel_pstate_update_max_freq(cpudata, policy);
cpufreq_cpu_release(policy);
@@ -1638,11 +1623,10 @@ static cpumask_t hwp_intr_enable_mask;
void notify_hwp_interrupt(void)
{
unsigned int this_cpu = smp_processor_id();
- struct cpudata *cpudata;
unsigned long flags;
u64 value;
- if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
return;
rdmsrl_safe(MSR_HWP_STATUS, &value);
@@ -1654,24 +1638,8 @@ void notify_hwp_interrupt(void)
if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
goto ack_intr;
- /*
- * Currently we never free all_cpu_data. And we can't reach here
- * without this allocated. But for safety for future changes, added
- * check.
- */
- if (unlikely(!READ_ONCE(all_cpu_data)))
- goto ack_intr;
-
- /*
- * The free is done during cleanup, when cpufreq registry is failed.
- * We wouldn't be here if it fails on init or switch status. But for
- * future changes, added check.
- */
- cpudata = READ_ONCE(all_cpu_data[this_cpu]);
- if (unlikely(!cpudata))
- goto ack_intr;
-
- schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
+ schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
+ msecs_to_jiffies(10));
spin_unlock_irqrestore(&hwp_notify_lock, flags);
@@ -1684,7 +1652,7 @@ ack_intr:
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
{
- unsigned long flags;
+ bool cancel_work;
if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
return;
@@ -1692,22 +1660,22 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
- spin_lock_irqsave(&hwp_notify_lock, flags);
- if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
- cancel_delayed_work(&cpudata->hwp_notify_work);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ spin_lock_irq(&hwp_notify_lock);
+ cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+ spin_unlock_irq(&hwp_notify_lock);
+
+ if (cancel_work)
+ cancel_delayed_work_sync(&cpudata->hwp_notify_work);
}
static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{
/* Enable HWP notification interrupt for guaranteed performance change */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
- unsigned long flags;
-
- spin_lock_irqsave(&hwp_notify_lock, flags);
+ spin_lock_irq(&hwp_notify_lock);
INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ spin_unlock_irq(&hwp_notify_lock);
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
@@ -1793,7 +1761,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
u32 vid;
val = (u64)pstate << 8;
- if (global.no_turbo && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
val |= (u64)1 << 32;
vid_fp = cpudata->vid.min + mul_fp(
@@ -1958,7 +1926,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
u64 val;
val = (u64)pstate << 8;
- if (global.no_turbo && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
val |= (u64)1 << 32;
return val;
@@ -2031,14 +1999,6 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
-static void intel_pstate_max_within_limits(struct cpudata *cpu)
-{
- int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
-
- update_turbo_state();
- intel_pstate_set_pstate(cpu, pstate);
-}
-
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
@@ -2264,7 +2224,7 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)
sample->busy_scaled = busy_frac * 100;
- target = global.no_turbo || global.turbo_disabled ?
+ target = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
target += target >> 2;
target = mul_fp(target, busy_frac);
@@ -2308,8 +2268,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu)
struct sample *sample;
int target_pstate;
- update_turbo_state();
-
target_pstate = get_target_pstate(cpu);
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
@@ -2439,6 +2397,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+#ifdef CONFIG_ACPI
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(BROADWELL_D, core_funcs),
X86_MATCH(BROADWELL_X, core_funcs),
@@ -2447,6 +2406,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
{}
};
+#endif
static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
X86_MATCH(KABYLAKE, core_funcs),
@@ -2528,7 +2488,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
static int intel_pstate_get_max_freq(struct cpudata *cpu)
{
- return global.turbo_disabled || global.no_turbo ?
+ return READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}
@@ -2613,12 +2573,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
+
/*
* NOHZ_FULL CPUs need this as the governor callback may not
* be invoked on them.
*/
intel_pstate_clear_update_util_hook(policy->cpu);
- intel_pstate_max_within_limits(cpu);
+ intel_pstate_set_pstate(cpu, pstate);
} else {
intel_pstate_set_update_util_hook(policy->cpu);
}
@@ -2661,10 +2623,9 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
{
int max_freq;
- update_turbo_state();
if (hwp_active) {
intel_pstate_get_hwp_cap(cpu);
- max_freq = global.no_turbo || global.turbo_disabled ?
+ max_freq = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
} else {
max_freq = intel_pstate_get_max_freq(cpu);
@@ -2758,9 +2719,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_freq;
- update_turbo_state();
- global.turbo_disabled_mf = global.turbo_disabled;
- policy->cpuinfo.max_freq = global.turbo_disabled ?
+ policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
policy->min = policy->cpuinfo.min_freq;
@@ -2925,8 +2884,6 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
struct cpufreq_freqs freqs;
int target_pstate;
- update_turbo_state();
-
freqs.old = policy->cur;
freqs.new = target_freq;
@@ -2948,8 +2905,6 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
struct cpudata *cpu = all_cpu_data[policy->cpu];
int target_pstate;
- update_turbo_state();
-
target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
@@ -2967,9 +2922,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
int old_pstate = cpu->pstate.current_pstate;
int cap_pstate, min_pstate, max_pstate, target_pstate;
- update_turbo_state();
- cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
- HWP_HIGHEST_PERF(hwp_cap);
+ cap_pstate = READ_ONCE(global.no_turbo) ?
+ HWP_GUARANTEED_PERF(hwp_cap) :
+ HWP_HIGHEST_PERF(hwp_cap);
/* Optimization: Avoid unnecessary divisions. */
@@ -3137,10 +3092,8 @@ static void intel_pstate_driver_cleanup(void)
if (intel_pstate_driver == &intel_pstate)
intel_pstate_clear_update_util_hook(cpu);
- spin_lock(&hwp_notify_lock);
kfree(all_cpu_data[cpu]);
WRITE_ONCE(all_cpu_data[cpu], NULL);
- spin_unlock(&hwp_notify_lock);
}
}
cpus_read_unlock();
@@ -3157,6 +3110,10 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
memset(&global, 0, sizeof(global));
global.max_perf_pct = 100;
+ global.turbo_disabled = turbo_is_disabled();
+ global.no_turbo = global.turbo_disabled;
+
+ arch_set_max_freq_ratio(global.turbo_disabled);
intel_pstate_driver = driver;
ret = cpufreq_register_driver(intel_pstate_driver);
@@ -3407,14 +3364,31 @@ static bool intel_pstate_hwp_is_enabled(void)
return !!(value & 0x1);
}
-static const struct x86_cpu_id intel_epp_balance_perf[] = {
+#define POWERSAVE_MASK GENMASK(7, 0)
+#define BALANCE_POWER_MASK GENMASK(15, 8)
+#define BALANCE_PERFORMANCE_MASK GENMASK(23, 16)
+#define PERFORMANCE_MASK GENMASK(31, 24)
+
+#define HWP_SET_EPP_VALUES(powersave, balance_power, balance_perf, performance) \
+ (FIELD_PREP_CONST(POWERSAVE_MASK, powersave) |\
+ FIELD_PREP_CONST(BALANCE_POWER_MASK, balance_power) |\
+ FIELD_PREP_CONST(BALANCE_PERFORMANCE_MASK, balance_perf) |\
+ FIELD_PREP_CONST(PERFORMANCE_MASK, performance))
+
+#define HWP_SET_DEF_BALANCE_PERF_EPP(balance_perf) \
+ (HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, HWP_EPP_BALANCE_POWERSAVE,\
+ balance_perf, HWP_EPP_PERFORMANCE))
+
+static const struct x86_cpu_id intel_epp_default[] = {
/*
* Set the EPP to 102, the maximum suggested value that can
* still yield one-core turbo frequency on Alder Lake mobile
* CPUs.
*/
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 32),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
+ X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
+ HWP_EPP_BALANCE_POWERSAVE, 115, 16)),
{}
};
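
Aside: HWP_SET_EPP_VALUES() above packs the four per-profile EPP defaults
into the single u32 driver_data slot of x86_cpu_id, one byte per profile,
and the init hunk below unpacks them again with FIELD_GET(). A standalone
model of the same byte layout using plain shifts; 102 is the Alder Lake-L
balance_performance byte from the table above, while 0xFF/0xC0/0x00 are
assumed stand-ins for the generic HWP EPP defaults:

#include <assert.h>
#include <stdint.h>

#define PACK_EPP(ps, bpw, bperf, perf)				\
	((uint32_t)(ps) | ((uint32_t)(bpw) << 8) |		\
	 ((uint32_t)(bperf) << 16) | ((uint32_t)(perf) << 24))

int main(void)
{
	/* ALDERLAKE_L-style entry: only balance_perf is overridden */
	uint32_t v = PACK_EPP(0xFF, 0xC0, 102, 0x00);

	assert(((v >> 16) & 0xFF) == 102);	/* FIELD_GET() model */
	assert((v & 0xFF) == 0xFF);		/* powersave byte */
	return 0;
}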
@@ -3451,7 +3425,7 @@ static int __init intel_pstate_init(void)
* deal with it.
*/
if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
- WRITE_ONCE(hwp_active, 1);
+ hwp_active = true;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
@@ -3512,11 +3486,24 @@ hwp_cpu_matched:
intel_pstate_sysfs_expose_params();
if (hwp_active) {
- const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
+ const struct x86_cpu_id *id = x86_match_cpu(intel_epp_default);
const struct x86_cpu_id *hybrid_id = x86_match_cpu(intel_hybrid_scaling_factor);
- if (id)
- epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
+ if (id) {
+ epp_values[EPP_INDEX_POWERSAVE] =
+ FIELD_GET(POWERSAVE_MASK, id->driver_data);
+ epp_values[EPP_INDEX_BALANCE_POWERSAVE] =
+ FIELD_GET(BALANCE_POWER_MASK, id->driver_data);
+ epp_values[EPP_INDEX_BALANCE_PERFORMANCE] =
+ FIELD_GET(BALANCE_PERFORMANCE_MASK, id->driver_data);
+ epp_values[EPP_INDEX_PERFORMANCE] =
+ FIELD_GET(PERFORMANCE_MASK, id->driver_data);
+ pr_debug("Updated EPPs powersave:%x balanced power:%x balanced perf:%x performance:%x\n",
+ epp_values[EPP_INDEX_POWERSAVE],
+ epp_values[EPP_INDEX_BALANCE_POWERSAVE],
+ epp_values[EPP_INDEX_BALANCE_PERFORMANCE],
+ epp_values[EPP_INDEX_PERFORMANCE]);
+ }
if (hybrid_id) {
hybrid_scaling_factor = hybrid_id->driver_data;
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index d46afb3c0092..8d097dcddda4 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#define LUT_MAX_ENTRIES 32U
@@ -300,7 +301,23 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
const void *data;
- int ret;
+ int ret, cpu;
+ struct device *cpu_dev;
+ struct regulator *cpu_reg;
+
+ /* Make sure that all CPU supplies are available before proceeding. */
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
+ "Failed to get cpu%d device\n", cpu);
+
+ cpu_reg = devm_regulator_get(cpu_dev, "cpu");
+ if (IS_ERR(cpu_reg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cpu_reg),
+ "CPU%d regulator get failed\n", cpu);
+ }
+
data = of_device_get_match_data(&pdev->dev);
if (!data)
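
Aside: the supply check above leans on dev_err_probe(), which returns the
error it is handed and either logs it or, for -EPROBE_DEFER, records a
deferral reason instead of spamming the log. A hedged sketch of that
report-and-return shape; example_probe() and its message are illustrative,
not driver code:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int example_probe(struct platform_device *pdev)
{
	struct regulator *reg = devm_regulator_get(&pdev->dev, "cpu");

	/* logs the error, or records a deferral reason for -EPROBE_DEFER */
	if (IS_ERR(reg))
		return dev_err_probe(&pdev->dev, PTR_ERR(reg),
				     "cpu regulator get failed\n");
	return 0;
}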
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index a0a61919bc4c..518606adf14e 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -707,6 +707,15 @@ static const struct mtk_cpufreq_platform_data mt7623_platform_data = {
.ccifreq_supported = false,
};
+static const struct mtk_cpufreq_platform_data mt7988_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 900000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1150000,
+ .ccifreq_supported = true,
+};
+
static const struct mtk_cpufreq_platform_data mt8183_platform_data = {
.min_volt_shift = 100000,
.max_volt_shift = 200000,
@@ -740,6 +749,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
{ .compatible = "mediatek,mt7623", .data = &mt7623_platform_data },
+ { .compatible = "mediatek,mt7988a", .data = &mt7988_platform_data },
{ .compatible = "mediatek,mt8167", .data = &mt8516_platform_data },
{ .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index df3567c1e93b..6c9f0888a2a7 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -120,9 +120,9 @@ static int cpu_750fx_cpu_speed(int low_speed)
/* tweak L2 for high voltage */
if (has_cpu_l2lve) {
- hid2 = mfspr(SPRN_HID2);
+ hid2 = mfspr(SPRN_HID2_750FX);
hid2 &= ~0x2000;
- mtspr(SPRN_HID2, hid2);
+ mtspr(SPRN_HID2_750FX, hid2);
}
}
#ifdef CONFIG_PPC_BOOK3S_32
@@ -131,9 +131,9 @@ static int cpu_750fx_cpu_speed(int low_speed)
if (low_speed == 1) {
/* tweak L2 for low voltage */
if (has_cpu_l2lve) {
- hid2 = mfspr(SPRN_HID2);
+ hid2 = mfspr(SPRN_HID2_750FX);
hid2 |= 0x2000;
- mtspr(SPRN_HID2, hid2);
+ mtspr(SPRN_HID2_750FX, hid2);
}
/* ramping down, set voltage last */
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 70b0f21968a0..ec8df5496a0c 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -347,8 +347,8 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
throttled_freq = freq_hz / HZ_PER_KHZ;
- /* Update thermal pressure (the boost frequencies are accepted) */
- arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+ /* Update HW pressure (the boost frequencies are accepted) */
+ arch_update_hw_pressure(policy->related_cpus, throttled_freq);
/*
* In the unlikely case policy is unregistered do not enable
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 4ee23f4ebf4a..3b4f6bfb2f4c 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -30,6 +30,7 @@ struct scmi_data {
static struct scmi_protocol_handle *ph;
static const struct scmi_perf_proto_ops *perf_ops;
+static struct cpufreq_driver scmi_cpufreq_driver;
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
@@ -144,6 +145,35 @@ scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power,
return 0;
}
+static int
+scmi_get_rate_limit(u32 domain, bool has_fast_switch)
+{
+ int ret, rate_limit;
+
+ if (has_fast_switch) {
+ /*
+ * Fast channels are used whenever available,
+ * so use their rate_limit value if populated.
+ */
+ ret = perf_ops->fast_switch_rate_limit(ph, domain,
+ &rate_limit);
+ if (!ret && rate_limit)
+ return rate_limit;
+ }
+
+ ret = perf_ops->rate_limit_get(ph, domain, &rate_limit);
+ if (ret)
+ return 0;
+
+ return rate_limit;
+}
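
Aside: scmi_get_rate_limit() above prefers the fast-channel rate limit
when fast switching will be used, falls back to the domain-wide limit, and
reports 0 (no known limit) when neither query yields a value. The same
selection order, modeled standalone with illustrative names and values:

#include <assert.h>
#include <stdbool.h>

static int pick_rate_limit(bool fast_switch, int fast_us, int domain_us)
{
	if (fast_switch && fast_us > 0)
		return fast_us;			/* fast channel wins if populated */
	return domain_us > 0 ? domain_us : 0;	/* else domain limit, else none */
}

int main(void)
{
	assert(pick_rate_limit(true, 500, 1200) == 500);
	assert(pick_rate_limit(true, 0, 1200) == 1200);	/* unpopulated fast limit */
	assert(pick_rate_limit(false, 500, 0) == 0);	/* nothing known */
	return 0;
}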
+
+static struct freq_attr *scmi_cpufreq_hw_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+ NULL,
+};
+
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, nr_opp, domain;
@@ -250,6 +280,20 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible =
perf_ops->fast_switch_possible(ph, domain);
+ policy->transition_delay_us =
+ scmi_get_rate_limit(domain, policy->fast_switch_possible);
+
+ if (policy_has_boost_freq(policy)) {
+ ret = cpufreq_enable_boost_support();
+ if (ret) {
+ dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+ goto out_free_opp;
+ } else {
+ scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+ scmi_cpufreq_driver.boost_enabled = true;
+ }
+ }
+
return 0;
out_free_opp:
@@ -308,7 +352,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = cpufreq_generic_attr,
+ .attr = scmi_cpufreq_hw_attr,
.target_index = scmi_cpufreq_set_target,
.fast_switch = scmi_cpufreq_fast_switch,
.get = scmi_cpufreq_get_rate,
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 32a9c88f8ff6..0b882765cd66 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/arm-smccc.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
@@ -18,26 +19,155 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
-#define MAX_NAME_LEN 7
-
#define NVMEM_MASK 0x7
#define NVMEM_SHIFT 5
static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
+struct sunxi_cpufreq_data {
+ u32 (*efuse_xlate)(u32 speedbin);
+};
+
+static u32 sun50i_h6_efuse_xlate(u32 speedbin)
+{
+ u32 efuse_value;
+
+ efuse_value = (speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
+
+ /*
+ * We treat unexpected efuse values as if the SoC was from
+ * the slowest bin. Expected efuse values are 1-3, slowest
+ * to fastest.
+ */
+ if (efuse_value >= 1 && efuse_value <= 3)
+ return efuse_value - 1;
+ else
+ return 0;
+}
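
Aside: a worked example of the H6 decode above. A raw efuse word of 0x60
gives (0x60 >> 5) & 0x7 == 3, the fastest documented grade, which maps to
speed bin 2; anything outside 1..3 falls back to bin 0, the slowest:

#include <assert.h>
#include <stdint.h>

#define NVMEM_MASK	0x7
#define NVMEM_SHIFT	5

static uint32_t h6_xlate(uint32_t speedbin)
{
	uint32_t v = (speedbin >> NVMEM_SHIFT) & NVMEM_MASK;

	return (v >= 1 && v <= 3) ? v - 1 : 0;
}

int main(void)
{
	assert(h6_xlate(0x60) == 2);	/* grade 3 -> fastest bin */
	assert(h6_xlate(0x20) == 0);	/* grade 1 -> slowest bin */
	assert(h6_xlate(0x00) == 0);	/* unexpected -> slowest bin */
	return 0;
}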
+
+static int get_soc_id_revision(void)
+{
+#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
+ return arm_smccc_get_soc_id_revision();
+#else
+ return SMCCC_RET_NOT_SUPPORTED;
+#endif
+}
+
+/*
+ * Judging by the OPP tables in the vendor BSP, the quality order of the
+ * returned speedbin index is 4 -> 0/2 -> 3 -> 1, from worst to best.
+ * 0 and 2 seem identical from the OPP tables' point of view.
+ */
+static u32 sun50i_h616_efuse_xlate(u32 speedbin)
+{
+ int ver_bits = get_soc_id_revision();
+ u32 value = 0;
+
+ switch (speedbin & 0xffff) {
+ case 0x2000:
+ value = 0;
+ break;
+ case 0x2400:
+ case 0x7400:
+ case 0x2c00:
+ case 0x7c00:
+ if (ver_bits != SMCCC_RET_NOT_SUPPORTED && ver_bits <= 1) {
+ /* IC version A/B */
+ value = 1;
+ } else {
+ /* IC version C and later */
+ value = 2;
+ }
+ break;
+ case 0x5000:
+ case 0x5400:
+ case 0x6000:
+ value = 3;
+ break;
+ case 0x5c00:
+ value = 4;
+ break;
+ case 0x5d00:
+ value = 0;
+ break;
+ default:
+ pr_warn("sun50i-cpufreq-nvmem: unknown speed bin 0x%x, using default bin 0\n",
+ speedbin & 0xffff);
+ value = 0;
+ break;
+ }
+
+ return value;
+}
+
+static struct sunxi_cpufreq_data sun50i_h6_cpufreq_data = {
+ .efuse_xlate = sun50i_h6_efuse_xlate,
+};
+
+static struct sunxi_cpufreq_data sun50i_h616_cpufreq_data = {
+ .efuse_xlate = sun50i_h616_efuse_xlate,
+};
+
+static const struct of_device_id cpu_opp_match_list[] = {
+ { .compatible = "allwinner,sun50i-h6-operating-points",
+ .data = &sun50i_h6_cpufreq_data,
+ },
+ { .compatible = "allwinner,sun50i-h616-operating-points",
+ .data = &sun50i_h616_cpufreq_data,
+ },
+ {}
+};
+
+/**
+ * dt_has_supported_hw() - Check if any OPPs use opp-supported-hw
+ *
+ * If we ask the OPP core to use the opp-supported-hw feature, it
+ * will ignore every OPP node without that DT property. If none of
+ * the OPPs have it, the driver will fail probing due to the lack
+ * of OPPs.
+ *
+ * Returns true if we have at least one OPP with the opp-supported-hw property.
+ */
+static bool dt_has_supported_hw(void)
+{
+ bool has_opp_supported_hw = false;
+ struct device_node *np, *opp;
+ struct device *cpu_dev;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return false;
+
+ np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!np)
+ return false;
+
+ for_each_child_of_node(np, opp) {
+ if (of_find_property(opp, "opp-supported-hw", NULL)) {
+ has_opp_supported_hw = true;
+ break;
+ }
+ }
+
+ of_node_put(np);
+
+ return has_opp_supported_hw;
+}
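
Aside: once config.supported_hw is set (see the probe hunk further below),
the OPP core keeps an OPP only when its opp-supported-hw mask ANDs
non-zero against the configured value, which is why the config must be
skipped when no OPP carries the property. The single-cell matching rule
this driver relies on, modeled standalone (the exact multi-cell semantics
of the OPP core are not reproduced here):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* single-cell model: keep the OPP iff its mask covers our speed bin */
static bool opp_is_usable(uint32_t opp_supported_hw, unsigned int speed)
{
	return opp_supported_hw & (1U << speed);
}

int main(void)
{
	assert(opp_is_usable(0x1f, 2));		/* OPP valid for bins 0..4 */
	assert(!opp_is_usable(0x02, 2));	/* OPP for bin 1 only */
	return 0;
}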
+
/**
* sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value
- * @versions: Set to the value parsed from efuse
*
- * Returns 0 if success.
+ * Returns a non-negative speed bin index on success, or a negative
+ * error value otherwise.
*/
-static int sun50i_cpufreq_get_efuse(u32 *versions)
+static int sun50i_cpufreq_get_efuse(void)
{
+ const struct sunxi_cpufreq_data *opp_data;
struct nvmem_cell *speedbin_nvmem;
+ const struct of_device_id *match;
struct device_node *np;
struct device *cpu_dev;
- u32 *speedbin, efuse_value;
- size_t len;
+ u32 *speedbin;
int ret;
cpu_dev = get_cpu_device(0);
@@ -48,12 +178,12 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
if (!np)
return -ENOENT;
- ret = of_device_is_compatible(np,
- "allwinner,sun50i-h6-operating-points");
- if (!ret) {
+ match = of_match_node(cpu_opp_match_list, np);
+ if (!match) {
of_node_put(np);
return -ENOENT;
}
+ opp_data = match->data;
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
of_node_put(np);
@@ -61,33 +191,25 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
- speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+ speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
nvmem_cell_put(speedbin_nvmem);
if (IS_ERR(speedbin))
return PTR_ERR(speedbin);
- efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
-
- /*
- * We treat unexpected efuse values as if the SoC was from
- * the slowest bin. Expected efuse values are 1-3, slowest
- * to fastest.
- */
- if (efuse_value >= 1 && efuse_value <= 3)
- *versions = efuse_value - 1;
- else
- *versions = 0;
+ ret = opp_data->efuse_xlate(*speedbin);
kfree(speedbin);
- return 0;
+
+ return ret;
};
static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
{
int *opp_tokens;
- char name[MAX_NAME_LEN];
- unsigned int cpu;
- u32 speed = 0;
+ char name[] = "speedXXXXXXXXXXX"; /* Integers can take 11 chars max */
+ unsigned int cpu, supported_hw;
+ struct dev_pm_opp_config config = {};
+ int speed;
int ret;
opp_tokens = kcalloc(num_possible_cpus(), sizeof(*opp_tokens),
@@ -95,13 +217,24 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
if (!opp_tokens)
return -ENOMEM;
- ret = sun50i_cpufreq_get_efuse(&speed);
- if (ret) {
+ speed = sun50i_cpufreq_get_efuse();
+ if (speed < 0) {
kfree(opp_tokens);
- return ret;
+ return speed;
}
- snprintf(name, MAX_NAME_LEN, "speed%d", speed);
+ /*
+ * Only apply the supported_hw config if at least one OPP carries
+ * the "opp-supported-hw" property; otherwise the OPP core would
+ * reject every OPP and probing would fail.
+ */
+ if (dt_has_supported_hw()) {
+ supported_hw = 1U << speed;
+ config.supported_hw = &supported_hw;
+ config.supported_hw_count = 1;
+ }
+
+ snprintf(name, sizeof(name), "speed%d", speed);
+ config.prop_name = name;
for_each_possible_cpu(cpu) {
struct device *cpu_dev = get_cpu_device(cpu);
@@ -111,12 +244,11 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
goto free_opp;
}
- opp_tokens[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
- if (opp_tokens[cpu] < 0) {
- ret = opp_tokens[cpu];
- pr_err("Failed to set prop name\n");
+ ret = dev_pm_opp_set_config(cpu_dev, &config);
+ if (ret < 0)
goto free_opp;
- }
+
+ opp_tokens[cpu] = ret;
}
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
@@ -131,7 +263,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
free_opp:
for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
return ret;
@@ -145,7 +277,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
}
@@ -160,6 +292,9 @@ static struct platform_driver sun50i_cpufreq_driver = {
static const struct of_device_id sun50i_cpufreq_match_list[] = {
{ .compatible = "allwinner,sun50i-h6" },
+ { .compatible = "allwinner,sun50i-h616" },
+ { .compatible = "allwinner,sun50i-h618" },
+ { .compatible = "allwinner,sun50i-h700" },
{}
};
MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list);
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index aae951d4e77c..514146d98bca 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -52,12 +52,15 @@ out:
static int tegra124_cpufreq_probe(struct platform_device *pdev)
{
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
struct tegra124_cpufreq_priv *priv;
- struct device_node *np;
struct device *cpu_dev;
struct platform_device_info cpufreq_dt_devinfo = {};
int ret;
+ if (!np)
+ return -ENODEV;
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -66,15 +69,9 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
if (!cpu_dev)
return -ENODEV;
- np = of_cpu_device_node_get(0);
- if (!np)
- return -ENODEV;
-
priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
- if (IS_ERR(priv->cpu_clk)) {
- ret = PTR_ERR(priv->cpu_clk);
- goto out_put_np;
- }
+ if (IS_ERR(priv->cpu_clk))
+ return PTR_ERR(priv->cpu_clk);
priv->dfll_clk = of_clk_get_by_name(np, "dfll");
if (IS_ERR(priv->dfll_clk)) {
@@ -110,8 +107,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- of_node_put(np);
-
return 0;
out_put_pllp_clk:
@@ -122,8 +117,6 @@ out_put_dfll_clk:
clk_put(priv->dfll_clk);
out_put_cpu_clk:
clk_put(priv->cpu_clk);
-out_put_np:
- of_node_put(np);
return ret;
}
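
Aside: the __free(device_node) annotation comes from the scope-based
cleanup helpers in linux/cleanup.h: of_node_put() now runs automatically
on every exit path, which is what lets this hunk and the ti-cpufreq one
below drop their explicit puts and the out_put_np label. A minimal sketch
of the shape, kernel context assumed and example() illustrative:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/of.h>

static int example(void)
{
	struct device_node *np __free(device_node) =
		of_find_node_by_path("/");

	if (!np)
		return -ENODEV;	/* no manual of_node_put() needed */

	/* ... use np ... */
	return 0;		/* of_node_put(np) runs here too */
}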
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 46c41e2ca727..714ed53753fa 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -347,12 +347,10 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
static const struct of_device_id *ti_cpufreq_match_node(void)
{
- struct device_node *np;
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
const struct of_device_id *match;
- np = of_find_node_by_path("/");
match = of_match_node(ti_cpufreq_of_match, np);
- of_node_put(np);
return match;
}