Diffstat:
 drivers/cpufreq/intel_pstate.c | 138
 1 file changed, 98 insertions(+), 40 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 815df3daae9d..bc7f7e6759bd 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -338,6 +338,8 @@ static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
+#define CPPC_MAX_PERF U8_MAX
+
static void intel_pstate_set_itmt_prio(int cpu)
{
struct cppc_perf_caps cppc_perf;
@@ -349,6 +351,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
return;
/*
+ * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
+ * In that case CPPC.highest_perf cannot be used to enable ITMT, so look at
+ * MSR_HWP_CAPABILITIES bits [8:0] instead.
+ */
+ if (cppc_perf.highest_perf == CPPC_MAX_PERF)
+ cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
+
+ /*
* The priorities can be set regardless of whether or not
* sched_set_itmt_support(true) has been called and it is valid to
* update them at any time after it has been called.
@@ -654,19 +664,29 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
* 3 balance_power
* 4 power
*/
+
+enum energy_perf_value_index {
+ EPP_INDEX_DEFAULT = 0,
+ EPP_INDEX_PERFORMANCE,
+ EPP_INDEX_BALANCE_PERFORMANCE,
+ EPP_INDEX_BALANCE_POWERSAVE,
+ EPP_INDEX_POWERSAVE,
+};
+
static const char * const energy_perf_strings[] = {
- "default",
- "performance",
- "balance_performance",
- "balance_power",
- "power",
+ [EPP_INDEX_DEFAULT] = "default",
+ [EPP_INDEX_PERFORMANCE] = "performance",
+ [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
+ [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
+ [EPP_INDEX_POWERSAVE] = "power",
NULL
};
-static const unsigned int epp_values[] = {
- HWP_EPP_PERFORMANCE,
- HWP_EPP_BALANCE_PERFORMANCE,
- HWP_EPP_BALANCE_POWERSAVE,
- HWP_EPP_POWERSAVE
+static unsigned int epp_values[] = {
+ [EPP_INDEX_DEFAULT] = 0, /* Unused index */
+ [EPP_INDEX_PERFORMANCE] = HWP_EPP_PERFORMANCE,
+ [EPP_INDEX_BALANCE_PERFORMANCE] = HWP_EPP_BALANCE_PERFORMANCE,
+ [EPP_INDEX_BALANCE_POWERSAVE] = HWP_EPP_BALANCE_POWERSAVE,
+ [EPP_INDEX_POWERSAVE] = HWP_EPP_POWERSAVE,
};
static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
@@ -680,14 +700,14 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw
return epp;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
- if (epp == HWP_EPP_PERFORMANCE)
- return 1;
- if (epp == HWP_EPP_BALANCE_PERFORMANCE)
- return 2;
- if (epp == HWP_EPP_BALANCE_POWERSAVE)
- return 3;
- if (epp == HWP_EPP_POWERSAVE)
- return 4;
+ if (epp == epp_values[EPP_INDEX_PERFORMANCE])
+ return EPP_INDEX_PERFORMANCE;
+ if (epp == epp_values[EPP_INDEX_BALANCE_PERFORMANCE])
+ return EPP_INDEX_BALANCE_PERFORMANCE;
+ if (epp == epp_values[EPP_INDEX_BALANCE_POWERSAVE])
+ return EPP_INDEX_BALANCE_POWERSAVE;
+ if (epp == epp_values[EPP_INDEX_POWERSAVE])
+ return EPP_INDEX_POWERSAVE;
*raw_epp = epp;
return 0;
} else if (boot_cpu_has(X86_FEATURE_EPB)) {
@@ -747,7 +767,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
if (use_raw)
epp = raw_epp;
else if (epp == -EINVAL)
- epp = epp_values[pref_index - 1];
+ epp = epp_values[pref_index];
/*
* To avoid confusion, refuse to set EPP to any values different
@@ -833,7 +853,7 @@ static ssize_t store_energy_performance_preference(
* upfront.
*/
if (!raw)
- epp = ret ? epp_values[ret - 1] : cpu->epp_default;
+ epp = ret ? epp_values[ret] : cpu->epp_default;
if (cpu->epp_cached != epp) {
int err;
@@ -1006,6 +1026,12 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
*/
value &= ~GENMASK_ULL(31, 24);
value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
+ /*
+ * However, make sure that EPP will be set to "performance" when
+ * the CPU is brought back online again and the "performance"
+ * scaling algorithm is still in effect.
+ */
+ cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
}
/*
@@ -1108,19 +1134,22 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}
+static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
+ struct cpufreq_policy *policy)
+{
+ policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+ cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+ refresh_frequency_limits(policy);
+}
+
static void intel_pstate_update_max_freq(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
- struct cpudata *cpudata;
if (!policy)
return;
- cpudata = all_cpu_data[cpu];
- policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
- cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
-
- refresh_frequency_limits(policy);
+ __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
cpufreq_cpu_release(policy);
}
@@ -1568,8 +1597,15 @@ static void intel_pstate_notify_work(struct work_struct *work)
{
struct cpudata *cpudata =
container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
+
+ if (policy) {
+ intel_pstate_get_hwp_cap(cpudata);
+ __intel_pstate_update_max_freq(cpudata, policy);
+
+ cpufreq_cpu_release(policy);
+ }
- cpufreq_update_policy(cpudata->cpu);
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
@@ -1663,10 +1699,18 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
- if (cpudata->epp_default == -EINVAL)
- cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
intel_pstate_enable_hwp_interrupt(cpudata);
+
+ if (cpudata->epp_default >= 0)
+ return;
+
+ if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) {
+ cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+ } else {
+ cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
+ intel_pstate_set_epp(cpudata, cpudata->epp_default);
+ }
}
static int atom_get_min_pstate(void)
@@ -2353,6 +2397,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(BROADWELL_D, core_funcs),
X86_MATCH(BROADWELL_X, core_funcs),
X86_MATCH(SKYLAKE_X, core_funcs),
+ X86_MATCH(ICELAKE_X, core_funcs),
{}
};
@@ -2469,18 +2514,14 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
* HWP needs some special consideration, because HWP_REQUEST uses
* abstract values to represent performance rather than pure ratios.
*/
- if (hwp_active) {
- intel_pstate_get_hwp_cap(cpu);
+ if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
+ int scaling = cpu->pstate.scaling;
+ int freq;
- if (cpu->pstate.scaling != perf_ctl_scaling) {
- int scaling = cpu->pstate.scaling;
- int freq;
-
- freq = max_policy_perf * perf_ctl_scaling;
- max_policy_perf = DIV_ROUND_UP(freq, scaling);
- freq = min_policy_perf * perf_ctl_scaling;
- min_policy_perf = DIV_ROUND_UP(freq, scaling);
- }
+ freq = max_policy_perf * perf_ctl_scaling;
+ max_policy_perf = DIV_ROUND_UP(freq, scaling);
+ freq = min_policy_perf * perf_ctl_scaling;
+ min_policy_perf = DIV_ROUND_UP(freq, scaling);
}
pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
@@ -3332,6 +3373,16 @@ static bool intel_pstate_hwp_is_enabled(void)
return !!(value & 0x1);
}
+static const struct x86_cpu_id intel_epp_balance_perf[] = {
+ /*
+ * Set the EPP value to 102; this is the highest suggested EPP
+ * that can still result in one-core turbo frequency for
+ * AlderLake Mobile CPUs.
+ */
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
+ {}
+};
+
static int __init intel_pstate_init(void)
{
static struct cpudata **_all_cpu_data;
@@ -3421,6 +3472,13 @@ hwp_cpu_matched:
intel_pstate_sysfs_expose_params();
+ if (hwp_active) {
+ const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
+
+ if (id)
+ epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
+ }
+
mutex_lock(&intel_pstate_driver_lock);
rc = intel_pstate_register_driver(default_driver);
mutex_unlock(&intel_pstate_driver_lock);