Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 42 +++++++++++++++++++++++++++++++-----------
1 file changed, 31 insertions(+), 11 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index aaea9a39eced..cd2ac1ba53d2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1034,7 +1034,7 @@ static void __hybrid_init_cpu_capacity_scaling(void)
hybrid_update_cpu_capacity_scaling();
}
-static void hybrid_init_cpu_capacity_scaling(void)
+static void hybrid_init_cpu_capacity_scaling(bool refresh)
{
bool disable_itmt = false;
@@ -1045,7 +1045,7 @@ static void hybrid_init_cpu_capacity_scaling(void)
* scaling has been enabled already and the driver is just changing the
* operation mode.
*/
- if (hybrid_max_perf_cpu) {
+ if (refresh) {
__hybrid_init_cpu_capacity_scaling();
goto unlock;
}
@@ -1071,6 +1071,18 @@ unlock:
sched_clear_itmt_support();
}
+static bool hybrid_clear_max_perf_cpu(void)
+{
+ bool ret;
+
+ guard(mutex)(&hybrid_capacity_lock);
+
+ ret = !!hybrid_max_perf_cpu;
+ hybrid_max_perf_cpu = NULL;
+
+ return ret;
+}
+
static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
u64 cap;
@@ -1845,7 +1857,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
-static DEFINE_SPINLOCK(hwp_notify_lock);
+static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
static cpumask_t hwp_intr_enable_mask;
#define HWP_GUARANTEED_PERF_CHANGE_STATUS BIT(0)
@@ -1868,7 +1880,7 @@ void notify_hwp_interrupt(void)
if (!(value & status_mask))
return;
- spin_lock_irqsave(&hwp_notify_lock, flags);
+ raw_spin_lock_irqsave(&hwp_notify_lock, flags);
if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
goto ack_intr;
@@ -1876,13 +1888,13 @@ void notify_hwp_interrupt(void)
schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
msecs_to_jiffies(10));
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
return;
ack_intr:
wrmsrl_safe(MSR_HWP_STATUS, 0);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
}
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
@@ -1895,9 +1907,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
- spin_lock_irq(&hwp_notify_lock);
+ raw_spin_lock_irq(&hwp_notify_lock);
cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
- spin_unlock_irq(&hwp_notify_lock);
+ raw_spin_unlock_irq(&hwp_notify_lock);
if (cancel_work)
cancel_delayed_work_sync(&cpudata->hwp_notify_work);
@@ -1912,10 +1924,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
- spin_lock_irq(&hwp_notify_lock);
+ raw_spin_lock_irq(&hwp_notify_lock);
INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
- spin_unlock_irq(&hwp_notify_lock);
+ raw_spin_unlock_irq(&hwp_notify_lock);
if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
@@ -2263,6 +2275,11 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
} else {
cpu->pstate.scaling = perf_ctl_scaling;
}
+ /*
+ * If the CPU is going online for the first time and it was
+ * offline initially, asym capacity scaling needs to be updated.
+ */
+ hybrid_update_capacity(cpu);
} else {
cpu->pstate.scaling = perf_ctl_scaling;
cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
@@ -3352,6 +3369,7 @@ static void intel_pstate_driver_cleanup(void)
static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
+ bool refresh_cpu_cap_scaling;
int ret;
if (driver == &intel_pstate)
@@ -3364,6 +3382,8 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
arch_set_max_freq_ratio(global.turbo_disabled);
+ refresh_cpu_cap_scaling = hybrid_clear_max_perf_cpu();
+
intel_pstate_driver = driver;
ret = cpufreq_register_driver(intel_pstate_driver);
if (ret) {
@@ -3373,7 +3393,7 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
global.min_perf_pct = min_perf_pct_min();
- hybrid_init_cpu_capacity_scaling();
+ hybrid_init_cpu_capacity_scaling(refresh_cpu_cap_scaling);
return 0;
}
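
For context (not part of the patch): the new hybrid_clear_max_perf_cpu() above uses the scope-based guard(mutex)() helper from <linux/cleanup.h>/<linux/mutex.h>, which releases the mutex automatically when the function returns. A minimal sketch of the same pattern, using hypothetical names (example_lock, example_owner, example_clear_owner) rather than anything from this driver, might look like this:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static void *example_owner;

/*
 * Hypothetical helper mirroring the shape of hybrid_clear_max_perf_cpu():
 * guard(mutex) acquires example_lock here and drops it automatically at
 * every return path, so no explicit mutex_unlock() or goto-based
 * unwinding is needed.
 */
static bool example_clear_owner(void)
{
	bool ret;

	guard(mutex)(&example_lock);

	ret = !!example_owner;
	example_owner = NULL;

	return ret;
}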