author     Linus Torvalds <torvalds@linux-foundation.org>  2024-10-04 11:57:15 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-10-04 11:57:15 -0700
commit     5d18081de22cb73f0959deb0327292da30c9771c (patch)
tree       33fc475ad96d93b878b4ef0da8dd486a7faf7143 /drivers
parent     cc70ce8fccd3f81c58f1e983336568d7c9df0e3b (diff)
parent     c0f02536fffbbec71aced36d52a765f8c4493dc2 (diff)
Merge tag 'pm-6.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management fixes from Rafael Wysocki:
 "These fix two cpufreq issues, one in the core and one in the
  intel_pstate driver:

   - Fix CPU device node reference counting in the cpufreq core
     (Miquel Sabaté Solà)

   - Turn the spinlock used by the intel_pstate driver in hard IRQ
     context into a raw one to prevent the driver from crashing when
     PREEMPT_RT is enabled (Uwe Kleine-König)"

* tag 'pm-6.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq: Avoid a bad reference count on CPU node
  cpufreq: intel_pstate: Make hwp_notify_lock a raw spinlock
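Background on the second fix: with CONFIG_PREEMPT_RT=y, a regular
spinlock_t is implemented on top of a sleeping rtmutex, which must not
be acquired in hard IRQ context such as notify_hwp_interrupt(); a
raw_spinlock_t remains a genuine busy-waiting lock there. A minimal
sketch of the pattern, using hypothetical names (example_lock,
example_irq_handler) rather than the driver's actual code:

/*
 * Illustrative sketch only -- not code from this patch.
 *
 * On PREEMPT_RT, spinlock_t sleeps under contention, so taking one
 * in hard IRQ context (where sleeping is forbidden) can crash.
 * raw_spinlock_t always stays a true busy-waiting spinlock.
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);  /* safe in hard IRQ context */

static void example_irq_handler(void)
{
	unsigned long flags;

	/* Never sleeps, even with CONFIG_PREEMPT_RT=y */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... update state shared with other contexts ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}

The same constraint explains why the driver keeps wrmsrl_on_cpu()
outside the lock (see the comment in the hunk below): raw spinlock
critical sections must stay short and must not wait on other CPUs.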
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index aaea9a39eced..b0018f371ea3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1845,7 +1845,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
-static DEFINE_SPINLOCK(hwp_notify_lock);
+static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
static cpumask_t hwp_intr_enable_mask;
#define HWP_GUARANTEED_PERF_CHANGE_STATUS BIT(0)
@@ -1868,7 +1868,7 @@ void notify_hwp_interrupt(void)
if (!(value & status_mask))
return;
- spin_lock_irqsave(&hwp_notify_lock, flags);
+ raw_spin_lock_irqsave(&hwp_notify_lock, flags);
if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
goto ack_intr;
@@ -1876,13 +1876,13 @@ void notify_hwp_interrupt(void)
schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
msecs_to_jiffies(10));
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
return;
ack_intr:
wrmsrl_safe(MSR_HWP_STATUS, 0);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
}
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
@@ -1895,9 +1895,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
- spin_lock_irq(&hwp_notify_lock);
+ raw_spin_lock_irq(&hwp_notify_lock);
cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
- spin_unlock_irq(&hwp_notify_lock);
+ raw_spin_unlock_irq(&hwp_notify_lock);
if (cancel_work)
cancel_delayed_work_sync(&cpudata->hwp_notify_work);
@@ -1912,10 +1912,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
- spin_lock_irq(&hwp_notify_lock);
+ raw_spin_lock_irq(&hwp_notify_lock);
INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
- spin_unlock_irq(&hwp_notify_lock);
+ raw_spin_unlock_irq(&hwp_notify_lock);
if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;