author		Linus Torvalds <torvalds@linux-foundation.org>	2017-03-02 17:33:52 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-03-02 17:33:52 -0800
commit		080e4168c0834ccc853c48259e16a5c556c7ecba (patch)
tree		641a264718c6f1b8bf9525e4e4a073565044cd03 /drivers/base
parent		bbe08c0a43e2c5ee3a00de68c0e867a08a9aa990 (diff)
parent		9b5e9cb164ee93ae19c4c6593e8188a55481f78b (diff)
Merge tag 'pm-extra-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull more power management updates from Rafael Wysocki:
 "These fix two bugs introduced by recent power management updates (in
  the cpuidle menu governor and intel_pstate) and a few other issues,
  clean up things and remove unused code.

  Specifics:

   - Fix for a cpuidle menu governor problem that started to take an
     unnecessary spinlock after one of the recent updates and that did
     not play well with the RT patch (Rafael Wysocki).

   - Fix for the new intel_pstate operation mode switching feature
     added recently that did not reinitialize P-state limits properly
     when switching operation modes (Rafael Wysocki).

   - Removal of unused global notifiers from the PM QoS framework
     (Viresh Kumar).

   - Generic power domains framework update to make it handle
     asynchronous invocations of PM callbacks in the "noirq" phases of
     system suspend/hibernation correctly (Ulf Hansson).

   - Two hibernation core cleanups (Rafael Wysocki).

   - intel_idle cleanup related to the sysfs interface (Len Brown).

   - Off-by-one bug fix in the OPP (Operating Performance Points)
     framework (Andrzej Hajda).

   - OPP framework's documentation fix (Viresh Kumar).

   - cpufreq qoriq driver cleanup (Tang Yuantian).

   - Fixes for typos in comments in the device runtime PM framework
     (Christophe Jaillet)"

* tag 'pm-extra-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / OPP: Documentation: Fix opp-microvolt in examples
  intel_idle: stop exposing platform acronyms in sysfs
  cpufreq: intel_pstate: Fix limits issue with operation mode switching
  PM / hibernate: Define pr_fmt() and use pr_*() instead of printk()
  PM / hibernate: Untangle power_down()
  cpuidle: menu: Avoid taking spinlock for accessing QoS values
  PM / QoS: Remove global notifiers
  PM / runtime: Fix some typos
  cpufreq: qoriq: clean up unused code
  PM / OPP: fix off-by-one bug in dev_pm_opp_get_max_volt_latency loop
  PM / Domains: Power off masters immediately in the power off sequence
  PM / Domains: Rename is_async to one_dev_on for genpd_power_off()
  PM / Domains: Move genpd_power_off() above genpd_power_on()
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/power/domain.c	178
-rw-r--r--	drivers/base/power/opp/core.c	3
-rw-r--r--	drivers/base/power/qos.c	53
3 files changed, 100 insertions, 134 deletions
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 3a75fb1b4126..e697dec9d25b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -274,6 +274,93 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
}
/**
+ * genpd_power_off - Remove power from a given PM domain.
+ * @genpd: PM domain to power down.
+ * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
+ * RPM status of the related device is in an intermediate state, not yet turned
+ * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
+ * be RPM_SUSPENDED, while it tries to power off the PM domain.
+ *
+ * If all of the @genpd's devices have been suspended and all of its subdomains
+ * have been powered down, remove power from @genpd.
+ */
+static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
+ unsigned int depth)
+{
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
+ unsigned int not_suspended = 0;
+
+ /*
+ * Do not try to power off the domain in the following situations:
+ * (1) The domain is already in the "power off" state.
+ * (2) System suspend is in progress.
+ */
+ if (genpd->status == GPD_STATE_POWER_OFF
+ || genpd->prepared_count > 0)
+ return 0;
+
+ if (atomic_read(&genpd->sd_count) > 0)
+ return -EBUSY;
+
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ enum pm_qos_flags_status stat;
+
+ stat = dev_pm_qos_flags(pdd->dev,
+ PM_QOS_FLAG_NO_POWER_OFF
+ | PM_QOS_FLAG_REMOTE_WAKEUP);
+ if (stat > PM_QOS_FLAGS_NONE)
+ return -EBUSY;
+
+ /*
+ * Do not allow PM domain to be powered off, when an IRQ safe
+ * device is part of a non-IRQ safe domain.
+ */
+ if (!pm_runtime_suspended(pdd->dev) ||
+ irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
+ not_suspended++;
+ }
+
+ if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
+ return -EBUSY;
+
+ if (genpd->gov && genpd->gov->power_down_ok) {
+ if (!genpd->gov->power_down_ok(&genpd->domain))
+ return -EAGAIN;
+ }
+
+ if (genpd->power_off) {
+ int ret;
+
+ if (atomic_read(&genpd->sd_count) > 0)
+ return -EBUSY;
+
+ /*
+ * If sd_count > 0 at this point, one of the subdomains hasn't
+ * managed to call genpd_power_on() for the master yet after
+ * incrementing it. In that case genpd_power_on() will wait
+ * for us to drop the lock, so we can call .power_off() and let
+ * the genpd_power_on() restore power for us (this shouldn't
+ * happen very often).
+ */
+ ret = _genpd_power_off(genpd, true);
+ if (ret)
+ return ret;
+ }
+
+ genpd->status = GPD_STATE_POWER_OFF;
+
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ genpd_sd_counter_dec(link->master);
+ genpd_lock_nested(link->master, depth + 1);
+ genpd_power_off(link->master, false, depth + 1);
+ genpd_unlock(link->master);
+ }
+
+ return 0;
+}
+
+/**
* genpd_power_on - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
* @depth: nesting count for lockdep.
@@ -321,7 +408,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
&genpd->slave_links,
slave_node) {
genpd_sd_counter_dec(link->master);
- genpd_queue_power_off_work(link->master);
+ genpd_lock_nested(link->master, depth + 1);
+ genpd_power_off(link->master, false, depth + 1);
+ genpd_unlock(link->master);
}
return ret;
@@ -368,87 +457,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
}
/**
- * genpd_power_off - Remove power from a given PM domain.
- * @genpd: PM domain to power down.
- * @is_async: PM domain is powered down from a scheduled work
- *
- * If all of the @genpd's devices have been suspended and all of its subdomains
- * have been powered down, remove power from @genpd.
- */
-static int genpd_power_off(struct generic_pm_domain *genpd, bool is_async)
-{
- struct pm_domain_data *pdd;
- struct gpd_link *link;
- unsigned int not_suspended = 0;
-
- /*
- * Do not try to power off the domain in the following situations:
- * (1) The domain is already in the "power off" state.
- * (2) System suspend is in progress.
- */
- if (genpd->status == GPD_STATE_POWER_OFF
- || genpd->prepared_count > 0)
- return 0;
-
- if (atomic_read(&genpd->sd_count) > 0)
- return -EBUSY;
-
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- enum pm_qos_flags_status stat;
-
- stat = dev_pm_qos_flags(pdd->dev,
- PM_QOS_FLAG_NO_POWER_OFF
- | PM_QOS_FLAG_REMOTE_WAKEUP);
- if (stat > PM_QOS_FLAGS_NONE)
- return -EBUSY;
-
- /*
- * Do not allow PM domain to be powered off, when an IRQ safe
- * device is part of a non-IRQ safe domain.
- */
- if (!pm_runtime_suspended(pdd->dev) ||
- irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
- not_suspended++;
- }
-
- if (not_suspended > 1 || (not_suspended == 1 && is_async))
- return -EBUSY;
-
- if (genpd->gov && genpd->gov->power_down_ok) {
- if (!genpd->gov->power_down_ok(&genpd->domain))
- return -EAGAIN;
- }
-
- if (genpd->power_off) {
- int ret;
-
- if (atomic_read(&genpd->sd_count) > 0)
- return -EBUSY;
-
- /*
- * If sd_count > 0 at this point, one of the subdomains hasn't
- * managed to call genpd_power_on() for the master yet after
- * incrementing it. In that case genpd_power_on() will wait
- * for us to drop the lock, so we can call .power_off() and let
- * the genpd_power_on() restore power for us (this shouldn't
- * happen very often).
- */
- ret = _genpd_power_off(genpd, true);
- if (ret)
- return ret;
- }
-
- genpd->status = GPD_STATE_POWER_OFF;
-
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_dec(link->master);
- genpd_queue_power_off_work(link->master);
- }
-
- return 0;
-}
-
-/**
* genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
* @work: Work structure used for scheduling the execution of this function.
*/
@@ -459,7 +467,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
genpd = container_of(work, struct generic_pm_domain, power_off_work);
genpd_lock(genpd);
- genpd_power_off(genpd, true);
+ genpd_power_off(genpd, false, 0);
genpd_unlock(genpd);
}
@@ -578,7 +586,7 @@ static int genpd_runtime_suspend(struct device *dev)
return 0;
genpd_lock(genpd);
- genpd_power_off(genpd, false);
+ genpd_power_off(genpd, true, 0);
genpd_unlock(genpd);
return 0;
@@ -658,7 +666,7 @@ err_poweroff:
if (!pm_runtime_is_irq_safe(dev) ||
(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
genpd_lock(genpd);
- genpd_power_off(genpd, 0);
+ genpd_power_off(genpd, true, 0);
genpd_unlock(genpd);
}
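
The new call-site convention above is worth spelling out: genpd_power_off() entered from a device's own ->runtime_suspend() path passes one_dev_on == true, because that device's RPM status is still mid-transition and must be excused from the not_suspended count; the scheduled work item and the power-on error path pass false, so every device has to be fully suspended. A standalone sketch of that accounting (plain C, not kernel code; the fake_dev struct and main() harness are invented for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	struct fake_dev {
		bool rpm_suspended;	/* stand-in for pm_runtime_suspended() */
	};

	/* Mirrors the gate in genpd_power_off(): with one_dev_on == true,
	 * exactly one device may still be mid-transition (the caller's own). */
	static bool may_power_off(const struct fake_dev *devs, int n, bool one_dev_on)
	{
		unsigned int not_suspended = 0;
		int i;

		for (i = 0; i < n; i++)
			if (!devs[i].rpm_suspended)
				not_suspended++;

		return !(not_suspended > 1 || (not_suspended == 1 && !one_dev_on));
	}

	int main(void)
	{
		struct fake_dev devs[] = { { .rpm_suspended = true },
					   { .rpm_suspended = false } };

		/* From ->runtime_suspend(): the caller's device is still "on". */
		printf("runtime path: %d\n", may_power_off(devs, 2, true));	/* 1 */
		/* From the work item: every device must already be suspended. */
		printf("work path:    %d\n", may_power_off(devs, 2, false));	/* 0 */
		return 0;
	}

Passing depth + 1 when recursing into link->master keeps lockdep's nested-lock annotations consistent now that each master is powered off synchronously instead of through queued work.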
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 91ec3232d630..dae61720b314 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -231,7 +231,8 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
* The caller needs to ensure that opp_table (and hence the regulator)
* isn't freed, while we are executing this routine.
*/
- for (i = 0; reg = regulators[i], i < count; i++) {
+ for (i = 0; i < count; i++) {
+ reg = regulators[i];
ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
if (ret > 0)
latency_ns += ret * 1000;
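
The hunk above fixes a comma-operator off-by-one: in the old loop header, reg = regulators[i] is the left operand of the comma expression and therefore runs before the i < count test, so the terminating pass (i == count) indexes regulators[count], one element past the end of the array. A minimal standalone demonstration (plain C, not kernel code; the fetch() counter is invented to make the extra access visible):

	#include <stdio.h>

	#define NREG 3

	static int count_reads;

	static int fetch(const int *arr, int i)
	{
		count_reads++;
		return i < NREG ? arr[i] : 0;	/* clamp to keep the demo defined */
	}

	int main(void)
	{
		int regulators[NREG] = { 10, 20, 30 };
		int reg, i;

		/* Buggy header: fetch() runs once more, with i == NREG. */
		for (i = 0; reg = fetch(regulators, i), i < NREG; i++)
			;
		printf("buggy loop: %d fetches for %d elements\n", count_reads, NREG);

		/* Fixed header: the subscript happens only inside the body. */
		count_reads = 0;
		for (i = 0; i < NREG; i++)
			reg = fetch(regulators, i);
		printf("fixed loop: %d fetches for %d elements\n", count_reads, NREG);

		(void)reg;
		return 0;
	}

On the real regulators[] array the extra read is out of bounds (undefined behavior), which is why the fix moves the subscript into the loop body.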
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index d888d9869b6a..f850daeffba4 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -17,12 +17,9 @@
*
* This QoS design is best effort based. Dependents register their QoS needs.
* Watchers register to keep track of the current QoS needs of the system.
- * Watchers can register different types of notification callbacks:
- * . a per-device notification callback using the dev_pm_qos_*_notifier API.
- * The notification chain data is stored in the per-device constraint
- * data struct.
- * . a system-wide notification callback using the dev_pm_qos_*_global_notifier
- * API. The notification chain data is stored in a static variable.
+ * Watchers can register a per-device notification callback using the
+ * dev_pm_qos_*_notifier API. The notification chain data is stored in the
+ * per-device constraint data struct.
*
* Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
@@ -49,8 +46,6 @@
static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
-static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
-
/**
* __dev_pm_qos_flags - Check PM QoS flags for a given device.
* @dev: Device to check the PM QoS flags for.
@@ -108,8 +103,7 @@ s32 __dev_pm_qos_read_value(struct device *dev)
{
lockdep_assert_held(&dev->power.lock);
- return IS_ERR_OR_NULL(dev->power.qos) ?
- 0 : pm_qos_read_value(&dev->power.qos->resume_latency);
+ return dev_pm_qos_raw_read_value(dev);
}
/**
@@ -135,8 +129,7 @@ s32 dev_pm_qos_read_value(struct device *dev)
* @value: Value to assign to the QoS request.
*
* Internal function to update the constraints list using the PM QoS core
- * code and if needed call the per-device and the global notification
- * callbacks
+ * code and if needed call the per-device callbacks.
*/
static int apply_constraint(struct dev_pm_qos_request *req,
enum pm_qos_req_action action, s32 value)
@@ -148,12 +141,6 @@ static int apply_constraint(struct dev_pm_qos_request *req,
case DEV_PM_QOS_RESUME_LATENCY:
ret = pm_qos_update_target(&qos->resume_latency,
&req->data.pnode, action, value);
- if (ret) {
- value = pm_qos_read_value(&qos->resume_latency);
- blocking_notifier_call_chain(&dev_pm_notifiers,
- (unsigned long)value,
- req);
- }
break;
case DEV_PM_QOS_LATENCY_TOLERANCE:
ret = pm_qos_update_target(&qos->latency_tolerance,
@@ -536,36 +523,6 @@ int dev_pm_qos_remove_notifier(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
/**
- * dev_pm_qos_add_global_notifier - sets notification entry for changes to
- * target value of the PM QoS constraints for any device
- *
- * @notifier: notifier block managed by caller.
- *
- * Will register the notifier into a notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
-{
- return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
-
-/**
- * dev_pm_qos_remove_global_notifier - deletes notification for changes to
- * target value of PM QoS constraints for any device
- *
- * @notifier: notifier block to be removed.
- *
- * Will remove the notifier from the notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
-{
- return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
-
-/**
* dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
* @dev: Device whose ancestor to add the request for.
* @req: Pointer to the preallocated handle.
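
With the global chain removed, watchers are left with the per-device API that this patch keeps. A minimal sketch of how a hypothetical driver might track resume-latency constraint changes for its own device; the my_* names are invented, and the two-argument dev_pm_qos_{add,remove}_notifier() signatures are assumed to match this kernel version's <linux/pm_qos.h>:

	#include <linux/device.h>
	#include <linux/notifier.h>
	#include <linux/pm_qos.h>

	/* value is the new effective resume-latency constraint for the device. */
	static int my_qos_cb(struct notifier_block *nb, unsigned long value, void *unused)
	{
		pr_info("resume latency constraint is now %lu us\n", value);
		return NOTIFY_OK;
	}

	static struct notifier_block my_qos_nb = {
		.notifier_call = my_qos_cb,
	};

	/* Called with a device the driver already owns, e.g. from probe(). */
	static int my_watch_qos(struct device *dev)
	{
		return dev_pm_qos_add_notifier(dev, &my_qos_nb);
	}

	static void my_unwatch_qos(struct device *dev)
	{
		dev_pm_qos_remove_notifier(dev, &my_qos_nb);
	}

The notification chain data lives in the per-device constraint struct, so there is no longer a system-wide fan-out on every constraint update, which is what allowed apply_constraint() to drop the blocking_notifier_call_chain() call above.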