From 4bee77dc734ccd4119bb750479809ebc82776bef Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 20 Oct 2016 15:44:53 +0900 Subject: PM / OPP: make _of_get_opp_desc_node() a static function Since commit f47b72a15a96 ("PM / OPP: Move CONFIG_OF dependent code in a separate file"), this function is defined and called only in drivers/base/power/opp/of.c . Signed-off-by: Masahiro Yamada Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/base/power/opp/of.c | 2 +- drivers/base/power/opp/opp.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 5552211e6fcd..6480137bd256 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c @@ -198,7 +198,7 @@ void dev_pm_opp_of_remove_table(struct device *dev) EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); /* Returns opp descriptor node for a device, caller must do of_node_put() */ -struct device_node *_of_get_opp_desc_node(struct device *dev) +static struct device_node *_of_get_opp_desc_node(struct device *dev) { /* * TODO: Support for multiple OPP tables. diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h index fabd5ca1a083..96cd30ac6c1d 100644 --- a/drivers/base/power/opp/opp.h +++ b/drivers/base/power/opp/opp.h @@ -190,7 +190,6 @@ struct opp_table { /* Routines internal to opp core */ struct opp_table *_find_opp_table(struct device *dev); struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table); -struct device_node *_of_get_opp_desc_node(struct device *dev); void _dev_pm_opp_remove_table(struct device *dev, bool remove_all); struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table); int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table); -- cgit v1.2.3 From 349aa92e81bd14552d8d2335aff490f306038603 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 20 Oct 2016 16:12:49 +0900 Subject: PM / OPP: fix debug/error messages in dev_pm_opp_of_get_sharing_cpus() These log messages are wrong because _of_get_opp_desc_node() returns an operating-points-v2 node. Commit a6eed752f5fb ("PM / OPP: passing NULL to PTR_ERR()") fixed static checker warnings, and reworded the messages at the same time (but the latter was not mentioned in the git-log). Restore the correct messages. Signed-off-by: Masahiro Yamada Acked-by: Viresh Kumar Reviewed-by: Stephen Boyd Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/opp/of.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 6480137bd256..5b3755e49731 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c @@ -562,7 +562,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, /* Get OPP descriptor node */ np = _of_get_opp_desc_node(cpu_dev); if (!np) { - dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__); + dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__); return -ENOENT; } @@ -587,7 +587,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, /* Get OPP descriptor node */ tmp_np = _of_get_opp_desc_node(tcpu_dev); if (!tmp_np) { - dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n", + dev_err(tcpu_dev, "%s: Couldn't find opp node.\n", __func__); ret = -ENOENT; goto put_cpu_node; -- cgit v1.2.3 From 62006c1702b3b1be0c0726949e0ee0ea2326be9c Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Mon, 17 Oct 2016 20:16:58 +0200 Subject: PM / Runtime: Remove the exported function pm_children_suspended() The exported function pm_children_suspended() has only one caller, which is the runtime PM internal function rpm_check_suspend_allowed(). Let's clean up this code by removing pm_children_suspended() altogether and instead doing the one-liner check directly in rpm_check_suspend_allowed(). Signed-off-by: Ulf Hansson Reviewed-by: Linus Walleij Signed-off-by: Rafael J. Wysocki --- drivers/base/power/runtime.c | 3 ++- include/linux/pm_runtime.h | 7 ------- 2 files changed, 2 insertions(+), 8 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 82a081ea4317..53b427dfc403 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -241,7 +241,8 @@ static int rpm_check_suspend_allowed(struct device *dev) retval = -EACCES; else if (atomic_read(&dev->power.usage_count) > 0) retval = -EAGAIN; - else if (!pm_children_suspended(dev)) + else if (!dev->power.ignore_children && + atomic_read(&dev->power.child_count)) retval = -EBUSY; /* Pending resume requests take precedence over suspends.
*/ diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 2e14d2667b6c..61ea5666c94c 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -61,12 +61,6 @@ static inline void pm_suspend_ignore_children(struct device *dev, bool enable) dev->power.ignore_children = enable; } -static inline bool pm_children_suspended(struct device *dev) -{ - return dev->power.ignore_children - || !atomic_read(&dev->power.child_count); -} - static inline void pm_runtime_get_noresume(struct device *dev) { atomic_inc(&dev->power.usage_count); @@ -162,7 +156,6 @@ static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} -static inline bool pm_children_suspended(struct device *dev) { return false; } static inline void pm_runtime_get_noresume(struct device *dev) {} static inline void pm_runtime_put_noidle(struct device *dev) {} static inline bool device_run_wake(struct device *dev) { return false; } -- cgit v1.2.3 From 216ef0b6b8c7f041a618913e94da52c3fdf82a99 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Mon, 17 Oct 2016 20:16:59 +0200 Subject: PM / Runtime: Clarify comment in rpm_resume() when resuming the parent Signed-off-by: Ulf Hansson Reviewed-by: Linus Walleij Signed-off-by: Rafael J. Wysocki --- drivers/base/power/runtime.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 53b427dfc403..117db71e84cb 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -713,8 +713,8 @@ static int rpm_resume(struct device *dev, int rpmflags) spin_lock(&parent->power.lock); /* - * We can resume if the parent's runtime PM is disabled or it - * is set to ignore children. + * Resume the parent if it has runtime PM enabled and not been + * set to ignore its children. */ if (!parent->power.disable_depth && !parent->power.ignore_children) { -- cgit v1.2.3 From 59d65b73a23cee48e6f3e44686f199d79b7ee854 Mon Sep 17 00:00:00 2001 From: Lina Iyer Date: Fri, 14 Oct 2016 10:47:49 -0700 Subject: PM / Domains: Make genpd state allocation dynamic Allow PM Domain states to be defined dynamically by the drivers. This removes the limitation on the maximum number of states possible for a domain. Suggested-by: Ulf Hansson Signed-off-by: Lina Iyer Acked-by: Ulf Hansson Reviewed-by: Kevin Hilman Signed-off-by: Rafael J. 
Wysocki --- arch/arm/mach-imx/gpc.c | 17 ++++++++++------- drivers/base/power/domain.c | 35 +++++++++++++++++++++++------------ include/linux/pm_domain.h | 5 ++--- 3 files changed, 35 insertions(+), 22 deletions(-) (limited to 'drivers/base') diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c index 0df062d8b2c9..57a410bbb6a2 100644 --- a/arch/arm/mach-imx/gpc.c +++ b/arch/arm/mach-imx/gpc.c @@ -380,13 +380,6 @@ static struct pu_domain imx6q_pu_domain = { .name = "PU", .power_off = imx6q_pm_pu_power_off, .power_on = imx6q_pm_pu_power_on, - .states = { - [0] = { - .power_off_latency_ns = 25000, - .power_on_latency_ns = 2000000, - }, - }, - .state_count = 1, }, }; @@ -430,6 +423,16 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg) if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) return 0; + imx6q_pu_domain.base.states = devm_kzalloc(dev, + sizeof(*imx6q_pu_domain.base.states), + GFP_KERNEL); + if (!imx6q_pu_domain.base.states) + return -ENOMEM; + + imx6q_pu_domain.base.states[0].power_off_latency_ns = 25000; + imx6q_pu_domain.base.states[0].power_on_latency_ns = 2000000; + imx6q_pu_domain.base.state_count = 1; + pm_genpd_init(&imx6q_pu_domain.base, NULL, false); return of_genpd_add_provider_onecell(dev->of_node, &imx_gpc_onecell_data); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index e023066e4215..37ab7f1ef178 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1282,6 +1282,21 @@ out: } EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); +static int genpd_set_default_power_state(struct generic_pm_domain *genpd) +{ + struct genpd_power_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + genpd->states = state; + genpd->state_count = 1; + genpd->free = state; + + return 0; +} + /** * pm_genpd_init - Initialize a generic I/O PM domain object. * @genpd: PM domain object to initialize. 
@@ -1293,6 +1308,8 @@ EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); int pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off) { + int ret; + if (IS_ERR_OR_NULL(genpd)) return -EINVAL; @@ -1325,19 +1342,12 @@ int pm_genpd_init(struct generic_pm_domain *genpd, genpd->dev_ops.start = pm_clk_resume; } - if (genpd->state_idx >= GENPD_MAX_NUM_STATES) { - pr_warn("Initial state index out of bounds.\n"); - genpd->state_idx = GENPD_MAX_NUM_STATES - 1; - } - - if (genpd->state_count > GENPD_MAX_NUM_STATES) { - pr_warn("Limiting states to %d\n", GENPD_MAX_NUM_STATES); - genpd->state_count = GENPD_MAX_NUM_STATES; - } - /* Use only one "off" state if there were no states declared */ - if (genpd->state_count == 0) - genpd->state_count = 1; + if (genpd->state_count == 0) { + ret = genpd_set_default_power_state(genpd); + if (ret) + return ret; + } mutex_lock(&gpd_list_lock); list_add(&genpd->gpd_list_node, &gpd_list); @@ -1377,6 +1387,7 @@ static int genpd_remove(struct generic_pm_domain *genpd) list_del(&genpd->gpd_list_node); mutex_unlock(&genpd->lock); cancel_work_sync(&genpd->power_off_work); + kfree(genpd->free); pr_debug("%s: removed %s\n", __func__, genpd->name); return 0; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index a09fe5c009c8..de1d8f331b03 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -19,8 +19,6 @@ /* Defines used for the flags field in the struct generic_pm_domain */ #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ -#define GENPD_MAX_NUM_STATES 8 /* Number of possible low power states */ - enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ GPD_STATE_POWER_OFF, /* PM domain is off */ @@ -70,9 +68,10 @@ struct generic_pm_domain { void (*detach_dev)(struct generic_pm_domain *domain, struct device *dev); unsigned int flags; /* Bit field of configs for genpd */ - struct genpd_power_state states[GENPD_MAX_NUM_STATES]; + struct genpd_power_state *states; unsigned int state_count; /* number of states */ unsigned int state_idx; /* state that genpd will go to when off */ + void *free; /* Free the state that was allocated for default */ }; -- cgit v1.2.3 From 30f604283e05d34cb10108c7ba017e5f4fc9d62c Mon Sep 17 00:00:00 2001 From: Lina Iyer Date: Fri, 14 Oct 2016 10:47:51 -0700 Subject: PM / Domains: Allow domain power states to be read from DT This patch allows domains to define idle states in the DT. SoC's can define domain idle states in DT using the "domain-idle-states" property of the domain provider. Add API to read the idle states from DT that can be set in the genpd object. This patch is based on the original patch by Marc Titinger. Signed-off-by: Marc Titinger Signed-off-by: Ulf Hansson Signed-off-by: Lina Iyer Reviewed-by: Kevin Hilman Signed-off-by: Rafael J. Wysocki --- drivers/base/power/domain.c | 94 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/pm_domain.h | 8 ++++ 2 files changed, 102 insertions(+) (limited to 'drivers/base') diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 37ab7f1ef178..9af75ba0472a 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1916,6 +1916,100 @@ out: return ret ? 
-EPROBE_DEFER : 0; } EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); + +static const struct of_device_id idle_state_match[] = { + { .compatible = "arm,idle-state", }, + { } +}; + +static int genpd_parse_state(struct genpd_power_state *genpd_state, + struct device_node *state_node) +{ + int err; + u32 residency; + u32 entry_latency, exit_latency; + const struct of_device_id *match_id; + + match_id = of_match_node(idle_state_match, state_node); + if (!match_id) + return -EINVAL; + + err = of_property_read_u32(state_node, "entry-latency-us", + &entry_latency); + if (err) { + pr_debug(" * %s missing entry-latency-us property\n", + state_node->full_name); + return -EINVAL; + } + + err = of_property_read_u32(state_node, "exit-latency-us", + &exit_latency); + if (err) { + pr_debug(" * %s missing exit-latency-us property\n", + state_node->full_name); + return -EINVAL; + } + + err = of_property_read_u32(state_node, "min-residency-us", &residency); + if (!err) + genpd_state->residency_ns = 1000 * residency; + + genpd_state->power_on_latency_ns = 1000 * exit_latency; + genpd_state->power_off_latency_ns = 1000 * entry_latency; + + return 0; +} + +/** + * of_genpd_parse_idle_states: Return array of idle states for the genpd. + * + * @dn: The genpd device node + * @states: The pointer to which the state array will be saved. + * @n: The count of elements in the array returned from this function. + * + * Returns the device states parsed from the OF node. The memory for the states + * is allocated by this function and is the responsibility of the caller to + * free the memory after use. + */ +int of_genpd_parse_idle_states(struct device_node *dn, + struct genpd_power_state **states, int *n) +{ + struct genpd_power_state *st; + struct device_node *np; + int i = 0; + int err, ret; + int count; + struct of_phandle_iterator it; + + count = of_count_phandle_with_args(dn, "domain-idle-states", NULL); + if (!count) + return -EINVAL; + + st = kcalloc(count, sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + /* Loop over the phandles until all the requested entry is found */ + of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) { + np = it.node; + ret = genpd_parse_state(&st[i++], np); + if (ret) { + pr_err + ("Parsing idle state node %s failed with err %d\n", + np->full_name, ret); + of_node_put(np); + kfree(st); + return ret; + } + } + + *n = count; + *states = st; + + return 0; +} +EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); + #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index f4492eb71701..b4894969fbec 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -205,6 +205,8 @@ extern int of_genpd_add_device(struct of_phandle_args *args, extern int of_genpd_add_subdomain(struct of_phandle_args *parent, struct of_phandle_args *new_subdomain); extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); +extern int of_genpd_parse_idle_states(struct device_node *dn, + struct genpd_power_state **states, int *n); int genpd_dev_pm_attach(struct device *dev); #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ @@ -234,6 +236,12 @@ static inline int of_genpd_add_subdomain(struct of_phandle_args *parent, return -ENODEV; } +static inline int of_genpd_parse_idle_states(struct device_node *dn, + struct genpd_power_state **states, int *n) +{ + return -ENODEV; +} + static inline int genpd_dev_pm_attach(struct device *dev) { return -ENODEV; -- cgit v1.2.3 From 0c9b694a8a7d4853318c4f2ce315afa2bd3664b6 Mon Sep 17 00:00:00 2001 
From: Lina Iyer Date: Fri, 14 Oct 2016 10:47:52 -0700 Subject: PM / Domains: Save the fwnode in genpd_power_state Save the fwnode for the genpd state in the state node. PM Domain clients may use the fwnode to read in the platform specific domain state properties and associate them with the state. Signed-off-by: Lina Iyer Acked-by: Ulf Hansson Reviewed-by: Kevin Hilman Signed-off-by: Rafael J. Wysocki --- drivers/base/power/domain.c | 1 + include/linux/pm_domain.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/base') diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 9af75ba0472a..1a6073aaca0e 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1956,6 +1956,7 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, genpd_state->power_on_latency_ns = 1000 * exit_latency; genpd_state->power_off_latency_ns = 1000 * entry_latency; + genpd_state->fwnode = &state_node->fwnode; return 0; } diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index b4894969fbec..6a8988166899 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -39,6 +39,7 @@ struct genpd_power_state { s64 power_off_latency_ns; s64 power_on_latency_ns; s64 residency_ns; + struct fwnode_handle *fwnode; }; struct generic_pm_domain { -- cgit v1.2.3 From 35241d12f750d2f1556a9c85f175ce7044716881 Mon Sep 17 00:00:00 2001 From: Lina Iyer Date: Fri, 14 Oct 2016 10:47:54 -0700 Subject: PM / Domains: Abstract genpd locking Abstract genpd lock/unlock calls, in preparation for domain specific locks added in the following patches. Signed-off-by: Lina Iyer Signed-off-by: Ulf Hansson Reviewed-by: Kevin Hilman Signed-off-by: Rafael J. Wysocki --- drivers/base/power/domain.c | 121 +++++++++++++++++++++++++++++--------------- include/linux/pm_domain.h | 5 +- 2 files changed, 85 insertions(+), 41 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 1a6073aaca0e..4194012cdf86 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -39,6 +39,46 @@ static LIST_HEAD(gpd_list); static DEFINE_MUTEX(gpd_list_lock); +struct genpd_lock_ops { + void (*lock)(struct generic_pm_domain *genpd); + void (*lock_nested)(struct generic_pm_domain *genpd, int depth); + int (*lock_interruptible)(struct generic_pm_domain *genpd); + void (*unlock)(struct generic_pm_domain *genpd); +}; + +static void genpd_lock_mtx(struct generic_pm_domain *genpd) +{ + mutex_lock(&genpd->mlock); +} + +static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd, + int depth) +{ + mutex_lock_nested(&genpd->mlock, depth); +} + +static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd) +{ + return mutex_lock_interruptible(&genpd->mlock); +} + +static void genpd_unlock_mtx(struct generic_pm_domain *genpd) +{ + return mutex_unlock(&genpd->mlock); +} + +static const struct genpd_lock_ops genpd_mtx_ops = { + .lock = genpd_lock_mtx, + .lock_nested = genpd_lock_nested_mtx, + .lock_interruptible = genpd_lock_interruptible_mtx, + .unlock = genpd_unlock_mtx, +}; + +#define genpd_lock(p) p->lock_ops->lock(p) +#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d) +#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p) +#define genpd_unlock(p) p->lock_ops->unlock(p) + /* * Get the generic PM domain for a particular struct device. 
* This validates the struct device pointer, the PM domain pointer, @@ -200,9 +240,9 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth) genpd_sd_counter_inc(master); - mutex_lock_nested(&master->lock, depth + 1); + genpd_lock_nested(master, depth + 1); ret = genpd_poweron(master, depth + 1); - mutex_unlock(&master->lock); + genpd_unlock(master); if (ret) { genpd_sd_counter_dec(master); @@ -255,9 +295,9 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, spin_unlock_irq(&dev->power.lock); if (!IS_ERR(genpd)) { - mutex_lock(&genpd->lock); + genpd_lock(genpd); genpd->max_off_time_changed = true; - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); } dev = dev->parent; @@ -354,9 +394,9 @@ static void genpd_power_off_work_fn(struct work_struct *work) genpd = container_of(work, struct generic_pm_domain, power_off_work); - mutex_lock(&genpd->lock); + genpd_lock(genpd); genpd_poweroff(genpd, true); - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); } /** @@ -472,9 +512,9 @@ static int genpd_runtime_suspend(struct device *dev) if (dev->power.irq_safe) return 0; - mutex_lock(&genpd->lock); + genpd_lock(genpd); genpd_poweroff(genpd, false); - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); return 0; } @@ -509,9 +549,9 @@ static int genpd_runtime_resume(struct device *dev) goto out; } - mutex_lock(&genpd->lock); + genpd_lock(genpd); ret = genpd_poweron(genpd, 0); - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); if (ret) return ret; @@ -547,9 +587,9 @@ err_stop: genpd_stop_dev(genpd, dev); err_poweroff: if (!dev->power.irq_safe) { - mutex_lock(&genpd->lock); + genpd_lock(genpd); genpd_poweroff(genpd, 0); - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); } return ret; @@ -732,20 +772,20 @@ static int pm_genpd_prepare(struct device *dev) if (resume_needed(dev, genpd)) pm_runtime_resume(dev); - mutex_lock(&genpd->lock); + genpd_lock(genpd); if (genpd->prepared_count++ == 0) genpd->suspended_count = 0; - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); ret = pm_generic_prepare(dev); if (ret) { - mutex_lock(&genpd->lock); + genpd_lock(genpd); genpd->prepared_count--; - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); } return ret; @@ -936,13 +976,13 @@ static void pm_genpd_complete(struct device *dev) pm_generic_complete(dev); - mutex_lock(&genpd->lock); + genpd_lock(genpd); genpd->prepared_count--; if (!genpd->prepared_count) genpd_queue_power_off_work(genpd); - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); } /** @@ -1071,7 +1111,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (IS_ERR(gpd_data)) return PTR_ERR(gpd_data); - mutex_lock(&genpd->lock); + genpd_lock(genpd); if (genpd->prepared_count > 0) { ret = -EAGAIN; @@ -1088,7 +1128,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); out: - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); if (ret) genpd_free_dev_data(dev, gpd_data); @@ -1130,7 +1170,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, gpd_data = to_gpd_data(pdd); dev_pm_qos_remove_notifier(dev, &gpd_data->nb); - mutex_lock(&genpd->lock); + genpd_lock(genpd); if (genpd->prepared_count > 0) { ret = -EAGAIN; @@ -1145,14 +1185,14 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, list_del_init(&pdd->list_node); - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); genpd_free_dev_data(dev, gpd_data); return 0; out: - mutex_unlock(&genpd->lock); + 
genpd_unlock(genpd); dev_pm_qos_add_notifier(dev, &gpd_data->nb); return ret; @@ -1187,8 +1227,8 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd, if (!link) return -ENOMEM; - mutex_lock(&subdomain->lock); - mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); + genpd_lock(subdomain); + genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); if (genpd->status == GPD_STATE_POWER_OFF && subdomain->status != GPD_STATE_POWER_OFF) { @@ -1211,8 +1251,8 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd, genpd_sd_counter_inc(genpd); out: - mutex_unlock(&genpd->lock); - mutex_unlock(&subdomain->lock); + genpd_unlock(genpd); + genpd_unlock(subdomain); if (ret) kfree(link); return ret; @@ -1250,8 +1290,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) return -EINVAL; - mutex_lock(&subdomain->lock); - mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); + genpd_lock(subdomain); + genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); if (!list_empty(&subdomain->master_links) || subdomain->device_count) { pr_warn("%s: unable to remove subdomain %s\n", genpd->name, @@ -1275,8 +1315,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, } out: - mutex_unlock(&genpd->lock); - mutex_unlock(&subdomain->lock); + genpd_unlock(genpd); + genpd_unlock(subdomain); return ret; } @@ -1316,7 +1356,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd, INIT_LIST_HEAD(&genpd->master_links); INIT_LIST_HEAD(&genpd->slave_links); INIT_LIST_HEAD(&genpd->dev_list); - mutex_init(&genpd->lock); + mutex_init(&genpd->mlock); + genpd->lock_ops = &genpd_mtx_ops; genpd->gov = gov; INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); atomic_set(&genpd->sd_count, 0); @@ -1364,16 +1405,16 @@ static int genpd_remove(struct generic_pm_domain *genpd) if (IS_ERR_OR_NULL(genpd)) return -EINVAL; - mutex_lock(&genpd->lock); + genpd_lock(genpd); if (genpd->has_provider) { - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); pr_err("Provider present, unable to remove %s\n", genpd->name); return -EBUSY; } if (!list_empty(&genpd->master_links) || genpd->device_count) { - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); pr_err("%s: unable to remove %s\n", __func__, genpd->name); return -EBUSY; } @@ -1385,7 +1426,7 @@ static int genpd_remove(struct generic_pm_domain *genpd) } list_del(&genpd->gpd_list_node); - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); cancel_work_sync(&genpd->power_off_work); kfree(genpd->free); pr_debug("%s: removed %s\n", __func__, genpd->name); @@ -1909,9 +1950,9 @@ int genpd_dev_pm_attach(struct device *dev) dev->pm_domain->detach = genpd_dev_pm_detach; dev->pm_domain->sync = genpd_dev_pm_sync; - mutex_lock(&pd->lock); + genpd_lock(pd); ret = genpd_poweron(pd, 0); - mutex_unlock(&pd->lock); + genpd_unlock(pd); out: return ret ? 
-EPROBE_DEFER : 0; } @@ -2064,7 +2105,7 @@ static int pm_genpd_summary_one(struct seq_file *s, char state[16]; int ret; - ret = mutex_lock_interruptible(&genpd->lock); + ret = genpd_lock_interruptible(genpd); if (ret) return -ERESTARTSYS; @@ -2101,7 +2142,7 @@ static int pm_genpd_summary_one(struct seq_file *s, seq_puts(s, "\n"); exit: - mutex_unlock(&genpd->lock); + genpd_unlock(genpd); return 0; } diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 6a8988166899..811b968eb740 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -42,13 +42,14 @@ struct genpd_power_state { struct fwnode_handle *fwnode; }; +struct genpd_lock_ops; + struct generic_pm_domain { struct dev_pm_domain domain; /* PM domain operations */ struct list_head gpd_list_node; /* Node in the global PM domains list */ struct list_head master_links; /* Links with PM domain as a master */ struct list_head slave_links; /* Links with PM domain as a slave */ struct list_head dev_list; /* List of devices */ - struct mutex lock; struct dev_power_governor *gov; struct work_struct power_off_work; struct fwnode_handle *provider; /* Identity of the domain provider */ @@ -74,6 +75,8 @@ struct generic_pm_domain { unsigned int state_count; /* number of states */ unsigned int state_idx; /* state that genpd will go to when off */ void *free; /* Free the state that was allocated for default */ + const struct genpd_lock_ops *lock_ops; + struct mutex mlock; }; -- cgit v1.2.3 From d716f4798ff8c65ace4a6ab291f9a4ff265df4ba Mon Sep 17 00:00:00 2001 From: Lina Iyer Date: Fri, 14 Oct 2016 10:47:55 -0700 Subject: PM / Domains: Support IRQ safe PM domains Generic Power Domains currently support turning on/off only in process context. This prevents the usage of PM domains for domains that could be powered on/off in a context where IRQs are disabled. Many such domains exist today and do not get powered off, when the IRQ safe devices in that domain are powered off, because of this limitation. However, not all domains can operate in IRQ safe contexts. Genpd therefore, has to support both cases where the domain may or may not operate in IRQ safe contexts. Configuring genpd to use an appropriate lock for that domain, would allow domains that have IRQ safe devices to runtime suspend and resume, in atomic context. To achieve domain specific locking, set the domain's ->flag to GENPD_FLAG_IRQ_SAFE while defining the domain. This indicates that genpd should use a spinlock instead of a mutex for locking the domain. Locking is abstracted through genpd_lock() and genpd_unlock() functions that use the flag to determine the appropriate lock to be used for that domain. Domains that have lower latency to suspend and resume and can operate with IRQs disabled may now be able to save power, when the component devices and sub-domains are idle at runtime. The restriction this imposes on the domain hierarchy is that non-IRQ safe domains may not have IRQ-safe subdomains, but IRQ safe domains may have IRQ safe and non-IRQ safe subdomains and devices. Signed-off-by: Lina Iyer Acked-by: Ulf Hansson Reviewed-by: Kevin Hilman Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/domain.c | 111 ++++++++++++++++++++++++++++++++++++++++---- include/linux/pm_domain.h | 10 +++- 2 files changed, 110 insertions(+), 11 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 4194012cdf86..aac656a889dc 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -74,11 +74,70 @@ static const struct genpd_lock_ops genpd_mtx_ops = { .unlock = genpd_unlock_mtx, }; +static void genpd_lock_spin(struct generic_pm_domain *genpd) + __acquires(&genpd->slock) +{ + unsigned long flags; + + spin_lock_irqsave(&genpd->slock, flags); + genpd->lock_flags = flags; +} + +static void genpd_lock_nested_spin(struct generic_pm_domain *genpd, + int depth) + __acquires(&genpd->slock) +{ + unsigned long flags; + + spin_lock_irqsave_nested(&genpd->slock, flags, depth); + genpd->lock_flags = flags; +} + +static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd) + __acquires(&genpd->slock) +{ + unsigned long flags; + + spin_lock_irqsave(&genpd->slock, flags); + genpd->lock_flags = flags; + return 0; +} + +static void genpd_unlock_spin(struct generic_pm_domain *genpd) + __releases(&genpd->slock) +{ + spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags); +} + +static const struct genpd_lock_ops genpd_spin_ops = { + .lock = genpd_lock_spin, + .lock_nested = genpd_lock_nested_spin, + .lock_interruptible = genpd_lock_interruptible_spin, + .unlock = genpd_unlock_spin, +}; + #define genpd_lock(p) p->lock_ops->lock(p) #define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d) #define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p) #define genpd_unlock(p) p->lock_ops->unlock(p) +#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE) + +static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev, + struct generic_pm_domain *genpd) +{ + bool ret; + + ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd); + + /* Warn once for each IRQ safe dev in no sleep domain */ + if (ret) + dev_warn_once(dev, "PM domain %s will not be powered off\n", + genpd->name); + + return ret; +} + /* * Get the generic PM domain for a particular struct device. * This validates the struct device pointer, the PM domain pointer, @@ -343,7 +402,12 @@ static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async) if (stat > PM_QOS_FLAGS_NONE) return -EBUSY; - if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe) + /* + * Do not allow PM domain to be powered off, when an IRQ safe + * device is part of a non-IRQ safe domain. + */ + if (!pm_runtime_suspended(pdd->dev) || + irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) not_suspended++; } @@ -506,10 +570,10 @@ static int genpd_runtime_suspend(struct device *dev) } /* - * If power.irq_safe is set, this routine will be run with interrupts - * off, so it can't use mutexes. + * If power.irq_safe is set, this routine may be run with + * IRQs disabled, so suspend only if the PM domain also is irq_safe. */ - if (dev->power.irq_safe) + if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) return 0; genpd_lock(genpd); @@ -543,8 +607,11 @@ static int genpd_runtime_resume(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - /* If power.irq_safe, the PM domain is never powered off. */ - if (dev->power.irq_safe) { + /* + * As we don't power off a non IRQ safe domain, which holds + * an IRQ safe device, we don't need to restore power to it. 
+ */ + if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) { timed = false; goto out; } @@ -586,7 +653,8 @@ static int genpd_runtime_resume(struct device *dev) err_stop: genpd_stop_dev(genpd, dev); err_poweroff: - if (!dev->power.irq_safe) { + if (!pm_runtime_is_irq_safe(dev) || + (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) { genpd_lock(genpd); genpd_poweroff(genpd, 0); genpd_unlock(genpd); @@ -1223,6 +1291,17 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd, || genpd == subdomain) return -EINVAL; + /* + * If the domain can be powered on/off in an IRQ safe + * context, ensure that the subdomain can also be + * powered on/off in that context. + */ + if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) { + WARN("Parent %s of subdomain %s must be IRQ safe\n", + genpd->name, subdomain->name); + return -EINVAL; + } + link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) return -ENOMEM; @@ -1337,6 +1416,17 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd) return 0; } +static void genpd_lock_init(struct generic_pm_domain *genpd) +{ + if (genpd->flags & GENPD_FLAG_IRQ_SAFE) { + spin_lock_init(&genpd->slock); + genpd->lock_ops = &genpd_spin_ops; + } else { + mutex_init(&genpd->mlock); + genpd->lock_ops = &genpd_mtx_ops; + } +} + /** * pm_genpd_init - Initialize a generic I/O PM domain object. * @genpd: PM domain object to initialize. @@ -1356,8 +1446,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd, INIT_LIST_HEAD(&genpd->master_links); INIT_LIST_HEAD(&genpd->slave_links); INIT_LIST_HEAD(&genpd->dev_list); - mutex_init(&genpd->mlock); - genpd->lock_ops = &genpd_mtx_ops; + genpd_lock_init(genpd); genpd->gov = gov; INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); atomic_set(&genpd->sd_count, 0); @@ -2131,7 +2220,9 @@ static int pm_genpd_summary_one(struct seq_file *s, } list_for_each_entry(pm_data, &genpd->dev_list, list_node) { - kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL); + kobj_path = kobject_get_path(&pm_data->dev->kobj, + genpd_is_irq_safe(genpd) ? + GFP_ATOMIC : GFP_KERNEL); if (kobj_path == NULL) continue; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 811b968eb740..81ece61075df 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -15,9 +15,11 @@ #include #include #include +#include /* Defines used for the flags field in the struct generic_pm_domain */ #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ +#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */ enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ @@ -76,7 +78,13 @@ struct generic_pm_domain { unsigned int state_idx; /* state that genpd will go to when off */ void *free; /* Free the state that was allocated for default */ const struct genpd_lock_ops *lock_ops; - struct mutex mlock; + union { + struct mutex mlock; + struct { + spinlock_t slock; + unsigned long lock_flags; + }; + }; }; -- cgit v1.2.3 From a1fee00dc95644e0590b4f1a3755c9f6b1243b3a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 25 Oct 2016 17:33:27 +0100 Subject: PM / Domains: check for negative return from of_count_phandle_with_args() The return from of_count_phandle_with_args can be negative, so we should avoid kcalloc of a negative count of genpd_power_stat structs by sanity checking if count is zero or less. Signed-off-by: Colin Ian King Acked-by: Kevin Hilman Acked-by: Pavel Machek Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/domain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/base') diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index aac656a889dc..661737c2bae0 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2113,7 +2113,7 @@ int of_genpd_parse_idle_states(struct device_node *dn, struct of_phandle_iterator it; count = of_count_phandle_with_args(dn, "domain-idle-states", NULL); - if (!count) + if (count <= 0) return -EINVAL; st = kcalloc(count, sizeof(*st), GFP_KERNEL); -- cgit v1.2.3 From a8636c89648ab12e59d8f3aa667ec76fc96fd643 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Mon, 17 Oct 2016 20:17:01 +0200 Subject: PM / Runtime: Don't allow to suspend a device with an active child When resuming a device in __pm_runtime_set_status(), the prerequisite is that its parent must already be active; otherwise an error code is returned and the device's status remains suspended. When suspending a device, no similar constraint is validated. Let's change this to make the behaviour consistent, by not allowing a device with an active child to be suspended, unless the device has been explicitly set to ignore its children. Signed-off-by: Ulf Hansson Reviewed-by: Linus Walleij Signed-off-by: Rafael J. Wysocki --- drivers/base/power/runtime.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers/base') diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 117db71e84cb..60ebb04d8140 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1028,7 +1028,17 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) goto out_set; if (status == RPM_SUSPENDED) { - /* It always is possible to set the status to 'suspended'. */ + /* + * It is invalid to suspend a device with an active child, + * unless it has been set to ignore its children. + */ + if (!dev->power.ignore_children && + atomic_read(&dev->power.child_count)) { + dev_err(dev, "runtime PM trying to suspend device but active child\n"); + error = -EBUSY; + goto out; + } + if (parent) { atomic_add_unless(&parent->power.child_count, -1, 0); notify_parent = !parent->power.ignore_children; -- cgit v1.2.3 From 44cae7d5a1b4a4e4db24eae478a9cfba0ea2b9d2 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 10 Nov 2016 15:52:15 +0300 Subject: PM / Domains: Fix a warning message The first argument of WARN() is the condition, followed by the message. Signed-off-by: Dan Carpenter Acked-by: Pavel Machek Acked-by: Ulf Hansson Signed-off-by: Rafael J. Wysocki --- drivers/base/power/domain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/base') diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 661737c2bae0..7b4d41ff9506 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1297,7 +1297,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd, * powered on/off in that context.
*/ if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) { - WARN("Parent %s of subdomain %s must be IRQ safe\n", + WARN(1, "Parent %s of subdomain %s must be IRQ safe\n", genpd->name, subdomain->name); return -EINVAL; } -- cgit v1.2.3 From 1d9174fbc55ec99ccbfcafa3de2528ef78a849aa Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Thu, 13 Oct 2016 16:58:54 +0200 Subject: PM / Runtime: Defer resuming of the device in pm_runtime_force_resume() When the pm_runtime_force_suspend|resume() helpers were invented, we still had CONFIG_PM_RUNTIME and CONFIG_PM_SLEEP as separate Kconfig options. To make sure these helpers worked for all combinations and without introducing too much of complexity, the device was always resumed in pm_runtime_force_resume(). More precisely, when CONFIG_PM_SLEEP was set and CONFIG_PM_RUNTIME was unset, we needed to resume the device as the subsystem/driver couldn't rely on using runtime PM to do it. As the CONFIG_PM_RUNTIME option was merged into CONFIG_PM a while ago, it removed this combination, of using CONFIG_PM_SLEEP without the earlier CONFIG_PM_RUNTIME. For this reason we can now rely on the subsystem/driver to use runtime PM to resume the device, instead of forcing that to be done in all cases. In other words, let's defer the runtime resume to a later point when it's actually needed. Signed-off-by: Ulf Hansson Tested-by: Marek Szyprowski Tested-by: Geert Uytterhoeven Acked-by: Kevin Hilman Signed-off-by: Rafael J. Wysocki --- drivers/base/power/runtime.c | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 60ebb04d8140..f0d863089345 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1489,6 +1489,16 @@ int pm_runtime_force_suspend(struct device *dev) if (ret) goto err; + /* + * Increase the runtime PM usage count for the device's parent, in case + * when we find the device being used when system suspend was invoked. + * This informs pm_runtime_force_resume() to resume the parent + * immediately, which is needed to be able to resume its children, + * when not deferring the resume to be managed via runtime PM. + */ + if (dev->parent && atomic_read(&dev->power.usage_count) > 1) + pm_runtime_get_noresume(dev->parent); + pm_runtime_set_suspended(dev); return 0; err: @@ -1498,16 +1508,20 @@ err: EXPORT_SYMBOL_GPL(pm_runtime_force_suspend); /** - * pm_runtime_force_resume - Force a device into resume state. + * pm_runtime_force_resume - Force a device into resume state if needed. * @dev: Device to resume. * * Prior invoking this function we expect the user to have brought the device * into low power state by a call to pm_runtime_force_suspend(). Here we reverse - * those actions and brings the device into full power. We update the runtime PM - * status and re-enables runtime PM. + * those actions and brings the device into full power, if it is expected to be + * used on system resume. To distinguish that, we check whether the runtime PM + * usage count is greater than 1 (the PM core increases the usage count in the + * system PM prepare phase), as that indicates a real user (such as a subsystem, + * driver, userspace, etc.) is using it. If that is the case, the device is + * expected to be used on system resume as well, so then we resume it. In the + * other case, we defer the resume to be managed via runtime PM. 
* - * Typically this function may be invoked from a system resume callback to make - * sure the device is put into full power state. + * Typically this function may be invoked from a system resume callback. */ int pm_runtime_force_resume(struct device *dev) { @@ -1524,6 +1538,17 @@ int pm_runtime_force_resume(struct device *dev) if (!pm_runtime_status_suspended(dev)) goto out; + /* + * Decrease the parent's runtime PM usage count, if we increased it + * during system suspend in pm_runtime_force_suspend(). + */ + if (atomic_read(&dev->power.usage_count) > 1) { + if (dev->parent) + pm_runtime_put_noidle(dev->parent); + } else { + goto out; + } + ret = pm_runtime_set_active(dev); if (ret) goto out; -- cgit v1.2.3 From ee061da8d9dfc30ced06f25c18694cffa70eac1e Mon Sep 17 00:00:00 2001 From: Andrew Lutomirski Date: Tue, 29 Nov 2016 17:11:50 -0800 Subject: PM / QoS: Improve sysfs pm_qos_latency_tolerance validation Negative values are special. Don't let users write them directly. Signed-off-by: Andy Lutomirski Signed-off-by: Rafael J. Wysocki --- drivers/base/power/sysfs.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/base') diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index a7b46798c81d..33b4b902741a 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -263,7 +263,11 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev, s32 value; int ret; - if (kstrtos32(buf, 0, &value)) { + if (kstrtos32(buf, 0, &value) == 0) { + /* Users can't write negative values directly */ + if (value < 0) + return -EINVAL; + } else { if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n")) value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) -- cgit v1.2.3 From 80a6f7c79b7822726a096ce9e01cc00a1eacc2c4 Mon Sep 17 00:00:00 2001 From: Andrew Lutomirski Date: Tue, 29 Nov 2016 17:11:51 -0800 Subject: PM / QoS: Fix writing 'auto' to pm_qos_latency_tolerance_us If it was already 'auto', then writing 'auto' again would incorrectly fail. Signed-off-by: Andy Lutomirski Signed-off-by: Rafael J. Wysocki --- drivers/base/power/qos.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/base') diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 7f3646e459cb..6a1f2c7e01ea 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -856,7 +856,10 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) struct dev_pm_qos_request *req; if (val < 0) { - ret = -EINVAL; + if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT) + ret = 0; + else + ret = -EINVAL; goto out; } req = kzalloc(sizeof(*req), GFP_KERNEL); -- cgit v1.2.3 From 034e7906211c18c230ef4da43a1c44796dd5b95e Mon Sep 17 00:00:00 2001 From: Andrew Lutomirski Date: Tue, 29 Nov 2016 17:11:52 -0800 Subject: PM / QoS: Export dev_pm_qos_update_user_latency_tolerance nvme wants a module parameter that overrides the default latency tolerance. This makes it easy for nvme to reflect that default in sysfs. Signed-off-by: Andy Lutomirski Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/qos.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/base') diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 6a1f2c7e01ea..58fcc758334e 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -886,6 +886,7 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) mutex_unlock(&dev_pm_qos_mtx); return ret; } +EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance); /** * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace -- cgit v1.2.3 From 34994692216b5af2f05858324c3b4863cbaf41cf Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 30 Nov 2016 13:24:56 +0100 Subject: PM / Domains: Do not print PM domain add error message if EPROBE_DEFER EPROBE_DEFER is not an error, hence printing an error message like "renesas_irqc e61c0000.interrupt-controller: failed to add to PM domain always-on: -517" may confuse the user. Suppress the error message in case of EPROBE_DEFER to fix this. Reported-by: Yoshihiro Shimoda Signed-off-by: Geert Uytterhoeven Reviewed-by: Simon Horman Acked-by: Kevin Hilman Signed-off-by: Rafael J. Wysocki --- drivers/base/power/domain.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 7b4d41ff9506..46e7f6052c9b 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2031,8 +2031,9 @@ int genpd_dev_pm_attach(struct device *dev) mutex_unlock(&gpd_list_lock); if (ret < 0) { - dev_err(dev, "failed to add to PM domain %s: %d", - pd->name, ret); + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to add to PM domain %s: %d", + pd->name, ret); goto out; } -- cgit v1.2.3 From 91291d9ad92faa65a56a9a19d658d8049b78d3d4 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 30 Nov 2016 16:21:25 +0530 Subject: PM / OPP: Pass opp_table to dev_pm_opp_put_regulator() Joonyoung Shim reported an interesting problem on his ARM octa-core Odroid-XU3 platform. During system suspend, dev_pm_opp_put_regulator() was failing for a struct device for which dev_pm_opp_set_regulator() had been called earlier. This happened because an earlier call to the dev_pm_opp_of_cpumask_remove_table() function (from the cpufreq-dt.c file) removed all the entries from opp_table->dev_list apart from the last CPU device in the cpumask of CPUs sharing the OPP. But both the dev_pm_opp_set_regulator() and dev_pm_opp_put_regulator() routines get the CPU device for the first CPU in the cpumask, and so the OPP core failed to find the OPP table for the struct device. This patch attempts to fix this problem by returning a pointer to the opp_table from dev_pm_opp_set_regulator() and using that as the parameter to dev_pm_opp_put_regulator(). This ensures that dev_pm_opp_put_regulator() doesn't fail to find the opp table. Note that a similar design problem also exists with the other dev_pm_opp_put_*() APIs, but those aren't currently used by anyone, so we don't need to update them for now. Cc: 4.4+ # 4.4+ Reported-by: Joonyoung Shim Signed-off-by: Stephen Boyd Signed-off-by: Viresh Kumar [ Viresh: Wrote commit log and tested on exynos 5250 ] Signed-off-by: Rafael J.
Wysocki --- drivers/base/power/opp/core.c | 22 ++++++---------------- drivers/cpufreq/cpufreq-dt.c | 12 ++++++++---- include/linux/pm_opp.h | 11 ++++++----- 3 files changed, 20 insertions(+), 25 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 4c7c6da7a989..2824d3a5e9f0 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -1316,7 +1316,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name); * that this function is *NOT* called under RCU protection or in contexts where * mutex cannot be locked. */ -int dev_pm_opp_set_regulator(struct device *dev, const char *name) +struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name) { struct opp_table *opp_table; struct regulator *reg; @@ -1354,20 +1354,20 @@ int dev_pm_opp_set_regulator(struct device *dev, const char *name) opp_table->regulator = reg; mutex_unlock(&opp_table_lock); - return 0; + return opp_table; err: _remove_opp_table(opp_table); unlock: mutex_unlock(&opp_table_lock); - return ret; + return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator); /** * dev_pm_opp_put_regulator() - Releases resources blocked for regulator - * @dev: Device for which regulator was set. + * @opp_table: OPP table returned from dev_pm_opp_set_regulator(). * * Locking: The internal opp_table and opp structures are RCU protected. * Hence this function internally uses RCU updater strategy with mutex locks @@ -1375,22 +1375,12 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator); * that this function is *NOT* called under RCU protection or in contexts where * mutex cannot be locked. */ -void dev_pm_opp_put_regulator(struct device *dev) +void dev_pm_opp_put_regulator(struct opp_table *opp_table) { - struct opp_table *opp_table; - mutex_lock(&opp_table_lock); - /* Check for existing table for 'dev' first */ - opp_table = _find_opp_table(dev); - if (IS_ERR(opp_table)) { - dev_err(dev, "Failed to find opp_table: %ld\n", - PTR_ERR(opp_table)); - goto unlock; - } - if (IS_ERR(opp_table->regulator)) { - dev_err(dev, "%s: Doesn't have regulator set\n", __func__); + pr_err("%s: Doesn't have regulator set\n", __func__); goto unlock; } diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 5c07ae05d69a..4d3ec92cbabf 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -28,6 +28,7 @@ #include "cpufreq-dt.h" struct private_data { + struct opp_table *opp_table; struct device *cpu_dev; struct thermal_cooling_device *cdev; const char *reg_name; @@ -143,6 +144,7 @@ static int resources_available(void) static int cpufreq_init(struct cpufreq_policy *policy) { struct cpufreq_frequency_table *freq_table; + struct opp_table *opp_table = NULL; struct private_data *priv; struct device *cpu_dev; struct clk *cpu_clk; @@ -186,8 +188,9 @@ static int cpufreq_init(struct cpufreq_policy *policy) */ name = find_supply_name(cpu_dev); if (name) { - ret = dev_pm_opp_set_regulator(cpu_dev, name); - if (ret) { + opp_table = dev_pm_opp_set_regulator(cpu_dev, name); + if (IS_ERR(opp_table)) { + ret = PTR_ERR(opp_table); dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n", policy->cpu, ret); goto out_put_clk; @@ -237,6 +240,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) } priv->reg_name = name; + priv->opp_table = opp_table; ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { @@ -285,7 +289,7 @@ out_free_priv: out_free_opp: dev_pm_opp_of_cpumask_remove_table(policy->cpus); if (name) - 
dev_pm_opp_put_regulator(cpu_dev); + dev_pm_opp_put_regulator(opp_table); out_put_clk: clk_put(cpu_clk); @@ -300,7 +304,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy) dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); if (priv->reg_name) - dev_pm_opp_put_regulator(priv->cpu_dev); + dev_pm_opp_put_regulator(priv->opp_table); clk_put(policy->clk); kfree(priv); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index bca26157f5b6..f6bc76501912 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -19,6 +19,7 @@ struct dev_pm_opp; struct device; +struct opp_table; enum dev_pm_opp_event { OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, @@ -62,8 +63,8 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, void dev_pm_opp_put_supported_hw(struct device *dev); int dev_pm_opp_set_prop_name(struct device *dev, const char *name); void dev_pm_opp_put_prop_name(struct device *dev); -int dev_pm_opp_set_regulator(struct device *dev, const char *name); -void dev_pm_opp_put_regulator(struct device *dev); +struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name); +void dev_pm_opp_put_regulator(struct opp_table *opp_table); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); @@ -170,12 +171,12 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) static inline void dev_pm_opp_put_prop_name(struct device *dev) {} -static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name) +static inline struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name) { - return -ENOTSUPP; + return ERR_PTR(-ENOTSUPP); } -static inline void dev_pm_opp_put_regulator(struct device *dev) {} +static inline void dev_pm_opp_put_regulator(struct opp_table *opp_table) {} static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { -- cgit v1.2.3 From dc39d06fcd7a4a82d72eae7b71e94e888b96d29e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 1 Dec 2016 16:28:16 +0530 Subject: PM / OPP: Don't use OPP structure outside of rcu protected section The OPP structure must not be used out of the rcu protected section. Cache the values to be used in separate variables instead. Cc: 4.6+ # 4.6+ Signed-off-by: Viresh Kumar Reviewed-by: Stephen Boyd Tested-by: Dave Gerlach Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/opp/core.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 2824d3a5e9f0..6441dfda489f 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -584,6 +584,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) struct clk *clk; unsigned long freq, old_freq; unsigned long u_volt, u_volt_min, u_volt_max; + unsigned long old_u_volt, old_u_volt_min, old_u_volt_max; int ret; if (unlikely(!target_freq)) { @@ -633,6 +634,14 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) return ret; } + if (IS_ERR(old_opp)) { + old_u_volt = 0; + } else { + old_u_volt = old_opp->u_volt; + old_u_volt_min = old_opp->u_volt_min; + old_u_volt_max = old_opp->u_volt_max; + } + u_volt = opp->u_volt; u_volt_min = opp->u_volt_min; u_volt_max = opp->u_volt_max; @@ -677,9 +686,10 @@ restore_freq: __func__, old_freq); restore_voltage: /* This shouldn't harm even if the voltages weren't updated earlier */ - if (!IS_ERR(old_opp)) - _set_opp_voltage(dev, reg, old_opp->u_volt, - old_opp->u_volt_min, old_opp->u_volt_max); + if (old_u_volt) { + _set_opp_voltage(dev, reg, old_u_volt, old_u_volt_min, + old_u_volt_max); + } return ret; } -- cgit v1.2.3 From 0f0fe7e01327b3d524787a2e8b7e78f010db2bb8 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 1 Dec 2016 16:28:17 +0530 Subject: PM / OPP: Manage supply's voltage/current in a separate structure This is a preparatory step for multiple regulator per device support. Move the voltage/current variables to a new structure. Signed-off-by: Viresh Kumar Tested-by: Dave Gerlach Reviewed-by: Stephen Boyd Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/opp/core.c | 44 +++++++++++++++++++++------------------- drivers/base/power/opp/debugfs.c | 8 ++++---- drivers/base/power/opp/of.c | 18 ++++++++-------- drivers/base/power/opp/opp.h | 11 +++------- include/linux/pm_opp.h | 16 +++++++++++++++ 5 files changed, 55 insertions(+), 42 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 6441dfda489f..188048d3d203 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -112,7 +112,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) if (IS_ERR_OR_NULL(tmp_opp)) pr_err("%s: Invalid parameters\n", __func__); else - v = tmp_opp->u_volt; + v = tmp_opp->supply.u_volt; return v; } @@ -246,10 +246,10 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) if (!opp->available) continue; - if (opp->u_volt_min < min_uV) - min_uV = opp->u_volt_min; - if (opp->u_volt_max > max_uV) - max_uV = opp->u_volt_max; + if (opp->supply.u_volt_min < min_uV) + min_uV = opp->supply.u_volt_min; + if (opp->supply.u_volt_max > max_uV) + max_uV = opp->supply.u_volt_max; } rcu_read_unlock(); @@ -637,14 +637,14 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) if (IS_ERR(old_opp)) { old_u_volt = 0; } else { - old_u_volt = old_opp->u_volt; - old_u_volt_min = old_opp->u_volt_min; - old_u_volt_max = old_opp->u_volt_max; + old_u_volt = old_opp->supply.u_volt; + old_u_volt_min = old_opp->supply.u_volt_min; + old_u_volt_max = old_opp->supply.u_volt_max; } - u_volt = opp->u_volt; - u_volt_min = opp->u_volt_min; - u_volt_max = opp->u_volt_max; + u_volt = opp->supply.u_volt; + u_volt_min = opp->supply.u_volt_min; + u_volt_max = opp->supply.u_volt_max; reg = opp_table->regulator; @@ -957,10 +957,11 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp, struct regulator *reg = opp_table->regulator; if (!IS_ERR(reg) && - !regulator_is_supported_voltage(reg, opp->u_volt_min, - opp->u_volt_max)) { + !regulator_is_supported_voltage(reg, opp->supply.u_volt_min, + opp->supply.u_volt_max)) { pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n", - __func__, opp->u_volt_min, opp->u_volt_max); + __func__, opp->supply.u_volt_min, + opp->supply.u_volt_max); return false; } @@ -993,11 +994,12 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, /* Duplicate OPPs */ dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", - __func__, opp->rate, opp->u_volt, opp->available, - new_opp->rate, new_opp->u_volt, new_opp->available); + __func__, opp->rate, opp->supply.u_volt, + opp->available, new_opp->rate, new_opp->supply.u_volt, + new_opp->available); - return opp->available && new_opp->u_volt == opp->u_volt ? - 0 : -EEXIST; + return opp->available && + new_opp->supply.u_volt == opp->supply.u_volt ? 
0 : -EEXIST; } new_opp->opp_table = opp_table; @@ -1064,9 +1066,9 @@ int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, /* populate the opp table */ new_opp->rate = freq; tol = u_volt * opp_table->voltage_tolerance_v1 / 100; - new_opp->u_volt = u_volt; - new_opp->u_volt_min = u_volt - tol; - new_opp->u_volt_max = u_volt + tol; + new_opp->supply.u_volt = u_volt; + new_opp->supply.u_volt_min = u_volt - tol; + new_opp->supply.u_volt_max = u_volt + tol; new_opp->available = true; new_opp->dynamic = dynamic; diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c index ef1ae6b52042..c897676ca35f 100644 --- a/drivers/base/power/opp/debugfs.c +++ b/drivers/base/power/opp/debugfs.c @@ -63,16 +63,16 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate)) return -ENOMEM; - if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->u_volt)) + if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->supply.u_volt)) return -ENOMEM; - if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->u_volt_min)) + if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->supply.u_volt_min)) return -ENOMEM; - if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->u_volt_max)) + if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->supply.u_volt_max)) return -ENOMEM; - if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->u_amp)) + if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->supply.u_amp)) return -ENOMEM; if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d, diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 5b3755e49731..bdf409d42126 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c @@ -148,14 +148,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, return -EINVAL; } - opp->u_volt = microvolt[0]; + opp->supply.u_volt = microvolt[0]; if (count == 1) { - opp->u_volt_min = opp->u_volt; - opp->u_volt_max = opp->u_volt; + opp->supply.u_volt_min = opp->supply.u_volt; + opp->supply.u_volt_max = opp->supply.u_volt; } else { - opp->u_volt_min = microvolt[1]; - opp->u_volt_max = microvolt[2]; + opp->supply.u_volt_min = microvolt[1]; + opp->supply.u_volt_max = microvolt[2]; } /* Search for "opp-microamp-" */ @@ -173,7 +173,7 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, } if (prop && !of_property_read_u32(opp->np, name, &val)) - opp->u_amp = val; + opp->supply.u_amp = val; return 0; } @@ -303,9 +303,9 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) mutex_unlock(&opp_table_lock); pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", - __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt, - new_opp->u_volt_min, new_opp->u_volt_max, - new_opp->clock_latency_ns); + __func__, new_opp->turbo, new_opp->rate, + new_opp->supply.u_volt, new_opp->supply.u_volt_min, + new_opp->supply.u_volt_max, new_opp->clock_latency_ns); /* * Notify the changes in the availability of the operable diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h index 96cd30ac6c1d..8a02516542c2 100644 --- a/drivers/base/power/opp/opp.h +++ b/drivers/base/power/opp/opp.h @@ -61,10 +61,7 @@ extern struct list_head opp_tables; * @turbo: true if turbo (boost) OPP * @suspend: true if suspend OPP * @rate: Frequency in hertz - * @u_volt: Target voltage in microvolts corresponding to this OPP - * @u_volt_min: Minimum voltage in microvolts 
corresponding to this OPP - * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP - * @u_amp: Maximum current drawn by the device in microamperes + * @supply: Power supply voltage/current values * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's * frequency from any other OPP's frequency. * @opp_table: points back to the opp_table struct this opp belongs to @@ -83,10 +80,8 @@ struct dev_pm_opp { bool suspend; unsigned long rate; - unsigned long u_volt; - unsigned long u_volt_min; - unsigned long u_volt_max; - unsigned long u_amp; + struct dev_pm_opp_supply supply; + unsigned long clock_latency_ns; struct opp_table *opp_table; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index f6bc76501912..824f7268f687 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -25,6 +25,22 @@ enum dev_pm_opp_event { OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, }; +/** + * struct dev_pm_opp_supply - Power supply voltage/current values + * @u_volt: Target voltage in microvolts corresponding to this OPP + * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP + * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP + * @u_amp: Maximum current drawn by the device in microamperes + * + * This structure stores the voltage/current values for a single power supply. + */ +struct dev_pm_opp_supply { + unsigned long u_volt; + unsigned long u_volt_min; + unsigned long u_volt_max; + unsigned long u_amp; +}; + #if defined(CONFIG_PM_OPP) unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); -- cgit v1.2.3 From ce31781a7574ac1b11b1b66e0d54c8bab41f56eb Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 1 Dec 2016 16:28:18 +0530 Subject: PM / OPP: Pass struct dev_pm_opp_supply to _set_opp_voltage() Pass the entire supply structure instead of all of its fields. Signed-off-by: Viresh Kumar Tested-by: Dave Gerlach Reviewed-by: Stephen Boyd Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/opp/core.c | 44 +++++++++++++++++-------------------------- 1 file changed, 17 insertions(+), 27 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 188048d3d203..46ad8470c438 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -542,8 +542,7 @@ unlock: } static int _set_opp_voltage(struct device *dev, struct regulator *reg, - unsigned long u_volt, unsigned long u_volt_min, - unsigned long u_volt_max) + struct dev_pm_opp_supply *supply) { int ret; @@ -554,14 +553,15 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg, return 0; } - dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min, - u_volt, u_volt_max); + dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, + supply->u_volt_min, supply->u_volt, supply->u_volt_max); - ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt, - u_volt_max); + ret = regulator_set_voltage_triplet(reg, supply->u_volt_min, + supply->u_volt, supply->u_volt_max); if (ret) dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n", - __func__, u_volt_min, u_volt, u_volt_max, ret); + __func__, supply->u_volt_min, supply->u_volt, + supply->u_volt_max, ret); return ret; } @@ -583,8 +583,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) struct regulator *reg; struct clk *clk; unsigned long freq, old_freq; - unsigned long u_volt, u_volt_min, u_volt_max; - unsigned long old_u_volt, old_u_volt_min, old_u_volt_max; + struct dev_pm_opp_supply old_supply, new_supply; int ret; if (unlikely(!target_freq)) { @@ -634,17 +633,12 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) return ret; } - if (IS_ERR(old_opp)) { - old_u_volt = 0; - } else { - old_u_volt = old_opp->supply.u_volt; - old_u_volt_min = old_opp->supply.u_volt_min; - old_u_volt_max = old_opp->supply.u_volt_max; - } + if (IS_ERR(old_opp)) + old_supply.u_volt = 0; + else + memcpy(&old_supply, &old_opp->supply, sizeof(old_supply)); - u_volt = opp->supply.u_volt; - u_volt_min = opp->supply.u_volt_min; - u_volt_max = opp->supply.u_volt_max; + memcpy(&new_supply, &opp->supply, sizeof(new_supply)); reg = opp_table->regulator; @@ -652,8 +646,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) /* Scaling up? Scale voltage before frequency */ if (freq > old_freq) { - ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min, - u_volt_max); + ret = _set_opp_voltage(dev, reg, &new_supply); if (ret) goto restore_voltage; } @@ -672,8 +665,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) /* Scaling down? 
Scale voltage after frequency */ if (freq < old_freq) { - ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min, - u_volt_max); + ret = _set_opp_voltage(dev, reg, &new_supply); if (ret) goto restore_freq; } @@ -686,10 +678,8 @@ restore_freq: __func__, old_freq); restore_voltage: /* This shouldn't harm even if the voltages weren't updated earlier */ - if (old_u_volt) { - _set_opp_voltage(dev, reg, old_u_volt, old_u_volt_min, - old_u_volt_max); - } + if (old_supply.u_volt) + _set_opp_voltage(dev, reg, &old_supply); return ret; } -- cgit v1.2.3 From dfbe4678d709e25e0f36e6b6333e2a7a67aefb7e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 1 Dec 2016 16:28:19 +0530 Subject: PM / OPP: Add infrastructure to manage multiple regulators This patch adds infrastructure to manage multiple regulators and updates the only user (cpufreq-dt) of dev_pm_opp_set{put}_regulator(). This is preparatory work for adding full support for devices with multiple regulators. Signed-off-by: Viresh Kumar Tested-by: Dave Gerlach Signed-off-by: Rafael J. Wysocki --- drivers/base/power/opp/core.c | 248 +++++++++++++++++++++++++++------------ drivers/base/power/opp/debugfs.c | 52 ++++++-- drivers/base/power/opp/of.c | 103 +++++++++++----- drivers/base/power/opp/opp.h | 10 +- drivers/cpufreq/cpufreq-dt.c | 6 +- include/linux/pm_opp.h | 8 +- 6 files changed, 300 insertions(+), 127 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 46ad8470c438..b4da31c5a5eb 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -93,6 +93,8 @@ struct opp_table *_find_opp_table(struct device *dev) * Return: voltage in micro volt corresponding to the opp, else * return 0 * + * This is useful only for devices with single power supply. + * * Locking: This function must be called under rcu_read_lock(). opp is a rcu * protected pointer. 
This means that opp which could have been fetched by * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are @@ -112,7 +114,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) if (IS_ERR_OR_NULL(tmp_opp)) pr_err("%s: Invalid parameters\n", __func__); else - v = tmp_opp->supply.u_volt; + v = tmp_opp->supplies[0].u_volt; return v; } @@ -210,6 +212,24 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) } EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); +static int _get_regulator_count(struct device *dev) +{ + struct opp_table *opp_table; + int count; + + rcu_read_lock(); + + opp_table = _find_opp_table(dev); + if (!IS_ERR(opp_table)) + count = opp_table->regulator_count; + else + count = 0; + + rcu_read_unlock(); + + return count; +} + /** * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds * @dev: device for which we do this operation @@ -222,34 +242,51 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) { struct opp_table *opp_table; struct dev_pm_opp *opp; - struct regulator *reg; + struct regulator *reg, **regulators; unsigned long latency_ns = 0; - unsigned long min_uV = ~0, max_uV = 0; - int ret; + int ret, i, count; + struct { + unsigned long min; + unsigned long max; + } *uV; + + count = _get_regulator_count(dev); + + /* Regulator may not be required for the device */ + if (!count) + return 0; + + regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL); + if (!regulators) + return 0; + + uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); + if (!uV) + goto free_regulators; rcu_read_lock(); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { rcu_read_unlock(); - return 0; + goto free_uV; } - reg = opp_table->regulator; - if (IS_ERR(reg)) { - /* Regulator may not be required for device */ - rcu_read_unlock(); - return 0; - } + memcpy(regulators, opp_table->regulators, count * sizeof(*regulators)); - list_for_each_entry_rcu(opp, &opp_table->opp_list, node) { - if (!opp->available) - continue; + for (i = 0; i < count; i++) { + uV[i].min = ~0; + uV[i].max = 0; - if (opp->supply.u_volt_min < min_uV) - min_uV = opp->supply.u_volt_min; - if (opp->supply.u_volt_max > max_uV) - max_uV = opp->supply.u_volt_max; + list_for_each_entry_rcu(opp, &opp_table->opp_list, node) { + if (!opp->available) + continue; + + if (opp->supplies[i].u_volt_min < uV[i].min) + uV[i].min = opp->supplies[i].u_volt_min; + if (opp->supplies[i].u_volt_max > uV[i].max) + uV[i].max = opp->supplies[i].u_volt_max; + } } rcu_read_unlock(); @@ -258,9 +295,16 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) * The caller needs to ensure that opp_table (and hence the regulator) * isn't freed, while we are executing this routine. 
*/ - ret = regulator_set_voltage_time(reg, min_uV, max_uV); - if (ret > 0) - latency_ns = ret * 1000; + for (i = 0; reg = regulators[i], i < count; i++) { + ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max); + if (ret > 0) + latency_ns += ret * 1000; + } + +free_uV: + kfree(uV); +free_regulators: + kfree(regulators); return latency_ns; } @@ -580,7 +624,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { struct opp_table *opp_table; struct dev_pm_opp *old_opp, *opp; - struct regulator *reg; + struct regulator *reg = ERR_PTR(-ENXIO); struct clk *clk; unsigned long freq, old_freq; struct dev_pm_opp_supply old_supply, new_supply; @@ -633,14 +677,23 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) return ret; } + if (opp_table->regulators) { + /* This function only supports single regulator per device */ + if (WARN_ON(opp_table->regulator_count > 1)) { + dev_err(dev, "multiple regulators not supported\n"); + rcu_read_unlock(); + return -EINVAL; + } + + reg = opp_table->regulators[0]; + } + if (IS_ERR(old_opp)) old_supply.u_volt = 0; else - memcpy(&old_supply, &old_opp->supply, sizeof(old_supply)); - - memcpy(&new_supply, &opp->supply, sizeof(new_supply)); + memcpy(&old_supply, old_opp->supplies, sizeof(old_supply)); - reg = opp_table->regulator; + memcpy(&new_supply, opp->supplies, sizeof(new_supply)); rcu_read_unlock(); @@ -764,9 +817,6 @@ static struct opp_table *_add_opp_table(struct device *dev) _of_init_opp_table(opp_table, dev); - /* Set regulator to a non-NULL error value */ - opp_table->regulator = ERR_PTR(-ENXIO); - /* Find clk for the device */ opp_table->clk = clk_get(dev, NULL); if (IS_ERR(opp_table->clk)) { @@ -815,7 +865,7 @@ static void _remove_opp_table(struct opp_table *opp_table) if (opp_table->prop_name) return; - if (!IS_ERR(opp_table->regulator)) + if (opp_table->regulators) return; /* Release clk */ @@ -924,35 +974,50 @@ struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table) { struct dev_pm_opp *opp; + int count, supply_size; + struct opp_table *table; - /* allocate new OPP node */ - opp = kzalloc(sizeof(*opp), GFP_KERNEL); - if (!opp) + table = _add_opp_table(dev); + if (!table) return NULL; - INIT_LIST_HEAD(&opp->node); + /* Allocate space for at least one supply */ + count = table->regulator_count ? 
table->regulator_count : 1; + supply_size = sizeof(*opp->supplies) * count; - *opp_table = _add_opp_table(dev); - if (!*opp_table) { - kfree(opp); + /* allocate new OPP node and supplies structures */ + opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL); + if (!opp) { + kfree(table); return NULL; } + /* Put the supplies at the end of the OPP structure as an empty array */ + opp->supplies = (struct dev_pm_opp_supply *)(opp + 1); + INIT_LIST_HEAD(&opp->node); + + *opp_table = table; + return opp; } static bool _opp_supported_by_regulators(struct dev_pm_opp *opp, struct opp_table *opp_table) { - struct regulator *reg = opp_table->regulator; - - if (!IS_ERR(reg) && - !regulator_is_supported_voltage(reg, opp->supply.u_volt_min, - opp->supply.u_volt_max)) { - pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n", - __func__, opp->supply.u_volt_min, - opp->supply.u_volt_max); - return false; + struct regulator *reg; + int i; + + for (i = 0; i < opp_table->regulator_count; i++) { + reg = opp_table->regulators[i]; + + if (!regulator_is_supported_voltage(reg, + opp->supplies[i].u_volt_min, + opp->supplies[i].u_volt_max)) { + pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n", + __func__, opp->supplies[i].u_volt_min, + opp->supplies[i].u_volt_max); + return false; + } } return true; @@ -984,12 +1049,13 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, /* Duplicate OPPs */ dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", - __func__, opp->rate, opp->supply.u_volt, - opp->available, new_opp->rate, new_opp->supply.u_volt, - new_opp->available); + __func__, opp->rate, opp->supplies[0].u_volt, + opp->available, new_opp->rate, + new_opp->supplies[0].u_volt, new_opp->available); + /* Should we compare voltages for all regulators here ? */ return opp->available && - new_opp->supply.u_volt == opp->supply.u_volt ? 0 : -EEXIST; + new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? 0 : -EEXIST; } new_opp->opp_table = opp_table; @@ -1056,9 +1122,9 @@ int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, /* populate the opp table */ new_opp->rate = freq; tol = u_volt * opp_table->voltage_tolerance_v1 / 100; - new_opp->supply.u_volt = u_volt; - new_opp->supply.u_volt_min = u_volt - tol; - new_opp->supply.u_volt_max = u_volt + tol; + new_opp->supplies[0].u_volt = u_volt; + new_opp->supplies[0].u_volt_min = u_volt - tol; + new_opp->supplies[0].u_volt_max = u_volt + tol; new_opp->available = true; new_opp->dynamic = dynamic; @@ -1303,12 +1369,14 @@ unlock: EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name); /** - * dev_pm_opp_set_regulator() - Set regulator name for the device + * dev_pm_opp_set_regulators() - Set regulator names for the device * @dev: Device for which regulator name is being set. - * @name: Name of the regulator. + * @names: Array of pointers to the names of the regulator. + * @count: Number of regulators. * * In order to support OPP switching, OPP layer needs to know the name of the - * device's regulator, as the core would be required to switch voltages as well. + * device's regulators, as the core would be required to switch voltages as + * well. * * This must be called before any OPPs are initialized for the device. * @@ -1318,11 +1386,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name); * that this function is *NOT* called under RCU protection or in contexts where * mutex cannot be locked. 
*/ -struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name) +struct opp_table *dev_pm_opp_set_regulators(struct device *dev, + const char * const names[], + unsigned int count) { struct opp_table *opp_table; struct regulator *reg; - int ret; + int ret, i; mutex_lock(&opp_table_lock); @@ -1338,26 +1408,44 @@ struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name) goto err; } - /* Already have a regulator set */ - if (WARN_ON(!IS_ERR(opp_table->regulator))) { + /* Already have regulators set */ + if (WARN_ON(opp_table->regulators)) { ret = -EBUSY; goto err; } - /* Allocate the regulator */ - reg = regulator_get_optional(dev, name); - if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - if (ret != -EPROBE_DEFER) - dev_err(dev, "%s: no regulator (%s) found: %d\n", - __func__, name, ret); + + opp_table->regulators = kmalloc_array(count, + sizeof(*opp_table->regulators), + GFP_KERNEL); + if (!opp_table->regulators) { + ret = -ENOMEM; goto err; } - opp_table->regulator = reg; + for (i = 0; i < count; i++) { + reg = regulator_get_optional(dev, names[i]); + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + if (ret != -EPROBE_DEFER) + dev_err(dev, "%s: no regulator (%s) found: %d\n", + __func__, names[i], ret); + goto free_regulators; + } + + opp_table->regulators[i] = reg; + } + + opp_table->regulator_count = count; mutex_unlock(&opp_table_lock); return opp_table; +free_regulators: + while (i != 0) + regulator_put(opp_table->regulators[--i]); + + kfree(opp_table->regulators); + opp_table->regulators = NULL; err: _remove_opp_table(opp_table); unlock: @@ -1365,11 +1453,11 @@ unlock: return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator); +EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators); /** - * dev_pm_opp_put_regulator() - Releases resources blocked for regulator - * @opp_table: OPP table returned from dev_pm_opp_set_regulator(). + * dev_pm_opp_put_regulators() - Releases resources blocked for regulator + * @opp_table: OPP table returned from dev_pm_opp_set_regulators(). * * Locking: The internal opp_table and opp structures are RCU protected. * Hence this function internally uses RCU updater strategy with mutex locks @@ -1377,20 +1465,26 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator); * that this function is *NOT* called under RCU protection or in contexts where * mutex cannot be locked. 
*/ -void dev_pm_opp_put_regulator(struct opp_table *opp_table) +void dev_pm_opp_put_regulators(struct opp_table *opp_table) { + int i; + mutex_lock(&opp_table_lock); - if (IS_ERR(opp_table->regulator)) { - pr_err("%s: Doesn't have regulator set\n", __func__); + if (!opp_table->regulators) { + pr_err("%s: Doesn't have regulators set\n", __func__); goto unlock; } /* Make sure there are no concurrent readers while updating opp_table */ WARN_ON(!list_empty(&opp_table->opp_list)); - regulator_put(opp_table->regulator); - opp_table->regulator = ERR_PTR(-ENXIO); + for (i = opp_table->regulator_count - 1; i >= 0; i--) + regulator_put(opp_table->regulators[i]); + + kfree(opp_table->regulators); + opp_table->regulators = NULL; + opp_table->regulator_count = 0; /* Try freeing opp_table if this was the last blocking resource */ _remove_opp_table(opp_table); @@ -1398,7 +1492,7 @@ void dev_pm_opp_put_regulator(struct opp_table *opp_table) unlock: mutex_unlock(&opp_table_lock); } -EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator); +EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators); /** * dev_pm_opp_add() - Add an OPP table from a table definitions diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c index c897676ca35f..95f433db4ac7 100644 --- a/drivers/base/power/opp/debugfs.c +++ b/drivers/base/power/opp/debugfs.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "opp.h" @@ -34,6 +35,46 @@ void opp_debug_remove_one(struct dev_pm_opp *opp) debugfs_remove_recursive(opp->dentry); } +static bool opp_debug_create_supplies(struct dev_pm_opp *opp, + struct opp_table *opp_table, + struct dentry *pdentry) +{ + struct dentry *d; + int i = 0; + char *name; + + /* Always create at least supply-0 directory */ + do { + name = kasprintf(GFP_KERNEL, "supply-%d", i); + + /* Create per-opp directory */ + d = debugfs_create_dir(name, pdentry); + + kfree(name); + + if (!d) + return false; + + if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, + &opp->supplies[i].u_volt)) + return false; + + if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, + &opp->supplies[i].u_volt_min)) + return false; + + if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, + &opp->supplies[i].u_volt_max)) + return false; + + if (!debugfs_create_ulong("u_amp", S_IRUGO, d, + &opp->supplies[i].u_amp)) + return false; + } while (++i < opp_table->regulator_count); + + return true; +} + int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) { struct dentry *pdentry = opp_table->dentry; @@ -63,16 +104,7 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate)) return -ENOMEM; - if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->supply.u_volt)) - return -ENOMEM; - - if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->supply.u_volt_min)) - return -ENOMEM; - - if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->supply.u_volt_max)) - return -ENOMEM; - - if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->supply.u_amp)) + if (!opp_debug_create_supplies(opp, opp_table, d)) return -ENOMEM; if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d, diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index bdf409d42126..3f7d2591b173 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "opp.h" @@ -101,16 +102,16 @@ static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, 
return true; } -/* TODO: Support multiple regulators */ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, struct opp_table *opp_table) { - u32 microvolt[3] = {0}; - u32 val; - int count, ret; + u32 *microvolt, *microamp = NULL; + int supplies, vcount, icount, ret, i, j; struct property *prop = NULL; char name[NAME_MAX]; + supplies = opp_table->regulator_count ? opp_table->regulator_count : 1; + /* Search for "opp-microvolt-" */ if (opp_table->prop_name) { snprintf(name, sizeof(name), "opp-microvolt-%s", @@ -128,34 +129,29 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, return 0; } - count = of_property_count_u32_elems(opp->np, name); - if (count < 0) { + vcount = of_property_count_u32_elems(opp->np, name); + if (vcount < 0) { dev_err(dev, "%s: Invalid %s property (%d)\n", - __func__, name, count); - return count; + __func__, name, vcount); + return vcount; } - /* There can be one or three elements here */ - if (count != 1 && count != 3) { - dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n", - __func__, name, count); + /* There can be one or three elements per supply */ + if (vcount != supplies && vcount != supplies * 3) { + dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n", + __func__, name, vcount, supplies); return -EINVAL; } - ret = of_property_read_u32_array(opp->np, name, microvolt, count); + microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL); + if (!microvolt) + return -ENOMEM; + + ret = of_property_read_u32_array(opp->np, name, microvolt, vcount); if (ret) { dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret); - return -EINVAL; - } - - opp->supply.u_volt = microvolt[0]; - - if (count == 1) { - opp->supply.u_volt_min = opp->supply.u_volt; - opp->supply.u_volt_max = opp->supply.u_volt; - } else { - opp->supply.u_volt_min = microvolt[1]; - opp->supply.u_volt_max = microvolt[2]; + ret = -EINVAL; + goto free_microvolt; } /* Search for "opp-microamp-" */ @@ -172,10 +168,59 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, prop = of_find_property(opp->np, name, NULL); } - if (prop && !of_property_read_u32(opp->np, name, &val)) - opp->supply.u_amp = val; + if (prop) { + icount = of_property_count_u32_elems(opp->np, name); + if (icount < 0) { + dev_err(dev, "%s: Invalid %s property (%d)\n", __func__, + name, icount); + ret = icount; + goto free_microvolt; + } - return 0; + if (icount != supplies) { + dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n", + __func__, name, icount, supplies); + ret = -EINVAL; + goto free_microvolt; + } + + microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL); + if (!microamp) { + ret = -EINVAL; + goto free_microvolt; + } + + ret = of_property_read_u32_array(opp->np, name, microamp, + icount); + if (ret) { + dev_err(dev, "%s: error parsing %s: %d\n", __func__, + name, ret); + ret = -EINVAL; + goto free_microamp; + } + } + + for (i = 0, j = 0; i < supplies; i++) { + opp->supplies[i].u_volt = microvolt[j++]; + + if (vcount == supplies) { + opp->supplies[i].u_volt_min = opp->supplies[i].u_volt; + opp->supplies[i].u_volt_max = opp->supplies[i].u_volt; + } else { + opp->supplies[i].u_volt_min = microvolt[j++]; + opp->supplies[i].u_volt_max = microvolt[j++]; + } + + if (microamp) + opp->supplies[i].u_amp = microamp[i]; + } + +free_microamp: + kfree(microamp); +free_microvolt: + kfree(microvolt); + + return ret; } /** @@ -304,8 +349,8 @@ static int 
_opp_add_static_v2(struct device *dev, struct device_node *np) pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", __func__, new_opp->turbo, new_opp->rate, - new_opp->supply.u_volt, new_opp->supply.u_volt_min, - new_opp->supply.u_volt_max, new_opp->clock_latency_ns); + new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min, + new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns); /* * Notify the changes in the availability of the operable diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h index 8a02516542c2..5b0f7e53bede 100644 --- a/drivers/base/power/opp/opp.h +++ b/drivers/base/power/opp/opp.h @@ -61,7 +61,7 @@ extern struct list_head opp_tables; * @turbo: true if turbo (boost) OPP * @suspend: true if suspend OPP * @rate: Frequency in hertz - * @supply: Power supply voltage/current values + * @supplies: Power supplies voltage/current values * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's * frequency from any other OPP's frequency. * @opp_table: points back to the opp_table struct this opp belongs to @@ -80,7 +80,7 @@ struct dev_pm_opp { bool suspend; unsigned long rate; - struct dev_pm_opp_supply supply; + struct dev_pm_opp_supply *supplies; unsigned long clock_latency_ns; @@ -139,7 +139,8 @@ enum opp_table_access { * @supported_hw_count: Number of elements in supported_hw array. * @prop_name: A name to postfix to many DT properties, while parsing them. * @clk: Device's clock handle - * @regulator: Supply regulator + * @regulators: Supply regulators + * @regulator_count: Number of power supply regulators * @dentry: debugfs dentry pointer of the real device directory (not links). * @dentry_name: Name of the real dentry. * @@ -174,7 +175,8 @@ struct opp_table { unsigned int supported_hw_count; const char *prop_name; struct clk *clk; - struct regulator *regulator; + struct regulator **regulators; + unsigned int regulator_count; #ifdef CONFIG_DEBUG_FS struct dentry *dentry; diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 4d3ec92cbabf..269013311e79 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -188,7 +188,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) */ name = find_supply_name(cpu_dev); if (name) { - opp_table = dev_pm_opp_set_regulator(cpu_dev, name); + opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1); if (IS_ERR(opp_table)) { ret = PTR_ERR(opp_table); dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n", @@ -289,7 +289,7 @@ out_free_priv: out_free_opp: dev_pm_opp_of_cpumask_remove_table(policy->cpus); if (name) - dev_pm_opp_put_regulator(opp_table); + dev_pm_opp_put_regulators(opp_table); out_put_clk: clk_put(cpu_clk); @@ -304,7 +304,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy) dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); if (priv->reg_name) - dev_pm_opp_put_regulator(priv->opp_table); + dev_pm_opp_put_regulators(priv->opp_table); clk_put(policy->clk); kfree(priv); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 824f7268f687..9a825ae78653 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -79,8 +79,8 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, void dev_pm_opp_put_supported_hw(struct device *dev); int dev_pm_opp_set_prop_name(struct device *dev, const char *name); void dev_pm_opp_put_prop_name(struct device *dev); -struct opp_table 
*dev_pm_opp_set_regulator(struct device *dev, const char *name); -void dev_pm_opp_put_regulator(struct opp_table *opp_table); +struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); +void dev_pm_opp_put_regulators(struct opp_table *opp_table); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); @@ -187,12 +187,12 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) static inline void dev_pm_opp_put_prop_name(struct device *dev) {} -static inline struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name) +static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count) { return ERR_PTR(-ENOTSUPP); } -static inline void dev_pm_opp_put_regulator(struct opp_table *opp_table) {} +static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {} static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { -- cgit v1.2.3 From 947355850fcb3bb6549294316667d0f53bc03082 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 1 Dec 2016 16:28:20 +0530 Subject: PM / OPP: Separate out _generic_set_opp() Later patches would add support for custom set_opp() callbacks. This patch separates out the code for _generic_set_opp() handler in order to prepare for that. Signed-off-by: Viresh Kumar Tested-by: Dave Gerlach Reviewed-by: Stephen Boyd Signed-off-by: Rafael J. Wysocki --- drivers/base/power/opp/core.c | 181 +++++++++++++++++++++++++++++------------- drivers/base/power/opp/opp.h | 3 + include/linux/pm_opp.h | 35 ++++++++ 3 files changed, 166 insertions(+), 53 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index b4da31c5a5eb..e33198ce41b4 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -610,6 +610,69 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg, return ret; } +static inline int +_generic_set_opp_clk_only(struct device *dev, struct clk *clk, + unsigned long old_freq, unsigned long freq) +{ + int ret; + + ret = clk_set_rate(clk, freq); + if (ret) { + dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, + ret); + } + + return ret; +} + +static int _generic_set_opp(struct dev_pm_set_opp_data *data) +{ + struct dev_pm_opp_supply *old_supply = data->old_opp.supplies; + struct dev_pm_opp_supply *new_supply = data->new_opp.supplies; + unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate; + struct regulator *reg = data->regulators[0]; + struct device *dev= data->dev; + int ret; + + /* This function only supports single regulator per device */ + if (WARN_ON(data->regulator_count > 1)) { + dev_err(dev, "multiple regulators are not supported\n"); + return -EINVAL; + } + + /* Scaling up? Scale voltage before frequency */ + if (freq > old_freq) { + ret = _set_opp_voltage(dev, reg, new_supply); + if (ret) + goto restore_voltage; + } + + /* Change frequency */ + ret = _generic_set_opp_clk_only(dev, data->clk, old_freq, freq); + if (ret) + goto restore_voltage; + + /* Scaling down? 
Scale voltage after frequency */ + if (freq < old_freq) { + ret = _set_opp_voltage(dev, reg, new_supply); + if (ret) + goto restore_freq; + } + + return 0; + +restore_freq: + if (_generic_set_opp_clk_only(dev, data->clk, freq, old_freq)) + dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", + __func__, old_freq); +restore_voltage: + /* This shouldn't harm even if the voltages weren't updated earlier */ + if (old_supply->u_volt) + _set_opp_voltage(dev, reg, old_supply); + + return ret; +} + /** * dev_pm_opp_set_rate() - Configure new OPP based on frequency * @dev: device for which we do this operation @@ -623,12 +686,12 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg, int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { struct opp_table *opp_table; + unsigned long freq, old_freq; struct dev_pm_opp *old_opp, *opp; - struct regulator *reg = ERR_PTR(-ENXIO); + struct regulator **regulators; + struct dev_pm_set_opp_data *data; struct clk *clk; - unsigned long freq, old_freq; - struct dev_pm_opp_supply old_supply, new_supply; - int ret; + int ret, size; if (unlikely(!target_freq)) { dev_err(dev, "%s: Invalid target frequency %lu\n", __func__, @@ -677,64 +740,36 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) return ret; } - if (opp_table->regulators) { - /* This function only supports single regulator per device */ - if (WARN_ON(opp_table->regulator_count > 1)) { - dev_err(dev, "multiple regulators not supported\n"); - rcu_read_unlock(); - return -EINVAL; - } + dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__, + old_freq, freq); - reg = opp_table->regulators[0]; + regulators = opp_table->regulators; + + /* Only frequency scaling */ + if (!regulators) { + rcu_read_unlock(); + return _generic_set_opp_clk_only(dev, clk, old_freq, freq); } + data = opp_table->set_opp_data; + data->regulators = regulators; + data->regulator_count = opp_table->regulator_count; + data->clk = clk; + data->dev = dev; + + data->old_opp.rate = old_freq; + size = sizeof(*opp->supplies) * opp_table->regulator_count; if (IS_ERR(old_opp)) - old_supply.u_volt = 0; + memset(data->old_opp.supplies, 0, size); else - memcpy(&old_supply, old_opp->supplies, sizeof(old_supply)); + memcpy(data->old_opp.supplies, old_opp->supplies, size); - memcpy(&new_supply, opp->supplies, sizeof(new_supply)); + data->new_opp.rate = freq; + memcpy(data->new_opp.supplies, opp->supplies, size); rcu_read_unlock(); - /* Scaling up? Scale voltage before frequency */ - if (freq > old_freq) { - ret = _set_opp_voltage(dev, reg, &new_supply); - if (ret) - goto restore_voltage; - } - - /* Change frequency */ - - dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", - __func__, old_freq, freq); - - ret = clk_set_rate(clk, freq); - if (ret) { - dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, - ret); - goto restore_voltage; - } - - /* Scaling down? 
Scale voltage after frequency */ - if (freq < old_freq) { - ret = _set_opp_voltage(dev, reg, &new_supply); - if (ret) - goto restore_freq; - } - - return 0; - -restore_freq: - if (clk_set_rate(clk, old_freq)) - dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", - __func__, old_freq); -restore_voltage: - /* This shouldn't harm even if the voltages weren't updated earlier */ - if (old_supply.u_volt) - _set_opp_voltage(dev, reg, &old_supply); - - return ret; + return _generic_set_opp(data); } EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); @@ -1368,6 +1403,38 @@ unlock: } EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name); +static int _allocate_set_opp_data(struct opp_table *opp_table) +{ + struct dev_pm_set_opp_data *data; + int len, count = opp_table->regulator_count; + + if (WARN_ON(!count)) + return -EINVAL; + + /* space for set_opp_data */ + len = sizeof(*data); + + /* space for old_opp.supplies and new_opp.supplies */ + len += 2 * sizeof(struct dev_pm_opp_supply) * count; + + data = kzalloc(len, GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->old_opp.supplies = (void *)(data + 1); + data->new_opp.supplies = data->old_opp.supplies + count; + + opp_table->set_opp_data = data; + + return 0; +} + +static void _free_set_opp_data(struct opp_table *opp_table) +{ + kfree(opp_table->set_opp_data); + opp_table->set_opp_data = NULL; +} + /** * dev_pm_opp_set_regulators() - Set regulator names for the device * @dev: Device for which regulator name is being set. @@ -1437,6 +1504,11 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev, opp_table->regulator_count = count; + /* Allocate block only once to pass to set_opp() routines */ + ret = _allocate_set_opp_data(opp_table); + if (ret) + goto free_regulators; + mutex_unlock(&opp_table_lock); return opp_table; @@ -1446,6 +1518,7 @@ free_regulators: kfree(opp_table->regulators); opp_table->regulators = NULL; + opp_table->regulator_count = 0; err: _remove_opp_table(opp_table); unlock: @@ -1482,6 +1555,8 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table) for (i = opp_table->regulator_count - 1; i >= 0; i--) regulator_put(opp_table->regulators[i]); + _free_set_opp_data(opp_table); + kfree(opp_table->regulators); opp_table->regulators = NULL; opp_table->regulator_count = 0; diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h index 5b0f7e53bede..a05e43912c6b 100644 --- a/drivers/base/power/opp/opp.h +++ b/drivers/base/power/opp/opp.h @@ -141,6 +141,7 @@ enum opp_table_access { * @clk: Device's clock handle * @regulators: Supply regulators * @regulator_count: Number of power supply regulators + * @set_opp_data: Data to be passed to set_opp callback * @dentry: debugfs dentry pointer of the real device directory (not links). * @dentry_name: Name of the real dentry. 
* @@ -178,6 +179,8 @@ struct opp_table { struct regulator **regulators; unsigned int regulator_count; + struct dev_pm_set_opp_data *set_opp_data; + #ifdef CONFIG_DEBUG_FS struct dentry *dentry; char dentry_name[NAME_MAX]; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 9a825ae78653..779b40a9287d 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -17,6 +17,8 @@ #include #include +struct clk; +struct regulator; struct dev_pm_opp; struct device; struct opp_table; @@ -41,6 +43,39 @@ struct dev_pm_opp_supply { unsigned long u_amp; }; +/** + * struct dev_pm_opp_info - OPP freq/voltage/current values + * @rate: Target clk rate in hz + * @supplies: Array of voltage/current values for all power supplies + * + * This structure stores the freq/voltage/current values for a single OPP. + */ +struct dev_pm_opp_info { + unsigned long rate; + struct dev_pm_opp_supply *supplies; +}; + +/** + * struct dev_pm_set_opp_data - Set OPP data + * @old_opp: Old OPP info + * @new_opp: New OPP info + * @regulators: Array of regulator pointers + * @regulator_count: Number of regulators + * @clk: Pointer to clk + * @dev: Pointer to the struct device + * + * This structure contains all information required for setting an OPP. + */ +struct dev_pm_set_opp_data { + struct dev_pm_opp_info old_opp; + struct dev_pm_opp_info new_opp; + + struct regulator **regulators; + unsigned int regulator_count; + struct clk *clk; + struct device *dev; +}; + #if defined(CONFIG_PM_OPP) unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); -- cgit v1.2.3 From 4dab160eb1586f67e8ba7c55ffdd2373f7a5553e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 1 Dec 2016 16:28:21 +0530 Subject: PM / OPP: Allow platform specific custom set_opp() callbacks The generic set_opp() handler isn't sufficient for platforms with complex DVFS. For example, some TI platforms have multiple regulators for a CPU device. The order in which various supplies need to be programmed is only known to the platform code and its best to leave it to it. This patch implements APIs to register platform specific set_opp() callback. Signed-off-by: Viresh Kumar Tested-by: Dave Gerlach Reviewed-by: Stephen Boyd Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/opp/core.c | 114 +++++++++++++++++++++++++++++++++++++++++- drivers/base/power/opp/opp.h | 2 + include/linux/pm_opp.h | 10 ++++ 3 files changed, 125 insertions(+), 1 deletion(-) (limited to 'drivers/base') diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index e33198ce41b4..eceebef36f21 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -687,6 +687,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { struct opp_table *opp_table; unsigned long freq, old_freq; + int (*set_opp)(struct dev_pm_set_opp_data *data); struct dev_pm_opp *old_opp, *opp; struct regulator **regulators; struct dev_pm_set_opp_data *data; @@ -751,6 +752,11 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) return _generic_set_opp_clk_only(dev, clk, old_freq, freq); } + if (opp_table->set_opp) + set_opp = opp_table->set_opp; + else + set_opp = _generic_set_opp; + data = opp_table->set_opp_data; data->regulators = regulators; data->regulator_count = opp_table->regulator_count; @@ -769,7 +775,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) rcu_read_unlock(); - return _generic_set_opp(data); + return set_opp(data); } EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); @@ -903,6 +909,9 @@ static void _remove_opp_table(struct opp_table *opp_table) if (opp_table->regulators) return; + if (opp_table->set_opp) + return; + /* Release clk */ if (!IS_ERR(opp_table->clk)) clk_put(opp_table->clk); @@ -1569,6 +1578,109 @@ unlock: } EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators); +/** + * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper + * @dev: Device for which the helper is getting registered. + * @set_opp: Custom set OPP helper. + * + * This is useful to support complex platforms (like platforms with multiple + * regulators per device), instead of the generic OPP set rate helper. + * + * This must be called before any OPPs are initialized for the device. + * + * Locking: The internal opp_table and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + */ +int dev_pm_opp_register_set_opp_helper(struct device *dev, + int (*set_opp)(struct dev_pm_set_opp_data *data)) +{ + struct opp_table *opp_table; + int ret; + + if (!set_opp) + return -EINVAL; + + mutex_lock(&opp_table_lock); + + opp_table = _add_opp_table(dev); + if (!opp_table) { + ret = -ENOMEM; + goto unlock; + } + + /* This should be called before OPPs are initialized */ + if (WARN_ON(!list_empty(&opp_table->opp_list))) { + ret = -EBUSY; + goto err; + } + + /* Already have custom set_opp helper */ + if (WARN_ON(opp_table->set_opp)) { + ret = -EBUSY; + goto err; + } + + opp_table->set_opp = set_opp; + + mutex_unlock(&opp_table_lock); + return 0; + +err: + _remove_opp_table(opp_table); +unlock: + mutex_unlock(&opp_table_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper); + +/** + * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for + * set_opp helper + * @dev: Device for which custom set_opp helper has to be cleared. + * + * Locking: The internal opp_table and opp structures are RCU protected. 
+ * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + */ +void dev_pm_opp_register_put_opp_helper(struct device *dev) +{ + struct opp_table *opp_table; + + mutex_lock(&opp_table_lock); + + /* Check for existing table for 'dev' first */ + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + dev_err(dev, "Failed to find opp_table: %ld\n", + PTR_ERR(opp_table)); + goto unlock; + } + + if (!opp_table->set_opp) { + dev_err(dev, "%s: Doesn't have custom set_opp helper set\n", + __func__); + goto unlock; + } + + /* Make sure there are no concurrent readers while updating opp_table */ + WARN_ON(!list_empty(&opp_table->opp_list)); + + opp_table->set_opp = NULL; + + /* Try freeing opp_table if this was the last blocking resource */ + _remove_opp_table(opp_table); + +unlock: + mutex_unlock(&opp_table_lock); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper); + /** * dev_pm_opp_add() - Add an OPP table from a table definitions * @dev: device for which we do this operation diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h index a05e43912c6b..af9f2b849a66 100644 --- a/drivers/base/power/opp/opp.h +++ b/drivers/base/power/opp/opp.h @@ -141,6 +141,7 @@ enum opp_table_access { * @clk: Device's clock handle * @regulators: Supply regulators * @regulator_count: Number of power supply regulators + * @set_opp: Platform specific set_opp callback * @set_opp_data: Data to be passed to set_opp callback * @dentry: debugfs dentry pointer of the real device directory (not links). * @dentry_name: Name of the real dentry. @@ -179,6 +180,7 @@ struct opp_table { struct regulator **regulators; unsigned int regulator_count; + int (*set_opp)(struct dev_pm_set_opp_data *data); struct dev_pm_set_opp_data *set_opp_data; #ifdef CONFIG_DEBUG_FS diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 779b40a9287d..0edd88f93904 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -116,6 +116,8 @@ int dev_pm_opp_set_prop_name(struct device *dev, const char *name); void dev_pm_opp_put_prop_name(struct device *dev); struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); void dev_pm_opp_put_regulators(struct opp_table *opp_table); +int dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); +void dev_pm_opp_register_put_opp_helper(struct device *dev); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); @@ -215,6 +217,14 @@ static inline int dev_pm_opp_set_supported_hw(struct device *dev, static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} +static inline int dev_pm_opp_register_set_opp_helper(struct device *dev, + int (*set_opp)(struct dev_pm_set_opp_data *data)) +{ + return -ENOTSUPP; +} + +static inline void dev_pm_opp_register_put_opp_helper(struct device *dev) {} + static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) { return -ENOTSUPP; -- cgit v1.2.3 From e231f8d7ed9a01280d18cd897ae0bbb4118bc954 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 1 Dec 2016 16:28:22 +0530 Subject: PM / OPP: Don't WARN on multiple 
calls to dev_pm_opp_set_regulators() If a platform specific OPP driver has called this routine first and set the regulators, then the second call from cpufreq-dt driver will hit the WARN_ON(). Remove the WARN_ON(), but continue to return error in such cases. Signed-off-by: Viresh Kumar Reviewed-by: Stephen Boyd Tested-by: Dave Gerlach Signed-off-by: Rafael J. Wysocki --- drivers/base/power/opp/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/base') diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index eceebef36f21..35ff06283738 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -1485,7 +1485,7 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev, } /* Already have regulators set */ - if (WARN_ON(opp_table->regulators)) { + if (opp_table->regulators) { ret = -EBUSY; goto err; } -- cgit v1.2.3 From 598da548ef78927c6e8a6baeed8072fa9db74ff1 Mon Sep 17 00:00:00 2001 From: Lina Iyer Date: Thu, 3 Nov 2016 14:54:35 -0700 Subject: PM / Domains: Fix compatible for domain idle state Re-using idle state definition provided by arm,idle-state for domain idle states creates a lot of confusion and limits further evolution of the domain idle definition. To keep things clear and simple, define a idle states for domain using a new compatible "domain-idle-state". Fix existing PM domains code to look for the newly defined compatible. Signed-off-by: Lina Iyer Reviewed-by: Ulf Hansson Reviewed-by: Sudeep Holla Signed-off-by: Rafael J. Wysocki --- .../bindings/power/domain-idle-state.txt | 33 ++++++++++++++++++++++ .../devicetree/bindings/power/power_domain.txt | 8 +++--- drivers/base/power/domain.c | 2 +- 3 files changed, 38 insertions(+), 5 deletions(-) create mode 100644 Documentation/devicetree/bindings/power/domain-idle-state.txt (limited to 'drivers/base') diff --git a/Documentation/devicetree/bindings/power/domain-idle-state.txt b/Documentation/devicetree/bindings/power/domain-idle-state.txt new file mode 100644 index 000000000000..eefc7ed22ca2 --- /dev/null +++ b/Documentation/devicetree/bindings/power/domain-idle-state.txt @@ -0,0 +1,33 @@ +PM Domain Idle State Node: + +A domain idle state node represents the state parameters that will be used to +select the state when there are no active components in the domain. + +The state node has the following parameters - + +- compatible: + Usage: Required + Value type: + Definition: Must be "domain-idle-state". + +- entry-latency-us + Usage: Required + Value type: + Definition: u32 value representing worst case latency in + microseconds required to enter the idle state. + The exit-latency-us duration may be guaranteed + only after entry-latency-us has passed. + +- exit-latency-us + Usage: Required + Value type: + Definition: u32 value representing worst case latency + in microseconds required to exit the idle state. + +- min-residency-us + Usage: Required + Value type: + Definition: u32 value representing minimum residency duration + in microseconds after which the idle state will yield + power benefits after overcoming the overhead in entering +i the idle state. 
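For reference, here is a minimal sketch of how a power-domain provider could walk the domain-idle-states phandles and recognise state nodes by the new compatible string. This is not taken from the patch; the helper name count_domain_idle_states() is invented for illustration, and only generic OF helpers are used. The actual genpd change, shown in the drivers/base/power/domain.c hunk below, simply switches the idle_state_match[] table to the new compatible.

    /*
     * Illustrative sketch only, not part of this patch: a hypothetical
     * helper that counts the idle states referenced by a domain node's
     * "domain-idle-states" property and carrying the new compatible.
     */
    #include <linux/of.h>

    static int count_domain_idle_states(struct device_node *dn)
    {
        struct device_node *np;
        int i = 0, count = 0;

        while ((np = of_parse_phandle(dn, "domain-idle-states", i++))) {
            if (of_device_is_compatible(np, "domain-idle-state"))
                count++;
            of_node_put(np);
        }

        return count;
    }
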
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt index e1650364b296..723e1ad937da 100644 --- a/Documentation/devicetree/bindings/power/power_domain.txt +++ b/Documentation/devicetree/bindings/power/power_domain.txt @@ -31,7 +31,7 @@ Optional properties: - domain-idle-states : A phandle of an idle-state that shall be soaked into a generic domain power state. The idle state definitions are - compatible with arm,idle-state specified in [1]. + compatible with domain-idle-state specified in [1]. The domain-idle-state property reflects the idle state of this PM domain and not the idle states of the devices or sub-domains in the PM domain. Devices and sub-domains have their own idle-states independent of the parent @@ -85,7 +85,7 @@ Example 3: }; DOMAIN_RET: state@0 { - compatible = "arm,idle-state"; + compatible = "domain-idle-state"; reg = <0x0>; entry-latency-us = <1000>; exit-latency-us = <2000>; @@ -93,7 +93,7 @@ Example 3: }; DOMAIN_PWR_DN: state@1 { - compatible = "arm,idle-state"; + compatible = "domain-idle-state"; reg = <0x1>; entry-latency-us = <5000>; exit-latency-us = <8000>; @@ -118,4 +118,4 @@ The node above defines a typical PM domain consumer device, which is located inside a PM domain with index 0 of a power controller represented by a node with the label "power". -[1]. Documentation/devicetree/bindings/arm/idle-states.txt +[1]. Documentation/devicetree/bindings/power/domain-idle-state.txt diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 46e7f6052c9b..5711708532db 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2049,7 +2049,7 @@ out: EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); static const struct of_device_id idle_state_match[] = { - { .compatible = "arm,idle-state", }, + { .compatible = "domain-idle-state", }, { } }; -- cgit v1.2.3 From bed570307ed78f21b77cb04a1df781dee4a8f05a Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 5 Dec 2016 16:38:16 -0800 Subject: PM / wakeirq: Fix dedicated wakeirq for drivers not using autosuspend I noticed some wakeirq flakeyness with consumer drivers not using autosuspend. For drivers not using autosuspend, the wakeirq may never get unmasked in rpm_suspend() because of irq desc->depth. We are configuring dedicated wakeirqs to start with IRQ_NOAUTOEN as we naturally don't want them running until rpm_suspend() is called. However, when a consumer driver initially calls pm_runtime_get(), we now wrongly start with disable_irq_nosync() call on the dedicated wakeirq that is disabled to start with. This causes desc->depth to toggle between 1 and 2 instead of the usual 0 and 1. This can prevent enable_irq() from unmasking the wakeirq as that only happens at desc->depth 1. This does not necessarily show up with drivers using autosuspend as there is time for disable_irq_nosync() before rpm_suspend() gets called after the autosuspend timeout. Let's fix the issue by adding wirq->status that lazily gets set on the first rpm_suspend(). We also need PM runtime core private functions for dev_pm_enable_wake_irq_check() and dev_pm_disable_wake_irq_check() so we can enable the dedicated wakeirq on the first rpm_suspend(). While at it, let's also fix the comments for dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq(). Those can still be used by the consumer drivers as needed because the IRQ core manages the interrupt usecount for us. 
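As a rough usage sketch (hypothetical driver, not part of this patch), a consumer hands its dedicated wakeirq to the PM core once at probe time; with this fix the runtime PM core then arms and disarms it lazily from rpm_suspend()/rpm_resume() through the new *_check() helpers, and the driver never touches enable_irq()/disable_irq() on it directly.

    /*
     * Illustrative sketch only: a made-up platform driver registering a
     * dedicated wake IRQ. The second interrupt of the device is assumed
     * to be the wake IRQ; names are hypothetical.
     */
    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>
    #include <linux/pm_wakeup.h>
    #include <linux/pm_wakeirq.h>

    static int foo_probe(struct platform_device *pdev)
    {
        struct device *dev = &pdev->dev;
        int wakeirq, ret;

        wakeirq = platform_get_irq(pdev, 1);
        if (wakeirq < 0)
            return wakeirq;

        device_init_wakeup(dev, true);

        /* PM core now owns enabling/disabling of this IRQ */
        ret = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
        if (ret)
            return ret;

        pm_runtime_enable(dev);

        return 0;
    }
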
Fixes: 4990d4fe327b (PM / Wakeirq: Add automated device wake IRQ handling) Signed-off-by: Tony Lindgren Signed-off-by: Rafael J. Wysocki --- drivers/base/power/power.h | 19 ++++++++++- drivers/base/power/runtime.c | 8 ++--- drivers/base/power/wakeirq.c | 76 ++++++++++++++++++++++++++++++++++++++------ 3 files changed, 88 insertions(+), 15 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 50e30e7b059d..a84332aefc2d 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -21,14 +21,22 @@ extern void pm_runtime_init(struct device *dev); extern void pm_runtime_reinit(struct device *dev); extern void pm_runtime_remove(struct device *dev); +#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0) +#define WAKE_IRQ_DEDICATED_MANAGED BIT(1) +#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \ + WAKE_IRQ_DEDICATED_MANAGED) + struct wake_irq { struct device *dev; + unsigned int status; int irq; - bool dedicated_irq:1; }; extern void dev_pm_arm_wake_irq(struct wake_irq *wirq); extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq); +extern void dev_pm_enable_wake_irq_check(struct device *dev, + bool can_change_status); +extern void dev_pm_disable_wake_irq_check(struct device *dev); #ifdef CONFIG_PM_SLEEP @@ -104,6 +112,15 @@ static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq) { } +static inline void dev_pm_enable_wake_irq_check(struct device *dev, + bool can_change_status) +{ +} + +static inline void dev_pm_disable_wake_irq_check(struct device *dev) +{ +} + #endif #ifdef CONFIG_PM_SLEEP diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index f0d863089345..26856d050037 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -516,7 +516,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) callback = RPM_GET_CALLBACK(dev, runtime_suspend); - dev_pm_enable_wake_irq(dev); + dev_pm_enable_wake_irq_check(dev, true); retval = rpm_callback(callback, dev); if (retval) goto fail; @@ -555,7 +555,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) return retval; fail: - dev_pm_disable_wake_irq(dev); + dev_pm_disable_wake_irq_check(dev); __update_runtime_status(dev, RPM_ACTIVE); dev->power.deferred_resume = false; wake_up_all(&dev->power.wait_queue); @@ -738,12 +738,12 @@ static int rpm_resume(struct device *dev, int rpmflags) callback = RPM_GET_CALLBACK(dev, runtime_resume); - dev_pm_disable_wake_irq(dev); + dev_pm_disable_wake_irq_check(dev); retval = rpm_callback(callback, dev); if (retval) { __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_cancel_pending(dev); - dev_pm_enable_wake_irq(dev); + dev_pm_enable_wake_irq_check(dev, false); } else { no_callback: __update_runtime_status(dev, RPM_ACTIVE); diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index 0d77cd6fd8d1..404d94c6c8bc 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -110,8 +110,10 @@ void dev_pm_clear_wake_irq(struct device *dev) dev->power.wakeirq = NULL; spin_unlock_irqrestore(&dev->power.lock, flags); - if (wirq->dedicated_irq) + if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) { free_irq(wirq->irq, wirq); + wirq->status &= ~WAKE_IRQ_DEDICATED_MASK; + } kfree(wirq); } EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq); @@ -179,7 +181,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) wirq->dev = dev; wirq->irq = irq; - wirq->dedicated_irq = true; irq_set_status_flags(irq, IRQ_NOAUTOEN); /* @@ -195,6 +196,8 @@ int 
dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) if (err) goto err_free_irq; + wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED; + return err; err_free_irq: @@ -210,9 +213,9 @@ EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq); * dev_pm_enable_wake_irq - Enable device wake-up interrupt * @dev: Device * - * Called from the bus code or the device driver for - * runtime_suspend() to enable the wake-up interrupt while - * the device is running. + * Optionally called from the bus code or the device driver for + * runtime_resume() to override the PM runtime core managed wake-up + * interrupt handling to enable the wake-up interrupt. * * Note that for runtime_suspend()) the wake-up interrupts * should be unconditionally enabled unlike for suspend() @@ -222,7 +225,7 @@ void dev_pm_enable_wake_irq(struct device *dev) { struct wake_irq *wirq = dev->power.wakeirq; - if (wirq && wirq->dedicated_irq) + if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)) enable_irq(wirq->irq); } EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq); @@ -231,19 +234,72 @@ EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq); * dev_pm_disable_wake_irq - Disable device wake-up interrupt * @dev: Device * - * Called from the bus code or the device driver for - * runtime_resume() to disable the wake-up interrupt while - * the device is running. + * Optionally called from the bus code or the device driver for + * runtime_suspend() to override the PM runtime core managed wake-up + * interrupt handling to disable the wake-up interrupt. */ void dev_pm_disable_wake_irq(struct device *dev) { struct wake_irq *wirq = dev->power.wakeirq; - if (wirq && wirq->dedicated_irq) + if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)) disable_irq_nosync(wirq->irq); } EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq); +/** + * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt + * @dev: Device + * @can_change_status: Can change wake-up interrupt status + * + * Enables wakeirq conditionally. We need to enable wake-up interrupt + * lazily on the first rpm_suspend(). This is needed as the consumer device + * starts in RPM_SUSPENDED state, and the the first pm_runtime_get() would + * otherwise try to disable already disabled wakeirq. The wake-up interrupt + * starts disabled with IRQ_NOAUTOEN set. + * + * Should be only called from rpm_suspend() and rpm_resume() path. + * Caller must hold &dev->power.lock to change wirq->status + */ +void dev_pm_enable_wake_irq_check(struct device *dev, + bool can_change_status) +{ + struct wake_irq *wirq = dev->power.wakeirq; + + if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK))) + return; + + if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) { + goto enable; + } else if (can_change_status) { + wirq->status |= WAKE_IRQ_DEDICATED_MANAGED; + goto enable; + } + + return; + +enable: + enable_irq(wirq->irq); +} + +/** + * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt + * @dev: Device + * + * Disables wake-up interrupt conditionally based on status. + * Should be only called from rpm_suspend() and rpm_resume() path. 
+ */ +void dev_pm_disable_wake_irq_check(struct device *dev) +{ + struct wake_irq *wirq = dev->power.wakeirq; + + if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK))) + return; + + if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) + disable_irq_nosync(wirq->irq); +} + /** * dev_pm_arm_wake_irq - Arm device wake-up * @wirq: Device wake-up interrupt -- cgit v1.2.3 From 05a926227742b0bcbef366bbd710c4f6631c7d9f Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Wed, 7 Dec 2016 20:10:32 +0530 Subject: PM / core: Fix bug in the error handling of async suspend If async_suspend is enabled for parent and child devices, then the PM framework has to ensure that the parent's async suspend gets called only after the child's async suspend is done. If the child's async suspend fails with an error, then the parent's async suspend must not be invoked. The current code uses async_error to ensure this, but there is a problem with it in __device_suspend(). This function notifies the completion of the child's async suspend before updating its error via the async_error variable. As a result, the parent's async suspend gets invoked even though its child's suspend has failed. Fix this bug by updating async_error before notifying the child's completion. Signed-off-by: Sahitya Tummala [ rjw: Rearranged whitespace ] Signed-off-by: Rafael J. Wysocki --- drivers/base/power/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/base') diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index e44944f4be77..d5a44aacba7f 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1460,10 +1460,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) dpm_watchdog_clear(&wd); Complete: - complete_all(&dev->power.completion); if (error) async_error = error; + complete_all(&dev->power.completion); TRACE_SUSPEND(error); return error; } -- cgit v1.2.3 From 9320f95c0b8f1d074f570385e6855d4554f693e4 Mon Sep 17 00:00:00 2001 From: xing wei Date: Wed, 7 Dec 2016 19:31:16 +0800 Subject: PM / sleep: Print active wakeup sources when blocking on wakeup_count reads If there are any wakeup events being processed, a read of /sys/power/wakeup_count will block, so print the names of all active wakeup sources to help find out which wakeup source is preventing system suspend from triggering. While at it, change pr_info() in pm_print_active_wakeup_sources() to pr_debug() to avoid excessive log noise. Signed-off-by: xing wei [ rjw: Subject & changelog ] Signed-off-by: Rafael J.
Wysocki --- drivers/base/power/wakeup.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 62e4de2aa8d1..bf9ba26981a5 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -811,7 +811,7 @@ void pm_print_active_wakeup_sources(void) rcu_read_lock(); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { if (ws->active) { - pr_info("active wakeup source: %s\n", ws->name); + pr_debug("active wakeup source: %s\n", ws->name); active = 1; } else if (!active && (!last_activity_ws || @@ -822,7 +822,7 @@ void pm_print_active_wakeup_sources(void) } if (!active && last_activity_ws) - pr_info("last active wakeup source: %s\n", + pr_debug("last active wakeup source: %s\n", last_activity_ws->name); rcu_read_unlock(); } @@ -905,7 +905,7 @@ bool pm_get_wakeup_count(unsigned int *count, bool block) split_counters(&cnt, &inpr); if (inpr == 0 || signal_pending(current)) break; - + pm_print_active_wakeup_sources(); schedule(); } finish_wait(&wakeup_count_wait_queue, &wait); -- cgit v1.2.3
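For context, the blocking read the commit above refers to is part of the /sys/power/wakeup_count handshake performed before writing to /sys/power/state. A hypothetical userspace sketch of that protocol (not part of the patch; the sysfs paths are the standard ones, error handling is minimal) is:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t len;
	int fd;

	fd = open("/sys/power/wakeup_count", O_RDWR);
	if (fd < 0)
		return 1;

	/* May block while wakeup events are being processed */
	len = read(fd, buf, sizeof(buf) - 1);
	if (len <= 0) {
		close(fd);
		return 1;
	}
	buf[len] = '\0';

	/* Writing the count back fails if new wakeup events arrived meanwhile */
	if (write(fd, buf, strlen(buf)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);

	/* No races detected, attempt the actual suspend */
	fd = open("/sys/power/state", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "mem", 3) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

When wakeup events are in flight, the read() above is where a suspend attempt stalls, which is exactly where the added pm_print_active_wakeup_sources() call now reports the offending wakeup sources at debug level.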