Diffstat (limited to 'drivers/opp')
-rw-r--r--  drivers/opp/Kconfig    |   2
-rw-r--r--  drivers/opp/core.c     | 131
-rw-r--r--  drivers/opp/debugfs.c  |  42
-rw-r--r--  drivers/opp/of.c       | 205
-rw-r--r--  drivers/opp/opp.h      |  10
5 files changed, 361 insertions(+), 29 deletions(-)
diff --git a/drivers/opp/Kconfig b/drivers/opp/Kconfig
index 35dfc7e80f92..e8ce47b32735 100644
--- a/drivers/opp/Kconfig
+++ b/drivers/opp/Kconfig
@@ -2,7 +2,7 @@
config PM_OPP
bool
select SRCU
- ---help---
+ help
SOCs have a standard set of tuples consisting of frequency and
voltage pairs that the device will support per voltage domain. This
is called Operating Performance Point or OPP. The actual definitions
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index ba43e6a3dc0a..dfbd3d10410c 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -664,7 +664,7 @@ static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
return ret;
}
-static int _generic_set_opp_regulator(const struct opp_table *opp_table,
+static int _generic_set_opp_regulator(struct opp_table *opp_table,
struct device *dev,
unsigned long old_freq,
unsigned long freq,
@@ -699,6 +699,18 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
goto restore_freq;
}
+ /*
+ * Enable the regulator after setting its voltages, otherwise it breaks
+ * some boot-enabled regulators.
+ */
+ if (unlikely(!opp_table->regulator_enabled)) {
+ ret = regulator_enable(reg);
+ if (ret < 0)
+ dev_warn(dev, "Failed to enable regulator: %d", ret);
+ else
+ opp_table->regulator_enabled = true;
+ }
+
return 0;
restore_freq:
@@ -713,6 +725,34 @@ restore_voltage:
return ret;
}
+static int _set_opp_bw(const struct opp_table *opp_table,
+ struct dev_pm_opp *opp, struct device *dev, bool remove)
+{
+ u32 avg, peak;
+ int i, ret;
+
+ if (!opp_table->paths)
+ return 0;
+
+ for (i = 0; i < opp_table->path_count; i++) {
+ if (remove) {
+ avg = 0;
+ peak = 0;
+ } else {
+ avg = opp->bandwidth[i].avg;
+ peak = opp->bandwidth[i].peak;
+ }
+ ret = icc_set_bw(opp_table->paths[i], avg, peak);
+ if (ret) {
+ dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
+ remove ? "remove" : "set", i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int _set_opp_custom(const struct opp_table *opp_table,
struct device *dev, unsigned long old_freq,
unsigned long freq,
@@ -817,13 +857,31 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
}
if (unlikely(!target_freq)) {
- if (opp_table->required_opp_tables) {
- ret = _set_required_opps(dev, opp_table, NULL);
- } else {
+ /*
+ * Some drivers need to support cases where some platforms may
+ * have an OPP table for the device, while others don't, and
+ * opp_set_rate() just needs to behave like clk_set_rate().
+ */
+ if (!_get_opp_count(opp_table))
+ return 0;
+
+ if (!opp_table->required_opp_tables && !opp_table->regulators &&
+ !opp_table->paths) {
dev_err(dev, "target frequency can't be 0\n");
ret = -EINVAL;
+ goto put_opp_table;
+ }
+
+ ret = _set_opp_bw(opp_table, NULL, dev, true);
+ if (ret)
+ return ret;
+
+ if (opp_table->regulator_enabled) {
+ regulator_disable(opp_table->regulators[0]);
+ opp_table->regulator_enabled = false;
}
+ ret = _set_required_opps(dev, opp_table, NULL);
goto put_opp_table;
}
@@ -849,6 +907,18 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
goto put_opp_table;
}
+ /*
+ * For IO devices which require an OPP on some platforms/SoCs,
+ * while just needing to scale the clock on others, we look for
+ * empty OPP tables with just a clock handle and scale only the
+ * clk. This makes dev_pm_opp_set_rate() equivalent to
+ * clk_set_rate().
+ */
+ if (!_get_opp_count(opp_table)) {
+ ret = _generic_set_opp_clk_only(dev, clk, freq);
+ goto put_opp_table;
+ }
+
temp_freq = old_freq;
old_opp = _find_freq_ceil(opp_table, &temp_freq);
if (IS_ERR(old_opp)) {
@@ -895,6 +965,9 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
dev_err(dev, "Failed to set required opps: %d\n", ret);
}
+ if (!ret)
+ ret = _set_opp_bw(opp_table, opp, dev, false);
+
put_opp:
dev_pm_opp_put(opp);
put_old_opp:
@@ -985,6 +1058,12 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
ret);
}
+ /* Find interconnect path(s) for the device */
+ ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
+ if (ret)
+ dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
+ __func__, ret);
+
BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
INIT_LIST_HEAD(&opp_table->opp_list);
kref_init(&opp_table->kref);
@@ -1043,6 +1122,7 @@ static void _opp_table_kref_release(struct kref *kref)
{
struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
struct opp_device *opp_dev, *temp;
+ int i;
_of_clear_opp_table(opp_table);
@@ -1050,6 +1130,12 @@ static void _opp_table_kref_release(struct kref *kref)
if (!IS_ERR(opp_table->clk))
clk_put(opp_table->clk);
+ if (opp_table->paths) {
+ for (i = 0; i < opp_table->path_count; i++)
+ icc_put(opp_table->paths[i]);
+ kfree(opp_table->paths);
+ }
+
WARN_ON(!list_empty(&opp_table->opp_list));
list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
@@ -1229,19 +1315,23 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
struct dev_pm_opp *_opp_allocate(struct opp_table *table)
{
struct dev_pm_opp *opp;
- int count, supply_size;
+ int supply_count, supply_size, icc_size;
/* Allocate space for at least one supply */
- count = table->regulator_count > 0 ? table->regulator_count : 1;
- supply_size = sizeof(*opp->supplies) * count;
+ supply_count = table->regulator_count > 0 ? table->regulator_count : 1;
+ supply_size = sizeof(*opp->supplies) * supply_count;
+ icc_size = sizeof(*opp->bandwidth) * table->path_count;
/* allocate new OPP node and supplies structures */
- opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
+ opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL);
+
if (!opp)
return NULL;
/* Put the supplies at the end of the OPP structure as an empty array */
opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
+ if (icc_size)
+ opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count);
INIT_LIST_HEAD(&opp->node);
return opp;
@@ -1272,11 +1362,24 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
return true;
}
+int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
+{
+ if (opp1->rate != opp2->rate)
+ return opp1->rate < opp2->rate ? -1 : 1;
+ if (opp1->bandwidth && opp2->bandwidth &&
+ opp1->bandwidth[0].peak != opp2->bandwidth[0].peak)
+ return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1;
+ if (opp1->level != opp2->level)
+ return opp1->level < opp2->level ? -1 : 1;
+ return 0;
+}
+
static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
struct opp_table *opp_table,
struct list_head **head)
{
struct dev_pm_opp *opp;
+ int opp_cmp;
/*
* Insert new OPP in order of increasing frequency and discard if
@@ -1287,12 +1390,13 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
* loop.
*/
list_for_each_entry(opp, &opp_table->opp_list, node) {
- if (new_opp->rate > opp->rate) {
+ opp_cmp = _opp_compare_key(new_opp, opp);
+ if (opp_cmp > 0) {
*head = &opp->node;
continue;
}
- if (new_opp->rate < opp->rate)
+ if (opp_cmp < 0)
return 0;
/* Duplicate OPPs */
@@ -1656,6 +1760,13 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
+ if (opp_table->regulator_enabled) {
+ for (i = opp_table->regulator_count - 1; i >= 0; i--)
+ regulator_disable(opp_table->regulators[i]);
+
+ opp_table->regulator_enabled = false;
+ }
+
for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_put(opp_table->regulators[i]);
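
The core.c changes above make dev_pm_opp_set_rate() degrade gracefully: an empty OPP table scales only the clock, and a target frequency of 0 drops the interconnect bandwidth votes and the regulator enable taken on the first transition. A minimal consumer sketch under those assumptions, with hypothetical driver hooks around the real dev_pm_opp_set_rate() API:

#include <linux/pm_opp.h>

/* Hypothetical consumer hooks; only dev_pm_opp_set_rate() is the real API. */
static int my_dev_set_perf(struct device *dev, unsigned long freq)
{
        /*
         * With an empty OPP table this now behaves like clk_set_rate(),
         * so the same call works on SoCs with and without operating
         * points described in DT.
         */
        return dev_pm_opp_set_rate(dev, freq);
}

static int my_dev_suspend(struct device *dev)
{
        /*
         * A target of 0 removes the bandwidth vote on every interconnect
         * path and disables the regulator that the core enabled earlier.
         */
        return dev_pm_opp_set_rate(dev, 0);
}
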
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 609665e339b6..596c185b5dda 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -32,6 +32,47 @@ void opp_debug_remove_one(struct dev_pm_opp *opp)
debugfs_remove_recursive(opp->dentry);
}
+static ssize_t bw_name_read(struct file *fp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct icc_path *path = fp->private_data;
+ char buf[64];
+ int i;
+
+ i = scnprintf(buf, sizeof(buf), "%.62s\n", icc_get_name(path));
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, i);
+}
+
+static const struct file_operations bw_name_fops = {
+ .open = simple_open,
+ .read = bw_name_read,
+ .llseek = default_llseek,
+};
+
+static void opp_debug_create_bw(struct dev_pm_opp *opp,
+ struct opp_table *opp_table,
+ struct dentry *pdentry)
+{
+ struct dentry *d;
+ char name[11];
+ int i;
+
+ for (i = 0; i < opp_table->path_count; i++) {
+ snprintf(name, sizeof(name), "icc-path-%.1d", i);
+
+ /* Create per-path directory */
+ d = debugfs_create_dir(name, pdentry);
+
+ debugfs_create_file("name", S_IRUGO, d, opp_table->paths[i],
+ &bw_name_fops);
+ debugfs_create_u32("peak_bw", S_IRUGO, d,
+ &opp->bandwidth[i].peak);
+ debugfs_create_u32("avg_bw", S_IRUGO, d,
+ &opp->bandwidth[i].avg);
+ }
+}
+
static void opp_debug_create_supplies(struct dev_pm_opp *opp,
struct opp_table *opp_table,
struct dentry *pdentry)
@@ -94,6 +135,7 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
&opp->clock_latency_ns);
opp_debug_create_supplies(opp, opp_table, d);
+ opp_debug_create_bw(opp, opp_table, d);
opp->dentry = d;
}
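
For orientation, opp_debug_create_bw() adds one directory per interconnect path under each OPP's existing debugfs node; the layout below is illustrative only (device and rate names are placeholders, not captured output):

/sys/kernel/debug/opp/<device>/opp:<rate>/icc-path-0/name
/sys/kernel/debug/opp/<device>/opp:<rate>/icc-path-0/peak_bw
/sys/kernel/debug/opp/<device>/opp:<rate>/icc-path-0/avg_bw
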
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 9cd8f0adacae..9a5873591a40 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -332,6 +332,105 @@ free_required_opps:
return ret;
}
+static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
+{
+ struct device_node *np, *opp_np;
+ struct property *prop;
+
+ if (!opp_table) {
+ np = of_node_get(dev->of_node);
+ if (!np)
+ return -ENODEV;
+
+ opp_np = _opp_of_get_opp_desc_node(np, 0);
+ of_node_put(np);
+ } else {
+ opp_np = of_node_get(opp_table->np);
+ }
+
+ /* Let's not fail in case we are parsing opp-v1 bindings */
+ if (!opp_np)
+ return 0;
+
+ /* Checking only the first OPP is sufficient */
+ np = of_get_next_available_child(opp_np, NULL);
+ if (!np) {
+ dev_err(dev, "OPP table empty\n");
+ return -EINVAL;
+ }
+ of_node_put(opp_np);
+
+ prop = of_find_property(np, "opp-peak-kBps", NULL);
+ of_node_put(np);
+
+ if (!prop || !prop->length)
+ return 0;
+
+ return 1;
+}
+
+int dev_pm_opp_of_find_icc_paths(struct device *dev,
+ struct opp_table *opp_table)
+{
+ struct device_node *np;
+ int ret, i, count, num_paths;
+ struct icc_path **paths;
+
+ ret = _bandwidth_supported(dev, opp_table);
+ if (ret <= 0)
+ return ret;
+
+ ret = 0;
+
+ np = of_node_get(dev->of_node);
+ if (!np)
+ return 0;
+
+ count = of_count_phandle_with_args(np, "interconnects",
+ "#interconnect-cells");
+ of_node_put(np);
+ if (count < 0)
+ return 0;
+
+ /* two phandles when #interconnect-cells = <1> */
+ if (count % 2) {
+ dev_err(dev, "%s: Invalid interconnects values\n", __func__);
+ return -EINVAL;
+ }
+
+ num_paths = count / 2;
+ paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);
+ if (!paths)
+ return -ENOMEM;
+
+ for (i = 0; i < num_paths; i++) {
+ paths[i] = of_icc_get_by_index(dev, i);
+ if (IS_ERR(paths[i])) {
+ ret = PTR_ERR(paths[i]);
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "%s: Unable to get path%d: %d\n",
+ __func__, i, ret);
+ }
+ goto err;
+ }
+ }
+
+ if (opp_table) {
+ opp_table->paths = paths;
+ opp_table->path_count = num_paths;
+ return 0;
+ }
+
+err:
+ while (i--)
+ icc_put(paths[i]);
+
+ kfree(paths);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_find_icc_paths);
+
static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
struct device_node *np)
{
@@ -521,6 +620,90 @@ void dev_pm_opp_of_remove_table(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
+static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
+ struct device_node *np, bool peak)
+{
+ const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
+ struct property *prop;
+ int i, count, ret;
+ u32 *bw;
+
+ prop = of_find_property(np, name, NULL);
+ if (!prop)
+ return -ENODEV;
+
+ count = prop->length / sizeof(u32);
+ if (table->path_count != count) {
+ pr_err("%s: Mismatch between %s and paths (%d %d)\n",
+ __func__, name, count, table->path_count);
+ return -EINVAL;
+ }
+
+ bw = kmalloc_array(count, sizeof(*bw), GFP_KERNEL);
+ if (!bw)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(np, name, bw, count);
+ if (ret) {
+ pr_err("%s: Error parsing %s: %d\n", __func__, name, ret);
+ goto out;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (peak)
+ new_opp->bandwidth[i].peak = kBps_to_icc(bw[i]);
+ else
+ new_opp->bandwidth[i].avg = kBps_to_icc(bw[i]);
+ }
+
+out:
+ kfree(bw);
+ return ret;
+}
+
+static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table,
+ struct device_node *np, bool *rate_not_available)
+{
+ bool found = false;
+ u64 rate;
+ int ret;
+
+ ret = of_property_read_u64(np, "opp-hz", &rate);
+ if (!ret) {
+ /*
+ * Rate is defined as an unsigned long in clk API, and so
+ * casting explicitly to its type. Must be fixed once rate is 64
+ * bit guaranteed in clk API.
+ */
+ new_opp->rate = (unsigned long)rate;
+ found = true;
+ }
+ *rate_not_available = !!ret;
+
+ /*
+ * Bandwidth consists of peak and average (optional) values:
+ * opp-peak-kBps = <path1_value path2_value>;
+ * opp-avg-kBps = <path1_value path2_value>;
+ */
+ ret = _read_bw(new_opp, table, np, true);
+ if (!ret) {
+ found = true;
+ ret = _read_bw(new_opp, table, np, false);
+ }
+
+ /* The properties were found but we failed to parse them */
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ if (!of_property_read_u32(np, "opp-level", &new_opp->level))
+ found = true;
+
+ if (found)
+ return 0;
+
+ return ret;
+}
+
/**
* _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
* @opp_table: OPP table
@@ -558,26 +741,12 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
if (!new_opp)
return ERR_PTR(-ENOMEM);
- ret = of_property_read_u64(np, "opp-hz", &rate);
- if (ret < 0) {
- /* "opp-hz" is optional for devices like power domains. */
- if (!opp_table->is_genpd) {
- dev_err(dev, "%s: opp-hz not found\n", __func__);
- goto free_opp;
- }
-
- rate_not_available = true;
- } else {
- /*
- * Rate is defined as an unsigned long in clk API, and so
- * casting explicitly to its type. Must be fixed once rate is 64
- * bit guaranteed in clk API.
- */
- new_opp->rate = (unsigned long)rate;
+ ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
+ if (ret < 0 && !opp_table->is_genpd) {
+ dev_err(dev, "%s: opp key field not found\n", __func__);
+ goto free_opp;
}
- of_property_read_u32(np, "opp-level", &new_opp->level);
-
/* Check if the OPP supports hardware's hierarchy of versions or not */
if (!_opp_is_supported(dev, opp_table, np)) {
dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
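
The of.c side expects two endpoint phandles per path in the DT interconnects property (hence num_paths = count / 2) and per-OPP opp-peak-kBps / opp-avg-kBps arrays whose length matches the path count. Since dev_pm_opp_of_find_icc_paths() is exported, a driver could also call it with a NULL table to surface -EPROBE_DEFER before any OPP state is created; a sketch of such a probe, with hypothetical driver names:

#include <linux/platform_device.h>
#include <linux/pm_opp.h>

static int my_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        int ret;

        /*
         * With a NULL opp_table the helper only checks that each
         * interconnect path can be acquired (and puts them again),
         * propagating -EPROBE_DEFER before the OPP table exists.
         */
        ret = dev_pm_opp_of_find_icc_paths(dev, NULL);
        if (ret)
                return ret;

        /* The paths are acquired again when the table is allocated. */
        return dev_pm_opp_of_add_table(dev);
}
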
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index d14e27102730..e51646ff279e 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -12,6 +12,7 @@
#define __DRIVER_OPP_H__
#include <linux/device.h>
+#include <linux/interconnect.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
@@ -59,6 +60,7 @@ extern struct list_head opp_tables;
* @rate: Frequency in hertz
* @level: Performance level
* @supplies: Power supplies voltage/current values
+ * @bandwidth: Interconnect bandwidth values
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
* @required_opps: List of OPPs that are required by this OPP.
@@ -81,6 +83,7 @@ struct dev_pm_opp {
unsigned int level;
struct dev_pm_opp_supply *supplies;
+ struct dev_pm_opp_icc_bw *bandwidth;
unsigned long clock_latency_ns;
@@ -144,8 +147,11 @@ enum opp_table_access {
* @clk: Device's clock handle
* @regulators: Supply regulators
* @regulator_count: Number of power supply regulators. Its value can be -1
* (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt
* property).
+ * @regulator_enabled: Set to true if regulators were previously enabled.
+ * @paths: Interconnect path handles
+ * @path_count: Number of interconnect paths
* @genpd_performance_state: Device's power domain support performance state.
* @is_genpd: Marks if the OPP table belongs to a genpd.
* @set_opp: Platform specific set_opp callback
@@ -189,6 +195,9 @@ struct opp_table {
struct clk *clk;
struct regulator **regulators;
int regulator_count;
+ bool regulator_enabled;
+ struct icc_path **paths;
+ unsigned int path_count;
bool genpd_performance_state;
bool is_genpd;
@@ -211,6 +220,7 @@ struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_
void _dev_pm_opp_find_and_remove_table(struct device *dev);
struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
void _opp_free(struct dev_pm_opp *opp);
+int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
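
The new _opp_compare_key() declared above defines the ordering used for inserting and de-duplicating OPPs: rate first, then the peak bandwidth of the first interconnect path, then the performance level. A standalone sketch of that ordering, with a hypothetical key struct standing in for the relevant dev_pm_opp fields:

/* Hypothetical stand-in for the fields _opp_compare_key() examines. */
struct opp_key {
        unsigned long rate;     /* opp->rate */
        unsigned int peak_bw;   /* opp->bandwidth[0].peak; the real code
                                   skips this compare when either OPP has
                                   no bandwidth array */
        unsigned int level;     /* opp->level */
};

static int opp_key_cmp(const struct opp_key *a, const struct opp_key *b)
{
        if (a->rate != b->rate)
                return a->rate < b->rate ? -1 : 1;
        if (a->peak_bw != b->peak_bw)
                return a->peak_bw < b->peak_bw ? -1 : 1;
        if (a->level != b->level)
                return a->level < b->level ? -1 : 1;
        return 0;       /* identical keys are treated as a duplicate OPP */
}
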