Diffstat (limited to 'drivers/clk/clk.c')
-rw-r--r--  drivers/clk/clk.c  |  205
1 file changed, 143 insertions(+), 62 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c42e608af6bb..7cf2c093cc54 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -277,6 +277,10 @@ static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
if (!d)
goto err_out;
+ if (clk->ops->debug_init)
+ if (clk->ops->debug_init(clk->hw, clk->dentry))
+ goto err_out;
+
ret = 0;
goto out;
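
The hunk above adds an optional per-clock debugfs hook. Below is a minimal provider-side sketch, assuming the op signature implied by the call site (an int-returning debug_init that receives the clk_hw and the clock's debugfs dentry); the foo_clk structure and function names are hypothetical.

#include <linux/clk-provider.h>
#include <linux/debugfs.h>

struct foo_clk {
        struct clk_hw hw;
        u32 div;
};

/* invoked from clk_debug_create_one() with the clock's own dentry */
static int foo_clk_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
        struct foo_clk *foo = container_of(hw, struct foo_clk, hw);

        /* expose the raw divider next to the common debugfs entries */
        debugfs_create_u32("div", 0444, dentry, &foo->div);

        return 0;       /* non-zero makes clk_debug_create_one() bail out */
}

static const struct clk_ops foo_clk_ops = {
        .debug_init     = foo_clk_debug_init,
        /* .recalc_rate, .set_rate, ... as for any other clock */
};
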
@@ -1339,8 +1343,11 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
if (clk->notifier_count)
ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
- if (ret & NOTIFY_STOP_MASK)
+ if (ret & NOTIFY_STOP_MASK) {
+ pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
+ __func__, clk->name, ret);
goto out;
+ }
hlist_for_each_entry(child, &clk->children, child_node) {
ret = __clk_speculate_rates(child, new_rate);
@@ -1588,7 +1595,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
/* notify that we are about to change rates */
fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
if (fail_clk) {
- pr_warn("%s: failed to set %s rate\n", __func__,
+ pr_debug("%s: failed to set %s rate\n", __func__,
fail_clk->name);
clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
ret = -EBUSY;
@@ -1977,9 +1984,28 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(__clk_register);
-static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
+/**
+ * clk_register - allocate a new clock, register it and return an opaque cookie
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * clk_register is the primary interface for populating the clock tree with new
+ * clock nodes. It returns a pointer to the newly allocated struct clk which
+ * cannot be dereferenced by driver code but may be used in conjunction with the
+ * rest of the clock API. In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
+ */
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
int i, ret;
+ struct clk *clk;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk) {
+ pr_err("%s: could not allocate clk\n", __func__);
+ ret = -ENOMEM;
+ goto fail_out;
+ }
clk->name = kstrdup(hw->init->name, GFP_KERNEL);
if (!clk->name) {
@@ -2019,7 +2045,7 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
ret = __clk_init(dev, clk);
if (!ret)
- return 0;
+ return clk;
fail_parent_names_copy:
while (--i >= 0)
@@ -2028,36 +2054,6 @@ fail_parent_names_copy:
fail_parent_names:
kfree(clk->name);
fail_name:
- return ret;
-}
-
-/**
- * clk_register - allocate a new clock, register it and return an opaque cookie
- * @dev: device that is registering this clock
- * @hw: link to hardware-specific clock data
- *
- * clk_register is the primary interface for populating the clock tree with new
- * clock nodes. It returns a pointer to the newly allocated struct clk which
- * cannot be dereferenced by driver code but may be used in conjuction with the
- * rest of the clock API. In the event of an error clk_register will return an
- * error code; drivers must test for an error code after calling clk_register.
- */
-struct clk *clk_register(struct device *dev, struct clk_hw *hw)
-{
- int ret;
- struct clk *clk;
-
- clk = kzalloc(sizeof(*clk), GFP_KERNEL);
- if (!clk) {
- pr_err("%s: could not allocate clk\n", __func__);
- ret = -ENOMEM;
- goto fail_out;
- }
-
- ret = _clk_register(dev, hw, clk);
- if (!ret)
- return clk;
-
kfree(clk);
fail_out:
return ERR_PTR(ret);
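
As the new kernel-doc stresses, the returned struct clk is an opaque cookie that must only be tested with IS_ERR()/PTR_ERR(). A short usage sketch with a hypothetical clock name, parent and the foo_clk_ops from the earlier sketch:

static const char *foo_parents[] = { "osc24m" };

static struct clk_init_data foo_init = {
        .name           = "foo",
        .ops            = &foo_clk_ops,
        .parent_names   = foo_parents,
        .num_parents    = ARRAY_SIZE(foo_parents),
};

static struct clk_hw foo_hw = {
        .init = &foo_init,
};

static int foo_clk_setup(struct device *dev)
{
        struct clk *clk;

        clk = clk_register(dev, &foo_hw);
        if (IS_ERR(clk))
                return PTR_ERR(clk);    /* never dereference the cookie itself */

        return 0;
}
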
@@ -2144,9 +2140,10 @@ void clk_unregister(struct clk *clk)
if (!hlist_empty(&clk->children)) {
struct clk *child;
+ struct hlist_node *t;
/* Reparent all children to the orphan list. */
- hlist_for_each_entry(child, &clk->children, child_node)
+ hlist_for_each_entry_safe(child, t, &clk->children, child_node)
clk_set_parent(child, NULL);
}
@@ -2166,7 +2163,7 @@ EXPORT_SYMBOL_GPL(clk_unregister);
static void devm_clk_release(struct device *dev, void *res)
{
- clk_unregister(res);
+ clk_unregister(*(struct clk **)res);
}
/**
@@ -2181,18 +2178,18 @@ static void devm_clk_release(struct device *dev, void *res)
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
struct clk *clk;
- int ret;
+ struct clk **clkp;
- clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
- if (!clk)
+ clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
+ if (!clkp)
return ERR_PTR(-ENOMEM);
- ret = _clk_register(dev, hw, clk);
- if (!ret) {
- devres_add(dev, clk);
+ clk = clk_register(dev, hw);
+ if (!IS_ERR(clk)) {
+ *clkp = clk;
+ devres_add(dev, clkp);
} else {
- devres_free(clk);
- clk = ERR_PTR(ret);
+ devres_free(clkp);
}
return clk;
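
devm_clk_register() now simply wraps clk_register() and parks the returned cookie in the devres entry, so the clock is unregistered automatically when the driver detaches. A probe-time sketch reusing the hypothetical foo_hw from above:

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        struct clk *clk;

        clk = devm_clk_register(&pdev->dev, &foo_hw);
        if (IS_ERR(clk))
                return PTR_ERR(clk);    /* clk_unregister() runs for us on detach */

        return 0;
}
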
@@ -2260,20 +2257,11 @@ void __clk_put(struct clk *clk)
* re-enter into the clk framework by calling any top-level clk APIs;
* this will cause a nested prepare_lock mutex.
*
- * Pre-change notifier callbacks will be passed the current, pre-change
- * rate of the clk via struct clk_notifier_data.old_rate. The new,
- * post-change rate of the clk is passed via struct
+ * In all notification cases (pre, post and abort rate change) the
+ * original clock rate is passed to the callback via struct
+ * clk_notifier_data.old_rate and the new frequency is passed via struct
* clk_notifier_data.new_rate.
*
- * Post-change notifiers will pass the now-current, post-change rate of
- * the clk in both struct clk_notifier_data.old_rate and struct
- * clk_notifier_data.new_rate.
- *
- * Abort-change notifiers are effectively the opposite of pre-change
- * notifiers: the original pre-change clk rate is passed in via struct
- * clk_notifier_data.new_rate and the failed post-change rate is passed
- * in via struct clk_notifier_data.old_rate.
- *
* clk_notifier_register() must be called from non-atomic context.
* Returns -EINVAL if called with null arguments, -ENOMEM upon
* allocation failure; otherwise, passes along the return value of
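
Given the revised semantics described above (old_rate always carries the original rate and new_rate the requested one, for pre-, post- and abort-change notifications alike), a consumer-side notifier might look like the sketch below; the 200 MHz limit and all names are hypothetical.

#include <linux/clk.h>
#include <linux/notifier.h>

static int foo_rate_notify(struct notifier_block *nb, unsigned long event,
                           void *data)
{
        struct clk_notifier_data *cnd = data;

        switch (event) {
        case PRE_RATE_CHANGE:
                if (cnd->new_rate > 200000000)  /* refuse rates we cannot follow */
                        return NOTIFY_BAD;
                break;
        case POST_RATE_CHANGE:
                /* reprogram local dividers for cnd->new_rate */
                break;
        case ABORT_RATE_CHANGE:
                /* undo whatever PRE_RATE_CHANGE prepared */
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block foo_rate_nb = {
        .notifier_call = foo_rate_notify,
};

/* registered from non-atomic context: clk_notifier_register(clk, &foo_rate_nb); */
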
@@ -2473,7 +2461,7 @@ EXPORT_SYMBOL_GPL(of_clk_del_provider);
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
struct of_clk_provider *provider;
- struct clk *clk = ERR_PTR(-ENOENT);
+ struct clk *clk = ERR_PTR(-EPROBE_DEFER);
/* Check if we have such a provider in our array */
list_for_each_entry(provider, &of_clk_providers, link) {
@@ -2506,8 +2494,12 @@ EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
const char *of_clk_get_parent_name(struct device_node *np, int index)
{
struct of_phandle_args clkspec;
+ struct property *prop;
const char *clk_name;
+ const __be32 *vp;
+ u32 pv;
int rc;
+ int count;
if (index < 0)
return NULL;
@@ -2517,8 +2509,22 @@ const char *of_clk_get_parent_name(struct device_node *np, int index)
if (rc)
return NULL;
+ index = clkspec.args_count ? clkspec.args[0] : 0;
+ count = 0;
+
+ /* if there is an indices property, use it to translate the index
+ * specified into an array offset for the clock-output-names property.
+ */
+ of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
+ if (index == pv) {
+ index = count;
+ break;
+ }
+ count++;
+ }
+
if (of_property_read_string_index(clkspec.np, "clock-output-names",
- clkspec.args_count ? clkspec.args[0] : 0,
+ index,
&clk_name) < 0)
clk_name = clkspec.np->name;
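
The clock-indices handling above maps a sparse output identifier onto an offset into the clock-output-names array. A brief sketch under a hypothetical binding (the oscillator node and function name are made up):

#include <linux/clk-provider.h>

/*
 * Assumed provider node:
 *
 *      osc: oscillator {
 *              #clock-cells = <1>;
 *              clock-indices = <1>, <3>;
 *              clock-output-names = "clka", "clkb";
 *      };
 *
 * A consumer specifier <&osc 3> is matched against clock-indices,
 * translated to array offset 1, and therefore resolves to "clkb".
 */
static const char *foo_parent_name(struct device_node *consumer_np)
{
        /* look up entry 0 of the consumer's "clocks" property */
        return of_clk_get_parent_name(consumer_np, 0);
}
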
@@ -2527,24 +2533,99 @@ const char *of_clk_get_parent_name(struct device_node *np, int index)
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
+struct clock_provider {
+ of_clk_init_cb_t clk_init_cb;
+ struct device_node *np;
+ struct list_head node;
+};
+
+static LIST_HEAD(clk_provider_list);
+
+/*
+ * This function looks for a parent clock. If there is one, then it
+ * checks that the provider for this parent clock was initialized, in
+ * which case the parent clock will be ready.
+ */
+static int parent_ready(struct device_node *np)
+{
+ int i = 0;
+
+ while (true) {
+ struct clk *clk = of_clk_get(np, i);
+
+ /* this parent is ready, we can check the next one */
+ if (!IS_ERR(clk)) {
+ clk_put(clk);
+ i++;
+ continue;
+ }
+
+ /* at least one parent is not ready, we exit now */
+ if (PTR_ERR(clk) == -EPROBE_DEFER)
+ return 0;
+
+ /*
+ * Here we assume that the device tree is written
+ * correctly. So an error means that there are no more
+ * parents. As we did not exit earlier, the previous
+ * parents are all ready. If the clock has no parents
+ * at all, there is nothing to wait for and it can be
+ * considered ready.
+ */
+ return 1;
+ }
+}
+
/**
* of_clk_init() - Scan and init clock providers from the DT
* @matches: array of compatible values and init functions for providers.
*
- * This function scans the device tree for matching clock providers and
- * calls their initialization functions
+ * This function scans the device tree for matching clock providers
+ * and calls their initialization functions, trying to follow the
+ * parent dependencies between providers while doing so.
*/
void __init of_clk_init(const struct of_device_id *matches)
{
const struct of_device_id *match;
struct device_node *np;
+ struct clock_provider *clk_provider, *next;
+ bool is_init_done;
+ bool force = false;
if (!matches)
matches = &__clk_of_table;
+ /* First prepare the list of the clocks providers */
for_each_matching_node_and_match(np, matches, &match) {
- of_clk_init_cb_t clk_init_cb = match->data;
- clk_init_cb(np);
+ struct clock_provider *parent =
+ kzalloc(sizeof(struct clock_provider), GFP_KERNEL);
+
+ parent->clk_init_cb = match->data;
+ parent->np = np;
+ list_add_tail(&parent->node, &clk_provider_list);
+ }
+
+ while (!list_empty(&clk_provider_list)) {
+ is_init_done = false;
+ list_for_each_entry_safe(clk_provider, next,
+ &clk_provider_list, node) {
+ if (force || parent_ready(clk_provider->np)) {
+ clk_provider->clk_init_cb(clk_provider->np);
+ list_del(&clk_provider->node);
+ kfree(clk_provider);
+ is_init_done = true;
+ }
+ }
+
+ /*
+ * We did not manage to initialize any of the remaining
+ * providers during the last pass, so initialize all of
+ * them unconditionally now, in case their clock parents
+ * turn out not to be mandatory.
+ */
+ if (!is_init_done)
+ force = true;
+
}
}
#endif
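
For context, platform clock drivers normally reach this path through CLK_OF_DECLARE(), which adds an entry to __clk_of_table; of_clk_init(NULL), usually called early from the architecture's timer/clock setup, then walks the table and, with the change above, keeps deferring any provider whose parents still return -EPROBE_DEFER. A minimal sketch with a hypothetical compatible string:

#include <linux/clk-provider.h>
#include <linux/of.h>

static void __init foo_clocks_init(struct device_node *np)
{
        /* parse np, register the clocks, then call of_clk_add_provider() */
}
CLK_OF_DECLARE(foo_clocks, "acme,foo-clocks", foo_clocks_init);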