| author | Colin Cross <ccross@android.com> | 2011-02-12 16:43:05 -0800 |
|---|---|---|
| committer | Colin Cross <ccross@android.com> | 2011-02-21 00:10:43 -0800 |
| commit | 4729fd7a7dfe7847b4870801ad12222adaeb016c (patch) | |
| tree | 4e91328db46cc6c5ddd91d6e75323020c4623c42 /arch/arm/mach-tegra/clock.c | |
| parent | f151961173bf28047d01b410969f05e485f56d7e (diff) | |
ARM: tegra: clock: Convert global lock to a lock per clock
Give each clock its own lock, and remove all lock traversals from
parent to child clocks to prevent AB-BA deadlocks.
This brings the locking in line with the common struct clk
patches and should make conversion simple.
Acked-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Colin Cross <ccross@android.com>
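The ordering rule the commit enforces can be seen in a minimal sketch, reduced from the clk_enable() path in the diff below. The struct is abbreviated to the fields needed for the example (it is not the full Tegra definition), and the ops->enable call and most error handling are omitted:

```c
/*
 * Minimal sketch of the child-to-parent lock ordering, reduced from the
 * clk_enable() path in the diff below. Abbreviated struct; the ops->enable
 * call and most error handling are left out.
 */
#include <linux/spinlock.h>

struct clk {
	spinlock_t spinlock;	/* per-clock lock, replaces the old global clock_lock */
	struct clk *parent;
	int refcnt;
	/* ... */
};

int clk_enable(struct clk *c)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&c->spinlock, flags);	/* child lock taken first */
	if (c->refcnt == 0 && c->parent)
		ret = clk_enable(c->parent);	/* recurse upward: parent lock taken second */
	if (!ret)
		c->refcnt++;
	spin_unlock_irqrestore(&c->spinlock, flags);
	return ret;
}
```

Because every code path takes locks strictly from child to parent, two CPUs operating on clocks in the same subtree can never hold the same pair of locks in opposite orders, which is what removes the AB-BA deadlock.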
Diffstat (limited to 'arch/arm/mach-tegra/clock.c')
-rw-r--r-- | arch/arm/mach-tegra/clock.c | 351 |
1 file changed, 226 insertions(+), 125 deletions(-)
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index 8fd96bfb0cde..aff4c5b8c378 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -18,83 +18,117 @@
 #include <linux/kernel.h>
 #include <linux/clk.h>
-#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/list.h>
 #include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+
+#include <mach/clk.h>
 
 #include "board.h"
 #include "clock.h"
 
+/*
+ * Locking:
+ *
+ * Each struct clk has a spinlock.
+ *
+ * To avoid AB-BA locking problems, locks must always be traversed from child
+ * clock to parent clock. For example, when enabling a clock, the clock's lock
+ * is taken, and then clk_enable is called on the parent, which take's the
+ * parent clock's lock. There is one exceptions to this ordering: When dumping
+ * the clock tree through debugfs. In this case, clk_lock_all is called,
+ * which attemps to iterate through the entire list of clocks and take every
+ * clock lock. If any call to spin_trylock fails, all locked clocks are
+ * unlocked, and the process is retried. When all the locks are held,
+ * the only clock operation that can be called is clk_get_rate_all_locked.
+ *
+ * Within a single clock, no clock operation can call another clock operation
+ * on itself, except for clk_get_rate_locked and clk_set_rate_locked. Any
+ * clock operation can call any other clock operation on any of it's possible
+ * parents.
+ *
+ * An additional mutex, clock_list_lock, is used to protect the list of all
+ * clocks.
+ *
+ * The clock operations must lock internally to protect against
+ * read-modify-write on registers that are shared by multiple clocks
+ */
+static DEFINE_MUTEX(clock_list_lock);
 static LIST_HEAD(clocks);
 
-static DEFINE_SPINLOCK(clock_lock);
-
 struct clk *tegra_get_clock_by_name(const char *name)
 {
 	struct clk *c;
 	struct clk *ret = NULL;
-	unsigned long flags;
-	spin_lock_irqsave(&clock_lock, flags);
+	mutex_lock(&clock_list_lock);
 	list_for_each_entry(c, &clocks, node) {
 		if (strcmp(c->name, name) == 0) {
 			ret = c;
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&clock_lock, flags);
+	mutex_unlock(&clock_list_lock);
 	return ret;
 }
 
-static void clk_recalculate_rate(struct clk *c)
+/* Must be called with c->spinlock held */
+static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
 {
 	u64 rate;
 
-	if (!c->parent)
-		return;
-
-	rate = c->parent->rate;
+	rate = clk_get_rate(p);
 
 	if (c->mul != 0 && c->div != 0) {
-		rate = rate * c->mul;
+		rate *= c->mul;
 		do_div(rate, c->div);
 	}
 
-	if (rate > c->max_rate)
-		pr_warn("clocks: Set clock %s to rate %llu, max is %lu\n",
-			c->name, rate, c->max_rate);
-
-	c->rate = rate;
+	return rate;
 }
 
-int clk_reparent(struct clk *c, struct clk *parent)
+/* Must be called with c->spinlock held */
+unsigned long clk_get_rate_locked(struct clk *c)
 {
-	c->parent = parent;
-	list_del(&c->sibling);
-	list_add_tail(&c->sibling, &parent->children);
-	return 0;
-}
+	unsigned long rate;
 
-static void propagate_rate(struct clk *c)
-{
-	struct clk *clkp;
+	if (c->parent)
+		rate = clk_predict_rate_from_parent(c, c->parent);
+	else
+		rate = c->rate;
 
-	list_for_each_entry(clkp, &c->children, sibling) {
-		clk_recalculate_rate(clkp);
-		propagate_rate(clkp);
-	}
+	return rate;
 }
 
-void clk_init(struct clk *c)
+unsigned long clk_get_rate(struct clk *c)
 {
 	unsigned long flags;
+	unsigned long rate;
 
-	spin_lock_irqsave(&clock_lock, flags);
+	spin_lock_irqsave(&c->spinlock, flags);
 
-	INIT_LIST_HEAD(&c->children);
-	INIT_LIST_HEAD(&c->sibling);
+	rate = clk_get_rate_locked(c);
+
+	spin_unlock_irqrestore(&c->spinlock, flags);
+
+	return rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+int clk_reparent(struct clk *c, struct clk *parent)
+{
+	c->parent = parent;
+	return 0;
+}
+
+void clk_init(struct clk *c)
+{
+	spin_lock_init(&c->spinlock);
 
 	if (c->ops && c->ops->init)
 		c->ops->init(c);
@@ -108,33 +142,31 @@ void clk_init(struct clk *c)
 		c->state = ON;
 	}
 
-	clk_recalculate_rate(c);
-
+	mutex_lock(&clock_list_lock);
 	list_add(&c->node, &clocks);
-
-	if (c->parent)
-		list_add_tail(&c->sibling, &c->parent->children);
-
-	spin_unlock_irqrestore(&clock_lock, flags);
+	mutex_unlock(&clock_list_lock);
 }
 
-int clk_enable_locked(struct clk *c)
+int clk_enable(struct clk *c)
 {
-	int ret;
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->spinlock, flags);
 
 	if (c->refcnt == 0) {
 		if (c->parent) {
-			ret = clk_enable_locked(c->parent);
+			ret = clk_enable(c->parent);
 			if (ret)
-				return ret;
+				goto out;
 		}
 
 		if (c->ops && c->ops->enable) {
 			ret = c->ops->enable(c);
 			if (ret) {
 				if (c->parent)
-					clk_disable_locked(c->parent);
-				return ret;
+					clk_disable(c->parent);
+				goto out;
 			}
 
 			c->state = ON;
 #ifdef CONFIG_DEBUG_FS
@@ -143,27 +175,21 @@ int clk_enable_locked(struct clk *c)
 		}
 	}
 	c->refcnt++;
-
-	return 0;
+out:
+	spin_unlock_irqrestore(&c->spinlock, flags);
+	return ret;
 }
+EXPORT_SYMBOL(clk_enable);
 
-int clk_enable(struct clk *c)
+void clk_disable(struct clk *c)
 {
-	int ret;
 	unsigned long flags;
 
-	spin_lock_irqsave(&clock_lock, flags);
-	ret = clk_enable_locked(c);
-	spin_unlock_irqrestore(&clock_lock, flags);
+	spin_lock_irqsave(&c->spinlock, flags);
 
-	return ret;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable_locked(struct clk *c)
-{
 	if (c->refcnt == 0) {
 		WARN(1, "Attempting to disable clock %s with refcnt 0", c->name);
+		spin_unlock_irqrestore(&c->spinlock, flags);
 		return;
 	}
 	if (c->refcnt == 1) {
@@ -171,49 +197,39 @@ void clk_disable_locked(struct clk *c)
 			c->ops->disable(c);
 
 		if (c->parent)
-			clk_disable_locked(c->parent);
+			clk_disable(c->parent);
 
 		c->state = OFF;
 	}
 	c->refcnt--;
-}
 
-void clk_disable(struct clk *c)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&clock_lock, flags);
-	clk_disable_locked(c);
-	spin_unlock_irqrestore(&clock_lock, flags);
+	spin_unlock_irqrestore(&c->spinlock, flags);
 }
 EXPORT_SYMBOL(clk_disable);
 
-int clk_set_parent_locked(struct clk *c, struct clk *parent)
+int clk_set_parent(struct clk *c, struct clk *parent)
 {
 	int ret;
+	unsigned long flags;
+	unsigned long new_rate;
+	unsigned long old_rate;
 
-	if (!c->ops || !c->ops->set_parent)
-		return -ENOSYS;
+	spin_lock_irqsave(&c->spinlock, flags);
 
-	ret = c->ops->set_parent(c, parent);
-
-	if (ret)
-		return ret;
-
-	clk_recalculate_rate(c);
+	if (!c->ops || !c->ops->set_parent) {
+		ret = -ENOSYS;
+		goto out;
+	}
 
-	propagate_rate(c);
+	new_rate = clk_predict_rate_from_parent(c, parent);
+	old_rate = clk_get_rate_locked(c);
 
-	return 0;
-}
+	ret = c->ops->set_parent(c, parent);
+	if (ret)
+		goto out;
 
-int clk_set_parent(struct clk *c, struct clk *parent)
-{
-	int ret;
-	unsigned long flags;
-	spin_lock_irqsave(&clock_lock, flags);
-	ret = clk_set_parent_locked(c, parent);
-	spin_unlock_irqrestore(&clock_lock, flags);
+out:
+	spin_unlock_irqrestore(&c->spinlock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(clk_set_parent);
@@ -226,62 +242,75 @@ EXPORT_SYMBOL(clk_get_parent);
 
 int clk_set_rate_locked(struct clk *c, unsigned long rate)
 {
-	int ret;
-
-	if (rate > c->max_rate)
-		rate = c->max_rate;
-
 	if (!c->ops || !c->ops->set_rate)
 		return -ENOSYS;
 
-	ret = c->ops->set_rate(c, rate);
-
-	if (ret)
-		return ret;
-
-	clk_recalculate_rate(c);
-
-	propagate_rate(c);
+	if (rate > c->max_rate)
+		rate = c->max_rate;
 
-	return 0;
+	return c->ops->set_rate(c, rate);
 }
 
 int clk_set_rate(struct clk *c, unsigned long rate)
 {
-	int ret = 0;
+	int ret;
 	unsigned long flags;
 
-	spin_lock_irqsave(&clock_lock, flags);
+	spin_lock_irqsave(&c->spinlock, flags);
+
 	ret = clk_set_rate_locked(c, rate);
-	spin_unlock_irqrestore(&clock_lock, flags);
+
+	spin_unlock_irqrestore(&c->spinlock, flags);
 
 	return ret;
 }
 EXPORT_SYMBOL(clk_set_rate);
 
-unsigned long clk_get_rate(struct clk *c)
-{
-	unsigned long flags;
-	unsigned long ret;
-
-	spin_lock_irqsave(&clock_lock, flags);
+/* Must be called with clocks lock and all indvidual clock locks held */
+unsigned long clk_get_rate_all_locked(struct clk *c)
+{
+	u64 rate;
+	int mul = 1;
+	int div = 1;
+	struct clk *p = c;
+
+	while (p) {
+		c = p;
+		if (c->mul != 0 && c->div != 0) {
+			mul *= c->mul;
+			div *= c->div;
+		}
+		p = c->parent;
+	}
 
-	ret = c->rate;
+	rate = c->rate;
+	rate *= mul;
+	do_div(rate, div);
 
-	spin_unlock_irqrestore(&clock_lock, flags);
-	return ret;
+	return rate;
 }
-EXPORT_SYMBOL(clk_get_rate);
 
 long clk_round_rate(struct clk *c, unsigned long rate)
 {
-	if (!c->ops || !c->ops->round_rate)
-		return -ENOSYS;
+	unsigned long flags;
+	long ret;
+
+	spin_lock_irqsave(&c->spinlock, flags);
+
+	if (!c->ops || !c->ops->round_rate) {
+		ret = -ENOSYS;
+		goto out;
+	}
 
 	if (rate > c->max_rate)
 		rate = c->max_rate;
 
-	return c->ops->round_rate(c, rate);
+	ret = c->ops->round_rate(c, rate);
+
+out:
+	spin_unlock_irqrestore(&c->spinlock, flags);
+	return ret;
 }
 EXPORT_SYMBOL(clk_round_rate);
 
@@ -364,13 +393,75 @@ void __init tegra_init_clock(void)
 }
 
 #ifdef CONFIG_DEBUG_FS
+
+static int __clk_lock_all_spinlocks(void)
+{
+	struct clk *c;
+
+	list_for_each_entry(c, &clocks, node)
+		if (!spin_trylock(&c->spinlock))
+			goto unlock_spinlocks;
+
+	return 0;
+
+unlock_spinlocks:
+	list_for_each_entry_continue_reverse(c, &clocks, node)
+		spin_unlock(&c->spinlock);
+
+	return -EAGAIN;
+}
+
+static void __clk_unlock_all_spinlocks(void)
+{
+	struct clk *c;
+
+	list_for_each_entry_reverse(c, &clocks, node)
+		spin_unlock(&c->spinlock);
+}
+
+/*
+ * This function retries until it can take all locks, and may take
+ * an arbitrarily long time to complete.
+ * Must be called with irqs enabled, returns with irqs disabled
+ * Must be called with clock_list_lock held
+ */
+static void clk_lock_all(void)
+{
+	int ret;
+retry:
+	local_irq_disable();
+
+	ret = __clk_lock_all_spinlocks();
+	if (ret)
+		goto failed_spinlocks;
+
+	/* All locks taken successfully, return */
+	return;
+
+failed_spinlocks:
+	local_irq_enable();
+	yield();
+	goto retry;
+}
+
+/*
+ * Unlocks all clocks after a clk_lock_all
+ * Must be called with irqs disabled, returns with irqs enabled
+ * Must be called with clock_list_lock held
+ */
+static void clk_unlock_all(void)
+{
+	__clk_unlock_all_spinlocks();
+
+	local_irq_enable();
+}
+
 static struct dentry *clk_debugfs_root;
 
 
 static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
 {
 	struct clk *child;
-	struct clk *safe;
 	const char *state = "uninit";
 	char div[8] = {0};
 
@@ -401,8 +492,12 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
 		c->rate > c->max_rate ? '!' : ' ',
 		!c->set ? '*' : ' ',
 		30 - level * 3, c->name,
-		state, c->refcnt, div, c->rate);
-	list_for_each_entry_safe(child, safe, &c->children, sibling) {
+		state, c->refcnt, div, clk_get_rate_all_locked(c));
+
+	list_for_each_entry(child, &clocks, node) {
+		if (child->parent != c)
+			continue;
+
 		clock_tree_show_one(s, child, level + 1);
 	}
 }
@@ -410,14 +505,20 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
 static int clock_tree_show(struct seq_file *s, void *data)
 {
 	struct clk *c;
-	unsigned long flags;
 	seq_printf(s, " clock state ref div rate\n");
 	seq_printf(s, "--------------------------------------------------------------\n");
-	spin_lock_irqsave(&clock_lock, flags);
+
+	mutex_lock(&clock_list_lock);
+
+	clk_lock_all();
+
 	list_for_each_entry(c, &clocks, node)
 		if (c->parent == NULL)
 			clock_tree_show_one(s, c, 0);
-	spin_unlock_irqrestore(&clock_lock, flags);
+
+	clk_unlock_all();
+
+	mutex_unlock(&clock_list_lock);
 
 	return 0;
 }
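With every per-clock lock held (the debugfs dump path above), clk_get_rate_all_locked() avoids re-entering clk_get_rate() on each ancestor: it walks from the clock up to the root once, accumulating every multiplier and divider, and scales the root rate at the end. A self-contained userspace illustration of that accumulation follows; the struct fields and the example tree are made up for the sketch and are not the Tegra definitions:

```c
/*
 * Standalone illustration of the parent walk in clk_get_rate_all_locked():
 * accumulate each level's mul/div while climbing to the root, then scale
 * the root rate once. Hypothetical struct and values, for illustration only.
 */
#include <stdio.h>

struct clk {
	const char *name;
	struct clk *parent;
	unsigned long rate;	/* meaningful only for the root clock */
	unsigned int mul, div;	/* 0/0 means "no scaling at this level" */
};

static unsigned long rate_all(struct clk *c)
{
	unsigned long long rate;
	unsigned int mul = 1, div = 1;
	struct clk *p = c;

	while (p) {			/* walk child -> parent, accumulating ratios */
		c = p;
		if (c->mul != 0 && c->div != 0) {
			mul *= c->mul;
			div *= c->div;
		}
		p = c->parent;
	}

	rate = (unsigned long long)c->rate * mul;	/* c now points at the root */
	return rate / div;
}

int main(void)
{
	struct clk pll  = { "pll",  NULL,  408000000, 0, 0 };
	struct clk div2 = { "div2", &pll,  0, 1, 2 };
	struct clk div4 = { "div4", &div2, 0, 1, 2 };

	printf("%lu\n", rate_all(&div4));	/* 408 MHz / 2 / 2 = 102000000 */
	return 0;
}
```

Doing the arithmetic once at the root, rather than querying each parent's rate, is what lets the tree dump run while all the spinlocks are held: no other clock operation has to be re-entered.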