author | Masahiro Yamada <yamada.masahiro@socionext.com> | 2016-04-26 09:11:13 +0100
committer | Russell King <rmk+kernel@armlinux.org.uk> | 2016-05-05 19:03:39 +0100
commit | 6427a840ff6aeaac36c59872b0b4b2040ed26c9b (patch)
tree | 09374292684b9d4d7fd4518854d028abd1398e7c /arch/arm
parent | 7274a69cd86f61602a49a3d0b64d29b465f46a15 (diff)
ARM: 8567/1: cache-uniphier: activate ways for secondary CPUs
This outer cache allows the active ways to be controlled independently
for each CPU, but currently nothing is done for secondary CPUs. In
other words, all the ways are locked for secondary CPUs by default.
This commit fixes that, so the outer cache delivers its full
performance on every CPU.
There are two possible ways to achieve this:
[1] Each CPU initializes the active ways for itself. This can be done
via the SSCLPDAWCR register. It is a banked register, so each
CPU sees its own instance of the register.
[2] The master CPU initializes the active ways for all the CPUs. This
is possible via the SSCDAWCARMR(N) registers, where all instances
of SSCLPDAWCR are mirrored. They are mapped at the address
SSCDAWCARMR + 4 * N, where N is the CPU number.
The outer cache framework does not support a per-CPU init callback,
so this commit adopts [2]: the master CPU iterates over the possible
CPUs, setting up the SSCDAWCARMR(N) registers.
Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
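For context, here is a minimal sketch of the adopted approach [2], reduced from the patch below: the boot (master) CPU walks every possible CPU and writes the mask of active, non-locked ways into that CPU's mirrored way-control register. The standalone function name and parameter list are illustrative only; the real change lives in __uniphier_cache_set_locked_ways() in the diff.

```c
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/types.h>

/*
 * Illustrative sketch of approach [2] (names are made up for this example):
 * the master CPU programs the SSCDAWCARMR(N) mirror registers, one per
 * possible CPU, so secondary CPUs get their active ways set up without
 * having to touch their own banked SSCLPDAWCR.
 */
static void sketch_activate_ways(void __iomem *way_ctrl_base,
				 u32 way_present_mask, u32 way_locked_mask)
{
	unsigned int cpu;

	/* Write the present-and-not-locked way mask into each CPU's mirror. */
	for_each_possible_cpu(cpu)
		writel_relaxed(~way_locked_mask & way_present_mask,
			       way_ctrl_base + 4 * cpu); /* SSCDAWCARMR(cpu) */
}
```

The point of the design choice is that the boot CPU can reach every mirror instance through one memory-mapped window, so no code needs to run on the secondary CPUs at outer-cache init time.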
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/mm/cache-uniphier.c | 26
1 file changed, 24 insertions(+), 2 deletions(-)
```diff
diff --git a/arch/arm/mm/cache-uniphier.c b/arch/arm/mm/cache-uniphier.c
index a6fa7b73fbe0..c8e2f4947223 100644
--- a/arch/arm/mm/cache-uniphier.c
+++ b/arch/arm/mm/cache-uniphier.c
@@ -96,6 +96,7 @@ struct uniphier_cache_data {
 	void __iomem *ctrl_base;
 	void __iomem *rev_base;
 	void __iomem *op_base;
+	void __iomem *way_ctrl_base;
 	u32 way_present_mask;
 	u32 way_locked_mask;
 	u32 nsets;
@@ -256,10 +257,13 @@ static void __init __uniphier_cache_set_locked_ways(
 					struct uniphier_cache_data *data,
 					u32 way_mask)
 {
+	unsigned int cpu;
+
 	data->way_locked_mask = way_mask & data->way_present_mask;
 
-	writel_relaxed(~data->way_locked_mask & data->way_present_mask,
-		       data->ctrl_base + UNIPHIER_SSCLPDAWCR);
+	for_each_possible_cpu(cpu)
+		writel_relaxed(~data->way_locked_mask & data->way_present_mask,
+			       data->way_ctrl_base + 4 * cpu);
 }
 
 static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
@@ -459,6 +463,8 @@ static int __init __uniphier_cache_init(struct device_node *np,
 		goto err;
 	}
 
+	data->way_ctrl_base = data->ctrl_base + 0xc00;
+
 	if (*cache_level == 2) {
 		u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
 		/*
@@ -467,6 +473,22 @@ static int __init __uniphier_cache_init(struct device_node *np,
 		 */
 		if (revision <= 0x16)
 			data->range_op_max_size = (u32)1 << 22;
+
+		/*
+		 * Unfortunatly, the offset address of active way control base
+		 * varies from SoC to SoC.
+		 */
+		switch (revision) {
+		case 0x11:	/* sLD3 */
+			data->way_ctrl_base = data->ctrl_base + 0x870;
+			break;
+		case 0x12:	/* LD4 */
+		case 0x16:	/* sld8 */
+			data->way_ctrl_base = data->ctrl_base + 0x840;
+			break;
+		default:
+			break;
+		}
 	}
 
 	data->range_op_max_size -= data->line_size;
```
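As a worked example of the per-SoC offsets picked in the last hunk (illustration only, the helper name is made up): on LD4 (revision 0x12) the mirror register for CPU N sits at ctrl_base + 0x840 + 4 * N, while SoCs not listed in the switch use the default ctrl_base + 0xc00 + 4 * N.

```c
#include <linux/io.h>

/*
 * Illustration only: compute the address of SSCDAWCARMR(cpu) from the
 * per-SoC way-control offset chosen in the patch above (0x870 for sLD3,
 * 0x840 for LD4/sLD8, 0xc00 otherwise).
 */
static void __iomem *way_ctrl_reg(void __iomem *ctrl_base,
				  unsigned long offset, unsigned int cpu)
{
	return ctrl_base + offset + 4 * cpu;
}
```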