| author | Suzuki K Poulose <suzuki.poulose@arm.com> | 2016-09-09 14:07:10 +0100 |
|---|---|---|
| committer | Will Deacon <will.deacon@arm.com> | 2016-09-09 15:03:28 +0100 |
| commit | c47a1900ad710fd2c97127e2ba19da1df79cf733 (patch) | |
| tree | 280ee1f75639c56aca3f82abd3065f94b740c68f /arch/arm64 | |
| parent | 89ba26458b72422e0a1d85eb729a15220b204458 (diff) | |
arm64: Rearrange CPU errata workaround checks
Right now we run the workaround checks on a CPU from __cpuinfo_store_cpu().
There are some problems with that:
1) We initialise the system-wide CPU feature registers only after the
boot CPU updates its cpuinfo. So if a workaround depends on how a CPU ID
feature varies across CPUs (e.g., a check for cache line size mismatch),
we have no way of performing it cleanly for the boot CPU (see the sketch below).
2) It is out of place: __cpuinfo_store_cpu() in cpuinfo.c is not an
obvious home for a workaround check.
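
For context, a workaround of this kind has to compare the local CPU's copy of an ID register against the system-wide sanitised value, and that sanitised value only exists once the boot CPU has initialised the feature register infrastructure. Below is a minimal, hypothetical sketch of such a check; the helper name has_mismatched_dminline and the field extraction macros are illustrative, not the actual workaround added later in this series:

```c
#include <asm/cachetype.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/* CTR_EL0.DminLine, bits [19:16]: log2 of the smallest D-cache line size. */
#define EXAMPLE_DMINLINE_SHIFT	16
#define EXAMPLE_DMINLINE_MASK	0xf

/*
 * Hypothetical .matches() callback for an arm64_cpu_capabilities entry:
 * flag this CPU if it reports a smaller minimum cache line size than the
 * system-wide safe value.
 *
 * read_system_reg(SYS_CTR_EL0) returns the sanitised, system-wide CTR_EL0
 * value, which is only meaningful after the boot CPU has initialised the
 * CPU feature registers -- hence the ordering problem described above.
 */
static bool has_mismatched_dminline(const struct arm64_cpu_capabilities *entry,
				    int scope)
{
	u32 local = (read_cpuid_cachetype() >> EXAMPLE_DMINLINE_SHIFT) &
			EXAMPLE_DMINLINE_MASK;
	u32 system = (read_system_reg(SYS_CTR_EL0) >> EXAMPLE_DMINLINE_SHIFT) &
			EXAMPLE_DMINLINE_MASK;

	return local < system;
}
```

Such a callback would be wired into an errata table as a struct arm64_cpu_capabilities entry; the point here is only that it cannot run meaningfully on the boot CPU before the sanitised registers have been set up.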
This patch rearranges the CPU-specific capability (aka workaround) checks.
1) At the moment we use verify_local_cpu_capabilities() to check that a new
CPU has all the system-advertised features. Reuse this path on the secondary
CPUs to perform the workaround checks as well. To that end, rename
verify_local_cpu_capabilities() => check_local_cpu_capabilities(), which:

- if the system-wide capabilities haven't been initialised yet (i.e., the CPU
  is brought up during boot), updates the system-wide detected workarounds;
- otherwise (i.e., a CPU hotplugged in later), verifies that this CPU conforms
  to the system-wide capabilities.
2) The boot CPU updates the workarounds from smp_prepare_boot_cpu(), after we
have initialised the system-wide CPU feature values.
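
In summary, the resulting ordering looks roughly as follows (condensed from the diff below, with unrelated lines elided):

```c
/* Boot CPU: arch/arm64/kernel/smp.c */
void __init smp_prepare_boot_cpu(void)
{
	...
	cpuinfo_store_boot_cpu();		/* initialises the system-wide feature registers */
	save_boot_cpu_run_el();
	update_cpu_errata_workarounds();	/* errata checks now see the sanitised values */
}

/* Secondary CPUs: called from secondary_start_kernel() */
void check_local_cpu_capabilities(void)
{
	check_early_cpu_features();

	if (!sys_caps_initialised)
		update_cpu_errata_workarounds();	/* still booting: record this CPU's errata */
	else
		verify_local_cpu_capabilities();	/* hotplug: must conform to the system */
}
```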
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Andre Przywara <andre.przywara@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm64')
-rw-r--r-- | arch/arm64/include/asm/cpufeature.h | 4
-rw-r--r-- | arch/arm64/kernel/cpufeature.c | 30
-rw-r--r-- | arch/arm64/kernel/cpuinfo.c | 2
-rw-r--r-- | arch/arm64/kernel/smp.c | 8
4 files changed, 29 insertions, 15 deletions
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index ddea66642ce1..6806b86ab791 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -202,11 +202,11 @@ void __init setup_cpu_features(void);
 void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 			    const char *info);
 void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
+void check_local_cpu_capabilities(void);
+
 void update_cpu_errata_workarounds(void);
 void __init enable_errata_workarounds(void);
-
 void verify_local_cpu_errata_workarounds(void);
-void verify_local_cpu_capabilities(void);
 
 u64 read_system_reg(u32 id);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c74b8215991f..d577f263cc4a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1009,23 +1009,33 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
  * cannot do anything to fix it up and could cause unexpected failures. So
  * we park the CPU.
  */
-void verify_local_cpu_capabilities(void)
+static void verify_local_cpu_capabilities(void)
 {
+	verify_local_cpu_errata_workarounds();
+	verify_local_cpu_features(arm64_features);
+	verify_local_elf_hwcaps(arm64_elf_hwcaps);
+	if (system_supports_32bit_el0())
+		verify_local_elf_hwcaps(compat_elf_hwcaps);
+}
+
+void check_local_cpu_capabilities(void)
+{
+	/*
+	 * All secondary CPUs should conform to the early CPU features
+	 * in use by the kernel based on boot CPU.
+	 */
 	check_early_cpu_features();
 
 	/*
-	 * If we haven't computed the system capabilities, there is nothing
-	 * to verify.
+	 * If we haven't finalised the system capabilities, this CPU gets
+	 * a chance to update the errata work arounds.
+	 * Otherwise, this CPU should verify that it has all the system
+	 * advertised capabilities.
 	 */
 	if (!sys_caps_initialised)
-		return;
-
-	verify_local_cpu_errata_workarounds();
-	verify_local_cpu_features(arm64_features);
-	verify_local_elf_hwcaps(arm64_elf_hwcaps);
-	if (system_supports_32bit_el0())
-		verify_local_elf_hwcaps(compat_elf_hwcaps);
+		update_cpu_errata_workarounds();
+	else
+		verify_local_cpu_capabilities();
 }
 
 static void __init setup_feature_capabilities(void)
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 4fa7b73e27e4..b3d5b3e8fbcb 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -363,8 +363,6 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	}
 
 	cpuinfo_detect_icache_policy(info);
-
-	update_cpu_errata_workarounds();
 }
 
 void cpuinfo_store_cpu(void)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 8b048e6ec34a..a8e64095cd69 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -239,7 +239,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * this CPU ticks all of those. If it doesn't, the CPU will
 	 * fail to come online.
 	 */
-	verify_local_cpu_capabilities();
+	check_local_cpu_capabilities();
 
 	if (cpu_ops[cpu]->cpu_postboot)
 		cpu_ops[cpu]->cpu_postboot();
@@ -444,6 +444,12 @@ void __init smp_prepare_boot_cpu(void)
 	jump_label_init();
 	cpuinfo_store_boot_cpu();
 	save_boot_cpu_run_el();
+	/*
+	 * Run the errata work around checks on the boot CPU, once we have
+	 * initialised the cpu feature infrastructure from
+	 * cpuinfo_store_boot_cpu() above.
+	 */
+	update_cpu_errata_workarounds();
 }
 
 static u64 __init of_get_cpu_mpidr(struct device_node *dn)