Diffstat (limited to 'arch/arm64/kernel/smp.c')
-rw-r--r--  arch/arm64/kernel/smp.c | 250
1 file changed, 187 insertions(+), 63 deletions(-)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 2cb008177252..50fb4696654e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -17,6 +17,7 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/acpi.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
@@ -248,20 +249,20 @@ static int op_cpu_kill(unsigned int cpu)
 	 * time and hope that it's dead, so let's skip the wait and just hope.
 	 */
 	if (!cpu_ops[cpu]->cpu_kill)
-		return 1;
+		return 0;
 
 	return cpu_ops[cpu]->cpu_kill(cpu);
 }
 
-static DECLARE_COMPLETION(cpu_died);
-
 /*
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
 void __cpu_die(unsigned int cpu)
 {
-	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+	int err;
+
+	if (!cpu_wait_death(cpu, 5)) {
 		pr_crit("CPU%u: cpu didn't die\n", cpu);
 		return;
 	}
@@ -273,8 +274,10 @@ void __cpu_die(unsigned int cpu)
 	 * verify that it has really left the kernel before we consider
 	 * clobbering anything it might still be using.
 	 */
-	if (!op_cpu_kill(cpu))
-		pr_warn("CPU%d may not have shut down cleanly\n", cpu);
+	err = op_cpu_kill(cpu);
+	if (err)
+		pr_warn("CPU%d may not have shut down cleanly: %d\n",
+			cpu, err);
 }
 
 /*
@@ -294,7 +297,7 @@ void cpu_die(void)
 	local_irq_disable();
 
 	/* Tell __cpu_die() that this CPU is now safe to dispose of */
-	complete(&cpu_died);
+	(void)cpu_report_death();
 
 	/*
 	 * Actually shutdown the CPU. This must never fail. The specific hotplug
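The hunks above replace the file-local completion with the generic cpu_wait_death()/cpu_report_death() helpers (the 5000 ms timeout becomes 5 seconds), and switch op_cpu_kill() to the usual 0-or-negative-errno convention, which is why __cpu_die() now warns whenever the return value is non-zero. A minimal standalone sketch of that wait/report handshake, written as plain C with a polling loop and invented names rather than the kernel implementation:

/*
 * Illustration only: the dying side publishes its state and the
 * requesting side polls it with a timeout, instead of both sides
 * sharing a single completion. The real helpers live in the kernel;
 * every name here is made up.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static _Atomic int cpu_state;		/* 0 = alive, 1 = dead */

static void report_death(void)		/* runs on the CPU going down */
{
	atomic_store(&cpu_state, 1);
}

static bool wait_death(int seconds)	/* runs on the requesting CPU */
{
	time_t deadline = time(NULL) + seconds;

	while (time(NULL) < deadline)
		if (atomic_load(&cpu_state))
			return true;	/* peer reported death in time */
	return false;			/* timed out; peer may still run */
}

int main(void)
{
	report_death();			/* normally called on another CPU */
	printf("died: %d\n", wait_death(5));
	return 0;
}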
@@ -318,57 +321,158 @@ void __init smp_prepare_boot_cpu(void)
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 }
 
+static u64 __init of_get_cpu_mpidr(struct device_node *dn)
+{
+	const __be32 *cell;
+	u64 hwid;
+
+	/*
+	 * A cpu node with missing "reg" property is
+	 * considered invalid to build a cpu_logical_map
+	 * entry.
+	 */
+	cell = of_get_property(dn, "reg", NULL);
+	if (!cell) {
+		pr_err("%s: missing reg property\n", dn->full_name);
+		return INVALID_HWID;
+	}
+
+	hwid = of_read_number(cell, of_n_addr_cells(dn));
+	/*
+	 * Non affinity bits must be set to 0 in the DT
+	 */
+	if (hwid & ~MPIDR_HWID_BITMASK) {
+		pr_err("%s: invalid reg property\n", dn->full_name);
+		return INVALID_HWID;
+	}
+	return hwid;
+}
+
+/*
+ * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
+ * entries and check for duplicates. If any is found just ignore the
+ * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
+ * matching valid MPIDR values.
+ */
+static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
+{
+	unsigned int i;
+
+	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
+		if (cpu_logical_map(i) == hwid)
+			return true;
+	return false;
+}
+
+/*
+ * Initialize cpu operations for a logical cpu and
+ * set it in the possible mask on success
+ */
+static int __init smp_cpu_setup(int cpu)
+{
+	if (cpu_read_ops(cpu))
+		return -ENODEV;
+
+	if (cpu_ops[cpu]->cpu_init(cpu))
+		return -ENODEV;
+
+	set_cpu_possible(cpu, true);
+
+	return 0;
+}
+
+static bool bootcpu_valid __initdata;
+static unsigned int cpu_count = 1;
+
+#ifdef CONFIG_ACPI
+/*
+ * acpi_map_gic_cpu_interface - parse processor MADT entry
+ *
+ * Carry out sanity checks on MADT processor entry and initialize
+ * cpu_logical_map on success
+ */
+static void __init
+acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
+{
+	u64 hwid = processor->arm_mpidr;
+
+	if (!(processor->flags & ACPI_MADT_ENABLED)) {
+		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
+		return;
+	}
+
+	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
+		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
+		return;
+	}
+
+	if (is_mpidr_duplicate(cpu_count, hwid)) {
+		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
+		return;
+	}
+
+	/* Check if GICC structure of boot CPU is available in the MADT */
+	if (cpu_logical_map(0) == hwid) {
+		if (bootcpu_valid) {
+			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
+			       hwid);
+			return;
+		}
+		bootcpu_valid = true;
+		return;
+	}
+
+	if (cpu_count >= NR_CPUS)
+		return;
+
+	/* map the logical cpu id to cpu MPIDR */
+	cpu_logical_map(cpu_count) = hwid;
+
+	cpu_count++;
+}
+
+static int __init
+acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
+			     const unsigned long end)
+{
+	struct acpi_madt_generic_interrupt *processor;
+
+	processor = (struct acpi_madt_generic_interrupt *)header;
+	if (BAD_MADT_GICC_ENTRY(processor, end))
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	acpi_map_gic_cpu_interface(processor);
+
+	return 0;
+}
+#else
+#define acpi_table_parse_madt(...)	do { } while (0)
+#endif
+
 /*
  * Enumerate the possible CPU set from the device tree and build the
  * cpu logical map array containing MPIDR values related to logical
  * cpus. Assumes that cpu_logical_map(0) has already been initialized.
  */
-void __init of_smp_init_cpus(void)
+void __init of_parse_and_init_cpus(void)
 {
 	struct device_node *dn = NULL;
-	unsigned int i, cpu = 1;
-	bool bootcpu_valid = false;
 
 	while ((dn = of_find_node_by_type(dn, "cpu"))) {
-		const u32 *cell;
-		u64 hwid;
+		u64 hwid = of_get_cpu_mpidr(dn);
 
-		/*
-		 * A cpu node with missing "reg" property is
-		 * considered invalid to build a cpu_logical_map
-		 * entry.
-		 */
-		cell = of_get_property(dn, "reg", NULL);
-		if (!cell) {
-			pr_err("%s: missing reg property\n", dn->full_name);
+		if (hwid == INVALID_HWID)
 			goto next;
-		}
 
-		hwid = of_read_number(cell, of_n_addr_cells(dn));
-		/*
-		 * Non affinity bits must be set to 0 in the DT
-		 */
-		if (hwid & ~MPIDR_HWID_BITMASK) {
-			pr_err("%s: invalid reg property\n", dn->full_name);
+		if (is_mpidr_duplicate(cpu_count, hwid)) {
+			pr_err("%s: duplicate cpu reg properties in the DT\n",
+				dn->full_name);
 			goto next;
 		}
 
 		/*
-		 * Duplicate MPIDRs are a recipe for disaster. Scan
-		 * all initialized entries and check for
-		 * duplicates. If any is found just ignore the cpu.
-		 * cpu_logical_map was initialized to INVALID_HWID to
-		 * avoid matching valid MPIDR values.
-		 */
-		for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
-			if (cpu_logical_map(i) == hwid) {
-				pr_err("%s: duplicate cpu reg properties in the DT\n",
-					dn->full_name);
-				goto next;
-			}
-		}
-
-		/*
 		 * The numbering scheme requires that the boot CPU
 		 * must be assigned logical id 0. Record it so that
 		 * the logical map built from DT is validated and can
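The new of_get_cpu_mpidr() and is_mpidr_duplicate() helpers gate every enumerated CPU on two checks: the "reg" value may carry only affinity bits, and it must not already sit in cpu_logical_map. A standalone sketch of both checks follows; the 0xff00ffffff mask mirrors arm64's MPIDR_HWID_BITMASK (Aff3 in bits [39:32], Aff2..Aff0 in bits [23:0]), but that value, like every name below, is an assumption of the sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HWID_BITMASK	0xff00ffffffULL		/* affinity fields only (assumed) */
#define INVALID_HWID	(~0ULL)
#define MAX_CPUS	8

static uint64_t logical_map[MAX_CPUS];		/* filled with INVALID_HWID below */

static bool hwid_valid(uint64_t hwid)
{
	/* non-affinity bits must be zero, as required of the DT "reg" value */
	return (hwid & ~HWID_BITMASK) == 0;
}

static bool hwid_duplicate(unsigned int next, uint64_t hwid)
{
	/* slot 0 is the boot CPU; scan only the slots populated so far */
	for (unsigned int i = 1; i < next && i < MAX_CPUS; i++)
		if (logical_map[i] == hwid)
			return true;
	return false;
}

int main(void)
{
	for (unsigned int i = 0; i < MAX_CPUS; i++)
		logical_map[i] = INVALID_HWID;
	logical_map[0] = 0x0;				/* boot CPU */
	logical_map[1] = 0x100;				/* Aff1 = 1, Aff0 = 0 */

	printf("%d\n", hwid_valid(0x100));		/* 1: affinity bits only */
	printf("%d\n", hwid_valid(0x80000000));		/* 0: bit 31 is not affinity */
	printf("%d\n", hwid_duplicate(2, 0x100));	/* 1: already in slot 1 */
	return 0;
}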
@@ -392,38 +496,58 @@ void __init of_smp_init_cpus(void)
 			continue;
 		}
 
-		if (cpu >= NR_CPUS)
-			goto next;
-
-		if (cpu_read_ops(dn, cpu) != 0)
-			goto next;
-
-		if (cpu_ops[cpu]->cpu_init(dn, cpu))
+		if (cpu_count >= NR_CPUS)
 			goto next;
 
 		pr_debug("cpu logical map 0x%llx\n", hwid);
-		cpu_logical_map(cpu) = hwid;
+		cpu_logical_map(cpu_count) = hwid;
 next:
-		cpu++;
+		cpu_count++;
 	}
+}
+
+/*
+ * Enumerate the possible CPU set from the device tree or ACPI and build the
+ * cpu logical map array containing MPIDR values related to logical
+ * cpus. Assumes that cpu_logical_map(0) has already been initialized.
+ */
+void __init smp_init_cpus(void)
+{
+	int i;
 
-	/* sanity check */
-	if (cpu > NR_CPUS)
-		pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
-			   cpu, NR_CPUS);
+	if (acpi_disabled)
+		of_parse_and_init_cpus();
+	else
+		/*
+		 * do a walk of MADT to determine how many CPUs
+		 * we have including disabled CPUs, and get information
+		 * we need for SMP init
+		 */
+		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+				      acpi_parse_gic_cpu_interface, 0);
+
+	if (cpu_count > NR_CPUS)
+		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
+			cpu_count, NR_CPUS);
 
 	if (!bootcpu_valid) {
-		pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n");
+		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
 		return;
 	}
 
 	/*
-	 * All the cpus that made it to the cpu_logical_map have been
-	 * validated so set them as possible cpus.
+	 * We need to set the cpu_logical_map entries before enabling
+	 * the cpus so that cpu processor description entries (DT cpu nodes
+	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
+	 * with entries in cpu_logical_map while initializing the cpus.
+	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
 	 */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_logical_map(i) != INVALID_HWID)
-			set_cpu_possible(i, true);
+	for (i = 1; i < NR_CPUS; i++) {
+		if (cpu_logical_map(i) != INVALID_HWID) {
+			if (smp_cpu_setup(i))
+				cpu_logical_map(i) = INVALID_HWID;
+		}
+	}
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -569,7 +693,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
 	if ((unsigned)ipinr < NR_IPI) {
-		trace_ipi_entry(ipi_types[ipinr]);
+		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
 		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
 	}
 
@@ -612,7 +736,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	}
 
 	if ((unsigned)ipinr < NR_IPI)
-		trace_ipi_exit(ipi_types[ipinr]);
+		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
 
 	set_irq_regs(old_regs);
 }
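The resulting smp_init_cpus() separates enumeration from initialization: the logical map is filled first, from either the DT walk or the MADT walk, and only afterwards is each entry handed to smp_cpu_setup(), which sets the CPU possible on success while failed entries are invalidated. A standalone sketch of that two-phase flow, with hypothetical stand-in names throughout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CPUS	4
#define INVALID_HWID	(~0ULL)

/* slot 0 is the boot CPU; the rest start out invalid */
static uint64_t logical_map[MAX_CPUS] = {
	0x0, INVALID_HWID, INVALID_HWID, INVALID_HWID
};

static bool use_acpi;			/* stand-in for !acpi_disabled */

static void parse_dt(void)		/* stand-in for of_parse_and_init_cpus() */
{
	logical_map[1] = 0x1;
	logical_map[2] = 0x100;
}

static void parse_acpi(void)		/* stand-in for the MADT walk */
{
	logical_map[1] = 0x1;
}

static int setup_cpu(int cpu)		/* stand-in for smp_cpu_setup() */
{
	return cpu == 2 ? -1 : 0;	/* pretend CPU 2 has no usable enable method */
}

int main(void)
{
	/* phase one: enumerate from whichever firmware table is in use */
	if (use_acpi)
		parse_acpi();
	else
		parse_dt();

	/* phase two: skip the boot CPU, invalidate entries whose set-up fails */
	for (int i = 1; i < MAX_CPUS; i++)
		if (logical_map[i] != INVALID_HWID && setup_cpu(i))
			logical_map[i] = INVALID_HWID;

	for (int i = 0; i < MAX_CPUS; i++)
		printf("cpu%d: %#llx\n", i, (unsigned long long)logical_map[i]);
	return 0;
}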