author     Catalin Marinas <catalin.marinas@arm.com>    2017-04-12 10:41:13 +0100
committer  Catalin Marinas <catalin.marinas@arm.com>    2017-04-12 10:41:50 +0100
commit     494bc3cd3dd02e259d5db9372754e993e4a21902 (patch)
tree       b3feaba8a9d1e4f068870288c3a87a6cfa9c51c8 /arch/arm64/kernel
parent     d91750f12c79101028cb93dc35eed6989fae4405 (diff)
parent     f00fa5f4163b40c3ec8590d9a7bd845c19bf8d16 (diff)
Merge branch 'will/for-next/perf' into for-next/core
* will/for-next/perf:
arm64: pmuv3: use arm_pmu ACPI framework
arm64: pmuv3: handle !PMUv3 when probing
drivers/perf: arm_pmu: add ACPI framework
arm64: add function to get a cpu's MADT GICC table
drivers/perf: arm_pmu: split out platform device probe logic
drivers/perf: arm_pmu: move irq request/free into probe
drivers/perf: arm_pmu: split cpu-local irq request/free
drivers/perf: arm_pmu: rename irq request/free functions
drivers/perf: arm_pmu: handle no platform_device
drivers/perf: arm_pmu: simplify cpu_pmu_request_irqs()
drivers/perf: arm_pmu: factor out pmu registration
drivers/perf: arm_pmu: fold init into alloc
drivers/perf: arm_pmu: define armpmu_init_fn
drivers/perf: arm_pmu: remove pointless PMU disabling
perf: qcom: Add L3 cache PMU driver
drivers/perf: arm_pmu: split irq request from enable
drivers/perf: arm_pmu: manage interrupts per-cpu
drivers/perf: arm_pmu: rework per-cpu allocation
MAINTAINERS: Add file patterns for perf device tree bindings
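
The series listed above moves ACPI handling out of the arm64 PMU driver and into a shared arm_pmu ACPI framework; the arch diff below shows the consumer side of that change. As a rough orientation for the diff, the resulting registration flow is sketched here (the code is taken from the armv8_pmu_driver_init() hunk further down; the explanatory comment is editorial, not part of the commit):

	/*
	 * After this merge: DT/platform systems still register the
	 * platform driver, while ACPI systems hand the PMUv3 init
	 * callback to the new arm_pmu ACPI framework instead of
	 * matching a MIDR-based pmu_probe_info table at probe time.
	 */
	static int __init armv8_pmu_driver_init(void)
	{
		if (acpi_disabled)
			return platform_driver_register(&armv8_pmu_driver);
		else
			return arm_pmu_acpi_probe(armv8_pmuv3_init);
	}
	device_initcall(armv8_pmu_driver_init)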
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--   arch/arm64/kernel/perf_event.c   113
-rw-r--r--   arch/arm64/kernel/smp.c           10

2 files changed, 90 insertions, 33 deletions
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 57ae9d9ed9bb..98c749394c4b 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -957,11 +957,26 @@ static int armv8_vulcan_map_event(struct perf_event *event)
 				ARMV8_PMU_EVTYPE_EVENT);
 }
 
+struct armv8pmu_probe_info {
+	struct arm_pmu *pmu;
+	bool present;
+};
+
 static void __armv8pmu_probe_pmu(void *info)
 {
-	struct arm_pmu *cpu_pmu = info;
+	struct armv8pmu_probe_info *probe = info;
+	struct arm_pmu *cpu_pmu = probe->pmu;
+	u64 dfr0, pmuver;
 	u32 pmceid[2];
 
+	dfr0 = read_sysreg(id_aa64dfr0_el1);
+	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
+			ID_AA64DFR0_PMUVER_SHIFT);
+	if (pmuver != 1)
+		return;
+
+	probe->present = true;
+
 	/* Read the nb of CNTx counters supported from PMNC */
 	cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
 		& ARMV8_PMU_PMCR_N_MASK;
@@ -979,13 +994,27 @@ static void __armv8pmu_probe_pmu(void *info)
 
 static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
 {
-	return smp_call_function_any(&cpu_pmu->supported_cpus,
+	struct armv8pmu_probe_info probe = {
+		.pmu = cpu_pmu,
+		.present = false,
+	};
+	int ret;
+
+	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
 				    __armv8pmu_probe_pmu,
-				    cpu_pmu, 1);
+				    &probe, 1);
+	if (ret)
+		return ret;
+
+	return probe.present ? 0 : -ENODEV;
 }
 
-static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
+static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
 {
+	int ret = armv8pmu_probe_pmu(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->handle_irq		= armv8pmu_handle_irq,
 	cpu_pmu->enable			= armv8pmu_enable_event,
 	cpu_pmu->disable		= armv8pmu_disable_event,
@@ -997,78 +1026,104 @@ static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->reset			= armv8pmu_reset,
 	cpu_pmu->max_period		= (1LLU << 32) - 1,
 	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
+
+	return 0;
 }
 
 static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
 {
-	armv8_pmu_init(cpu_pmu);
+	int ret = armv8_pmu_init(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->name			= "armv8_pmuv3";
 	cpu_pmu->map_event		= armv8_pmuv3_map_event;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
 		&armv8_pmuv3_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv8_pmuv3_format_attr_group;
-	return armv8pmu_probe_pmu(cpu_pmu);
+
+	return 0;
 }
 
 static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv8_pmu_init(cpu_pmu);
+	int ret = armv8_pmu_init(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->name			= "armv8_cortex_a53";
 	cpu_pmu->map_event		= armv8_a53_map_event;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
 		&armv8_pmuv3_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv8_pmuv3_format_attr_group;
-	return armv8pmu_probe_pmu(cpu_pmu);
+
+	return 0;
 }
 
 static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv8_pmu_init(cpu_pmu);
+	int ret = armv8_pmu_init(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->name			= "armv8_cortex_a57";
 	cpu_pmu->map_event		= armv8_a57_map_event;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
 		&armv8_pmuv3_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv8_pmuv3_format_attr_group;
-	return armv8pmu_probe_pmu(cpu_pmu);
+
+	return 0;
 }
 
 static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv8_pmu_init(cpu_pmu);
+	int ret = armv8_pmu_init(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->name			= "armv8_cortex_a72";
 	cpu_pmu->map_event		= armv8_a57_map_event;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
 		&armv8_pmuv3_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv8_pmuv3_format_attr_group;
-	return armv8pmu_probe_pmu(cpu_pmu);
+
+	return 0;
 }
 
 static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv8_pmu_init(cpu_pmu);
+	int ret = armv8_pmu_init(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->name			= "armv8_cavium_thunder";
 	cpu_pmu->map_event		= armv8_thunder_map_event;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
 		&armv8_pmuv3_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv8_pmuv3_format_attr_group;
-	return armv8pmu_probe_pmu(cpu_pmu);
+
+	return 0;
 }
 
 static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv8_pmu_init(cpu_pmu);
+	int ret = armv8_pmu_init(cpu_pmu);
+	if (ret)
+		return ret;
+
 	cpu_pmu->name			= "armv8_brcm_vulcan";
 	cpu_pmu->map_event		= armv8_vulcan_map_event;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
 		&armv8_pmuv3_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
 		&armv8_pmuv3_format_attr_group;
-	return armv8pmu_probe_pmu(cpu_pmu);
+
+	return 0;
 }
 
 static const struct of_device_id armv8_pmu_of_device_ids[] = {
@@ -1081,24 +1136,9 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
 	{},
 };
 
-/*
- * Non DT systems have their micro/arch events probed at run-time.
- * A fairly complete list of generic events are provided and ones that
- * aren't supported by the current PMU are disabled.
- */
-static const struct pmu_probe_info armv8_pmu_probe_table[] = {
-	PMU_PROBE(0, 0, armv8_pmuv3_init), /* enable all defined counters */
-	{ /* sentinel value */ }
-};
-
 static int armv8_pmu_device_probe(struct platform_device *pdev)
 {
-	if (acpi_disabled)
-		return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
-					    NULL);
-
-	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
-				    armv8_pmu_probe_table);
+	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
 }
 
 static struct platform_driver armv8_pmu_driver = {
@@ -1109,4 +1149,11 @@ static struct platform_driver armv8_pmu_driver = {
 	.probe		= armv8_pmu_device_probe,
 };
 
-builtin_platform_driver(armv8_pmu_driver);
+static int __init armv8_pmu_driver_init(void)
+{
+	if (acpi_disabled)
+		return platform_driver_register(&armv8_pmu_driver);
+	else
+		return arm_pmu_acpi_probe(armv8_pmuv3_init);
+}
+device_initcall(armv8_pmu_driver_init)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ffee4e454ac5..596990039b43 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -521,6 +521,13 @@ static bool bootcpu_valid __initdata;
 static unsigned int cpu_count = 1;
 
 #ifdef CONFIG_ACPI
+static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];
+
+struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
+{
+	return &cpu_madt_gicc[cpu];
+}
+
 /*
  * acpi_map_gic_cpu_interface - parse processor MADT entry
  *
@@ -555,6 +562,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 			return;
 		}
 		bootcpu_valid = true;
+		cpu_madt_gicc[0] = *processor;
 		early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
 		return;
 	}
@@ -565,6 +573,8 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 	/* map the logical cpu id to cpu MPIDR */
 	cpu_logical_map(cpu_count) = hwid;
 
+	cpu_madt_gicc[cpu_count] = *processor;
+
 	/*
	 * Set-up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
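
The acpi_cpu_get_madt_gicc() helper added to smp.c above caches each CPU's MADT GICC entry so that the arm_pmu ACPI framework can look up per-CPU PMU interrupt information without re-parsing the MADT. A minimal, hypothetical consumer might look like the sketch below; example_pmu_irq_for_cpu() is illustrative only and not a function from drivers/perf/arm_pmu_acpi.c, though performance_interrupt is a real field of the standard struct acpi_madt_generic_interrupt:

	#include <linux/acpi.h>

	/*
	 * Illustrative only: read the PMU overflow interrupt for a CPU
	 * from its cached MADT GICC entry. The real framework also has
	 * to map the GSI and honour the trigger-mode flags.
	 */
	static int example_pmu_irq_for_cpu(int cpu)
	{
		struct acpi_madt_generic_interrupt *gicc = acpi_cpu_get_madt_gicc(cpu);

		if (!gicc)
			return -ENODEV;

		/* 0 means no performance interrupt is wired for this CPU */
		return gicc->performance_interrupt;
	}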