author | Mark Rutland <mark.rutland@arm.com> | 2014-05-13 19:46:10 +0100
---|---|---
committer | Will Deacon <will.deacon@arm.com> | 2014-10-30 12:17:00 +0000
commit | 5ebd92003494a19ac5246ae385c073be16de1144 (patch) |
tree | 8c9367b21854ea92a92a355978523a33abf48a3c /arch/arm/kernel/perf_event_cpu.c |
parent | 116792508607002896b706fbad8310419fcc5742 (diff) |
arm: perf: fold percpu_pmu into pmu_hw_events
Currently the percpu_pmu pointers used as percpu_irq dev_id values are
defined separately from the other per-cpu accounting data, which makes
dynamically allocating the data (as will be required for systems with
heterogeneous CPUs) difficult.
This patch moves the percpu_pmu pointers into pmu_hw_events (which is
itself allocated per cpu), which will allow for easier dynamic
allocation. Both percpu and regular irqs are requested using percpu_pmu
pointers as tokens, freeing us from having to know whether an irq is
percpu within the handler, and thus avoiding a radix tree lookup on the
handler path.
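
To make the dev_id trick concrete, below is a rough sketch of a dispatch
routine built on the percpu_pmu token. It is modelled on the existing
armpmu_dispatch_irq() path but simplified and renamed (example_dispatch_irq
is hypothetical), so treat it as an illustration of the idea rather than the
patched handler:

/*
 * Illustrative only: a simplified dispatch handler showing how the
 * percpu_pmu token is consumed once both IRQ flavours pass it as dev_id.
 */
static irqreturn_t example_dispatch_irq(int irq, void *dev_id)
{
	/*
	 * For a regular IRQ, dev_id is per_cpu_ptr(&hw_events->percpu_pmu, cpu);
	 * for a percpu IRQ it is the percpu address of the same field. Either
	 * way it points at this CPU's arm_pmu pointer, so the handler needs no
	 * irq_is_percpu() check and no irq-to-PMU lookup.
	 */
	struct arm_pmu *pmu = *(struct arm_pmu **)dev_id;

	if (!pmu || !pmu->handle_irq)
		return IRQ_NONE;

	return pmu->handle_irq(irq, pmu);
}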
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
Tested-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm/kernel/perf_event_cpu.c')
-rw-r--r-- | arch/arm/kernel/perf_event_cpu.c | 14
1 file changed, 8 insertions, 6 deletions
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index fd24ad84dba6..b9391fa2368d 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -35,7 +35,6 @@
 /* Set at runtime when we know what CPU type we are. */
 static struct arm_pmu *cpu_pmu;
 
-static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
 static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
 
 /*
@@ -85,20 +84,21 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 {
 	int i, irq, irqs;
 	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
 	irq = platform_get_irq(pmu_device, 0);
 	if (irq >= 0 && irq_is_percpu(irq)) {
 		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
-		free_percpu_irq(irq, &percpu_pmu);
+		free_percpu_irq(irq, &hw_events->percpu_pmu);
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
 				continue;
 			irq = platform_get_irq(pmu_device, i);
 			if (irq >= 0)
-				free_irq(irq, cpu_pmu);
+				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, i));
 		}
 	}
 }
@@ -107,6 +107,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 {
 	int i, err, irq, irqs;
 	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
 
 	if (!pmu_device)
 		return -ENODEV;
@@ -119,7 +120,8 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 
 	irq = platform_get_irq(pmu_device, 0);
 	if (irq >= 0 && irq_is_percpu(irq)) {
-		err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
+		err = request_percpu_irq(irq, handler, "arm-pmu",
+					 &hw_events->percpu_pmu);
 		if (err) {
 			pr_err("unable to request IRQ%d for ARM PMU counters\n",
 				irq);
@@ -146,7 +148,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 
 		err = request_irq(irq, handler,
 				  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-				  cpu_pmu);
+				  per_cpu_ptr(&hw_events->percpu_pmu, i));
 		if (err) {
 			pr_err("unable to request IRQ%d for ARM PMU counters\n",
 				irq);
@@ -166,7 +168,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	for_each_possible_cpu(cpu) {
 		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
-		per_cpu(percpu_pmu, cpu) = cpu_pmu;
+		events->percpu_pmu = cpu_pmu;
 	}
 
 	cpu_pmu->hw_events = &cpu_hw_events;
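
For reference, the per-cpu structure that now carries the pointer looks
roughly like the sketch below. The authoritative definition lives in
arch/arm/include/asm/pmu.h and is outside the diffstat shown above, so the
surrounding fields are an approximation; only the percpu_pmu member is the
point of this patch:

/*
 * Approximate sketch of pmu_hw_events after this patch; field names other
 * than percpu_pmu are illustrative rather than authoritative.
 */
struct pmu_hw_events {
	/* Events currently scheduled onto this CPU's counters. */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/* One bit per hardware counter in use. */
	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);

	/* Serialises read/modify/write sequences on PMU registers. */
	raw_spinlock_t		pmu_lock;

	/*
	 * Added by this patch (in the asm/pmu.h hunk not shown here):
	 * back-pointer to the owning arm_pmu. Its address,
	 * per_cpu_ptr(&hw_events->percpu_pmu, cpu), is the dev_id token
	 * passed to request_irq()/request_percpu_irq() above.
	 */
	struct arm_pmu		*percpu_pmu;
};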