author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-21 12:54:49 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-21 12:54:49 -0700
commit | 5d70f79b5ef6ea2de4f72a37b2d96e2601e40a22 (patch)
tree | a0d6de0930ba83ecf4629c2e2e261f5eaa2d8f33 /arch/arm
parent | 888a6f77e0418b049f83d37547c209b904d30af4 (diff)
parent | 750ed158bf6c782d2813da1bca2c824365a0b777 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (163 commits)
tracing: Fix compile issue for trace_sched_wakeup.c
[S390] hardirq: remove pointless header file includes
[IA64] Move local_softirq_pending() definition
perf, powerpc: Fix power_pmu_event_init to not use event->ctx
ftrace: Remove recursion between recordmcount and scripts/mod/empty
jump_label: Add COND_STMT(), reducer wrappery
perf: Optimize sw events
perf: Use jump_labels to optimize the scheduler hooks
jump_label: Add atomic_t interface
jump_label: Use more consistent naming
perf, hw_breakpoint: Fix crash in hw_breakpoint creation
perf: Find task before event alloc
perf: Fix task refcount bugs
perf: Fix group moving
irq_work: Add generic hardirq context callbacks
perf_events: Fix transaction recovery in group_sched_in()
perf_events: Fix bogus AMD64 generic TLB events
perf_events: Fix bogus context time tracking
tracing: Remove parent recording in latency tracer graph options
tracing: Use one prologue for the preempt irqs off tracer function tracers
...
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/Kconfig | 1
-rw-r--r-- | arch/arm/include/asm/perf_event.h | 12
-rw-r--r-- | arch/arm/kernel/perf_event.c | 212
-rw-r--r-- | arch/arm/oprofile/Makefile | 4
-rw-r--r-- | arch/arm/oprofile/common.c | 311
5 files changed, 113 insertions, 427 deletions
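
The bulk of the arch/arm/kernel/perf_event.c change below is the move from the old hw_perf_event_init()/hw_perf_enable()/hw_perf_disable() entry points to the struct pmu callback interface introduced on this branch. A condensed sketch of the resulting shape follows — the callback names, flags, and registration call are taken from the diff itself; the empty bodies and the demo_pmu_init() wrapper are placeholders for illustration, not the real implementations:

```c
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/perf_event.h>

/* Reject event types this PMU does not handle so the core can try another PMU. */
static int armpmu_event_init(struct perf_event *event)		{ return -ENOENT; }
/* Claim a hardware counter; start it immediately if PERF_EF_START is set. */
static int armpmu_add(struct perf_event *event, int flags)	{ return 0; }
/* Stop the event and release its counter. */
static void armpmu_del(struct perf_event *event, int flags)	{ }
/* (Re)program the sample period and enable the counter. */
static void armpmu_start(struct perf_event *event, int flags)	{ }
/* Disable the counter and fold its value back into the event count. */
static void armpmu_stop(struct perf_event *event, int flags)	{ }
static void armpmu_read(struct perf_event *event)		{ }
/* Whole-PMU on/off, replacing the old hw_perf_enable()/hw_perf_disable(). */
static void armpmu_enable(struct pmu *pmu)			{ }
static void armpmu_disable(struct pmu *pmu)			{ }

static struct pmu pmu = {
	.pmu_enable	= armpmu_enable,
	.pmu_disable	= armpmu_disable,
	.event_init	= armpmu_event_init,
	.add		= armpmu_add,
	.del		= armpmu_del,
	.start		= armpmu_start,
	.stop		= armpmu_stop,
	.read		= armpmu_read,
};

static int __init demo_pmu_init(void)
{
	/* Single-argument form, as used by init_hw_perf_events() in the diff. */
	return perf_pmu_register(&pmu);
}
arch_initcall(demo_pmu_init);
```

With this shape, per-event scheduling (add/del/start/stop) and whole-PMU batching (pmu_enable/pmu_disable) are driven entirely by the perf core, which is why the diff also drops the perf_max_events bookkeeping from the ARM init code.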
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 9c26ba7244fb..9103904b3dab 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -23,6 +23,7 @@ config ARM select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZO select HAVE_KERNEL_LZMA + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select PERF_USE_VMALLOC select HAVE_REGS_AND_STACK_ACCESS_API diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index b5799a3b7117..c4aa4e8c6af9 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -12,18 +12,6 @@ #ifndef __ARM_PERF_EVENT_H__ #define __ARM_PERF_EVENT_H__ -/* - * NOP: on *most* (read: all supported) ARM platforms, the performance - * counter interrupts are regular interrupts and not an NMI. This - * means that when we receive the interrupt we can call - * perf_event_do_pending() that handles all of the work with - * interrupts disabled. - */ -static inline void -set_perf_event_pending(void) -{ -} - /* ARM performance counters start from 1 (in the cp15 accesses) so use the * same indexes here for consistency. */ #define PERF_EVENT_INDEX_OFFSET 1 diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index ecbb0288e5dd..49643b1467e6 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -123,6 +123,12 @@ armpmu_get_max_events(void) } EXPORT_SYMBOL_GPL(armpmu_get_max_events); +int perf_num_counters(void) +{ + return armpmu_get_max_events(); +} +EXPORT_SYMBOL_GPL(perf_num_counters); + #define HW_OP_UNSUPPORTED 0xFFFF #define C(_x) \ @@ -221,46 +227,56 @@ again: } static void -armpmu_disable(struct perf_event *event) +armpmu_read(struct perf_event *event) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; - - WARN_ON(idx < 0); - - clear_bit(idx, cpuc->active_mask); - armpmu->disable(hwc, idx); - - barrier(); - armpmu_event_update(event, hwc, idx); - cpuc->events[idx] = NULL; - clear_bit(idx, cpuc->used_mask); + /* Don't read disabled counters! */ + if (hwc->idx < 0) + return; - perf_event_update_userpage(event); + armpmu_event_update(event, hwc, hwc->idx); } static void -armpmu_read(struct perf_event *event) +armpmu_stop(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; - /* Don't read disabled counters! */ - if (hwc->idx < 0) + if (!armpmu) return; - armpmu_event_update(event, hwc, hwc->idx); + /* + * ARM pmu always has to update the counter, so ignore + * PERF_EF_UPDATE, see comments in armpmu_start(). + */ + if (!(hwc->state & PERF_HES_STOPPED)) { + armpmu->disable(hwc, hwc->idx); + barrier(); /* why? */ + armpmu_event_update(event, hwc, hwc->idx); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } } static void -armpmu_unthrottle(struct perf_event *event) +armpmu_start(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; + if (!armpmu) + return; + + /* + * ARM pmu always has to reprogram the period, so ignore + * PERF_EF_RELOAD, see the comment below. + */ + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; /* * Set the period again. Some counters can't be stopped, so when we - * were throttled we simply disabled the IRQ source and the counter + * were stopped we simply disabled the IRQ source and the counter * may have been left counting. If we don't do this step then we may * get an interrupt too soon or *way* too late if the overflow has * happened since disabling. 
@@ -269,14 +285,33 @@ armpmu_unthrottle(struct perf_event *event) armpmu->enable(hwc, hwc->idx); } +static void +armpmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + WARN_ON(idx < 0); + + clear_bit(idx, cpuc->active_mask); + armpmu_stop(event, PERF_EF_UPDATE); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); +} + static int -armpmu_enable(struct perf_event *event) +armpmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; int idx; int err = 0; + perf_pmu_disable(event->pmu); + /* If we don't have a space for the counter then finish early. */ idx = armpmu->get_event_idx(cpuc, hwc); if (idx < 0) { @@ -293,25 +328,19 @@ armpmu_enable(struct perf_event *event) cpuc->events[idx] = event; set_bit(idx, cpuc->active_mask); - /* Set the period for the event. */ - armpmu_event_set_period(event, hwc, idx); - - /* Enable the event. */ - armpmu->enable(hwc, idx); + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + armpmu_start(event, PERF_EF_RELOAD); /* Propagate our changes to the userspace mapping. */ perf_event_update_userpage(event); out: + perf_pmu_enable(event->pmu); return err; } -static struct pmu pmu = { - .enable = armpmu_enable, - .disable = armpmu_disable, - .unthrottle = armpmu_unthrottle, - .read = armpmu_read, -}; +static struct pmu pmu; static int validate_event(struct cpu_hw_events *cpuc, @@ -491,20 +520,29 @@ __hw_perf_event_init(struct perf_event *event) return err; } -const struct pmu * -hw_perf_event_init(struct perf_event *event) +static int armpmu_event_init(struct perf_event *event) { int err = 0; + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + + default: + return -ENOENT; + } + if (!armpmu) - return ERR_PTR(-ENODEV); + return -ENODEV; event->destroy = hw_perf_event_destroy; if (!atomic_inc_not_zero(&active_events)) { - if (atomic_read(&active_events) > perf_max_events) { + if (atomic_read(&active_events) > armpmu->num_events) { atomic_dec(&active_events); - return ERR_PTR(-ENOSPC); + return -ENOSPC; } mutex_lock(&pmu_reserve_mutex); @@ -518,17 +556,16 @@ hw_perf_event_init(struct perf_event *event) } if (err) - return ERR_PTR(err); + return err; err = __hw_perf_event_init(event); if (err) hw_perf_event_destroy(event); - return err ? ERR_PTR(err) : &pmu; + return err; } -void -hw_perf_enable(void) +static void armpmu_enable(struct pmu *pmu) { /* Enable all of the perf events on hardware. */ int idx; @@ -549,13 +586,23 @@ hw_perf_enable(void) armpmu->start(); } -void -hw_perf_disable(void) +static void armpmu_disable(struct pmu *pmu) { if (armpmu) armpmu->stop(); } +static struct pmu pmu = { + .pmu_enable = armpmu_enable, + .pmu_disable = armpmu_disable, + .event_init = armpmu_event_init, + .add = armpmu_add, + .del = armpmu_del, + .start = armpmu_start, + .stop = armpmu_stop, + .read = armpmu_read, +}; + /* * ARMv6 Performance counter handling code. * @@ -1045,7 +1092,7 @@ armv6pmu_handle_irq(int irq_num, * platforms that can have the PMU interrupts raised as an NMI, this * will not work. 
*/ - perf_event_do_pending(); + irq_work_run(); return IRQ_HANDLED; } @@ -2021,7 +2068,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) * platforms that can have the PMU interrupts raised as an NMI, this * will not work. */ - perf_event_do_pending(); + irq_work_run(); return IRQ_HANDLED; } @@ -2389,7 +2436,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev) armpmu->disable(hwc, idx); } - perf_event_do_pending(); + irq_work_run(); /* * Re-enable the PMU. @@ -2716,7 +2763,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev) armpmu->disable(hwc, idx); } - perf_event_do_pending(); + irq_work_run(); /* * Re-enable the PMU. @@ -2933,14 +2980,12 @@ init_hw_perf_events(void) armpmu = &armv6pmu; memcpy(armpmu_perf_cache_map, armv6_perf_cache_map, sizeof(armv6_perf_cache_map)); - perf_max_events = armv6pmu.num_events; break; case 0xB020: /* ARM11mpcore */ armpmu = &armv6mpcore_pmu; memcpy(armpmu_perf_cache_map, armv6mpcore_perf_cache_map, sizeof(armv6mpcore_perf_cache_map)); - perf_max_events = armv6mpcore_pmu.num_events; break; case 0xC080: /* Cortex-A8 */ armv7pmu.id = ARM_PERF_PMU_ID_CA8; @@ -2952,7 +2997,6 @@ init_hw_perf_events(void) /* Reset PMNC and read the nb of CNTx counters supported */ armv7pmu.num_events = armv7_reset_read_pmnc(); - perf_max_events = armv7pmu.num_events; break; case 0xC090: /* Cortex-A9 */ armv7pmu.id = ARM_PERF_PMU_ID_CA9; @@ -2964,7 +3008,6 @@ init_hw_perf_events(void) /* Reset PMNC and read the nb of CNTx counters supported */ armv7pmu.num_events = armv7_reset_read_pmnc(); - perf_max_events = armv7pmu.num_events; break; } /* Intel CPUs [xscale]. */ @@ -2975,13 +3018,11 @@ init_hw_perf_events(void) armpmu = &xscale1pmu; memcpy(armpmu_perf_cache_map, xscale_perf_cache_map, sizeof(xscale_perf_cache_map)); - perf_max_events = xscale1pmu.num_events; break; case 2: armpmu = &xscale2pmu; memcpy(armpmu_perf_cache_map, xscale_perf_cache_map, sizeof(xscale_perf_cache_map)); - perf_max_events = xscale2pmu.num_events; break; } } @@ -2991,9 +3032,10 @@ init_hw_perf_events(void) arm_pmu_names[armpmu->id], armpmu->num_events); } else { pr_info("no hardware support available\n"); - perf_max_events = -1; } + perf_pmu_register(&pmu); + return 0; } arch_initcall(init_hw_perf_events); @@ -3001,13 +3043,6 @@ arch_initcall(init_hw_perf_events); /* * Callchain handling code. 
*/ -static inline void -callchain_store(struct perf_callchain_entry *entry, - u64 ip) -{ - if (entry->nr < PERF_MAX_STACK_DEPTH) - entry->ip[entry->nr++] = ip; -} /* * The registers we're interested in are at the end of the variable @@ -3039,7 +3074,7 @@ user_backtrace(struct frame_tail *tail, if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail))) return NULL; - callchain_store(entry, buftail.lr); + perf_callchain_store(entry, buftail.lr); /* * Frame pointers should strictly progress back up the stack @@ -3051,16 +3086,11 @@ user_backtrace(struct frame_tail *tail, return buftail.fp - 1; } -static void -perf_callchain_user(struct pt_regs *regs, - struct perf_callchain_entry *entry) +void +perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { struct frame_tail *tail; - callchain_store(entry, PERF_CONTEXT_USER); - - if (!user_mode(regs)) - regs = task_pt_regs(current); tail = (struct frame_tail *)regs->ARM_fp - 1; @@ -3078,56 +3108,18 @@ callchain_trace(struct stackframe *fr, void *data) { struct perf_callchain_entry *entry = data; - callchain_store(entry, fr->pc); + perf_callchain_store(entry, fr->pc); return 0; } -static void -perf_callchain_kernel(struct pt_regs *regs, - struct perf_callchain_entry *entry) +void +perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) { struct stackframe fr; - callchain_store(entry, PERF_CONTEXT_KERNEL); fr.fp = regs->ARM_fp; fr.sp = regs->ARM_sp; fr.lr = regs->ARM_lr; fr.pc = regs->ARM_pc; walk_stackframe(&fr, callchain_trace, entry); } - -static void -perf_do_callchain(struct pt_regs *regs, - struct perf_callchain_entry *entry) -{ - int is_user; - - if (!regs) - return; - - is_user = user_mode(regs); - - if (!current || !current->pid) - return; - - if (is_user && current->state != TASK_RUNNING) - return; - - if (!is_user) - perf_callchain_kernel(regs, entry); - - if (current->mm) - perf_callchain_user(regs, entry); -} - -static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); - -struct perf_callchain_entry * -perf_callchain(struct pt_regs *regs) -{ - struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry); - - entry->nr = 0; - perf_do_callchain(regs, entry); - return entry; -} diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile index e666eafed152..b2215c61cdf0 100644 --- a/arch/arm/oprofile/Makefile +++ b/arch/arm/oprofile/Makefile @@ -6,4 +6,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ oprofilefs.o oprofile_stats.o \ timer_int.o ) +ifeq ($(CONFIG_HW_PERF_EVENTS),y) +DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o) +endif + oprofile-y := $(DRIVER_OBJS) common.o diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c index 72e09eb642dd..8aa974491dfc 100644 --- a/arch/arm/oprofile/common.c +++ b/arch/arm/oprofile/common.c @@ -25,139 +25,10 @@ #include <asm/ptrace.h> #ifdef CONFIG_HW_PERF_EVENTS -/* - * Per performance monitor configuration as set via oprofilefs. - */ -struct op_counter_config { - unsigned long count; - unsigned long enabled; - unsigned long event; - unsigned long unit_mask; - unsigned long kernel; - unsigned long user; - struct perf_event_attr attr; -}; - -static int op_arm_enabled; -static DEFINE_MUTEX(op_arm_mutex); - -static struct op_counter_config *counter_config; -static struct perf_event **perf_events[nr_cpumask_bits]; -static int perf_num_counters; - -/* - * Overflow callback for oprofile. 
- */ -static void op_overflow_handler(struct perf_event *event, int unused, - struct perf_sample_data *data, struct pt_regs *regs) +char *op_name_from_perf_id(void) { - int id; - u32 cpu = smp_processor_id(); - - for (id = 0; id < perf_num_counters; ++id) - if (perf_events[cpu][id] == event) - break; - - if (id != perf_num_counters) - oprofile_add_sample(regs, id); - else - pr_warning("oprofile: ignoring spurious overflow " - "on cpu %u\n", cpu); -} - -/* - * Called by op_arm_setup to create perf attributes to mirror the oprofile - * settings in counter_config. Attributes are created as `pinned' events and - * so are permanently scheduled on the PMU. - */ -static void op_perf_setup(void) -{ - int i; - u32 size = sizeof(struct perf_event_attr); - struct perf_event_attr *attr; - - for (i = 0; i < perf_num_counters; ++i) { - attr = &counter_config[i].attr; - memset(attr, 0, size); - attr->type = PERF_TYPE_RAW; - attr->size = size; - attr->config = counter_config[i].event; - attr->sample_period = counter_config[i].count; - attr->pinned = 1; - } -} - -static int op_create_counter(int cpu, int event) -{ - int ret = 0; - struct perf_event *pevent; - - if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL)) - return ret; - - pevent = perf_event_create_kernel_counter(&counter_config[event].attr, - cpu, -1, - op_overflow_handler); - - if (IS_ERR(pevent)) { - ret = PTR_ERR(pevent); - } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) { - perf_event_release_kernel(pevent); - pr_warning("oprofile: failed to enable event %d " - "on CPU %d\n", event, cpu); - ret = -EBUSY; - } else { - perf_events[cpu][event] = pevent; - } - - return ret; -} + enum arm_perf_pmu_ids id = armpmu_get_pmu_id(); -static void op_destroy_counter(int cpu, int event) -{ - struct perf_event *pevent = perf_events[cpu][event]; - - if (pevent) { - perf_event_release_kernel(pevent); - perf_events[cpu][event] = NULL; - } -} - -/* - * Called by op_arm_start to create active perf events based on the - * perviously configured attributes. - */ -static int op_perf_start(void) -{ - int cpu, event, ret = 0; - - for_each_online_cpu(cpu) { - for (event = 0; event < perf_num_counters; ++event) { - ret = op_create_counter(cpu, event); - if (ret) - goto out; - } - } - -out: - return ret; -} - -/* - * Called by op_arm_stop at the end of a profiling run. 
- */ -static void op_perf_stop(void) -{ - int cpu, event; - - for_each_online_cpu(cpu) - for (event = 0; event < perf_num_counters; ++event) - op_destroy_counter(cpu, event); -} - - -static char *op_name_from_perf_id(enum arm_perf_pmu_ids id) -{ switch (id) { case ARM_PERF_PMU_ID_XSCALE1: return "arm/xscale1"; @@ -176,116 +47,6 @@ static char *op_name_from_perf_id(enum arm_perf_pmu_ids id) } } -static int op_arm_create_files(struct super_block *sb, struct dentry *root) -{ - unsigned int i; - - for (i = 0; i < perf_num_counters; i++) { - struct dentry *dir; - char buf[4]; - - snprintf(buf, sizeof buf, "%d", i); - dir = oprofilefs_mkdir(sb, root, buf); - oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled); - oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event); - oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count); - oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); - oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); - oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); - } - - return 0; -} - -static int op_arm_setup(void) -{ - spin_lock(&oprofilefs_lock); - op_perf_setup(); - spin_unlock(&oprofilefs_lock); - return 0; -} - -static int op_arm_start(void) -{ - int ret = -EBUSY; - - mutex_lock(&op_arm_mutex); - if (!op_arm_enabled) { - ret = 0; - op_perf_start(); - op_arm_enabled = 1; - } - mutex_unlock(&op_arm_mutex); - return ret; -} - -static void op_arm_stop(void) -{ - mutex_lock(&op_arm_mutex); - if (op_arm_enabled) - op_perf_stop(); - op_arm_enabled = 0; - mutex_unlock(&op_arm_mutex); -} - -#ifdef CONFIG_PM -static int op_arm_suspend(struct platform_device *dev, pm_message_t state) -{ - mutex_lock(&op_arm_mutex); - if (op_arm_enabled) - op_perf_stop(); - mutex_unlock(&op_arm_mutex); - return 0; -} - -static int op_arm_resume(struct platform_device *dev) -{ - mutex_lock(&op_arm_mutex); - if (op_arm_enabled && op_perf_start()) - op_arm_enabled = 0; - mutex_unlock(&op_arm_mutex); - return 0; -} - -static struct platform_driver oprofile_driver = { - .driver = { - .name = "arm-oprofile", - }, - .resume = op_arm_resume, - .suspend = op_arm_suspend, -}; - -static struct platform_device *oprofile_pdev; - -static int __init init_driverfs(void) -{ - int ret; - - ret = platform_driver_register(&oprofile_driver); - if (ret) - goto out; - - oprofile_pdev = platform_device_register_simple( - oprofile_driver.driver.name, 0, NULL, 0); - if (IS_ERR(oprofile_pdev)) { - ret = PTR_ERR(oprofile_pdev); - platform_driver_unregister(&oprofile_driver); - } - -out: - return ret; -} - -static void exit_driverfs(void) -{ - platform_device_unregister(oprofile_pdev); - platform_driver_unregister(&oprofile_driver); -} -#else -static int __init init_driverfs(void) { return 0; } -#define exit_driverfs() do { } while (0) -#endif /* CONFIG_PM */ - static int report_trace(struct stackframe *frame, void *d) { unsigned int *depth = d; @@ -350,74 +111,14 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth) int __init oprofile_arch_init(struct oprofile_operations *ops) { - int cpu, ret = 0; - - perf_num_counters = armpmu_get_max_events(); - - counter_config = kcalloc(perf_num_counters, - sizeof(struct op_counter_config), GFP_KERNEL); - - if (!counter_config) { - pr_info("oprofile: failed to allocate %d " - "counters\n", perf_num_counters); - return -ENOMEM; - } - - ret = init_driverfs(); - if (ret) { - kfree(counter_config); - counter_config = NULL; - return ret; - } - - 
for_each_possible_cpu(cpu) { - perf_events[cpu] = kcalloc(perf_num_counters, - sizeof(struct perf_event *), GFP_KERNEL); - if (!perf_events[cpu]) { - pr_info("oprofile: failed to allocate %d perf events " - "for cpu %d\n", perf_num_counters, cpu); - while (--cpu >= 0) - kfree(perf_events[cpu]); - return -ENOMEM; - } - } - ops->backtrace = arm_backtrace; - ops->create_files = op_arm_create_files; - ops->setup = op_arm_setup; - ops->start = op_arm_start; - ops->stop = op_arm_stop; - ops->shutdown = op_arm_stop; - ops->cpu_type = op_name_from_perf_id(armpmu_get_pmu_id()); - - if (!ops->cpu_type) - ret = -ENODEV; - else - pr_info("oprofile: using %s\n", ops->cpu_type); - return ret; + return oprofile_perf_init(ops); } -void oprofile_arch_exit(void) +void __exit oprofile_arch_exit(void) { - int cpu, id; - struct perf_event *event; - - if (*perf_events) { - for_each_possible_cpu(cpu) { - for (id = 0; id < perf_num_counters; ++id) { - event = perf_events[cpu][id]; - if (event != NULL) - perf_event_release_kernel(event); - } - kfree(perf_events[cpu]); - } - } - - if (counter_config) { - kfree(counter_config); - exit_driverfs(); - } + oprofile_perf_exit(); } #else int __init oprofile_arch_init(struct oprofile_operations *ops) @@ -425,5 +126,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) pr_info("oprofile: hardware counters not available\n"); return -ENODEV; } -void oprofile_arch_exit(void) {} +void __exit oprofile_arch_exit(void) {} #endif /* CONFIG_HW_PERF_EVENTS */ |
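
One recurring change in the interrupt handlers above is the replacement of perf_event_do_pending() with irq_work_run(), matched by the new HAVE_IRQ_WORK select in arch/arm/Kconfig; this uses the generic hardirq-context callback layer added earlier in the series ("irq_work: Add generic hardirq context callbacks"). A minimal usage sketch — irq_work_run() appears verbatim in the diff, while the queueing side, the demo_* names, and the callback body are illustrative assumptions rather than code from this merge:

```c
#include <linux/interrupt.h>
#include <linux/irq_work.h>

/*
 * Work deferred so that it runs from a normal hardirq (not NMI) context,
 * where wakeups and waitqueue locking are safe.
 */
static void demo_pending_wakeup(struct irq_work *work)
{
	/* e.g. wake up whoever is poll()ing the perf ring buffer */
}

static struct irq_work demo_pending = { .func = demo_pending_wakeup };

static irqreturn_t demo_pmu_handle_irq(int irq, void *dev)
{
	/* ... read overflowed counters, emit samples ... */

	irq_work_queue(&demo_pending);	/* mark the callback as pending */

	/*
	 * ARM has no self-IPI for irq_work at this point, so the PMU
	 * interrupt handler drains the pending list itself -- this is the
	 * irq_work_run() call the diff adds in place of
	 * perf_event_do_pending().
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
```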