-rw-r--r--	arch/x86/kernel/cpu/perf_event.c | 24 ++++++++++++++++++++----
-rw-r--r--	include/linux/perf_event.h       |  2 ++
-rw-r--r--	kernel/perf_event.c              | 20 ++++++++++++++++++--
3 files changed, 40 insertions, 6 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index a920f173a220..9173ea95f918 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1495,7 +1495,7 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
 		hwc->last_tag == cpuc->tags[i];
 }
 
-static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc);
+static void x86_pmu_stop(struct perf_event *event);
 
 void hw_perf_enable(void)
 {
@@ -1533,7 +1533,7 @@ void hw_perf_enable(void)
 			    match_prev_assignment(hwc, cpuc, i))
 				continue;
 
-			__x86_pmu_disable(event, cpuc);
+			x86_pmu_stop(event);
 
 			hwc->idx = -1;
 		}
@@ -1801,6 +1801,19 @@ static int x86_pmu_enable(struct perf_event *event)
 	return 0;
 }
 
+static int x86_pmu_start(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (hwc->idx == -1)
+		return -EAGAIN;
+
+	x86_perf_event_set_period(event, hwc, hwc->idx);
+	x86_pmu.enable(hwc, hwc->idx);
+
+	return 0;
+}
+
 static void x86_pmu_unthrottle(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -1924,8 +1937,9 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
 	event->pending_kill = POLL_IN;
 }
 
-static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc)
+static void x86_pmu_stop(struct perf_event *event)
 {
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
@@ -1954,7 +1968,7 @@ static void x86_pmu_disable(struct perf_event *event)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int i;
 
-	__x86_pmu_disable(event, cpuc);
+	x86_pmu_stop(event);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event_list[i]) {
@@ -2667,6 +2681,8 @@ static inline void x86_pmu_read(struct perf_event *event)
 static const struct pmu pmu = {
 	.enable		= x86_pmu_enable,
 	.disable	= x86_pmu_disable,
+	.start		= x86_pmu_start,
+	.stop		= x86_pmu_stop,
 	.read		= x86_pmu_read,
 	.unthrottle	= x86_pmu_unthrottle,
 };
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 071a7db52549..b08dfdad08cb 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -513,6 +513,8 @@ struct perf_event;
 struct pmu {
 	int (*enable)			(struct perf_event *event);
 	void (*disable)			(struct perf_event *event);
+	int (*start)			(struct perf_event *event);
+	void (*stop)			(struct perf_event *event);
 	void (*read)			(struct perf_event *event);
 	void (*unthrottle)		(struct perf_event *event);
 };
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 5a69abb05ac3..74c60021cdbc 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1493,6 +1493,22 @@ do {					\
 	return div64_u64(dividend, divisor);
 }
 
+static void perf_event_stop(struct perf_event *event)
+{
+	if (!event->pmu->stop)
+		return event->pmu->disable(event);
+
+	return event->pmu->stop(event);
+}
+
+static int perf_event_start(struct perf_event *event)
+{
+	if (!event->pmu->start)
+		return event->pmu->enable(event);
+
+	return event->pmu->start(event);
+}
+
 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	struct hw_perf_event *hwc = &event->hw;
@@ -1513,9 +1529,9 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 
 	if (atomic64_read(&hwc->period_left) > 8*sample_period) {
 		perf_disable();
-		event->pmu->disable(event);
+		perf_event_stop(event);
 		atomic64_set(&hwc->period_left, 0);
-		event->pmu->enable(event);
+		perf_event_start(event);
 		perf_enable();
 	}
 }
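
Note: the new ->start()/->stop() callbacks are lightweight variants of
->enable()/->disable(). On x86, x86_pmu_stop() halts a counter but leaves the
event in cpuc->event_list with its register assignment intact, whereas
x86_pmu_disable() additionally removes the event from the list;
x86_pmu_start() only reprograms the period and re-arms an already-assigned
counter, failing with -EAGAIN when no counter is assigned yet. The
perf_event_start()/perf_event_stop() wrappers in kernel/perf_event.c fall back
to ->enable()/->disable() for PMUs that do not implement the optional
callbacks, so existing drivers keep working unchanged.

A minimal sketch of how another PMU driver could adopt the new hooks. Every
example_* identifier below is hypothetical and not part of this commit; only
the .start/.stop struct pmu fields come from the patch:

#include <linux/perf_event.h>

/* Hypothetical low-level helpers and handlers, stand-ins for real hardware
 * accessors; declarations only, not part of this commit. */
static void example_write_period(struct perf_event *event, int idx);
static void example_counter_enable(int idx);
static void example_counter_disable(int idx);
static int example_pmu_enable(struct perf_event *event);
static void example_pmu_disable(struct perf_event *event);
static void example_pmu_read(struct perf_event *event);
static void example_pmu_unthrottle(struct perf_event *event);

static int example_pmu_start(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* No counter assigned yet: the caller must use ->enable() instead. */
	if (hwc->idx == -1)
		return -EAGAIN;

	/* Reprogram the sampling period and re-arm the assigned counter. */
	example_write_period(event, hwc->idx);
	example_counter_enable(hwc->idx);

	return 0;
}

static void example_pmu_stop(struct perf_event *event)
{
	/* Halt the counter; keep the event's assignment and list slot. */
	example_counter_disable(event->hw.idx);
}

static const struct pmu example_pmu = {
	.enable		= example_pmu_enable,
	.disable	= example_pmu_disable,
	.start		= example_pmu_start,	/* optional as of this patch */
	.stop		= example_pmu_stop,	/* optional as of this patch */
	.read		= example_pmu_read,
	.unthrottle	= example_pmu_unthrottle,
};

Keeping ->stop() from tearing down the counter assignment is what lets
hw_perf_enable() cheaply park events that moved counters before reprogramming
them, and lets perf_adjust_period() reset period_left without a full
reschedule of the event.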