path: root/kernel
author	Avi Kivity <avi@redhat.com>	2011-12-25 15:44:43 +0200
committer	Avi Kivity <avi@redhat.com>	2011-12-27 11:22:24 +0200
commit	9e31905f293ae84e4f120ed9e414031eaefa0bdf (patch)
tree	153204ff0dca820e760007bc24075ec7fb46a276 /kernel
parent	ff5c2c0316ff0e3e2dba3ca14167d994453df093 (diff)
parent	b3d9468a8bd218a695e3a0ff112cd4efd27b670a (diff)
Merge remote-tracking branch 'tip/perf/core' into kvm-updates/3.3
* tip/perf/core: (66 commits)
  perf, x86: Expose perf capability to other modules
  perf, x86: Implement arch event mask as quirk
  x86, perf: Disable non available architectural events
  jump_label: Provide jump_label_key initializers
  jump_label, x86: Fix section mismatch
  perf, core: Rate limit perf_sched_events jump_label patching
  perf: Fix enable_on_exec for sibling events
  perf: Remove superfluous arguments
  perf, x86: Prefer fixed-purpose counters when scheduling
  perf, x86: Fix event scheduler for constraints with overlapping counters
  perf, x86: Implement event scheduler helper functions
  perf: Avoid a useless pmu_disable() in the perf-tick
  x86/tools: Add decoded instruction dump mode
  x86: Update instruction decoder to support new AVX formats
  x86/tools: Fix insn_sanity message outputs
  x86/tools: Fix instruction decoder message output
  x86: Fix instruction decoder to handle grouped AVX instructions
  x86/tools: Fix Makefile to build all test tools
  perf test: Soft errors shouldn't stop the "Validate PERF_RECORD_" test
  perf test: Validate PERF_RECORD_ events and perf_sample fields
  ...

Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/Makefile              |   2
-rw-r--r--  kernel/events/callchain.c           | 191
-rw-r--r--  kernel/events/core.c                | 296
-rw-r--r--  kernel/events/internal.h            |  39
-rw-r--r--  kernel/jump_label.c                 |  49
-rw-r--r--  kernel/lockdep.c                    |  30
-rw-r--r--  kernel/trace/trace.c                | 105
-rw-r--r--  kernel/trace/trace.h                |   2
-rw-r--r--  kernel/trace/trace_events_filter.c  |  26
-rw-r--r--  kernel/trace/trace_irqsoff.c        |  13
-rw-r--r--  kernel/trace/trace_output.c         |  16
-rw-r--r--  kernel/trace/trace_sched_wakeup.c   |  13
12 files changed, 476 insertions(+), 306 deletions(-)
diff --git a/kernel/events/Makefile b/kernel/events/Makefile
index 89e5e8aa4c3..22d901f9caf 100644
--- a/kernel/events/Makefile
+++ b/kernel/events/Makefile
@@ -2,5 +2,5 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_core.o = -pg
endif
-obj-y := core.o ring_buffer.o
+obj-y := core.o ring_buffer.o callchain.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
new file mode 100644
index 00000000000..057e24b665c
--- /dev/null
+++ b/kernel/events/callchain.c
@@ -0,0 +1,191 @@
+/*
+ * Performance events callchain code, extracted from core.c:
+ *
+ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * For licensing details see kernel-base/COPYING
+ */
+
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+struct callchain_cpus_entries {
+ struct rcu_head rcu_head;
+ struct perf_callchain_entry *cpu_entries[0];
+};
+
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
+static atomic_t nr_callchain_events;
+static DEFINE_MUTEX(callchain_mutex);
+static struct callchain_cpus_entries *callchain_cpus_entries;
+
+
+__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+}
+
+__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+}
+
+static void release_callchain_buffers_rcu(struct rcu_head *head)
+{
+ struct callchain_cpus_entries *entries;
+ int cpu;
+
+ entries = container_of(head, struct callchain_cpus_entries, rcu_head);
+
+ for_each_possible_cpu(cpu)
+ kfree(entries->cpu_entries[cpu]);
+
+ kfree(entries);
+}
+
+static void release_callchain_buffers(void)
+{
+ struct callchain_cpus_entries *entries;
+
+ entries = callchain_cpus_entries;
+ rcu_assign_pointer(callchain_cpus_entries, NULL);
+ call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
+}
+
+static int alloc_callchain_buffers(void)
+{
+ int cpu;
+ int size;
+ struct callchain_cpus_entries *entries;
+
+ /*
+ * We can't use the percpu allocation API for data that can be
+ * accessed from NMI. Use a temporary manual per cpu allocation
+ * until that gets sorted out.
+ */
+ size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
+
+ entries = kzalloc(size, GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
+
+ for_each_possible_cpu(cpu) {
+ entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+ cpu_to_node(cpu));
+ if (!entries->cpu_entries[cpu])
+ goto fail;
+ }
+
+ rcu_assign_pointer(callchain_cpus_entries, entries);
+
+ return 0;
+
+fail:
+ for_each_possible_cpu(cpu)
+ kfree(entries->cpu_entries[cpu]);
+ kfree(entries);
+
+ return -ENOMEM;
+}
+
+int get_callchain_buffers(void)
+{
+ int err = 0;
+ int count;
+
+ mutex_lock(&callchain_mutex);
+
+ count = atomic_inc_return(&nr_callchain_events);
+ if (WARN_ON_ONCE(count < 1)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (count > 1) {
+ /* If the allocation failed, give up */
+ if (!callchain_cpus_entries)
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = alloc_callchain_buffers();
+ if (err)
+ release_callchain_buffers();
+exit:
+ mutex_unlock(&callchain_mutex);
+
+ return err;
+}
+
+void put_callchain_buffers(void)
+{
+ if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
+ release_callchain_buffers();
+ mutex_unlock(&callchain_mutex);
+ }
+}
+
+static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+{
+ int cpu;
+ struct callchain_cpus_entries *entries;
+
+ *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+ if (*rctx == -1)
+ return NULL;
+
+ entries = rcu_dereference(callchain_cpus_entries);
+ if (!entries)
+ return NULL;
+
+ cpu = smp_processor_id();
+
+ return &entries->cpu_entries[cpu][*rctx];
+}
+
+static void
+put_callchain_entry(int rctx)
+{
+ put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+}
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+ int rctx;
+ struct perf_callchain_entry *entry;
+
+
+ entry = get_callchain_entry(&rctx);
+ if (rctx == -1)
+ return NULL;
+
+ if (!entry)
+ goto exit_put;
+
+ entry->nr = 0;
+
+ if (!user_mode(regs)) {
+ perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+ perf_callchain_kernel(entry, regs);
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+
+ if (regs) {
+ perf_callchain_store(entry, PERF_CONTEXT_USER);
+ perf_callchain_user(entry, regs);
+ }
+
+exit_put:
+ put_callchain_entry(rctx);
+
+ return entry;
+}
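
The lifetime management above — the first get_callchain_buffers() call allocates, the last put frees — is a plain mutex-guarded refcount. A minimal user-space sketch of the same shape (hypothetical names; pthreads stand in for the kernel mutex, and an immediate free() for the RCU-deferred release):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t buf_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nr_users;
static void *shared_buf;

int get_shared_buf(void)
{
	int err = 0;

	pthread_mutex_lock(&buf_mutex);
	if (++nr_users == 1) {
		shared_buf = calloc(1, 4096);	/* first user allocates */
		if (!shared_buf)
			err = -1;
	} else if (!shared_buf) {
		err = -1;			/* an earlier allocation failed: give up */
	}
	pthread_mutex_unlock(&buf_mutex);
	return err;
}

void put_shared_buf(void)
{
	pthread_mutex_lock(&buf_mutex);
	if (--nr_users == 0) {
		free(shared_buf);		/* the kernel defers this via call_rcu() */
		shared_buf = NULL;
	}
	pthread_mutex_unlock(&buf_mutex);
}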
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 58690af323e..24e3a4b1c89 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -128,7 +128,7 @@ enum event_type_t {
* perf_sched_events : >0 events exist
* perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
*/
-struct jump_label_key perf_sched_events __read_mostly;
+struct jump_label_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static atomic_t nr_mmap_events __read_mostly;
@@ -1130,6 +1130,8 @@ event_sched_out(struct perf_event *event,
if (!is_software_event(event))
cpuctx->active_oncpu--;
ctx->nr_active--;
+ if (event->attr.freq && event->attr.sample_freq)
+ ctx->nr_freq--;
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
}
@@ -1325,6 +1327,7 @@ retry:
}
raw_spin_unlock_irq(&ctx->lock);
}
+EXPORT_SYMBOL_GPL(perf_event_disable);
static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx,
@@ -1406,6 +1409,8 @@ event_sched_in(struct perf_event *event,
if (!is_software_event(event))
cpuctx->active_oncpu++;
ctx->nr_active++;
+ if (event->attr.freq && event->attr.sample_freq)
+ ctx->nr_freq++;
if (event->attr.exclusive)
cpuctx->exclusive = 1;
@@ -1662,8 +1667,7 @@ retry:
* Note: this works for group members as well as group leaders
* since the non-leader members' sibling_lists will be empty.
*/
-static void __perf_event_mark_enabled(struct perf_event *event,
- struct perf_event_context *ctx)
+static void __perf_event_mark_enabled(struct perf_event *event)
{
struct perf_event *sub;
u64 tstamp = perf_event_time(event);
@@ -1701,7 +1705,7 @@ static int __perf_event_enable(void *info)
*/
perf_cgroup_set_timestamp(current, ctx);
- __perf_event_mark_enabled(event, ctx);
+ __perf_event_mark_enabled(event);
if (!event_filter_match(event)) {
if (is_cgroup_event(event))
@@ -1782,7 +1786,7 @@ void perf_event_enable(struct perf_event *event)
retry:
if (!ctx->is_active) {
- __perf_event_mark_enabled(event, ctx);
+ __perf_event_mark_enabled(event);
goto out;
}
@@ -1809,6 +1813,7 @@ retry:
out:
raw_spin_unlock_irq(&ctx->lock);
}
+EXPORT_SYMBOL_GPL(perf_event_enable);
int perf_event_refresh(struct perf_event *event, int refresh)
{
@@ -2327,6 +2332,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
u64 interrupts, now;
s64 delta;
+ if (!ctx->nr_freq)
+ return;
+
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
@@ -2382,12 +2390,14 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
struct perf_event_context *ctx = NULL;
- int rotate = 0, remove = 1;
+ int rotate = 0, remove = 1, freq = 0;
if (cpuctx->ctx.nr_events) {
remove = 0;
if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
rotate = 1;
+ if (cpuctx->ctx.nr_freq)
+ freq = 1;
}
ctx = cpuctx->task_ctx;
@@ -2395,33 +2405,40 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
remove = 0;
if (ctx->nr_events != ctx->nr_active)
rotate = 1;
+ if (ctx->nr_freq)
+ freq = 1;
}
+ if (!rotate && !freq)
+ goto done;
+
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
- perf_ctx_adjust_freq(&cpuctx->ctx, interval);
- if (ctx)
- perf_ctx_adjust_freq(ctx, interval);
- if (!rotate)
- goto done;
+ if (freq) {
+ perf_ctx_adjust_freq(&cpuctx->ctx, interval);
+ if (ctx)
+ perf_ctx_adjust_freq(ctx, interval);
+ }
- cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- if (ctx)
- ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+ if (rotate) {
+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+ if (ctx)
+ ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
- rotate_ctx(&cpuctx->ctx);
- if (ctx)
- rotate_ctx(ctx);
+ rotate_ctx(&cpuctx->ctx);
+ if (ctx)
+ rotate_ctx(ctx);
- perf_event_sched_in(cpuctx, ctx, current);
+ perf_event_sched_in(cpuctx, ctx, current);
+ }
+
+ perf_pmu_enable(cpuctx->ctx.pmu);
+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
done:
if (remove)
list_del_init(&cpuctx->rotation_list);
-
- perf_pmu_enable(cpuctx->ctx.pmu);
- perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
void perf_event_task_tick(void)
@@ -2448,7 +2465,7 @@ static int event_enable_on_exec(struct perf_event *event,
if (event->state >= PERF_EVENT_STATE_INACTIVE)
return 0;
- __perf_event_mark_enabled(event, ctx);
+ __perf_event_mark_enabled(event);
return 1;
}
@@ -2480,13 +2497,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
raw_spin_lock(&ctx->lock);
task_ctx_sched_out(ctx);
- list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
- ret = event_enable_on_exec(event, ctx);
- if (ret)
- enabled = 1;
- }
-
- list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
+ list_for_each_entry(event, &ctx->event_list, event_entry) {
ret = event_enable_on_exec(event, ctx);
if (ret)
enabled = 1;
@@ -2574,215 +2585,6 @@ static u64 perf_event_read(struct perf_event *event)
}
/*
- * Callchain support
- */
-
-struct callchain_cpus_entries {
- struct rcu_head rcu_head;
- struct perf_callchain_entry *cpu_entries[0];
-};
-
-static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
-static atomic_t nr_callchain_events;
-static DEFINE_MUTEX(callchain_mutex);
-struct callchain_cpus_entries *callchain_cpus_entries;
-
-
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
-{
-}
-
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
-{
-}
-
-static void release_callchain_buffers_rcu(struct rcu_head *head)
-{
- struct callchain_cpus_entries *entries;
- int cpu;
-
- entries = container_of(head, struct callchain_cpus_entries, rcu_head);
-
- for_each_possible_cpu(cpu)
- kfree(entries->cpu_entries[cpu]);
-
- kfree(entries);
-}
-
-static void release_callchain_buffers(void)
-{
- struct callchain_cpus_entries *entries;
-
- entries = callchain_cpus_entries;
- rcu_assign_pointer(callchain_cpus_entries, NULL);
- call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
-}
-
-static int alloc_callchain_buffers(void)
-{
- int cpu;
- int size;
- struct callchain_cpus_entries *entries;
-
- /*
- * We can't use the percpu allocation API for data that can be
- * accessed from NMI. Use a temporary manual per cpu allocation
- * until that gets sorted out.
- */
- size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
-
- entries = kzalloc(size, GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
-
- for_each_possible_cpu(cpu) {
- entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
- cpu_to_node(cpu));
- if (!entries->cpu_entries[cpu])
- goto fail;
- }
-
- rcu_assign_pointer(callchain_cpus_entries, entries);
-
- return 0;
-
-fail:
- for_each_possible_cpu(cpu)
- kfree(entries->cpu_entries[cpu]);
- kfree(entries);
-
- return -ENOMEM;
-}
-
-static int get_callchain_buffers(void)
-{
- int err = 0;
- int count;
-
- mutex_lock(&callchain_mutex);
-
- count = atomic_inc_return(&nr_callchain_events);
- if (WARN_ON_ONCE(count < 1)) {
- err = -EINVAL;
- goto exit;
- }
-
- if (count > 1) {
- /* If the allocation failed, give up */
- if (!callchain_cpus_entries)
- err = -ENOMEM;
- goto exit;
- }
-
- err = alloc_callchain_buffers();
- if (err)
- release_callchain_buffers();
-exit:
- mutex_unlock(&callchain_mutex);
-
- return err;
-}
-
-static void put_callchain_buffers(void)
-{
- if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
- release_callchain_buffers();
- mutex_unlock(&callchain_mutex);
- }
-}
-
-static int get_recursion_context(int *recursion)
-{
- int rctx;
-
- if (in_nmi())
- rctx = 3;
- else if (in_irq())
- rctx = 2;
- else if (in_softirq())
- rctx = 1;
- else
- rctx = 0;
-
- if (recursion[rctx])
- return -1;
-
- recursion[rctx]++;
- barrier();
-
- return rctx;
-}
-
-static inline void put_recursion_context(int *recursion, int rctx)
-{
- barrier();
- recursion[rctx]--;
-}
-
-static struct perf_callchain_entry *get_callchain_entry(int *rctx)
-{
- int cpu;
- struct callchain_cpus_entries *entries;
-
- *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
- if (*rctx == -1)
- return NULL;
-
- entries = rcu_dereference(callchain_cpus_entries);
- if (!entries)
- return NULL;
-
- cpu = smp_processor_id();
-
- return &entries->cpu_entries[cpu][*rctx];
-}
-
-static void
-put_callchain_entry(int rctx)
-{
- put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
-}
-
-static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
- int rctx;
- struct perf_callchain_entry *entry;
-
-
- entry = get_callchain_entry(&rctx);
- if (rctx == -1)
- return NULL;
-
- if (!entry)
- goto exit_put;
-
- entry->nr = 0;
-
- if (!user_mode(regs)) {
- perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
- perf_callchain_kernel(entry, regs);
- if (current->mm)
- regs = task_pt_regs(current);
- else
- regs = NULL;
- }
-
- if (regs) {
- perf_callchain_store(entry, PERF_CONTEXT_USER);
- perf_callchain_user(entry, regs);
- }
-
-exit_put:
- put_callchain_entry(rctx);
-
- return entry;
-}
-
-/*
* Initialize the perf_event context in a task_struct:
*/
static void __perf_event_init_context(struct perf_event_context *ctx)
@@ -2946,7 +2748,7 @@ static void free_event(struct perf_event *event)
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
- jump_label_dec(&perf_sched_events);
+ jump_label_dec_deferred(&perf_sched_events);
if (event->attr.mmap || event->attr.mmap_data)
atomic_dec(&nr_mmap_events);
if (event->attr.comm)
@@ -2957,7 +2759,7 @@ static void free_event(struct perf_event *event)
put_callchain_buffers();
if (is_cgroup_event(event)) {
atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
- jump_label_dec(&perf_sched_events);
+ jump_label_dec_deferred(&perf_sched_events);
}
}
@@ -4820,7 +4622,6 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
struct hw_perf_event *hwc = &event->hw;
int throttle = 0;
- data->period = event->hw.last_period;
if (!overflow)
overflow = perf_swevent_set_period(event);
@@ -4854,6 +4655,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
if (!is_sampling_event(event))
return;
+ if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+ data->period = nr;
+ return perf_swevent_overflow(event, 1, data, regs);
+ } else
+ data->period = event->hw.last_period;
+
if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
return perf_swevent_overflow(event, 1, data, regs);
@@ -5981,7 +5788,7 @@ done:
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
- jump_label_inc(&perf_sched_events);
+ jump_label_inc(&perf_sched_events.key);
if (event->attr.mmap || event->attr.mmap_data)
atomic_inc(&nr_mmap_events);
if (event->attr.comm)
@@ -6219,7 +6026,7 @@ SYSCALL_DEFINE5(perf_event_open,
* - that may need work on context switch
*/
atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
- jump_label_inc(&perf_sched_events);
+ jump_label_inc(&perf_sched_events.key);
}
/*
@@ -7065,6 +6872,9 @@ void __init perf_event_init(void)
ret = init_hw_breakpoint();
WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
+
+ /* do not patch jump label more than once per second */
+ jump_label_rate_limit(&perf_sched_events, HZ);
}
static int __init perf_event_sysfs_init(void)
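
The ctx->nr_freq counters maintained in event_sched_in()/event_sched_out() above let perf_rotate_context() skip the frequency-adjust pass entirely when no sampling event uses attr.freq. A rough standalone sketch of that gating (hypothetical types, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct ctx { int nr_active; int nr_freq; };
struct event { bool freq; };

static void event_sched_in(struct ctx *ctx, const struct event *e)
{
	ctx->nr_active++;
	if (e->freq)
		ctx->nr_freq++;	/* count freq-based events as they go on */
}

static void event_sched_out(struct ctx *ctx, const struct event *e)
{
	ctx->nr_active--;
	if (e->freq)
		ctx->nr_freq--;
}

static void tick(struct ctx *ctx)
{
	if (!ctx->nr_freq)
		return;		/* fast path: no freq events, skip the list walk */
	printf("adjusting periods for %d freq event(s)\n", ctx->nr_freq);
}

int main(void)
{
	struct ctx ctx = { 0, 0 };
	struct event e = { .freq = true };

	tick(&ctx);			/* prints nothing */
	event_sched_in(&ctx, &e);
	tick(&ctx);			/* adjusts one event */
	event_sched_out(&ctx, &e);
	return 0;
}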
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 64568a69937..b0b107f90af 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -1,6 +1,10 @@
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H
+#include <linux/hardirq.h>
+
+/* Buffer handling */
+
#define RING_BUFFER_WRITABLE 0x01
struct ring_buffer {
@@ -67,7 +71,7 @@ static inline int page_order(struct ring_buffer *rb)
}
#endif
-static unsigned long perf_data_size(struct ring_buffer *rb)
+static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
@@ -96,4 +100,37 @@ __output_copy(struct perf_output_handle *handle,
} while (len);
}
+/* Callchain handling */
+extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+extern int get_callchain_buffers(void);
+extern void put_callchain_buffers(void);
+
+static inline int get_recursion_context(int *recursion)
+{
+ int rctx;
+
+ if (in_nmi())
+ rctx = 3;
+ else if (in_irq())
+ rctx = 2;
+ else if (in_softirq())
+ rctx = 1;
+ else
+ rctx = 0;
+
+ if (recursion[rctx])
+ return -1;
+
+ recursion[rctx]++;
+ barrier();
+
+ return rctx;
+}
+
+static inline void put_recursion_context(int *recursion, int rctx)
+{
+ barrier();
+ recursion[rctx]--;
+}
+
#endif /* _KERNEL_EVENTS_INTERNAL_H */
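
get_recursion_context()/put_recursion_context() give each execution level (task, softirq, hardirq, NMI) its own busy flag, so a callchain taken from an interrupt cannot reuse the entry a task-level callchain is still filling. A compilable sketch of the slot logic, with a hypothetical current_context() standing in for the in_nmi()/in_irq()/in_softirq() checks:

#include <stdio.h>

/* 0 = task, 1 = softirq, 2 = hardirq, 3 = NMI -- mirrors PERF_NR_CONTEXTS */
static int recursion[4];

/* hypothetical stand-in for the kernel's context queries */
static int current_context(void)
{
	return 0;	/* pretend we always run at task level */
}

static int get_recursion_context(void)
{
	int rctx = current_context();

	if (recursion[rctx])
		return -1;	/* already inside a callchain in this context */
	recursion[rctx]++;
	return rctx;
}

static void put_recursion_context(int rctx)
{
	recursion[rctx]--;
}

int main(void)
{
	int rctx = get_recursion_context();

	printf("first get: %d\n", rctx);			/* 0 */
	printf("nested get: %d\n", get_recursion_context());	/* -1 */
	put_recursion_context(rctx);
	return 0;
}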
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 2af9027106a..01d3b70fc98 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -73,16 +73,47 @@ void jump_label_inc(struct jump_label_key *key)
}
EXPORT_SYMBOL_GPL(jump_label_inc);
-void jump_label_dec(struct jump_label_key *key)
+static void __jump_label_dec(struct jump_label_key *key,
+ unsigned long rate_limit, struct delayed_work *work)
{
if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
return;
- jump_label_update(key, JUMP_LABEL_DISABLE);
+ if (rate_limit) {
+ atomic_inc(&key->enabled);
+ schedule_delayed_work(work, rate_limit);
+ } else
+ jump_label_update(key, JUMP_LABEL_DISABLE);
+
jump_label_unlock();
}
EXPORT_SYMBOL_GPL(jump_label_dec);
+static void jump_label_update_timeout(struct work_struct *work)
+{
+ struct jump_label_key_deferred *key =
+ container_of(work, struct jump_label_key_deferred, work.work);
+ __jump_label_dec(&key->key, 0, NULL);
+}
+
+void jump_label_dec(struct jump_label_key *key)
+{
+ __jump_label_dec(key, 0, NULL);
+}
+
+void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+{
+ __jump_label_dec(&key->key, key->timeout, &key->work);
+}
+
+
+void jump_label_rate_limit(struct jump_label_key_deferred *key,
+ unsigned long rl)
+{
+ key->timeout = rl;
+ INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
+}
+
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
if (entry->code <= (unsigned long)end &&
@@ -113,7 +144,7 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start,
* running code can override this to make the non-live update case
* cheaper.
*/
-void __weak arch_jump_label_transform_static(struct jump_entry *entry,
+void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
arch_jump_label_transform(entry, type);
@@ -219,8 +250,13 @@ void jump_label_apply_nops(struct module *mod)
if (iter_start == iter_stop)
return;
- for (iter = iter_start; iter < iter_stop; iter++)
- arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
+ for (iter = iter_start; iter < iter_stop; iter++) {
+ struct jump_label_key *iterk;
+
+ iterk = (struct jump_label_key *)(unsigned long)iter->key;
+ arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
+ JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
+ }
}
static int jump_label_add_module(struct module *mod)
@@ -260,8 +296,7 @@ static int jump_label_add_module(struct module *mod)
key->next = jlm;
if (jump_label_enabled(key))
- __jump_label_update(key, iter, iter_stop,
- JUMP_LABEL_ENABLE);
+ __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
}
return 0;
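
The deferred path added above works by re-incrementing the key when the last reference drops and queuing the real disable as delayed work, so a rapidly toggling key is patched at most once per timeout. A user-space sketch of that dance (hypothetical names; a flag stands in for schedule_delayed_work()):

#include <stdio.h>

struct jump_key { int enabled; };
struct jump_key_deferred {
	struct jump_key key;
	unsigned long timeout;	/* 0 = disable immediately */
	int work_pending;	/* stands in for the delayed work item */
};

static void patch_disable(struct jump_key *key)
{
	printf("patch nop over the branch\n");
}

static void key_dec(struct jump_key_deferred *dk)
{
	if (--dk->key.enabled > 0)
		return;			/* other users remain */

	if (dk->timeout) {
		dk->key.enabled++;	/* keep the key live ... */
		dk->work_pending = 1;	/* ... and patch later, batching churn */
	} else {
		patch_disable(&dk->key);
	}
}

int main(void)
{
	struct jump_key_deferred k = { .key = { .enabled = 1 }, .timeout = 100 };

	key_dec(&k);	/* does not patch; schedules the deferred disable */
	printf("pending=%d enabled=%d\n", k.work_pending, k.key.enabled);
	return 0;
}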
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b2e08c932d9..24f176c9fc9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -499,36 +499,32 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
usage[i] = '\0';
}
-static int __print_lock_name(struct lock_class *class)
+static void __print_lock_name(struct lock_class *class)
{
char str[KSYM_NAME_LEN];
const char *name;
name = class->name;
- if (!name)
- name = __get_key_name(class->key, str);
-
- return printk("%s", name);
-}
-
-static void print_lock_name(struct lock_class *class)
-{
- char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
- const char *name;
-
- get_usage_chars(class, usage);
-
- name = class->name;
if (!name) {
name = __get_key_name(class->key, str);
- printk(" (%s", name);
+ printk("%s", name);
} else {
- printk(" (%s", name);
+ printk("%s", name);
if (class->name_version > 1)
printk("#%d", class->name_version);
if (class->subclass)
printk("/%d", class->subclass);
}
+}
+
+static void print_lock_name(struct lock_class *class)
+{
+ char usage[LOCK_USAGE_CHARS];
+
+ get_usage_chars(class, usage);
+
+ printk(" (");
+ __print_lock_name(class);
printk("){%s}", usage);
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f2bd275bb60..7392070ffc3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -338,7 +338,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
- TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
+ TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
+ TRACE_ITER_IRQ_INFO;
static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);
@@ -426,6 +427,7 @@ static const char *trace_options[] = {
"record-cmd",
"overwrite",
"disable_on_free",
+ "irq-info",
NULL
};
@@ -1843,6 +1845,33 @@ static void s_stop(struct seq_file *m, void *p)
trace_event_read_unlock();
}
+static void
+get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
+{
+ unsigned long count;
+ int cpu;
+
+ *total = 0;
+ *entries = 0;
+
+ for_each_tracing_cpu(cpu) {
+ count = ring_buffer_entries_cpu(tr->buffer, cpu);
+ /*
+ * If this buffer has skipped entries, then we hold all
+ * entries for the trace and we need to ignore the
+ * ones before the time stamp.
+ */
+ if (tr->data[cpu]->skipped_entries) {
+ count -= tr->data[cpu]->skipped_entries;
+ /* total is the same as the entries */
+ *total += count;
+ } else
+ *total += count +
+ ring_buffer_overrun_cpu(tr->buffer, cpu);
+ *entries += count;
+ }
+}
+
static void print_lat_help_header(struct seq_file *m)
{
seq_puts(m, "# _------=> CPU# \n");
@@ -1855,12 +1884,35 @@ static void print_lat_help_header(struct seq_file *m)
seq_puts(m, "# \\ / ||||| \\ | / \n");
}
-static void print_func_help_header(struct seq_file *m)
+static void print_event_info(struct trace_array *tr, struct seq_file *m)
+{
+ unsigned long total;
+ unsigned long entries;
+
+ get_total_entries(tr, &total, &entries);
+ seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
+ entries, total, num_online_cpus());
+ seq_puts(m, "#\n");
+}
+
+static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
{
- seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
+ print_event_info(tr, m);
+ seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
seq_puts(m, "# | | | | |\n");
}
+static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
+{
+ print_event_info(tr, m);
+ seq_puts(m, "# _-----=> irqs-off\n");
+ seq_puts(m, "# / _----=> need-resched\n");
+ seq_puts(m, "# | / _---=> hardirq/softirq\n");
+ seq_puts(m, "# || / _--=> preempt-depth\n");
+ seq_puts(m, "# ||| / delay\n");
+ seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
+ seq_puts(m, "# | | | |||| | |\n");
+}
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
@@ -1869,32 +1921,14 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
struct trace_array *tr = iter->tr;
struct trace_array_cpu *data = tr->data[tr->cpu];
struct tracer *type = current_trace;
- unsigned long entries = 0;
- unsigned long total = 0;
- unsigned long count;
+ unsigned long entries;
+ unsigned long total;
const char *name = "preemption";
- int cpu;
if (type)
name = type->name;
-
- for_each_tracing_cpu(cpu) {
- count = ring_buffer_entries_cpu(tr->buffer, cpu);
- /*
- * If this buffer has skipped entries, then we hold all
- * entries for the trace and we need to ignore the
- * ones before the time stamp.
- */
- if (tr->data[cpu]->skipped_entries) {
- count -= tr->data[cpu]->skipped_entries;
- /* total is the same as the entries */
- total += count;
- } else
- total += count +
- ring_buffer_overrun_cpu(tr->buffer, cpu);
- entries += count;
- }
+ get_total_entries(tr, &total, &entries);
seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
name, UTS_RELEASE);
@@ -2140,6 +2174,21 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
return print_trace_fmt(iter);
}
+void trace_latency_header(struct seq_file *m)
+{
+ struct trace_iterator *iter = m->private;
+
+ /* print nothing if the buffers are empty */
+ if (trace_empty(iter))
+ return;
+
+ if (iter->iter_flags & TRACE_FILE_LAT_FMT)
+ print_trace_header(m, iter);
+
+ if (!(trace_flags & TRACE_ITER_VERBOSE))
+ print_lat_help_header(m);
+}
+
void trace_default_header(struct seq_file *m)
{
struct trace_iterator *iter = m->private;
@@ -2155,8 +2204,12 @@ void trace_default_header(struct seq_file *m)
if (!(trace_flags & TRACE_ITER_VERBOSE))
print_lat_help_header(m);
} else {
- if (!(trace_flags & TRACE_ITER_VERBOSE))
- print_func_help_header(m);
+ if (!(trace_flags & TRACE_ITER_VERBOSE)) {
+ if (trace_flags & TRACE_ITER_IRQ_INFO)
+ print_func_help_header_irq(iter->tr, m);
+ else
+ print_func_help_header(iter->tr, m);
+ }
}
}
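
get_total_entries() factors out the accounting both print_trace_header() and the new print_event_info() need: entries written = in-buffer entries plus overwritten ones, unless the buffer was stamped to skip pre-trace entries, in which case nothing was lost. A standalone sketch of the same arithmetic (hypothetical struct, not the kernel's ring buffer API):

#include <stdio.h>

struct cpu_buf {
	unsigned long entries;		/* entries currently in the buffer */
	unsigned long overrun;		/* entries lost to overwrite */
	unsigned long skipped_entries;	/* entries before the trace start stamp */
};

static void get_totals(const struct cpu_buf *bufs, int ncpus,
		       unsigned long *total, unsigned long *entries)
{
	*total = *entries = 0;
	for (int cpu = 0; cpu < ncpus; cpu++) {
		unsigned long count = bufs[cpu].entries;

		if (bufs[cpu].skipped_entries) {
			/* buffer holds the whole trace: total equals entries */
			count -= bufs[cpu].skipped_entries;
			*total += count;
		} else {
			*total += count + bufs[cpu].overrun;
		}
		*entries += count;
	}
}

int main(void)
{
	struct cpu_buf bufs[2] = {
		{ .entries = 100, .overrun = 20 },
		{ .entries = 50, .skipped_entries = 10 },
	};
	unsigned long total, entries;

	get_totals(bufs, 2, &total, &entries);
	printf("entries-in-buffer/entries-written: %lu/%lu\n", entries, total);
	return 0;
}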
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 092e1f8d18d..2c2657462ac 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -370,6 +370,7 @@ void trace_graph_function(struct trace_array *tr,
unsigned long ip,
unsigned long parent_ip,
unsigned long flags, int pc);
+void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);
@@ -654,6 +655,7 @@ enum trace_iterator_flags {
TRACE_ITER_RECORD_CMD = 0x100000,
TRACE_ITER_OVERWRITE = 0x200000,
TRACE_ITER_STOP_ON_FREE = 0x400000,
+ TRACE_ITER_IRQ_INFO = 0x800000,
};
/*
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 95dc31efd6d..f04cc3136bd 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -27,6 +27,12 @@
#include "trace.h"
#include "trace_output.h"
+#define DEFAULT_SYS_FILTER_MESSAGE \
+ "### global filter ###\n" \
+ "# Use this to set filters for multiple events.\n" \
+ "# Only events with the given fields will be affected.\n" \
+ "# If no events are modified, an error message will be displayed here"
+
enum filter_op_ids
{
OP_OR,
@@ -646,7 +652,7 @@ void print_subsystem_event_filter(struct event_subsystem *system,
if (filter && filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
- trace_seq_printf(s, "none\n");
+ trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
mutex_unlock(&event_mutex);
}
@@ -1838,7 +1844,10 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
if (!filter)
goto out;
- replace_filter_string(filter, filter_string);
+ /* System filters just show a default message */
+ kfree(filter->filter_string);
+ filter->filter_string = NULL;
+
/*
* No event actually uses the system filter
* we can free it without synchronize_sched().
@@ -1848,14 +1857,12 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
parse_init(ps, filter_ops, filter_string);
err = filter_parse(ps);
- if (err) {
- append_filter_err(ps, system->filter);
- goto out;
- }
+ if (err)
+ goto err_filter;
err = replace_system_preds(system, ps, filter_string);
if (err)
- append_filter_err(ps, system->filter);
+ goto err_filter;
out:
filter_opstack_clear(ps);
@@ -1865,6 +1872,11 @@ out_unlock:
mutex_unlock(&event_mutex);
return err;
+
+err_filter:
+ replace_filter_string(filter, filter_string);
+ append_filter_err(ps, system->filter);
+ goto out;
}
#ifdef CONFIG_PERF_EVENTS
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 20dad0d7a16..99d20e92036 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -280,9 +280,20 @@ static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
}
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
-static void irqsoff_print_header(struct seq_file *s) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
+
+#ifdef CONFIG_FUNCTION_TRACER
+static void irqsoff_print_header(struct seq_file *s)
+{
+ trace_default_header(s);
+}
+#else
+static void irqsoff_print_header(struct seq_file *s)
+{
+ trace_latency_header(s);
+}
+#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 51999309a6c..0d6ff355594 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -627,11 +627,23 @@ int trace_print_context(struct trace_iterator *iter)
unsigned long usec_rem = do_div(t, USEC_PER_SEC);
unsigned long secs = (unsigned long)t;
char comm[TASK_COMM_LEN];
+ int ret;
trace_find_cmdline(entry->pid, comm);
- return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
- comm, entry->pid, iter->cpu, secs, usec_rem);
+ ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
+ comm, entry->pid, iter->cpu);
+ if (!ret)
+ return 0;
+
+ if (trace_flags & TRACE_ITER_IRQ_INFO) {
+ ret = trace_print_lat_fmt(s, entry);
+ if (!ret)
+ return 0;
+ }
+
+ return trace_seq_printf(s, " %5lu.%06lu: ",
+ secs, usec_rem);
}
int trace_print_lat_context(struct trace_iterator *iter)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e4a70c0c71b..ff791ea48b5 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -280,9 +280,20 @@ static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
-static void wakeup_print_header(struct seq_file *s) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }
+
+#ifdef CONFIG_FUNCTION_TRACER
+static void wakeup_print_header(struct seq_file *s)
+{
+ trace_default_header(s);
+}
+#else
+static void wakeup_print_header(struct seq_file *s)
+{
+ trace_latency_header(s);
+}
+#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*