Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ring_buffer.c         19
-rw-r--r--  kernel/trace/trace.c               10
-rw-r--r--  kernel/trace/trace.h                1
-rw-r--r--  kernel/trace/trace_boot.c          12
-rw-r--r--  kernel/trace/trace_functions.c     14
-rw-r--r--  kernel/trace/trace_hw_branches.c   14
-rw-r--r--  kernel/trace/trace_mmiotrace.c      6
-rw-r--r--  kernel/trace/trace_sched_switch.c  16
-rw-r--r--  kernel/trace/trace_sched_wakeup.c   2
-rw-r--r--  kernel/trace/trace_sysprof.c       12
10 files changed, 39 insertions(+), 67 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bb6922a931b1..76f34c0ef29c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -838,6 +838,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
* back to us). This allows us to do a simple loop to
* assign the commit to the tail.
*/
+ again:
while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
cpu_buffer->commit_page->page->commit =
cpu_buffer->commit_page->write;
@@ -853,6 +854,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->commit_page->write;
barrier();
}
+
+ /* again, keep gcc from optimizing */
+ barrier();
+
+ /*
+ * If an interrupt came in just after the first while loop
+ * and pushed the tail page forward, we will be left with
+ * a dangling commit that will never go forward.
+ */
+ if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+ goto again;
}
static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
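
The hunk above fixes a lost-update race: an interrupt can push the tail page
forward between the end of the while loop and the function's return, leaving a
commit that never advances. The fix re-checks after a compiler barrier and
loops back. A minimal sketch of the same pattern, in plain C with illustrative
names (struct pages, settle_commit) rather than the kernel's types:

    #define barrier() __asm__ __volatile__("" ::: "memory")

    struct pages {
            void *commit;   /* last page whose data is committed */
            void *tail;     /* page currently being written */
    };

    static void settle_commit(struct pages *p)
    {
    again:
            while (p->commit != p->tail)
                    p->commit = p->tail;    /* stands in for the page walk */

            barrier();      /* force the compiler to re-read p->tail */

            /* An interrupt may have moved the tail after the loop exited;
             * without this re-check the commit would dangle forever. */
            if (p->commit != p->tail)
                    goto again;
    }
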
@@ -950,12 +962,15 @@ static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
unsigned type, unsigned long length, u64 *ts)
{
- struct buffer_page *tail_page, *head_page, *reader_page;
+ struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
unsigned long tail, write;
struct ring_buffer *buffer = cpu_buffer->buffer;
struct ring_buffer_event *event;
unsigned long flags;
+ commit_page = cpu_buffer->commit_page;
+ /* we just need to protect against interrupts */
+ barrier();
tail_page = cpu_buffer->tail_page;
write = local_add_return(length, &tail_page->write);
tail = write - length;
@@ -981,7 +996,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
* it all the way around the buffer, bail, and warn
* about it.
*/
- if (unlikely(next_page == cpu_buffer->commit_page)) {
+ if (unlikely(next_page == commit_page)) {
WARN_ON_ONCE(1);
goto out_unlock;
}
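
The second hunk closes a related window in __rb_reserve_next(): if commit_page
is read after an interrupt has already advanced tail_page, the wrap-around
check can compare inconsistent values and fire a spurious WARN_ON_ONCE.
Snapshotting commit_page first, with a barrier ordering the two reads, keeps
the comparison consistent. A hedged sketch reusing the illustrative struct
from above:

    static int reserve_next(struct pages *p)
    {
            void *commit_snap;

            commit_snap = p->commit;        /* snapshot before touching tail */
            barrier();                      /* order the two reads vs. gcc */

            /* Even if an interrupt advances both pointers here, we compare
             * the tail against the commit value this context observed. */
            if (p->tail == commit_snap)
                    return -1;              /* would wrap: genuinely full */
            return 0;
    }
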
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0eb6d48347f7..79db26e8216e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -679,6 +679,16 @@ void tracing_reset(struct trace_array *tr, int cpu)
ftrace_enable_cpu();
}
+void tracing_reset_online_cpus(struct trace_array *tr)
+{
+ int cpu;
+
+ tr->time_start = ftrace_now(tr->cpu);
+
+ for_each_online_cpu(cpu)
+ tracing_reset(tr, cpu);
+}
+
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
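
This helper is the heart of the cleanup: every tracer below previously
open-coded exactly this loop under a private name (reset_boot_trace,
function_reset, bts_trace_reset, sched_switch_reset, stack_reset). With one
exported function, most tracers can wire it straight into their struct tracer
callbacks, as the boot tracer hunk further down does. A sketch of a
hypothetical tracer doing the same; only .reset = tracing_reset_online_cpus
is taken from the patch, the rest is illustrative:

    static int example_trace_init(struct trace_array *tr)   /* hypothetical */
    {
            tracing_reset_online_cpus(tr);
            return 0;
    }

    static struct tracer example_tracer __read_mostly =
    {
            .name   = "example",                    /* hypothetical */
            .init   = example_trace_init,           /* hypothetical */
            .reset  = tracing_reset_online_cpus,    /* the new shared helper */
    };
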
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fc75dce7a664..cc7a4f864036 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -374,6 +374,7 @@ struct trace_iterator {
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
+void tracing_reset_online_cpus(struct trace_array *tr);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index a4fa2c57e34e..3ccebde28482 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -37,16 +37,6 @@ void disable_boot_trace(void)
tracing_stop_sched_switch_record();
}
-static void reset_boot_trace(struct trace_array *tr)
-{
- int cpu;
-
- tr->time_start = ftrace_now(tr->cpu);
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
-}
-
static int boot_trace_init(struct trace_array *tr)
{
int cpu;
@@ -130,7 +120,7 @@ struct tracer boot_tracer __read_mostly =
{
.name = "initcall",
.init = boot_trace_init,
- .reset = reset_boot_trace,
+ .reset = tracing_reset_online_cpus,
.print_line = initcall_print_line,
};
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index e74f6d0a3216..9236d7e25a16 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -16,20 +16,10 @@
#include "trace.h"
-static void function_reset(struct trace_array *tr)
-{
- int cpu;
-
- tr->time_start = ftrace_now(tr->cpu);
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
-}
-
static void start_function_trace(struct trace_array *tr)
{
tr->cpu = get_cpu();
- function_reset(tr);
+ tracing_reset_online_cpus(tr);
put_cpu();
tracing_start_cmdline_record();
@@ -55,7 +45,7 @@ static void function_trace_reset(struct trace_array *tr)
static void function_trace_start(struct trace_array *tr)
{
- function_reset(tr);
+ tracing_reset_online_cpus(tr);
}
static struct tracer function_trace __read_mostly =
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index ee29e012aa97..b6a3e20a49a9 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -25,16 +25,6 @@ static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
#define this_buffer per_cpu(buffer, smp_processor_id())
-static void bts_trace_reset(struct trace_array *tr)
-{
- int cpu;
-
- tr->time_start = ftrace_now(tr->cpu);
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
-}
-
static void bts_trace_start_cpu(void *arg)
{
if (this_tracer)
@@ -54,7 +44,7 @@ static void bts_trace_start(struct trace_array *tr)
{
int cpu;
- bts_trace_reset(tr);
+ tracing_reset_online_cpus(tr);
for_each_cpu_mask(cpu, cpu_possible_map)
smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
@@ -78,7 +68,7 @@ static void bts_trace_stop(struct trace_array *tr)
static int bts_trace_init(struct trace_array *tr)
{
- bts_trace_reset(tr);
+ tracing_reset_online_cpus(tr);
bts_trace_start(tr);
return 0;
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 2fb6da6523b3..fffcb069f1dc 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -22,14 +22,10 @@ static unsigned long prev_overruns;
static void mmio_reset_data(struct trace_array *tr)
{
- int cpu;
-
overrun_detected = false;
prev_overruns = 0;
- tr->time_start = ftrace_now(tr->cpu);
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
+ tracing_reset_online_cpus(tr);
}
static int mmio_trace_init(struct trace_array *tr)
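
mmiotrace is the one tracer that keeps a wrapper, because it has tracer-local
state to clear before delegating; after the hunk above the function reduces to:

    static void mmio_reset_data(struct trace_array *tr)
    {
            overrun_detected = false;
            prev_overruns = 0;
            tracing_reset_online_cpus(tr);
    }
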
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 781d72ef873c..df175cb4564f 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -49,7 +49,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
}
static void
-probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
+probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
{
struct trace_array_cpu *data;
unsigned long flags;
@@ -72,16 +72,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
local_irq_restore(flags);
}
-static void sched_switch_reset(struct trace_array *tr)
-{
- int cpu;
-
- tr->time_start = ftrace_now(tr->cpu);
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
-}
-
static int tracing_sched_register(void)
{
int ret;
@@ -197,7 +187,7 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)
static void start_sched_trace(struct trace_array *tr)
{
- sched_switch_reset(tr);
+ tracing_reset_online_cpus(tr);
tracing_start_sched_switch_record();
}
@@ -221,7 +211,7 @@ static void sched_switch_trace_reset(struct trace_array *tr)
static void sched_switch_trace_start(struct trace_array *tr)
{
- sched_switch_reset(tr);
+ tracing_reset_online_cpus(tr);
tracing_start_sched_switch();
}
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 0067b49746c1..43586b689e31 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -211,7 +211,7 @@ static void wakeup_reset(struct trace_array *tr)
}
static void
-probe_wakeup(struct rq *rq, struct task_struct *p)
+probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
int cpu = smp_processor_id();
unsigned long flags;
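
The two probe signature changes, here and in trace_sched_switch.c, are not
part of the reset cleanup: they appear to track a change to the sched_wakeup
tracepoint, which now passes whether the wakeup succeeded. A probe's signature
must mirror its tracepoint's prototype, since the arguments are forwarded
verbatim at the call site; a sketch with the kernel types left opaque:

    struct rq;              /* runqueue, opaque in this sketch */
    struct task_struct;     /* task, opaque in this sketch */

    /* Must match the tracepoint prototype exactly, including the new
     * 'success' argument, or the probe cannot be registered against it. */
    static void probe_wakeup_sketch(struct rq *rq, struct task_struct *p,
                                    int success)
    {
            (void)rq; (void)p; (void)success;       /* body elided */
    }
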
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 54960edb96d0..01becf1f19ff 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -234,20 +234,10 @@ static void stop_stack_timers(void)
stop_stack_timer(cpu);
}
-static void stack_reset(struct trace_array *tr)
-{
- int cpu;
-
- tr->time_start = ftrace_now(tr->cpu);
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
-}
-
static void start_stack_trace(struct trace_array *tr)
{
mutex_lock(&sample_timer_lock);
- stack_reset(tr);
+ tracing_reset_online_cpus(tr);
start_stack_timers();
tracer_enabled = 1;
mutex_unlock(&sample_timer_lock);