From 1c5eb4481e0151d579f738175497f998840f7bbc Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 9 Jan 2020 18:53:48 -0500 Subject: tracing: Rename trace_buffer to array_buffer As we are working to remove the generic "ring_buffer" name that is used by both tracing and perf, the ring_buffer name for tracing will be renamed to trace_buffer, and perf's ring buffer will be renamed to perf_buffer. As there already exists a trace_buffer that is used by the trace_arrays, it needs to be first renamed to array_buffer. Link: https://lore.kernel.org/r/20191213153553.GE20583@krava Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/blktrace.c | 4 +- kernel/trace/ftrace.c | 8 +- kernel/trace/trace.c | 230 +++++++++++++++++------------------ kernel/trace/trace.h | 16 +-- kernel/trace/trace_branch.c | 4 +- kernel/trace/trace_events.c | 18 +-- kernel/trace/trace_events_hist.c | 2 +- kernel/trace/trace_functions.c | 8 +- kernel/trace/trace_functions_graph.c | 14 +-- kernel/trace/trace_hwlat.c | 2 +- kernel/trace/trace_irqsoff.c | 8 +- kernel/trace/trace_kdb.c | 8 +- kernel/trace/trace_mmiotrace.c | 12 +- kernel/trace/trace_output.c | 2 +- kernel/trace/trace_sched_wakeup.c | 20 +-- kernel/trace/trace_selftest.c | 26 ++-- kernel/trace/trace_syscalls.c | 4 +- 17 files changed, 193 insertions(+), 193 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 475e29498bca..3b926f62ed83 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -75,7 +75,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action, ssize_t cgid_len = cgid ? sizeof(cgid) : 0; if (blk_tracer) { - buffer = blk_tr->trace_buffer.buffer; + buffer = blk_tr->array_buffer.buffer; pc = preempt_count(); event = trace_buffer_lock_reserve(buffer, TRACE_BLK, sizeof(*t) + len + cgid_len, @@ -248,7 +248,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, if (blk_tracer) { tracing_record_cmdline(current); - buffer = blk_tr->trace_buffer.buffer; + buffer = blk_tr->array_buffer.buffer; pc = preempt_count(); event = trace_buffer_lock_reserve(buffer, TRACE_BLK, sizeof(*t) + pdu_len + cgid_len, diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 9bf1f2cd515e..3f0ae07e72ef 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -146,7 +146,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, { struct trace_array *tr = op->private; - if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid)) + if (tr && this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid)) return; op->saved_func(ip, parent_ip, op, regs); @@ -6922,7 +6922,7 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, pid_list = rcu_dereference_sched(tr->function_pids); - this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, + this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, trace_ignore_this_task(pid_list, next)); } @@ -6976,7 +6976,7 @@ static void clear_ftrace_pids(struct trace_array *tr) unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); for_each_possible_cpu(cpu) - per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false; + per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = false; rcu_assign_pointer(tr->function_pids, NULL); @@ -7100,7 +7100,7 @@ static void ignore_task_cpu(void *data) pid_list = rcu_dereference_protected(tr->function_pids, mutex_is_locked(&ftrace_lock)); - this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, + 
this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, trace_ignore_this_task(pid_list, current)); } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ddb7e7f5fe8d..67084b7945ff 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -603,7 +603,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids, return read; } -static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu) +static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu) { u64 ts; @@ -619,7 +619,7 @@ static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu) u64 ftrace_now(int cpu) { - return buffer_ftrace_now(&global_trace.trace_buffer, cpu); + return buffer_ftrace_now(&global_trace.array_buffer, cpu); } /** @@ -796,8 +796,8 @@ __trace_buffer_lock_reserve(struct ring_buffer *buffer, void tracer_tracing_on(struct trace_array *tr) { - if (tr->trace_buffer.buffer) - ring_buffer_record_on(tr->trace_buffer.buffer); + if (tr->array_buffer.buffer) + ring_buffer_record_on(tr->array_buffer.buffer); /* * This flag is looked at when buffers haven't been allocated * yet, or by some tracers (like irqsoff), that just want to @@ -865,7 +865,7 @@ int __trace_puts(unsigned long ip, const char *str, int size) alloc = sizeof(*entry) + size + 2; /* possible \n added */ local_save_flags(irq_flags); - buffer = global_trace.trace_buffer.buffer; + buffer = global_trace.array_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, irq_flags, pc); if (!event) @@ -913,7 +913,7 @@ int __trace_bputs(unsigned long ip, const char *str) return 0; local_save_flags(irq_flags); - buffer = global_trace.trace_buffer.buffer; + buffer = global_trace.array_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, irq_flags, pc); if (!event) @@ -1036,9 +1036,9 @@ void *tracing_cond_snapshot_data(struct trace_array *tr) } EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); -static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, - struct trace_buffer *size_buf, int cpu_id); -static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); +static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, + struct array_buffer *size_buf, int cpu_id); +static void set_buffer_entries(struct array_buffer *buf, unsigned long val); int tracing_alloc_snapshot_instance(struct trace_array *tr) { @@ -1048,7 +1048,7 @@ int tracing_alloc_snapshot_instance(struct trace_array *tr) /* allocate spare buffer */ ret = resize_buffer_duplicate_size(&tr->max_buffer, - &tr->trace_buffer, RING_BUFFER_ALL_CPUS); + &tr->array_buffer, RING_BUFFER_ALL_CPUS); if (ret < 0) return ret; @@ -1251,8 +1251,8 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); void tracer_tracing_off(struct trace_array *tr) { - if (tr->trace_buffer.buffer) - ring_buffer_record_off(tr->trace_buffer.buffer); + if (tr->array_buffer.buffer) + ring_buffer_record_off(tr->array_buffer.buffer); /* * This flag is looked at when buffers haven't been allocated * yet, or by some tracers (like irqsoff), that just want to @@ -1294,8 +1294,8 @@ void disable_trace_on_warning(void) */ bool tracer_tracing_is_on(struct trace_array *tr) { - if (tr->trace_buffer.buffer) - return ring_buffer_record_is_on(tr->trace_buffer.buffer); + if (tr->array_buffer.buffer) + return ring_buffer_record_is_on(tr->array_buffer.buffer); return !tr->buffer_disabled; } @@ -1590,8 +1590,8 @@ void latency_fsnotify(struct trace_array *tr) static void __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { - struct trace_buffer 
*trace_buf = &tr->trace_buffer; - struct trace_buffer *max_buf = &tr->max_buffer; + struct array_buffer *trace_buf = &tr->array_buffer; + struct array_buffer *max_buf = &tr->max_buffer; struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); @@ -1649,8 +1649,8 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, arch_spin_lock(&tr->max_lock); - /* Inherit the recordable setting from trace_buffer */ - if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer)) + /* Inherit the recordable setting from array_buffer */ + if (ring_buffer_record_is_set_on(tr->array_buffer.buffer)) ring_buffer_record_on(tr->max_buffer.buffer); else ring_buffer_record_off(tr->max_buffer.buffer); @@ -1659,7 +1659,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) goto out_unlock; #endif - swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); + swap(tr->array_buffer.buffer, tr->max_buffer.buffer); __update_max_tr(tr, tsk, cpu); @@ -1692,7 +1692,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) arch_spin_lock(&tr->max_lock); - ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu); + ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu); if (ret == -EBUSY) { /* @@ -1718,7 +1718,7 @@ static int wait_on_pipe(struct trace_iterator *iter, int full) if (trace_buffer_iter(iter, iter->cpu_file)) return 0; - return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file, + return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full); } @@ -1769,7 +1769,7 @@ static int run_tracer_selftest(struct tracer *type) * internal tracing to verify that everything is in order. * If we fail, we do not register this tracer. 
*/ - tracing_reset_online_cpus(&tr->trace_buffer); + tracing_reset_online_cpus(&tr->array_buffer); tr->current_trace = type; @@ -1795,7 +1795,7 @@ static int run_tracer_selftest(struct tracer *type) return -1; } /* Only reset on passing, to avoid touching corrupted buffers */ - tracing_reset_online_cpus(&tr->trace_buffer); + tracing_reset_online_cpus(&tr->array_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (type->use_max_tr) { @@ -1962,7 +1962,7 @@ int __init register_tracer(struct tracer *type) return ret; } -static void tracing_reset_cpu(struct trace_buffer *buf, int cpu) +static void tracing_reset_cpu(struct array_buffer *buf, int cpu) { struct ring_buffer *buffer = buf->buffer; @@ -1978,7 +1978,7 @@ static void tracing_reset_cpu(struct trace_buffer *buf, int cpu) ring_buffer_record_enable(buffer); } -void tracing_reset_online_cpus(struct trace_buffer *buf) +void tracing_reset_online_cpus(struct array_buffer *buf) { struct ring_buffer *buffer = buf->buffer; int cpu; @@ -2008,7 +2008,7 @@ void tracing_reset_all_online_cpus(void) if (!tr->clear_trace) continue; tr->clear_trace = false; - tracing_reset_online_cpus(&tr->trace_buffer); + tracing_reset_online_cpus(&tr->array_buffer); #ifdef CONFIG_TRACER_MAX_TRACE tracing_reset_online_cpus(&tr->max_buffer); #endif @@ -2117,7 +2117,7 @@ void tracing_start(void) /* Prevent the buffers from switching */ arch_spin_lock(&global_trace.max_lock); - buffer = global_trace.trace_buffer.buffer; + buffer = global_trace.array_buffer.buffer; if (buffer) ring_buffer_record_enable(buffer); @@ -2156,7 +2156,7 @@ static void tracing_start_tr(struct trace_array *tr) goto out; } - buffer = tr->trace_buffer.buffer; + buffer = tr->array_buffer.buffer; if (buffer) ring_buffer_record_enable(buffer); @@ -2182,7 +2182,7 @@ void tracing_stop(void) /* Prevent the buffers from switching */ arch_spin_lock(&global_trace.max_lock); - buffer = global_trace.trace_buffer.buffer; + buffer = global_trace.array_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); @@ -2211,7 +2211,7 @@ static void tracing_stop_tr(struct trace_array *tr) if (tr->stop_count++) goto out; - buffer = tr->trace_buffer.buffer; + buffer = tr->array_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); @@ -2572,7 +2572,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, struct ring_buffer_event *entry; int val; - *current_rb = trace_file->tr->trace_buffer.buffer; + *current_rb = trace_file->tr->array_buffer.buffer; if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) && @@ -2845,7 +2845,7 @@ trace_function(struct trace_array *tr, int pc) { struct trace_event_call *call = &event_function; - struct ring_buffer *buffer = tr->trace_buffer.buffer; + struct ring_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; struct ftrace_entry *entry; @@ -2971,7 +2971,7 @@ static inline void ftrace_trace_stack(struct trace_array *tr, void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, int pc) { - struct ring_buffer *buffer = tr->trace_buffer.buffer; + struct ring_buffer *buffer = tr->array_buffer.buffer; if (rcu_is_watching()) { __ftrace_trace_stack(buffer, flags, skip, pc, NULL); @@ -3009,7 +3009,7 @@ void trace_dump_stack(int skip) /* Skip 1 to skip this function. 
*/ skip++; #endif - __ftrace_trace_stack(global_trace.trace_buffer.buffer, + __ftrace_trace_stack(global_trace.array_buffer.buffer, flags, skip, preempt_count(), NULL); } EXPORT_SYMBOL_GPL(trace_dump_stack); @@ -3154,7 +3154,7 @@ void trace_printk_init_buffers(void) * directly here. If the global_trace.buffer is already * allocated here, then this was called by module code. */ - if (global_trace.trace_buffer.buffer) + if (global_trace.array_buffer.buffer) tracing_start_cmdline_record(); } EXPORT_SYMBOL_GPL(trace_printk_init_buffers); @@ -3217,7 +3217,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) local_save_flags(flags); size = sizeof(*entry) + sizeof(u32) * len; - buffer = tr->trace_buffer.buffer; + buffer = tr->array_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, flags, pc); if (!event) @@ -3302,7 +3302,7 @@ __printf(3, 0) int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { - return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); + return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); } __printf(3, 0) @@ -3367,7 +3367,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, if (buf_iter) event = ring_buffer_iter_peek(buf_iter, ts); else - event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, + event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts, lost_events); if (event) { @@ -3382,7 +3382,7 @@ static struct trace_entry * __find_next_entry(struct trace_iterator *iter, int *ent_cpu, unsigned long *missing_events, u64 *ent_ts) { - struct ring_buffer *buffer = iter->trace_buffer->buffer; + struct ring_buffer *buffer = iter->array_buffer->buffer; struct trace_entry *ent, *next = NULL; unsigned long lost_events = 0, next_lost = 0; int cpu_file = iter->cpu_file; @@ -3459,7 +3459,7 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter) static void trace_consume(struct trace_iterator *iter) { - ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, + ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts, &iter->lost_events); } @@ -3497,7 +3497,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu) unsigned long entries = 0; u64 ts; - per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; + per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0; buf_iter = trace_buffer_iter(iter, cpu); if (!buf_iter) @@ -3511,13 +3511,13 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu) * by the timestamp being before the start of the buffer. 
*/ while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { - if (ts >= iter->trace_buffer->time_start) + if (ts >= iter->array_buffer->time_start) break; entries++; ring_buffer_read(buf_iter, NULL); } - per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; + per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; } /* @@ -3602,7 +3602,7 @@ static void s_stop(struct seq_file *m, void *p) } static void -get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total, +get_total_entries_cpu(struct array_buffer *buf, unsigned long *total, unsigned long *entries, int cpu) { unsigned long count; @@ -3624,7 +3624,7 @@ get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total, } static void -get_total_entries(struct trace_buffer *buf, +get_total_entries(struct array_buffer *buf, unsigned long *total, unsigned long *entries) { unsigned long t, e; @@ -3647,7 +3647,7 @@ unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) if (!tr) tr = &global_trace; - get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu); + get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); return entries; } @@ -3659,7 +3659,7 @@ unsigned long trace_total_entries(struct trace_array *tr) if (!tr) tr = &global_trace; - get_total_entries(&tr->trace_buffer, &total, &entries); + get_total_entries(&tr->array_buffer, &total, &entries); return entries; } @@ -3676,7 +3676,7 @@ static void print_lat_help_header(struct seq_file *m) "# \\ / ||||| \\ | / \n"); } -static void print_event_info(struct trace_buffer *buf, struct seq_file *m) +static void print_event_info(struct array_buffer *buf, struct seq_file *m) { unsigned long total; unsigned long entries; @@ -3687,7 +3687,7 @@ static void print_event_info(struct trace_buffer *buf, struct seq_file *m) seq_puts(m, "#\n"); } -static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, +static void print_func_help_header(struct array_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; @@ -3698,7 +3698,7 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, seq_printf(m, "# | | %s | | |\n", tgid ? 
" | " : ""); } -static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, +static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; @@ -3720,7 +3720,7 @@ void print_trace_header(struct seq_file *m, struct trace_iterator *iter) { unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); - struct trace_buffer *buf = iter->trace_buffer; + struct array_buffer *buf = iter->array_buffer; struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); struct tracer *type = iter->trace; unsigned long entries; @@ -3795,7 +3795,7 @@ static void test_cpu_buff_start(struct trace_iterator *iter) cpumask_test_cpu(iter->cpu, iter->started)) return; - if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) + if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries) return; if (cpumask_available(iter->started)) @@ -3929,7 +3929,7 @@ int trace_empty(struct trace_iterator *iter) if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { - if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) + if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) return 0; } return 1; @@ -3941,7 +3941,7 @@ int trace_empty(struct trace_iterator *iter) if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { - if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) + if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) return 0; } } @@ -4031,10 +4031,10 @@ void trace_default_header(struct seq_file *m) } else { if (!(trace_flags & TRACE_ITER_VERBOSE)) { if (trace_flags & TRACE_ITER_IRQ_INFO) - print_func_help_header_irq(iter->trace_buffer, + print_func_help_header_irq(iter->array_buffer, m, trace_flags); else - print_func_help_header(iter->trace_buffer, m, + print_func_help_header(iter->array_buffer, m, trace_flags); } } @@ -4192,10 +4192,10 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) #ifdef CONFIG_TRACER_MAX_TRACE /* Currently only the top directory has a snapshot */ if (tr->current_trace->print_max || snapshot) - iter->trace_buffer = &tr->max_buffer; + iter->array_buffer = &tr->max_buffer; else #endif - iter->trace_buffer = &tr->trace_buffer; + iter->array_buffer = &tr->array_buffer; iter->snapshot = snapshot; iter->pos = -1; iter->cpu_file = tracing_get_cpu(inode); @@ -4206,7 +4206,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ - if (ring_buffer_overruns(iter->trace_buffer->buffer)) + if (ring_buffer_overruns(iter->array_buffer->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. 
*/ @@ -4220,7 +4220,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { iter->buffer_iter[cpu] = - ring_buffer_read_prepare(iter->trace_buffer->buffer, + ring_buffer_read_prepare(iter->array_buffer->buffer, cpu, GFP_KERNEL); } ring_buffer_read_prepare_sync(); @@ -4231,7 +4231,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) } else { cpu = iter->cpu_file; iter->buffer_iter[cpu] = - ring_buffer_read_prepare(iter->trace_buffer->buffer, + ring_buffer_read_prepare(iter->array_buffer->buffer, cpu, GFP_KERNEL); ring_buffer_read_prepare_sync(); ring_buffer_read_start(iter->buffer_iter[cpu]); @@ -4357,7 +4357,7 @@ static int tracing_open(struct inode *inode, struct file *file) /* If this file was open for write, then erase contents */ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { int cpu = tracing_get_cpu(inode); - struct trace_buffer *trace_buf = &tr->trace_buffer; + struct array_buffer *trace_buf = &tr->array_buffer; #ifdef CONFIG_TRACER_MAX_TRACE if (tr->current_trace->print_max) @@ -4578,13 +4578,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, */ if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && !cpumask_test_cpu(cpu, tracing_cpumask_new)) { - atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); - ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); + atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); + ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); } if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && cpumask_test_cpu(cpu, tracing_cpumask_new)) { - atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); - ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); + atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); + ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); } } arch_spin_unlock(&tr->max_lock); @@ -4726,7 +4726,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) ftrace_pid_follow_fork(tr, enabled); if (mask == TRACE_ITER_OVERWRITE) { - ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); + ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); #ifdef CONFIG_TRACER_MAX_TRACE ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); #endif @@ -5534,11 +5534,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf, int tracer_init(struct tracer *t, struct trace_array *tr) { - tracing_reset_online_cpus(&tr->trace_buffer); + tracing_reset_online_cpus(&tr->array_buffer); return t->init(tr); } -static void set_buffer_entries(struct trace_buffer *buf, unsigned long val) +static void set_buffer_entries(struct array_buffer *buf, unsigned long val) { int cpu; @@ -5548,8 +5548,8 @@ static void set_buffer_entries(struct trace_buffer *buf, unsigned long val) #ifdef CONFIG_TRACER_MAX_TRACE /* resize @tr's buffer to the size of @size_tr's entries */ -static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, - struct trace_buffer *size_buf, int cpu_id) +static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, + struct array_buffer *size_buf, int cpu_id) { int cpu, ret = 0; @@ -5587,10 +5587,10 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr, ring_buffer_expanded = true; /* May be called before buffers are initialized */ - if (!tr->trace_buffer.buffer) + if (!tr->array_buffer.buffer) return 0; - ret = ring_buffer_resize(tr->trace_buffer.buffer, size, 
cpu); + ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu); if (ret < 0) return ret; @@ -5601,8 +5601,8 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr, ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); if (ret < 0) { - int r = resize_buffer_duplicate_size(&tr->trace_buffer, - &tr->trace_buffer, cpu); + int r = resize_buffer_duplicate_size(&tr->array_buffer, + &tr->array_buffer, cpu); if (r < 0) { /* * AARGH! We are left with different @@ -5633,9 +5633,9 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr, #endif /* CONFIG_TRACER_MAX_TRACE */ if (cpu == RING_BUFFER_ALL_CPUS) - set_buffer_entries(&tr->trace_buffer, size); + set_buffer_entries(&tr->array_buffer, size); else - per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; + per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size; return ret; } @@ -5979,7 +5979,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; iter->tr = tr; - iter->trace_buffer = &tr->trace_buffer; + iter->array_buffer = &tr->array_buffer; iter->cpu_file = tracing_get_cpu(inode); mutex_init(&iter->mutex); filp->private_data = iter; @@ -6039,7 +6039,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl */ return EPOLLIN | EPOLLRDNORM; else - return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, + return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file, filp, poll_table); } @@ -6356,8 +6356,8 @@ tracing_entries_read(struct file *filp, char __user *ubuf, for_each_tracing_cpu(cpu) { /* fill in the size from first enabled cpu */ if (size == 0) - size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; - if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { + size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; + if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { buf_size_same = 0; break; } @@ -6373,7 +6373,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf, } else r = sprintf(buf, "X\n"); } else - r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); + r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); mutex_unlock(&trace_types_lock); @@ -6420,7 +6420,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf, mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { - size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; + size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; if (!ring_buffer_expanded) expanded_size += trace_buf_size >> 10; } @@ -6499,7 +6499,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, if (cnt < FAULTED_SIZE) size += FAULTED_SIZE - cnt; - buffer = tr->trace_buffer.buffer; + buffer = tr->array_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, irq_flags, preempt_count()); if (unlikely(!event)) @@ -6579,7 +6579,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf, if (cnt < FAULT_SIZE_ID) size += FAULT_SIZE_ID - cnt; - buffer = tr->trace_buffer.buffer; + buffer = tr->array_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, irq_flags, preempt_count()); if (!event) @@ -6634,13 +6634,13 @@ int tracing_set_clock(struct trace_array *tr, const char *clockstr) tr->clock_id = i; - ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); + ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); /* * New clock may not be 
consistent with the previous clock. * Reset the buffer so that it doesn't have incomparable timestamps. */ - tracing_reset_online_cpus(&tr->trace_buffer); + tracing_reset_online_cpus(&tr->array_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->max_buffer.buffer) @@ -6703,7 +6703,7 @@ static int tracing_time_stamp_mode_show(struct seq_file *m, void *v) mutex_lock(&trace_types_lock); - if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer)) + if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) seq_puts(m, "delta [absolute]\n"); else seq_puts(m, "[delta] absolute\n"); @@ -6748,7 +6748,7 @@ int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs) goto out; } - ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs); + ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->max_buffer.buffer) @@ -6797,7 +6797,7 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file) ret = 0; iter->tr = tr; - iter->trace_buffer = &tr->max_buffer; + iter->array_buffer = &tr->max_buffer; iter->cpu_file = tracing_get_cpu(inode); m->private = iter; file->private_data = m; @@ -6860,7 +6860,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, #endif if (tr->allocated_snapshot) ret = resize_buffer_duplicate_size(&tr->max_buffer, - &tr->trace_buffer, iter->cpu_file); + &tr->array_buffer, iter->cpu_file); else ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) @@ -6935,7 +6935,7 @@ static int snapshot_raw_open(struct inode *inode, struct file *filp) } info->iter.snapshot = true; - info->iter.trace_buffer = &info->iter.tr->max_buffer; + info->iter.array_buffer = &info->iter.tr->max_buffer; return ret; } @@ -7310,7 +7310,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) info->iter.tr = tr; info->iter.cpu_file = tracing_get_cpu(inode); info->iter.trace = tr->current_trace; - info->iter.trace_buffer = &tr->trace_buffer; + info->iter.array_buffer = &tr->array_buffer; info->spare = NULL; /* Force reading ring buffer for first read */ info->read = (unsigned int)-1; @@ -7355,7 +7355,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, #endif if (!info->spare) { - info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, + info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer, iter->cpu_file); if (IS_ERR(info->spare)) { ret = PTR_ERR(info->spare); @@ -7373,7 +7373,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, again: trace_access_lock(iter->cpu_file); - ret = ring_buffer_read_page(iter->trace_buffer->buffer, + ret = ring_buffer_read_page(iter->array_buffer->buffer, &info->spare, count, iter->cpu_file, 0); @@ -7423,7 +7423,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) __trace_array_put(iter->tr); if (info->spare) - ring_buffer_free_read_page(iter->trace_buffer->buffer, + ring_buffer_free_read_page(iter->array_buffer->buffer, info->spare_cpu, info->spare); kfree(info); @@ -7528,7 +7528,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, again: trace_access_lock(iter->cpu_file); - entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); + entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { struct page *page; @@ -7541,7 +7541,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, } refcount_set(&ref->refcount, 1); - ref->buffer = 
iter->trace_buffer->buffer; + ref->buffer = iter->array_buffer->buffer; ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); if (IS_ERR(ref->page)) { ret = PTR_ERR(ref->page); @@ -7569,7 +7569,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, spd.nr_pages++; *ppos += PAGE_SIZE; - entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); + entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); } trace_access_unlock(iter->cpu_file); @@ -7613,7 +7613,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; - struct trace_buffer *trace_buf = &tr->trace_buffer; + struct array_buffer *trace_buf = &tr->array_buffer; int cpu = tracing_get_cpu(inode); struct trace_seq *s; unsigned long cnt; @@ -8272,7 +8272,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; - struct ring_buffer *buffer = tr->trace_buffer.buffer; + struct ring_buffer *buffer = tr->array_buffer.buffer; unsigned long val; int ret; @@ -8362,7 +8362,7 @@ static void init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); static int -allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) +allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size) { enum ring_buffer_flags rb_flags; @@ -8382,8 +8382,8 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size } /* Allocate the first page for all buffers */ - set_buffer_entries(&tr->trace_buffer, - ring_buffer_size(tr->trace_buffer.buffer, 0)); + set_buffer_entries(&tr->array_buffer, + ring_buffer_size(tr->array_buffer.buffer, 0)); return 0; } @@ -8392,7 +8392,7 @@ static int allocate_trace_buffers(struct trace_array *tr, int size) { int ret; - ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); + ret = allocate_trace_buffer(tr, &tr->array_buffer, size); if (ret) return ret; @@ -8400,10 +8400,10 @@ static int allocate_trace_buffers(struct trace_array *tr, int size) ret = allocate_trace_buffer(tr, &tr->max_buffer, allocate_snapshot ? 
size : 1); if (WARN_ON(ret)) { - ring_buffer_free(tr->trace_buffer.buffer); - tr->trace_buffer.buffer = NULL; - free_percpu(tr->trace_buffer.data); - tr->trace_buffer.data = NULL; + ring_buffer_free(tr->array_buffer.buffer); + tr->array_buffer.buffer = NULL; + free_percpu(tr->array_buffer.data); + tr->array_buffer.data = NULL; return -ENOMEM; } tr->allocated_snapshot = allocate_snapshot; @@ -8417,7 +8417,7 @@ static int allocate_trace_buffers(struct trace_array *tr, int size) return 0; } -static void free_trace_buffer(struct trace_buffer *buf) +static void free_trace_buffer(struct array_buffer *buf) { if (buf->buffer) { ring_buffer_free(buf->buffer); @@ -8432,7 +8432,7 @@ static void free_trace_buffers(struct trace_array *tr) if (!tr) return; - free_trace_buffer(&tr->trace_buffer); + free_trace_buffer(&tr->array_buffer); #ifdef CONFIG_TRACER_MAX_TRACE free_trace_buffer(&tr->max_buffer); @@ -9036,13 +9036,13 @@ void trace_init_global_iter(struct trace_iterator *iter) iter->tr = &global_trace; iter->trace = iter->tr->current_trace; iter->cpu_file = RING_BUFFER_ALL_CPUS; - iter->trace_buffer = &global_trace.trace_buffer; + iter->array_buffer = &global_trace.array_buffer; if (iter->trace && iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ - if (ring_buffer_overruns(iter->trace_buffer->buffer)) + if (ring_buffer_overruns(iter->array_buffer->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. */ @@ -9083,7 +9083,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) trace_init_global_iter(&iter); for_each_tracing_cpu(cpu) { - atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); + atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); } old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; @@ -9151,7 +9151,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) tr->trace_flags |= old_userobj; for_each_tracing_cpu(cpu) { - atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); + atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); } atomic_dec(&dump_running); printk_nmi_direct_exit(); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 63bf60f79398..fd679fe92c1f 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -176,7 +176,7 @@ struct trace_array_cpu { struct tracer; struct trace_option_dentry; -struct trace_buffer { +struct array_buffer { struct trace_array *tr; struct ring_buffer *buffer; struct trace_array_cpu __percpu *data; @@ -249,7 +249,7 @@ struct cond_snapshot { struct trace_array { struct list_head list; char *name; - struct trace_buffer trace_buffer; + struct array_buffer array_buffer; #ifdef CONFIG_TRACER_MAX_TRACE /* * The max_buffer is used to snapshot the trace when a maximum @@ -257,12 +257,12 @@ struct trace_array { * Some tracers will use this to store a maximum trace while * it continues examining live traces. * - * The buffers for the max_buffer are set up the same as the trace_buffer + * The buffers for the max_buffer are set up the same as the array_buffer * When a snapshot is taken, the buffer of the max_buffer is swapped - * with the buffer of the trace_buffer and the buffers are reset for - * the trace_buffer so the tracing can continue. + * with the buffer of the array_buffer and the buffers are reset for + * the array_buffer so the tracing can continue. 
*/ - struct trace_buffer max_buffer; + struct array_buffer max_buffer; bool allocated_snapshot; #endif #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) @@ -685,7 +685,7 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu) int tracer_init(struct tracer *t, struct trace_array *tr); int tracing_is_enabled(void); -void tracing_reset_online_cpus(struct trace_buffer *buf); +void tracing_reset_online_cpus(struct array_buffer *buf); void tracing_reset_current(int cpu); void tracing_reset_all_online_cpus(void); int tracing_open_generic(struct inode *inode, struct file *filp); @@ -1057,7 +1057,7 @@ struct ftrace_func_command { extern bool ftrace_filter_param __initdata; static inline int ftrace_trace_task(struct trace_array *tr) { - return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid); + return !this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid); } extern int ftrace_is_dead(void); int ftrace_create_function_files(struct trace_array *tr, diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 88e158d27965..d5989284a99a 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c @@ -55,12 +55,12 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect) raw_local_irq_save(flags); current->trace_recursion |= TRACE_BRANCH_BIT; - data = this_cpu_ptr(tr->trace_buffer.data); + data = this_cpu_ptr(tr->array_buffer.data); if (atomic_read(&data->disabled)) goto out; pc = preempt_count(); - buffer = tr->trace_buffer.buffer; + buffer = tr->array_buffer.buffer; event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH, sizeof(*entry), flags, pc); if (!event) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index a5b614cc3887..ac557f685f0b 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -237,7 +237,7 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file) if (!pid_list) return false; - data = this_cpu_ptr(tr->trace_buffer.data); + data = this_cpu_ptr(tr->array_buffer.data); return data->ignore_pid; } @@ -546,7 +546,7 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt, pid_list = rcu_dereference_sched(tr->filtered_pids); - this_cpu_write(tr->trace_buffer.data->ignore_pid, + this_cpu_write(tr->array_buffer.data->ignore_pid, trace_ignore_this_task(pid_list, prev) && trace_ignore_this_task(pid_list, next)); } @@ -560,7 +560,7 @@ event_filter_pid_sched_switch_probe_post(void *data, bool preempt, pid_list = rcu_dereference_sched(tr->filtered_pids); - this_cpu_write(tr->trace_buffer.data->ignore_pid, + this_cpu_write(tr->array_buffer.data->ignore_pid, trace_ignore_this_task(pid_list, next)); } @@ -571,12 +571,12 @@ event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task) struct trace_pid_list *pid_list; /* Nothing to do if we are already tracing */ - if (!this_cpu_read(tr->trace_buffer.data->ignore_pid)) + if (!this_cpu_read(tr->array_buffer.data->ignore_pid)) return; pid_list = rcu_dereference_sched(tr->filtered_pids); - this_cpu_write(tr->trace_buffer.data->ignore_pid, + this_cpu_write(tr->array_buffer.data->ignore_pid, trace_ignore_this_task(pid_list, task)); } @@ -587,13 +587,13 @@ event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task) struct trace_pid_list *pid_list; /* Nothing to do if we are not tracing */ - if (this_cpu_read(tr->trace_buffer.data->ignore_pid)) + if (this_cpu_read(tr->array_buffer.data->ignore_pid)) return; pid_list = rcu_dereference_sched(tr->filtered_pids); /* Set tracing if 
current is enabled */ - this_cpu_write(tr->trace_buffer.data->ignore_pid, + this_cpu_write(tr->array_buffer.data->ignore_pid, trace_ignore_this_task(pid_list, current)); } @@ -625,7 +625,7 @@ static void __ftrace_clear_event_pids(struct trace_array *tr) } for_each_possible_cpu(cpu) - per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false; + per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false; rcu_assign_pointer(tr->filtered_pids, NULL); @@ -1594,7 +1594,7 @@ static void ignore_task_cpu(void *data) pid_list = rcu_dereference_protected(tr->filtered_pids, mutex_is_locked(&event_mutex)); - this_cpu_write(tr->trace_buffer.data->ignore_pid, + this_cpu_write(tr->array_buffer.data->ignore_pid, trace_ignore_this_task(pid_list, current)); } diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index f62de5f43e79..94c581c1a897 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -895,7 +895,7 @@ static notrace void trace_event_raw_event_synth(void *__data, * Avoid ring buffer recursion detection, as this event * is being performed within another event. */ - buffer = trace_file->tr->trace_buffer.buffer; + buffer = trace_file->tr->array_buffer.buffer; ring_buffer_nest_start(buffer); entry = trace_event_buffer_reserve(&fbuffer, trace_file, diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index b611cd36e22d..8a4c8d5c2c98 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -101,7 +101,7 @@ static int function_trace_init(struct trace_array *tr) ftrace_init_array_ops(tr, func); - tr->trace_buffer.cpu = get_cpu(); + tr->array_buffer.cpu = get_cpu(); put_cpu(); tracing_start_cmdline_record(); @@ -118,7 +118,7 @@ static void function_trace_reset(struct trace_array *tr) static void function_trace_start(struct trace_array *tr) { - tracing_reset_online_cpus(&tr->trace_buffer); + tracing_reset_online_cpus(&tr->array_buffer); } static void @@ -143,7 +143,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, goto out; cpu = smp_processor_id(); - data = per_cpu_ptr(tr->trace_buffer.data, cpu); + data = per_cpu_ptr(tr->array_buffer.data, cpu); if (!atomic_read(&data->disabled)) { local_save_flags(flags); trace_function(tr, ip, parent_ip, flags, pc); @@ -192,7 +192,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, */ local_irq_save(flags); cpu = raw_smp_processor_id(); - data = per_cpu_ptr(tr->trace_buffer.data, cpu); + data = per_cpu_ptr(tr->array_buffer.data, cpu); disabled = atomic_inc_return(&data->disabled); if (likely(disabled == 1)) { diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 78af97163147..79b2c2df00c5 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -101,7 +101,7 @@ int __trace_graph_entry(struct trace_array *tr, { struct trace_event_call *call = &event_funcgraph_entry; struct ring_buffer_event *event; - struct ring_buffer *buffer = tr->trace_buffer.buffer; + struct ring_buffer *buffer = tr->array_buffer.buffer; struct ftrace_graph_ent_entry *entry; event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, @@ -171,7 +171,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) local_irq_save(flags); cpu = raw_smp_processor_id(); - data = per_cpu_ptr(tr->trace_buffer.data, cpu); + data = per_cpu_ptr(tr->array_buffer.data, cpu); disabled = atomic_inc_return(&data->disabled); if (likely(disabled == 1)) { pc = preempt_count(); @@ -221,7 +221,7 
@@ void __trace_graph_return(struct trace_array *tr, { struct trace_event_call *call = &event_funcgraph_exit; struct ring_buffer_event *event; - struct ring_buffer *buffer = tr->trace_buffer.buffer; + struct ring_buffer *buffer = tr->array_buffer.buffer; struct ftrace_graph_ret_entry *entry; event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, @@ -252,7 +252,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace) local_irq_save(flags); cpu = raw_smp_processor_id(); - data = per_cpu_ptr(tr->trace_buffer.data, cpu); + data = per_cpu_ptr(tr->array_buffer.data, cpu); disabled = atomic_inc_return(&data->disabled); if (likely(disabled == 1)) { pc = preempt_count(); @@ -444,9 +444,9 @@ get_return_for_leaf(struct trace_iterator *iter, * We need to consume the current entry to see * the next one. */ - ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, + ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, NULL, NULL); - event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu, + event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu, NULL, NULL); } @@ -503,7 +503,7 @@ print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s) { unsigned long long usecs; - usecs = iter->ts - iter->trace_buffer->time_start; + usecs = iter->ts - iter->array_buffer->time_start; do_div(usecs, NSEC_PER_USEC); trace_seq_printf(s, "%9llu us | ", usecs); diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index 6638d63f0921..fc62a6049bd3 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -104,7 +104,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample) { struct trace_array *tr = hwlat_trace; struct trace_event_call *call = &event_hwlat; - struct ring_buffer *buffer = tr->trace_buffer.buffer; + struct ring_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; struct hwlat_entry *entry; unsigned long flags; diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index a745b0cee5d3..10bbb0f381d5 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -122,7 +122,7 @@ static int func_prolog_dec(struct trace_array *tr, if (!irqs_disabled_flags(*flags) && !preempt_count()) return 0; - *data = per_cpu_ptr(tr->trace_buffer.data, cpu); + *data = per_cpu_ptr(tr->array_buffer.data, cpu); disabled = atomic_inc_return(&(*data)->disabled); if (likely(disabled == 1)) @@ -167,7 +167,7 @@ static int irqsoff_display_graph(struct trace_array *tr, int set) per_cpu(tracing_cpu, cpu) = 0; tr->max_latency = 0; - tracing_reset_online_cpus(&irqsoff_trace->trace_buffer); + tracing_reset_online_cpus(&irqsoff_trace->array_buffer); return start_irqsoff_tracer(irqsoff_trace, set); } @@ -382,7 +382,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc) if (per_cpu(tracing_cpu, cpu)) return; - data = per_cpu_ptr(tr->trace_buffer.data, cpu); + data = per_cpu_ptr(tr->array_buffer.data, cpu); if (unlikely(!data) || atomic_read(&data->disabled)) return; @@ -420,7 +420,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc) if (!tracer_enabled || !tracing_is_enabled()) return; - data = per_cpu_ptr(tr->trace_buffer.data, cpu); + data = per_cpu_ptr(tr->array_buffer.data, cpu); if (unlikely(!data) || !data->critical_start || atomic_read(&data->disabled)) diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index cca65044c14c..9da76104f7a2 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c @@ -43,7 +43,7 @@ static void 
ftrace_dump_buf(int skip_entries, long cpu_file) if (cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { iter.buffer_iter[cpu] = - ring_buffer_read_prepare(iter.trace_buffer->buffer, + ring_buffer_read_prepare(iter.array_buffer->buffer, cpu, GFP_ATOMIC); ring_buffer_read_start(iter.buffer_iter[cpu]); tracing_iter_reset(&iter, cpu); @@ -51,7 +51,7 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file) } else { iter.cpu_file = cpu_file; iter.buffer_iter[cpu_file] = - ring_buffer_read_prepare(iter.trace_buffer->buffer, + ring_buffer_read_prepare(iter.array_buffer->buffer, cpu_file, GFP_ATOMIC); ring_buffer_read_start(iter.buffer_iter[cpu_file]); tracing_iter_reset(&iter, cpu_file); @@ -124,7 +124,7 @@ static int kdb_ftdump(int argc, const char **argv) iter.buffer_iter = buffer_iter; for_each_tracing_cpu(cpu) { - atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); + atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); } /* A negative skip_entries means skip all but the last entries */ @@ -139,7 +139,7 @@ static int kdb_ftdump(int argc, const char **argv) ftrace_dump_buf(skip_entries, cpu_file); for_each_tracing_cpu(cpu) { - atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); + atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); } kdb_trap_printk--; diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index b0388016b687..c30137148759 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -32,7 +32,7 @@ static void mmio_reset_data(struct trace_array *tr) overrun_detected = false; prev_overruns = 0; - tracing_reset_online_cpus(&tr->trace_buffer); + tracing_reset_online_cpus(&tr->array_buffer); } static int mmio_trace_init(struct trace_array *tr) @@ -122,7 +122,7 @@ static void mmio_close(struct trace_iterator *iter) static unsigned long count_overruns(struct trace_iterator *iter) { unsigned long cnt = atomic_xchg(&dropped_count, 0); - unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer); + unsigned long over = ring_buffer_overruns(iter->array_buffer->buffer); if (over > prev_overruns) cnt += over - prev_overruns; @@ -297,7 +297,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, struct mmiotrace_rw *rw) { struct trace_event_call *call = &event_mmiotrace_rw; - struct ring_buffer *buffer = tr->trace_buffer.buffer; + struct ring_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; struct trace_mmiotrace_rw *entry; int pc = preempt_count(); @@ -318,7 +318,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, void mmio_trace_rw(struct mmiotrace_rw *rw) { struct trace_array *tr = mmio_trace_array; - struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id()); + struct trace_array_cpu *data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id()); __trace_mmiotrace_rw(tr, data, rw); } @@ -327,7 +327,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, struct mmiotrace_map *map) { struct trace_event_call *call = &event_mmiotrace_map; - struct ring_buffer *buffer = tr->trace_buffer.buffer; + struct ring_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; struct trace_mmiotrace_map *entry; int pc = preempt_count(); @@ -351,7 +351,7 @@ void mmio_trace_mapping(struct mmiotrace_map *map) struct trace_array_cpu *data; preempt_disable(); - data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id()); + data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id()); 
__trace_mmiotrace_map(tr, data, map); preempt_enable(); } diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index d9b4b7c22db4..b4909082f6a4 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -538,7 +538,7 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts) struct trace_array *tr = iter->tr; unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE; unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS; - unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start; + unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start; unsigned long long rel_ts = next_ts - iter->ts; struct trace_seq *s = &iter->seq; diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 617e297f46dc..510fda2fcd24 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -82,7 +82,7 @@ func_prolog_preempt_disable(struct trace_array *tr, if (cpu != wakeup_current_cpu) goto out_enable; - *data = per_cpu_ptr(tr->trace_buffer.data, cpu); + *data = per_cpu_ptr(tr->array_buffer.data, cpu); disabled = atomic_inc_return(&(*data)->disabled); if (unlikely(disabled != 1)) goto out; @@ -378,7 +378,7 @@ tracing_sched_switch_trace(struct trace_array *tr, unsigned long flags, int pc) { struct trace_event_call *call = &event_context_switch; - struct ring_buffer *buffer = tr->trace_buffer.buffer; + struct ring_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; struct ctx_switch_entry *entry; @@ -408,7 +408,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, struct trace_event_call *call = &event_wakeup; struct ring_buffer_event *event; struct ctx_switch_entry *entry; - struct ring_buffer *buffer = tr->trace_buffer.buffer; + struct ring_buffer *buffer = tr->array_buffer.buffer; event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, sizeof(*entry), flags, pc); @@ -459,7 +459,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt, /* disable local data, not wakeup_cpu data */ cpu = raw_smp_processor_id(); - disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled); + disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled); if (likely(disabled != 1)) goto out; @@ -471,7 +471,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt, goto out_unlock; /* The task we are waiting for is waking up */ - data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu); + data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu); __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); @@ -494,7 +494,7 @@ out_unlock: arch_spin_unlock(&wakeup_lock); local_irq_restore(flags); out: - atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled); + atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled); } static void __wakeup_reset(struct trace_array *tr) @@ -513,7 +513,7 @@ static void wakeup_reset(struct trace_array *tr) { unsigned long flags; - tracing_reset_online_cpus(&tr->trace_buffer); + tracing_reset_online_cpus(&tr->array_buffer); local_irq_save(flags); arch_spin_lock(&wakeup_lock); @@ -551,7 +551,7 @@ probe_wakeup(void *ignore, struct task_struct *p) return; pc = preempt_count(); - disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled); + disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled); if 
(unlikely(disabled != 1)) goto out; @@ -583,7 +583,7 @@ probe_wakeup(void *ignore, struct task_struct *p) local_save_flags(flags); - data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu); + data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu); data->preempt_timestamp = ftrace_now(cpu); tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc); __trace_stack(wakeup_trace, flags, 0, pc); @@ -598,7 +598,7 @@ probe_wakeup(void *ignore, struct task_struct *p) out_locked: arch_spin_unlock(&wakeup_lock); out: - atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled); + atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled); } static void start_wakeup_tracer(struct trace_array *tr) diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 69ee8ef12cee..b5e3496cf803 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -23,7 +23,7 @@ static inline int trace_valid_entry(struct trace_entry *entry) return 0; } -static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu) +static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu) { struct ring_buffer_event *event; struct trace_entry *entry; @@ -60,7 +60,7 @@ static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu) * Test the trace buffer to see if all the elements * are still sane. */ -static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count) +static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count) { unsigned long flags, cnt = 0; int cpu, ret = 0; @@ -362,7 +362,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace, msleep(100); /* we should have nothing in the buffer */ - ret = trace_test_buffer(&tr->trace_buffer, &count); + ret = trace_test_buffer(&tr->array_buffer, &count); if (ret) goto out; @@ -383,7 +383,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace, ftrace_enabled = 0; /* check the trace buffer */ - ret = trace_test_buffer(&tr->trace_buffer, &count); + ret = trace_test_buffer(&tr->array_buffer, &count); ftrace_enabled = 1; tracing_start(); @@ -682,7 +682,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) ftrace_enabled = 0; /* check the trace buffer */ - ret = trace_test_buffer(&tr->trace_buffer, &count); + ret = trace_test_buffer(&tr->array_buffer, &count); ftrace_enabled = 1; trace->reset(tr); @@ -768,7 +768,7 @@ trace_selftest_startup_function_graph(struct tracer *trace, * Simulate the init() callback but we attach a watchdog callback * to detect and recover from possible hangs */ - tracing_reset_online_cpus(&tr->trace_buffer); + tracing_reset_online_cpus(&tr->array_buffer); set_graph_array(tr); ret = register_ftrace_graph(&fgraph_ops); if (ret) { @@ -790,7 +790,7 @@ trace_selftest_startup_function_graph(struct tracer *trace, tracing_stop(); /* check the trace buffer */ - ret = trace_test_buffer(&tr->trace_buffer, &count); + ret = trace_test_buffer(&tr->array_buffer, &count); /* Need to also simulate the tr->reset to remove this fgraph_ops */ tracing_stop_cmdline_record(); @@ -848,7 +848,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) /* stop the tracing. 
*/ tracing_stop(); /* check both trace buffers */ - ret = trace_test_buffer(&tr->trace_buffer, NULL); + ret = trace_test_buffer(&tr->array_buffer, NULL); if (!ret) ret = trace_test_buffer(&tr->max_buffer, &count); trace->reset(tr); @@ -910,7 +910,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr) /* stop the tracing. */ tracing_stop(); /* check both trace buffers */ - ret = trace_test_buffer(&tr->trace_buffer, NULL); + ret = trace_test_buffer(&tr->array_buffer, NULL); if (!ret) ret = trace_test_buffer(&tr->max_buffer, &count); trace->reset(tr); @@ -976,7 +976,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * /* stop the tracing. */ tracing_stop(); /* check both trace buffers */ - ret = trace_test_buffer(&tr->trace_buffer, NULL); + ret = trace_test_buffer(&tr->array_buffer, NULL); if (ret) goto out; @@ -1006,7 +1006,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * /* stop the tracing. */ tracing_stop(); /* check both trace buffers */ - ret = trace_test_buffer(&tr->trace_buffer, NULL); + ret = trace_test_buffer(&tr->array_buffer, NULL); if (ret) goto out; @@ -1136,7 +1136,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) /* stop the tracing. */ tracing_stop(); /* check both trace buffers */ - ret = trace_test_buffer(&tr->trace_buffer, NULL); + ret = trace_test_buffer(&tr->array_buffer, NULL); if (!ret) ret = trace_test_buffer(&tr->max_buffer, &count); @@ -1177,7 +1177,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) /* stop the tracing. */ tracing_stop(); /* check the trace buffer */ - ret = trace_test_buffer(&tr->trace_buffer, &count); + ret = trace_test_buffer(&tr->array_buffer, &count); trace->reset(tr); tracing_start(); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 16fa218556fa..bd92843c2b0e 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -345,7 +345,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) local_save_flags(irq_flags); pc = preempt_count(); - buffer = tr->trace_buffer.buffer; + buffer = tr->array_buffer.buffer; event = trace_buffer_lock_reserve(buffer, sys_data->enter_event->event.type, size, irq_flags, pc); if (!event) @@ -391,7 +391,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) local_save_flags(irq_flags); pc = preempt_count(); - buffer = tr->trace_buffer.buffer; + buffer = tr->array_buffer.buffer; event = trace_buffer_lock_reserve(buffer, sys_data->exit_event->event.type, sizeof(*entry), irq_flags, pc); -- cgit v1.2.3 From 13292494379f92f532de71b31a54018336adc589 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Fri, 13 Dec 2019 13:58:57 -0500 Subject: tracing: Make struct ring_buffer less ambiguous As there's two struct ring_buffers in the kernel, it causes some confusion. The other one being the perf ring buffer. It was agreed upon that as neither of the ring buffers are generic enough to be used globally, they should be renamed as: perf's ring_buffer -> perf_buffer ftrace's ring_buffer -> trace_buffer This implements the changes to the ring buffer that ftrace uses. 
Link: https://lore.kernel.org/r/20191213140531.116b3200@gandalf.local.home Signed-off-by: Steven Rostedt (VMware) --- drivers/oprofile/cpu_buffer.c | 2 +- include/linux/ring_buffer.h | 110 +++++++++++++++---------------- include/linux/trace_events.h | 4 +- include/trace/trace_events.h | 2 +- kernel/trace/blktrace.c | 4 +- kernel/trace/ring_buffer.c | 124 +++++++++++++++++------------------ kernel/trace/ring_buffer_benchmark.c | 2 +- kernel/trace/trace.c | 70 ++++++++++---------- kernel/trace/trace.h | 22 +++---- kernel/trace/trace_branch.c | 2 +- kernel/trace/trace_events.c | 2 +- kernel/trace/trace_events_hist.c | 2 +- kernel/trace/trace_functions_graph.c | 4 +- kernel/trace/trace_hwlat.c | 2 +- kernel/trace/trace_kprobe.c | 4 +- kernel/trace/trace_mmiotrace.c | 4 +- kernel/trace/trace_sched_wakeup.c | 4 +- kernel/trace/trace_syscalls.c | 4 +- kernel/trace/trace_uprobe.c | 2 +- 19 files changed, 185 insertions(+), 185 deletions(-) (limited to 'kernel/trace') diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index eda2633a393d..9210a95cb4e6 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -32,7 +32,7 @@ #define OP_BUFFER_FLAGS 0 -static struct ring_buffer *op_ring_buffer; +static struct trace_buffer *op_ring_buffer; DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer); static void wq_sync_buffer(struct work_struct *work); diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 1a40277b512c..df0124eabece 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -6,7 +6,7 @@ #include #include -struct ring_buffer; +struct trace_buffer; struct ring_buffer_iter; /* @@ -77,13 +77,13 @@ u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event); * else * ring_buffer_unlock_commit(buffer, event); */ -void ring_buffer_discard_commit(struct ring_buffer *buffer, +void ring_buffer_discard_commit(struct trace_buffer *buffer, struct ring_buffer_event *event); /* * size is in bytes for each per CPU buffer. 
*/ -struct ring_buffer * +struct trace_buffer * __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key); /* @@ -97,38 +97,38 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k __ring_buffer_alloc((size), (flags), &__key); \ }) -int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full); -__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, +int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full); +__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, struct file *filp, poll_table *poll_table); #define RING_BUFFER_ALL_CPUS -1 -void ring_buffer_free(struct ring_buffer *buffer); +void ring_buffer_free(struct trace_buffer *buffer); -int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu); +int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu); -void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val); +void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val); -struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer, +struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length); -int ring_buffer_unlock_commit(struct ring_buffer *buffer, +int ring_buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event); -int ring_buffer_write(struct ring_buffer *buffer, +int ring_buffer_write(struct trace_buffer *buffer, unsigned long length, void *data); -void ring_buffer_nest_start(struct ring_buffer *buffer); -void ring_buffer_nest_end(struct ring_buffer *buffer); +void ring_buffer_nest_start(struct trace_buffer *buffer); +void ring_buffer_nest_end(struct trace_buffer *buffer); struct ring_buffer_event * -ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, +ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, unsigned long *lost_events); struct ring_buffer_event * -ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, +ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, unsigned long *lost_events); struct ring_buffer_iter * -ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags); +ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags); void ring_buffer_read_prepare_sync(void); void ring_buffer_read_start(struct ring_buffer_iter *iter); void ring_buffer_read_finish(struct ring_buffer_iter *iter); @@ -140,59 +140,59 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts); void ring_buffer_iter_reset(struct ring_buffer_iter *iter); int ring_buffer_iter_empty(struct ring_buffer_iter *iter); -unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu); -void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); -void ring_buffer_reset(struct ring_buffer *buffer); +void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu); +void ring_buffer_reset(struct trace_buffer *buffer); #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP -int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, - struct ring_buffer *buffer_b, int cpu); +int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, + struct trace_buffer *buffer_b, int cpu); #else static inline int -ring_buffer_swap_cpu(struct ring_buffer *buffer_a, - struct ring_buffer *buffer_b, int cpu) +ring_buffer_swap_cpu(struct trace_buffer *buffer_a, + struct trace_buffer *buffer_b, int cpu) { return -ENODEV; } #endif -bool 
ring_buffer_empty(struct ring_buffer *buffer); -bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); - -void ring_buffer_record_disable(struct ring_buffer *buffer); -void ring_buffer_record_enable(struct ring_buffer *buffer); -void ring_buffer_record_off(struct ring_buffer *buffer); -void ring_buffer_record_on(struct ring_buffer *buffer); -bool ring_buffer_record_is_on(struct ring_buffer *buffer); -bool ring_buffer_record_is_set_on(struct ring_buffer *buffer); -void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); -void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); - -u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu); -unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu); -unsigned long ring_buffer_entries(struct ring_buffer *buffer); -unsigned long ring_buffer_overruns(struct ring_buffer *buffer); -unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); -unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); -unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); -unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu); -unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu); - -u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); -void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, +bool ring_buffer_empty(struct trace_buffer *buffer); +bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu); + +void ring_buffer_record_disable(struct trace_buffer *buffer); +void ring_buffer_record_enable(struct trace_buffer *buffer); +void ring_buffer_record_off(struct trace_buffer *buffer); +void ring_buffer_record_on(struct trace_buffer *buffer); +bool ring_buffer_record_is_on(struct trace_buffer *buffer); +bool ring_buffer_record_is_set_on(struct trace_buffer *buffer); +void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu); +void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu); + +u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_entries(struct trace_buffer *buffer); +unsigned long ring_buffer_overruns(struct trace_buffer *buffer); +unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu); + +u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu); +void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, int cpu, u64 *ts); -void ring_buffer_set_clock(struct ring_buffer *buffer, +void ring_buffer_set_clock(struct trace_buffer *buffer, u64 (*clock)(void)); -void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs); -bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer); +void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs); +bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer); -size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu); -size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu); +size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu); 
+size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu); -void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu); -void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data); -int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, +void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu); +void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data); +int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page, size_t len, int cpu, int full); struct trace_seq; diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index f70e5bc7e8db..5f7b2b1fce24 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -153,7 +153,7 @@ void tracing_generic_entry_update(struct trace_entry *entry, struct trace_event_file; struct ring_buffer_event * -trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer, +trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer, struct trace_event_file *trace_file, int type, unsigned long len, unsigned long flags, int pc); @@ -210,7 +210,7 @@ extern int trace_event_reg(struct trace_event_call *event, enum trace_reg type, void *data); struct trace_event_buffer { - struct ring_buffer *buffer; + struct trace_buffer *buffer; struct ring_buffer_event *event; struct trace_event_file *trace_file; void *entry; diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 472b33d23a10..13a58d453992 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -570,7 +570,7 @@ static inline notrace int trace_event_get_offsets_##call( \ * enum event_trigger_type __tt = ETT_NONE; * struct ring_buffer_event *event; * struct trace_event_raw_ *entry; <-- defined in stage 1 - * struct ring_buffer *buffer; + * struct trace_buffer *buffer; * unsigned long irq_flags; * int __data_size; * int pc; diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 3b926f62ed83..0735ae8545d8 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -68,7 +68,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action, { struct blk_io_trace *t; struct ring_buffer_event *event = NULL; - struct ring_buffer *buffer = NULL; + struct trace_buffer *buffer = NULL; int pc = 0; int cpu = smp_processor_id(); bool blk_tracer = blk_tracer_enabled; @@ -215,7 +215,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, { struct task_struct *tsk = current; struct ring_buffer_event *event = NULL; - struct ring_buffer *buffer = NULL; + struct trace_buffer *buffer = NULL; struct blk_io_trace *t; unsigned long flags = 0; unsigned long *sequence; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 3f655371eaf6..f846de2aa435 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -443,7 +443,7 @@ enum { struct ring_buffer_per_cpu { int cpu; atomic_t record_disabled; - struct ring_buffer *buffer; + struct trace_buffer *buffer; raw_spinlock_t reader_lock; /* serialize readers */ arch_spinlock_t lock; struct lock_class_key lock_key; @@ -482,7 +482,7 @@ struct ring_buffer_per_cpu { struct rb_irq_work irq_work; }; -struct ring_buffer { +struct trace_buffer { unsigned flags; int cpus; atomic_t record_disabled; @@ -518,7 +518,7 @@ struct ring_buffer_iter { * * Returns the number of pages used by a per_cpu buffer of the ring buffer. 
*/ -size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu) +size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu) { return buffer->buffers[cpu]->nr_pages; } @@ -530,7 +530,7 @@ size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu) * * Returns the number of pages that have content in the ring buffer. */ -size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu) +size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) { size_t read; size_t cnt; @@ -573,7 +573,7 @@ static void rb_wake_up_waiters(struct irq_work *work) * as data is added to any of the @buffer's cpu buffers. Otherwise * it will wait for data to be added to a specific cpu buffer. */ -int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full) +int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) { struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer); DEFINE_WAIT(wait); @@ -684,7 +684,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full) * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers, * zero otherwise. */ -__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, +__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, struct file *filp, poll_table *poll_table) { struct ring_buffer_per_cpu *cpu_buffer; @@ -742,13 +742,13 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, /* Up this if you want to test the TIME_EXTENTS and normalization */ #define DEBUG_SHIFT 0 -static inline u64 rb_time_stamp(struct ring_buffer *buffer) +static inline u64 rb_time_stamp(struct trace_buffer *buffer) { /* shift to debug/test normalization and TIME_EXTENTS */ return buffer->clock() << DEBUG_SHIFT; } -u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) +u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu) { u64 time; @@ -760,7 +760,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) } EXPORT_SYMBOL_GPL(ring_buffer_time_stamp); -void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, +void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, int cpu, u64 *ts) { /* Just stupid testing the normalize function and deltas */ @@ -1283,7 +1283,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, } static struct ring_buffer_per_cpu * -rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu) +rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; struct buffer_page *bpage; @@ -1374,10 +1374,10 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) * when the buffer wraps. If this flag is not set, the buffer will * drop data when the tail hits the head. */ -struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, +struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key) { - struct ring_buffer *buffer; + struct trace_buffer *buffer; long nr_pages; int bsize; int cpu; @@ -1447,7 +1447,7 @@ EXPORT_SYMBOL_GPL(__ring_buffer_alloc); * @buffer: the buffer to free. 
*/ void -ring_buffer_free(struct ring_buffer *buffer) +ring_buffer_free(struct trace_buffer *buffer) { int cpu; @@ -1463,18 +1463,18 @@ ring_buffer_free(struct ring_buffer *buffer) } EXPORT_SYMBOL_GPL(ring_buffer_free); -void ring_buffer_set_clock(struct ring_buffer *buffer, +void ring_buffer_set_clock(struct trace_buffer *buffer, u64 (*clock)(void)) { buffer->clock = clock; } -void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs) +void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs) { buffer->time_stamp_abs = abs; } -bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer) +bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer) { return buffer->time_stamp_abs; } @@ -1712,7 +1712,7 @@ static void update_pages_handler(struct work_struct *work) * * Returns 0 on success and < 0 on failure. */ -int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, +int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu_id) { struct ring_buffer_per_cpu *cpu_buffer; @@ -1891,7 +1891,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, } EXPORT_SYMBOL_GPL(ring_buffer_resize); -void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val) +void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) { mutex_lock(&buffer->mutex); if (val) @@ -2206,7 +2206,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, { struct buffer_page *tail_page = info->tail_page; struct buffer_page *commit_page = cpu_buffer->commit_page; - struct ring_buffer *buffer = cpu_buffer->buffer; + struct trace_buffer *buffer = cpu_buffer->buffer; struct buffer_page *next_page; int ret; @@ -2609,7 +2609,7 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, } static __always_inline void -rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) +rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) { size_t nr_pages; size_t dirty; @@ -2733,7 +2733,7 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) * Call this function before calling another ring_buffer_lock_reserve() and * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit(). */ -void ring_buffer_nest_start(struct ring_buffer *buffer) +void ring_buffer_nest_start(struct trace_buffer *buffer) { struct ring_buffer_per_cpu *cpu_buffer; int cpu; @@ -2753,7 +2753,7 @@ void ring_buffer_nest_start(struct ring_buffer *buffer) * Must be called after ring_buffer_nest_start() and after the * ring_buffer_unlock_commit(). */ -void ring_buffer_nest_end(struct ring_buffer *buffer) +void ring_buffer_nest_end(struct trace_buffer *buffer) { struct ring_buffer_per_cpu *cpu_buffer; int cpu; @@ -2775,7 +2775,7 @@ void ring_buffer_nest_end(struct ring_buffer *buffer) * * Must be paired with ring_buffer_lock_reserve. */ -int ring_buffer_unlock_commit(struct ring_buffer *buffer, +int ring_buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event) { struct ring_buffer_per_cpu *cpu_buffer; @@ -2868,7 +2868,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, } static __always_inline struct ring_buffer_event * -rb_reserve_next_event(struct ring_buffer *buffer, +rb_reserve_next_event(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer, unsigned long length) { @@ -2961,7 +2961,7 @@ rb_reserve_next_event(struct ring_buffer *buffer, * If NULL is returned, then nothing has been allocated or locked. 
*/ struct ring_buffer_event * -ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) +ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) { struct ring_buffer_per_cpu *cpu_buffer; struct ring_buffer_event *event; @@ -3062,7 +3062,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, * If this function is called, do not call ring_buffer_unlock_commit on * the event. */ -void ring_buffer_discard_commit(struct ring_buffer *buffer, +void ring_buffer_discard_commit(struct trace_buffer *buffer, struct ring_buffer_event *event) { struct ring_buffer_per_cpu *cpu_buffer; @@ -3113,7 +3113,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); * Note, like ring_buffer_lock_reserve, the length is the length of the data * and not the length of the event which would hold the header. */ -int ring_buffer_write(struct ring_buffer *buffer, +int ring_buffer_write(struct trace_buffer *buffer, unsigned long length, void *data) { @@ -3193,7 +3193,7 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) * * The caller should call synchronize_rcu() after this. */ -void ring_buffer_record_disable(struct ring_buffer *buffer) +void ring_buffer_record_disable(struct trace_buffer *buffer) { atomic_inc(&buffer->record_disabled); } @@ -3206,7 +3206,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable); * Note, multiple disables will need the same number of enables * to truly enable the writing (much like preempt_disable). */ -void ring_buffer_record_enable(struct ring_buffer *buffer) +void ring_buffer_record_enable(struct trace_buffer *buffer) { atomic_dec(&buffer->record_disabled); } @@ -3223,7 +3223,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable); * it works like an on/off switch, where as the disable() version * must be paired with a enable(). */ -void ring_buffer_record_off(struct ring_buffer *buffer) +void ring_buffer_record_off(struct trace_buffer *buffer) { unsigned int rd; unsigned int new_rd; @@ -3246,7 +3246,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_off); * it works like an on/off switch, where as the enable() version * must be paired with a disable(). */ -void ring_buffer_record_on(struct ring_buffer *buffer) +void ring_buffer_record_on(struct trace_buffer *buffer) { unsigned int rd; unsigned int new_rd; @@ -3264,7 +3264,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_on); * * Returns true if the ring buffer is in a state that it accepts writes. */ -bool ring_buffer_record_is_on(struct ring_buffer *buffer) +bool ring_buffer_record_is_on(struct trace_buffer *buffer) { return !atomic_read(&buffer->record_disabled); } @@ -3280,7 +3280,7 @@ bool ring_buffer_record_is_on(struct ring_buffer *buffer) * ring_buffer_record_disable(), as that is a temporary disabling of * the ring buffer. */ -bool ring_buffer_record_is_set_on(struct ring_buffer *buffer) +bool ring_buffer_record_is_set_on(struct trace_buffer *buffer) { return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); } @@ -3295,7 +3295,7 @@ bool ring_buffer_record_is_set_on(struct ring_buffer *buffer) * * The caller should call synchronize_rcu() after this. */ -void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) +void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; @@ -3315,7 +3315,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); * Note, multiple disables will need the same number of enables * to truly enable the writing (much like preempt_disable). 
*/ -void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) +void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; @@ -3345,7 +3345,7 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) * @buffer: The ring buffer * @cpu: The per CPU buffer to read from. */ -u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) +u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) { unsigned long flags; struct ring_buffer_per_cpu *cpu_buffer; @@ -3378,7 +3378,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); * @buffer: The ring buffer * @cpu: The per CPU buffer to read from. */ -unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu) +unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long ret; @@ -3398,7 +3398,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); * @buffer: The ring buffer * @cpu: The per CPU buffer to get the entries from. */ -unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) +unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; @@ -3417,7 +3417,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); * @buffer: The ring buffer * @cpu: The per CPU buffer to get the number of overruns from */ -unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) +unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long ret; @@ -3440,7 +3440,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); * @cpu: The per CPU buffer to get the number of overruns from */ unsigned long -ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu) +ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long ret; @@ -3462,7 +3462,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); * @cpu: The per CPU buffer to get the number of overruns from */ unsigned long -ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu) +ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long ret; @@ -3483,7 +3483,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); * @cpu: The per CPU buffer to get the number of events read */ unsigned long -ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu) +ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; @@ -3502,7 +3502,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); * Returns the total number of entries in the ring buffer * (all CPU entries) */ -unsigned long ring_buffer_entries(struct ring_buffer *buffer) +unsigned long ring_buffer_entries(struct trace_buffer *buffer) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long entries = 0; @@ -3525,7 +3525,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries); * Returns the total number of overruns in the ring buffer * (all CPU entries) */ -unsigned long ring_buffer_overruns(struct ring_buffer *buffer) +unsigned long ring_buffer_overruns(struct trace_buffer *buffer) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long overruns = 0; @@ -3949,7 +3949,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_peek); static struct ring_buffer_event * rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) { - struct ring_buffer *buffer; + struct trace_buffer *buffer; struct 
ring_buffer_per_cpu *cpu_buffer; struct ring_buffer_event *event; int nr_loops = 0; @@ -4077,7 +4077,7 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) * not consume the data. */ struct ring_buffer_event * -ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, +ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, unsigned long *lost_events) { struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; @@ -4141,7 +4141,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) * and eventually empty the ring buffer if the producer is slower. */ struct ring_buffer_event * -ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, +ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, unsigned long *lost_events) { struct ring_buffer_per_cpu *cpu_buffer; @@ -4201,7 +4201,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume); * This overall must be paired with ring_buffer_read_finish. */ struct ring_buffer_iter * -ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags) +ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) { struct ring_buffer_per_cpu *cpu_buffer; struct ring_buffer_iter *iter; @@ -4332,7 +4332,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read); * ring_buffer_size - return the size of the ring buffer (in bytes) * @buffer: The ring buffer. */ -unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu) +unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) { /* * Earlier, this method returned @@ -4398,7 +4398,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) * @buffer: The ring buffer to reset a per cpu buffer of * @cpu: The CPU buffer to be reset */ -void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) +void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; unsigned long flags; @@ -4435,7 +4435,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); * ring_buffer_reset - reset a ring buffer * @buffer: The ring buffer to reset all cpu buffers */ -void ring_buffer_reset(struct ring_buffer *buffer) +void ring_buffer_reset(struct trace_buffer *buffer) { int cpu; @@ -4448,7 +4448,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset); * rind_buffer_empty - is the ring buffer empty? * @buffer: The ring buffer to test */ -bool ring_buffer_empty(struct ring_buffer *buffer) +bool ring_buffer_empty(struct trace_buffer *buffer) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long flags; @@ -4478,7 +4478,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty); * @buffer: The ring buffer * @cpu: The CPU buffer to test */ -bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) +bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long flags; @@ -4510,8 +4510,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); * it is expected that the tracer handles the cpu buffer not being * used at the moment. 
*/ -int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, - struct ring_buffer *buffer_b, int cpu) +int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, + struct trace_buffer *buffer_b, int cpu) { struct ring_buffer_per_cpu *cpu_buffer_a; struct ring_buffer_per_cpu *cpu_buffer_b; @@ -4590,7 +4590,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); * Returns: * The page allocated, or ERR_PTR */ -void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) +void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; struct buffer_data_page *bpage = NULL; @@ -4637,7 +4637,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); * * Free a page allocated from ring_buffer_alloc_read_page. */ -void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data) +void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) { struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; struct buffer_data_page *bpage = data; @@ -4697,7 +4697,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); * >=0 if data has been transferred, returns the offset of consumed data. * <0 if no data has been transferred. */ -int ring_buffer_read_page(struct ring_buffer *buffer, +int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page, size_t len, int cpu, int full) { struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; @@ -4868,12 +4868,12 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_page); */ int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node) { - struct ring_buffer *buffer; + struct trace_buffer *buffer; long nr_pages_same; int cpu_i; unsigned long nr_pages; - buffer = container_of(node, struct ring_buffer, node); + buffer = container_of(node, struct trace_buffer, node); if (cpumask_test_cpu(cpu, buffer->cpumask)) return 0; @@ -4923,7 +4923,7 @@ int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node) static struct task_struct *rb_threads[NR_CPUS] __initdata; struct rb_test_data { - struct ring_buffer *buffer; + struct trace_buffer *buffer; unsigned long events; unsigned long bytes_written; unsigned long bytes_alloc; @@ -5065,7 +5065,7 @@ static __init int rb_hammer_test(void *arg) static __init int test_ringbuffer(void) { struct task_struct *rb_hammer; - struct ring_buffer *buffer; + struct trace_buffer *buffer; int cpu; int ret = 0; diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index 32149e46551c..8df0aa810950 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c @@ -29,7 +29,7 @@ static int reader_finish; static DECLARE_COMPLETION(read_start); static DECLARE_COMPLETION(read_done); -static struct ring_buffer *buffer; +static struct trace_buffer *buffer; static struct task_struct *producer; static struct task_struct *consumer; static unsigned long read; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 67084b7945ff..b4a07d7ed82a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -163,7 +163,7 @@ static union trace_eval_map_item *trace_eval_maps; #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int tracing_set_tracer(struct trace_array *tr, const char *buf); -static void ftrace_trace_userstack(struct ring_buffer *buffer, +static void ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc); #define MAX_TRACER_SIZE 100 @@ -338,7 +338,7 @@ int tracing_check_open_get_tr(struct trace_array *tr) } int call_filter_check_discard(struct trace_event_call *call, 
void *rec, - struct ring_buffer *buffer, + struct trace_buffer *buffer, struct ring_buffer_event *event) { if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) && @@ -747,22 +747,22 @@ static inline void trace_access_lock_init(void) #endif #ifdef CONFIG_STACKTRACE -static void __ftrace_trace_stack(struct ring_buffer *buffer, +static void __ftrace_trace_stack(struct trace_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs); static inline void ftrace_trace_stack(struct trace_array *tr, - struct ring_buffer *buffer, + struct trace_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs); #else -static inline void __ftrace_trace_stack(struct ring_buffer *buffer, +static inline void __ftrace_trace_stack(struct trace_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { } static inline void ftrace_trace_stack(struct trace_array *tr, - struct ring_buffer *buffer, + struct trace_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { @@ -780,7 +780,7 @@ trace_event_setup(struct ring_buffer_event *event, } static __always_inline struct ring_buffer_event * -__trace_buffer_lock_reserve(struct ring_buffer *buffer, +__trace_buffer_lock_reserve(struct trace_buffer *buffer, int type, unsigned long len, unsigned long flags, int pc) @@ -825,7 +825,7 @@ EXPORT_SYMBOL_GPL(tracing_on); static __always_inline void -__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) +__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event) { __this_cpu_write(trace_taskinfo_save, true); @@ -848,7 +848,7 @@ __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *eve int __trace_puts(unsigned long ip, const char *str, int size) { struct ring_buffer_event *event; - struct ring_buffer *buffer; + struct trace_buffer *buffer; struct print_entry *entry; unsigned long irq_flags; int alloc; @@ -898,7 +898,7 @@ EXPORT_SYMBOL_GPL(__trace_puts); int __trace_bputs(unsigned long ip, const char *str) { struct ring_buffer_event *event; - struct ring_buffer *buffer; + struct trace_buffer *buffer; struct bputs_entry *entry; unsigned long irq_flags; int size = sizeof(struct bputs_entry); @@ -1964,7 +1964,7 @@ int __init register_tracer(struct tracer *type) static void tracing_reset_cpu(struct array_buffer *buf, int cpu) { - struct ring_buffer *buffer = buf->buffer; + struct trace_buffer *buffer = buf->buffer; if (!buffer) return; @@ -1980,7 +1980,7 @@ static void tracing_reset_cpu(struct array_buffer *buf, int cpu) void tracing_reset_online_cpus(struct array_buffer *buf) { - struct ring_buffer *buffer = buf->buffer; + struct trace_buffer *buffer = buf->buffer; int cpu; if (!buffer) @@ -2098,7 +2098,7 @@ int is_tracing_stopped(void) */ void tracing_start(void) { - struct ring_buffer *buffer; + struct trace_buffer *buffer; unsigned long flags; if (tracing_disabled) @@ -2135,7 +2135,7 @@ void tracing_start(void) static void tracing_start_tr(struct trace_array *tr) { - struct ring_buffer *buffer; + struct trace_buffer *buffer; unsigned long flags; if (tracing_disabled) @@ -2172,7 +2172,7 @@ static void tracing_start_tr(struct trace_array *tr) */ void tracing_stop(void) { - struct ring_buffer *buffer; + struct trace_buffer *buffer; unsigned long flags; raw_spin_lock_irqsave(&global_trace.start_lock, flags); @@ -2200,7 +2200,7 @@ void tracing_stop(void) static void tracing_stop_tr(struct trace_array *tr) { - struct ring_buffer *buffer; + struct trace_buffer *buffer; unsigned long 
flags; /* If global, we need to also stop the max tracer */ @@ -2442,7 +2442,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned short type, EXPORT_SYMBOL_GPL(tracing_generic_entry_update); struct ring_buffer_event * -trace_buffer_lock_reserve(struct ring_buffer *buffer, +trace_buffer_lock_reserve(struct trace_buffer *buffer, int type, unsigned long len, unsigned long flags, int pc) @@ -2561,10 +2561,10 @@ void trace_buffered_event_disable(void) preempt_enable(); } -static struct ring_buffer *temp_buffer; +static struct trace_buffer *temp_buffer; struct ring_buffer_event * -trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, +trace_event_buffer_lock_reserve(struct trace_buffer **current_rb, struct trace_event_file *trace_file, int type, unsigned long len, unsigned long flags, int pc) @@ -2689,7 +2689,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_commit); # define STACK_SKIP 3 void trace_buffer_unlock_commit_regs(struct trace_array *tr, - struct ring_buffer *buffer, + struct trace_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc, struct pt_regs *regs) @@ -2710,7 +2710,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr, * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. */ void -trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer, +trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, struct ring_buffer_event *event) { __buffer_unlock_commit(buffer, event); @@ -2845,7 +2845,7 @@ trace_function(struct trace_array *tr, int pc) { struct trace_event_call *call = &event_function; - struct ring_buffer *buffer = tr->array_buffer.buffer; + struct trace_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; struct ftrace_entry *entry; @@ -2883,7 +2883,7 @@ struct ftrace_stacks { static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks); static DEFINE_PER_CPU(int, ftrace_stack_reserve); -static void __ftrace_trace_stack(struct ring_buffer *buffer, +static void __ftrace_trace_stack(struct trace_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { @@ -2958,7 +2958,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, } static inline void ftrace_trace_stack(struct trace_array *tr, - struct ring_buffer *buffer, + struct trace_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { @@ -2971,7 +2971,7 @@ static inline void ftrace_trace_stack(struct trace_array *tr, void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, int pc) { - struct ring_buffer *buffer = tr->array_buffer.buffer; + struct trace_buffer *buffer = tr->array_buffer.buffer; if (rcu_is_watching()) { __ftrace_trace_stack(buffer, flags, skip, pc, NULL); @@ -3018,7 +3018,7 @@ EXPORT_SYMBOL_GPL(trace_dump_stack); static DEFINE_PER_CPU(int, user_stack_count); static void -ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) +ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc) { struct trace_event_call *call = &event_user_stack; struct ring_buffer_event *event; @@ -3063,7 +3063,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) preempt_enable(); } #else /* CONFIG_USER_STACKTRACE_SUPPORT */ -static void ftrace_trace_userstack(struct ring_buffer *buffer, +static void ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc) { } @@ -3188,7 +3188,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { struct 
trace_event_call *call = &event_bprint; struct ring_buffer_event *event; - struct ring_buffer *buffer; + struct trace_buffer *buffer; struct trace_array *tr = &global_trace; struct bprint_entry *entry; unsigned long flags; @@ -3245,7 +3245,7 @@ EXPORT_SYMBOL_GPL(trace_vbprintk); __printf(3, 0) static int -__trace_array_vprintk(struct ring_buffer *buffer, +__trace_array_vprintk(struct trace_buffer *buffer, unsigned long ip, const char *fmt, va_list args) { struct trace_event_call *call = &event_print; @@ -3326,7 +3326,7 @@ int trace_array_printk(struct trace_array *tr, EXPORT_SYMBOL_GPL(trace_array_printk); __printf(3, 4) -int trace_array_printk_buf(struct ring_buffer *buffer, +int trace_array_printk_buf(struct trace_buffer *buffer, unsigned long ip, const char *fmt, ...) { int ret; @@ -3382,7 +3382,7 @@ static struct trace_entry * __find_next_entry(struct trace_iterator *iter, int *ent_cpu, unsigned long *missing_events, u64 *ent_ts) { - struct ring_buffer *buffer = iter->array_buffer->buffer; + struct trace_buffer *buffer = iter->array_buffer->buffer; struct trace_entry *ent, *next = NULL; unsigned long lost_events = 0, next_lost = 0; int cpu_file = iter->cpu_file; @@ -6470,7 +6470,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, struct trace_array *tr = filp->private_data; struct ring_buffer_event *event; enum event_trigger_type tt = ETT_NONE; - struct ring_buffer *buffer; + struct trace_buffer *buffer; struct print_entry *entry; unsigned long irq_flags; ssize_t written; @@ -6550,7 +6550,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf, { struct trace_array *tr = filp->private_data; struct ring_buffer_event *event; - struct ring_buffer *buffer; + struct trace_buffer *buffer; struct raw_data_entry *entry; unsigned long irq_flags; ssize_t written; @@ -7433,7 +7433,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) } struct buffer_ref { - struct ring_buffer *buffer; + struct trace_buffer *buffer; void *page; int cpu; refcount_t refcount; @@ -8272,7 +8272,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; - struct ring_buffer *buffer = tr->array_buffer.buffer; + struct trace_buffer *buffer = tr->array_buffer.buffer; unsigned long val; int ret; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index fd679fe92c1f..4812a36affac 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -178,7 +178,7 @@ struct trace_option_dentry; struct array_buffer { struct trace_array *tr; - struct ring_buffer *buffer; + struct trace_buffer *buffer; struct trace_array_cpu __percpu *data; u64 time_start; int cpu; @@ -705,7 +705,7 @@ struct dentry *tracing_init_dentry(void); struct ring_buffer_event; struct ring_buffer_event * -trace_buffer_lock_reserve(struct ring_buffer *buffer, +trace_buffer_lock_reserve(struct trace_buffer *buffer, int type, unsigned long len, unsigned long flags, @@ -717,7 +717,7 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts); -void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer, +void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, struct ring_buffer_event *event); int trace_empty(struct trace_iterator *iter); @@ -873,7 +873,7 @@ trace_vprintk(unsigned long ip, const char *fmt, va_list args); extern int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const 
char *fmt, va_list args); -int trace_array_printk_buf(struct ring_buffer *buffer, +int trace_array_printk_buf(struct trace_buffer *buffer, unsigned long ip, const char *fmt, ...); void trace_printk_seq(struct trace_seq *s); enum print_line_t print_trace_line(struct trace_iterator *iter); @@ -1367,17 +1367,17 @@ struct trace_subsystem_dir { }; extern int call_filter_check_discard(struct trace_event_call *call, void *rec, - struct ring_buffer *buffer, + struct trace_buffer *buffer, struct ring_buffer_event *event); void trace_buffer_unlock_commit_regs(struct trace_array *tr, - struct ring_buffer *buffer, + struct trace_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc, struct pt_regs *regs); static inline void trace_buffer_unlock_commit(struct trace_array *tr, - struct ring_buffer *buffer, + struct trace_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc) { @@ -1390,7 +1390,7 @@ void trace_buffered_event_disable(void); void trace_buffered_event_enable(void); static inline void -__trace_event_discard_commit(struct ring_buffer *buffer, +__trace_event_discard_commit(struct trace_buffer *buffer, struct ring_buffer_event *event) { if (this_cpu_read(trace_buffered_event) == event) { @@ -1416,7 +1416,7 @@ __trace_event_discard_commit(struct ring_buffer *buffer, */ static inline bool __event_trigger_test_discard(struct trace_event_file *file, - struct ring_buffer *buffer, + struct trace_buffer *buffer, struct ring_buffer_event *event, void *entry, enum event_trigger_type *tt) @@ -1451,7 +1451,7 @@ __event_trigger_test_discard(struct trace_event_file *file, */ static inline void event_trigger_unlock_commit(struct trace_event_file *file, - struct ring_buffer *buffer, + struct trace_buffer *buffer, struct ring_buffer_event *event, void *entry, unsigned long irq_flags, int pc) { @@ -1482,7 +1482,7 @@ event_trigger_unlock_commit(struct trace_event_file *file, */ static inline void event_trigger_unlock_commit_regs(struct trace_event_file *file, - struct ring_buffer *buffer, + struct trace_buffer *buffer, struct ring_buffer_event *event, void *entry, unsigned long irq_flags, int pc, struct pt_regs *regs) diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index d5989284a99a..eff099123aa2 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c @@ -32,10 +32,10 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect) { struct trace_event_call *call = &event_branch; struct trace_array *tr = branch_tracer; + struct trace_buffer *buffer; struct trace_array_cpu *data; struct ring_buffer_event *event; struct trace_branch *entry; - struct ring_buffer *buffer; unsigned long flags; int pc; const char *p; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index ac557f685f0b..a16d1b601c5c 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -3391,8 +3391,8 @@ static void __init function_test_events_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *pt_regs) { + struct trace_buffer *buffer; struct ring_buffer_event *event; - struct ring_buffer *buffer; struct ftrace_entry *entry; unsigned long flags; long disabled; diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 94c581c1a897..0454abaeb486 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -879,7 +879,7 @@ static notrace void trace_event_raw_event_synth(void *__data, struct trace_event_file *trace_file = 
__data; struct synth_trace_event *entry; struct trace_event_buffer fbuffer; - struct ring_buffer *buffer; + struct trace_buffer *buffer; struct synth_event *event; unsigned int i, n_u64; int fields_size = 0; diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 79b2c2df00c5..7d71546ba00a 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -101,7 +101,7 @@ int __trace_graph_entry(struct trace_array *tr, { struct trace_event_call *call = &event_funcgraph_entry; struct ring_buffer_event *event; - struct ring_buffer *buffer = tr->array_buffer.buffer; + struct trace_buffer *buffer = tr->array_buffer.buffer; struct ftrace_graph_ent_entry *entry; event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, @@ -221,7 +221,7 @@ void __trace_graph_return(struct trace_array *tr, { struct trace_event_call *call = &event_funcgraph_exit; struct ring_buffer_event *event; - struct ring_buffer *buffer = tr->array_buffer.buffer; + struct trace_buffer *buffer = tr->array_buffer.buffer; struct ftrace_graph_ret_entry *entry; event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index fc62a6049bd3..b44446bf0872 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -104,7 +104,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample) { struct trace_array *tr = hwlat_trace; struct trace_event_call *call = &event_hwlat; - struct ring_buffer *buffer = tr->array_buffer.buffer; + struct trace_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; struct hwlat_entry *entry; unsigned long flags; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 7f890262c8a3..477b6b011e7d 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1175,8 +1175,8 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, struct trace_event_file *trace_file) { struct kprobe_trace_entry_head *entry; + struct trace_buffer *buffer; struct ring_buffer_event *event; - struct ring_buffer *buffer; int size, dsize, pc; unsigned long irq_flags; struct trace_event_call *call = trace_probe_event_call(&tk->tp); @@ -1223,8 +1223,8 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, struct trace_event_file *trace_file) { struct kretprobe_trace_entry_head *entry; + struct trace_buffer *buffer; struct ring_buffer_event *event; - struct ring_buffer *buffer; int size, pc, dsize; unsigned long irq_flags; struct trace_event_call *call = trace_probe_event_call(&tk->tp); diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index c30137148759..84582bf1ed5f 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -297,7 +297,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, struct mmiotrace_rw *rw) { struct trace_event_call *call = &event_mmiotrace_rw; - struct ring_buffer *buffer = tr->array_buffer.buffer; + struct trace_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; struct trace_mmiotrace_rw *entry; int pc = preempt_count(); @@ -327,7 +327,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, struct mmiotrace_map *map) { struct trace_event_call *call = &event_mmiotrace_map; - struct ring_buffer *buffer = tr->array_buffer.buffer; + struct trace_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; struct trace_mmiotrace_map *entry; int pc = 
preempt_count(); diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 510fda2fcd24..97b10bb31a1f 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -378,7 +378,7 @@ tracing_sched_switch_trace(struct trace_array *tr, unsigned long flags, int pc) { struct trace_event_call *call = &event_context_switch; - struct ring_buffer *buffer = tr->array_buffer.buffer; + struct trace_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; struct ctx_switch_entry *entry; @@ -408,7 +408,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, struct trace_event_call *call = &event_wakeup; struct ring_buffer_event *event; struct ctx_switch_entry *entry; - struct ring_buffer *buffer = tr->array_buffer.buffer; + struct trace_buffer *buffer = tr->array_buffer.buffer; event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, sizeof(*entry), flags, pc); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index bd92843c2b0e..837ad4818bb4 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -317,7 +317,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) struct syscall_trace_enter *entry; struct syscall_metadata *sys_data; struct ring_buffer_event *event; - struct ring_buffer *buffer; + struct trace_buffer *buffer; unsigned long irq_flags; unsigned long args[6]; int pc; @@ -367,7 +367,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) struct syscall_trace_exit *entry; struct syscall_metadata *sys_data; struct ring_buffer_event *event; - struct ring_buffer *buffer; + struct trace_buffer *buffer; unsigned long irq_flags; int pc; int syscall_nr; diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 352073d36585..6c75d94f5c2f 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -938,8 +938,8 @@ static void __uprobe_trace_func(struct trace_uprobe *tu, struct trace_event_file *trace_file) { struct uprobe_trace_entry_head *entry; + struct trace_buffer *buffer; struct ring_buffer_event *event; - struct ring_buffer *buffer; void *data; int size, esize; struct trace_event_call *call = trace_probe_event_call(&tu->tp); -- cgit v1.2.3 From d8d0c245a7fdd176e2cf6317b3fddda650059d06 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 11 Jan 2020 01:05:18 +0900 Subject: tracing: Apply soft-disabled and filter to tracepoints printk Apply soft-disabled and the filter rule of the trace events to the printk output of tracepoints (a.k.a. tp_printk kernel parameter) as same as trace buffer output. 
Link: http://lkml.kernel.org/r/157867231876.17873.15825819592284704068.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index b4a07d7ed82a..b4294eb020f8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2610,6 +2610,7 @@ static DEFINE_MUTEX(tracepoint_printk_mutex); static void output_printk(struct trace_event_buffer *fbuffer) { struct trace_event_call *event_call; + struct trace_event_file *file; struct trace_event *event; unsigned long flags; struct trace_iterator *iter = tracepoint_print_iter; @@ -2623,6 +2624,12 @@ static void output_printk(struct trace_event_buffer *fbuffer) !event_call->event.funcs->trace) return; + file = fbuffer->trace_file; + if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || + (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && + !filter_match_preds(file->filter, fbuffer->entry))) + return; + event = &fbuffer->trace_file->event_call->event; spin_lock_irqsave(&tracepoint_iter_lock, flags); -- cgit v1.2.3 From 8cfcf15503f607e9597de19afeaa621897ae397e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 11 Jan 2020 01:05:31 +0900 Subject: tracing: kprobes: Output kprobe event to printk buffer Since kprobe-events use event_trigger_unlock_commit_regs() directly, that events doesn't show up in printk buffer if "tp_printk" is set. Use trace_event_buffer_commit() in kprobe events so that it can invoke output_printk() as same as other trace events. Link: http://lkml.kernel.org/r/157867233085.17873.5210928676787339604.stgit@devnote2 Signed-off-by: Masami Hiramatsu [ Adjusted data var declaration placement in __kretprobe_trace_func() ] Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 1 + kernel/trace/trace.c | 4 ++-- kernel/trace/trace_events.c | 1 + kernel/trace/trace_kprobe.c | 57 ++++++++++++++++++++++---------------------- 4 files changed, 32 insertions(+), 31 deletions(-) (limited to 'kernel/trace') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 5f7b2b1fce24..20948ee56f8c 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -216,6 +216,7 @@ struct trace_event_buffer { void *entry; unsigned long flags; int pc; + struct pt_regs *regs; }; void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index b4294eb020f8..cb850d2c4bfa 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2680,9 +2680,9 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) if (static_key_false(&tracepoint_printk_key.key)) output_printk(fbuffer); - event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer, + event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer, fbuffer->event, fbuffer->entry, - fbuffer->flags, fbuffer->pc); + fbuffer->flags, fbuffer->pc, fbuffer->regs); } EXPORT_SYMBOL_GPL(trace_event_buffer_commit); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index a16d1b601c5c..dfb736a964d6 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -272,6 +272,7 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, if (!fbuffer->event) return NULL; + fbuffer->regs = NULL; fbuffer->entry = ring_buffer_event_data(fbuffer->event); return fbuffer->entry; } diff --git a/kernel/trace/trace_kprobe.c 
b/kernel/trace/trace_kprobe.c index 477b6b011e7d..33a6a661904b 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1175,35 +1175,35 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, struct trace_event_file *trace_file) { struct kprobe_trace_entry_head *entry; - struct trace_buffer *buffer; - struct ring_buffer_event *event; - int size, dsize, pc; - unsigned long irq_flags; struct trace_event_call *call = trace_probe_event_call(&tk->tp); + struct trace_event_buffer fbuffer; + int dsize; WARN_ON(call != trace_file->event_call); if (trace_trigger_soft_disabled(trace_file)) return; - local_save_flags(irq_flags); - pc = preempt_count(); + local_save_flags(fbuffer.flags); + fbuffer.pc = preempt_count(); + fbuffer.trace_file = trace_file; dsize = __get_data_size(&tk->tp, regs); - size = sizeof(*entry) + tk->tp.size + dsize; - event = trace_event_buffer_lock_reserve(&buffer, trace_file, - call->event.type, - size, irq_flags, pc); - if (!event) + fbuffer.event = + trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file, + call->event.type, + sizeof(*entry) + tk->tp.size + dsize, + fbuffer.flags, fbuffer.pc); + if (!fbuffer.event) return; - entry = ring_buffer_event_data(event); + fbuffer.regs = regs; + entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event); entry->ip = (unsigned long)tk->rp.kp.addr; store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize); - event_trigger_unlock_commit_regs(trace_file, buffer, event, - entry, irq_flags, pc, regs); + trace_event_buffer_commit(&fbuffer); } static void @@ -1223,36 +1223,35 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, struct trace_event_file *trace_file) { struct kretprobe_trace_entry_head *entry; - struct trace_buffer *buffer; - struct ring_buffer_event *event; - int size, pc, dsize; - unsigned long irq_flags; + struct trace_event_buffer fbuffer; struct trace_event_call *call = trace_probe_event_call(&tk->tp); + int dsize; WARN_ON(call != trace_file->event_call); if (trace_trigger_soft_disabled(trace_file)) return; - local_save_flags(irq_flags); - pc = preempt_count(); + local_save_flags(fbuffer.flags); + fbuffer.pc = preempt_count(); + fbuffer.trace_file = trace_file; dsize = __get_data_size(&tk->tp, regs); - size = sizeof(*entry) + tk->tp.size + dsize; - - event = trace_event_buffer_lock_reserve(&buffer, trace_file, - call->event.type, - size, irq_flags, pc); - if (!event) + fbuffer.event = + trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file, + call->event.type, + sizeof(*entry) + tk->tp.size + dsize, + fbuffer.flags, fbuffer.pc); + if (!fbuffer.event) return; - entry = ring_buffer_event_data(event); + fbuffer.regs = regs; + entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event); entry->func = (unsigned long)tk->rp.kp.addr; entry->ret_ip = (unsigned long)ri->ret_addr; store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize); - event_trigger_unlock_commit_regs(trace_file, buffer, event, - entry, irq_flags, pc, regs); + trace_event_buffer_commit(&fbuffer); } static void -- cgit v1.2.3 From d8d4c6d0e79c418f8c63f3c82429b1462f196155 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 11 Jan 2020 01:05:42 +0900 Subject: tracing: kprobes: Register to dynevent earlier stage Register kprobe event to dynevent in subsys_initcall level. This will allow kernel to register new kprobe events in fs_initcall level via trace_run_command. 
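As a minimal sketch of what this ordering change enables (hypothetical caller; trace_kprobe_run_command() is only added by a later patch in this series): since kprobe dynevents are now registered at subsys_initcall, an fs_initcall can already create probe events.

  static int __init my_boot_kprobe_setup(void)
  {
          /* Works because dyn_event_register(&trace_kprobe_ops) has already
           * run at subsys_initcall time, before any fs_initcall. */
          if (trace_kprobe_run_command("p:kprobes/myread vfs_read $arg1") < 0)
                  pr_err("failed to create boot-time kprobe event\n");
          return 0;
  }
  fs_initcall(my_boot_kprobe_setup);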
Link: http://lkml.kernel.org/r/157867234213.17873.18039000024374948737.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 33a6a661904b..8113d6aa7bc5 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1685,11 +1685,12 @@ static __init void setup_boot_kprobe_events(void) enable_boot_kprobe_events(); } -/* Make a tracefs interface for controlling probe points */ -static __init int init_kprobe_trace(void) +/* + * Register dynevent at subsys_initcall. This allows kernel to setup kprobe + * events in fs_initcall without tracefs. + */ +static __init int init_kprobe_trace_early(void) { - struct dentry *d_tracer; - struct dentry *entry; int ret; ret = dyn_event_register(&trace_kprobe_ops); @@ -1699,6 +1700,16 @@ static __init int init_kprobe_trace(void) if (register_module_notifier(&trace_kprobe_module_nb)) return -EINVAL; + return 0; +} +subsys_initcall(init_kprobe_trace_early); + +/* Make a tracefs interface for controlling probe points */ +static __init int init_kprobe_trace(void) +{ + struct dentry *d_tracer; + struct dentry *entry; + d_tracer = tracing_init_dentry(); if (IS_ERR(d_tracer)) return 0; -- cgit v1.2.3 From b05e89ae7cf3bcabc52399cb833ecc9aaa51ae04 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 11 Jan 2020 01:05:53 +0900 Subject: tracing: Accept different type for synthetic event fields Make the synthetic event accepts a different type field to record. However, the size and signed flag must be same. Link: http://lkml.kernel.org/r/157867235358.17873.61732996461602171.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 0454abaeb486..4f4759c6e972 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -4110,8 +4110,11 @@ static int check_synth_field(struct synth_event *event, field = event->fields[field_pos]; - if (strcmp(field->type, hist_field->type) != 0) - return -EINVAL; + if (strcmp(field->type, hist_field->type) != 0) { + if (field->size != hist_field->size || + field->is_signed != hist_field->is_signed) + return -EINVAL; + } return 0; } -- cgit v1.2.3 From 48ac9488a597eb6116472b6cc0bd875e245e252c Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 11 Jan 2020 01:06:05 +0900 Subject: tracing: Add NULL trace-array check in print_synth_event() Add NULL trace-array check in print_synth_event(), because if we enable tp_printk option, iter->tr can be NULL. 
Link: http://lkml.kernel.org/r/157867236536.17873.12529350542460184019.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 4f4759c6e972..1cb4c4c8e5b7 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -833,7 +833,7 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter, fmt = synth_field_fmt(se->fields[i]->type); /* parameter types */ - if (tr->trace_flags & TRACE_ITER_VERBOSE) + if (tr && tr->trace_flags & TRACE_ITER_VERBOSE) trace_seq_printf(s, "%s ", fmt); snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt); -- cgit v1.2.3 From 9c5b9d3d65e485826fb935453f01171b1a337aa8 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 11 Jan 2020 01:06:17 +0900 Subject: tracing/boot: Add boot-time tracing Setup tracing options via extra boot config in addition to kernel command line. This adds following commands support. These are applied to the global trace instance. - ftrace.options = OPT1[,OPT2...] Enable given ftrace options. - ftrace.trace_clock = CLOCK Set given CLOCK to ftrace's trace_clock. - ftrace.buffer_size = SIZE Configure ftrace buffer size to SIZE. You can use "KB" or "MB" for that SIZE. - ftrace.events = EVENT[, EVENT2...] Enable given events on boot. You can use a wild card in EVENT. - ftrace.tracer = TRACER Set TRACER to current tracer on boot. (e.g. function) Note that this is NOT replacing the kernel parameters, because this boot config based setting is later than that. If you want to trace earlier boot events, you still need kernel parameters. Link: http://lkml.kernel.org/r/157867237723.17873.17494943526320587488.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/Kconfig | 9 ++++ kernel/trace/Makefile | 1 + kernel/trace/trace.c | 10 ++-- kernel/trace/trace_boot.c | 113 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 128 insertions(+), 5 deletions(-) create mode 100644 kernel/trace/trace_boot.c (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 25a0fcfa7a5d..75326d8ab1af 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -814,6 +814,15 @@ config GCOV_PROFILE_FTRACE Note that on a kernel compiled with this config, ftrace will run significantly slower. +config BOOTTIME_TRACING + bool "Boot-time Tracing support" + depends on BOOT_CONFIG && TRACING + default y + help + Enable developer to setup ftrace subsystem via supplemental + kernel cmdline at boot time for debugging (tracing) driver + initialization and boot process. 
+ endif # FTRACE endif # TRACING_SUPPORT diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 0e63db62225f..395e2db9c742 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -83,6 +83,7 @@ endif obj-$(CONFIG_DYNAMIC_EVENTS) += trace_dynevent.o obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o +obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index cb850d2c4bfa..6c996d1b1687 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -162,7 +162,7 @@ union trace_eval_map_item { static union trace_eval_map_item *trace_eval_maps; #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ -static int tracing_set_tracer(struct trace_array *tr, const char *buf); +int tracing_set_tracer(struct trace_array *tr, const char *buf); static void ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc); @@ -4747,7 +4747,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) return 0; } -static int trace_set_options(struct trace_array *tr, char *option) +int trace_set_options(struct trace_array *tr, char *option) { char *cmp; int neg = 0; @@ -5647,8 +5647,8 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr, return ret; } -static ssize_t tracing_resize_ring_buffer(struct trace_array *tr, - unsigned long size, int cpu_id) +ssize_t tracing_resize_ring_buffer(struct trace_array *tr, + unsigned long size, int cpu_id) { int ret = size; @@ -5727,7 +5727,7 @@ static void add_tracer_options(struct trace_array *tr, struct tracer *t) create_trace_option_files(tr, t); } -static int tracing_set_tracer(struct trace_array *tr, const char *buf) +int tracing_set_tracer(struct trace_array *tr, const char *buf) { struct tracer *t; #ifdef CONFIG_TRACER_MAX_TRACE diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c new file mode 100644 index 000000000000..4b41310184df --- /dev/null +++ b/kernel/trace/trace_boot.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * trace_boot.c + * Tracing kernel boot-time + */ + +#define pr_fmt(fmt) "trace_boot: " fmt + +#include +#include +#include + +#include "trace.h" + +#define MAX_BUF_LEN 256 + +extern int trace_set_options(struct trace_array *tr, char *option); +extern int tracing_set_tracer(struct trace_array *tr, const char *buf); +extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr, + unsigned long size, int cpu_id); + +static void __init +trace_boot_set_ftrace_options(struct trace_array *tr, struct xbc_node *node) +{ + struct xbc_node *anode; + const char *p; + char buf[MAX_BUF_LEN]; + unsigned long v = 0; + + /* Common ftrace options */ + xbc_node_for_each_array_value(node, "options", anode, p) { + if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) { + pr_err("String is too long: %s\n", p); + continue; + } + + if (trace_set_options(tr, buf) < 0) + pr_err("Failed to set option: %s\n", buf); + } + + p = xbc_node_find_value(node, "trace_clock", NULL); + if (p && *p != '\0') { + if (tracing_set_clock(tr, p) < 0) + pr_err("Failed to set trace clock: %s\n", p); + } + + p = xbc_node_find_value(node, "buffer_size", NULL); + if (p && *p != '\0') { + v = memparse(p, NULL); + if (v < PAGE_SIZE) + pr_err("Buffer size is too small: %s\n", p); + if (tracing_resize_ring_buffer(tr, v, RING_BUFFER_ALL_CPUS) < 0) + pr_err("Failed to resize trace buffer to %s\n", p); + } +} + +#ifdef CONFIG_EVENT_TRACING +extern int 
ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); + +static void __init +trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node) +{ + struct xbc_node *anode; + char buf[MAX_BUF_LEN]; + const char *p; + + xbc_node_for_each_array_value(node, "events", anode, p) { + if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) { + pr_err("String is too long: %s\n", p); + continue; + } + + if (ftrace_set_clr_event(tr, buf, 1) < 0) + pr_err("Failed to enable event: %s\n", p); + } +} +#else +#define trace_boot_enable_events(tr, node) do {} while (0) +#endif + +static void __init +trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node) +{ + const char *p; + + p = xbc_node_find_value(node, "tracer", NULL); + if (p && *p != '\0') { + if (tracing_set_tracer(tr, p) < 0) + pr_err("Failed to set given tracer: %s\n", p); + } +} + +static int __init trace_boot_init(void) +{ + struct xbc_node *trace_node; + struct trace_array *tr; + + trace_node = xbc_find_node("ftrace"); + if (!trace_node) + return 0; + + tr = top_trace_array(); + if (!tr) + return 0; + + trace_boot_set_ftrace_options(tr, trace_node); + trace_boot_enable_events(tr, trace_node); + trace_boot_enable_tracer(tr, trace_node); + + return 0; +} + +fs_initcall(trace_boot_init); -- cgit v1.2.3 From 81a59555ff1593642824414267e1859024bd0162 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 11 Jan 2020 01:06:29 +0900 Subject: tracing/boot: Add per-event settings Add per-event settings for boottime tracing. User can set filter, actions and enable on each event on boot. The event entries are under ftrace.event.GROUP.EVENT node (note that the option key includes event's group name and event name.) This supports below configs. - ftrace.event.GROUP.EVENT.enable Enables GROUP:EVENT tracing. - ftrace.event.GROUP.EVENT.filter = FILTER Set FILTER rule to the GROUP:EVENT. - ftrace.event.GROUP.EVENT.actions = ACTION[, ACTION2...] Set ACTIONs to the GROUP:EVENT. For example, ftrace.event.sched.sched_process_exec { filter = "pid < 128" enable } this will enable tracing "sched:sched_process_exec" event with "pid < 128" filter. 
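Another hypothetical configuration (field name and trigger chosen for illustration, not taken from the patch), combining a filter, an action and enable on one event:

  ftrace.event.sched.sched_process_fork {
          filter = "child_pid > 128"
          actions = "hist:keys=comm"
          enable
  }

Each "actions" value is handed to trigger_process_regex(), so anything accepted by the per-event trigger file should work here.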
Link: http://lkml.kernel.org/r/157867238942.17873.11177628789184546198.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 60 +++++++++++++++++++++++++++++++++++++ kernel/trace/trace_events_trigger.c | 2 +- 2 files changed, 61 insertions(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 4b41310184df..37524031533e 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -56,6 +56,7 @@ trace_boot_set_ftrace_options(struct trace_array *tr, struct xbc_node *node) #ifdef CONFIG_EVENT_TRACING extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); +extern int trigger_process_regex(struct trace_event_file *file, char *buff); static void __init trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node) @@ -74,8 +75,66 @@ trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node) pr_err("Failed to enable event: %s\n", p); } } + +static void __init +trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, + struct xbc_node *enode) +{ + struct trace_event_file *file; + struct xbc_node *anode; + char buf[MAX_BUF_LEN]; + const char *p, *group, *event; + + group = xbc_node_get_data(gnode); + event = xbc_node_get_data(enode); + + mutex_lock(&event_mutex); + file = find_event_file(tr, group, event); + if (!file) { + pr_err("Failed to find event: %s:%s\n", group, event); + goto out; + } + + p = xbc_node_find_value(enode, "filter", NULL); + if (p && *p != '\0') { + if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) + pr_err("filter string is too long: %s\n", p); + else if (apply_event_filter(file, buf) < 0) + pr_err("Failed to apply filter: %s\n", buf); + } + + xbc_node_for_each_array_value(enode, "actions", anode, p) { + if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) + pr_err("action string is too long: %s\n", p); + else if (trigger_process_regex(file, buf) < 0) + pr_err("Failed to apply an action: %s\n", buf); + } + + if (xbc_node_find_value(enode, "enable", NULL)) { + if (trace_event_enable_disable(file, 1, 0) < 0) + pr_err("Failed to enable event node: %s:%s\n", + group, event); + } +out: + mutex_unlock(&event_mutex); +} + +static void __init +trace_boot_init_events(struct trace_array *tr, struct xbc_node *node) +{ + struct xbc_node *gnode, *enode; + + node = xbc_node_find_child(node, "event"); + if (!node) + return; + /* per-event key starts with "event.GROUP.EVENT" */ + xbc_node_for_each_child(node, gnode) + xbc_node_for_each_child(gnode, enode) + trace_boot_init_one_event(tr, gnode, enode); +} #else #define trace_boot_enable_events(tr, node) do {} while (0) +#define trace_boot_init_events(tr, node) do {} while (0) #endif static void __init @@ -104,6 +163,7 @@ static int __init trace_boot_init(void) return 0; trace_boot_set_ftrace_options(tr, trace_node); + trace_boot_init_events(tr, trace_node); trace_boot_enable_events(tr, trace_node); trace_boot_enable_tracer(tr, trace_node); diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 2cd53ca21b51..d8ada4c6f3f7 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -213,7 +213,7 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file) return ret; } -static int trigger_process_regex(struct trace_event_file *file, char *buff) +int trigger_process_regex(struct trace_event_file *file, char *buff) { char *command, *next = buff; 
struct event_command *p; -- cgit v1.2.3 From 4d655281eb1bb59fad021c0f68afd033f8d0320d Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 11 Jan 2020 01:06:41 +0900 Subject: tracing/boot Add kprobe event support Add kprobe event support on event node to boot-time tracing. If the group name of event is "kprobes", the boot-time tracing defines new probe event according to "probes" values. - ftrace.event.kprobes.EVENT.probes = PROBE[, PROBE2...] Defines new kprobe event based on PROBEs. It is able to define multiple probes on one event, but those must have same type of arguments. For example, ftrace.events.kprobes.myevent { probes = "vfs_read $arg1 $arg2"; enable; } This will add kprobes:myevent on vfs_read with the 1st and the 2nd arguments. Link: http://lkml.kernel.org/r/157867240104.17873.9712052065426433111.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 46 +++++++++++++++++++++++++++++++++++++++++++++ kernel/trace/trace_kprobe.c | 5 +++++ 2 files changed, 51 insertions(+) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 37524031533e..a11dc60299fb 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -76,6 +76,48 @@ trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node) } } +#ifdef CONFIG_KPROBE_EVENTS +extern int trace_kprobe_run_command(const char *command); + +static int __init +trace_boot_add_kprobe_event(struct xbc_node *node, const char *event) +{ + struct xbc_node *anode; + char buf[MAX_BUF_LEN]; + const char *val; + char *p; + int len; + + len = snprintf(buf, ARRAY_SIZE(buf) - 1, "p:kprobes/%s ", event); + if (len >= ARRAY_SIZE(buf)) { + pr_err("Event name is too long: %s\n", event); + return -E2BIG; + } + p = buf + len; + len = ARRAY_SIZE(buf) - len; + + xbc_node_for_each_array_value(node, "probes", anode, val) { + if (strlcpy(p, val, len) >= len) { + pr_err("Probe definition is too long: %s\n", val); + return -E2BIG; + } + if (trace_kprobe_run_command(buf) < 0) { + pr_err("Failed to add probe: %s\n", buf); + return -EINVAL; + } + } + + return 0; +} +#else +static inline int __init +trace_boot_add_kprobe_event(struct xbc_node *node, const char *event) +{ + pr_err("Kprobe event is not supported.\n"); + return -ENOTSUPP; +} +#endif + static void __init trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, struct xbc_node *enode) @@ -88,6 +130,10 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, group = xbc_node_get_data(gnode); event = xbc_node_get_data(enode); + if (!strcmp(group, "kprobes")) + if (trace_boot_add_kprobe_event(enode, event) < 0) + return; + mutex_lock(&event_mutex); file = find_event_file(tr, group, event); if (!file) { diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 8113d6aa7bc5..283b7c437440 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -902,6 +902,11 @@ static int create_or_delete_trace_kprobe(int argc, char **argv) return ret == -ECANCELED ? 
-EINVAL : ret; } +int trace_kprobe_run_command(const char *command) +{ + return trace_run_command(command, create_or_delete_trace_kprobe); +} + static int trace_kprobe_release(struct dyn_event *ev) { struct trace_kprobe *tk = to_trace_kprobe(ev); -- cgit v1.2.3 From 3fbe2d6e1fce255d918b622fb2af22e98364a154 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 11 Jan 2020 01:06:52 +0900 Subject: tracing/boot: Add synthetic event support Add synthetic event node support to boot time tracing. The synthetic event is a kind of event node, but the group name is "synthetic". - ftrace.event.synthetic.EVENT.fields = FIELD[, FIELD2...] Defines new synthetic event with FIELDs. Each field should be "type varname". The synthetic node requires "fields" string arraies, which defines the fields as same as tracing/synth_events interface. Link: http://lkml.kernel.org/r/157867241236.17873.12411615143321557709.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 47 ++++++++++++++++++++++++++++++++++++++++ kernel/trace/trace_events_hist.c | 5 +++++ 2 files changed, 52 insertions(+) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index a11dc60299fb..3054921b0877 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -118,6 +118,50 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event) } #endif +#ifdef CONFIG_HIST_TRIGGERS +extern int synth_event_run_command(const char *command); + +static int __init +trace_boot_add_synth_event(struct xbc_node *node, const char *event) +{ + struct xbc_node *anode; + char buf[MAX_BUF_LEN], *q; + const char *p; + int len, delta, ret; + + len = ARRAY_SIZE(buf); + delta = snprintf(buf, len, "%s", event); + if (delta >= len) { + pr_err("Event name is too long: %s\n", event); + return -E2BIG; + } + len -= delta; q = buf + delta; + + xbc_node_for_each_array_value(node, "fields", anode, p) { + delta = snprintf(q, len, " %s;", p); + if (delta >= len) { + pr_err("fields string is too long: %s\n", p); + return -E2BIG; + } + len -= delta; q += delta; + } + + ret = synth_event_run_command(buf); + if (ret < 0) + pr_err("Failed to add synthetic event: %s\n", buf); + + + return ret; +} +#else +static inline int __init +trace_boot_add_synth_event(struct xbc_node *node, const char *event) +{ + pr_err("Synthetic event is not supported.\n"); + return -ENOTSUPP; +} +#endif + static void __init trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, struct xbc_node *enode) @@ -133,6 +177,9 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, if (!strcmp(group, "kprobes")) if (trace_boot_add_kprobe_event(enode, event) < 0) return; + if (!strcmp(group, "synthetic")) + if (trace_boot_add_synth_event(enode, event) < 0) + return; mutex_lock(&event_mutex); file = find_event_file(tr, group, event); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 1cb4c4c8e5b7..8e90f1ada437 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1384,6 +1384,11 @@ static int create_or_delete_synth_event(int argc, char **argv) return ret == -ECANCELED ? 
-EINVAL : ret; } +int synth_event_run_command(const char *command) +{ + return trace_run_command(command, create_or_delete_synth_event); +} + static int synth_event_create(int argc, const char **argv) { const char *name = argv[0]; -- cgit v1.2.3 From 4f712a4d04a4e49167118b41d8ea96df70a98985 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 11 Jan 2020 01:07:04 +0900 Subject: tracing/boot: Add instance node support Add instance node support to boot-time tracing. User can set some options and event nodes under instance node. - ftrace.instance.INSTANCE[...] Add new INSTANCE instance. Some options and event nodes are acceptable for instance node. Link: http://lkml.kernel.org/r/157867242413.17873.9814204526141500278.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 43 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 5 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 3054921b0877..f5db30d25b0b 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -20,7 +20,7 @@ extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr, unsigned long size, int cpu_id); static void __init -trace_boot_set_ftrace_options(struct trace_array *tr, struct xbc_node *node) +trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node) { struct xbc_node *anode; const char *p; @@ -242,6 +242,40 @@ trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node) } } +static void __init +trace_boot_init_one_instance(struct trace_array *tr, struct xbc_node *node) +{ + trace_boot_set_instance_options(tr, node); + trace_boot_init_events(tr, node); + trace_boot_enable_events(tr, node); + trace_boot_enable_tracer(tr, node); +} + +static void __init +trace_boot_init_instances(struct xbc_node *node) +{ + struct xbc_node *inode; + struct trace_array *tr; + const char *p; + + node = xbc_node_find_child(node, "instance"); + if (!node) + return; + + xbc_node_for_each_child(node, inode) { + p = xbc_node_get_data(inode); + if (!p || *p == '\0') + continue; + + tr = trace_array_get_by_name(p); + if (IS_ERR(tr)) { + pr_err("Failed to get trace instance %s\n", p); + continue; + } + trace_boot_init_one_instance(tr, inode); + } +} + static int __init trace_boot_init(void) { struct xbc_node *trace_node; @@ -255,10 +289,9 @@ static int __init trace_boot_init(void) if (!tr) return 0; - trace_boot_set_ftrace_options(tr, trace_node); - trace_boot_init_events(tr, trace_node); - trace_boot_enable_events(tr, trace_node); - trace_boot_enable_tracer(tr, trace_node); + /* Global trace array is also one instance */ + trace_boot_init_one_instance(tr, trace_node); + trace_boot_init_instances(trace_node); return 0; } -- cgit v1.2.3 From 9d15dbbde1048e490f3773e67d7ef7604bab1409 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 11 Jan 2020 01:07:16 +0900 Subject: tracing/boot: Add cpu_mask option support Add ftrace.cpumask option support to boot-time tracing. This sets cpumask for each instance. - ftrace.[instance.INSTANCE.]cpumask = CPUMASK; Set the trace cpumask. Note that the CPUMASK should be a string which /tracing_cpumask can accepts. 
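A hypothetical example (mask values chosen for illustration). The value is parsed with cpumask_parse(), i.e. the same hex-mask format that the tracing_cpumask file accepts, not a CPU-list string:

  # "e" = CPUs 1-3, "f0" = CPUs 4-7
  ftrace.cpumask = "e"
  ftrace.instance.foo.cpumask = "f0"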
Link: http://lkml.kernel.org/r/157867243625.17873.13613922641273149372.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace.c | 42 +++++++++++++++++++++++++++++------------- kernel/trace/trace_boot.c | 14 ++++++++++++++ 2 files changed, 43 insertions(+), 13 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 6c996d1b1687..106bbc0988fe 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4561,20 +4561,13 @@ out_err: return count; } -static ssize_t -tracing_cpumask_write(struct file *filp, const char __user *ubuf, - size_t count, loff_t *ppos) +int tracing_set_cpumask(struct trace_array *tr, + cpumask_var_t tracing_cpumask_new) { - struct trace_array *tr = file_inode(filp)->i_private; - cpumask_var_t tracing_cpumask_new; - int err, cpu; - - if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) - return -ENOMEM; + int cpu; - err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); - if (err) - goto err_unlock; + if (!tr) + return -EINVAL; local_irq_disable(); arch_spin_lock(&tr->max_lock); @@ -4598,11 +4591,34 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, local_irq_enable(); cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); + + return 0; +} + +static ssize_t +tracing_cpumask_write(struct file *filp, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct trace_array *tr = file_inode(filp)->i_private; + cpumask_var_t tracing_cpumask_new; + int err; + + if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) + return -ENOMEM; + + err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); + if (err) + goto err_free; + + err = tracing_set_cpumask(tr, tracing_cpumask_new); + if (err) + goto err_free; + free_cpumask_var(tracing_cpumask_new); return count; -err_unlock: +err_free: free_cpumask_var(tracing_cpumask_new); return err; diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index f5db30d25b0b..81d923c16a4d 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -18,6 +18,8 @@ extern int trace_set_options(struct trace_array *tr, char *option); extern int tracing_set_tracer(struct trace_array *tr, const char *buf); extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr, unsigned long size, int cpu_id); +extern int tracing_set_cpumask(struct trace_array *tr, + cpumask_var_t tracing_cpumask_new); static void __init trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node) @@ -52,6 +54,18 @@ trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node) if (tracing_resize_ring_buffer(tr, v, RING_BUFFER_ALL_CPUS) < 0) pr_err("Failed to resize trace buffer to %s\n", p); } + + p = xbc_node_find_value(node, "cpumask", NULL); + if (p && *p != '\0') { + cpumask_var_t new_mask; + + if (alloc_cpumask_var(&new_mask, GFP_KERNEL)) { + if (cpumask_parse(p, new_mask) < 0 || + tracing_set_cpumask(tr, new_mask) < 0) + pr_err("Failed to set new CPU mask %s\n", p); + free_cpumask_var(new_mask); + } + } } #ifdef CONFIG_EVENT_TRACING -- cgit v1.2.3 From fe1efe9252f938f0cc624e4ac817c27bcaef6ed0 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 11 Jan 2020 01:07:28 +0900 Subject: tracing/boot: Add function tracer filter options Add below function-tracer filter options to boot-time tracing. 
- ftrace.[instance.INSTANCE.]ftrace.filters This will take an array of tracing function filter rules - ftrace.[instance.INSTANCE.]ftrace.notraces This will take an array of NON-tracing function filter rules Link: http://lkml.kernel.org/r/157867244841.17873.10933616628243103561.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 81d923c16a4d..fa9603dc6469 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -244,11 +244,51 @@ trace_boot_init_events(struct trace_array *tr, struct xbc_node *node) #define trace_boot_init_events(tr, node) do {} while (0) #endif +#ifdef CONFIG_DYNAMIC_FTRACE +extern bool ftrace_filter_param __initdata; +extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset); +extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset); +static void __init +trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node) +{ + struct xbc_node *anode; + const char *p; + char *q; + + xbc_node_for_each_array_value(node, "ftrace.filters", anode, p) { + q = kstrdup(p, GFP_KERNEL); + if (!q) + return; + if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0) + pr_err("Failed to add %s to ftrace filter\n", p); + else + ftrace_filter_param = true; + kfree(q); + } + xbc_node_for_each_array_value(node, "ftrace.notraces", anode, p) { + q = kstrdup(p, GFP_KERNEL); + if (!q) + return; + if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0) + pr_err("Failed to add %s to ftrace filter\n", p); + else + ftrace_filter_param = true; + kfree(q); + } +} +#else +#define trace_boot_set_ftrace_filter(tr, node) do {} while (0) +#endif + static void __init trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node) { const char *p; + trace_boot_set_ftrace_filter(tr, node); + p = xbc_node_find_value(node, "tracer", NULL); if (p && *p != '\0') { if (tracing_set_tracer(tr, p) < 0) -- cgit v1.2.3 From 3b42a4c83a31d8f1d8a7cb7eb2f4ee809d42c69d Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 20 Dec 2019 11:31:43 +0900 Subject: tracing: trigger: Replace unneeded RCU-list traversals With CONFIG_PROVE_RCU_LIST, I had many suspicious RCU warnings when I ran ftracetest trigger testcases. ----- # dmesg -c > /dev/null # ./ftracetest test.d/trigger ... # dmesg | grep "RCU-list traversed" | cut -f 2 -d ] | cut -f 2 -d " " kernel/trace/trace_events_hist.c:6070 kernel/trace/trace_events_hist.c:1760 kernel/trace/trace_events_hist.c:5911 kernel/trace/trace_events_trigger.c:504 kernel/trace/trace_events_hist.c:1810 kernel/trace/trace_events_hist.c:3158 kernel/trace/trace_events_hist.c:3105 kernel/trace/trace_events_hist.c:5518 kernel/trace/trace_events_hist.c:5998 kernel/trace/trace_events_hist.c:6019 kernel/trace/trace_events_hist.c:6044 kernel/trace/trace_events_trigger.c:1500 kernel/trace/trace_events_trigger.c:1540 kernel/trace/trace_events_trigger.c:539 kernel/trace/trace_events_trigger.c:584 ----- I investigated those warnings and found that the RCU-list traversals in event trigger and hist didn't need to use RCU version because those were called only under event_mutex. 
I also checked other RCU-list traversals related to event trigger list, and found that most of them were called from event_hist_trigger_func() or hist_unregister_trigger() or register/unregister functions except for a few cases. Replace these unneeded RCU-list traversals with normal list traversal macro and lockdep_assert_held() to check the event_mutex is held. Link: http://lkml.kernel.org/r/157680910305.11685.15110237954275915782.stgit@devnote2 Reviewed-by: Tom Zanussi Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 41 +++++++++++++++++++++++++++---------- kernel/trace/trace_events_trigger.c | 20 +++++++++++++----- 2 files changed, 45 insertions(+), 16 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 8e90f1ada437..117a1202a6b9 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1771,11 +1771,13 @@ static struct hist_field *find_var(struct hist_trigger_data *hist_data, struct event_trigger_data *test; struct hist_field *hist_field; + lockdep_assert_held(&event_mutex); + hist_field = find_var_field(hist_data, var_name); if (hist_field) return hist_field; - list_for_each_entry_rcu(test, &file->triggers, list) { + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { test_data = test->private_data; hist_field = find_var_field(test_data, var_name); @@ -1825,7 +1827,9 @@ static struct hist_field *find_file_var(struct trace_event_file *file, struct event_trigger_data *test; struct hist_field *hist_field; - list_for_each_entry_rcu(test, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { test_data = test->private_data; hist_field = find_var_field(test_data, var_name); @@ -3120,7 +3124,9 @@ static char *find_trigger_filter(struct hist_trigger_data *hist_data, { struct event_trigger_data *test; - list_for_each_entry_rcu(test, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { if (test->private_data == hist_data) return test->filter_str; @@ -3171,9 +3177,11 @@ find_compatible_hist(struct hist_trigger_data *target_hist_data, struct event_trigger_data *test; unsigned int n_keys; + lockdep_assert_held(&event_mutex); + n_keys = target_hist_data->n_fields - target_hist_data->n_vals; - list_for_each_entry_rcu(test, &file->triggers, list) { + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { hist_data = test->private_data; @@ -5536,7 +5544,7 @@ static int hist_show(struct seq_file *m, void *v) goto out_unlock; } - list_for_each_entry_rcu(data, &event_file->triggers, list) { + list_for_each_entry(data, &event_file->triggers, list) { if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) hist_trigger_show(m, data, n++); } @@ -5929,7 +5937,9 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops, if (hist_data->attrs->name && !named_data) goto new; - list_for_each_entry_rcu(test, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { if (!hist_trigger_match(data, test, named_data, false)) continue; @@ -6013,10 +6023,12 @@ static bool have_hist_trigger_match(struct 
event_trigger_data *data, struct event_trigger_data *test, *named_data = NULL; bool match = false; + lockdep_assert_held(&event_mutex); + if (hist_data->attrs->name) named_data = find_named_trigger(hist_data->attrs->name); - list_for_each_entry_rcu(test, &file->triggers, list) { + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { if (hist_trigger_match(data, test, named_data, false)) { match = true; @@ -6034,10 +6046,12 @@ static bool hist_trigger_check_refs(struct event_trigger_data *data, struct hist_trigger_data *hist_data = data->private_data; struct event_trigger_data *test, *named_data = NULL; + lockdep_assert_held(&event_mutex); + if (hist_data->attrs->name) named_data = find_named_trigger(hist_data->attrs->name); - list_for_each_entry_rcu(test, &file->triggers, list) { + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { if (!hist_trigger_match(data, test, named_data, false)) continue; @@ -6059,10 +6073,12 @@ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops, struct event_trigger_data *test, *named_data = NULL; bool unregistered = false; + lockdep_assert_held(&event_mutex); + if (hist_data->attrs->name) named_data = find_named_trigger(hist_data->attrs->name); - list_for_each_entry_rcu(test, &file->triggers, list) { + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { if (!hist_trigger_match(data, test, named_data, false)) continue; @@ -6088,7 +6104,9 @@ static bool hist_file_check_refs(struct trace_event_file *file) struct hist_trigger_data *hist_data; struct event_trigger_data *test; - list_for_each_entry_rcu(test, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { hist_data = test->private_data; if (check_var_refs(hist_data)) @@ -6331,7 +6349,8 @@ hist_enable_trigger(struct event_trigger_data *data, void *rec, struct enable_trigger_data *enable_data = data->private_data; struct event_trigger_data *test; - list_for_each_entry_rcu(test, &enable_data->file->triggers, list) { + list_for_each_entry_rcu(test, &enable_data->file->triggers, list, + lockdep_is_held(&event_mutex)) { if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { if (enable_data->enable) test->paused = false; diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index d8ada4c6f3f7..60959c31791d 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -501,7 +501,9 @@ void update_cond_flag(struct trace_event_file *file) struct event_trigger_data *data; bool set_cond = false; - list_for_each_entry_rcu(data, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(data, &file->triggers, list) { if (data->filter || event_command_post_trigger(data->cmd_ops) || event_command_needs_rec(data->cmd_ops)) { set_cond = true; @@ -536,7 +538,9 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops, struct event_trigger_data *test; int ret = 0; - list_for_each_entry_rcu(test, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(test, &file->triggers, list) { if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) { ret = -EEXIST; goto out; @@ -581,7 +585,9 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops, struct event_trigger_data *data; bool 
unregistered = false; - list_for_each_entry_rcu(data, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(data, &file->triggers, list) { if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) { unregistered = true; list_del_rcu(&data->list); @@ -1497,7 +1503,9 @@ int event_enable_register_trigger(char *glob, struct event_trigger_data *test; int ret = 0; - list_for_each_entry_rcu(test, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(test, &file->triggers, list) { test_enable_data = test->private_data; if (test_enable_data && (test->cmd_ops->trigger_type == @@ -1537,7 +1545,9 @@ void event_enable_unregister_trigger(char *glob, struct event_trigger_data *data; bool unregistered = false; - list_for_each_entry_rcu(data, &file->triggers, list) { + lockdep_assert_held(&event_mutex); + + list_for_each_entry(data, &file->triggers, list) { enable_data = data->private_data; if (enable_data && (data->cmd_ops->trigger_type == -- cgit v1.2.3 From 59e7cffe5cca6f15c21d394492fa4739172de1c5 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Thu, 5 Jun 2014 20:22:05 +0200 Subject: ring-bufer: kernel-doc warning fixes Also fixes a couple of typos Link: http://lkml.kernel.org/r/1401992525-10417-1-git-send-email-fabf@skynet.be Cc: Andrew Morton Signed-off-by: Fabian Frederick [ Found this deep in the abyss of my INBOX ] Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ring_buffer.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel/trace') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index f846de2aa435..46d67ff68795 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1368,6 +1368,7 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) * __ring_buffer_alloc - allocate a new ring_buffer * @size: the size in bytes per cpu that is needed. * @flags: attributes to set for the ring buffer. + * @key: ring buffer reader_lock_key. * * Currently the only flag that is available is the RB_FL_OVERWRITE * flag. This flag means that the buffer will overwrite old data @@ -4331,6 +4332,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read); /** * ring_buffer_size - return the size of the ring buffer (in bytes) * @buffer: The ring buffer. + * @cpu: The CPU to get ring buffer size from. */ unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) { @@ -4504,6 +4506,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers * @buffer_a: One buffer to swap with * @buffer_b: The other buffer to swap with + * @cpu: the CPU of the buffers to swap * * This function is useful for tracers that want to take a "snapshot" * of a CPU buffer and has another back up buffer lying around. -- cgit v1.2.3 From cfc585a401764f0d352602d614c19866bb84738a Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 14 Jan 2020 16:27:51 -0500 Subject: ring-buffer: Fix kernel doc for rb_update_event() rb_update_event has changed without the kernel-doc update. 
Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ring_buffer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 46d67ff68795..3bab9b0a90b6 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2331,11 +2331,11 @@ static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, /** * rb_update_event - update event type and data + * @cpu_buffer: The per cpu buffer of the @event * @event: the event to update - * @type: the type of event - * @length: the size of the event field in the ring buffer + * @info: The info to update the @event with (contains length and delta) * - * Update the type and data fields of the event. The length + * Update the type and data fields of the @event. The length * is the actual size that is written to the ring buffer, * and with this, we can determine what to place into the * data field. -- cgit v1.2.3 From 82d1b8158c9a77c2c9b04c4af22fd62f3686cd9d Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 16 Jan 2020 08:20:18 -0500 Subject: tracing: Allow trace_printk() to nest in other tracing code trace_printk() is used to debug the kernel which includes the tracing infrastructure. But because it writes to the ring buffer, and so does much of the tracing infrastructure, the ring buffer's recursive detection will drop writes to the ring buffer that is in the same context as the current write is happening (it allows interrupts to write when normal context is writing, but wont let normal context write while normal context is writing). This can cause confusion and think that the code is where the trace_printk() exists is not hit. To solve this, up the recursive nesting of the ring buffer when trace_printk() is called before it writes to the buffer itself. Note, this does make it dangerous to use trace_printk() in the ring buffer code itself, because this basically disables the recursion protection of trace_printk() buffer writes. But as trace_printk() is only used for debugging, and if this does occur, the developer will see the cause real quick (recursive blowing up of the stack). Thus the developer can deal with that. But having trace_printk() silently ignored is a much bigger problem, and disabling recursive protection is a small price to pay to fix it. 
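A condensed sketch of the pattern this patch applies around each trace_printk() style write (simplified from the actual diff that follows):

  ring_buffer_nest_start(buffer);       /* allow an intentional nested write */
  event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, flags, pc);
  if (event) {
          /* fill in the entry ... */
          __buffer_unlock_commit(buffer, event);
  }
  ring_buffer_nest_end(buffer);         /* restore normal recursion protection */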
Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 106bbc0988fe..2e1db19dce97 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -866,10 +866,13 @@ int __trace_puts(unsigned long ip, const char *str, int size) local_save_flags(irq_flags); buffer = global_trace.array_buffer.buffer; + ring_buffer_nest_start(buffer); event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, irq_flags, pc); - if (!event) - return 0; + if (!event) { + size = 0; + goto out; + } entry = ring_buffer_event_data(event); entry->ip = ip; @@ -885,7 +888,8 @@ int __trace_puts(unsigned long ip, const char *str, int size) __buffer_unlock_commit(buffer, event); ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); - + out: + ring_buffer_nest_end(buffer); return size; } EXPORT_SYMBOL_GPL(__trace_puts); @@ -902,6 +906,7 @@ int __trace_bputs(unsigned long ip, const char *str) struct bputs_entry *entry; unsigned long irq_flags; int size = sizeof(struct bputs_entry); + int ret = 0; int pc; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) @@ -914,10 +919,12 @@ int __trace_bputs(unsigned long ip, const char *str) local_save_flags(irq_flags); buffer = global_trace.array_buffer.buffer; + + ring_buffer_nest_start(buffer); event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, irq_flags, pc); if (!event) - return 0; + goto out; entry = ring_buffer_event_data(event); entry->ip = ip; @@ -926,7 +933,10 @@ int __trace_bputs(unsigned long ip, const char *str) __buffer_unlock_commit(buffer, event); ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); - return 1; + ret = 1; + out: + ring_buffer_nest_end(buffer); + return ret; } EXPORT_SYMBOL_GPL(__trace_bputs); @@ -3225,6 +3235,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) local_save_flags(flags); size = sizeof(*entry) + sizeof(u32) * len; buffer = tr->array_buffer.buffer; + ring_buffer_nest_start(buffer); event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, flags, pc); if (!event) @@ -3240,6 +3251,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) } out: + ring_buffer_nest_end(buffer); put_trace_buf(); out_nobuffer: @@ -3282,6 +3294,7 @@ __trace_array_vprintk(struct trace_buffer *buffer, local_save_flags(flags); size = sizeof(*entry) + len + 1; + ring_buffer_nest_start(buffer); event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, flags, pc); if (!event) @@ -3296,6 +3309,7 @@ __trace_array_vprintk(struct trace_buffer *buffer, } out: + ring_buffer_nest_end(buffer); put_trace_buf(); out_nobuffer: -- cgit v1.2.3 From 9a09cd74e7dc6c2ac34b39ea6e74440ceb4c501e Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 21 Jan 2020 13:50:07 +0800 Subject: ftrace: Remove abandoned macros These 2 macros aren't used from commit eee8ded131f1 ("ftrace: Have the function probes call their own function"), so remove them. 
Link: http://lkml.kernel.org/r/1579585807-43316-1-git-send-email-alex.shi@linux.alibaba.com Signed-off-by: Alex Shi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ftrace.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 3f0ae07e72ef..7fe87c7ab1a8 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -62,8 +62,6 @@ }) /* hash bits for specific function selection */ -#define FTRACE_HASH_BITS 7 -#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) #define FTRACE_HASH_DEFAULT_BITS 10 #define FTRACE_HASH_MAX_BITS 12 -- cgit v1.2.3 From aff4866db56e5cc5601dcd896e056160e07ca361 Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 21 Jan 2020 13:54:23 +0800 Subject: ftrace: Remove NR_TO_INIT macro This macro isn't used from commit cb7be3b2fc2c ("ftrace: remove daemon"). So no needs to keep it. Link: http://lkml.kernel.org/r/1579586063-44984-1-git-send-email-alex.shi@linux.alibaba.com Signed-off-by: Alex Shi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ftrace.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 7fe87c7ab1a8..5c701765da5b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1101,9 +1101,6 @@ struct ftrace_page { #define ENTRY_SIZE sizeof(struct dyn_ftrace) #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) -/* estimate from running different kernels */ -#define NR_TO_INIT 10000 - static struct ftrace_page *ftrace_pages_start; static struct ftrace_page *ftrace_pages; -- cgit v1.2.3 From b83479482ff6f856b1308a17768f228be779543a Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 21 Jan 2020 13:54:40 +0800 Subject: ring-buffer: Remove abandoned macro RB_MISSED_FLAGS This macro isn't used since commit d325c402964e ("ring-buffer: Remove unused function ring_buffer_page_len()"), so better to remove it. Link: http://lkml.kernel.org/r/1579586080-45300-1-git-send-email-alex.shi@linux.alibaba.com Signed-off-by: Alex Shi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ring_buffer.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 3bab9b0a90b6..61f0e92ace99 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -300,8 +300,6 @@ u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event) /* Missed count stored at end */ #define RB_MISSED_STORED (1 << 30) -#define RB_MISSED_FLAGS (RB_MISSED_EVENTS|RB_MISSED_STORED) - struct buffer_data_page { u64 time_stamp; /* page time stamp */ local_t commit; /* write committed index */ -- cgit v1.2.3 From 141597204ea2dc173668c8cd7202c4bac2b0c476 Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 21 Jan 2020 13:54:46 +0800 Subject: tracing: Remove unused TRACE_SEQ_BUF_USED This macro isn't used from commit 3a161d99c43c ("tracing: Create seq_buf layer in trace_seq"). so no needs to keep it. Link: http://lkml.kernel.org/r/1579586086-45543-1-git-send-email-alex.shi@linux.alibaba.com Signed-off-by: Alex Shi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_seq.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c index 87de6edafd14..1d84fcc78e3e 100644 --- a/kernel/trace/trace_seq.c +++ b/kernel/trace/trace_seq.c @@ -30,9 +30,6 @@ /* How much buffer is left on the trace_seq? 
*/ #define TRACE_SEQ_BUF_LEFT(s) seq_buf_buffer_left(&(s)->seq) -/* How much buffer is written? */ -#define TRACE_SEQ_BUF_USED(s) seq_buf_used(&(s)->seq) - /* * trace_seq should work with being initialized with 0s. */ -- cgit v1.2.3 From 532f49a6f19a153e202b5a174f8556fd50c36dd4 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 17 Jan 2020 08:30:07 +0300 Subject: tracing/boot: Fix an IS_ERR() vs NULL bug The trace_array_get_by_name() function doesn't return error pointers, it returns NULL on error. Link: http://lkml.kernel.org/r/20200117053007.5h2juv272pokqhtq@kili.mountain Fixes: 4f712a4d04a4 ("tracing/boot: Add instance node support") Acked-by: Masami Hiramatsu Signed-off-by: Dan Carpenter Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index fa9603dc6469..cd541ac1cbc1 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -322,7 +322,7 @@ trace_boot_init_instances(struct xbc_node *node) continue; tr = trace_array_get_by_name(p); - if (IS_ERR(tr)) { + if (!tr) { pr_err("Failed to get trace instance %s\n", p); continue; } -- cgit v1.2.3 From 34423f250a372d71346922edf2b84a19d811a311 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 22 Jan 2020 06:44:50 -0500 Subject: tracing: Fix uninitialized buffer var on early exit to trace_vbprintk() If we exit due to a bad input to trace_printk() (highly unlikely), then the buffer variable will not be initialized when we unnest the ring buffer. Reported-by: kbuild test robot Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 2e1db19dce97..d1410b4462ac 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3230,7 +3230,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) - goto out; + goto out_put; local_save_flags(flags); size = sizeof(*entry) + sizeof(u32) * len; @@ -3252,6 +3252,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) out: ring_buffer_nest_end(buffer); +out_put: put_trace_buf(); out_nobuffer: -- cgit v1.2.3 From 659ded30272d67a04b3692f0bfa12263be20d790 Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 21 Jan 2020 13:54:35 +0800 Subject: trace/kprobe: Remove unused MAX_KPROBE_CMDLINE_SIZE This limitation are never lunched from introduce commit 970988e19eb0 ("tracing/kprobe: Add kprobe_event= boot parameter") Could we remove it if no intention to implement it? 
Link: http://lkml.kernel.org/r/1579586075-45132-1-git-send-email-alex.shi@linux.alibaba.com Acked-by: Masami Hiramatsu Signed-off-by: Alex Shi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 283b7c437440..bf20cd7f2666 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -22,7 +22,6 @@ #define KPROBE_EVENT_SYSTEM "kprobes" #define KRETPROBE_MAXACTIVE_MAX 4096 -#define MAX_KPROBE_CMDLINE_SIZE 1024 /* Kprobe early definition from command line */ static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata; -- cgit v1.2.3 From dfb6cd1e654315168e36d947471bd2a0ccd834ae Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Fri, 24 Jan 2020 17:47:49 -0500 Subject: tracing: Fix very unlikely race of registering two stat tracers Looking through old emails in my INBOX, I came across a patch from Luis Henriques that attempted to fix a race of two stat tracers registering the same stat trace (extremely unlikely, as this is done in the kernel, and probably doesn't even exist). The submitted patch wasn't quite right as it needed to deal with clean up a bit better (if two stat tracers were the same, it would have the same files). But to make the code cleaner, all we needed to do is to keep the all_stat_sessions_mutex held for most of the registering function. Link: http://lkml.kernel.org/r/1410299375-20068-1-git-send-email-luis.henriques@canonical.com Fixes: 002bb86d8d42f ("tracing/ftrace: separate events tracing and stats tracing engine") Reported-by: Luis Henriques Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_stat.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c index 874f1274cf99..da8a38c3d5e4 100644 --- a/kernel/trace/trace_stat.c +++ b/kernel/trace/trace_stat.c @@ -304,7 +304,7 @@ static int init_stat_file(struct stat_session *session) int register_stat_tracer(struct tracer_stat *trace) { struct stat_session *session, *node; - int ret; + int ret = -EINVAL; if (!trace) return -EINVAL; @@ -315,17 +315,15 @@ int register_stat_tracer(struct tracer_stat *trace) /* Already registered? */ mutex_lock(&all_stat_sessions_mutex); list_for_each_entry(node, &all_stat_sessions, session_list) { - if (node->ts == trace) { - mutex_unlock(&all_stat_sessions_mutex); - return -EINVAL; - } + if (node->ts == trace) + goto out; } - mutex_unlock(&all_stat_sessions_mutex); + ret = -ENOMEM; /* Init the session */ session = kzalloc(sizeof(*session), GFP_KERNEL); if (!session) - return -ENOMEM; + goto out; session->ts = trace; INIT_LIST_HEAD(&session->session_list); @@ -334,15 +332,16 @@ int register_stat_tracer(struct tracer_stat *trace) ret = init_stat_file(session); if (ret) { destroy_session(session); - return ret; + goto out; } + ret = 0; /* Register */ - mutex_lock(&all_stat_sessions_mutex); list_add_tail(&session->session_list, &all_stat_sessions); + out: mutex_unlock(&all_stat_sessions_mutex); - return 0; + return ret; } void unregister_stat_tracer(struct tracer_stat *trace) -- cgit v1.2.3 From afccc00f75bbbee4e4ae833a96c2d29a7259c693 Mon Sep 17 00:00:00 2001 From: Luis Henriques Date: Tue, 9 Sep 2014 22:49:41 +0100 Subject: tracing: Fix tracing_stat return values in error handling paths tracing_stat_init() was always returning '0', even on the error paths. 
It now returns -ENODEV if tracing_init_dentry() fails or -ENOMEM if it fails to create the 'trace_stat' debugfs directory. Link: http://lkml.kernel.org/r/1410299381-20108-1-git-send-email-luis.henriques@canonical.com Fixes: ed6f1c996bfe4 ("tracing: Check return value of tracing_init_dentry()") Signed-off-by: Luis Henriques [ Pulled from the archeological digging of my INBOX ] Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_stat.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c index da8a38c3d5e4..d1fa19773cc8 100644 --- a/kernel/trace/trace_stat.c +++ b/kernel/trace/trace_stat.c @@ -280,18 +280,22 @@ static int tracing_stat_init(void) d_tracing = tracing_init_dentry(); if (IS_ERR(d_tracing)) - return 0; + return -ENODEV; stat_dir = tracefs_create_dir("trace_stat", d_tracing); - if (!stat_dir) + if (!stat_dir) { pr_warn("Could not create tracefs 'trace_stat' entry\n"); + return -ENOMEM; + } return 0; } static int init_stat_file(struct stat_session *session) { - if (!stat_dir && tracing_stat_init()) - return -ENODEV; + int ret; + + if (!stat_dir && (ret = tracing_stat_init())) + return ret; session->file = tracefs_create_file(session->ts->name, 0644, stat_dir, -- cgit v1.2.3 From cbc3b92ce037f5e7536f6db157d185cd8b8f615c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 24 Sep 2014 16:14:12 -0400 Subject: tracing: Set kernel_stack's caller size properly I noticed when trying to use the trace-cmd python interface that reading the raw buffer wasn't working for kernel_stack events. This is because it uses a stubbed version of __dynamic_array that doesn't do the __data_loc trick and encode the length of the array into the field. Instead it just shows up as a size of 0. So change this to __array and set the len to FTRACE_STACK_ENTRIES since this is what we actually do in practice and matches how user_stack_trace works. Link: http://lkml.kernel.org/r/1411589652-1318-1-git-send-email-jbacik@fb.com Signed-off-by: Josef Bacik [ Pulled from the archeological digging of my INBOX ] Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_entries.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index fc8e97328e54..78c146efb862 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -174,7 +174,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry, F_STRUCT( __field( int, size ) - __dynamic_array(unsigned long, caller ) + __array( unsigned long, caller, FTRACE_STACK_ENTRIES ) ), F_printk("\t=> %ps\n\t=> %ps\n\t=> %ps\n" -- cgit v1.2.3 From b3f7a6cd490112085eadb578b6f4a5a34d140726 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 22 Nov 2014 21:30:12 +0300 Subject: tracing: Remove unneeded NULL check We checked "iter->trace" earlier so there is no need to check here.
Link: http://lkml.kernel.org/r/20141122183012.GB6994@mwanda Signed-off-by: Dan Carpenter [ Pulled from the archeological digging of my INBOX ] Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d1410b4462ac..6fed9b0a8d58 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4224,7 +4224,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) mutex_init(&iter->mutex); /* Notify the tracer early; before we stop tracing. */ - if (iter->trace && iter->trace->open) + if (iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ -- cgit v1.2.3 From 28394da25888168df379c40910591b95e8e449f7 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Fri, 24 Jan 2020 20:47:46 -0500 Subject: tracing: Decrement trace_array when bootconfig creates an instance The trace_array_get_by_name() creates a ftrace instance and trace_array_put() is used to remove the reference. Even though the trace_array_get_by_name() creates the instance, it also adds a reference count to it, that prevents user space from removing it. As the bootconfig just creates the instance on boot up, it should still be used where it can be deleted by user space after boot. A trace_array_put() is required to let that happen. Also, change the documentation on trace_array_get_by_name() to make this not be so confusing. Link: https://lore.kernel.org/r/20200124205927.76128804@rorschach.local.home Fixes: 4f712a4d04a4e ("tracing/boot: Add instance node support") Acked-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace.c | 4 ++++ kernel/trace/trace_boot.c | 1 + 2 files changed, 5 insertions(+) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 6fed9b0a8d58..0a5569b1cace 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -8602,6 +8602,10 @@ out_unlock: * NOTE: This function increments the reference counter associated with the * trace array returned. This makes sure it cannot be freed while in use. * Use trace_array_put() once the trace array is no longer needed. + * If the trace_array is to be freed, trace_array_destroy() needs to + * be called after the trace_array_put(), or simply let user space delete + * it from the tracefs instances directory. But until the + * trace_array_put() is called, user space can not delete it. * */ struct trace_array *trace_array_get_by_name(const char *name) diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index cd541ac1cbc1..2f616cd926b0 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -327,6 +327,7 @@ trace_boot_init_instances(struct xbc_node *node) continue; } trace_boot_init_one_instance(tr, inode); + trace_array_put(tr); } } -- cgit v1.2.3 From 24589e3a20876dc07c62f45c8f8f8266dd39ba38 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Sat, 25 Jan 2020 10:52:30 -0500 Subject: tracing: Use pr_err() instead of WARN() for memory failures As warnings can trigger panics, especially when "panic_on_warn" is set, memory failure warnings can cause panics and fail fuzz testers that are stressing memory. Create a MEM_FAIL() macro to use instead of WARN() in the tracing code (perhaps this should be a kernel wide macro?), and use that for memory failure issues. This should stop failing fuzz tests due to warnings. 
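As a rough illustration of how the macro is meant to be used (the structure and variable names here are made up, not part of this change), MEM_FAIL() evaluates the condition, emits a one-time pr_err() instead of a WARN(), and returns the condition so callers can still branch on the failure:

	struct foo *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (MEM_FAIL(!p, "Failed to allocate foo\n"))
		return -ENOMEM;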
Link: https://lore.kernel.org/r/CACT4Y+ZP-7np20GVRu3p+eZys9GPtbu+JpfV+HtsufAzvTgJrg@mail.gmail.com Suggested-by: Dmitry Vyukov Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ftrace.c | 4 ++-- kernel/trace/trace.c | 18 +++++++++--------- kernel/trace/trace.h | 12 ++++++++++++ 3 files changed, 23 insertions(+), 11 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 5c701765da5b..fdb1a9532420 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -5459,7 +5459,7 @@ static void __init set_ftrace_early_graph(char *buf, int enable) struct ftrace_hash *hash; hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); - if (WARN_ON(!hash)) + if (MEM_FAIL(!hash, "Failed to allocate hash\n")) return; while (buf) { @@ -6591,7 +6591,7 @@ static void add_to_clear_hash_list(struct list_head *clear_list, func = kmalloc(sizeof(*func), GFP_KERNEL); if (!func) { - WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n"); + MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n"); return; } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 0a5569b1cace..6a28b1b9bf42 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3126,7 +3126,7 @@ static int alloc_percpu_trace_buffer(void) struct trace_buffer_struct *buffers; buffers = alloc_percpu(struct trace_buffer_struct); - if (WARN(!buffers, "Could not allocate percpu trace_printk buffer")) + if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer")) return -ENOMEM; trace_percpu_buffer = buffers; @@ -7932,7 +7932,7 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); - WARN_ONCE(!tr->percpu_dir, + MEM_FAIL(!tr->percpu_dir, "Could not create tracefs directory 'per_cpu/%d'\n", cpu); return tr->percpu_dir; @@ -8253,7 +8253,7 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer) for (cnt = 0; opts[cnt].name; cnt++) { create_trace_option_file(tr, &topts[cnt], flags, &opts[cnt]); - WARN_ONCE(topts[cnt].entry == NULL, + MEM_FAIL(topts[cnt].entry == NULL, "Failed to create trace option: %s", opts[cnt].name); } @@ -8437,7 +8437,7 @@ static int allocate_trace_buffers(struct trace_array *tr, int size) #ifdef CONFIG_TRACER_MAX_TRACE ret = allocate_trace_buffer(tr, &tr->max_buffer, allocate_snapshot ? 
size : 1); - if (WARN_ON(ret)) { + if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) { ring_buffer_free(tr->array_buffer.buffer); tr->array_buffer.buffer = NULL; free_percpu(tr->array_buffer.data); @@ -8726,7 +8726,7 @@ static __init void create_trace_instances(struct dentry *d_tracer) trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, instance_mkdir, instance_rmdir); - if (WARN_ON(!trace_instance_dir)) + if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n")) return; } @@ -8796,7 +8796,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) #endif if (ftrace_create_function_files(tr, d_tracer)) - WARN(1, "Could not allocate function filter files"); + MEM_FAIL(1, "Could not allocate function filter files"); #ifdef CONFIG_TRACER_SNAPSHOT trace_create_file("snapshot", 0644, d_tracer, @@ -9348,8 +9348,7 @@ __init static int tracer_alloc_buffers(void) /* TODO: make the number of buffers hot pluggable with CPUS */ if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { - printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); - WARN_ON(1); + MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n"); goto out_free_savedcmd; } @@ -9422,7 +9421,8 @@ void __init early_trace_init(void) if (tracepoint_printk) { tracepoint_print_iter = kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); - if (WARN_ON(!tracepoint_print_iter)) + if (MEM_FAIL(!tracepoint_print_iter, + "Failed to allocate trace iterator\n")) tracepoint_printk = 0; else static_key_enable(&tracepoint_printk_key.key); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 4812a36affac..6bb64d89c321 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -94,6 +94,18 @@ enum trace_type { #include "trace_entries.h" +/* Use this for memory failure errors */ +#define MEM_FAIL(condition, fmt, ...) ({ \ + static bool __section(.data.once) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + pr_err("ERROR: " fmt, ##__VA_ARGS__); \ + } \ + unlikely(__ret_warn_once); \ +}) + /* * syscalls are special, and need special handling, this is why * they are not included in trace_entries.h -- cgit v1.2.3 From b527b638fd63ba791dc90a0a6e9a3035b10df52b Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 28 Jun 2019 12:40:20 -0500 Subject: tracing: Simplify assignment parsing for hist triggers In the process of adding better error messages for sorting, I realized that strsep was being used incorrectly and some of the error paths I was expecting to be hit weren't and just fell through to the common invalid key error case. It also became obvious that for keyword assignments, it wasn't necessary to save the full assignment and reparse it later, and having a common empty-assignment check would also make more sense in terms of error processing. Change the code to fix these problems and simplify it for new error message changes in a subsequent patch. 
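For context, the body of a hist trigger (what follows the 'hist:' prefix), e.g. "keys=pid:vals=runtime:sort=runtime", is split on ':' and every piece containing '=' is handed to parse_assignment(). A sketch of the simplified keyword handling (not the verbatim kernel code): str_has_prefix() reports the length of the matched prefix, so only the right-hand side of an assignment needs to be duplicated:

	int len;

	if ((len = str_has_prefix(str, "keys=")))
		attrs->keys_str = kstrdup(str + len, GFP_KERNEL);	/* saves just "pid" */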
Link: http://lkml.kernel.org/r/1c3ef0b6655deaf345f6faee2584a0298ac2d743.1561743018.git.zanussi@kernel.org Fixes: e62347d24534 ("tracing: Add hist trigger support for user-defined sorting ('sort=' param)") Fixes: 7ef224d1d0e3 ("tracing: Add 'hist' event trigger command") Fixes: a4072fe85ba3 ("tracing: Add a clock attribute for hist triggers") Reported-by: Masami Hiramatsu Reviewed-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 70 ++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 43 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 117a1202a6b9..bf2bcb8d7725 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -2039,12 +2039,6 @@ static int parse_map_size(char *str) unsigned long size, map_bits; int ret; - strsep(&str, "="); - if (!str) { - ret = -EINVAL; - goto out; - } - ret = kstrtoul(str, 0, &size); if (ret) goto out; @@ -2104,25 +2098,25 @@ static int parse_action(char *str, struct hist_trigger_attrs *attrs) static int parse_assignment(struct trace_array *tr, char *str, struct hist_trigger_attrs *attrs) { - int ret = 0; + int len, ret = 0; - if ((str_has_prefix(str, "key=")) || - (str_has_prefix(str, "keys="))) { - attrs->keys_str = kstrdup(str, GFP_KERNEL); + if ((len = str_has_prefix(str, "key=")) || + (len = str_has_prefix(str, "keys="))) { + attrs->keys_str = kstrdup(str + len, GFP_KERNEL); if (!attrs->keys_str) { ret = -ENOMEM; goto out; } - } else if ((str_has_prefix(str, "val=")) || - (str_has_prefix(str, "vals=")) || - (str_has_prefix(str, "values="))) { - attrs->vals_str = kstrdup(str, GFP_KERNEL); + } else if ((len = str_has_prefix(str, "val=")) || + (len = str_has_prefix(str, "vals=")) || + (len = str_has_prefix(str, "values="))) { + attrs->vals_str = kstrdup(str + len, GFP_KERNEL); if (!attrs->vals_str) { ret = -ENOMEM; goto out; } - } else if (str_has_prefix(str, "sort=")) { - attrs->sort_key_str = kstrdup(str, GFP_KERNEL); + } else if ((len = str_has_prefix(str, "sort="))) { + attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL); if (!attrs->sort_key_str) { ret = -ENOMEM; goto out; @@ -2133,12 +2127,8 @@ static int parse_assignment(struct trace_array *tr, ret = -ENOMEM; goto out; } - } else if (str_has_prefix(str, "clock=")) { - strsep(&str, "="); - if (!str) { - ret = -EINVAL; - goto out; - } + } else if ((len = str_has_prefix(str, "clock="))) { + str += len; str = strstrip(str); attrs->clock = kstrdup(str, GFP_KERNEL); @@ -2146,8 +2136,8 @@ static int parse_assignment(struct trace_array *tr, ret = -ENOMEM; goto out; } - } else if (str_has_prefix(str, "size=")) { - int map_bits = parse_map_size(str); + } else if ((len = str_has_prefix(str, "size="))) { + int map_bits = parse_map_size(str + len); if (map_bits < 0) { ret = map_bits; @@ -2187,8 +2177,14 @@ parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str) while (trigger_str) { char *str = strsep(&trigger_str, ":"); + char *rhs; - if (strchr(str, '=')) { + rhs = strchr(str, '='); + if (rhs) { + if (!strlen(++rhs)) { + ret = -EINVAL; + goto free; + } ret = parse_assignment(tr, str, attrs); if (ret) goto free; @@ -4522,10 +4518,6 @@ static int create_val_fields(struct hist_trigger_data *hist_data, if (!fields_str) goto out; - strsep(&fields_str, "="); - if (!fields_str) - goto out; - for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX && j < TRACING_MAP_VALS_MAX; i++) { field_str = strsep(&fields_str, 
","); @@ -4620,10 +4612,6 @@ static int create_key_fields(struct hist_trigger_data *hist_data, if (!fields_str) goto out; - strsep(&fields_str, "="); - if (!fields_str) - goto out; - for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) { field_str = strsep(&fields_str, ","); if (!field_str) @@ -4781,12 +4769,6 @@ static int create_sort_keys(struct hist_trigger_data *hist_data) if (!fields_str) goto out; - strsep(&fields_str, "="); - if (!fields_str) { - ret = -EINVAL; - goto out; - } - for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) { struct hist_field *hist_field; char *field_str, *field_name; @@ -4795,9 +4777,11 @@ static int create_sort_keys(struct hist_trigger_data *hist_data) sort_key = &hist_data->sort_keys[i]; field_str = strsep(&fields_str, ","); - if (!field_str) { - if (i == 0) - ret = -EINVAL; + if (!field_str) + break; + + if (!*field_str) { + ret = -EINVAL; break; } @@ -4807,7 +4791,7 @@ static int create_sort_keys(struct hist_trigger_data *hist_data) } field_name = strsep(&field_str, "."); - if (!field_name) { + if (!field_name || !*field_name) { ret = -EINVAL; break; } -- cgit v1.2.3 From 4de26c8c967d55551d3983771116a2c3c0a4f464 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 28 Jun 2019 12:40:21 -0500 Subject: tracing: Add hist trigger error messages for sort specification Add error codes and messages for all the error paths leading to sort specification parsing errors. Link: http://lkml.kernel.org/r/237830dc05e583fbb53664d817a784297bf961be.1561743018.git.zanussi@kernel.org Reviewed-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index bf2bcb8d7725..23458ba9e5f5 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -66,7 +66,12 @@ C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \ C(INVALID_REF_KEY, "Using variable references in keys not supported"), \ C(VAR_NOT_FOUND, "Couldn't find variable"), \ - C(FIELD_NOT_FOUND, "Couldn't find field"), + C(FIELD_NOT_FOUND, "Couldn't find field"), \ + C(EMPTY_ASSIGNMENT, "Empty assignment"), \ + C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \ + C(EMPTY_SORT_FIELD, "Empty sort field"), \ + C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \ + C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), #undef C #define C(a, b) HIST_ERR_##a @@ -2183,6 +2188,7 @@ parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str) if (rhs) { if (!strlen(++rhs)) { ret = -EINVAL; + hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str)); goto free; } ret = parse_assignment(tr, str, attrs); @@ -4743,7 +4749,7 @@ static int create_hist_fields(struct hist_trigger_data *hist_data, return ret; } -static int is_descending(const char *str) +static int is_descending(struct trace_array *tr, const char *str) { if (!str) return 0; @@ -4754,11 +4760,14 @@ static int is_descending(const char *str) if (strcmp(str, "ascending") == 0) return 0; + hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str)); + return -EINVAL; } static int create_sort_keys(struct hist_trigger_data *hist_data) { + struct trace_array *tr = hist_data->event_file->tr; char *fields_str = hist_data->attrs->sort_key_str; struct tracing_map_sort_key *sort_key; int descending, ret = 0; @@ -4782,10 +4791,12 @@ static int create_sort_keys(struct hist_trigger_data 
*hist_data) if (!*field_str) { ret = -EINVAL; + hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort=")); break; } if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) { + hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort=")); ret = -EINVAL; break; } @@ -4793,11 +4804,12 @@ static int create_sort_keys(struct hist_trigger_data *hist_data) field_name = strsep(&field_str, "."); if (!field_name || !*field_name) { ret = -EINVAL; + hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort=")); break; } if (strcmp(field_name, "hitcount") == 0) { - descending = is_descending(field_str); + descending = is_descending(tr, field_str); if (descending < 0) { ret = descending; break; @@ -4819,7 +4831,7 @@ static int create_sort_keys(struct hist_trigger_data *hist_data) if (strcmp(field_name, test_name) == 0) { sort_key->field_idx = idx; - descending = is_descending(field_str); + descending = is_descending(tr, field_str); if (descending < 0) { ret = descending; goto out; @@ -4830,6 +4842,7 @@ static int create_sort_keys(struct hist_trigger_data *hist_data) } if (j == hist_data->n_fields) { ret = -EINVAL; + hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name)); break; } } -- cgit v1.2.3 From d0a497066f92eee6e5750af6a0ca32866030931a Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 28 Jun 2019 12:40:22 -0500 Subject: tracing: Add 'hist:' to hist trigger error log error string The 'hist:' prefix gets stripped from the command text during command processing, but should be added back when displaying the command during error processing. Not only because it's what should be displayed but also because not having it means the test cases fail because the caret is miscalculated by the length of the prefix string. Link: http://lkml.kernel.org/r/449df721f560042e22382f67574bcc5b4d830d3d.1561743018.git.zanussi@kernel.org Reviewed-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 23458ba9e5f5..c322826e0726 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -612,7 +612,8 @@ static void last_cmd_set(struct trace_event_file *file, char *str) if (!str) return; - strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1); + strcpy(last_cmd, "hist:"); + strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:")); if (file) { call = file->event_call; -- cgit v1.2.3 From 76a598ec8c4fde58aab79b9f7c40c33d54eca67b Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 29 Jan 2020 18:36:35 +0900 Subject: tracing/boot: Include required headers and sort it alphabetically Include some required (but currently indirectly included) headers and sort it alphabetically. 
Link: http://lkml.kernel.org/r/158029059514.12381.6597832266860248781.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 2f616cd926b0..5aad41961f03 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -6,9 +6,16 @@ #define pr_fmt(fmt) "trace_boot: " fmt +#include +#include #include #include -#include +#include +#include +#include +#include +#include +#include #include "trace.h" -- cgit v1.2.3 From 5c3469cb899abe998299aafb8f16f325d62d2d68 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 29 Jan 2020 18:36:44 +0900 Subject: tracing/boot: Move external function declarations to kernel/trace/trace.h Move external function declarations into kernel/trace/trace.h from trace_boot.c for tracing subsystem internal use. Link: http://lkml.kernel.org/r/158029060405.12381.11944554430359702545.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace.h | 17 +++++++++++++++++ kernel/trace/trace_boot.c | 15 --------------- 2 files changed, 17 insertions(+), 15 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 6bb64d89c321..b3075b637d14 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1157,6 +1157,11 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd); void ftrace_create_filter_files(struct ftrace_ops *ops, struct dentry *parent); void ftrace_destroy_filter_files(struct ftrace_ops *ops); + +extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset); +extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset); #else struct ftrace_func_command; @@ -1905,6 +1910,15 @@ void trace_printk_start_comm(void); int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); +/* Used from boot time tracer */ +extern int trace_set_options(struct trace_array *tr, char *option); +extern int tracing_set_tracer(struct trace_array *tr, const char *buf); +extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr, + unsigned long size, int cpu_id); +extern int tracing_set_cpumask(struct trace_array *tr, + cpumask_var_t tracing_cpumask_new); + + #define MAX_EVENT_NAME_LEN 64 extern int trace_run_command(const char *buf, int (*createfn)(int, char**)); @@ -1964,6 +1978,9 @@ static inline const char *get_syscall_name(int syscall) #ifdef CONFIG_EVENT_TRACING void trace_event_init(void); void trace_event_eval_update(struct trace_eval_map **map, int len); +/* Used from boot time tracer */ +extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); +extern int trigger_process_regex(struct trace_event_file *file, char *buff); #else static inline void __init trace_event_init(void) { } static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 5aad41961f03..4d37bf5c3742 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -21,13 +21,6 @@ #define MAX_BUF_LEN 256 -extern int trace_set_options(struct trace_array *tr, char *option); -extern int tracing_set_tracer(struct trace_array *tr, const char *buf); -extern ssize_t tracing_resize_ring_buffer(struct trace_array 
*tr, - unsigned long size, int cpu_id); -extern int tracing_set_cpumask(struct trace_array *tr, - cpumask_var_t tracing_cpumask_new); - static void __init trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node) { @@ -76,9 +69,6 @@ trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node) } #ifdef CONFIG_EVENT_TRACING -extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); -extern int trigger_process_regex(struct trace_event_file *file, char *buff); - static void __init trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node) { @@ -252,11 +242,6 @@ trace_boot_init_events(struct trace_array *tr, struct xbc_node *node) #endif #ifdef CONFIG_DYNAMIC_FTRACE -extern bool ftrace_filter_param __initdata; -extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, - int len, int reset); -extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, - int len, int reset); static void __init trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node) { -- cgit v1.2.3 From 64ae572bc7d0060429e40e1c8d803ce5eb31a0d6 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Sat, 17 Aug 2019 10:12:08 -0400 Subject: tracing: Fix sched switch start/stop refcount racy updates Reading the sched_cmdline_ref and sched_tgid_ref initial state within tracing_start_sched_switch without holding the sched_register_mutex is racy against concurrent updates, which can lead to tracepoint probes being registered more than once (and thus trigger warnings within tracepoint.c). [ May be the fix for this bug ] Link: https://lore.kernel.org/r/000000000000ab6f84056c786b93@google.com Link: http://lkml.kernel.org/r/20190817141208.15226-1-mathieu.desnoyers@efficios.com Cc: stable@vger.kernel.org CC: Steven Rostedt (VMware) CC: Joel Fernandes (Google) CC: Peter Zijlstra CC: Thomas Gleixner CC: Paul E. McKenney Reported-by: syzbot+774fddf07b7ab29a1e55@syzkaller.appspotmail.com Fixes: d914ba37d7145 ("tracing: Add support for recording tgid of tasks") Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_sched_switch.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index e288168661e1..e304196d7c28 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -89,8 +89,10 @@ static void tracing_sched_unregister(void) static void tracing_start_sched_switch(int ops) { - bool sched_register = (!sched_cmdline_ref && !sched_tgid_ref); + bool sched_register; + mutex_lock(&sched_register_mutex); + sched_register = (!sched_cmdline_ref && !sched_tgid_ref); switch (ops) { case RECORD_CMDLINE: -- cgit v1.2.3 From e4075e8bdffd93a9b6d6e1d52fabedceeca5a91b Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Fri, 24 Jan 2020 10:02:56 +0300 Subject: ftrace: fpid_next() should increase position index If the seq_file .next function does not change the position index, a read after some lseek can generate unexpected output. Without patch: # dd bs=4 skip=1 if=/sys/kernel/tracing/set_ftrace_pid dd: /sys/kernel/tracing/set_ftrace_pid: cannot skip to specified offset id no pid 2+1 records in 2+1 records out 10 bytes copied, 0.000213285 s, 46.9 kB/s Notice the "id" followed by "no pid".
With the patch: # dd bs=4 skip=1 if=/sys/kernel/tracing/set_ftrace_pid dd: /sys/kernel/tracing/set_ftrace_pid: cannot skip to specified offset id 0+1 records in 0+1 records out 3 bytes copied, 0.000202112 s, 14.8 kB/s Notice that it only prints "id" and not the "no pid" afterward. Link: http://lkml.kernel.org/r/4f87c6ad-f114-30bb-8506-c32274ce2992@virtuozzo.com https://bugzilla.kernel.org/show_bug.cgi?id=206283 Signed-off-by: Vasily Averin Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ftrace.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index fdb1a9532420..0e9612c30995 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -7026,9 +7026,10 @@ static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) struct trace_array *tr = m->private; struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); - if (v == FTRACE_NO_PIDS) + if (v == FTRACE_NO_PIDS) { + (*pos)++; return NULL; - + } return trace_pid_next(pid_list, v, pos); } -- cgit v1.2.3 From 039958a5f7aad695d4d52683c7d48aa13fb18249 Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Fri, 24 Jan 2020 10:03:01 +0300 Subject: tracing: eval_map_next() should always increase position index If the seq_file .next function does not change the position index, a read after some lseek can generate unexpected output. Link: http://lkml.kernel.org/r/7ad85b22-1866-977c-db17-88ac438bc764@virtuozzo.com Signed-off-by: Vasily Averin [ This is not a bug fix, it just makes it "technically correct" which is why I applied it. NULL is only returned on an anomaly which triggers a WARN_ON ] Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 6a28b1b9bf42..8d144fd94aa8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -5399,14 +5399,12 @@ static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos) * Paranoid! If ptr points to end, we don't want to increment past it. * This really should never happen. */ + (*pos)++; ptr = update_eval_map(ptr); if (WARN_ON_ONCE(!ptr)) return NULL; ptr++; - - (*pos)++; - ptr = update_eval_map(ptr); return ptr; -- cgit v1.2.3 From 6722b23e7a2ace078344064a9735fb73e554e9ef Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Fri, 24 Jan 2020 10:03:06 +0300 Subject: trigger_next should increase position index If the seq_file .next function does not change the position index, a read after some lseek can generate unexpected output. Without patch: # dd bs=30 skip=1 if=/sys/kernel/tracing/events/sched/sched_switch/trigger dd: /sys/kernel/tracing/events/sched/sched_switch/trigger: cannot skip to specified offset n traceoff snapshot stacktrace enable_event disable_event enable_hist disable_hist hist # Available triggers: # traceon traceoff snapshot stacktrace enable_event disable_event enable_hist disable_hist hist 6+1 records in 6+1 records out 206 bytes copied, 0.00027916 s, 738 kB/s Notice the printing of "# Available triggers:..." after the line. With the patch: # dd bs=30 skip=1 if=/sys/kernel/tracing/events/sched/sched_switch/trigger dd: /sys/kernel/tracing/events/sched/sched_switch/trigger: cannot skip to specified offset n traceoff snapshot stacktrace enable_event disable_event enable_hist disable_hist hist 2+1 records in 2+1 records out 88 bytes copied, 0.000526867 s, 167 kB/s It only prints the end of the file, and does not restart.
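As with the other .next fixes above, the underlying seq_file rule is that a .next callback must advance *pos even when there is nothing more to return; otherwise a read following an lseek re-emits stale output. A minimal sketch of the corrected pattern (the names are invented for illustration):

	static void *example_next(struct seq_file *m, void *v, loff_t *pos)
	{
		(*pos)++;			/* always advance, even when done */
		if (v == LAST_TOKEN)
			return NULL;		/* nothing left to show */
		return next_record(v);
	}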
Link: http://lkml.kernel.org/r/3c35ee24-dd3a-8119-9c19-552ed253388a@virtuozzo.com https://bugzilla.kernel.org/show_bug.cgi?id=206283 Signed-off-by: Vasily Averin Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_trigger.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 60959c31791d..dd34a1b46a86 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -116,9 +116,10 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos) { struct trace_event_file *event_file = event_file_data(m->private); - if (t == SHOW_AVAILABLE_TRIGGERS) + if (t == SHOW_AVAILABLE_TRIGGERS) { + (*pos)++; return NULL; - + } return seq_list_next(t, &event_file->triggers, pos); } -- cgit v1.2.3 From 89c95fcef1942415e0f20d8c82e6e36ff8eeca9c Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 29 Jan 2020 12:59:21 -0600 Subject: tracing: Add trace_array_find/_get() to find instance trace arrays Add a new trace_array_find() function that can be used to find a trace array given the instance name, and replace existing code that does the same thing with it. Also add trace_array_find_get() which does the same but returns the trace array after upping its refcount. Also make both available for use outside of trace.c. Link: http://lkml.kernel.org/r/cb68528c975eba95bee4561ac67dd1499423b2e5.1580323897.git.zanussi@kernel.org Acked-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace.c | 43 +++++++++++++++++++++++++++++++++---------- kernel/trace/trace.h | 2 ++ 2 files changed, 35 insertions(+), 10 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8d144fd94aa8..183b031a3828 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -8499,6 +8499,34 @@ static void update_tracer_options(struct trace_array *tr) mutex_unlock(&trace_types_lock); } +/* Must have trace_types_lock held */ +struct trace_array *trace_array_find(const char *instance) +{ + struct trace_array *tr, *found = NULL; + + list_for_each_entry(tr, &ftrace_trace_arrays, list) { + if (tr->name && strcmp(tr->name, instance) == 0) { + found = tr; + break; + } + } + + return found; +} + +struct trace_array *trace_array_find_get(const char *instance) +{ + struct trace_array *tr; + + mutex_lock(&trace_types_lock); + tr = trace_array_find(instance); + if (tr) + tr->ref++; + mutex_unlock(&trace_types_lock); + + return tr; +} + static struct trace_array *trace_array_create(const char *name) { struct trace_array *tr; @@ -8575,10 +8603,8 @@ static int instance_mkdir(const char *name) mutex_lock(&trace_types_lock); ret = -EEXIST; - list_for_each_entry(tr, &ftrace_trace_arrays, list) { - if (tr->name && strcmp(tr->name, name) == 0) - goto out_unlock; - } + if (trace_array_find(name)) + goto out_unlock; tr = trace_array_create(name); @@ -8706,12 +8732,9 @@ static int instance_rmdir(const char *name) mutex_lock(&trace_types_lock); ret = -ENODEV; - list_for_each_entry(tr, &ftrace_trace_arrays, list) { - if (tr->name && strcmp(tr->name, name) == 0) { - ret = __remove_instance(tr); - break; - } - } + tr = trace_array_find(name); + if (tr) + ret = __remove_instance(tr); mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index b3075b637d14..f5480a2aa334 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ 
-358,6 +358,8 @@ extern struct mutex trace_types_lock; extern int trace_array_get(struct trace_array *tr); extern int tracing_check_open_get_tr(struct trace_array *tr); +extern struct trace_array *trace_array_find(const char *instance); +extern struct trace_array *trace_array_find_get(const char *instance); extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs); extern int tracing_set_clock(struct trace_array *tr, const char *clockstr); -- cgit v1.2.3 From e3e2a2cc9c96725457ad6f31712ea7681a55666e Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 29 Jan 2020 12:59:22 -0600 Subject: tracing: Add trace_get/put_event_file() Add a function to get an event file and prevent it from going away on module or instance removal. trace_get_event_file() will find an event file in a given instance (if instance is NULL, it assumes the top trace array) and return it, pinning the instance's trace array as well as the event's module, if applicable, so they won't go away while in use. trace_put_event_file() does the matching release. Link: http://lkml.kernel.org/r/bb31ac4bdda168d5ed3c4b5f5a4c8f633e8d9118.1580323897.git.zanussi@kernel.org Acked-by: Masami Hiramatsu Signed-off-by: Tom Zanussi [ Moved trace_array_put() to end of trace_put_event_file() ] Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 5 +++ kernel/trace/trace_events.c | 85 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+) (limited to 'kernel/trace') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 20948ee56f8c..8d621a73c97e 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -349,6 +349,11 @@ enum { EVENT_FILE_FL_WAS_ENABLED_BIT, }; +extern struct trace_event_file *trace_get_event_file(const char *instance, + const char *system, + const char *event); +extern void trace_put_event_file(struct trace_event_file *file); + /* * Event file flags: * ENABLED - The event is enabled diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index dfb736a964d6..da62472b1297 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -2536,6 +2536,91 @@ find_event_file(struct trace_array *tr, const char *system, const char *event) return file; } +/** + * trace_get_event_file - Find and return a trace event file + * @instance: The name of the trace instance containing the event + * @system: The name of the system containing the event + * @event: The name of the event + * + * Return a trace event file given the trace instance name, trace + * system, and trace event name. If the instance name is NULL, it + * refers to the top-level trace array. + * + * This function will look it up and return it if found, after calling + * trace_array_get() to prevent the instance from going away, and + * increment the event's module refcount to prevent it from being + * removed. + * + * To release the file, call trace_put_event_file(), which will call + * trace_array_put() and decrement the event's module refcount. + * + * Return: The trace event on success, ERR_PTR otherwise. 
+ */ +struct trace_event_file *trace_get_event_file(const char *instance, + const char *system, + const char *event) +{ + struct trace_array *tr = top_trace_array(); + struct trace_event_file *file = NULL; + int ret = -EINVAL; + + if (instance) { + tr = trace_array_find_get(instance); + if (!tr) + return ERR_PTR(-ENOENT); + } else { + ret = trace_array_get(tr); + if (ret) + return ERR_PTR(ret); + } + + mutex_lock(&event_mutex); + + file = find_event_file(tr, system, event); + if (!file) { + trace_array_put(tr); + ret = -EINVAL; + goto out; + } + + /* Don't let event modules unload while in use */ + ret = try_module_get(file->event_call->mod); + if (!ret) { + trace_array_put(tr); + ret = -EBUSY; + goto out; + } + + ret = 0; + out: + mutex_unlock(&event_mutex); + + if (ret) + file = ERR_PTR(ret); + + return file; +} +EXPORT_SYMBOL_GPL(trace_get_event_file); + +/** + * trace_put_event_file - Release a file from trace_get_event_file() + * @file: The trace event file + * + * If a file was retrieved using trace_get_event_file(), this should + * be called when it's no longer needed. It will cancel the previous + * trace_array_get() called by that function, and decrement the + * event's module refcount. + */ +void trace_put_event_file(struct trace_event_file *file) +{ + mutex_lock(&event_mutex); + module_put(file->event_call->mod); + mutex_unlock(&event_mutex); + + trace_array_put(file->tr); +} +EXPORT_SYMBOL_GPL(trace_put_event_file); + #ifdef CONFIG_DYNAMIC_FTRACE /* Avoid typos */ -- cgit v1.2.3 From f5f6b255a253e2c3132ca283e9090a6343bfb719 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 29 Jan 2020 12:59:23 -0600 Subject: tracing: Add synth_event_delete() create_or_delete_synth_event() contains code to delete a synthetic event, which would be useful on its own - specifically, it would be useful to allow event-creating modules to call it separately. Separate out the delete code from that function and create an exported function named synth_event_delete(). 
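A hedged usage sketch for a module's cleanup path (the event name is made up); as the code below shows, the call returns -ENOENT if no such event exists and -EBUSY if the event is still referenced:

	static void __exit sample_exit(void)
	{
		int ret;

		ret = synth_event_delete("sample_synth");
		if (ret)
			pr_warn("could not remove sample_synth: %d\n", ret);
	}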
Link: http://lkml.kernel.org/r/050db3b06df7f0a4b8a2922da602d1d879c7c1c2.1580323897.git.zanussi@kernel.org Acked-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 2 ++ kernel/trace/trace_events_hist.c | 57 +++++++++++++++++++++++++++++----------- 2 files changed, 43 insertions(+), 16 deletions(-) (limited to 'kernel/trace') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 8d621a73c97e..25fe743bcbaf 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -354,6 +354,8 @@ extern struct trace_event_file *trace_get_event_file(const char *instance, const char *event); extern void trace_put_event_file(struct trace_event_file *file); +extern int synth_event_delete(const char *name); + /* * Event file flags: * ENABLED - The event is enabled diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index c322826e0726..21e316732700 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1360,29 +1360,54 @@ static int __create_synth_event(int argc, const char *name, const char **argv) goto out; } +static int destroy_synth_event(struct synth_event *se) +{ + int ret; + + if (se->ref) + ret = -EBUSY; + else { + ret = unregister_synth_event(se); + if (!ret) { + dyn_event_remove(&se->devent); + free_synth_event(se); + } + } + + return ret; +} + +/** + * synth_event_delete - Delete a synthetic event + * @event_name: The name of the new sythetic event + * + * Delete a synthetic event that was created with synth_event_create(). + * + * Return: 0 if successful, error otherwise. + */ +int synth_event_delete(const char *event_name) +{ + struct synth_event *se = NULL; + int ret = -ENOENT; + + mutex_lock(&event_mutex); + se = find_synth_event(event_name); + if (se) + ret = destroy_synth_event(se); + mutex_unlock(&event_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_delete); + static int create_or_delete_synth_event(int argc, char **argv) { const char *name = argv[0]; - struct synth_event *event = NULL; int ret; /* trace_run_command() ensures argc != 0 */ if (name[0] == '!') { - mutex_lock(&event_mutex); - event = find_synth_event(name + 1); - if (event) { - if (event->ref) - ret = -EBUSY; - else { - ret = unregister_synth_event(event); - if (!ret) { - dyn_event_remove(&event->devent); - free_synth_event(event); - } - } - } else - ret = -ENOENT; - mutex_unlock(&event_mutex); + ret = synth_event_delete(name + 1); return ret; } -- cgit v1.2.3 From 86c5426baddae9ff192e3159b9c2e7c14e3964c6 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 29 Jan 2020 12:59:24 -0600 Subject: tracing: Add dynamic event command creation interface Add an interface used to build up dynamic event creation commands, such as synthetic and kprobe events. Interfaces specific to those particular types of events and others can be built on top of this interface. Command creation is started by first using the dynevent_cmd_init() function to initialize the dynevent_cmd object. Following that, args are appended and optionally checked by the dynevent_arg_add() and dynevent_arg_pair_add() functions, which use objects representing arguments and pairs of arguments, initialized respectively by dynevent_arg_init() and dynevent_arg_pair_init(). Finally, once all args have been successfully added, the command is finalized and actually created using dynevent_create(). 
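A minimal sketch of that flow, assuming a type-specific run_command callback (my_run_command, the buffer, and the event name below are illustrative only):

	char buf[MAX_DYNEVENT_CMD_LEN];
	struct dynevent_cmd cmd;
	struct dynevent_arg arg;
	int ret;

	dynevent_cmd_init(&cmd, buf, sizeof(buf), DYNEVENT_TYPE_NONE,
			  my_run_command);

	dynevent_arg_init(&arg, NULL, 0);	/* no check_arg(), default ' ' separator */
	arg.str = "myevent";
	ret = dynevent_arg_add(&cmd, &arg);
	if (!ret)
		ret = dynevent_create(&cmd);	/* invokes my_run_command(&cmd) */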
The code here for actually printing into the dyn_event->cmd buffer using snprintf() etc was adapted from v4 of Masami's 'tracing/boot: Add synthetic event support' patch. Link: http://lkml.kernel.org/r/1f65fa44390b6f238f6036777c3784ced1dcc6a0.1580323897.git.zanussi@kernel.org Signed-off-by: Tom Zanussi Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 23 ++++ kernel/trace/trace_dynevent.c | 240 ++++++++++++++++++++++++++++++++++++++++++ kernel/trace/trace_dynevent.h | 33 ++++++ 3 files changed, 296 insertions(+) (limited to 'kernel/trace') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 25fe743bcbaf..651b03d5e272 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -354,6 +354,29 @@ extern struct trace_event_file *trace_get_event_file(const char *instance, const char *event); extern void trace_put_event_file(struct trace_event_file *file); +#define MAX_DYNEVENT_CMD_LEN (2048) + +enum dynevent_type { + DYNEVENT_TYPE_NONE, +}; + +struct dynevent_cmd; + +typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd); + +struct dynevent_cmd { + char *buf; + const char *event_name; + int maxlen; + int remaining; + unsigned int n_fields; + enum dynevent_type type; + dynevent_create_fn_t run_command; + void *private_data; +}; + +extern int dynevent_create(struct dynevent_cmd *cmd); + extern int synth_event_delete(const char *name); /* diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c index 89779eb84a07..6ffdbc4fda53 100644 --- a/kernel/trace/trace_dynevent.c +++ b/kernel/trace/trace_dynevent.c @@ -223,3 +223,243 @@ static __init int init_dynamic_event(void) return 0; } fs_initcall(init_dynamic_event); + +/** + * dynevent_arg_add - Add an arg to a dynevent_cmd + * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd + * @arg: The argument to append to the current cmd + * + * Append an argument to a dynevent_cmd. The argument string will be + * appended to the current cmd string, followed by a separator, if + * applicable. Before the argument is added, the check_arg() + * function, if defined, is called. + * + * The cmd string, separator, and check_arg() function should be set + * using the dynevent_arg_init() before any arguments are added using + * this function. + * + * Return: 0 if successful, error otherwise. + */ +int dynevent_arg_add(struct dynevent_cmd *cmd, + struct dynevent_arg *arg) +{ + int ret = 0; + int delta; + char *q; + + if (arg->check_arg) { + ret = arg->check_arg(arg); + if (ret) + return ret; + } + + q = cmd->buf + (cmd->maxlen - cmd->remaining); + + delta = snprintf(q, cmd->remaining, " %s%c", arg->str, arg->separator); + if (delta >= cmd->remaining) { + pr_err("String is too long: %s\n", arg->str); + return -E2BIG; + } + cmd->remaining -= delta; + + return ret; +} + +/** + * dynevent_arg_pair_add - Add an arg pair to a dynevent_cmd + * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd + * @arg_pair: The argument pair to append to the current cmd + * + * Append an argument pair to a dynevent_cmd. An argument pair + * consists of a left-hand-side argument and a right-hand-side + * argument separated by an operator, which can be whitespace, all + * followed by a separator, if applicable. This can be used to add + * arguments of the form 'type variable_name;' or 'x+y'. 
+ * + * The lhs argument string will be appended to the current cmd string, + * followed by an operator, if applicable, followd by the rhs string, + * followed finally by a separator, if applicable. Before anything is + * added, the check_arg() function, if defined, is called. + * + * The cmd strings, operator, separator, and check_arg() function + * should be set using the dynevent_arg_pair_init() before any arguments + * are added using this function. + * + * Return: 0 if successful, error otherwise. + */ +int dynevent_arg_pair_add(struct dynevent_cmd *cmd, + struct dynevent_arg_pair *arg_pair) +{ + int ret = 0; + int delta; + char *q; + + if (arg_pair->check_arg) { + ret = arg_pair->check_arg(arg_pair); + if (ret) + return ret; + } + + q = cmd->buf + (cmd->maxlen - cmd->remaining); + + delta = snprintf(q, cmd->remaining, " %s%c", arg_pair->lhs, + arg_pair->operator); + if (delta >= cmd->remaining) { + pr_err("field string is too long: %s\n", arg_pair->lhs); + return -E2BIG; + } + cmd->remaining -= delta; q += delta; + + delta = snprintf(q, cmd->remaining, "%s%c", arg_pair->rhs, + arg_pair->separator); + if (delta >= cmd->remaining) { + pr_err("field string is too long: %s\n", arg_pair->rhs); + return -E2BIG; + } + cmd->remaining -= delta; q += delta; + + return ret; +} + +/** + * dynevent_str_add - Add a string to a dynevent_cmd + * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd + * @str: The string to append to the current cmd + * + * Append a string to a dynevent_cmd. The string will be appended to + * the current cmd string as-is, with nothing prepended or appended. + * + * Return: 0 if successful, error otherwise. + */ +int dynevent_str_add(struct dynevent_cmd *cmd, const char *str) +{ + int ret = 0; + int delta; + char *q; + + q = cmd->buf + (cmd->maxlen - cmd->remaining); + + delta = snprintf(q, cmd->remaining, "%s", str); + if (delta >= cmd->remaining) { + pr_err("String is too long: %s\n", str); + return -E2BIG; + } + cmd->remaining -= delta; + + return ret; +} + +/** + * dynevent_cmd_init - Initialize a dynevent_cmd object + * @cmd: A pointer to the dynevent_cmd struct representing the cmd + * @buf: A pointer to the buffer to generate the command into + * @maxlen: The length of the buffer the command will be generated into + * @type: The type of the cmd, checked against further operations + * @run_command: The type-specific function that will actually run the command + * + * Initialize a dynevent_cmd. A dynevent_cmd is used to build up and + * run dynamic event creation commands, such as commands for creating + * synthetic and kprobe events. Before calling any of the functions + * used to build the command, a dynevent_cmd object should be + * instantiated and initialized using this function. + * + * The initialization sets things up by saving a pointer to the + * user-supplied buffer and its length via the @buf and @maxlen + * params, and by saving the cmd-specific @type and @run_command + * params which are used to check subsequent dynevent_cmd operations + * and actually run the command when complete. 
+ */ +void dynevent_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen, + enum dynevent_type type, + dynevent_create_fn_t run_command) +{ + memset(cmd, '\0', sizeof(*cmd)); + + cmd->buf = buf; + cmd->maxlen = maxlen; + cmd->remaining = cmd->maxlen; + cmd->type = type; + cmd->run_command = run_command; +} + +/** + * dynevent_arg_init - Initialize a dynevent_arg object + * @arg: A pointer to the dynevent_arg struct representing the arg + * @check_arg: An (optional) pointer to a function checking arg sanity + * @separator: An (optional) separator, appended after adding the arg + * + * Initialize a dynevent_arg object. A dynevent_arg represents an + * object used to append single arguments to the current command + * string. The @check_arg function, if present, will be used to check + * the sanity of the current arg string (which is directly set by the + * caller). After the arg string is successfully appended to the + * command string, the optional @separator is appended. If no + * separator was specified when initializing the arg, a space will be + * appended. + */ +void dynevent_arg_init(struct dynevent_arg *arg, + dynevent_check_arg_fn_t check_arg, + char separator) +{ + memset(arg, '\0', sizeof(*arg)); + + if (!separator) + separator = ' '; + arg->separator = separator; + + arg->check_arg = check_arg; +} + +/** + * dynevent_arg_pair_init - Initialize a dynevent_arg_pair object + * @arg_pair: A pointer to the dynevent_arg_pair struct representing the arg + * @check_arg: An (optional) pointer to a function checking arg sanity + * @operator: An (optional) operator, appended after adding the first arg + * @separator: An (optional) separator, appended after adding the second arg + * + * Initialize a dynevent_arg_pair object. A dynevent_arg_pair + * represents an object used to append argument pairs such as 'type + * variable_name;' or 'x+y' to the current command string. An + * argument pair consists of a left-hand-side argument and a + * right-hand-side argument separated by an operator, which can be + * whitespace, all followed by a separator, if applicable. The + * @check_arg function, if present, will be used to check the sanity + * of the current arg strings (which is directly set by the caller). + * After the first arg string is successfully appended to the command + * string, the optional @operator is appended, followed by the second + * arg and and optional @separator. If no separator was specified + * when initializing the arg, a space will be appended. + */ +void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair, + dynevent_check_arg_fn_t check_arg, + char operator, char separator) +{ + memset(arg_pair, '\0', sizeof(*arg_pair)); + + if (!operator) + operator = ' '; + arg_pair->operator = operator; + + if (!separator) + separator = ' '; + arg_pair->separator = separator; + + arg_pair->check_arg = check_arg; +} + +/** + * dynevent_create - Create the dynamic event contained in dynevent_cmd + * @cmd: The dynevent_cmd object containing the dynamic event creation command + * + * Once a dynevent_cmd object has been successfully built up via the + * dynevent_cmd_init(), dynevent_arg_add() and dynevent_arg_pair_add() + * functions, this function runs the final command to actually create + * the event. + * + * Return: 0 if the event was successfully created, error otherwise. 
+ */ +int dynevent_create(struct dynevent_cmd *cmd) +{ + return cmd->run_command(cmd); +} +EXPORT_SYMBOL_GPL(dynevent_create); diff --git a/kernel/trace/trace_dynevent.h b/kernel/trace/trace_dynevent.h index 46898138d2df..b593fc34c5b1 100644 --- a/kernel/trace/trace_dynevent.h +++ b/kernel/trace/trace_dynevent.h @@ -117,4 +117,37 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type); #define for_each_dyn_event_safe(pos, n) \ list_for_each_entry_safe(pos, n, &dyn_event_list, list) +extern void dynevent_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen, + enum dynevent_type type, + dynevent_create_fn_t run_command); + +typedef int (*dynevent_check_arg_fn_t)(void *data); + +struct dynevent_arg { + const char *str; + char separator; /* e.g. ';', ',', or nothing */ + dynevent_check_arg_fn_t check_arg; +}; + +extern void dynevent_arg_init(struct dynevent_arg *arg, + dynevent_check_arg_fn_t check_arg, + char separator); +extern int dynevent_arg_add(struct dynevent_cmd *cmd, + struct dynevent_arg *arg); + +struct dynevent_arg_pair { + const char *lhs; + const char *rhs; + char operator; /* e.g. '=' or nothing */ + char separator; /* e.g. ';', ',', or nothing */ + dynevent_check_arg_fn_t check_arg; +}; + +extern void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair, + dynevent_check_arg_fn_t check_arg, + char operator, char separator); +extern int dynevent_arg_pair_add(struct dynevent_cmd *cmd, + struct dynevent_arg_pair *arg_pair); +extern int dynevent_str_add(struct dynevent_cmd *cmd, const char *str); + #endif -- cgit v1.2.3 From 35ca5207c2d111abb9e072f028945d5c12b20836 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 29 Jan 2020 12:59:25 -0600 Subject: tracing: Add synthetic event command generation functions Add functions used to generate synthetic event commands, built on top of the dynevent_cmd interface. synth_event_gen_cmd_start() is used to create a synthetic event command using a variable arg list and synth_event_gen_cmd_array_start() does the same thing but using an array of field descriptors. synth_event_add_field(), synth_event_add_field_str() and synth_event_add_fields() can be used to add single fields one by one or as a group. Once all desired fields are added, synth_event_gen_cmd_end() is used to actually execute the command and create the event. synth_event_create() does everything, including creating the event, in a single call. 
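A hedged sketch of the one-call variant described above (the event and field names are invented for illustration):

	static struct synth_field_desc sample_fields[] = {
		{ .type = "pid_t",	.name = "pid"		},
		{ .type = "u64",	.name = "delta_ns"	},
	};

	static int __init sample_init(void)
	{
		/* Defines the "sample_latency" synthetic event in one call. */
		return synth_event_create("sample_latency", sample_fields,
					  ARRAY_SIZE(sample_fields), THIS_MODULE);
	}

The incremental variant would instead initialize a dynevent_cmd with synth_event_cmd_init(), start it with synth_event_gen_cmd_start() or synth_event_gen_cmd_array_start(), add fields with synth_event_add_field()/synth_event_add_fields(), and finish with synth_event_gen_cmd_end().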
Link: http://lkml.kernel.org/r/38fef702fad5ef208009f459552f34a94befd860.1580323897.git.zanussi@kernel.org Acked-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 37 ++++ kernel/trace/trace_events_hist.c | 379 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 412 insertions(+), 4 deletions(-) (limited to 'kernel/trace') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 651b03d5e272..07b83532a3c6 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -357,6 +357,7 @@ extern void trace_put_event_file(struct trace_event_file *file); #define MAX_DYNEVENT_CMD_LEN (2048) enum dynevent_type { + DYNEVENT_TYPE_SYNTH = 1, DYNEVENT_TYPE_NONE, }; @@ -379,6 +380,42 @@ extern int dynevent_create(struct dynevent_cmd *cmd); extern int synth_event_delete(const char *name); +extern void synth_event_cmd_init(struct dynevent_cmd *cmd, + char *buf, int maxlen); + +extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, + const char *name, + struct module *mod, ...); + +#define synth_event_gen_cmd_start(cmd, name, mod, ...) \ + __synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL) + +struct synth_field_desc { + const char *type; + const char *name; +}; + +extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, + const char *name, + struct module *mod, + struct synth_field_desc *fields, + unsigned int n_fields); +extern int synth_event_create(const char *name, + struct synth_field_desc *fields, + unsigned int n_fields, struct module *mod); + +extern int synth_event_add_field(struct dynevent_cmd *cmd, + const char *type, + const char *name); +extern int synth_event_add_field_str(struct dynevent_cmd *cmd, + const char *type_name); +extern int synth_event_add_fields(struct dynevent_cmd *cmd, + struct synth_field_desc *fields, + unsigned int n_fields); + +#define synth_event_gen_cmd_end(cmd) \ + dynevent_create(cmd) + /* * Event file flags: * ENABLED - The event is enabled diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 21e316732700..5a910bb193e9 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -379,7 +379,7 @@ struct hist_trigger_data { unsigned int n_save_var_str; }; -static int synth_event_create(int argc, const char **argv); +static int create_synth_event(int argc, const char **argv); static int synth_event_show(struct seq_file *m, struct dyn_event *ev); static int synth_event_release(struct dyn_event *ev); static bool synth_event_is_busy(struct dyn_event *ev); @@ -387,7 +387,7 @@ static bool synth_event_match(const char *system, const char *event, int argc, const char **argv, struct dyn_event *ev); static struct dyn_event_operations synth_event_ops = { - .create = synth_event_create, + .create = create_synth_event, .show = synth_event_show, .is_busy = synth_event_is_busy, .free = synth_event_release, @@ -412,6 +412,7 @@ struct synth_event { struct trace_event_class class; struct trace_event_call call; struct tracepoint *tp; + struct module *mod; }; static bool is_synth_event(struct dyn_event *ev) @@ -1292,6 +1293,273 @@ struct hist_var_data { struct hist_trigger_data *hist_data; }; +static int synth_event_check_arg_fn(void *data) +{ + struct dynevent_arg_pair *arg_pair = data; + int size; + + size = synth_field_size((char *)arg_pair->lhs); + + return size ? 
0 : -EINVAL; +} + +/** + * synth_event_add_field - Add a new field to a synthetic event cmd + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @type: The type of the new field to add + * @name: The name of the new field to add + * + * Add a new field to a synthetic event cmd object. Field ordering is in + * the same order the fields are added. + * + * See synth_field_size() for available types. If field_name contains + * [n] the field is considered to be an array. + * + * Return: 0 if successful, error otherwise. + */ +int synth_event_add_field(struct dynevent_cmd *cmd, const char *type, + const char *name) +{ + struct dynevent_arg_pair arg_pair; + int ret; + + if (cmd->type != DYNEVENT_TYPE_SYNTH) + return -EINVAL; + + if (!type || !name) + return -EINVAL; + + dynevent_arg_pair_init(&arg_pair, synth_event_check_arg_fn, 0, ';'); + + arg_pair.lhs = type; + arg_pair.rhs = name; + + ret = dynevent_arg_pair_add(cmd, &arg_pair); + if (ret) + return ret; + + if (++cmd->n_fields > SYNTH_FIELDS_MAX) + ret = -EINVAL; + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_add_field); + +/** + * synth_event_add_field_str - Add a new field to a synthetic event cmd + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @type_name: The type and name of the new field to add, as a single string + * + * Add a new field to a synthetic event cmd object, as a single + * string. The @type_name string is expected to be of the form 'type + * name', which will be appended by ';'. No sanity checking is done - + * what's passed in is assumed to already be well-formed. Field + * ordering is in the same order the fields are added. + * + * See synth_field_size() for available types. If field_name contains + * [n] the field is considered to be an array. + * + * Return: 0 if successful, error otherwise. + */ +int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name) +{ + struct dynevent_arg arg; + int ret; + + if (cmd->type != DYNEVENT_TYPE_SYNTH) + return -EINVAL; + + if (!type_name) + return -EINVAL; + + dynevent_arg_init(&arg, NULL, ';'); + + arg.str = type_name; + + ret = dynevent_arg_add(cmd, &arg); + if (ret) + return ret; + + if (++cmd->n_fields > SYNTH_FIELDS_MAX) + ret = -EINVAL; + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_add_field_str); + +/** + * synth_event_add_fields - Add multiple fields to a synthetic event cmd + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @fields: An array of type/name field descriptions + * @n_fields: The number of field descriptions contained in the fields array + * + * Add a new set of fields to a synthetic event cmd object. The event + * fields that will be defined for the event should be passed in as an + * array of struct synth_field_desc, and the number of elements in the + * array passed in as n_fields. Field ordering will retain the + * ordering given in the fields array. + * + * See synth_field_size() for available types. If field_name contains + * [n] the field is considered to be an array. + * + * Return: 0 if successful, error otherwise. 
+ */ +int synth_event_add_fields(struct dynevent_cmd *cmd, + struct synth_field_desc *fields, + unsigned int n_fields) +{ + unsigned int i; + int ret = 0; + + for (i = 0; i < n_fields; i++) { + if (fields[i].type == NULL || fields[i].name == NULL) { + ret = -EINVAL; + break; + } + + ret = synth_event_add_field(cmd, fields[i].type, fields[i].name); + if (ret) + break; + } + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_add_fields); + +/** + * __synth_event_gen_cmd_start - Start a synthetic event command from arg list + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @name: The name of the synthetic event + * @mod: The module creating the event, NULL if not created from a module + * @args: Variable number of arg (pairs), one pair for each field + * + * NOTE: Users normally won't want to call this function directly, but + * rather use the synth_event_gen_cmd_start() wrapper, which + * automatically adds a NULL to the end of the arg list. If this + * function is used directly, make sure the last arg in the variable + * arg list is NULL. + * + * Generate a synthetic event command to be executed by + * synth_event_gen_cmd_end(). This function can be used to generate + * the complete command or only the first part of it; in the latter + * case, synth_event_add_field(), synth_event_add_field_str(), or + * synth_event_add_fields() can be used to add more fields following + * this. + * + * There should be an even number variable args, each pair consisting + * of a type followed by a field name. + * + * See synth_field_size() for available types. If field_name contains + * [n] the field is considered to be an array. + * + * Return: 0 if successful, error otherwise. + */ +int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name, + struct module *mod, ...) +{ + struct dynevent_arg arg; + va_list args; + int ret; + + cmd->event_name = name; + cmd->private_data = mod; + + if (cmd->type != DYNEVENT_TYPE_SYNTH) + return -EINVAL; + + dynevent_arg_init(&arg, NULL, 0); + arg.str = name; + ret = dynevent_arg_add(cmd, &arg); + if (ret) + return ret; + + va_start(args, mod); + for (;;) { + const char *type, *name; + + type = va_arg(args, const char *); + if (!type) + break; + name = va_arg(args, const char *); + if (!name) + break; + + if (++cmd->n_fields > SYNTH_FIELDS_MAX) { + ret = -EINVAL; + break; + } + + ret = synth_event_add_field(cmd, type, name); + if (ret) + break; + } + va_end(args); + + return ret; +} +EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start); + +/** + * synth_event_gen_cmd_array_start - Start synthetic event command from an array + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @name: The name of the synthetic event + * @fields: An array of type/name field descriptions + * @n_fields: The number of field descriptions contained in the fields array + * + * Generate a synthetic event command to be executed by + * synth_event_gen_cmd_end(). This function can be used to generate + * the complete command or only the first part of it; in the latter + * case, synth_event_add_field(), synth_event_add_field_str(), or + * synth_event_add_fields() can be used to add more fields following + * this. + * + * The event fields that will be defined for the event should be + * passed in as an array of struct synth_field_desc, and the number of + * elements in the array passed in as n_fields. Field ordering will + * retain the ordering given in the fields array. + * + * See synth_field_size() for available types. 
If field_name contains + * [n] the field is considered to be an array. + * + * Return: 0 if successful, error otherwise. + */ +int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name, + struct module *mod, + struct synth_field_desc *fields, + unsigned int n_fields) +{ + struct dynevent_arg arg; + unsigned int i; + int ret = 0; + + cmd->event_name = name; + cmd->private_data = mod; + + if (cmd->type != DYNEVENT_TYPE_SYNTH) + return -EINVAL; + + if (n_fields > SYNTH_FIELDS_MAX) + return -EINVAL; + + dynevent_arg_init(&arg, NULL, 0); + arg.str = name; + ret = dynevent_arg_add(cmd, &arg); + if (ret) + return ret; + + for (i = 0; i < n_fields; i++) { + if (fields[i].type == NULL || fields[i].name == NULL) + return -EINVAL; + + ret = synth_event_add_field(cmd, fields[i].type, fields[i].name); + if (ret) + break; + } + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start); + static int __create_synth_event(int argc, const char *name, const char **argv) { struct synth_field *field, *fields[SYNTH_FIELDS_MAX]; @@ -1360,6 +1628,56 @@ static int __create_synth_event(int argc, const char *name, const char **argv) goto out; } +/** + * synth_event_create - Create a new synthetic event + * @name: The name of the new sythetic event + * @fields: An array of type/name field descriptions + * @n_fields: The number of field descriptions contained in the fields array + * @mod: The module creating the event, NULL if not created from a module + * + * Create a new synthetic event with the given name under the + * trace/events/synthetic/ directory. The event fields that will be + * defined for the event should be passed in as an array of struct + * synth_field_desc, and the number elements in the array passed in as + * n_fields. Field ordering will retain the ordering given in the + * fields array. + * + * If the new synthetic event is being created from a module, the mod + * param must be non-NULL. This will ensure that the trace buffer + * won't contain unreadable events. + * + * The new synth event should be deleted using synth_event_delete() + * function. The new synthetic event can be generated from modules or + * other kernel code using trace_synth_event() and related functions. + * + * Return: 0 if successful, error otherwise. + */ +int synth_event_create(const char *name, struct synth_field_desc *fields, + unsigned int n_fields, struct module *mod) +{ + struct dynevent_cmd cmd; + char *buf; + int ret; + + buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN); + + ret = synth_event_gen_cmd_array_start(&cmd, name, mod, + fields, n_fields); + if (ret) + goto out; + + ret = synth_event_gen_cmd_end(&cmd); + out: + kfree(buf); + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_create); + static int destroy_synth_event(struct synth_event *se) { int ret; @@ -1388,14 +1706,33 @@ static int destroy_synth_event(struct synth_event *se) int synth_event_delete(const char *event_name) { struct synth_event *se = NULL; + struct module *mod = NULL; int ret = -ENOENT; mutex_lock(&event_mutex); se = find_synth_event(event_name); - if (se) + if (se) { + mod = se->mod; ret = destroy_synth_event(se); + } mutex_unlock(&event_mutex); + if (mod) { + mutex_lock(&trace_types_lock); + /* + * It is safest to reset the ring buffer if the module + * being unloaded registered any events that were + * used. The only worry is if a new module gets + * loaded, and takes on the same id as the events of + * this module. 
When printing out the buffer, traced + * events left over from this module may be passed to + * the new module events and unexpected results may + * occur. + */ + tracing_reset_all_online_cpus(); + mutex_unlock(&trace_types_lock); + } + return ret; } EXPORT_SYMBOL_GPL(synth_event_delete); @@ -1420,7 +1757,41 @@ int synth_event_run_command(const char *command) return trace_run_command(command, create_or_delete_synth_event); } -static int synth_event_create(int argc, const char **argv) +static int synth_event_run_cmd(struct dynevent_cmd *cmd) +{ + struct synth_event *se; + int ret; + + ret = trace_run_command(cmd->buf, create_or_delete_synth_event); + if (ret) + return ret; + + se = find_synth_event(cmd->event_name); + if (WARN_ON(!se)) + return -ENOENT; + + se->mod = cmd->private_data; + + return ret; +} + +/** + * synth_event_cmd_init - Initialize a synthetic event command object + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @buf: A pointer to the buffer used to build the command + * @maxlen: The length of the buffer passed in @buf + * + * Initialize a synthetic event command object. Use this before + * calling any of the other dyenvent_cmd functions. + */ +void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen) +{ + dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH, + synth_event_run_cmd); +} +EXPORT_SYMBOL_GPL(synth_event_cmd_init); + +static int create_synth_event(int argc, const char **argv) { const char *name = argv[0]; int len; -- cgit v1.2.3 From 8dcc53ad956d2caf4c5c2dda196e6801b71a3154 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 29 Jan 2020 12:59:27 -0600 Subject: tracing: Add synth_event_trace() and related functions Add an exported function named synth_event_trace(), allowing modules or other kernel code to trace synthetic events. Also added are several functions that allow the same functionality to be broken out in a piecewise fashion, which are useful in situations where tracing an event from a full array of values would be cumbersome. Those functions are synth_event_trace_start/end() and synth_event_add_(next)_val(). 
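As a sketch of the two styles (the 'file' pointer is assumed to come from trace_get_event_file(), and the event is assumed to have a u64 field followed by a string field; names and values are placeholders):

    static int trace_sample_event(struct trace_event_file *file, u64 delay_ns)
    {
            struct synth_event_trace_state state;
            int ret;

            /* All values at once; string values are pointers cast to u64 */
            ret = synth_event_trace(file, 2, delay_ns, (u64)"some_task");
            if (ret)
                    return ret;

            /* Or piecewise: reserve the event, then fill in fields */
            ret = synth_event_trace_start(file, &state);
            if (ret)
                    return ret;

            ret = synth_event_add_next_val(delay_ns, &state);
            if (!ret)
                    ret = synth_event_add_next_val((u64)"some_task", &state);

            /* Must always be called, whether or not adding values succeeded */
            synth_event_trace_end(&state);

            return ret;
    }

Note that synth_event_add_next_val() and synth_event_add_val() cannot be mixed within a single event trace; use one or the other for all fields of a given event.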
Link: http://lkml.kernel.org/r/7a84de5f1854acf4144b57efe835ca645afa764f.1580323897.git.zanussi@kernel.org Acked-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 26 +++ kernel/trace/trace_events_hist.c | 463 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 489 insertions(+) (limited to 'kernel/trace') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 07b83532a3c6..bf03d12efb28 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -416,6 +416,32 @@ extern int synth_event_add_fields(struct dynevent_cmd *cmd, #define synth_event_gen_cmd_end(cmd) \ dynevent_create(cmd) +struct synth_event; + +struct synth_event_trace_state { + struct trace_event_buffer fbuffer; + struct synth_trace_event *entry; + struct trace_buffer *buffer; + struct synth_event *event; + unsigned int cur_field; + unsigned int n_u64; + bool enabled; + bool add_next; + bool add_name; +}; + +extern int synth_event_trace(struct trace_event_file *file, + unsigned int n_vals, ...); +extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals, + unsigned int n_vals); +extern int synth_event_trace_start(struct trace_event_file *file, + struct synth_event_trace_state *trace_state); +extern int synth_event_add_next_val(u64 val, + struct synth_event_trace_state *trace_state); +extern int synth_event_add_val(const char *field_name, u64 val, + struct synth_event_trace_state *trace_state); +extern int synth_event_trace_end(struct synth_event_trace_state *trace_state); + /* * Event file flags: * ENABLED - The event is enabled diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 5a910bb193e9..4d56a4f0310d 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -398,6 +398,7 @@ struct synth_field { char *type; char *name; size_t size; + unsigned int offset; bool is_signed; bool is_string; }; @@ -668,6 +669,8 @@ static int synth_event_define_fields(struct trace_event_call *call) if (ret) break; + event->fields[i]->offset = n_u64; + if (event->fields[i]->is_string) { offset += STR_VAR_LEN_MAX; n_u64 += STR_VAR_LEN_MAX / sizeof(u64); @@ -1791,6 +1794,466 @@ void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen) } EXPORT_SYMBOL_GPL(synth_event_cmd_init); +/** + * synth_event_trace - Trace a synthetic event + * @file: The trace_event_file representing the synthetic event + * @n_vals: The number of values in vals + * @args: Variable number of args containing the event values + * + * Trace a synthetic event using the values passed in the variable + * argument list. + * + * The argument list should be a list 'n_vals' u64 values. The number + * of vals must match the number of field in the synthetic event, and + * must be in the same order as the synthetic event fields. + * + * All vals should be cast to u64, and string vals are just pointers + * to strings, cast to u64. Strings will be copied into space + * reserved in the event for the string, using these pointers. + * + * Return: 0 on success, err otherwise. + */ +int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...) 
+{ + struct trace_event_buffer fbuffer; + struct synth_trace_event *entry; + struct trace_buffer *buffer; + struct synth_event *event; + unsigned int i, n_u64; + int fields_size = 0; + va_list args; + int ret = 0; + + /* + * Normal event generation doesn't get called at all unless + * the ENABLED bit is set (which attaches the probe thus + * allowing this code to be called, etc). Because this is + * called directly by the user, we don't have that but we + * still need to honor not logging when disabled. + */ + if (!(file->flags & EVENT_FILE_FL_ENABLED)) + return 0; + + event = file->event_call->data; + + if (n_vals != event->n_fields) + return -EINVAL; + + if (trace_trigger_soft_disabled(file)) + return -EINVAL; + + fields_size = event->n_u64 * sizeof(u64); + + /* + * Avoid ring buffer recursion detection, as this event + * is being performed within another event. + */ + buffer = file->tr->array_buffer.buffer; + ring_buffer_nest_start(buffer); + + entry = trace_event_buffer_reserve(&fbuffer, file, + sizeof(*entry) + fields_size); + if (!entry) { + ret = -EINVAL; + goto out; + } + + va_start(args, n_vals); + for (i = 0, n_u64 = 0; i < event->n_fields; i++) { + u64 val; + + val = va_arg(args, u64); + + if (event->fields[i]->is_string) { + char *str_val = (char *)(long)val; + char *str_field = (char *)&entry->fields[n_u64]; + + strscpy(str_field, str_val, STR_VAR_LEN_MAX); + n_u64 += STR_VAR_LEN_MAX / sizeof(u64); + } else { + entry->fields[n_u64] = val; + n_u64++; + } + } + va_end(args); + + trace_event_buffer_commit(&fbuffer); +out: + ring_buffer_nest_end(buffer); + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_trace); + +/** + * synth_event_trace_array - Trace a synthetic event from an array + * @file: The trace_event_file representing the synthetic event + * @vals: Array of values + * @n_vals: The number of values in vals + * + * Trace a synthetic event using the values passed in as 'vals'. + * + * The 'vals' array is just an array of 'n_vals' u64. The number of + * vals must match the number of field in the synthetic event, and + * must be in the same order as the synthetic event fields. + * + * All vals should be cast to u64, and string vals are just pointers + * to strings, cast to u64. Strings will be copied into space + * reserved in the event for the string, using these pointers. + * + * Return: 0 on success, err otherwise. + */ +int synth_event_trace_array(struct trace_event_file *file, u64 *vals, + unsigned int n_vals) +{ + struct trace_event_buffer fbuffer; + struct synth_trace_event *entry; + struct trace_buffer *buffer; + struct synth_event *event; + unsigned int i, n_u64; + int fields_size = 0; + int ret = 0; + + /* + * Normal event generation doesn't get called at all unless + * the ENABLED bit is set (which attaches the probe thus + * allowing this code to be called, etc). Because this is + * called directly by the user, we don't have that but we + * still need to honor not logging when disabled. + */ + if (!(file->flags & EVENT_FILE_FL_ENABLED)) + return 0; + + event = file->event_call->data; + + if (n_vals != event->n_fields) + return -EINVAL; + + if (trace_trigger_soft_disabled(file)) + return -EINVAL; + + fields_size = event->n_u64 * sizeof(u64); + + /* + * Avoid ring buffer recursion detection, as this event + * is being performed within another event. 
+ */ + buffer = file->tr->array_buffer.buffer; + ring_buffer_nest_start(buffer); + + entry = trace_event_buffer_reserve(&fbuffer, file, + sizeof(*entry) + fields_size); + if (!entry) { + ret = -EINVAL; + goto out; + } + + for (i = 0, n_u64 = 0; i < event->n_fields; i++) { + if (event->fields[i]->is_string) { + char *str_val = (char *)(long)vals[i]; + char *str_field = (char *)&entry->fields[n_u64]; + + strscpy(str_field, str_val, STR_VAR_LEN_MAX); + n_u64 += STR_VAR_LEN_MAX / sizeof(u64); + } else { + entry->fields[n_u64] = vals[i]; + n_u64++; + } + } + + trace_event_buffer_commit(&fbuffer); +out: + ring_buffer_nest_end(buffer); + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_trace_array); + +/** + * synth_event_trace_start - Start piecewise synthetic event trace + * @file: The trace_event_file representing the synthetic event + * @trace_state: A pointer to object tracking the piecewise trace state + * + * Start the trace of a synthetic event field-by-field rather than all + * at once. + * + * This function 'opens' an event trace, which means space is reserved + * for the event in the trace buffer, after which the event's + * individual field values can be set through either + * synth_event_add_next_val() or synth_event_add_val(). + * + * A pointer to a trace_state object is passed in, which will keep + * track of the current event trace state until the event trace is + * closed (and the event finally traced) using + * synth_event_trace_end(). + * + * Note that synth_event_trace_end() must be called after all values + * have been added for each event trace, regardless of whether adding + * all field values succeeded or not. + * + * Note also that for a given event trace, all fields must be added + * using either synth_event_add_next_val() or synth_event_add_val() + * but not both together or interleaved. + * + * Return: 0 on success, err otherwise. + */ +int synth_event_trace_start(struct trace_event_file *file, + struct synth_event_trace_state *trace_state) +{ + struct synth_trace_event *entry; + int fields_size = 0; + int ret = 0; + + if (!trace_state) { + ret = -EINVAL; + goto out; + } + + memset(trace_state, '\0', sizeof(*trace_state)); + + /* + * Normal event tracing doesn't get called at all unless the + * ENABLED bit is set (which attaches the probe thus allowing + * this code to be called, etc). Because this is called + * directly by the user, we don't have that but we still need + * to honor not logging when disabled. For the the iterated + * trace case, we save the enabed state upon start and just + * ignore the following data calls. + */ + if (!(file->flags & EVENT_FILE_FL_ENABLED)) { + trace_state->enabled = false; + goto out; + } + + trace_state->enabled = true; + + trace_state->event = file->event_call->data; + + if (trace_trigger_soft_disabled(file)) { + ret = -EINVAL; + goto out; + } + + fields_size = trace_state->event->n_u64 * sizeof(u64); + + /* + * Avoid ring buffer recursion detection, as this event + * is being performed within another event. 
+ */ + trace_state->buffer = file->tr->array_buffer.buffer; + ring_buffer_nest_start(trace_state->buffer); + + entry = trace_event_buffer_reserve(&trace_state->fbuffer, file, + sizeof(*entry) + fields_size); + if (!entry) { + ret = -EINVAL; + goto out; + } + + trace_state->entry = entry; +out: + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_trace_start); + +static int save_synth_val(struct synth_field *field, u64 val, + struct synth_event_trace_state *trace_state) +{ + struct synth_trace_event *entry = trace_state->entry; + + if (field->is_string) { + char *str_val = (char *)(long)val; + char *str_field; + + if (!str_val) + return -EINVAL; + + str_field = (char *)&entry->fields[field->offset]; + strscpy(str_field, str_val, STR_VAR_LEN_MAX); + } else + entry->fields[field->offset] = val; + + return 0; +} + +/** + * synth_event_add_next_val - Add the next field's value to an open synth trace + * @val: The value to set the next field to + * @trace_state: A pointer to object tracking the piecewise trace state + * + * Set the value of the next field in an event that's been opened by + * synth_event_trace_start(). + * + * The val param should be the value cast to u64. If the value points + * to a string, the val param should be a char * cast to u64. + * + * This function assumes all the fields in an event are to be set one + * after another - successive calls to this function are made, one for + * each field, in the order of the fields in the event, until all + * fields have been set. If you'd rather set each field individually + * without regard to ordering, synth_event_add_val() can be used + * instead. + * + * Note however that synth_event_add_next_val() and + * synth_event_add_val() can't be intermixed for a given event trace - + * one or the other but not both can be used at the same time. + * + * Note also that synth_event_trace_end() must be called after all + * values have been added for each event trace, regardless of whether + * adding all field values succeeded or not. + * + * Return: 0 on success, err otherwise. + */ +int synth_event_add_next_val(u64 val, + struct synth_event_trace_state *trace_state) +{ + struct synth_field *field; + struct synth_event *event; + int ret = 0; + + if (!trace_state) { + ret = -EINVAL; + goto out; + } + + /* can't mix add_next_synth_val() with add_synth_val() */ + if (trace_state->add_name) { + ret = -EINVAL; + goto out; + } + trace_state->add_next = true; + + if (!trace_state->enabled) + goto out; + + event = trace_state->event; + + if (trace_state->cur_field >= event->n_fields) { + ret = -EINVAL; + goto out; + } + + field = event->fields[trace_state->cur_field++]; + ret = save_synth_val(field, val, trace_state); + out: + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_add_next_val); + +static struct synth_field *find_synth_field(struct synth_event *event, + const char *field_name) +{ + struct synth_field *field = NULL; + unsigned int i; + + for (i = 0; i < event->n_fields; i++) { + field = event->fields[i]; + if (strcmp(field->name, field_name) == 0) + return field; + } + + return NULL; +} + +/** + * synth_event_add_val - Add a named field's value to an open synth trace + * @field_name: The name of the synthetic event field value to set + * @val: The value to set the next field to + * @trace_state: A pointer to object tracking the piecewise trace state + * + * Set the value of the named field in an event that's been opened by + * synth_event_trace_start(). + * + * The val param should be the value cast to u64. 
If the value points + * to a string, the val param should be a char * cast to u64. + * + * This function looks up the field name, and if found, sets the field + * to the specified value. This lookup makes this function more + * expensive than synth_event_add_next_val(), so use that or the + * none-piecewise synth_event_trace() instead if efficiency is more + * important. + * + * Note however that synth_event_add_next_val() and + * synth_event_add_val() can't be intermixed for a given event trace - + * one or the other but not both can be used at the same time. + * + * Note also that synth_event_trace_end() must be called after all + * values have been added for each event trace, regardless of whether + * adding all field values succeeded or not. + * + * Return: 0 on success, err otherwise. + */ +int synth_event_add_val(const char *field_name, u64 val, + struct synth_event_trace_state *trace_state) +{ + struct synth_trace_event *entry; + struct synth_event *event; + struct synth_field *field; + int ret = 0; + + if (!trace_state) { + ret = -EINVAL; + goto out; + } + + /* can't mix add_next_synth_val() with add_synth_val() */ + if (trace_state->add_next) { + ret = -EINVAL; + goto out; + } + trace_state->add_name = true; + + if (!trace_state->enabled) + goto out; + + event = trace_state->event; + entry = trace_state->entry; + + field = find_synth_field(event, field_name); + if (!field) { + ret = -EINVAL; + goto out; + } + + ret = save_synth_val(field, val, trace_state); + out: + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_add_val); + +/** + * synth_event_trace_end - End piecewise synthetic event trace + * @trace_state: A pointer to object tracking the piecewise trace state + * + * End the trace of a synthetic event opened by + * synth_event_trace__start(). + * + * This function 'closes' an event trace, which basically means that + * it commits the reserved event and cleans up other loose ends. + * + * A pointer to a trace_state object is passed in, which will keep + * track of the current event trace state opened with + * synth_event_trace_start(). + * + * Note that this function must be called after all values have been + * added for each event trace, regardless of whether adding all field + * values succeeded or not. + * + * Return: 0 on success, err otherwise. + */ +int synth_event_trace_end(struct synth_event_trace_state *trace_state) +{ + if (!trace_state) + return -EINVAL; + + trace_event_buffer_commit(&trace_state->fbuffer); + + ring_buffer_nest_end(trace_state->buffer); + + return 0; +} +EXPORT_SYMBOL_GPL(synth_event_trace_end); + static int create_synth_event(int argc, const char **argv) { const char *name = argv[0]; -- cgit v1.2.3 From 9fe41efaca08416657efa8731c0d47ccb6a3f3eb Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 29 Jan 2020 12:59:28 -0600 Subject: tracing: Add synth event generation test module Add a test module that checks the basic functionality of the in-kernel synthetic event generation API by generating and tracing synthetic events from a module. 
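With CONFIG_SYNTH_EVENT_GEN_TEST=m, the test can be exercised roughly as follows (the grep pattern simply picks out the events the module creates; the path assumes debugfs is mounted at /sys/kernel/debug):

    # insmod kernel/trace/synth_event_gen_test.ko
    # grep -e gen_synth_test -e empty_synth_test -e create_synth_test \
          /sys/kernel/debug/tracing/trace
    # rmmod synth_event_gen_test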
Link: http://lkml.kernel.org/r/fcb4dd9eb9eefb70ab20538d3529d51642389664.1580323897.git.zanussi@kernel.org Acked-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/Kconfig | 13 + kernel/trace/Makefile | 1 + kernel/trace/synth_event_gen_test.c | 523 ++++++++++++++++++++++++++++++++++++ 3 files changed, 537 insertions(+) create mode 100644 kernel/trace/synth_event_gen_test.c (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 75326d8ab1af..4f2041166a2f 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -774,6 +774,19 @@ config PREEMPTIRQ_DELAY_TEST If unsure, say N +config SYNTH_EVENT_GEN_TEST + tristate "Test module for in-kernel synthetic event generation" + depends on HIST_TRIGGERS + help + This option creates a test module to check the base + functionality of in-kernel synthetic event definition and + generation. + + To test, insert the module, and then check the trace buffer + for the generated sample events. + + If unsure, say N. + config TRACE_EVAL_MAP_FILE bool "Show eval mappings for trace events" depends on TRACING diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 395e2db9c742..32012f50fb79 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -44,6 +44,7 @@ obj-$(CONFIG_TRACING) += trace_stat.o obj-$(CONFIG_TRACING) += trace_printk.o obj-$(CONFIG_TRACING_MAP) += tracing_map.o obj-$(CONFIG_PREEMPTIRQ_DELAY_TEST) += preemptirq_delay_test.o +obj-$(CONFIG_SYNTH_EVENT_GEN_TEST) += synth_event_gen_test.o obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o obj-$(CONFIG_PREEMPTIRQ_TRACEPOINTS) += trace_preemptirq.o diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c new file mode 100644 index 000000000000..4aefe003cb7c --- /dev/null +++ b/kernel/trace/synth_event_gen_test.c @@ -0,0 +1,523 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test module for in-kernel sythetic event creation and generation. + * + * Copyright (C) 2019 Tom Zanussi + */ + +#include +#include + +/* + * This module is a simple test of basic functionality for in-kernel + * synthetic event creation and generation, the first and second tests + * using synth_event_gen_cmd_start() and synth_event_add_field(), the + * third uses synth_event_create() to do it all at once with a static + * field array. + * + * Following that are a few examples using the created events to test + * various ways of tracing a synthetic event. + * + * To test, select CONFIG_SYNTH_EVENT_GEN_TEST and build the module. + * Then: + * + * # insmod kernel/trace/synth_event_gen_test.ko + * # cat /sys/kernel/debug/tracing/trace + * + * You should see several events in the trace buffer - + * "create_synth_test", "empty_synth_test", and several instances of + * "gen_synth_test". + * + * To remove the events, remove the module: + * + * # rmmod synth_event_gen_test + * + */ + +static struct trace_event_file *create_synth_test; +static struct trace_event_file *empty_synth_test; +static struct trace_event_file *gen_synth_test; + +/* + * Test to make sure we can create a synthetic event, then add more + * fields. 
+ */ +static int __init test_gen_synth_cmd(void) +{ + struct dynevent_cmd cmd; + u64 vals[7]; + char *buf; + int ret; + + /* Create a buffer to hold the generated command */ + buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Before generating the command, initialize the cmd object */ + synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN); + + /* + * Create the empty gen_synth_test synthetic event with the + * first 4 fields. + */ + ret = synth_event_gen_cmd_start(&cmd, "gen_synth_test", THIS_MODULE, + "pid_t", "next_pid_field", + "char[16]", "next_comm_field", + "u64", "ts_ns", + "u64", "ts_ms"); + if (ret) + goto free; + + /* Use synth_event_add_field to add the rest of the fields */ + + ret = synth_event_add_field(&cmd, "unsigned int", "cpu"); + if (ret) + goto free; + + ret = synth_event_add_field(&cmd, "char[64]", "my_string_field"); + if (ret) + goto free; + + ret = synth_event_add_field(&cmd, "int", "my_int_field"); + if (ret) + goto free; + + ret = synth_event_gen_cmd_end(&cmd); + if (ret) + goto free; + + /* + * Now get the gen_synth_test event file. We need to prevent + * the instance and event from disappearing from underneath + * us, which trace_get_event_file() does (though in this case + * we're using the top-level instance which never goes away). + */ + gen_synth_test = trace_get_event_file(NULL, "synthetic", + "gen_synth_test"); + if (IS_ERR(gen_synth_test)) { + ret = PTR_ERR(gen_synth_test); + goto delete; + } + + /* Enable the event or you won't see anything */ + ret = trace_array_set_clr_event(gen_synth_test->tr, + "synthetic", "gen_synth_test", true); + if (ret) { + trace_put_event_file(gen_synth_test); + goto delete; + } + + /* Create some bogus values just for testing */ + + vals[0] = 777; /* next_pid_field */ + vals[1] = (u64)"hula hoops"; /* next_comm_field */ + vals[2] = 1000000; /* ts_ns */ + vals[3] = 1000; /* ts_ms */ + vals[4] = smp_processor_id(); /* cpu */ + vals[5] = (u64)"thneed"; /* my_string_field */ + vals[6] = 598; /* my_int_field */ + + /* Now generate a gen_synth_test event */ + ret = synth_event_trace_array(gen_synth_test, vals, ARRAY_SIZE(vals)); + out: + return ret; + delete: + /* We got an error after creating the event, delete it */ + synth_event_delete("gen_synth_test"); + free: + kfree(buf); + + goto out; +} + +/* + * Test to make sure we can create an initially empty synthetic event, + * then add all the fields. + */ +static int __init test_empty_synth_event(void) +{ + struct dynevent_cmd cmd; + u64 vals[7]; + char *buf; + int ret; + + /* Create a buffer to hold the generated command */ + buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Before generating the command, initialize the cmd object */ + synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN); + + /* + * Create the empty_synth_test synthetic event with no fields. 
+ */ + ret = synth_event_gen_cmd_start(&cmd, "empty_synth_test", THIS_MODULE); + if (ret) + goto free; + + /* Use synth_event_add_field to add all of the fields */ + + ret = synth_event_add_field(&cmd, "pid_t", "next_pid_field"); + if (ret) + goto free; + + ret = synth_event_add_field(&cmd, "char[16]", "next_comm_field"); + if (ret) + goto free; + + ret = synth_event_add_field(&cmd, "u64", "ts_ns"); + if (ret) + goto free; + + ret = synth_event_add_field(&cmd, "u64", "ts_ms"); + if (ret) + goto free; + + ret = synth_event_add_field(&cmd, "unsigned int", "cpu"); + if (ret) + goto free; + + ret = synth_event_add_field(&cmd, "char[64]", "my_string_field"); + if (ret) + goto free; + + ret = synth_event_add_field(&cmd, "int", "my_int_field"); + if (ret) + goto free; + + /* All fields have been added, close and register the synth event */ + + ret = synth_event_gen_cmd_end(&cmd); + if (ret) + goto free; + + /* + * Now get the empty_synth_test event file. We need to + * prevent the instance and event from disappearing from + * underneath us, which trace_get_event_file() does (though in + * this case we're using the top-level instance which never + * goes away). + */ + empty_synth_test = trace_get_event_file(NULL, "synthetic", + "empty_synth_test"); + if (IS_ERR(empty_synth_test)) { + ret = PTR_ERR(empty_synth_test); + goto delete; + } + + /* Enable the event or you won't see anything */ + ret = trace_array_set_clr_event(empty_synth_test->tr, + "synthetic", "empty_synth_test", true); + if (ret) { + trace_put_event_file(empty_synth_test); + goto delete; + } + + /* Create some bogus values just for testing */ + + vals[0] = 777; /* next_pid_field */ + vals[1] = (u64)"tiddlywinks"; /* next_comm_field */ + vals[2] = 1000000; /* ts_ns */ + vals[3] = 1000; /* ts_ms */ + vals[4] = smp_processor_id(); /* cpu */ + vals[5] = (u64)"thneed_2.0"; /* my_string_field */ + vals[6] = 399; /* my_int_field */ + + /* Now trace an empty_synth_test event */ + ret = synth_event_trace_array(empty_synth_test, vals, ARRAY_SIZE(vals)); + out: + return ret; + delete: + /* We got an error after creating the event, delete it */ + synth_event_delete("empty_synth_test"); + free: + kfree(buf); + + goto out; +} + +static struct synth_field_desc create_synth_test_fields[] = { + { .type = "pid_t", .name = "next_pid_field" }, + { .type = "char[16]", .name = "next_comm_field" }, + { .type = "u64", .name = "ts_ns" }, + { .type = "u64", .name = "ts_ms" }, + { .type = "unsigned int", .name = "cpu" }, + { .type = "char[64]", .name = "my_string_field" }, + { .type = "int", .name = "my_int_field" }, +}; + +/* + * Test synthetic event creation all at once from array of field + * descriptors. + */ +static int __init test_create_synth_event(void) +{ + u64 vals[7]; + int ret; + + /* Create the create_synth_test event with the fields above */ + ret = synth_event_create("create_synth_test", + create_synth_test_fields, + ARRAY_SIZE(create_synth_test_fields), + THIS_MODULE); + if (ret) + goto out; + + /* + * Now get the create_synth_test event file. We need to + * prevent the instance and event from disappearing from + * underneath us, which trace_get_event_file() does (though in + * this case we're using the top-level instance which never + * goes away). 
+ */ + create_synth_test = trace_get_event_file(NULL, "synthetic", + "create_synth_test"); + if (IS_ERR(create_synth_test)) { + ret = PTR_ERR(create_synth_test); + goto delete; + } + + /* Enable the event or you won't see anything */ + ret = trace_array_set_clr_event(create_synth_test->tr, + "synthetic", "create_synth_test", true); + if (ret) { + trace_put_event_file(create_synth_test); + goto delete; + } + + /* Create some bogus values just for testing */ + + vals[0] = 777; /* next_pid_field */ + vals[1] = (u64)"tiddlywinks"; /* next_comm_field */ + vals[2] = 1000000; /* ts_ns */ + vals[3] = 1000; /* ts_ms */ + vals[4] = smp_processor_id(); /* cpu */ + vals[5] = (u64)"thneed"; /* my_string_field */ + vals[6] = 398; /* my_int_field */ + + /* Now generate a create_synth_test event */ + ret = synth_event_trace_array(create_synth_test, vals, ARRAY_SIZE(vals)); + out: + return ret; + delete: + /* We got an error after creating the event, delete it */ + ret = synth_event_delete("create_synth_test"); + + goto out; +} + +/* + * Test tracing a synthetic event by reserving trace buffer space, + * then filling in fields one after another. + */ +static int __init test_add_next_synth_val(void) +{ + struct synth_event_trace_state trace_state; + int ret; + + /* Start by reserving space in the trace buffer */ + ret = synth_event_trace_start(gen_synth_test, &trace_state); + if (ret) + return ret; + + /* Write some bogus values into the trace buffer, one after another */ + + /* next_pid_field */ + ret = synth_event_add_next_val(777, &trace_state); + if (ret) + goto out; + + /* next_comm_field */ + ret = synth_event_add_next_val((u64)"slinky", &trace_state); + if (ret) + goto out; + + /* ts_ns */ + ret = synth_event_add_next_val(1000000, &trace_state); + if (ret) + goto out; + + /* ts_ms */ + ret = synth_event_add_next_val(1000, &trace_state); + if (ret) + goto out; + + /* cpu */ + ret = synth_event_add_next_val(smp_processor_id(), &trace_state); + if (ret) + goto out; + + /* my_string_field */ + ret = synth_event_add_next_val((u64)"thneed_2.01", &trace_state); + if (ret) + goto out; + + /* my_int_field */ + ret = synth_event_add_next_val(395, &trace_state); + out: + /* Finally, commit the event */ + ret = synth_event_trace_end(&trace_state); + + return ret; +} + +/* + * Test tracing a synthetic event by reserving trace buffer space, + * then filling in fields using field names, which can be done in any + * order. 
+ */ +static int __init test_add_synth_val(void) +{ + struct synth_event_trace_state trace_state; + int ret; + + /* Start by reserving space in the trace buffer */ + ret = synth_event_trace_start(gen_synth_test, &trace_state); + if (ret) + return ret; + + /* Write some bogus values into the trace buffer, using field names */ + + ret = synth_event_add_val("ts_ns", 1000000, &trace_state); + if (ret) + goto out; + + ret = synth_event_add_val("ts_ms", 1000, &trace_state); + if (ret) + goto out; + + ret = synth_event_add_val("cpu", smp_processor_id(), &trace_state); + if (ret) + goto out; + + ret = synth_event_add_val("next_pid_field", 777, &trace_state); + if (ret) + goto out; + + ret = synth_event_add_val("next_comm_field", (u64)"silly putty", + &trace_state); + if (ret) + goto out; + + ret = synth_event_add_val("my_string_field", (u64)"thneed_9", + &trace_state); + if (ret) + goto out; + + ret = synth_event_add_val("my_int_field", 3999, &trace_state); + out: + /* Finally, commit the event */ + ret = synth_event_trace_end(&trace_state); + + return ret; +} + +/* + * Test tracing a synthetic event all at once from array of values. + */ +static int __init test_trace_synth_event(void) +{ + int ret; + + /* Trace some bogus values just for testing */ + ret = synth_event_trace(create_synth_test, 7, /* number of values */ + 444, /* next_pid_field */ + (u64)"clackers", /* next_comm_field */ + 1000000, /* ts_ns */ + 1000, /* ts_ms */ + smp_processor_id(), /* cpu */ + (u64)"Thneed", /* my_string_field */ + 999); /* my_int_field */ + return ret; +} + +static int __init synth_event_gen_test_init(void) +{ + int ret; + + ret = test_gen_synth_cmd(); + if (ret) + return ret; + + ret = test_empty_synth_event(); + if (ret) { + WARN_ON(trace_array_set_clr_event(gen_synth_test->tr, + "synthetic", + "gen_synth_test", false)); + trace_put_event_file(gen_synth_test); + WARN_ON(synth_event_delete("gen_synth_test")); + goto out; + } + + ret = test_create_synth_event(); + if (ret) { + WARN_ON(trace_array_set_clr_event(gen_synth_test->tr, + "synthetic", + "gen_synth_test", false)); + trace_put_event_file(gen_synth_test); + WARN_ON(synth_event_delete("gen_synth_test")); + + WARN_ON(trace_array_set_clr_event(empty_synth_test->tr, + "synthetic", + "empty_synth_test", false)); + trace_put_event_file(empty_synth_test); + WARN_ON(synth_event_delete("empty_synth_test")); + goto out; + } + + ret = test_add_next_synth_val(); + WARN_ON(ret); + + ret = test_add_synth_val(); + WARN_ON(ret); + + ret = test_trace_synth_event(); + WARN_ON(ret); + out: + return ret; +} + +static void __exit synth_event_gen_test_exit(void) +{ + /* Disable the event or you can't remove it */ + WARN_ON(trace_array_set_clr_event(gen_synth_test->tr, + "synthetic", + "gen_synth_test", false)); + + /* Now give the file and instance back */ + trace_put_event_file(gen_synth_test); + + /* Now unregister and free the synthetic event */ + WARN_ON(synth_event_delete("gen_synth_test")); + + /* Disable the event or you can't remove it */ + WARN_ON(trace_array_set_clr_event(empty_synth_test->tr, + "synthetic", + "empty_synth_test", false)); + + /* Now give the file and instance back */ + trace_put_event_file(empty_synth_test); + + /* Now unregister and free the synthetic event */ + WARN_ON(synth_event_delete("empty_synth_test")); + + /* Disable the event or you can't remove it */ + WARN_ON(trace_array_set_clr_event(create_synth_test->tr, + "synthetic", + "create_synth_test", false)); + + /* Now give the file and instance back */ + 
trace_put_event_file(create_synth_test); + + /* Now unregister and free the synthetic event */ + WARN_ON(synth_event_delete("create_synth_test")); +} + +module_init(synth_event_gen_test_init) +module_exit(synth_event_gen_test_exit) + +MODULE_AUTHOR("Tom Zanussi"); +MODULE_DESCRIPTION("synthetic event generation test"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 2a588dd1d5d649a183a2ff6fa1b80e870cf821d8 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 29 Jan 2020 12:59:29 -0600 Subject: tracing: Add kprobe event command generation functions Add functions used to generate kprobe event commands, built on top of the dynevent_cmd interface. kprobe_event_gen_cmd_start() is used to create a kprobe event command using a variable arg list, and kretprobe_event_gen_cmd_start() does the same for kretprobe event commands. kprobe_event_add_fields() can be used to add single fields one by one or as a group. Once all desired fields are added, kprobe_event_gen_cmd_end() or kretprobe_event_gen_cmd_end() respectively are used to actually execute the command and create the event. Link: http://lkml.kernel.org/r/95cc4696502bb6017f9126f306a45ad19b4cc14f.1580323897.git.zanussi@kernel.org Acked-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 31 +++++++++ kernel/trace/trace_kprobe.c | 161 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 192 insertions(+) (limited to 'kernel/trace') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index bf03d12efb28..7c307a7c9c6a 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -358,6 +358,7 @@ extern void trace_put_event_file(struct trace_event_file *file); enum dynevent_type { DYNEVENT_TYPE_SYNTH = 1, + DYNEVENT_TYPE_KPROBE, DYNEVENT_TYPE_NONE, }; @@ -442,6 +443,36 @@ extern int synth_event_add_val(const char *field_name, u64 val, struct synth_event_trace_state *trace_state); extern int synth_event_trace_end(struct synth_event_trace_state *trace_state); +extern int kprobe_event_delete(const char *name); + +extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd, + char *buf, int maxlen); + +#define kprobe_event_gen_cmd_start(cmd, name, loc, ...) \ + __kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL) + +#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...) \ + __kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL) + +extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, + bool kretprobe, + const char *name, + const char *loc, ...); + +#define kprobe_event_add_fields(cmd, ...) 
\ + __kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL) + +#define kprobe_event_add_field(cmd, field) \ + __kprobe_event_add_fields(cmd, field, NULL) + +extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...); + +#define kprobe_event_gen_cmd_end(cmd) \ + dynevent_create(cmd) + +#define kretprobe_event_gen_cmd_end(cmd) \ + dynevent_create(cmd) + /* * Event file flags: * ENABLED - The event is enabled diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index bf20cd7f2666..f43548b466d0 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -906,6 +906,167 @@ int trace_kprobe_run_command(const char *command) return trace_run_command(command, create_or_delete_trace_kprobe); } +static int trace_kprobe_run_cmd(struct dynevent_cmd *cmd) +{ + return trace_run_command(cmd->buf, create_or_delete_trace_kprobe); +} + +/** + * kprobe_event_cmd_init - Initialize a kprobe event command object + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @buf: A pointer to the buffer used to build the command + * @maxlen: The length of the buffer passed in @buf + * + * Initialize a synthetic event command object. Use this before + * calling any of the other kprobe_event functions. + */ +void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen) +{ + dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE, + trace_kprobe_run_cmd); +} +EXPORT_SYMBOL_GPL(kprobe_event_cmd_init); + +/** + * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @name: The name of the kprobe event + * @loc: The location of the kprobe event + * @kretprobe: Is this a return probe? + * @args: Variable number of arg (pairs), one pair for each field + * + * NOTE: Users normally won't want to call this function directly, but + * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically + * adds a NULL to the end of the arg list. If this function is used + * directly, make sure the last arg in the variable arg list is NULL. + * + * Generate a kprobe event command to be executed by + * kprobe_event_gen_cmd_end(). This function can be used to generate the + * complete command or only the first part of it; in the latter case, + * kprobe_event_add_fields() can be used to add more fields following this. + * + * Return: 0 if successful, error otherwise. + */ +int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe, + const char *name, const char *loc, ...) 
+{ + char buf[MAX_EVENT_NAME_LEN]; + struct dynevent_arg arg; + va_list args; + int ret; + + if (cmd->type != DYNEVENT_TYPE_KPROBE) + return -EINVAL; + + if (kretprobe) + snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name); + else + snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name); + + ret = dynevent_str_add(cmd, buf); + if (ret) + return ret; + + dynevent_arg_init(&arg, NULL, 0); + arg.str = loc; + ret = dynevent_arg_add(cmd, &arg); + if (ret) + return ret; + + va_start(args, loc); + for (;;) { + const char *field; + + field = va_arg(args, const char *); + if (!field) + break; + + if (++cmd->n_fields > MAX_TRACE_ARGS) { + ret = -EINVAL; + break; + } + + arg.str = field; + ret = dynevent_arg_add(cmd, &arg); + if (ret) + break; + } + va_end(args); + + return ret; +} +EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start); + +/** + * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @args: Variable number of arg (pairs), one pair for each field + * + * NOTE: Users normally won't want to call this function directly, but + * rather use the kprobe_event_add_fields() wrapper, which + * automatically adds a NULL to the end of the arg list. If this + * function is used directly, make sure the last arg in the variable + * arg list is NULL. + * + * Add probe fields to an existing kprobe command using a variable + * list of args. Fields are added in the same order they're listed. + * + * Return: 0 if successful, error otherwise. + */ +int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...) +{ + struct dynevent_arg arg; + va_list args; + int ret; + + if (cmd->type != DYNEVENT_TYPE_KPROBE) + return -EINVAL; + + dynevent_arg_init(&arg, NULL, 0); + + va_start(args, cmd); + for (;;) { + const char *field; + + field = va_arg(args, const char *); + if (!field) + break; + + if (++cmd->n_fields > MAX_TRACE_ARGS) { + ret = -EINVAL; + break; + } + + arg.str = field; + ret = dynevent_arg_add(cmd, &arg); + if (ret) + break; + } + va_end(args); + + return ret; +} +EXPORT_SYMBOL_GPL(__kprobe_event_add_fields); + +/** + * kprobe_event_delete - Delete a kprobe event + * @name: The name of the kprobe event to delete + * + * Delete a kprobe event with the give @name from kernel code rather + * than directly from the command line. + * + * Return: 0 if successful, error otherwise. + */ +int kprobe_event_delete(const char *name) +{ + char buf[MAX_EVENT_NAME_LEN]; + + snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name); + + return trace_run_command(buf, create_or_delete_trace_kprobe); +} +EXPORT_SYMBOL_GPL(kprobe_event_delete); + static int trace_kprobe_release(struct dyn_event *ev) { struct trace_kprobe *tk = to_trace_kprobe(ev); -- cgit v1.2.3 From 29a15481054681fa2d450b60a6feea8e6ca6f511 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 29 Jan 2020 12:59:30 -0600 Subject: tracing: Change trace_boot to use kprobe_event interface Have trace_boot_add_kprobe_event() use the kprobe_event interface. Also, rename kprobe_event_run_cmd() to kprobe_event_run_command() now that trace_boot's version is gone. 
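The conversion follows the usual pattern for users of the kprobe_event interface; roughly (a sketch with illustrative event and probe-arg names, not the exact trace_boot code, which takes its event name and probe strings from the boot config and uses a stack buffer):

    static int __init create_sample_kprobe_event(void)
    {
            struct dynevent_cmd cmd;
            char *buf;
            int ret;

            buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

            /* Builds "p:kprobes/sample_open do_sys_open dfd=%ax ..." */
            ret = kprobe_event_gen_cmd_start(&cmd, "sample_open",
                                             "do_sys_open", "dfd=%ax");
            if (!ret)
                    ret = kprobe_event_add_fields(&cmd, "filename=%dx",
                                                  "flags=%cx");
            if (!ret)
                    ret = kprobe_event_gen_cmd_end(&cmd);

            kfree(buf);
            return ret;
    }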
Link: http://lkml.kernel.org/r/af5429d11291ab1e9a85a0ff944af3b2bcf193c7.1580323897.git.zanussi@kernel.org Acked-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 35 +++++++++++++++-------------------- kernel/trace/trace_kprobe.c | 9 ++------- 2 files changed, 17 insertions(+), 27 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 4d37bf5c3742..2298a70cdda6 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -88,37 +88,32 @@ trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node) } #ifdef CONFIG_KPROBE_EVENTS -extern int trace_kprobe_run_command(const char *command); - static int __init trace_boot_add_kprobe_event(struct xbc_node *node, const char *event) { + struct dynevent_cmd cmd; struct xbc_node *anode; char buf[MAX_BUF_LEN]; const char *val; - char *p; - int len; + int ret; - len = snprintf(buf, ARRAY_SIZE(buf) - 1, "p:kprobes/%s ", event); - if (len >= ARRAY_SIZE(buf)) { - pr_err("Event name is too long: %s\n", event); - return -E2BIG; - } - p = buf + len; - len = ARRAY_SIZE(buf) - len; + kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN); + + ret = kprobe_event_gen_cmd_start(&cmd, event, NULL); + if (ret) + return ret; xbc_node_for_each_array_value(node, "probes", anode, val) { - if (strlcpy(p, val, len) >= len) { - pr_err("Probe definition is too long: %s\n", val); - return -E2BIG; - } - if (trace_kprobe_run_command(buf) < 0) { - pr_err("Failed to add probe: %s\n", buf); - return -EINVAL; - } + ret = kprobe_event_add_field(&cmd, val); + if (ret) + return ret; } - return 0; + ret = kprobe_event_gen_cmd_end(&cmd); + if (ret) + pr_err("Failed to add probe: %s\n", buf); + + return ret; } #else static inline int __init diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index f43548b466d0..307abb724a71 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -901,12 +901,7 @@ static int create_or_delete_trace_kprobe(int argc, char **argv) return ret == -ECANCELED ? -EINVAL : ret; } -int trace_kprobe_run_command(const char *command) -{ - return trace_run_command(command, create_or_delete_trace_kprobe); -} - -static int trace_kprobe_run_cmd(struct dynevent_cmd *cmd) +static int trace_kprobe_run_command(struct dynevent_cmd *cmd) { return trace_run_command(cmd->buf, create_or_delete_trace_kprobe); } @@ -923,7 +918,7 @@ static int trace_kprobe_run_cmd(struct dynevent_cmd *cmd) void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen) { dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE, - trace_kprobe_run_cmd); + trace_kprobe_run_command); } EXPORT_SYMBOL_GPL(kprobe_event_cmd_init); -- cgit v1.2.3 From 64836248dda20c8e7427b493f7e06d9bf8f58850 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 29 Jan 2020 12:59:31 -0600 Subject: tracing: Add kprobe event command generation test module Add a test module that checks the basic functionality of the in-kernel kprobe event command generation API by creating kprobe events from a module. 
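For reference, the kretprobe side of the interface that the test module exercises looks roughly like this (a condensed sketch; the event name is illustrative, while the probed function and fetch arg mirror what the test module uses):

    static int __init create_sample_kretprobe_event(void)
    {
            struct dynevent_cmd cmd;
            char *buf;
            int ret;

            buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

            /* Builds "r:kprobes/sample_open_ret do_sys_open $retval" */
            ret = kretprobe_event_gen_cmd_start(&cmd, "sample_open_ret",
                                                "do_sys_open", "$retval");
            if (!ret)
                    ret = kretprobe_event_gen_cmd_end(&cmd);

            kfree(buf);
            return ret;
    }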
Link: http://lkml.kernel.org/r/97e502b204f9dba948e3fa3a4315448298218787.1580323897.git.zanussi@kernel.org Acked-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/Kconfig | 12 ++ kernel/trace/Makefile | 1 + kernel/trace/kprobe_event_gen_test.c | 225 +++++++++++++++++++++++++++++++++++ 3 files changed, 238 insertions(+) create mode 100644 kernel/trace/kprobe_event_gen_test.c (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 4f2041166a2f..4484e783f68d 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -787,6 +787,18 @@ config SYNTH_EVENT_GEN_TEST If unsure, say N. +config KPROBE_EVENT_GEN_TEST + tristate "Test module for in-kernel kprobe event generation" + depends on KPROBE_EVENTS + help + This option creates a test module to check the base + functionality of in-kernel kprobe event definition. + + To test, insert the module, and then check the trace buffer + for the generated kprobe events. + + If unsure, say N. + config TRACE_EVAL_MAP_FILE bool "Show eval mappings for trace events" depends on TRACING diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 32012f50fb79..f9dcd19165fa 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_TRACING) += trace_printk.o obj-$(CONFIG_TRACING_MAP) += tracing_map.o obj-$(CONFIG_PREEMPTIRQ_DELAY_TEST) += preemptirq_delay_test.o obj-$(CONFIG_SYNTH_EVENT_GEN_TEST) += synth_event_gen_test.o +obj-$(CONFIG_KPROBE_EVENT_GEN_TEST) += kprobe_event_gen_test.o obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o obj-$(CONFIG_PREEMPTIRQ_TRACEPOINTS) += trace_preemptirq.o diff --git a/kernel/trace/kprobe_event_gen_test.c b/kernel/trace/kprobe_event_gen_test.c new file mode 100644 index 000000000000..18b0f1cbb947 --- /dev/null +++ b/kernel/trace/kprobe_event_gen_test.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test module for in-kernel kprobe event creation and generation. + * + * Copyright (C) 2019 Tom Zanussi + */ + +#include +#include + +/* + * This module is a simple test of basic functionality for in-kernel + * kprobe/kretprobe event creation. The first test uses + * kprobe_event_gen_cmd_start(), kprobe_event_add_fields() and + * kprobe_event_gen_cmd_end() to create a kprobe event, which is then + * enabled in order to generate trace output. The second creates a + * kretprobe event using kretprobe_event_gen_cmd_start() and + * kretprobe_event_gen_cmd_end(), and is also then enabled. + * + * To test, select CONFIG_KPROBE_EVENT_GEN_TEST and build the module. + * Then: + * + * # insmod kernel/trace/kprobe_event_gen_test.ko + * # cat /sys/kernel/debug/tracing/trace + * + * You should see many instances of the "gen_kprobe_test" and + * "gen_kretprobe_test" events in the trace buffer. + * + * To remove the events, remove the module: + * + * # rmmod kprobe_event_gen_test + * + */ + +static struct trace_event_file *gen_kprobe_test; +static struct trace_event_file *gen_kretprobe_test; + +/* + * Test to make sure we can create a kprobe event, then add more + * fields. 
+ */ +static int __init test_gen_kprobe_cmd(void) +{ + struct dynevent_cmd cmd; + char *buf; + int ret; + + /* Create a buffer to hold the generated command */ + buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Before generating the command, initialize the cmd object */ + kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN); + + /* + * Define the gen_kprobe_test event with the first 2 kprobe + * fields. + */ + ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test", + "do_sys_open", + "dfd=%ax", "filename=%dx"); + if (ret) + goto free; + + /* Use kprobe_event_add_fields to add the rest of the fields */ + + ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)"); + if (ret) + goto free; + + /* + * This actually creates the event. + */ + ret = kprobe_event_gen_cmd_end(&cmd); + if (ret) + goto free; + + /* + * Now get the gen_kprobe_test event file. We need to prevent + * the instance and event from disappearing from underneath + * us, which trace_get_event_file() does (though in this case + * we're using the top-level instance which never goes away). + */ + gen_kprobe_test = trace_get_event_file(NULL, "kprobes", + "gen_kprobe_test"); + if (IS_ERR(gen_kprobe_test)) { + ret = PTR_ERR(gen_kprobe_test); + goto delete; + } + + /* Enable the event or you won't see anything */ + ret = trace_array_set_clr_event(gen_kprobe_test->tr, + "kprobes", "gen_kprobe_test", true); + if (ret) { + trace_put_event_file(gen_kprobe_test); + goto delete; + } + out: + return ret; + delete: + /* We got an error after creating the event, delete it */ + ret = kprobe_event_delete("gen_kprobe_test"); + free: + kfree(buf); + + goto out; +} + +/* + * Test to make sure we can create a kretprobe event. + */ +static int __init test_gen_kretprobe_cmd(void) +{ + struct dynevent_cmd cmd; + char *buf; + int ret; + + /* Create a buffer to hold the generated command */ + buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Before generating the command, initialize the cmd object */ + kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN); + + /* + * Define the kretprobe event. + */ + ret = kretprobe_event_gen_cmd_start(&cmd, "gen_kretprobe_test", + "do_sys_open", + "$retval"); + if (ret) + goto free; + + /* + * This actually creates the event. + */ + ret = kretprobe_event_gen_cmd_end(&cmd); + if (ret) + goto free; + + /* + * Now get the gen_kretprobe_test event file. We need to + * prevent the instance and event from disappearing from + * underneath us, which trace_get_event_file() does (though in + * this case we're using the top-level instance which never + * goes away). 
+ */ + gen_kretprobe_test = trace_get_event_file(NULL, "kprobes", + "gen_kretprobe_test"); + if (IS_ERR(gen_kretprobe_test)) { + ret = PTR_ERR(gen_kretprobe_test); + goto delete; + } + + /* Enable the event or you won't see anything */ + ret = trace_array_set_clr_event(gen_kretprobe_test->tr, + "kprobes", "gen_kretprobe_test", true); + if (ret) { + trace_put_event_file(gen_kretprobe_test); + goto delete; + } + out: + return ret; + delete: + /* We got an error after creating the event, delete it */ + ret = kprobe_event_delete("gen_kretprobe_test"); + free: + kfree(buf); + + goto out; +} + +static int __init kprobe_event_gen_test_init(void) +{ + int ret; + + ret = test_gen_kprobe_cmd(); + if (ret) + return ret; + + ret = test_gen_kretprobe_cmd(); + if (ret) { + WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr, + "kprobes", + "gen_kretprobe_test", false)); + trace_put_event_file(gen_kretprobe_test); + WARN_ON(kprobe_event_delete("gen_kretprobe_test")); + } + + return ret; +} + +static void __exit kprobe_event_gen_test_exit(void) +{ + /* Disable the event or you can't remove it */ + WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr, + "kprobes", + "gen_kprobe_test", false)); + + /* Now give the file and instance back */ + trace_put_event_file(gen_kprobe_test); + + /* Now unregister and free the event */ + WARN_ON(kprobe_event_delete("gen_kprobe_test")); + + /* Disable the event or you can't remove it */ + WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr, + "kprobes", + "gen_kretprobe_test", false)); + + /* Now give the file and instance back */ + trace_put_event_file(gen_kretprobe_test); + + /* Now unregister and free the event */ + WARN_ON(kprobe_event_delete("gen_kretprobe_test")); +} + +module_init(kprobe_event_gen_test_init) +module_exit(kprobe_event_gen_test_exit) + +MODULE_AUTHOR("Tom Zanussi"); +MODULE_DESCRIPTION("kprobe event generation test"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 61778cd70c1dcef082e5ee6781c7c0c4d7d5b576 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 29 Jan 2020 16:19:10 -0500 Subject: tracing: Move all function tracing configs together The features that depend on the function tracer were spread out through the tracing menu, pull them together as it is easier to manage. Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/Kconfig | 142 +++++++++++++++++++++++++-------------------------- 1 file changed, 71 insertions(+), 71 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 4484e783f68d..32fcbc00753b 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -172,6 +172,77 @@ config FUNCTION_GRAPH_TRACER the return value. This is done by setting the current return address on the current task structure into a stack of calls. +config DYNAMIC_FTRACE + bool "enable/disable function tracing dynamically" + depends on FUNCTION_TRACER + depends on HAVE_DYNAMIC_FTRACE + default y + help + This option will modify all the calls to function tracing + dynamically (will patch them out of the binary image and + replace them with a No-Op instruction) on boot up. During + compile time, a table is made of all the locations that ftrace + can function trace, and this table is linked into the kernel + image. When this is enabled, functions can be individually + enabled, and the functions not enabled will not affect + performance of the system. 
+ + See the files in /sys/kernel/debug/tracing: + available_filter_functions + set_ftrace_filter + set_ftrace_notrace + + This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but + otherwise has native performance as long as no tracing is active. + +config DYNAMIC_FTRACE_WITH_REGS + def_bool y + depends on DYNAMIC_FTRACE + depends on HAVE_DYNAMIC_FTRACE_WITH_REGS + +config DYNAMIC_FTRACE_WITH_DIRECT_CALLS + def_bool y + depends on DYNAMIC_FTRACE + depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + +config FUNCTION_PROFILER + bool "Kernel function profiler" + depends on FUNCTION_TRACER + default n + help + This option enables the kernel function profiler. A file is created + in debugfs called function_profile_enabled which defaults to zero. + When a 1 is echoed into this file profiling begins, and when a + zero is entered, profiling stops. A "functions" file is created in + the trace_stat directory; this file shows the list of functions that + have been hit and their counters. + + If in doubt, say N. + +config STACK_TRACER + bool "Trace max stack" + depends on HAVE_FUNCTION_TRACER + select FUNCTION_TRACER + select STACKTRACE + select KALLSYMS + help + This special tracer records the maximum stack footprint of the + kernel and displays it in /sys/kernel/debug/tracing/stack_trace. + + This tracer works by hooking into every function call that the + kernel executes, and keeping a maximum stack depth value and + stack-trace saved. If this is configured with DYNAMIC_FTRACE + then it will not have any overhead while the stack tracer + is disabled. + + To enable the stack tracer on bootup, pass in 'stacktrace' + on the kernel command line. + + The stack tracer can also be enabled or disabled via the + sysctl kernel.stack_tracer_enabled + + Say N if unsure. + config TRACE_PREEMPT_TOGGLE bool help @@ -410,30 +481,6 @@ config BRANCH_TRACER Say N if unsure. -config STACK_TRACER - bool "Trace max stack" - depends on HAVE_FUNCTION_TRACER - select FUNCTION_TRACER - select STACKTRACE - select KALLSYMS - help - This special tracer records the maximum stack footprint of the - kernel and displays it in /sys/kernel/debug/tracing/stack_trace. - - This tracer works by hooking into every function call that the - kernel executes, and keeping a maximum stack depth value and - stack-trace saved. If this is configured with DYNAMIC_FTRACE - then it will not have any overhead while the stack tracer - is disabled. - - To enable the stack tracer on bootup, pass in 'stacktrace' - on the kernel command line. - - The stack tracer can also be enabled or disabled via the - sysctl kernel.stack_tracer_enabled - - Say N if unsure. - config BLK_DEV_IO_TRACE bool "Support for tracing block IO actions" depends on SYSFS @@ -531,53 +578,6 @@ config DYNAMIC_EVENTS config PROBE_EVENTS def_bool n -config DYNAMIC_FTRACE - bool "enable/disable function tracing dynamically" - depends on FUNCTION_TRACER - depends on HAVE_DYNAMIC_FTRACE - default y - help - This option will modify all the calls to function tracing - dynamically (will patch them out of the binary image and - replace them with a No-Op instruction) on boot up. During - compile time, a table is made of all the locations that ftrace - can function trace, and this table is linked into the kernel - image. When this is enabled, functions can be individually - enabled, and the functions not enabled will not affect - performance of the system. 
- - See the files in /sys/kernel/debug/tracing: - available_filter_functions - set_ftrace_filter - set_ftrace_notrace - - This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but - otherwise has native performance as long as no tracing is active. - -config DYNAMIC_FTRACE_WITH_REGS - def_bool y - depends on DYNAMIC_FTRACE - depends on HAVE_DYNAMIC_FTRACE_WITH_REGS - -config DYNAMIC_FTRACE_WITH_DIRECT_CALLS - def_bool y - depends on DYNAMIC_FTRACE - depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS - -config FUNCTION_PROFILER - bool "Kernel function profiler" - depends on FUNCTION_TRACER - default n - help - This option enables the kernel function profiler. A file is created - in debugfs called function_profile_enabled which defaults to zero. - When a 1 is echoed into this file profiling begins, and when a - zero is entered, profiling stops. A "functions" file is created in - the trace_stat directory; this file shows the list of functions that - have been hit and their counters. - - If in doubt, say N. - config BPF_KPROBE_OVERRIDE bool "Enable BPF programs to override a kprobed function" depends on BPF_EVENTS -- cgit v1.2.3 From a48fc4f5f1d24c70906359731d7080f9ad78a1cb Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 29 Jan 2020 16:23:04 -0500 Subject: tracing: Move tracing test module configs together The MMIO test module was by itself, move it to the other test modules. Also, add the text "Test module" to PREEMPTIRQ_DELAY_TEST as that create a test module as well. Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/Kconfig | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 32fcbc00753b..47d0149347a9 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -680,16 +680,6 @@ config TRACE_EVENT_INJECT If unsure, say N. -config MMIOTRACE_TEST - tristate "Test module for mmiotrace" - depends on MMIOTRACE && m - help - This is a dumb module for testing mmiotrace. It is very dangerous - as it will write garbage to IO memory starting at a given address. - However, it should be safe to use on e.g. unused portion of VRAM. - - Say N, unless you absolutely know what you are doing. - config TRACEPOINT_BENCHMARK bool "Add tracepoint that benchmarks tracepoints" help @@ -759,8 +749,18 @@ config RING_BUFFER_STARTUP_TEST If unsure, say N +config MMIOTRACE_TEST + tristate "Test module for mmiotrace" + depends on MMIOTRACE && m + help + This is a dumb module for testing mmiotrace. It is very dangerous + as it will write garbage to IO memory starting at a given address. + However, it should be safe to use on e.g. unused portion of VRAM. + + Say N, unless you absolutely know what you are doing. + config PREEMPTIRQ_DELAY_TEST - tristate "Preempt / IRQ disable delay thread to test latency tracers" + tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers" depends on m help Select this option to build a test module that can help test latency -- cgit v1.2.3 From 21b3ce3063be8eb7150a717d7c5286fb56ba8cea Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 29 Jan 2020 16:26:45 -0500 Subject: tracing: Move mmio tracer config up with the other tracers Move the config that enables the mmiotracer with the other tracers such that all the tracers are together. 
Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/Kconfig | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 47d0149347a9..2014056682f5 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -353,6 +353,19 @@ config HWLAT_TRACER file. Every time a latency is greater than tracing_thresh, it will be recorded into the ring buffer. +config MMIOTRACE + bool "Memory mapped IO tracing" + depends on HAVE_MMIOTRACE_SUPPORT && PCI + select GENERIC_TRACER + help + Mmiotrace traces Memory Mapped I/O access and is meant for + debugging and reverse engineering. It is called from the ioremap + implementation and works via page faults. Tracing is disabled by + default and can be enabled at run-time. + + See Documentation/trace/mmiotrace.rst. + If you are not helping to develop drivers, say N. + config ENABLE_DEFAULT_TRACERS bool "Trace process context switches and events" depends on !GENERIC_TRACER @@ -627,19 +640,6 @@ config EVENT_TRACE_TEST_SYSCALLS TBD - enable a way to actually call the syscalls as we test their events -config MMIOTRACE - bool "Memory mapped IO tracing" - depends on HAVE_MMIOTRACE_SUPPORT && PCI - select GENERIC_TRACER - help - Mmiotrace traces Memory Mapped I/O access and is meant for - debugging and reverse engineering. It is called from the ioremap - implementation and works via page faults. Tracing is disabled by - default and can be enabled at run-time. - - See Documentation/trace/mmiotrace.rst. - If you are not helping to develop drivers, say N. - config TRACING_MAP bool depends on ARCH_HAVE_NMI_SAFE_CMPXCHG -- cgit v1.2.3 From 1e837945a8854227f3f4e4d2d7abff64ed320830 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 29 Jan 2020 16:30:30 -0500 Subject: tracing: Move tracing selftests to bottom of menu Move all the tracing selftest configs to the bottom of the tracing menu. There's no reason for them to be interspersed throughout. Also, move the bootconfig menu to the top. Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/Kconfig | 168 +++++++++++++++++++++++++-------------------------- 1 file changed, 84 insertions(+), 84 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 2014056682f5..91e885194dbc 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -141,6 +141,15 @@ menuconfig FTRACE if FTRACE +config BOOTTIME_TRACING + bool "Boot-time Tracing support" + depends on BOOT_CONFIG && TRACING + default y + help + Enable developer to setup ftrace subsystem via supplemental + kernel cmdline at boot time for debugging (tracing) driver + initialization and boot process. + config FUNCTION_TRACER bool "Kernel Function Tracer" depends on HAVE_FUNCTION_TRACER @@ -605,41 +614,6 @@ config FTRACE_MCOUNT_RECORD depends on DYNAMIC_FTRACE depends on HAVE_FTRACE_MCOUNT_RECORD -config FTRACE_SELFTEST - bool - -config FTRACE_STARTUP_TEST - bool "Perform a startup test on ftrace" - depends on GENERIC_TRACER - select FTRACE_SELFTEST - help - This option performs a series of startup tests on ftrace. On bootup - a series of tests are made to verify that the tracer is - functioning properly. It will do tests on all the configured - tracers of ftrace. - -config EVENT_TRACE_STARTUP_TEST - bool "Run selftest on trace events" - depends on FTRACE_STARTUP_TEST - default y - help - This option performs a test on all trace events in the system. 
- It basically just enables each event and runs some code that - will trigger events (not necessarily the event it enables) - This may take some time run as there are a lot of events. - -config EVENT_TRACE_TEST_SYSCALLS - bool "Run selftest on syscall events" - depends on EVENT_TRACE_STARTUP_TEST - help - This option will also enable testing every syscall event. - It only enables the event and disables it and runs various loads - with the event enabled. This adds a bit more time for kernel boot - up since it runs this on every system call defined. - - TBD - enable a way to actually call the syscalls as we test their - events - config TRACING_MAP bool depends on ARCH_HAVE_NMI_SAFE_CMPXCHG @@ -726,6 +700,81 @@ config RING_BUFFER_BENCHMARK If unsure, say N. +config TRACE_EVAL_MAP_FILE + bool "Show eval mappings for trace events" + depends on TRACING + help + The "print fmt" of the trace events will show the enum/sizeof names + instead of their values. This can cause problems for user space tools + that use this string to parse the raw data as user space does not know + how to convert the string to its value. + + To fix this, there's a special macro in the kernel that can be used + to convert an enum/sizeof into its value. If this macro is used, then + the print fmt strings will be converted to their values. + + If something does not get converted properly, this option can be + used to show what enums/sizeof the kernel tried to convert. + + This option is for debugging the conversions. A file is created + in the tracing directory called "eval_map" that will show the + names matched with their values and what trace event system they + belong too. + + Normally, the mapping of the strings to values will be freed after + boot up or module load. With this option, they will not be freed, as + they are needed for the "eval_map" file. Enabling this option will + increase the memory footprint of the running kernel. + + If unsure, say N. + +config GCOV_PROFILE_FTRACE + bool "Enable GCOV profiling on ftrace subsystem" + depends on GCOV_KERNEL + help + Enable GCOV profiling on ftrace subsystem for checking + which functions/lines are tested. + + If unsure, say N. + + Note that on a kernel compiled with this config, ftrace will + run significantly slower. + +config FTRACE_SELFTEST + bool + +config FTRACE_STARTUP_TEST + bool "Perform a startup test on ftrace" + depends on GENERIC_TRACER + select FTRACE_SELFTEST + help + This option performs a series of startup tests on ftrace. On bootup + a series of tests are made to verify that the tracer is + functioning properly. It will do tests on all the configured + tracers of ftrace. + +config EVENT_TRACE_STARTUP_TEST + bool "Run selftest on trace events" + depends on FTRACE_STARTUP_TEST + default y + help + This option performs a test on all trace events in the system. + It basically just enables each event and runs some code that + will trigger events (not necessarily the event it enables) + This may take some time run as there are a lot of events. + +config EVENT_TRACE_TEST_SYSCALLS + bool "Run selftest on syscall events" + depends on EVENT_TRACE_STARTUP_TEST + help + This option will also enable testing every syscall event. + It only enables the event and disables it and runs various loads + with the event enabled. This adds a bit more time for kernel boot + up since it runs this on every system call defined. 
+ + TBD - enable a way to actually call the syscalls as we test their + events + config RING_BUFFER_STARTUP_TEST bool "Ring buffer startup self test" depends on RING_BUFFER @@ -799,55 +848,6 @@ config KPROBE_EVENT_GEN_TEST If unsure, say N. -config TRACE_EVAL_MAP_FILE - bool "Show eval mappings for trace events" - depends on TRACING - help - The "print fmt" of the trace events will show the enum/sizeof names - instead of their values. This can cause problems for user space tools - that use this string to parse the raw data as user space does not know - how to convert the string to its value. - - To fix this, there's a special macro in the kernel that can be used - to convert an enum/sizeof into its value. If this macro is used, then - the print fmt strings will be converted to their values. - - If something does not get converted properly, this option can be - used to show what enums/sizeof the kernel tried to convert. - - This option is for debugging the conversions. A file is created - in the tracing directory called "eval_map" that will show the - names matched with their values and what trace event system they - belong too. - - Normally, the mapping of the strings to values will be freed after - boot up or module load. With this option, they will not be freed, as - they are needed for the "eval_map" file. Enabling this option will - increase the memory footprint of the running kernel. - - If unsure, say N. - -config GCOV_PROFILE_FTRACE - bool "Enable GCOV profiling on ftrace subsystem" - depends on GCOV_KERNEL - help - Enable GCOV profiling on ftrace subsystem for checking - which functions/lines are tested. - - If unsure, say N. - - Note that on a kernel compiled with this config, ftrace will - run significantly slower. - -config BOOTTIME_TRACING - bool "Boot-time Tracing support" - depends on BOOT_CONFIG && TRACING - default y - help - Enable developer to setup ftrace subsystem via supplemental - kernel cmdline at boot time for debugging (tracing) driver - initialization and boot process. - endif # FTRACE endif # TRACING_SUPPORT -- cgit v1.2.3 From fdeb1aca2861472b38779be44141757483300827 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 29 Jan 2020 12:59:26 -0600 Subject: tracing: Change trace_boot to use synth_event interface Have trace_boot_add_synth_event() use the synth_event interface. Also, rename synth_event_run_cmd() to synth_event_run_command() now that trace_boot's version is gone. 
Link: http://lkml.kernel.org/r/94f1fa0e31846d0bddca916b8663404b20559e34.1580323897.git.zanussi@kernel.org Acked-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 31 ++++++++++++------------------- kernel/trace/trace_events_hist.c | 9 ++------- 2 files changed, 14 insertions(+), 26 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 2298a70cdda6..06d7feb5255f 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -125,38 +125,31 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event) #endif #ifdef CONFIG_HIST_TRIGGERS -extern int synth_event_run_command(const char *command); - static int __init trace_boot_add_synth_event(struct xbc_node *node, const char *event) { + struct dynevent_cmd cmd; struct xbc_node *anode; - char buf[MAX_BUF_LEN], *q; + char buf[MAX_BUF_LEN]; const char *p; - int len, delta, ret; + int ret; - len = ARRAY_SIZE(buf); - delta = snprintf(buf, len, "%s", event); - if (delta >= len) { - pr_err("Event name is too long: %s\n", event); - return -E2BIG; - } - len -= delta; q = buf + delta; + synth_event_cmd_init(&cmd, buf, MAX_BUF_LEN); + + ret = synth_event_gen_cmd_start(&cmd, event, NULL); + if (ret) + return ret; xbc_node_for_each_array_value(node, "fields", anode, p) { - delta = snprintf(q, len, " %s;", p); - if (delta >= len) { - pr_err("fields string is too long: %s\n", p); - return -E2BIG; - } - len -= delta; q += delta; + ret = synth_event_add_field_str(&cmd, p); + if (ret) + return ret; } - ret = synth_event_run_command(buf); + ret = synth_event_gen_cmd_end(&cmd); if (ret < 0) pr_err("Failed to add synthetic event: %s\n", buf); - return ret; } #else diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 4d56a4f0310d..2e88c9805f4b 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1755,12 +1755,7 @@ static int create_or_delete_synth_event(int argc, char **argv) return ret == -ECANCELED ? -EINVAL : ret; } -int synth_event_run_command(const char *command) -{ - return trace_run_command(command, create_or_delete_synth_event); -} - -static int synth_event_run_cmd(struct dynevent_cmd *cmd) +static int synth_event_run_command(struct dynevent_cmd *cmd) { struct synth_event *se; int ret; @@ -1790,7 +1785,7 @@ static int synth_event_run_cmd(struct dynevent_cmd *cmd) void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen) { dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH, - synth_event_run_cmd); + synth_event_run_command); } EXPORT_SYMBOL_GPL(synth_event_cmd_init); -- cgit v1.2.3 From d380dcde9a07ca5de4805dee11f58a98ec0ad6ff Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 29 Jan 2020 21:18:18 -0500 Subject: tracing: Fix now invalid var_ref_vals assumption in trace action The patch 'tracing: Fix histogram code when expression has same var as value' added code to return an existing variable reference when creating a new variable reference, which resulted in var_ref_vals slots being reused instead of being duplicated. The implementation of the trace action assumes that the end of the var_ref_vals array starting at action_data.var_ref_idx corresponds to the values that will be assigned to the trace params. The patch mentioned above invalidates that assumption, which means that each param needs to explicitly specify its index into var_ref_vals. 
This fix changes action_data.var_ref_idx to an array of var ref indexes to account for that. Link: https://lore.kernel.org/r/1580335695.6220.8.camel@kernel.org Fixes: 8bcebc77e85f ("tracing: Fix histogram code when expression has same var as value") Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 53 ++++++++++++++++++++++++++++------------ 1 file changed, 38 insertions(+), 15 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 2e88c9805f4b..5b4e04780411 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -476,11 +476,12 @@ struct action_data { * When a histogram trigger is hit, the values of any * references to variables, including variables being passed * as parameters to synthetic events, are collected into a - * var_ref_vals array. This var_ref_idx is the index of the - * first param in the array to be passed to the synthetic - * event invocation. + * var_ref_vals array. This var_ref_idx array is an array of + * indices into the var_ref_vals array, one for each synthetic + * event param, and is passed to the synthetic event + * invocation. */ - unsigned int var_ref_idx; + unsigned int var_ref_idx[TRACING_MAP_VARS_MAX]; struct synth_event *synth_event; bool use_trace_keyword; char *synth_event_name; @@ -884,14 +885,14 @@ static struct trace_event_functions synth_event_funcs = { static notrace void trace_event_raw_event_synth(void *__data, u64 *var_ref_vals, - unsigned int var_ref_idx) + unsigned int *var_ref_idx) { struct trace_event_file *trace_file = __data; struct synth_trace_event *entry; struct trace_event_buffer fbuffer; struct trace_buffer *buffer; struct synth_event *event; - unsigned int i, n_u64; + unsigned int i, n_u64, val_idx; int fields_size = 0; event = trace_file->event_call->data; @@ -914,15 +915,16 @@ static notrace void trace_event_raw_event_synth(void *__data, goto out; for (i = 0, n_u64 = 0; i < event->n_fields; i++) { + val_idx = var_ref_idx[i]; if (event->fields[i]->is_string) { - char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i]; + char *str_val = (char *)(long)var_ref_vals[val_idx]; char *str_field = (char *)&entry->fields[n_u64]; strscpy(str_field, str_val, STR_VAR_LEN_MAX); n_u64 += STR_VAR_LEN_MAX / sizeof(u64); } else { struct synth_field *field = event->fields[i]; - u64 val = var_ref_vals[var_ref_idx + i]; + u64 val = var_ref_vals[val_idx]; switch (field->size) { case 1: @@ -1122,10 +1124,10 @@ static struct tracepoint *alloc_synth_tracepoint(char *name) } typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals, - unsigned int var_ref_idx); + unsigned int *var_ref_idx); static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals, - unsigned int var_ref_idx) + unsigned int *var_ref_idx) { struct tracepoint *tp = event->tp; @@ -3506,6 +3508,22 @@ static int init_var_ref(struct hist_field *ref_field, goto out; } +static int find_var_ref_idx(struct hist_trigger_data *hist_data, + struct hist_field *var_field) +{ + struct hist_field *ref_field; + int i; + + for (i = 0; i < hist_data->n_var_refs; i++) { + ref_field = hist_data->var_refs[i]; + if (ref_field->var.idx == var_field->var.idx && + ref_field->var.hist_data == var_field->hist_data) + return i; + } + + return -ENOENT; +} + /** * create_var_ref - Create a variable reference and attach it to trigger * @hist_data: The trigger that will be referencing the variable @@ -5071,11 +5089,11 @@ static int 
trace_action_create(struct hist_trigger_data *hist_data, struct trace_array *tr = hist_data->event_file->tr; char *event_name, *param, *system = NULL; struct hist_field *hist_field, *var_ref; - unsigned int i, var_ref_idx; + unsigned int i; unsigned int field_pos = 0; struct synth_event *event; char *synth_event_name; - int ret = 0; + int var_ref_idx, ret = 0; lockdep_assert_held(&event_mutex); @@ -5092,8 +5110,6 @@ static int trace_action_create(struct hist_trigger_data *hist_data, event->ref++; - var_ref_idx = hist_data->n_var_refs; - for (i = 0; i < data->n_params; i++) { char *p; @@ -5142,6 +5158,14 @@ static int trace_action_create(struct hist_trigger_data *hist_data, goto err; } + var_ref_idx = find_var_ref_idx(hist_data, var_ref); + if (WARN_ON(var_ref_idx < 0)) { + ret = var_ref_idx; + goto err; + } + + data->var_ref_idx[i] = var_ref_idx; + field_pos++; kfree(p); continue; @@ -5160,7 +5184,6 @@ static int trace_action_create(struct hist_trigger_data *hist_data, } data->synth_event = event; - data->var_ref_idx = var_ref_idx; out: return ret; err: -- cgit v1.2.3 From 249d7b2ef674cdae28c377cfe6f56696548305d5 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 31 Jan 2020 15:55:31 -0600 Subject: tracing: Consolidate some synth_event_trace code The synth_event trace code contains some almost identical functions and some small functions that are called only once - consolidate the common code into single functions and fold in the small functions to simplify the code overall. Link: http://lkml.kernel.org/r/d1c8d8ad124a653b7543afe801d38c199ca5c20e.1580506712.git.zanussi@kernel.org Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 141 ++++++++++++++++----------------------- 1 file changed, 57 insertions(+), 84 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 5b4e04780411..42058a1b5146 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -2053,24 +2053,72 @@ out: } EXPORT_SYMBOL_GPL(synth_event_trace_start); -static int save_synth_val(struct synth_field *field, u64 val, - struct synth_event_trace_state *trace_state) +static int __synth_event_add_val(const char *field_name, u64 val, + struct synth_event_trace_state *trace_state) { - struct synth_trace_event *entry = trace_state->entry; + struct synth_field *field = NULL; + struct synth_trace_event *entry; + struct synth_event *event; + int i, ret = 0; + + if (!trace_state) { + ret = -EINVAL; + goto out; + } + + /* can't mix add_next_synth_val() with add_synth_val() */ + if (field_name) { + if (trace_state->add_next) { + ret = -EINVAL; + goto out; + } + trace_state->add_name = true; + } else { + if (trace_state->add_name) { + ret = -EINVAL; + goto out; + } + trace_state->add_next = true; + } + + if (!trace_state->enabled) + goto out; + + event = trace_state->event; + if (trace_state->add_name) { + for (i = 0; i < event->n_fields; i++) { + field = event->fields[i]; + if (strcmp(field->name, field_name) == 0) + break; + } + if (!field) { + ret = -EINVAL; + goto out; + } + } else { + if (trace_state->cur_field >= event->n_fields) { + ret = -EINVAL; + goto out; + } + field = event->fields[trace_state->cur_field++]; + } + entry = trace_state->entry; if (field->is_string) { char *str_val = (char *)(long)val; char *str_field; - if (!str_val) - return -EINVAL; + if (!str_val) { + ret = -EINVAL; + goto out; + } str_field = (char *)&entry->fields[field->offset]; strscpy(str_field, 
str_val, STR_VAR_LEN_MAX); } else entry->fields[field->offset] = val; - - return 0; + out: + return ret; } /** @@ -2104,54 +2152,10 @@ static int save_synth_val(struct synth_field *field, u64 val, int synth_event_add_next_val(u64 val, struct synth_event_trace_state *trace_state) { - struct synth_field *field; - struct synth_event *event; - int ret = 0; - - if (!trace_state) { - ret = -EINVAL; - goto out; - } - - /* can't mix add_next_synth_val() with add_synth_val() */ - if (trace_state->add_name) { - ret = -EINVAL; - goto out; - } - trace_state->add_next = true; - - if (!trace_state->enabled) - goto out; - - event = trace_state->event; - - if (trace_state->cur_field >= event->n_fields) { - ret = -EINVAL; - goto out; - } - - field = event->fields[trace_state->cur_field++]; - ret = save_synth_val(field, val, trace_state); - out: - return ret; + return __synth_event_add_val(NULL, val, trace_state); } EXPORT_SYMBOL_GPL(synth_event_add_next_val); -static struct synth_field *find_synth_field(struct synth_event *event, - const char *field_name) -{ - struct synth_field *field = NULL; - unsigned int i; - - for (i = 0; i < event->n_fields; i++) { - field = event->fields[i]; - if (strcmp(field->name, field_name) == 0) - return field; - } - - return NULL; -} - /** * synth_event_add_val - Add a named field's value to an open synth trace * @field_name: The name of the synthetic event field value to set @@ -2183,38 +2187,7 @@ static struct synth_field *find_synth_field(struct synth_event *event, int synth_event_add_val(const char *field_name, u64 val, struct synth_event_trace_state *trace_state) { - struct synth_trace_event *entry; - struct synth_event *event; - struct synth_field *field; - int ret = 0; - - if (!trace_state) { - ret = -EINVAL; - goto out; - } - - /* can't mix add_next_synth_val() with add_synth_val() */ - if (trace_state->add_next) { - ret = -EINVAL; - goto out; - } - trace_state->add_name = true; - - if (!trace_state->enabled) - goto out; - - event = trace_state->event; - entry = trace_state->entry; - - field = find_synth_field(event, field_name); - if (!field) { - ret = -EINVAL; - goto out; - } - - ret = save_synth_val(field, val, trace_state); - out: - return ret; + return __synth_event_add_val(field_name, val, trace_state); } EXPORT_SYMBOL_GPL(synth_event_add_val); -- cgit v1.2.3 From 74403b6c50dd7a633d3f22f59f975d6081eae093 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 31 Jan 2020 15:55:32 -0600 Subject: tracing: Remove check_arg() callbacks from dynevent args It's kind of strange to have check_arg() callbacks as part of the arg objects themselves; it makes more sense to just pass these in when the args are added instead. Remove the check_arg() callbacks from those objects which also means removing the check_arg() args from the init functions, adding them to the add functions and fixing up existing callers. 
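For illustration, the calling convention after this change looks roughly as follows (cmd is an already-initialized dynevent_cmd pointer, and field_str and my_check_fn are hypothetical placeholders, not names from the patch):

        struct dynevent_arg arg;
        int ret;

        /* The init call now takes only the separator; no check_arg callback */
        dynevent_arg_init(&arg, ';');
        arg.str = field_str;

        /* The optional sanity-check function is passed at add time; NULL skips it */
        ret = dynevent_arg_add(cmd, &arg, my_check_fn);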
Link: http://lkml.kernel.org/r/c7708d6f177fcbe1a36b6e4e8e150907df0fa5d2.1580506712.git.zanussi@kernel.org Reviewed-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_dynevent.c | 62 ++++++++++++++++++---------------------- kernel/trace/trace_dynevent.h | 11 ++++--- kernel/trace/trace_events_hist.c | 16 +++++------ kernel/trace/trace_kprobe.c | 10 +++---- 4 files changed, 46 insertions(+), 53 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c index 6ffdbc4fda53..f9cfcdc9d1f3 100644 --- a/kernel/trace/trace_dynevent.c +++ b/kernel/trace/trace_dynevent.c @@ -228,27 +228,30 @@ fs_initcall(init_dynamic_event); * dynevent_arg_add - Add an arg to a dynevent_cmd * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd * @arg: The argument to append to the current cmd + * @check_arg: An (optional) pointer to a function checking arg sanity * * Append an argument to a dynevent_cmd. The argument string will be * appended to the current cmd string, followed by a separator, if - * applicable. Before the argument is added, the check_arg() - * function, if defined, is called. + * applicable. Before the argument is added, the @check_arg function, + * if present, will be used to check the sanity of the current arg + * string. * - * The cmd string, separator, and check_arg() function should be set - * using the dynevent_arg_init() before any arguments are added using - * this function. + * The cmd string and separator should be set using the + * dynevent_arg_init() before any arguments are added using this + * function. * * Return: 0 if successful, error otherwise. */ int dynevent_arg_add(struct dynevent_cmd *cmd, - struct dynevent_arg *arg) + struct dynevent_arg *arg, + dynevent_check_arg_fn_t check_arg) { int ret = 0; int delta; char *q; - if (arg->check_arg) { - ret = arg->check_arg(arg); + if (check_arg) { + ret = check_arg(arg); if (ret) return ret; } @@ -269,6 +272,7 @@ int dynevent_arg_add(struct dynevent_cmd *cmd, * dynevent_arg_pair_add - Add an arg pair to a dynevent_cmd * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd * @arg_pair: The argument pair to append to the current cmd + * @check_arg: An (optional) pointer to a function checking arg sanity * * Append an argument pair to a dynevent_cmd. An argument pair * consists of a left-hand-side argument and a right-hand-side @@ -278,24 +282,26 @@ int dynevent_arg_add(struct dynevent_cmd *cmd, * * The lhs argument string will be appended to the current cmd string, * followed by an operator, if applicable, followd by the rhs string, - * followed finally by a separator, if applicable. Before anything is - * added, the check_arg() function, if defined, is called. + * followed finally by a separator, if applicable. Before the + * argument is added, the @check_arg function, if present, will be + * used to check the sanity of the current arg strings. * - * The cmd strings, operator, separator, and check_arg() function - * should be set using the dynevent_arg_pair_init() before any arguments - * are added using this function. + * The cmd strings, operator, and separator should be set using the + * dynevent_arg_pair_init() before any arguments are added using this + * function. * * Return: 0 if successful, error otherwise. 
*/ int dynevent_arg_pair_add(struct dynevent_cmd *cmd, - struct dynevent_arg_pair *arg_pair) + struct dynevent_arg_pair *arg_pair, + dynevent_check_arg_fn_t check_arg) { int ret = 0; int delta; char *q; - if (arg_pair->check_arg) { - ret = arg_pair->check_arg(arg_pair); + if (check_arg) { + ret = check_arg(arg_pair); if (ret) return ret; } @@ -385,20 +391,16 @@ void dynevent_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen, /** * dynevent_arg_init - Initialize a dynevent_arg object * @arg: A pointer to the dynevent_arg struct representing the arg - * @check_arg: An (optional) pointer to a function checking arg sanity * @separator: An (optional) separator, appended after adding the arg * * Initialize a dynevent_arg object. A dynevent_arg represents an * object used to append single arguments to the current command - * string. The @check_arg function, if present, will be used to check - * the sanity of the current arg string (which is directly set by the - * caller). After the arg string is successfully appended to the + * string. After the arg string is successfully appended to the * command string, the optional @separator is appended. If no * separator was specified when initializing the arg, a space will be * appended. */ void dynevent_arg_init(struct dynevent_arg *arg, - dynevent_check_arg_fn_t check_arg, char separator) { memset(arg, '\0', sizeof(*arg)); @@ -406,14 +408,11 @@ void dynevent_arg_init(struct dynevent_arg *arg, if (!separator) separator = ' '; arg->separator = separator; - - arg->check_arg = check_arg; } /** * dynevent_arg_pair_init - Initialize a dynevent_arg_pair object * @arg_pair: A pointer to the dynevent_arg_pair struct representing the arg - * @check_arg: An (optional) pointer to a function checking arg sanity * @operator: An (optional) operator, appended after adding the first arg * @separator: An (optional) separator, appended after adding the second arg * @@ -422,16 +421,13 @@ void dynevent_arg_init(struct dynevent_arg *arg, * variable_name;' or 'x+y' to the current command string. An * argument pair consists of a left-hand-side argument and a * right-hand-side argument separated by an operator, which can be - * whitespace, all followed by a separator, if applicable. The - * @check_arg function, if present, will be used to check the sanity - * of the current arg strings (which is directly set by the caller). - * After the first arg string is successfully appended to the command - * string, the optional @operator is appended, followed by the second - * arg and and optional @separator. If no separator was specified - * when initializing the arg, a space will be appended. + * whitespace, all followed by a separator, if applicable. After the + * first arg string is successfully appended to the command string, + * the optional @operator is appended, followed by the second arg and + * and optional @separator. If no separator was specified when + * initializing the arg, a space will be appended. 
*/ void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair, - dynevent_check_arg_fn_t check_arg, char operator, char separator) { memset(arg_pair, '\0', sizeof(*arg_pair)); @@ -443,8 +439,6 @@ void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair, if (!separator) separator = ' '; arg_pair->separator = separator; - - arg_pair->check_arg = check_arg; } /** diff --git a/kernel/trace/trace_dynevent.h b/kernel/trace/trace_dynevent.h index b593fc34c5b1..d6857a254ede 100644 --- a/kernel/trace/trace_dynevent.h +++ b/kernel/trace/trace_dynevent.h @@ -126,28 +126,27 @@ typedef int (*dynevent_check_arg_fn_t)(void *data); struct dynevent_arg { const char *str; char separator; /* e.g. ';', ',', or nothing */ - dynevent_check_arg_fn_t check_arg; }; extern void dynevent_arg_init(struct dynevent_arg *arg, - dynevent_check_arg_fn_t check_arg, char separator); extern int dynevent_arg_add(struct dynevent_cmd *cmd, - struct dynevent_arg *arg); + struct dynevent_arg *arg, + dynevent_check_arg_fn_t check_arg); struct dynevent_arg_pair { const char *lhs; const char *rhs; char operator; /* e.g. '=' or nothing */ char separator; /* e.g. ';', ',', or nothing */ - dynevent_check_arg_fn_t check_arg; }; extern void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair, - dynevent_check_arg_fn_t check_arg, char operator, char separator); + extern int dynevent_arg_pair_add(struct dynevent_cmd *cmd, - struct dynevent_arg_pair *arg_pair); + struct dynevent_arg_pair *arg_pair, + dynevent_check_arg_fn_t check_arg); extern int dynevent_str_add(struct dynevent_cmd *cmd, const char *str); #endif diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 42058a1b5146..d2817fe52f32 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1334,12 +1334,12 @@ int synth_event_add_field(struct dynevent_cmd *cmd, const char *type, if (!type || !name) return -EINVAL; - dynevent_arg_pair_init(&arg_pair, synth_event_check_arg_fn, 0, ';'); + dynevent_arg_pair_init(&arg_pair, 0, ';'); arg_pair.lhs = type; arg_pair.rhs = name; - ret = dynevent_arg_pair_add(cmd, &arg_pair); + ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn); if (ret) return ret; @@ -1377,11 +1377,11 @@ int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name) if (!type_name) return -EINVAL; - dynevent_arg_init(&arg, NULL, ';'); + dynevent_arg_init(&arg, ';'); arg.str = type_name; - ret = dynevent_arg_add(cmd, &arg); + ret = dynevent_arg_add(cmd, &arg, NULL); if (ret) return ret; @@ -1472,9 +1472,9 @@ int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name, if (cmd->type != DYNEVENT_TYPE_SYNTH) return -EINVAL; - dynevent_arg_init(&arg, NULL, 0); + dynevent_arg_init(&arg, 0); arg.str = name; - ret = dynevent_arg_add(cmd, &arg); + ret = dynevent_arg_add(cmd, &arg, NULL); if (ret) return ret; @@ -1546,9 +1546,9 @@ int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name, if (n_fields > SYNTH_FIELDS_MAX) return -EINVAL; - dynevent_arg_init(&arg, NULL, 0); + dynevent_arg_init(&arg, 0); arg.str = name; - ret = dynevent_arg_add(cmd, &arg); + ret = dynevent_arg_add(cmd, &arg, NULL); if (ret) return ret; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 307abb724a71..fe183d4045d2 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -962,9 +962,9 @@ int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe, if (ret) return ret; - dynevent_arg_init(&arg, 
NULL, 0); + dynevent_arg_init(&arg, 0); arg.str = loc; - ret = dynevent_arg_add(cmd, &arg); + ret = dynevent_arg_add(cmd, &arg, NULL); if (ret) return ret; @@ -982,7 +982,7 @@ int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe, } arg.str = field; - ret = dynevent_arg_add(cmd, &arg); + ret = dynevent_arg_add(cmd, &arg, NULL); if (ret) break; } @@ -1017,7 +1017,7 @@ int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...) if (cmd->type != DYNEVENT_TYPE_KPROBE) return -EINVAL; - dynevent_arg_init(&arg, NULL, 0); + dynevent_arg_init(&arg, 0); va_start(args, cmd); for (;;) { @@ -1033,7 +1033,7 @@ int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...) } arg.str = field; - ret = dynevent_arg_add(cmd, &arg); + ret = dynevent_arg_add(cmd, &arg, NULL); if (ret) break; } -- cgit v1.2.3 From e9260f6257efa6a6293507696e875b6494ee3744 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 31 Jan 2020 15:55:33 -0600 Subject: tracing: Remove useless code in dynevent_arg_pair_add() The final addition to q is unnecessary, since q isn't ever used afterwards. Link: http://lkml.kernel.org/r/7880a1268217886cdba7035526650195668da856.1580506712.git.zanussi@kernel.org Reviewed-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_dynevent.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/trace') diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c index f9cfcdc9d1f3..204275ec8d71 100644 --- a/kernel/trace/trace_dynevent.c +++ b/kernel/trace/trace_dynevent.c @@ -322,7 +322,7 @@ int dynevent_arg_pair_add(struct dynevent_cmd *cmd, pr_err("field string is too long: %s\n", arg_pair->rhs); return -E2BIG; } - cmd->remaining -= delta; q += delta; + cmd->remaining -= delta; return ret; } -- cgit v1.2.3 From 2b90927c77c973771cc658d639724d5b247a83eb Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 31 Jan 2020 15:55:34 -0600 Subject: tracing: Use seq_buf for building dynevent_cmd string The dynevent_cmd commands that build up the command string don't need to do that themselves - there's a seq_buf facility that does pretty much the same thing those command are doing manually, so use it instead. 
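A rough sketch of the seq_buf pattern the command builders switch to (the buffer size and strings below are illustrative only, not taken from the patch):

        struct seq_buf s;
        char buf[256];

        seq_buf_init(&s, buf, sizeof(buf));

        /* Both calls return non-zero if the string would overflow the buffer */
        if (seq_buf_printf(&s, " %s%c", "myarg", ';'))
                return -E2BIG;
        if (seq_buf_puts(&s, "!myevent"))
                return -E2BIG;

        /* The accumulated command string is then available as s.buffer */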
Link: http://lkml.kernel.org/r/eb8a6e835c964d0ab8a38cbf5ffa60746b54a465.1580506712.git.zanussi@kernel.org Reviewed-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 4 +--- kernel/trace/trace_dynevent.c | 48 +++++++++++----------------------------- kernel/trace/trace_events_hist.c | 2 +- kernel/trace/trace_kprobe.c | 2 +- 4 files changed, 16 insertions(+), 40 deletions(-) (limited to 'kernel/trace') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 7c307a7c9c6a..67f528ecb9e5 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -367,10 +367,8 @@ struct dynevent_cmd; typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd); struct dynevent_cmd { - char *buf; + struct seq_buf seq; const char *event_name; - int maxlen; - int remaining; unsigned int n_fields; enum dynevent_type type; dynevent_create_fn_t run_command; diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c index 204275ec8d71..9f2e8520b748 100644 --- a/kernel/trace/trace_dynevent.c +++ b/kernel/trace/trace_dynevent.c @@ -247,8 +247,6 @@ int dynevent_arg_add(struct dynevent_cmd *cmd, dynevent_check_arg_fn_t check_arg) { int ret = 0; - int delta; - char *q; if (check_arg) { ret = check_arg(arg); @@ -256,14 +254,11 @@ int dynevent_arg_add(struct dynevent_cmd *cmd, return ret; } - q = cmd->buf + (cmd->maxlen - cmd->remaining); - - delta = snprintf(q, cmd->remaining, " %s%c", arg->str, arg->separator); - if (delta >= cmd->remaining) { - pr_err("String is too long: %s\n", arg->str); + ret = seq_buf_printf(&cmd->seq, " %s%c", arg->str, arg->separator); + if (ret) { + pr_err("String is too long: %s%c\n", arg->str, arg->separator); return -E2BIG; } - cmd->remaining -= delta; return ret; } @@ -297,8 +292,6 @@ int dynevent_arg_pair_add(struct dynevent_cmd *cmd, dynevent_check_arg_fn_t check_arg) { int ret = 0; - int delta; - char *q; if (check_arg) { ret = check_arg(arg_pair); @@ -306,23 +299,15 @@ int dynevent_arg_pair_add(struct dynevent_cmd *cmd, return ret; } - q = cmd->buf + (cmd->maxlen - cmd->remaining); - - delta = snprintf(q, cmd->remaining, " %s%c", arg_pair->lhs, - arg_pair->operator); - if (delta >= cmd->remaining) { - pr_err("field string is too long: %s\n", arg_pair->lhs); - return -E2BIG; - } - cmd->remaining -= delta; q += delta; - - delta = snprintf(q, cmd->remaining, "%s%c", arg_pair->rhs, - arg_pair->separator); - if (delta >= cmd->remaining) { - pr_err("field string is too long: %s\n", arg_pair->rhs); + ret = seq_buf_printf(&cmd->seq, " %s%c%s%c", arg_pair->lhs, + arg_pair->operator, arg_pair->rhs, + arg_pair->separator); + if (ret) { + pr_err("field string is too long: %s%c%s%c\n", arg_pair->lhs, + arg_pair->operator, arg_pair->rhs, + arg_pair->separator); return -E2BIG; } - cmd->remaining -= delta; return ret; } @@ -340,17 +325,12 @@ int dynevent_arg_pair_add(struct dynevent_cmd *cmd, int dynevent_str_add(struct dynevent_cmd *cmd, const char *str) { int ret = 0; - int delta; - char *q; - - q = cmd->buf + (cmd->maxlen - cmd->remaining); - delta = snprintf(q, cmd->remaining, "%s", str); - if (delta >= cmd->remaining) { + ret = seq_buf_puts(&cmd->seq, str); + if (ret) { pr_err("String is too long: %s\n", str); return -E2BIG; } - cmd->remaining -= delta; return ret; } @@ -381,9 +361,7 @@ void dynevent_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen, { memset(cmd, '\0', sizeof(*cmd)); - cmd->buf = buf; - cmd->maxlen = maxlen; - cmd->remaining = cmd->maxlen; + 
seq_buf_init(&cmd->seq, buf, maxlen); cmd->type = type; cmd->run_command = run_command; } diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index d2817fe52f32..b3bcfd8c7332 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1762,7 +1762,7 @@ static int synth_event_run_command(struct dynevent_cmd *cmd) struct synth_event *se; int ret; - ret = trace_run_command(cmd->buf, create_or_delete_synth_event); + ret = trace_run_command(cmd->seq.buffer, create_or_delete_synth_event); if (ret) return ret; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index fe183d4045d2..51efc790aea8 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -903,7 +903,7 @@ static int create_or_delete_trace_kprobe(int argc, char **argv) static int trace_kprobe_run_command(struct dynevent_cmd *cmd) { - return trace_run_command(cmd->buf, create_or_delete_trace_kprobe); + return trace_run_command(cmd->seq.buffer, create_or_delete_trace_kprobe); } /** -- cgit v1.2.3 From 24a9729f831462b1d9d61dc85ecc91c59037243f Mon Sep 17 00:00:00 2001 From: Amol Grover Date: Sat, 1 Feb 2020 12:57:04 +0530 Subject: tracing: Annotate ftrace_graph_hash pointer with __rcu Fix following instances of sparse error kernel/trace/ftrace.c:5664:29: error: incompatible types in comparison kernel/trace/ftrace.c:5785:21: error: incompatible types in comparison kernel/trace/ftrace.c:5864:36: error: incompatible types in comparison kernel/trace/ftrace.c:5866:25: error: incompatible types in comparison Use rcu_dereference_protected to access the __rcu annotated pointer. Link: http://lkml.kernel.org/r/20200201072703.17330-1-frextrite@gmail.com Reviewed-by: Joel Fernandes (Google) Signed-off-by: Amol Grover Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ftrace.c | 2 +- kernel/trace/trace.h | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0e9612c30995..01d2ecd66161 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -5591,7 +5591,7 @@ static const struct file_operations ftrace_notrace_fops = { static DEFINE_MUTEX(graph_lock); -struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH; +struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH; struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH; enum graph_filter_type { diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index f5480a2aa334..18ceab59a5ba 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -964,22 +964,25 @@ extern void __trace_graph_return(struct trace_array *tr, unsigned long flags, int pc); #ifdef CONFIG_DYNAMIC_FTRACE -extern struct ftrace_hash *ftrace_graph_hash; +extern struct ftrace_hash __rcu *ftrace_graph_hash; extern struct ftrace_hash *ftrace_graph_notrace_hash; static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace) { unsigned long addr = trace->func; int ret = 0; + struct ftrace_hash *hash; preempt_disable_notrace(); - if (ftrace_hash_empty(ftrace_graph_hash)) { + hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible()); + + if (ftrace_hash_empty(hash)) { ret = 1; goto out; } - if (ftrace_lookup_ip(ftrace_graph_hash, addr)) { + if (ftrace_lookup_ip(hash, addr)) { /* * This needs to be cleared on the return functions -- cgit v1.2.3 From fd0e6852c407dd9aefc594f54ddcc21d84803d3b Mon Sep 17 00:00:00 2001 From: Amol Grover Date: Wed, 5 Feb 2020 11:27:02 +0530 Subject: tracing: Annotate 
ftrace_graph_notrace_hash pointer with __rcu Fix following instances of sparse error kernel/trace/ftrace.c:5667:29: error: incompatible types in comparison kernel/trace/ftrace.c:5813:21: error: incompatible types in comparison kernel/trace/ftrace.c:5868:36: error: incompatible types in comparison kernel/trace/ftrace.c:5870:25: error: incompatible types in comparison Use rcu_dereference_protected to dereference the newly annotated pointer. Link: http://lkml.kernel.org/r/20200205055701.30195-1-frextrite@gmail.com Signed-off-by: Amol Grover Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ftrace.c | 2 +- kernel/trace/trace.h | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 01d2ecd66161..481ede3eac13 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -5592,7 +5592,7 @@ static const struct file_operations ftrace_notrace_fops = { static DEFINE_MUTEX(graph_lock); struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH; -struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH; +struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH; enum graph_filter_type { GRAPH_FILTER_NOTRACE = 0, diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 18ceab59a5ba..022def96d307 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -965,7 +965,7 @@ extern void __trace_graph_return(struct trace_array *tr, #ifdef CONFIG_DYNAMIC_FTRACE extern struct ftrace_hash __rcu *ftrace_graph_hash; -extern struct ftrace_hash *ftrace_graph_notrace_hash; +extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash; static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace) { @@ -1018,10 +1018,14 @@ static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace) static inline int ftrace_graph_notrace_addr(unsigned long addr) { int ret = 0; + struct ftrace_hash *notrace_hash; preempt_disable_notrace(); - if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr)) + notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, + !preemptible()); + + if (ftrace_lookup_ip(notrace_hash, addr)) ret = 1; preempt_enable_notrace(); -- cgit v1.2.3 From 16052dd5bdfa16dbe18d8c1d4cde2ddab9d23177 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 5 Feb 2020 02:17:57 -0500 Subject: ftrace: Add comment to why rcu_dereference_sched() is open coded Because the function graph tracer can execute in sections where RCU is not "watching", the rcu_dereference_sched() for the hash needs to be open coded. This is fine because the RCU "flavor" of the ftrace hash is protected by its own RCU handling (it does its own little synchronization on every CPU and does not rely on RCU sched). Acked-by: Joel Fernandes (Google) Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'kernel/trace') diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 022def96d307..8c52f5de9384 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -975,6 +975,11 @@ static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace) preempt_disable_notrace(); + /* + * Have to open code "rcu_dereference_sched()" because the + * function graph tracer can be called when RCU is not + * "watching".
+ */ hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible()); if (ftrace_hash_empty(hash)) { @@ -1022,6 +1027,11 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr) preempt_disable_notrace(); + /* + * Have to open code "rcu_dereference_sched()" because the + * function graph tracer can be called when RCU is not + * "watching". + */ notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, !preemptible()); -- cgit v1.2.3 From 54a16ff6f2e50775145b210bcd94d62c3c2af117 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 5 Feb 2020 09:20:32 -0500 Subject: ftrace: Protect ftrace_graph_hash with ftrace_sync As function_graph tracer can run when RCU is not "watching", it can not be protected by synchronize_rcu() it requires running a task on each CPU before it can be freed. Calling schedule_on_each_cpu(ftrace_sync) needs to be used. Link: https://lore.kernel.org/r/20200205131110.GT2935@paulmck-ThinkPad-P72 Cc: stable@vger.kernel.org Fixes: b9b0c831bed26 ("ftrace: Convert graph filter to use hash tables") Reported-by: "Paul E. McKenney" Reviewed-by: Joel Fernandes (Google) Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ftrace.c | 11 +++++++++-- kernel/trace/trace.h | 2 ++ 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'kernel/trace') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 481ede3eac13..3f7ee102868a 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -5867,8 +5867,15 @@ ftrace_graph_release(struct inode *inode, struct file *file) mutex_unlock(&graph_lock); - /* Wait till all users are no longer using the old hash */ - synchronize_rcu(); + /* + * We need to do a hard force of sched synchronization. + * This is because we use preempt_disable() to do RCU, but + * the function tracers can be called where RCU is not watching + * (like before user_exit()). We can not rely on the RCU + * infrastructure to do the synchronization, thus we must do it + * ourselves. + */ + schedule_on_each_cpu(ftrace_sync); free_ftrace_hash(old_hash); } diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 8c52f5de9384..3c75d29bd861 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -979,6 +979,7 @@ static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace) * Have to open code "rcu_dereference_sched()" because the * function graph tracer can be called when RCU is not * "watching". + * Protected with schedule_on_each_cpu(ftrace_sync) */ hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible()); @@ -1031,6 +1032,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr) * Have to open code "rcu_dereference_sched()" because the * function graph tracer can be called when RCU is not * "watching". + * Protected with schedule_on_each_cpu(ftrace_sync) */ notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, !preemptible()); -- cgit v1.2.3
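Taken together, the last three patches leave the graph-filter readers with the following pattern, condensed here as an illustrative sketch of the hunks above (addr stands for the function address being checked):

        struct ftrace_hash *hash;

        preempt_disable_notrace();
        /*
         * Open-coded rcu_dereference_sched(): the function graph tracer can
         * run when RCU is not "watching", so readers rely on preemption
         * being disabled here, and writers wait with
         * schedule_on_each_cpu(ftrace_sync) before freeing the old hash.
         */
        hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

        if (!ftrace_hash_empty(hash) && ftrace_lookup_ip(hash, addr))
                /* addr is in the filter hash */;

        preempt_enable_notrace();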