author    | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-10 19:58:13 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-10 19:58:13 -0800
commit    | 1dd7dcb6eaa677b034e7ef63df8320277507ae70 (patch)
tree      | 3f1592b634d7bdde94e00570925be2dade8433d4 /kernel/trace
parent    | b6da0076bab5a12afb19312ffee41c95490af2a0 (diff)
parent    | 3558a5ac50dbb2419cc649d5e154af161d661037 (diff)
Merge tag 'trace-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"There was a lot of clean ups and minor fixes. One of those clean ups
was to the trace_seq code. It also removed the return values to the
trace_seq_*() functions and use trace_seq_has_overflowed() to see if
the buffer filled up or not. This is similar to work being done to
the seq_file code as well in another tree.
Some of the other goodies include:
- Added some "!" (NOT) logic to the tracing filter.
- Fixed the frame pointer logic to the x86_64 mcount trampolines
- Added the logic for dynamic trampolines on !CONFIG_PREEMPT systems.
That is, the ftrace trampoline can be dynamically allocated and be
called directly by functions that only have a single hook to them"
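As a rough illustration of the trace_seq conversion described above, here is a minimal sketch of an event print callback written in the new style. The helper names (trace_seq_printf, trace_seq_puts, trace_seq_putc, trace_handle_return) and the iterator fields are the in-kernel ones visible in the diff below; the function itself, example_output_line, is hypothetical and only shows the shape of the change, not code from the patch.

/*
 * Illustrative sketch only (not part of the patch): the trace_seq_*()
 * helpers no longer return a value; overflow is recorded inside the
 * trace_seq and checked once via trace_handle_return(), which maps it
 * to TRACE_TYPE_HANDLED or TRACE_TYPE_PARTIAL_LINE.
 */
static enum print_line_t example_output_line(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	/* Just emit; no per-call return value checks anymore. */
	trace_seq_printf(s, "%d %d %llu ", iter->ent->pid, iter->cpu, iter->ts);
	trace_seq_puts(s, "example event");
	trace_seq_putc(s, '\n');

	/* One sticky overflow check replaces all the old checks. */
	return trace_handle_return(s);
}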
* tag 'trace-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (55 commits)
tracing: Truncated output is better than nothing
tracing: Add additional marks to signal very large time deltas
Documentation: describe trace_buf_size parameter more accurately
tracing: Allow NOT to filter AND and OR clauses
tracing: Add NOT to filtering logic
ftrace/fgraph/x86: Have prepare_ftrace_return() take ip as first parameter
ftrace/x86: Get rid of ftrace_caller_setup
ftrace/x86: Have save_mcount_regs macro also save stack frames if needed
ftrace/x86: Add macro MCOUNT_REG_SIZE for amount of stack used to save mcount regs
ftrace/x86: Simplify save_mcount_regs on getting RIP
ftrace/x86: Have save_mcount_regs store RIP in %rdi for first parameter
ftrace/x86: Rename MCOUNT_SAVE_FRAME and add more detailed comments
ftrace/x86: Move MCOUNT_SAVE_FRAME out of header file
ftrace/x86: Have static tracing also use ftrace_caller_setup
ftrace/x86: Have static function tracing always test for function graph
kprobes: Add IPMODIFY flag to kprobe_ftrace_ops
ftrace, kprobes: Support IPMODIFY flag to find IP modify conflict
kprobes/ftrace: Recover original IP if pre_handler doesn't change it
tracing/trivial: Fix typos and make an int into a bool
tracing: Deletion of an unnecessary check before iput()
...
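The "NOT to filtering logic" patches listed above show up in trace_events_filter.c in the diff below. The following standalone model (not kernel code) illustrates the core idea: a predicate's raw comparison result is reduced to a boolean and compared against the inverted "not" flag, so prefixing a clause with "!" flips the match. The field names mirror the kernel's struct filter_pred; everything else here is made up for illustration.

#include <stdbool.h>
#include <stdio.h>

struct filter_pred {
	long long val;	/* value the event field is compared against */
	int not;	/* set when the predicate is prefixed with '!' */
};

static bool pred_eq_matches(const struct filter_pred *pred, long long field_val)
{
	int match = (field_val == pred->val);

	/* the match must agree with the (possibly negated) sense of the predicate */
	return !!match == !pred->not;
}

int main(void)
{
	struct filter_pred p = { .val = 42, .not = 1 };	/* models "!field == 42" */

	printf("%d %d\n", pred_eq_matches(&p, 42), pred_eq_matches(&p, 7));
	/* prints "0 1": 42 is rejected, everything else passes */
	return 0;
}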
Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/blktrace.c              | 148
-rw-r--r-- | kernel/trace/ftrace.c                | 330
-rw-r--r-- | kernel/trace/ring_buffer.c           |  75
-rw-r--r-- | kernel/trace/trace.c                 | 159
-rw-r--r-- | kernel/trace/trace.h                 |  17
-rw-r--r-- | kernel/trace/trace_branch.c          |  47
-rw-r--r-- | kernel/trace/trace_events.c          |   4
-rw-r--r-- | kernel/trace/trace_events_filter.c   |  29
-rw-r--r-- | kernel/trace/trace_events_trigger.c  |   6
-rw-r--r-- | kernel/trace/trace_functions.c       | 119
-rw-r--r-- | kernel/trace/trace_functions_graph.c | 412
-rw-r--r-- | kernel/trace/trace_kdb.c             |  21
-rw-r--r-- | kernel/trace/trace_kprobe.c          |  46
-rw-r--r-- | kernel/trace/trace_mmiotrace.c       |  52
-rw-r--r-- | kernel/trace/trace_output.c          | 446
-rw-r--r-- | kernel/trace/trace_output.h          |  16
-rw-r--r-- | kernel/trace/trace_printk.c          |   2
-rw-r--r-- | kernel/trace/trace_probe.c           |  10
-rw-r--r-- | kernel/trace/trace_sched_switch.c    | 144
-rw-r--r-- | kernel/trace/trace_sched_wakeup.c    |  56
-rw-r--r-- | kernel/trace/trace_seq.c             |  86
-rw-r--r-- | kernel/trace/trace_syscalls.c        |  47
-rw-r--r-- | kernel/trace/trace_uprobe.c          |  28
23 files changed, 1125 insertions(+), 1175 deletions(-)
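Before the patch itself, here is the promised standalone sketch of the dynamic-trampoline address check. It models what the new is_ftrace_trampoline() in the ftrace.c hunk below does: an ops with a dynamically allocated trampoline records its start and size, and a text address is treated as ftrace-owned when it falls inside any such range. The real kernel code walks ftrace_ops_list with preemption disabled; this simplified model checks a plain array instead, and all names here are invented for illustration.

#include <stdbool.h>
#include <stddef.h>

struct ops_model {
	unsigned long trampoline;	/* start of allocated trampoline, 0 if none */
	unsigned long trampoline_size;	/* size in bytes, 0 if none */
};

static bool addr_is_trampoline(const struct ops_model *ops, size_t nr_ops,
			       unsigned long addr)
{
	for (size_t i = 0; i < nr_ops; i++) {
		const struct ops_model *op = &ops[i];

		/* only ops that actually allocated a trampoline are candidates */
		if (!op->trampoline || !op->trampoline_size)
			continue;
		if (addr >= op->trampoline &&
		    addr < op->trampoline + op->trampoline_size)
			return true;
	}
	return false;
}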
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index c1bd4ada2a04..11b9cb36092b 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -1142,9 +1142,9 @@ static void get_pdu_remap(const struct trace_entry *ent, r->sector_from = be64_to_cpu(sector_from); } -typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act); +typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act); -static int blk_log_action_classic(struct trace_iterator *iter, const char *act) +static void blk_log_action_classic(struct trace_iterator *iter, const char *act) { char rwbs[RWBS_LEN]; unsigned long long ts = iter->ts; @@ -1154,33 +1154,33 @@ static int blk_log_action_classic(struct trace_iterator *iter, const char *act) fill_rwbs(rwbs, t); - return trace_seq_printf(&iter->seq, - "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ", - MAJOR(t->device), MINOR(t->device), iter->cpu, - secs, nsec_rem, iter->ent->pid, act, rwbs); + trace_seq_printf(&iter->seq, + "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ", + MAJOR(t->device), MINOR(t->device), iter->cpu, + secs, nsec_rem, iter->ent->pid, act, rwbs); } -static int blk_log_action(struct trace_iterator *iter, const char *act) +static void blk_log_action(struct trace_iterator *iter, const char *act) { char rwbs[RWBS_LEN]; const struct blk_io_trace *t = te_blk_io_trace(iter->ent); fill_rwbs(rwbs, t); - return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", - MAJOR(t->device), MINOR(t->device), act, rwbs); + trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", + MAJOR(t->device), MINOR(t->device), act, rwbs); } -static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent) +static void blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent) { const unsigned char *pdu_buf; int pdu_len; - int i, end, ret; + int i, end; pdu_buf = pdu_start(ent); pdu_len = te_blk_io_trace(ent)->pdu_len; if (!pdu_len) - return 1; + return; /* find the last zero that needs to be printed */ for (end = pdu_len - 1; end >= 0; end--) @@ -1188,119 +1188,107 @@ static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent) break; end++; - if (!trace_seq_putc(s, '(')) - return 0; + trace_seq_putc(s, '('); for (i = 0; i < pdu_len; i++) { - ret = trace_seq_printf(s, "%s%02x", - i == 0 ? "" : " ", pdu_buf[i]); - if (!ret) - return ret; + trace_seq_printf(s, "%s%02x", + i == 0 ? "" : " ", pdu_buf[i]); /* * stop when the rest is just zeroes and indicate so * with a ".." appended */ - if (i == end && end != pdu_len - 1) - return trace_seq_puts(s, " ..) "); + if (i == end && end != pdu_len - 1) { + trace_seq_puts(s, " ..) 
"); + return; + } } - return trace_seq_puts(s, ") "); + trace_seq_puts(s, ") "); } -static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent) +static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent) { char cmd[TASK_COMM_LEN]; trace_find_cmdline(ent->pid, cmd); if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { - int ret; - - ret = trace_seq_printf(s, "%u ", t_bytes(ent)); - if (!ret) - return 0; - ret = blk_log_dump_pdu(s, ent); - if (!ret) - return 0; - return trace_seq_printf(s, "[%s]\n", cmd); + trace_seq_printf(s, "%u ", t_bytes(ent)); + blk_log_dump_pdu(s, ent); + trace_seq_printf(s, "[%s]\n", cmd); } else { if (t_sec(ent)) - return trace_seq_printf(s, "%llu + %u [%s]\n", + trace_seq_printf(s, "%llu + %u [%s]\n", t_sector(ent), t_sec(ent), cmd); - return trace_seq_printf(s, "[%s]\n", cmd); + else + trace_seq_printf(s, "[%s]\n", cmd); } } -static int blk_log_with_error(struct trace_seq *s, +static void blk_log_with_error(struct trace_seq *s, const struct trace_entry *ent) { if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { - int ret; - - ret = blk_log_dump_pdu(s, ent); - if (ret) - return trace_seq_printf(s, "[%d]\n", t_error(ent)); - return 0; + blk_log_dump_pdu(s, ent); + trace_seq_printf(s, "[%d]\n", t_error(ent)); } else { if (t_sec(ent)) - return trace_seq_printf(s, "%llu + %u [%d]\n", - t_sector(ent), - t_sec(ent), t_error(ent)); - return trace_seq_printf(s, "%llu [%d]\n", - t_sector(ent), t_error(ent)); + trace_seq_printf(s, "%llu + %u [%d]\n", + t_sector(ent), + t_sec(ent), t_error(ent)); + else + trace_seq_printf(s, "%llu [%d]\n", + t_sector(ent), t_error(ent)); } } -static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent) +static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent) { struct blk_io_trace_remap r = { .device_from = 0, }; get_pdu_remap(ent, &r); - return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", - t_sector(ent), t_sec(ent), - MAJOR(r.device_from), MINOR(r.device_from), - (unsigned long long)r.sector_from); + trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", + t_sector(ent), t_sec(ent), + MAJOR(r.device_from), MINOR(r.device_from), + (unsigned long long)r.sector_from); } -static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent) +static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent) { char cmd[TASK_COMM_LEN]; trace_find_cmdline(ent->pid, cmd); - return trace_seq_printf(s, "[%s]\n", cmd); + trace_seq_printf(s, "[%s]\n", cmd); } -static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent) +static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent) { char cmd[TASK_COMM_LEN]; trace_find_cmdline(ent->pid, cmd); - return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent)); + trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent)); } -static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent) +static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent) { char cmd[TASK_COMM_LEN]; trace_find_cmdline(ent->pid, cmd); - return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent), - get_pdu_int(ent), cmd); + trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent), + get_pdu_int(ent), cmd); } -static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent) +static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent) { - int ret; const struct blk_io_trace *t = te_blk_io_trace(ent); - ret = trace_seq_putmem(s, t + 1, t->pdu_len); - if (ret) - 
return trace_seq_putc(s, '\n'); - return ret; + trace_seq_putmem(s, t + 1, t->pdu_len); + trace_seq_putc(s, '\n'); } /* @@ -1339,7 +1327,7 @@ static void blk_tracer_reset(struct trace_array *tr) static const struct { const char *act[2]; - int (*print)(struct trace_seq *s, const struct trace_entry *ent); + void (*print)(struct trace_seq *s, const struct trace_entry *ent); } what2act[] = { [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, @@ -1364,7 +1352,6 @@ static enum print_line_t print_one_line(struct trace_iterator *iter, struct trace_seq *s = &iter->seq; const struct blk_io_trace *t; u16 what; - int ret; bool long_act; blk_log_action_t *log_action; @@ -1374,21 +1361,18 @@ static enum print_line_t print_one_line(struct trace_iterator *iter, log_action = classic ? &blk_log_action_classic : &blk_log_action; if (t->action == BLK_TN_MESSAGE) { - ret = log_action(iter, long_act ? "message" : "m"); - if (ret) - ret = blk_log_msg(s, iter->ent); - goto out; + log_action(iter, long_act ? "message" : "m"); + blk_log_msg(s, iter->ent); } if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) - ret = trace_seq_printf(s, "Unknown action %x\n", what); + trace_seq_printf(s, "Unknown action %x\n", what); else { - ret = log_action(iter, what2act[what].act[long_act]); - if (ret) - ret = what2act[what].print(s, iter->ent); + log_action(iter, what2act[what].act[long_act]); + what2act[what].print(s, iter->ent); } -out: - return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; + + return trace_handle_return(s); } static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, @@ -1397,7 +1381,7 @@ static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, return print_one_line(iter, false); } -static int blk_trace_synthesize_old_trace(struct trace_iterator *iter) +static void blk_trace_synthesize_old_trace(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; @@ -1407,18 +1391,18 @@ static int blk_trace_synthesize_old_trace(struct trace_iterator *iter) .time = iter->ts, }; - if (!trace_seq_putmem(s, &old, offset)) - return 0; - return trace_seq_putmem(s, &t->sector, - sizeof(old) - offset + t->pdu_len); + trace_seq_putmem(s, &old, offset); + trace_seq_putmem(s, &t->sector, + sizeof(old) - offset + t->pdu_len); } static enum print_line_t blk_trace_event_print_binary(struct trace_iterator *iter, int flags, struct trace_event *event) { - return blk_trace_synthesize_old_trace(iter) ? 
- TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; + blk_trace_synthesize_old_trace(iter); + + return trace_handle_return(&iter->seq); } static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 31c90fec4158..929a733d302e 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -387,6 +387,8 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list, return ret; } +static void ftrace_update_trampoline(struct ftrace_ops *ops); + static int __register_ftrace_function(struct ftrace_ops *ops) { if (ops->flags & FTRACE_OPS_FL_DELETED) @@ -416,9 +418,13 @@ static int __register_ftrace_function(struct ftrace_ops *ops) if (control_ops_alloc(ops)) return -ENOMEM; add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops); + /* The control_ops needs the trampoline update */ + ops = &control_ops; } else add_ftrace_ops(&ftrace_ops_list, ops); + ftrace_update_trampoline(ops); + if (ftrace_enabled) update_ftrace_function(); @@ -565,13 +571,13 @@ static int function_stat_cmp(void *p1, void *p2) static int function_stat_headers(struct seq_file *m) { #ifdef CONFIG_FUNCTION_GRAPH_TRACER - seq_printf(m, " Function " - "Hit Time Avg s^2\n" - " -------- " - "--- ---- --- ---\n"); + seq_puts(m, " Function " + "Hit Time Avg s^2\n" + " -------- " + "--- ---- --- ---\n"); #else - seq_printf(m, " Function Hit\n" - " -------- ---\n"); + seq_puts(m, " Function Hit\n" + " -------- ---\n"); #endif return 0; } @@ -598,7 +604,7 @@ static int function_stat_show(struct seq_file *m, void *v) seq_printf(m, " %-30.30s %10lu", str, rec->counter); #ifdef CONFIG_FUNCTION_GRAPH_TRACER - seq_printf(m, " "); + seq_puts(m, " "); avg = rec->time; do_div(avg, rec->counter); @@ -1111,6 +1117,43 @@ static struct ftrace_ops global_ops = { FTRACE_OPS_FL_INITIALIZED, }; +/* + * This is used by __kernel_text_address() to return true if the + * address is on a dynamically allocated trampoline that would + * not return true for either core_kernel_text() or + * is_module_text_address(). + */ +bool is_ftrace_trampoline(unsigned long addr) +{ + struct ftrace_ops *op; + bool ret = false; + + /* + * Some of the ops may be dynamically allocated, + * they are freed after a synchronize_sched(). + */ + preempt_disable_notrace(); + + do_for_each_ftrace_op(op, ftrace_ops_list) { + /* + * This is to check for dynamically allocated trampolines. + * Trampolines that are in kernel text will have + * core_kernel_text() return true. 
+ */ + if (op->trampoline && op->trampoline_size) + if (addr >= op->trampoline && + addr < op->trampoline + op->trampoline_size) { + ret = true; + goto out; + } + } while_for_each_ftrace_op(op); + + out: + preempt_enable_notrace(); + + return ret; +} + struct ftrace_page { struct ftrace_page *next; struct dyn_ftrace *records; @@ -1315,6 +1358,9 @@ ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash); static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash); +static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, + struct ftrace_hash *new_hash); + static int ftrace_hash_move(struct ftrace_ops *ops, int enable, struct ftrace_hash **dst, struct ftrace_hash *src) @@ -1325,8 +1371,13 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, struct ftrace_hash *new_hash; int size = src->count; int bits = 0; + int ret; int i; + /* Reject setting notrace hash on IPMODIFY ftrace_ops */ + if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable) + return -EINVAL; + /* * If the new source is empty, just free dst and assign it * the empty_hash. @@ -1360,6 +1411,16 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, } update: + /* Make sure this can be applied if it is IPMODIFY ftrace_ops */ + if (enable) { + /* IPMODIFY should be updated only when filter_hash updating */ + ret = ftrace_hash_ipmodify_update(ops, new_hash); + if (ret < 0) { + free_ftrace_hash(new_hash); + return ret; + } + } + /* * Remove the current set, update the hash and add * them back. @@ -1724,6 +1785,114 @@ static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, ftrace_hash_rec_update_modify(ops, filter_hash, 1); } +/* + * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK + * or no-needed to update, -EBUSY if it detects a conflict of the flag + * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs. + * Note that old_hash and new_hash has below meanings + * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected) + * - If the hash is EMPTY_HASH, it hits nothing + * - Anything else hits the recs which match the hash entries. + */ +static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, + struct ftrace_hash *old_hash, + struct ftrace_hash *new_hash) +{ + struct ftrace_page *pg; + struct dyn_ftrace *rec, *end = NULL; + int in_old, in_new; + + /* Only update if the ops has been registered */ + if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) + return 0; + + if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) + return 0; + + /* + * Since the IPMODIFY is a very address sensitive action, we do not + * allow ftrace_ops to set all functions to new hash. 
+ */ + if (!new_hash || !old_hash) + return -EINVAL; + + /* Update rec->flags */ + do_for_each_ftrace_rec(pg, rec) { + /* We need to update only differences of filter_hash */ + in_old = !!ftrace_lookup_ip(old_hash, rec->ip); + in_new = !!ftrace_lookup_ip(new_hash, rec->ip); + if (in_old == in_new) + continue; + + if (in_new) { + /* New entries must ensure no others are using it */ + if (rec->flags & FTRACE_FL_IPMODIFY) + goto rollback; + rec->flags |= FTRACE_FL_IPMODIFY; + } else /* Removed entry */ + rec->flags &= ~FTRACE_FL_IPMODIFY; + } while_for_each_ftrace_rec(); + + return 0; + +rollback: + end = rec; + + /* Roll back what we did above */ + do_for_each_ftrace_rec(pg, rec) { + if (rec == end) + goto err_out; + + in_old = !!ftrace_lookup_ip(old_hash, rec->ip); + in_new = !!ftrace_lookup_ip(new_hash, rec->ip); + if (in_old == in_new) + continue; + + if (in_new) + rec->flags &= ~FTRACE_FL_IPMODIFY; + else + rec->flags |= FTRACE_FL_IPMODIFY; + } while_for_each_ftrace_rec(); + +err_out: + return -EBUSY; +} + +static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops) +{ + struct ftrace_hash *hash = ops->func_hash->filter_hash; + + if (ftrace_hash_empty(hash)) + hash = NULL; + + return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash); +} + +/* Disabling always succeeds */ +static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops) +{ + struct ftrace_hash *hash = ops->func_hash->filter_hash; + + if (ftrace_hash_empty(hash)) + hash = NULL; + + __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH); +} + +static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, + struct ftrace_hash *new_hash) +{ + struct ftrace_hash *old_hash = ops->func_hash->filter_hash; + + if (ftrace_hash_empty(old_hash)) + old_hash = NULL; + + if (ftrace_hash_empty(new_hash)) + new_hash = NULL; + + return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash); +} + static void print_ip_ins(const char *fmt, unsigned char *p) { int i; @@ -1734,10 +1903,13 @@ static void print_ip_ins(const char *fmt, unsigned char *p) printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); } +static struct ftrace_ops * +ftrace_find_tramp_ops_any(struct dyn_ftrace *rec); + /** * ftrace_bug - report and shutdown function tracer * @failed: The failed type (EFAULT, EINVAL, EPERM) - * @ip: The address that failed + * @rec: The record that failed * * The arch code that enables or disables the function tracing * can call ftrace_bug() when it has detected a problem in @@ -1746,8 +1918,10 @@ static void print_ip_ins(const char *fmt, unsigned char *p) * EINVAL - if what is read at @ip is not what was expected * EPERM - if the problem happens on writting to the @ip address */ -void ftrace_bug(int failed, unsigned long ip) +void ftrace_bug(int failed, struct dyn_ftrace *rec) { + unsigned long ip = rec ? rec->ip : 0; + switch (failed) { case -EFAULT: FTRACE_WARN_ON_ONCE(1); @@ -1759,7 +1933,7 @@ void ftrace_bug(int failed, unsigned long ip) pr_info("ftrace failed to modify "); print_ip_sym(ip); print_ip_ins(" actual: ", (unsigned char *)ip); - printk(KERN_CONT "\n"); + pr_cont("\n"); break; case -EPERM: FTRACE_WARN_ON_ONCE(1); @@ -1771,6 +1945,24 @@ void ftrace_bug(int failed, unsigned long ip) pr_info("ftrace faulted on unknown error "); print_ip_sym(ip); } + if (rec) { + struct ftrace_ops *ops = NULL; + + pr_info("ftrace record flags: %lx\n", rec->flags); + pr_cont(" (%ld)%s", ftrace_rec_count(rec), + rec->flags & FTRACE_FL_REGS ? 
" R" : " "); + if (rec->flags & FTRACE_FL_TRAMP_EN) { + ops = ftrace_find_tramp_ops_any(rec); + if (ops) + pr_cont("\ttramp: %pS", + (void *)ops->trampoline); + else + pr_cont("\ttramp: ERROR!"); + + } + ip = ftrace_get_addr_curr(rec); + pr_cont(" expected tramp: %lx\n", ip); + } } static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) @@ -2093,7 +2285,7 @@ void __weak ftrace_replace_code(int enable) do_for_each_ftrace_rec(pg, rec) { failed = __ftrace_replace_code(rec, enable); if (failed) { - ftrace_bug(failed, rec->ip); + ftrace_bug(failed, rec); /* Stop processing */ return; } @@ -2175,17 +2367,14 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) static int ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) { - unsigned long ip; int ret; - ip = rec->ip; - if (unlikely(ftrace_disabled)) return 0; ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); if (ret) { - ftrace_bug(ret, ip); + ftrace_bug(ret, rec); return 0; } return 1; @@ -2320,6 +2509,10 @@ static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, static ftrace_func_t saved_ftrace_func; static int ftrace_start_up; +void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) +{ +} + static void control_ops_free(struct ftrace_ops *ops) { free_percpu(ops->disabled); @@ -2369,6 +2562,15 @@ static int ftrace_startup(struct ftrace_ops *ops, int command) */ ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; + ret = ftrace_hash_ipmodify_enable(ops); + if (ret < 0) { + /* Rollback registration process */ + __unregister_ftrace_function(ops); + ftrace_start_up--; + ops->flags &= ~FTRACE_OPS_FL_ENABLED; + return ret; + } + ftrace_hash_rec_enable(ops, 1); ftrace_startup_enable(command); @@ -2397,6 +2599,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) */ WARN_ON_ONCE(ftrace_start_up < 0); + /* Disabling ipmodify never fails */ + ftrace_hash_ipmodify_disable(ops); ftrace_hash_rec_disable(ops, 1); ops->flags &= ~FTRACE_OPS_FL_ENABLED; @@ -2471,6 +2675,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) { schedule_on_each_cpu(ftrace_sync); + arch_ftrace_trampoline_free(ops); + if (ops->flags & FTRACE_OPS_FL_CONTROL) control_ops_free(ops); } @@ -2623,7 +2829,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) if (ftrace_start_up && cnt) { int failed = __ftrace_replace_code(p, 1); if (failed) - ftrace_bug(failed, p->ip); + ftrace_bug(failed, p); } } } @@ -2948,6 +3154,22 @@ static void t_stop(struct seq_file *m, void *p) mutex_unlock(&ftrace_lock); } +void * __weak +arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) +{ + return NULL; +} + +static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, + struct dyn_ftrace *rec) +{ + void *ptr; + + ptr = arch_ftrace_trampoline_func(ops, rec); + if (ptr) + seq_printf(m, " ->%pS", ptr); +} + static int t_show(struct seq_file *m, void *v) { struct ftrace_iterator *iter = m->private; @@ -2958,9 +3180,9 @@ static int t_show(struct seq_file *m, void *v) if (iter->flags & FTRACE_ITER_PRINTALL) { if (iter->flags & FTRACE_ITER_NOTRACE) - seq_printf(m, "#### no functions disabled ####\n"); + seq_puts(m, "#### no functions disabled ####\n"); else - seq_printf(m, "#### all functions enabled ####\n"); + seq_puts(m, "#### all functions enabled ####\n"); return 0; } @@ -2971,22 +3193,25 @@ static int t_show(struct seq_file *m, void *v) seq_printf(m, "%ps", (void 
*)rec->ip); if (iter->flags & FTRACE_ITER_ENABLED) { - seq_printf(m, " (%ld)%s", + struct ftrace_ops *ops = NULL; + + seq_printf(m, " (%ld)%s%s", ftrace_rec_count(rec), - rec->flags & FTRACE_FL_REGS ? " R" : " "); + rec->flags & FTRACE_FL_REGS ? " R" : " ", + rec->flags & FTRACE_FL_IPMODIFY ? " I" : " "); if (rec->flags & FTRACE_FL_TRAMP_EN) { - struct ftrace_ops *ops; - ops = ftrace_find_tramp_ops_any(rec); if (ops) seq_printf(m, "\ttramp: %pS", (void *)ops->trampoline); else - seq_printf(m, "\ttramp: ERROR!"); + seq_puts(m, "\ttramp: ERROR!"); + } + add_trampoline_func(m, ops, rec); } - seq_printf(m, "\n"); + seq_putc(m, '\n'); return 0; } @@ -3020,9 +3245,6 @@ ftrace_enabled_open(struct inode *inode, struct file *file) { struct ftrace_iterator *iter; - if (unlikely(ftrace_disabled)) - return -ENODEV; - iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); if (iter) { iter->pg = ftrace_pages_start; @@ -3975,6 +4197,9 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); +static unsigned long save_global_trampoline; +static unsigned long save_global_flags; + static int __init set_graph_function(char *str) { strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); @@ -4183,9 +4408,9 @@ static int g_show(struct seq_file *m, void *v) struct ftrace_graph_data *fgd = m->private; if (fgd->table == ftrace_graph_funcs) - seq_printf(m, "#### all functions enabled ####\n"); + seq_puts(m, "#### all functions enabled ####\n"); else - seq_printf(m, "#### no functions disabled ####\n"); + seq_puts(m, "#### no functions disabled ####\n"); return 0; } @@ -4696,6 +4921,32 @@ void __init ftrace_init(void) ftrace_disabled = 1; } +/* Do nothing if arch does not support this */ +void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) +{ +} + +static void ftrace_update_trampoline(struct ftrace_ops *ops) +{ + +/* + * Currently there's no safe way to free a trampoline when the kernel + * is configured with PREEMPT. That is because a task could be preempted + * when it jumped to the trampoline, it may be preempted for a long time + * depending on the system load, and currently there's no way to know + * when it will be off the trampoline. If the trampoline is freed + * too early, when the task runs again, it will be executing on freed + * memory and crash. 
+ */ +#ifdef CONFIG_PREEMPT + /* Currently, only non dynamic ops can have a trampoline */ + if (ops->flags & FTRACE_OPS_FL_DYNAMIC) + return; +#endif + + arch_ftrace_update_trampoline(ops); +} + #else static struct ftrace_ops global_ops = { @@ -4738,6 +4989,10 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) return 1; } +static void ftrace_update_trampoline(struct ftrace_ops *ops) +{ +} + #endif /* CONFIG_DYNAMIC_FTRACE */ __init void ftrace_init_global_array_ops(struct trace_array *tr) @@ -5075,12 +5330,12 @@ static int fpid_show(struct seq_file *m, void *v) const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); if (v == (void *)1) { - seq_printf(m, "no pid\n"); + seq_puts(m, "no pid\n"); return 0; } if (fpid->pid == ftrace_swapper_pid) - seq_printf(m, "swapper tasks\n"); + seq_puts(m, "swapper tasks\n"); else seq_printf(m, "%u\n", pid_vnr(fpid->pid)); @@ -5293,6 +5548,7 @@ static struct ftrace_ops graph_ops = { FTRACE_OPS_FL_STUB, #ifdef FTRACE_GRAPH_TRAMP_ADDR .trampoline = FTRACE_GRAPH_TRAMP_ADDR, + /* trampoline_size is only needed for dynamically allocated tramps */ #endif ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) }; @@ -5522,7 +5778,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, update_function_graph_func(); ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET); - out: mutex_unlock(&ftrace_lock); return ret; @@ -5543,6 +5798,17 @@ void unregister_ftrace_graph(void) unregister_pm_notifier(&ftrace_suspend_notifier); unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); +#ifdef CONFIG_DYNAMIC_FTRACE + /* + * Function graph does not allocate the trampoline, but + * other global_ops do. We need to reset the ALLOC_TRAMP flag + * if one was used. + */ + global_ops.trampoline = save_global_trampoline; + if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP) + global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP; +#endif + out: mutex_unlock(&ftrace_lock); } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index a56e07c8d15b..7a4104cb95cb 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -34,21 +34,19 @@ static void update_pages_handler(struct work_struct *work); */ int ring_buffer_print_entry_header(struct trace_seq *s) { - int ret; - - ret = trace_seq_puts(s, "# compressed entry header\n"); - ret = trace_seq_puts(s, "\ttype_len : 5 bits\n"); - ret = trace_seq_puts(s, "\ttime_delta : 27 bits\n"); - ret = trace_seq_puts(s, "\tarray : 32 bits\n"); - ret = trace_seq_putc(s, '\n'); - ret = trace_seq_printf(s, "\tpadding : type == %d\n", - RINGBUF_TYPE_PADDING); - ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", - RINGBUF_TYPE_TIME_EXTEND); - ret = trace_seq_printf(s, "\tdata max type_len == %d\n", - RINGBUF_TYPE_DATA_TYPE_LEN_MAX); + trace_seq_puts(s, "# compressed entry header\n"); + trace_seq_puts(s, "\ttype_len : 5 bits\n"); + trace_seq_puts(s, "\ttime_delta : 27 bits\n"); + trace_seq_puts(s, "\tarray : 32 bits\n"); + trace_seq_putc(s, '\n'); + trace_seq_printf(s, "\tpadding : type == %d\n", + RINGBUF_TYPE_PADDING); + trace_seq_printf(s, "\ttime_extend : type == %d\n", + RINGBUF_TYPE_TIME_EXTEND); + trace_seq_printf(s, "\tdata max type_len == %d\n", + RINGBUF_TYPE_DATA_TYPE_LEN_MAX); - return ret; + return !trace_seq_has_overflowed(s); } /* @@ -419,32 +417,31 @@ static inline int test_time_stamp(u64 delta) int ring_buffer_print_page_header(struct trace_seq *s) { struct buffer_data_page field; - int ret; - - ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" - 
"offset:0;\tsize:%u;\tsigned:%u;\n", - (unsigned int)sizeof(field.time_stamp), - (unsigned int)is_signed_type(u64)); - - ret = trace_seq_printf(s, "\tfield: local_t commit;\t" - "offset:%u;\tsize:%u;\tsigned:%u;\n", - (unsigned int)offsetof(typeof(field), commit), - (unsigned int)sizeof(field.commit), - (unsigned int)is_signed_type(long)); - - ret = trace_seq_printf(s, "\tfield: int overwrite;\t" - "offset:%u;\tsize:%u;\tsigned:%u;\n", - (unsigned int)offsetof(typeof(field), commit), - 1, - (unsigned int)is_signed_type(long)); - - ret = trace_seq_printf(s, "\tfield: char data;\t" - "offset:%u;\tsize:%u;\tsigned:%u;\n", - (unsigned int)offsetof(typeof(field), data), - (unsigned int)BUF_PAGE_SIZE, - (unsigned int)is_signed_type(char)); - return ret; + trace_seq_printf(s, "\tfield: u64 timestamp;\t" + "offset:0;\tsize:%u;\tsigned:%u;\n", + (unsigned int)sizeof(field.time_stamp), + (unsigned int)is_signed_type(u64)); + + trace_seq_printf(s, "\tfield: local_t commit;\t" + "offset:%u;\tsize:%u;\tsigned:%u;\n", + (unsigned int)offsetof(typeof(field), commit), + (unsigned int)sizeof(field.commit), + (unsigned int)is_signed_type(long)); + + trace_seq_printf(s, "\tfield: int overwrite;\t" + "offset:%u;\tsize:%u;\tsigned:%u;\n", + (unsigned int)offsetof(typeof(field), commit), + 1, + (unsigned int)is_signed_type(long)); + + trace_seq_printf(s, "\tfield: char data;\t" + "offset:%u;\tsize:%u;\tsigned:%u;\n", + (unsigned int)offsetof(typeof(field), data), + (unsigned int)BUF_PAGE_SIZE, + (unsigned int)is_signed_type(char)); + + return !trace_seq_has_overflowed(s); } struct rb_irq_work { diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 426962b04183..ce11fa50a2f0 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -155,10 +155,11 @@ __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); static int __init stop_trace_on_warning(char *str) { - __disable_trace_on_warning = 1; + if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) + __disable_trace_on_warning = 1; return 1; } -__setup("traceoff_on_warning=", stop_trace_on_warning); +__setup("traceoff_on_warning", stop_trace_on_warning); static int __init boot_alloc_snapshot(char *str) { @@ -2158,9 +2159,7 @@ __trace_array_vprintk(struct ring_buffer *buffer, goto out; } - len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); - if (len > TRACE_BUF_SIZE) - goto out; + len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); local_save_flags(flags); size = sizeof(*entry) + len + 1; @@ -2171,8 +2170,7 @@ __trace_array_vprintk(struct ring_buffer *buffer, entry = ring_buffer_event_data(event); entry->ip = ip; - memcpy(&entry->buf, tbuffer, len); - entry->buf[len] = '\0'; + memcpy(&entry->buf, tbuffer, len + 1); if (!call_filter_check_discard(call, entry, buffer, event)) { __buffer_unlock_commit(buffer, event); ftrace_trace_stack(buffer, flags, 6, pc); @@ -2509,14 +2507,14 @@ get_total_entries(struct trace_buffer *buf, static void print_lat_help_header(struct seq_file *m) { - seq_puts(m, "# _------=> CPU# \n"); - seq_puts(m, "# / _-----=> irqs-off \n"); - seq_puts(m, "# | / _----=> need-resched \n"); - seq_puts(m, "# || / _---=> hardirq/softirq \n"); - seq_puts(m, "# ||| / _--=> preempt-depth \n"); - seq_puts(m, "# |||| / delay \n"); - seq_puts(m, "# cmd pid ||||| time | caller \n"); - seq_puts(m, "# \\ / ||||| \\ | / \n"); + seq_puts(m, "# _------=> CPU# \n" + "# / _-----=> irqs-off \n" + "# | / _----=> need-resched \n" + "# || / _---=> hardirq/softirq \n" + "# ||| / _--=> preempt-depth \n" + "# |||| / delay \n" + "# cmd pid ||||| time | 
caller \n" + "# \\ / ||||| \\ | / \n"); } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) @@ -2533,20 +2531,20 @@ static void print_event_info(struct trace_buffer *buf, struct seq_file *m) static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m) { print_event_info(buf, m); - seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); - seq_puts(m, "# | | | | |\n"); + seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n" + "# | | | | |\n"); } static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) { print_event_info(buf, m); - seq_puts(m, "# _-----=> irqs-off\n"); - seq_puts(m, "# / _----=> need-resched\n"); - seq_puts(m, "# | / _---=> hardirq/softirq\n"); - seq_puts(m, "# || / _--=> preempt-depth\n"); - seq_puts(m, "# ||| / delay\n"); - seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"); - seq_puts(m, "# | | | |||| | |\n"); + seq_puts(m, "# _-----=> irqs-off\n" + "# / _----=> need-resched\n" + "# | / _---=> hardirq/softirq\n" + "# || / _--=> preempt-depth\n" + "# ||| / delay\n" + "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n" + "# | | | |||| | |\n"); } void @@ -2649,24 +2647,21 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) event = ftrace_find_event(entry->type); if (trace_flags & TRACE_ITER_CONTEXT_INFO) { - if (iter->iter_flags & TRACE_FILE_LAT_FMT) { - if (!trace_print_lat_context(iter)) - goto partial; - } else { - if (!trace_print_context(iter)) - goto partial; - } + if (iter->iter_flags & TRACE_FILE_LAT_FMT) + trace_print_lat_context(iter); + else + trace_print_context(iter); } + if (trace_seq_has_overflowed(s)) + return TRACE_TYPE_PARTIAL_LINE; + if (event) return event->funcs->trace(iter, sym_flags, event); - if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) - goto partial; + trace_seq_printf(s, "Unknown type %d\n", entry->type); - return TRACE_TYPE_HANDLED; -partial: - return TRACE_TYPE_PARTIAL_LINE; + return trace_handle_return(s); } static enum print_line_t print_raw_fmt(struct trace_iterator *iter) @@ -2677,22 +2672,20 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter) entry = iter->ent; - if (trace_flags & TRACE_ITER_CONTEXT_INFO) { - if (!trace_seq_printf(s, "%d %d %llu ", - entry->pid, iter->cpu, iter->ts)) - goto partial; - } + if (trace_flags & TRACE_ITER_CONTEXT_INFO) + trace_seq_printf(s, "%d %d %llu ", + entry->pid, iter->cpu, iter->ts); + + if (trace_seq_has_overflowed(s)) + return TRACE_TYPE_PARTIAL_LINE; event = ftrace_find_event(entry->type); if (event) return event->funcs->raw(iter, 0, event); - if (!trace_seq_printf(s, "%d ?\n", entry->type)) - goto partial; + trace_seq_printf(s, "%d ?\n", entry->type); - return TRACE_TYPE_HANDLED; -partial: - return TRACE_TYPE_PARTIAL_LINE; + return trace_handle_return(s); } static enum print_line_t print_hex_fmt(struct trace_iterator *iter) @@ -2705,9 +2698,11 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) entry = iter->ent; if (trace_flags & TRACE_ITER_CONTEXT_INFO) { - SEQ_PUT_HEX_FIELD_RET(s, entry->pid); - SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); - SEQ_PUT_HEX_FIELD_RET(s, iter->ts); + SEQ_PUT_HEX_FIELD(s, entry->pid); + SEQ_PUT_HEX_FIELD(s, iter->cpu); + SEQ_PUT_HEX_FIELD(s, iter->ts); + if (trace_seq_has_overflowed(s)) + return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); @@ -2717,9 +2712,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) return ret; } - SEQ_PUT_FIELD_RET(s, newline); + SEQ_PUT_FIELD(s, newline); - return 
TRACE_TYPE_HANDLED; + return trace_handle_return(s); } static enum print_line_t print_bin_fmt(struct trace_iterator *iter) @@ -2731,9 +2726,11 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter) entry = iter->ent; if (trace_flags & TRACE_ITER_CONTEXT_INFO) { - SEQ_PUT_FIELD_RET(s, entry->pid); - SEQ_PUT_FIELD_RET(s, iter->cpu); - SEQ_PUT_FIELD_RET(s, iter->ts); + SEQ_PUT_FIELD(s, entry->pid); + SEQ_PUT_FIELD(s, iter->cpu); + SEQ_PUT_FIELD(s, iter->ts); + if (trace_seq_has_overflowed(s)) + return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); @@ -2779,10 +2776,12 @@ enum print_line_t print_trace_line(struct trace_iterator *iter) { enum print_line_t ret; - if (iter->lost_events && - !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", - iter->cpu, iter->lost_events)) - return TRACE_TYPE_PARTIAL_LINE; + if (iter->lost_events) { + trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", + iter->cpu, iter->lost_events); + if (trace_seq_has_overflowed(&iter->seq)) + return TRACE_TYPE_PARTIAL_LINE; + } if (iter->trace && iter->trace->print_line) { ret = iter->trace->print_line(iter); @@ -2860,44 +2859,44 @@ static void test_ftrace_alive(struct seq_file *m) { if (!ftrace_is_dead()) return; - seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"); - seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n"); + seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n" + "# MAY BE MISSING FUNCTION EVENTS\n"); } #ifdef CONFIG_TRACER_MAX_TRACE static void show_snapshot_main_help(struct seq_file *m) { - seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"); - seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); - seq_printf(m, "# Takes a snapshot of the main buffer.\n"); - seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"); - seq_printf(m, "# (Doesn't have to be '2' works with any number that\n"); - seq_printf(m, "# is not a '0' or '1')\n"); + seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" + "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" + "# Takes a snapshot of the main buffer.\n" + "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" + "# (Doesn't have to be '2' works with any number that\n" + "# is not a '0' or '1')\n"); } static void show_snapshot_percpu_help(struct seq_file *m) { - seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); + seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP - seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); - seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n"); + seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" + "# Takes a snapshot of the main buffer for this cpu.\n"); #else - seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n"); - seq_printf(m, "# Must use main snapshot file to allocate.\n"); + seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" + "# Must use main snapshot file to allocate.\n"); #endif - seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"); - seq_printf(m, "# (Doesn't have to be '2' works with any number that\n"); - seq_printf(m, "# is not a '0' or '1')\n"); + seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not 
allocate)\n" + "# (Doesn't have to be '2' works with any number that\n" + "# is not a '0' or '1')\n"); } static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { if (iter->tr->allocated_snapshot) - seq_printf(m, "#\n# * Snapshot is allocated *\n#\n"); + seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); else - seq_printf(m, "#\n# * Snapshot is freed *\n#\n"); + seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); - seq_printf(m, "# Snapshot commands:\n"); + seq_puts(m, "# Snapshot commands:\n"); if (iter->cpu_file == RING_BUFFER_ALL_CPUS) show_snapshot_main_help(m); else @@ -3251,7 +3250,7 @@ static int t_show(struct seq_file *m, void *v) if (!t) return 0; - seq_printf(m, "%s", t->name); + seq_puts(m, t->name); if (t->next) seq_putc(m, ' '); else @@ -5749,10 +5748,10 @@ ftrace_snapshot_print(struct seq_file *m, unsigned long ip, seq_printf(m, "%ps:", (void *)ip); - seq_printf(m, "snapshot"); + seq_puts(m, "snapshot"); if (count == -1) - seq_printf(m, ":unlimited\n"); + seq_puts(m, ":unlimited\n"); else seq_printf(m, ":count=%ld\n", count); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 385391fb1d3b..3255dfb054a0 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -14,6 +14,7 @@ #include <linux/trace_seq.h> #include <linux/ftrace_event.h> #include <linux/compiler.h> +#include <linux/trace_seq.h> #ifdef CONFIG_FTRACE_SYSCALLS #include <asm/unistd.h> /* For NR_SYSCALLS */ @@ -569,15 +570,6 @@ void trace_init_global_iter(struct trace_iterator *iter); void tracing_iter_reset(struct trace_iterator *iter, int cpu); -void tracing_sched_switch_trace(struct trace_array *tr, - struct task_struct *prev, - struct task_struct *next, - unsigned long flags, int pc); - -void tracing_sched_wakeup_trace(struct trace_array *tr, - struct task_struct *wakee, - struct task_struct *cur, - unsigned long flags, int pc); void trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, @@ -597,9 +589,6 @@ void set_graph_array(struct trace_array *tr); void tracing_start_cmdline_record(void); void tracing_stop_cmdline_record(void); -void tracing_sched_switch_assign_trace(struct trace_array *tr); -void tracing_stop_sched_switch_record(void); -void tracing_start_sched_switch_record(void); int register_tracer(struct tracer *type); int is_tracing_stopped(void); @@ -719,6 +708,8 @@ enum print_line_t print_trace_line(struct trace_iterator *iter); extern unsigned long trace_flags; +extern char trace_find_mark(unsigned long long duration); + /* Standard output formatting function used for function return traces */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -737,7 +728,7 @@ extern unsigned long trace_flags; extern enum print_line_t print_graph_function_flags(struct trace_iterator *iter, u32 flags); extern void print_graph_headers_flags(struct seq_file *s, u32 flags); -extern enum print_line_t +extern void trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); extern void graph_trace_open(struct trace_iterator *iter); extern void graph_trace_close(struct trace_iterator *iter); diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 697fb9bac8f0..7d6e2afde669 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c @@ -151,22 +151,21 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter, trace_assign_type(field, iter->ent); - if (trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n", - field->correct ? 
" ok " : " MISS ", - field->func, - field->file, - field->line)) - return TRACE_TYPE_PARTIAL_LINE; - - return TRACE_TYPE_HANDLED; + trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n", + field->correct ? " ok " : " MISS ", + field->func, + field->file, + field->line); + + return trace_handle_return(&iter->seq); } static void branch_print_header(struct seq_file *s) { seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT" - " FUNC:FILE:LINE\n"); - seq_puts(s, "# | | | | | " - " |\n"); + " FUNC:FILE:LINE\n" + "# | | | | | " + " |\n"); } static struct trace_event_functions trace_branch_funcs = { @@ -233,12 +232,12 @@ extern unsigned long __stop_annotated_branch_profile[]; static int annotated_branch_stat_headers(struct seq_file *m) { - seq_printf(m, " correct incorrect %% "); - seq_printf(m, " Function " - " File Line\n" - " ------- --------- - " - " -------- " - " ---- ----\n"); + seq_puts(m, " correct incorrect % " + " Function " + " File Line\n" + " ------- --------- - " + " -------- " + " ---- ----\n"); return 0; } @@ -274,7 +273,7 @@ static int branch_stat_show(struct seq_file *m, void *v) seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); if (percent < 0) - seq_printf(m, " X "); + seq_puts(m, " X "); else seq_printf(m, "%3ld ", percent); seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line); @@ -362,12 +361,12 @@ extern unsigned long __stop_branch_profile[]; static int all_branch_stat_headers(struct seq_file *m) { - seq_printf(m, " miss hit %% "); - seq_printf(m, " Function " - " File Line\n" - " ------- --------- - " - " -------- " - " ---- ----\n"); + seq_puts(m, " miss hit % " + " Function " + " File Line\n" + " ------- --------- - " + " -------- " + " ---- ----\n"); return 0; } diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 1b0df1e504f0..139716bcef7a 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -918,7 +918,7 @@ static int f_show(struct seq_file *m, void *v) case FORMAT_HEADER: seq_printf(m, "name: %s\n", ftrace_event_name(call)); seq_printf(m, "ID: %d\n", call->event.type); - seq_printf(m, "format:\n"); + seq_puts(m, "format:\n"); return 0; case FORMAT_FIELD_SEPERATOR: @@ -1988,7 +1988,7 @@ event_enable_print(struct seq_file *m, unsigned long ip, ftrace_event_name(data->file->event_call)); if (data->count == -1) - seq_printf(m, ":unlimited\n"); + seq_puts(m, ":unlimited\n"); else seq_printf(m, ":count=%ld\n", data->count); diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 7a8c1528e141..ced69da0ff55 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -45,6 +45,7 @@ enum filter_op_ids OP_GT, OP_GE, OP_BAND, + OP_NOT, OP_NONE, OP_OPEN_PAREN, }; @@ -67,6 +68,7 @@ static struct filter_op filter_ops[] = { { OP_GT, ">", 5 }, { OP_GE, ">=", 5 }, { OP_BAND, "&", 6 }, + { OP_NOT, "!", 6 }, { OP_NONE, "OP_NONE", 0 }, { OP_OPEN_PAREN, "(", 0 }, }; @@ -85,6 +87,7 @@ enum { FILT_ERR_MISSING_FIELD, FILT_ERR_INVALID_FILTER, FILT_ERR_IP_FIELD_ONLY, + FILT_ERR_ILLEGAL_NOT_OP, }; static char *err_text[] = { @@ -101,6 +104,7 @@ static char *err_text[] = { "Missing field name and/or value", "Meaningless filter expression", "Only 'ip' field is supported for function trace", + "Illegal use of '!'", }; struct opstack_op { @@ -139,6 +143,7 @@ struct pred_stack { int index; }; +/* If not of not match is equal to not of not, then it is a match */ #define DEFINE_COMPARISON_PRED(type) \ static int filter_pred_##type(struct filter_pred *pred, void *event) \ { \ @@ -166,7 
+171,7 @@ static int filter_pred_##type(struct filter_pred *pred, void *event) \ break; \ } \ \ - return match; \ + return !!match == !pred->not; \ } #define DEFINE_EQUALITY_PRED(size) \ @@ -484,9 +489,10 @@ static int process_ops(struct filter_pred *preds, if (!WARN_ON_ONCE(!pred->fn)) match = pred->fn(pred, rec); if (!!match == type) - return match; + break; } - return match; + /* If not of not match is equal to not of not, then it is a match */ + return !!match == !op->not; } struct filter_match_preds_data { @@ -735,10 +741,10 @@ static int filter_set_pred(struct event_filter *filter, * then this op can be folded. */ if (left->index & FILTER_PRED_FOLD && - (left->op == dest->op || + ((left->op == dest->op && !left->not) || left->left == FILTER_PRED_INVALID) && right->index & FILTER_PRED_FOLD && - (right->op == dest->op || + ((right->op == dest->op && !right->not) || right->left == FILTER_PRED_INVALID)) dest->index |= FILTER_PRED_FOLD; @@ -1028,7 +1034,7 @@ static int init_pred(struct filter_parse_state *ps, } if (pred->op == OP_NE) - pred->not = 1; + pred->not ^= 1; pred->fn = fn; return 0; @@ -1590,6 +1596,17 @@ static int replace_preds(struct ftrace_event_call *call, continue; } + if (elt->op == OP_NOT) { + if (!n_preds || operand1 || operand2) { + parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0); + err = -EINVAL; + goto fail; + } + if (!dry_run) + filter->preds[n_preds - 1].not ^= 1; + continue; + } + if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) { parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0); err = -ENOSPC; diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 4747b476a030..8712df9decb4 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -373,7 +373,7 @@ event_trigger_print(const char *name, struct seq_file *m, { long count = (long)data; - seq_printf(m, "%s", name); + seq_puts(m, name); if (count == -1) seq_puts(m, ":unlimited"); @@ -383,7 +383,7 @@ event_trigger_print(const char *name, struct seq_file *m, if (filter_str) seq_printf(m, " if %s\n", filter_str); else - seq_puts(m, "\n"); + seq_putc(m, '\n'); return 0; } @@ -1105,7 +1105,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, if (data->filter_str) seq_printf(m, " if %s\n", data->filter_str); else - seq_puts(m, "\n"); + seq_putc(m, '\n'); return 0; } diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 57f0ec962d2c..fcd41a166405 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -261,37 +261,74 @@ static struct tracer function_trace __tracer_data = }; #ifdef CONFIG_DYNAMIC_FTRACE -static int update_count(void **data) +static void update_traceon_count(void **data, bool on) { - unsigned long *count = (long *)data; + long *count = (long *)data; + long old_count = *count; - if (!*count) - return 0; + /* + * Tracing gets disabled (or enabled) once per count. + * This function can be called at the same time on multiple CPUs. + * It is fine if both disable (or enable) tracing, as disabling + * (or enabling) the second time doesn't do anything as the + * state of the tracer is already disabled (or enabled). + * What needs to be synchronized in this case is that the count + * only gets decremented once, even if the tracer is disabled + * (or enabled) twice, as the second one is really a nop. + * + * The memory barriers guarantee that we only decrement the + * counter once. 
First the count is read to a local variable + * and a read barrier is used to make sure that it is loaded + * before checking if the tracer is in the state we want. + * If the tracer is not in the state we want, then the count + * is guaranteed to be the old count. + * + * Next the tracer is set to the state we want (disabled or enabled) + * then a write memory barrier is used to make sure that + * the new state is visible before changing the counter by + * one minus the old counter. This guarantees that another CPU + * executing this code will see the new state before seeing + * the new counter value, and would not do anything if the new + * counter is seen. + * + * Note, there is no synchronization between this and a user + * setting the tracing_on file. But we currently don't care + * about that. + */ + if (!old_count) + return; - if (*count != -1) - (*count)--; + /* Make sure we see count before checking tracing state */ + smp_rmb(); - return 1; + if (on == !!tracing_is_on()) + return; + + if (on) + tracing_on(); + else + tracing_off(); + + /* unlimited? */ + if (old_count == -1) + return; + + /* Make sure tracing state is visible before updating count */ + smp_wmb(); + + *count = old_count - 1; } static void ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data) { - if (tracing_is_on()) - return; - - if (update_count(data)) - tracing_on(); + update_traceon_count(data, 1); } static void ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data) { - if (!tracing_is_on()) - return; - - if (update_count(data)) - tracing_off(); + update_traceon_count(data, 0); } static void @@ -330,11 +367,49 @@ ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data) static void ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data) { - if (!tracing_is_on()) - return; + long *count = (long *)data; + long old_count; + long new_count; - if (update_count(data)) - trace_dump_stack(STACK_SKIP); + /* + * Stack traces should only execute the number of times the + * user specified in the counter. + */ + do { + + if (!tracing_is_on()) + return; + + old_count = *count; + + if (!old_count) + return; + + /* unlimited? 
*/ + if (old_count == -1) { + trace_dump_stack(STACK_SKIP); + return; + } + + new_count = old_count - 1; + new_count = cmpxchg(count, old_count, new_count); + if (new_count == old_count) + trace_dump_stack(STACK_SKIP); + + } while (new_count != old_count); +} + +static int update_count(void **data) +{ + unsigned long *count = (long *)data; + + if (!*count) + return 0; + + if (*count != -1) + (*count)--; + + return 1; } static void @@ -361,7 +436,7 @@ ftrace_probe_print(const char *name, struct seq_file *m, seq_printf(m, "%ps:%s", (void *)ip, name); if (count == -1) - seq_printf(m, ":unlimited\n"); + seq_puts(m, ":unlimited\n"); else seq_printf(m, ":count=%ld\n", count); diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index f0a0c982cde3..6c2ab955018c 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -107,7 +107,7 @@ enum { FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT, }; -static enum print_line_t +static void print_graph_duration(unsigned long long duration, struct trace_seq *s, u32 flags); @@ -483,33 +483,24 @@ static int graph_trace_update_thresh(struct trace_array *tr) static int max_bytes_for_cpu; -static enum print_line_t -print_graph_cpu(struct trace_seq *s, int cpu) +static void print_graph_cpu(struct trace_seq *s, int cpu) { - int ret; - /* * Start with a space character - to make it stand out * to the right a bit when trace output is pasted into * email: */ - ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - return TRACE_TYPE_HANDLED; + trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); } #define TRACE_GRAPH_PROCINFO_LENGTH 14 -static enum print_line_t -print_graph_proc(struct trace_seq *s, pid_t pid) +static void print_graph_proc(struct trace_seq *s, pid_t pid) { char comm[TASK_COMM_LEN]; /* sign + log10(MAX_INT) + '\0' */ char pid_str[11]; int spaces = 0; - int ret; int len; int i; @@ -524,56 +515,43 @@ print_graph_proc(struct trace_seq *s, pid_t pid) spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; /* First spaces to align center */ - for (i = 0; i < spaces / 2; i++) { - ret = trace_seq_putc(s, ' '); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - } + for (i = 0; i < spaces / 2; i++) + trace_seq_putc(s, ' '); - ret = trace_seq_printf(s, "%s-%s", comm, pid_str); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_printf(s, "%s-%s", comm, pid_str); /* Last spaces to align center */ - for (i = 0; i < spaces - (spaces / 2); i++) { - ret = trace_seq_putc(s, ' '); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - } - return TRACE_TYPE_HANDLED; + for (i = 0; i < spaces - (spaces / 2); i++) + trace_seq_putc(s, ' '); } -static enum print_line_t -print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) +static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) { - if (!trace_seq_putc(s, ' ')) - return 0; - - return trace_print_lat_fmt(s, entry); + trace_seq_putc(s, ' '); + trace_print_lat_fmt(s, entry); } /* If the pid changed since the last trace, output this event */ -static enum print_line_t +static void verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) { pid_t prev_pid; pid_t *last_pid; - int ret; if (!data) - return TRACE_TYPE_HANDLED; + return; last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); if (*last_pid == pid) - return TRACE_TYPE_HANDLED; + return; prev_pid = *last_pid; *last_pid = pid; if (prev_pid == -1) - return TRACE_TYPE_HANDLED; + return; /* * 
Context-switch trace line: @@ -582,33 +560,12 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) ------------------------------------------ */ - ret = trace_seq_puts(s, - " ------------------------------------------\n"); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - ret = print_graph_cpu(s, cpu); - if (ret == TRACE_TYPE_PARTIAL_LINE) - return TRACE_TYPE_PARTIAL_LINE; - - ret = print_graph_proc(s, prev_pid); - if (ret == TRACE_TYPE_PARTIAL_LINE) - return TRACE_TYPE_PARTIAL_LINE; - - ret = trace_seq_puts(s, " => "); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - ret = print_graph_proc(s, pid); - if (ret == TRACE_TYPE_PARTIAL_LINE) - return TRACE_TYPE_PARTIAL_LINE; - - ret = trace_seq_puts(s, - "\n ------------------------------------------\n\n"); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - return TRACE_TYPE_HANDLED; + trace_seq_puts(s, " ------------------------------------------\n"); + print_graph_cpu(s, cpu); + print_graph_proc(s, prev_pid); + trace_seq_puts(s, " => "); + print_graph_proc(s, pid); + trace_seq_puts(s, "\n ------------------------------------------\n\n"); } static struct ftrace_graph_ret_entry * @@ -682,175 +639,122 @@ get_return_for_leaf(struct trace_iterator *iter, return next; } -static int print_graph_abs_time(u64 t, struct trace_seq *s) +static void print_graph_abs_time(u64 t, struct trace_seq *s) { unsigned long usecs_rem; usecs_rem = do_div(t, NSEC_PER_SEC); usecs_rem /= 1000; - return trace_seq_printf(s, "%5lu.%06lu | ", - (unsigned long)t, usecs_rem); + trace_seq_printf(s, "%5lu.%06lu | ", + (unsigned long)t, usecs_rem); } -static enum print_line_t +static void print_graph_irq(struct trace_iterator *iter, unsigned long addr, enum trace_type type, int cpu, pid_t pid, u32 flags) { - int ret; struct trace_seq *s = &iter->seq; + struct trace_entry *ent = iter->ent; if (addr < (unsigned long)__irqentry_text_start || addr >= (unsigned long)__irqentry_text_end) - return TRACE_TYPE_UNHANDLED; + return; if (trace_flags & TRACE_ITER_CONTEXT_INFO) { /* Absolute time */ - if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { - ret = print_graph_abs_time(iter->ts, s); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - } + if (flags & TRACE_GRAPH_PRINT_ABS_TIME) + print_graph_abs_time(iter->ts, s); /* Cpu */ - if (flags & TRACE_GRAPH_PRINT_CPU) { - ret = print_graph_cpu(s, cpu); - if (ret == TRACE_TYPE_PARTIAL_LINE) - return TRACE_TYPE_PARTIAL_LINE; - } + if (flags & TRACE_GRAPH_PRINT_CPU) + print_graph_cpu(s, cpu); /* Proc */ if (flags & TRACE_GRAPH_PRINT_PROC) { - ret = print_graph_proc(s, pid); - if (ret == TRACE_TYPE_PARTIAL_LINE) - return TRACE_TYPE_PARTIAL_LINE; - ret = trace_seq_puts(s, " | "); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + print_graph_proc(s, pid); + trace_seq_puts(s, " | "); } + + /* Latency format */ + if (trace_flags & TRACE_ITER_LATENCY_FMT) + print_graph_lat_fmt(s, ent); } /* No overhead */ - ret = print_graph_duration(0, s, flags | FLAGS_FILL_START); - if (ret != TRACE_TYPE_HANDLED) - return ret; + print_graph_duration(0, s, flags | FLAGS_FILL_START); if (type == TRACE_GRAPH_ENT) - ret = trace_seq_puts(s, "==========>"); + trace_seq_puts(s, "==========>"); else - ret = trace_seq_puts(s, "<=========="); - - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_puts(s, "<=========="); - ret = print_graph_duration(0, s, flags | FLAGS_FILL_END); - if (ret != TRACE_TYPE_HANDLED) - return ret; - - ret = trace_seq_putc(s, '\n'); - - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - return TRACE_TYPE_HANDLED; + 
print_graph_duration(0, s, flags | FLAGS_FILL_END); + trace_seq_putc(s, '\n'); } -enum print_line_t +void trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) { unsigned long nsecs_rem = do_div(duration, 1000); /* log10(ULONG_MAX) + '\0' */ - char msecs_str[21]; + char usecs_str[21]; char nsecs_str[5]; - int ret, len; + int len; int i; - sprintf(msecs_str, "%lu", (unsigned long) duration); + sprintf(usecs_str, "%lu", (unsigned long) duration); /* Print msecs */ - ret = trace_seq_printf(s, "%s", msecs_str); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_printf(s, "%s", usecs_str); - len = strlen(msecs_str); + len = strlen(usecs_str); /* Print nsecs (we don't want to exceed 7 numbers) */ if (len < 7) { size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len); snprintf(nsecs_str, slen, "%03lu", nsecs_rem); - ret = trace_seq_printf(s, ".%s", nsecs_str); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_printf(s, ".%s", nsecs_str); len += strlen(nsecs_str); } - ret = trace_seq_puts(s, " us "); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_puts(s, " us "); /* Print remaining spaces to fit the row's width */ - for (i = len; i < 7; i++) { - ret = trace_seq_putc(s, ' '); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - } - return TRACE_TYPE_HANDLED; + for (i = len; i < 7; i++) + trace_seq_putc(s, ' '); } -static enum print_line_t +static void print_graph_duration(unsigned long long duration, struct trace_seq *s, u32 flags) { - int ret = -1; - if (!(flags & TRACE_GRAPH_PRINT_DURATION) || !(trace_flags & TRACE_ITER_CONTEXT_INFO)) - return TRACE_TYPE_HANDLED; + return; /* No real adata, just filling the column with spaces */ switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) { case FLAGS_FILL_FULL: - ret = trace_seq_puts(s, " | "); - return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; + trace_seq_puts(s, " | "); + return; case FLAGS_FILL_START: - ret = trace_seq_puts(s, " "); - return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; + trace_seq_puts(s, " "); + return; case FLAGS_FILL_END: - ret = trace_seq_puts(s, " |"); - return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; + trace_seq_puts(s, " |"); + return; } /* Signal a overhead of time execution to the output */ - if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { - /* Duration exceeded 100 msecs */ - if (duration > 100000ULL) - ret = trace_seq_puts(s, "! "); - /* Duration exceeded 10 msecs */ - else if (duration > 10000ULL) - ret = trace_seq_puts(s, "+ "); - } - - /* - * The -1 means we either did not exceed the duration tresholds - * or we dont want to print out the overhead. Either way we need - * to fill out the space. 
- */ - if (ret == -1) - ret = trace_seq_puts(s, " "); - - /* Catching here any failure happenned above */ - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - ret = trace_print_graph_duration(duration, s); - if (ret != TRACE_TYPE_HANDLED) - return ret; - - ret = trace_seq_puts(s, "| "); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + if (flags & TRACE_GRAPH_PRINT_OVERHEAD) + trace_seq_printf(s, "%c ", trace_find_mark(duration)); + else + trace_seq_puts(s, " "); - return TRACE_TYPE_HANDLED; + trace_print_graph_duration(duration, s); + trace_seq_puts(s, "| "); } /* Case of a leaf function on its call entry */ @@ -864,7 +768,6 @@ print_graph_entry_leaf(struct trace_iterator *iter, struct ftrace_graph_ret *graph_ret; struct ftrace_graph_ent *call; unsigned long long duration; - int ret; int i; graph_ret = &ret_entry->ret; @@ -890,22 +793,15 @@ print_graph_entry_leaf(struct trace_iterator *iter, } /* Overhead and duration */ - ret = print_graph_duration(duration, s, flags); - if (ret == TRACE_TYPE_PARTIAL_LINE) - return TRACE_TYPE_PARTIAL_LINE; + print_graph_duration(duration, s, flags); /* Function */ - for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { - ret = trace_seq_putc(s, ' '); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - } + for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) + trace_seq_putc(s, ' '); - ret = trace_seq_printf(s, "%ps();\n", (void *)call->func); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_printf(s, "%ps();\n", (void *)call->func); - return TRACE_TYPE_HANDLED; + return trace_handle_return(s); } static enum print_line_t @@ -915,7 +811,6 @@ print_graph_entry_nested(struct trace_iterator *iter, { struct ftrace_graph_ent *call = &entry->graph_ent; struct fgraph_data *data = iter->private; - int ret; int i; if (data) { @@ -931,19 +826,15 @@ print_graph_entry_nested(struct trace_iterator *iter, } /* No time */ - ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL); - if (ret != TRACE_TYPE_HANDLED) - return ret; + print_graph_duration(0, s, flags | FLAGS_FILL_FULL); /* Function */ - for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { - ret = trace_seq_putc(s, ' '); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - } + for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) + trace_seq_putc(s, ' '); - ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func); - if (!ret) + trace_seq_printf(s, "%ps() {\n", (void *)call->func); + + if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; /* @@ -953,62 +844,43 @@ print_graph_entry_nested(struct trace_iterator *iter, return TRACE_TYPE_NO_CONSUME; } -static enum print_line_t +static void print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, int type, unsigned long addr, u32 flags) { struct fgraph_data *data = iter->private; struct trace_entry *ent = iter->ent; int cpu = iter->cpu; - int ret; /* Pid */ - if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE) - return TRACE_TYPE_PARTIAL_LINE; + verif_pid(s, ent->pid, cpu, data); - if (type) { + if (type) /* Interrupt */ - ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags); - if (ret == TRACE_TYPE_PARTIAL_LINE) - return TRACE_TYPE_PARTIAL_LINE; - } + print_graph_irq(iter, addr, type, cpu, ent->pid, flags); if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) - return 0; + return; /* Absolute time */ - if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { - ret = print_graph_abs_time(iter->ts, s); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - } + if (flags & TRACE_GRAPH_PRINT_ABS_TIME) + print_graph_abs_time(iter->ts, s); /* 
Cpu */ - if (flags & TRACE_GRAPH_PRINT_CPU) { - ret = print_graph_cpu(s, cpu); - if (ret == TRACE_TYPE_PARTIAL_LINE) - return TRACE_TYPE_PARTIAL_LINE; - } + if (flags & TRACE_GRAPH_PRINT_CPU) + print_graph_cpu(s, cpu); /* Proc */ if (flags & TRACE_GRAPH_PRINT_PROC) { - ret = print_graph_proc(s, ent->pid); - if (ret == TRACE_TYPE_PARTIAL_LINE) - return TRACE_TYPE_PARTIAL_LINE; - - ret = trace_seq_puts(s, " | "); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + print_graph_proc(s, ent->pid); + trace_seq_puts(s, " | "); } /* Latency format */ - if (trace_flags & TRACE_ITER_LATENCY_FMT) { - ret = print_graph_lat_fmt(s, ent); - if (ret == TRACE_TYPE_PARTIAL_LINE) - return TRACE_TYPE_PARTIAL_LINE; - } + if (trace_flags & TRACE_ITER_LATENCY_FMT) + print_graph_lat_fmt(s, ent); - return 0; + return; } /* @@ -1126,8 +998,7 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, if (check_irq_entry(iter, flags, call->func, call->depth)) return TRACE_TYPE_HANDLED; - if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) - return TRACE_TYPE_PARTIAL_LINE; + print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags); leaf_ret = get_return_for_leaf(iter, field); if (leaf_ret) @@ -1160,7 +1031,6 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, pid_t pid = ent->pid; int cpu = iter->cpu; int func_match = 1; - int ret; int i; if (check_irq_return(iter, flags, trace->depth)) @@ -1186,20 +1056,14 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, } } - if (print_graph_prologue(iter, s, 0, 0, flags)) - return TRACE_TYPE_PARTIAL_LINE; + print_graph_prologue(iter, s, 0, 0, flags); /* Overhead and duration */ - ret = print_graph_duration(duration, s, flags); - if (ret == TRACE_TYPE_PARTIAL_LINE) - return TRACE_TYPE_PARTIAL_LINE; + print_graph_duration(duration, s, flags); /* Closing brace */ - for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { - ret = trace_seq_putc(s, ' '); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - } + for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) + trace_seq_putc(s, ' '); /* * If the return function does not have a matching entry, @@ -1208,30 +1072,20 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, * belongs to, write out the function name. Always do * that if the funcgraph-tail option is enabled. 
*/ - if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) { - ret = trace_seq_puts(s, "}\n"); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - } else { - ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - } + if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) + trace_seq_puts(s, "}\n"); + else + trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); /* Overrun */ - if (flags & TRACE_GRAPH_PRINT_OVERRUN) { - ret = trace_seq_printf(s, " (Overruns: %lu)\n", - trace->overrun); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - } + if (flags & TRACE_GRAPH_PRINT_OVERRUN) + trace_seq_printf(s, " (Overruns: %lu)\n", + trace->overrun); - ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, - cpu, pid, flags); - if (ret == TRACE_TYPE_PARTIAL_LINE) - return TRACE_TYPE_PARTIAL_LINE; + print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, + cpu, pid, flags); - return TRACE_TYPE_HANDLED; + return trace_handle_return(s); } static enum print_line_t @@ -1248,26 +1102,18 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, if (data) depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; - if (print_graph_prologue(iter, s, 0, 0, flags)) - return TRACE_TYPE_PARTIAL_LINE; + print_graph_prologue(iter, s, 0, 0, flags); /* No time */ - ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL); - if (ret != TRACE_TYPE_HANDLED) - return ret; + print_graph_duration(0, s, flags | FLAGS_FILL_FULL); /* Indentation */ if (depth > 0) - for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { - ret = trace_seq_putc(s, ' '); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - } + for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) + trace_seq_putc(s, ' '); /* The comment */ - ret = trace_seq_puts(s, "/* "); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_puts(s, "/* "); switch (iter->ent->type) { case TRACE_BPRINT: @@ -1296,11 +1142,9 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, s->len--; } - ret = trace_seq_puts(s, " */\n"); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_puts(s, " */\n"); - return TRACE_TYPE_HANDLED; + return trace_handle_return(s); } @@ -1407,32 +1251,32 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags) print_lat_header(s, flags); /* 1st line */ - seq_printf(s, "#"); + seq_putc(s, '#'); if (flags & TRACE_GRAPH_PRINT_ABS_TIME) - seq_printf(s, " TIME "); + seq_puts(s, " TIME "); if (flags & TRACE_GRAPH_PRINT_CPU) - seq_printf(s, " CPU"); + seq_puts(s, " CPU"); if (flags & TRACE_GRAPH_PRINT_PROC) - seq_printf(s, " TASK/PID "); + seq_puts(s, " TASK/PID "); if (lat) - seq_printf(s, "||||"); + seq_puts(s, "||||"); if (flags & TRACE_GRAPH_PRINT_DURATION) - seq_printf(s, " DURATION "); - seq_printf(s, " FUNCTION CALLS\n"); + seq_puts(s, " DURATION "); + seq_puts(s, " FUNCTION CALLS\n"); /* 2nd line */ - seq_printf(s, "#"); + seq_putc(s, '#'); if (flags & TRACE_GRAPH_PRINT_ABS_TIME) - seq_printf(s, " | "); + seq_puts(s, " | "); if (flags & TRACE_GRAPH_PRINT_CPU) - seq_printf(s, " | "); + seq_puts(s, " | "); if (flags & TRACE_GRAPH_PRINT_PROC) - seq_printf(s, " | | "); + seq_puts(s, " | | "); if (lat) - seq_printf(s, "||||"); + seq_puts(s, "||||"); if (flags & TRACE_GRAPH_PRINT_DURATION) - seq_printf(s, " | | "); - seq_printf(s, " | | | |\n"); + seq_puts(s, " | | "); + seq_puts(s, " | | | |\n"); } static void print_graph_headers(struct seq_file *s) diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index bd90e1b06088..b0b1c44e923a 100644 --- 
a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c @@ -20,10 +20,12 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) { /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; + static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS]; unsigned int old_userobj; int cnt = 0, cpu; trace_init_global_iter(&iter); + iter.buffer_iter = buffer_iter; for_each_tracing_cpu(cpu) { atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); @@ -57,19 +59,19 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) ring_buffer_read_start(iter.buffer_iter[cpu_file]); tracing_iter_reset(&iter, cpu_file); } - if (!trace_empty(&iter)) - trace_find_next_entry_inc(&iter); - while (!trace_empty(&iter)) { + + while (trace_find_next_entry_inc(&iter)) { if (!cnt) kdb_printf("---------------------------------\n"); cnt++; - if (trace_find_next_entry_inc(&iter) != NULL && !skip_lines) + if (!skip_lines) { print_trace_line(&iter); - if (!skip_lines) trace_printk_seq(&iter.seq); - else + } else { skip_lines--; + } + if (KDB_FLAG(CMD_INTERRUPT)) goto out; } @@ -86,9 +88,12 @@ out: atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); } - for_each_tracing_cpu(cpu) - if (iter.buffer_iter[cpu]) + for_each_tracing_cpu(cpu) { + if (iter.buffer_iter[cpu]) { ring_buffer_read_finish(iter.buffer_iter[cpu]); + iter.buffer_iter[cpu] = NULL; + } + } } /* diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 282f6e4e5539..5edb518be345 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -826,7 +826,7 @@ static int probes_seq_show(struct seq_file *m, void *v) struct trace_kprobe *tk = v; int i; - seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); + seq_putc(m, trace_kprobe_is_return(tk) ? 
'r' : 'p'); seq_printf(m, ":%s/%s", tk->tp.call.class->system, ftrace_event_name(&tk->tp.call)); @@ -840,7 +840,7 @@ static int probes_seq_show(struct seq_file *m, void *v) for (i = 0; i < tk->tp.nr_args; i++) seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm); - seq_printf(m, "\n"); + seq_putc(m, '\n'); return 0; } @@ -1024,27 +1024,22 @@ print_kprobe_event(struct trace_iterator *iter, int flags, field = (struct kprobe_trace_entry_head *)iter->ent; tp = container_of(event, struct trace_probe, call.event); - if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) - goto partial; + trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)); if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) - goto partial; + goto out; - if (!trace_seq_puts(s, ")")) - goto partial; + trace_seq_putc(s, ')'); data = (u8 *)&field[1]; for (i = 0; i < tp->nr_args; i++) if (!tp->args[i].type->print(s, tp->args[i].name, data + tp->args[i].offset, field)) - goto partial; - - if (!trace_seq_puts(s, "\n")) - goto partial; + goto out; - return TRACE_TYPE_HANDLED; -partial: - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_putc(s, '\n'); + out: + return trace_handle_return(s); } static enum print_line_t @@ -1060,33 +1055,28 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, field = (struct kretprobe_trace_entry_head *)iter->ent; tp = container_of(event, struct trace_probe, call.event); - if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) - goto partial; + trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)); if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) - goto partial; + goto out; - if (!trace_seq_puts(s, " <- ")) - goto partial; + trace_seq_puts(s, " <- "); if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET)) - goto partial; + goto out; - if (!trace_seq_puts(s, ")")) - goto partial; + trace_seq_putc(s, ')'); data = (u8 *)&field[1]; for (i = 0; i < tp->nr_args; i++) if (!tp->args[i].type->print(s, tp->args[i].name, data + tp->args[i].offset, field)) - goto partial; + goto out; - if (!trace_seq_puts(s, "\n")) - goto partial; + trace_seq_putc(s, '\n'); - return TRACE_TYPE_HANDLED; -partial: - return TRACE_TYPE_PARTIAL_LINE; + out: + return trace_handle_return(s); } diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index 0abd9b863474..7a9ba62e9fef 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -59,17 +59,15 @@ static void mmio_trace_start(struct trace_array *tr) mmio_reset_data(tr); } -static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) +static void mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) { - int ret = 0; int i; resource_size_t start, end; const struct pci_driver *drv = pci_dev_driver(dev); - /* XXX: incomplete checks for trace_seq_printf() return value */ - ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x", - dev->bus->number, dev->devfn, - dev->vendor, dev->device, dev->irq); + trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x", + dev->bus->number, dev->devfn, + dev->vendor, dev->device, dev->irq); /* * XXX: is pci_resource_to_user() appropriate, since we are * supposed to interpret the __ioremap() phys_addr argument based on @@ -77,21 +75,20 @@ static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) */ for (i = 0; i < 7; i++) { pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); - ret += trace_seq_printf(s, " %llx", + trace_seq_printf(s, " %llx", (unsigned 
long long)(start | (dev->resource[i].flags & PCI_REGION_FLAG_MASK))); } for (i = 0; i < 7; i++) { pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); - ret += trace_seq_printf(s, " %llx", + trace_seq_printf(s, " %llx", dev->resource[i].start < dev->resource[i].end ? (unsigned long long)(end - start) + 1 : 0); } if (drv) - ret += trace_seq_printf(s, " %s\n", drv->name); + trace_seq_printf(s, " %s\n", drv->name); else - ret += trace_seq_puts(s, " \n"); - return ret; + trace_seq_puts(s, " \n"); } static void destroy_header_iter(struct header_iter *hiter) @@ -179,28 +176,27 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, USEC_PER_SEC); unsigned secs = (unsigned long)t; - int ret = 1; trace_assign_type(field, entry); rw = &field->rw; switch (rw->opcode) { case MMIO_READ: - ret = trace_seq_printf(s, + trace_seq_printf(s, "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", rw->width, secs, usec_rem, rw->map_id, (unsigned long long)rw->phys, rw->value, rw->pc, 0); break; case MMIO_WRITE: - ret = trace_seq_printf(s, + trace_seq_printf(s, "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", rw->width, secs, usec_rem, rw->map_id, (unsigned long long)rw->phys, rw->value, rw->pc, 0); break; case MMIO_UNKNOWN_OP: - ret = trace_seq_printf(s, + trace_seq_printf(s, "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx," "%02lx 0x%lx %d\n", secs, usec_rem, rw->map_id, @@ -209,12 +205,11 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) (rw->value >> 0) & 0xff, rw->pc, 0); break; default: - ret = trace_seq_puts(s, "rw what?\n"); + trace_seq_puts(s, "rw what?\n"); break; } - if (ret) - return TRACE_TYPE_HANDLED; - return TRACE_TYPE_PARTIAL_LINE; + + return trace_handle_return(s); } static enum print_line_t mmio_print_map(struct trace_iterator *iter) @@ -226,31 +221,29 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter) unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, USEC_PER_SEC); unsigned secs = (unsigned long)t; - int ret; trace_assign_type(field, entry); m = &field->map; switch (m->opcode) { case MMIO_PROBE: - ret = trace_seq_printf(s, + trace_seq_printf(s, "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", secs, usec_rem, m->map_id, (unsigned long long)m->phys, m->virt, m->len, 0UL, 0); break; case MMIO_UNPROBE: - ret = trace_seq_printf(s, + trace_seq_printf(s, "UNMAP %u.%06lu %d 0x%lx %d\n", secs, usec_rem, m->map_id, 0UL, 0); break; default: - ret = trace_seq_puts(s, "map what?\n"); + trace_seq_puts(s, "map what?\n"); break; } - if (ret) - return TRACE_TYPE_HANDLED; - return TRACE_TYPE_PARTIAL_LINE; + + return trace_handle_return(s); } static enum print_line_t mmio_print_mark(struct trace_iterator *iter) @@ -262,14 +255,11 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter) unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, USEC_PER_SEC); unsigned secs = (unsigned long)t; - int ret; /* The trailing newline must be in the message. 
*/ - ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg); - return TRACE_TYPE_HANDLED; + return trace_handle_return(s); } static enum print_line_t mmio_print_line(struct trace_iterator *iter) diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index c6977d5a9b12..b77b9a697619 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -25,15 +25,12 @@ enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter) struct trace_seq *s = &iter->seq; struct trace_entry *entry = iter->ent; struct bputs_entry *field; - int ret; trace_assign_type(field, entry); - ret = trace_seq_puts(s, field->str); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_puts(s, field->str); - return TRACE_TYPE_HANDLED; + return trace_handle_return(s); } enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) @@ -41,15 +38,12 @@ enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) struct trace_seq *s = &iter->seq; struct trace_entry *entry = iter->ent; struct bprint_entry *field; - int ret; trace_assign_type(field, entry); - ret = trace_seq_bprintf(s, field->fmt, field->buf); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_bprintf(s, field->fmt, field->buf); - return TRACE_TYPE_HANDLED; + return trace_handle_return(s); } enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) @@ -57,15 +51,12 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) struct trace_seq *s = &iter->seq; struct trace_entry *entry = iter->ent; struct print_entry *field; - int ret; trace_assign_type(field, entry); - ret = trace_seq_puts(s, field->buf); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_puts(s, field->buf); - return TRACE_TYPE_HANDLED; + return trace_handle_return(s); } const char * @@ -124,7 +115,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, if (ret == (const char *)(trace_seq_buffer_ptr(p))) trace_seq_printf(p, "0x%lx", val); - + trace_seq_putc(p, 0); return ret; @@ -193,7 +184,6 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, struct trace_seq *s = &iter->seq; struct trace_seq *p = &iter->tmp_seq; struct trace_entry *entry; - int ret; event = container_of(trace_event, struct ftrace_event_call, event); entry = iter->ent; @@ -204,11 +194,9 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, } trace_seq_init(p); - ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event)); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_printf(s, "%s: ", ftrace_event_name(event)); - return 0; + return trace_handle_return(s); } EXPORT_SYMBOL(ftrace_raw_output_prep); @@ -216,18 +204,11 @@ static int ftrace_output_raw(struct trace_iterator *iter, char *name, char *fmt, va_list ap) { struct trace_seq *s = &iter->seq; - int ret; - - ret = trace_seq_printf(s, "%s: ", name); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - ret = trace_seq_vprintf(s, fmt, ap); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_printf(s, "%s: ", name); + trace_seq_vprintf(s, fmt, ap); - return TRACE_TYPE_HANDLED; + return trace_handle_return(s); } int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) 
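The handlers in this file all take the same shape after the conversion: the trace_seq_*() writers return nothing, the handler emits everything it wants, and one trace_handle_return() at the end reports TRACE_TYPE_PARTIAL_LINE when the sequence buffer overflowed and TRACE_TYPE_HANDLED otherwise. Below is a small userspace sketch of that shape; seq_buf, seq_printf() and handle_return() are simplified stand-ins, not the real trace_seq API.

/* Build: cc -std=c99 seq_overflow_model.c */
#include <stdio.h>
#include <stdarg.h>
#include <stdbool.h>

enum print_line_t { HANDLED, PARTIAL_LINE };    /* models TRACE_TYPE_* */

struct seq_buf {
        char   buffer[32];
        size_t len;
        bool   full;                    /* set once a write does not fit */
};

/* Writers return nothing; they only mark the buffer full on overflow. */
static void seq_printf(struct seq_buf *s, const char *fmt, ...)
{
        va_list ap;
        size_t left = sizeof(s->buffer) - s->len;
        int ret;

        if (s->full || !left)
                return;

        va_start(ap, fmt);
        ret = vsnprintf(s->buffer + s->len, left, fmt, ap);
        va_end(ap);

        if (ret < 0 || (size_t)ret >= left) {   /* would not fit: write nothing */
                s->full = true;
                return;
        }
        s->len += ret;
}

static bool seq_has_overflowed(const struct seq_buf *s)
{
        return s->full;
}

/* Models trace_handle_return(): one check at the end instead of
 * checking every individual write. */
static enum print_line_t handle_return(const struct seq_buf *s)
{
        return seq_has_overflowed(s) ? PARTIAL_LINE : HANDLED;
}

static enum print_line_t print_event(struct seq_buf *s, int pid, const char *name)
{
        seq_printf(s, "%16s-%-5d ", name, pid);
        seq_printf(s, "entered\n");
        return handle_return(s);
}

int main(void)
{
        struct seq_buf s = { .len = 0, .full = false };

        if (print_event(&s, 42, "kworker/0:1") == HANDLED)
                fputs(s.buffer, stdout);
        else
                puts("(partial line dropped)");
        return 0;
}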
@@ -260,7 +241,7 @@ static inline const char *kretprobed(const char *name) } #endif /* CONFIG_KRETPROBES */ -static int +static void seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) { #ifdef CONFIG_KALLSYMS @@ -271,12 +252,11 @@ seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) name = kretprobed(str); - return trace_seq_printf(s, fmt, name); + trace_seq_printf(s, fmt, name); #endif - return 1; } -static int +static void seq_print_sym_offset(struct trace_seq *s, const char *fmt, unsigned long address) { @@ -287,9 +267,8 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt, sprint_symbol(str, address); name = kretprobed(str); - return trace_seq_printf(s, fmt, name); + trace_seq_printf(s, fmt, name); #endif - return 1; } #ifndef CONFIG_64BIT @@ -320,14 +299,14 @@ int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, if (file) { ret = trace_seq_path(s, &file->f_path); if (ret) - ret = trace_seq_printf(s, "[+0x%lx]", - ip - vmstart); + trace_seq_printf(s, "[+0x%lx]", + ip - vmstart); } up_read(&mm->mmap_sem); } if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) - ret = trace_seq_printf(s, " <" IP_FMT ">", ip); - return ret; + trace_seq_printf(s, " <" IP_FMT ">", ip); + return !trace_seq_has_overflowed(s); } int @@ -335,7 +314,6 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, unsigned long sym_flags) { struct mm_struct *mm = NULL; - int ret = 1; unsigned int i; if (trace_flags & TRACE_ITER_SYM_USEROBJ) { @@ -354,48 +332,45 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { unsigned long ip = entry->caller[i]; - if (ip == ULONG_MAX || !ret) + if (ip == ULONG_MAX || trace_seq_has_overflowed(s)) break; - if (ret) - ret = trace_seq_puts(s, " => "); + + trace_seq_puts(s, " => "); + if (!ip) { - if (ret) - ret = trace_seq_puts(s, "??"); - if (ret) - ret = trace_seq_putc(s, '\n'); + trace_seq_puts(s, "??"); + trace_seq_putc(s, '\n'); continue; } - if (!ret) - break; - if (ret) - ret = seq_print_user_ip(s, mm, ip, sym_flags); - ret = trace_seq_putc(s, '\n'); + + seq_print_user_ip(s, mm, ip, sym_flags); + trace_seq_putc(s, '\n'); } if (mm) mmput(mm); - return ret; + + return !trace_seq_has_overflowed(s); } int seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) { - int ret; - - if (!ip) - return trace_seq_putc(s, '0'); + if (!ip) { + trace_seq_putc(s, '0'); + goto out; + } if (sym_flags & TRACE_ITER_SYM_OFFSET) - ret = seq_print_sym_offset(s, "%s", ip); + seq_print_sym_offset(s, "%s", ip); else - ret = seq_print_sym_short(s, "%s", ip); - - if (!ret) - return 0; + seq_print_sym_short(s, "%s", ip); if (sym_flags & TRACE_ITER_SYM_ADDR) - ret = trace_seq_printf(s, " <" IP_FMT ">", ip); - return ret; + trace_seq_printf(s, " <" IP_FMT ">", ip); + + out: + return !trace_seq_has_overflowed(s); } /** @@ -413,7 +388,6 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) char irqs_off; int hardirq; int softirq; - int ret; hardirq = entry->flags & TRACE_FLAG_HARDIRQ; softirq = entry->flags & TRACE_FLAG_SOFTIRQ; @@ -445,16 +419,15 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) softirq ? 
's' : '.'; - if (!trace_seq_printf(s, "%c%c%c", - irqs_off, need_resched, hardsoft_irq)) - return 0; + trace_seq_printf(s, "%c%c%c", + irqs_off, need_resched, hardsoft_irq); if (entry->preempt_count) - ret = trace_seq_printf(s, "%x", entry->preempt_count); + trace_seq_printf(s, "%x", entry->preempt_count); else - ret = trace_seq_putc(s, '.'); + trace_seq_putc(s, '.'); - return ret; + return !trace_seq_has_overflowed(s); } static int @@ -464,14 +437,38 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) trace_find_cmdline(entry->pid, comm); - if (!trace_seq_printf(s, "%8.8s-%-5d %3d", - comm, entry->pid, cpu)) - return 0; + trace_seq_printf(s, "%8.8s-%-5d %3d", + comm, entry->pid, cpu); return trace_print_lat_fmt(s, entry); } -static unsigned long preempt_mark_thresh_us = 100; +#undef MARK +#define MARK(v, s) {.val = v, .sym = s} +/* trace overhead mark */ +static const struct trace_mark { + unsigned long long val; /* unit: nsec */ + char sym; +} mark[] = { + MARK(1000000000ULL , '$'), /* 1 sec */ + MARK(1000000ULL , '#'), /* 1000 usecs */ + MARK(100000ULL , '!'), /* 100 usecs */ + MARK(10000ULL , '+'), /* 10 usecs */ +}; +#undef MARK + +char trace_find_mark(unsigned long long d) +{ + int i; + int size = ARRAY_SIZE(mark); + + for (i = 0; i < size; i++) { + if (d >= mark[i].val) + break; + } + + return (i == size) ? ' ' : mark[i].sym; +} static int lat_print_timestamp(struct trace_iterator *iter, u64 next_ts) @@ -493,24 +490,28 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts) unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC); unsigned long rel_msec = (unsigned long)rel_ts; - return trace_seq_printf( - s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ", - ns2usecs(iter->ts), - abs_msec, abs_usec, - rel_msec, rel_usec); + trace_seq_printf( + s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ", + ns2usecs(iter->ts), + abs_msec, abs_usec, + rel_msec, rel_usec); + } else if (verbose && !in_ns) { - return trace_seq_printf( - s, "[%016llx] %lld (+%lld): ", - iter->ts, abs_ts, rel_ts); + trace_seq_printf( + s, "[%016llx] %lld (+%lld): ", + iter->ts, abs_ts, rel_ts); + } else if (!verbose && in_ns) { - return trace_seq_printf( - s, " %4lldus%c: ", - abs_ts, - rel_ts > preempt_mark_thresh_us ? '!' : - rel_ts > 1 ? 
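trace_find_mark() walks a threshold table ordered from the largest delay down and returns the first symbol whose threshold the delta reaches, or a space when none matches. A standalone sketch of the same lookup, with the thresholds taken from the mark[] table above and userspace names that are illustrative only:

/* Build: cc -std=c99 find_mark.c */
#include <stdio.h>

struct delay_mark {
        unsigned long long val;         /* threshold in nanoseconds */
        char sym;
};

/* Thresholds sorted from largest to smallest, mirroring the mark[] table. */
static const struct delay_mark marks[] = {
        { 1000000000ULL, '$' },         /* >= 1 sec */
        {    1000000ULL, '#' },         /* >= 1000 usecs */
        {     100000ULL, '!' },         /* >= 100 usecs */
        {      10000ULL, '+' },         /* >= 10 usecs */
};

/* Return the symbol of the first threshold not larger than @d,
 * or a space when the delta is below every threshold. */
static char find_mark(unsigned long long d)
{
        size_t i;

        for (i = 0; i < sizeof(marks) / sizeof(marks[0]); i++)
                if (d >= marks[i].val)
                        return marks[i].sym;

        return ' ';
}

int main(void)
{
        unsigned long long samples[] = { 2500ULL, 12000ULL, 250000ULL,
                                         3000000ULL, 1200000000ULL };

        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("%12lluns -> '%c'\n", samples[i], find_mark(samples[i]));
        return 0;
}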
'+' : ' '); + trace_seq_printf( + s, " %4lldus%c: ", + abs_ts, + trace_find_mark(rel_ts * NSEC_PER_USEC)); + } else { /* !verbose && !in_ns */ - return trace_seq_printf(s, " %4lld: ", abs_ts); + trace_seq_printf(s, " %4lld: ", abs_ts); } + + return !trace_seq_has_overflowed(s); } int trace_print_context(struct trace_iterator *iter) @@ -520,34 +521,29 @@ int trace_print_context(struct trace_iterator *iter) unsigned long long t; unsigned long secs, usec_rem; char comm[TASK_COMM_LEN]; - int ret; trace_find_cmdline(entry->pid, comm); - ret = trace_seq_printf(s, "%16s-%-5d [%03d] ", + trace_seq_printf(s, "%16s-%-5d [%03d] ", comm, entry->pid, iter->cpu); - if (!ret) - return 0; - if (trace_flags & TRACE_ITER_IRQ_INFO) { - ret = trace_print_lat_fmt(s, entry); - if (!ret) - return 0; - } + if (trace_flags & TRACE_ITER_IRQ_INFO) + trace_print_lat_fmt(s, entry); if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) { t = ns2usecs(iter->ts); usec_rem = do_div(t, USEC_PER_SEC); secs = (unsigned long)t; - return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem); + trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem); } else - return trace_seq_printf(s, " %12llu: ", iter->ts); + trace_seq_printf(s, " %12llu: ", iter->ts); + + return !trace_seq_has_overflowed(s); } int trace_print_lat_context(struct trace_iterator *iter) { u64 next_ts; - int ret; /* trace_find_next_entry will reset ent_size */ int ent_size = iter->ent_size; struct trace_seq *s = &iter->seq; @@ -567,18 +563,17 @@ int trace_print_lat_context(struct trace_iterator *iter) trace_find_cmdline(entry->pid, comm); - ret = trace_seq_printf( - s, "%16s %5d %3d %d %08x %08lx ", - comm, entry->pid, iter->cpu, entry->flags, - entry->preempt_count, iter->idx); + trace_seq_printf( + s, "%16s %5d %3d %d %08x %08lx ", + comm, entry->pid, iter->cpu, entry->flags, + entry->preempt_count, iter->idx); } else { - ret = lat_print_generic(s, entry, iter->cpu); + lat_print_generic(s, entry, iter->cpu); } - if (ret) - ret = lat_print_timestamp(iter, next_ts); + lat_print_timestamp(iter, next_ts); - return ret; + return !trace_seq_has_overflowed(s); } static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; @@ -692,7 +687,7 @@ int register_ftrace_event(struct trace_event *event) goto out; } else { - + event->type = next_event_type++; list = &ftrace_event_list; } @@ -764,10 +759,9 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event); enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, struct trace_event *event) { - if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type)) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type); - return TRACE_TYPE_HANDLED; + return trace_handle_return(&iter->seq); } /* TRACE_FN */ @@ -779,24 +773,16 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags, trace_assign_type(field, iter->ent); - if (!seq_print_ip_sym(s, field->ip, flags)) - goto partial; + seq_print_ip_sym(s, field->ip, flags); if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { - if (!trace_seq_puts(s, " <-")) - goto partial; - if (!seq_print_ip_sym(s, - field->parent_ip, - flags)) - goto partial; + trace_seq_puts(s, " <-"); + seq_print_ip_sym(s, field->parent_ip, flags); } - if (!trace_seq_putc(s, '\n')) - goto partial; - return TRACE_TYPE_HANDLED; + trace_seq_putc(s, '\n'); - partial: - return TRACE_TYPE_PARTIAL_LINE; + return trace_handle_return(s); } static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags, @@ -806,12 +792,11 @@ static enum 
print_line_t trace_fn_raw(struct trace_iterator *iter, int flags, trace_assign_type(field, iter->ent); - if (!trace_seq_printf(&iter->seq, "%lx %lx\n", - field->ip, - field->parent_ip)) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_printf(&iter->seq, "%lx %lx\n", + field->ip, + field->parent_ip); - return TRACE_TYPE_HANDLED; + return trace_handle_return(&iter->seq); } static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags, @@ -822,10 +807,10 @@ static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags, trace_assign_type(field, iter->ent); - SEQ_PUT_HEX_FIELD_RET(s, field->ip); - SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip); + SEQ_PUT_HEX_FIELD(s, field->ip); + SEQ_PUT_HEX_FIELD(s, field->parent_ip); - return TRACE_TYPE_HANDLED; + return trace_handle_return(s); } static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags, @@ -836,10 +821,10 @@ static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags, trace_assign_type(field, iter->ent); - SEQ_PUT_FIELD_RET(s, field->ip); - SEQ_PUT_FIELD_RET(s, field->parent_ip); + SEQ_PUT_FIELD(s, field->ip); + SEQ_PUT_FIELD(s, field->parent_ip); - return TRACE_TYPE_HANDLED; + return trace_handle_return(s); } static struct trace_event_functions trace_fn_funcs = { @@ -868,18 +853,17 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter, T = task_state_char(field->next_state); S = task_state_char(field->prev_state); trace_find_cmdline(field->next_pid, comm); - if (!trace_seq_printf(&iter->seq, - " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", - field->prev_pid, - field->prev_prio, - S, delim, - field->next_cpu, - field->next_pid, - field->next_prio, - T, comm)) - return TRACE_TYPE_PARTIAL_LINE; - - return TRACE_TYPE_HANDLED; + trace_seq_printf(&iter->seq, + " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", + field->prev_pid, + field->prev_prio, + S, delim, + field->next_cpu, + field->next_pid, + field->next_prio, + T, comm); + + return trace_handle_return(&iter->seq); } static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags, @@ -904,17 +888,16 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S) if (!S) S = task_state_char(field->prev_state); T = task_state_char(field->next_state); - if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", - field->prev_pid, - field->prev_prio, - S, - field->next_cpu, - field->next_pid, - field->next_prio, - T)) - return TRACE_TYPE_PARTIAL_LINE; - - return TRACE_TYPE_HANDLED; + trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", + field->prev_pid, + field->prev_prio, + S, + field->next_cpu, + field->next_pid, + field->next_prio, + T); + + return trace_handle_return(&iter->seq); } static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags, @@ -942,15 +925,15 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S) S = task_state_char(field->prev_state); T = task_state_char(field->next_state); - SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); - SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); - SEQ_PUT_HEX_FIELD_RET(s, S); - SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu); - SEQ_PUT_HEX_FIELD_RET(s, field->next_pid); - SEQ_PUT_HEX_FIELD_RET(s, field->next_prio); - SEQ_PUT_HEX_FIELD_RET(s, T); + SEQ_PUT_HEX_FIELD(s, field->prev_pid); + SEQ_PUT_HEX_FIELD(s, field->prev_prio); + SEQ_PUT_HEX_FIELD(s, S); + SEQ_PUT_HEX_FIELD(s, field->next_cpu); + SEQ_PUT_HEX_FIELD(s, field->next_pid); + SEQ_PUT_HEX_FIELD(s, field->next_prio); + SEQ_PUT_HEX_FIELD(s, T); - return TRACE_TYPE_HANDLED; + return 
trace_handle_return(s); } static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags, @@ -973,14 +956,15 @@ static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter, trace_assign_type(field, iter->ent); - SEQ_PUT_FIELD_RET(s, field->prev_pid); - SEQ_PUT_FIELD_RET(s, field->prev_prio); - SEQ_PUT_FIELD_RET(s, field->prev_state); - SEQ_PUT_FIELD_RET(s, field->next_pid); - SEQ_PUT_FIELD_RET(s, field->next_prio); - SEQ_PUT_FIELD_RET(s, field->next_state); + SEQ_PUT_FIELD(s, field->prev_pid); + SEQ_PUT_FIELD(s, field->prev_prio); + SEQ_PUT_FIELD(s, field->prev_state); + SEQ_PUT_FIELD(s, field->next_cpu); + SEQ_PUT_FIELD(s, field->next_pid); + SEQ_PUT_FIELD(s, field->next_prio); + SEQ_PUT_FIELD(s, field->next_state); - return TRACE_TYPE_HANDLED; + return trace_handle_return(s); } static struct trace_event_functions trace_ctx_funcs = { @@ -1020,23 +1004,19 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter, trace_assign_type(field, iter->ent); end = (unsigned long *)((long)iter->ent + iter->ent_size); - if (!trace_seq_puts(s, "<stack trace>\n")) - goto partial; + trace_seq_puts(s, "<stack trace>\n"); for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) { - if (!trace_seq_puts(s, " => ")) - goto partial; - if (!seq_print_ip_sym(s, *p, flags)) - goto partial; - if (!trace_seq_putc(s, '\n')) - goto partial; - } + if (trace_seq_has_overflowed(s)) + break; - return TRACE_TYPE_HANDLED; + trace_seq_puts(s, " => "); + seq_print_ip_sym(s, *p, flags); + trace_seq_putc(s, '\n'); + } - partial: - return TRACE_TYPE_PARTIAL_LINE; + return trace_handle_return(s); } static struct trace_event_functions trace_stack_funcs = { @@ -1057,16 +1037,10 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter, trace_assign_type(field, iter->ent); - if (!trace_seq_puts(s, "<user stack trace>\n")) - goto partial; - - if (!seq_print_userip_objs(field, s, flags)) - goto partial; - - return TRACE_TYPE_HANDLED; + trace_seq_puts(s, "<user stack trace>\n"); + seq_print_userip_objs(field, s, flags); - partial: - return TRACE_TYPE_PARTIAL_LINE; + return trace_handle_return(s); } static struct trace_event_functions trace_user_stack_funcs = { @@ -1089,19 +1063,11 @@ trace_bputs_print(struct trace_iterator *iter, int flags, trace_assign_type(field, entry); - if (!seq_print_ip_sym(s, field->ip, flags)) - goto partial; + seq_print_ip_sym(s, field->ip, flags); + trace_seq_puts(s, ": "); + trace_seq_puts(s, field->str); - if (!trace_seq_puts(s, ": ")) - goto partial; - - if (!trace_seq_puts(s, field->str)) - goto partial; - - return TRACE_TYPE_HANDLED; - - partial: - return TRACE_TYPE_PARTIAL_LINE; + return trace_handle_return(s); } @@ -1114,16 +1080,10 @@ trace_bputs_raw(struct trace_iterator *iter, int flags, trace_assign_type(field, iter->ent); - if (!trace_seq_printf(s, ": %lx : ", field->ip)) - goto partial; - - if (!trace_seq_puts(s, field->str)) - goto partial; + trace_seq_printf(s, ": %lx : ", field->ip); + trace_seq_puts(s, field->str); - return TRACE_TYPE_HANDLED; - - partial: - return TRACE_TYPE_PARTIAL_LINE; + return trace_handle_return(s); } static struct trace_event_functions trace_bputs_funcs = { @@ -1147,19 +1107,11 @@ trace_bprint_print(struct trace_iterator *iter, int flags, trace_assign_type(field, entry); - if (!seq_print_ip_sym(s, field->ip, flags)) - goto partial; - - if (!trace_seq_puts(s, ": ")) - goto partial; - - if (!trace_seq_bprintf(s, field->fmt, field->buf)) - goto partial; + seq_print_ip_sym(s, field->ip, flags); + 
trace_seq_puts(s, ": "); + trace_seq_bprintf(s, field->fmt, field->buf); - return TRACE_TYPE_HANDLED; - - partial: - return TRACE_TYPE_PARTIAL_LINE; + return trace_handle_return(s); } @@ -1172,16 +1124,10 @@ trace_bprint_raw(struct trace_iterator *iter, int flags, trace_assign_type(field, iter->ent); - if (!trace_seq_printf(s, ": %lx : ", field->ip)) - goto partial; - - if (!trace_seq_bprintf(s, field->fmt, field->buf)) - goto partial; + trace_seq_printf(s, ": %lx : ", field->ip); + trace_seq_bprintf(s, field->fmt, field->buf); - return TRACE_TYPE_HANDLED; - - partial: - return TRACE_TYPE_PARTIAL_LINE; + return trace_handle_return(s); } static struct trace_event_functions trace_bprint_funcs = { @@ -1203,16 +1149,10 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter, trace_assign_type(field, iter->ent); - if (!seq_print_ip_sym(s, field->ip, flags)) - goto partial; - - if (!trace_seq_printf(s, ": %s", field->buf)) - goto partial; + seq_print_ip_sym(s, field->ip, flags); + trace_seq_printf(s, ": %s", field->buf); - return TRACE_TYPE_HANDLED; - - partial: - return TRACE_TYPE_PARTIAL_LINE; + return trace_handle_return(s); } static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags, @@ -1222,13 +1162,9 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags, trace_assign_type(field, iter->ent); - if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf)) - goto partial; - - return TRACE_TYPE_HANDLED; + trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf); - partial: - return TRACE_TYPE_PARTIAL_LINE; + return trace_handle_return(&iter->seq); } static struct trace_event_functions trace_print_funcs = { diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h index 80b25b585a70..8ef2c40efb3c 100644 --- a/kernel/trace/trace_output.h +++ b/kernel/trace/trace_output.h @@ -35,17 +35,11 @@ trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry); extern int __unregister_ftrace_event(struct trace_event *event); extern struct rw_semaphore trace_event_sem; -#define SEQ_PUT_FIELD_RET(s, x) \ -do { \ - if (!trace_seq_putmem(s, &(x), sizeof(x))) \ - return TRACE_TYPE_PARTIAL_LINE; \ -} while (0) - -#define SEQ_PUT_HEX_FIELD_RET(s, x) \ -do { \ - if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ - return TRACE_TYPE_PARTIAL_LINE; \ -} while (0) +#define SEQ_PUT_FIELD(s, x) \ + trace_seq_putmem(s, &(x), sizeof(x)) + +#define SEQ_PUT_HEX_FIELD(s, x) \ + trace_seq_putmem_hex(s, &(x), sizeof(x)) #endif diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 2900817ba65c..c4e70b6bd7fa 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c @@ -305,7 +305,7 @@ static int t_show(struct seq_file *m, void *v) seq_puts(m, "\\t"); break; case '\\': - seq_puts(m, "\\"); + seq_putc(m, '\\'); break; case '"': seq_puts(m, "\\\""); diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index d4b9fc22cd27..b983b2fd2ca1 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -40,7 +40,8 @@ const char *reserved_field_names[] = { int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \ void *data, void *ent) \ { \ - return trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \ + trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \ + return !trace_seq_has_overflowed(s); \ } \ const char PRINT_TYPE_FMT_NAME(type)[] = fmt; \ NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(type)); @@ -61,10 +62,11 @@ int PRINT_TYPE_FUNC_NAME(string)(struct 
trace_seq *s, const char *name, int len = *(u32 *)data >> 16; if (!len) - return trace_seq_printf(s, " %s=(fault)", name); + trace_seq_printf(s, " %s=(fault)", name); else - return trace_seq_printf(s, " %s=\"%s\"", name, - (const char *)get_loc_data(data, ent)); + trace_seq_printf(s, " %s=\"%s\"", name, + (const char *)get_loc_data(data, ent)); + return !trace_seq_has_overflowed(s); } NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(string)); diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index 3f34dc9b40f3..2e293beb186e 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -14,122 +14,26 @@ #include "trace.h" -static struct trace_array *ctx_trace; -static int __read_mostly tracer_enabled; static int sched_ref; static DEFINE_MUTEX(sched_register_mutex); -static int sched_stopped; - - -void -tracing_sched_switch_trace(struct trace_array *tr, - struct task_struct *prev, - struct task_struct *next, - unsigned long flags, int pc) -{ - struct ftrace_event_call *call = &event_context_switch; - struct ring_buffer *buffer = tr->trace_buffer.buffer; - struct ring_buffer_event *event; - struct ctx_switch_entry *entry; - - event = trace_buffer_lock_reserve(buffer, TRACE_CTX, - sizeof(*entry), flags, pc); - if (!event) - return; - entry = ring_buffer_event_data(event); - entry->prev_pid = prev->pid; - entry->prev_prio = prev->prio; - entry->prev_state = prev->state; - entry->next_pid = next->pid; - entry->next_prio = next->prio; - entry->next_state = next->state; - entry->next_cpu = task_cpu(next); - - if (!call_filter_check_discard(call, entry, buffer, event)) - trace_buffer_unlock_commit(buffer, event, flags, pc); -} static void probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) { - struct trace_array_cpu *data; - unsigned long flags; - int cpu; - int pc; - if (unlikely(!sched_ref)) return; tracing_record_cmdline(prev); tracing_record_cmdline(next); - - if (!tracer_enabled || sched_stopped) - return; - - pc = preempt_count(); - local_irq_save(flags); - cpu = raw_smp_processor_id(); - data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu); - - if (likely(!atomic_read(&data->disabled))) - tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc); - - local_irq_restore(flags); -} - -void -tracing_sched_wakeup_trace(struct trace_array *tr, - struct task_struct *wakee, - struct task_struct *curr, - unsigned long flags, int pc) -{ - struct ftrace_event_call *call = &event_wakeup; - struct ring_buffer_event *event; - struct ctx_switch_entry *entry; - struct ring_buffer *buffer = tr->trace_buffer.buffer; - - event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, - sizeof(*entry), flags, pc); - if (!event) - return; - entry = ring_buffer_event_data(event); - entry->prev_pid = curr->pid; - entry->prev_prio = curr->prio; - entry->prev_state = curr->state; - entry->next_pid = wakee->pid; - entry->next_prio = wakee->prio; - entry->next_state = wakee->state; - entry->next_cpu = task_cpu(wakee); - - if (!call_filter_check_discard(call, entry, buffer, event)) - trace_buffer_unlock_commit(buffer, event, flags, pc); } static void probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success) { - struct trace_array_cpu *data; - unsigned long flags; - int cpu, pc; - if (unlikely(!sched_ref)) return; tracing_record_cmdline(current); - - if (!tracer_enabled || sched_stopped) - return; - - pc = preempt_count(); - local_irq_save(flags); - cpu = raw_smp_processor_id(); - data = per_cpu_ptr(ctx_trace->trace_buffer.data, 
cpu); - - if (likely(!atomic_read(&data->disabled))) - tracing_sched_wakeup_trace(ctx_trace, wakee, current, - flags, pc); - - local_irq_restore(flags); } static int tracing_sched_register(void) @@ -197,51 +101,3 @@ void tracing_stop_cmdline_record(void) { tracing_stop_sched_switch(); } - -/** - * tracing_start_sched_switch_record - start tracing context switches - * - * Turns on context switch tracing for a tracer. - */ -void tracing_start_sched_switch_record(void) -{ - if (unlikely(!ctx_trace)) { - WARN_ON(1); - return; - } - - tracing_start_sched_switch(); - - mutex_lock(&sched_register_mutex); - tracer_enabled++; - mutex_unlock(&sched_register_mutex); -} - -/** - * tracing_stop_sched_switch_record - start tracing context switches - * - * Turns off context switch tracing for a tracer. - */ -void tracing_stop_sched_switch_record(void) -{ - mutex_lock(&sched_register_mutex); - tracer_enabled--; - WARN_ON(tracer_enabled < 0); - mutex_unlock(&sched_register_mutex); - - tracing_stop_sched_switch(); -} - -/** - * tracing_sched_switch_assign_trace - assign a trace array for ctx switch - * @tr: trace array pointer to assign - * - * Some tracers might want to record the context switches in their - * trace. This function lets those tracers assign the trace array - * to use. - */ -void tracing_sched_switch_assign_trace(struct trace_array *tr) -{ - ctx_trace = tr; -} - diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 19bd8928ce94..8fb84b362816 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -365,6 +365,62 @@ probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu) wakeup_current_cpu = cpu; } +static void +tracing_sched_switch_trace(struct trace_array *tr, + struct task_struct *prev, + struct task_struct *next, + unsigned long flags, int pc) +{ + struct ftrace_event_call *call = &event_context_switch; + struct ring_buffer *buffer = tr->trace_buffer.buffer; + struct ring_buffer_event *event; + struct ctx_switch_entry *entry; + + event = trace_buffer_lock_reserve(buffer, TRACE_CTX, + sizeof(*entry), flags, pc); + if (!event) + return; + entry = ring_buffer_event_data(event); + entry->prev_pid = prev->pid; + entry->prev_prio = prev->prio; + entry->prev_state = prev->state; + entry->next_pid = next->pid; + entry->next_prio = next->prio; + entry->next_state = next->state; + entry->next_cpu = task_cpu(next); + + if (!call_filter_check_discard(call, entry, buffer, event)) + trace_buffer_unlock_commit(buffer, event, flags, pc); +} + +static void +tracing_sched_wakeup_trace(struct trace_array *tr, + struct task_struct *wakee, + struct task_struct *curr, + unsigned long flags, int pc) +{ + struct ftrace_event_call *call = &event_wakeup; + struct ring_buffer_event *event; + struct ctx_switch_entry *entry; + struct ring_buffer *buffer = tr->trace_buffer.buffer; + + event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, + sizeof(*entry), flags, pc); + if (!event) + return; + entry = ring_buffer_event_data(event); + entry->prev_pid = curr->pid; + entry->prev_prio = curr->prio; + entry->prev_state = curr->state; + entry->next_pid = wakee->pid; + entry->next_prio = wakee->prio; + entry->next_state = wakee->state; + entry->next_cpu = task_cpu(wakee); + + if (!call_filter_check_discard(call, entry, buffer, event)) + trace_buffer_unlock_commit(buffer, event, flags, pc); +} + static void notrace probe_wakeup_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) diff --git 
a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c index 1f24ed99dca2..fabfa0f190a3 100644 --- a/kernel/trace/trace_seq.c +++ b/kernel/trace/trace_seq.c @@ -69,20 +69,15 @@ int trace_print_seq(struct seq_file *m, struct trace_seq *s) * trace_seq_printf() is used to store strings into a special * buffer (@s). Then the output may be either used by * the sequencer or pulled into another buffer. - * - * Returns 1 if we successfully written all the contents to - * the buffer. - * Returns 0 if we the length to write is bigger than the - * reserved buffer space. In this case, nothing gets written. */ -int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) +void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) { unsigned int len = TRACE_SEQ_BUF_LEFT(s); va_list ap; int ret; if (s->full || !len) - return 0; + return; va_start(ap, fmt); ret = vsnprintf(s->buffer + s->len, len, fmt, ap); @@ -91,12 +86,10 @@ int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) /* If we can't write it all, don't bother writing anything */ if (ret >= len) { s->full = 1; - return 0; + return; } s->len += ret; - - return 1; } EXPORT_SYMBOL_GPL(trace_seq_printf); @@ -107,25 +100,18 @@ EXPORT_SYMBOL_GPL(trace_seq_printf); * @nmaskbits: The number of bits that are valid in @maskp * * Writes a ASCII representation of a bitmask string into @s. - * - * Returns 1 if we successfully written all the contents to - * the buffer. - * Returns 0 if we the length to write is bigger than the - * reserved buffer space. In this case, nothing gets written. */ -int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, +void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, int nmaskbits) { unsigned int len = TRACE_SEQ_BUF_LEFT(s); int ret; if (s->full || !len) - return 0; + return; - ret = bitmap_scnprintf(s->buffer, len, maskp, nmaskbits); + ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits); s->len += ret; - - return 1; } EXPORT_SYMBOL_GPL(trace_seq_bitmask); @@ -139,28 +125,24 @@ EXPORT_SYMBOL_GPL(trace_seq_bitmask); * trace_seq_printf is used to store strings into a special * buffer (@s). Then the output may be either used by * the sequencer or pulled into another buffer. - * - * Returns how much it wrote to the buffer. */ -int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) +void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) { unsigned int len = TRACE_SEQ_BUF_LEFT(s); int ret; if (s->full || !len) - return 0; + return; ret = vsnprintf(s->buffer + s->len, len, fmt, args); /* If we can't write it all, don't bother writing anything */ if (ret >= len) { s->full = 1; - return 0; + return; } s->len += ret; - - return len; } EXPORT_SYMBOL_GPL(trace_seq_vprintf); @@ -178,28 +160,24 @@ EXPORT_SYMBOL_GPL(trace_seq_vprintf); * * This function will take the format and the binary array and finish * the conversion into the ASCII string within the buffer. - * - * Returns how much it wrote to the buffer. 
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 29228c4d5696..dfe00a4f3f3e 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -114,7 +114,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags,
 	struct trace_entry *ent = iter->ent;
 	struct syscall_trace_enter *trace;
 	struct syscall_metadata *entry;
-	int i, ret, syscall;
+	int i, syscall;
 
 	trace = (typeof(trace))ent;
 	syscall = trace->nr;
@@ -128,35 +128,28 @@ print_syscall_enter(struct trace_iterator *iter, int flags,
 		goto end;
 	}
 
-	ret = trace_seq_printf(s, "%s(", entry->name);
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
+	trace_seq_printf(s, "%s(", entry->name);
 
 	for (i = 0; i < entry->nb_args; i++) {
+
+		if (trace_seq_has_overflowed(s))
+			goto end;
+
 		/* parameter types */
-		if (trace_flags & TRACE_ITER_VERBOSE) {
-			ret = trace_seq_printf(s, "%s ", entry->types[i]);
-			if (!ret)
-				return TRACE_TYPE_PARTIAL_LINE;
-		}
+		if (trace_flags & TRACE_ITER_VERBOSE)
+			trace_seq_printf(s, "%s ", entry->types[i]);
+
 		/* parameter values */
-		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
-				       trace->args[i],
-				       i == entry->nb_args - 1 ? "" : ", ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
+		trace_seq_printf(s, "%s: %lx%s", entry->args[i],
+				 trace->args[i],
+				 i == entry->nb_args - 1 ? "" : ", ");
 	}
 
-	ret = trace_seq_putc(s, ')');
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
-
+	trace_seq_putc(s, ')');
 end:
-	ret = trace_seq_putc(s, '\n');
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
+	trace_seq_putc(s, '\n');
 
-	return TRACE_TYPE_HANDLED;
+	return trace_handle_return(s);
 }
 
 static enum print_line_t
@@ -168,7 +161,6 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
 	struct syscall_trace_exit *trace;
 	int syscall;
 	struct syscall_metadata *entry;
-	int ret;
 
 	trace = (typeof(trace))ent;
 	syscall = trace->nr;
@@ -176,7 +168,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
 
 	if (!entry) {
 		trace_seq_putc(s, '\n');
-		return TRACE_TYPE_HANDLED;
+		goto out;
 	}
 
 	if (entry->exit_event->event.type != ent->type) {
@@ -184,12 +176,11 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
 		return TRACE_TYPE_UNHANDLED;
 	}
 
-	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
+	trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
 				trace->ret);
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
 
-	return TRACE_TYPE_HANDLED;
+ out:
+	return trace_handle_return(s);
 }
 
 extern char *__bad_type_size(void);
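With the void trace_seq API, print_syscall_enter() and print_syscall_exit() above no longer check each call: they write unconditionally, test trace_seq_has_overflowed() once per loop iteration, and let trace_handle_return() map the final overflow state to TRACE_TYPE_HANDLED or TRACE_TYPE_PARTIAL_LINE. The sketch below models that control flow in plain C; mini_seq, mini_handle_return() and print_args() are illustrative stand-ins, not kernel functions.

/*
 * Standalone sketch (not kernel code) of "write everything, check overflow
 * once per iteration, map the final state to a return code at the end".
 */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

enum print_line { LINE_PARTIAL, LINE_HANDLED };

struct mini_seq {
	char buf[48];
	unsigned int len;
	bool full;
};

static void mini_seq_puts(struct mini_seq *s, const char *str)
{
	unsigned int n = (unsigned int)strlen(str);

	if (s->full)
		return;
	if (n > sizeof(s->buf) - s->len) {
		s->full = true;
		return;
	}
	memcpy(s->buf + s->len, str, n);
	s->len += n;
}

/* analogue of trace_handle_return(): overflow means a partial line */
static enum print_line mini_handle_return(const struct mini_seq *s)
{
	return s->full ? LINE_PARTIAL : LINE_HANDLED;
}

static enum print_line print_args(struct mini_seq *s, const char *args[], int nb_args)
{
	int i;

	mini_seq_puts(s, "sys_example(");
	for (i = 0; i < nb_args; i++) {
		/* one overflow test per iteration instead of one per append */
		if (s->full)
			break;
		mini_seq_puts(s, args[i]);
		if (i != nb_args - 1)
			mini_seq_puts(s, ", ");
	}
	mini_seq_puts(s, ")\n");

	return mini_handle_return(s);
}

int main(void)
{
	const char *args[] = { "fd: 3", "buf: 7f1c", "count: 4096" };
	struct mini_seq s = { .len = 0, .full = false };
	enum print_line ret = print_args(&s, args, 3);

	printf("%.*s", (int)s.len, s.buf);
	printf("result: %s\n", ret == LINE_HANDLED ? "handled" : "partial");
	return 0;
}

The per-iteration check is only an early exit; correctness comes from the single test at the end, which is why the per-call return values could be dropped throughout the series.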
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 33ff6a24b802..8520acc34b18 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -552,8 +552,7 @@ error:
 	return ret;
 
 fail_address_parse:
-	if (inode)
-		iput(inode);
+	iput(inode);
 
 	pr_info("Failed to parse address or file.\n");
 
@@ -606,7 +605,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
 
 	for (i = 0; i < tu->tp.nr_args; i++)
 		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 
 	return 0;
 }
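The fail_address_parse hunk drops the NULL test because iput(), like kfree(), is a no-op when passed a NULL pointer, so the error path can release unconditionally. A tiny userspace analogue using free(NULL), which the C standard likewise defines as a no-op; the parse() helper here is purely illustrative and has nothing to do with the kernel code above.

/*
 * Plain C illustration (not the kernel's iput()) of an error path that
 * releases a resource unconditionally because the release function is
 * defined to ignore NULL.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse(const char *arg)
{
	char *copy = NULL;
	char *colon;

	copy = strdup(arg);
	if (!copy)
		goto fail;                /* copy is still NULL here */

	colon = strchr(copy, ':');
	if (!colon)
		goto fail;                /* copy is non-NULL here */

	printf("parsed: %s\n", colon + 1);
	free(copy);
	return 0;

fail:
	free(copy);                       /* safe in both cases: free(NULL) is a no-op */
	fprintf(stderr, "failed to parse '%s'\n", arg);
	return -1;
}

int main(void)
{
	parse("file:offset");
	parse("no-colon-here");
	return 0;
}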
"" : ", "); } - ret = trace_seq_putc(s, ')'); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - + trace_seq_putc(s, ')'); end: - ret = trace_seq_putc(s, '\n'); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; + trace_seq_putc(s, '\n'); - return TRACE_TYPE_HANDLED; + return trace_handle_return(s); } static enum print_line_t @@ -168,7 +161,6 @@ print_syscall_exit(struct trace_iterator *iter, int flags, struct syscall_trace_exit *trace; int syscall; struct syscall_metadata *entry; - int ret; trace = (typeof(trace))ent; syscall = trace->nr; @@ -176,7 +168,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags, if (!entry) { trace_seq_putc(s, '\n'); - return TRACE_TYPE_HANDLED; + goto out; } if (entry->exit_event->event.type != ent->type) { @@ -184,12 +176,11 @@ print_syscall_exit(struct trace_iterator *iter, int flags, return TRACE_TYPE_UNHANDLED; } - ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name, + trace_seq_printf(s, "%s -> 0x%lx\n", entry->name, trace->ret); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - return TRACE_TYPE_HANDLED; + out: + return trace_handle_return(s); } extern char *__bad_type_size(void); diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 33ff6a24b802..8520acc34b18 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -552,8 +552,7 @@ error: return ret; fail_address_parse: - if (inode) - iput(inode); + iput(inode); pr_info("Failed to parse address or file.\n"); @@ -606,7 +605,7 @@ static int probes_seq_show(struct seq_file *m, void *v) for (i = 0; i < tu->tp.nr_args; i++) seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm); - seq_printf(m, "\n"); + seq_putc(m, '\n'); return 0; } @@ -852,16 +851,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e tu = container_of(event, struct trace_uprobe, tp.call.event); if (is_ret_probe(tu)) { - if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", - ftrace_event_name(&tu->tp.call), - entry->vaddr[1], entry->vaddr[0])) - goto partial; + trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", + ftrace_event_name(&tu->tp.call), + entry->vaddr[1], entry->vaddr[0]); data = DATAOF_TRACE_ENTRY(entry, true); } else { - if (!trace_seq_printf(s, "%s: (0x%lx)", - ftrace_event_name(&tu->tp.call), - entry->vaddr[0])) - goto partial; + trace_seq_printf(s, "%s: (0x%lx)", + ftrace_event_name(&tu->tp.call), + entry->vaddr[0]); data = DATAOF_TRACE_ENTRY(entry, false); } @@ -869,14 +866,13 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e struct probe_arg *parg = &tu->tp.args[i]; if (!parg->type->print(s, parg->name, data + parg->offset, entry)) - goto partial; + goto out; } - if (trace_seq_puts(s, "\n")) - return TRACE_TYPE_HANDLED; + trace_seq_putc(s, '\n'); -partial: - return TRACE_TYPE_PARTIAL_LINE; + out: + return trace_handle_return(s); } typedef bool (*filter_func_t)(struct uprobe_consumer *self, |