```
author     Linus Torvalds <torvalds@linux-foundation.org>  2018-12-31 11:46:59 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-12-31 11:46:59 -0800
commit     495d714ad140e1732e66c45d0409054b24c1a0d6
tree       373ec6619adea47d848d36f140b32def27164bbd /arch
parent     f12e840c819bab42621685558a01d3f46ab9a226
parent     3d739c1f6156c70eb0548aa288dcfbac9e0bd162
```
Merge tag 'trace-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
- Rework of the kprobe/uprobe and synthetic events to consolidate all
  the dynamic event code. This will make future changes easier.

- Partial rewrite of the function graph tracing infrastructure. This
  allows multiple users to hook onto functions and receive the return
  callback. It is the groundwork for having kprobes and the function
  graph tracer share one code base.

- Clean up of the histogram code, which will make it easier to add
  more histogram features in the future.

- Addition of str_has_prefix() and a few use cases (see the sketch
  after this list). There is currently a similar function, strstart(),
  used in a few places, but it only returns a bool, not a length.
  Those callers will be converted to str_has_prefix() in the future.

- A few other assorted cleanups.
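
To ground the new helper, here is a minimal sketch of str_has_prefix() with the semantics described above (the prefix length on a match, 0 otherwise), plus a caller showing how the returned length removes open-coded offsets. The parse_var()/"var=" names are illustrative, not from the kernel:

```c
#include <linux/string.h>

/*
 * Sketch of the helper this pull adds: returns the length of @prefix
 * when @str starts with it, and 0 when it does not.
 */
static inline size_t str_has_prefix(const char *str, const char *prefix)
{
	size_t len = strlen(prefix);

	return strncmp(str, prefix, len) == 0 ? len : 0;
}

/*
 * Hypothetical caller: instead of strncmp(str, "var=", 4) followed by
 * a magic "str + 4", the returned length skips the prefix directly.
 */
static const char *parse_var(const char *str)
{
	size_t len = str_has_prefix(str, "var=");

	if (!len)
		return NULL;	/* not our prefix */
	return str + len;	/* past the prefix, no open-coded number */
}
```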
* tag 'trace-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (57 commits)
tracing: Use the return of str_has_prefix() to remove open coded numbers
tracing: Have the histogram use the result of str_has_prefix() for len of prefix
tracing: Use str_has_prefix() instead of using fixed sizes
tracing: Use str_has_prefix() helper for histogram code
string.h: Add str_has_prefix() helper function
tracing: Make function ‘ftrace_exports’ static
tracing: Simplify printf'ing in seq_print_sym
tracing: Avoid -Wformat-nonliteral warning
tracing: Merge seq_print_sym_short() and seq_print_sym_offset()
tracing: Add hist trigger comments for variable-related fields
tracing: Remove hist trigger synth_var_refs
tracing: Use hist trigger's var_ref array to destroy var_refs
tracing: Remove open-coding of hist trigger var_ref management
tracing: Use var_refs[] for hist trigger reference checking
tracing: Change strlen to sizeof for hist trigger static strings
tracing: Remove unnecessary hist trigger struct field
tracing: Fix ftrace_graph_get_ret_stack() to use task and not current
seq_buf: Use size_t for len in seq_buf_puts()
seq_buf: Make seq_buf_puts() null-terminate the buffer
arm64: Use ftrace_graph_get_ret_stack() instead of curr_ret_stack
...
Diffstat (limited to 'arch')

```
-rw-r--r--  arch/arm64/kernel/ftrace.c          |  1
-rw-r--r--  arch/arm64/kernel/perf_callchain.c  |  2
-rw-r--r--  arch/arm64/kernel/process.c         |  2
-rw-r--r--  arch/arm64/kernel/return_address.c  |  2
-rw-r--r--  arch/arm64/kernel/stacktrace.c      | 15
-rw-r--r--  arch/arm64/kernel/time.c            |  2
-rw-r--r--  arch/arm64/kernel/traps.c           |  2
-rw-r--r--  arch/powerpc/kernel/process.c       | 13
-rw-r--r--  arch/sh/kernel/dumpstack.c          | 11
-rw-r--r--  arch/sh/kernel/dwarf.c              |  9
-rw-r--r--  arch/sparc/kernel/perf_event.c      |  8
-rw-r--r--  arch/sparc/kernel/stacktrace.c      |  8
-rw-r--r--  arch/sparc/kernel/traps_64.c        |  7
-rw-r--r--  arch/x86/kernel/ftrace.c            | 41
-rw-r--r--  arch/x86/kernel/ftrace_64.S         |  8

15 files changed, 74 insertions(+), 57 deletions(-)
```
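
Every arch diff below makes the same conversion: rather than reading tsk->curr_ret_stack and indexing tsk->ret_stack[] by hand, the unwinders now ask the fgraph core for the i-th saved entry and stop when it returns NULL. A minimal sketch of that recurring pattern, using the ftrace_graph_get_ret_stack() helper and return_to_handler symbol that appear in the diff; the unwind_graph_pc() wrapper itself is hypothetical:

```c
#include <linux/ftrace.h>
#include <linux/sched.h>

extern void return_to_handler(void);	/* fgraph trampoline, as in the diff */

/*
 * Hypothetical wrapper showing the shared pattern: map a PC that the
 * graph tracer patched to return_to_handler back to the original
 * return address saved on the task's return stack.
 */
static unsigned long unwind_graph_pc(struct task_struct *tsk,
				     unsigned long pc, int *graph)
{
	struct ftrace_ret_stack *ret_stack;

	if (pc != (unsigned long)return_to_handler)
		return pc;		/* frame was not hooked */

	/* Fetch the next saved entry, counting down from the top. */
	ret_stack = ftrace_graph_get_ret_stack(tsk, (*graph)++);
	if (!ret_stack)
		return pc;		/* walked past the saved entries */

	return ret_stack->ret;		/* the original return address */
}
```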
```diff
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index c1f30f854fb3..8e4431a8821f 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -193,6 +193,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 
 void arch_ftrace_update_code(int command)
 {
+	command |= FTRACE_MAY_SLEEP;
 	ftrace_modify_all_code(command);
 }
 
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 94754f07f67a..a34c26afacb0 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -168,7 +168,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 	frame.fp = regs->regs[29];
 	frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = current->curr_ret_stack;
+	frame.graph = 0;
 #endif
 
 	walk_stackframe(current, &frame, callchain_trace, entry);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index e0a443730e04..a0f985a6ac50 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -461,7 +461,7 @@ unsigned long get_wchan(struct task_struct *p)
 	frame.fp = thread_saved_fp(p);
 	frame.pc = thread_saved_pc(p);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = p->curr_ret_stack;
+	frame.graph = 0;
 #endif
 	do {
 		if (unwind_frame(p, &frame))
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 933adbc0f654..53c40196b607 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -44,7 +44,7 @@ void *return_address(unsigned int level)
 	frame.fp = (unsigned long)__builtin_frame_address(0);
 	frame.pc = (unsigned long)return_address; /* dummy */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = current->curr_ret_stack;
+	frame.graph = 0;
 #endif
 
 	walk_stackframe(current, &frame, save_return_addr, &data);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 4989f7ea1e59..1a29f2695ff2 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -59,18 +59,17 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&
 			(frame->pc == (unsigned long)return_to_handler)) {
-		if (WARN_ON_ONCE(frame->graph == -1))
-			return -EINVAL;
-		if (frame->graph < -1)
-			frame->graph += FTRACE_NOTRACE_DEPTH;
-
+		struct ftrace_ret_stack *ret_stack;
 		/*
 		 * This is a case where function graph tracer has
 		 * modified a return address (LR) in a stack frame
 		 * to hook a function return.
 		 * So replace it with the original value.
 		 */
-		frame->pc = tsk->ret_stack[frame->graph--].ret;
+		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
+		if (WARN_ON_ONCE(!ret_stack))
+			return -EINVAL;
+		frame->pc = ret_stack->ret;
 	}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -137,7 +136,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 	frame.fp = regs->regs[29];
 	frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = current->curr_ret_stack;
+	frame.graph = 0;
 #endif
 
 	walk_stackframe(current, &frame, save_trace, &data);
@@ -168,7 +167,7 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
 		frame.pc = (unsigned long)__save_stack_trace;
 	}
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = tsk->curr_ret_stack;
+	frame.graph = 0;
 #endif
 
 	walk_stackframe(tsk, &frame, save_trace, &data);
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index f258636273c9..a777ae90044d 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -52,7 +52,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 	frame.fp = regs->regs[29];
 	frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = current->curr_ret_stack;
+	frame.graph = 0;
 #endif
 	do {
 		int ret = unwind_frame(NULL, &frame);
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index cdc71cf70aad..4e2fb877f8d5 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -123,7 +123,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		frame.pc = thread_saved_pc(tsk);
 	}
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = tsk->curr_ret_stack;
+	frame.graph = 0;
 #endif
 
 	skip = !!regs;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 96f34730010f..ce393df243aa 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -2061,9 +2061,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 	int count = 0;
 	int firstframe = 1;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	int curr_frame = current->curr_ret_stack;
+	struct ftrace_ret_stack *ret_stack;
 	extern void return_to_handler(void);
 	unsigned long rth = (unsigned long)return_to_handler;
+	int curr_frame = 0;
 #endif
 
 	sp = (unsigned long) stack;
@@ -2089,9 +2090,13 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 		printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 		if ((ip == rth) && curr_frame >= 0) {
-			pr_cont(" (%pS)",
-				(void *)current->ret_stack[curr_frame].ret);
-			curr_frame--;
+			ret_stack = ftrace_graph_get_ret_stack(current,
+						curr_frame++);
+			if (ret_stack)
+				pr_cont(" (%pS)",
+					(void *)ret_stack->ret);
+			else
+				curr_frame = -1;
 		}
 #endif
 		if (firstframe)
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
index 93c6c0e691ee..9f1c9c11d62d 100644
--- a/arch/sh/kernel/dumpstack.c
+++ b/arch/sh/kernel/dumpstack.c
@@ -56,17 +56,20 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
 			struct thread_info *tinfo, int *graph)
 {
 	struct task_struct *task = tinfo->task;
+	struct ftrace_ret_stack *ret_stack;
 	unsigned long ret_addr;
-	int index = task->curr_ret_stack;
 
 	if (addr != (unsigned long)return_to_handler)
 		return;
 
-	if (!task->ret_stack || index < *graph)
+	if (!task->ret_stack)
 		return;
 
-	index -= *graph;
-	ret_addr = task->ret_stack[index].ret;
+	ret_stack = ftrace_graph_get_ret_stack(task, *graph);
+	if (!ret_stack)
+		return;
+
+	ret_addr = ret_stack->ret;
 
 	ops->address(data, ret_addr, 1);
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 9e1d26c8a0c4..c5b426506d16 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -605,17 +605,18 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
 	 * expected to find the real return address.
 	 */
 	if (pc == (unsigned long)&return_to_handler) {
-		int index = current->curr_ret_stack;
+		struct ftrace_ret_stack *ret_stack;
 
+		ret_stack = ftrace_graph_get_ret_stack(current, 0);
+		if (ret_stack)
+			pc = ret_stack->ret;
 		/*
 		 * We currently have no way of tracking how many
 		 * return_to_handler()'s we've seen. If there is more
 		 * than one patched return address on our stack,
 		 * complain loudly.
 		 */
-		WARN_ON(index > 0);
-
-		pc = current->ret_stack[index].ret;
+		WARN_ON(ftrace_graph_get_ret_stack(current, 1));
 	}
 #endif
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 47c871394ccb..6de7c684c29f 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1767,9 +1767,11 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 		perf_callchain_store(entry, pc);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
-			int index = current->curr_ret_stack;
-			if (current->ret_stack && index >= graph) {
-				pc = current->ret_stack[index - graph].ret;
+			struct ftrace_ret_stack *ret_stack;
+			ret_stack = ftrace_graph_get_ret_stack(current,
+							       graph);
+			if (ret_stack) {
+				pc = ret_stack->ret;
 				perf_callchain_store(entry, pc);
 				graph++;
 			}
diff --git a/arch/sparc/kernel/stacktrace.c b/arch/sparc/kernel/stacktrace.c
index be4c14cccc05..dd654e651500 100644
--- a/arch/sparc/kernel/stacktrace.c
+++ b/arch/sparc/kernel/stacktrace.c
@@ -57,9 +57,11 @@ static void __save_stack_trace(struct thread_info *tp,
 			trace->entries[trace->nr_entries++] = pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 			if ((pc + 8UL) == (unsigned long) &return_to_handler) {
-				int index = t->curr_ret_stack;
-				if (t->ret_stack && index >= graph) {
-					pc = t->ret_stack[index - graph].ret;
+				struct ftrace_ret_stack *ret_stack;
+				ret_stack = ftrace_graph_get_ret_stack(t,
+								       graph);
+				if (ret_stack) {
+					pc = ret_stack->ret;
 					if (trace->nr_entries <
 					    trace->max_entries)
 						trace->entries[trace->nr_entries++] = pc;
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index aa624ed79db1..0cd02a64a451 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2502,9 +2502,10 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 		printk(" [%016lx] %pS\n", pc, (void *) pc);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
-			int index = tsk->curr_ret_stack;
-			if (tsk->ret_stack && index >= graph) {
-				pc = tsk->ret_stack[index - graph].ret;
+			struct ftrace_ret_stack *ret_stack;
+			ret_stack = ftrace_graph_get_ret_stack(tsk, graph);
+			if (ret_stack) {
+				pc = ret_stack->ret;
 				printk(" [%016lx] %pS\n", pc, (void *) pc);
 				graph++;
 			}
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 7ee8067cbf45..8257a59704ae 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -733,18 +733,20 @@ union ftrace_op_code_union {
 	} __attribute__((packed));
 };
 
+#define RET_SIZE		1
+
 static unsigned long
 create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 {
-	unsigned const char *jmp;
 	unsigned long start_offset;
 	unsigned long end_offset;
 	unsigned long op_offset;
 	unsigned long offset;
 	unsigned long size;
-	unsigned long ip;
+	unsigned long retq;
 	unsigned long *ptr;
 	void *trampoline;
+	void *ip;
 	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
 	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
 	union ftrace_op_code_union op_ptr;
@@ -764,27 +766,27 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 
 	/*
 	 * Allocate enough size to store the ftrace_caller code,
-	 * the jmp to ftrace_epilogue, as well as the address of
-	 * the ftrace_ops this trampoline is used for.
+	 * the retq, as well as the address of the ftrace_ops this
+	 * trampoline is used for.
 	 */
-	trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
+	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
 	if (!trampoline)
 		return 0;
 
-	*tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
+	*tramp_size = size + RET_SIZE + sizeof(void *);
 
 	/* Copy ftrace_caller onto the trampoline memory */
 	ret = probe_kernel_read(trampoline, (void *)start_offset, size);
-	if (WARN_ON(ret < 0)) {
-		tramp_free(trampoline, *tramp_size);
-		return 0;
-	}
+	if (WARN_ON(ret < 0))
+		goto fail;
 
-	ip = (unsigned long)trampoline + size;
+	ip = trampoline + size;
 
-	/* The trampoline ends with a jmp to ftrace_epilogue */
-	jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue);
-	memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
+	/* The trampoline ends with ret(q) */
+	retq = (unsigned long)ftrace_stub;
+	ret = probe_kernel_read(ip, (void *)retq, RET_SIZE);
+	if (WARN_ON(ret < 0))
+		goto fail;
 
 	/*
 	 * The address of the ftrace_ops that is used for this trampoline
@@ -794,17 +796,15 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 	 * the global function_trace_op variable.
 	 */
 
-	ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
+	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
 	*ptr = (unsigned long)ops;
 
 	op_offset -= start_offset;
 	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
 
 	/* Are we pointing to the reference? */
-	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
-		tramp_free(trampoline, *tramp_size);
-		return 0;
-	}
+	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
+		goto fail;
 
 	/* Load the contents of ptr into the callback parameter */
 	offset = (unsigned long)ptr;
@@ -819,6 +819,9 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
 
 	return (unsigned long)trampoline;
+fail:
+	tramp_free(trampoline, *tramp_size);
+	return 0;
 }
 
 static unsigned long calc_trampoline_call_offset(bool save_regs)
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 91b2cff4b79a..75f2b36b41a6 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -171,9 +171,6 @@ GLOBAL(ftrace_call)
 	restore_mcount_regs
 
 	/*
-	 * The copied trampoline must call ftrace_epilogue as it
-	 * still may need to call the function graph tracer.
-	 *
 	 * The code up to this label is copied into trampolines so
 	 * think twice before adding any new code or changing the
 	 * layout here.
@@ -185,7 +182,10 @@ GLOBAL(ftrace_graph_call)
 	jmp ftrace_stub
 #endif
 
-/* This is weak to keep gas from relaxing the jumps */
+/*
+ * This is weak to keep gas from relaxing the jumps.
+ * It is also used to copy the retq for trampolines.
+ */
 WEAK(ftrace_stub)
 	retq
ENDPROC(ftrace_caller)
```
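
For orientation, the trampoline layout that the create_trampoline() changes above produce, sketched from the sizes in the diff (the retq byte is copied from ftrace_stub instead of emitting the old jmp to ftrace_epilogue):

```c
/*
 * alloc_tramp(size + RET_SIZE + sizeof(void *)) lays out:
 *
 *   trampoline ................... copy of ftrace_caller (size bytes)
 *   trampoline + size ............ retq, RET_SIZE (1) byte copied
 *                                  from ftrace_stub
 *   trampoline + size + RET_SIZE . address of this trampoline's
 *                                  ftrace_ops (sizeof(void *) bytes)
 *
 * Function graph handling moved out of the copied code, so the
 * trampoline no longer needs to jump back to ftrace_epilogue.
 */
```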