Diffstat (limited to 'include')
-rw-r--r--  include/linux/ftrace_event.h |  9
-rw-r--r--  include/trace/ftrace.h       | 39
2 files changed, 37 insertions(+), 11 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index f7b47c336703..43360c1d8f70 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -137,8 +137,13 @@ struct ftrace_event_call {
#define FTRACE_MAX_PROFILE_SIZE 2048
-extern char *trace_profile_buf;
-extern char *trace_profile_buf_nmi;
+struct perf_trace_buf {
+ char buf[FTRACE_MAX_PROFILE_SIZE];
+ int recursion;
+};
+
+extern struct perf_trace_buf *perf_trace_buf;
+extern struct perf_trace_buf *perf_trace_buf_nmi;
#define MAX_FILTER_PRED 32
#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
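
The hunk above replaces the two bare per-CPU char buffers with a structure that pairs each buffer with a recursion counter. A minimal userspace sketch of that layout follows; the kernel's per-CPU allocation is stood in for by plain arrays, and NR_CPUS plus the main() driver are illustrative assumptions, not part of the patch:

#include <stdio.h>
#include <string.h>

#define FTRACE_MAX_PROFILE_SIZE 2048
#define NR_CPUS 4                       /* illustrative stand-in */

struct perf_trace_buf {
        char buf[FTRACE_MAX_PROFILE_SIZE];
        int recursion;
};

/* Two buffer sets, as in the patch: one for normal context, one for
 * NMI context, so an NMI cannot clobber an in-flight regular event. */
static struct perf_trace_buf trace_buf[NR_CPUS];
static struct perf_trace_buf trace_buf_nmi[NR_CPUS];

int main(void)
{
        int cpu = 0;                    /* stand-in for smp_processor_id() */
        struct perf_trace_buf *tb = &trace_buf[cpu];

        memset(tb->buf, 0, sizeof(tb->buf));
        printf("cpu %d: recursion=%d\n", cpu, tb->recursion);
        return 0;
}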
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index a7f946094128..4945d1c99864 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -649,6 +649,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
* struct ftrace_event_call *event_call = &event_<call>;
* extern void perf_tp_event(int, u64, u64, void *, int);
* struct ftrace_raw_##call *entry;
+ * struct perf_trace_buf *trace_buf;
* u64 __addr = 0, __count = 1;
* unsigned long irq_flags;
* struct trace_entry *ent;
@@ -673,14 +674,25 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
* __cpu = smp_processor_id();
*
* if (in_nmi())
- * raw_data = rcu_dereference(trace_profile_buf_nmi);
+ * trace_buf = rcu_dereference(perf_trace_buf_nmi);
* else
- * raw_data = rcu_dereference(trace_profile_buf);
+ * trace_buf = rcu_dereference(perf_trace_buf);
*
- * if (!raw_data)
+ * if (!trace_buf)
* goto end;
*
- * raw_data = per_cpu_ptr(raw_data, __cpu);
+ * trace_buf = per_cpu_ptr(trace_buf, __cpu);
+ *
+ * // Avoid recursion from perf that could mess up the buffer
+ * if (trace_buf->recursion++)
+ * goto end_recursion;
+ *
+ * raw_data = trace_buf->buf;
+ *
+ * // Make the recursion update visible before entering perf_tp_event,
+ * // so that we are protected against perf recursion.
+ *
+ * barrier();
*
* // zero dead bytes from alignment to avoid a stack leak to userspace:
* *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
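
The commented walkthrough above is the heart of the change: the first entry on a CPU claims the buffer by bumping the recursion counter, and any nested entry (for instance a trace event fired from inside perf itself) sees a non-zero count and bails out before it can overwrite the half-filled buffer. A compiled-out sketch of that claim/release pattern, with a single counter standing in for the per-CPU one and handle_event() as a hypothetical driver:

#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static int recursion;                   /* per-CPU in the real code */

static void handle_event(int depth)
{
        if (recursion++)                /* nested hit: drop the event */
                goto end_recursion;

        barrier();                      /* publish the claim before the work */

        printf("depth %d: buffer claimed, recording event\n", depth);
        if (depth == 0)
                handle_event(1);        /* simulate a recursive trace event */

end_recursion:
        recursion--;                    /* both paths undo their increment */
}

int main(void)
{
        handle_event(0);                /* prints once: the nested hit is dropped */
        return 0;
}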
@@ -713,8 +725,9 @@ static void ftrace_profile_##call(proto) \
{ \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ftrace_event_call *event_call = &event_##call; \
- extern void perf_tp_event(int, u64, u64, void *, int); \
+ extern void perf_tp_event(int, u64, u64, void *, int); \
struct ftrace_raw_##call *entry; \
+ struct perf_trace_buf *trace_buf; \
u64 __addr = 0, __count = 1; \
unsigned long irq_flags; \
struct trace_entry *ent; \
@@ -739,14 +752,20 @@ static void ftrace_profile_##call(proto) \
__cpu = smp_processor_id(); \
\
if (in_nmi()) \
- raw_data = rcu_dereference(trace_profile_buf_nmi); \
+ trace_buf = rcu_dereference(perf_trace_buf_nmi); \
else \
- raw_data = rcu_dereference(trace_profile_buf); \
+ trace_buf = rcu_dereference(perf_trace_buf); \
\
- if (!raw_data) \
+ if (!trace_buf) \
goto end; \
\
- raw_data = per_cpu_ptr(raw_data, __cpu); \
+ trace_buf = per_cpu_ptr(trace_buf, __cpu); \
+ if (trace_buf->recursion++) \
+ goto end_recursion; \
+ \
+ barrier(); \
+ \
+ raw_data = trace_buf->buf; \
\
*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
entry = (struct ftrace_raw_##call *)raw_data; \
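
The two context lines closing this hunk show the pre-existing leak guard: __entry_size is rounded up to a multiple of u64, so the final word of the entry can contain alignment padding that is never written, and clearing it keeps stale stack bytes out of the copy handed to userspace. A standalone sketch of the effect; ALIGN8 and the payload sizes are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN8(x) (((x) + 7UL) & ~7UL)

int main(void)
{
        char raw_data[64] __attribute__((aligned(8)));
        size_t payload = 13;                  /* bytes actually written */
        size_t entry_size = ALIGN8(payload);  /* bytes copied out: 16 */

        memset(raw_data, 0xAA, sizeof(raw_data));   /* "stale stack" data */

        /* the guard: zero the last word before filling the entry */
        *(uint64_t *)&raw_data[entry_size - sizeof(uint64_t)] = 0ULL;
        memcpy(raw_data, "sample-payload", payload);

        /* bytes 13..15 are padding; without the guard they'd read 0xAA */
        printf("padding byte: %#x\n", (unsigned char)raw_data[payload]);
        return 0;
}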
@@ -761,6 +780,8 @@ static void ftrace_profile_##call(proto) \
perf_tp_event(event_call->id, __addr, __count, entry, \
__entry_size); \
\
+end_recursion: \
+ trace_buf->recursion--; \
end: \
local_irq_restore(irq_flags); \
\