author    Paul Mundt <lethal@linux-sh.org>    2009-06-25 14:30:12 +0900
committer Ingo Molnar <mingo@elte.hu>         2009-06-25 10:31:30 +0200
commit    1155de47cd66d0c496d5a6fb2223e980ef1285b2 (patch)
tree      0ee762d6b1215a37fe89acc174de8cbbf476d218
parent    00e54d087afb3867b0b461aef6c1ff433d0df564 (diff)
ring-buffer: Make it generally available
In hunting down the cause for the hwlat_detector ring buffer spew in
my failed -next builds it became obvious that folks are now treating
ring_buffer as something that is generic independent of tracing and
thus, suitable for public driver consumption.

Given that there are only a few minor areas in ring_buffer that have
any reliance on CONFIG_TRACING or CONFIG_FUNCTION_TRACER, provide
stubs for those and make it generally available.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Cc: Jon Masters <jcm@jonmasters.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <20090625053012.GB19944@linux-sh.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  kernel/Makefile             1
-rw-r--r--  kernel/trace/ring_buffer.c  11
-rw-r--r--  kernel/trace/trace.h        7
3 files changed, 19 insertions, 0 deletions
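For context on what "generally available" means for a driver, the sketch below (not part of the patch) shows roughly how an out-of-tree user such as hwlat_detector drives the in-kernel ring_buffer API as it stood around this release. The wrapper names (sample_buf, buf_init, buf_put, buf_get, buf_exit) and struct sample are purely illustrative; ring_buffer_alloc(), ring_buffer_write(), ring_buffer_consume() and ring_buffer_event_data() are the real exports, though their signatures have changed in later kernels.

/*
 * Illustrative sketch only -- not part of this patch. Assumes the
 * ring_buffer API of the 2.6.31-era kernel this commit targets.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ring_buffer.h>

struct sample {				/* hypothetical payload */
	u64	seqnum;
	u64	duration;
};

static struct ring_buffer *sample_buf;

static int buf_init(unsigned long size)
{
	/* Per-cpu ring buffer; overwrite the oldest entries when full. */
	sample_buf = ring_buffer_alloc(size, RB_FL_OVERWRITE);
	return sample_buf ? 0 : -ENOMEM;
}

static int buf_put(struct sample *s)
{
	/* Copy one record in; returns 0 on success in this era. */
	return ring_buffer_write(sample_buf, sizeof(*s), s);
}

static struct sample *buf_get(int cpu, u64 *ts)
{
	/* Consume the next record written on @cpu, or NULL if empty. */
	struct ring_buffer_event *e = ring_buffer_consume(sample_buf, cpu, ts);

	return e ? ring_buffer_event_data(e) : NULL;
}

static void buf_exit(void)
{
	ring_buffer_free(sample_buf);
}

With CONFIG_TRACING disabled, the trace_recursive_lock()/trace_recursive_unlock() stubs added below compile to nothing, so such a user links against ring_buffer without pulling in the tracers.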
diff --git a/kernel/Makefile b/kernel/Makefile
index 0a32cb21ec9..0630e293cd4 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_X86_DS) += trace/
+obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
 obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 04dac263825..bf27bb7a63e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1563,6 +1563,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	return NULL;
 }
 
+#ifdef CONFIG_TRACING
+
 #define TRACE_RECURSIVE_DEPTH 16
 
 static int trace_recursive_lock(void)
@@ -1593,6 +1595,13 @@ static void trace_recursive_unlock(void)
 	current->trace_recursion--;
 }
 
+#else
+
+#define trace_recursive_lock()		(0)
+#define trace_recursive_unlock()	do { } while (0)
+
+#endif
+
 static DEFINE_PER_CPU(int, rb_need_resched);
 
 /**
@@ -3104,6 +3113,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
 
+#ifdef CONFIG_TRACING
 static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
@@ -3171,6 +3181,7 @@ static __init int rb_init_debugfs(void)
 }
 
 fs_initcall(rb_init_debugfs);
+#endif
 
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6e735d4771f..3548ae5cc78 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -597,6 +597,7 @@ print_graph_function(struct trace_iterator *iter)
 
 extern struct pid *ftrace_pid_trace;
 
+#ifdef CONFIG_FUNCTION_TRACER
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	if (!ftrace_pid_trace)
@@ -604,6 +605,12 @@ static inline int ftrace_trace_task(struct task_struct *task)
 
 	return test_tsk_trace_trace(task);
 }
+#else
+static inline int ftrace_trace_task(struct task_struct *task)
+{
+	return 1;
+}
+#endif
 
 /*
  * trace_iterator_flags is an enumeration that defines bit