-rw-r--r--  include/linux/perf_event.h       |  1
-rw-r--r--  include/uapi/linux/perf_event.h  | 10
-rw-r--r--  kernel/events/core.c             | 49
3 files changed, 58 insertions(+), 2 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1660039199b2..7d7280aa4e22 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -735,6 +735,7 @@ struct perf_event {
int pending_wakeup;
int pending_kill;
int pending_disable;
+ unsigned long pending_addr; /* SIGTRAP */
struct irq_work pending;
atomic_t event_limit;
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 8c5b9f5ad63f..31b00e3b69c9 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -311,6 +311,7 @@ enum perf_event_read_format {
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */
+#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -391,7 +392,8 @@ struct perf_event_attr {
build_id : 1, /* use build id in mmap2 events */
inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */
remove_on_exec : 1, /* event is removed from task on exec */
- __reserved_1 : 27;
+ sigtrap : 1, /* send synchronous SIGTRAP on event */
+ __reserved_1 : 26;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -443,6 +445,12 @@ struct perf_event_attr {
__u16 __reserved_2;
__u32 aux_sample_size;
__u32 __reserved_3;
+
+ /*
+ * User provided data if sigtrap=1, passed back to user via
+ * siginfo_t::si_perf, e.g. to permit user to identify the event.
+ */
+ __u64 sig_data;
};
/*
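For illustration only (not part of this patch): a minimal userspace sketch of how the new attribute bits might be used, assuming the uapi header above is installed. The event type, config and sample period are arbitrary placeholders; sig_data is the cookie handed back in siginfo_t::si_perf.

/* Illustrative sketch, not from this patch: open a task-bound event that
 * raises a synchronous SIGTRAP on overflow. */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int open_sigtrap_event(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size           = sizeof(attr);           /* >= PERF_ATTR_SIZE_VER7 so sig_data is copied */
	attr.type           = PERF_TYPE_HARDWARE;     /* placeholder */
	attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
	attr.sample_period  = 1000000;
	attr.sigtrap        = 1;                      /* synchronous SIGTRAP on overflow */
	attr.remove_on_exec = 1;                      /* required with sigtrap, see perf_copy_attr() below */
	attr.sig_data       = 0x1234ULL;              /* returned via siginfo_t::si_perf */

	/* sigtrap events must target a task (pid >= 0), see perf_event_alloc() below. */
	return syscall(SYS_perf_event_open, &attr, pid, /*cpu=*/-1,
		       /*group_fd=*/-1, /*flags=*/0);
}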
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e4a584bceb7a..6f0723c711a9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6392,6 +6392,33 @@ void perf_event_wakeup(struct perf_event *event)
}
}
+static void perf_sigtrap(struct perf_event *event)
+{
+ struct kernel_siginfo info;
+
+ /*
+ * We'd expect this to only occur if the irq_work is delayed and either
+ * ctx->task or current has changed in the meantime. This can be the
+ * case on architectures that do not implement arch_irq_work_raise().
+ */
+ if (WARN_ON_ONCE(event->ctx->task != current))
+ return;
+
+ /*
+ * perf_pending_event() can race with the task exiting.
+ */
+ if (current->flags & PF_EXITING)
+ return;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_code = TRAP_PERF;
+ info.si_errno = event->attr.type;
+ info.si_perf = event->attr.sig_data;
+ info.si_addr = (void __user *)event->pending_addr;
+ force_sig_info(&info);
+}
+
static void perf_pending_event_disable(struct perf_event *event)
{
int cpu = READ_ONCE(event->pending_disable);
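Again for illustration (not part of this patch): the receiving side, a SIGTRAP handler decoding the fields perf_sigtrap() fills in above. It assumes a libc siginfo_t that exposes TRAP_PERF and si_perf as introduced by this series; spellings may differ on other kernel/libc combinations, and fprintf() is used only for brevity (it is not async-signal-safe).

/* Illustrative sketch, not from this patch: decode the siginfo filled in by
 * perf_sigtrap(). */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void sigtrap_handler(int sig, siginfo_t *info, void *ucontext)
{
	if (info->si_code != TRAP_PERF)
		return;		/* SIGTRAP from some other source, e.g. a debugger */

	/*
	 * si_errno <- event->attr.type      (PMU type of the event that fired)
	 * si_perf  <- event->attr.sig_data  (user cookie identifying the event)
	 * si_addr  <- event->pending_addr   (sample address, if the PMU reports one)
	 */
	fprintf(stderr, "perf SIGTRAP: type=%d sig_data=%#llx addr=%p\n",
		info->si_errno, (unsigned long long)info->si_perf, info->si_addr);
}

static void install_sigtrap_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigtrap_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGTRAP, &sa, NULL);
}

Because perf_sigtrap() uses force_sig_info() on current, the handler runs synchronously in the monitored task itself, which is why the function insists that event->ctx->task == current before sending anything.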
@@ -6401,6 +6428,13 @@ static void perf_pending_event_disable(struct perf_event *event)
if (cpu == smp_processor_id()) {
WRITE_ONCE(event->pending_disable, -1);
+
+ if (event->attr.sigtrap) {
+ perf_sigtrap(event);
+ atomic_set_release(&event->event_limit, 1); /* rearm event */
+ return;
+ }
+
perf_event_disable_local(event);
return;
}
@@ -9103,6 +9137,7 @@ static int __perf_event_overflow(struct perf_event *event,
if (events && atomic_dec_and_test(&event->event_limit)) {
ret = 1;
event->pending_kill = POLL_HUP;
+ event->pending_addr = data->addr;
perf_event_disable_inatomic(event);
}
@@ -11384,6 +11419,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
if (!task || cpu != -1)
return ERR_PTR(-EINVAL);
}
+ if (attr->sigtrap && !task) {
+ /* Requires a task: avoid signalling random tasks. */
+ return ERR_PTR(-EINVAL);
+ }
node = (cpu >= 0) ? cpu_to_node(cpu) : -1;
event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO,
@@ -11432,6 +11471,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
event->state = PERF_EVENT_STATE_INACTIVE;
+ if (event->attr.sigtrap)
+ atomic_set(&event->event_limit, 1);
+
if (task) {
event->attach_state = PERF_ATTACH_TASK;
/*
@@ -11710,6 +11752,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
if (attr->remove_on_exec && attr->enable_on_exec)
return -EINVAL;
+ if (attr->sigtrap && !attr->remove_on_exec)
+ return -EINVAL;
+
out:
return ret;
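One more hedged sketch (not from this patch) to make the new constraint concrete: opening a sigtrap event without remove_on_exec is expected to fail with EINVAL in perf_copy_attr(). The event type and config below are placeholders.

/* Illustrative sketch, not from this patch: sigtrap=1 without remove_on_exec=1
 * is rejected by the check above. */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_sigtrap_without_remove_on_exec(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size    = sizeof(attr);
	attr.type    = PERF_TYPE_SOFTWARE;	/* placeholder */
	attr.config  = PERF_COUNT_SW_DUMMY;
	attr.sigtrap = 1;			/* remove_on_exec deliberately left clear */

	/* Expected: returns -1 with errno == EINVAL from perf_copy_attr(). */
	return syscall(SYS_perf_event_open, &attr, /*pid=*/0, -1, -1, 0);
}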
@@ -12936,7 +12981,9 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
struct perf_event_context *child_ctx;
if (!event->attr.inherit ||
- (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD))) {
+ (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) ||
+ /* Do not inherit if sigtrap and signal handlers were cleared. */
+ (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) {
*inherited_all = 0;
return 0;
}