@@ -778,6 +778,9 @@ struct perf_event {
void *security;
#endif
struct list_head sb_list;
+
+ /* Address associated with event, which can be passed to siginfo_t. */
+ u64 sig_addr;
#endif /* CONFIG_PERF_EVENTS */
};
@@ -311,6 +311,7 @@ enum perf_event_read_format {
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */
+#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -391,7 +392,8 @@ struct perf_event_attr {
build_id : 1, /* use build id in mmap2 events */
inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */
remove_on_exec : 1, /* event is removed from task on exec */
- __reserved_1 : 27;
+ sigtrap : 1, /* send synchronous SIGTRAP on event */
+ __reserved_1 : 26;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -443,6 +445,12 @@ struct perf_event_attr {
__u16 __reserved_2;
__u32 aux_sample_size;
__u32 __reserved_3;
+
+ /*
+ * User provided data if sigtrap=1, passed back to user via
+ * siginfo_t::si_perf, e.g. to permit user to identify the event.
+ */
+ __u64 sig_data;
};
/*
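
As a usage sketch (an illustration, not part of the patch: it assumes uapi headers updated for PERF_ATTR_SIZE_VER7 and the new sigtrap bit, and the perf_event_open() wrapper and watch_byte() are made-up names), the new fields might be filled in from userspace roughly like so:

    #include <linux/hw_breakpoint.h>
    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
        return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    /* Watch one byte at @addr; each access raises a synchronous SIGTRAP. */
    static int watch_byte(volatile void *addr)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_BREAKPOINT;
        attr.size = sizeof(attr);        /* >= PERF_ATTR_SIZE_VER7 */
        attr.sample_period = 1;          /* overflow on every access */
        attr.bp_addr = (unsigned long)addr;
        attr.bp_type = HW_BREAKPOINT_RW;
        attr.bp_len = HW_BREAKPOINT_LEN_1;
        attr.sigtrap = 1;                /* deliver synchronous SIGTRAP */
        attr.remove_on_exec = 1;         /* required when sigtrap is set */
        attr.sig_data = 0xdeadbeef;      /* echoed back in si_perf */

        return perf_event_open(&attr, 0 /* self */, -1, -1, 0);
    }

On kernels that predate these fields, a non-zero sig_data beyond the known attr size makes perf_copy_attr() fail with E2BIG and write the supported size back into uattr->size, so userspace can probe for support.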
@@ -6391,6 +6391,33 @@ void perf_event_wakeup(struct perf_event *event)
}
}
+static void perf_sigtrap(struct perf_event *event)
+{
+ struct kernel_siginfo info;
+
+ /*
+ * We'd expect this to only occur if the irq_work is delayed and either
+ * ctx->task or current has changed in the meantime. This can be the
+ * case on architectures that do not implement arch_irq_work_raise().
+ */
+ if (WARN_ON_ONCE(event->ctx->task != current))
+ return;
+
+ /*
+ * perf_pending_event() can race with the task exiting.
+ */
+ if (current->flags & PF_EXITING)
+ return;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_code = TRAP_PERF;
+ info.si_errno = event->attr.type;
+ info.si_perf = event->attr.sig_data;
+ info.si_addr = (void __user *)event->sig_addr;
+ force_sig_info(&info);
+}
+
static void perf_pending_event_disable(struct perf_event *event)
{
int cpu = READ_ONCE(event->pending_disable);
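
On the userspace side of perf_sigtrap() above, a handler can tell perf-raised traps apart via si_code == TRAP_PERF and recover the three fields the kernel fills in. A sketch, assuming libc headers that already expose TRAP_PERF and the new si_perf member:

    #include <signal.h>
    #include <string.h>

    static volatile sig_atomic_t traps;

    static void sigtrap_handler(int sig, siginfo_t *info, void *ucontext)
    {
        if (info->si_code != TRAP_PERF)
            return; /* SIGTRAP from elsewhere, e.g. a debugger */

        traps++;
        /* info->si_errno: attr.type of the event that fired    */
        /* info->si_perf:  attr.sig_data (the cookie set above) */
        /* info->si_addr:  address associated with the event    */
    }

    static void install_handler(void)
    {
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = sigtrap_handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGTRAP, &sa, NULL);
    }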
@@ -6400,6 +6427,13 @@ static void perf_pending_event_disable(struct perf_event *event)
if (cpu == smp_processor_id()) {
WRITE_ONCE(event->pending_disable, -1);
+
+ if (event->attr.sigtrap) {
+ perf_sigtrap(event);
+ atomic_set_release(&event->event_limit, 1); /* rearm event */
+ return;
+ }
+
perf_event_disable_local(event);
return;
}
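
The rearm here is the behavioural difference from plain event_limit handling: __perf_event_overflow() (below) drops event_limit from 1 to 0 and schedules this irq_work, and once the signal has been sent the limit is reset to 1, so each subsequent overflow raises another SIGTRAP instead of leaving the event disabled. Continuing the hypothetical userspace sketch, with watch_byte() and the handler above in the same program:

    int main(void)
    {
        volatile char watched;

        install_handler();
        if (watch_byte(&watched) < 0)
            return 1;

        watched = 1; /* event_limit 1 -> 0: SIGTRAP #1, then re-armed */
        watched = 2; /* event_limit 1 -> 0: SIGTRAP #2, then re-armed */

        return traps == 2 ? 0 : 1; /* both traps expected to have fired */
    }

Because the signal is delivered from irq_work before the task returns to userspace, each write is expected to be trapped before the next one executes.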
@@ -9102,6 +9136,7 @@ static int __perf_event_overflow(struct perf_event *event,
if (events && atomic_dec_and_test(&event->event_limit)) {
ret = 1;
event->pending_kill = POLL_HUP;
+ event->sig_addr = data->addr;
perf_event_disable_inatomic(event);
}
@@ -11382,6 +11417,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
if (!task || cpu != -1)
return ERR_PTR(-EINVAL);
}
+ if (attr->sigtrap && !task) {
+ /* Requires a task: avoid signalling random tasks. */
+ return ERR_PTR(-EINVAL);
+ }
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
@@ -11428,6 +11467,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
event->state = PERF_EVENT_STATE_INACTIVE;
+ if (event->attr.sigtrap)
+ atomic_set(&event->event_limit, 1);
+
if (task) {
event->attach_state = PERF_ATTACH_TASK;
/*
@@ -11706,6 +11748,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
if (attr->remove_on_exec && attr->enable_on_exec)
return -EINVAL;
+ if (attr->sigtrap && !attr->remove_on_exec)
+ return -EINVAL;
+
out:
return ret;
@@ -12932,7 +12977,9 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
struct perf_event_context *child_ctx;
if (!event->attr.inherit ||
- (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD))) {
+ (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) ||
+ /* Do not inherit if sigtrap and signal handlers were cleared. */
+ (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) {
*inherited_all = 0;
return 0;
}
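
For context on this last hunk: CLONE_CLEAR_SIGHAND resets all of the child's signal handlers to their defaults, so an inherited sigtrap event would deliver SIGTRAP to a task with no handler installed, terminating it; such events are therefore deliberately not inherited. A minimal sketch of creating such a child (clone3 is available since Linux 5.3, CLONE_CLEAR_SIGHAND since 5.5; error handling elided):

    #define _GNU_SOURCE
    #include <linux/sched.h> /* struct clone_args, CLONE_CLEAR_SIGHAND */
    #include <signal.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static pid_t fork_clear_sighand(void)
    {
        struct clone_args args;

        memset(&args, 0, sizeof(args));
        args.flags = CLONE_CLEAR_SIGHAND; /* child gets default handlers */
        args.exit_signal = SIGCHLD;

        /* Any events with attr.sigtrap set are not inherited by the child. */
        return syscall(SYS_clone3, &args, sizeof(args));
    }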