Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/Kconfig                 2
-rw-r--r--   kernel/trace/blktrace.c             16
-rw-r--r--   kernel/trace/bpf_trace.c            55
-rw-r--r--   kernel/trace/ftrace.c                2
-rw-r--r--   kernel/trace/trace_events.c          4
-rw-r--r--   kernel/trace/trace_sched_switch.c    3
-rw-r--r--   kernel/trace/trace_sched_wakeup.c    2
-rw-r--r--   kernel/trace/trace_stack.c          11
8 files changed, 76 insertions(+), 19 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8d6363f42169..e45db6b0d878 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -434,7 +434,7 @@ config UPROBE_EVENT
config BPF_EVENTS
depends on BPF_SYSCALL
- depends on KPROBE_EVENT || UPROBE_EVENT
+ depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS
bool
default y
help
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b2fcf472774e..a990824c8604 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -437,7 +437,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
struct block_device *bdev,
struct blk_user_trace_setup *buts)
{
- struct blk_trace *old_bt, *bt = NULL;
+ struct blk_trace *bt = NULL;
struct dentry *dir = NULL;
int ret;
@@ -519,11 +519,8 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
bt->trace_state = Blktrace_setup;
ret = -EBUSY;
- old_bt = xchg(&q->blk_trace, bt);
- if (old_bt) {
- (void) xchg(&q->blk_trace, old_bt);
+ if (cmpxchg(&q->blk_trace, NULL, bt))
goto err;
- }
if (atomic_inc_return(&blk_probes_ref) == 1)
blk_register_tracepoints();
@@ -1482,7 +1479,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
static int blk_trace_setup_queue(struct request_queue *q,
struct block_device *bdev)
{
- struct blk_trace *old_bt, *bt = NULL;
+ struct blk_trace *bt = NULL;
int ret = -ENOMEM;
bt = kzalloc(sizeof(*bt), GFP_KERNEL);
@@ -1498,12 +1495,9 @@ static int blk_trace_setup_queue(struct request_queue *q,
blk_trace_setup_lba(bt, bdev);
- old_bt = xchg(&q->blk_trace, bt);
- if (old_bt != NULL) {
- (void)xchg(&q->blk_trace, old_bt);
- ret = -EBUSY;
+ ret = -EBUSY;
+ if (cmpxchg(&q->blk_trace, NULL, bt))
goto free_bt;
- }
if (atomic_inc_return(&blk_probes_ref) == 1)
blk_register_tracepoints();
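
The two blktrace hunks above replace a publish-then-undo sequence with a single atomic
test-and-set. A rough illustration of the difference, using the identifiers from the
patch with the surrounding code elided (the comment about the visibility window is my
reading of why the old sequence was undesirable, not wording from the patch):

	/*
	 * Old pattern: bt was made visible for a short window even when
	 * another blk_trace was already installed and setup had to fail:
	 *
	 *	old_bt = xchg(&q->blk_trace, bt);
	 *	if (old_bt) {
	 *		(void) xchg(&q->blk_trace, old_bt);
	 *		goto err;
	 *	}
	 *
	 * New pattern: bt is published only if q->blk_trace was still NULL,
	 * so a losing racer never disturbs the already-installed trace.
	 */
	ret = -EBUSY;
	if (cmpxchg(&q->blk_trace, NULL, bt))
		goto err;
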
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 0fe96c7c8803..4228fd3682c3 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -199,6 +199,11 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
if (!event)
return -ENOENT;
+ /* make sure event is local and doesn't have pmu::count */
+ if (event->oncpu != smp_processor_id() ||
+ event->pmu->count)
+ return -EINVAL;
+
/*
* we don't know if the function is run successfully by the
* return value. It can be judged in other places, such as
@@ -207,14 +212,58 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
return perf_event_read_local(event);
}
-const struct bpf_func_proto bpf_perf_event_read_proto = {
+static const struct bpf_func_proto bpf_perf_event_read_proto = {
.func = bpf_perf_event_read,
- .gpl_only = false,
+ .gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_ANYTHING,
};
+static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
+{
+ struct pt_regs *regs = (struct pt_regs *) (long) r1;
+ struct bpf_map *map = (struct bpf_map *) (long) r2;
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ void *data = (void *) (long) r4;
+ struct perf_sample_data sample_data;
+ struct perf_event *event;
+ struct perf_raw_record raw = {
+ .size = size,
+ .data = data,
+ };
+
+ if (unlikely(index >= array->map.max_entries))
+ return -E2BIG;
+
+ event = (struct perf_event *)array->ptrs[index];
+ if (unlikely(!event))
+ return -ENOENT;
+
+ if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
+ event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
+ return -EINVAL;
+
+ if (unlikely(event->oncpu != smp_processor_id()))
+ return -EOPNOTSUPP;
+
+ perf_sample_data_init(&sample_data, 0, 0);
+ sample_data.raw = &raw;
+ perf_event_output(event, &sample_data, regs);
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_perf_event_output_proto = {
+ .func = bpf_perf_event_output,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_PTR_TO_STACK,
+ .arg5_type = ARG_CONST_STACK_SIZE,
+};
+
static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
@@ -242,6 +291,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
return &bpf_get_smp_processor_id_proto;
case BPF_FUNC_perf_event_read:
return &bpf_perf_event_read_proto;
+ case BPF_FUNC_perf_event_output:
+ return &bpf_perf_event_output_proto;
default:
return NULL;
}
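
For context, a minimal kprobe program exercising the new bpf_perf_event_output()
helper might look as follows. This is an illustrative sketch in the style of
samples/bpf, not part of the patch; the map name, section names, and the
bpf_helpers.h wrappers are assumptions. Since the helper proto is gpl_only, the
program must declare a GPL-compatible license, and the perf event stored at the
chosen index must be a PERF_COUNT_SW_BPF_OUTPUT software event opened on the
CPU the program runs on, matching the checks added above.

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

/* User space installs one PERF_COUNT_SW_BPF_OUTPUT event per CPU into this
 * map before attaching the program. */
struct bpf_map_def SEC("maps") events = {
	.type		= BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size	= sizeof(int),
	.value_size	= sizeof(u32),
	.max_entries	= 64,
};

SEC("kprobe/sys_write")
int bpf_prog1(struct pt_regs *ctx)
{
	struct {
		u64 pid;
		u64 cookie;
	} data;

	data.pid = bpf_get_current_pid_tgid();
	data.cookie = 0x12345678;

	/* Emit the record through the current CPU's perf event; the helper
	 * rejects events that are not local or not BPF_OUTPUT. */
	bpf_perf_event_output(ctx, &events, bpf_get_smp_processor_id(),
			      &data, sizeof(data));
	return 0;
}

char _license[] SEC("license") = "GPL";
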
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ea2725053771..3f743b147247 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5708,7 +5708,7 @@ free:
}
static void
-ftrace_graph_probe_sched_switch(void *ignore,
+ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
struct task_struct *prev, struct task_struct *next)
{
unsigned long long timestamp;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index bee1e1530052..6bbc5f652355 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -506,7 +506,7 @@ check_ignore_pid(struct trace_pid_list *filtered_pids, struct task_struct *task)
}
static void
-event_filter_pid_sched_switch_probe_pre(void *data,
+event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
struct task_struct *prev, struct task_struct *next)
{
struct trace_array *tr = data;
@@ -520,7 +520,7 @@ event_filter_pid_sched_switch_probe_pre(void *data,
}
static void
-event_filter_pid_sched_switch_probe_post(void *data,
+event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
struct task_struct *prev, struct task_struct *next)
{
struct trace_array *tr = data;
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index f270088e9929..4c896a0101bd 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -16,7 +16,8 @@ static int sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static void
-probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
+probe_sched_switch(void *ignore, bool preempt,
+ struct task_struct *prev, struct task_struct *next)
{
if (unlikely(!sched_ref))
return;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 855c2c7612e8..9d4399b553a3 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -424,7 +424,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
}
static void notrace
-probe_wakeup_sched_switch(void *ignore,
+probe_wakeup_sched_switch(void *ignore, bool preempt,
struct task_struct *prev, struct task_struct *next)
{
struct trace_array_cpu *data;
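
All of the sched_switch probes touched above gain a new bool preempt argument,
reflecting an updated prototype of the sched_switch tracepoint. A hedged sketch
of how an out-of-tree probe would register against the new signature (the probe
and module names are made up for illustration):

#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

/* The probe callback now receives the preemption flag after the data pointer. */
static void my_sched_switch_probe(void *data, bool preempt,
				  struct task_struct *prev,
				  struct task_struct *next)
{
	if (preempt)
		pr_debug("%s preempted by %s\n", prev->comm, next->comm);
}

static int __init my_probe_init(void)
{
	return register_trace_sched_switch(my_sched_switch_probe, NULL);
}

static void __exit my_probe_exit(void)
{
	unregister_trace_sched_switch(my_sched_switch_probe, NULL);
	tracepoint_synchronize_unregister();
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");
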
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 0bd212af406c..dda9e6742950 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -91,9 +91,19 @@ check_stack(unsigned long ip, unsigned long *stack)
if (!object_is_on_stack(stack))
return;
+ /* Can't do this from NMI context (can cause deadlocks) */
+ if (in_nmi())
+ return;
+
local_irq_save(flags);
arch_spin_lock(&stack_trace_max_lock);
+ /*
+ * RCU may not be watching, make it see us.
+ * The stack trace code uses rcu_sched.
+ */
+ rcu_irq_enter();
+
/* In case another CPU set the tracer_frame on us */
if (unlikely(!frame_size))
this_size -= tracer_frame;
@@ -175,6 +185,7 @@ check_stack(unsigned long ip, unsigned long *stack)
}
out:
+ rcu_irq_exit();
arch_spin_unlock(&stack_trace_max_lock);
local_irq_restore(flags);
}
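
Condensed, the two trace_stack.c hunks give check_stack() roughly the following
shape around its critical section; this is only a paraphrase of the patch with
the unrelated lines elided, not additional functionality:

	if (in_nmi())			/* arch_spin_lock() could deadlock in NMI */
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	rcu_irq_enter();		/* stack unwinding uses rcu_sched, which may
					 * not be watching here (e.g. from idle) */

	/* ... record the new maximum stack usage ... */

	rcu_irq_exit();
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);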