Diffstat (limited to 'tools/perf/util/bpf_skel')
-rw-r--r--  tools/perf/util/bpf_skel/kwork_trace.bpf.c      383
-rw-r--r--  tools/perf/util/bpf_skel/lock_contention.bpf.c  175
-rw-r--r--  tools/perf/util/bpf_skel/off_cpu.bpf.c           38
3 files changed, 595 insertions(+), 1 deletion(-)
diff --git a/tools/perf/util/bpf_skel/kwork_trace.bpf.c b/tools/perf/util/bpf_skel/kwork_trace.bpf.c
new file mode 100644
index 000000000000..063c124e0999
--- /dev/null
+++ b/tools/perf/util/bpf_skel/kwork_trace.bpf.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2022, Huawei
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define KWORK_COUNT 100
+#define MAX_KWORKNAME 128
+
+/*
+ * This should be in sync with "util/kwork.h"
+ */
+enum kwork_class_type {
+ KWORK_CLASS_IRQ,
+ KWORK_CLASS_SOFTIRQ,
+ KWORK_CLASS_WORKQUEUE,
+ KWORK_CLASS_MAX,
+};
+
+struct work_key {
+ __u32 type;
+ __u32 cpu;
+ __u64 id;
+};
+
+struct report_data {
+ __u64 nr;
+ __u64 total_time;
+ __u64 max_time;
+ __u64 max_time_start;
+ __u64 max_time_end;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct work_key));
+ __uint(value_size, MAX_KWORKNAME);
+ __uint(max_entries, KWORK_COUNT);
+} perf_kwork_names SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct work_key));
+ __uint(value_size, sizeof(__u64));
+ __uint(max_entries, KWORK_COUNT);
+} perf_kwork_time SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct work_key));
+ __uint(value_size, sizeof(struct report_data));
+ __uint(max_entries, KWORK_COUNT);
+} perf_kwork_report SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} perf_kwork_cpu_filter SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, MAX_KWORKNAME);
+ __uint(max_entries, 1);
+} perf_kwork_name_filter SEC(".maps");
+
+int enabled = 0;
+int has_cpu_filter = 0;
+int has_name_filter = 0;
+
+static __always_inline int local_strncmp(const char *s1,
+ unsigned int sz, const char *s2)
+{
+ int ret = 0;
+ unsigned int i;
+
+ for (i = 0; i < sz; i++) {
+ ret = (unsigned char)s1[i] - (unsigned char)s2[i];
+ if (ret || !s1[i] || !s2[i])
+ break;
+ }
+
+ return ret;
+}
+
+static __always_inline int trace_event_match(struct work_key *key, char *name)
+{
+ __u8 *cpu_val;
+ char *name_val;
+ __u32 zero = 0;
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ if (!enabled)
+ return 0;
+
+ if (has_cpu_filter) {
+ cpu_val = bpf_map_lookup_elem(&perf_kwork_cpu_filter, &cpu);
+ if (!cpu_val)
+ return 0;
+ }
+
+ if (has_name_filter && (name != NULL)) {
+ name_val = bpf_map_lookup_elem(&perf_kwork_name_filter, &zero);
+ if (name_val &&
+ (local_strncmp(name_val, MAX_KWORKNAME, name) != 0)) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static __always_inline void do_update_time(void *map, struct work_key *key,
+ __u64 time_start, __u64 time_end)
+{
+ struct report_data zero, *data;
+ __s64 delta = time_end - time_start;
+
+ if (delta < 0)
+ return;
+
+ data = bpf_map_lookup_elem(map, key);
+ if (!data) {
+ __builtin_memset(&zero, 0, sizeof(zero));
+ bpf_map_update_elem(map, key, &zero, BPF_NOEXIST);
+ data = bpf_map_lookup_elem(map, key);
+ if (!data)
+ return;
+ }
+
+ if ((delta > data->max_time) ||
+ (data->max_time == 0)) {
+ data->max_time = delta;
+ data->max_time_start = time_start;
+ data->max_time_end = time_end;
+ }
+
+ data->total_time += delta;
+ data->nr++;
+}
+
+static __always_inline void do_update_timestart(void *map, struct work_key *key)
+{
+ __u64 ts = bpf_ktime_get_ns();
+
+ bpf_map_update_elem(map, key, &ts, BPF_ANY);
+}
+
+static __always_inline void do_update_timeend(void *report_map, void *time_map,
+ struct work_key *key)
+{
+ __u64 *time = bpf_map_lookup_elem(time_map, key);
+
+ if (time) {
+ bpf_map_delete_elem(time_map, key);
+ do_update_time(report_map, key, *time, bpf_ktime_get_ns());
+ }
+}
+
+static __always_inline void do_update_name(void *map,
+ struct work_key *key, char *name)
+{
+ if (!bpf_map_lookup_elem(map, key))
+ bpf_map_update_elem(map, key, name, BPF_ANY);
+}
+
+static __always_inline int update_timestart(void *map, struct work_key *key)
+{
+ if (!trace_event_match(key, NULL))
+ return 0;
+
+ do_update_timestart(map, key);
+ return 0;
+}
+
+static __always_inline int update_timestart_and_name(void *time_map,
+ void *names_map,
+ struct work_key *key,
+ char *name)
+{
+ if (!trace_event_match(key, name))
+ return 0;
+
+ do_update_timestart(time_map, key);
+ do_update_name(names_map, key, name);
+
+ return 0;
+}
+
+static __always_inline int update_timeend(void *report_map,
+ void *time_map, struct work_key *key)
+{
+ if (!trace_event_match(key, NULL))
+ return 0;
+
+ do_update_timeend(report_map, time_map, key);
+
+ return 0;
+}
+
+static __always_inline int update_timeend_and_name(void *report_map,
+ void *time_map,
+ void *names_map,
+ struct work_key *key,
+ char *name)
+{
+ if (!trace_event_match(key, name))
+ return 0;
+
+ do_update_timeend(report_map, time_map, key);
+ do_update_name(names_map, key, name);
+
+ return 0;
+}
+
+SEC("tracepoint/irq/irq_handler_entry")
+int report_irq_handler_entry(struct trace_event_raw_irq_handler_entry *ctx)
+{
+ char name[MAX_KWORKNAME];
+ struct work_key key = {
+ .type = KWORK_CLASS_IRQ,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->irq,
+ };
+ void *name_addr = (void *)ctx + (ctx->__data_loc_name & 0xffff);
+
+ bpf_probe_read_kernel_str(name, sizeof(name), name_addr);
+
+ return update_timestart_and_name(&perf_kwork_time,
+ &perf_kwork_names, &key, name);
+}
+
+SEC("tracepoint/irq/irq_handler_exit")
+int report_irq_handler_exit(struct trace_event_raw_irq_handler_exit *ctx)
+{
+ struct work_key key = {
+ .type = KWORK_CLASS_IRQ,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->irq,
+ };
+
+ return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
+}
+
+static char softirq_name_list[NR_SOFTIRQS][MAX_KWORKNAME] = {
+ { "HI" },
+ { "TIMER" },
+ { "NET_TX" },
+ { "NET_RX" },
+ { "BLOCK" },
+ { "IRQ_POLL" },
+ { "TASKLET" },
+ { "SCHED" },
+ { "HRTIMER" },
+ { "RCU" },
+};
+
+SEC("tracepoint/irq/softirq_entry")
+int report_softirq_entry(struct trace_event_raw_softirq *ctx)
+{
+ unsigned int vec = ctx->vec;
+ struct work_key key = {
+ .type = KWORK_CLASS_SOFTIRQ,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)vec,
+ };
+
+ if (vec < NR_SOFTIRQS) {
+ return update_timestart_and_name(&perf_kwork_time,
+ &perf_kwork_names, &key,
+ softirq_name_list[vec]);
+ }
+
+ return 0;
+}
+
+SEC("tracepoint/irq/softirq_exit")
+int report_softirq_exit(struct trace_event_raw_softirq *ctx)
+{
+ struct work_key key = {
+ .type = KWORK_CLASS_SOFTIRQ,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->vec,
+ };
+
+ return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
+}
+
+SEC("tracepoint/irq/softirq_raise")
+int latency_softirq_raise(struct trace_event_raw_softirq *ctx)
+{
+ unsigned int vec = ctx->vec;
+ struct work_key key = {
+ .type = KWORK_CLASS_SOFTIRQ,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)vec,
+ };
+
+ if (vec < NR_SOFTIRQS) {
+ return update_timestart_and_name(&perf_kwork_time,
+ &perf_kwork_names, &key,
+ softirq_name_list[vec]);
+ }
+
+ return 0;
+}
+
+SEC("tracepoint/irq/softirq_entry")
+int latency_softirq_entry(struct trace_event_raw_softirq *ctx)
+{
+ struct work_key key = {
+ .type = KWORK_CLASS_SOFTIRQ,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->vec,
+ };
+
+ return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
+}
+
+SEC("tracepoint/workqueue/workqueue_execute_start")
+int report_workqueue_execute_start(struct trace_event_raw_workqueue_execute_start *ctx)
+{
+ struct work_key key = {
+ .type = KWORK_CLASS_WORKQUEUE,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->work,
+ };
+
+ return update_timestart(&perf_kwork_time, &key);
+}
+
+SEC("tracepoint/workqueue/workqueue_execute_end")
+int report_workqueue_execute_end(struct trace_event_raw_workqueue_execute_end *ctx)
+{
+ char name[MAX_KWORKNAME];
+ struct work_key key = {
+ .type = KWORK_CLASS_WORKQUEUE,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->work,
+ };
+ unsigned long long func_addr = (unsigned long long)ctx->function;
+
+ __builtin_memset(name, 0, sizeof(name));
+ bpf_snprintf(name, sizeof(name), "%ps", &func_addr, sizeof(func_addr));
+
+ return update_timeend_and_name(&perf_kwork_report, &perf_kwork_time,
+ &perf_kwork_names, &key, name);
+}
+
+SEC("tracepoint/workqueue/workqueue_activate_work")
+int latency_workqueue_activate_work(struct trace_event_raw_workqueue_activate_work *ctx)
+{
+ struct work_key key = {
+ .type = KWORK_CLASS_WORKQUEUE,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->work,
+ };
+
+ return update_timestart(&perf_kwork_time, &key);
+}
+
+SEC("tracepoint/workqueue/workqueue_execute_start")
+int latency_workqueue_execute_start(struct trace_event_raw_workqueue_execute_start *ctx)
+{
+ char name[MAX_KWORKNAME];
+ struct work_key key = {
+ .type = KWORK_CLASS_WORKQUEUE,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->work,
+ };
+ unsigned long long func_addr = (unsigned long long)ctx->function;
+
+ __builtin_memset(name, 0, sizeof(name));
+ bpf_snprintf(name, sizeof(name), "%ps", &func_addr, sizeof(func_addr));
+
+ return update_timeend_and_name(&perf_kwork_report, &perf_kwork_time,
+ &perf_kwork_names, &key, name);
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
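
The maps above are meant to be drained from user space after tracing stops. A minimal
read-out sketch (illustration only, not part of this patch), assuming the
perf_kwork_report fd comes from the bpftool-generated skeleton, e.g.
bpf_map__fd(skel->maps.perf_kwork_report), and re-declaring the key/value layouts used
above:

/* sketch: iterate perf_kwork_report and print per-work statistics */
#include <stdio.h>
#include <stdint.h>
#include <bpf/bpf.h>

struct work_key { uint32_t type; uint32_t cpu; uint64_t id; };	/* mirrors the BPF side */
struct report_data { uint64_t nr, total_time, max_time, max_time_start, max_time_end; };

static void dump_kwork_report(int report_fd)
{
	struct work_key key, next;
	struct report_data data;
	void *prev = NULL;

	while (bpf_map_get_next_key(report_fd, prev, &next) == 0) {
		if (bpf_map_lookup_elem(report_fd, &next, &data) == 0)
			printf("type=%u cpu=%u id=%llu nr=%llu total=%lluns max=%lluns\n",
			       next.type, next.cpu,
			       (unsigned long long)next.id,
			       (unsigned long long)data.nr,
			       (unsigned long long)data.total_time,
			       (unsigned long long)data.max_time);
		key = next;
		prev = &key;
	}
}
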
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
new file mode 100644
index 000000000000..9e8b94eb6320
--- /dev/null
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2022 Google
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+/* maximum stack trace depth */
+#define MAX_STACKS 8
+
+/* default buffer size */
+#define MAX_ENTRIES 10240
+
+struct contention_key {
+ __s32 stack_id;
+};
+
+struct contention_data {
+ __u64 total_time;
+ __u64 min_time;
+ __u64 max_time;
+ __u32 count;
+ __u32 flags;
+};
+
+struct tstamp_data {
+ __u64 timestamp;
+ __u64 lock;
+ __u32 flags;
+ __s32 stack_id;
+};
+
+/* callstack storage */
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, MAX_STACKS * sizeof(__u64));
+ __uint(max_entries, MAX_ENTRIES);
+} stacks SEC(".maps");
+
+/* maintain timestamp at the beginning of contention */
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct tstamp_data);
+} tstamp SEC(".maps");
+
+/* actual lock contention statistics */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct contention_key));
+ __uint(value_size, sizeof(struct contention_data));
+ __uint(max_entries, MAX_ENTRIES);
+} lock_stat SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} cpu_filter SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} task_filter SEC(".maps");
+
+/* control flags */
+int enabled;
+int has_cpu;
+int has_task;
+
+/* error stat */
+unsigned long lost;
+
+static inline int can_record(void)
+{
+ if (has_cpu) {
+ __u32 cpu = bpf_get_smp_processor_id();
+ __u8 *ok;
+
+ ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
+ if (!ok)
+ return 0;
+ }
+
+ if (has_task) {
+ __u8 *ok;
+ __u32 pid = bpf_get_current_pid_tgid();
+
+ ok = bpf_map_lookup_elem(&task_filter, &pid);
+ if (!ok)
+ return 0;
+ }
+
+ return 1;
+}
+
+SEC("tp_btf/contention_begin")
+int contention_begin(u64 *ctx)
+{
+ struct task_struct *curr;
+ struct tstamp_data *pelem;
+
+ if (!enabled || !can_record())
+ return 0;
+
+ curr = bpf_get_current_task_btf();
+ pelem = bpf_task_storage_get(&tstamp, curr, NULL,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!pelem || pelem->lock)
+ return 0;
+
+ pelem->timestamp = bpf_ktime_get_ns();
+ pelem->lock = (__u64)ctx[0];
+ pelem->flags = (__u32)ctx[1];
+ pelem->stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);
+
+ if (pelem->stack_id < 0)
+ lost++;
+ return 0;
+}
+
+SEC("tp_btf/contention_end")
+int contention_end(u64 *ctx)
+{
+ struct task_struct *curr;
+ struct tstamp_data *pelem;
+ struct contention_key key;
+ struct contention_data *data;
+ __u64 duration;
+
+ if (!enabled)
+ return 0;
+
+ curr = bpf_get_current_task_btf();
+ pelem = bpf_task_storage_get(&tstamp, curr, NULL, 0);
+ if (!pelem || pelem->lock != ctx[0])
+ return 0;
+
+ duration = bpf_ktime_get_ns() - pelem->timestamp;
+
+ key.stack_id = pelem->stack_id;
+ data = bpf_map_lookup_elem(&lock_stat, &key);
+ if (!data) {
+ struct contention_data first = {
+ .total_time = duration,
+ .max_time = duration,
+ .min_time = duration,
+ .count = 1,
+ .flags = pelem->flags,
+ };
+
+ bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
+ pelem->lock = 0;
+ return 0;
+ }
+
+ __sync_fetch_and_add(&data->total_time, duration);
+ __sync_fetch_and_add(&data->count, 1);
+
+ /* FIXME: need atomic operations */
+ if (data->max_time < duration)
+ data->max_time = duration;
+ if (data->min_time > duration)
+ data->min_time = duration;
+
+ pelem->lock = 0;
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
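
For illustration only (not part of this patch): each lock_stat entry is keyed by the
stack id recorded at contention_begin, so the matching call chain can be fetched from
the stacks map with the same id. A read-out sketch, assuming the two map fds come from
the loaded skeleton and that MAX_STACKS matches the value above:

/* sketch: print per-callstack contention statistics */
#include <stdio.h>
#include <stdint.h>
#include <bpf/bpf.h>

#define MAX_STACKS 8	/* must match the BPF side */

struct contention_key { int32_t stack_id; };
struct contention_data {
	uint64_t total_time, min_time, max_time;
	uint32_t count, flags;
};

static void dump_lock_stat(int stat_fd, int stack_fd)
{
	struct contention_key key, next;
	struct contention_data data;
	uint64_t ips[MAX_STACKS];
	void *prev = NULL;
	int i;

	while (bpf_map_get_next_key(stat_fd, prev, &next) == 0) {
		if (bpf_map_lookup_elem(stat_fd, &next, &data) == 0) {
			printf("stack %d: count=%u avg=%lluns\n",
			       next.stack_id, data.count,
			       (unsigned long long)(data.total_time / data.count));
			/* unused slots are zero-filled on lookup, so stop at 0 */
			if (bpf_map_lookup_elem(stack_fd, &next.stack_id, ips) == 0)
				for (i = 0; i < MAX_STACKS && ips[i]; i++)
					printf("  %#llx\n", (unsigned long long)ips[i]);
		}
		key = next;
		prev = &key;
	}
}
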
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
index cc6d7fd55118..c4ba2bcf179f 100644
--- a/tools/perf/util/bpf_skel/off_cpu.bpf.c
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -12,6 +12,9 @@
#define TASK_INTERRUPTIBLE 0x0001
#define TASK_UNINTERRUPTIBLE 0x0002
+/* create a new thread */
+#define CLONE_THREAD 0x10000
+
#define MAX_STACKS 32
#define MAX_ENTRIES 102400
@@ -85,6 +88,7 @@ int enabled = 0;
int has_cpu = 0;
int has_task = 0;
int has_cgroup = 0;
+int uses_tgid = 0;
const volatile bool has_prev_state = false;
const volatile bool needs_cgroup = false;
@@ -144,7 +148,12 @@ static inline int can_record(struct task_struct *t, int state)
if (has_task) {
__u8 *ok;
- __u32 pid = t->pid;
+ __u32 pid;
+
+ if (uses_tgid)
+ pid = t->tgid;
+ else
+ pid = t->pid;
ok = bpf_map_lookup_elem(&task_filter, &pid);
if (!ok)
@@ -214,6 +223,33 @@ next:
return 0;
}
+SEC("tp_btf/task_newtask")
+int on_newtask(u64 *ctx)
+{
+ struct task_struct *task;
+ u64 clone_flags;
+ u32 pid;
+ u8 val = 1;
+
+ if (!uses_tgid)
+ return 0;
+
+ task = (struct task_struct *)bpf_get_current_task();
+
+ pid = BPF_CORE_READ(task, tgid);
+ if (!bpf_map_lookup_elem(&task_filter, &pid))
+ return 0;
+
+ task = (struct task_struct *)ctx[0];
+ clone_flags = ctx[1];
+
+ pid = task->tgid;
+ if (!(clone_flags & CLONE_THREAD))
+ bpf_map_update_elem(&task_filter, &pid, &val, BPF_NOEXIST);
+
+ return 0;
+}
+
SEC("tp_btf/sched_switch")
int on_switch(u64 *ctx)
{
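
For illustration only (not part of this patch): on_newtask() keeps task_filter current
for tasks created after recording starts, but only when the loader has seeded the map
with thread group ids and set uses_tgid. A loader-side sketch of that setup, assuming a
bpftool-generated skeleton named off_cpu_bpf; the helper name filter_process() is made
up for the example:

#include <stdint.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "off_cpu.skel.h"

static int filter_process(struct off_cpu_bpf *skel, const uint32_t *tgids, int n)
{
	int fd = bpf_map__fd(skel->maps.task_filter);
	uint8_t val = 1;
	int i, err = 0;

	skel->bss->has_task = 1;
	skel->bss->uses_tgid = 1;	/* compare and extend by thread group id */

	for (i = 0; i < n && !err; i++)
		err = bpf_map_update_elem(fd, &tgids[i], &val, BPF_ANY);

	return err;
}

With that in place, a child created without CLONE_THREAD (a new process forked by a
filtered one) gets its own tgid added by on_newtask(), while plain thread creation needs
no update because the group leader's tgid is already in the map.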