author		Linus Torvalds <torvalds@linux-foundation.org>	2024-09-18 15:03:58 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-09-18 15:03:58 +0200
commit		9f0c253ddddca608457a42e509267bed2dee0a50
tree		6e00e1ed61e997c06169c70c9365f213fe412fc8 /tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
parent		941c122da5c8355335dc16011c1c291a32cd1118
parent		5e645f31139183ac9a282238da18ca6bbc1c6f4a
Merge tag 'perf-core-2024-09-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf events updates from Ingo Molnar:

 - Implement per-PMU context rescheduling to significantly improve
   single-PMU performance, and related cleanups/fixes (Peter Zijlstra
   and Namhyung Kim)

 - Fix ancient bug resulting in a lot of events being dropped
   erroneously at higher sampling frequencies (Luo Gengkun)

 - uprobes enhancements:

   - Implement RCU-protected hot path optimizations for better
     performance:

       "For baseline vs SRCU, peak throughput increased from 3.7 M/s
        (million uprobe triggerings per second) up to about 8 M/s. For
        uretprobes it's a bit more modest with bump from 2.4 M/s to
        5 M/s.

        For SRCU vs RCU Tasks Trace, peak throughput for uprobes
        increases further from 8 M/s to 10.3 M/s (+28%!), and for
        uretprobes from 5.3 M/s to 5.8 M/s (+11%), as we have more
        work to do on uretprobes side.

        Even single-thread (no contention) performance is slightly
        better: 3.276 M/s to 3.396 M/s (+3.5%) for uprobes, and
        2.055 M/s to 2.174 M/s (+5.8%) for uretprobes."

     (Andrii Nakryiko et al)

   - Document mmap_lock, don't abuse get_user_pages_remote() (Oleg
     Nesterov)

   - Cleanups & fixes to prepare for future work:

       - Remove uprobe_register_refctr()
       - Simplify error handling for alloc_uprobe()
       - Make uprobe_register() return struct uprobe *
       - Fold __uprobe_unregister() into uprobe_unregister()
       - Shift put_uprobe() from delete_uprobe() to uprobe_unregister()
       - BPF: Fix use-after-free in bpf_uprobe_multi_link_attach()

     (Oleg Nesterov)

 - New feature & ABI extension: allow events to use PERF_SAMPLE_READ
   with inheritance, enabling sample-based profiling of a group of
   counters over a hierarchy of processes or threads (Ben Gainey)

 - Intel uncore & power events updates:

   - Add Arrow Lake and Lunar Lake support
   - Add PERF_EV_CAP_READ_SCOPE
   - Clean up and enhance cpumask and hotplug support (Kan Liang)
   - Add LNL uncore iMC freerunning support
   - Use D0:F0 as a default device (Zhenyu Wang)

 - Intel PT: fix AUX snapshot handling race (Adrian Hunter)

 - Misc fixes and cleanups (James Clark, Jiri Olsa, Oleg Nesterov and
   Peter Zijlstra)

* tag 'perf-core-2024-09-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
  dmaengine: idxd: Clean up cpumask and hotplug for perfmon
  iommu/vt-d: Clean up cpumask and hotplug for perfmon
  perf/x86/intel/cstate: Clean up cpumask and hotplug
  perf: Add PERF_EV_CAP_READ_SCOPE
  perf: Generic hotplug support for a PMU with a scope
  uprobes: perform lockless SRCU-protected uprobes_tree lookup
  rbtree: provide rb_find_rcu() / rb_find_add_rcu()
  perf/uprobe: split uprobe_unregister()
  uprobes: travers uprobe's consumer list locklessly under SRCU protection
  uprobes: get rid of enum uprobe_filter_ctx in uprobe filter callbacks
  uprobes: protected uprobe lifetime with SRCU
  uprobes: revamp uprobe refcounting and lifetime management
  bpf: Fix use-after-free in bpf_uprobe_multi_link_attach()
  perf/core: Fix small negative period being ignored
  perf: Really fix event_function_call() locking
  perf: Optimize __pmu_ctx_sched_out()
  perf: Add context time freeze
  perf: Fix event_function_call() locking
  perf: Extract a few helpers
  perf: Optimize context reschedule for single PMU cases
  ...
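Taken together, the uprobe cleanups above change the registration API that the diff below exercises: uprobe_register() absorbs the removed uprobe_register_refctr() (its third argument is the ref_ctr_offset, 0 when no reference counter is used) and now returns a struct uprobe * or an ERR_PTR() instead of an errno. A minimal sketch of the new calling convention follows; the inode, offset, and consumer names are placeholders assumed to be set up elsewhere, not part of this commit:

	struct uprobe *u;

	/* Third argument is ref_ctr_offset; 0 means no reference counter. */
	u = uprobe_register(inode, offset, 0, &consumer);
	if (IS_ERR(u))
		return PTR_ERR(u);	/* errors now arrive as an ERR_PTR, not an int */

	/*
	 * The returned handle, not the (inode, offset) pair, is what
	 * identifies the probe at unregistration time.
	 */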
Diffstat (limited to 'tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c')
-rw-r--r--	tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c	27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index a32771da4293..22807fd78fe6 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -434,7 +434,7 @@ uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
 
 struct testmod_uprobe {
 	struct path path;
-	loff_t offset;
+	struct uprobe *uprobe;
 	struct uprobe_consumer consumer;
 };
 
@@ -448,25 +448,25 @@ static int testmod_register_uprobe(loff_t offset)
 {
 	int err = -EBUSY;
 
-	if (uprobe.offset)
+	if (uprobe.uprobe)
 		return -EBUSY;
 
 	mutex_lock(&testmod_uprobe_mutex);
 
-	if (uprobe.offset)
+	if (uprobe.uprobe)
 		goto out;
 
 	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
 	if (err)
 		goto out;
 
-	err = uprobe_register_refctr(d_real_inode(uprobe.path.dentry),
-				     offset, 0, &uprobe.consumer);
-	if (err)
+	uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry),
+					offset, 0, &uprobe.consumer);
+	if (IS_ERR(uprobe.uprobe)) {
+		err = PTR_ERR(uprobe.uprobe);
 		path_put(&uprobe.path);
-	else
-		uprobe.offset = offset;
-
+		uprobe.uprobe = NULL;
+	}
 out:
 	mutex_unlock(&testmod_uprobe_mutex);
 	return err;
@@ -476,10 +476,11 @@ static void testmod_unregister_uprobe(void)
 {
 	mutex_lock(&testmod_uprobe_mutex);
 
-	if (uprobe.offset) {
-		uprobe_unregister(d_real_inode(uprobe.path.dentry),
-				  uprobe.offset, &uprobe.consumer);
-		uprobe.offset = 0;
+	if (uprobe.uprobe) {
+		uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
+		uprobe_unregister_sync();
+		path_put(&uprobe.path);
+		uprobe.uprobe = NULL;
 	}
 
 	mutex_unlock(&testmod_uprobe_mutex);
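The two-step teardown in testmod_unregister_uprobe() reflects the uprobe_unregister() split from this series: uprobe_unregister_nosync() detaches a consumer without waiting, and uprobe_unregister_sync() then performs the RCU-style synchronization, so a caller tearing down several probes can pay the grace-period cost once for the whole batch. A hedged sketch of that batching pattern, where struct my_probe and teardown_all() are hypothetical and not part of this file:

	/* Hypothetical container, mirroring struct testmod_uprobe above. */
	struct my_probe {
		struct uprobe *uprobe;
		struct uprobe_consumer consumer;
	};

	static void teardown_all(struct my_probe *probes, int n)
	{
		int i;

		/* Detach every consumer first; no synchronization yet. */
		for (i = 0; i < n; i++)
			uprobe_unregister_nosync(probes[i].uprobe,
						 &probes[i].consumer);

		/* One synchronization covers the whole batch. */
		uprobe_unregister_sync();
	}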