author | Song Liu <song@kernel.org> | 2021-04-25 14:43:33 -0700 |
---|---|---|
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2021-04-29 10:30:58 -0300 |
commit | 5508c9dae2a4a111acc7472900164f556ae75346 | |
tree | fd1feda22a604bcc179794b2acf320ad7c66d97b /tools/perf/util/bpf_counter.c | |
parent | 01bd8efcec444468db0275bbd71b49927f7e1544 | |
perf stat: Introduce bpf_counter_ops->disable()
Introduce bpf_counter_ops->disable(), which is used to stop counting the
event.
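To illustrate how a tool is expected to use the new callback, here is a minimal sketch of a disable path built on the bpf_counter__disable() wrapper. This is not code from this patch: the helper name disable_counters() and the evsel_list parameter are assumed for illustration; evlist__for_each_entry() and evlist__disable() are existing perf-internal helpers.

```c
/* Illustrative sketch only -- not part of this patch. */
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/bpf_counter.h"

/* Assumed helper: stop BPF-based counters first, then the perf events. */
static void disable_counters(struct evlist *evsel_list)
{
	struct evsel *counter;

	evlist__for_each_entry(evsel_list, counter)
		bpf_counter__disable(counter);
	evlist__disable(evsel_list);
}
```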
Committer notes:
Added a dummy bpf_counter__disable() to the python binding to keep
'perf test python' from failing, as bpf_counter isn't supported in the
python binding.
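The dummy stub itself lives in tools/perf/util/python.c and is not shown in the diff below (which is limited to bpf_counter.c). A plausible shape, assuming perf's usual __maybe_unused convention, is:

```c
/* Assumed shape of the python-binding dummy; not shown in this diff. */
int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
	return 0;	/* bpf_counter isn't supported in the python binding */
}
```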
Signed-off-by: Song Liu <song@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: kernel-team@fb.com
Link: https://lore.kernel.org/r/20210425214333.1090950-6-song@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/util/bpf_counter.c')
-rw-r--r-- | tools/perf/util/bpf_counter.c | 26 |
1 file changed, 26 insertions, 0 deletions
```diff
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index f179f5743025..ddb52f748c8e 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -215,6 +215,17 @@ static int bpf_program_profiler__enable(struct evsel *evsel)
 	return 0;
 }
 
+static int bpf_program_profiler__disable(struct evsel *evsel)
+{
+	struct bpf_counter *counter;
+
+	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
+		assert(counter->skel != NULL);
+		bpf_prog_profiler_bpf__detach(counter->skel);
+	}
+	return 0;
+}
+
 static int bpf_program_profiler__read(struct evsel *evsel)
 {
 	// perf_cpu_map uses /sys/devices/system/cpu/online
@@ -280,6 +291,7 @@ static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu,
 struct bpf_counter_ops bpf_program_profiler_ops = {
 	.load = bpf_program_profiler__load,
 	.enable = bpf_program_profiler__enable,
+	.disable = bpf_program_profiler__disable,
 	.read = bpf_program_profiler__read,
 	.destroy = bpf_program_profiler__destroy,
 	.install_pe = bpf_program_profiler__install_pe,
@@ -627,6 +639,12 @@ static int bperf__enable(struct evsel *evsel)
 	return 0;
 }
 
+static int bperf__disable(struct evsel *evsel)
+{
+	evsel->follower_skel->bss->enabled = 0;
+	return 0;
+}
+
 static int bperf__read(struct evsel *evsel)
 {
 	struct bperf_follower_bpf *skel = evsel->follower_skel;
@@ -768,6 +786,7 @@ static int bperf__destroy(struct evsel *evsel)
 struct bpf_counter_ops bperf_ops = {
 	.load = bperf__load,
 	.enable = bperf__enable,
+	.disable = bperf__disable,
 	.read = bperf__read,
 	.install_pe = bperf__install_pe,
 	.destroy = bperf__destroy,
@@ -806,6 +825,13 @@ int bpf_counter__enable(struct evsel *evsel)
 	return evsel->bpf_counter_ops->enable(evsel);
 }
 
+int bpf_counter__disable(struct evsel *evsel)
+{
+	if (bpf_counter_skip(evsel))
+		return 0;
+	return evsel->bpf_counter_ops->disable(evsel);
+}
+
 int bpf_counter__read(struct evsel *evsel)
 {
 	if (bpf_counter_skip(evsel))
```
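The two backends stop counting in different ways: the program profiler detaches its BPF programs, while bperf only clears the enabled flag in the follower skeleton's .bss, a map shared between user space and the loaded BPF program, so the BPF side simply stops accumulating. A minimal sketch of that .bss-gate pattern follows; this is a generic illustration under that assumption, not the actual bperf follower program.

```c
// SPDX-License-Identifier: GPL-2.0
/* Generic illustration of a .bss "enabled" gate; not the bperf follower. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

volatile int enabled;	/* user space flips this via skel->bss->enabled */
__u64 count;		/* value read back by user space */

SEC("tracepoint/sched/sched_switch")
int on_switch(void *ctx)
{
	if (!enabled)
		return 0;	/* counting disabled: skip accumulation */
	__sync_fetch_and_add(&count, 1);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```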