Diffstat (limited to 'tools/perf/arch/x86/util/intel-pt.c')
-rw-r--r-- | tools/perf/arch/x86/util/intel-pt.c | 138
1 file changed, 82 insertions, 56 deletions
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 609088c01e3a..d6d26256915f 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -13,18 +13,21 @@
 #include <linux/zalloc.h>
 #include <cpuid.h>
 
-#include "../../perf.h"
 #include "../../util/session.h"
 #include "../../util/event.h"
 #include "../../util/evlist.h"
 #include "../../util/evsel.h"
 #include "../../util/cpumap.h"
+#include "../../util/mmap.h"
 #include <subcmd/parse-options.h>
 #include "../../util/parse-events.h"
 #include "../../util/pmu.h"
 #include "../../util/debug.h"
 #include "../../util/auxtrace.h"
+#include "../../util/record.h"
+#include "../../util/target.h"
 #include "../../util/tsc.h"
+#include <internal/lib.h> // page_size
 #include "../../util/intel-pt.h"
 
 #define KiB(x) ((x) * 1024)
@@ -44,7 +47,7 @@ struct intel_pt_recording {
 	struct auxtrace_record itr;
 	struct perf_pmu *intel_pt_pmu;
 	int have_sched_switch;
-	struct perf_evlist *evlist;
+	struct evlist *evlist;
 	bool snapshot_mode;
 	bool snapshot_init_done;
 	size_t snapshot_size;
@@ -110,9 +113,9 @@ static u64 intel_pt_masked_bits(u64 mask, u64 bits)
 }
 
 static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
-				struct perf_evlist *evlist, u64 *res)
+				struct evlist *evlist, u64 *res)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	u64 mask;
 
 	*res = 0;
@@ -122,8 +125,8 @@ static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
 		return -EINVAL;
 
 	evlist__for_each_entry(evlist, evsel) {
-		if (evsel->attr.type == intel_pt_pmu->type) {
-			*res = intel_pt_masked_bits(mask, evsel->attr.config);
+		if (evsel->core.attr.type == intel_pt_pmu->type) {
+			*res = intel_pt_masked_bits(mask, evsel->core.attr.config);
 			return 0;
 		}
 	}
@@ -132,7 +135,7 @@ static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
 }
 
 static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
-				  struct perf_evlist *evlist)
+				  struct evlist *evlist)
 {
 	u64 val;
 	int err, topa_multiple_entries;
@@ -268,13 +271,13 @@ intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
 	return attr;
 }
 
-static const char *intel_pt_find_filter(struct perf_evlist *evlist,
+static const char *intel_pt_find_filter(struct evlist *evlist,
 					struct perf_pmu *intel_pt_pmu)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(evlist, evsel) {
-		if (evsel->attr.type == intel_pt_pmu->type)
+		if (evsel->core.attr.type == intel_pt_pmu->type)
 			return evsel->filter;
 	}
 
@@ -289,7 +292,7 @@ static size_t intel_pt_filter_bytes(const char *filter)
 }
 
 static size_t
-intel_pt_info_priv_size(struct auxtrace_record *itr, struct perf_evlist *evlist)
+intel_pt_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist)
 {
 	struct intel_pt_recording *ptr =
 			container_of(itr, struct intel_pt_recording, itr);
@@ -312,7 +315,7 @@ static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
 
 static int intel_pt_info_fill(struct auxtrace_record *itr,
 			      struct perf_session *session,
-			      struct auxtrace_info_event *auxtrace_info,
+			      struct perf_record_auxtrace_info *auxtrace_info,
 			      size_t priv_size)
 {
 	struct intel_pt_recording *ptr =
@@ -326,7 +329,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
 	unsigned long max_non_turbo_ratio;
 	size_t filter_str_len;
 	const char *filter;
-	u64 *info;
+	__u64 *info;
 	int err;
 
 	if (priv_size != ptr->priv_size)
@@ -349,10 +352,10 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
 	filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
 	filter_str_len = filter ? strlen(filter) : 0;
 
-	if (!session->evlist->nr_mmaps)
+	if (!session->evlist->core.nr_mmaps)
 		return -EINVAL;
 
-	pc = session->evlist->mmap[0].base;
+	pc = session->evlist->mmap[0].core.base;
 	if (pc) {
 		err = perf_read_tsc_conversion(pc, &tc);
 		if (err) {
@@ -365,7 +368,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
 			ui__warning("Intel Processor Trace: TSC not available\n");
 	}
 
-	per_cpu_mmaps = !cpu_map__empty(session->evlist->cpus);
+	per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.cpus);
 
 	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
 	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -398,10 +401,10 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
 	return 0;
 }
 
-static int intel_pt_track_switches(struct perf_evlist *evlist)
+static int intel_pt_track_switches(struct evlist *evlist)
 {
 	const char *sched_switch = "sched:sched_switch";
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	int err;
 
 	if (!perf_evlist__can_select_event(evlist, sched_switch))
@@ -414,12 +417,12 @@ static int intel_pt_track_switches(struct perf_evlist *evlist)
 		return err;
 	}
 
-	evsel = perf_evlist__last(evlist);
+	evsel = evlist__last(evlist);
 
 	perf_evsel__set_sample_bit(evsel, CPU);
 	perf_evsel__set_sample_bit(evsel, TIME);
 
-	evsel->system_wide = true;
+	evsel->core.system_wide = true;
 	evsel->no_aux_samples = true;
 	evsel->immediate = true;
 
@@ -513,7 +516,7 @@ out_err:
 }
 
 static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
-				    struct perf_evsel *evsel)
+				    struct evsel *evsel)
 {
 	int err;
 	char c;
@@ -526,39 +529,59 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
 	 * sets pt=0, which avoids senseless kernel errors.
 	 */
 	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
-	    !(evsel->attr.config & 1)) {
+	    !(evsel->core.attr.config & 1)) {
 		pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
-		evsel->attr.config |= 1;
+		evsel->core.attr.config |= 1;
 	}
 
 	err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
 				       "cyc_thresh", "caps/psb_cyc",
-				       evsel->attr.config);
+				       evsel->core.attr.config);
 	if (err)
 		return err;
 
 	err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
 				       "mtc_period", "caps/mtc",
-				       evsel->attr.config);
+				       evsel->core.attr.config);
 	if (err)
 		return err;
 
 	return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
 					"psb_period", "caps/psb_cyc",
-					evsel->attr.config);
+					evsel->core.attr.config);
+}
+
+/*
+ * Currently, there is not enough information to disambiguate different PEBS
+ * events, so only allow one.
+ */
+static bool intel_pt_too_many_aux_output(struct evlist *evlist)
+{
+	struct evsel *evsel;
+	int aux_output_cnt = 0;
+
+	evlist__for_each_entry(evlist, evsel)
+		aux_output_cnt += !!evsel->core.attr.aux_output;
+
+	if (aux_output_cnt > 1) {
+		pr_err(INTEL_PT_PMU_NAME " supports at most one event with aux-output\n");
+		return true;
+	}
+
+	return false;
 }
 
 static int intel_pt_recording_options(struct auxtrace_record *itr,
-				      struct perf_evlist *evlist,
+				      struct evlist *evlist,
 				      struct record_opts *opts)
 {
 	struct intel_pt_recording *ptr =
 			container_of(itr, struct intel_pt_recording, itr);
 	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
 	bool have_timing_info, need_immediate = false;
-	struct perf_evsel *evsel, *intel_pt_evsel = NULL;
-	const struct cpu_map *cpus = evlist->cpus;
-	bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
+	struct evsel *evsel, *intel_pt_evsel = NULL;
+	const struct perf_cpu_map *cpus = evlist->core.cpus;
+	bool privileged = perf_event_paranoid_check(-1);
 	u64 tsc_bit;
 	int err;
 
@@ -566,13 +589,13 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
 
 	evlist__for_each_entry(evlist, evsel) {
-		if (evsel->attr.type == intel_pt_pmu->type) {
+		if (evsel->core.attr.type == intel_pt_pmu->type) {
 			if (intel_pt_evsel) {
 				pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
 				return -EINVAL;
 			}
-			evsel->attr.freq = 0;
-			evsel->attr.sample_period = 1;
+			evsel->core.attr.freq = 0;
+			evsel->core.attr.sample_period = 1;
 			intel_pt_evsel = evsel;
 			opts->full_auxtrace = true;
 		}
 	}
@@ -588,6 +611,9 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 		return -EINVAL;
 	}
 
+	if (intel_pt_too_many_aux_output(evlist))
+		return -EINVAL;
+
 	if (!opts->full_auxtrace)
 		return 0;
 
@@ -670,7 +696,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 
 	intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
 
-	if (opts->full_auxtrace && (intel_pt_evsel->attr.config & tsc_bit))
+	if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit))
 		have_timing_info = true;
 	else
 		have_timing_info = false;
@@ -679,25 +705,25 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	 * Per-cpu recording needs sched_switch events to distinguish different
 	 * threads.
 	 */
-	if (have_timing_info && !cpu_map__empty(cpus)) {
+	if (have_timing_info && !perf_cpu_map__empty(cpus)) {
 		if (perf_can_record_switch_events()) {
 			bool cpu_wide = !target__none(&opts->target) &&
 					!target__has_task(&opts->target);
 
 			if (!cpu_wide && perf_can_record_cpu_wide()) {
-				struct perf_evsel *switch_evsel;
+				struct evsel *switch_evsel;
 
 				err = parse_events(evlist, "dummy:u", NULL);
 				if (err)
 					return err;
 
-				switch_evsel = perf_evlist__last(evlist);
+				switch_evsel = evlist__last(evlist);
 
-				switch_evsel->attr.freq = 0;
-				switch_evsel->attr.sample_period = 1;
-				switch_evsel->attr.context_switch = 1;
+				switch_evsel->core.attr.freq = 0;
+				switch_evsel->core.attr.sample_period = 1;
+				switch_evsel->core.attr.context_switch = 1;
 
-				switch_evsel->system_wide = true;
+				switch_evsel->core.system_wide = true;
 				switch_evsel->no_aux_samples = true;
 				switch_evsel->immediate = true;
 
@@ -737,31 +763,31 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 		 * In the case of per-cpu mmaps, we need the CPU on the
 		 * AUX event.
 		 */
-		if (!cpu_map__empty(cpus))
+		if (!perf_cpu_map__empty(cpus))
 			perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
 	}
 
 	/* Add dummy event to keep tracking */
 	if (opts->full_auxtrace) {
-		struct perf_evsel *tracking_evsel;
+		struct evsel *tracking_evsel;
 
 		err = parse_events(evlist, "dummy:u", NULL);
 		if (err)
 			return err;
 
-		tracking_evsel = perf_evlist__last(evlist);
+		tracking_evsel = evlist__last(evlist);
 
 		perf_evlist__set_tracking_event(evlist, tracking_evsel);
 
-		tracking_evsel->attr.freq = 0;
-		tracking_evsel->attr.sample_period = 1;
+		tracking_evsel->core.attr.freq = 0;
+		tracking_evsel->core.attr.sample_period = 1;
 
 		tracking_evsel->no_aux_samples = true;
 		if (need_immediate)
 			tracking_evsel->immediate = true;
 
 		/* In per-cpu case, always need the time of mmap events etc */
-		if (!cpu_map__empty(cpus)) {
+		if (!perf_cpu_map__empty(cpus)) {
 			perf_evsel__set_sample_bit(tracking_evsel, TIME);
 			/* And the CPU for switch events */
 			perf_evsel__set_sample_bit(tracking_evsel, CPU);
@@ -773,7 +799,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	 * Warn the user when we do not have enough information to decode i.e.
 	 * per-cpu with no sched_switch (except workload-only).
	 */
-	if (!ptr->have_sched_switch && !cpu_map__empty(cpus) &&
+	if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
 	    !target__none(&opts->target))
 		ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
 
@@ -784,11 +810,11 @@ static int intel_pt_snapshot_start(struct auxtrace_record *itr)
 {
 	struct intel_pt_recording *ptr =
 			container_of(itr, struct intel_pt_recording, itr);
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(ptr->evlist, evsel) {
-		if (evsel->attr.type == ptr->intel_pt_pmu->type)
-			return perf_evsel__disable(evsel);
+		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
+			return evsel__disable(evsel);
 	}
 	return -EINVAL;
 }
@@ -797,11 +823,11 @@ static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
 {
 	struct intel_pt_recording *ptr =
 			container_of(itr, struct intel_pt_recording, itr);
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(ptr->evlist, evsel) {
-		if (evsel->attr.type == ptr->intel_pt_pmu->type)
-			return perf_evsel__enable(evsel);
+		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
+			return evsel__enable(evsel);
 	}
 	return -EINVAL;
 }
@@ -1070,10 +1096,10 @@ static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
 {
 	struct intel_pt_recording *ptr =
 			container_of(itr, struct intel_pt_recording, itr);
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(ptr->evlist, evsel) {
-		if (evsel->attr.type == ptr->intel_pt_pmu->type)
+		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
 			return perf_evlist__enable_event_idx(ptr->evlist, evsel, idx);
 	}