Diffstat (limited to 'tools/perf/builtin-trace.c')
-rw-r--r-- | tools/perf/builtin-trace.c | 295
1 file changed, 251 insertions, 44 deletions
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 22ab8e67c760..835619476370 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -89,6 +89,8 @@ struct trace {
 	u64 base_time;
 	FILE *output;
 	unsigned long nr_events;
+	unsigned long nr_events_printed;
+	unsigned long max_events;
 	struct strlist *ev_qualifier;
 	struct {
 		size_t nr;
@@ -106,6 +108,7 @@ struct trace {
 	} stats;
 	unsigned int max_stack;
 	unsigned int min_stack;
+	bool raw_augmented_syscalls;
 	bool not_ev_qualifier;
 	bool live;
 	bool full_time;
@@ -181,7 +184,7 @@ static int __tp_field__init_uint(struct tp_field *field, int size, int offset, b
 	return 0;
 }
 
-static int tp_field__init_uint(struct tp_field *field, struct format_field *format_field, bool needs_swap)
+static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
 {
 	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
 }
@@ -198,7 +201,7 @@ static int __tp_field__init_ptr(struct tp_field *field, int offset)
 	return 0;
 }
 
-static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
+static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
 {
 	return __tp_field__init_ptr(field, format_field->offset);
 }
@@ -214,7 +217,7 @@ static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel,
 					  struct tp_field *field,
 					  const char *name)
 {
-	struct format_field *format_field = perf_evsel__field(evsel, name);
+	struct tep_format_field *format_field = perf_evsel__field(evsel, name);
 
 	if (format_field == NULL)
 		return -1;
@@ -230,7 +233,7 @@ static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
 					 struct tp_field *field,
 					 const char *name)
 {
-	struct format_field *format_field = perf_evsel__field(evsel, name);
+	struct tep_format_field *format_field = perf_evsel__field(evsel, name);
 
 	if (format_field == NULL)
 		return -1;
@@ -288,6 +291,13 @@ static int perf_evsel__init_augmented_syscall_tp_args(struct perf_evsel *evsel)
 	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
 }
 
+static int perf_evsel__init_augmented_syscall_tp_ret(struct perf_evsel *evsel)
+{
+	struct syscall_tp *sc = evsel->priv;
+
+	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
+}
+
 static int perf_evsel__init_raw_syscall_tp(struct perf_evsel *evsel, void *handler)
 {
 	evsel->priv = malloc(sizeof(struct syscall_tp));
@@ -498,16 +508,6 @@ static const char *clockid[] = {
 };
 static DEFINE_STRARRAY(clockid);
 
-static const char *socket_families[] = {
-	"UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
-	"BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
-	"SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
-	"RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
-	"BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
-	"ALG", "NFC", "VSOCK",
-};
-static DEFINE_STRARRAY(socket_families);
-
 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
 						 struct syscall_arg *arg)
 {
@@ -615,6 +615,7 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
 
 struct syscall_arg_fmt {
 	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
+	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
 	void	   *parm;
 	const char *name;
 	bool	   show_zero;
@@ -631,6 +632,8 @@ static struct syscall_fmt {
 } syscall_fmts[] = {
 	{ .name = "access",
 	  .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
+	{ .name = "bind",
+	  .arg = { [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ }, }, },
 	{ .name = "bpf",
 	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
 	{ .name = "brk", .hexret = true,
@@ -645,6 +648,8 @@ static struct syscall_fmt {
 		   [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, },
 	{ .name = "close",
 	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
+	{ .name = "connect",
+	  .arg = { [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ }, }, },
 	{ .name = "epoll_ctl",
 	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
 	{ .name = "eventfd2",
@@ -722,6 +727,10 @@ static struct syscall_fmt {
 	  .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ },
 		   [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
 		   [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */ }, }, },
+	{ .name = "mount",
+	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
+		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
+			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
 	{ .name = "mprotect",
 	  .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
 		   [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
@@ -801,7 +810,8 @@ static struct syscall_fmt {
 	{ .name = "sendmsg",
 	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
 	{ .name = "sendto",
-	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
+	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
+		   [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
 	{ .name = "set_tid_address", .errpid = true, },
 	{ .name = "setitimer",
 	  .arg = { [0] = STRARRAY(which, itimers), }, },
@@ -830,6 +840,8 @@ static struct syscall_fmt {
 	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
 	{ .name = "tkill",
 	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
+	{ .name = "umount2", .alias = "umount",
+	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
 	{ .name = "uname", .alias = "newuname", },
 	{ .name = "unlinkat",
 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
@@ -853,16 +865,30 @@ static struct syscall_fmt *syscall_fmt__find(const char *name)
 	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
 }
 
+static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
+{
+	int i, nmemb = ARRAY_SIZE(syscall_fmts);
+
+	for (i = 0; i < nmemb; ++i) {
+		if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0)
+			return &syscall_fmts[i];
+	}
+
+	return NULL;
+}
+
 /*
  * is_exit: is this "exit" or "exit_group"?
  * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
+ * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
  */
 struct syscall {
-	struct event_format *tp_format;
+	struct tep_event_format *tp_format;
 	int nr_args;
+	int args_size;
 	bool is_exit;
 	bool is_open;
-	struct format_field *args;
+	struct tep_format_field *args;
 	const char *name;
 	struct syscall_fmt *fmt;
 	struct syscall_arg_fmt *arg_fmt;
@@ -1095,11 +1121,21 @@ static void thread__set_filename_pos(struct thread *thread, const char *bf,
 	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
 }
 
+static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
+{
+	struct augmented_arg *augmented_arg = arg->augmented.args;
+
+	return scnprintf(bf, size, "%.*s", augmented_arg->size, augmented_arg->value);
+}
+
 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
 					      struct syscall_arg *arg)
 {
 	unsigned long ptr = arg->val;
 
+	if (arg->augmented.args)
+		return syscall_arg__scnprintf_augmented_string(arg, bf, size);
+
 	if (!arg->trace->vfs_getname)
 		return scnprintf(bf, size, "%#x", ptr);
 
@@ -1142,11 +1178,9 @@ static void sig_handler(int sig)
 	interrupted = sig == SIGINT;
 }
 
-static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
-					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
+static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
 {
-	size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
-	printed += fprintf_duration(duration, duration_calculated, fp);
+	size_t printed = 0;
 
 	if (trace->multiple_threads) {
 		if (trace->show_comm)
@@ -1157,6 +1191,14 @@ static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thre
 	return printed;
 }
 
+static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
+					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
+{
+	size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
+	printed += fprintf_duration(duration, duration_calculated, fp);
+	return printed + trace__fprintf_comm_tid(trace, thread, fp);
+}
+
 static int trace__process_event(struct trace *trace, struct machine *machine,
 				union perf_event *event, struct perf_sample *sample)
 {
@@ -1258,10 +1300,12 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
 
 static int syscall__set_arg_fmts(struct syscall *sc)
 {
-	struct format_field *field;
+	struct tep_format_field *field, *last_field = NULL;
 	int idx = 0, len;
 
 	for (field = sc->args; field; field = field->next, ++idx) {
+		last_field = field;
+
 		if (sc->fmt && sc->fmt->arg[idx].scnprintf)
 			continue;
 
@@ -1270,7 +1314,7 @@ static int syscall__set_arg_fmts(struct syscall *sc)
 		     strcmp(field->name, "path") == 0 ||
 		     strcmp(field->name, "pathname") == 0))
 			sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
-		else if (field->flags & FIELD_IS_POINTER)
+		else if (field->flags & TEP_FIELD_IS_POINTER)
 			sc->arg_fmt[idx].scnprintf = syscall_arg__scnprintf_hex;
 		else if (strcmp(field->type, "pid_t") == 0)
 			sc->arg_fmt[idx].scnprintf = SCA_PID;
@@ -1292,6 +1336,9 @@ static int syscall__set_arg_fmts(struct syscall *sc)
 		}
 	}
 
+	if (last_field)
+		sc->args_size = last_field->offset + last_field->size;
+
 	return 0;
 }
 
@@ -1459,6 +1506,19 @@ static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
 	return scnprintf(bf, size, "arg%d: ", arg->idx);
 }
 
+/*
+ * Check if the value is in fact zero, i.e. mask whatever needs masking, such
+ * as mount 'flags' argument that needs ignoring some magic flag, see comment
+ * in tools/perf/trace/beauty/mount_flags.c
+ */
+static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val)
+{
+	if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val)
+		return sc->arg_fmt[arg->idx].mask_val(arg, val);
+
+	return val;
+}
+
 static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
 				     struct syscall_arg *arg, unsigned long val)
 {
@@ -1472,14 +1532,18 @@ static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
 }
 
 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
-				      unsigned char *args, struct trace *trace,
-				      struct thread *thread)
+				      unsigned char *args, void *augmented_args, int augmented_args_size,
+				      struct trace *trace, struct thread *thread)
 {
 	size_t printed = 0;
 	unsigned long val;
 	u8 bit = 1;
 	struct syscall_arg arg = {
 		.args	= args,
+		.augmented = {
+			.size = augmented_args_size,
+			.args = augmented_args,
+		},
 		.idx	= 0,
 		.mask	= 0,
 		.trace  = trace,
@@ -1495,7 +1559,7 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
 	ttrace->ret_scnprintf = NULL;
 
 	if (sc->args != NULL) {
-		struct format_field *field;
+		struct tep_format_field *field;
 
 		for (field = sc->args; field;
 		     field = field->next, ++arg.idx, bit <<= 1) {
@@ -1503,6 +1567,11 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
 				continue;
 
 			val = syscall_arg__val(&arg, arg.idx);
+			/*
+			 * Some syscall args need some mask, most don't and
+			 * return val untouched.
+			 */
+			val = syscall__mask_val(sc, &arg, val);
 
 			/*
 			 * Suppress this argument if its value is zero and
@@ -1634,6 +1703,8 @@ static int trace__printf_interrupted_entry(struct trace *trace)
 	printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
 	ttrace->entry_pending = false;
 
+	++trace->nr_events_printed;
+
 	return printed;
 }
 
@@ -1654,6 +1725,32 @@ static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel,
 	return printed;
 }
 
+static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, bool raw_augmented)
+{
+	void *augmented_args = NULL;
+	/*
+	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
+	 * and there we get all 6 syscall args plus the tracepoint common
+	 * fields (sizeof(long)) and the syscall_nr (another long). So we check
+	 * if that is the case and if so don't look after the sc->args_size,
+	 * but always after the full raw_syscalls:sys_enter payload, which is
+	 * fixed.
+	 *
+	 * We'll revisit this later to pass s->args_size to the BPF augmenter
+	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c, so that it
+	 * copies only what we need for each syscall, like what happens when we
+	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
+	 * traffic to just what is needed for each syscall.
+	 */
+	int args_size = raw_augmented ? (8 * (int)sizeof(long)) : sc->args_size;
+
+	*augmented_args_size = sample->raw_size - args_size;
+	if (*augmented_args_size > 0)
+		augmented_args = sample->raw_data + args_size;
+
+	return augmented_args;
+}
+
 static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
 			    union perf_event *event __maybe_unused,
 			    struct perf_sample *sample)
@@ -1663,6 +1760,8 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
 	size_t printed = 0;
 	struct thread *thread;
 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
+	int augmented_args_size = 0;
+	void *augmented_args = NULL;
 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
 	struct thread_trace *ttrace;
 
@@ -1686,13 +1785,24 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
 
 	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
 		trace__printf_interrupted_entry(trace);
-
+	/*
+	 * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible
+	 * arguments, even if the syscall being handled, say "openat", uses only 4 arguments
+	 * this breaks syscall__augmented_args() check for augmented args, as we calculate
+	 * syscall->args_size using each syscalls:sys_enter_NAME tracefs format file,
+	 * so when handling, say the openat syscall, we end up getting 6 args for the
+	 * raw_syscalls:sys_enter event, when we expected just 4, we end up mistakenly
	 * thinking that the extra 2 u64 args are the augmented filename, so just check
+	 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
+	 */
+	if (evsel != trace->syscalls.events.sys_enter)
+		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
 	ttrace->entry_time = sample->time;
 	msg = ttrace->entry_str;
 	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
 
 	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
-					   args, trace, thread);
+					   args, augmented_args, augmented_args_size, trace, thread);
 
 	if (sc->is_exit) {
 		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
@@ -1723,7 +1833,8 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct perf_evsel *evse
 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
 	char msg[1024];
-	void *args;
+	void *args, *augmented_args = NULL;
+	int augmented_args_size;
 
 	if (sc == NULL)
 		return -1;
@@ -1738,7 +1849,8 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct perf_evsel *evse
 		goto out_put;
 
 	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
-	syscall__scnprintf_args(sc, msg, sizeof(msg), args, trace, thread);
+	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
+	syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
 	fprintf(trace->output, "%s", msg);
 	err = 0;
 out_put:
@@ -1754,12 +1866,14 @@ static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evse
 	int max_stack = evsel->attr.sample_max_stack ?
 			evsel->attr.sample_max_stack :
 			trace->max_stack;
+	int err;
 
-	if (machine__resolve(trace->host, &al, sample) < 0 ||
-	    thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack))
+	if (machine__resolve(trace->host, &al, sample) < 0)
 		return -1;
 
-	return 0;
+	err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
+	addr_location__put(&al);
+	return err;
 }
 
 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
@@ -1884,6 +1998,13 @@ errno_print: {
 
 	fputc('\n', trace->output);
 
+	/*
+	 * We only consider an 'event' for the sake of --max-events a non-filtered
+	 * sys_enter + sys_exit and other tracepoint events.
+	 */
+	if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
+		interrupted = true;
+
 	if (callchain_ret > 0)
 		trace__fprintf_callchain(trace, sample);
 	else if (callchain_ret < 0)
@@ -2016,13 +2137,25 @@ static void bpf_output__fprintf(struct trace *trace,
 {
 	binary__fprintf(sample->raw_data, sample->raw_size, 8,
 			bpf_output__printer, NULL, trace->output);
+	++trace->nr_events_printed;
 }
 
 static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
 				union perf_event *event __maybe_unused,
 				struct perf_sample *sample)
 {
+	struct thread *thread;
 	int callchain_ret = 0;
+	/*
+	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
+	 * this event's max_events having been hit and this is an entry coming
+	 * from the ring buffer that we should discard, since the max events
+	 * have already been considered/printed.
+	 */
+	if (evsel->disabled)
+		return 0;
+
+	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
 
 	if (sample->callchain) {
 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
@@ -2039,22 +2172,47 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
 	if (trace->trace_syscalls)
 		fprintf(trace->output, "( ): ");
 
+	if (thread)
+		trace__fprintf_comm_tid(trace, thread, trace->output);
+
+	if (evsel == trace->syscalls.events.augmented) {
+		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
+		struct syscall *sc = trace__syscall_info(trace, evsel, id);
+
+		if (sc) {
+			fprintf(trace->output, "%s(", sc->name);
+			trace__fprintf_sys_enter(trace, evsel, sample);
+			fputc(')', trace->output);
+			goto newline;
+		}
+
+		/*
+		 * XXX: Not having the associated syscall info or not finding/adding
+		 * the thread should never happen, but if it does...
+		 * fall thru and print it as a bpf_output event.
+		 */
+	}
+
 	fprintf(trace->output, "%s:", evsel->name);
 
 	if (perf_evsel__is_bpf_output(evsel)) {
-		if (evsel == trace->syscalls.events.augmented)
-			trace__fprintf_sys_enter(trace, evsel, sample);
-		else
-			bpf_output__fprintf(trace, sample);
+		bpf_output__fprintf(trace, sample);
 	} else if (evsel->tp_format) {
 		if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
 		    trace__fprintf_sys_enter(trace, evsel, sample)) {
 			event_format__fprintf(evsel->tp_format, sample->cpu,
 					      sample->raw_data, sample->raw_size,
 					      trace->output);
+			++trace->nr_events_printed;
+
+			if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
+				perf_evsel__disable(evsel);
+				perf_evsel__close(evsel);
+			}
 		}
 	}
 
+newline:
 	fprintf(trace->output, "\n");
 
 	if (callchain_ret > 0)
@@ -2062,6 +2220,7 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
 	else if (callchain_ret < 0)
 		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
 out:
+	thread__put(thread);
 	return 0;
 }
 
@@ -2148,6 +2307,8 @@ static int trace__pgfault(struct trace *trace,
 		trace__fprintf_callchain(trace, sample);
 	else if (callchain_ret < 0)
 		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
+
+	++trace->nr_events_printed;
 out:
 	err = 0;
 out_put:
@@ -2325,6 +2486,9 @@ static void trace__handle_event(struct trace *trace, union perf_event *event, st
 		tracepoint_handler handler = evsel->handler;
 		handler(trace, evsel, event, sample);
 	}
+
+	if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
+		interrupted = true;
 }
 
 static int trace__add_syscall_newtp(struct trace *trace)
@@ -2629,7 +2793,7 @@ next_event:
 		int timeout = done ? 100 : -1;
 
 		if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
-			if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
+			if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
 				draining = true;
 
 			goto again;
@@ -3061,6 +3225,7 @@ static int trace__parse_events_option(const struct option *opt, const char *str,
 	int len = strlen(str) + 1, err = -1, list, idx;
 	char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
 	char group_name[PATH_MAX];
+	struct syscall_fmt *fmt;
 
 	if (strace_groups_dir == NULL)
 		return -1;
@@ -3078,12 +3243,19 @@ static int trace__parse_events_option(const struct option *opt, const char *str,
 		if (syscalltbl__id(trace->sctbl, s) >= 0 ||
 		    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
 			list = 1;
+			goto do_concat;
+		}
+
+		fmt = syscall_fmt__find_by_alias(s);
+		if (fmt != NULL) {
+			list = 1;
+			s = fmt->name;
 		} else {
 			path__join(group_name, sizeof(group_name), strace_groups_dir, s);
 			if (access(group_name, R_OK) == 0)
 				list = 1;
 		}
-
+do_concat:
 		if (lists[list]) {
 			sprintf(lists[list] + strlen(lists[list]), ",%s", s);
 		} else {
@@ -3172,6 +3344,7 @@ int cmd_trace(int argc, const char **argv)
 		.trace_syscalls = false,
 		.kernel_syscallchains = false,
 		.max_stack = UINT_MAX,
+		.max_events = ULONG_MAX,
 	};
 	const char *output_name = NULL;
 	const struct option trace_options[] = {
@@ -3224,6 +3397,8 @@ int cmd_trace(int argc, const char **argv)
 		     &record_parse_callchain_opt),
 	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
 		    "Show the kernel callchains on the syscall exit path"),
+	OPT_ULONG(0, "max-events", &trace.max_events,
+		"Set the maximum number of events to print, exit after that is reached. "),
 	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
 		     "Set the minimum stack depth when parsing the callchain, "
 		     "anything below the specified depth will be ignored."),
@@ -3276,12 +3451,8 @@ int cmd_trace(int argc, const char **argv)
 			goto out;
 	}
 
-	if (evsel) {
-		if (perf_evsel__init_augmented_syscall_tp(evsel) ||
-		    perf_evsel__init_augmented_syscall_tp_args(evsel))
-			goto out;
+	if (evsel)
 		trace.syscalls.events.augmented = evsel;
-	}
 
 	err = bpf__setup_stdout(trace.evlist);
 	if (err) {
@@ -3326,6 +3497,42 @@ int cmd_trace(int argc, const char **argv)
 		}
 	}
 
+	/*
+	 * If we are augmenting syscalls, then combine what we put in the
+	 * __augmented_syscalls__ BPF map with what is in the
+	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
+	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
+	 *
+	 * We'll switch to look at two BPF maps, one for sys_enter and the
+	 * other for sys_exit when we start augmenting the sys_exit paths with
+	 * buffers that are being copied from kernel to userspace, think 'read'
+	 * syscall.
+	 */
+	if (trace.syscalls.events.augmented) {
+		evsel = trace.syscalls.events.augmented;
+
+		if (perf_evsel__init_augmented_syscall_tp(evsel) ||
+		    perf_evsel__init_augmented_syscall_tp_args(evsel))
+			goto out;
+		evsel->handler = trace__sys_enter;
+
+		evlist__for_each_entry(trace.evlist, evsel) {
+			bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
+
+			if (raw_syscalls_sys_exit) {
+				trace.raw_augmented_syscalls = true;
+				goto init_augmented_syscall_tp;
+			}
+
+			if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
+init_augmented_syscall_tp:
+				perf_evsel__init_augmented_syscall_tp(evsel);
+				perf_evsel__init_augmented_syscall_tp_ret(evsel);
+				evsel->handler = trace__sys_exit;
+			}
+		}
+	}
+
 	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
 		return trace__record(&trace, argc-1, &argv[1]);