| author | Linus Torvalds <torvalds@linux-foundation.org> | 2024-07-18 13:36:33 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-07-18 13:36:33 -0700 |
| commit | 70045bfc4cd5fef44ada25fa3367329eba98731a (patch) | |
| tree | f0c304052fe870c645bfc323bb143e11671d9ff5 /kernel/trace/trace_selftest.c | |
| parent | 2fd4130e53db0958167510eddbca0f09dc858109 (diff) | |
| parent | b576d375b536568c85d42c15a189f6b6fdd75b74 (diff) | |
Merge tag 'ftrace-v6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull ftrace updates from Steven Rostedt:
"Rewrite of function graph tracer to allow multiple users
Up until now, the function graph tracer could only have a single user
attached to it. If another user tried to attach to the function graph
tracer while one was already attached, it would fail. Allowing
function graph tracer to have more than one user has been asked for
since 2009, but it required a rewrite to the logic to pull it off so
it never happened. Until now!
There are three systems that trace the return of a function: kretprobes,
the function graph tracer, and BPF. kretprobes and function graph tracing
do it in a similar way; the difference is that kretprobes uses a shadow
stack per callback, while the function graph tracer creates a shadow stack
for all tasks. The function graph tracer's method makes it possible to
trace the return of all functions, and since kretprobes now needs that
capability too, letting it use the function graph tracer was required.

BPF also wants to trace the return of many probes, and its method does not
scale either; having it use the function graph tracer would improve that.

Allowing the function graph tracer to have multiple users lets both
kretprobes and BPF build on it for these cases. This will allow the
kretprobes code to be removed in the future, as its own version will no
longer be needed.

Note that the function graph tracer is limited to 16 simultaneous users,
due to the shadow stack size and the number of allocated slots"
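
For readers unfamiliar with the fgraph API, a function-graph user is simply a struct fgraph_ops carrying an entry callback and a return callback, registered with register_ftrace_graph(). Below is a minimal sketch of two such users attached at the same time, written against the callback signatures visible in the selftest diff further down; the my_entry/my_return names and demo_init() are hypothetical illustration, and function filtering and error handling are omitted.

```c
#include <linux/ftrace.h>

/* Hypothetical example callbacks -- not part of this pull request. */
static int my_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops)
{
        /* Returning nonzero asks for the matching return callback. */
        return 1;
}

static void my_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops)
{
        /* Called when the traced function returns. */
}

static struct fgraph_ops user_a = {
        .entryfunc = my_entry,
        .retfunc   = my_return,
};

static struct fgraph_ops user_b = {
        .entryfunc = my_entry,
        .retfunc   = my_return,
};

static int __init demo_init(void)
{
        /*
         * Before this rewrite, the second register_ftrace_graph() call
         * would fail; now both users (up to the 16-slot limit) can be
         * attached concurrently.
         */
        int ret = register_ftrace_graph(&user_a);

        if (!ret)
                ret = register_ftrace_graph(&user_b);
        return ret;
}
```

Each registered user consumes a slot on the per-task shadow stack, which is why the number of simultaneous users is capped at 16.
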
* tag 'ftrace-v6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: (49 commits)
fgraph: Use str_plural() in test_graph_storage_single()
function_graph: Add READ_ONCE() when accessing fgraph_array[]
ftrace: Add missing kerneldoc parameters to unregister_ftrace_direct()
function_graph: Everyone uses HAVE_FUNCTION_GRAPH_RET_ADDR_PTR, remove it
function_graph: Fix up ftrace_graph_ret_addr()
function_graph: Make fgraph_update_pid_func() a stub for !DYNAMIC_FTRACE
function_graph: Rename BYTE_NUMBER to CHAR_NUMBER in selftests
fgraph: Remove some unused functions
ftrace: Hide one more entry in stack trace when ftrace_pid is enabled
function_graph: Do not update pid func if CONFIG_DYNAMIC_FTRACE not enabled
function_graph: Make fgraph_do_direct static key static
ftrace: Fix prototypes for ftrace_startup/shutdown_subops()
ftrace: Assign RCU list variable with rcu_assign_ptr()
ftrace: Assign ftrace_list_end to ftrace_ops_list type cast to RCU
ftrace: Declare function_trace_op in header to quiet sparse warning
ftrace: Add comments to ftrace_hash_move() and friends
ftrace: Convert "inc" parameter to bool in ftrace_hash_rec_update_modify()
ftrace: Add comments to ftrace_hash_rec_disable/enable()
ftrace: Remove "filter_hash" parameter from __ftrace_hash_rec_update()
ftrace: Rename dup_hash() and comment it
...
Diffstat (limited to 'kernel/trace/trace_selftest.c')
-rw-r--r-- | kernel/trace/trace_selftest.c | 259
1 file changed, 255 insertions(+), 4 deletions(-)
```diff
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index e9c5058a8efd..97f1e4bc47dc 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -756,13 +756,262 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define CHAR_NUMBER 123
+#define SHORT_NUMBER 12345
+#define WORD_NUMBER 1234567890
+#define LONG_NUMBER 1234567890123456789LL
+#define ERRSTR_BUFLEN 128
+
+struct fgraph_fixture {
+        struct fgraph_ops gops;
+        int store_size;
+        const char *store_type_name;
+        char error_str_buf[ERRSTR_BUFLEN];
+        char *error_str;
+};
+
+static __init int store_entry(struct ftrace_graph_ent *trace,
+                              struct fgraph_ops *gops)
+{
+        struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
+        const char *type = fixture->store_type_name;
+        int size = fixture->store_size;
+        void *p;
+
+        p = fgraph_reserve_data(gops->idx, size);
+        if (!p) {
+                snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
+                         "Failed to reserve %s\n", type);
+                return 0;
+        }
+
+        switch (size) {
+        case 1:
+                *(char *)p = CHAR_NUMBER;
+                break;
+        case 2:
+                *(short *)p = SHORT_NUMBER;
+                break;
+        case 4:
+                *(int *)p = WORD_NUMBER;
+                break;
+        case 8:
+                *(long long *)p = LONG_NUMBER;
+                break;
+        }
+
+        return 1;
+}
+
+static __init void store_return(struct ftrace_graph_ret *trace,
+                                struct fgraph_ops *gops)
+{
+        struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
+        const char *type = fixture->store_type_name;
+        long long expect = 0;
+        long long found = -1;
+        int size;
+        char *p;
+
+        p = fgraph_retrieve_data(gops->idx, &size);
+        if (!p) {
+                snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
+                         "Failed to retrieve %s\n", type);
+                return;
+        }
+        if (fixture->store_size > size) {
+                snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
+                         "Retrieved size %d is smaller than expected %d\n",
+                         size, (int)fixture->store_size);
+                return;
+        }
+
+        switch (fixture->store_size) {
+        case 1:
+                expect = CHAR_NUMBER;
+                found = *(char *)p;
+                break;
+        case 2:
+                expect = SHORT_NUMBER;
+                found = *(short *)p;
+                break;
+        case 4:
+                expect = WORD_NUMBER;
+                found = *(int *)p;
+                break;
+        case 8:
+                expect = LONG_NUMBER;
+                found = *(long long *)p;
+                break;
+        }
+
+        if (found != expect) {
+                snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
+                         "%s returned not %lld but %lld\n", type, expect, found);
+                return;
+        }
+        fixture->error_str = NULL;
+}
+
+static int __init init_fgraph_fixture(struct fgraph_fixture *fixture)
+{
+        char *func_name;
+        int len;
+
+        snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
+                 "Failed to execute storage %s\n", fixture->store_type_name);
+        fixture->error_str = fixture->error_str_buf;
+
+        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
+        len = strlen(func_name);
+
+        return ftrace_set_filter(&fixture->gops.ops, func_name, len, 1);
+}
+
+/* Test fgraph storage for each size */
+static int __init test_graph_storage_single(struct fgraph_fixture *fixture)
+{
+        int size = fixture->store_size;
+        int ret;
+
+        pr_cont("PASSED\n");
+        pr_info("Testing fgraph storage of %d byte%s: ", size, str_plural(size));
+
+        ret = init_fgraph_fixture(fixture);
+        if (ret && ret != -ENODEV) {
+                pr_cont("*Could not set filter* ");
+                return -1;
+        }
+
+        ret = register_ftrace_graph(&fixture->gops);
+        if (ret) {
+                pr_warn("Failed to init store_bytes fgraph tracing\n");
+                return -1;
+        }
+
+        DYN_FTRACE_TEST_NAME();
+
+        unregister_ftrace_graph(&fixture->gops);
+
+        if (fixture->error_str) {
+                pr_cont("*** %s ***", fixture->error_str);
+                return -1;
+        }
+
+        return 0;
+}
+
+static struct fgraph_fixture store_bytes[4] __initdata = {
+        [0] = {
+                .gops = {
+                        .entryfunc = store_entry,
+                        .retfunc = store_return,
+                },
+                .store_size = 1,
+                .store_type_name = "byte",
+        },
+        [1] = {
+                .gops = {
+                        .entryfunc = store_entry,
+                        .retfunc = store_return,
+                },
+                .store_size = 2,
+                .store_type_name = "short",
+        },
+        [2] = {
+                .gops = {
+                        .entryfunc = store_entry,
+                        .retfunc = store_return,
+                },
+                .store_size = 4,
+                .store_type_name = "word",
+        },
+        [3] = {
+                .gops = {
+                        .entryfunc = store_entry,
+                        .retfunc = store_return,
+                },
+                .store_size = 8,
+                .store_type_name = "long long",
+        },
+};
+
+static __init int test_graph_storage_multi(void)
+{
+        struct fgraph_fixture *fixture;
+        bool printed = false;
+        int i, ret;
+
+        pr_cont("PASSED\n");
+        pr_info("Testing multiple fgraph storage on a function: ");
+
+        for (i = 0; i < ARRAY_SIZE(store_bytes); i++) {
+                fixture = &store_bytes[i];
+                ret = init_fgraph_fixture(fixture);
+                if (ret && ret != -ENODEV) {
+                        pr_cont("*Could not set filter* ");
+                        printed = true;
+                        goto out;
+                }
+
+                ret = register_ftrace_graph(&fixture->gops);
+                if (ret) {
+                        pr_warn("Failed to init store_bytes fgraph tracing\n");
+                        printed = true;
+                        goto out;
+                }
+        }
+
+        DYN_FTRACE_TEST_NAME();
+out:
+        while (--i >= 0) {
+                fixture = &store_bytes[i];
+                unregister_ftrace_graph(&fixture->gops);
+
+                if (fixture->error_str && !printed) {
+                        pr_cont("*** %s ***", fixture->error_str);
+                        printed = true;
+                }
+        }
+        return printed ? -1 : 0;
+}
+
+/* Test the storage passed across function_graph entry and return */
+static __init int test_graph_storage(void)
+{
+        int ret;
+
+        ret = test_graph_storage_single(&store_bytes[0]);
+        if (ret)
+                return ret;
+        ret = test_graph_storage_single(&store_bytes[1]);
+        if (ret)
+                return ret;
+        ret = test_graph_storage_single(&store_bytes[2]);
+        if (ret)
+                return ret;
+        ret = test_graph_storage_single(&store_bytes[3]);
+        if (ret)
+                return ret;
+        ret = test_graph_storage_multi();
+        if (ret)
+                return ret;
+        return 0;
+}
+#else
+static inline int test_graph_storage(void) { return 0; }
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
 /* Maximum number of functions to trace before diagnosing a hang */
 #define GRAPH_MAX_FUNC_TEST 100000000
 
 static unsigned int graph_hang_thresh;
 
 /* Wrap the real function entry probe to avoid possible hanging */
-static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace,
+                                      struct fgraph_ops *gops)
 {
         /* This is harmlessly racy, we want to approximately detect a hang */
         if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
@@ -776,7 +1025,7 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
                 return 0;
         }
 
-        return trace_graph_entry(trace);
+        return trace_graph_entry(trace, gops);
 }
 
 static struct fgraph_ops fgraph_ops __initdata = {
@@ -812,7 +1061,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
          * to detect and recover from possible hangs
          */
         tracing_reset_online_cpus(&tr->array_buffer);
-        set_graph_array(tr);
+        fgraph_ops.private = tr;
         ret = register_ftrace_graph(&fgraph_ops);
         if (ret) {
                 warn_failed_init_tracer(trace, ret);
@@ -855,7 +1104,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
         cond_resched();
 
         tracing_reset_online_cpus(&tr->array_buffer);
-        set_graph_array(tr);
+        fgraph_ops.private = tr;
 
         /*
          * Some archs *cough*PowerPC*cough* add characters to the
@@ -912,6 +1161,8 @@ trace_selftest_startup_function_graph(struct tracer *trace,
         ftrace_set_global_filter(NULL, 0, 1);
 #endif
 
+        ret = test_graph_storage();
+
         /* Don't test dynamic tracing, the function tracer already did */
 out:
         /* Stop it if we failed */
```