author		Mark Brown <[email protected]>	2020-02-18 19:58:30 +0000
committer	Catalin Marinas <[email protected]>	2020-03-09 17:35:12 +0000
commit		e2d591d29d44af2078336c5dd7ab503c151a2607 (patch)
tree		2cd0a302d3b3c07036d65baae4fda47603102e09
parent		e7bf6972177376496bf0748d9a99faa72960fb0b (diff)
arm64: entry-ftrace.S: Convert to modern annotations for assembly functions
In an effort to clarify and simplify the annotation of assembly functions in the kernel, new macros have been introduced. These replace ENTRY and ENDPROC and also add a new annotation for static functions, which previously had no ENTRY equivalent. Update the annotations in the core kernel code to the new macros.

Signed-off-by: Mark Brown <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
-rw-r--r--	arch/arm64/kernel/entry-ftrace.S | 28
1 file changed, 14 insertions(+), 14 deletions(-)
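For reference, the conversion pattern applied throughout the patch looks roughly like the sketch below (my_asm_func and my_local_func are hypothetical names used only for illustration, not symbols from this file):

	/* Old style */
	ENTRY(my_asm_func)
		ret
	ENDPROC(my_asm_func)

	/* New style, using the macros from include/linux/linkage.h */
	SYM_FUNC_START(my_asm_func)
		ret
	SYM_FUNC_END(my_asm_func)

	/* Static (file-local) functions, which previously had no ENTRY
	 * equivalent, can now be annotated explicitly as local symbols.
	 */
	SYM_FUNC_START_LOCAL(my_local_func)
		ret
	SYM_FUNC_END(my_local_func)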
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 7d02f9966d34..3d32b6d325d7 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -91,11 +91,11 @@ ENTRY(ftrace_common)
ldr_l x2, function_trace_op // op
mov x3, sp // regs
-GLOBAL(ftrace_call)
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
bl ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
nop // If enabled, this will be replaced
// "b ftrace_graph_caller"
#endif
@@ -218,7 +218,7 @@ ENDPROC(ftrace_graph_caller)
* - tracer function to probe instrumented function's entry,
* - ftrace_graph_caller to set up an exit hook
*/
-ENTRY(_mcount)
+SYM_FUNC_START(_mcount)
mcount_enter
ldr_l x2, ftrace_trace_function
@@ -242,7 +242,7 @@ skip_ftrace_call: // }
b.ne ftrace_graph_caller // ftrace_graph_caller();
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
mcount_exit
-ENDPROC(_mcount)
+SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)
@@ -253,9 +253,9 @@ NOKPROBE(_mcount)
* and later on, NOP to branch to ftrace_caller() when enabled or branch to
* NOP when disabled per-function base.
*/
-ENTRY(_mcount)
+SYM_FUNC_START(_mcount)
ret
-ENDPROC(_mcount)
+SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)
@@ -268,24 +268,24 @@ NOKPROBE(_mcount)
* - tracer function to probe instrumented function's entry,
* - ftrace_graph_caller to set up an exit hook
*/
-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
mcount_enter
mcount_get_pc0 x0 // function's pc
mcount_get_lr x1 // function's lr
-GLOBAL(ftrace_call) // tracer(pc, lr);
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) // tracer(pc, lr);
nop // This will be replaced with "bl xxx"
// where xxx can be any kind of tracer.
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
nop // If enabled, this will be replaced
// "b ftrace_graph_caller"
#endif
mcount_exit
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -298,20 +298,20 @@ ENDPROC(ftrace_caller)
* the call stack in order to intercept instrumented function's return path
* and run return_to_handler() later on its exit.
*/
-ENTRY(ftrace_graph_caller)
+SYM_FUNC_START(ftrace_graph_caller)
mcount_get_pc x0 // function's pc
mcount_get_lr_addr x1 // pointer to function's saved lr
mcount_get_parent_fp x2 // parent's fp
bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp)
mcount_exit
-ENDPROC(ftrace_graph_caller)
+SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
-ENTRY(ftrace_stub)
+SYM_FUNC_START(ftrace_stub)
ret
-ENDPROC(ftrace_stub)
+SYM_FUNC_END(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*