40 files changed, 1876 insertions, 14 deletions
diff --git a/Documentation/trace/fprobe.rst b/Documentation/trace/fprobe.rst new file mode 100644 index 000000000000..b64bec1ce144 --- /dev/null +++ b/Documentation/trace/fprobe.rst @@ -0,0 +1,174 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================================== +Fprobe - Function entry/exit probe +================================== + +.. Author: Masami Hiramatsu <[email protected]> + +Introduction +============ + +Fprobe is a function entry/exit probe mechanism based on ftrace. +If you only want to attach callbacks at function entry and exit, similar to +kprobes and kretprobes, you can use fprobe instead of the full ftrace feature +set. Compared with kprobes and kretprobes, fprobe gives faster +instrumentation of multiple functions with a single handler. This document +describes how to use fprobe. + +The usage of fprobe +=================== + +The fprobe is a wrapper of ftrace (+ kretprobe-like return callback) that +attaches callbacks to multiple function entries and exits. The user needs to +set up a `struct fprobe` and pass it to `register_fprobe()`. + +Typically, the `fprobe` data structure is initialized with the `entry_handler` +and/or `exit_handler` as below. + +.. code-block:: c + + struct fprobe fp = { + .entry_handler = my_entry_callback, + .exit_handler = my_exit_callback, + }; + +To enable the fprobe, call one of register_fprobe(), register_fprobe_ips(), and +register_fprobe_syms(). These functions register the fprobe with different types +of parameters. + +The register_fprobe() enables a fprobe by function-name filters. +E.g. this enables @fp on all "func*()" functions except "func2()":: + + register_fprobe(&fp, "func*", "func2"); + +The register_fprobe_ips() enables a fprobe by ftrace-location addresses. +E.g. + +.. code-block:: c + + unsigned long ips[] = { 0x.... }; + + register_fprobe_ips(&fp, ips, ARRAY_SIZE(ips)); + +And the register_fprobe_syms() enables a fprobe by symbol names. +E.g. + +.. code-block:: c + + const char *syms[] = {"func1", "func2", "func3"}; + + register_fprobe_syms(&fp, syms, ARRAY_SIZE(syms)); + +To disable (remove from functions) this fprobe, call:: + + unregister_fprobe(&fp); + +You can temporarily (soft) disable the fprobe by:: + + disable_fprobe(&fp); + +and resume by:: + + enable_fprobe(&fp); + +The above are declared by including the header:: + + #include <linux/fprobe.h> + +As with ftrace, the registered callbacks will start being called some time +after register_fprobe() is called and before it returns. See +:file:`Documentation/trace/ftrace.rst`. + +Also, unregister_fprobe() guarantees that both the entry and exit handlers +are no longer being called by functions once unregister_fprobe() returns, +just as unregister_ftrace_function() does. + +The fprobe entry/exit handler +============================= + +The prototype of the entry/exit callback function is as follows: + +.. code-block:: c + + void callback_func(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); + +Note that both entry and exit callbacks have the same prototype. The @entry_ip is +saved at function entry and passed to the exit handler.
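For illustration, a matching pair of handlers could look like the sketch below. This example is not part of the patch; the handler names follow the earlier initialization snippet, and regs_return_value() is the generic ptrace helper for reading the return value out of @regs.

.. code-block:: c

    /* Called at function entry; @entry_ip identifies the probed function. */
    static void my_entry_callback(struct fprobe *fp, unsigned long entry_ip,
                                  struct pt_regs *regs)
    {
            pr_info("entering function at 0x%lx\n", entry_ip);
    }

    /* Called at function exit; @entry_ip is the same value as at entry. */
    static void my_exit_callback(struct fprobe *fp, unsigned long entry_ip,
                                 struct pt_regs *regs)
    {
            unsigned long retval = regs_return_value(regs);

            pr_info("function at 0x%lx returned 0x%lx\n", entry_ip, retval);
    }

Each argument is described below.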
+@fp + This is the address of the `fprobe` data structure related to this handler. + You can embed the `fprobe` in your own data structure and retrieve it + from @fp with the container_of() macro. The @fp must not be NULL. + +@entry_ip + This is the ftrace address of the traced function (for both entry and exit). + Note that this may not be the actual entry address of the function but + the address where ftrace is instrumented. + +@regs + This is the `pt_regs` data structure at the entry and exit. Note that + the instruction pointer of @regs may be different from @entry_ip + in the entry_handler. If you need the traced instruction pointer, you need + to use @entry_ip. On the other hand, in the exit_handler, the instruction + pointer of @regs is set to the correct return address. + +Share the callbacks with kprobes +================================ + +Since the recursion safety of the fprobe (and ftrace) is a bit different +from that of the kprobes, this may cause an issue if the user wants to run the +same code from the fprobe and the kprobes. + +Kprobes has a per-cpu 'current_kprobe' variable which protects the kprobe +handler from recursion in all cases. On the other hand, fprobe uses +only ftrace_test_recursion_trylock(). This allows an interrupt context to +call another (or the same) fprobe while the fprobe user handler is running. + +This is not a problem if the common callback code has its own recursion +detection, or if it can handle recursion in the different contexts +(normal/interrupt/NMI.) +But if it relies on the 'current_kprobe' recursion lock, it has to check +kprobe_running() and use the kprobe_busy_*() APIs. + +Fprobe has the FPROBE_FL_KPROBE_SHARED flag for this purpose. If your common +callback code will be shared with kprobes, please set FPROBE_FL_KPROBE_SHARED +*before* registering the fprobe, like: + +.. code-block:: c + + fprobe.flags = FPROBE_FL_KPROBE_SHARED; + + register_fprobe(&fprobe, "func*", NULL); + +This will protect your common callback from nested calls. + +The missed counter +================== + +The `fprobe` data structure has a `fprobe::nmissed` counter field, the same as +kprobes. +This counter counts up when: + + - fprobe fails to take the ftrace_recursion lock. This usually means that a function + which is traced by another ftrace user is called from the entry_handler. + + - fprobe fails to set up the function exit because of a shortage of rethook + (the shadow stack for hooking the function return.) + +The `fprobe::nmissed` field counts up in both cases. The former +skips both the entry and exit callbacks, while the latter skips only the exit +callback, but in both cases the counter increases by 1. + +Note that if you set the FTRACE_OPS_FL_RECURSION and/or FTRACE_OPS_FL_RCU flags +in `fprobe::ops::flags` (ftrace_ops::flags) when registering the fprobe, this +counter may not work correctly, because ftrace skips the fprobe function which +increases the counter. + + +Functions and structures +======================== + +.. kernel-doc:: include/linux/fprobe.h +.. 
kernel-doc:: kernel/trace/fprobe.c + diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst index 3769b9b7aed8..b9f3757f8269 100644 --- a/Documentation/trace/index.rst +++ b/Documentation/trace/index.rst @@ -9,6 +9,7 @@ Linux Tracing Technologies tracepoint-analysis ftrace ftrace-uses + fprobe kprobes kprobetrace uprobetracer diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 4c97cb40eebb..440f69ee8af5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -107,6 +107,7 @@ config ARM select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI select HAVE_OPTPROBES if !THUMB2_KERNEL + select HAVE_RETHOOK select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h index 8f54f9ad8a9b..babed1707ca8 100644 --- a/arch/arm/include/asm/stacktrace.h +++ b/arch/arm/include/asm/stacktrace.h @@ -14,7 +14,7 @@ struct stackframe { unsigned long sp; unsigned long lr; unsigned long pc; -#ifdef CONFIG_KRETPROBES +#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) struct llist_node *kr_cur; struct task_struct *tsk; #endif @@ -27,7 +27,7 @@ void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame) frame->sp = regs->ARM_sp; frame->lr = regs->ARM_lr; frame->pc = regs->ARM_pc; -#ifdef CONFIG_KRETPROBES +#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) frame->kr_cur = NULL; frame->tsk = current; #endif diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 75e905508f27..f509c6be4f57 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/export.h> #include <linux/kprobes.h> +#include <linux/rethook.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/stacktrace.h> @@ -66,6 +67,11 @@ int notrace unwind_frame(struct stackframe *frame) frame->sp = *(unsigned long *)(fp - 8); frame->pc = *(unsigned long *)(fp - 4); #endif +#ifdef CONFIG_RETHOOK + if (is_rethook_trampoline(frame->pc)) + frame->pc = rethook_find_ret_addr(frame->tsk, frame->fp, + &frame->kr_cur); +#endif #ifdef CONFIG_KRETPROBES if (is_kretprobe_trampoline(frame->pc)) frame->pc = kretprobe_find_ret_addr(frame->tsk, diff --git a/arch/arm/probes/Makefile b/arch/arm/probes/Makefile index 8b0ea5ace100..10c083a22223 100644 --- a/arch/arm/probes/Makefile +++ b/arch/arm/probes/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_KPROBES) += decode-thumb.o else obj-$(CONFIG_KPROBES) += decode-arm.o endif +obj-$(CONFIG_RETHOOK) += rethook.o diff --git a/arch/arm/probes/rethook.c b/arch/arm/probes/rethook.c new file mode 100644 index 000000000000..1c1357a86365 --- /dev/null +++ b/arch/arm/probes/rethook.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arm implementation of rethook. Mostly copied from arch/arm/probes/kprobes/core.c + */ + +#include <linux/kprobes.h> +#include <linux/rethook.h> + +/* Called from arch_rethook_trampoline */ +static __used unsigned long arch_rethook_trampoline_callback(struct pt_regs *regs) +{ + return rethook_trampoline_handler(regs, regs->ARM_fp); +} +NOKPROBE_SYMBOL(arch_rethook_trampoline_callback); + +/* + * When a rethook'ed function returns, it returns to arch_rethook_trampoline + * which calls rethook callback. We construct a struct pt_regs to + * give a view of registers r0-r11, sp, lr, and pc to the user + * return-handler. 
This is not a complete pt_regs structure, but it + * should be enough for a stacktrace from the return handler with or + * without pt_regs. + */ +asm( + ".text\n" + ".global arch_rethook_trampoline\n" + ".type arch_rethook_trampoline, %function\n" + "arch_rethook_trampoline:\n" +#ifdef CONFIG_FRAME_POINTER + "ldr lr, =arch_rethook_trampoline \n\t" + /* this makes a frame pointer on pt_regs. */ +#ifdef CONFIG_CC_IS_CLANG + "stmdb sp, {sp, lr, pc} \n\t" + "sub sp, sp, #12 \n\t" + /* In the clang case, pt_regs->ip = lr. */ + "stmdb sp!, {r0 - r11, lr} \n\t" + /* fp points regs->r11 (fp) */ + "add fp, sp, #44 \n\t" +#else /* !CONFIG_CC_IS_CLANG */ + /* In the gcc case, pt_regs->ip = fp. */ + "stmdb sp, {fp, sp, lr, pc} \n\t" + "sub sp, sp, #16 \n\t" + "stmdb sp!, {r0 - r11} \n\t" + /* fp points regs->r15 (pc) */ + "add fp, sp, #60 \n\t" +#endif /* CONFIG_CC_IS_CLANG */ +#else /* !CONFIG_FRAME_POINTER */ + "sub sp, sp, #16 \n\t" + "stmdb sp!, {r0 - r11} \n\t" +#endif /* CONFIG_FRAME_POINTER */ + "mov r0, sp \n\t" + "bl arch_rethook_trampoline_callback \n\t" + "mov lr, r0 \n\t" + "ldmia sp!, {r0 - r11} \n\t" + "add sp, sp, #16 \n\t" +#ifdef CONFIG_THUMB2_KERNEL + "bx lr \n\t" +#else + "mov pc, lr \n\t" +#endif + ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n" +); +NOKPROBE_SYMBOL(arch_rethook_trampoline); + +/* + * At the entry of a function compiled with mcount, the stack and registers + * are prepared for the mcount function as below. + * + * mov ip, sp + * push {fp, ip, lr, pc} + * sub fp, ip, #4 ; FP[0] = PC, FP[-4] = LR, and FP[-12] = call-site FP. + * push {lr} + * bl <__gnu_mcount_nc> ; call ftrace + * + * And when returning from the function, the call-site FP, SP and PC are + * restored from the stack as below: + * + * ldm sp, {fp, sp, pc} + * + * Thus, if arch_rethook_prepare() is called from a real function entry, + * it must change the LR and save FP in pt_regs. But if it is called via + * mcount context (ftrace), it must change the LR on the stack, which is next + * to the PC (= FP[-4]), and save the FP value at FP[-12]. + */ +void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount) +{ + unsigned long *ret_addr, *frame; + + if (mcount) { + ret_addr = (unsigned long *)(regs->ARM_fp - 4); + frame = (unsigned long *)(regs->ARM_fp - 12); + } else { + ret_addr = &regs->ARM_lr; + frame = &regs->ARM_fp; + } + + rh->ret_addr = *ret_addr; + rh->frame = *frame; + + /* Replace the return addr with trampoline addr.
*/ + *ret_addr = (unsigned long)arch_rethook_trampoline; +} +NOKPROBE_SYMBOL(arch_rethook_prepare); diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 09b885cc4db5..62cff9d0a5bb 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -201,6 +201,7 @@ config ARM64 select HAVE_SYSCALL_TRACEPOINTS select HAVE_KPROBES select HAVE_KRETPROBES + select HAVE_RETHOOK select HAVE_GENERIC_VDSO select IOMMU_DMA if IOMMU_SUPPORT select IRQ_DOMAIN diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index e77cdef9ca29..bf04107da97c 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -58,7 +58,7 @@ struct stackframe { DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES); unsigned long prev_fp; enum stack_type prev_type; -#ifdef CONFIG_KRETPROBES +#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) struct llist_node *kr_cur; #endif }; diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile index 8e4be92e25b1..24e689f44c32 100644 --- a/arch/arm64/kernel/probes/Makefile +++ b/arch/arm64/kernel/probes/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \ simulate-insn.o obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o \ simulate-insn.o +obj-$(CONFIG_RETHOOK) += rethook.o rethook_trampoline.o diff --git a/arch/arm64/kernel/probes/rethook.c b/arch/arm64/kernel/probes/rethook.c new file mode 100644 index 000000000000..edc6b804ad6a --- /dev/null +++ b/arch/arm64/kernel/probes/rethook.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic return hook for arm64. + * Most of the code is copied from arch/arm64/kernel/probes/kprobes.c + */ + +#include <linux/kprobes.h> +#include <linux/rethook.h> + +/* This is called from arch_rethook_trampoline() */ +unsigned long __used arch_rethook_trampoline_callback(struct pt_regs *regs) +{ + return rethook_trampoline_handler(regs, regs->regs[29]); +} +NOKPROBE_SYMBOL(arch_rethook_trampoline_callback); + +void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount) +{ + rhn->ret_addr = regs->regs[30]; + rhn->frame = regs->regs[29]; + + /* replace return addr (x30) with trampoline */ + regs->regs[30] = (u64)arch_rethook_trampoline; +} +NOKPROBE_SYMBOL(arch_rethook_prepare); diff --git a/arch/arm64/kernel/probes/rethook_trampoline.S b/arch/arm64/kernel/probes/rethook_trampoline.S new file mode 100644 index 000000000000..610f520ee72b --- /dev/null +++ b/arch/arm64/kernel/probes/rethook_trampoline.S @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * trampoline entry and return code for rethook. 
+ * Copied from arch/arm64/kernel/probes/kprobes_trampoline.S + */ + +#include <linux/linkage.h> +#include <asm/asm-offsets.h> +#include <asm/assembler.h> + + .text + + .macro save_all_base_regs + stp x0, x1, [sp, #S_X0] + stp x2, x3, [sp, #S_X2] + stp x4, x5, [sp, #S_X4] + stp x6, x7, [sp, #S_X6] + stp x8, x9, [sp, #S_X8] + stp x10, x11, [sp, #S_X10] + stp x12, x13, [sp, #S_X12] + stp x14, x15, [sp, #S_X14] + stp x16, x17, [sp, #S_X16] + stp x18, x19, [sp, #S_X18] + stp x20, x21, [sp, #S_X20] + stp x22, x23, [sp, #S_X22] + stp x24, x25, [sp, #S_X24] + stp x26, x27, [sp, #S_X26] + stp x28, x29, [sp, #S_X28] + add x0, sp, #PT_REGS_SIZE + stp lr, x0, [sp, #S_LR] + /* + * Construct a useful saved PSTATE + */ + mrs x0, nzcv + mrs x1, daif + orr x0, x0, x1 + mrs x1, CurrentEL + orr x0, x0, x1 + mrs x1, SPSel + orr x0, x0, x1 + stp xzr, x0, [sp, #S_PC] + .endm + + .macro restore_all_base_regs + ldr x0, [sp, #S_PSTATE] + and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT) + msr nzcv, x0 + ldp x0, x1, [sp, #S_X0] + ldp x2, x3, [sp, #S_X2] + ldp x4, x5, [sp, #S_X4] + ldp x6, x7, [sp, #S_X6] + ldp x8, x9, [sp, #S_X8] + ldp x10, x11, [sp, #S_X10] + ldp x12, x13, [sp, #S_X12] + ldp x14, x15, [sp, #S_X14] + ldp x16, x17, [sp, #S_X16] + ldp x18, x19, [sp, #S_X18] + ldp x20, x21, [sp, #S_X20] + ldp x22, x23, [sp, #S_X22] + ldp x24, x25, [sp, #S_X24] + ldp x26, x27, [sp, #S_X26] + ldp x28, x29, [sp, #S_X28] + .endm + +SYM_CODE_START(arch_rethook_trampoline) + sub sp, sp, #PT_REGS_SIZE + + save_all_base_regs + + /* Setup a frame pointer. */ + add x29, sp, #S_FP + + mov x0, sp + bl arch_rethook_trampoline_callback + /* + * Replace trampoline address in lr with actual orig_ret_addr return + * address. + */ + mov lr, x0 + + /* The frame pointer (x29) is restored with other registers. 
*/ + restore_all_base_regs + + add sp, sp, #PT_REGS_SIZE + ret + +SYM_CODE_END(arch_rethook_trampoline) diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index e4103e085681..efb5b85b57c2 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -8,6 +8,7 @@ #include <linux/export.h> #include <linux/ftrace.h> #include <linux/kprobes.h> +#include <linux/rethook.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/task_stack.h> @@ -38,7 +39,7 @@ static notrace void start_backtrace(struct stackframe *frame, unsigned long fp, { frame->fp = fp; frame->pc = pc; -#ifdef CONFIG_KRETPROBES +#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) frame->kr_cur = NULL; #endif @@ -138,6 +139,10 @@ static int notrace unwind_frame(struct task_struct *tsk, if (is_kretprobe_trampoline(frame->pc)) frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur); #endif +#ifdef CONFIG_RETHOOK + if (is_rethook_trampoline(frame->pc)) + frame->pc = rethook_find_ret_addr(tsk, frame->fp, &frame->kr_cur); +#endif return 0; } diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b779603978e1..5feaa241fb56 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -229,6 +229,7 @@ config PPC select HAVE_PERF_EVENTS_NMI if PPC64 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_RETHOOK if KPROBES select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE select HAVE_RSEQ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 4d7829399570..feb24ea83ca6 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -115,6 +115,7 @@ obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_OPTPROBES) += optprobes.o optprobes_head.o obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o +obj-$(CONFIG_RETHOOK) += rethook.o obj-$(CONFIG_UPROBES) += uprobes.o obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o diff --git a/arch/powerpc/kernel/rethook.c b/arch/powerpc/kernel/rethook.c new file mode 100644 index 000000000000..a8a128748efa --- /dev/null +++ b/arch/powerpc/kernel/rethook.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PowerPC implementation of rethook. This depends on kprobes. + */ + +#include <linux/kprobes.h> +#include <linux/rethook.h> + +/* + * Function return trampoline: + * - init_arch_rethook() establishes a kprobe here + * - When the probed function returns, this probe + * causes the handlers to fire + */ +asm(".global arch_rethook_trampoline\n" + ".type arch_rethook_trampoline, @function\n" + "arch_rethook_trampoline:\n" + "nop\n" + "blr\n" + ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n"); + +/* + * Called when the kprobe on the rethook trampoline is hit + */ +static int trampoline_rethook_handler(struct kprobe *p, struct pt_regs *regs) +{ + unsigned long orig_ret_address; + + orig_ret_address = rethook_trampoline_handler(regs, 0); + /* + * We get here through one of two paths: + * 1. by taking a trap -> kprobe_handler() -> here + * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here + * + * When going back through (1), we need regs->nip to be set up properly + * as it is used to determine the return address from the trap.
For (2), since nip is not honoured with optprobes, we instead set up + the link register properly so that the subsequent 'blr' in + * arch_rethook_trampoline jumps back to the right instruction. + * + * For nip, we should set the address to the previous instruction since + * we end up emulating it in kprobe_handler(), which increments the nip + * again. + */ + regs_set_return_ip(regs, orig_ret_address - 4); + regs->link = orig_ret_address; + + return 0; +} +NOKPROBE_SYMBOL(trampoline_rethook_handler); + +void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount) +{ + rh->ret_addr = regs->link; + rh->frame = 0; + + /* Replace the return addr with trampoline addr */ + regs->link = (unsigned long)arch_rethook_trampoline; +} +NOKPROBE_SYMBOL(arch_rethook_prepare); + +static struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *) &arch_rethook_trampoline, + .pre_handler = trampoline_rethook_handler +}; + +static int init_arch_rethook(void) +{ + return register_kprobe(&trampoline_p); +} + +core_initcall(init_arch_rethook); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9b356da6f46b..1270b65a5546 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -221,6 +221,7 @@ config X86 select HAVE_KPROBES_ON_FTRACE select HAVE_FUNCTION_ERROR_INJECTION select HAVE_KRETPROBES + select HAVE_RETHOOK select HAVE_KVM select HAVE_LIVEPATCH if X86_64 select HAVE_MIXED_BREAKPOINTS_REGS diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 2a1f8734416d..192df5b2094d 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -5,6 +5,7 @@ #include <linux/sched.h> #include <linux/ftrace.h> #include <linux/kprobes.h> +#include <linux/rethook.h> #include <asm/ptrace.h> #include <asm/stacktrace.h> @@ -16,7 +17,7 @@ struct unwind_state { unsigned long stack_mask; struct task_struct *task; int graph_idx; -#ifdef CONFIG_KRETPROBES +#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) struct llist_node *kr_cur; #endif bool error; @@ -107,6 +108,11 @@ static inline unsigned long unwind_recover_kretprobe(struct unwind_state *state, unsigned long addr, unsigned long *addr_p) { +#ifdef CONFIG_RETHOOK + if (is_rethook_trampoline(addr)) + return rethook_find_ret_addr(state->task, (unsigned long)addr_p, + &state->kr_cur); +#endif #ifdef CONFIG_KRETPROBES return is_kretprobe_trampoline(addr) ?
kretprobe_find_ret_addr(state->task, addr_p, &state->kr_cur) : diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 6aef9ee28a39..792a893a5cc5 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -106,6 +106,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_X86_TSC) += trace_clock.o obj-$(CONFIG_TRACING) += trace.o +obj-$(CONFIG_RETHOOK) += rethook.o obj-$(CONFIG_CRASH_CORE) += crash_core_$(BITS).o obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h index 7d3a2e2daf01..c993521d4933 100644 --- a/arch/x86/kernel/kprobes/common.h +++ b/arch/x86/kernel/kprobes/common.h @@ -6,6 +6,7 @@ #include <asm/asm.h> #include <asm/frame.h> +#include <asm/insn.h> #ifdef CONFIG_X86_64 diff --git a/arch/x86/kernel/rethook.c b/arch/x86/kernel/rethook.c new file mode 100644 index 000000000000..f0f2f0608282 --- /dev/null +++ b/arch/x86/kernel/rethook.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * x86 implementation of rethook. Mostly copied from arch/x86/kernel/kprobes/core.c. + */ +#include <linux/bug.h> +#include <linux/rethook.h> +#include <linux/kprobes.h> + +#include "kprobes/common.h" + +__visible void arch_rethook_trampoline_callback(struct pt_regs *regs); + +/* + * When a target function returns, this code saves registers and calls + * arch_rethook_trampoline_callback(), which calls the rethook handler. + */ +asm( + ".text\n" + ".global arch_rethook_trampoline\n" + ".type arch_rethook_trampoline, @function\n" + "arch_rethook_trampoline:\n" +#ifdef CONFIG_X86_64 + /* Push a fake return address to tell the unwinder it's a kretprobe. */ + " pushq $arch_rethook_trampoline\n" + UNWIND_HINT_FUNC + /* Save the 'sp - 8', this will be fixed later. */ + " pushq %rsp\n" + " pushfq\n" + SAVE_REGS_STRING + " movq %rsp, %rdi\n" + " call arch_rethook_trampoline_callback\n" + RESTORE_REGS_STRING + /* In the callback function, 'regs->flags' is copied to 'regs->sp'. */ + " addq $8, %rsp\n" + " popfq\n" +#else + /* Push a fake return address to tell the unwinder it's a kretprobe. */ + " pushl $arch_rethook_trampoline\n" + UNWIND_HINT_FUNC + /* Save the 'sp - 4', this will be fixed later. */ + " pushl %esp\n" + " pushfl\n" + SAVE_REGS_STRING + " movl %esp, %eax\n" + " call arch_rethook_trampoline_callback\n" + RESTORE_REGS_STRING + /* In the callback function, 'regs->flags' is copied to 'regs->sp'. */ + " addl $4, %esp\n" + " popfl\n" +#endif + " ret\n" + ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n" +); +NOKPROBE_SYMBOL(arch_rethook_trampoline); + +/* + * Called from arch_rethook_trampoline + */ +__used __visible void arch_rethook_trampoline_callback(struct pt_regs *regs) +{ + unsigned long *frame_pointer; + + /* fixup registers */ + regs->cs = __KERNEL_CS; +#ifdef CONFIG_X86_32 + regs->gs = 0; +#endif + regs->ip = (unsigned long)&arch_rethook_trampoline; + regs->orig_ax = ~0UL; + regs->sp += sizeof(long); + frame_pointer = &regs->sp + 1; + + /* + * The return address at 'frame_pointer' is recovered by + * arch_rethook_fixup_return(), which is called from this + * rethook_trampoline_handler(). + */ + rethook_trampoline_handler(regs, (unsigned long)frame_pointer); + + /* + * Copy FLAGS to 'pt_regs::sp' so that arch_rethook_trampoline() + * can do RET right after POPF.
*/ + regs->sp = regs->flags; +} +NOKPROBE_SYMBOL(arch_rethook_trampoline_callback); + +/* + * arch_rethook_trampoline() skips updating the frame pointer. The frame pointer + * saved in arch_rethook_trampoline_callback() points to the real caller + * function's frame pointer. Thus arch_rethook_trampoline() doesn't have + * a standard stack frame with CONFIG_FRAME_POINTER=y. + * Let's mark it as a non-standard function. Anyway, the FP unwinder can + * correctly unwind without the hint. + */ +STACK_FRAME_NON_STANDARD_FP(arch_rethook_trampoline); + +/* This is called from rethook_trampoline_handler(). */ +void arch_rethook_fixup_return(struct pt_regs *regs, + unsigned long correct_ret_addr) +{ + unsigned long *frame_pointer = &regs->sp + 1; + + /* Replace fake return address with real one. */ + *frame_pointer = correct_ret_addr; +} +NOKPROBE_SYMBOL(arch_rethook_fixup_return); + +void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount) +{ + unsigned long *stack = (unsigned long *)regs->sp; + + rh->ret_addr = stack[0]; + rh->frame = regs->sp; + + /* Replace the return addr with trampoline addr */ + stack[0] = (unsigned long) arch_rethook_trampoline; +} +NOKPROBE_SYMBOL(arch_rethook_prepare); diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h new file mode 100644 index 000000000000..1c2bde0ead73 --- /dev/null +++ b/include/linux/fprobe.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Simple ftrace probe wrapper */ +#ifndef _LINUX_FPROBE_H +#define _LINUX_FPROBE_H + +#include <linux/compiler.h> +#include <linux/ftrace.h> +#include <linux/rethook.h> + +/** + * struct fprobe - ftrace based probe. + * @ops: The ftrace_ops. + * @nmissed: The counter for missing events. + * @flags: The status flag. + * @rethook: The rethook data structure. (internal data) + * @entry_handler: The callback function for function entry. + * @exit_handler: The callback function for function exit. + */ +struct fprobe { +#ifdef CONFIG_FUNCTION_TRACER + /* + * If CONFIG_FUNCTION_TRACER is not set, CONFIG_FPROBE is disabled too. + * But a user of fprobe may keep embedding struct fprobe in their own + * code. To avoid a build error, this keeps the fprobe data structure + * defined here, but removes the ftrace_ops data structure. + */ + struct ftrace_ops ops; +#endif + unsigned long nmissed; + unsigned int flags; + struct rethook *rethook; + + void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); + void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); +}; + +/* This fprobe is soft-disabled. */ +#define FPROBE_FL_DISABLED 1 + +/* + * This fprobe handler will be shared with kprobes. + * This flag must be set before registering. + */ +#define FPROBE_FL_KPROBE_SHARED 2 + +static inline bool fprobe_disabled(struct fprobe *fp) +{ + return (fp) ? fp->flags & FPROBE_FL_DISABLED : false; +} + +static inline bool fprobe_shared_with_kprobes(struct fprobe *fp) +{ + return (fp) ?
fp->flags & FPROBE_FL_KPROBE_SHARED : false; +} + +#ifdef CONFIG_FPROBE +int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter); +int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num); +int register_fprobe_syms(struct fprobe *fp, const char **syms, int num); +int unregister_fprobe(struct fprobe *fp); +#else +static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter) +{ + return -EOPNOTSUPP; +} +static inline int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num) +{ + return -EOPNOTSUPP; +} +static inline int register_fprobe_syms(struct fprobe *fp, const char **syms, int num) +{ + return -EOPNOTSUPP; +} +static inline int unregister_fprobe(struct fprobe *fp) +{ + return -EOPNOTSUPP; +} +#endif + +/** + * disable_fprobe() - Disable fprobe + * @fp: The fprobe to be disabled. + * + * This will soft-disable @fp. Note that this doesn't remove the ftrace + * hooks from the function entry. + */ +static inline void disable_fprobe(struct fprobe *fp) +{ + if (fp) + fp->flags |= FPROBE_FL_DISABLED; +} + +/** + * enable_fprobe() - Enable fprobe + * @fp: The fprobe to be enabled. + * + * This will soft-enable @fp. + */ +static inline void enable_fprobe(struct fprobe *fp) +{ + if (fp) + fp->flags &= ~FPROBE_FL_DISABLED; +} + +#endif diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 9999e29187de..60847cbce0da 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -512,6 +512,8 @@ struct dyn_ftrace { int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, int remove, int reset); +int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips, + unsigned int cnt, int remove, int reset); int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, int len, int reset); int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, @@ -802,6 +804,7 @@ static inline unsigned long ftrace_location(unsigned long ip) #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; }) #define ftrace_set_early_filter(ops, buf, enable) do { } while (0) #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; }) +#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; }) #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_free_filter(ops) do { } while (0) diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 19b884353b15..5f1859836deb 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -427,6 +427,9 @@ static inline struct kprobe *kprobe_running(void) { return NULL; } +#define kprobe_busy_begin() do {} while (0) +#define kprobe_busy_end() do {} while (0) + static inline int register_kprobe(struct kprobe *p) { return -EOPNOTSUPP; diff --git a/include/linux/rethook.h b/include/linux/rethook.h new file mode 100644 index 000000000000..c8ac1e5afcd1 --- /dev/null +++ b/include/linux/rethook.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Return hooking with list-based shadow stack. + */ +#ifndef _LINUX_RETHOOK_H +#define _LINUX_RETHOOK_H + +#include <linux/compiler.h> +#include <linux/freelist.h> +#include <linux/kallsyms.h> +#include <linux/llist.h> +#include <linux/rcupdate.h> +#include <linux/refcount.h> + +struct rethook_node; + +typedef void (*rethook_handler_t) (struct rethook_node *, void *, struct pt_regs *); + +/** + * struct rethook - The rethook management data structure. 
+ * @data: The user-defined data storage. + * @handler: The user-defined return hook handler. + * @pool: The pool of struct rethook_node. + * @ref: The reference counter. + * @rcu: The rcu_head for deferred freeing. + * + * Don't embed this in another data structure, because this data structure + * destroys itself once all rethook_node instances are freed. + */ +struct rethook { + void *data; + rethook_handler_t handler; + struct freelist_head pool; + refcount_t ref; + struct rcu_head rcu; +}; + +/** + * struct rethook_node - The rethook shadow-stack entry node. + * @freelist: The freelist, linked to struct rethook::pool. + * @rcu: The rcu_head for deferred freeing. + * @llist: The llist, linked to a struct task_struct::rethooks. + * @rethook: The pointer to the struct rethook. + * @ret_addr: The storage for the real return address. + * @frame: The storage for the frame pointer. + * + * You can embed this in your extended data structure to store any data + * on each entry of the shadow stack. + */ +struct rethook_node { + union { + struct freelist_node freelist; + struct rcu_head rcu; + }; + struct llist_node llist; + struct rethook *rethook; + unsigned long ret_addr; + unsigned long frame; +}; + +struct rethook *rethook_alloc(void *data, rethook_handler_t handler); +void rethook_free(struct rethook *rh); +void rethook_add_node(struct rethook *rh, struct rethook_node *node); +struct rethook_node *rethook_try_get(struct rethook *rh); +void rethook_recycle(struct rethook_node *node); +void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount); +unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame, + struct llist_node **cur); + +/* Arch dependent code must implement arch_* and trampoline code */ +void arch_rethook_prepare(struct rethook_node *node, struct pt_regs *regs, bool mcount); +void arch_rethook_trampoline(void); + +/** + * is_rethook_trampoline() - Check whether the address is rethook trampoline + * @addr: The address to be checked + * + * Return true if the @addr is the rethook trampoline address. + */ +static inline bool is_rethook_trampoline(unsigned long addr) +{ + return addr == (unsigned long)dereference_symbol_descriptor(arch_rethook_trampoline); +} + +/* If the architecture needs to fix up the return address, implement it.
*/ +void arch_rethook_fixup_return(struct pt_regs *regs, + unsigned long correct_ret_addr); + +/* Generic trampoline handler, arch code must prepare asm stub */ +unsigned long rethook_trampoline_handler(struct pt_regs *regs, + unsigned long frame); + +#ifdef CONFIG_RETHOOK +void rethook_flush_task(struct task_struct *tk); +#else +#define rethook_flush_task(tsk) do { } while (0) +#endif + +#endif + diff --git a/include/linux/sched.h b/include/linux/sched.h index 75ba8aa60248..7034f53404e3 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1481,6 +1481,9 @@ struct task_struct { #ifdef CONFIG_KRETPROBES struct llist_head kretprobe_instances; #endif +#ifdef CONFIG_RETHOOK + struct llist_head rethooks; +#endif #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH /* diff --git a/kernel/exit.c b/kernel/exit.c index b00a25bb4ab9..2d1803fa8fe6 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -64,6 +64,7 @@ #include <linux/compat.h> #include <linux/io_uring.h> #include <linux/kprobes.h> +#include <linux/rethook.h> #include <linux/uaccess.h> #include <asm/unistd.h> @@ -169,6 +170,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp) struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); kprobe_flush_task(tsk); + rethook_flush_task(tsk); perf_event_delayed_put(tsk); trace_sched_process_free(tsk); put_task_struct(tsk); diff --git a/kernel/fork.c b/kernel/fork.c index a024bf6254df..3db1a4110a25 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2255,6 +2255,9 @@ static __latent_entropy struct task_struct *copy_process( #ifdef CONFIG_KRETPROBES p->kretprobe_instances.first = NULL; #endif +#ifdef CONFIG_RETHOOK + p->rethooks.first = NULL; +#endif /* * Ensure that the cgroup subsystem policies allow the new process to be diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index a5eb5e7fd624..99dd4ca63d68 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -10,6 +10,17 @@ config USER_STACKTRACE_SUPPORT config NOP_TRACER bool +config HAVE_RETHOOK + bool + +config RETHOOK + bool + depends on HAVE_RETHOOK + help + Enable the generic return hooking feature. This is an internal + API which will be used by other function-entry hooking + features like fprobe and kprobes. + config HAVE_FUNCTION_TRACER bool help @@ -236,6 +247,21 @@ config DYNAMIC_FTRACE_WITH_ARGS depends on DYNAMIC_FTRACE depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS +config FPROBE + bool "Kernel Function Probe (fprobe)" + depends on FUNCTION_TRACER + depends on DYNAMIC_FTRACE_WITH_REGS + depends on HAVE_RETHOOK + select RETHOOK + default n + help + This option enables the kernel function probe (fprobe) based on ftrace. + The fprobe is similar to kprobes, but probes only kernel function + entries and exits. It can also probe multiple functions with one + fprobe. + + If unsure, say N.
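To see how these Kconfig pieces fit together with the API declared in <linux/fprobe.h>, here is a minimal module sketch. It is not part of the patch; the "vfs_*"/"vfs_read" filter pattern and all "sample_*" names are illustrative assumptions only.

.. code-block:: c

    #include <linux/fprobe.h>
    #include <linux/kernel.h>
    #include <linux/module.h>

    /* Entry-only probe; add an .exit_handler to also hook returns. */
    static void sample_entry(struct fprobe *fp, unsigned long entry_ip,
                             struct pt_regs *regs)
    {
            pr_info("entry: 0x%lx\n", entry_ip);
    }

    static struct fprobe sample_probe = {
            .entry_handler = sample_entry,
    };

    static int __init sample_init(void)
    {
            /* Probe all "vfs_*" functions except "vfs_read" (illustrative). */
            return register_fprobe(&sample_probe, "vfs_*", "vfs_read");
    }

    static void __exit sample_exit(void)
    {
            unregister_fprobe(&sample_probe);
    }

    module_init(sample_init);
    module_exit(sample_exit);
    MODULE_LICENSE("GPL");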
+ config FUNCTION_PROFILER bool "Kernel function profiler" depends on FUNCTION_TRACER diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index bedc5caceec7..c6f11a139eac 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -97,6 +97,8 @@ obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o obj-$(CONFIG_FTRACE_RECORD_RECURSION) += trace_recursion_record.o +obj-$(CONFIG_FPROBE) += fprobe.o +obj-$(CONFIG_RETHOOK) += rethook.o obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c new file mode 100644 index 000000000000..8b2dd5b9dcd1 --- /dev/null +++ b/kernel/trace/fprobe.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * fprobe - Simple ftrace probe wrapper for function entry. + */ +#define pr_fmt(fmt) "fprobe: " fmt + +#include <linux/err.h> +#include <linux/fprobe.h> +#include <linux/kallsyms.h> +#include <linux/kprobes.h> +#include <linux/rethook.h> +#include <linux/slab.h> +#include <linux/sort.h> + +#include "trace.h" + +struct fprobe_rethook_node { + struct rethook_node node; + unsigned long entry_ip; +}; + +static void fprobe_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct ftrace_regs *fregs) +{ + struct fprobe_rethook_node *fpr; + struct rethook_node *rh; + struct fprobe *fp; + int bit; + + fp = container_of(ops, struct fprobe, ops); + if (fprobe_disabled(fp)) + return; + + bit = ftrace_test_recursion_trylock(ip, parent_ip); + if (bit < 0) { + fp->nmissed++; + return; + } + + if (fp->entry_handler) + fp->entry_handler(fp, ip, ftrace_get_regs(fregs)); + + if (fp->exit_handler) { + rh = rethook_try_get(fp->rethook); + if (!rh) { + fp->nmissed++; + goto out; + } + fpr = container_of(rh, struct fprobe_rethook_node, node); + fpr->entry_ip = ip; + rethook_hook(rh, ftrace_get_regs(fregs), true); + } + +out: + ftrace_test_recursion_unlock(bit); +} +NOKPROBE_SYMBOL(fprobe_handler); + +static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct ftrace_regs *fregs) +{ + struct fprobe *fp = container_of(ops, struct fprobe, ops); + + if (unlikely(kprobe_running())) { + fp->nmissed++; + return; + } + kprobe_busy_begin(); + fprobe_handler(ip, parent_ip, ops, fregs); + kprobe_busy_end(); +} + +static void fprobe_exit_handler(struct rethook_node *rh, void *data, + struct pt_regs *regs) +{ + struct fprobe *fp = (struct fprobe *)data; + struct fprobe_rethook_node *fpr; + + if (!fp || fprobe_disabled(fp)) + return; + + fpr = container_of(rh, struct fprobe_rethook_node, node); + + fp->exit_handler(fp, fpr->entry_ip, regs); +} +NOKPROBE_SYMBOL(fprobe_exit_handler); + +/* Convert ftrace location address from symbols */ +static unsigned long *get_ftrace_locations(const char **syms, int num) +{ + unsigned long addr, size; + unsigned long *addrs; + int i; + + /* Convert symbols to symbol address */ + addrs = kcalloc(num, sizeof(*addrs), GFP_KERNEL); + if (!addrs) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < num; i++) { + addr = kallsyms_lookup_name(syms[i]); + if (!addr) /* Maybe wrong symbol */ + goto error; + + /* Convert symbol address to ftrace location. */ + if (!kallsyms_lookup_size_offset(addr, &size, NULL) || !size) + goto error; + + addr = ftrace_location_range(addr, addr + size - 1); + if (!addr) /* No dynamic ftrace there. 
*/ + goto error; + + addrs[i] = addr; + } + + return addrs; + +error: + kfree(addrs); + + return ERR_PTR(-ENOENT); +} + +static void fprobe_init(struct fprobe *fp) +{ + fp->nmissed = 0; + if (fprobe_shared_with_kprobes(fp)) + fp->ops.func = fprobe_kprobe_handler; + else + fp->ops.func = fprobe_handler; + fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS; +} + +static int fprobe_init_rethook(struct fprobe *fp, int num) +{ + int i, size; + + if (num < 0) + return -EINVAL; + + if (!fp->exit_handler) { + fp->rethook = NULL; + return 0; + } + + /* Initialize rethook if needed */ + size = num * num_possible_cpus() * 2; + if (size < 0) + return -E2BIG; + + fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler); + for (i = 0; i < size; i++) { + struct rethook_node *node; + + node = kzalloc(sizeof(struct fprobe_rethook_node), GFP_KERNEL); + if (!node) { + rethook_free(fp->rethook); + fp->rethook = NULL; + return -ENOMEM; + } + rethook_add_node(fp->rethook, node); + } + return 0; +} + +static void fprobe_fail_cleanup(struct fprobe *fp) +{ + if (fp->rethook) { + /* No need to clean up rethook->handler because it is not used. */ + rethook_free(fp->rethook); + fp->rethook = NULL; + } + ftrace_free_filter(&fp->ops); +} + +/** + * register_fprobe() - Register fprobe to ftrace by pattern. + * @fp: A fprobe data structure to be registered. + * @filter: A wildcard pattern of probed symbols. + * @notfilter: A wildcard pattern of NOT probed symbols. + * + * Register @fp to ftrace for enabling the probe on the symbols matched by @filter. + * If @notfilter is not NULL, the symbols matched by @notfilter are not probed. + * + * Return 0 if @fp is registered successfully, -errno if not. + */ +int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter) +{ + struct ftrace_hash *hash; + unsigned char *str; + int ret, len; + + if (!fp || !filter) + return -EINVAL; + + fprobe_init(fp); + + len = strlen(filter); + str = kstrdup(filter, GFP_KERNEL); + ret = ftrace_set_filter(&fp->ops, str, len, 0); + kfree(str); + if (ret) + return ret; + + if (notfilter) { + len = strlen(notfilter); + str = kstrdup(notfilter, GFP_KERNEL); + ret = ftrace_set_notrace(&fp->ops, str, len, 0); + kfree(str); + if (ret) + goto out; + } + + /* TODO: + * correctly calculate the total number of filtered symbols + * from both filter and notfilter. + */ + hash = fp->ops.local_hash.filter_hash; + if (WARN_ON_ONCE(!hash)) + goto out; + + ret = fprobe_init_rethook(fp, (int)hash->count); + if (!ret) + ret = register_ftrace_function(&fp->ops); + +out: + if (ret) + fprobe_fail_cleanup(fp); + return ret; +} +EXPORT_SYMBOL_GPL(register_fprobe); + +/** + * register_fprobe_ips() - Register fprobe to ftrace by address. + * @fp: A fprobe data structure to be registered. + * @addrs: An array of target ftrace location addresses. + * @num: The number of entries of @addrs. + * + * Register @fp to ftrace for enabling the probe on the addresses given by @addrs. + * The @addrs must be ftrace location addresses, which may be the symbol + * address + an arch-dependent offset. + * If you are unsure what this means, please use the other registration functions. + * + * Return 0 if @fp is registered successfully, -errno if not.
+ */ +int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num) +{ + int ret; + + if (!fp || !addrs || num <= 0) + return -EINVAL; + + fprobe_init(fp); + + ret = ftrace_set_filter_ips(&fp->ops, addrs, num, 0, 0); + if (ret) + return ret; + + ret = fprobe_init_rethook(fp, num); + if (!ret) + ret = register_ftrace_function(&fp->ops); + + if (ret) + fprobe_fail_cleanup(fp); + return ret; +} +EXPORT_SYMBOL_GPL(register_fprobe_ips); + +/** + * register_fprobe_syms() - Register fprobe to ftrace by symbols. + * @fp: A fprobe data structure to be registered. + * @syms: An array of target symbols. + * @num: The number of entries of @syms. + * + * Register @fp to the symbols given by @syms array. This will be useful if + * you are sure the symbols exist in the kernel. + * + * Return 0 if @fp is registered successfully, -errno if not. + */ +int register_fprobe_syms(struct fprobe *fp, const char **syms, int num) +{ + unsigned long *addrs; + int ret; + + if (!fp || !syms || num <= 0) + return -EINVAL; + + addrs = get_ftrace_locations(syms, num); + if (IS_ERR(addrs)) + return PTR_ERR(addrs); + + ret = register_fprobe_ips(fp, addrs, num); + + kfree(addrs); + + return ret; +} +EXPORT_SYMBOL_GPL(register_fprobe_syms); + +/** + * unregister_fprobe() - Unregister fprobe from ftrace + * @fp: A fprobe data structure to be unregistered. + * + * Unregister fprobe (and remove ftrace hooks from the function entries). + * + * Return 0 if @fp is unregistered successfully, -errno if not. + */ +int unregister_fprobe(struct fprobe *fp) +{ + int ret; + + if (!fp || fp->ops.func != fprobe_handler) + return -EINVAL; + + /* + * rethook_free() starts disabling the rethook, but the rethook handlers + * may be running on other processors at this point. To make sure that all + * current running handlers are finished, call unregister_ftrace_function() + * after this. + */ + if (fp->rethook) + rethook_free(fp->rethook); + + ret = unregister_ftrace_function(&fp->ops); + if (ret < 0) + return ret; + + ftrace_free_filter(&fp->ops); + + return ret; +} +EXPORT_SYMBOL_GPL(unregister_fprobe); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index a4b462b6f944..93e992962ada 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -4958,7 +4958,7 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, } static int -ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) +__ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) { struct ftrace_func_entry *entry; @@ -4977,8 +4977,29 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) } static int +ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips, + unsigned int cnt, int remove) +{ + unsigned int i; + int err; + + for (i = 0; i < cnt; i++) { + err = __ftrace_match_addr(hash, ips[i], remove); + if (err) { + /* + * This expects the @hash is a temporary hash and if this + * fails the caller must free the @hash. 
+ */ + return err; + } + } + return 0; +} + +static int ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, - unsigned long ip, int remove, int reset, int enable) + unsigned long *ips, unsigned int cnt, + int remove, int reset, int enable) { struct ftrace_hash **orig_hash; struct ftrace_hash *hash; @@ -5008,8 +5029,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, ret = -EINVAL; goto out_regex_unlock; } - if (ip) { - ret = ftrace_match_addr(hash, ip, remove); + if (ips) { + ret = ftrace_match_addr(hash, ips, cnt, remove); if (ret < 0) goto out_regex_unlock; } @@ -5026,10 +5047,10 @@ } static int -ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, - int reset, int enable) +ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt, + int remove, int reset, int enable) { - return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable); + return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable); } #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS @@ -5634,11 +5655,30 @@ int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, int remove, int reset) { ftrace_ops_init(ops); - return ftrace_set_addr(ops, ip, remove, reset, 1); + return ftrace_set_addr(ops, &ip, 1, remove, reset, 1); } EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); /** + * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses + * @ops - the ops to set the filter with + * @ips - the array of addresses to add to or remove from the filter. + * @cnt - the number of addresses in @ips + * @remove - non-zero to remove ips from the filter + * @reset - non-zero to reset all filters before applying this filter. + * + * Filters denote which functions should be enabled when tracing is enabled. + * If the @ips array or any ip specified within is NULL, it fails to update the filter. + */ +int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips, + unsigned int cnt, int remove, int reset) +{ + ftrace_ops_init(ops); + return ftrace_set_addr(ops, ips, cnt, remove, reset, 1); +} +EXPORT_SYMBOL_GPL(ftrace_set_filter_ips); + +/** * ftrace_ops_set_global_filter - setup ops to use global filters * @ops - the ops which will use the global filters * @@ -5659,7 +5699,7 @@ static int ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, int reset, int enable) { - return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable); + return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable); } /** diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c new file mode 100644 index 000000000000..ab463a4d2b23 --- /dev/null +++ b/kernel/trace/rethook.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define pr_fmt(fmt) "rethook: " fmt + +#include <linux/bug.h> +#include <linux/kallsyms.h> +#include <linux/kprobes.h> +#include <linux/preempt.h> +#include <linux/rethook.h> +#include <linux/slab.h> +#include <linux/sort.h> + +/* Return hook list (shadow stack by list) */ + +/* + * This function is called from delayed_put_task_struct() when a task is + * dead and cleaned up to recycle any rethook instances associated with + * this task. These leftover instances represent probed functions that + * have been called but will never return.
+ */ +void rethook_flush_task(struct task_struct *tk) +{ + struct rethook_node *rhn; + struct llist_node *node; + + node = __llist_del_all(&tk->rethooks); + while (node) { + rhn = container_of(node, struct rethook_node, llist); + node = node->next; + preempt_disable(); + rethook_recycle(rhn); + preempt_enable(); + } +} + +static void rethook_free_rcu(struct rcu_head *head) +{ + struct rethook *rh = container_of(head, struct rethook, rcu); + struct rethook_node *rhn; + struct freelist_node *node; + int count = 1; + + node = rh->pool.head; + while (node) { + rhn = container_of(node, struct rethook_node, freelist); + node = node->next; + kfree(rhn); + count++; + } + + /* The rh->ref is the number of pooled nodes + 1 */ + if (refcount_sub_and_test(count, &rh->ref)) + kfree(rh); +} + +/** + * rethook_free() - Free struct rethook. + * @rh: the struct rethook to be freed. + * + * Free the rethook. Before calling this function, the user must ensure that + * @rh::data is cleaned up if needed (or the handler may still access it after + * calling this function.) This function marks @rh to be freed after all + * rethook_node instances are freed (not immediately). The caller must + * not touch @rh after calling this. + */ +void rethook_free(struct rethook *rh) +{ + rcu_assign_pointer(rh->handler, NULL); + + call_rcu(&rh->rcu, rethook_free_rcu); +} + +/** + * rethook_alloc() - Allocate struct rethook. + * @data: the data to pass to the @handler when hooking the return. + * @handler: the return hook callback function. + * + * Allocate and initialize a new rethook with @data and @handler. + * Return NULL if memory allocation fails or @handler is NULL. + * Note that @handler == NULL means this rethook is going to be freed. + */ +struct rethook *rethook_alloc(void *data, rethook_handler_t handler) +{ + struct rethook *rh = kzalloc(sizeof(struct rethook), GFP_KERNEL); + + if (!rh || !handler) + return NULL; + + rh->data = data; + rh->handler = handler; + rh->pool.head = NULL; + refcount_set(&rh->ref, 1); + + return rh; +} + +/** + * rethook_add_node() - Add a new node to the rethook. + * @rh: the struct rethook. + * @node: the struct rethook_node to be added. + * + * Add @node to @rh. The user must allocate @node (as a part of the user's + * own data structure.) The @node fields are initialized in this function. + */ +void rethook_add_node(struct rethook *rh, struct rethook_node *node) +{ + node->rethook = rh; + freelist_add(&node->freelist, &rh->pool); + refcount_inc(&rh->ref); +} + +static void free_rethook_node_rcu(struct rcu_head *head) +{ + struct rethook_node *node = container_of(head, struct rethook_node, rcu); + + if (refcount_dec_and_test(&node->rethook->ref)) + kfree(node->rethook); + kfree(node); +} + +/** + * rethook_recycle() - return the node to rethook. + * @node: The struct rethook_node to be returned. + * + * Return @node to @node::rethook. If the @node::rethook is already + * marked as freed, this will free the @node. + */ +void rethook_recycle(struct rethook_node *node) +{ + lockdep_assert_preemption_disabled(); + + if (likely(READ_ONCE(node->rethook->handler))) + freelist_add(&node->freelist, &node->rethook->pool); + else + call_rcu(&node->rcu, free_rethook_node_rcu); +} +NOKPROBE_SYMBOL(rethook_recycle);
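Putting rethook_alloc(), rethook_add_node() and rethook_recycle() together: a rethook user typically embeds struct rethook_node in its own node type and pre-fills the pool, much as fprobe_init_rethook() does earlier in this commit. A hedged sketch (my_hook_node, my_hook_handler and the pool sizing are illustrative assumptions, not part of the patch):

.. code-block:: c

    #include <linux/rethook.h>
    #include <linux/slab.h>

    /* Embed rethook_node to attach per-call data to each shadow-stack entry. */
    struct my_hook_node {
            struct rethook_node node;
            unsigned long entry_ip;
    };

    static void my_hook_handler(struct rethook_node *rhn, void *data,
                                struct pt_regs *regs)
    {
            /* Runs when a hooked function returns. */
    }

    static struct rethook *my_hook_setup(void *data, int pool_size)
    {
            struct rethook *rh = rethook_alloc(data, my_hook_handler);
            int i;

            if (!rh)
                    return NULL;

            for (i = 0; i < pool_size; i++) {
                    struct my_hook_node *n = kzalloc(sizeof(*n), GFP_KERNEL);

                    if (!n) {
                            /* Frees @rh and any nodes already pooled. */
                            rethook_free(rh);
                            return NULL;
                    }
                    rethook_add_node(rh, &n->node);
            }
            return rh;
    }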
/** + * rethook_try_get() - get an unused rethook node. + * @rh: The struct rethook which pools the nodes. + * + * Get an unused rethook node from @rh. If the node pool is empty, this + * will return NULL. The caller must disable preemption. + */ +struct rethook_node *rethook_try_get(struct rethook *rh) +{ + rethook_handler_t handler = READ_ONCE(rh->handler); + struct freelist_node *fn; + + lockdep_assert_preemption_disabled(); + + /* Check whether @rh is going to be freed. */ + if (unlikely(!handler)) + return NULL; + + fn = freelist_try_get(&rh->pool); + if (!fn) + return NULL; + + return container_of(fn, struct rethook_node, freelist); +} +NOKPROBE_SYMBOL(rethook_try_get); + +/** + * rethook_hook() - Hook the current function return. + * @node: The struct rethook node to hook the function return. + * @regs: The struct pt_regs for the function entry. + * @mcount: True if this is called from mcount(ftrace) context. + * + * Hook the return of the currently running function. This must be called at + * function entry (or at least @regs must be the registers of the function + * entry.) @mcount is used for identifying the context. If this is called + * from an ftrace (mcount) callback, @mcount must be set true. If this is + * called from the real function entry (e.g. kprobes), @mcount must be set + * false. This is because the way to hook the function return depends on the + * context. + */ +void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount) +{ + arch_rethook_prepare(node, regs, mcount); + __llist_add(&node->llist, &current->rethooks); +} +NOKPROBE_SYMBOL(rethook_hook); + +/* This assumes the 'tsk' is the current task or is not running. */ +static unsigned long __rethook_find_ret_addr(struct task_struct *tsk, + struct llist_node **cur) +{ + struct rethook_node *rh = NULL; + struct llist_node *node = *cur; + + if (!node) + node = tsk->rethooks.first; + else + node = node->next; + + while (node) { + rh = container_of(node, struct rethook_node, llist); + if (rh->ret_addr != (unsigned long)arch_rethook_trampoline) { + *cur = node; + return rh->ret_addr; + } + node = node->next; + } + return 0; +} +NOKPROBE_SYMBOL(__rethook_find_ret_addr); + +/** + * rethook_find_ret_addr -- Find correct return address modified by rethook + * @tsk: Target task + * @frame: A frame pointer + * @cur: a storage of the loop cursor llist_node pointer for next call + * + * Find the correct return address modified by rethook on @tsk, as an + * unsigned long. + * The @tsk must be 'current' or a task which is not running. @frame is a hint + * to get the correct return address - it is compared with the + * rethook::frame field. The @cur is a loop cursor for searching the + * rethook return addresses on the @tsk. The '*@cur' should be NULL at the + * first call, but '@cur' itself must not be NULL. + * + * Returns the found address value, or zero if not found. + */ +unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame, + struct llist_node **cur) +{ + struct rethook_node *rhn = NULL; + unsigned long ret; + + if (WARN_ON_ONCE(!cur)) + return 0; + + if (WARN_ON_ONCE(tsk != current && task_is_running(tsk))) + return 0; + + do { + ret = __rethook_find_ret_addr(tsk, cur); + if (!ret) + break; + rhn = container_of(*cur, struct rethook_node, llist); + } while (rhn->frame != frame); + + return ret; +} +NOKPROBE_SYMBOL(rethook_find_ret_addr);
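For completeness, the consumer side of rethook_find_ret_addr() is the pattern the arch unwinder hunks earlier in this commit follow; a condensed sketch (the frame/pc/tsk/kr_cur fields stand in for the arch-specific unwind state):

.. code-block:: c

    /* While unwinding one frame, after computing the new pc: */
    #ifdef CONFIG_RETHOOK
            if (is_rethook_trampoline(frame->pc))
                    frame->pc = rethook_find_ret_addr(frame->tsk, frame->fp,
                                                      &frame->kr_cur);
    #endif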
+
+/* This function will be called from each arch-defined trampoline. */
+unsigned long rethook_trampoline_handler(struct pt_regs *regs,
+					 unsigned long frame)
+{
+	struct llist_node *first, *node = NULL;
+	unsigned long correct_ret_addr;
+	rethook_handler_t handler;
+	struct rethook_node *rhn;
+
+	correct_ret_addr = __rethook_find_ret_addr(current, &node);
+	if (!correct_ret_addr) {
+		pr_err("rethook: Return address not found! Maybe there is a bug in the kernel\n");
+		BUG_ON(1);
+	}
+
+	instruction_pointer_set(regs, correct_ret_addr);
+
+	/*
+	 * These loops must be protected from rethook_free_rcu() because they
+	 * access 'rhn->rethook'.
+	 */
+	preempt_disable();
+
+	/*
+	 * Run the handlers on the shadow stack. Do not unlink the list here
+	 * because a stackdump inside the handlers needs to decode it.
+	 */
+	first = current->rethooks.first;
+	while (first) {
+		rhn = container_of(first, struct rethook_node, llist);
+		if (WARN_ON_ONCE(rhn->frame != frame))
+			break;
+		handler = READ_ONCE(rhn->rethook->handler);
+		if (handler)
+			handler(rhn, rhn->rethook->data, regs);
+
+		if (first == node)
+			break;
+		first = first->next;
+	}
+
+	/* Fixup registers for returning to the correct address. */
+	arch_rethook_fixup_return(regs, correct_ret_addr);
+
+	/* Unlink the used part of the shadow stack. */
+	first = current->rethooks.first;
+	current->rethooks.first = node->next;
+	node->next = NULL;
+
+	while (first) {
+		rhn = container_of(first, struct rethook_node, llist);
+		first = first->next;
+		rethook_recycle(rhn);
+	}
+	preempt_enable();
+
+	return correct_ret_addr;
+}
+NOKPROBE_SYMBOL(rethook_trampoline_handler);
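The cursor-based rethook_find_ret_addr() above is meant for stack unwinders. A hedged sketch of a consumer follows; unwind_next() and record_frame() are assumed helper names standing in for an unwinder's real primitives, and tsk is the task being unwound.

.. code-block:: c

	/* Unwinder fragment: whenever a saved return address turns out to
	 * be the rethook trampoline, translate it back to the real return
	 * address. @tsk must be current or a task that is not running;
	 * @cur starts at NULL and keeps the search position across calls.
	 */
	struct llist_node *cur = NULL;
	unsigned long addr, frame;

	while (unwind_next(&addr, &frame)) {	/* assumed unwinder primitive */
		if (addr == (unsigned long)arch_rethook_trampoline)
			addr = rethook_find_ret_addr(tsk, frame, &cur);
		if (!addr)
			break;
		record_frame(addr);		/* assumed consumer */
	}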
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 72ca4684beda..b0bf0d224b2c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2118,6 +2118,18 @@ config KPROBES_SANITY_TEST
 	  Say N if you are unsure.
 
+config FPROBE_SANITY_TEST
+	bool "Self test for fprobe"
+	depends on DEBUG_KERNEL
+	depends on FPROBE
+	depends on KUNIT=y
+	help
+	  This option enables testing of fprobe when the system boots.
+	  A series of tests is run to verify that fprobe is functioning
+	  properly.
+
+	  Say N if you are unsure.
+
 config BACKTRACE_SELF_TEST
 	tristate "Self test for the backtrace code"
 	depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 300f569c626b..154008764b16 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -103,6 +103,8 @@ obj-$(CONFIG_TEST_HMM) += test_hmm.o
 obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
 obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
 obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
+CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
+obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
 #
 # CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
 # off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
diff --git a/lib/test_fprobe.c b/lib/test_fprobe.c
new file mode 100644
index 000000000000..ed70637a2ffa
--- /dev/null
+++ b/lib/test_fprobe.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * test_fprobe.c - simple sanity test for fprobe
+ */
+
+#include <linux/kernel.h>
+#include <linux/fprobe.h>
+#include <linux/random.h>
+#include <kunit/test.h>
+
+#define div_factor 3
+
+static struct kunit *current_test;
+
+static u32 rand1, entry_val, exit_val;
+
+/* Use indirect calls to avoid inlining the target functions */
+static u32 (*target)(u32 value);
+static u32 (*target2)(u32 value);
+static unsigned long target_ip;
+static unsigned long target2_ip;
+
+static noinline u32 fprobe_selftest_target(u32 value)
+{
+	return (value / div_factor);
+}
+
+static noinline u32 fprobe_selftest_target2(u32 value)
+{
+	return (value / div_factor) + 1;
+}
+
+static notrace void fp_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
+{
+	KUNIT_EXPECT_FALSE(current_test, preemptible());
+	/* This handler may be called for both fprobe_selftest_target and fprobe_selftest_target2 */
+	if (ip != target_ip)
+		KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
+	entry_val = (rand1 / div_factor);
+}
+
+static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
+{
+	unsigned long ret = regs_return_value(regs);
+
+	KUNIT_EXPECT_FALSE(current_test, preemptible());
+	if (ip != target_ip) {
+		KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
+		KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
+	} else
+		KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor));
+	KUNIT_EXPECT_EQ(current_test, entry_val, (rand1 / div_factor));
+	exit_val = entry_val + div_factor;
+}
+
+/* Test entry only (no rethook) */
+static void test_fprobe_entry(struct kunit *test)
+{
+	struct fprobe fp_entry = {
+		.entry_handler = fp_entry_handler,
+	};
+
+	current_test = test;
+
+	/* Before registering, unregister should fail. */
+	KUNIT_EXPECT_NE(test, 0, unregister_fprobe(&fp_entry));
+	KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp_entry, "fprobe_selftest_target*", NULL));
+
+	entry_val = 0;
+	exit_val = 0;
+	target(rand1);
+	KUNIT_EXPECT_NE(test, 0, entry_val);
+	KUNIT_EXPECT_EQ(test, 0, exit_val);
+
+	entry_val = 0;
+	exit_val = 0;
+	target2(rand1);
+	KUNIT_EXPECT_NE(test, 0, entry_val);
+	KUNIT_EXPECT_EQ(test, 0, exit_val);
+
+	KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp_entry));
+}
+
+static void test_fprobe(struct kunit *test)
+{
+	struct fprobe fp = {
+		.entry_handler = fp_entry_handler,
+		.exit_handler = fp_exit_handler,
+	};
+
+	current_test = test;
+	KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target*", NULL));
+
+	entry_val = 0;
+	exit_val = 0;
+	target(rand1);
+	KUNIT_EXPECT_NE(test, 0, entry_val);
+	KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+	entry_val = 0;
+	exit_val = 0;
+	target2(rand1);
+	KUNIT_EXPECT_NE(test, 0, entry_val);
+	KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+	KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+static void test_fprobe_syms(struct kunit *test)
+{
+	static const char *syms[] = {"fprobe_selftest_target", "fprobe_selftest_target2"};
+	struct fprobe fp = {
+		.entry_handler = fp_entry_handler,
+		.exit_handler = fp_exit_handler,
+	};
+
+	current_test = test;
+	KUNIT_EXPECT_EQ(test, 0, register_fprobe_syms(&fp, syms, 2));
+
+	entry_val = 0;
+	exit_val = 0;
+	target(rand1);
+	KUNIT_EXPECT_NE(test, 0, entry_val);
+	KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+	entry_val = 0;
+	exit_val = 0;
+	target2(rand1);
+	KUNIT_EXPECT_NE(test, 0, entry_val);
+	KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+	KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+static unsigned long get_ftrace_location(void *func)
+{
+	unsigned long size, addr = (unsigned long)func;
+
+	if (!kallsyms_lookup_size_offset(addr, &size, NULL) || !size)
+		return 0;
+
+	return ftrace_location_range(addr, addr + size - 1);
+}
+
+static int fprobe_test_init(struct kunit *test)
+{
+	do {
+		rand1 = prandom_u32();
+	} while (rand1 <= div_factor);
+
+	target = fprobe_selftest_target;
+	target2 = fprobe_selftest_target2;
+	target_ip = get_ftrace_location(target);
+	target2_ip = get_ftrace_location(target2);
+
+	return 0;
+}
+
+static struct kunit_case fprobe_testcases[] = {
+	KUNIT_CASE(test_fprobe_entry),
+	KUNIT_CASE(test_fprobe),
+	KUNIT_CASE(test_fprobe_syms),
+	{}
+};
+
+static struct kunit_suite fprobe_test_suite = {
+	.name = "fprobe_test",
+	.init = fprobe_test_init,
+	.test_cases = fprobe_testcases,
+};
+
+kunit_test_suites(&fprobe_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/samples/Kconfig b/samples/Kconfig
index 22cc921ae291..8415d60ea5f4 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -73,6 +73,13 @@ config SAMPLE_HW_BREAKPOINT
 	help
 	  This builds kernel hardware breakpoint example modules.
 
+config SAMPLE_FPROBE
+	tristate "Build fprobe examples -- loadable modules only"
+	depends on FPROBE && m
+	help
+	  This builds a fprobe example module. The module has an option
+	  'symbol', which accepts a probed symbol or multiple symbols
+	  separated with ','.
+
 config SAMPLE_KFIFO
 	tristate "Build kfifo examples -- loadable modules only"
 	depends on m
diff --git a/samples/Makefile b/samples/Makefile
index 1ae4de99c983..6d662965be5b 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -33,3 +33,4 @@ subdir-$(CONFIG_SAMPLE_WATCHDOG) += watchdog
 subdir-$(CONFIG_SAMPLE_WATCH_QUEUE) += watch_queue
 obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak/
 obj-$(CONFIG_SAMPLE_CORESIGHT_SYSCFG) += coresight/
+obj-$(CONFIG_SAMPLE_FPROBE) += fprobe/
diff --git a/samples/fprobe/Makefile b/samples/fprobe/Makefile
new file mode 100644
index 000000000000..ecccbfa6e99b
--- /dev/null
+++ b/samples/fprobe/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_SAMPLE_FPROBE) += fprobe_example.o
diff --git a/samples/fprobe/fprobe_example.c b/samples/fprobe/fprobe_example.c
new file mode 100644
index 000000000000..24d3cf109140
--- /dev/null
+++ b/samples/fprobe/fprobe_example.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Here's a sample kernel module showing the use of fprobe to dump a
+ * stack trace and selected registers when kernel_clone() is called.
+ *
+ * For more information on the theory of operation of kprobes, see
+ * Documentation/trace/kprobes.rst
+ *
+ * You will see the trace data in /var/log/messages and on the console
+ * whenever kernel_clone() is invoked to create a new process.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fprobe.h>
+#include <linux/sched/debug.h>
+#include <linux/slab.h>
+
+#define BACKTRACE_DEPTH 16
+#define MAX_SYMBOL_LEN 4096
+static struct fprobe sample_probe;
+
+static char symbol[MAX_SYMBOL_LEN] = "kernel_clone";
+module_param_string(symbol, symbol, sizeof(symbol), 0644);
+static char nosymbol[MAX_SYMBOL_LEN] = "";
+module_param_string(nosymbol, nosymbol, sizeof(nosymbol), 0644);
+static bool stackdump = true;
+module_param(stackdump, bool, 0644);
+
+static void show_backtrace(void)
+{
+	unsigned long stacks[BACKTRACE_DEPTH];
+	unsigned int len;
+
+	len = stack_trace_save(stacks, BACKTRACE_DEPTH, 2);
+	stack_trace_print(stacks, len, 24);
+}
+
+static void sample_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
+{
+	pr_info("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
+	if (stackdump)
+		show_backtrace();
+}
+
+static void sample_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
+{
+	unsigned long rip = instruction_pointer(regs);
+
+	pr_info("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
+		(void *)ip, (void *)ip, (void *)rip, (void *)rip);
+	if (stackdump)
+		show_backtrace();
+}
+
+static int __init fprobe_init(void)
+{
+	char *p, *symbuf = NULL;
+	const char **syms;
+	int ret, count, i;
+
+	sample_probe.entry_handler = sample_entry_handler;
+	sample_probe.exit_handler = sample_exit_handler;
+
+	if (strchr(symbol, '*')) {
+		/* filter based fprobe */
+		ret = register_fprobe(&sample_probe, symbol,
+				      nosymbol[0] == '\0' ? NULL : nosymbol);
+		goto out;
+	} else if (!strchr(symbol, ',')) {
+		symbuf = symbol;
+		ret = register_fprobe_syms(&sample_probe, (const char **)&symbuf, 1);
+		goto out;
+	}
+
+	/* Comma separated symbols */
+	symbuf = kstrdup(symbol, GFP_KERNEL);
+	if (!symbuf)
+		return -ENOMEM;
+	p = symbuf;
+	count = 1;
+	while ((p = strchr(++p, ',')) != NULL)
+		count++;
+
+	pr_info("%d symbols found\n", count);
+
+	syms = kcalloc(count, sizeof(char *), GFP_KERNEL);
+	if (!syms) {
+		kfree(symbuf);
+		return -ENOMEM;
+	}
+
+	p = symbuf;
+	for (i = 0; i < count; i++)
+		syms[i] = strsep(&p, ",");
+
+	ret = register_fprobe_syms(&sample_probe, syms, count);
+	kfree(syms);
+	kfree(symbuf);
+out:
+	if (ret < 0)
+		pr_err("register_fprobe failed, returned %d\n", ret);
+	else
+		pr_info("Planted fprobe at %s\n", symbol);
+
+	return ret;
+}
+
+static void __exit fprobe_exit(void)
+{
+	unregister_fprobe(&sample_probe);
+
+	pr_info("fprobe at %s unregistered\n", symbol);
+}
+
+module_init(fprobe_init)
+module_exit(fprobe_exit)
+MODULE_LICENSE("GPL");
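One design note on the sample: it keeps a single file-scope sample_probe, which is fine for one probe. Because the handlers receive their struct fprobe pointer, per-probe state can instead be carried by embedding the fprobe in a larger structure, roughly as in this sketch (my_probe_ctx and my_entry_handler are made-up names, not part of the patch):

.. code-block:: c

	/* Hypothetical variant: embed the fprobe in private data instead of
	 * using a global, and recover it in the handler via container_of().
	 */
	struct my_probe_ctx {
		struct fprobe	fp;
		atomic_t	hits;
	};

	static void my_entry_handler(struct fprobe *fp, unsigned long ip,
				     struct pt_regs *regs)
	{
		struct my_probe_ctx *ctx = container_of(fp, struct my_probe_ctx, fp);

		atomic_inc(&ctx->hits);
	}

Registration would then set ctx->fp.entry_handler and pass &ctx->fp to register_fprobe(), exactly as the sample does with &sample_probe.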