author     Peter Zijlstra <[email protected]>   2019-11-11 14:02:10 +0100
committer  Ingo Molnar <[email protected]>         2019-11-27 07:44:25 +0100
commit     f2cb4f95b7571f2bebcf226cd92b448fd58950ca (patch)
tree       90fdcc1b05666267186ea47bd45fc7c3f71e0c6b
parent     04ae87a52074e2d448fc66143f1bd2c7d694d2b9 (diff)
x86/kprobe: Add comments to arch_{,un}optimize_kprobes()
Add a few words describing how it is safe to overwrite the 4 bytes
after a kprobe. Specifically, it is possible that the JMP.d32 required
for the optimized kprobe overwrites multiple instructions.

Tested-by: Alexei Starovoitov <[email protected]>
Tested-by: Steven Rostedt (VMware) <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
-rw-r--r--  arch/x86/kernel/kprobes/opt.c  14
1 file changed, 12 insertions, 2 deletions
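For readers less familiar with the instruction encoding involved, here is a minimal standalone C sketch (not kernel code; the addresses, the local macro definitions and the encode_jmp_d32() helper are invented for illustration) of why an optimized kprobe needs 5 bytes: a JMP.d32 is one 0xE9 opcode byte followed by a 32-bit relative displacement, so writing it at the probed address overwrites the INT3 plus the 4 bytes after it.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define JMP32_INSN_OPCODE	0xE9	/* JMP rel32 */
#define JMP32_INSN_SIZE		5	/* 1 opcode byte + 4 displacement bytes */

/*
 * Illustration only: build the 5-byte JMP.d32 that would redirect
 * execution from 'addr' to 'target'. The displacement is relative to
 * the end of the jump instruction itself.
 */
static void encode_jmp_d32(uint8_t buf[JMP32_INSN_SIZE],
			   uint64_t addr, uint64_t target)
{
	int32_t rel = (int32_t)(target - (addr + JMP32_INSN_SIZE));

	buf[0] = JMP32_INSN_OPCODE;
	memcpy(&buf[1], &rel, sizeof(rel));
}

int main(void)
{
	/* Made-up addresses, chosen only for the sake of the example. */
	uint64_t probe_addr = 0xffffffff81000100ULL;
	uint64_t trampoline = 0xffffffffa0002000ULL;
	uint8_t insn[JMP32_INSN_SIZE];
	int i;

	encode_jmp_d32(insn, probe_addr, trampoline);

	/*
	 * Byte 0 replaces the INT3 that the regular kprobe installed;
	 * bytes 1-4 overwrite whatever instruction bytes follow it, which
	 * is only safe once those bytes are known to be unreachable.
	 */
	printf("JMP.d32 at %#llx:", (unsigned long long)probe_addr);
	for (i = 0; i < JMP32_INSN_SIZE; i++)
		printf(" %02x", insn[i]);
	printf("\n");
	return 0;
}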
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 26e0d6c4d8d6..3f45b5c43a71 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -414,8 +414,12 @@ err:
}

/*
- * Replace breakpoints (int3) with relative jumps.
+ * Replace breakpoints (INT3) with relative jumps (JMP.d32).
* Caller must call with locking kprobe_mutex and text_mutex.
+ *
+ * The caller will have installed a regular kprobe and after that issued
+ * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
+ * the 4 bytes after the INT3 are unused and can now be overwritten.
*/
void arch_optimize_kprobes(struct list_head *oplist)
{
@@ -441,7 +445,13 @@ void arch_optimize_kprobes(struct list_head *oplist)
}
}

-/* Replace a relative jump with a breakpoint (int3). */
+/*
+ * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
+ *
+ * After that, we can restore the 4 bytes after the INT3 to undo what
+ * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
+ * unused once the INT3 lands.
+ */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
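To make the ordering described in the new arch_unoptimize_kprobe() comment concrete, the following userspace simulation (again purely illustrative: a plain byte buffer stands in for kernel text and the instruction bytes are made up) walks through the undo sequence. The INT3 is written back first; from that point on no CPU can decode the 4 trailing bytes as part of the JMP.d32, so they can then be restored to their original contents.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INT3_INSN_OPCODE	0xCC
#define JMP32_INSN_SIZE		5

/* Pretend these were the original instruction bytes at the probe site. */
static const uint8_t saved[JMP32_INSN_SIZE] = { 0x55, 0x48, 0x89, 0xe5, 0x41 };

static void dump(const char *what, const uint8_t *text)
{
	int i;

	printf("%-22s:", what);
	for (i = 0; i < JMP32_INSN_SIZE; i++)
		printf(" %02x", text[i]);
	printf("\n");
}

int main(void)
{
	/* Pretend this is kernel text currently holding an optimized probe. */
	uint8_t text[JMP32_INSN_SIZE] = { 0xe9, 0xfb, 0x1f, 0x00, 0x1f };

	dump("optimized (JMP.d32)", text);

	/* Step 1: re-arm the breakpoint; the 4 trailing bytes are now dead. */
	text[0] = INT3_INSN_OPCODE;
	dump("after INT3 re-armed", text);

	/* Step 2: restore the 4 bytes after the INT3 that the jump clobbered. */
	memcpy(&text[1], &saved[1], JMP32_INSN_SIZE - 1);
	dump("after bytes restored", text);

	return 0;
}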