author		Masami Hiramatsu <[email protected]>	2021-02-18 23:29:23 +0900
committer	Steven Rostedt (VMware) <[email protected]>	2021-02-19 14:57:12 -0500
commit		c85c9a2c6e368dc94907e63babb18a9788e5c9b6 (patch)
tree		231d254e73c29794c640a607d6c2c5c20e5456df
parent		e23db805da2dfc39e5281b5efd3e36d132aa83af (diff)
kprobes: Fix to delay the kprobes jump optimization
Commit 36dadef23fcc ("kprobes: Init kprobes in early_initcall") moved the
kprobe setup into early_initcall(), which includes the kprobe jump
optimization. The kprobes jump optimizer involves synchronize_rcu_tasks(),
which depends on ksoftirqd and rcu_spawn_tasks_*(). However, since those
are set up in core_initcall(), the kprobes jump optimizer cannot run at
early_initcall() time.

To avoid this issue, leave kprobe optimization disabled in early_initcall()
and enable it in subsys_initcall(). Note that non-optimized kprobes are
still available after early_initcall(); only the jump optimization is
delayed.

Link: https://lkml.kernel.org/r/161365856280.719838.12423085451287256713.stgit@devnote2

Fixes: 36dadef23fcc ("kprobes: Init kprobes in early_initcall")
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: RCU <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Daniel Axtens <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Neeraj Upadhyay <[email protected]>
Cc: Joel Fernandes <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: "Theodore Y . Ts'o" <[email protected]>
Cc: Oleksiy Avramchenko <[email protected]>
Cc: [email protected]
Reported-by: Paul E. McKenney <[email protected]>
Reported-by: Sebastian Andrzej Siewior <[email protected]>
Reported-by: Uladzislau Rezki <[email protected]>
Acked-by: Paul E. McKenney <[email protected]>
Signed-off-by: Masami Hiramatsu <[email protected]>
Signed-off-by: Steven Rostedt (VMware) <[email protected]>
-rw-r--r--	kernel/kprobes.c	31
1 file changed, 21 insertions(+), 10 deletions(-)
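The constraint behind this fix is initcall ordering: early_initcall()
callbacks run before core_initcall(), which in turn runs before
subsys_initcall(), so work that needs the ksoftirqd and RCU-tasks kthreads
(spawned at core_initcall() time) must be deferred to a later initcall
level. Below is a minimal illustrative sketch of that ordering; it is not
part of this patch, and the function names example_early_setup() and
example_late_setup() are made up for illustration.

/*
 * Sketch of initcall ordering (illustration only, not from this patch).
 * early_initcall() runs before core_initcall(), which runs before
 * subsys_initcall(), so anything that needs synchronize_rcu_tasks() or
 * ksoftirqd must wait for subsys_initcall() or later. Built-in code only;
 * early_initcall() is not available to modules.
 */
#include <linux/init.h>
#include <linux/printk.h>

static int __init example_early_setup(void)
{
	/* Runs first: register things, but RCU-tasks is not spawned yet. */
	pr_info("early_initcall: setup done, optimization deferred\n");
	return 0;
}
early_initcall(example_early_setup);

static int __init example_late_setup(void)
{
	/* Runs after core_initcall(): synchronize_rcu_tasks() is now safe. */
	pr_info("subsys_initcall: kicking the deferred work\n");
	return 0;
}
subsys_initcall(example_late_setup);

This mirrors the shape of the patch below: init_kprobes() stays at
early_initcall(), while the new init_optprobes() calls
optimize_all_kprobes() from subsys_initcall().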
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index dd1d027455c4..745f08fdd7a6 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -861,7 +861,6 @@ out:
cpus_read_unlock();
}
-#ifdef CONFIG_SYSCTL
static void optimize_all_kprobes(void)
{
struct hlist_head *head;
@@ -887,6 +886,7 @@ out:
mutex_unlock(&kprobe_mutex);
}
+#ifdef CONFIG_SYSCTL
static void unoptimize_all_kprobes(void)
{
struct hlist_head *head;
@@ -2500,18 +2500,14 @@ static int __init init_kprobes(void)
}
}
-#if defined(CONFIG_OPTPROBES)
-#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
- /* Init kprobe_optinsn_slots */
- kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
-#endif
- /* By default, kprobes can be optimized */
- kprobes_allow_optimization = true;
-#endif
-
/* By default, kprobes are armed */
kprobes_all_disarmed = false;
+#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+ /* Init kprobe_optinsn_slots for allocation */
+ kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
+#endif
+
err = arch_init_kprobes();
if (!err)
err = register_die_notifier(&kprobe_exceptions_nb);
@@ -2526,6 +2522,21 @@ static int __init init_kprobes(void)
}
early_initcall(init_kprobes);
+#if defined(CONFIG_OPTPROBES)
+static int __init init_optprobes(void)
+{
+ /*
+ * Enable kprobe optimization - this kicks the optimizer which
+ * depends on synchronize_rcu_tasks() and ksoftirqd, that is
+ * not spawned in early initcall. So delay the optimization.
+ */
+ optimize_all_kprobes();
+
+ return 0;
+}
+subsys_initcall(init_optprobes);
+#endif
+
#ifdef CONFIG_DEBUG_FS
static void report_probe(struct seq_file *pi, struct kprobe *p,
const char *sym, int offset, char *modname, struct kprobe *pp)