Diffstat (limited to 'arch/riscv/kernel/patch.c')
 arch/riscv/kernel/patch.c | 47 ++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 37 insertions(+), 10 deletions(-)
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 765004b60513..575e71d6c8ae 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -11,14 +11,18 @@
 #include <asm/kprobes.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
+#include <asm/ftrace.h>
 #include <asm/patch.h>

 struct patch_insn {
	void *addr;
-	u32 insn;
+	u32 *insns;
+	int ninsns;
	atomic_t cpu_count;
 };

+int riscv_patch_in_stop_machine = false;
+
 #ifdef CONFIG_MMU
 /*
  * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
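
This diff is limited to patch.c; the new flag and the changed patch_text() prototype also have to be visible to the ftrace and kprobes code, presumably from arch/riscv/include/asm/patch.h next to the existing declarations. A sketch of that header side, as an assumption rather than part of this commit:

/* arch/riscv/include/asm/patch.h (assumed sketch, not shown in this diff) */
int patch_text_nosync(void *addr, const void *insns, size_t len);
int patch_text(void *addr, u32 *insns, int ninsns);

extern int riscv_patch_in_stop_machine;
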
@@ -59,8 +63,15 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
	 * Before reaching here, it was expected to lock the text_mutex
	 * already, so we don't need to give another lock here and could
	 * ensure that it was safe between each cores.
+	 *
+	 * We're currently using stop_machine() for ftrace & kprobes, and while
+	 * that ensures text_mutex is held before installing the mappings it
+	 * does not ensure text_mutex is held by the calling thread. That's
+	 * safe but triggers a lockdep failure, so just elide it for that
+	 * specific case.
	 */
-	lockdep_assert_held(&text_mutex);
+	if (!riscv_patch_in_stop_machine)
+		lockdep_assert_held(&text_mutex);

	if (across_pages)
		patch_map(addr + len, FIX_TEXT_POKE1);
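
The elided assertion covers exactly the case the comment describes: when patching is driven through stop_machine(), text_mutex is taken by the thread that kicks off the update, while patch_insn_write() ends up running on the stopper thread, which lockdep treats as not holding the lock. The flag is therefore set by whoever owns text_mutex around the stop_machine() call. For ftrace that lives outside this file; a rough, assumed sketch of what that side looks like (hook names from the generic ftrace arch interface, not a hunk from this patch):

/* Assumed sketch of the ftrace side (not part of this patch.c-only diff). */
#include <linux/ftrace.h>
#include <linux/memory.h>	/* text_mutex */
#include <linux/mutex.h>
#include <asm/patch.h>

void ftrace_arch_code_modify_prepare(void)
{
	mutex_lock(&text_mutex);
	/* The actual writes will run under stop_machine(), on another thread. */
	riscv_patch_in_stop_machine = true;
}

void ftrace_arch_code_modify_post_process(void)
{
	riscv_patch_in_stop_machine = false;
	mutex_unlock(&text_mutex);
}
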
@@ -102,12 +113,15 @@ NOKPROBE_SYMBOL(patch_text_nosync);
 static int patch_text_cb(void *data)
 {
	struct patch_insn *patch = data;
-	int ret = 0;
+	unsigned long len;
+	int i, ret = 0;

	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
-		ret =
-		    patch_text_nosync(patch->addr, &patch->insn,
-				      GET_INSN_LENGTH(patch->insn));
+		for (i = 0; ret == 0 && i < patch->ninsns; i++) {
+			len = GET_INSN_LENGTH(patch->insns[i]);
+			ret = patch_text_nosync(patch->addr + i * len,
+						&patch->insns[i], len);
+		}
		atomic_inc(&patch->cpu_count);
	} else {
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
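
One detail worth noting in the new loop: GET_INSN_LENGTH() yields 2 for compressed and 4 for standard RISC-V encodings, and the destination is computed as patch->addr + i * len from the current instruction's length, so the offsets only line up when every entry in the array has the same length; in practice callers pass instructions of one size. A small illustration with made-up but well-formed encodings:

/* Illustration only: two 4-byte instructions, e.g. an auipc/jalr pair. */
u32 insns[2] = {
	0x00000097,	/* auipc ra, 0x0   -> written at addr + 0 */
	0x000080e7,	/* jalr  ra, 0(ra) -> written at addr + 4 */
};
/* GET_INSN_LENGTH() returns 4 for both, so iteration i writes addr + 4*i. */
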
@@ -119,15 +133,28 @@ static int patch_text_cb(void *data)
 }
 NOKPROBE_SYMBOL(patch_text_cb);

-int patch_text(void *addr, u32 insn)
+int patch_text(void *addr, u32 *insns, int ninsns)
 {
+	int ret;
	struct patch_insn patch = {
		.addr = addr,
-		.insn = insn,
+		.insns = insns,
+		.ninsns = ninsns,
		.cpu_count = ATOMIC_INIT(0),
	};

-	return stop_machine_cpuslocked(patch_text_cb,
-				       &patch, cpu_online_mask);
+	/*
+	 * kprobes takes text_mutex before calling patch_text(), but as we
+	 * then call stop_machine(), the lockdep assertion in
+	 * patch_insn_write() gets confused by the context in which the lock
+	 * is taken. Instead, ensure the lock is held before calling
+	 * stop_machine(), and set riscv_patch_in_stop_machine to skip the
+	 * check in patch_insn_write().
+	 */
+	lockdep_assert_held(&text_mutex);
+	riscv_patch_in_stop_machine = true;
+	ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
+	riscv_patch_in_stop_machine = false;
+	return ret;
 }
 NOKPROBE_SYMBOL(patch_text);
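
Taken together, callers now pass an array of instruction words plus a count, and must hold text_mutex themselves so that the lockdep_assert_held() at the top of patch_text() is satisfied. A minimal caller sketch under those assumptions (the function name, address, and encodings are made up for illustration, not taken from the patch):

#include <linux/types.h>
#include <linux/memory.h>	/* text_mutex */
#include <linux/mutex.h>
#include <asm/patch.h>

/* Hypothetical caller: patch a two-instruction call sequence at 'site'. */
static int example_patch_call_site(void *site)
{
	u32 insns[2] = { 0x00000097, 0x000080e7 };	/* auipc ra, 0; jalr ra, 0(ra) */
	int ret;

	mutex_lock(&text_mutex);		/* contract checked by patch_text() */
	ret = patch_text(site, insns, 2);	/* new (addr, insns, ninsns) interface */
	mutex_unlock(&text_mutex);

	return ret;
}

Keeping the assertion at the patch_text() boundary, and only eliding the inner one while riscv_patch_in_stop_machine is set, preserves the lock contract for ordinary callers while tolerating the stop_machine() context, where the write runs on the stopper thread rather than the thread that owns the mutex.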