| author | Julien Thierry <jthierry@redhat.com> | 2021-03-03 18:05:29 +0100 |
|---|---|---|
| committer | Will Deacon <will@kernel.org> | 2021-05-27 17:37:25 +0100 |
| commit | 5f154c4e20d7edd38bddec78f3e0a7628057ef76 (patch) | |
| tree | 2e5fd4d618b2acd02946c265e07a6ee5fb3e4f4d /arch/arm64/kernel/patching.c | |
| parent | c4681547bcce777daf576925a966ffa824edd09d (diff) | |
arm64: Move patching utilities out of instruction encoding/decoding
Files insn.[c|h] contain some functions used for instruction patching.
In order to reuse the instruction encoder/decoder, move the patching
utilities to their own file.
Signed-off-by: Julien Thierry <jthierry@redhat.com>
Link: https://lore.kernel.org/r/20210303170536.1838032-2-jthierry@redhat.com
[will: Include patching.h in insn.h to fix header mess; add __ASSEMBLY__ guards]
Signed-off-by: Will Deacon <will@kernel.org>
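The commit also adds arch/arm64/include/asm/patching.h, which the diffstat below doesn't show because it is limited to patching.c. Judging from the four functions moved here, the new header plausibly declares something like the following (a sketch inferred from the moved code, not the verbatim header):

```c
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_PATCHING_H
#define __ASM_PATCHING_H

#include <linux/types.h>

int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn);

int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);

#endif /* __ASM_PATCHING_H */
```

Per Will's note above, insn.h pulls in patching.h, so existing callers that only include <asm/insn.h> keep building unchanged.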
Diffstat (limited to 'arch/arm64/kernel/patching.c')
-rw-r--r-- | arch/arm64/kernel/patching.c | 148
1 file changed, 148 insertions(+), 0 deletions(-)
```diff
diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
new file mode 100644
index 000000000000..9d050e33901b
--- /dev/null
+++ b/arch/arm64/kernel/patching.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/insn.h>
+#include <asm/kprobes.h>
+#include <asm/patching.h>
+#include <asm/sections.h>
+
+static DEFINE_RAW_SPINLOCK(patch_lock);
+
+static bool is_exit_text(unsigned long addr)
+{
+	/* discarded with init text/data */
+	return system_state < SYSTEM_RUNNING &&
+		addr >= (unsigned long)__exittext_begin &&
+		addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+	return core_kernel_text(addr) || is_exit_text(addr);
+}
+
+static void __kprobes *patch_map(void *addr, int fixmap)
+{
+	unsigned long uintaddr = (uintptr_t) addr;
+	bool image = is_image_text(uintaddr);
+	struct page *page;
+
+	if (image)
+		page = phys_to_page(__pa_symbol(addr));
+	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+		page = vmalloc_to_page(addr);
+	else
+		return addr;
+
+	BUG_ON(!page);
+	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+			(uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap)
+{
+	clear_fixmap(fixmap);
+}
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+	int ret;
+	__le32 val;
+
+	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
+	if (!ret)
+		*insnp = le32_to_cpu(val);
+
+	return ret;
+}
+
+static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
+{
+	void *waddr = addr;
+	unsigned long flags = 0;
+	int ret;
+
+	raw_spin_lock_irqsave(&patch_lock, flags);
+	waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+	ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
+
+	patch_unmap(FIX_TEXT_POKE0);
+	raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+	return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+	return __aarch64_insn_write(addr, cpu_to_le32(insn));
+}
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+	u32 *tp = addr;
+	int ret;
+
+	/* A64 instructions must be word aligned */
+	if ((uintptr_t)tp & 0x3)
+		return -EINVAL;
+
+	ret = aarch64_insn_write(tp, insn);
+	if (ret == 0)
+		__flush_icache_range((uintptr_t)tp,
+				     (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+	return ret;
+}
+
+struct aarch64_insn_patch {
+	void		**text_addrs;
+	u32		*new_insns;
+	int		insn_cnt;
+	atomic_t	cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+	int i, ret = 0;
+	struct aarch64_insn_patch *pp = arg;
+
+	/* The first CPU becomes master */
+	if (atomic_inc_return(&pp->cpu_count) == 1) {
+		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+							     pp->new_insns[i]);
+		/* Notify other processors with an additional increment. */
+		atomic_inc(&pp->cpu_count);
+	} else {
+		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
+			cpu_relax();
+		isb();
+	}
+
+	return ret;
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+	struct aarch64_insn_patch patch = {
+		.text_addrs = addrs,
+		.new_insns = insns,
+		.insn_cnt = cnt,
+		.cpu_count = ATOMIC_INIT(0),
+	};
+
+	if (cnt <= 0)
+		return -EINVAL;
+
+	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
+				       cpu_online_mask);
+}
```
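For context, here is a minimal sketch of how these helpers combine with the A64 encoder that stays behind in insn.c. example_patch_branch() is hypothetical and not part of this commit; aarch64_insn_gen_branch_imm() and AARCH64_BREAK_FAULT are part of the existing <asm/insn.h> API:

```c
#include <asm/insn.h>
#include <asm/patching.h>

/*
 * Hypothetical caller: encode an unconditional branch from @site to
 * @target, then patch it in place. The _nosync variant writes through
 * the FIX_TEXT_POKE0 fixmap and flushes the I-cache for that word, so
 * it is only safe when no other CPU can concurrently execute the
 * instruction being replaced; otherwise use aarch64_insn_patch_text(),
 * which rendezvouses all online CPUs via stop_machine() as in the
 * callback above.
 */
static int example_patch_branch(void *site, void *target)
{
	u32 insn = aarch64_insn_gen_branch_imm((unsigned long)site,
					       (unsigned long)target,
					       AARCH64_INSN_BRANCH_NOLINK);

	if (insn == AARCH64_BREAK_FAULT)	/* offset out of B range */
		return -EINVAL;

	return aarch64_insn_patch_text_nosync(site, insn);
}
```

After this split, such a caller depends on insn.h only for encoding and on patching.h only for text modification, which is the separation the commit message describes.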