author     Benjamin Gray <bgray@linux.ibm.com>    2024-05-15 12:44:41 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>  2024-08-21 20:15:12 +1000
commit     e6b8940e7e80cdfe98ba8493214922998920dd9c (patch)
tree       a119cdae4ded82a1736c23ee25e27bdf3c423537 /arch/powerpc/lib
parent     a540ad3e386f8f84bc6d600b93792a50861a81ef (diff)
powerpc/code-patching: Add generic memory patching
patch_instruction() is designed for patching instructions in otherwise
readonly memory. Other consumers also sometimes need to patch readonly
memory, so have abused patch_instruction() for arbitrary data patches.

This is a problem on ppc64 as patch_instruction() decides on the patch
width using the 'instruction' opcode to see if it's a prefixed
instruction. Data that triggers this can lead to larger writes, possibly
crossing a page boundary and failing the write altogether.

Introduce patch_uint() and patch_ulong(), with aliases patch_u32() and
patch_u64() (on ppc64), designed for aligned data patches. The patch
size is now determined by the called function, and is passed as an
additional parameter to generic internals.

While the instruction flushing is not required for data patches, it
remains unconditional in this patch. A followup series is possible if
benchmarking shows fewer flushes gives an improvement in some
data-patching workload.

ppc32 does not support prefixed instructions, so is unaffected by the
original issue. Care is taken not to expose the size parameter in the
public (non-static) interface, so the compiler can const-propagate it
away.

Signed-off-by: Benjamin Gray <bgray@linux.ibm.com>
Reviewed-by: Hari Bathini <hbathini@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240515024445.236364-2-bgray@linux.ibm.com
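[Editor's illustration] A minimal sketch of how a caller might use the new helpers, assuming the declarations from the rest of this series are available via <asm/code-patching.h>; `patched_data` and `update_patched_data()` are hypothetical names, not part of the commit:

	#include <asm/code-patching.h>

	/* Hypothetical data word living in otherwise readonly memory */
	static unsigned int patched_data __ro_after_init;

	static int update_patched_data(unsigned int new_val)
	{
		/*
		 * Previously a caller might have abused
		 * patch_instruction(&patched_data, ppc_inst(new_val)),
		 * which on ppc64 mis-sizes the write to 8 bytes whenever
		 * new_val happens to match a prefixed-instruction opcode.
		 * patch_uint() always performs an aligned 4-byte write.
		 */
		return patch_uint(&patched_data, new_val);
	}

patch_ulong() covers unsigned long sized data the same way, and on ppc64 the patch_u32()/patch_u64() aliases mentioned above map onto these two.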
Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--  arch/powerpc/lib/code-patching.c  |  64
1 file changed, 49 insertions(+), 15 deletions(-)
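[Editor's illustration] The "For big endian correctness" comment in the first hunk below is worth unpacking. A standalone userspace sketch (plain C, not part of the commit) showing why a 4-byte store through the address of an unsigned long would hit the wrong half on a big-endian 64-bit target:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned long val = 0x12345678UL;	/* 64-bit on ppc64 */
		uint32_t first_half;

		/* A u32 store through (u32 *)&val would touch these bytes: */
		memcpy(&first_half, &val, sizeof(first_half));

		/* Little endian: 0x12345678 (low half). Big endian: 0x0 (high half). */
		printf("first 4 bytes as u32: 0x%x\n", first_half);

		/* Copying into a u32 first, as __patch_mem does, is endian-safe. */
		uint32_t val32 = (uint32_t)val;
		printf("val32: 0x%x\n", val32);
		return 0;
	}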
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 0d1f3ee91115..7f423fa3c51b 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -20,15 +20,14 @@
#include <asm/code-patching.h>
#include <asm/inst.h>
-static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
+static int __patch_mem(void *exec_addr, unsigned long val, void *patch_addr, bool is_dword)
{
- if (!ppc_inst_prefixed(instr)) {
- u32 val = ppc_inst_val(instr);
+ if (!IS_ENABLED(CONFIG_PPC64) || likely(!is_dword)) {
+ /* For big endian correctness: plain address would use the wrong half */
+ u32 val32 = val;
- __put_kernel_nofault(patch_addr, &val, u32, failed);
+ __put_kernel_nofault(patch_addr, &val32, u32, failed);
} else {
- u64 val = ppc_inst_as_ulong(instr);
-
__put_kernel_nofault(patch_addr, &val, u64, failed);
}
@@ -44,7 +43,10 @@ failed:
int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
- return __patch_instruction(addr, instr, addr);
+ if (ppc_inst_prefixed(instr))
+ return __patch_mem(addr, ppc_inst_as_ulong(instr), addr, true);
+ else
+ return __patch_mem(addr, ppc_inst_val(instr), addr, false);
}
struct patch_context {
@@ -276,7 +278,7 @@ static void unmap_patch_area(unsigned long addr)
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
-static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
+static int __do_patch_mem_mm(void *addr, unsigned long val, bool is_dword)
{
int err;
u32 *patch_addr;
@@ -305,7 +307,7 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
orig_mm = start_using_temp_mm(patching_mm);
- err = __patch_instruction(addr, instr, patch_addr);
+ err = __patch_mem(addr, val, patch_addr, is_dword);
/* context synchronisation performed by __patch_instruction (isync or exception) */
stop_using_temp_mm(patching_mm, orig_mm);
@@ -322,7 +324,7 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
return err;
}
-static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
+static int __do_patch_mem(void *addr, unsigned long val, bool is_dword)
{
int err;
u32 *patch_addr;
@@ -339,7 +341,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
if (radix_enabled())
asm volatile("ptesync": : :"memory");
- err = __patch_instruction(addr, instr, patch_addr);
+ err = __patch_mem(addr, val, patch_addr, is_dword);
pte_clear(&init_mm, text_poke_addr, pte);
flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
@@ -347,7 +349,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
return err;
}
-int patch_instruction(u32 *addr, ppc_inst_t instr)
+static int patch_mem(void *addr, unsigned long val, bool is_dword)
{
int err;
unsigned long flags;
@@ -359,19 +361,51 @@ int patch_instruction(u32 *addr, ppc_inst_t instr)
*/
if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) ||
!static_branch_likely(&poking_init_done))
- return raw_patch_instruction(addr, instr);
+ return __patch_mem(addr, val, addr, is_dword);
local_irq_save(flags);
if (mm_patch_enabled())
- err = __do_patch_instruction_mm(addr, instr);
+ err = __do_patch_mem_mm(addr, val, is_dword);
else
- err = __do_patch_instruction(addr, instr);
+ err = __do_patch_mem(addr, val, is_dword);
local_irq_restore(flags);
return err;
}
+
+#ifdef CONFIG_PPC64
+
+int patch_instruction(u32 *addr, ppc_inst_t instr)
+{
+ if (ppc_inst_prefixed(instr))
+ return patch_mem(addr, ppc_inst_as_ulong(instr), true);
+ else
+ return patch_mem(addr, ppc_inst_val(instr), false);
+}
NOKPROBE_SYMBOL(patch_instruction);
+int patch_uint(void *addr, unsigned int val)
+{
+ return patch_mem(addr, val, false);
+}
+NOKPROBE_SYMBOL(patch_uint);
+
+int patch_ulong(void *addr, unsigned long val)
+{
+ return patch_mem(addr, val, true);
+}
+NOKPROBE_SYMBOL(patch_ulong);
+
+#else
+
+int patch_instruction(u32 *addr, ppc_inst_t instr)
+{
+ return patch_mem(addr, ppc_inst_val(instr), false);
+}
+NOKPROBE_SYMBOL(patch_instruction);
+
+#endif
+
static int patch_memset64(u64 *addr, u64 val, size_t count)
{
for (u64 *end = addr + count; addr < end; addr++)