From 21733cb51847132567e0f5f568f126de08669842 Mon Sep 17 00:00:00 2001 From: Eric Lin Date: Fri, 4 Dec 2020 13:42:58 +0800 Subject: riscv/mm: Introduce a die_kernel_fault() helper function Like arm64, this patch adds a die_kernel_fault() helper to ensure the same semantics for the different kernel faults. Signed-off-by: Eric Lin Reviewed-by: Pekka Enberg Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/fault.c | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 3c8b9e433c67..0d5f06d6e3c7 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -19,8 +19,23 @@ #include "../kernel/head.h" +static void die_kernel_fault(const char *msg, unsigned long addr, + struct pt_regs *regs) +{ + bust_spinlocks(1); + + pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n", msg, + addr); + + bust_spinlocks(0); + die(regs, "Oops"); + do_exit(SIGKILL); +} + static inline void no_context(struct pt_regs *regs, unsigned long addr) { + const char *msg; + /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) return; @@ -29,12 +44,8 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr) * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. */ - bust_spinlocks(1); - pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n", - (addr < PAGE_SIZE) ? "NULL pointer dereference" : - "paging request", addr); - die(regs, "Oops"); - do_exit(SIGKILL); + msg = (addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request"; + die_kernel_fault(msg, addr, regs); } static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault) -- cgit From 21855cac82d3264aa660deafa9c26b8eef548b7a Mon Sep 17 00:00:00 2001 From: Eric Lin Date: Fri, 4 Dec 2020 13:42:59 +0800 Subject: riscv/mm: Prevent kernel modules from accessing user memory without uaccess routines We found this issue in a legacy out-of-tree kernel module which didn't properly access user space pointers via get/put_user(). Such an illegal access loops in the page fault handler. To resolve this, let it die here. Signed-off-by: Eric Lin Reviewed-by: Pekka Enberg Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/fault.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 0d5f06d6e3c7..33d284188f9a 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -243,6 +243,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs) if (user_mode(regs)) flags |= FAULT_FLAG_USER; + if (!user_mode(regs) && addr < TASK_SIZE && + unlikely(!(regs->status & SR_SUM))) + die_kernel_fault("access to user memory without uaccess routines", + addr, regs); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); if (cause == EXC_STORE_PAGE_FAULT) -- cgit From d5805af9fe9ffe4a9d975e9bc39496f57a161076 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Sun, 13 Dec 2020 22:50:37 +0900 Subject: riscv: Fix builtin DTB handling All SiPeed K210 MAIX boards have the exact same vendor, arch and implementation IDs, making it impossible to differentiate between them and select the correct device tree through the SOC_BUILTIN_DTB_DECLARE() macro. This renders the macro useless and mandates changing the code of the sysctl driver to select the builtin device tree suitable for the target board.
Fix this problem by removing the SOC_BUILTIN_DTB_DECLARE() macro since it is used only for the K210 support. The code searching the builtin DTBs using the vendor, arch and implementation IDs is also removed. Support for builtin DTB falls back to the simpler and more traditional handling using the CONFIG_BUILTIN_DTB option, similarly to other architectures. Signed-off-by: Damien Le Moal Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig.socs | 19 +++++++++++++----- arch/riscv/boot/dts/Makefile | 2 +- arch/riscv/boot/dts/kendryte/Makefile | 5 +++-- arch/riscv/include/asm/soc.h | 38 ----------------------------------- arch/riscv/kernel/soc.c | 27 ------------------------- arch/riscv/mm/init.c | 6 +----- drivers/soc/kendryte/k210-sysctl.c | 12 ----------- 7 files changed, 19 insertions(+), 90 deletions(-) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs index 3284d5c291be..59000675cb9f 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -32,9 +32,7 @@ config SOC_KENDRYTE help This enables support for Kendryte K210 SoC platform hardware. -config SOC_KENDRYTE_K210_DTB - def_bool y - depends on SOC_KENDRYTE_K210_DTB_BUILTIN +if SOC_KENDRYTE config SOC_KENDRYTE_K210_DTB_BUILTIN bool "Builtin device tree for the Kendryte K210" @@ -42,10 +40,21 @@ config SOC_KENDRYTE_K210_DTB_BUILTIN default y select OF select BUILTIN_DTB - select SOC_KENDRYTE_K210_DTB help - Builds a device tree for the Kendryte K210 into the Linux image. + Build a device tree for the Kendryte K210 into the Linux image. This option should be selected if no bootloader is being used. If unsure, say Y. +config SOC_KENDRYTE_K210_DTB_SOURCE + string "Source file for the Kendryte K210 builtin DTB" + depends on SOC_KENDRYTE + depends on SOC_KENDRYTE_K210_DTB_BUILTIN + default "k210" + help + Base name (without suffix, relative to arch/riscv/boot/dts/kendryte) + for the DTS file that will be used to produce the DTB linked into the + kernel. + +endif + endmenu diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile index ca1f8cbd78c0..21e3905f1c44 100644 --- a/arch/riscv/boot/dts/Makefile +++ b/arch/riscv/boot/dts/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 subdir-y += sifive -subdir-y += kendryte +subdir-$(CONFIG_SOC_KENDRYTE) += kendryte obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y)) diff --git a/arch/riscv/boot/dts/kendryte/Makefile b/arch/riscv/boot/dts/kendryte/Makefile index 1a88e616f18e..83636693166d 100644 --- a/arch/riscv/boot/dts/kendryte/Makefile +++ b/arch/riscv/boot/dts/kendryte/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -dtb-$(CONFIG_SOC_KENDRYTE_K210_DTB) += k210.dtb - +ifneq ($(CONFIG_SOC_KENDRYTE_K210_DTB_SOURCE),"") +dtb-y += $(strip $(shell echo $(CONFIG_SOC_KENDRYTE_K210_DTB_SOURCE))).dtb obj-$(CONFIG_SOC_KENDRYTE_K210_DTB_BUILTIN) += $(addsuffix .o, $(dtb-y)) +endif diff --git a/arch/riscv/include/asm/soc.h b/arch/riscv/include/asm/soc.h index 6c8363b1f327..f494066051a2 100644 --- a/arch/riscv/include/asm/soc.h +++ b/arch/riscv/include/asm/soc.h @@ -21,42 +21,4 @@ void soc_early_init(void); extern unsigned long __soc_early_init_table_start; extern unsigned long __soc_early_init_table_end; -/* - * Allows Linux to provide a device tree, which is necessary for SOCs that - * don't provide a useful one on their own.
- */ -struct soc_builtin_dtb { - unsigned long vendor_id; - unsigned long arch_id; - unsigned long imp_id; - void *(*dtb_func)(void); -}; - -/* - * The argument name must specify a valid DTS file name without the dts - * extension. - */ -#define SOC_BUILTIN_DTB_DECLARE(name, vendor, arch, impl) \ - extern void *__dtb_##name##_begin; \ - \ - static __init __used \ - void *__soc_builtin_dtb_f__##name(void) \ - { \ - return (void *)&__dtb_##name##_begin; \ - } \ - \ - static const struct soc_builtin_dtb __soc_builtin_dtb__##name \ - __used __section("__soc_builtin_dtb_table") = \ - { \ - .vendor_id = vendor, \ - .arch_id = arch, \ - .imp_id = impl, \ - .dtb_func = __soc_builtin_dtb_f__##name, \ - } - -extern unsigned long __soc_builtin_dtb_table_start; -extern unsigned long __soc_builtin_dtb_table_end; - -void *soc_lookup_builtin_dtb(void); - #endif diff --git a/arch/riscv/kernel/soc.c b/arch/riscv/kernel/soc.c index c7b0a73e382e..a0516172a33c 100644 --- a/arch/riscv/kernel/soc.c +++ b/arch/riscv/kernel/soc.c @@ -26,30 +26,3 @@ void __init soc_early_init(void) } } } - -static bool soc_builtin_dtb_match(unsigned long vendor_id, - unsigned long arch_id, unsigned long imp_id, - const struct soc_builtin_dtb *entry) -{ - return entry->vendor_id == vendor_id && - entry->arch_id == arch_id && - entry->imp_id == imp_id; -} - -void * __init soc_lookup_builtin_dtb(void) -{ - unsigned long vendor_id, arch_id, imp_id; - const struct soc_builtin_dtb *s; - - __asm__ ("csrr %0, mvendorid" : "=r"(vendor_id)); - __asm__ ("csrr %0, marchid" : "=r"(arch_id)); - __asm__ ("csrr %0, mimpid" : "=r"(imp_id)); - - for (s = (void *)&__soc_builtin_dtb_table_start; - (void *)s < (void *)&__soc_builtin_dtb_table_end; s++) { - if (soc_builtin_dtb_match(vendor_id, arch_id, imp_id, s)) - return s->dtb_func(); - } - - return NULL; -} diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index bf5379135e39..77bd23f47a72 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -605,11 +605,7 @@ static void __init setup_vm_final(void) asmlinkage void __init setup_vm(uintptr_t dtb_pa) { #ifdef CONFIG_BUILTIN_DTB - dtb_early_va = soc_lookup_builtin_dtb(); - if (!dtb_early_va) { - /* Fallback to first available DTS */ - dtb_early_va = (void *) __dtb_start; - } + dtb_early_va = (void *) __dtb_start; #else dtb_early_va = (void *)dtb_pa; #endif diff --git a/drivers/soc/kendryte/k210-sysctl.c b/drivers/soc/kendryte/k210-sysctl.c index 707019223dd8..4608fbca20e1 100644 --- a/drivers/soc/kendryte/k210-sysctl.c +++ b/drivers/soc/kendryte/k210-sysctl.c @@ -246,15 +246,3 @@ static void __init k210_soc_early_init(const void *fdt) iounmap(regs); } SOC_EARLY_INIT_DECLARE(generic_k210, "kendryte,k210", k210_soc_early_init); - -#ifdef CONFIG_SOC_KENDRYTE_K210_DTB_BUILTIN -/* - * Generic entry for the default k210.dtb embedded DTB for boards with: - * - Vendor ID: 0x4B5 - * - Arch ID: 0xE59889E6A5A04149 (= "Canaan AI" in UTF-8 encoded Chinese) - * - Impl ID: 0x4D41495832303030 (= "MAIX2000") - * These values are reported by the SiPEED MAXDUINO, SiPEED MAIX GO and - * SiPEED Dan dock boards. - */ -SOC_BUILTIN_DTB_DECLARE(k210, 0x4B5, 0xE59889E6A5A04149, 0x4D41495832303030); -#endif -- cgit From cbd34f4bb37d62d8a027f54205bff07e73340da4 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Wed, 18 Nov 2020 16:38:27 -0800 Subject: riscv: Separate memory init from paging init Currently, we perform some memory init functions in paging init. 
However, that is an issue for NUMA support, where the DT needs to be flattened before NUMA initialization, while memblock_present can only be called after NUMA initialization. Move the memory initialization related functions into a separate function. Signed-off-by: Atish Patra Reviewed-by: Greentime Hu Reviewed-by: Anup Patel Reviewed-by: Palmer Dabbelt Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/pgtable.h | 1 + arch/riscv/kernel/setup.c | 1 + arch/riscv/mm/init.c | 6 +++++- 3 files changed, 7 insertions(+), 1 deletion(-) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 41a72861987c..4dd56ea4a687 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -469,6 +469,7 @@ extern void *dtb_early_va; extern uintptr_t dtb_early_pa; void setup_bootmem(void); void paging_init(void); +void misc_mem_init(void); #define FIRST_USER_ADDRESS 0 diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index a43a954ef529..9cd81d4df031 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -250,6 +250,7 @@ void __init setup_arch(char **cmdline_p) else pr_err("No DTB found in kernel mappings\n"); #endif + misc_mem_init(); sbi_init(); diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 77bd23f47a72..62716b43660b 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -649,8 +649,12 @@ void mark_rodata_ro(void) void __init paging_init(void) { setup_vm_final(); - sparse_init(); setup_zero_page(); +} + +void __init misc_mem_init(void) +{ + sparse_init(); zone_sizes_init(); } -- cgit From 4f0e8eef772ee4438f304b2178bc28c958b6c13d Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Wed, 18 Nov 2020 16:38:29 -0800 Subject: riscv: Add numa support for riscv64 platform Use the generic NUMA implementation to add NUMA support for RISC-V. This is based on Greentime's patch [1], modified to use the generic NUMA implementation and carrying a few more fixes. [1] https://lkml.org/lkml/2020/1/10/233 Co-developed-by: Greentime Hu Signed-off-by: Greentime Hu Signed-off-by: Atish Patra Reviewed-by: Anup Patel Reviewed-by: Palmer Dabbelt Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 31 ++++++++++++++++++++++++++++++- arch/riscv/include/asm/mmzone.h | 13 +++++++++++++ arch/riscv/include/asm/numa.h | 8 ++++++++ arch/riscv/include/asm/pci.h | 14 ++++++++++++++ arch/riscv/kernel/setup.c | 10 ++++++++-- arch/riscv/kernel/smpboot.c | 12 +++++++++++- arch/riscv/mm/init.c | 4 +++- 7 files changed, 87 insertions(+), 5 deletions(-) create mode 100644 arch/riscv/include/asm/mmzone.h create mode 100644 arch/riscv/include/asm/numa.h (limited to 'arch/riscv/mm') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 81b76d44725d..2ef05ef921b5 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -143,7 +143,7 @@ config PAGE_OFFSET default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB config ARCH_FLATMEM_ENABLE - def_bool y + def_bool !NUMA config ARCH_SPARSEMEM_ENABLE def_bool y @@ -298,6 +298,35 @@ config TUNE_GENERIC endchoice +# Common NUMA Features +config NUMA + bool "NUMA Memory Allocation and Scheduler Support" + select GENERIC_ARCH_NUMA + select OF_NUMA + select ARCH_SUPPORTS_NUMA_BALANCING + help + Enable NUMA (Non-Uniform Memory Access) support. + + The kernel will try to allocate memory used by a CPU on the + local memory of the CPU and add some more NUMA awareness to the kernel.
+ +config NODES_SHIFT + int "Maximum NUMA Nodes (as a power of 2)" + range 1 10 + default "2" + depends on NEED_MULTIPLE_NODES + help + Specify the maximum number of NUMA Nodes available on the target + system. Increases memory reserved to accommodate various tables. + +config USE_PERCPU_NUMA_NODE_ID + def_bool y + depends on NUMA + +config NEED_PER_CPU_EMBED_FIRST_CHUNK + def_bool y + depends on NUMA + config RISCV_ISA_C bool "Emit compressed instructions when building Linux" default y diff --git a/arch/riscv/include/asm/mmzone.h b/arch/riscv/include/asm/mmzone.h new file mode 100644 index 000000000000..fa17e01d9ab2 --- /dev/null +++ b/arch/riscv/include/asm/mmzone.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_MMZONE_H +#define __ASM_MMZONE_H + +#ifdef CONFIG_NUMA + +#include + +extern struct pglist_data *node_data[]; +#define NODE_DATA(nid) (node_data[(nid)]) + +#endif /* CONFIG_NUMA */ +#endif /* __ASM_MMZONE_H */ diff --git a/arch/riscv/include/asm/numa.h b/arch/riscv/include/asm/numa.h new file mode 100644 index 000000000000..8c8cf4297cc3 --- /dev/null +++ b/arch/riscv/include/asm/numa.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_NUMA_H +#define __ASM_NUMA_H + +#include +#include + +#endif /* __ASM_NUMA_H */ diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h index 1c473a1bd986..658e112c3ce7 100644 --- a/arch/riscv/include/asm/pci.h +++ b/arch/riscv/include/asm/pci.h @@ -32,6 +32,20 @@ static inline int pci_proc_domain(struct pci_bus *bus) /* always show the domain in /proc */ return 1; } + +#ifdef CONFIG_NUMA + +static inline int pcibus_to_node(struct pci_bus *bus) +{ + return dev_to_node(&bus->dev); +} +#ifndef cpumask_of_pcibus +#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? 
\ + cpu_all_mask : \ + cpumask_of_node(pcibus_to_node(bus))) +#endif +#endif /* CONFIG_NUMA */ + #endif /* CONFIG_PCI */ #endif /* _ASM_RISCV_PCI_H */ diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 9cd81d4df031..90a4e06886a6 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -273,13 +273,19 @@ void __init setup_arch(char **cmdline_p) static int __init topology_init(void) { - int i; + int i, ret; + + for_each_online_node(i) + register_one_node(i); for_each_possible_cpu(i) { struct cpu *cpu = &per_cpu(cpu_devices, i); cpu->hotpluggable = cpu_has_hotplug(i); - register_cpu(cpu, i); + ret = register_cpu(cpu, i); + if (unlikely(ret)) + pr_warn("Warning: %s: register_cpu %d failed (%d)\n", + __func__, i, ret); } return 0; diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 96167d55ed98..5e276c25646f 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -45,13 +46,18 @@ void __init smp_prepare_cpus(unsigned int max_cpus) { int cpuid; int ret; + unsigned int curr_cpuid; + + curr_cpuid = smp_processor_id(); + numa_store_cpu_info(curr_cpuid); + numa_add_cpu(curr_cpuid); /* This covers non-smp usecase mandated by "nosmp" option */ if (max_cpus == 0) return; for_each_possible_cpu(cpuid) { - if (cpuid == smp_processor_id()) + if (cpuid == curr_cpuid) continue; if (cpu_ops[cpuid]->cpu_prepare) { ret = cpu_ops[cpuid]->cpu_prepare(cpuid); @@ -59,6 +65,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) continue; } set_cpu_present(cpuid, true); + numa_store_cpu_info(cpuid); } } @@ -79,6 +86,7 @@ void __init setup_smp(void) if (hart == cpuid_to_hartid_map(0)) { BUG_ON(found_boot_cpu); found_boot_cpu = 1; + early_map_cpu_to_node(0, of_node_to_nid(dn)); continue; } if (cpuid >= NR_CPUS) { @@ -88,6 +96,7 @@ void __init setup_smp(void) } cpuid_to_hartid_map(cpuid) = hart; + early_map_cpu_to_node(cpuid, of_node_to_nid(dn)); cpuid++; } @@ -153,6 +162,7 @@ asmlinkage __visible void smp_callin(void) current->active_mm = mm; notify_cpu_starting(curr_cpuid); + numa_add_cpu(curr_cpuid); update_siblings_masks(curr_cpuid); set_cpu_online(curr_cpuid, 1); diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 62716b43660b..30b61f2c6b87 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "../kernel/head.h" @@ -199,7 +200,6 @@ void __init setup_bootmem(void) early_init_fdt_scan_reserved_mem(); dma_contiguous_reserve(dma32_phys_limit); memblock_allow_resize(); - memblock_dump_all(); } #ifdef CONFIG_MMU @@ -654,8 +654,10 @@ void __init paging_init(void) void __init misc_mem_init(void) { + arch_numa_init(); sparse_init(); zone_sizes_init(); + memblock_dump_all(); } #ifdef CONFIG_SPARSEMEM_VMEMMAP -- cgit From 67d945778099b14324811fe67c5aff2cda7a7ad5 Mon Sep 17 00:00:00 2001 From: Guo Ren Date: Thu, 17 Dec 2020 16:01:39 +0000 Subject: riscv: Fixup wrong ftrace remove cflag We must use $(CC_FLAGS_FTRACE) instead of directly using -pg: when ftrace is implemented with -fpatchable-function-entry rather than -pg, removing only -pg leaves the patchable function entries in place and causes an error.
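As an aside, a minimal sketch of the per-function analogue (not part of this patch; the helper name is hypothetical): the kernel's notrace annotation keeps a single function out of ftrace's reach the same way CFLAGS_REMOVE_<file>.o = $(CC_FLAGS_FTRACE) does for a whole object file, regardless of which flags CC_FLAGS_FTRACE expands to:

	#include <linux/compiler.h>	/* defines notrace */

	/*
	 * Hypothetical helper that must never go through an ftrace
	 * trampoline, e.g. because it runs while kernel text is being
	 * rewritten; "notrace" suppresses the instrumentation for just
	 * this one function.
	 */
	static notrace void patching_helper(void)
	{
		/* safe to call from the text-patching path */
	}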
Signed-off-by: Guo Ren Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/Makefile | 4 ++-- arch/riscv/mm/Makefile | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index f6caf4d9ca15..22c66dde18a0 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -4,8 +4,8 @@ # ifdef CONFIG_FTRACE -CFLAGS_REMOVE_ftrace.o = -pg -CFLAGS_REMOVE_patch.o = -pg +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE) endif extra-y += head.o diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index c0185e556ca5..6b4b7ec1bda2 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -2,7 +2,7 @@ CFLAGS_init.o := -mcmodel=medany ifdef CONFIG_FTRACE -CFLAGS_REMOVE_init.o = -pg +CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE) endif KCOV_INSTRUMENT_init.o := n -- cgit From 5ad84adf5456313e285734102367c861c436c5ed Mon Sep 17 00:00:00 2001 From: Guo Ren Date: Thu, 17 Dec 2020 16:01:40 +0000 Subject: riscv: Fixup patch_text panic in ftrace Just like arm64, we can't trace the function in the patch_text path. Here is the bug log: [ 45.234334] Unable to handle kernel paging request at virtual address ffffffd38ae80900 [ 45.242313] Oops [#1] [ 45.244600] Modules linked in: [ 45.247678] CPU: 0 PID: 11 Comm: migration/0 Not tainted 5.9.0-00025-g9b7db83-dirty #215 [ 45.255797] epc: ffffffe00021689a ra : ffffffe00021718e sp : ffffffe01afabb58 [ 45.262955] gp : ffffffe00136afa0 tp : ffffffe01af94d00 t0 : 0000000000000002 [ 45.270200] t1 : 0000000000000000 t2 : 0000000000000001 s0 : ffffffe01afabc08 [ 45.277443] s1 : ffffffe0013718a8 a0 : 0000000000000000 a1 : ffffffe01afabba8 [ 45.284686] a2 : 0000000000000000 a3 : 0000000000000000 a4 : c4c16ad38ae80900 [ 45.291929] a5 : 0000000000000000 a6 : 0000000000000000 a7 : 0000000052464e43 [ 45.299173] s2 : 0000000000000001 s3 : ffffffe000206a60 s4 : ffffffe000206a60 [ 45.306415] s5 : 00000000000009ec s6 : ffffffe0013718a8 s7 : c4c16ad38ae80900 [ 45.313658] s8 : 0000000000000004 s9 : 0000000000000001 s10: 0000000000000001 [ 45.320902] s11: 0000000000000003 t3 : 0000000000000001 t4 : ffffffffd192fe79 [ 45.328144] t5 : ffffffffb8f80000 t6 : 0000000000040000 [ 45.333472] status: 0000000200000100 badaddr: ffffffd38ae80900 cause: 000000000000000f [ 45.341514] ---[ end trace d95102172248fdcf ]--- [ 45.346176] note: migration/0[11] exited with preempt_count 1 (gdb) x /2i $pc => 0xffffffe00021689a <__do_proc_dointvec+196>: sd zero,0(s7) 0xffffffe00021689e <__do_proc_dointvec+200>: li s11,0 (gdb) bt 0 __do_proc_dointvec (tbl_data=0x0, table=0xffffffe01afabba8, write=0, buffer=0x0, lenp=0x7bf897061f9a0800, ppos=0x4, conv=0x0, data=0x52464e43) at kernel/sysctl.c:581 1 0xffffffe00021718e in do_proc_dointvec (data=, conv=, ppos=, lenp=, buffer=, write=, table=) at kernel/sysctl.c:964 2 proc_dointvec_minmax (ppos=, lenp=, buffer=, write=, table=) at kernel/sysctl.c:964 3 proc_do_static_key (table=, write=1, buffer=0x0, lenp=0x0, ppos=0x7bf897061f9a0800) at kernel/sysctl.c:1643 4 0xffffffe000206792 in ftrace_make_call (rec=, addr=) at arch/riscv/kernel/ftrace.c:109 5 0xffffffe0002c9c04 in __ftrace_replace_code (rec=0xffffffe01ae40c30, enable=3) at kernel/trace/ftrace.c:2503 6 0xffffffe0002ca0b2 in ftrace_replace_code (mod_flags=) at kernel/trace/ftrace.c:2530 7 0xffffffe0002ca26a in ftrace_modify_all_code (command=5) at kernel/trace/ftrace.c:2677 8 0xffffffe0002ca30e in __ftrace_modify_code (data=) at kernel/trace/ftrace.c:2703 9 
0xffffffe0002c13b0 in multi_cpu_stop (data=0x0) at kernel/stop_machine.c:224 10 0xffffffe0002c0fde in cpu_stopper_thread (cpu=) at kernel/stop_machine.c:491 11 0xffffffe0002343de in smpboot_thread_fn (data=0x0) at kernel/smpboot.c:165 12 0xffffffe00022f8b4 in kthread (_create=0xffffffe01af0c040) at kernel/kthread.c:292 13 0xffffffe000201fac in handle_exception () at arch/riscv/kernel/entry.S:236 0xffffffe00020678a <+114>: auipc ra,0xffffe 0xffffffe00020678e <+118>: jalr -118(ra) # 0xffffffe000204714 0xffffffe000206792 <+122>: snez a0,a0 (gdb) disassemble patch_text_nosync Dump of assembler code for function patch_text_nosync: 0xffffffe000204714 <+0>: addi sp,sp,-32 0xffffffe000204716 <+2>: sd s0,16(sp) 0xffffffe000204718 <+4>: sd ra,24(sp) 0xffffffe00020471a <+6>: addi s0,sp,32 0xffffffe00020471c <+8>: auipc ra,0x0 0xffffffe000204720 <+12>: jalr -384(ra) # 0xffffffe00020459c 0xffffffe000204724 <+16>: beqz a0,0xffffffe00020472e 0xffffffe000204726 <+18>: ld ra,24(sp) 0xffffffe000204728 <+20>: ld s0,16(sp) 0xffffffe00020472a <+22>: addi sp,sp,32 0xffffffe00020472c <+24>: ret 0xffffffe00020472e <+26>: sd a0,-24(s0) 0xffffffe000204732 <+30>: auipc ra,0x4 0xffffffe000204736 <+34>: jalr -1464(ra) # 0xffffffe00020817a 0xffffffe00020473a <+38>: ld a0,-24(s0) 0xffffffe00020473e <+42>: ld ra,24(sp) 0xffffffe000204740 <+44>: ld s0,16(sp) 0xffffffe000204742 <+46>: addi sp,sp,32 0xffffffe000204744 <+48>: ret (gdb) disassemble flush_icache_all-4 Dump of assembler code for function flush_icache_all: 0xffffffe00020817a <+0>: addi sp,sp,-8 0xffffffe00020817c <+2>: sd ra,0(sp) 0xffffffe00020817e <+4>: auipc ra,0xfffff 0xffffffe000208182 <+8>: jalr -1822(ra) # 0xffffffe000206a60 0xffffffe000208186 <+12>: ld ra,0(sp) 0xffffffe000208188 <+14>: addi sp,sp,8 0xffffffe00020818a <+0>: addi sp,sp,-16 0xffffffe00020818c <+2>: sd s0,0(sp) 0xffffffe00020818e <+4>: sd ra,8(sp) 0xffffffe000208190 <+6>: addi s0,sp,16 0xffffffe000208192 <+8>: li a0,0 0xffffffe000208194 <+10>: auipc ra,0xfffff 0xffffffe000208198 <+14>: jalr -410(ra) # 0xffffffe000206ffa 0xffffffe00020819c <+18>: ld s0,0(sp) 0xffffffe00020819e <+20>: ld ra,8(sp) 0xffffffe0002081a0 <+22>: addi sp,sp,16 0xffffffe0002081a2 <+24>: ret (gdb) frame 5 (rec=0xffffffe01ae40c30, enable=3) at kernel/trace/ftrace.c:2503 2503 return ftrace_make_call(rec, ftrace_addr); (gdb) p /x rec->ip $2 = 0xffffffe00020817a -> flush_icache_all ! When we modify flush_icache_all's patchable entry to call ftrace_caller: - ftrace inserts the ftrace_caller call at flush_icache_all's prologue. - patch_text_nosync() then calls flush_icache_all to sync the I/D-cache, but at that point flush_icache_all itself has only been half modified.
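To make the cycle explicit, a condensed sketch (function names as used in this patch, bodies simplified rather than the exact upstream code):

	/* arch/riscv/kernel/patch.c, simplified for illustration */
	int patch_text_nosync(void *addr, const void *insns, size_t len)
	{
		int ret = patch_insn_write(addr, insns, len);	/* rewrite text */

		if (!ret)
			flush_icache_range((uintptr_t)addr,
					   (uintptr_t)addr + len);	/* sync icache */
		return ret;
	}

As the disassembly above shows, flush_icache_range() here ends up in flush_icache_all(), so while ftrace is rewriting flush_icache_all()'s own patchable entry, this call executes a half-modified prologue and faults. Building cacheflush.o without $(CC_FLAGS_FTRACE) takes flush_icache_all() off ftrace's candidate list and breaks the cycle.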
Link: https://lore.kernel.org/linux-riscv/CAJF2gTT=oDWesWe0JVWvTpGi60-gpbNhYLdFWN_5EbyeqoEDdw@mail.gmail.com/T/#t Signed-off-by: Guo Ren Reviewed-by: Atish Patra Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/Makefile | 1 + arch/riscv/mm/Makefile | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 22c66dde18a0..4eee315d7954 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -6,6 +6,7 @@ ifdef CONFIG_FTRACE CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE) endif extra-y += head.o diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index 6b4b7ec1bda2..7ebaef10ea1b 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -3,6 +3,7 @@ CFLAGS_init.o := -mcmodel=medany ifdef CONFIG_FTRACE CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_cacheflush.o = $(CC_FLAGS_FTRACE) endif KCOV_INSTRUMENT_init.o := n -- cgit From c22b0bcb1dd024cb9caad9230e3a387d8b061df5 Mon Sep 17 00:00:00 2001 From: Guo Ren Date: Thu, 17 Dec 2020 16:01:42 +0000 Subject: riscv: Add kprobes support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch enables "kprobe & kretprobe" to work with the ftrace interface. It utilizes a software breakpoint as the single-step mechanism. Some instructions which can't be single-step executed must be simulated in a kernel execution slot, such as: branch, jal, auipc, la ... Some instructions should be rejected for probing, and we use a blacklist to filter them, such as: ecall, ebreak, ... We use ebreak & c.ebreak to replace the original instruction, and the kprobe handler prepares an executable memory slot for out-of-line execution with a copy of the original instruction being probed. In the execution slot we add an ebreak behind the original instruction to simulate a single-step mechanism. The patch is based on packi's work [1] and csky's work [2].
- The kprobes_trampoline.S is taken entirely from packi's patch - The single-step mechanism is newly designed for riscv, which has no hardware single-step trap - The simulation code is from csky - Frankly, all of the code refers to other architectures' implementations [1] https://lore.kernel.org/linux-riscv/20181113195804.22825-1-me@packi.ch/ [2] https://lore.kernel.org/linux-csky/20200403044150.20562-9-guoren@kernel.org/ Signed-off-by: Guo Ren Co-developed-by: Patrick Stählin Signed-off-by: Patrick Stählin Acked-by: Masami Hiramatsu Tested-by: Zong Li Reviewed-by: Pekka Enberg Cc: Patrick Stählin Cc: Palmer Dabbelt Cc: Björn Töpel Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 2 + arch/riscv/include/asm/kprobes.h | 40 +++ arch/riscv/include/asm/probes.h | 24 ++ arch/riscv/kernel/Makefile | 1 + arch/riscv/kernel/probes/Makefile | 4 + arch/riscv/kernel/probes/decode-insn.c | 48 ++++ arch/riscv/kernel/probes/decode-insn.h | 18 ++ arch/riscv/kernel/probes/kprobes.c | 398 ++++++++++++++++++++++++++ arch/riscv/kernel/probes/kprobes_trampoline.S | 93 ++++++ arch/riscv/kernel/probes/simulate-insn.c | 85 ++++++ arch/riscv/kernel/probes/simulate-insn.h | 47 +++ arch/riscv/kernel/traps.c | 9 + arch/riscv/mm/fault.c | 4 + 13 files changed, 773 insertions(+) create mode 100644 arch/riscv/include/asm/probes.h create mode 100644 arch/riscv/kernel/probes/Makefile create mode 100644 arch/riscv/kernel/probes/decode-insn.c create mode 100644 arch/riscv/kernel/probes/decode-insn.h create mode 100644 arch/riscv/kernel/probes/kprobes.c create mode 100644 arch/riscv/kernel/probes/kprobes_trampoline.S create mode 100644 arch/riscv/kernel/probes/simulate-insn.c create mode 100644 arch/riscv/kernel/probes/simulate-insn.h (limited to 'arch/riscv/mm') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 1027384e2632..375f6f4d3038 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -71,6 +71,8 @@ config RISCV select HAVE_GCC_PLUGINS select HAVE_GENERIC_VDSO if MMU && 64BIT select HAVE_IRQ_TIME_ACCOUNTING + select HAVE_KPROBES + select HAVE_KRETPROBES select HAVE_PCI select HAVE_PERF_EVENTS select HAVE_PERF_REGS diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h index 56a98ea30731..4647d38018f6 100644 --- a/arch/riscv/include/asm/kprobes.h +++ b/arch/riscv/include/asm/kprobes.h @@ -11,4 +11,44 @@ #include +#ifdef CONFIG_KPROBES +#include +#include +#include + +#define __ARCH_WANT_KPROBES_INSN_SLOT +#define MAX_INSN_SIZE 2 + +#define flush_insn_slot(p) do { } while (0) +#define kretprobe_blacklist_size 0 + +#include + +struct prev_kprobe { + struct kprobe *kp; + unsigned int status; +}; + +/* Single step context for kprobe */ +struct kprobe_step_ctx { + unsigned long ss_pending; + unsigned long match_addr; +}; + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned int kprobe_status; + unsigned long saved_status; + struct prev_kprobe prev_kprobe; + struct kprobe_step_ctx ss_ctx; +}; + +void arch_remove_kprobe(struct kprobe *p); +int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr); +bool kprobe_breakpoint_handler(struct pt_regs *regs); +bool kprobe_single_step_handler(struct pt_regs *regs); +void kretprobe_trampoline(void); +void __kprobes *trampoline_probe_handler(struct pt_regs *regs); + +#endif /* CONFIG_KPROBES */ #endif /* _ASM_RISCV_KPROBES_H */ diff --git a/arch/riscv/include/asm/probes.h b/arch/riscv/include/asm/probes.h new file mode 100644 index 000000000000..a787e6d537b9 --- /dev/null +++ b/arch/riscv/include/asm/probes.h @@ -0,0 +1,24 @@ +/*
SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_RISCV_PROBES_H +#define _ASM_RISCV_PROBES_H + +typedef u32 probe_opcode_t; +typedef bool (probes_handler_t) (u32 opcode, unsigned long addr, struct pt_regs *); + +/* architecture specific copy of original instruction */ +struct arch_probe_insn { + probe_opcode_t *insn; + probes_handler_t *handler; + /* restore address after simulation */ + unsigned long restore; +}; + +#ifdef CONFIG_KPROBES +typedef u32 kprobe_opcode_t; +struct arch_specific_insn { + struct arch_probe_insn api; +}; +#endif + +#endif /* _ASM_RISCV_PROBES_H */ diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 4eee315d7954..3dc0abde988a 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -30,6 +30,7 @@ obj-y += riscv_ksyms.o obj-y += stacktrace.o obj-y += cacheinfo.o obj-y += patch.o +obj-y += probes/ obj-$(CONFIG_MMU) += vdso.o vdso/ obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o diff --git a/arch/riscv/kernel/probes/Makefile b/arch/riscv/kernel/probes/Makefile new file mode 100644 index 000000000000..8a39507285a3 --- /dev/null +++ b/arch/riscv/kernel/probes/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o +obj-$(CONFIG_KPROBES) += kprobes_trampoline.o +CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE) diff --git a/arch/riscv/kernel/probes/decode-insn.c b/arch/riscv/kernel/probes/decode-insn.c new file mode 100644 index 000000000000..0876c304ca77 --- /dev/null +++ b/arch/riscv/kernel/probes/decode-insn.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include +#include +#include +#include + +#include "decode-insn.h" +#include "simulate-insn.h" + +/* Return: + * INSN_REJECTED If instruction is one not allowed to kprobe, + * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. 
+ */ +enum probe_insn __kprobes +riscv_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api) +{ + probe_opcode_t insn = le32_to_cpu(*addr); + + /* + * Reject instructions list: + */ + RISCV_INSN_REJECTED(system, insn); + RISCV_INSN_REJECTED(fence, insn); + + /* + * Simulate instructions list: + * TODO: the REJECTED ones below need to be implemented + */ +#ifdef CONFIG_RISCV_ISA_C + RISCV_INSN_REJECTED(c_j, insn); + RISCV_INSN_REJECTED(c_jr, insn); + RISCV_INSN_REJECTED(c_jal, insn); + RISCV_INSN_REJECTED(c_jalr, insn); + RISCV_INSN_REJECTED(c_beqz, insn); + RISCV_INSN_REJECTED(c_bnez, insn); + RISCV_INSN_REJECTED(c_ebreak, insn); +#endif + + RISCV_INSN_REJECTED(auipc, insn); + RISCV_INSN_REJECTED(branch, insn); + + RISCV_INSN_SET_SIMULATE(jal, insn); + RISCV_INSN_SET_SIMULATE(jalr, insn); + + return INSN_GOOD; +} diff --git a/arch/riscv/kernel/probes/decode-insn.h b/arch/riscv/kernel/probes/decode-insn.h new file mode 100644 index 000000000000..42269a7d676d --- /dev/null +++ b/arch/riscv/kernel/probes/decode-insn.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _RISCV_KERNEL_KPROBES_DECODE_INSN_H +#define _RISCV_KERNEL_KPROBES_DECODE_INSN_H + +#include +#include + +enum probe_insn { + INSN_REJECTED, + INSN_GOOD_NO_SLOT, + INSN_GOOD, +}; + +enum probe_insn __kprobes +riscv_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *asi); + +#endif /* _RISCV_KERNEL_KPROBES_DECODE_INSN_H */ diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c new file mode 100644 index 000000000000..e60893bd87db --- /dev/null +++ b/arch/riscv/kernel/probes/kprobes.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "decode-insn.h" + +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +static void __kprobes +post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); + +static void __kprobes arch_prepare_ss_slot(struct kprobe *p) +{ + unsigned long offset = GET_INSN_LENGTH(p->opcode); + + p->ainsn.api.restore = (unsigned long)p->addr + offset; + + patch_text(p->ainsn.api.insn, p->opcode); + patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset), + __BUG_INSN_32); +} + +static void __kprobes arch_prepare_simulate(struct kprobe *p) +{ + p->ainsn.api.restore = 0; +} + +static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (p->ainsn.api.handler) + p->ainsn.api.handler((u32)p->opcode, + (unsigned long)p->addr, regs); + + post_kprobe_handler(kcb, regs); +} + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + unsigned long probe_addr = (unsigned long)p->addr; + + if (probe_addr & 0x1) { + pr_warn("Address not aligned.\n"); + + return -EINVAL; + } + + /* copy instruction */ + p->opcode = le32_to_cpu(*p->addr); + + /* decode instruction */ + switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) { + case INSN_REJECTED: /* insn not supported */ + return -EINVAL; + + case INSN_GOOD_NO_SLOT: /* insn need simulation */ + p->ainsn.api.insn = NULL; + break; + + case INSN_GOOD: /* instruction uses slot */ + p->ainsn.api.insn = get_insn_slot(); + if (!p->ainsn.api.insn) + return -ENOMEM; + break; + } + + /* prepare the instruction */ + if (p->ainsn.api.insn) + arch_prepare_ss_slot(p); + else + arch_prepare_simulate(p); + + return 0; +} + +/* install breakpoint in text 
*/ +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + if ((p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) + patch_text(p->addr, __BUG_INSN_32); + else + patch_text(p->addr, __BUG_INSN_16); +} + +/* remove breakpoint from text */ +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + patch_text(p->addr, p->opcode); +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ +} + +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; +} + +static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); + kcb->kprobe_status = kcb->prev_kprobe.status; +} + +static void __kprobes set_current_kprobe(struct kprobe *p) +{ + __this_cpu_write(current_kprobe, p); +} + +/* + * Interrupts need to be disabled before single-step mode is set, and not + * reenabled until after single-step mode ends. + * Without disabling interrupt on local CPU, there is a chance of + * interrupt occurrence in the period of exception return and start of + * out-of-line single-step, that result in wrongly single stepping + * into the interrupt handler. + */ +static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb, + struct pt_regs *regs) +{ + kcb->saved_status = regs->status; + regs->status &= ~SR_SPIE; +} + +static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, + struct pt_regs *regs) +{ + regs->status = kcb->saved_status; +} + +static void __kprobes +set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p) +{ + unsigned long offset = GET_INSN_LENGTH(p->opcode); + + kcb->ss_ctx.ss_pending = true; + kcb->ss_ctx.match_addr = addr + offset; +} + +static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb) +{ + kcb->ss_ctx.ss_pending = false; + kcb->ss_ctx.match_addr = 0; +} + +static void __kprobes setup_singlestep(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb, int reenter) +{ + unsigned long slot; + + if (reenter) { + save_previous_kprobe(kcb); + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_REENTER; + } else { + kcb->kprobe_status = KPROBE_HIT_SS; + } + + if (p->ainsn.api.insn) { + /* prepare for single stepping */ + slot = (unsigned long)p->ainsn.api.insn; + + set_ss_context(kcb, slot, p); /* mark pending ss */ + + /* IRQs and single stepping do not mix well. 
*/ + kprobes_save_local_irqflag(kcb, regs); + + instruction_pointer_set(regs, slot); + } else { + /* insn simulation */ + arch_simulate_insn(p, regs); + } +} + +static int __kprobes reenter_kprobe(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + switch (kcb->kprobe_status) { + case KPROBE_HIT_SSDONE: + case KPROBE_HIT_ACTIVE: + kprobes_inc_nmissed_count(p); + setup_singlestep(p, regs, kcb, 1); + break; + case KPROBE_HIT_SS: + case KPROBE_REENTER: + pr_warn("Unrecoverable kprobe detected.\n"); + dump_kprobe(p); + BUG(); + break; + default: + WARN_ON(1); + return 0; + } + + return 1; +} + +static void __kprobes +post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + + if (!cur) + return; + + /* return addr restore if non-branching insn */ + if (cur->ainsn.api.restore != 0) + regs->epc = cur->ainsn.api.restore; + + /* restore back original saved kprobe variables and continue */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + return; + } + + /* call post handler */ + kcb->kprobe_status = KPROBE_HIT_SSDONE; + if (cur->post_handler) { + /* post_handler can hit breakpoint and single step + * again, so we enable D-flag for recursive exception. + */ + cur->post_handler(cur, regs, 0); + } + + reset_current_kprobe(); +} + +int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + switch (kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the ip points back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. + */ + regs->epc = (unsigned long) cur->addr; + if (!instruction_pointer(regs)) + BUG(); + + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); + + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting, + * we can also use npre/npostfault count for accounting + * these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because instructions in the pre/post + * handler caused the page_fault, this could happen + * if handler tries to access user space by + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + /* + * In case the user-specified fault handler returned + * zero, try to fix up. + */ + if (fixup_exception(regs)) + return 1; + } + return 0; +} + +bool __kprobes +kprobe_breakpoint_handler(struct pt_regs *regs) +{ + struct kprobe *p, *cur_kprobe; + struct kprobe_ctlblk *kcb; + unsigned long addr = instruction_pointer(regs); + + kcb = get_kprobe_ctlblk(); + cur_kprobe = kprobe_running(); + + p = get_kprobe((kprobe_opcode_t *) addr); + + if (p) { + if (cur_kprobe) { + if (reenter_kprobe(p, regs, kcb)) + return true; + } else { + /* Probe hit */ + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + /* + * If we have no pre-handler or it returned 0, we + * continue with normal processing. If we have a + * pre-handler and it returned non-zero, it will + * modify the execution path and no need to single + * stepping. Let's just reset current kprobe and exit. 
+ * + * pre_handler can hit a breakpoint and can step thru + * before return. + */ + if (!p->pre_handler || !p->pre_handler(p, regs)) + setup_singlestep(p, regs, kcb, 0); + else + reset_current_kprobe(); + } + return true; + } + + /* + * The breakpoint instruction was removed right + * after we hit it. Another cpu has removed + * either a probepoint or a debugger breakpoint + * at this address. In either case, no further + * handling of this interrupt is appropriate. + * Return back to original instruction, and continue. + */ + return false; +} + +bool __kprobes +kprobe_single_step_handler(struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if ((kcb->ss_ctx.ss_pending) + && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) { + clear_ss_context(kcb); /* clear pending ss */ + + kprobes_restore_local_irqflag(kcb, regs); + + post_kprobe_handler(kcb, regs); + return true; + } + return false; +} + +/* + * Provide a blacklist of symbols identifying ranges which cannot be kprobed. + * This blacklist is exposed to userspace via debugfs (kprobes/blacklist). + */ +int __init arch_populate_kprobe_blacklist(void) +{ + int ret; + + ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, + (unsigned long)__irqentry_text_end); + return ret; +} + +void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) +{ + return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); +} + +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *)regs->ra; + ri->fp = NULL; + regs->ra = (unsigned long) &kretprobe_trampoline; +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + return 0; +} + +int __init arch_init_kprobes(void) +{ + return 0; +} diff --git a/arch/riscv/kernel/probes/kprobes_trampoline.S b/arch/riscv/kernel/probes/kprobes_trampoline.S new file mode 100644 index 000000000000..6e85d021e2a2 --- /dev/null +++ b/arch/riscv/kernel/probes/kprobes_trampoline.S @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Author: Patrick Stählin + */ +#include + +#include +#include + + .text + .altmacro + + .macro save_all_base_regs + REG_S x1, PT_RA(sp) + REG_S x3, PT_GP(sp) + REG_S x4, PT_TP(sp) + REG_S x5, PT_T0(sp) + REG_S x6, PT_T1(sp) + REG_S x7, PT_T2(sp) + REG_S x8, PT_S0(sp) + REG_S x9, PT_S1(sp) + REG_S x10, PT_A0(sp) + REG_S x11, PT_A1(sp) + REG_S x12, PT_A2(sp) + REG_S x13, PT_A3(sp) + REG_S x14, PT_A4(sp) + REG_S x15, PT_A5(sp) + REG_S x16, PT_A6(sp) + REG_S x17, PT_A7(sp) + REG_S x18, PT_S2(sp) + REG_S x19, PT_S3(sp) + REG_S x20, PT_S4(sp) + REG_S x21, PT_S5(sp) + REG_S x22, PT_S6(sp) + REG_S x23, PT_S7(sp) + REG_S x24, PT_S8(sp) + REG_S x25, PT_S9(sp) + REG_S x26, PT_S10(sp) + REG_S x27, PT_S11(sp) + REG_S x28, PT_T3(sp) + REG_S x29, PT_T4(sp) + REG_S x30, PT_T5(sp) + REG_S x31, PT_T6(sp) + .endm + + .macro restore_all_base_regs + REG_L x3, PT_GP(sp) + REG_L x4, PT_TP(sp) + REG_L x5, PT_T0(sp) + REG_L x6, PT_T1(sp) + REG_L x7, PT_T2(sp) + REG_L x8, PT_S0(sp) + REG_L x9, PT_S1(sp) + REG_L x10, PT_A0(sp) + REG_L x11, PT_A1(sp) + REG_L x12, PT_A2(sp) + REG_L x13, PT_A3(sp) + REG_L x14, PT_A4(sp) + REG_L x15, PT_A5(sp) + REG_L x16, PT_A6(sp) + REG_L x17, PT_A7(sp) + REG_L x18, PT_S2(sp) + REG_L x19, PT_S3(sp) + REG_L x20, PT_S4(sp) + REG_L x21, PT_S5(sp) + REG_L x22, PT_S6(sp) + REG_L x23, PT_S7(sp) + REG_L x24, PT_S8(sp) + REG_L x25, PT_S9(sp) + REG_L x26, PT_S10(sp) + REG_L x27, PT_S11(sp) + REG_L x28, PT_T3(sp) + REG_L x29, PT_T4(sp) + 
REG_L x30, PT_T5(sp) + REG_L x31, PT_T6(sp) + .endm + +ENTRY(kretprobe_trampoline) + addi sp, sp, -(PT_SIZE_ON_STACK) + save_all_base_regs + + move a0, sp /* pt_regs */ + + call trampoline_probe_handler + + /* use the result as the return-address */ + move ra, a0 + + restore_all_base_regs + addi sp, sp, PT_SIZE_ON_STACK + + ret +ENDPROC(kretprobe_trampoline) diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c new file mode 100644 index 000000000000..2519ce26377d --- /dev/null +++ b/arch/riscv/kernel/probes/simulate-insn.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include +#include + +#include "decode-insn.h" +#include "simulate-insn.h" + +static inline bool rv_insn_reg_get_val(struct pt_regs *regs, u32 index, + unsigned long *ptr) +{ + if (index == 0) + *ptr = 0; + else if (index <= 31) + *ptr = *((unsigned long *)regs + index); + else + return false; + + return true; +} + +static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index, + unsigned long val) +{ + if (index == 0) + return false; + else if (index <= 31) + *((unsigned long *)regs + index) = val; + else + return false; + + return true; +} + +bool __kprobes simulate_jal(u32 opcode, unsigned long addr, struct pt_regs *regs) +{ + /* + * 31 30 21 20 19 12 11 7 6 0 + * imm [20] | imm[10:1] | imm[11] | imm[19:12] | rd | opcode + * 1 10 1 8 5 JAL/J + */ + bool ret; + u32 imm; + u32 index = (opcode >> 7) & 0x1f; + + ret = rv_insn_reg_set_val(regs, index, addr + 4); + if (!ret) + return ret; + + imm = ((opcode >> 21) & 0x3ff) << 1; + imm |= ((opcode >> 20) & 0x1) << 11; + imm |= ((opcode >> 12) & 0xff) << 12; + imm |= ((opcode >> 31) & 0x1) << 20; + + instruction_pointer_set(regs, addr + sign_extend32((imm), 20)); + + return ret; +} + +bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs) +{ + /* + * 31 20 19 15 14 12 11 7 6 0 + * offset[11:0] | rs1 | 010 | rd | opcode + * 12 5 3 5 JALR/JR + */ + bool ret; + unsigned long base_addr; + u32 imm = (opcode >> 20) & 0xfff; + u32 rd_index = (opcode >> 7) & 0x1f; + u32 rs1_index = (opcode >> 15) & 0x1f; + + ret = rv_insn_reg_set_val(regs, rd_index, addr + 4); + if (!ret) + return ret; + + ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr); + if (!ret) + return ret; + + instruction_pointer_set(regs, (base_addr + sign_extend32((imm), 11))&~1); + + return ret; +} diff --git a/arch/riscv/kernel/probes/simulate-insn.h b/arch/riscv/kernel/probes/simulate-insn.h new file mode 100644 index 000000000000..cb6ff7dccb92 --- /dev/null +++ b/arch/riscv/kernel/probes/simulate-insn.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _RISCV_KERNEL_PROBES_SIMULATE_INSN_H +#define _RISCV_KERNEL_PROBES_SIMULATE_INSN_H + +#define __RISCV_INSN_FUNCS(name, mask, val) \ +static __always_inline bool riscv_insn_is_##name(probe_opcode_t code) \ +{ \ + BUILD_BUG_ON(~(mask) & (val)); \ + return (code & (mask)) == (val); \ +} \ +bool simulate_##name(u32 opcode, unsigned long addr, \ + struct pt_regs *regs) + +#define RISCV_INSN_REJECTED(name, code) \ + do { \ + if (riscv_insn_is_##name(code)) { \ + return INSN_REJECTED; \ + } \ + } while (0) + +__RISCV_INSN_FUNCS(system, 0x7f, 0x73); +__RISCV_INSN_FUNCS(fence, 0x7f, 0x0f); + +#define RISCV_INSN_SET_SIMULATE(name, code) \ + do { \ + if (riscv_insn_is_##name(code)) { \ + api->handler = simulate_##name; \ + return INSN_GOOD_NO_SLOT; \ + } \ + } while (0) + +__RISCV_INSN_FUNCS(c_j, 0xe003, 0xa001); +__RISCV_INSN_FUNCS(c_jr, 0xf007, 0x8002); 
+__RISCV_INSN_FUNCS(c_jal, 0xe003, 0x2001); +__RISCV_INSN_FUNCS(c_jalr, 0xf007, 0x9002); +__RISCV_INSN_FUNCS(c_beqz, 0xe003, 0xc001); +__RISCV_INSN_FUNCS(c_bnez, 0xe003, 0xe001); +__RISCV_INSN_FUNCS(c_ebreak, 0xffff, 0x9002); + +__RISCV_INSN_FUNCS(auipc, 0x7f, 0x17); +__RISCV_INSN_FUNCS(branch, 0x7f, 0x63); + +__RISCV_INSN_FUNCS(jal, 0x7f, 0x6f); +__RISCV_INSN_FUNCS(jalr, 0x707f, 0x67); + +#endif /* _RISCV_KERNEL_PROBES_SIMULATE_INSN_H */ diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index ad14f4466d92..19a788a271f7 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -145,6 +146,14 @@ static inline unsigned long get_break_insn_length(unsigned long pc) asmlinkage __visible void do_trap_break(struct pt_regs *regs) { +#ifdef CONFIG_KPROBES + if (kprobe_single_step_handler(regs)) + return; + + if (kprobe_breakpoint_handler(regs)) + return; +#endif + if (user_mode(regs)) force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc); #ifdef CONFIG_KGDB diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 33d284188f9a..1da870f33582 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -213,6 +214,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs) tsk = current; mm = tsk->mm; + if (kprobe_page_fault(regs, cause)) + return; + /* * Fault-in kernel-space virtual memory on-demand. * The 'reference' page table is init_mm.pgd. -- cgit From 74784081aac8a0f3636965fc230e2d3b7cc123c6 Mon Sep 17 00:00:00 2001 From: Guo Ren Date: Thu, 17 Dec 2020 16:01:44 +0000 Subject: riscv: Add uprobes support This patch adds support for uprobes on the riscv architecture. Just like kprobes, it supports single-stepping and instruction simulation.
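For context, a hedged usage sketch of the generic uprobes API this port plugs into (the sample_* names are made up for illustration, not code from this patch):

	#include <linux/uprobes.h>
	#include <linux/printk.h>
	#include <linux/ptrace.h>

	/* log every hit of the probed instruction */
	static int sample_handler(struct uprobe_consumer *self,
				  struct pt_regs *regs)
	{
		pr_info("uprobe hit at epc=0x%lx\n", instruction_pointer(regs));
		return 0;	/* keep the probe armed */
	}

	static struct uprobe_consumer sample_consumer = {
		.handler = sample_handler,
	};

Attaching this with uprobe_register(inode, offset, &sample_consumer) makes the core replace the instruction at that file offset with ebreak (or c.ebreak), and the hooks added below single-step or simulate the displaced original instruction.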
Signed-off-by: Guo Ren Reviewed-by: Pekka Enberg Cc: Oleg Nesterov Cc: Masami Hiramatsu Cc: Palmer Dabbelt Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 3 + arch/riscv/include/asm/processor.h | 1 + arch/riscv/include/asm/thread_info.h | 4 +- arch/riscv/include/asm/uprobes.h | 40 ++++++++ arch/riscv/kernel/probes/Makefile | 1 + arch/riscv/kernel/probes/uprobes.c | 186 +++++++++++++++++++++++++++++++++++ arch/riscv/kernel/signal.c | 3 + arch/riscv/kernel/traps.c | 10 ++ arch/riscv/mm/fault.c | 6 ++ 9 files changed, 253 insertions(+), 1 deletion(-) create mode 100644 arch/riscv/include/asm/uprobes.h create mode 100644 arch/riscv/kernel/probes/uprobes.c (limited to 'arch/riscv/mm') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 07a48068567e..7b5bb48055e7 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -160,6 +160,9 @@ config ARCH_SELECT_MEMORY_MODEL config ARCH_WANT_GENERAL_HUGETLB def_bool y +config ARCH_SUPPORTS_UPROBES + def_bool y + config SYS_SUPPORTS_HUGETLBFS depends on MMU def_bool y diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index bdddcd5c1b71..3a240037bde2 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -34,6 +34,7 @@ struct thread_struct { unsigned long sp; /* Kernel mode stack */ unsigned long s[12]; /* s[0]: frame pointer */ struct __riscv_d_ext_state fstate; + unsigned long bad_cause; }; #define INIT_THREAD { \ diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index 97bf5a1575d2..0e549a3089b3 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -75,6 +75,7 @@ struct thread_info { #define TIF_SYSCALL_AUDIT 7 /* syscall auditing */ #define TIF_SECCOMP 8 /* syscall secure computing */ #define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */ +#define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) @@ -84,10 +85,11 @@ struct thread_info { #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_WORK_MASK \ (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \ - _TIF_NOTIFY_SIGNAL) + _TIF_NOTIFY_SIGNAL | _TIF_UPROBE) #define _TIF_SYSCALL_WORK \ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \ diff --git a/arch/riscv/include/asm/uprobes.h b/arch/riscv/include/asm/uprobes.h new file mode 100644 index 000000000000..f2183e00fdd2 --- /dev/null +++ b/arch/riscv/include/asm/uprobes.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _ASM_RISCV_UPROBES_H +#define _ASM_RISCV_UPROBES_H + +#include +#include +#include + +#define MAX_UINSN_BYTES 8 + +#ifdef CONFIG_RISCV_ISA_C +#define UPROBE_SWBP_INSN __BUG_INSN_16 +#define UPROBE_SWBP_INSN_SIZE 2 +#else +#define UPROBE_SWBP_INSN __BUG_INSN_32 +#define UPROBE_SWBP_INSN_SIZE 4 +#endif +#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES + +typedef u32 uprobe_opcode_t; + +struct arch_uprobe_task { + unsigned long saved_cause; +}; + +struct arch_uprobe { + union { + u8 insn[MAX_UINSN_BYTES]; + u8 ixol[MAX_UINSN_BYTES]; + }; + struct arch_probe_insn api; + unsigned long insn_size; + bool simulate; +}; + +bool uprobe_breakpoint_handler(struct pt_regs *regs); +bool uprobe_single_step_handler(struct pt_regs *regs); + +#endif /* _ASM_RISCV_UPROBES_H */ diff --git 
a/arch/riscv/kernel/probes/Makefile b/arch/riscv/kernel/probes/Makefile index abbd1314e2fb..7f0840dcc31b 100644 --- a/arch/riscv/kernel/probes/Makefile +++ b/arch/riscv/kernel/probes/Makefile @@ -2,4 +2,5 @@ obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o obj-$(CONFIG_KPROBES) += kprobes_trampoline.o obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o +obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE) diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c new file mode 100644 index 000000000000..7a057b5f0adc --- /dev/null +++ b/arch/riscv/kernel/probes/uprobes.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include + +#include "decode-insn.h" + +#define UPROBE_TRAP_NR UINT_MAX + +bool is_swbp_insn(uprobe_opcode_t *insn) +{ +#ifdef CONFIG_RISCV_ISA_C + return (*insn & 0xffff) == UPROBE_SWBP_INSN; +#else + return *insn == UPROBE_SWBP_INSN; +#endif +} + +unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) +{ + return instruction_pointer(regs); +} + +int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, + unsigned long addr) +{ + probe_opcode_t opcode; + + opcode = *(probe_opcode_t *)(&auprobe->insn[0]); + + auprobe->insn_size = GET_INSN_LENGTH(opcode); + + switch (riscv_probe_decode_insn(&opcode, &auprobe->api)) { + case INSN_REJECTED: + return -EINVAL; + + case INSN_GOOD_NO_SLOT: + auprobe->simulate = true; + break; + + case INSN_GOOD: + auprobe->simulate = false; + break; + + default: + return -EINVAL; + } + + return 0; +} + +int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + utask->autask.saved_cause = current->thread.bad_cause; + current->thread.bad_cause = UPROBE_TRAP_NR; + + instruction_pointer_set(regs, utask->xol_vaddr); + + regs->status &= ~SR_SPIE; + + return 0; +} + +int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR); + + instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size); + + regs->status |= SR_SPIE; + + return 0; +} + +bool arch_uprobe_xol_was_trapped(struct task_struct *t) +{ + if (t->thread.bad_cause != UPROBE_TRAP_NR) + return true; + + return false; +} + +bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + probe_opcode_t insn; + unsigned long addr; + + if (!auprobe->simulate) + return false; + + insn = *(probe_opcode_t *)(&auprobe->insn[0]); + addr = instruction_pointer(regs); + + if (auprobe->api.handler) + auprobe->api.handler(insn, addr, regs); + + return true; +} + +void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + /* + * Task has received a fatal signal, so reset back to probed + * address.
+ */ + instruction_pointer_set(regs, utask->vaddr); + + regs->status &= ~SR_SPIE; +} + +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, + struct pt_regs *regs) +{ + if (ctx == RP_CHECK_CHAIN_CALL) + return regs->sp <= ret->stack; + else + return regs->sp < ret->stack; } + +unsigned long +arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, + struct pt_regs *regs) +{ + unsigned long ra; + + ra = regs->ra; + + regs->ra = trampoline_vaddr; + + return ra; +} + +int arch_uprobe_exception_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + return NOTIFY_DONE; +} + +bool uprobe_breakpoint_handler(struct pt_regs *regs) +{ + if (uprobe_pre_sstep_notifier(regs)) + return true; + + return false; +} + +bool uprobe_single_step_handler(struct pt_regs *regs) +{ + if (uprobe_post_sstep_notifier(regs)) + return true; + + return false; +} + +void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len) +{ + /* Initialize the slot */ + void *kaddr = kmap_atomic(page); + void *dst = kaddr + (vaddr & ~PAGE_MASK); + + memcpy(dst, src, len); + + /* Add an ebreak after the opcode to simulate a single step */ + if (vaddr) { + dst += GET_INSN_LENGTH(*(probe_opcode_t *)src); + *(uprobe_opcode_t *)dst = __BUG_INSN_32; + } + + kunmap_atomic(kaddr); + + /* + * We probably need flush_icache_user_page() but it needs a vma. + * This should work on most architectures by default. If an + * architecture needs to do something different it can define + * its own version of the function. + */ + flush_dcache_page(page); } diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 469aef8ed922..65942b3748b4 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -309,6 +309,9 @@ static void do_signal(struct pt_regs *regs) asmlinkage __visible void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { + if (thread_info_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + /* Handle pending signal delivery */ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 19a788a271f7..2bca2fab2d27 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -76,6 +76,8 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) static void do_trap_error(struct pt_regs *regs, int signo, int code, unsigned long addr, const char *str) { + current->thread.bad_cause = regs->cause; + if (user_mode(regs)) { do_trap(regs, signo, code, addr); } else { @@ -153,6 +155,14 @@ asmlinkage __visible void do_trap_break(struct pt_regs *regs) if (kprobe_breakpoint_handler(regs)) return; #endif +#ifdef CONFIG_UPROBES + if (uprobe_single_step_handler(regs)) + return; + + if (uprobe_breakpoint_handler(regs)) + return; +#endif + current->thread.bad_cause = regs->cause; if (user_mode(regs)) force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc); diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 1da870f33582..8f17519208c7 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -240,6 +240,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs) * in an atomic region, then we must not take the fault.
*/ if (unlikely(faulthandler_disabled() || !mm)) { + tsk->thread.bad_cause = cause; no_context(regs, addr); return; } @@ -262,16 +263,19 @@ retry: mmap_read_lock(mm); vma = find_vma(mm, addr); if (unlikely(!vma)) { + tsk->thread.bad_cause = cause; bad_area(regs, mm, code, addr); return; } if (likely(vma->vm_start <= addr)) goto good_area; if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { + tsk->thread.bad_cause = cause; bad_area(regs, mm, code, addr); return; } if (unlikely(expand_stack(vma, addr))) { + tsk->thread.bad_cause = cause; bad_area(regs, mm, code, addr); return; } @@ -284,6 +288,7 @@ good_area: code = SEGV_ACCERR; if (unlikely(access_error(cause, vma))) { + tsk->thread.bad_cause = cause; bad_area(regs, mm, code, addr); return; } @@ -317,6 +322,7 @@ good_area: mmap_read_unlock(mm); if (unlikely(fault & VM_FAULT_ERROR)) { + tsk->thread.bad_cause = cause; mm_fault_error(regs, addr, fault); return; } -- cgit From f105aa940e78a87b6b6c82d7c230db86386ff013 Mon Sep 17 00:00:00 2001 From: Vitaly Wool Date: Sat, 16 Jan 2021 01:49:48 +0200 Subject: riscv: add BUILTIN_DTB support for MMU-enabled targets Sometimes, especially in a production system, we may not want to use a "smart bootloader" like U-Boot to load the kernel, ramdisk, and device tree from a filesystem on eMMC, but rather to load the kernel from a NAND partition and run it as soon as possible. In that case it is convenient to have the device tree compiled into the kernel binary. Since this case is not limited to MMU-less systems, let's support it for those with an MMU enabled too. While at it, provide __dtb_start as a parameter to setup_vm() in the BUILTIN_DTB case, so we don't have to duplicate BUILTIN_DTB-specific processing in the MMU-enabled and MMU-disabled versions of setup_vm(). Signed-off-by: Vitaly Wool Reviewed-by: Anup Patel Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 1 - arch/riscv/kernel/head.S | 4 ++++ arch/riscv/mm/init.c | 19 +++++++++++++------ 3 files changed, 17 insertions(+), 7 deletions(-) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index c8d45da8edd9..8eadd1cbd524 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -460,7 +460,6 @@ endmenu config BUILTIN_DTB def_bool n - depends on RISCV_M_MODE depends on OF menu "Power management options" diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 16e9941900c4..f5a9bad86e58 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -260,7 +260,11 @@ clear_bss_done: /* Initialize page tables and relocate to virtual addresses */ la sp, init_thread_union + THREAD_SIZE +#ifdef CONFIG_BUILTIN_DTB + la a0, __dtb_start +#else mv a0, s1 +#endif /* CONFIG_BUILTIN_DTB */ call setup_vm #ifdef CONFIG_MMU la a0, early_pg_dir diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 30b61f2c6b87..45faad7c4291 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -192,10 +192,13 @@ void __init setup_bootmem(void) #endif /* CONFIG_BLK_DEV_INITRD */ /* - * Avoid using early_init_fdt_reserve_self() since __pa() does + * If DTB is built in, no need to reserve its memblock.
+ * Otherwise, do reserve it but avoid using + * early_init_fdt_reserve_self() since __pa() does * not work for DTB pointers that are fixmap addresses */ - memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); + if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) + memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); early_init_fdt_scan_reserved_mem(); dma_contiguous_reserve(dma32_phys_limit); @@ -499,6 +502,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) /* Setup early PMD for DTB */ create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA, (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE); +#ifndef CONFIG_BUILTIN_DTB /* Create two consecutive PMD mappings for FDT early scan */ pa = dtb_pa & ~(PMD_SIZE - 1); create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA, @@ -506,7 +510,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE, pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL); dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1)); +#else /* CONFIG_BUILTIN_DTB */ + dtb_early_va = __va(dtb_pa); +#endif /* CONFIG_BUILTIN_DTB */ #else +#ifndef CONFIG_BUILTIN_DTB /* Create two consecutive PGD mappings for FDT early scan */ pa = dtb_pa & ~(PGDIR_SIZE - 1); create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA, @@ -514,6 +522,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE, pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL); dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1)); +#else /* CONFIG_BUILTIN_DTB */ + dtb_early_va = __va(dtb_pa); +#endif /* CONFIG_BUILTIN_DTB */ #endif dtb_early_pa = dtb_pa; @@ -604,11 +615,7 @@ static void __init setup_vm_final(void) #else asmlinkage void __init setup_vm(uintptr_t dtb_pa) { -#ifdef CONFIG_BUILTIN_DTB - dtb_early_va = (void *) __dtb_start; -#else dtb_early_va = (void *)dtb_pa; -#endif dtb_early_pa = dtb_pa; } -- cgit From aec33b54af55ef025e03e3dfbab3b8abe00eaa22 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Fri, 15 Jan 2021 13:46:06 +0800 Subject: riscv: Convert to reserve_initrd_mem() Convert to the generic reserve_initrd_mem() function. Signed-off-by: Kefeng Wang Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/init.c | 54 +--------------------------------------------------- 1 file changed, 1 insertion(+), 53 deletions(-) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 45faad7c4291..c9bbab76358b 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -106,55 +106,6 @@ void __init mem_init(void) print_vm_layout(); } -#ifdef CONFIG_BLK_DEV_INITRD -static void __init setup_initrd(void) -{ - phys_addr_t start; - unsigned long size; - - /* Ignore the virtul address computed during device tree parsing */ - initrd_start = initrd_end = 0; - - if (!phys_initrd_size) - return; - /* - * Round the memory region to page boundaries as per free_initrd_mem() - * This allows us to detect whether the pages overlapping the initrd - * are in use, but more importantly, reserves the entire set of pages - * as we don't want these pages allocated for other purposes.
- */ - start = round_down(phys_initrd_start, PAGE_SIZE); - size = phys_initrd_size + (phys_initrd_start - start); - size = round_up(size, PAGE_SIZE); - - if (!memblock_is_region_memory(start, size)) { - pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region", - (u64)start, size); - goto disable; - } - - if (memblock_is_region_reserved(start, size)) { - pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n", - (u64)start, size); - goto disable; - } - - memblock_reserve(start, size); - /* Now convert initrd to virtual addresses */ - initrd_start = (unsigned long)__va(phys_initrd_start); - initrd_end = initrd_start + phys_initrd_size; - initrd_below_start_ok = 1; - - pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", - (void *)(initrd_start), size); - return; -disable: - pr_cont(" - disabling initrd\n"); - initrd_start = 0; - initrd_end = 0; -} -#endif /* CONFIG_BLK_DEV_INITRD */ - void __init setup_bootmem(void) { phys_addr_t mem_start = 0; @@ -187,10 +138,7 @@ void __init setup_bootmem(void) dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); set_max_mapnr(max_low_pfn); -#ifdef CONFIG_BLK_DEV_INITRD - setup_initrd(); -#endif /* CONFIG_BLK_DEV_INITRD */ - + reserve_initrd_mem(); /* * If DTB is built in, no need to reserve its memblock. * Otherwise, do reserve it but avoid using -- cgit From e178d670f251b6947d6be99c0014e9a57ad4f0e0 Mon Sep 17 00:00:00 2001 From: Nylon Chen Date: Sat, 16 Jan 2021 13:58:35 +0800 Subject: riscv/kasan: add KASAN_VMALLOC support This follows the x86/s390 approach: the early shadow page is not mapped to cover the VMALLOC space. Instead, prepopulate the top-level page table for the range that would otherwise be empty; lower levels are filled dynamically upon memory allocation while booting. Signed-off-by: Nylon Chen Signed-off-by: Nick Hu Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 1 + arch/riscv/mm/kasan_init.c | 57 +++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 57 insertions(+), 1 deletion(-) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 8eadd1cbd524..3832a537c5d6 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -57,6 +57,7 @@ config RISCV select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KASAN if MMU && 64BIT + select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT select HAVE_ARCH_KGDB select HAVE_ARCH_KGDB_QXFER_PKT select HAVE_ARCH_MMAP_RND_BITS if MMU diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index 12ddd1f6bf70..4b9149f963d3 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -9,6 +9,19 @@ #include <linux/pgtable.h> #include <asm/tlbflush.h> #include <asm/fixmap.h> +#include <asm/pgalloc.h> + +static __init void *early_alloc(size_t size, int node) +{ + void *ptr = memblock_alloc_try_nid(size, size, + __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node); + + if (!ptr) + panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n", + __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS)); + + return ptr; +} extern pgd_t early_pg_dir[PTRS_PER_PGD]; asmlinkage void __init kasan_early_init(void) @@ -83,6 +96,40 @@ static void __init populate(void *start, void *end) memset(start, 0, end - start); } +void __init kasan_shallow_populate(void *start, void *end) +{ + unsigned long vaddr = (unsigned long)start & PAGE_MASK; + unsigned long vend = PAGE_ALIGN((unsigned long)end); + unsigned long pfn; + int index; + void *p; + pud_t *pud_dir, *pud_k; + pgd_t *pgd_dir, *pgd_k; + p4d_t *p4d_dir, *p4d_k; + + while (vaddr < vend) { + index =
pgd_index(vaddr); + pfn = csr_read(CSR_SATP) & SATP_PPN; + pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index; + pgd_k = init_mm.pgd + index; + pgd_dir = pgd_offset_k(vaddr); + set_pgd(pgd_dir, *pgd_k); + + p4d_dir = p4d_offset(pgd_dir, vaddr); + p4d_k = p4d_offset(pgd_k, vaddr); + + vaddr = (vaddr + PUD_SIZE) & PUD_MASK; + pud_dir = pud_offset(p4d_dir, vaddr); + pud_k = pud_offset(p4d_k, vaddr); + + if (pud_present(*pud_dir)) { + p = early_alloc(PAGE_SIZE, NUMA_NO_NODE); + pud_populate(&init_mm, pud_dir, p); + } + vaddr += PAGE_SIZE; + } +} + void __init kasan_init(void) { phys_addr_t _start, _end; @@ -90,7 +137,15 @@ void __init kasan_init(void) kasan_populate_early_shadow((void *)KASAN_SHADOW_START, (void *)kasan_mem_to_shadow((void *) - VMALLOC_END)); + VMEMMAP_END)); + if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) + kasan_shallow_populate( + (void *)kasan_mem_to_shadow((void *)VMALLOC_START), + (void *)kasan_mem_to_shadow((void *)VMALLOC_END)); + else + kasan_populate_early_shadow( + (void *)kasan_mem_to_shadow((void *)VMALLOC_START), + (void *)kasan_mem_to_shadow((void *)VMALLOC_END)); for_each_mem_range(i, &_start, &_end) { void *start = (void *)_start; -- cgit From 65d4b9c5301749d18b5ec1323fdefecefab72687 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Wed, 3 Feb 2021 15:19:07 +0530 Subject: RISC-V: Implement ASID allocator Currently, we do a local TLB flush on every MM switch. This is very harsh on performance because we are forcing page table walks after every MM switch. This patch implements an ASID allocator for assigning an ASID to an MM context. The number of ASIDs is limited in HW, so we create a logical entity named CONTEXTID to assign to an MM context. The lower bits of CONTEXTID are the ASID and the upper bits are the VERSION number. The number of usable ASID bits supported by HW is detected at boot-time by writing 1s to the ASID bits in the SATP CSR. We allocate a new CONTEXTID on the first MM switch of an MM context, where the ASID is allocated from an ASID bitmap and the VERSION is provided by an atomic counter. At the time of allocating a new CONTEXTID, if we run out of available ASIDs then: 1. We flush the ASID bitmap 2. Increment the current VERSION atomic counter 3. Re-allocate an ASID from the ASID bitmap 4. Flush the TLB on all CPUs 5. Try CONTEXTID re-assignment on all CPUs Please note that we don't use ASID #0 because it is used at boot-time by all CPUs for the initial MM context. Also, a newly created context is always assigned CONTEXTID #0 (i.e. VERSION #0 and ASID #0), which is an invalid context in our implementation. Using the above approach, we have virtually infinite CONTEXTIDs on top of a limited number of HW ASIDs. This approach is inspired by the ASID allocator used for Linux ARM/ARM64, but we have adapted it for RISC-V. Overall, this ASID allocator helps us reduce the rate of local TLB flushes on every CPU, thereby increasing performance. This patch was tested on a QEMU virt machine, Spike, and the SiFive Unleashed board. On the QEMU virt machine, we see some (approx. 3-5%) performance improvement with the SW-emulated TLBs provided by QEMU. Unfortunately, the ASID bits of the SATP CSR are not implemented on Spike or the SiFive Unleashed board, so we don't see any change in performance there. On real HW with all ASID bits implemented, the performance gains will be much greater due to improved sharing of the TLB among different processes.
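To make the CONTEXTID layout concrete, here is a minimal sketch (illustrative only — the patch below open-codes these masks inline, and the helper names here are made up): given the file-static asid_mask introduced by the patch, with asid_bits usable bits detected at boot, num_asids = 1UL << asid_bits and asid_mask = num_asids - 1:

	/* Hedged sketch, not part of the patch: how a CONTEXTID packs its fields. */
	static inline unsigned long cntx_asid(unsigned long cntx)
	{
		return cntx & asid_mask;	/* low bits: HW ASID written into SATP */
	}

	static inline unsigned long cntx_version(unsigned long cntx)
	{
		return cntx & ~asid_mask;	/* high bits: allocator VERSION */
	}

This matches the expressions the patch itself uses, e.g. (cntx & asid_mask) << SATP_ASID_SHIFT when programming SATP, and cntx & ~asid_mask when comparing against current_version.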
Signed-off-by: Anup Patel Reviewed-by: Palmer Dabbelt Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/csr.h | 6 + arch/riscv/include/asm/mmu.h | 2 + arch/riscv/include/asm/mmu_context.h | 10 ++ arch/riscv/mm/context.c | 265 ++++++++++++++++++++++++++++++++++- 4 files changed, 279 insertions(+), 4 deletions(-) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index cec462e198ce..caadfc1d7487 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -41,10 +41,16 @@ #define SATP_PPN _AC(0x003FFFFF, UL) #define SATP_MODE_32 _AC(0x80000000, UL) #define SATP_MODE SATP_MODE_32 +#define SATP_ASID_BITS 9 +#define SATP_ASID_SHIFT 22 +#define SATP_ASID_MASK _AC(0x1FF, UL) #else #define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL) #define SATP_MODE_39 _AC(0x8000000000000000, UL) #define SATP_MODE SATP_MODE_39 +#define SATP_ASID_BITS 16 +#define SATP_ASID_SHIFT 44 +#define SATP_ASID_MASK _AC(0xFFFF, UL) #endif /* Exception cause high bit - is an interrupt if set */ diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index dabcf2cfb3dc..0099dc116168 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -12,6 +12,8 @@ typedef struct { #ifndef CONFIG_MMU unsigned long end_brk; +#else + atomic_long_t id; #endif void *vdso; #ifdef CONFIG_SMP diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h index 250defa06f3a..b0659413a080 100644 --- a/arch/riscv/include/asm/mmu_context.h +++ b/arch/riscv/include/asm/mmu_context.h @@ -23,6 +23,16 @@ static inline void activate_mm(struct mm_struct *prev, switch_mm(prev, next, NULL); } +#define init_new_context init_new_context +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ +#ifdef CONFIG_MMU + atomic_long_set(&mm->context.id, 0); +#endif + return 0; +} + #include <asm-generic/mmu_context.h> #endif /* _ASM_RISCV_MMU_CONTEXT_H */ diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c index 613ec81a8979..68aa312fc352 100644 --- a/arch/riscv/mm/context.c +++ b/arch/riscv/mm/context.c @@ -2,13 +2,273 @@ /* * Copyright (C) 2012 Regents of the University of California * Copyright (C) 2017 SiFive + * Copyright (C) 2021 Western Digital Corporation or its affiliates. */ +#include <linux/bitops.h> +#include <linux/cpumask.h> #include <linux/mm.h> +#include <linux/percpu.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/static_key.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> +#ifdef CONFIG_MMU + +static DEFINE_STATIC_KEY_FALSE(use_asid_allocator); + +static unsigned long asid_bits; +static unsigned long num_asids; +static unsigned long asid_mask; + +static atomic_long_t current_version; + +static DEFINE_RAW_SPINLOCK(context_lock); +static cpumask_t context_tlb_flush_pending; +static unsigned long *context_asid_map; + +static DEFINE_PER_CPU(atomic_long_t, active_context); +static DEFINE_PER_CPU(unsigned long, reserved_context); + +static bool check_update_reserved_context(unsigned long cntx, + unsigned long newcntx) +{ + int cpu; + bool hit = false; + + /* + * Iterate over the set of reserved CONTEXT looking for a match. + * If we find one, then we can update our mm to use new CONTEXT + * (i.e. the same CONTEXT in the current_version) but we can't + * exit the loop early, since we need to ensure that all copies + * of the old CONTEXT are updated to reflect the mm. Failure to do + * so could result in us missing the reserved CONTEXT in a future + * version.
+ */ + for_each_possible_cpu(cpu) { + if (per_cpu(reserved_context, cpu) == cntx) { + hit = true; + per_cpu(reserved_context, cpu) = newcntx; + } + } + + return hit; +} + +static void __flush_context(void) +{ + int i; + unsigned long cntx; + + /* Must be called with context_lock held */ + lockdep_assert_held(&context_lock); + + /* Update the list of reserved ASIDs and the ASID bitmap. */ + bitmap_clear(context_asid_map, 0, num_asids); + + /* Mark already active ASIDs as used */ + for_each_possible_cpu(i) { + cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0); + /* + * If this CPU has already been through a rollover, but + * hasn't run another task in the meantime, we must preserve + * its reserved CONTEXT, as this is the only trace we have of + * the process it is still running. + */ + if (cntx == 0) + cntx = per_cpu(reserved_context, i); + + __set_bit(cntx & asid_mask, context_asid_map); + per_cpu(reserved_context, i) = cntx; + } + + /* Mark ASID #0 as used because it is used at boot-time */ + __set_bit(0, context_asid_map); + + /* Queue a TLB invalidation for each CPU on next context-switch */ + cpumask_setall(&context_tlb_flush_pending); +} + +static unsigned long __new_context(struct mm_struct *mm) +{ + static u32 cur_idx = 1; + unsigned long cntx = atomic_long_read(&mm->context.id); + unsigned long asid, ver = atomic_long_read(&current_version); + + /* Must be called with context_lock held */ + lockdep_assert_held(&context_lock); + + if (cntx != 0) { + unsigned long newcntx = ver | (cntx & asid_mask); + + /* + * If our current CONTEXT was active during a rollover, we + * can continue to use it and this was just a false alarm. + */ + if (check_update_reserved_context(cntx, newcntx)) + return newcntx; + + /* + * We had a valid CONTEXT in a previous life, so try to + * re-use it if possible. + */ + if (!__test_and_set_bit(cntx & asid_mask, context_asid_map)) + return newcntx; + } + + /* + * Allocate a free ASID. If we can't find one then increment + * current_version and flush all ASIDs. + */ + asid = find_next_zero_bit(context_asid_map, num_asids, cur_idx); + if (asid != num_asids) + goto set_asid; + + /* We're out of ASIDs, so increment current_version */ + ver = atomic_long_add_return_relaxed(num_asids, &current_version); + + /* Flush everything */ + __flush_context(); + + /* We have more ASIDs than CPUs, so this will always succeed */ + asid = find_next_zero_bit(context_asid_map, num_asids, 1); + +set_asid: + __set_bit(asid, context_asid_map); + cur_idx = asid; + return asid | ver; +} + +static void set_mm_asid(struct mm_struct *mm, unsigned int cpu) +{ + unsigned long flags; + bool need_flush_tlb = false; + unsigned long cntx, old_active_cntx; + + cntx = atomic_long_read(&mm->context.id); + + /* + * If our active_context is non-zero and the context matches the + * current_version, then we update the active_context entry with a + * relaxed cmpxchg. + * + * Following is how we handle racing with a concurrent rollover: + * + * - We get a zero back from the cmpxchg and end up waiting on the + * lock. Taking the lock synchronises with the rollover and so + * we are forced to see the updated version. + * + * - We get a valid context back from the cmpxchg then we continue + * using old ASID because __flush_context() would have marked ASID + * of active_context as used and next context switch we will + * allocate new context.
+ */ + old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu)); + if (old_active_cntx && + ((cntx & ~asid_mask) == atomic_long_read(&current_version)) && + atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu), + old_active_cntx, cntx)) + goto switch_mm_fast; + + raw_spin_lock_irqsave(&context_lock, flags); + + /* Check that our ASID belongs to the current_version. */ + cntx = atomic_long_read(&mm->context.id); + if ((cntx & ~asid_mask) != atomic_long_read(&current_version)) { + cntx = __new_context(mm); + atomic_long_set(&mm->context.id, cntx); + } + + if (cpumask_test_and_clear_cpu(cpu, &context_tlb_flush_pending)) + need_flush_tlb = true; + + atomic_long_set(&per_cpu(active_context, cpu), cntx); + + raw_spin_unlock_irqrestore(&context_lock, flags); + +switch_mm_fast: + csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | + ((cntx & asid_mask) << SATP_ASID_SHIFT) | + SATP_MODE); + + if (need_flush_tlb) + local_flush_tlb_all(); +} + +static void set_mm_noasid(struct mm_struct *mm) +{ + /* Switch the page table and blindly nuke entire local TLB */ + csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | SATP_MODE); + local_flush_tlb_all(); +} + +static inline void set_mm(struct mm_struct *mm, unsigned int cpu) +{ + if (static_branch_unlikely(&use_asid_allocator)) + set_mm_asid(mm, cpu); + else + set_mm_noasid(mm); +} + +static int asids_init(void) +{ + unsigned long old; + + /* Figure out the number of ASID bits in HW */ + old = csr_read(CSR_SATP); + asid_bits = old | (SATP_ASID_MASK << SATP_ASID_SHIFT); + csr_write(CSR_SATP, asid_bits); + asid_bits = (csr_read(CSR_SATP) >> SATP_ASID_SHIFT) & SATP_ASID_MASK; + asid_bits = fls_long(asid_bits); + csr_write(CSR_SATP, old); + + /* + * In the process of determining the number of ASID bits (above) + * we polluted the TLB of the current HART, so do a TLB flush + * to remove the unwanted TLB entries. + */ + local_flush_tlb_all(); + + /* Pre-compute ASID details */ + num_asids = 1 << asid_bits; + asid_mask = num_asids - 1; + + /* + * Use the ASID allocator only if the number of HW ASIDs is + * at least twice the number of CPUs + */ + if (num_asids > (2 * num_possible_cpus())) { + atomic_long_set(&current_version, num_asids); + + context_asid_map = kcalloc(BITS_TO_LONGS(num_asids), + sizeof(*context_asid_map), GFP_KERNEL); + if (!context_asid_map) + panic("Failed to allocate bitmap for %lu ASIDs\n", + num_asids); + + __set_bit(0, context_asid_map); + + static_branch_enable(&use_asid_allocator); + + pr_info("ASID allocator using %lu bits (%lu entries)\n", + asid_bits, num_asids); + } else { + pr_info("ASID allocator disabled\n"); + } + + return 0; +} +early_initcall(asids_init); +#else +static inline void set_mm(struct mm_struct *mm, unsigned int cpu) +{ + /* Nothing to do here when there is no MMU */ +} +#endif + /* * When necessary, performs a deferred icache flush for the given MM context, * on the local CPU.
RISC-V has no direct mechanism for instruction cache @@ -58,10 +318,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, cpumask_clear_cpu(cpu, mm_cpumask(prev)); cpumask_set_cpu(cpu, mm_cpumask(next)); -#ifdef CONFIG_MMU - csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE); - local_flush_tlb_all(); -#endif + set_mm(next, cpu); flush_icache_deferred(next); } -- cgit From 0f02de4481da684aad6589aed0ea47bd1ab391c9 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 21 Feb 2021 09:22:33 -0500 Subject: riscv: Get rid of MAX_EARLY_MAPPING_SIZE At the early boot stage, we have a whole PGDIR to map the kernel, so there is no need to restrict the early mapping size to 128MB. Removing this define also allows us to simplify some compile time logic. This fixes large kernel mappings with a size greater than 128MB, as is the case for syzbot kernels, whose size was just ~130MB. Note that on rv64, for now, we are then limited to PGDIR size for early mapping as we can't use PGD mappings (see [1]). That should be enough given the relatively small size of syzbot kernels compared to PGDIR_SIZE, which is 1GB. [1] https://lore.kernel.org/lkml/20200603153608.30056-1-alex@ghiti.fr/ Reported-by: Dmitry Vyukov Signed-off-by: Alexandre Ghiti Tested-by: Dmitry Vyukov Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/init.c | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index c9bbab76358b..3e044f11cef6 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -165,8 +165,6 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; -#define MAX_EARLY_MAPPING_SIZE SZ_128M - pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) @@ -241,13 +239,7 @@ static void __init create_pte_mapping(pte_t *ptep, pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss; pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss; - -#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE -#define NUM_EARLY_PMDS 1UL -#else -#define NUM_EARLY_PMDS (1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE) -#endif -pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE); +pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); static pmd_t *__init get_pmd_virt_early(phys_addr_t pa) @@ -269,11 +261,9 @@ static pmd_t *get_pmd_virt_late(phys_addr_t pa) static phys_addr_t __init alloc_pmd_early(uintptr_t va) { - uintptr_t pmd_num; + BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT); - pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT; - BUG_ON(pmd_num >= NUM_EARLY_PMDS); - return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD]; + return (uintptr_t)early_pmd; } static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va) @@ -391,7 +381,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) uintptr_t va, pa, end_va; uintptr_t load_pa = (uintptr_t)(&_start); uintptr_t load_sz = (uintptr_t)(&_end) - load_pa; - uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE); + uintptr_t map_size; #ifndef __PAGETABLE_PMD_FOLDED pmd_t fix_bmap_spmd, fix_bmap_epmd; #endif @@ -403,12 +393,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) * Enforce boot alignment requirements of RV32 and *
RV64 by only allowing PMD or PGD mappings. */ - BUG_ON(map_size == PAGE_SIZE); + map_size = PMD_SIZE; /* Sanity check alignment and size */ BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0); BUG_ON((load_pa % map_size) != 0); - BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE); pt_ops.alloc_pte = alloc_pte_early; pt_ops.get_pte_virt = get_pte_virt_early; -- cgit From 9484e2aef45bbc27cd23519917f27031e2857a6f Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Mon, 8 Feb 2021 14:30:15 -0500 Subject: riscv: Use KASAN_SHADOW_INIT define for kasan memory initialization Instead of hardcoding memory initialization to 0, use KASAN_SHADOW_INIT. Signed-off-by: Alexandre Ghiti Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/kasan_init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index 4b9149f963d3..8f23799fcbe6 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -93,7 +93,7 @@ static void __init populate(void *start, void *end) __pgprot(_PAGE_TABLE))); local_flush_tlb_all(); - memset(start, 0, end - start); + memset(start, KASAN_SHADOW_INIT, end - start); } void __init kasan_shallow_populate(void *start, void *end) @@ -163,6 +163,6 @@ void __init kasan_init(void) __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED))); - memset(kasan_early_shadow_page, 0, PAGE_SIZE); + memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); init_task.kasan_depth = 0; } -- cgit From d127c19c7bea6150a247ffcd529c9a176877e422 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Mon, 8 Feb 2021 14:30:16 -0500 Subject: riscv: Improve kasan population function The current population code populates a whole page table without taking care of what may already have been allocated, and without taking into account a possible index into the page table: it assumes the virtual address to map is always aligned on the page table size, which, for example, won't be the case once the kernel gets pushed to the end of the address space. Address those problems by rewriting the kasan population function, splitting it into subfunctions for each page table level.
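Each of the new per-level helpers follows the standard kernel page-table walk idiom: clamp the range with *_addr_end() and recurse one level down for every entry covered. Roughly (a simplified sketch of the loop shape only, not the patch code — the real helpers below also allocate missing lower-level tables):

	/*
	 * Sketch of the per-level walk idiom used by the new helpers:
	 * visit each PMD entry covering [vaddr, end) exactly once.
	 */
	do {
		next = pmd_addr_end(vaddr, end);	/* end of this PMD's range, capped at 'end' */
		kasan_populate_pte(pmdp, vaddr, next);	/* populate the level below */
	} while (pmdp++, vaddr = next, vaddr != end);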
Signed-off-by: Alexandre Ghiti Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/kasan_init.c | 91 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 63 insertions(+), 28 deletions(-) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index 8f23799fcbe6..a97df4fdce09 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -60,37 +60,72 @@ asmlinkage void __init kasan_early_init(void) local_flush_tlb_all(); } -static void __init populate(void *start, void *end) +static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end) +{ + phys_addr_t phys_addr; + pte_t *ptep, *base_pte; + + if (pmd_none(*pmd)) + base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE); + else + base_pte = (pte_t *)pmd_page_vaddr(*pmd); + + ptep = base_pte + pte_index(vaddr); + + do { + if (pte_none(*ptep)) { + phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); + set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL)); + } + } while (ptep++, vaddr += PAGE_SIZE, vaddr != end); + + set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE)); +} + +static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end) +{ + phys_addr_t phys_addr; + pmd_t *pmdp, *base_pmd; + unsigned long next; + + base_pmd = (pmd_t *)pgd_page_vaddr(*pgd); + if (base_pmd == lm_alias(kasan_early_shadow_pmd)) + base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE); + + pmdp = base_pmd + pmd_index(vaddr); + + do { + next = pmd_addr_end(vaddr, end); + kasan_populate_pte(pmdp, vaddr, next); + } while (pmdp++, vaddr = next, vaddr != end); + + /* + * Wait for the whole PGD to be populated before setting the PGD in + * the page table, otherwise, if we did set the PGD before populating + * it entirely, memblock could allocate a page at a physical address + * where KASAN is not populated yet and then we'd get a page fault. 
+ */ + set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE)); +} + +static void kasan_populate_pgd(unsigned long vaddr, unsigned long end) +{ + phys_addr_t phys_addr; + pgd_t *pgdp = pgd_offset_k(vaddr); + unsigned long next; + + do { + next = pgd_addr_end(vaddr, end); + kasan_populate_pmd(pgdp, vaddr, next); + } while (pgdp++, vaddr = next, vaddr != end); +} + +static void __init kasan_populate(void *start, void *end) { - unsigned long i, offset; unsigned long vaddr = (unsigned long)start & PAGE_MASK; unsigned long vend = PAGE_ALIGN((unsigned long)end); - unsigned long n_pages = (vend - vaddr) / PAGE_SIZE; - unsigned long n_ptes = - ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE; - unsigned long n_pmds = - ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD; - - pte_t *pte = - memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE); - pmd_t *pmd = - memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE); - pgd_t *pgd = pgd_offset_k(vaddr); - - for (i = 0; i < n_pages; i++) { - phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); - set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL)); - } - - for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE) - set_pmd(&pmd[i], - pfn_pmd(PFN_DOWN(__pa(&pte[offset])), - __pgprot(_PAGE_TABLE))); - for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD) - set_pgd(&pgd[i], - pfn_pgd(PFN_DOWN(__pa(&pmd[offset])), - __pgprot(_PAGE_TABLE))); + kasan_populate_pgd(vaddr, vend); local_flush_tlb_all(); memset(start, KASAN_SHADOW_INIT, end - start); @@ -154,7 +189,7 @@ void __init kasan_init(void) if (start >= end) break; - populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end)); + kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end)); }; for (i = 0; i < PTRS_PER_PTE; i++) -- cgit From d7fbcf40df86bb67193d9faf52138fc1202decb2 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Mon, 8 Feb 2021 14:30:17 -0500 Subject: riscv: Improve kasan population by using hugepages when possible The kasan functions that populate the shadow regions used to allocate them page by page and did not take advantage of hugepages, so fix this by trying to allocate 1GB hugepages and falling back to 2MB hugepages or 4K pages in case that fails. This reduces the page table memory consumption and improves TLB usage, as shown below: Before this patch: ---[ Kasan shadow start ]--- 0xffffffc000000000-0xffffffc400000000 0x00000000818ef000 16G PTE . A . . . . R V 0xffffffc400000000-0xffffffc447fc0000 0x00000002b7f4f000 1179392K PTE D A . . . W R V 0xffffffc480000000-0xffffffc800000000 0x00000000818ef000 14G PTE . A . . . . R V ---[ Kasan shadow end ]--- After this patch: ---[ Kasan shadow start ]--- 0xffffffc000000000-0xffffffc400000000 0x00000000818ef000 16G PTE . A . . . . R V 0xffffffc400000000-0xffffffc440000000 0x0000000240000000 1G PGD D A . . . W R V 0xffffffc440000000-0xffffffc447e00000 0x00000002b7e00000 126M PMD D A . . . W R V 0xffffffc447e00000-0xffffffc447fc0000 0x00000002b818f000 1792K PTE D A . . . W R V 0xffffffc480000000-0xffffffc800000000 0x00000000818ef000 14G PTE . A . . . .
R V ---[ Kasan shadow end ]--- Signed-off-by: Alexandre Ghiti Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/kasan_init.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'arch/riscv/mm') diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index a97df4fdce09..c676a4674342 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -96,6 +96,15 @@ static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long en do { next = pmd_addr_end(vaddr, end); + + if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) { + phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE); + if (phys_addr) { + set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL)); + continue; + } + } + kasan_populate_pte(pmdp, vaddr, next); } while (pmdp++, vaddr = next, vaddr != end); @@ -116,6 +125,21 @@ static void kasan_populate_pgd(unsigned long vaddr, unsigned long end) do { next = pgd_addr_end(vaddr, end); + + /* + * pgdp can't be none since kasan_early_init initialized all KASAN + * shadow region with kasan_early_shadow_pmd: if this is still the case, + * that means we can try to allocate a hugepage as a replacement. + */ + if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) && + IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) { + phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE); + if (phys_addr) { + set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL)); + continue; + } + } + kasan_populate_pmd(pgdp, vaddr, next); } while (pgdp++, vaddr = next, vaddr != end); } -- cgit
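Taken together, this last patch boils down to a "try a big mapping first, else recurse" pattern applied at both the PGD and PMD levels. A condensed sketch of the PMD-level case from the hunk above (the shape of the logic, not new code):

	/* If the whole PMD range is unmapped and aligned, try one 2MB page. */
	if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
		phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
		if (phys_addr) {
			set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
			continue;	/* whole PMD covered by one hugepage */
		}
	}
	kasan_populate_pte(pmdp, vaddr, next);	/* fall back to 4K PTEs */

If the memblock allocation fails or the range is unaligned, population simply falls through to the next smaller granularity, which is why the "After" layout above shows a mix of PGD, PMD, and PTE mappings.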