Diffstat (limited to 'arch/riscv/include/asm')
25 files changed, 572 insertions, 235 deletions
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 445ccc97305a..57b86fd9916c 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 generic-y += early_ioremap.h
-generic-y += extable.h
 generic-y += flat.h
 generic-y += kvm_para.h
 generic-y += user.h
diff --git a/arch/riscv/include/asm/asm-extable.h b/arch/riscv/include/asm/asm-extable.h
new file mode 100644
index 000000000000..14be0673f5b5
--- /dev/null
+++ b/arch/riscv/include/asm/asm-extable.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_ASM_EXTABLE_H
+#define __ASM_ASM_EXTABLE_H
+
+#define EX_TYPE_NONE			0
+#define EX_TYPE_FIXUP			1
+#define EX_TYPE_BPF			2
+#define EX_TYPE_UACCESS_ERR_ZERO	3
+
+#ifdef __ASSEMBLY__
+
+#define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
+	.pushsection	__ex_table, "a";		\
+	.balign		4;				\
+	.long		((insn) - .);			\
+	.long		((fixup) - .);			\
+	.short		(type);				\
+	.short		(data);				\
+	.popsection;
+
+	.macro		_asm_extable, insn, fixup
+	__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
+	.endm
+
+#else /* __ASSEMBLY__ */
+
+#include <linux/bits.h>
+#include <linux/stringify.h>
+#include <asm/gpr-num.h>
+
+#define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
+	".pushsection	__ex_table, \"a\"\n"		\
+	".balign	4\n"				\
+	".long		((" insn ") - .)\n"		\
+	".long		((" fixup ") - .)\n"		\
+	".short		(" type ")\n"			\
+	".short		(" data ")\n"			\
+	".popsection\n"
+
+#define _ASM_EXTABLE(insn, fixup)	\
+	__ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
+
+#define EX_DATA_REG_ERR_SHIFT	0
+#define EX_DATA_REG_ERR		GENMASK(4, 0)
+#define EX_DATA_REG_ZERO_SHIFT	5
+#define EX_DATA_REG_ZERO	GENMASK(9, 5)
+
+#define EX_DATA_REG(reg, gpr)						\
+	"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
+
+#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)		\
+	__DEFINE_ASM_GPR_NUMS						\
+	__ASM_EXTABLE_RAW(#insn, #fixup,				\
+			  __stringify(EX_TYPE_UACCESS_ERR_ZERO),	\
+			  "("						\
+			    EX_DATA_REG(ERR, err) " | "			\
+			    EX_DATA_REG(ZERO, zero)			\
+			  ")")
+
+#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)			\
+	_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ASM_EXTABLE_H */
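
A note on the entry layout introduced above: both offsets are PC-relative to the entry's own fields, which keeps each record at 12 bytes and fully position-independent. A minimal sketch of how such an entry is decoded, mirroring what the fixup side is expected to do; the struct and helper names here are illustrative, not part of the patch:

#include <linux/types.h>

/* Same layout that __ASM_EXTABLE_RAW emits into __ex_table. */
struct riscv_extable_entry_sketch {
	int insn, fixup;	/* offsets relative to each field's own address */
	short type, data;	/* EX_TYPE_* plus packed EX_DATA_REG_* bits */
};

/* Absolute address of the instruction that is allowed to fault. */
static unsigned long ex_to_insn(const struct riscv_extable_entry_sketch *ex)
{
	return (unsigned long)&ex->insn + ex->insn;
}

/* Absolute address execution resumes at once the fault is handled. */
static unsigned long ex_to_fixup(const struct riscv_extable_entry_sketch *ex)
{
	return (unsigned long)&ex->fixup + ex->fixup;
}
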
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 396a3303c537..3540b690944b 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -20,7 +20,6 @@
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/ffs.h>
 
diff --git a/arch/riscv/include/asm/cpu_ops.h b/arch/riscv/include/asm/cpu_ops.h
index a8ec3c5c1bd2..134590f1b843 100644
--- a/arch/riscv/include/asm/cpu_ops.h
+++ b/arch/riscv/include/asm/cpu_ops.h
@@ -40,7 +40,5 @@ struct cpu_operations {
 
 extern const struct cpu_operations *cpu_ops[NR_CPUS];
 void __init cpu_set_ops(int cpu);
-void cpu_update_secondary_bootdata(unsigned int cpuid,
-				   struct task_struct *tidle);
 
 #endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/riscv/include/asm/cpu_ops_sbi.h b/arch/riscv/include/asm/cpu_ops_sbi.h
new file mode 100644
index 000000000000..56e4b76d09ff
--- /dev/null
+++ b/arch/riscv/include/asm/cpu_ops_sbi.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 by Rivos Inc.
+ */
+#ifndef __ASM_CPU_OPS_SBI_H
+#define __ASM_CPU_OPS_SBI_H
+
+#ifndef __ASSEMBLY__
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/threads.h>
+
+/**
+ * struct sbi_hart_boot_data - Hart specific boot data used during booting
+ *			       and cpu hotplug.
+ * @task_ptr: A pointer to the hart specific tp
+ * @stack_ptr: A pointer to the hart specific sp
+ */
+struct sbi_hart_boot_data {
+	void *task_ptr;
+	void *stack_ptr;
+};
+#endif
+
+#endif /* ifndef __ASM_CPU_OPS_SBI_H */
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 5046f431645c..ae711692eec9 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -40,14 +40,13 @@
 #ifndef CONFIG_64BIT
 #define SATP_PPN	_AC(0x003FFFFF, UL)
 #define SATP_MODE_32	_AC(0x80000000, UL)
-#define SATP_MODE	SATP_MODE_32
 #define SATP_ASID_BITS	9
 #define SATP_ASID_SHIFT	22
 #define SATP_ASID_MASK	_AC(0x1FF, UL)
 #else
 #define SATP_PPN	_AC(0x00000FFFFFFFFFFF, UL)
 #define SATP_MODE_39	_AC(0x8000000000000000, UL)
-#define SATP_MODE	SATP_MODE_39
+#define SATP_MODE_48	_AC(0x9000000000000000, UL)
 #define SATP_ASID_BITS	16
 #define SATP_ASID_SHIFT	44
 #define SATP_ASID_MASK	_AC(0xFFFF, UL)
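
The dropped SATP_MODE constant is what pushes the paging-mode choice to runtime: the kernel must now probe whether the hardware accepts sv48 and fall back to sv39 if not. A simplified sketch of that probe-and-fallback idea, assuming an identity-mapped context where the csr write is safe; 'satp_mode' stands in for the variable exported later in this series, and the function name is illustrative:

static u64 satp_mode __ro_after_init = SATP_MODE_48;

static void __init probe_satp_mode(uintptr_t root_pt_pfn)
{
	u64 want = satp_mode | root_pt_pfn;

	csr_write(CSR_SATP, want);
	if (csr_read(CSR_SATP) != want) {	/* WARL field: sv48 refused */
		satp_mode = SATP_MODE_39;
		csr_write(CSR_SATP, satp_mode | root_pt_pfn);
	}
}
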
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index 49b398fe99f1..cc4f6787f937 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -13,7 +13,6 @@
 #ifdef CONFIG_EFI
 extern void efi_init(void);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
 #else
 #define efi_init()
 #endif
 
diff --git a/arch/riscv/include/asm/extable.h b/arch/riscv/include/asm/extable.h
new file mode 100644
index 000000000000..512012d193dc
--- /dev/null
+++ b/arch/riscv/include/asm/extable.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_EXTABLE_H
+#define _ASM_RISCV_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative offsets: the first
+ * is the relative offset to an instruction that is allowed to fault,
+ * and the second is the relative offset at which the program should
+ * continue. No registers are modified, so it is entirely up to the
+ * continuation code to figure out what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+	int insn, fixup;
+	short type, data;
+};
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+#define swap_ex_entry_fixup(a, b, tmp, delta)		\
+do {							\
+	(a)->fixup = (b)->fixup + (delta);		\
+	(b)->fixup = (tmp).fixup - (delta);		\
+	(a)->type = (b)->type;				\
+	(b)->type = (tmp).type;				\
+	(a)->data = (b)->data;				\
+	(b)->data = (tmp).data;				\
+} while (0)
+
+bool fixup_exception(struct pt_regs *regs);
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
+bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs);
+#else
+static inline bool
+ex_handler_bpf(const struct exception_table_entry *ex,
+	       struct pt_regs *regs)
+{
+	return false;
+}
+#endif
+
+#endif
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 54cbf07fb4e9..58a718573ad6 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -24,6 +24,7 @@ enum fixed_addresses {
 	FIX_HOLE,
 	FIX_PTE,
 	FIX_PMD,
+	FIX_PUD,
 	FIX_TEXT_POKE1,
 	FIX_TEXT_POKE0,
 	FIX_EARLYCON_MEM_BASE,
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
index 1b00badb9f87..fc8130f995c1 100644
--- a/arch/riscv/include/asm/futex.h
+++ b/arch/riscv/include/asm/futex.h
@@ -11,6 +11,7 @@
 #include <linux/uaccess.h>
 #include <linux/errno.h>
 #include <asm/asm.h>
+#include <asm/asm-extable.h>
 
 /* We don't even really need the extable code, but for now keep it simple */
 #ifndef CONFIG_MMU
@@ -20,23 +21,14 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
 {								\
-	uintptr_t tmp;						\
 	__enable_user_access();					\
 	__asm__ __volatile__ (					\
 	"1:	" insn "				\n"	\
 	"2:						\n"	\
-	"	.section .fixup,\"ax\"			\n"	\
-	"	.balign 4				\n"	\
-	"3:	li %[r],%[e]				\n"	\
-	"	jump 2b,%[t]				\n"	\
-	"	.previous				\n"	\
-	"	.section __ex_table,\"a\"		\n"	\
-	"	.balign " RISCV_SZPTR "			\n"	\
-	"	" RISCV_PTR " 1b, 3b			\n"	\
-	"	.previous				\n"	\
+	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %[r])			\
	: [r] "+r" (ret), [ov] "=&r" (oldval),			\
-	  [u] "+m" (*uaddr), [t] "=&r" (tmp)			\
-	: [op] "Jr" (oparg), [e] "i" (-EFAULT)			\
+	  [u] "+m" (*uaddr)					\
+	: [op] "Jr" (oparg)					\
	: "memory");						\
	__disable_user_access();				\
}
@@ -98,18 +90,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	"2:	sc.w.aqrl %[t],%z[nv],%[u]		\n"
 	"	bnez %[t],1b				\n"
 	"3:						\n"
-	"	.section .fixup,\"ax\"			\n"
-	"	.balign 4				\n"
-	"4:	li %[r],%[e]				\n"
-	"	jump 3b,%[t]				\n"
-	"	.previous				\n"
-	"	.section __ex_table,\"a\"		\n"
-	"	.balign " RISCV_SZPTR "			\n"
-	"	" RISCV_PTR " 1b, 4b			\n"
-	"	" RISCV_PTR " 2b, 4b			\n"
-	"	.previous				\n"
+	_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r])	\
+	_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r])	\
 	: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
-	: [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "i" (-EFAULT)
+	: [ov] "Jr" (oldval), [nv] "Jr" (newval)
 	: "memory");
 	__disable_user_access();
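
What replaces the removed .fixup stubs is a single runtime handler: it unpacks the 'data' field, writes -EFAULT into the recorded error register, zeroes the recorded value register, and resumes at the fixup address. A sketch of that handler under those assumptions; regs_set_gpr() is a hypothetical stand-in for the real register write-back, and the FIELD_GET usage assumes linux/bitfield.h:

#include <linux/bitfield.h>
#include <linux/errno.h>

static bool ex_handler_uaccess_err_zero_sketch(const struct exception_table_entry *ex,
					       struct pt_regs *regs)
{
	int reg_err  = FIELD_GET(EX_DATA_REG_ERR, ex->data);
	int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);

	regs_set_gpr(regs, reg_err, -EFAULT);	/* e.g. %[r] in the futex ops */
	regs_set_gpr(regs, reg_zero, 0);	/* unused slots encode x0 */
	regs->epc = (unsigned long)&ex->fixup + ex->fixup;	/* resume */
	return true;
}
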
diff --git a/arch/riscv/include/asm/gpr-num.h b/arch/riscv/include/asm/gpr-num.h
new file mode 100644
index 000000000000..dfee2829fc7c
--- /dev/null
+++ b/arch/riscv/include/asm/gpr-num.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_GPR_NUM_H
+#define __ASM_GPR_NUM_H
+
+#ifdef __ASSEMBLY__
+	.equ	.L__gpr_num_zero,	0
+	.equ	.L__gpr_num_ra,		1
+	.equ	.L__gpr_num_sp,		2
+	.equ	.L__gpr_num_gp,		3
+	.equ	.L__gpr_num_tp,		4
+	.equ	.L__gpr_num_t0,		5
+	.equ	.L__gpr_num_t1,		6
+	.equ	.L__gpr_num_t2,		7
+	.equ	.L__gpr_num_s0,		8
+	.equ	.L__gpr_num_s1,		9
+	.equ	.L__gpr_num_a0,		10
+	.equ	.L__gpr_num_a1,		11
+	.equ	.L__gpr_num_a2,		12
+	.equ	.L__gpr_num_a3,		13
+	.equ	.L__gpr_num_a4,		14
+	.equ	.L__gpr_num_a5,		15
+	.equ	.L__gpr_num_a6,		16
+	.equ	.L__gpr_num_a7,		17
+	.equ	.L__gpr_num_s2,		18
+	.equ	.L__gpr_num_s3,		19
+	.equ	.L__gpr_num_s4,		20
+	.equ	.L__gpr_num_s5,		21
+	.equ	.L__gpr_num_s6,		22
+	.equ	.L__gpr_num_s7,		23
+	.equ	.L__gpr_num_s8,		24
+	.equ	.L__gpr_num_s9,		25
+	.equ	.L__gpr_num_s10,	26
+	.equ	.L__gpr_num_s11,	27
+	.equ	.L__gpr_num_t3,		28
+	.equ	.L__gpr_num_t4,		29
+	.equ	.L__gpr_num_t5,		30
+	.equ	.L__gpr_num_t6,		31
+
+#else /* __ASSEMBLY__ */
+
+#define __DEFINE_ASM_GPR_NUMS				\
+"	.equ	.L__gpr_num_zero,	0\n"		\
+"	.equ	.L__gpr_num_ra,		1\n"		\
+"	.equ	.L__gpr_num_sp,		2\n"		\
+"	.equ	.L__gpr_num_gp,		3\n"		\
+"	.equ	.L__gpr_num_tp,		4\n"		\
+"	.equ	.L__gpr_num_t0,		5\n"		\
+"	.equ	.L__gpr_num_t1,		6\n"		\
+"	.equ	.L__gpr_num_t2,		7\n"		\
+"	.equ	.L__gpr_num_s0,		8\n"		\
+"	.equ	.L__gpr_num_s1,		9\n"		\
+"	.equ	.L__gpr_num_a0,		10\n"		\
+"	.equ	.L__gpr_num_a1,		11\n"		\
+"	.equ	.L__gpr_num_a2,		12\n"		\
+"	.equ	.L__gpr_num_a3,		13\n"		\
+"	.equ	.L__gpr_num_a4,		14\n"		\
+"	.equ	.L__gpr_num_a5,		15\n"		\
+"	.equ	.L__gpr_num_a6,		16\n"		\
+"	.equ	.L__gpr_num_a7,		17\n"		\
+"	.equ	.L__gpr_num_s2,		18\n"		\
+"	.equ	.L__gpr_num_s3,		19\n"		\
+"	.equ	.L__gpr_num_s4,		20\n"		\
+"	.equ	.L__gpr_num_s5,		21\n"		\
+"	.equ	.L__gpr_num_s6,		22\n"		\
+"	.equ	.L__gpr_num_s7,		23\n"		\
+"	.equ	.L__gpr_num_s8,		24\n"		\
+"	.equ	.L__gpr_num_s9,		25\n"		\
+"	.equ	.L__gpr_num_s10,	26\n"		\
+"	.equ	.L__gpr_num_s11,	27\n"		\
+"	.equ	.L__gpr_num_t3,		28\n"		\
+"	.equ	.L__gpr_num_t4,		29\n"		\
+"	.equ	.L__gpr_num_t5,		30\n"		\
+"	.equ	.L__gpr_num_t6,		31\n"
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_GPR_NUM_H */
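
The point of these equates is that EX_DATA_REG() in asm-extable.h can turn a register name into its encoding number with no C-side lookup table: the preprocessor pastes the name into a local symbol and the assembler resolves it. A worked expansion with illustrative register choices:

/*
 * EX_DATA_REG(ERR, a0) expands to the string "((.L__gpr_num_a0) << 0)"
 * and EX_DATA_REG(ZERO, a1) to "((.L__gpr_num_a1) << 5)", so for
 * err = a0, zero = a1 the assembler computes
 *
 *	data = (10 << 0) | (11 << 5) = 0x16a
 *
 * which is exactly what the EX_DATA_REG_ERR / EX_DATA_REG_ZERO masks
 * (bits 4:0 and 9:5) recover at fault time.
 */
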
diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h
index b00f503ec124..0b85e363e778 100644
--- a/arch/riscv/include/asm/kasan.h
+++ b/arch/riscv/include/asm/kasan.h
@@ -27,13 +27,18 @@
  */
 #define KASAN_SHADOW_SCALE_SHIFT	3
 
-#define KASAN_SHADOW_SIZE	(UL(1) << ((CONFIG_VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
-#define KASAN_SHADOW_START	KERN_VIRT_START
-#define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+#define KASAN_SHADOW_SIZE	(UL(1) << ((VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
+/*
+ * Depending on the size of the virtual address space, the region may not be
+ * aligned on PGDIR_SIZE, so force its alignment to ease its population.
+ */
+#define KASAN_SHADOW_START	((KASAN_SHADOW_END - KASAN_SHADOW_SIZE) & PGDIR_MASK)
+#define KASAN_SHADOW_END	MODULES_LOWEST_VADDR
 #define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
 
 void kasan_init(void);
 asmlinkage void kasan_early_init(void);
+void kasan_swapper_init(void);
 
 #endif
 #endif
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 25ba21f98504..99ef6a120617 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -12,14 +12,12 @@
 #include <linux/types.h>
 #include <linux/kvm.h>
 #include <linux/kvm_types.h>
+#include <asm/csr.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_timer.h>
 
-#ifdef CONFIG_64BIT
-#define KVM_MAX_VCPUS			(1U << 16)
-#else
-#define KVM_MAX_VCPUS			(1U << 9)
-#endif
+#define KVM_MAX_VCPUS \
+	((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)
 
 #define KVM_HALT_POLL_NS_DEFAULT	500000
@@ -79,13 +77,6 @@ struct kvm_sbi_context {
 	int return_handled;
 };
 
-#define KVM_MMU_PAGE_CACHE_NR_OBJS	32
-
-struct kvm_mmu_page_cache {
-	int nobjs;
-	void *objects[KVM_MMU_PAGE_CACHE_NR_OBJS];
-};
-
 struct kvm_cpu_trap {
 	unsigned long sepc;
 	unsigned long scause;
@@ -195,7 +186,7 @@ struct kvm_vcpu_arch {
 	struct kvm_sbi_context sbi_context;
 
 	/* Cache pages needed to program page tables with spinlock held */
-	struct kvm_mmu_page_cache mmu_page_cache;
+	struct kvm_mmu_memory_cache mmu_page_cache;
 
 	/* VCPU power-off state */
 	bool power_off;
@@ -210,7 +201,6 @@ struct kvm_vcpu_arch {
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
@@ -223,12 +213,12 @@ void __kvm_riscv_hfence_gvma_all(void);
 int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
 			 struct kvm_memory_slot *memslot,
 			 gpa_t gpa, unsigned long hva, bool is_write);
-void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu);
 int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm);
 void kvm_riscv_stage2_free_pgd(struct kvm *kvm);
 void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu);
 void kvm_riscv_stage2_mode_detect(void);
 unsigned long kvm_riscv_stage2_mode(void);
+int kvm_riscv_stage2_gpa_bits(void);
 
 void kvm_riscv_stage2_vmid_detect(void);
 unsigned long kvm_riscv_stage2_vmid_bits(void);
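
The new KVM_MAX_VCPUS bound is simply the number of distinct VMIDs the hgatp CSR can hold. Assuming the standard hgatp layouts from csr.h (a 14-bit VMID field at bits 57:44 on RV64, 7 bits at 28:22 on RV32), a quick sanity check of the arithmetic:

#include <linux/build_bug.h>
#include <linux/kconfig.h>
#include <asm/csr.h>

/* 16384 possible vcpus on RV64, 128 on RV32. */
static_assert(((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1) ==
	      (IS_ENABLED(CONFIG_64BIT) ? (1UL << 14) : (1UL << 7)));
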
diff --git a/arch/riscv/include/asm/kvm_types.h b/arch/riscv/include/asm/kvm_types.h
index e476b404eb67..e15765f98d7a 100644
--- a/arch/riscv/include/asm/kvm_types.h
+++ b/arch/riscv/include/asm/kvm_types.h
@@ -2,6 +2,6 @@
 #ifndef _ASM_RISCV_KVM_TYPES_H
 #define _ASM_RISCV_KVM_TYPES_H
 
-#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE	40
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE	32
 
 #endif /* _ASM_RISCV_KVM_TYPES_H */
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
new file mode 100644
index 000000000000..76e4e17a3e00
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/**
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ *     Atish Patra <atish.patra@wdc.com>
+ */
+
+#ifndef __RISCV_KVM_VCPU_SBI_H__
+#define __RISCV_KVM_VCPU_SBI_H__
+
+#define KVM_SBI_IMPID 3
+
+#define KVM_SBI_VERSION_MAJOR 0
+#define KVM_SBI_VERSION_MINOR 2
+
+struct kvm_vcpu_sbi_extension {
+	unsigned long extid_start;
+	unsigned long extid_end;
+	/**
+	 * SBI extension handler. It can be defined for a given extension or a
+	 * group of extensions, but it should always return Linux error codes
+	 * rather than SBI-specific error codes.
+	 */
+	int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		       unsigned long *out_val, struct kvm_cpu_trap *utrap,
+		       bool *exit);
+};
+
+void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
+const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
+
+#endif /* __RISCV_KVM_VCPU_SBI_H__ */
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index b3e5ff0125fe..004372f8da54 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -31,9 +31,20 @@
  * When not using MMU this corresponds to the first free page in
  * physical memory (aligned on a page boundary).
  */
+#ifdef CONFIG_64BIT
+#ifdef CONFIG_MMU
+#define PAGE_OFFSET		kernel_map.page_offset
+#else
 #define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
-
-#define KERN_VIRT_SIZE (-PAGE_OFFSET)
+#endif
+/*
+ * By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so
+ * define the PAGE_OFFSET value for SV39.
+ */
+#define PAGE_OFFSET_L3		_AC(0xffffffd800000000, UL)
+#else
+#define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
+#endif /* CONFIG_64BIT */
 
 #ifndef __ASSEMBLY__
@@ -86,6 +97,7 @@ extern unsigned long riscv_pfn_base;
 #endif /* CONFIG_MMU */
 
 struct kernel_mapping {
+	unsigned long page_offset;
 	unsigned long virt_addr;
 	uintptr_t phys_addr;
 	uintptr_t size;
@@ -107,7 +119,7 @@ extern phys_addr_t phys_ram_base;
 	((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
 
 #define is_linear_mapping(x)	\
-	((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < kernel_map.virt_addr))
+	((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))
 
 #define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
 #define kernel_mapping_pa_to_va(y)	({						\
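
Since PAGE_OFFSET is now a load from kernel_map rather than a build-time constant, early boot can shrink the virtual layout when sv48 turns out to be unavailable. A sketch of the idea, mirroring rather than quoting the early setup code; per the comment above, CONFIG_PAGE_OFFSET is the sv48 value:

static void __init set_kernel_page_offset(bool sv48_available)
{
	kernel_map.page_offset = sv48_available ?
		_AC(CONFIG_PAGE_OFFSET, UL) : PAGE_OFFSET_L3;
}
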
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 0af6933a7100..11823004b87a 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -11,6 +11,8 @@
 #include <asm/tlb.h>
 
 #ifdef CONFIG_MMU
+#define __HAVE_ARCH_PUD_ALLOC_ONE
+#define __HAVE_ARCH_PUD_FREE
 #include <asm-generic/pgalloc.h>
 
 static inline void pmd_populate_kernel(struct mm_struct *mm,
@@ -36,6 +38,44 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 
 	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
 }
+
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+	if (pgtable_l4_enabled) {
+		unsigned long pfn = virt_to_pfn(pud);
+
+		set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+	}
+}
+
+static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
+				     pud_t *pud)
+{
+	if (pgtable_l4_enabled) {
+		unsigned long pfn = virt_to_pfn(pud);
+
+		set_p4d_safe(p4d,
+			     __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+	}
+}
+
+#define pud_alloc_one pud_alloc_one
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	if (pgtable_l4_enabled)
+		return __pud_alloc_one(mm, addr);
+
+	return NULL;
+}
+
+#define pud_free pud_free
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+	if (pgtable_l4_enabled)
+		__pud_free(mm, pud);
+}
+
+#define __pud_free_tlb(tlb, pud, addr)  pud_free((tlb)->mm, pud)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 228261aa9628..bbbdd66e5e2f 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -8,16 +8,36 @@
 
 #include <linux/const.h>
 
-#define PGDIR_SHIFT     30
+extern bool pgtable_l4_enabled;
+
+#define PGDIR_SHIFT_L3  30
+#define PGDIR_SHIFT_L4  39
+#define PGDIR_SIZE_L3   (_AC(1, UL) << PGDIR_SHIFT_L3)
+
+#define PGDIR_SHIFT     (pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3)
 /* Size of region mapped by a page global directory */
 #define PGDIR_SIZE      (_AC(1, UL) << PGDIR_SHIFT)
 #define PGDIR_MASK      (~(PGDIR_SIZE - 1))
 
+/* pud is folded into pgd in case of 3-level page table */
+#define PUD_SHIFT      30
+#define PUD_SIZE       (_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK       (~(PUD_SIZE - 1))
+
 #define PMD_SHIFT       21
 /* Size of region mapped by a page middle directory */
 #define PMD_SIZE        (_AC(1, UL) << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE - 1))
 
+/* Page Upper Directory entry */
+typedef struct {
+	unsigned long pud;
+} pud_t;
+
+#define pud_val(x)      ((x).pud)
+#define __pud(x)        ((pud_t) { (x) })
+#define PTRS_PER_PUD    (PAGE_SIZE / sizeof(pud_t))
+
 /* Page Middle Directory entry */
 typedef struct {
 	unsigned long pmd;
@@ -59,6 +79,16 @@ static inline void pud_clear(pud_t *pudp)
 	set_pud(pudp, __pud(0));
 }
 
+static inline pud_t pfn_pud(unsigned long pfn, pgprot_t prot)
+{
+	return __pud((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+static inline unsigned long _pud_pfn(pud_t pud)
+{
+	return pud_val(pud) >> _PAGE_PFN_SHIFT;
+}
+
 static inline pmd_t *pud_pgtable(pud_t pud)
 {
 	return (pmd_t *)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
@@ -69,6 +99,17 @@ static inline struct page *pud_page(pud_t pud)
 	return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
 }
 
+#define mm_pud_folded  mm_pud_folded
+static inline bool mm_pud_folded(struct mm_struct *mm)
+{
+	if (pgtable_l4_enabled)
+		return false;
+
+	return true;
+}
+
+#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+
 static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
 {
 	return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
@@ -84,4 +125,69 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
 #define pmd_ERROR(e) \
 	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
 
+#define pud_ERROR(e)   \
+	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+
+static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
+{
+	if (pgtable_l4_enabled)
+		*p4dp = p4d;
+	else
+		set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
+}
+
+static inline int p4d_none(p4d_t p4d)
+{
+	if (pgtable_l4_enabled)
+		return (p4d_val(p4d) == 0);
+
+	return 0;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+	if (pgtable_l4_enabled)
+		return (p4d_val(p4d) & _PAGE_PRESENT);
+
+	return 1;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+	if (pgtable_l4_enabled)
+		return !p4d_present(p4d);
+
+	return 0;
+}
+
+static inline void p4d_clear(p4d_t *p4d)
+{
+	if (pgtable_l4_enabled)
+		set_p4d(p4d, __p4d(0));
+}
+
+static inline pud_t *p4d_pgtable(p4d_t p4d)
+{
+	if (pgtable_l4_enabled)
+		return (pud_t *)pfn_to_virt(p4d_val(p4d) >> _PAGE_PFN_SHIFT);
+
+	return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) });
+}
+
+static inline struct page *p4d_page(p4d_t p4d)
+{
+	return pfn_to_page(p4d_val(p4d) >> _PAGE_PFN_SHIFT);
+}
+
+#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+
+#define pud_offset pud_offset
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+	if (pgtable_l4_enabled)
+		return p4d_pgtable(*p4d) + pud_index(address);
+
+	return (pud_t *)p4d;
+}
+
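
The practical consequence of the runtime-folded level: generic four-level page-table walkers keep working unmodified, because with pgtable_l4_enabled false both p4d_offset() (folded away by pgtable-nop4d.h) and the pud_offset() above simply hand back the slot they were given. A sketch assuming <linux/pgtable.h>, with the p*_none()/p*_bad() checks a real walker needs omitted for brevity:

#include <linux/pgtable.h>

static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);	/* no-op: p4d is folded */
	pud_t *pud = pud_offset(p4d, addr);	/* no-op when !pgtable_l4_enabled */
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_kernel(pmd, addr);
}
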
 #endif /* _ASM_RISCV_PGTABLE_64_H */
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index 2ee413912926..a6b0c89824c2 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -31,7 +31,7 @@
  * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
  * distinguish them from swapped out pages
  */
-#define _PAGE_PROT_NONE _PAGE_READ
+#define _PAGE_PROT_NONE _PAGE_GLOBAL
 
 #define _PAGE_PFN_SHIFT 10
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index bf204e7c1f74..e3549e50de95 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -13,6 +13,7 @@
 
 #ifndef CONFIG_MMU
 #define KERNEL_LINK_ADDR	PAGE_OFFSET
+#define KERN_VIRT_SIZE		(UL(-1))
 #else
 
 #define ADDRESS_SPACE_END	(UL(-1))
@@ -24,8 +25,19 @@
 #define KERNEL_LINK_ADDR	PAGE_OFFSET
 #endif
 
+/* Number of entries in the page global directory */
+#define PTRS_PER_PGD    (PAGE_SIZE / sizeof(pgd_t))
+/* Number of entries in the page table */
+#define PTRS_PER_PTE    (PAGE_SIZE / sizeof(pte_t))
+
+/*
+ * Half of the kernel address space (half of the entries of the page global
+ * directory) is for the direct mapping.
+ */
+#define KERN_VIRT_SIZE          ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
+
 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
-#define VMALLOC_END      (PAGE_OFFSET - 1)
+#define VMALLOC_END      PAGE_OFFSET
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
 
 #define BPF_JIT_REGION_SIZE	(SZ_128M)
@@ -39,8 +51,10 @@
 
 /* Modules always live before the kernel */
 #ifdef CONFIG_64BIT
-#define MODULES_VADDR	(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
-#define MODULES_END	(PFN_ALIGN((unsigned long)&_start))
+/* This is used to define the end of the KASAN shadow region */
+#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
+#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
+#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
 #endif
 
 /*
@@ -48,10 +62,16 @@
  * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
+#ifdef CONFIG_64BIT
+#define VA_BITS		(pgtable_l4_enabled ? 48 : 39)
+#else
+#define VA_BITS		32
+#endif
+
 #define VMEMMAP_SHIFT \
-	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
 #define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
-#define VMEMMAP_END	(VMALLOC_START - 1)
+#define VMEMMAP_END	VMALLOC_START
 #define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
 
 /*
@@ -83,8 +103,7 @@
 
 #ifndef __ASSEMBLY__
 
-/* Page Upper Directory not used in RISC-V */
-#include <asm-generic/pgtable-nopud.h>
+#include <asm-generic/pgtable-nop4d.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 #include <linux/mm_types.h>
@@ -107,19 +126,27 @@
 #define XIP_FIXUP(addr)		(addr)
 #endif /* CONFIG_XIP_KERNEL */
 
-#ifdef CONFIG_MMU
-/* Number of entries in the page global directory */
-#define PTRS_PER_PGD    (PAGE_SIZE / sizeof(pgd_t))
-/* Number of entries in the page table */
-#define PTRS_PER_PTE    (PAGE_SIZE / sizeof(pte_t))
+struct pt_alloc_ops {
+	pte_t *(*get_pte_virt)(phys_addr_t pa);
+	phys_addr_t (*alloc_pte)(uintptr_t va);
+#ifndef __PAGETABLE_PMD_FOLDED
+	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
+	phys_addr_t (*alloc_pmd)(uintptr_t va);
+	pud_t *(*get_pud_virt)(phys_addr_t pa);
+	phys_addr_t (*alloc_pud)(uintptr_t va);
+#endif
+};
+
+extern struct pt_alloc_ops pt_ops __initdata;
 
+#ifdef CONFIG_MMU
 /* Number of PGD entries that a user-mode program can use */
 #define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)
 
 /* Page protection bits */
 #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
 
-#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE)
+#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
 #define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
 #define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
 #define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
@@ -628,11 +655,12 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
- *	bit            1:	_PAGE_PROT_NONE (zero)
- *	bits      2 to 6:	swap type
- *	bits 7 to XLEN-1:	swap offset
+ *	bits      1 to 3:	_PAGE_LEAF (zero)
+ *	bit            5:	_PAGE_PROT_NONE (zero)
+ *	bits     6 to 10:	swap type
+ *	bits 11 to XLEN-1:	swap offset
 */
-#define __SWP_TYPE_SHIFT	2
+#define __SWP_TYPE_SHIFT	6
 #define __SWP_TYPE_BITS	5
 #define __SWP_TYPE_MASK	((1UL << __SWP_TYPE_BITS) - 1)
 #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
@@ -648,12 +676,17 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
+#define __swp_entry_to_pmd(swp) __pmd((swp).val)
+#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
 /*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
 #ifdef CONFIG_64BIT
-#define KERN_VIRT_START	(-(BIT(CONFIG_VA_BITS)) + TASK_SIZE)
+#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
 #else
 #define KERN_VIRT_START	FIXADDR_START
 #endif
@@ -661,11 +694,22 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 /*
- * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
- * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
+ * Task size is:
+ * -     0x9fc00000 (~2.5GB) for RV32.
+ * -   0x4000000000 ( 256GB) for RV64 using SV39 mmu
+ * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu
+ *
+ * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
+ * Instruction Set Manual Volume II: Privileged Architecture" states that
+ * "load and store effective addresses, which are 64bits, must have bits
+ * 63–48 all equal to bit 47, or else a page-fault exception will occur."
 */
 #ifdef CONFIG_64BIT
-#define TASK_SIZE	(PGDIR_SIZE * PTRS_PER_PGD / 2)
+#define TASK_SIZE	(PGDIR_SIZE * PTRS_PER_PGD / 2)
+#define TASK_SIZE_MIN	(PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
 #else
-#define TASK_SIZE	FIXADDR_START
+#define TASK_SIZE	FIXADDR_START
+#define TASK_SIZE_MIN	TASK_SIZE
 #endif
 
 #else /* CONFIG_MMU */
@@ -691,6 +735,8 @@ extern uintptr_t _dtb_early_pa;
 #define dtb_early_va	_dtb_early_va
 #define dtb_early_pa	_dtb_early_pa
 #endif /* CONFIG_XIP_KERNEL */
+extern u64 satp_mode;
+extern bool pgtable_l4_enabled;
 
 void paging_init(void);
 void misc_mem_init(void);
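
To see why the swap layout moved: with __SWP_TYPE_SHIFT at 6 an encoded entry keeps bits 0 to 5 clear, so a swapped-out PTE can never read as present, as an R/W/X leaf, or as the new _PAGE_PROT_NONE (_PAGE_GLOBAL, bit 5). A worked encoding under that layout:

/*
 * __swp_entry(type, offset) packs as (type << 6) | (offset << 11),
 * e.g. type = 2, offset = 0x1234:
 *
 *	(2 << 6) | (0x1234 << 11) = 0x80 | 0x91a000 = 0x91a080
 *
 * Bits 0-5 (V, R, W, X, U, G) stay zero by construction.
 */
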
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 0d42693cb65e..d1c37479d828 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -8,6 +8,7 @@
 #define _ASM_RISCV_SBI_H
 
 #include <linux/types.h>
+#include <linux/cpumask.h>
 
 #ifdef CONFIG_RISCV_SBI
 enum sbi_ext_id {
@@ -27,6 +28,15 @@ enum sbi_ext_id {
 	SBI_EXT_IPI = 0x735049,
 	SBI_EXT_RFENCE = 0x52464E43,
 	SBI_EXT_HSM = 0x48534D,
+	SBI_EXT_SRST = 0x53525354,
+
+	/* Experimental extensions must lie within this range */
+	SBI_EXT_EXPERIMENTAL_START = 0x08000000,
+	SBI_EXT_EXPERIMENTAL_END = 0x08FFFFFF,
+
+	/* Vendor extensions must lie within this range */
+	SBI_EXT_VENDOR_START = 0x09000000,
+	SBI_EXT_VENDOR_END = 0x09FFFFFF,
 };
 
 enum sbi_ext_base_fid {
@@ -70,6 +80,21 @@ enum sbi_hsm_hart_status {
 	SBI_HSM_HART_STATUS_STOP_PENDING,
 };
 
+enum sbi_ext_srst_fid {
+	SBI_EXT_SRST_RESET = 0,
+};
+
+enum sbi_srst_reset_type {
+	SBI_SRST_RESET_TYPE_SHUTDOWN = 0,
+	SBI_SRST_RESET_TYPE_COLD_REBOOT,
+	SBI_SRST_RESET_TYPE_WARM_REBOOT,
+};
+
+enum sbi_srst_reset_reason {
+	SBI_SRST_RESET_REASON_NONE = 0,
+	SBI_SRST_RESET_REASON_SYS_FAILURE,
+};
+
 #define SBI_SPEC_VERSION_DEFAULT	0x1
 #define SBI_SPEC_VERSION_MAJOR_SHIFT	24
 #define SBI_SPEC_VERSION_MAJOR_MASK	0x7f
@@ -82,6 +107,7 @@ enum sbi_hsm_hart_status {
 #define SBI_ERR_INVALID_PARAM	-3
 #define SBI_ERR_DENIED		-4
 #define SBI_ERR_INVALID_ADDRESS	-5
+#define SBI_ERR_ALREADY_AVAILABLE -6
 
 extern unsigned long sbi_spec_version;
 struct sbiret {
@@ -103,27 +129,27 @@ long sbi_get_mimpid(void);
 void sbi_set_timer(uint64_t stime_value);
 void sbi_shutdown(void);
 void sbi_clear_ipi(void);
-int sbi_send_ipi(const unsigned long *hart_mask);
-int sbi_remote_fence_i(const unsigned long *hart_mask);
-int sbi_remote_sfence_vma(const unsigned long *hart_mask,
+int sbi_send_ipi(const struct cpumask *cpu_mask);
+int sbi_remote_fence_i(const struct cpumask *cpu_mask);
+int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size);
 
-int sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
+int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid);
-int sbi_remote_hfence_gvma(const unsigned long *hart_mask,
+int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size);
-int sbi_remote_hfence_gvma_vmid(const unsigned long *hart_mask,
+int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long vmid);
-int sbi_remote_hfence_vvma(const unsigned long *hart_mask,
+int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size);
-int sbi_remote_hfence_vvma_asid(const unsigned long *hart_mask,
+int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid);
@@ -148,9 +174,17 @@ static inline unsigned long sbi_minor_version(void)
 	return sbi_spec_version & SBI_SPEC_VERSION_MINOR_MASK;
 }
 
+/* Make SBI version */
+static inline unsigned long sbi_mk_version(unsigned long major,
+					   unsigned long minor)
+{
+	return ((major & SBI_SPEC_VERSION_MAJOR_MASK) <<
+		SBI_SPEC_VERSION_MAJOR_SHIFT) | minor;
+}
+
 int sbi_err_map_linux_errno(int err);
 #else /* CONFIG_RISCV_SBI */
-static inline int sbi_remote_fence_i(const unsigned long *hart_mask) { return -1; }
+static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
 static inline void sbi_init(void) {}
 #endif /* CONFIG_RISCV_SBI */
 #endif /* _ASM_RISCV_SBI_H */
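
The caller-visible effect of the cpumask conversion: SBI users now pass a cpumask straight through, and the cpuid-to-hartid translation happens inside the sbi_*() helpers, which is why riscv_cpuid_to_hartid_mask() disappears from smp.h below. A sketch of a typical call site after the change, plus how the new sbi_mk_version() can gate probing of the v0.3-only SRST extension; function names are illustrative:

static void flush_icache_all_remote(void)
{
	/* previously: build an unsigned long hart_mask by hand */
	sbi_remote_fence_i(cpu_online_mask);
}

static bool srst_probe_possible(void)
{
	/* SRST first appeared in SBI spec v0.3 */
	return sbi_spec_version >= sbi_mk_version(0, 3);
}
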
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index a7d2811f3536..23170c933d73 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -43,7 +43,6 @@ void arch_send_call_function_ipi_mask(struct cpumask *mask);
 void arch_send_call_function_single_ipi(int cpu);
 
 int riscv_hartid_to_cpuid(int hartid);
-void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
 
 /* Set custom IPI operations */
 void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops);
@@ -63,8 +62,6 @@ asmlinkage void smp_callin(void);
 #if defined CONFIG_HOTPLUG_CPU
 int __cpu_disable(void);
 void __cpu_die(unsigned int cpu);
-void cpu_stop(void);
-#else
 #endif /* CONFIG_HOTPLUG_CPU */
 
 #else
@@ -85,13 +82,6 @@ static inline unsigned long cpuid_to_hartid_map(int cpu)
 	return boot_cpu_hartid;
 }
 
-static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
-					      struct cpumask *out)
-{
-	cpumask_clear(out);
-	cpumask_set_cpu(boot_cpu_hartid, out);
-}
-
 static inline void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
 {
 }
diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h
index 45a7018a8118..63acaecc3374 100644
--- a/arch/riscv/include/asm/sparsemem.h
+++ b/arch/riscv/include/asm/sparsemem.h
@@ -4,7 +4,11 @@
 #define _ASM_RISCV_SPARSEMEM_H
 
 #ifdef CONFIG_SPARSEMEM
-#define MAX_PHYSMEM_BITS	CONFIG_PA_BITS
+#ifdef CONFIG_64BIT
+#define MAX_PHYSMEM_BITS	56
+#else
+#define MAX_PHYSMEM_BITS	34
+#endif /* CONFIG_64BIT */
 #define SECTION_SIZE_BITS	27
 #endif /* CONFIG_SPARSEMEM */
diff --git a/arch/riscv/include/asm/spinlock_types.h b/arch/riscv/include/asm/spinlock_types.h
index f398e7638dd6..5a35a49505da 100644
--- a/arch/riscv/include/asm/spinlock_types.h
+++ b/arch/riscv/include/asm/spinlock_types.h
@@ -6,7 +6,7 @@
 #ifndef _ASM_RISCV_SPINLOCK_TYPES_H
 #define _ASM_RISCV_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
 # error "please don't include this file directly"
 #endif
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index f314ff44c48d..c701a5e57a2b 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -8,6 +8,7 @@
 #ifndef _ASM_RISCV_UACCESS_H
 #define _ASM_RISCV_UACCESS_H
 
+#include <asm/asm-extable.h>
 #include <asm/pgtable.h>		/* for TASK_SIZE */
 
 /*
@@ -80,25 +81,14 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
 
 #define __get_user_asm(insn, x, ptr, err)			\
 do {								\
-	uintptr_t __tmp;					\
	__typeof__(x) __x;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
-		"	" insn " %1, %3\n"			\
+		"	" insn " %1, %2\n"			\
		"2:\n"						\
-		"	.section .fixup,\"ax\"\n"		\
-		"	.balign 4\n"				\
-		"3:\n"						\
-		"	li %0, %4\n"				\
-		"	li %1, 0\n"				\
-		"	jump 2b, %2\n"				\
-		"	.previous\n"				\
-		"	.section __ex_table,\"a\"\n"		\
-		"	.balign " RISCV_SZPTR "\n"		\
-		"	" RISCV_PTR " 1b, 3b\n"			\
-		"	.previous"				\
-		: "+r" (err), "=&r" (__x), "=r" (__tmp)		\
-		: "m" (*(ptr)), "i" (-EFAULT));			\
+		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1)	\
+		: "+r" (err), "=&r" (__x)			\
+		: "m" (*(ptr)));				\
	(x) = __x;						\
 } while (0)
@@ -110,30 +100,18 @@ do {								\
 do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
-	uintptr_t __tmp;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
-		"	lw %1, %4\n"				\
+		"	lw %1, %3\n"				\
		"2:\n"						\
-		"	lw %2, %5\n"				\
+		"	lw %2, %4\n"				\
		"3:\n"						\
-		"	.section .fixup,\"ax\"\n"		\
-		"	.balign 4\n"				\
-		"4:\n"						\
-		"	li %0, %6\n"				\
-		"	li %1, 0\n"				\
-		"	li %2, 0\n"				\
-		"	jump 3b, %3\n"				\
-		"	.previous\n"				\
-		"	.section __ex_table,\"a\"\n"		\
-		"	.balign " RISCV_SZPTR "\n"		\
-		"	" RISCV_PTR " 1b, 4b\n"			\
-		"	" RISCV_PTR " 2b, 4b\n"			\
-		"	.previous"				\
-		: "+r" (err), "=&r" (__lo), "=r" (__hi),	\
-		  "=r" (__tmp)					\
-		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]),	\
-		  "i" (-EFAULT));				\
+		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1)	\
+		_ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1)	\
+		: "+r" (err), "=&r" (__lo), "=r" (__hi)		\
+		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]));	\
+	if (err)						\
+		__hi = 0;					\
	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
		(((u64)__hi << 32) | __lo)));			\
 } while (0)
@@ -221,24 +199,14 @@ do {								\
 
 #define __put_user_asm(insn, x, ptr, err)			\
 do {								\
-	uintptr_t __tmp;					\
	__typeof__(*(ptr)) __x = x;				\
	__asm__ __volatile__ (					\
		"1:\n"						\
-		"	" insn " %z3, %2\n"			\
+		"	" insn " %z2, %1\n"			\
		"2:\n"						\
-		"	.section .fixup,\"ax\"\n"		\
-		"	.balign 4\n"				\
-		"3:\n"						\
-		"	li %0, %4\n"				\
-		"	jump 2b, %1\n"				\
-		"	.previous\n"				\
-		"	.section __ex_table,\"a\"\n"		\
-		"	.balign " RISCV_SZPTR "\n"		\
-		"	" RISCV_PTR " 1b, 3b\n"			\
-		"	.previous"				\
-		: "+r" (err), "=r" (__tmp), "=m" (*(ptr))	\
-		: "rJ" (__x), "i" (-EFAULT));			\
+		_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0)		\
+		: "+r" (err), "=m" (*(ptr))			\
+		: "rJ" (__x));					\
 } while (0)
@@ -249,28 +217,18 @@ do {								\
 do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u64 __x = (__typeof__((x)-(x)))(x);			\
-	uintptr_t __tmp;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
-		"	sw %z4, %2\n"				\
+		"	sw %z3, %1\n"				\
		"2:\n"						\
-		"	sw %z5, %3\n"				\
+		"	sw %z4, %2\n"				\
		"3:\n"						\
-		"	.section .fixup,\"ax\"\n"		\
-		"	.balign 4\n"				\
-		"4:\n"						\
-		"	li %0, %6\n"				\
-		"	jump 3b, %1\n"				\
-		"	.previous\n"				\
-		"	.section __ex_table,\"a\"\n"		\
-		"	.balign " RISCV_SZPTR "\n"		\
-		"	" RISCV_PTR " 1b, 4b\n"			\
-		"	" RISCV_PTR " 2b, 4b\n"			\
-		"	.previous"				\
-		: "+r" (err), "=r" (__tmp),			\
+		_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0)		\
+		_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0)		\
+		: "+r" (err),					\
		  "=m" (__ptr[__LSW]),				\
		  "=m" (__ptr[__MSW])				\
-		: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT));	\
+		: "rJ" (__x), "rJ" (__x >> 32));		\
 } while (0)
 
 #endif /* CONFIG_64BIT */
@@ -388,81 +346,6 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
		__clear_user(to, n) : n;
 }
 
-/*
- * Atomic compare-and-exchange, but with a fixup for userspace faults.  Faults
- * will set "err" to -EFAULT, while successful accesses return the previous
- * value.
- */
-#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb)	\
-({								\
-	__typeof__(ptr) __ptr = (ptr);				\
-	__typeof__(*(ptr)) __old = (old);			\
-	__typeof__(*(ptr)) __new = (new);			\
-	__typeof__(*(ptr)) __ret;				\
-	__typeof__(err) __err = 0;				\
-	register unsigned int __rc;				\
-	__enable_user_access();					\
-	switch (size) {						\
-	case 4:							\
-		__asm__ __volatile__ (				\
-		"0:\n"						\
-		"	lr.w" #scb " %[ret], %[ptr]\n"		\
-		"	bne          %[ret], %z[old], 1f\n"	\
-		"	sc.w" #lrb " %[rc], %z[new], %[ptr]\n"	\
-		"	bnez         %[rc], 0b\n"		\
-		"1:\n"						\
-		".section .fixup,\"ax\"\n"			\
-		".balign 4\n"					\
-		"2:\n"						\
-		"	li %[err], %[efault]\n"			\
-		"	jump 1b, %[rc]\n"			\
-		".previous\n"					\
-		".section __ex_table,\"a\"\n"			\
-		".balign " RISCV_SZPTR "\n"			\
-		"	" RISCV_PTR " 1b, 2b\n"			\
-		".previous\n"					\
-		: [ret] "=&r" (__ret),				\
-		  [rc] "=&r" (__rc),				\
-		  [ptr] "+A" (*__ptr),				\
-		  [err] "=&r" (__err)				\
-		: [old] "rJ" (__old),				\
-		  [new] "rJ" (__new),				\
-		  [efault] "i" (-EFAULT));			\
-		break;						\
-	case 8:							\
-		__asm__ __volatile__ (				\
-		"0:\n"						\
-		"	lr.d" #scb " %[ret], %[ptr]\n"		\
-		"	bne          %[ret], %z[old], 1f\n"	\
-		"	sc.d" #lrb " %[rc], %z[new], %[ptr]\n"	\
-		"	bnez         %[rc], 0b\n"		\
-		"1:\n"						\
-		".section .fixup,\"ax\"\n"			\
-		".balign 4\n"					\
-		"2:\n"						\
-		"	li %[err], %[efault]\n"			\
-		"	jump 1b, %[rc]\n"			\
-		".previous\n"					\
-		".section __ex_table,\"a\"\n"			\
-		".balign " RISCV_SZPTR "\n"			\
-		"	" RISCV_PTR " 1b, 2b\n"			\
-		".previous\n"					\
-		: [ret] "=&r" (__ret),				\
-		  [rc] "=&r" (__rc),				\
-		  [ptr] "+A" (*__ptr),				\
-		  [err] "=&r" (__err)				\
-		: [old] "rJ" (__old),				\
-		  [new] "rJ" (__new),				\
-		  [efault] "i" (-EFAULT));			\
-		break;						\
-	default:						\
-		BUILD_BUG();					\
-	}							\
-	__disable_user_access();				\
-	(err) = __err;						\
-	__ret;							\
-})
-
 #define HAVE_GET_KERNEL_NOFAULT
 
 #define __get_kernel_nofault(dst, src, type, err_label)		\
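
For reference, the shape __get_user_asm now generates for a 32-bit load, written out as a standalone sketch. The real macro additionally runs between __enable_user_access()/__disable_user_access(), and the fault path is wired through the out-of-line table entry rather than a local .fixup stub:

static inline int get_user_u32_sketch(u32 *dst, const u32 __user *src)
{
	int err = 0;
	u32 val;

	__asm__ __volatile__ (
		"1:	lw %1, %2\n"
		"2:\n"
		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1)
		: "+r" (err), "=&r" (val)
		: "m" (*src));

	*dst = val;	/* zeroed by the fixup handler on fault */
	return err;	/* 0 on success, -EFAULT after a fault */
}
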