30 files changed, 793 insertions, 389 deletions
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index 17ea3fecddaa..899a72570282 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -70,8 +70,12 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A57 | #1319537 | ARM64_ERRATUM_1319367 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A72 | #853709 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A72 | #1319367 | ARM64_ERRATUM_1319367 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 | @@ -88,6 +92,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1349291 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | MMU-500 | #841119,826419 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ @@ -107,6 +113,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | Cavium | ThunderX2 SMMUv3| #126 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +| Cavium | ThunderX2 Core | #219 | CAVIUM_TX2_ERRATUM_219 | ++----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ | Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 | +----------------+-----------------+-----------------+-----------------------------+ diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0ffb8596b8a1..2901d6656c45 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -540,6 +540,16 @@ config ARM64_ERRATUM_1286807 invalidated has been observed by other observers. The workaround repeats the TLBI+DSB operation. +config ARM64_ERRATUM_1319367 + bool "Cortex-A57/A72: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation" + default y + help + This option adds workarounds for ARM Cortex-A57 erratum 1319537 + and A72 erratum 1319367. + + Cortex-A57 and A72 cores could end up with corrupted TLBs by + speculating an AT instruction during a guest context switch. + If unsure, say Y. config ARM64_ERRATUM_1463225 @@ -560,6 +570,22 @@ config ARM64_ERRATUM_1463225 If unsure, say Y. +config ARM64_ERRATUM_1542419 + bool "Neoverse-N1: workaround mis-ordering of instruction fetches" + default y + help + This option adds a workaround for ARM Neoverse-N1 erratum + 1542419. + + Affected Neoverse-N1 cores could execute a stale instruction when + modified by another CPU. The workaround depends on a firmware + counterpart. + + Work around the issue by hiding the DIC feature from EL0.
This + forces user-space to perform cache maintenance. + + If unsure, say Y. + config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y @@ -618,6 +644,23 @@ config CAVIUM_ERRATUM_30115 If unsure, say Y. +config CAVIUM_TX2_ERRATUM_219 + bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails" + default y + help + On Cavium ThunderX2, a load, store or prefetch instruction between a + TTBR update and the corresponding context synchronizing operation can + cause a spurious Data Abort to be delivered to any hardware thread in + the CPU core. + + Work around the issue by avoiding the problematic code sequence and + trapping KVM guest TTBRx_EL1 writes to EL2 when SMT is enabled. The + trap handler performs the corresponding register access, skips the + instruction and ensures context synchronization by virtue of the + exception return. + + If unsure, say Y. + config QCOM_FALKOR_ERRATUM_1003 bool "Falkor E1003: Incorrect translation due to ASID change" default y diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index f74909ba29bd..a70575edae8e 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -74,14 +74,4 @@ alternative_if ARM64_ALT_PAN_NOT_UAO SET_PSTATE_PAN(0) alternative_else_nop_endif .endm - -/* - * Remove the address tag from a virtual address, if present. - */ - .macro clear_address_tag, dst, addr - tst \addr, #(1 << 55) - bic \dst, \addr, #(0xff << 56) - csel \dst, \dst, \addr, eq - .endm - #endif diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 43da6dd29592..806e9dc2a852 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -11,6 +11,7 @@ #define CTR_L1IP_MASK 3 #define CTR_DMINLINE_SHIFT 16 #define CTR_IMINLINE_SHIFT 0 +#define CTR_IMINLINE_MASK 0xf #define CTR_ERG_SHIFT 20 #define CTR_CWG_SHIFT 24 #define CTR_CWG_MASK 15 @@ -18,7 +19,7 @@ #define CTR_DIC_SHIFT 29 #define CTR_CACHE_MINLINE_MASK \ - (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT) + (0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT) #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index f19fe4b9acc4..b92683871119 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -52,7 +52,11 @@ #define ARM64_HAS_IRQ_PRIO_MASKING 42 #define ARM64_HAS_DCPODP 43 #define ARM64_WORKAROUND_1463225 44 +#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45 +#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46 +#define ARM64_WORKAROUND_1542419 47 +#define ARM64_WORKAROUND_1319367 48 -#define ARM64_NCAPS 45 +#define ARM64_NCAPS 49 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index 063c964af705..9207cd5aa39e 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -9,6 +9,7 @@ #include <asm/arch_gicv3.h> #include <asm/cpufeature.h> +#include <asm/ptrace.h> #define DAIF_PROCCTX 0 #define DAIF_PROCCTX_NOIRQ PSR_I_BIT @@ -109,4 +110,19 @@ static inline void local_daif_restore(unsigned long flags) trace_hardirqs_off(); } +/* + * Called by synchronous exception handlers to restore the DAIF bits that were + * modified by taking an exception. 
+ */ +static inline void local_daif_inherit(struct pt_regs *regs) +{ + unsigned long flags = regs->pstate & DAIF_MASK; + + /* + * We can't use local_daif_restore(regs->pstate) here as + * system_has_prio_mask_debugging() won't restore the I bit if it can + * use the pmr instead. + */ + write_sysreg(flags, daif); +} #endif diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index a17393ff6677..4d5f3b5f50cd 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -8,14 +8,15 @@ #define __ASM_EXCEPTION_H #include <asm/esr.h> +#include <asm/kprobes.h> +#include <asm/ptrace.h> #include <linux/interrupt.h> -#define __exception __attribute__((section(".exception.text"))) #ifdef CONFIG_FUNCTION_GRAPH_TRACER #define __exception_irq_entry __irq_entry #else -#define __exception_irq_entry __exception +#define __exception_irq_entry __kprobes #endif static inline u32 disr_to_esr(u64 disr) @@ -31,5 +32,22 @@ static inline u32 disr_to_esr(u64 disr) } asmlinkage void enter_from_user_mode(void); +void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); +void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); +void do_undefinstr(struct pt_regs *regs); +asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr); +void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, + struct pt_regs *regs); +void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs); +void do_sve_acc(unsigned int esr, struct pt_regs *regs); +void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs); +void do_sysinstr(unsigned int esr, struct pt_regs *regs); +void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); +void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr); +void do_cp15instr(unsigned int esr, struct pt_regs *regs); +void el0_svc_handler(struct pt_regs *regs); +void el0_svc_compat_handler(struct pt_regs *regs); +void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr, + struct pt_regs *regs); #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index b61b50bf68b1..c23c47360664 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -215,12 +215,18 @@ static inline unsigned long kaslr_offset(void) * up with a tagged userland pointer. Clear the tag to get a sane pointer to * pass on to access_ok(), for instance. 
*/ -#define untagged_addr(addr) \ +#define __untagged_addr(addr) \ ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55)) +#define untagged_addr(addr) ({ \ + u64 __addr = (__force u64)(addr); \ + __addr &= __untagged_addr(__addr); \ + (__force __typeof__(addr))__addr; \ +}) + #ifdef CONFIG_KASAN_SW_TAGS #define __tag_shifted(tag) ((u64)(tag) << 56) -#define __tag_reset(addr) untagged_addr(addr) +#define __tag_reset(addr) __untagged_addr(addr) #define __tag_get(addr) (__u8)((u64)(addr) >> 56) #else #define __tag_shifted(tag) 0UL diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 7576df00eb50..8330810f699e 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -876,9 +876,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) -#define kc_vaddr_to_offset(v) ((v) & ~PAGE_END) -#define kc_offset_to_vaddr(o) ((o) | PAGE_END) - #ifdef CONFIG_ARM64_PA_BITS_52 #define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52) #else diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 5623685c7d13..8899d26f73ff 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -26,10 +26,12 @@ #include <linux/init.h> #include <linux/stddef.h> #include <linux/string.h> +#include <linux/thread_info.h> #include <asm/alternative.h> #include <asm/cpufeature.h> #include <asm/hw_breakpoint.h> +#include <asm/kasan.h> #include <asm/lse.h> #include <asm/pgtable-hwdef.h> #include <asm/pointer_auth.h> @@ -214,6 +216,18 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc, regs->sp = sp; } +static inline bool is_ttbr0_addr(unsigned long addr) +{ + /* entry assembly clears tags for TTBR0 addrs */ + return addr < TASK_SIZE; +} + +static inline bool is_ttbr1_addr(unsigned long addr) +{ + /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */ + return arch_kasan_reset_tag(addr) >= PAGE_OFFSET; +} + #ifdef CONFIG_COMPAT static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 972d196c7714..6e919fafb43d 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -212,7 +212,7 @@ #define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) -#define SYS_PAR_EL1_F BIT(1) +#define SYS_PAR_EL1_F BIT(0) #define SYS_PAR_EL1_FST GENMASK(6, 1) /*** Statistical Profiling Extension ***/ diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index 59690613ac31..cee5928e1b7d 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -42,16 +42,6 @@ static inline int __in_irqentry_text(unsigned long ptr) ptr < (unsigned long)&__irqentry_text_end; } -static inline int in_exception_text(unsigned long ptr) -{ - int in; - - in = ptr >= (unsigned long)&__exception_text_start && - ptr < (unsigned long)&__exception_text_end; - - return in ? : __in_irqentry_text(ptr); -} - static inline int in_entry_text(unsigned long ptr) { return ptr >= (unsigned long)&__entry_text_start && diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 478491f07b4f..fc6488660f64 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -13,9 +13,9 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE) # Object file lists.
obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ - entry-fpsimd.o process.o ptrace.o setup.o signal.o \ - sys.o stacktrace.o time.o traps.o io.o vdso.o \ - hyp-stub.o psci.o cpu_ops.o insn.o \ + entry-common.o entry-fpsimd.o process.o ptrace.o \ + setup.o signal.o sys.o stacktrace.o time.o traps.o \ + io.o vdso.o hyp-stub.o psci.o cpu_ops.o insn.o \ return_address.o cpuinfo.o cpu_errata.o \ cpufeature.o alternative.o cacheinfo.o \ smp.o smp_spin_table.o topology.o smccc-call.o \ diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index f593f4cffc0d..e2c4ca1e636e 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -12,6 +12,7 @@ #include <asm/cpu.h> #include <asm/cputype.h> #include <asm/cpufeature.h> +#include <asm/smp_plat.h> static bool __maybe_unused is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) @@ -87,13 +88,21 @@ has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry, } static void -cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused) +cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap) { u64 mask = arm64_ftr_reg_ctrel0.strict_mask; + bool enable_uct_trap = false; /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */ if ((read_cpuid_cachetype() & mask) != (arm64_ftr_reg_ctrel0.sys_val & mask)) + enable_uct_trap = true; + + /* ... or if the system is affected by an erratum */ + if (cap->capability == ARM64_WORKAROUND_1542419) + enable_uct_trap = true; + + if (enable_uct_trap) sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); } @@ -623,9 +632,45 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) return (need_wa > 0); } -#ifdef CONFIG_HARDEN_EL2_VECTORS +static const __maybe_unused struct midr_range tx2_family_cpus[] = { + MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), + MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), + {}, +}; + +static bool __maybe_unused +needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry, + int scope) +{ + int i; + + if (!is_affected_midr_range_list(entry, scope) || + !is_hyp_mode_available()) + return false; + + for_each_possible_cpu(i) { + if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0) + return true; + } + + return false; +} + +static bool __maybe_unused +has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry, + int scope) +{ + u32 midr = read_cpuid_id(); + bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT); + const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1); + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + return is_midr_in_range(midr, &range) && has_dic; +} + +#if defined(CONFIG_HARDEN_EL2_VECTORS) || defined(CONFIG_ARM64_ERRATUM_1319367) -static const struct midr_range arm64_harden_el2_vectors[] = { +static const struct midr_range ca57_a72[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), {}, @@ -819,7 +864,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .desc = "EL2 vector hardening", .capability = ARM64_HARDEN_EL2_VECTORS, - ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors), + ERRATA_MIDR_RANGE_LIST(ca57_a72), }, #endif { @@ -852,6 +897,36 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .matches = has_cortex_a76_erratum_1463225, }, #endif +#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219 + { + .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)", + .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM, + ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), + .matches = 
needs_tx2_tvm_workaround, + }, + { + .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)", + .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM, + ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1542419 + { + /* we depend on the firmware portion for correctness */ + .desc = "ARM erratum 1542419 (kernel portion)", + .capability = ARM64_WORKAROUND_1542419, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = has_neoverse_n1_erratum_1542419, + .cpu_enable = cpu_enable_trap_ctr_access, + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1319367 + { + .desc = "ARM erratum 1319367", + .capability = ARM64_WORKAROUND_1319367, + ERRATA_MIDR_RANGE_LIST(ca57_a72), + }, +#endif { } }; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index cabebf1a7976..80f459ad0190 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -176,11 +176,16 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { }; static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = { - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0), ARM64_FTR_END, }; diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c new file mode 100644 index 000000000000..5dce5e56995a --- /dev/null +++ b/arch/arm64/kernel/entry-common.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Exception handling code + * + * Copyright (C) 2019 ARM Ltd. 
+ */ + +#include <linux/context_tracking.h> +#include <linux/ptrace.h> +#include <linux/thread_info.h> + +#include <asm/cpufeature.h> +#include <asm/daifflags.h> +#include <asm/esr.h> +#include <asm/exception.h> +#include <asm/kprobes.h> +#include <asm/mmu.h> +#include <asm/sysreg.h> + +static void notrace el1_abort(struct pt_regs *regs, unsigned long esr) +{ + unsigned long far = read_sysreg(far_el1); + + local_daif_inherit(regs); + far = untagged_addr(far); + do_mem_abort(far, esr, regs); +} +NOKPROBE_SYMBOL(el1_abort); + +static void notrace el1_pc(struct pt_regs *regs, unsigned long esr) +{ + unsigned long far = read_sysreg(far_el1); + + local_daif_inherit(regs); + do_sp_pc_abort(far, esr, regs); +} +NOKPROBE_SYMBOL(el1_pc); + +static void el1_undef(struct pt_regs *regs) +{ + local_daif_inherit(regs); + do_undefinstr(regs); +} +NOKPROBE_SYMBOL(el1_undef); + +static void el1_inv(struct pt_regs *regs, unsigned long esr) +{ + local_daif_inherit(regs); + bad_mode(regs, 0, esr); +} +NOKPROBE_SYMBOL(el1_inv); + +static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr) +{ + unsigned long far = read_sysreg(far_el1); + + /* + * The CPU masked interrupts, and we are leaving them masked during + * do_debug_exception(). Update PMR as if we had called + * local_daif_mask(). + */ + if (system_uses_irq_prio_masking()) + gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); + + do_debug_exception(far, esr, regs); +} +NOKPROBE_SYMBOL(el1_dbg); + +asmlinkage void notrace el1_sync_handler(struct pt_regs *regs) +{ + unsigned long esr = read_sysreg(esr_el1); + + switch (ESR_ELx_EC(esr)) { + case ESR_ELx_EC_DABT_CUR: + case ESR_ELx_EC_IABT_CUR: + el1_abort(regs, esr); + break; + /* + * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a + * recursive exception when trying to push the initial pt_regs. + */ + case ESR_ELx_EC_PC_ALIGN: + el1_pc(regs, esr); + break; + case ESR_ELx_EC_SYS64: + case ESR_ELx_EC_UNKNOWN: + el1_undef(regs); + break; + case ESR_ELx_EC_BREAKPT_CUR: + case ESR_ELx_EC_SOFTSTP_CUR: + case ESR_ELx_EC_WATCHPT_CUR: + case ESR_ELx_EC_BRK64: + el1_dbg(regs, esr); + break; + default: + el1_inv(regs, esr); + }; +} +NOKPROBE_SYMBOL(el1_sync_handler); + +static void notrace el0_da(struct pt_regs *regs, unsigned long esr) +{ + unsigned long far = read_sysreg(far_el1); + + user_exit_irqoff(); + local_daif_restore(DAIF_PROCCTX); + far = untagged_addr(far); + do_mem_abort(far, esr, regs); +} +NOKPROBE_SYMBOL(el0_da); + +static void notrace el0_ia(struct pt_regs *regs, unsigned long esr) +{ + unsigned long far = read_sysreg(far_el1); + + /* + * We've taken an instruction abort from userspace and not yet + * re-enabled IRQs. If the address is a kernel address, apply + * BP hardening prior to enabling IRQs and pre-emption.
+ */ + if (!is_ttbr0_addr(far)) + arm64_apply_bp_hardening(); + + user_exit_irqoff(); + local_daif_restore(DAIF_PROCCTX); + do_mem_abort(far, esr, regs); +} +NOKPROBE_SYMBOL(el0_ia); + +static void notrace el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr) +{ + user_exit_irqoff(); + local_daif_restore(DAIF_PROCCTX); + do_fpsimd_acc(esr, regs); +} +NOKPROBE_SYMBOL(el0_fpsimd_acc); + +static void notrace el0_sve_acc(struct pt_regs *regs, unsigned long esr) +{ + user_exit_irqoff(); + local_daif_restore(DAIF_PROCCTX); + do_sve_acc(esr, regs); +} +NOKPROBE_SYMBOL(el0_sve_acc); + +static void notrace el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr) +{ + user_exit_irqoff(); + local_daif_restore(DAIF_PROCCTX); + do_fpsimd_exc(esr, regs); +} +NOKPROBE_SYMBOL(el0_fpsimd_exc); + +static void notrace el0_sys(struct pt_regs *regs, unsigned long esr) +{ + user_exit_irqoff(); + local_daif_restore(DAIF_PROCCTX); + do_sysinstr(esr, regs); +} +NOKPROBE_SYMBOL(el0_sys); + +static void notrace el0_pc(struct pt_regs *regs, unsigned long esr) +{ + unsigned long far = read_sysreg(far_el1); + + if (!is_ttbr0_addr(instruction_pointer(regs))) + arm64_apply_bp_hardening(); + + user_exit_irqoff(); + local_daif_restore(DAIF_PROCCTX); + do_sp_pc_abort(far, esr, regs); +} +NOKPROBE_SYMBOL(el0_pc); + +static void notrace el0_sp(struct pt_regs *regs, unsigned long esr) +{ + user_exit_irqoff(); + local_daif_restore(DAIF_PROCCTX_NOIRQ); + do_sp_pc_abort(regs->sp, esr, regs); +} +NOKPROBE_SYMBOL(el0_sp); + +static void notrace el0_undef(struct pt_regs *regs) +{ + user_exit_irqoff(); + local_daif_restore(DAIF_PROCCTX); + do_undefinstr(regs); +} +NOKPROBE_SYMBOL(el0_undef); + +static void notrace el0_inv(struct pt_regs *regs, unsigned long esr) +{ + user_exit_irqoff(); + local_daif_restore(DAIF_PROCCTX); + bad_el0_sync(regs, 0, esr); +} +NOKPROBE_SYMBOL(el0_inv); + +static void notrace el0_dbg(struct pt_regs *regs, unsigned long esr) +{ + /* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */ + unsigned long far = read_sysreg(far_el1); + + if (system_uses_irq_prio_masking()) + gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); + + user_exit_irqoff(); + do_debug_exception(far, esr, regs); + local_daif_restore(DAIF_PROCCTX_NOIRQ); +} +NOKPROBE_SYMBOL(el0_dbg); + +static void notrace el0_svc(struct pt_regs *regs) +{ + if (system_uses_irq_prio_masking()) + gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); + + el0_svc_handler(regs); +} +NOKPROBE_SYMBOL(el0_svc); + +asmlinkage void notrace el0_sync_handler(struct pt_regs *regs) +{ + unsigned long esr = read_sysreg(esr_el1); + + switch (ESR_ELx_EC(esr)) { + case ESR_ELx_EC_SVC64: + el0_svc(regs); + break; + case ESR_ELx_EC_DABT_LOW: + el0_da(regs, esr); + break; + case ESR_ELx_EC_IABT_LOW: + el0_ia(regs, esr); + break; + case ESR_ELx_EC_FP_ASIMD: + el0_fpsimd_acc(regs, esr); + break; + case ESR_ELx_EC_SVE: + el0_sve_acc(regs, esr); + break; + case ESR_ELx_EC_FP_EXC64: + el0_fpsimd_exc(regs, esr); + break; + case ESR_ELx_EC_SYS64: + case ESR_ELx_EC_WFx: + el0_sys(regs, esr); + break; + case ESR_ELx_EC_SP_ALIGN: + el0_sp(regs, esr); + break; + case ESR_ELx_EC_PC_ALIGN: + el0_pc(regs, esr); + break; + case ESR_ELx_EC_UNKNOWN: + el0_undef(regs); + break; + case ESR_ELx_EC_BREAKPT_LOW: + case ESR_ELx_EC_SOFTSTP_LOW: + case ESR_ELx_EC_WATCHPT_LOW: + case ESR_ELx_EC_BRK64: + el0_dbg(regs, esr); + break; + default: + el0_inv(regs, esr); + } +} +NOKPROBE_SYMBOL(el0_sync_handler); + +#ifdef CONFIG_COMPAT +static void notrace el0_cp15(struct pt_regs *regs, unsigned long esr)
+{ + user_exit_irqoff(); + local_daif_restore(DAIF_PROCCTX); + do_cp15instr(esr, regs); +} +NOKPROBE_SYMBOL(el0_cp15); + +static void notrace el0_svc_compat(struct pt_regs *regs) +{ + if (system_uses_irq_prio_masking()) + gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); + + el0_svc_compat_handler(regs); +} +NOKPROBE_SYMBOL(el0_svc_compat); + +asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs) +{ + unsigned long esr = read_sysreg(esr_el1); + + switch (ESR_ELx_EC(esr)) { + case ESR_ELx_EC_SVC32: + el0_svc_compat(regs); + break; + case ESR_ELx_EC_DABT_LOW: + el0_da(regs, esr); + break; + case ESR_ELx_EC_IABT_LOW: + el0_ia(regs, esr); + break; + case ESR_ELx_EC_FP_ASIMD: + el0_fpsimd_acc(regs, esr); + break; + case ESR_ELx_EC_FP_EXC32: + el0_fpsimd_exc(regs, esr); + break; + case ESR_ELx_EC_PC_ALIGN: + el0_pc(regs, esr); + break; + case ESR_ELx_EC_UNKNOWN: + case ESR_ELx_EC_CP14_MR: + case ESR_ELx_EC_CP14_LS: + case ESR_ELx_EC_CP14_64: + el0_undef(regs); + break; + case ESR_ELx_EC_CP15_32: + case ESR_ELx_EC_CP15_64: + el0_cp15(regs, esr); + break; + case ESR_ELx_EC_BREAKPT_LOW: + case ESR_ELx_EC_SOFTSTP_LOW: + case ESR_ELx_EC_WATCHPT_LOW: + case ESR_ELx_EC_BKPT32: + el0_dbg(regs, esr); + break; + default: + el0_inv(regs, esr); + } +} +NOKPROBE_SYMBOL(el0_sync_compat_handler); +#endif /* CONFIG_COMPAT */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index e304fe04b098..d440a2a51729 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -578,76 +578,9 @@ ENDPROC(el1_error_invalid) .align 6 el1_sync: kernel_entry 1 - mrs x1, esr_el1 // read the syndrome register - lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class - cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1 - b.eq el1_da - cmp x24, #ESR_ELx_EC_IABT_CUR // instruction abort in EL1 - b.eq el1_ia - cmp x24, #ESR_ELx_EC_SYS64 // configurable trap - b.eq el1_undef - cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception - b.eq el1_pc - cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1 - b.eq el1_undef - cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1 - b.ge el1_dbg - b el1_inv - -el1_ia: - /* - * Fall through to the Data abort case - */ -el1_da: - /* - * Data abort handling - */ - mrs x3, far_el1 - inherit_daif pstate=x23, tmp=x2 - clear_address_tag x0, x3 - mov x2, sp // struct pt_regs - bl do_mem_abort - - kernel_exit 1 -el1_pc: - /* - * PC alignment exception handling. We don't handle SP alignment faults, - * since we will have hit a recursive exception when trying to push the - * initial pt_regs. 
- */ - mrs x0, far_el1 - inherit_daif pstate=x23, tmp=x2 - mov x2, sp - bl do_sp_pc_abort - ASM_BUG() -el1_undef: - /* - * Undefined instruction - */ - inherit_daif pstate=x23, tmp=x2 mov x0, sp - bl do_undefinstr + bl el1_sync_handler kernel_exit 1 -el1_dbg: - /* - * Debug exception handling - */ - cmp x24, #ESR_ELx_EC_BRK64 // if BRK64 - cinc x24, x24, eq // set bit '0' - tbz x24, #0, el1_inv // EL1 only - gic_prio_kentry_setup tmp=x3 - mrs x0, far_el1 - mov x2, sp // struct pt_regs - bl do_debug_exception - kernel_exit 1 -el1_inv: - // TODO: add support for undefined instructions in kernel mode - inherit_daif pstate=x23, tmp=x2 - mov x0, sp - mov x2, x1 - mov x1, #BAD_SYNC - bl bad_mode - ASM_BUG() ENDPROC(el1_sync) .align 6 @@ -680,7 +613,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING orr x24, x24, x0 alternative_else_nop_endif cbnz x24, 1f // preempt count != 0 || NMI return path - bl preempt_schedule_irq // irq en/disable is done inside + bl arm64_preempt_schedule_irq // irq en/disable is done inside 1: #endif @@ -714,71 +647,18 @@ ENDPROC(el1_irq) .align 6 el0_sync: kernel_entry 0 - mrs x25, esr_el1 // read the syndrome register - lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class - cmp x24, #ESR_ELx_EC_SVC64 // SVC in 64-bit state - b.eq el0_svc - cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0 - b.eq el0_da - cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0 - b.eq el0_ia - cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access - b.eq el0_fpsimd_acc - cmp x24, #ESR_ELx_EC_SVE // SVE access - b.eq el0_sve_acc - cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception - b.eq el0_fpsimd_exc - cmp x24, #ESR_ELx_EC_SYS64 // configurable trap - ccmp x24, #ESR_ELx_EC_WFx, #4, ne - b.eq el0_sys - cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception - b.eq el0_sp - cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception - b.eq el0_pc - cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0 - b.eq el0_undef - cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0 - b.ge el0_dbg - b el0_inv + mov x0, sp + bl el0_sync_handler + b ret_to_user #ifdef CONFIG_COMPAT .align 6 el0_sync_compat: kernel_entry 0, 32 - mrs x25, esr_el1 // read the syndrome register - lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class - cmp x24, #ESR_ELx_EC_SVC32 // SVC in 32-bit state - b.eq el0_svc_compat - cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0 - b.eq el0_da - cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0 - b.eq el0_ia - cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access - b.eq el0_fpsimd_acc - cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception - b.eq el0_fpsimd_exc - cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception - b.eq el0_pc - cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0 - b.eq el0_undef - cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap - b.eq el0_cp15 - cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap - b.eq el0_cp15 - cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap - b.eq el0_undef - cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap - b.eq el0_undef - cmp x24, #ESR_ELx_EC_CP14_64 // CP14 MRRC/MCRR trap - b.eq el0_undef - cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0 - b.ge el0_dbg - b el0_inv -el0_svc_compat: - gic_prio_kentry_setup tmp=x1 mov x0, sp - bl el0_svc_compat_handler + bl el0_sync_compat_handler b ret_to_user +ENDPROC(el0_sync) .align 6 el0_irq_compat: @@ -788,140 +668,8 @@ el0_irq_compat: el0_error_compat: kernel_entry 0, 32 b el0_error_naked - -el0_cp15: - /* - * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions - */ - 
ct_user_exit_irqoff - enable_daif - mov x0, x25 - mov x1, sp - bl do_cp15instr - b ret_to_user #endif -el0_da: - /* - * Data abort handling - */ - mrs x26, far_el1 - ct_user_exit_irqoff - enable_daif - clear_address_tag x0, x26 - mov x1, x25 - mov x2, sp - bl do_mem_abort - b ret_to_user -el0_ia: - /* - * Instruction abort handling - */ - mrs x26, far_el1 - gic_prio_kentry_setup tmp=x0 - ct_user_exit_irqoff - enable_da_f -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off -#endif - mov x0, x26 - mov x1, x25 - mov x2, sp - bl do_el0_ia_bp_hardening - b ret_to_user -el0_fpsimd_acc: - /* - * Floating Point or Advanced SIMD access - */ - ct_user_exit_irqoff - enable_daif - mov x0, x25 - mov x1, sp - bl do_fpsimd_acc - b ret_to_user -el0_sve_acc: - /* - * Scalable Vector Extension access - */ - ct_user_exit_irqoff - enable_daif - mov x0, x25 - mov x1, sp - bl do_sve_acc - b ret_to_user -el0_fpsimd_exc: - /* - * Floating Point, Advanced SIMD or SVE exception - */ - ct_user_exit_irqoff - enable_daif - mov x0, x25 - mov x1, sp - bl do_fpsimd_exc - b ret_to_user -el0_sp: - ldr x26, [sp, #S_SP] - b el0_sp_pc -el0_pc: - mrs x26, far_el1 -el0_sp_pc: - /* - * Stack or PC alignment exception handling - */ - gic_prio_kentry_setup tmp=x0 - ct_user_exit_irqoff - enable_da_f -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off -#endif - mov x0, x26 - mov x1, x25 - mov x2, sp - bl do_sp_pc_abort - b ret_to_user -el0_undef: - /* - * Undefined instruction - */ - ct_user_exit_irqoff - enable_daif - mov x0, sp - bl do_undefinstr - b ret_to_user -el0_sys: - /* - * System instructions, for trapped cache maintenance instructions - */ - ct_user_exit_irqoff - enable_daif - mov x0, x25 - mov x1, sp - bl do_sysinstr - b ret_to_user -el0_dbg: - /* - * Debug exception handling - */ - tbnz x24, #0, el0_inv // EL0 only - mrs x24, far_el1 - gic_prio_kentry_setup tmp=x3 - ct_user_exit_irqoff - mov x0, x24 - mov x1, x25 - mov x2, sp - bl do_debug_exception - enable_da_f - b ret_to_user -el0_inv: - ct_user_exit_irqoff - enable_daif - mov x0, sp - mov x1, #BAD_SYNC - mov x2, x25 - bl bad_el0_sync - b ret_to_user -ENDPROC(el0_sync) - .align 6 el0_irq: kernel_entry 0 @@ -999,17 +747,6 @@ finish_ret_to_user: kernel_exit 0 ENDPROC(ret_to_user) -/* - * SVC handler. - */ - .align 6 -el0_svc: - gic_prio_kentry_setup tmp=x1 - mov x0, sp - bl el0_svc_handler - b ret_to_user -ENDPROC(el0_svc) - .popsection // .entry.text #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 @@ -1071,7 +808,9 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 #else ldr x30, =vectors #endif +alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM prfm plil1strm, [x30, #(1b - tramp_vectors)] +alternative_else_nop_endif msr vbar_el1, x30 add x30, x30, #(1b - tramp_vectors) isb diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 37d3912cfe06..3eb338f14386 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -920,7 +920,7 @@ void fpsimd_release_task(struct task_struct *dead_task) * would have disabled the SVE access trap for userspace during * ret_to_user, making an SVE access trap impossible in that case. */ -asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) +void do_sve_acc(unsigned int esr, struct pt_regs *regs) { /* Even if we chose not to use SVE, the hardware could still trap: */ if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { @@ -947,7 +947,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) /* * Trapped FP/ASIMD access. 
*/ -asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) +void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) { /* TODO: implement lazy context saving/restoring */ WARN_ON(1); @@ -956,7 +956,7 @@ asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) /* * Raise a SIGFPE for the current process. */ -asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) +void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) { unsigned int si_code = FPE_FLTUNK; diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index e0a7fce0e01c..a96b2921d22c 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -201,6 +201,7 @@ static int create_safe_exec_page(void *src_start, size_t length, gfp_t mask) { int rc = 0; + pgd_t *trans_pgd; pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; @@ -215,7 +216,13 @@ static int create_safe_exec_page(void *src_start, size_t length, memcpy((void *)dst, src_start, length); __flush_icache_range(dst, dst + length); - pgdp = pgd_offset_raw(allocator(mask), dst_addr); + trans_pgd = allocator(mask); + if (!trans_pgd) { + rc = -ENOMEM; + goto out; + } + + pgdp = pgd_offset_raw(trans_pgd, dst_addr); if (pgd_none(READ_ONCE(*pgdp))) { pudp = allocator(mask); if (!pudp) { diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index c4452827419b..d1c95dcf1d78 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -455,10 +455,6 @@ int __init arch_populate_kprobe_blacklist(void) (unsigned long)__irqentry_text_end); if (ret) return ret; - ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start, - (unsigned long)__exception_text_end); - if (ret) - return ret; ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start, (unsigned long)__idmap_text_end); if (ret) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 1fb2819fc048..71f788cd2b18 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -17,6 +17,7 @@ #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/kernel.h> +#include <linux/lockdep.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/sysctl.h> @@ -44,6 +45,7 @@ #include <asm/alternative.h> #include <asm/arch_gicv3.h> #include <asm/compat.h> +#include <asm/cpufeature.h> #include <asm/cacheflush.h> #include <asm/exec.h> #include <asm/fpsimd.h> @@ -631,3 +633,19 @@ static int __init tagged_addr_init(void) core_initcall(tagged_addr_init); #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */ + +asmlinkage void __sched arm64_preempt_schedule_irq(void) +{ + lockdep_assert_irqs_disabled(); + + /* + * Preempting a task from an IRQ means we leave copies of PSTATE + * on the stack. cpufeature's enable calls may modify PSTATE, but + * resuming one of these preempted tasks would undo those changes. + * + * Only allow a task to be preempted once cpufeatures have been + * enabled. 
+ */ + if (static_branch_likely(&arm64_const_caps_ready)) + preempt_schedule_irq(); +} diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index f1cb64959427..3c18c2454089 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -8,6 +8,7 @@ */ #include <linux/compat.h> +#include <linux/cpufeature.h> #include <linux/personality.h> #include <linux/sched.h> #include <linux/sched/signal.h> @@ -17,6 +18,7 @@ #include <asm/cacheflush.h> #include <asm/system_misc.h> +#include <asm/tlbflush.h> #include <asm/unistd.h> static long @@ -30,6 +32,15 @@ __do_compat_cache_op(unsigned long start, unsigned long end) if (fatal_signal_pending(current)) return 0; + if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) { + /* + * The workaround requires an inner-shareable tlbi. + * We pick the reserved-ASID to minimise the impact. + */ + __tlbi(aside1is, __TLBI_VADDR(0, 0)); + dsb(ish); + } + ret = __flush_cache_user_range(start, start + chunk); if (ret) return ret; diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index 871c739f060a..9a9d98a443fc 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -154,14 +154,14 @@ static inline void sve_user_discard(void) sve_user_disable(); } -asmlinkage void el0_svc_handler(struct pt_regs *regs) +void el0_svc_handler(struct pt_regs *regs) { sve_user_discard(); el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table); } #ifdef CONFIG_COMPAT -asmlinkage void el0_svc_compat_handler(struct pt_regs *regs) +void el0_svc_compat_handler(struct pt_regs *regs) { el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls, compat_sys_call_table); diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 34739e80211b..73caf35c2262 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -35,6 +35,7 @@ #include <asm/debug-monitors.h> #include <asm/esr.h> #include <asm/insn.h> +#include <asm/kprobes.h> #include <asm/traps.h> #include <asm/smp.h> #include <asm/stack_pointer.h> @@ -393,7 +394,7 @@ void arm64_notify_segfault(unsigned long addr) force_signal_inject(SIGSEGV, code, addr); } -asmlinkage void __exception do_undefinstr(struct pt_regs *regs) +void do_undefinstr(struct pt_regs *regs) { /* check for AArch32 breakpoint instructions */ if (!aarch32_break_handler(regs)) @@ -405,6 +406,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) BUG_ON(!user_mode(regs)); force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc); } +NOKPROBE_SYMBOL(do_undefinstr); #define __user_cache_maint(insn, address, res) \ if (address >= user_addr_max()) { \ @@ -470,6 +472,15 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) int rt = ESR_ELx_SYS64_ISS_RT(esr); unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0); + if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) { + /* Hide DIC so that we can trap the unnecessary maintenance...*/ + val &= ~BIT(CTR_DIC_SHIFT); + + /* ... and fake IminLine to reduce the number of traps. 
*/ + val &= ~CTR_IMINLINE_MASK; + val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK; + } + pt_regs_write_reg(regs, rt, val); arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); @@ -667,7 +678,7 @@ static const struct sys64_hook cp15_64_hooks[] = { {}, }; -asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs) +void do_cp15instr(unsigned int esr, struct pt_regs *regs) { const struct sys64_hook *hook, *hook_base; @@ -705,9 +716,10 @@ asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs) */ do_undefinstr(regs); } +NOKPROBE_SYMBOL(do_cp15instr); #endif -asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) +void do_sysinstr(unsigned int esr, struct pt_regs *regs) { const struct sys64_hook *hook; @@ -724,6 +736,7 @@ asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) */ do_undefinstr(regs); } +NOKPROBE_SYMBOL(do_sysinstr); static const char *esr_class_str[] = { [0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC", @@ -793,7 +806,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) * bad_el0_sync handles unexpected, but potentially recoverable synchronous * exceptions taken from EL0. Unlike bad_mode, this returns. */ -asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) +void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) { void __user *pc = (void __user *)instruction_pointer(regs); diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index aa76f7259668..009057517bdd 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -111,9 +111,6 @@ SECTIONS } .text : { /* Real text segment */ _stext = .; /* Text and read-only data */ - __exception_text_start = .; - *(.exception.text) - __exception_text_end = .; IRQENTRY_TEXT SOFTIRQENTRY_TEXT ENTRY_TEXT diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 3d3815020e36..c99d5e034744 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -118,12 +118,29 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) } write_sysreg(val, cptr_el2); + + if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) { + struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; + + isb(); + /* + * At this stage, and thanks to the above isb(), S2 is + * configured and enabled. We can now restore the guest's S1 + * configuration: SCTLR, and only then TCR. + */ + write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR); + isb(); + write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR); + } } static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) { u64 hcr = vcpu->arch.hcr_el2; + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM)) + hcr |= HCR_TVM; + write_sysreg(hcr, hcr_el2); if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) @@ -156,6 +173,23 @@ static void __hyp_text __deactivate_traps_nvhe(void) { u64 mdcr_el2 = read_sysreg(mdcr_el2); + if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) { + u64 val; + + /* + * Set the TCR and SCTLR registers in the exact opposite + * sequence as __activate_traps_nvhe (first prevent walks, + * then force the MMU on). A generous sprinkling of isb() + * ensures that things happen in this exact order.
+ */ + val = read_sysreg_el1(SYS_TCR); + write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR); + isb(); + val = read_sysreg_el1(SYS_SCTLR); + write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR); + isb(); + } + __deactivate_traps_common(); mdcr_el2 &= MDCR_EL2_HPMN_MASK; @@ -174,8 +208,10 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) * the crucial bit is "On taking a vSError interrupt, * HCR_EL2.VSE is cleared to 0." */ - if (vcpu->arch.hcr_el2 & HCR_VSE) - vcpu->arch.hcr_el2 = read_sysreg(hcr_el2); + if (vcpu->arch.hcr_el2 & HCR_VSE) { + vcpu->arch.hcr_el2 &= ~HCR_VSE; + vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE; + } if (has_vhe()) deactivate_traps_vhe(); @@ -380,6 +416,61 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu) return true; } +static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu) +{ + u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu)); + int rt = kvm_vcpu_sys_get_rt(vcpu); + u64 val = vcpu_get_reg(vcpu, rt); + + /* + * The normal sysreg handling code expects to see the traps, + * so let's not do anything here. + */ + if (vcpu->arch.hcr_el2 & HCR_TVM) + return false; + + switch (sysreg) { + case SYS_SCTLR_EL1: + write_sysreg_el1(val, SYS_SCTLR); + break; + case SYS_TTBR0_EL1: + write_sysreg_el1(val, SYS_TTBR0); + break; + case SYS_TTBR1_EL1: + write_sysreg_el1(val, SYS_TTBR1); + break; + case SYS_TCR_EL1: + write_sysreg_el1(val, SYS_TCR); + break; + case SYS_ESR_EL1: + write_sysreg_el1(val, SYS_ESR); + break; + case SYS_FAR_EL1: + write_sysreg_el1(val, SYS_FAR); + break; + case SYS_AFSR0_EL1: + write_sysreg_el1(val, SYS_AFSR0); + break; + case SYS_AFSR1_EL1: + write_sysreg_el1(val, SYS_AFSR1); + break; + case SYS_MAIR_EL1: + write_sysreg_el1(val, SYS_MAIR); + break; + case SYS_AMAIR_EL1: + write_sysreg_el1(val, SYS_AMAIR); + break; + case SYS_CONTEXTIDR_EL1: + write_sysreg_el1(val, SYS_CONTEXTIDR); + break; + default: + return false; + } + + __kvm_skip_instr(vcpu); + return true; +} + /* * Return true when we were able to fixup the guest exit and should return to * the guest, false when we should restore the host state and return to the @@ -399,6 +490,11 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) if (*exit_code != ARM_EXCEPTION_TRAP) goto exit; + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) && + kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 && + handle_tx2_tvm(vcpu)) + return true; + /* * We trap the first access to the FP/SIMD to save the host context * and restore the guest context lazily. @@ -605,18 +701,23 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) __sysreg_save_state_nvhe(host_ctxt); - __activate_vm(kern_hyp_va(vcpu->kvm)); - __activate_traps(vcpu); - - __hyp_vgic_restore_state(vcpu); - __timer_enable_traps(vcpu); - /* * We must restore the 32-bit state before the sysregs, thanks * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72). + * + * Also, and in order to be able to deal with erratum #1319537 (A57) + * and #1319367 (A72), we must ensure that all VM-related sysregs are + * restored before we enable S2 translation.
*/ __sysreg32_restore_state(vcpu); __sysreg_restore_state_nvhe(guest_ctxt); + + __activate_vm(kern_hyp_va(vcpu->kvm)); + __activate_traps(vcpu); + + __hyp_vgic_restore_state(vcpu); + __timer_enable_traps(vcpu); + __debug_switch_to_guest(vcpu); __set_guest_arch_workaround_state(vcpu); diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 7ddbc849b580..22b8128d19f6 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -117,12 +117,26 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) { write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); - write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR); + + if (!cpus_have_const_cap(ARM64_WORKAROUND_1319367)) { + write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR); + write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR); + } else if (!ctxt->__hyp_running_vcpu) { + /* + * Must only be done for guest registers, hence the context + * test. We're coming from the host, so SCTLR.M is already + * set. Pairs with __activate_traps_nvhe(). + */ + write_sysreg_el1((ctxt->sys_regs[TCR_EL1] | + TCR_EPD1_MASK | TCR_EPD0_MASK), + SYS_TCR); + isb(); + } + write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1); write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR); write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0); write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1); - write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR); write_sysreg_el1(ctxt->sys_regs[ESR_EL1], SYS_ESR); write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], SYS_AFSR0); write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], SYS_AFSR1); @@ -135,6 +149,23 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); + if (cpus_have_const_cap(ARM64_WORKAROUND_1319367) && + ctxt->__hyp_running_vcpu) { + /* + * Must only be done for host registers, hence the context + * test. Pairs with __deactivate_traps_nvhe(). + */ + isb(); + /* + * At this stage, and thanks to the above isb(), S2 is + * deconfigured and disabled. We can now restore the host's + * S1 configuration: SCTLR, and only then TCR. + */ + write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR); + isb(); + write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR); + } + write_sysreg(ctxt->gp_regs.sp_el1, sp_el1); write_sysreg_el1(ctxt->gp_regs.elr_el1, SYS_ELR); write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR); diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index eb0efc5557f3..c2bc17ca6430 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -63,6 +63,22 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm, static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm, struct tlb_inv_context *cxt) { + if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) { + u64 val; + + /* + * For CPUs that are affected by ARM 1319367, we need to + * avoid a host Stage-1 walk while we have the guest's + * VMID set in the VTTBR in order to invalidate TLBs. + * We're guaranteed that the S1 MMU is enabled, so we can + * simply set the EPD bits to avoid any further TLB fill. 
+ */ + val = cxt->tcr = read_sysreg_el1(SYS_TCR); + val |= TCR_EPD1_MASK | TCR_EPD0_MASK; + write_sysreg_el1(val, SYS_TCR); + isb(); + } + __load_guest_stage2(kvm); isb(); } @@ -100,6 +116,13 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm, struct tlb_inv_context *cxt) { write_sysreg(0, vttbr_el2); + + if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) { + /* Ensure write of the host VMID */ + isb(); + /* Restore the host's TCR_EL1 */ + write_sysreg_el1(cxt->tcr, SYS_TCR); + } } static void __hyp_text __tlb_switch_to_host(struct kvm *kvm, diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 855f2a7954e6..7ae3a5bb8a53 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -32,7 +32,8 @@ #include <asm/daifflags.h> #include <asm/debug-monitors.h> #include <asm/esr.h> -#include <asm/kasan.h> +#include <asm/kprobes.h> +#include <asm/processor.h> #include <asm/sysreg.h> #include <asm/system_misc.h> #include <asm/pgtable.h> @@ -101,18 +102,6 @@ static void mem_abort_decode(unsigned int esr) data_abort_decode(esr); } -static inline bool is_ttbr0_addr(unsigned long addr) -{ - /* entry assembly clears tags for TTBR0 addrs */ - return addr < TASK_SIZE; -} - -static inline bool is_ttbr1_addr(unsigned long addr) -{ - /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */ - return arch_kasan_reset_tag(addr) >= PAGE_OFFSET; -} - static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm) { /* Either init_pg_dir or swapper_pg_dir */ @@ -268,8 +257,12 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, par = read_sysreg(par_el1); local_irq_restore(flags); + /* + * If we now have a valid translation, treat the translation fault as + * spurious. + */ if (!(par & SYS_PAR_EL1_F)) - return false; + return true; /* * If we got a different type of fault from the AT instruction, @@ -732,8 +725,7 @@ static const struct fault_info fault_info[] = { { do_bad, SIGKILL, SI_KERNEL, "unknown 63" }, }; -asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, - struct pt_regs *regs) +void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) { const struct fault_info *inf = esr_to_fault_info(esr); @@ -749,43 +741,21 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, arm64_notify_die(inf->name, regs, inf->sig, inf->code, (void __user *)addr, esr); } +NOKPROBE_SYMBOL(do_mem_abort); -asmlinkage void __exception do_el0_irq_bp_hardening(void) +void do_el0_irq_bp_hardening(void) { /* PC has already been checked in entry.S */ arm64_apply_bp_hardening(); } +NOKPROBE_SYMBOL(do_el0_irq_bp_hardening); -asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr, - unsigned int esr, - struct pt_regs *regs) -{ - /* - * We've taken an instruction abort from userspace and not yet - * re-enabled IRQs. If the address is a kernel address, apply - * BP hardening prior to enabling IRQs and pre-emption. 
*/ - if (!is_ttbr0_addr(addr)) - arm64_apply_bp_hardening(); - - local_daif_restore(DAIF_PROCCTX); - do_mem_abort(addr, esr, regs); -} - - -asmlinkage void __exception do_sp_pc_abort(unsigned long addr, - unsigned int esr, - struct pt_regs *regs) +void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) { - if (user_mode(regs)) { - if (!is_ttbr0_addr(instruction_pointer(regs))) - arm64_apply_bp_hardening(); - local_daif_restore(DAIF_PROCCTX); - } - arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN, (void __user *)addr, esr); } +NOKPROBE_SYMBOL(do_sp_pc_abort); int __init early_brk64(unsigned long addr, unsigned int esr, struct pt_regs *regs); @@ -868,8 +838,7 @@ NOKPROBE_SYMBOL(debug_exception_exit); #ifdef CONFIG_ARM64_ERRATUM_1463225 DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); -static int __exception -cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) +static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) { if (user_mode(regs)) return 0; @@ -888,16 +857,15 @@ cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) return 1; } #else -static int __exception -cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) +static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) { return 0; } #endif /* CONFIG_ARM64_ERRATUM_1463225 */ +NOKPROBE_SYMBOL(cortex_a76_erratum_1463225_debug_handler); -asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint, - unsigned int esr, - struct pt_regs *regs) +void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, + struct pt_regs *regs) { const struct fault_info *inf = esr_to_debug_fault_info(esr); unsigned long pc = instruction_pointer(regs); diff --git a/include/linux/sched.h b/include/linux/sched.h index 2c2e56bd8913..67a1d86981a9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -223,6 +223,7 @@ extern long schedule_timeout_uninterruptible(long timeout); extern long schedule_timeout_idle(long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); +asmlinkage void preempt_schedule_irq(void); extern int __must_check io_schedule_prepare(void); extern void io_schedule_finish(int token);
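The untagged_addr() rework in arch/arm64/include/asm/memory.h is worth unpacking: ANDing an address with its own sign-extension from bit 55 clears the top-byte tag only for TTBR0 (user) addresses, where bit 55 is 0, while TTBR1 (kernel) addresses, where bit 55 is 1, keep their tag. A minimal user-space sketch of that arithmetic (standalone C; the helper names and example addresses are mine, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Models sign_extend64(addr, 55): copy bit 55 into bits 63:56. */
static uint64_t sign_extend55(uint64_t addr)
{
	if (addr & (1ULL << 55))
		return addr | 0xff00000000000000ULL;
	return addr & 0x00ffffffffffffffULL;
}

/* Models the new untagged_addr(): AND with the sign-extended value. */
static uint64_t untagged(uint64_t addr)
{
	return addr & sign_extend55(addr);
}

int main(void)
{
	uint64_t user = 0x3400ffffabcd0000ULL;	/* tag 0x34, bit 55 clear */
	uint64_t kern = 0x34ffffffabcd0000ULL;	/* tag 0x34, bit 55 set */

	printf("user: %016llx\n", (unsigned long long)untagged(user));	/* tag cleared */
	printf("kern: %016llx\n", (unsigned long long)untagged(kern));	/* tag preserved */
	return 0;
}

This is also why __tag_reset() now uses __untagged_addr() directly: tag removal there must be unconditional, regardless of which TTBR the address belongs to.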
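The entry.S and entry-common.c changes move exception-class decoding from an assembly compare ladder (lsr x24, x1, #ESR_ELx_EC_SHIFT followed by a string of cmp/b.eq) into C switch statements. The class is just a six-bit field in ESR_ELx[31:26]; a sketch of the dispatch shape (user-space C, hypothetical handler names, EC values from the architecture):

#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_MASK		0x3fULL

#define EC_DABT_CUR		0x25	/* data abort taken at the current EL */
#define EC_BRK64		0x3c	/* BRK instruction */

/* Equivalent of the kernel's ESR_ELx_EC() macro. */
static unsigned int esr_ec(uint64_t esr)
{
	return (unsigned int)((esr >> ESR_ELx_EC_SHIFT) & ESR_ELx_EC_MASK);
}

/* One switch replaces the old cmp/b.eq ladder in el1_sync. */
static void el1_sync_model(uint64_t esr)
{
	switch (esr_ec(esr)) {
	case EC_DABT_CUR:
		puts("-> el1_abort()");
		break;
	case EC_BRK64:
		puts("-> el1_dbg()");
		break;
	default:
		puts("-> el1_inv()");
	}
}

int main(void)
{
	el1_sync_model((uint64_t)EC_DABT_CUR << ESR_ELx_EC_SHIFT);
	return 0;
}

Besides readability, doing the decode in C lets the handlers carry NOKPROBE_SYMBOL() annotations, which is what allows the old .exception.text section and its kprobes blacklist entry to be removed.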
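For erratum 1542419, cpu_enable_trap_ctr_access() clears SCTLR_EL1.UCT so that EL0 reads of CTR_EL0 trap, and ctr_read_handler() then serves a doctored value: DIC is hidden, forcing user-space to issue instruction-cache maintenance, and IminLine is inflated. The arithmetic of the inflation: IminLine is log2 of the I-cache line size in 4-byte words, so PAGE_SHIFT - 2 advertises a "line" the size of a page, and user-space performs one maintenance operation per page rather than per real cache line. A sketch of the faked value, assuming 4KiB pages (the helper name is mine):

#include <stdint.h>
#include <stdio.h>

#define CTR_DIC_SHIFT		29
#define CTR_IMINLINE_MASK	0xfU
#define PAGE_SHIFT		12	/* assumption: 4KiB pages */

/* Mirrors the ctr_read_handler() fixup for erratum 1542419. */
static uint32_t fake_ctr_el0(uint32_t val)
{
	val &= ~(1U << CTR_DIC_SHIFT);		/* hide DIC from EL0 */
	val &= ~CTR_IMINLINE_MASK;		/* IminLine is bits 3:0 */
	val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
	return val;
}

int main(void)
{
	/* Start from a CTR value with DIC set and a 64-byte I-line. */
	uint32_t ctr = fake_ctr_el0((1U << CTR_DIC_SHIFT) | 0x4);

	/* Line size in bytes is 4 << IminLine: here 4 << 10 = 4096. */
	printf("EL0 sees an I-line of %u bytes\n",
	       4U << (ctr & CTR_IMINLINE_MASK));
	return 0;
}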
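Several of the KVM hunks for erratum 1319367 lean on the same trick: before running with a half-switched translation context, set TCR_EL1.EPD0 and EPD1 so a TLB miss faults instead of starting a stage-1 table walk; a speculated AT instruction then has no way to fill the TLB from the wrong regime. A sketch of the bit manipulation (bit positions per the ARMv8 architecture; the helper is mine):

#include <stdint.h>

#define TCR_EPD0_MASK	(1ULL << 7)	/* disable walks via TTBR0_EL1 */
#define TCR_EPD1_MASK	(1ULL << 23)	/* disable walks via TTBR1_EL1 */

/*
 * Mirrors the pattern used in __tlb_switch_to_guest_nvhe() and
 * __deactivate_traps_nvhe(): take the saved TCR, return one that
 * forbids new stage-1 walks. On real hardware every such write is
 * followed by an isb() so the ordering is architecturally visible.
 */
static uint64_t tcr_block_stage1_walks(uint64_t tcr)
{
	return tcr | TCR_EPD0_MASK | TCR_EPD1_MASK;
}

The companion rule, visible in __activate_traps_nvhe() and __sysreg_restore_el1_state(), is ordering: SCTLR first, isb(), then TCR when bringing a context up, and the exact reverse when tearing it down.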
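Lastly, the sysreg.h one-liner and the fault.c hunk are a matched pair: PAR_EL1.F is bit 0 (the fault status code sits in bits 6:1, hence GENMASK(6, 1)), and once the bit is right, the spurious-fault test has to be inverted too, since F == 0 after an AT re-walk means the address translates fine now and the original fault was spurious. A condensed restatement of the corrected check (standalone C; the kernel function goes on to compare fault status codes when F is set):

#include <stdbool.h>
#include <stdint.h>

#define SYS_PAR_EL1_F	(1ULL << 0)	/* PAR_EL1 fault bit, as corrected */

/*
 * After an AT instruction re-walks the faulting address, PAR_EL1
 * reports the result. F clear means the translation now succeeds,
 * so the earlier translation fault was spurious and can be retried.
 */
static bool fault_was_spurious(uint64_t par)
{
	return !(par & SYS_PAR_EL1_F);
}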