Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/alternative.h   |  7
-rw-r--r--  arch/arm64/include/asm/atomic.h        | 47
-rw-r--r--  arch/arm64/include/asm/bitops.h        | 21
-rw-r--r--  arch/arm64/include/asm/efi.h           |  3
-rw-r--r--  arch/arm64/include/asm/hw_breakpoint.h |  7
-rw-r--r--  arch/arm64/include/asm/irq.h           |  2
-rw-r--r--  arch/arm64/include/asm/kprobes.h       |  1
-rw-r--r--  arch/arm64/include/asm/pgtable.h       |  6
-rw-r--r--  arch/arm64/include/asm/simd.h          | 19
-rw-r--r--  arch/arm64/include/asm/tlb.h           |  2
10 files changed, 27 insertions, 88 deletions
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index a91933b1e2e6..4b650ec1d7dd 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -28,7 +28,12 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
 				 __le32 *origptr, __le32 *updptr, int nr_inst);
 
 void __init apply_alternatives_all(void);
-void apply_alternatives(void *start, size_t length);
+
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length);
+#else
+static inline void apply_alternatives_module(void *start, size_t length) { }
+#endif
 
 #define ALTINSTR_ENTRY(feature,cb)					      \
 	" .word 661b - .\n"				/* label           */ \
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index c0235e0ff849..9bca54dda75c 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -40,17 +40,6 @@
 
 #include <asm/cmpxchg.h>
 
-#define ___atomic_add_unless(v, a, u, sfx)				\
-({									\
-	typeof((v)->counter) c, old;					\
-									\
-	c = atomic##sfx##_read(v);					\
-	while (c != (u) &&						\
-	      (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c)	\
-		c = old;						\
-	c;								\
-})
-
 #define ATOMIC_INIT(i)	{ (i) }
 
 #define atomic_read(v)			READ_ONCE((v)->counter)
@@ -61,21 +50,11 @@
 #define atomic_add_return_release	atomic_add_return_release
 #define atomic_add_return		atomic_add_return
 
-#define atomic_inc_return_relaxed(v)	atomic_add_return_relaxed(1, (v))
-#define atomic_inc_return_acquire(v)	atomic_add_return_acquire(1, (v))
-#define atomic_inc_return_release(v)	atomic_add_return_release(1, (v))
-#define atomic_inc_return(v)		atomic_add_return(1, (v))
-
 #define atomic_sub_return_relaxed	atomic_sub_return_relaxed
 #define atomic_sub_return_acquire	atomic_sub_return_acquire
 #define atomic_sub_return_release	atomic_sub_return_release
 #define atomic_sub_return		atomic_sub_return
 
-#define atomic_dec_return_relaxed(v)	atomic_sub_return_relaxed(1, (v))
-#define atomic_dec_return_acquire(v)	atomic_sub_return_acquire(1, (v))
-#define atomic_dec_return_release(v)	atomic_sub_return_release(1, (v))
-#define atomic_dec_return(v)		atomic_sub_return(1, (v))
-
 #define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
 #define atomic_fetch_add_acquire	atomic_fetch_add_acquire
 #define atomic_fetch_add_release	atomic_fetch_add_release
@@ -119,13 +98,6 @@
 	cmpxchg_release(&((v)->counter), (old), (new))
 #define atomic_cmpxchg(v, old, new)	cmpxchg(&((v)->counter), (old), (new))
 
-#define atomic_inc(v)			atomic_add(1, (v))
-#define atomic_dec(v)			atomic_sub(1, (v))
-#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
-#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
-#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
-#define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
-#define __atomic_add_unless(v, a, u)	___atomic_add_unless(v, a, u,)
 #define atomic_andnot			atomic_andnot
 
 /*
@@ -140,21 +112,11 @@
 #define atomic64_add_return_release	atomic64_add_return_release
 #define atomic64_add_return		atomic64_add_return
 
-#define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1, (v))
-#define atomic64_inc_return_acquire(v)	atomic64_add_return_acquire(1, (v))
-#define atomic64_inc_return_release(v)	atomic64_add_return_release(1, (v))
-#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
-
 #define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
 #define atomic64_sub_return_acquire	atomic64_sub_return_acquire
 #define atomic64_sub_return_release	atomic64_sub_return_release
 #define atomic64_sub_return		atomic64_sub_return
 
-#define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1, (v))
-#define atomic64_dec_return_acquire(v)	atomic64_sub_return_acquire(1, (v))
-#define atomic64_dec_return_release(v)	atomic64_sub_return_release(1, (v))
-#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
-
 #define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
 #define atomic64_fetch_add_acquire	atomic64_fetch_add_acquire
 #define atomic64_fetch_add_release	atomic64_fetch_add_release
@@ -195,16 +157,9 @@
 #define atomic64_cmpxchg_release	atomic_cmpxchg_release
 #define atomic64_cmpxchg		atomic_cmpxchg
 
-#define atomic64_inc(v)			atomic64_add(1, (v))
-#define atomic64_dec(v)			atomic64_sub(1, (v))
-#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
-#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
-#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
-#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)
-#define atomic64_add_unless(v, a, u)	(___atomic_add_unless(v, a, u, 64) != u)
 #define atomic64_andnot			atomic64_andnot
 
-#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_if_positive	atomic64_dec_if_positive
 
 #endif
 #endif
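[Note: the atomic.h deletions line up with the treewide rework that moved the inc/dec and add-unless helpers into the generic <linux/atomic.h> fallbacks, leaving arm64 to define only the base operations. The removed ___atomic_add_unless() macro open-coded the classic compare-and-swap retry loop; for reference, a minimal user-space sketch of the same pattern using C11 atomics rather than the kernel's primitives (the function name is illustrative, not a kernel API):]

	#include <stdatomic.h>

	/* Atomically add 'a' to '*v' unless the current value is 'u';
	 * return the value observed before any addition, mirroring the
	 * removed ___atomic_add_unless() macro. */
	static int atomic_add_unless_sketch(atomic_int *v, int a, int u)
	{
		int c = atomic_load(v);

		/* On failure, compare_exchange reloads 'c' with the value
		 * it actually saw, so each retry re-tests the 'u' condition
		 * against fresh data. */
		while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
			;
		return c;
	}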
diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
index 9c19594ce7cb..10d536b1af74 100644
--- a/arch/arm64/include/asm/bitops.h
+++ b/arch/arm64/include/asm/bitops.h
@@ -17,22 +17,11 @@
 #define __ASM_BITOPS_H
 
 #include <linux/compiler.h>
-#include <asm/barrier.h>
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
 
-/*
- * Little endian assembly atomic bitops.
- */
-extern void set_bit(int nr, volatile unsigned long *p);
-extern void clear_bit(int nr, volatile unsigned long *p);
-extern void change_bit(int nr, volatile unsigned long *p);
-extern int test_and_set_bit(int nr, volatile unsigned long *p);
-extern int test_and_clear_bit(int nr, volatile unsigned long *p);
-extern int test_and_change_bit(int nr, volatile unsigned long *p);
-
 #include <asm-generic/bitops/builtin-__ffs.h>
 #include <asm-generic/bitops/builtin-ffs.h>
 #include <asm-generic/bitops/builtin-__fls.h>
@@ -44,15 +33,11 @@ extern int test_and_change_bit(int nr, volatile unsigned long *p);
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/hweight.h>
 
-#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/lock.h>
 
 #include <asm-generic/bitops/non-atomic.h>
 #include <asm-generic/bitops/le.h>
-
-/*
- * Ext2 is defined to use little-endian byte ordering.
- */
-#define ext2_set_bit_atomic(lock, nr, p)	test_and_set_bit_le(nr, p)
-#define ext2_clear_bit_atomic(lock, nr, p)	test_and_clear_bit_le(nr, p)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
 
 #endif /* __ASM_BITOPS_H */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 192d791f1103..7ed320895d1f 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -87,6 +87,9 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
 #define efi_call_runtime(f, ...)	sys_table_arg->runtime->f(__VA_ARGS__)
 #define efi_is_64bit()			(true)
 
+#define efi_table_attr(table, attr, instance)				\
+	((table##_t *)instance)->attr
+
 #define efi_call_proto(protocol, f, instance, ...)			\
 	((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
 
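[Note: the new efi_table_attr() helper leans on token pasting, turning the table name into its typedef (table##_t) so an untyped instance pointer can be cast and dereferenced. A compilable sketch of the expansion, with a stand-in typedef and field (fw_vendor here is only illustrative of the real EFI table layout):]

	/* Stand-in for the real EFI system table definition. */
	typedef struct {
		unsigned long fw_vendor;
	} efi_system_table_t;

	#define efi_table_attr(table, attr, instance)	\
		((table##_t *)instance)->attr

	static unsigned long get_vendor(void *sys)
	{
		/* Expands to: ((efi_system_table_t *)sys)->fw_vendor */
		return efi_table_attr(efi_system_table, fw_vendor, sys);
	}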
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 41770766d964..6a53e59ced95 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -119,13 +119,16 @@ static inline void decode_ctrl_reg(u32 reg,
 
 struct task_struct;
 struct notifier_block;
+struct perf_event_attr;
 struct perf_event;
 struct pmu;
 
 extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
 				  int *gen_len, int *gen_type, int *offset);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+				    const struct perf_event_attr *attr,
+				    struct arch_hw_breakpoint *hw);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
 					   unsigned long val, void *data);
 
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index a0fee6985e6a..b2b0c6405eb0 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -8,8 +8,6 @@
 
 struct pt_regs;
 
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
 static inline int nr_legacy_irqs(void)
 {
 	return 0;
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 6deb8d726041..d5a44cf859e9 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -48,7 +48,6 @@ struct kprobe_ctlblk {
 	unsigned long saved_irqflag;
 	struct prev_kprobe prev_kprobe;
 	struct kprobe_step_ctx ss_ctx;
-	struct pt_regs jprobe_saved_regs;
 };
 
 void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9f82d6b53851..1bdeca8918a6 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -224,10 +224,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
 	 * or update_mmu_cache() have the necessary barriers.
 	 */
-	if (pte_valid_not_user(pte)) {
+	if (pte_valid_not_user(pte))
 		dsb(ishst);
-		isb();
-	}
 }
 
 extern void __sync_icache_dcache(pte_t pteval);
@@ -434,7 +432,6 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 	WRITE_ONCE(*pmdp, pmd);
 	dsb(ishst);
-	isb();
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -485,7 +482,6 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 {
 	WRITE_ONCE(*pudp, pud);
 	dsb(ishst);
-	isb();
 }
 
 static inline void pud_clear(pud_t *pudp)
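[Note: the pgtable.h hunks drop the isb() that used to follow dsb(ishst) in set_pte(), set_pmd() and set_pud(). As I read the change, the rationale is that the MMU's table walker is a separate observer of memory, not of the CPU's instruction stream, so an instruction barrier does nothing to make a new entry visible to it; the dsb(ishst) alone orders the store before any subsequent walk. A sketch of the resulting shape (names mirror the kernel's, but this is an outline rather than a drop-in implementation):]

	static inline void set_pmd_sketch(pmd_t *pmdp, pmd_t pmd)
	{
		/* Single-copy-atomic store of the new table entry. */
		WRITE_ONCE(*pmdp, pmd);

		/* Make the store visible to the table walker before any
		 * later page-table walk can observe stale data; no
		 * instruction barrier is required on this path. */
		dsb(ishst);
	}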
diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h
index fa8b3fe932e6..6495cc51246f 100644
--- a/arch/arm64/include/asm/simd.h
+++ b/arch/arm64/include/asm/simd.h
@@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
 static __must_check inline bool may_use_simd(void)
 {
 	/*
-	 * The raw_cpu_read() is racy if called with preemption enabled.
-	 * This is not a bug: kernel_neon_busy is only set when
-	 * preemption is disabled, so we cannot migrate to another CPU
-	 * while it is set, nor can we migrate to a CPU where it is set.
-	 * So, if we find it clear on some CPU then we're guaranteed to
-	 * find it clear on any CPU we could migrate to.
-	 *
-	 * If we are in between kernel_neon_begin()...kernel_neon_end(),
-	 * the flag will be set, but preemption is also disabled, so we
-	 * can't migrate to another CPU and spuriously see it become
-	 * false.
+	 * kernel_neon_busy is only set while preemption is disabled,
+	 * and is clear whenever preemption is enabled. Since
+	 * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
+	 * cannot change under our feet -- if it's set we cannot be
+	 * migrated, and if it's clear we cannot be migrated to a CPU
+	 * where it is set.
 	 */
 	return !in_irq() && !irqs_disabled() && !in_nmi() &&
-	       !raw_cpu_read(kernel_neon_busy);
+	       !this_cpu_read(kernel_neon_busy);
 }
 
 #else /* ! CONFIG_KERNEL_MODE_NEON */
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index ffdaea7954bb..0ad1cf233470 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -37,7 +37,7 @@ static inline void __tlb_remove_table(void *_table)
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
 
 	/*
 	 * The ASID allocator will either invalidate the ASID or mark
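[Note: may_use_simd() is the gate that kernel-mode NEON users check before taking the FP/SIMD path, and the switch from raw_cpu_read() to this_cpu_read() keeps that check preemption-safe without a raw accessor. A sketch of the usual calling pattern; the function name and its bodies are illustrative:]

	static void do_block(void *dst, const void *src, size_t len)
	{
		if (may_use_simd()) {
			/* Preemption stays disabled between begin/end. */
			kernel_neon_begin();
			/* ... NEON-accelerated implementation ... */
			kernel_neon_end();
		} else {
			/* ... scalar fallback, safe in any context ... */
		}
	}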