Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--   arch/powerpc/include/asm/atomic.h               44
-rw-r--r--   arch/powerpc/include/asm/book3s/64/pgtable.h    30
-rw-r--r--   arch/powerpc/include/asm/btext.h                 4
-rw-r--r--   arch/powerpc/include/asm/kexec.h                 3
-rw-r--r--   arch/powerpc/include/asm/kvm_booke_hv_asm.h      5
-rw-r--r--   arch/powerpc/include/asm/mm-arch-hooks.h         5
-rw-r--r--   arch/powerpc/include/asm/page.h                  7
-rw-r--r--   arch/powerpc/include/asm/pgtable.h              14
-rw-r--r--   arch/powerpc/include/asm/ppc-opcode.h            1
-rw-r--r--   arch/powerpc/include/asm/processor.h             2
-rw-r--r--   arch/powerpc/include/asm/ptrace.h               29
-rw-r--r--   arch/powerpc/include/asm/reg_booke.h             5
-rw-r--r--   arch/powerpc/include/asm/trace_clock.h           4
13 files changed, 107 insertions, 46 deletions
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 52eafaf74054..31c231ea56b7 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -297,24 +297,24 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-static __inline__ long atomic64_read(const atomic64_t *v)
+static __inline__ s64 atomic64_read(const atomic64_t *v)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
 
 	return t;
 }
 
-static __inline__ void atomic64_set(atomic64_t *v, long i)
+static __inline__ void atomic64_set(atomic64_t *v, s64 i)
 {
 	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
 #define ATOMIC64_OP(op, asm_op)						\
-static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
+static __inline__ void atomic64_##op(s64 a, atomic64_t *v)		\
 {									\
-	long t;								\
+	s64 t;								\
 									\
 	__asm__ __volatile__(						\
 "1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
@@ -327,10 +327,10 @@ static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
 }
 
 #define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
-static inline long							\
-atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
+static inline s64							\
+atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)			\
 {									\
-	long t;								\
+	s64 t;								\
 									\
 	__asm__ __volatile__(						\
 "1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
@@ -345,10 +345,10 @@ atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
 }
 
 #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
-static inline long							\
-atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
+static inline s64							\
+atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)			\
 {									\
-	long res, t;							\
+	s64 res, t;							\
 									\
 	__asm__ __volatile__(						\
 "1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
@@ -396,7 +396,7 @@ ATOMIC64_OPS(xor, xor)
 
 static __inline__ void atomic64_inc(atomic64_t *v)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__(
 "1:	ldarx	%0,0,%2		# atomic64_inc\n\
@@ -409,9 +409,9 @@ static __inline__ void atomic64_inc(atomic64_t *v)
 }
 #define atomic64_inc atomic64_inc
 
-static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
+static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__(
 "1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
@@ -427,7 +427,7 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
 
 static __inline__ void atomic64_dec(atomic64_t *v)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__(
 "1:	ldarx	%0,0,%2		# atomic64_dec\n\
@@ -440,9 +440,9 @@ static __inline__ void atomic64_dec(atomic64_t *v)
 }
 #define atomic64_dec atomic64_dec
 
-static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
+static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__(
 "1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
@@ -463,9 +463,9 @@ static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
  * Atomically test *v and decrement if it is greater than 0.
  * The function returns the old value of *v minus 1.
  */
-static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__(
 	PPC_ATOMIC_ENTRY_BARRIER
@@ -502,9 +502,9 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long t;
+	s64 t;
 
 	__asm__ __volatile__ (
 	PPC_ATOMIC_ENTRY_BARRIER
@@ -534,7 +534,7 @@ static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
 */
 static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
 {
-	long t1, t2;
+	s64 t1, t2;
 
 	__asm__ __volatile__ (
 	PPC_ATOMIC_ENTRY_BARRIER
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 7dede2e34b70..ccf00a8b98c6 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -876,6 +876,23 @@ static inline int pmd_present(pmd_t pmd)
 	return false;
 }
 
+static inline int pmd_is_serializing(pmd_t pmd)
+{
+	/*
+	 * If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear
+	 * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate).
+	 *
+	 * This condition may also occur when flushing a pmd while flushing
+	 * it (see ptep_modify_prot_start), so callers must ensure this
+	 * case is fine as well.
+	 */
+	if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==
+			cpu_to_be64(_PAGE_INVALID))
+		return true;
+
+	return false;
+}
+
 static inline int pmd_bad(pmd_t pmd)
 {
 	if (radix_enabled())
@@ -1092,6 +1109,19 @@ static inline int pmd_protnone(pmd_t pmd)
 #define pmd_access_permitted pmd_access_permitted
 static inline bool pmd_access_permitted(pmd_t pmd, bool write)
 {
+	/*
+	 * pmdp_invalidate sets this combination (which is not caught by
+	 * !pte_present() check in pte_access_permitted), to prevent
+	 * lock-free lookups, as part of the serialize_against_pte_lookup()
+	 * synchronisation.
+	 *
+	 * This also catches the case where the PTE's hardware PRESENT bit is
+	 * cleared while TLB is flushed, which is suboptimal but should not
+	 * be frequent.
+	 */
+	if (pmd_is_serializing(pmd))
+		return false;
+
 	return pte_access_permitted(pmd_pte(pmd), write);
 }
 
diff --git a/arch/powerpc/include/asm/btext.h b/arch/powerpc/include/asm/btext.h
index 3ffad030393c..461b0f193864 100644
--- a/arch/powerpc/include/asm/btext.h
+++ b/arch/powerpc/include/asm/btext.h
@@ -13,7 +13,11 @@ extern void btext_update_display(unsigned long phys, int width, int height,
 			   int depth, int pitch);
 extern void btext_setup_display(int width, int height, int depth, int pitch,
 				unsigned long address);
+#ifdef CONFIG_PPC32
 extern void btext_prepare_BAT(void);
+#else
+static inline void btext_prepare_BAT(void) { }
+#endif
 extern void btext_map(void);
 extern void btext_unmap(void);
 
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 4a585cba1787..c68476818753 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -94,6 +94,9 @@ static inline bool kdump_in_progress(void)
 	return crashing_cpu >= 0;
 }
 
+void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
+			 unsigned long start_address) __noreturn;
+
 #ifdef CONFIG_KEXEC_FILE
 extern const struct kexec_file_ops kexec_elf64_ops;
 
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
index 931260b59ac6..7487ef582121 100644
--- a/arch/powerpc/include/asm/kvm_booke_hv_asm.h
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright 2010-2011 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
  */
 
 #ifndef ASM_KVM_BOOKE_HV_ASM_H
diff --git a/arch/powerpc/include/asm/mm-arch-hooks.h b/arch/powerpc/include/asm/mm-arch-hooks.h
index f2a2da895897..dce274be824a 100644
--- a/arch/powerpc/include/asm/mm-arch-hooks.h
+++ b/arch/powerpc/include/asm/mm-arch-hooks.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Architecture specific mm hooks
  *
  * Copyright (C) 2015, IBM Corporation
  * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
 */
 
 #ifndef _ASM_POWERPC_MM_ARCH_HOOKS_H
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index b8286a2013b4..0d52f57fca04 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -319,6 +319,13 @@ struct vm_area_struct;
 #endif /* __ASSEMBLY__ */
 #include <asm/slice.h>
 
+/*
+ * Allow 30-bit DMA for very limited Broadcom wifi chips on many powerbooks.
+ */
+#ifdef CONFIG_PPC32
+#define ARCH_ZONE_DMA_BITS 30
+#else
 #define ARCH_ZONE_DMA_BITS 31
+#endif
 
 #endif /* _ASM_POWERPC_PAGE_H */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 3f53be60fb01..64145751b2fd 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -140,6 +140,20 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
 }
 #endif
 
+#ifdef CONFIG_PPC64
+#define is_ioremap_addr is_ioremap_addr
+static inline bool is_ioremap_addr(const void *x)
+{
+#ifdef CONFIG_MMU
+	unsigned long addr = (unsigned long)x;
+
+	return addr >= IOREMAP_BASE && addr < IOREMAP_END;
+#else
+	return false;
+#endif
+}
+#endif /* CONFIG_PPC64 */
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 493c5c943acd..2291daf39cd1 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -338,6 +338,7 @@
 #define PPC_INST_MADDLD			0x10000033
 #define PPC_INST_DIVWU			0x7c000396
 #define PPC_INST_DIVD			0x7c0003d2
+#define PPC_INST_DIVDU			0x7c000392
 #define PPC_INST_RLWINM			0x54000000
 #define PPC_INST_RLWINM_DOT		0x54000001
 #define PPC_INST_RLWIMI			0x50000000
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index ef573fe9873e..a9993e7a443b 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -346,8 +346,6 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
 
 #define spin_cpu_relax()	barrier()
 
-#define spin_cpu_yield()	spin_cpu_relax()
-
 #define spin_end()	HMT_medium()
 
 #define spin_until_cond(cond)					\
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index faa5a338ac5a..feee1b21bbd5 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -111,18 +111,33 @@ struct pt_regs
 
 #ifndef __ASSEMBLY__
 
-#define GET_IP(regs)		((regs)->nip)
-#define GET_USP(regs)		((regs)->gpr[1])
-#define GET_FP(regs)		(0)
-#define SET_FP(regs, val)
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+	return regs->nip;
+}
+
+static inline void instruction_pointer_set(struct pt_regs *regs,
+		unsigned long val)
+{
+	regs->nip = val;
+}
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+	return regs->gpr[1];
+}
+
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+	return 0;
+}
 
 #ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
-#define profile_pc profile_pc
+#else
+#define profile_pc(regs) instruction_pointer(regs)
 #endif
 
-#include <asm-generic/ptrace.h>
-
 #define kernel_stack_pointer(regs) ((regs)->gpr[1])
 static inline int is_syscall_success(struct pt_regs *regs)
 {
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index e382bd6ede84..f26fe482fbca 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Contains register definitions common to the Book E PowerPC
  * specification. Notice that while the IBM-40x series of CPUs
@@ -5,10 +6,6 @@
  * before Book E was finalized, and are included here as well. Unfortunately,
  * they sometimes used different locations than true Book E CPUs did.
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
 * Copyright 2009-2010 Freescale Semiconductor, Inc.
 */
 #ifdef __KERNEL__
diff --git a/arch/powerpc/include/asm/trace_clock.h b/arch/powerpc/include/asm/trace_clock.h
index cf1ee75ca069..ef70c2f7974d 100644
--- a/arch/powerpc/include/asm/trace_clock.h
+++ b/arch/powerpc/include/asm/trace_clock.h
@@ -1,7 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
 *
 * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
 */