Diffstat (limited to 'arch/powerpc/include/asm/book3s')
-rw-r--r--  arch/powerpc/include/asm/book3s/32/kup.h            | 108
-rw-r--r--  arch/powerpc/include/asm/book3s/32/mmu-hash.h       |  84
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h        |   1
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h           |   4
-rw-r--r--  arch/powerpc/include/asm/book3s/64/kup.h            |  56
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu-hash.h       |   8
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu.h            |  40
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h        |   2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush-hash.h  |   6
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush.h       |   4
-rw-r--r--  arch/powerpc/include/asm/book3s/pgtable.h           |   4
11 files changed, 186 insertions, 131 deletions
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 9f38040f0641..678f9c9d89b6 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -12,50 +12,10 @@
 #include <linux/jump_label.h>
 
 extern struct static_key_false disable_kuap_key;
-extern struct static_key_false disable_kuep_key;
-
-static __always_inline bool kuap_is_disabled(void)
-{
-	return !IS_ENABLED(CONFIG_PPC_KUAP) || static_branch_unlikely(&disable_kuap_key);
-}
 
 static __always_inline bool kuep_is_disabled(void)
 {
-	return !IS_ENABLED(CONFIG_PPC_KUEP) || static_branch_unlikely(&disable_kuep_key);
-}
-
-static inline void kuep_lock(void)
-{
-	if (kuep_is_disabled())
-		return;
-
-	update_user_segments(mfsr(0) | SR_NX);
-	/*
-	 * This isync() shouldn't be necessary as the kernel is not excepted to
-	 * run any instruction in userspace soon after the update of segments,
-	 * but hash based cores (at least G3) seem to exhibit a random
-	 * behaviour when the 'isync' is not there. 603 cores don't have this
-	 * behaviour so don't do the 'isync' as it saves several CPU cycles.
-	 */
-	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
-		isync();	/* Context sync required after mtsr() */
-}
-
-static inline void kuep_unlock(void)
-{
-	if (kuep_is_disabled())
-		return;
-
-	update_user_segments(mfsr(0) & ~SR_NX);
-	/*
-	 * This isync() shouldn't be necessary as a 'rfi' will soon be executed
-	 * to return to userspace, but hash based cores (at least G3) seem to
-	 * exhibit a random behaviour when the 'isync' is not there. 603 cores
-	 * don't have this behaviour so don't do the 'isync' as it saves several
-	 * CPU cycles.
-	 */
-	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
-		isync();	/* Context sync required after mtsr() */
+	return !IS_ENABLED(CONFIG_PPC_KUEP);
 }
 
 #ifdef CONFIG_PPC_KUAP
@@ -65,6 +25,11 @@ static inline void kuep_unlock(void)
 #define KUAP_NONE	(~0UL)
 #define KUAP_ALL	(~1UL)
 
+static __always_inline bool kuap_is_disabled(void)
+{
+	return static_branch_unlikely(&disable_kuap_key);
+}
+
 static inline void kuap_lock_one(unsigned long addr)
 {
 	mtsr(mfsr(addr) | SR_KS, addr);
@@ -92,7 +57,7 @@ static inline void kuap_unlock_all(void)
 void kuap_lock_all_ool(void);
 void kuap_unlock_all_ool(void);
 
-static inline void kuap_lock(unsigned long addr, bool ool)
+static inline void kuap_lock_addr(unsigned long addr, bool ool)
 {
 	if (likely(addr != KUAP_ALL))
 		kuap_lock_one(addr);
@@ -112,33 +77,31 @@ static inline void kuap_unlock(unsigned long addr, bool ool)
 		kuap_unlock_all_ool();
 }
 
-static inline void kuap_save_and_lock(struct pt_regs *regs)
+static inline void __kuap_lock(void)
 {
-	unsigned long kuap = current->thread.kuap;
+}
 
-	if (kuap_is_disabled())
-		return;
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
+{
+	unsigned long kuap = current->thread.kuap;
 
 	regs->kuap = kuap;
 	if (unlikely(kuap == KUAP_NONE))
 		return;
 
 	current->thread.kuap = KUAP_NONE;
-	kuap_lock(kuap, false);
+	kuap_lock_addr(kuap, false);
 }
 
 static inline void kuap_user_restore(struct pt_regs *regs)
 {
 }
 
-static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
 {
-	if (kuap_is_disabled())
-		return;
-
 	if (unlikely(kuap != KUAP_NONE)) {
 		current->thread.kuap = KUAP_NONE;
-		kuap_lock(kuap, false);
+		kuap_lock_addr(kuap, false);
 	}
 
 	if (likely(regs->kuap == KUAP_NONE))
@@ -149,29 +112,18 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
 	kuap_unlock(regs->kuap, false);
 }
 
-static inline unsigned long kuap_get_and_assert_locked(void)
+static inline unsigned long __kuap_get_and_assert_locked(void)
 {
 	unsigned long kuap = current->thread.kuap;
 
-	if (kuap_is_disabled())
-		return KUAP_NONE;
-
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);
 
 	return kuap;
 }
 
-static inline void kuap_assert_locked(void)
-{
-	kuap_get_and_assert_locked();
-}
-
-static __always_inline void allow_user_access(void __user *to, const void __user *from,
-					      u32 size, unsigned long dir)
+static __always_inline void __allow_user_access(void __user *to, const void __user *from,
+						u32 size, unsigned long dir)
 {
-	if (kuap_is_disabled())
-		return;
-
 	BUILD_BUG_ON(!__builtin_constant_p(dir));
 
 	if (!(dir & KUAP_WRITE))
@@ -181,42 +133,33 @@ static __always_inline void allow_user_access(void __user *to, const void __user
 	kuap_unlock_one((__force u32)to);
 }
 
-static __always_inline void prevent_user_access(unsigned long dir)
+static __always_inline void __prevent_user_access(unsigned long dir)
 {
 	u32 kuap = current->thread.kuap;
 
-	if (kuap_is_disabled())
-		return;
-
 	BUILD_BUG_ON(!__builtin_constant_p(dir));
 
 	if (!(dir & KUAP_WRITE))
 		return;
 
 	current->thread.kuap = KUAP_NONE;
-	kuap_lock(kuap, true);
+	kuap_lock_addr(kuap, true);
 }
 
-static inline unsigned long prevent_user_access_return(void)
+static inline unsigned long __prevent_user_access_return(void)
 {
 	unsigned long flags = current->thread.kuap;
 
-	if (kuap_is_disabled())
-		return KUAP_NONE;
-
 	if (flags != KUAP_NONE) {
 		current->thread.kuap = KUAP_NONE;
-		kuap_lock(flags, true);
+		kuap_lock_addr(flags, true);
 	}
 
 	return flags;
 }
 
-static inline void restore_user_access(unsigned long flags)
+static inline void __restore_user_access(unsigned long flags)
 {
-	if (kuap_is_disabled())
-		return;
-
 	if (flags != KUAP_NONE) {
 		current->thread.kuap = flags;
 		kuap_unlock(flags, true);
@@ -224,13 +167,10 @@ static inline void restore_user_access(unsigned long flags)
 }
 
 static inline bool
-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	unsigned long kuap = regs->kuap;
 
-	if (kuap_is_disabled())
-		return false;
-
 	if (!is_write || kuap == KUAP_ALL)
 		return false;
 	if (kuap == KUAP_NONE)
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index f5be185cbdf8..78c6a5fde1d6 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -64,7 +64,82 @@ struct ppc_bat {
 #define SR_KP	0x20000000	/* User key */
 #define SR_KS	0x40000000	/* Supervisor key */
 
-#ifndef __ASSEMBLY__
+#ifdef __ASSEMBLY__
+
+#include <asm/asm-offsets.h>
+
+.macro uus_addi sr reg1 reg2 imm
+	.if NUM_USER_SEGMENTS > \sr
+	addi	\reg1,\reg2,\imm
+	.endif
+.endm
+
+.macro uus_mtsr sr reg1
+	.if NUM_USER_SEGMENTS > \sr
+	mtsr	\sr, \reg1
+	.endif
+.endm
+
+/*
+ * This isync() shouldn't be necessary as the kernel is not excepted to run
+ * any instruction in userspace soon after the update of segments and 'rfi'
+ * instruction is used to return to userspace, but hash based cores
+ * (at least G3) seem to exhibit a random behaviour when the 'isync' is not
+ * there. 603 cores don't have this behaviour so don't do the 'isync' as it
+ * saves several CPU cycles.
+ */
+.macro uus_isync
+#ifdef CONFIG_PPC_BOOK3S_604
+BEGIN_MMU_FTR_SECTION
+	isync
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+#endif
+.endm
+
+.macro update_user_segments_by_4 tmp1 tmp2 tmp3 tmp4
+	uus_addi 1, \tmp2, \tmp1, 0x111
+	uus_addi 2, \tmp3, \tmp1, 0x222
+	uus_addi 3, \tmp4, \tmp1, 0x333
+
+	uus_mtsr 0, \tmp1
+	uus_mtsr 1, \tmp2
+	uus_mtsr 2, \tmp3
+	uus_mtsr 3, \tmp4
+
+	uus_addi 4, \tmp1, \tmp1, 0x444
+	uus_addi 5, \tmp2, \tmp2, 0x444
+	uus_addi 6, \tmp3, \tmp3, 0x444
+	uus_addi 7, \tmp4, \tmp4, 0x444
+
+	uus_mtsr 4, \tmp1
+	uus_mtsr 5, \tmp2
+	uus_mtsr 6, \tmp3
+	uus_mtsr 7, \tmp4
+
+	uus_addi 8, \tmp1, \tmp1, 0x444
+	uus_addi 9, \tmp2, \tmp2, 0x444
+	uus_addi 10, \tmp3, \tmp3, 0x444
+	uus_addi 11, \tmp4, \tmp4, 0x444
+
+	uus_mtsr 8, \tmp1
+	uus_mtsr 9, \tmp2
+	uus_mtsr 10, \tmp3
+	uus_mtsr 11, \tmp4
+
+	uus_addi 12, \tmp1, \tmp1, 0x444
+	uus_addi 13, \tmp2, \tmp2, 0x444
+	uus_addi 14, \tmp3, \tmp3, 0x444
+	uus_addi 15, \tmp4, \tmp4, 0x444
+
+	uus_mtsr 12, \tmp1
+	uus_mtsr 13, \tmp2
+	uus_mtsr 14, \tmp3
+	uus_mtsr 15, \tmp4
+
+	uus_isync
+.endm
+
+#else
 
 /*
  * This macro defines the mapping from contexts to VSIDs (virtual
@@ -100,9 +175,14 @@ struct hash_pte {
 
 typedef struct {
 	unsigned long id;
+	unsigned long sr0;
 	void __user *vdso;
 } mm_context_t;
 
+#ifdef CONFIG_PPC_KUEP
+#define INIT_MM_CONTEXT(mm) .context.sr0 = SR_NX
+#endif
+
 void update_bats(void);
 static inline void cleanup_cpu_mmu_context(void) { }
 
@@ -143,6 +223,8 @@ static __always_inline void update_user_segments(u32 val)
 	update_user_segment(15, val);
 }
 
+int __init find_free_bat(void);
+unsigned int bat_block_size(unsigned long base, unsigned long top);
 #endif /* !__ASSEMBLY__ */
 
 /* We happily ignore the smaller BATs on 601, we don't actually use
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 609c80f67194..f8b94f78403f 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -178,6 +178,7 @@ static inline bool pte_user(pte_t pte)
 #ifndef __ASSEMBLY__
 
 int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 674fe0e890dc..a7a0572f3846 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -99,10 +99,6 @@
  * Defines the address of the vmemap area, in its own region on
  * hash table CPUs.
  */
-#ifdef CONFIG_PPC_MM_SLICES
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#endif /* CONFIG_PPC_MM_SLICES */
 
 /* PTEIDX nibble */
 #define _PTEIDX_SECONDARY	0x8
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index 170339969b7c..69fcf63eec94 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -229,6 +229,11 @@ static inline u64 current_thread_iamr(void)
 
 #ifdef CONFIG_PPC_KUAP
 
+static __always_inline bool kuap_is_disabled(void)
+{
+	return !mmu_has_feature(MMU_FTR_BOOK3S_KUAP);
+}
+
 static inline void kuap_user_restore(struct pt_regs *regs)
 {
 	bool restore_amr = false, restore_iamr = false;
@@ -268,40 +273,38 @@ static inline void kuap_user_restore(struct pt_regs *regs)
 	 */
 }
 
-static inline void kuap_kernel_restore(struct pt_regs *regs,
-				       unsigned long amr)
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
 {
-	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
-		if (unlikely(regs->amr != amr)) {
-			isync();
-			mtspr(SPRN_AMR, regs->amr);
-			/*
-			 * No isync required here because we are about to rfi
-			 * back to previous context before any user accesses
-			 * would be made, which is a CSI.
-			 */
-		}
-	}
+	if (likely(regs->amr == amr))
+		return;
+
+	isync();
+	mtspr(SPRN_AMR, regs->amr);
 	/*
+	 * No isync required here because we are about to rfi
+	 * back to previous context before any user accesses
+	 * would be made, which is a CSI.
+	 *
 	 * No need to restore IAMR when returning to kernel space.
 	 */
 }
 
-static inline unsigned long kuap_get_and_assert_locked(void)
+static inline unsigned long __kuap_get_and_assert_locked(void)
 {
-	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
-		unsigned long amr = mfspr(SPRN_AMR);
-		if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
-			WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
-		return amr;
-	}
-	return 0;
+	unsigned long amr = mfspr(SPRN_AMR);
+
+	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
+		WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
+	return amr;
 }
 
-static inline void kuap_assert_locked(void)
+/* Do nothing, book3s/64 does that in ASM */
+static inline void __kuap_lock(void)
+{
+}
+
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
 {
-	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
-		WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
 }
 
 /*
@@ -339,11 +342,8 @@ static inline void set_kuap(unsigned long value)
 	isync();
 }
 
-static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address,
-				  bool is_write)
+static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
-	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
-		return false;
 	/*
 	 * For radix this will be a storage protection fault (DSISR_PROTFAULT).
 	 * For hash this will be a key fault (DSISR_KEYFAULT)
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 3004f3323144..21f780942911 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -523,8 +523,14 @@ void slb_save_contents(struct slb_entry *slb_ptr);
 void slb_dump_contents(struct slb_entry *slb_ptr);
 
 extern void slb_vmalloc_update(void);
-extern void slb_set_size(u16 size);
 void preload_new_slb_context(unsigned long start, unsigned long sp);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+void slb_set_size(u16 size);
+#else
+static inline void slb_set_size(u16 size) { }
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 /*
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index c02f42d1031e..006cbec70ffe 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -4,6 +4,12 @@
 
 #include <asm/page.h>
 
+#ifdef CONFIG_HUGETLB_PAGE
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
 #ifndef __ASSEMBLY__
 /*
  * Page size definition
@@ -62,6 +68,9 @@ extern struct patb_entry *partition_tb;
 #define PRTS_MASK	0x1f		/* process table size field */
 #define PRTB_MASK	0x0ffffffffffff000UL
 
+/* Number of supported LPID bits */
+extern unsigned int mmu_lpid_bits;
+
 /* Number of supported PID bits */
 extern unsigned int mmu_pid_bits;
 
@@ -76,10 +85,8 @@ extern unsigned long __ro_after_init radix_mem_block_size;
 #define PRTB_SIZE_SHIFT	(mmu_pid_bits + 4)
 #define PRTB_ENTRIES	(1ul << mmu_pid_bits)
 
-/*
- * Power9 currently only support 64K partition table size.
- */
-#define PATB_SIZE_SHIFT	16
+#define PATB_SIZE_SHIFT	(mmu_lpid_bits + 4)
+#define PATB_ENTRIES	(1ul << mmu_lpid_bits)
 
 typedef unsigned long mm_context_id_t;
 struct spinlock;
@@ -98,7 +105,9 @@ typedef struct {
 		 * from EA and new context ids to build the new VAs.
 		 */
 		mm_context_id_t id;
+#ifdef CONFIG_PPC_64S_HASH_MMU
 		mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
+#endif
 	};
 
 	/* Number of bits in the mm_cpumask */
@@ -110,7 +119,9 @@ typedef struct {
 	/* Number of user space windows opened in process mm_context */
 	atomic_t vas_windows;
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 	struct hash_mm_context *hash_context;
+#endif
 
 	void __user *vdso;
 	/*
@@ -133,6 +144,7 @@ typedef struct {
 #endif
 } mm_context_t;
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
 {
 	return ctx->hash_context->user_psize;
@@ -190,11 +202,18 @@ static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
 /*
  * The current system page and segment sizes
  */
-extern int mmu_linear_psize;
 extern int mmu_virtual_psize;
 extern int mmu_vmalloc_psize;
-extern int mmu_vmemmap_psize;
 extern int mmu_io_psize;
+#else /* CONFIG_PPC_64S_HASH_MMU */
+#ifdef CONFIG_PPC_64K_PAGES
+#define mmu_virtual_psize MMU_PAGE_64K
+#else
+#define mmu_virtual_psize MMU_PAGE_4K
+#endif
+#endif
+extern int mmu_linear_psize;
+extern int mmu_vmemmap_psize;
 
 /* MMU initialization */
 void mmu_early_init_devtree(void);
@@ -233,12 +252,13 @@ static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
	 * know which translations we will pick. Hence go with hash
	 * restrictions.
	 */
-	return hash__setup_initial_memory_limit(first_memblock_base,
-					   first_memblock_size);
+	if (!early_radix_enabled())
+		hash__setup_initial_memory_limit(first_memblock_base,
+						 first_memblock_size);
 }
 
 #ifdef CONFIG_PPC_PSERIES
-extern void radix_init_pseries(void);
+void __init radix_init_pseries(void);
 #else
 static inline void radix_init_pseries(void) { }
 #endif
@@ -255,6 +275,7 @@ static inline void radix_init_pseries(void) { }
 void cleanup_cpu_mmu_context(void);
 #endif
 
+#ifdef CONFIG_PPC_64S_HASH_MMU
 static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
 {
 	int index = ea >> MAX_EA_BITS_PER_CONTEXT;
@@ -274,6 +295,7 @@ static inline unsigned long get_user_vsid(mm_context_t *ctx,
 
 	return get_vsid(context, ea, ssize);
 }
+#endif
 
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 33e073d6b0c4..875730d5af40 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1082,6 +1082,8 @@ static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t p
 	return hash__map_kernel_page(ea, pa, prot);
 }
 
+void unmap_kernel_page(unsigned long va);
+
 static inline int __meminit vmemmap_create_mapping(unsigned long start,
 						   unsigned long page_size,
 						   unsigned long phys)
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 3b95769739c7..8b762f282190 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -112,8 +112,14 @@ static inline void hash__flush_tlb_kernel_range(unsigned long start,
 
 struct mmu_gather;
 extern void hash__tlb_flush(struct mmu_gather *tlb);
+void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
 /* Private function for use by PCI IO mapping code */
 extern void __flush_hash_table_range(unsigned long start, unsigned long end);
 extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr);
+#else
+static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { }
+#endif
 
 #endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 215973b4cb26..d2e80f178b6d 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -14,7 +14,6 @@ enum {
 	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
 };
 
-#ifdef CONFIG_PPC_NATIVE
 static inline void tlbiel_all(void)
 {
 	/*
@@ -30,9 +29,6 @@ static inline void tlbiel_all(void)
 	else
 		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
 }
-#else
-static inline void tlbiel_all(void) { BUG(); }
-#endif
 
 static inline void tlbiel_all_lpid(bool radix)
 {
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index ad130e15a126..e8269434ecbe 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -25,6 +25,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				     unsigned long size, pgprot_t vma_prot);
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 
+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC_64S_HASH_MMU)
 /*
  * This gets called at the end of handling a page fault, when
  * the kernel has put a new PTE into the page table for the process.
@@ -35,6 +36,9 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
  * waiting for the inevitable extra hash-table miss exception.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+#else
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
+#endif
 
 #endif /* __ASSEMBLY__ */
 #endif
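
For orientation, the renames above (kuap_* to __kuap_*, with the kuap_is_disabled()/mmu_has_feature() checks dropped from the per-platform bodies) follow a pattern where a generic intermediate layer does the "is KUAP disabled?" test once and only then calls the platform hook. The sketch below is an illustration of that calling convention, not code taken from this diff; the exact wrapper names and bodies are assumptions and may differ from what lands in arch/powerpc/include/asm/kup.h.

/*
 * Illustrative sketch only -- approximates the generic wrappers that call
 * the platform __kuap_*() helpers declared in the headers patched above.
 * Names and bodies are assumptions based on the pattern visible in this
 * diff, not verbatim kernel code.
 */
static __always_inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
	if (kuap_is_disabled())		/* single static-key/feature check ... */
		return;			/* ... instead of one inside every helper */

	__kuap_kernel_restore(regs, amr);	/* platform-specific body */
}

static __always_inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	if (kuap_is_disabled())
		return false;

	return __bad_kuap_fault(regs, address, is_write);
}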