Diffstat (limited to 'arch/powerpc/include/asm/book3s/32')
-rw-r--r--  arch/powerpc/include/asm/book3s/32/hash.h      |  1
-rw-r--r--  arch/powerpc/include/asm/book3s/32/mmu-hash.h  | 15
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgalloc.h   | 44
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h   | 46
4 files changed, 43 insertions, 63 deletions
diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
index f2892c7ab73e..2a0a467d2985 100644
--- a/arch/powerpc/include/asm/book3s/32/hash.h
+++ b/arch/powerpc/include/asm/book3s/32/hash.h
@@ -26,6 +26,7 @@
 #define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
 #define _PAGE_DIRTY	0x080	/* C: page changed */
 #define _PAGE_ACCESSED	0x100	/* R: page referenced */
+#define _PAGE_EXEC	0x200	/* software: exec allowed */
 #define _PAGE_RW	0x400	/* software: user write access allowed */
 #define _PAGE_SPECIAL	0x800	/* software: Special page */
 
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index e38c91388c40..0c261ba2c826 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
 #define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
+
 /*
  * 32-bit hash table MMU support
  */
@@ -9,6 +10,8 @@
  * BATs
  */
 
+#include <asm/page.h>
+
 /* Block size masks */
 #define BL_128K	0x000
 #define BL_256K	0x001
@@ -34,14 +37,20 @@
 #define BAT_PHYS_ADDR(x) ((u32)((x & 0x00000000fffe0000ULL) | \
 				((x & 0x0000000e00000000ULL) >> 24) | \
 				((x & 0x0000000100000000ULL) >> 30)))
+#define PHYS_BAT_ADDR(x) (((u64)(x) & 0x00000000fffe0000ULL) | \
+			  (((u64)(x) << 24) & 0x0000000e00000000ULL) | \
+			  (((u64)(x) << 30) & 0x0000000100000000ULL))
 #else
 #define BAT_PHYS_ADDR(x) (x)
+#define PHYS_BAT_ADDR(x) ((x) & 0xfffe0000)
 #endif
 
 struct ppc_bat {
 	u32 batu;
 	u32 batl;
 };
+
+typedef pte_t *pgtable_t;
 #endif /* !__ASSEMBLY__ */
 
 /*
@@ -83,6 +92,12 @@ typedef struct {
 	unsigned long vdso_base;
 } mm_context_t;
 
+/* patch sites */
+extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2;
+extern s32 patch__hash_page_B, patch__hash_page_C;
+extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
+extern s32 patch__flush_hash_B;
+
 #endif /* !__ASSEMBLY__ */
 
 /* We happily ignore the smaller BATs on 601, we don't actually use
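Note: hash.h gains a real _PAGE_EXEC software bit, and mmu-hash.h adds PHYS_BAT_ADDR() as the inverse of the existing BAT_PHYS_ADDR() packing of a 36-bit physical address into the 32-bit BAT word. A standalone userspace sketch checking that round trip (the test address is arbitrary; this is not kernel code):

/* bat_roundtrip.c — check PHYS_BAT_ADDR() undoes BAT_PHYS_ADDR().
 * Build: cc -o bat_roundtrip bat_roundtrip.c */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

#define BAT_PHYS_ADDR(x) ((u32)((x & 0x00000000fffe0000ULL) | \
				((x & 0x0000000e00000000ULL) >> 24) | \
				((x & 0x0000000100000000ULL) >> 30)))
#define PHYS_BAT_ADDR(x) (((u64)(x) & 0x00000000fffe0000ULL) | \
			  (((u64)(x) << 24) & 0x0000000e00000000ULL) | \
			  (((u64)(x) << 30) & 0x0000000100000000ULL))

int main(void)
{
	/* An arbitrary 36-bit physical address, 128K aligned as BATs require. */
	u64 phys = 0x0000000b40020000ULL;
	u32 packed = BAT_PHYS_ADDR(phys);

	/* Bits 32 and 33-35 land in low register bits and come back out. */
	assert(PHYS_BAT_ADDR(packed) == phys);
	printf("phys %#llx <-> BAT address field %#x\n",
	       (unsigned long long)phys, packed);
	return 0;
}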
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 82e44b1a00ae..3633502e102c 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -25,10 +25,7 @@ extern void __bad_pte(pmd_t *pmd);
 
 extern struct kmem_cache *pgtable_cache[];
 
-#define PGT_CACHE(shift) ({				\
-			BUG_ON(!(shift));		\
-			pgtable_cache[(shift) - 1];	\
-		})
+#define PGT_CACHE(shift) pgtable_cache[shift]
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
@@ -50,8 +47,6 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #define __pmd_free_tlb(tlb,x,a)		do { } while (0)
 /* #define pgd_populate(mm, pmd, pte)      BUG() */
 
-#ifndef CONFIG_BOOKE
-
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
 				       pte_t *pte)
 {
@@ -61,46 +56,31 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pte_page)
 {
-	*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
-}
-
-#define pmd_pgtable(pmd) pmd_page(pmd)
-#else
-
-static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
-				       pte_t *pte)
-{
-	*pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
-}
-
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
-				pgtable_t pte_page)
-{
-	*pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
+	*pmdp = __pmd(__pa(pte_page) | _PMD_PRESENT);
 }
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
-#endif
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
 
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
-extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
+extern pgtable_t pte_alloc_one(struct mm_struct *mm);
+void pte_frag_destroy(void *pte_frag);
+pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel);
+void pte_fragment_free(unsigned long *table, int kernel);
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	pte_fragment_free((unsigned long *)pte, 1);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 {
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
+	pte_fragment_free((unsigned long *)ptepage, 0);
 }
 
 static inline void pgtable_free(void *table, unsigned index_size)
 {
 	if (!index_size) {
-		pgtable_page_dtor(virt_to_page(table));
-		free_page((unsigned long)table);
+		pte_fragment_free((unsigned long *)table, 0);
 	} else {
 		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
 		kmem_cache_free(PGT_CACHE(index_size), table);
@@ -138,6 +118,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
-	pgtable_free_tlb(tlb, page_address(table), 0);
+	pgtable_free_tlb(tlb, table, 0);
 }
 #endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
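Note: the pgalloc.h side moves book3s/32 to PTE fragments: pgtable_t is now a pte_t pointer into part of a page (see the typedef added to mmu-hash.h above), pmd_populate() stores __pa() of it, and frees funnel into pte_fragment_free(). A toy userspace model of the fragment idea follows; the size, names, and single-page bookkeeping are illustrative only, and the kernel's pte_fragment_alloc() refcounts each page through struct page instead:

/* frag_model.c — several page tables carved from one backing page,
 * released when the last one is freed.
 * Build: cc -o frag_model frag_model.c */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE     4096
#define PTE_FRAG_SIZE 1024			/* illustrative fragment size */
#define PTE_FRAG_NR   (PAGE_SIZE / PTE_FRAG_SIZE)

static struct {
	char *page;	/* page currently being carved up */
	int next;	/* next unused fragment in it */
	int live;	/* fragments from it still in use */
} cur;

static char *frag_alloc(void)
{
	if (!cur.page || cur.next == PTE_FRAG_NR) {
		cur.page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
		cur.next = 0;
		cur.live = 0;
	}
	cur.live++;
	return cur.page + PTE_FRAG_SIZE * cur.next++;
}

static void frag_free(char *frag)
{
	/* Toy bookkeeping: handles fragments of the current page only. */
	assert(frag >= cur.page && frag < cur.page + PAGE_SIZE);
	if (--cur.live == 0) {
		free(cur.page);
		cur.page = NULL;
	}
}

int main(void)
{
	char *a = frag_alloc();
	char *b = frag_alloc();

	printf("table a=%p table b=%p share one %d-byte page\n",
	       (void *)a, (void *)b, PAGE_SIZE);
	frag_free(a);
	frag_free(b);
	return 0;
}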
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index c21d33704633..49d76adb9bc5 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -10,9 +10,9 @@
 /* And here we include common definitions */
 
 #define _PAGE_KERNEL_RO		0
-#define _PAGE_KERNEL_ROX	0
+#define _PAGE_KERNEL_ROX	(_PAGE_EXEC)
 #define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
 
 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE
 
@@ -66,11 +66,11 @@ static inline bool pte_user(pte_t pte)
  */
 #define PAGE_NONE	__pgprot(_PAGE_BASE)
 #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
 #define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
 #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
 
 /* Permission masks used for kernel mappings */
 #define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
@@ -318,7 +318,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 					   int psize)
 {
 	unsigned long set = pte_val(entry) &
-		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
+		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 
 	pte_update(ptep, 0, set);
 
@@ -328,24 +328,10 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
 
-/*
- * Note that on Book E processors, the pmd contains the kernel virtual
- * (lowmem) address of the pte page.  The physical address is less useful
- * because everything runs with translation enabled (even the TLB miss
- * handler).  On everything else the pmd contains the physical address
- * of the pte page.  -- paulus
- */
-#ifndef CONFIG_BOOKE
 #define pmd_page_vaddr(pmd)	\
-	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 #define pmd_page(pmd)		\
 	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
-#else
-#define pmd_page_vaddr(pmd)	\
-	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
-#define pmd_page(pmd)		\
-	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
-#endif
 
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
@@ -360,7 +346,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 #define pte_offset_kernel(dir, addr)	\
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr)		\
-	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+	((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
+		   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
 #define pte_unmap(pte)		kunmap_atomic(pte)
 
 /*
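Note: with fragments, the PTE table's physical address held in the pmd is no longer page aligned, which is why pmd_page_vaddr() now masks with ~(PTE_TABLE_SIZE - 1) rather than PAGE_MASK, and why pte_offset_map() adds the sub-page offset back onto the kmap_atomic() mapping. A standalone arithmetic check (the PTE_TABLE_SIZE and flag values here are illustrative, not the kernel's):

/* pmd_mask.c — masking with the table size keeps the fragment offset.
 * Build: cc -o pmd_mask pmd_mask.c */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE      4096UL
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define PTE_TABLE_SIZE 1024UL	/* illustrative */
#define _PMD_PRESENT   0x001UL	/* low bit used as a flag */

int main(void)
{
	/* A PTE table sitting in the second fragment of its page. */
	uint64_t table = 0x01234000UL + PTE_TABLE_SIZE;
	uint64_t pmd = table | _PMD_PRESENT;

	/* The old mask rounds down to the page and loses the fragment. */
	assert((pmd & PAGE_MASK) == 0x01234000UL);
	/* The new mask only strips the flag bits below the table size. */
	assert((pmd & ~(PTE_TABLE_SIZE - 1)) == table);
	printf("pmd %#lx -> table %#lx\n", (unsigned long)pmd,
	       (unsigned long)(pmd & ~(PTE_TABLE_SIZE - 1)));
	return 0;
}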
@@ -384,7 +371,7 @@ static inline int pte_dirty(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_DIRTY); }
 static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
 static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
 static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
-static inline bool pte_exec(pte_t pte)		{ return true; }
+static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }
 
 static inline int pte_present(pte_t pte)
 {
@@ -451,7 +438,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
 
 static inline pte_t pte_exprotect(pte_t pte)
 {
-	return pte;
+	return __pte(pte_val(pte) & ~_PAGE_EXEC);
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
@@ -466,7 +453,7 @@ static inline pte_t pte_mkold(pte_t pte)
 
 static inline pte_t pte_mkexec(pte_t pte)
 {
-	return pte;
+	return __pte(pte_val(pte) | _PAGE_EXEC);
 }
 
 static inline pte_t pte_mkpte(pte_t pte)
@@ -524,7 +511,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, int percpu)
 {
-#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
 	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
 	 * helper pte_update() which does an atomic update. We need to do that
 	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
@@ -537,7 +524,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	else
 		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
 
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#elif defined(CONFIG_PTE_64BIT)
 	/* Second case is 32-bit with 64-bit PTE.  In this case, we
 	 * can just store as long as we do the two halves in the right order
 	 * with a barrier in between.  This is possible because we take care,
@@ -560,7 +547,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
 		: "r" (pte) : "memory");
 
-#elif defined(CONFIG_PPC_STD_MMU_32)
+#else
 	/* Third case is 32-bit hash table in UP mode, we need to preserve
 	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
 	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
@@ -568,9 +555,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 */
 	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) |
 		      (pte_val(pte) & ~_PAGE_HASHPTE));
-
-#else
-#error "Not supported "
 #endif
 }
 
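Note: the remaining hunks make the exec accessors honor _PAGE_EXEC and trim the __set_pte_at() #if ladder now that this header is book3s/32 only; the fallback branch still merges _PAGE_HASHPTE from the old PTE because the stale hash-table entry may not be flushed yet. A standalone check of that bit-merge (assuming _PAGE_HASHPTE is 0x002 in this header, a value not shown in the hunks above):

/* hashpte_merge.c — the UP hash path keeps _PAGE_HASHPTE from the old PTE.
 * Build: cc -o hashpte_merge hashpte_merge.c */
#include <assert.h>
#include <stdio.h>

#define _PAGE_HASHPTE	0x002	/* assumed value from hash.h */
#define _PAGE_EXEC	0x200	/* new software bit from this patch */
#define _PAGE_RW	0x400

int main(void)
{
	unsigned long oldpte = _PAGE_RW | _PAGE_HASHPTE;  /* hashed, writable */
	unsigned long newpte = _PAGE_EXEC;                /* no hash bit set */
	unsigned long merged = (oldpte & _PAGE_HASHPTE) |
			       (newpte & ~_PAGE_HASHPTE);

	/* The hash bit survives even though the new PTE did not carry it. */
	assert(merged == (_PAGE_HASHPTE | _PAGE_EXEC));
	printf("merged pte flags: %#lx\n", merged);
	return 0;
}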