Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/Kconfig        4
-rw-r--r--   arch/arm/mm/dma-mapping.c  2
-rw-r--r--   arch/arm/mm/fault-armv.c   5
-rw-r--r--   arch/arm/mm/fault.c        66
-rw-r--r--   arch/arm/mm/fault.h        4
-rw-r--r--   arch/arm/mm/flush.c        1
-rw-r--r--   arch/arm/mm/mmu.c          2
-rw-r--r--   arch/arm/mm/nommu.c        1
-rw-r--r--   arch/arm/mm/tcm.h          17
9 files changed, 32 insertions, 70 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index be183ed1232d..c164cde50243 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -712,7 +712,7 @@ config ARM_VIRT_EXT
 	  assistance.
 
 	  A compliant bootloader is required in order to make maximum
-	  use of this feature.  Refer to Documentation/arm/booting.rst for
+	  use of this feature.  Refer to Documentation/arch/arm/booting.rst for
 	  details.
 
 config SWP_EMULATE
@@ -904,7 +904,7 @@ config KUSER_HELPERS
 	  the CPU type fitted to the system.  This permits binaries to be
 	  run on ARMv4 through to ARMv7 without modification.
 
-	  See Documentation/arm/kernel_user_helpers.rst for details.
+	  See Documentation/arch/arm/kernel_user_helpers.rst for details.
 
 	  However, the fixed address nature of these helpers can be used
 	  by ROP (return orientated programming) authors when creating
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b4a33358d2e9..bc4ed5ce3e00 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -258,12 +258,14 @@ static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
 
 static int dma_mmu_remap_num __initdata;
 
+#ifdef CONFIG_DMA_CMA
 void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
 {
 	dma_mmu_remap[dma_mmu_remap_num].base = base;
 	dma_mmu_remap[dma_mmu_remap_num].size = size;
 	dma_mmu_remap_num++;
 }
+#endif
 
 void __init dma_contiguous_remap(void)
 {
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 0e49154454a6..ca5302b0b7ee 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -117,8 +117,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	 * must use the nested version.  This also means we need to
 	 * open-code the spin-locking.
 	 */
-	ptl = pte_lockptr(vma->vm_mm, pmd);
 	pte = pte_offset_map(pmd, address);
+	if (!pte)
+		return 0;
+
+	ptl = pte_lockptr(vma->vm_mm, pmd);
 	do_pte_lock(ptl);
 
 	ret = do_adjust_pte(vma, address, pfn, pte);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 2418f1efabd8..fef62e4a9edd 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -85,6 +85,9 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
 			break;
 
 		pte = pte_offset_map(pmd, addr);
+		if (!pte)
+			break;
+
 		pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
 #ifndef CONFIG_ARM_LPAE
 		pr_cont(", *ppte=%08llx",
@@ -232,37 +235,11 @@ static inline bool is_permission_fault(unsigned int fsr)
 	return false;
 }
 
-static vm_fault_t __kprobes
-__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
-		unsigned long vma_flags, struct pt_regs *regs)
-{
-	struct vm_area_struct *vma = find_vma(mm, addr);
-	if (unlikely(!vma))
-		return VM_FAULT_BADMAP;
-
-	if (unlikely(vma->vm_start > addr)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			return VM_FAULT_BADMAP;
-		if (addr < FIRST_USER_ADDRESS)
-			return VM_FAULT_BADMAP;
-		if (expand_stack(vma, addr))
-			return VM_FAULT_BADMAP;
-	}
-
-	/*
-	 * ok, we have a good vm_area for this memory access, check the
-	 * permissions on the VMA allow for the fault which occurred.
-	 */
-	if (!(vma->vm_flags & vma_flags))
-		return VM_FAULT_BADACCESS;
-
-	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
-}
-
 static int __kprobes
 do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
@@ -301,31 +278,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-	/*
-	 * As per x86, we may deadlock here.  However, since the kernel only
-	 * validly references user space from well defined areas of the code,
-	 * we can bug out early if this is from code which shouldn't.
-	 */
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
-			goto no_context;
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above down_read_trylock() might have succeeded in
-		 * which case, we'll have missed the might_sleep() from
-		 * down_read()
-		 */
-		might_sleep();
-#ifdef CONFIG_DEBUG_VM
-		if (!user_mode(regs) &&
-		    !search_exception_tables(regs->ARM_pc))
-			goto no_context;
-#endif
+	vma = lock_mm_and_find_vma(mm, addr, regs);
+	if (unlikely(!vma)) {
+		fault = VM_FAULT_BADMAP;
+		goto bad_area;
 	}
 
-	fault = __do_page_fault(mm, addr, flags, vm_flags, regs);
+	/*
+	 * ok, we have a good vm_area for this memory access, check the
+	 * permissions on the VMA allow for the fault which occurred.
+	 */
+	if (!(vma->vm_flags & vm_flags))
+		fault = VM_FAULT_BADACCESS;
+	else
+		fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
 
 	/* If we need to retry but a fatal signal is pending, handle the
 	 * signal first. We do not need to release the mmap_lock because
@@ -356,6 +323,7 @@ retry:
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
 		return 0;
 
+bad_area:
 	/*
 	 * If we are in kernel mode at this point, we
 	 * have no context to handle this fault with.
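The fault.c hunks above replace the open-coded mmap_read_trylock()/find_vma()/expand_stack() sequence with the generic lock_mm_and_find_vma() helper, which takes the mmap lock, looks up the VMA covering the faulting address and expands the stack if the VMA allows it; on failure it returns NULL with the lock already dropped, which is why the error case can jump straight to the new bad_area label. The related pte_offset_map() hunks (in adjust_pte() and show_pte()) make their callers bail out when no page table can be mapped. A condensed, illustrative sketch of the resulting hot path, reconstructed from the "+" lines above rather than quoted from the file, with the surrounding declarations assumed from fault.c:

retry:
	/* Take the mmap lock and find (or grow) the VMA for this address. */
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		/* No usable VMA: the helper has already released the lock. */
		fault = VM_FAULT_BADMAP;
		goto bad_area;
	}

	/* Does the VMA permit the kind of access that faulted? */
	if (!(vma->vm_flags & vm_flags))
		fault = VM_FAULT_BADACCESS;
	else
		fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);

The design point is that the stack-expansion and lock-retry logic now lives in one generic helper rather than being open-coded in each architecture's fault handler.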
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 54927ba1fa6e..e8f8c1902544 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -37,5 +37,9 @@ static inline int fsr_fs(unsigned int fsr)
 
 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
 void early_abt_enable(void);
+asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr,
+			     struct pt_regs *regs);
+asmlinkage void do_PrefetchAbort(unsigned long addr, unsigned int ifsr,
+				 struct pt_regs *regs);
 
 #endif	/* __ARCH_ARM_FAULT_H */
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 7ff9feea13a6..2508be91b7a0 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -354,6 +354,7 @@ EXPORT_SYMBOL(flush_dcache_page);
  *  memcpy() to/from page
  *  if written to page, flush_dcache_page()
  */
+void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
 void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
 	unsigned long pfn;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 463fc2a8448f..f3a52c08a200 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -21,6 +21,7 @@
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
+#include <asm/tcm.h>
 #include <asm/tlb.h>
 #include <asm/highmem.h>
 #include <asm/system_info.h>
@@ -37,7 +38,6 @@
 
 #include "fault.h"
 #include "mm.h"
-#include "tcm.h"
 
 extern unsigned long __atags_pointer;
 
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 53f2d8774fdb..43cfd06bbeba 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -21,6 +21,7 @@
 #include <asm/cputype.h>
 #include <asm/mpu.h>
 #include <asm/procinfo.h>
+#include <asm/idmap.h>
 
 #include "mm.h"
 
diff --git a/arch/arm/mm/tcm.h b/arch/arm/mm/tcm.h
deleted file mode 100644
index 6b80a760d875..000000000000
--- a/arch/arm/mm/tcm.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008-2009 ST-Ericsson AB
- * TCM memory handling for ARM systems
- *
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- * Author: Rickard Andersson <rickard.andersson@stericsson.com>
- */
-
-#ifdef CONFIG_HAVE_TCM
-void __init tcm_init(void);
-#else
-/* No TCM support, just blank inlines to be optimized out */
-static inline void tcm_init(void)
-{
-}
-#endif
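Several of the smaller hunks read as declaration cleanups rather than behaviour changes: fault.h gains asmlinkage prototypes for do_DataAbort()/do_PrefetchAbort(), and flush.c adds a self-declaration immediately before the __flush_anon_page() definition. Both appear to follow the common -Wmissing-prototypes (make W=1) recipe of making sure every external function definition is preceded by a visible declaration. A minimal stand-alone sketch of that pattern, using a hypothetical function name rather than anything from these files:

/* Ordinarily this declaration lives in a header that every caller includes;
 * placing it right before the definition also satisfies -Wmissing-prototypes. */
void frob_cache(unsigned long addr);

void frob_cache(unsigned long addr)
{
	/* ... architecture-specific work would go here ... */
	(void)addr;
}

The tcm.h deletion pairs with the mmu.c include change above: the tcm_init() declaration, and its no-op stub when CONFIG_HAVE_TCM is not set, is now picked up from <asm/tcm.h> instead of a header private to arch/arm/mm.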