Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/Kconfig       | 18
-rw-r--r-- | arch/arm/mm/cache-v6.S    | 31
-rw-r--r-- | arch/arm/mm/dma-mapping.c |  5
-rw-r--r-- | arch/arm/mm/fault.c       | 30
-rw-r--r-- | arch/arm/mm/kasan_init.c  |  8
5 files changed, 38 insertions(+), 54 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c164cde50243..2b6f50dd5478 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -937,24 +937,6 @@ config VDSO
 	  You must have glibc 2.22 or later for programs to seamlessly
 	  take advantage of this.
 
-config DMA_CACHE_RWFO
-	bool "Enable read/write for ownership DMA cache maintenance"
-	depends on CPU_V6K && SMP
-	default y
-	help
-	  The Snoop Control Unit on ARM11MPCore does not detect the
-	  cache maintenance operations and the dma_{map,unmap}_area()
-	  functions may leave stale cache entries on other CPUs. By
-	  enabling this option, Read or Write For Ownership in the ARMv6
-	  DMA cache maintenance functions is performed. These LDR/STR
-	  instructions change the cache line state to shared or modified
-	  so that the cache operation has the desired effect.
-
-	  Note that the workaround is only valid on processors that do
-	  not perform speculative loads into the D-cache. For such
-	  processors, if cache maintenance operations are not broadcast
-	  in hardware, other workarounds are needed (e.g. cache
-	  maintenance broadcasting in software via FIQ).
 
 config OUTER_CACHE
 	bool
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 250c83bf7158..44211d8a296f 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -201,10 +201,6 @@ ENTRY(v6_flush_kern_dcache_area)
  *	- end     - virtual end address of region
  */
 v6_dma_inv_range:
-#ifdef CONFIG_DMA_CACHE_RWFO
-	ldrb	r2, [r0]			@ read for ownership
-	strb	r2, [r0]			@ write for ownership
-#endif
 	tst	r0, #D_CACHE_LINE_SIZE - 1
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 #ifdef HARVARD_CACHE
@@ -213,10 +209,6 @@ v6_dma_inv_range:
 	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
 #endif
 	tst	r1, #D_CACHE_LINE_SIZE - 1
-#ifdef CONFIG_DMA_CACHE_RWFO
-	ldrbne	r2, [r1, #-1]			@ read for ownership
-	strbne	r2, [r1, #-1]			@ write for ownership
-#endif
 	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
 #ifdef HARVARD_CACHE
 	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
@@ -231,10 +223,6 @@ v6_dma_inv_range:
 #endif
 	add	r0, r0, #D_CACHE_LINE_SIZE
 	cmp	r0, r1
-#ifdef CONFIG_DMA_CACHE_RWFO
-	ldrlo	r2, [r0]			@ read for ownership
-	strlo	r2, [r0]			@ write for ownership
-#endif
 	blo	1b
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
@@ -248,9 +236,6 @@ v6_dma_inv_range:
 v6_dma_clean_range:
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
-#ifdef CONFIG_DMA_CACHE_RWFO
-	ldr	r2, [r0]			@ read for ownership
-#endif
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
 #else
@@ -269,10 +254,6 @@ v6_dma_clean_range:
  *	- end     - virtual end address of region
  */
 ENTRY(v6_dma_flush_range)
-#ifdef CONFIG_DMA_CACHE_RWFO
-	ldrb	r2, [r0]			@ read for ownership
-	strb	r2, [r0]			@ write for ownership
-#endif
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
 #ifdef HARVARD_CACHE
@@ -282,10 +263,6 @@ ENTRY(v6_dma_flush_range)
 #endif
 	add	r0, r0, #D_CACHE_LINE_SIZE
 	cmp	r0, r1
-#ifdef CONFIG_DMA_CACHE_RWFO
-	ldrblo	r2, [r0]			@ read for ownership
-	strblo	r2, [r0]			@ write for ownership
-#endif
 	blo	1b
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
@@ -301,13 +278,7 @@ ENTRY(v6_dma_map_area)
 	add	r1, r1, r0
 	teq	r2, #DMA_FROM_DEVICE
 	beq	v6_dma_inv_range
-#ifndef CONFIG_DMA_CACHE_RWFO
 	b	v6_dma_clean_range
-#else
-	teq	r2, #DMA_TO_DEVICE
-	beq	v6_dma_clean_range
-	b	v6_dma_flush_range
-#endif
 ENDPROC(v6_dma_map_area)
 
 /*
@@ -317,11 +288,9 @@ ENDPROC(v6_dma_map_area)
  *	- dir	- DMA direction
  */
 ENTRY(v6_dma_unmap_area)
-#ifndef CONFIG_DMA_CACHE_RWFO
 	add	r1, r1, r0
 	teq	r2, #DMA_TO_DEVICE
 	bne	v6_dma_inv_range
-#endif
 	ret	lr
 ENDPROC(v6_dma_unmap_area)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 5409225b4abc..d688eac6dbc1 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -859,10 +859,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 	int i = 0;
 	int order_idx = 0;
 
-	if (array_size <= PAGE_SIZE)
-		pages = kzalloc(array_size, GFP_KERNEL);
-	else
-		pages = vzalloc(array_size);
+	pages = kvzalloc(array_size, GFP_KERNEL);
 	if (!pages)
 		return NULL;
 
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index fef62e4a9edd..e96fb40b9cc3 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -278,6 +278,35 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
+	if (!(flags & FAULT_FLAG_USER))
+		goto lock_mmap;
+
+	vma = lock_vma_under_rcu(mm, addr);
+	if (!vma)
+		goto lock_mmap;
+
+	if (!(vma->vm_flags & vm_flags)) {
+		vma_end_read(vma);
+		goto lock_mmap;
+	}
+	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
+	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+		vma_end_read(vma);
+
+	if (!(fault & VM_FAULT_RETRY)) {
+		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+		goto done;
+	}
+	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+
+	/* Quick path to respond to signals */
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
+		return 0;
+	}
+lock_mmap:
+
 retry:
 	vma = lock_mm_and_find_vma(mm, addr, regs);
 	if (unlikely(!vma)) {
@@ -316,6 +345,7 @@ retry:
 	}
 
 	mmap_read_unlock(mm);
+done:
 
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR
diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c
index 24d71b5db62d..111d4f703136 100644
--- a/arch/arm/mm/kasan_init.c
+++ b/arch/arm/mm/kasan_init.c
@@ -28,6 +28,12 @@ static pgd_t tmp_pgd_table[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
 
 pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
 
+static __init void *kasan_alloc_block_raw(size_t size)
+{
+	return memblock_alloc_try_nid_raw(size, size, __pa(MAX_DMA_ADDRESS),
+					  MEMBLOCK_ALLOC_NOLEAKTRACE, NUMA_NO_NODE);
+}
+
 static __init void *kasan_alloc_block(size_t size)
 {
 	return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
@@ -50,7 +56,7 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
 		if (!pte_none(READ_ONCE(*ptep)))
 			continue;
 
-		p = kasan_alloc_block(PAGE_SIZE);
+		p = kasan_alloc_block_raw(PAGE_SIZE);
 		if (!p) {
 			panic("%s failed to allocate shadow page for address 0x%lx\n",
 			      __func__, addr);
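
A note on the dma-mapping.c hunk: kvzalloc() returns zeroed memory, using kmalloc() when a physically contiguous allocation is feasible and falling back to vmalloc() otherwise, so the open-coded "array_size <= PAGE_SIZE" branch is no longer needed; the matching free path must use kvfree(), which accepts memory from either allocator. Below is a minimal sketch of that pattern, not code from this commit; the helper names are hypothetical, and the includes assume a recent kernel where the kvmalloc family is visible via <linux/slab.h>.

	#include <linux/slab.h>		/* kvzalloc(), kvfree() */
	#include <linux/overflow.h>	/* array_size() */
	#include <linux/mm_types.h>	/* struct page */

	/* Hypothetical helper: allocate a zeroed array of page pointers. */
	static struct page **example_alloc_page_array(size_t count)
	{
		/*
		 * kvzalloc() chooses kmalloc() or vmalloc() internally, so the
		 * caller no longer branches on the allocation size.
		 */
		return kvzalloc(array_size(count, sizeof(struct page *)),
				GFP_KERNEL);
	}

	/* Hypothetical helper: release the array however it was backed. */
	static void example_free_page_array(struct page **pages)
	{
		/* kvfree() handles both kmalloc()- and vmalloc()-backed memory. */
		kvfree(pages);
	}

Pairing kvzalloc() with kvfree() this way removes any need for the caller to remember which allocator backed the array, which is the simplification the __iommu_alloc_buffer() change above takes advantage of.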