Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/fault.c        | 33
-rw-r--r--  arch/arm64/mm/hugetlbpage.c  |  5
-rw-r--r--  arch/arm64/mm/init.c         | 26
-rw-r--r--  arch/arm64/mm/ioremap.c      | 20
-rw-r--r--  arch/arm64/mm/proc.S         |  4
-rw-r--r--  arch/arm64/mm/tlb.S          |  2
6 files changed, 50 insertions(+), 40 deletions(-)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 6c8ba25bf6bb..c23751b06120 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -130,7 +130,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
 	force_sig_info(sig, &si, tsk);
 }
 
-void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->active_mm;
@@ -199,13 +199,6 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-	if (esr & ESR_LNX_EXEC) {
-		vm_flags = VM_EXEC;
-	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
-		vm_flags = VM_WRITE;
-		mm_flags |= FAULT_FLAG_WRITE;
-	}
-
 	tsk = current;
 	mm  = tsk->mm;
 
@@ -220,6 +213,16 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		mm_flags |= FAULT_FLAG_USER;
+
+	if (esr & ESR_LNX_EXEC) {
+		vm_flags = VM_EXEC;
+	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+		vm_flags = VM_WRITE;
+		mm_flags |= FAULT_FLAG_WRITE;
+	}
+
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
@@ -288,6 +291,13 @@ retry:
 			      VM_FAULT_BADACCESS))))
 		return 0;
 
+	/*
+	 * If we are in kernel mode at this point, we have no context to
+	 * handle this fault with.
+	 */
+	if (!user_mode(regs))
+		goto no_context;
+
 	if (fault & VM_FAULT_OOM) {
 		/*
 		 * We ran out of memory, call the OOM killer, and return to
@@ -298,13 +308,6 @@ retry:
 		return 0;
 	}
 
-	/*
-	 * If we are in kernel mode at this point, we have no context to
-	 * handle this fault with.
-	 */
-	if (!user_mode(regs))
-		goto no_context;
-
 	if (fault & VM_FAULT_SIGBUS) {
 		/*
 		 * We had some memory, but were unable to successfully fix up
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 2fc8258bab2d..5e9aec358306 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -54,6 +54,11 @@ int pud_huge(pud_t pud)
 	return !(pud_val(pud) & PUD_TABLE_BIT);
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
+
 static __init int setup_hugepagesz(char *opt)
 {
 	unsigned long ps = memparse(opt, &opt);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 67e8d7ce3fe7..0cb8742de4f2 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -31,7 +31,6 @@
 #include <linux/sort.h>
 #include <linux/of_fdt.h>
 
-#include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -39,18 +38,9 @@
 
 #include "mm.h"
 
-static unsigned long phys_initrd_start __initdata = 0;
-static unsigned long phys_initrd_size __initdata = 0;
-
 phys_addr_t memstart_addr __read_mostly = 0;
 
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-					    unsigned long end)
-{
-	phys_initrd_start = start;
-	phys_initrd_size = end - start;
-}
-
+#ifdef CONFIG_BLK_DEV_INITRD
 static int __init early_initrd(char *p)
 {
 	unsigned long start, size;
@@ -60,12 +50,13 @@ static int __init early_initrd(char *p)
 
 	if (*endp == ',') {
 		size = memparse(endp + 1, NULL);
-		phys_initrd_start = start;
-		phys_initrd_size = size;
+		initrd_start = (unsigned long)__va(start);
+		initrd_end = (unsigned long)__va(start + size);
 	}
 	return 0;
 }
 early_param("initrd", early_initrd);
+#endif
 
 #define MAX_DMA32_PFN	((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
 
@@ -138,13 +129,8 @@ void __init arm64_memblock_init(void)
 	/* Register the kernel text, kernel data and initrd with memblock */
 	memblock_reserve(__pa(_text), _end - _text);
 #ifdef CONFIG_BLK_DEV_INITRD
-	if (phys_initrd_size) {
-		memblock_reserve(phys_initrd_start, phys_initrd_size);
-
-		/* Now convert initrd to virtual addresses */
-		initrd_start = __phys_to_virt(phys_initrd_start);
-		initrd_end = initrd_start + phys_initrd_size;
-	}
+	if (initrd_start)
+		memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
 #endif
 
 	/*
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 1725cd6db37a..2bb1d586664c 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -77,8 +77,24 @@ EXPORT_SYMBOL(__ioremap);
 
 void __iounmap(volatile void __iomem *io_addr)
 {
-	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
 
-	vunmap(addr);
+	/*
+	 * We could get an address outside vmalloc range in case
+	 * of ioremap_cache() reusing a RAM mapping.
+	 */
+	if (VMALLOC_START <= addr && addr < VMALLOC_END)
+		vunmap((void *)addr);
 }
 EXPORT_SYMBOL(__iounmap);
+
+void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
+{
+	/* For normal memory we already have a cacheable mapping. */
+	if (pfn_valid(__phys_to_pfn(phys_addr)))
+		return (void __iomem *)__phys_to_virt(phys_addr);
+
+	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
+				__builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index b1b31bbc967b..421b99fd635d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -162,9 +162,9 @@ ENDPROC(__cpu_setup)
  *	CE0      XWHW CZ      ME TEEA S
  * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
  * 0011 0... 1101 ..0. ..0. 10.. .... ....	< hardware reserved
- * .... .100 .... 01.1 11.1 ..01 0001 1101	< software settings
+ * .... .1.. .... 01.1 11.1 ..01 0001 1101	< software settings
  */
 	.type	crval, #object
 crval:
-	.word	0x030802e2			// clear
+	.word	0x000802e2			// clear
 	.word	0x0405d11d			// set
diff --git a/arch/arm64/mm/tlb.S b/arch/arm64/mm/tlb.S
index 8ae80a18e8ec..19da91e0cd27 100644
--- a/arch/arm64/mm/tlb.S
+++ b/arch/arm64/mm/tlb.S
@@ -35,7 +35,7 @@
  */
ENTRY(__cpu_flush_user_tlb_range)
 	vma_vm_mm x3, x2			// get vma->vm_mm
-	mmid	x3, x3				// get vm_mm->context.id
+	mmid	w3, x3				// get vm_mm->context.id
 	dsb	sy
 	lsr	x0, x0, #12			// align address
 	lsr	x1, x1, #12
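
Note (illustration only, not part of the diff): a minimal usage sketch of the ioremap_cache()/__iounmap() pairing added in arch/arm64/mm/ioremap.c above. The fw_tbl_map()/fw_tbl_unmap() helpers and their phys/size arguments are hypothetical; only ioremap_cache(), iounmap() and the linear-map reuse shown in the diff come from the source.

/* Sketch, assuming a caller that maps a firmware table at a RAM or MMIO address. */
#include <linux/errno.h>
#include <linux/io.h>

static void __iomem *fw_tbl;

static int fw_tbl_map(phys_addr_t phys, size_t size)
{
	/*
	 * If phys lies in RAM (pfn_valid() is true), the new arm64
	 * ioremap_cache() returns the existing cacheable linear mapping;
	 * otherwise it creates a PROT_NORMAL mapping in the vmalloc area.
	 */
	fw_tbl = ioremap_cache(phys, size);
	if (!fw_tbl)
		return -ENOMEM;
	return 0;
}

static void fw_tbl_unmap(void)
{
	/*
	 * iounmap() resolves to __iounmap(), which now calls vunmap() only
	 * for addresses inside the vmalloc range, so handing back a
	 * linear-map address obtained for RAM is harmless.
	 */
	iounmap(fw_tbl);
}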