author     Linus Torvalds <torvalds@linux-foundation.org>  2022-03-25 10:11:38 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-03-25 10:11:38 -0700
commit     aa5b537b0ecc16992577b013f11112d54c7ce869 (patch)
tree       ac9f6ce6c8c5b4722501cb36e95b3c0a35aa933e /arch/riscv/mm
parent     d710d370c4911e83da5d2bc43d4a2c3b56bd27e7 (diff)
parent     bbde015227e89f1da21bd3b84523d62c4a445c06 (diff)
Merge tag 'riscv-for-linus-5.18-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V updates from Palmer Dabbelt:
- Support for Sv57-based virtual memory (see the probe sketch after
  this list).
- Various improvements for the Microchip PolarFire SoC and the
  associated Icicle dev board, which should allow upstream kernels to
  boot without any additional modifications.
- An improved memmove() implementation.
- Support for the new Sscofpmf and SBI PMU extensions, which allow for
  a much more useful perf implementation on RISC-V systems (see the
  SBI call sketch after the commit list).
- Support for restartable sequences.
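
For context on the Sv57 item: the kernel now probes the paging mode at boot. set_satp_mode() in the init.c diff below builds an identity mapping, writes satp with MODE=Sv57, reads the CSR back, and falls back to Sv48 via disable_pgtable_l5() (and further to Sv39 via disable_pgtable_l4()) when the write does not take, since hardware ignores writes of an unsupported mode. Below is a minimal standalone model of that fallback ladder; csr_write_satp(), satp, and hw_max_mode are stand-ins invented for this sketch, not kernel interfaces:

#include <stdint.h>
#include <stdio.h>

/* satp.MODE encodings (RV64, privileged spec, bits 63:60) */
#define SATP_MODE_39 (8ULL << 60)
#define SATP_MODE_48 (9ULL << 60)
#define SATP_MODE_57 (10ULL << 60)

static uint64_t satp;                         /* models the CSR; 0 = Bare */
static uint64_t hw_max_mode = SATP_MODE_48;   /* assume Sv57 unimplemented */

/* Hardware ignores writes of an unsupported MODE, leaving satp unchanged. */
static void csr_write_satp(uint64_t v)
{
	if ((v >> 60) <= (hw_max_mode >> 60))
		satp = v;
}

int main(void)
{
	const uint64_t mode[] = { SATP_MODE_57, SATP_MODE_48, SATP_MODE_39 };
	const char *name[] = { "Sv57", "Sv48", "Sv39" };

	for (int i = 0; i < 3; i++) {
		csr_write_satp(mode[i]);  /* kernel: csr_write + csr_swap readback */
		if (satp == mode[i]) {
			printf("selected %s\n", name[i]);
			return 0;
		}
		/* kernel: disable_pgtable_l5(), then disable_pgtable_l4() */
	}
	return 1;
}

Run as-is it prints "selected Sv48", mirroring a machine that implements Sv48 but not Sv57.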
* tag 'riscv-for-linus-5.18-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (36 commits)
rseq/selftests: Add support for RISC-V
RISC-V: Add support for restartable sequence
MAINTAINERS: Add entry for RISC-V PMU drivers
Documentation: riscv: Remove the old documentation
RISC-V: Add sscofpmf extension support
RISC-V: Add perf platform driver based on SBI PMU extension
RISC-V: Add RISC-V SBI PMU extension definitions
RISC-V: Add a simple platform driver for RISC-V legacy perf
RISC-V: Add a perf core library for pmu drivers
RISC-V: Add CSR encodings for all HPMCOUNTERS
RISC-V: Remove the current perf implementation
RISC-V: Improve /proc/cpuinfo output for ISA extensions
RISC-V: Do not continue isa string parsing without correct XLEN
RISC-V: Implement multi-letter ISA extension probing framework
RISC-V: Extract multi-letter extension names from "riscv, isa"
RISC-V: Minimal parser for "riscv, isa" strings
RISC-V: Correctly print supported extensions
riscv: Fixed misaligned memory access. Fixed pointer comparison.
MAINTAINERS: update riscv/microchip entry
riscv: dts: microchip: add new peripherals to icicle kit device tree
...
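
On the PMU series listed above: the new perf driver programs counters through the SBI PMU extension, that is, via ecall into the SBI firmware rather than direct CSR writes, with a small legacy driver kept for older firmwares. The sketch below shows only the SBI calling convention those drivers sit on (extension ID in a7, function ID in a6, arguments in a0..a5, error and value back in a0 and a1). It is a simplified single-argument model of the kernel's sbi_ecall() helper, it can only execute in supervisor mode beneath an SBI implementation such as OpenSBI, and while the EID/FID constants follow the published SBI PMU spec, treat the whole snippet as illustration:

/* SBI calling convention: EID in a7, FID in a6, args in a0..a5;
 * error returns in a0, value in a1. */
struct sbiret {
	long error;
	long value;
};

static struct sbiret sbi_ecall1(unsigned long eid, unsigned long fid,
				unsigned long arg0)
{
	register unsigned long a0 __asm__("a0") = arg0;
	register unsigned long a1 __asm__("a1");
	register unsigned long a6 __asm__("a6") = fid;
	register unsigned long a7 __asm__("a7") = eid;

	__asm__ volatile ("ecall"
			  : "+r" (a0), "=r" (a1)
			  : "r" (a6), "r" (a7)
			  : "memory");

	return (struct sbiret){ .error = (long)a0, .value = (long)a1 };
}

#define SBI_EXT_PMU              0x504D55  /* "PMU" in ASCII */
#define SBI_EXT_PMU_NUM_COUNTERS 0

/* e.g. ask the firmware how many counters it exposes: */
static long pmu_num_counters(void)
{
	return sbi_ecall1(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0).value;
}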
Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--  arch/riscv/mm/init.c        168
-rw-r--r--  arch/riscv/mm/kasan_init.c  155
2 files changed, 286 insertions(+), 37 deletions(-)
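
One piece of arithmetic makes the diffs below easier to read: on RV64 a 4 KiB page holds 512 eight-byte PTEs, so each page-table level resolves 9 bits of virtual address on top of the 12-bit page offset. Three levels give 12 + 3*9 = 39 VA bits (Sv39), four give 48 (Sv48), and the new fifth level, the p4d, gives 57 (Sv57). That is why pgtable_l5_enabled pairs with SATP_MODE_57, and why disable_pgtable_l5() drops back to SATP_MODE_48 and PAGE_OFFSET_L4. A throwaway check of the sums:

#include <stdio.h>

#define PAGE_SHIFT     12  /* 4 KiB pages */
#define BITS_PER_LEVEL 9   /* 512 eight-byte PTEs per page */

int main(void)
{
	const struct { const char *mode; int levels; } m[] = {
		{ "Sv39", 3 },  /* pgd/pmd/pte       */
		{ "Sv48", 4 },  /* + pud             */
		{ "Sv57", 5 },  /* + p4d, added here */
	};

	for (int i = 0; i < 3; i++)
		printf("%s: %d VA bits\n", m[i].mode,
		       PAGE_SHIFT + m[i].levels * BITS_PER_LEVEL);
	return 0;
}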
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index f6275b317129..9535bea8688c 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -38,14 +38,16 @@ EXPORT_SYMBOL(kernel_map);
 #endif
 
 #ifdef CONFIG_64BIT
-u64 satp_mode = !IS_ENABLED(CONFIG_XIP_KERNEL) ? SATP_MODE_48 : SATP_MODE_39;
+u64 satp_mode __ro_after_init = !IS_ENABLED(CONFIG_XIP_KERNEL) ? SATP_MODE_57 : SATP_MODE_39;
 #else
-u64 satp_mode = SATP_MODE_32;
+u64 satp_mode __ro_after_init = SATP_MODE_32;
 #endif
 EXPORT_SYMBOL(satp_mode);
 
 bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
+bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
 EXPORT_SYMBOL(pgtable_l4_enabled);
+EXPORT_SYMBOL(pgtable_l5_enabled);
 
 phys_addr_t phys_ram_base __ro_after_init;
 EXPORT_SYMBOL(phys_ram_base);
@@ -227,6 +229,7 @@ pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
 
 pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+static p4d_t __maybe_unused early_dtb_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 static pud_t __maybe_unused early_dtb_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
 static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
 
@@ -318,6 +321,16 @@ static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
 #define early_pmd      ((pmd_t *)XIP_FIXUP(early_pmd))
 #endif /* CONFIG_XIP_KERNEL */
 
+static p4d_t trampoline_p4d[PTRS_PER_P4D] __page_aligned_bss;
+static p4d_t fixmap_p4d[PTRS_PER_P4D] __page_aligned_bss;
+static p4d_t early_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
+
+#ifdef CONFIG_XIP_KERNEL
+#define trampoline_p4d ((p4d_t *)XIP_FIXUP(trampoline_p4d))
+#define fixmap_p4d     ((p4d_t *)XIP_FIXUP(fixmap_p4d))
+#define early_p4d      ((p4d_t *)XIP_FIXUP(early_p4d))
+#endif /* CONFIG_XIP_KERNEL */
+
 static pud_t trampoline_pud[PTRS_PER_PUD] __page_aligned_bss;
 static pud_t fixmap_pud[PTRS_PER_PUD] __page_aligned_bss;
 static pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
@@ -432,6 +445,44 @@ static phys_addr_t alloc_pud_late(uintptr_t va)
 	return __pa(vaddr);
 }
 
+static p4d_t *__init get_p4d_virt_early(phys_addr_t pa)
+{
+	return (p4d_t *)((uintptr_t)pa);
+}
+
+static p4d_t *__init get_p4d_virt_fixmap(phys_addr_t pa)
+{
+	clear_fixmap(FIX_P4D);
+	return (p4d_t *)set_fixmap_offset(FIX_P4D, pa);
+}
+
+static p4d_t *__init get_p4d_virt_late(phys_addr_t pa)
+{
+	return (p4d_t *)__va(pa);
+}
+
+static phys_addr_t __init alloc_p4d_early(uintptr_t va)
+{
+	/* Only one P4D is available for early mapping */
+	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
+
+	return (uintptr_t)early_p4d;
+}
+
+static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va)
+{
+	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+}
+
+static phys_addr_t alloc_p4d_late(uintptr_t va)
+{
+	unsigned long vaddr;
+
+	vaddr = __get_free_page(GFP_KERNEL);
+	BUG_ON(!vaddr);
+	return __pa(vaddr);
+}
+
 static void __init create_pud_mapping(pud_t *pudp,
 				      uintptr_t va, phys_addr_t pa,
 				      phys_addr_t sz, pgprot_t prot)
@@ -459,21 +510,55 @@ static void __init create_pud_mapping(pud_t *pudp,
 	create_pmd_mapping(nextp, va, pa, sz, prot);
 }
 
-#define pgd_next_t		pud_t
-#define alloc_pgd_next(__va)	(pgtable_l4_enabled ?			\
-		pt_ops.alloc_pud(__va) : pt_ops.alloc_pmd(__va))
-#define get_pgd_next_virt(__pa)	(pgtable_l4_enabled ?			\
-		pt_ops.get_pud_virt(__pa) : (pgd_next_t *)pt_ops.get_pmd_virt(__pa))
+static void __init create_p4d_mapping(p4d_t *p4dp,
+				      uintptr_t va, phys_addr_t pa,
+				      phys_addr_t sz, pgprot_t prot)
+{
+	pud_t *nextp;
+	phys_addr_t next_phys;
+	uintptr_t p4d_index = p4d_index(va);
+
+	if (sz == P4D_SIZE) {
+		if (p4d_val(p4dp[p4d_index]) == 0)
+			p4dp[p4d_index] = pfn_p4d(PFN_DOWN(pa), prot);
+		return;
+	}
+
+	if (p4d_val(p4dp[p4d_index]) == 0) {
+		next_phys = pt_ops.alloc_pud(va);
+		p4dp[p4d_index] = pfn_p4d(PFN_DOWN(next_phys), PAGE_TABLE);
+		nextp = pt_ops.get_pud_virt(next_phys);
+		memset(nextp, 0, PAGE_SIZE);
+	} else {
+		next_phys = PFN_PHYS(_p4d_pfn(p4dp[p4d_index]));
+		nextp = pt_ops.get_pud_virt(next_phys);
+	}
+
+	create_pud_mapping(nextp, va, pa, sz, prot);
+}
+
+#define pgd_next_t		p4d_t
+#define alloc_pgd_next(__va)	(pgtable_l5_enabled ?			\
		pt_ops.alloc_p4d(__va) : (pgtable_l4_enabled ?		\
+		pt_ops.alloc_pud(__va) : pt_ops.alloc_pmd(__va)))
+#define get_pgd_next_virt(__pa)	(pgtable_l5_enabled ?			\
+		pt_ops.get_p4d_virt(__pa) : (pgd_next_t *)(pgtable_l4_enabled ? \
+		pt_ops.get_pud_virt(__pa) : (pud_t *)pt_ops.get_pmd_virt(__pa)))
 #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
+				(pgtable_l5_enabled ?			\
+		create_p4d_mapping(__nextp, __va, __pa, __sz, __prot) : \
 				(pgtable_l4_enabled ?			\
-			create_pud_mapping(__nextp, __va, __pa, __sz, __prot) :	\
-			create_pmd_mapping((pmd_t *)__nextp, __va, __pa, __sz, __prot))
-#define fixmap_pgd_next		(pgtable_l4_enabled ?			\
-		(uintptr_t)fixmap_pud : (uintptr_t)fixmap_pmd)
-#define trampoline_pgd_next	(pgtable_l4_enabled ?			\
-		(uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd)
-#define early_dtb_pgd_next	(pgtable_l4_enabled ?			\
-		(uintptr_t)early_dtb_pud : (uintptr_t)early_dtb_pmd)
+		create_pud_mapping((pud_t *)__nextp, __va, __pa, __sz, __prot) : \
+		create_pmd_mapping((pmd_t *)__nextp, __va, __pa, __sz, __prot)))
+#define fixmap_pgd_next		(pgtable_l5_enabled ?			\
+		(uintptr_t)fixmap_p4d : (pgtable_l4_enabled ?		\
+		(uintptr_t)fixmap_pud : (uintptr_t)fixmap_pmd))
+#define trampoline_pgd_next	(pgtable_l5_enabled ?			\
+		(uintptr_t)trampoline_p4d : (pgtable_l4_enabled ?	\
+		(uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd))
+#define early_dtb_pgd_next	(pgtable_l5_enabled ?			\
+		(uintptr_t)early_dtb_p4d : (pgtable_l4_enabled ?	\
+		(uintptr_t)early_dtb_pud : (uintptr_t)early_dtb_pmd))
 #else
 #define pgd_next_t		pte_t
 #define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
@@ -482,6 +567,7 @@ static void __init create_pud_mapping(pud_t *pudp,
 			create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
 #define fixmap_pgd_next		((uintptr_t)fixmap_pte)
 #define early_dtb_pgd_next	((uintptr_t)early_dtb_pmd)
+#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot)
 #define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot)
 #define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
 #endif /* __PAGETABLE_PMD_FOLDED */
@@ -575,6 +661,13 @@ static __init pgprot_t pgprot_from_va(uintptr_t va)
 #endif /* CONFIG_STRICT_KERNEL_RWX */
 
 #ifdef CONFIG_64BIT
+static void __init disable_pgtable_l5(void)
+{
+	pgtable_l5_enabled = false;
+	kernel_map.page_offset = PAGE_OFFSET_L4;
+	satp_mode = SATP_MODE_48;
+}
+
 static void __init disable_pgtable_l4(void)
 {
 	pgtable_l4_enabled = false;
@@ -591,12 +684,12 @@ static void __init disable_pgtable_l4(void)
 static __init void set_satp_mode(void)
 {
 	u64 identity_satp, hw_satp;
-	uintptr_t set_satp_mode_pmd;
+	uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
+	bool check_l4 = false;
 
-	set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
-	create_pgd_mapping(early_pg_dir,
-			   set_satp_mode_pmd, (uintptr_t)early_pud,
-			   PGDIR_SIZE, PAGE_TABLE);
+	create_p4d_mapping(early_p4d,
+			   set_satp_mode_pmd, (uintptr_t)early_pud,
+			   P4D_SIZE, PAGE_TABLE);
 	create_pud_mapping(early_pud,
 			   set_satp_mode_pmd, (uintptr_t)early_pmd,
 			   PUD_SIZE, PAGE_TABLE);
@@ -608,6 +701,11 @@ static __init void set_satp_mode(void)
 			   set_satp_mode_pmd + PMD_SIZE,
 			   set_satp_mode_pmd + PMD_SIZE,
 			   PMD_SIZE, PAGE_KERNEL_EXEC);
+retry:
+	create_pgd_mapping(early_pg_dir,
+			   set_satp_mode_pmd,
+			   check_l4 ? (uintptr_t)early_pud : (uintptr_t)early_p4d,
+			   PGDIR_SIZE, PAGE_TABLE);
 
 	identity_satp = PFN_DOWN((uintptr_t)&early_pg_dir) | satp_mode;
 
@@ -616,10 +714,17 @@ static __init void set_satp_mode(void)
 	hw_satp = csr_swap(CSR_SATP, 0ULL);
 	local_flush_tlb_all();
 
-	if (hw_satp != identity_satp)
+	if (hw_satp != identity_satp) {
+		if (!check_l4) {
+			disable_pgtable_l5();
+			check_l4 = true;
+			goto retry;
+		}
 		disable_pgtable_l4();
+	}
 
 	memset(early_pg_dir, 0, PAGE_SIZE);
+	memset(early_p4d, 0, PAGE_SIZE);
 	memset(early_pud, 0, PAGE_SIZE);
 	memset(early_pmd, 0, PAGE_SIZE);
 }
@@ -693,10 +798,13 @@ static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
 			   PGDIR_SIZE,
 			   IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);
 
-	if (pgtable_l4_enabled) {
+	if (pgtable_l5_enabled)
+		create_p4d_mapping(early_dtb_p4d, DTB_EARLY_BASE_VA,
+				   (uintptr_t)early_dtb_pud, P4D_SIZE, PAGE_TABLE);
+
+	if (pgtable_l4_enabled)
 		create_pud_mapping(early_dtb_pud, DTB_EARLY_BASE_VA,
 				   (uintptr_t)early_dtb_pmd, PUD_SIZE, PAGE_TABLE);
-	}
 
 	if (IS_ENABLED(CONFIG_64BIT)) {
 		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
@@ -732,6 +840,8 @@ void __init pt_ops_set_early(void)
 	pt_ops.get_pmd_virt = get_pmd_virt_early;
 	pt_ops.alloc_pud = alloc_pud_early;
 	pt_ops.get_pud_virt = get_pud_virt_early;
+	pt_ops.alloc_p4d = alloc_p4d_early;
+	pt_ops.get_p4d_virt = get_p4d_virt_early;
 #endif
 }
 
@@ -752,6 +862,8 @@ void __init pt_ops_set_fixmap(void)
 	pt_ops.get_pmd_virt = kernel_mapping_pa_to_va((uintptr_t)get_pmd_virt_fixmap);
 	pt_ops.alloc_pud = kernel_mapping_pa_to_va((uintptr_t)alloc_pud_fixmap);
 	pt_ops.get_pud_virt = kernel_mapping_pa_to_va((uintptr_t)get_pud_virt_fixmap);
+	pt_ops.alloc_p4d = kernel_mapping_pa_to_va((uintptr_t)alloc_p4d_fixmap);
+	pt_ops.get_p4d_virt = kernel_mapping_pa_to_va((uintptr_t)get_p4d_virt_fixmap);
 #endif
 }
 
@@ -768,6 +880,8 @@ void __init pt_ops_set_late(void)
 	pt_ops.get_pmd_virt = get_pmd_virt_late;
 	pt_ops.alloc_pud = alloc_pud_late;
 	pt_ops.get_pud_virt = get_pud_virt_late;
+	pt_ops.alloc_p4d = alloc_p4d_late;
+	pt_ops.get_p4d_virt = get_p4d_virt_late;
 #endif
 }
 
@@ -828,6 +942,10 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 			   fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
 
 #ifndef __PAGETABLE_PMD_FOLDED
+	/* Setup fixmap P4D and PUD */
+	if (pgtable_l5_enabled)
+		create_p4d_mapping(fixmap_p4d, FIXADDR_START,
+				   (uintptr_t)fixmap_pud, P4D_SIZE, PAGE_TABLE);
 	/* Setup fixmap PUD and PMD */
 	if (pgtable_l4_enabled)
 		create_pud_mapping(fixmap_pud, FIXADDR_START,
@@ -837,6 +955,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	/* Setup trampoline PGD and PMD */
 	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
 			   trampoline_pgd_next, PGDIR_SIZE, PAGE_TABLE);
+	if (pgtable_l5_enabled)
+		create_p4d_mapping(trampoline_p4d, kernel_map.virt_addr,
+				   (uintptr_t)trampoline_pud, P4D_SIZE, PAGE_TABLE);
 	if (pgtable_l4_enabled)
 		create_pud_mapping(trampoline_pud, kernel_map.virt_addr,
 				   (uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE);
@@ -938,6 +1059,7 @@ static void __init setup_vm_final(void)
 	clear_fixmap(FIX_PTE);
 	clear_fixmap(FIX_PMD);
 	clear_fixmap(FIX_PUD);
+	clear_fixmap(FIX_P4D);
 
 	/* Move to swapper page table */
 	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | satp_mode);
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index cd1a145257b7..a22e418dbd82 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -111,6 +111,8 @@ static void __init kasan_populate_pud(pgd_t *pgd,
 		 * pt_ops facility.
 		 */
 		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
+	} else if (pgd_none(*pgd)) {
+		base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
 	} else {
 		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
 		if (base_pud == lm_alias(kasan_early_shadow_pud)) {
@@ -152,13 +154,72 @@ static void __init kasan_populate_pud(pgd_t *pgd,
 		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
 }
 
-#define kasan_early_shadow_pgd_next	(pgtable_l4_enabled ?		\
+static void __init kasan_populate_p4d(pgd_t *pgd,
+				      unsigned long vaddr, unsigned long end,
+				      bool early)
+{
+	phys_addr_t phys_addr;
+	p4d_t *p4dp, *base_p4d;
+	unsigned long next;
+
+	if (early) {
+		/*
+		 * We can't use pgd_page_vaddr here as it would return a linear
+		 * mapping address but it is not mapped yet, but when populating
+		 * early_pg_dir, we need the physical address and when populating
+		 * swapper_pg_dir, we need the kernel virtual address so use
+		 * pt_ops facility.
+		 */
+		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgd)));
+	} else {
+		base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+		if (base_p4d == lm_alias(kasan_early_shadow_p4d))
+			base_p4d = memblock_alloc(PTRS_PER_PUD * sizeof(p4d_t), PAGE_SIZE);
+	}
+
+	p4dp = base_p4d + p4d_index(vaddr);
+
+	do {
+		next = p4d_addr_end(vaddr, end);
+
+		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
+			if (early) {
+				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pud));
+				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
+				continue;
+			} else {
+				phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
+				if (phys_addr) {
+					set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
+					continue;
+				}
+			}
+		}
+
+		kasan_populate_pud((pgd_t *)p4dp, vaddr, next, early);
+	} while (p4dp++, vaddr = next, vaddr != end);
+
+	/*
+	 * Wait for the whole P4D to be populated before setting the P4D in
+	 * the page table, otherwise, if we did set the P4D before populating
+	 * it entirely, memblock could allocate a page at a physical address
+	 * where KASAN is not populated yet and then we'd get a page fault.
+	 */
+	if (!early)
+		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
+}
+
+#define kasan_early_shadow_pgd_next	(pgtable_l5_enabled ?		\
+				(uintptr_t)kasan_early_shadow_p4d :	\
+				(pgtable_l4_enabled ?			\
 				(uintptr_t)kasan_early_shadow_pud :	\
-				(uintptr_t)kasan_early_shadow_pmd)
+				(uintptr_t)kasan_early_shadow_pmd))
 #define kasan_populate_pgd_next(pgdp, vaddr, next, early)		\
+		(pgtable_l5_enabled ?					\
+		kasan_populate_p4d(pgdp, vaddr, next, early) :		\
 		(pgtable_l4_enabled ?					\
 			kasan_populate_pud(pgdp, vaddr, next, early) :	\
-			kasan_populate_pmd((pud_t *)pgdp, vaddr, next))
+			kasan_populate_pmd((pud_t *)pgdp, vaddr, next)))
 
 static void __init kasan_populate_pgd(pgd_t *pgdp,
 				      unsigned long vaddr, unsigned long end,
@@ -221,6 +282,14 @@ asmlinkage void __init kasan_early_init(void)
 					PAGE_TABLE));
 	}
 
+	if (pgtable_l5_enabled) {
+		for (i = 0; i < PTRS_PER_P4D; ++i)
+			set_p4d(kasan_early_shadow_p4d + i,
+				pfn_p4d(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pud))),
+					PAGE_TABLE));
+	}
+
 	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
 			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);
 
@@ -246,9 +315,27 @@ static void __init kasan_populate(void *start, void *end)
 	memset(start, KASAN_SHADOW_INIT, end - start);
 }
 
+static void __init kasan_shallow_populate_pmd(pgd_t *pgdp,
+					      unsigned long vaddr, unsigned long end)
+{
+	unsigned long next;
+	pmd_t *pmdp, *base_pmd;
+	bool is_kasan_pte;
+
+	base_pmd = (pmd_t *)pgd_page_vaddr(*pgdp);
+	pmdp = base_pmd + pmd_index(vaddr);
+
+	do {
+		next = pmd_addr_end(vaddr, end);
+		is_kasan_pte = (pmd_pgtable(*pmdp) == lm_alias(kasan_early_shadow_pte));
+
+		if (is_kasan_pte)
+			pmd_clear(pmdp);
+	} while (pmdp++, vaddr = next, vaddr != end);
+}
+
 static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
-					      unsigned long vaddr, unsigned long end,
-					      bool kasan_populate)
+					      unsigned long vaddr, unsigned long end)
 {
 	unsigned long next;
 	pud_t *pudp, *base_pud;
@@ -258,21 +345,60 @@ static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
 	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
 	pudp = base_pud + pud_index(vaddr);
 
-	if (kasan_populate)
-		memcpy(base_pud, (void *)kasan_early_shadow_pgd_next,
-		       sizeof(pud_t) * PTRS_PER_PUD);
-
 	do {
 		next = pud_addr_end(vaddr, end);
 		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));
 
-		if (is_kasan_pmd) {
-			base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-			set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
-		}
+		if (!is_kasan_pmd)
+			continue;
+
+		base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
+
+		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE)
+			continue;
+
+		memcpy(base_pmd, (void *)kasan_early_shadow_pmd, PAGE_SIZE);
+		kasan_shallow_populate_pmd((pgd_t *)pudp, vaddr, next);
 	} while (pudp++, vaddr = next, vaddr != end);
 }
 
+static void __init kasan_shallow_populate_p4d(pgd_t *pgdp,
+					      unsigned long vaddr, unsigned long end)
+{
+	unsigned long next;
+	p4d_t *p4dp, *base_p4d;
+	pud_t *base_pud;
+	bool is_kasan_pud;
+
+	base_p4d = (p4d_t *)pgd_page_vaddr(*pgdp);
+	p4dp = base_p4d + p4d_index(vaddr);
+
+	do {
+		next = p4d_addr_end(vaddr, end);
+		is_kasan_pud = (p4d_pgtable(*p4dp) == lm_alias(kasan_early_shadow_pud));
+
+		if (!is_kasan_pud)
+			continue;
+
+		base_pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		set_p4d(p4dp, pfn_p4d(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
+
+		if (IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE)
+			continue;
+
+		memcpy(base_pud, (void *)kasan_early_shadow_pud, PAGE_SIZE);
+		kasan_shallow_populate_pud((pgd_t *)p4dp, vaddr, next);
+	} while (p4dp++, vaddr = next, vaddr != end);
+}
+
+#define kasan_shallow_populate_pgd_next(pgdp, vaddr, next)		\
+	(pgtable_l5_enabled ?						\
+	kasan_shallow_populate_p4d(pgdp, vaddr, next) :			\
+	(pgtable_l4_enabled ?						\
+	kasan_shallow_populate_pud(pgdp, vaddr, next) :			\
+	kasan_shallow_populate_pmd(pgdp, vaddr, next)))
+
 static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
 {
 	unsigned long next;
@@ -293,7 +419,8 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long
 		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
 			continue;
 
-		kasan_shallow_populate_pud(pgd_k, vaddr, next, is_kasan_pgd_next);
+		memcpy(p, (void *)kasan_early_shadow_pgd_next, PAGE_SIZE);
+		kasan_shallow_populate_pgd_next(pgd_k, vaddr, next);
 	} while (pgd_k++, vaddr = next, vaddr != end);
 }
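
A closing note on the pattern both files share: rather than folding page-table levels at compile time, the kernel picks the live depth once at boot and every pgd-next helper dispatches through the pgtable_l5_enabled/pgtable_l4_enabled booleans, so a single image boots on Sv39, Sv48, and Sv57 machines alike. A miniature of that dispatch (the booleans mirror the kernel variables; pgd_next_level() is invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Set once by set_satp_mode(), read by every page-table helper after. */
static bool pgtable_l5_enabled = true;  /* assume the Sv57 probe succeeded */
static bool pgtable_l4_enabled = true;

/* The same cascade used by alloc_pgd_next()/create_pgd_next_mapping(). */
static const char *pgd_next_level(void)
{
	return pgtable_l5_enabled ? "p4d" :
	       (pgtable_l4_enabled ? "pud" : "pmd");
}

int main(void)
{
	printf("Sv57: pgd entries point to a %s table\n", pgd_next_level());

	pgtable_l5_enabled = false;   /* what disable_pgtable_l5() does */
	printf("Sv48: pgd entries point to a %s table\n", pgd_next_level());

	pgtable_l4_enabled = false;   /* what disable_pgtable_l4() does */
	printf("Sv39: pgd entries point to a %s table\n", pgd_next_level());
	return 0;
}

The cost is a pair of predictable branches when the kernel itself builds page tables; the benefit is that one kernel image needs no per-machine configuration, which is the trade the macros in both diffs make.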