author    | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2021-01-25 11:12:29 +0100
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2021-01-25 11:12:29 +0100
commit    | 072a51be8ecfb84e15b27b7f80a601560f386788 (patch)
tree      | cd7bee45536733a987e70037b109eb1bd9a4cc36 /mm
parent    | 3ac6e56c325e4660837efcf995e8460d917b4778 (diff)
parent    | 6ee1d745b7c9fd573fba142a2efdad76a9f1cb04 (diff)
Merge 5.11-rc5 into driver-core-next
We need the fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/highmem.c           |   7
-rw-r--r-- | mm/hugetlb.c           |   2
-rw-r--r-- | mm/kasan/hw_tags.c     |  77
-rw-r--r-- | mm/kasan/init.c        |  26
-rw-r--r-- | mm/memblock.c          |   2
-rw-r--r-- | mm/memcontrol.c        |   4
-rw-r--r-- | mm/memory-failure.c    |  22
-rw-r--r-- | mm/mempolicy.c         |   2
-rw-r--r-- | mm/migrate.c           |  23
-rw-r--r-- | mm/page-writeback.c    |   2
-rw-r--r-- | mm/page_alloc.c        | 117
-rw-r--r-- | mm/process_vm_access.c |   1
-rw-r--r-- | mm/slub.c              |   9
-rw-r--r-- | mm/vmalloc.c           |   4
-rw-r--r-- | mm/vmscan.c            |   2

15 files changed, 166 insertions, 134 deletions
diff --git a/mm/highmem.c b/mm/highmem.c
index c3a9ea7875ef..874b732b120c 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -473,6 +473,11 @@ static inline void *arch_kmap_local_high_get(struct page *page)
 }
 #endif
 
+#ifndef arch_kmap_local_set_pte
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
+	set_pte_at(mm, vaddr, ptep, ptev)
+#endif
+
 /* Unmap a local mapping which was obtained by kmap_high_get() */
 static inline bool kmap_high_unmap_local(unsigned long vaddr)
 {
@@ -515,7 +520,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte - idx)));
 	pteval = pfn_pte(pfn, prot);
-	set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
+	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
 	arch_kmap_local_post_map(vaddr, pteval);
 	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
 	preempt_enable();
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a2602969873d..18f6ee317900 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4371,7 +4371,7 @@ retry:
 		 * So we need to block hugepage fault by PG_hwpoison bit check.
 		 */
 		if (unlikely(PageHWPoison(page))) {
-			ret = VM_FAULT_HWPOISON |
+			ret = VM_FAULT_HWPOISON_LARGE |
 				VM_FAULT_SET_HINDEX(hstate_index(h));
 			goto backout_unlocked;
 		}
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 55bd6f09c70f..e529428e7a11 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -19,11 +19,10 @@
 
 #include "kasan.h"
 
-enum kasan_arg_mode {
-	KASAN_ARG_MODE_DEFAULT,
-	KASAN_ARG_MODE_OFF,
-	KASAN_ARG_MODE_PROD,
-	KASAN_ARG_MODE_FULL,
+enum kasan_arg {
+	KASAN_ARG_DEFAULT,
+	KASAN_ARG_OFF,
+	KASAN_ARG_ON,
 };
 
 enum kasan_arg_stacktrace {
@@ -38,7 +37,7 @@ enum kasan_arg_fault {
 	KASAN_ARG_FAULT_PANIC,
 };
 
-static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
+static enum kasan_arg kasan_arg __ro_after_init;
 static enum kasan_arg_stacktrace kasan_arg_stacktrace __ro_after_init;
 static enum kasan_arg_fault kasan_arg_fault __ro_after_init;
 
@@ -52,26 +51,24 @@ DEFINE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
 /* Whether panic or disable tag checking on fault. */
 bool kasan_flag_panic __ro_after_init;
 
-/* kasan.mode=off/prod/full */
-static int __init early_kasan_mode(char *arg)
+/* kasan=off/on */
+static int __init early_kasan_flag(char *arg)
 {
 	if (!arg)
 		return -EINVAL;
 
 	if (!strcmp(arg, "off"))
-		kasan_arg_mode = KASAN_ARG_MODE_OFF;
-	else if (!strcmp(arg, "prod"))
-		kasan_arg_mode = KASAN_ARG_MODE_PROD;
-	else if (!strcmp(arg, "full"))
-		kasan_arg_mode = KASAN_ARG_MODE_FULL;
+		kasan_arg = KASAN_ARG_OFF;
+	else if (!strcmp(arg, "on"))
+		kasan_arg = KASAN_ARG_ON;
 	else
 		return -EINVAL;
 
 	return 0;
 }
-early_param("kasan.mode", early_kasan_mode);
+early_param("kasan", early_kasan_flag);
 
-/* kasan.stack=off/on */
+/* kasan.stacktrace=off/on */
 static int __init early_kasan_flag_stacktrace(char *arg)
 {
 	if (!arg)
@@ -113,8 +110,8 @@ void kasan_init_hw_tags_cpu(void)
 	 * as this function is only called for MTE-capable hardware.
 	 */
 
-	/* If KASAN is disabled, do nothing. */
-	if (kasan_arg_mode == KASAN_ARG_MODE_OFF)
+	/* If KASAN is disabled via command line, don't initialize it. */
+	if (kasan_arg == KASAN_ARG_OFF)
 		return;
 
 	hw_init_tags(KASAN_TAG_MAX);
@@ -124,43 +121,28 @@ void kasan_init_hw_tags_cpu(void)
 
 /* kasan_init_hw_tags() is called once on boot CPU. */
 void __init kasan_init_hw_tags(void)
 {
-	/* If hardware doesn't support MTE, do nothing. */
+	/* If hardware doesn't support MTE, don't initialize KASAN. */
 	if (!system_supports_mte())
 		return;
 
-	/* Choose KASAN mode if kasan boot parameter is not provided. */
-	if (kasan_arg_mode == KASAN_ARG_MODE_DEFAULT) {
-		if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
-			kasan_arg_mode = KASAN_ARG_MODE_FULL;
-		else
-			kasan_arg_mode = KASAN_ARG_MODE_PROD;
-	}
-
-	/* Preset parameter values based on the mode. */
-	switch (kasan_arg_mode) {
-	case KASAN_ARG_MODE_DEFAULT:
-		/* Shouldn't happen as per the check above. */
-		WARN_ON(1);
-		return;
-	case KASAN_ARG_MODE_OFF:
-		/* If KASAN is disabled, do nothing. */
+	/* If KASAN is disabled via command line, don't initialize it. */
+	if (kasan_arg == KASAN_ARG_OFF)
 		return;
-	case KASAN_ARG_MODE_PROD:
-		static_branch_enable(&kasan_flag_enabled);
-		break;
-	case KASAN_ARG_MODE_FULL:
-		static_branch_enable(&kasan_flag_enabled);
-		static_branch_enable(&kasan_flag_stacktrace);
-		break;
-	}
 
-	/* Now, optionally override the presets. */
+	/* Enable KASAN. */
+	static_branch_enable(&kasan_flag_enabled);
 	switch (kasan_arg_stacktrace) {
 	case KASAN_ARG_STACKTRACE_DEFAULT:
+		/*
+		 * Default to enabling stack trace collection for
+		 * debug kernels.
+		 */
+		if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
+			static_branch_enable(&kasan_flag_stacktrace);
 		break;
 	case KASAN_ARG_STACKTRACE_OFF:
-		static_branch_disable(&kasan_flag_stacktrace);
+		/* Do nothing, kasan_flag_stacktrace keeps its default value. */
 		break;
 	case KASAN_ARG_STACKTRACE_ON:
 		static_branch_enable(&kasan_flag_stacktrace);
@@ -169,11 +151,16 @@ void __init kasan_init_hw_tags(void)
 
 	switch (kasan_arg_fault) {
 	case KASAN_ARG_FAULT_DEFAULT:
+		/*
+		 * Default to no panic on report.
+		 * Do nothing, kasan_flag_panic keeps its default value.
+		 */
 		break;
 	case KASAN_ARG_FAULT_REPORT:
-		kasan_flag_panic = false;
+		/* Do nothing, kasan_flag_panic keeps its default value. */
 		break;
 	case KASAN_ARG_FAULT_PANIC:
+		/* Enable panic on report. */
 		kasan_flag_panic = true;
 		break;
 	}
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index bc0ad208b3a7..c4605ac9837b 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -64,7 +64,8 @@ static inline bool kasan_pmd_table(pud_t pud)
 	return false;
 }
 #endif
-pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;
+pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+	__page_aligned_bss;
 
 static inline bool kasan_pte_table(pmd_t pmd)
 {
@@ -372,9 +373,10 @@ static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
 
 		if (kasan_pte_table(*pmd)) {
 			if (IS_ALIGNED(addr, PMD_SIZE) &&
-			    IS_ALIGNED(next, PMD_SIZE))
+			    IS_ALIGNED(next, PMD_SIZE)) {
 				pmd_clear(pmd);
-			continue;
+				continue;
+			}
 		}
 		pte = pte_offset_kernel(pmd, addr);
 		kasan_remove_pte_table(pte, addr, next);
@@ -397,9 +399,10 @@ static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
 
 		if (kasan_pmd_table(*pud)) {
 			if (IS_ALIGNED(addr, PUD_SIZE) &&
-			    IS_ALIGNED(next, PUD_SIZE))
+			    IS_ALIGNED(next, PUD_SIZE)) {
 				pud_clear(pud);
-			continue;
+				continue;
+			}
 		}
 		pmd = pmd_offset(pud, addr);
 		pmd_base = pmd_offset(pud, 0);
@@ -423,9 +426,10 @@ static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
 
 		if (kasan_pud_table(*p4d)) {
 			if (IS_ALIGNED(addr, P4D_SIZE) &&
-			    IS_ALIGNED(next, P4D_SIZE))
+			    IS_ALIGNED(next, P4D_SIZE)) {
 				p4d_clear(p4d);
-			continue;
+				continue;
+			}
 		}
 		pud = pud_offset(p4d, addr);
 		kasan_remove_pud_table(pud, addr, next);
@@ -456,9 +460,10 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
 
 		if (kasan_p4d_table(*pgd)) {
 			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
-			    IS_ALIGNED(next, PGDIR_SIZE))
+			    IS_ALIGNED(next, PGDIR_SIZE)) {
 				pgd_clear(pgd);
-			continue;
+				continue;
+			}
 		}
 
 		p4d = p4d_offset(pgd, addr);
@@ -481,7 +486,6 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
 	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
 	if (ret)
-		kasan_remove_zero_shadow(shadow_start,
-					size >> KASAN_SHADOW_SCALE_SHIFT);
+		kasan_remove_zero_shadow(start, size);
 
 	return ret;
 }
diff --git a/mm/memblock.c b/mm/memblock.c
index d24bcfa88d2f..1eaaec1e7687 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1427,7 +1427,7 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
 }
 
 /**
- * memblock_phys_alloc_try_nid - allocate a memory block from specified MUMA node
+ * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
  * @size: size of memory block to be allocated in bytes
  * @align: alignment of the region and block's size
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 605f671203ef..e2de77b5bcc2 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3115,9 +3115,7 @@ void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
 		page_counter_uncharge(&memcg->kmem, nr_pages);
 
-	page_counter_uncharge(&memcg->memory, nr_pages);
-	if (do_memsw_account())
-		page_counter_uncharge(&memcg->memsw, nr_pages);
+	refill_stock(memcg, nr_pages);
 }
 
 /**
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5a38e9eade94..e9481632fcd1 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1885,6 +1885,12 @@ static int soft_offline_free_page(struct page *page)
 	return rc;
 }
 
+static void put_ref_page(struct page *page)
+{
+	if (page)
+		put_page(page);
+}
+
 /**
  * soft_offline_page - Soft offline a page.
  * @pfn: pfn to soft-offline
@@ -1910,20 +1916,26 @@ static int soft_offline_free_page(struct page *page)
 int soft_offline_page(unsigned long pfn, int flags)
 {
 	int ret;
-	struct page *page;
 	bool try_again = true;
+	struct page *page, *ref_page = NULL;
+
+	WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));
 
 	if (!pfn_valid(pfn))
 		return -ENXIO;
+	if (flags & MF_COUNT_INCREASED)
+		ref_page = pfn_to_page(pfn);
+
 	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
 	page = pfn_to_online_page(pfn);
-	if (!page)
+	if (!page) {
+		put_ref_page(ref_page);
 		return -EIO;
+	}
 
 	if (PageHWPoison(page)) {
 		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
-		if (flags & MF_COUNT_INCREASED)
-			put_page(page);
+		put_ref_page(ref_page);
 		return 0;
 	}
 
@@ -1940,7 +1952,7 @@ retry:
 			goto retry;
 		}
 	} else if (ret == -EIO) {
-		pr_info("%s: %#lx: unknown page type: %lx (%pGP)\n",
+		pr_info("%s: %#lx: unknown page type: %lx (%pGp)\n",
 			__func__, pfn, page->flags, &page->flags);
 	}
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8cf96bd21341..2c3a86502053 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1111,7 +1111,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 		     const nodemask_t *to, int flags)
 {
 	int busy = 0;
-	int err;
+	int err = 0;
 	nodemask_t tmp;
 
 	migrate_prep();
diff --git a/mm/migrate.c b/mm/migrate.c
index ee5e612b4cd8..c0efe921bca5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -402,6 +402,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	struct zone *oldzone, *newzone;
 	int dirty;
 	int expected_count = expected_page_refs(mapping, page) + extra_count;
+	int nr = thp_nr_pages(page);
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
@@ -437,7 +438,7 @@
 	 */
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
-	page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
+	page_ref_add(newpage, nr); /* add cache reference */
 	if (PageSwapBacked(page)) {
 		__SetPageSwapBacked(newpage);
 		if (PageSwapCache(page)) {
@@ -459,7 +460,7 @@
 	if (PageTransHuge(page)) {
 		int i;
 
-		for (i = 1; i < HPAGE_PMD_NR; i++) {
+		for (i = 1; i < nr; i++) {
 			xas_next(&xas);
 			xas_store(&xas, newpage);
 		}
@@ -470,7 +471,7 @@
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
+	page_ref_unfreeze(page, expected_count - nr);
 
 	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
@@ -493,17 +494,17 @@
 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
-		__dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
-		__inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
+		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
-			__dec_lruvec_state(old_lruvec, NR_SHMEM);
-			__inc_lruvec_state(new_lruvec, NR_SHMEM);
+			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 		}
 		if (dirty && mapping_can_writeback(mapping)) {
-			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
-			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
-			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
-			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
+			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
+			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 		}
 	}
 	local_irq_enable();
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 586042472ac9..eb34d204d4ee 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2826,7 +2826,7 @@ EXPORT_SYMBOL(__test_set_page_writeback);
  */
 void wait_on_page_writeback(struct page *page)
 {
-	if (PageWriteback(page)) {
+	while (PageWriteback(page)) {
 		trace_wait_on_page_writeback(page, page_mapping(page));
 		wait_on_page_bit(page, PG_writeback);
 	}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bdbec4c98173..783913e41f65 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1207,8 +1207,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)
 	/* s390's use of memset() could override KASAN redzones. */
 	kasan_disable_current();
 	for (i = 0; i < numpages; i++) {
+		u8 tag = page_kasan_tag(page + i);
 		page_kasan_tag_reset(page + i);
 		clear_highpage(page + i);
+		page_kasan_tag_set(page + i, tag);
 	}
 	kasan_enable_current();
 }
@@ -2862,20 +2864,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
 	struct page *page;
 
-#ifdef CONFIG_CMA
-	/*
-	 * Balance movable allocations between regular and CMA areas by
-	 * allocating from CMA when over half of the zone's free memory
-	 * is in the CMA area.
-	 */
-	if (alloc_flags & ALLOC_CMA &&
-	    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-	    zone_page_state(zone, NR_FREE_PAGES) / 2) {
-		page = __rmqueue_cma_fallback(zone, order);
-		if (page)
-			return page;
+	if (IS_ENABLED(CONFIG_CMA)) {
+		/*
+		 * Balance movable allocations between regular and CMA areas by
+		 * allocating from CMA when over half of the zone's free memory
+		 * is in the CMA area.
+		 */
+		if (alloc_flags & ALLOC_CMA &&
+		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
+		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
+			page = __rmqueue_cma_fallback(zone, order);
+			if (page)
+				goto out;
+		}
 	}
-#endif
 retry:
 	page = __rmqueue_smallest(zone, order, migratetype);
 	if (unlikely(!page)) {
@@ -2886,8 +2888,9 @@ retry:
 								alloc_flags))
 			goto retry;
 	}
-
-	trace_mm_page_alloc_zone_locked(page, order, migratetype);
+out:
+	if (page)
+		trace_mm_page_alloc_zone_locked(page, order, migratetype);
 	return page;
 }
 
@@ -7077,23 +7080,26 @@ void __init free_area_init_memoryless_node(int nid)
  * Initialize all valid struct pages in the range [spfn, epfn) and mark them
  * PageReserved(). Return the number of struct pages that were initialized.
  */
-static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
+static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn,
+					 int zone, int nid)
 {
-	unsigned long pfn;
+	unsigned long pfn, zone_spfn, zone_epfn;
 	u64 pgcnt = 0;
 
+	zone_spfn = arch_zone_lowest_possible_pfn[zone];
+	zone_epfn = arch_zone_highest_possible_pfn[zone];
+
+	spfn = clamp(spfn, zone_spfn, zone_epfn);
+	epfn = clamp(epfn, zone_spfn, zone_epfn);
+
 	for (pfn = spfn; pfn < epfn; pfn++) {
 		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
 			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
 				+ pageblock_nr_pages - 1;
 			continue;
 		}
-		/*
-		 * Use a fake node/zone (0) for now. Some of these pages
-		 * (in memblock.reserved but not in memblock.memory) will
-		 * get re-initialized via reserve_bootmem_region() later.
-		 */
-		__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
+
+		__init_single_page(pfn_to_page(pfn), pfn, zone, nid);
 		__SetPageReserved(pfn_to_page(pfn));
 		pgcnt++;
 	}
@@ -7102,51 +7108,64 @@ static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
 }
 
 /*
- * Only struct pages that are backed by physical memory are zeroed and
- * initialized by going through __init_single_page(). But, there are some
- * struct pages which are reserved in memblock allocator and their fields
- * may be accessed (for example page_to_pfn() on some configuration accesses
- * flags). We must explicitly initialize those struct pages.
+ * Only struct pages that correspond to ranges defined by memblock.memory
+ * are zeroed and initialized by going through __init_single_page() during
+ * memmap_init().
+ *
+ * But, there could be struct pages that correspond to holes in
+ * memblock.memory. This can happen because of the following reasons:
+ *  - phyiscal memory bank size is not necessarily the exact multiple of the
+ *    arbitrary section size
+ *  - early reserved memory may not be listed in memblock.memory
+ *  - memory layouts defined with memmap= kernel parameter may not align
+ *    nicely with memmap sections
 *
- * This function also addresses a similar issue where struct pages are left
- * uninitialized because the physical address range is not covered by
- * memblock.memory or memblock.reserved. That could happen when memblock
- * layout is manually configured via memmap=, or when the highest physical
- * address (max_pfn) does not end on a section boundary.
+ * Explicitly initialize those struct pages so that:
+ *  - PG_Reserved is set
+ *  - zone link is set accorging to the architecture constrains
+ *  - node is set to node id of the next populated region except for the
+ *    trailing hole where last node id is used
 */
-static void __init init_unavailable_mem(void)
+static void __init init_zone_unavailable_mem(int zone)
 {
-	phys_addr_t start, end;
-	u64 i, pgcnt;
-	phys_addr_t next = 0;
+	unsigned long start, end;
+	int i, nid;
+	u64 pgcnt;
+	unsigned long next = 0;
 
 	/*
-	 * Loop through unavailable ranges not covered by memblock.memory.
+	 * Loop through holes in memblock.memory and initialize struct
+	 * pages corresponding to these holes
 	 */
 	pgcnt = 0;
-	for_each_mem_range(i, &start, &end) {
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
 		if (next < start)
-			pgcnt += init_unavailable_range(PFN_DOWN(next),
-							PFN_UP(start));
+			pgcnt += init_unavailable_range(next, start, zone, nid);
 		next = end;
 	}
 
 	/*
-	 * Early sections always have a fully populated memmap for the whole
-	 * section - see pfn_valid(). If the last section has holes at the
-	 * end and that section is marked "online", the memmap will be
-	 * considered initialized. Make sure that memmap has a well defined
-	 * state.
+	 * Last section may surpass the actual end of memory (e.g. we can
+	 * have 1Gb section and 512Mb of RAM pouplated).
+	 * Make sure that memmap has a well defined state in this case.
 	 */
-	pgcnt += init_unavailable_range(PFN_DOWN(next),
-					round_up(max_pfn, PAGES_PER_SECTION));
+	end = round_up(max_pfn, PAGES_PER_SECTION);
+	pgcnt += init_unavailable_range(next, end, zone, nid);
 
 	/*
 	 * Struct pages that do not have backing memory. This could be because
 	 * firmware is using some of this memory, or for some other reasons.
 	 */
 	if (pgcnt)
-		pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
+		pr_info("Zone %s: zeroed struct page in unavailable ranges: %lld pages", zone_names[zone], pgcnt);
+}
+
+static void __init init_unavailable_mem(void)
+{
+	int zone;
+
+	for (zone = 0; zone < ZONE_MOVABLE; zone++)
+		init_zone_unavailable_mem(zone);
 }
 #else
 static inline void __init init_unavailable_mem(void)
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 4bcc11958089..f5fee9cf90f8 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/uio.h>
 #include <linux/sched.h>
+#include <linux/compat.h>
 #include <linux/sched/mm.h>
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
diff --git a/mm/slub.c b/mm/slub.c
index dc5b42e700b8..69742ab9a21d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1973,7 +1973,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 
 		t = acquire_slab(s, n, page, object == NULL, &objects);
 		if (!t)
-			break;
+			continue; /* cmpxchg raced */
 
 		available += objects;
 		if (!object) {
@@ -2791,7 +2791,8 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
 						   void *obj)
 {
 	if (unlikely(slab_want_init_on_free(s)) && obj)
-		memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
+			0, sizeof(void *));
 }
 
 /*
@@ -2883,7 +2884,7 @@ redo:
 		stat(s, ALLOC_FASTPATH);
 	}
 
-	maybe_wipe_obj_freeptr(s, kasan_reset_tag(object));
+	maybe_wipe_obj_freeptr(s, object);
 
 	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
 		memset(kasan_reset_tag(object), 0, s->object_size);
@@ -3329,7 +3330,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 		int j;
 
 		for (j = 0; j < i; j++)
-			memset(p[j], 0, s->object_size);
+			memset(kasan_reset_tag(p[j]), 0, s->object_size);
 	}
 
 	/* memcg and kmem_cache debug support */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4d88fe5a277a..e6f352bf0498 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2420,8 +2420,10 @@ void *vmap(struct page **pages, unsigned int count,
 		return NULL;
 	}
 
-	if (flags & VM_MAP_PUT_PAGES)
+	if (flags & VM_MAP_PUT_PAGES) {
 		area->pages = pages;
+		area->nr_pages = count;
+	}
 	return area->addr;
 }
 EXPORT_SYMBOL(vmap);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 257cba79a96d..b1b574ad199d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1238,6 +1238,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			if (!PageSwapCache(page)) {
 				if (!(sc->gfp_mask & __GFP_IO))
 					goto keep_locked;
+				if (page_maybe_dma_pinned(page))
+					goto keep_locked;
 				if (PageTransHuge(page)) {
 					/* cannot split THP, skip it */
 					if (!can_split_huge_page(page, NULL))