Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c   |   4
-rw-r--r--  mm/hugetlb.c       |   2
-rw-r--r--  mm/kasan/Makefile  |   9
-rw-r--r--  mm/kasan/common.c  |  43
-rw-r--r--  mm/kasan/report.c  |  10
-rw-r--r--  mm/kmemleak.c      |  24
-rw-r--r--  mm/madvise.c       |   2
-rw-r--r--  mm/memblock.c      |  12
-rw-r--r--  mm/memory.c        |   8
-rw-r--r--  mm/mmu_gather.c    | 129
-rw-r--r--  mm/page_alloc.c    |   7
-rw-r--r--  mm/page_owner.c    |  82
-rw-r--r--  mm/shmem.c         |   5
-rw-r--r--  mm/slab.c          |  48
-rw-r--r--  mm/slub.c          |  21
-rw-r--r--  mm/vmalloc.c       | 113
-rw-r--r--  mm/vmscan.c        |   2
17 files changed, 259 insertions, 262 deletions
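
Note: the largest share of the churn below (kasan, kmemleak, page_owner, slub) is the conversion from open-coded struct stack_trace handling to the stack_trace_save()/stack_depot_save() helpers. A minimal sketch of the new pattern, modelled on the converted save_stack() in mm/kasan/common.c; SKETCH_DEPTH and save_stack_sketch() are hypothetical stand-ins for the per-user depth constant (KASAN_STACK_DEPTH, PAGE_OWNER_STACK_DEPTH, ...) and function names:

/*
 * Illustrative sketch only, not part of the diff: capture the current
 * call chain and stash it in the stack depot.
 */
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

#define SKETCH_DEPTH 64	/* hypothetical depth for this sketch */

static depot_stack_handle_t save_stack_sketch(gfp_t flags)
{
	unsigned long entries[SKETCH_DEPTH];
	unsigned int nr_entries;

	/* Record the current stack; the final argument skips 0 frames. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	/* Deduplicate and persist it; returns a compact handle. */
	return stack_depot_save(entries, nr_entries, flags);
}

Reporting is the mirror image, as the mm/kasan/report.c and mm/page_owner.c hunks show: nr_entries = stack_depot_fetch(handle, &entries) followed by stack_trace_print(entries, nr_entries, 0).
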
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 165ea46bf149..b6a34b32d8ac 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1677,7 +1677,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, struct mm_struct *mm = tlb->mm; bool ret = false; - tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); + tlb_change_page_size(tlb, HPAGE_PMD_SIZE); ptl = pmd_trans_huge_lock(pmd, vma); if (!ptl) @@ -1753,7 +1753,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t orig_pmd; spinlock_t *ptl; - tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); + tlb_change_page_size(tlb, HPAGE_PMD_SIZE); ptl = __pmd_trans_huge_lock(pmd, vma); if (!ptl) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 6cdc7b2d9100..641cedfc8c0f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3353,7 +3353,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, * This is a hugetlb vma, all the pte entries should point * to huge page. */ - tlb_remove_check_page_size_change(tlb, sz); + tlb_change_page_size(tlb, sz); tlb_start_vma(tlb, vma); /* diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index 5d1065efbd47..08b43de2383b 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile @@ -2,18 +2,21 @@ KASAN_SANITIZE := n UBSAN_SANITIZE_common.o := n UBSAN_SANITIZE_generic.o := n +UBSAN_SANITIZE_generic_report.o := n UBSAN_SANITIZE_tags.o := n KCOV_INSTRUMENT := n -CFLAGS_REMOVE_common.o = -pg -CFLAGS_REMOVE_generic.o = -pg -CFLAGS_REMOVE_tags.o = -pg +CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_generic.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_generic_report.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_tags.o = $(CC_FLAGS_FTRACE) # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) +CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) obj-$(CONFIG_KASAN) := common.o init.o report.o diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 80bbe62b16cd..36afcf64e016 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -36,6 +36,7 @@ #include <linux/types.h> #include <linux/vmalloc.h> #include <linux/bug.h> +#include <linux/uaccess.h> #include "kasan.h" #include "../slab.h" @@ -48,37 +49,28 @@ static inline int in_irqentry_text(unsigned long ptr) ptr < (unsigned long)&__softirqentry_text_end); } -static inline void filter_irq_stacks(struct stack_trace *trace) +static inline unsigned int filter_irq_stacks(unsigned long *entries, + unsigned int nr_entries) { - int i; + unsigned int i; - if (!trace->nr_entries) - return; - for (i = 0; i < trace->nr_entries; i++) - if (in_irqentry_text(trace->entries[i])) { + for (i = 0; i < nr_entries; i++) { + if (in_irqentry_text(entries[i])) { /* Include the irqentry function into the stack. 
*/ - trace->nr_entries = i + 1; - break; + return i + 1; } + } + return nr_entries; } static inline depot_stack_handle_t save_stack(gfp_t flags) { unsigned long entries[KASAN_STACK_DEPTH]; - struct stack_trace trace = { - .nr_entries = 0, - .entries = entries, - .max_entries = KASAN_STACK_DEPTH, - .skip = 0 - }; + unsigned int nr_entries; - save_stack_trace(&trace); - filter_irq_stacks(&trace); - if (trace.nr_entries != 0 && - trace.entries[trace.nr_entries-1] == ULONG_MAX) - trace.nr_entries--; - - return depot_save_stack(&trace, flags); + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); + nr_entries = filter_irq_stacks(entries, nr_entries); + return stack_depot_save(entries, nr_entries, flags); } static inline void set_track(struct kasan_track *track, gfp_t flags) @@ -614,6 +606,15 @@ void kasan_free_shadow(const struct vm_struct *vm) vfree(kasan_mem_to_shadow(vm->addr)); } +extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); + +void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip) +{ + unsigned long flags = user_access_save(); + __kasan_report(addr, size, is_write, ip); + user_access_restore(flags); +} + #ifdef CONFIG_MEMORY_HOTPLUG static bool shadow_mapped(unsigned long addr) { diff --git a/mm/kasan/report.c b/mm/kasan/report.c index ca9418fe9232..03a443579386 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -100,10 +100,11 @@ static void print_track(struct kasan_track *track, const char *prefix) { pr_err("%s by task %u:\n", prefix, track->pid); if (track->stack) { - struct stack_trace trace; + unsigned long *entries; + unsigned int nr_entries; - depot_fetch_stack(track->stack, &trace); - print_stack_trace(&trace, 0); + nr_entries = stack_depot_fetch(track->stack, &entries); + stack_trace_print(entries, nr_entries, 0); } else { pr_err("(stack is not available)\n"); } @@ -281,8 +282,7 @@ void kasan_report_invalid_free(void *object, unsigned long ip) end_report(&flags); } -void kasan_report(unsigned long addr, size_t size, - bool is_write, unsigned long ip) +void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip) { struct kasan_access_info info; void *tagged_addr; diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 2e435b8142e5..e57bf810f798 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -410,11 +410,6 @@ static void print_unreferenced(struct seq_file *seq, */ static void dump_object_info(struct kmemleak_object *object) { - struct stack_trace trace; - - trace.nr_entries = object->trace_len; - trace.entries = object->trace; - pr_notice("Object 0x%08lx (size %zu):\n", object->pointer, object->size); pr_notice(" comm \"%s\", pid %d, jiffies %lu\n", @@ -424,7 +419,7 @@ static void dump_object_info(struct kmemleak_object *object) pr_notice(" flags = 0x%x\n", object->flags); pr_notice(" checksum = %u\n", object->checksum); pr_notice(" backtrace:\n"); - print_stack_trace(&trace, 4); + stack_trace_print(object->trace, object->trace_len, 4); } /* @@ -553,15 +548,7 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali */ static int __save_stack_trace(unsigned long *trace) { - struct stack_trace stack_trace; - - stack_trace.max_entries = MAX_TRACE; - stack_trace.nr_entries = 0; - stack_trace.entries = trace; - stack_trace.skip = 2; - save_stack_trace(&stack_trace); - - return stack_trace.nr_entries; + return stack_trace_save(trace, MAX_TRACE, 2); } /* @@ -2021,13 +2008,8 @@ early_param("kmemleak", kmemleak_boot_config); static void __init 
print_log_trace(struct early_log *log) { - struct stack_trace trace; - - trace.nr_entries = log->trace_len; - trace.entries = log->trace; - pr_notice("Early log backtrace:\n"); - print_stack_trace(&trace, 2); + stack_trace_print(log->trace, log->trace_len, 2); } /* diff --git a/mm/madvise.c b/mm/madvise.c index 21a7881a2db4..bb3a4554d5d5 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -328,7 +328,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, if (pmd_trans_unstable(pmd)) return 0; - tlb_remove_check_page_size_change(tlb, PAGE_SIZE); + tlb_change_page_size(tlb, PAGE_SIZE); orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); diff --git a/mm/memblock.c b/mm/memblock.c index e7665cf914b1..a48f520c2d01 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -702,7 +702,7 @@ int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) { phys_addr_t end = base + size - 1; - memblock_dbg("memblock_add: [%pa-%pa] %pF\n", + memblock_dbg("memblock_add: [%pa-%pa] %pS\n", &base, &end, (void *)_RET_IP_); return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0); @@ -821,7 +821,7 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) { phys_addr_t end = base + size - 1; - memblock_dbg(" memblock_free: [%pa-%pa] %pF\n", + memblock_dbg(" memblock_free: [%pa-%pa] %pS\n", &base, &end, (void *)_RET_IP_); kmemleak_free_part_phys(base, size); @@ -832,7 +832,7 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) { phys_addr_t end = base + size - 1; - memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n", + memblock_dbg("memblock_reserve: [%pa-%pa] %pS\n", &base, &end, (void *)_RET_IP_); return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0); @@ -1447,7 +1447,7 @@ void * __init memblock_alloc_try_nid_raw( { void *ptr; - memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n", + memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr, (void *)_RET_IP_); @@ -1483,7 +1483,7 @@ void * __init memblock_alloc_try_nid( { void *ptr; - memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n", + memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr, (void *)_RET_IP_); ptr = memblock_alloc_internal(size, align, @@ -1508,7 +1508,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) phys_addr_t cursor, end; end = base + size - 1; - memblock_dbg("%s: [%pa-%pa] %pF\n", + memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, &base, &end, (void *)_RET_IP_); kmemleak_free_part_phys(base, size); cursor = PFN_UP(base); diff --git a/mm/memory.c b/mm/memory.c index ab650c21bccd..f7d962d7de19 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -356,7 +356,7 @@ void free_pgd_range(struct mmu_gather *tlb, * We add page table cache pages with PAGE_SIZE, * (see pte_free_tlb()), flush the tlb if we need */ - tlb_remove_check_page_size_change(tlb, PAGE_SIZE); + tlb_change_page_size(tlb, PAGE_SIZE); pgd = pgd_offset(tlb->mm, addr); do { next = pgd_addr_end(addr, end); @@ -519,7 +519,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, dump_page(page, "bad pte"); pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); - pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n", 
+ pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n", vma->vm_file, vma->vm_ops ? vma->vm_ops->fault : NULL, vma->vm_file ? vma->vm_file->f_op->mmap : NULL, @@ -1046,7 +1046,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, pte_t *pte; swp_entry_t entry; - tlb_remove_check_page_size_change(tlb, PAGE_SIZE); + tlb_change_page_size(tlb, PAGE_SIZE); again: init_rss_vec(rss); start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); @@ -1155,7 +1155,7 @@ again: */ if (force_flush) { force_flush = 0; - tlb_flush_mmu_free(tlb); + tlb_flush_mmu(tlb); if (addr != end) goto again; } diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index f2f03c655807..99740e1dd273 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -11,7 +11,7 @@ #include <asm/pgalloc.h> #include <asm/tlb.h> -#ifdef HAVE_GENERIC_MMU_GATHER +#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER static bool tlb_next_batch(struct mmu_gather *tlb) { @@ -41,35 +41,10 @@ static bool tlb_next_batch(struct mmu_gather *tlb) return true; } -void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - tlb->mm = mm; - - /* Is it from 0 to ~0? */ - tlb->fullmm = !(start | (end+1)); - tlb->need_flush_all = 0; - tlb->local.next = NULL; - tlb->local.nr = 0; - tlb->local.max = ARRAY_SIZE(tlb->__pages); - tlb->active = &tlb->local; - tlb->batch_count = 0; - -#ifdef CONFIG_HAVE_RCU_TABLE_FREE - tlb->batch = NULL; -#endif - tlb->page_size = 0; - - __tlb_reset_range(tlb); -} - -void tlb_flush_mmu_free(struct mmu_gather *tlb) +static void tlb_batch_pages_flush(struct mmu_gather *tlb) { struct mmu_gather_batch *batch; -#ifdef CONFIG_HAVE_RCU_TABLE_FREE - tlb_table_flush(tlb); -#endif for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { free_pages_and_swap_cache(batch->pages, batch->nr); batch->nr = 0; @@ -77,31 +52,10 @@ void tlb_flush_mmu_free(struct mmu_gather *tlb) tlb->active = &tlb->local; } -void tlb_flush_mmu(struct mmu_gather *tlb) -{ - tlb_flush_mmu_tlbonly(tlb); - tlb_flush_mmu_free(tlb); -} - -/* tlb_finish_mmu - * Called at the end of the shootdown operation to free up any resources - * that were required. - */ -void arch_tlb_finish_mmu(struct mmu_gather *tlb, - unsigned long start, unsigned long end, bool force) +static void tlb_batch_list_free(struct mmu_gather *tlb) { struct mmu_gather_batch *batch, *next; - if (force) { - __tlb_reset_range(tlb); - __tlb_adjust_range(tlb, start, end - start); - } - - tlb_flush_mmu(tlb); - - /* keep the page table cache within bounds */ - check_pgt_cache(); - for (batch = tlb->local.next; batch; batch = next) { next = batch->next; free_pages((unsigned long)batch, 0); @@ -109,19 +63,15 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb, tlb->local.next = NULL; } -/* __tlb_remove_page - * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while - * handling the additional races in SMP caused by other CPUs caching valid - * mappings in their TLBs. Returns the number of free page slots left. - * When out of page slots we must call tlb_flush_mmu(). - *returns true if the caller should flush. 
- */ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size) { struct mmu_gather_batch *batch; VM_BUG_ON(!tlb->end); + +#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE VM_WARN_ON(tlb->page_size != page_size); +#endif batch = tlb->active; /* @@ -139,7 +89,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_ return false; } -#endif /* HAVE_GENERIC_MMU_GATHER */ +#endif /* HAVE_MMU_GATHER_NO_GATHER */ #ifdef CONFIG_HAVE_RCU_TABLE_FREE @@ -152,7 +102,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_ */ static inline void tlb_table_invalidate(struct mmu_gather *tlb) { -#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE +#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE /* * Invalidate page-table caches used by hardware walkers. Then we still * need to RCU-sched wait while freeing the pages because software @@ -193,7 +143,7 @@ static void tlb_remove_table_rcu(struct rcu_head *head) free_page((unsigned long)batch); } -void tlb_table_flush(struct mmu_gather *tlb) +static void tlb_table_flush(struct mmu_gather *tlb) { struct mmu_table_batch **batch = &tlb->batch; @@ -225,6 +175,22 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ +static void tlb_flush_mmu_free(struct mmu_gather *tlb) +{ +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + tlb_table_flush(tlb); +#endif +#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER + tlb_batch_pages_flush(tlb); +#endif +} + +void tlb_flush_mmu(struct mmu_gather *tlb) +{ + tlb_flush_mmu_tlbonly(tlb); + tlb_flush_mmu_free(tlb); +} + /** * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down * @tlb: the mmu_gather structure to initialize @@ -240,10 +206,40 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { - arch_tlb_gather_mmu(tlb, mm, start, end); + tlb->mm = mm; + + /* Is it from 0 to ~0? */ + tlb->fullmm = !(start | (end+1)); + +#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER + tlb->need_flush_all = 0; + tlb->local.next = NULL; + tlb->local.nr = 0; + tlb->local.max = ARRAY_SIZE(tlb->__pages); + tlb->active = &tlb->local; + tlb->batch_count = 0; +#endif + +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + tlb->batch = NULL; +#endif +#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE + tlb->page_size = 0; +#endif + + __tlb_reset_range(tlb); inc_tlb_flush_pending(tlb->mm); } +/** + * tlb_finish_mmu - finish an mmu_gather structure + * @tlb: the mmu_gather structure to finish + * @start: start of the region that will be removed from the page-table + * @end: end of the region that will be removed from the page-table + * + * Called at the end of the shootdown operation to free up any resources that + * were required. + */ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { @@ -254,8 +250,17 @@ void tlb_finish_mmu(struct mmu_gather *tlb, * the TLB by observing pte_none|!pte_dirty, for example so flush TLB * forcefully if we detect parallel PTE batching threads. 
*/ - bool force = mm_tlb_flush_nested(tlb->mm); + if (mm_tlb_flush_nested(tlb->mm)) { + __tlb_reset_range(tlb); + __tlb_adjust_range(tlb, start, end - start); + } - arch_tlb_finish_mmu(tlb, start, end, force); + tlb_flush_mmu(tlb); + + /* keep the page table cache within bounds */ + check_pgt_cache(); +#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER + tlb_batch_list_free(tlb); +#endif dec_tlb_flush_pending(tlb->mm); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c02cff1ed56e..59661106da16 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1144,7 +1144,9 @@ static __always_inline bool free_pages_prepare(struct page *page, } arch_free_page(page, order); kernel_poison_pages(page, 1 << order, 0); - kernel_map_pages(page, 1 << order, 0); + if (debug_pagealloc_enabled()) + kernel_map_pages(page, 1 << order, 0); + kasan_free_nondeferred_pages(page, order); return true; @@ -2014,7 +2016,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order, set_page_refcounted(page); arch_alloc_page(page, order); - kernel_map_pages(page, 1 << order, 1); + if (debug_pagealloc_enabled()) + kernel_map_pages(page, 1 << order, 1); kasan_alloc_pages(page, order); kernel_poison_pages(page, 1 << order, 1); set_page_owner(page, order, gfp_flags); diff --git a/mm/page_owner.c b/mm/page_owner.c index 925b6f44a444..addcbb2ae4e4 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -58,15 +58,10 @@ static bool need_page_owner(void) static __always_inline depot_stack_handle_t create_dummy_stack(void) { unsigned long entries[4]; - struct stack_trace dummy; + unsigned int nr_entries; - dummy.nr_entries = 0; - dummy.max_entries = ARRAY_SIZE(entries); - dummy.entries = &entries[0]; - dummy.skip = 0; - - save_stack_trace(&dummy); - return depot_save_stack(&dummy, GFP_KERNEL); + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); + return stack_depot_save(entries, nr_entries, GFP_KERNEL); } static noinline void register_dummy_stack(void) @@ -120,49 +115,39 @@ void __reset_page_owner(struct page *page, unsigned int order) } } -static inline bool check_recursive_alloc(struct stack_trace *trace, - unsigned long ip) +static inline bool check_recursive_alloc(unsigned long *entries, + unsigned int nr_entries, + unsigned long ip) { - int i; - - if (!trace->nr_entries) - return false; + unsigned int i; - for (i = 0; i < trace->nr_entries; i++) { - if (trace->entries[i] == ip) + for (i = 0; i < nr_entries; i++) { + if (entries[i] == ip) return true; } - return false; } static noinline depot_stack_handle_t save_stack(gfp_t flags) { unsigned long entries[PAGE_OWNER_STACK_DEPTH]; - struct stack_trace trace = { - .nr_entries = 0, - .entries = entries, - .max_entries = PAGE_OWNER_STACK_DEPTH, - .skip = 2 - }; depot_stack_handle_t handle; + unsigned int nr_entries; - save_stack_trace(&trace); - if (trace.nr_entries != 0 && - trace.entries[trace.nr_entries-1] == ULONG_MAX) - trace.nr_entries--; + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2); /* - * We need to check recursion here because our request to stackdepot - * could trigger memory allocation to save new entry. New memory - * allocation would reach here and call depot_save_stack() again - * if we don't catch it. There is still not enough memory in stackdepot - * so it would try to allocate memory again and loop forever. + * We need to check recursion here because our request to + * stackdepot could trigger memory allocation to save new + * entry. 
New memory allocation would reach here and call + * stack_depot_save_entries() again if we don't catch it. There is + * still not enough memory in stackdepot so it would try to + * allocate memory again and loop forever. */ - if (check_recursive_alloc(&trace, _RET_IP_)) + if (check_recursive_alloc(entries, nr_entries, _RET_IP_)) return dummy_handle; - handle = depot_save_stack(&trace, flags); + handle = stack_depot_save(entries, nr_entries, flags); if (!handle) handle = failure_handle; @@ -340,16 +325,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn, struct page *page, struct page_owner *page_owner, depot_stack_handle_t handle) { - int ret; - int pageblock_mt, page_mt; + int ret, pageblock_mt, page_mt; + unsigned long *entries; + unsigned int nr_entries; char *kbuf; - unsigned long entries[PAGE_OWNER_STACK_DEPTH]; - struct stack_trace trace = { - .nr_entries = 0, - .entries = entries, - .max_entries = PAGE_OWNER_STACK_DEPTH, - .skip = 0 - }; count = min_t(size_t, count, PAGE_SIZE); kbuf = kmalloc(count, GFP_KERNEL); @@ -378,8 +357,8 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn, if (ret >= count) goto err; - depot_fetch_stack(handle, &trace); - ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0); + nr_entries = stack_depot_fetch(handle, &entries); + ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0); if (ret >= count) goto err; @@ -410,14 +389,9 @@ void __dump_page_owner(struct page *page) { struct page_ext *page_ext = lookup_page_ext(page); struct page_owner *page_owner; - unsigned long entries[PAGE_OWNER_STACK_DEPTH]; - struct stack_trace trace = { - .nr_entries = 0, - .entries = entries, - .max_entries = PAGE_OWNER_STACK_DEPTH, - .skip = 0 - }; depot_stack_handle_t handle; + unsigned long *entries; + unsigned int nr_entries; gfp_t gfp_mask; int mt; @@ -441,10 +415,10 @@ void __dump_page_owner(struct page *page) return; } - depot_fetch_stack(handle, &trace); + nr_entries = stack_depot_fetch(handle, &entries); pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n", page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask); - print_stack_trace(&trace, 0); + stack_trace_print(entries, nr_entries, 0); if (page_owner->last_migrate_reason != -1) pr_alert("page has been migrated, last migrate reason: %s\n", diff --git a/mm/shmem.c b/mm/shmem.c index 2275a0ff7c30..f4dce9c8670d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3631,9 +3631,8 @@ static struct inode *shmem_alloc_inode(struct super_block *sb) return &info->vfs_inode; } -static void shmem_destroy_callback(struct rcu_head *head) +static void shmem_free_in_core_inode(struct inode *inode) { - struct inode *inode = container_of(head, struct inode, i_rcu); if (S_ISLNK(inode->i_mode)) kfree(inode->i_link); kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); @@ -3643,7 +3642,6 @@ static void shmem_destroy_inode(struct inode *inode) { if (S_ISREG(inode->i_mode)) mpol_free_shared_policy(&SHMEM_I(inode)->policy); - call_rcu(&inode->i_rcu, shmem_destroy_callback); } static void shmem_init_inode(void *foo) @@ -3734,6 +3732,7 @@ static const struct inode_operations shmem_special_inode_operations = { static const struct super_operations shmem_ops = { .alloc_inode = shmem_alloc_inode, + .free_inode = shmem_free_in_core_inode, .destroy_inode = shmem_destroy_inode, #ifdef CONFIG_TMPFS .statfs = shmem_statfs, diff --git a/mm/slab.c b/mm/slab.c index 9142ee992493..284ab737faee 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1467,53 +1467,17 @@ static 
bool is_debug_pagealloc_cache(struct kmem_cache *cachep) } #ifdef CONFIG_DEBUG_PAGEALLOC -static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, - unsigned long caller) -{ - int size = cachep->object_size; - - addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; - - if (size < 5 * sizeof(unsigned long)) - return; - - *addr++ = 0x12345678; - *addr++ = caller; - *addr++ = smp_processor_id(); - size -= 3 * sizeof(unsigned long); - { - unsigned long *sptr = &caller; - unsigned long svalue; - - while (!kstack_end(sptr)) { - svalue = *sptr++; - if (kernel_text_address(svalue)) { - *addr++ = svalue; - size -= sizeof(unsigned long); - if (size <= sizeof(unsigned long)) - break; - } - } - - } - *addr++ = 0x87654321; -} - -static void slab_kernel_map(struct kmem_cache *cachep, void *objp, - int map, unsigned long caller) +static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map) { if (!is_debug_pagealloc_cache(cachep)) return; - if (caller) - store_stackinfo(cachep, objp, caller); - kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map); } #else static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp, - int map, unsigned long caller) {} + int map) {} #endif @@ -1661,7 +1625,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, if (cachep->flags & SLAB_POISON) { check_poison_obj(cachep, objp); - slab_kernel_map(cachep, objp, 1, 0); + slab_kernel_map(cachep, objp, 1); } if (cachep->flags & SLAB_RED_ZONE) { if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) @@ -2433,7 +2397,7 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page) /* need to poison the objs? */ if (cachep->flags & SLAB_POISON) { poison_obj(cachep, objp, POISON_FREE); - slab_kernel_map(cachep, objp, 0, 0); + slab_kernel_map(cachep, objp, 0); } } #endif @@ -2812,7 +2776,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, if (cachep->flags & SLAB_POISON) { poison_obj(cachep, objp, POISON_FREE); - slab_kernel_map(cachep, objp, 0, caller); + slab_kernel_map(cachep, objp, 0); } return objp; } @@ -3076,7 +3040,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, return objp; if (cachep->flags & SLAB_POISON) { check_poison_obj(cachep, objp); - slab_kernel_map(cachep, objp, 1, 0); + slab_kernel_map(cachep, objp, 1); poison_obj(cachep, objp, POISON_INUSE); } if (cachep->flags & SLAB_STORE_USER) diff --git a/mm/slub.c b/mm/slub.c index d30ede89f4a6..6b28cd2b5a58 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -552,31 +552,22 @@ static void set_track(struct kmem_cache *s, void *object, if (addr) { #ifdef CONFIG_STACKTRACE - struct stack_trace trace; - int i; + unsigned int nr_entries; - trace.nr_entries = 0; - trace.max_entries = TRACK_ADDRS_COUNT; - trace.entries = p->addrs; - trace.skip = 3; metadata_access_enable(); - save_stack_trace(&trace); + nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3); metadata_access_disable(); - /* See rant in lockdep.c */ - if (trace.nr_entries != 0 && - trace.entries[trace.nr_entries - 1] == ULONG_MAX) - trace.nr_entries--; - - for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++) - p->addrs[i] = 0; + if (nr_entries < TRACK_ADDRS_COUNT) + p->addrs[nr_entries] = 0; #endif p->addr = addr; p->cpu = smp_processor_id(); p->pid = current->pid; p->when = jiffies; - } else + } else { memset(p, 0, sizeof(struct track)); + } } static void init_tracking(struct kmem_cache *s, void *object) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 
e86ba6e74b50..e5e9e1fcac01 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -18,6 +18,7 @@ #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <linux/set_memory.h> #include <linux/debugobjects.h> #include <linux/kallsyms.h> #include <linux/list.h> @@ -1059,24 +1060,9 @@ static void vb_free(const void *addr, unsigned long size) spin_unlock(&vb->lock); } -/** - * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer - * - * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily - * to amortize TLB flushing overheads. What this means is that any page you - * have now, may, in a former life, have been mapped into kernel virtual - * address by the vmap layer and so there might be some CPUs with TLB entries - * still referencing that page (additional to the regular 1:1 kernel mapping). - * - * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can - * be sure that none of the pages we have control over will have any aliases - * from the vmap layer. - */ -void vm_unmap_aliases(void) +static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) { - unsigned long start = ULONG_MAX, end = 0; int cpu; - int flush = 0; if (unlikely(!vmap_initialized)) return; @@ -1113,6 +1099,27 @@ void vm_unmap_aliases(void) flush_tlb_kernel_range(start, end); mutex_unlock(&vmap_purge_lock); } + +/** + * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer + * + * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily + * to amortize TLB flushing overheads. What this means is that any page you + * have now, may, in a former life, have been mapped into kernel virtual + * address by the vmap layer and so there might be some CPUs with TLB entries + * still referencing that page (additional to the regular 1:1 kernel mapping). + * + * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can + * be sure that none of the pages we have control over will have any aliases + * from the vmap layer. + */ +void vm_unmap_aliases(void) +{ + unsigned long start = ULONG_MAX, end = 0; + int flush = 0; + + _vm_unmap_aliases(start, end, flush); +} EXPORT_SYMBOL_GPL(vm_unmap_aliases); /** @@ -1505,6 +1512,72 @@ struct vm_struct *remove_vm_area(const void *addr) return NULL; } +static inline void set_area_direct_map(const struct vm_struct *area, + int (*set_direct_map)(struct page *page)) +{ + int i; + + for (i = 0; i < area->nr_pages; i++) + if (page_address(area->pages[i])) + set_direct_map(area->pages[i]); +} + +/* Handle removing and resetting vm mappings related to the vm_struct. */ +static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) +{ + unsigned long addr = (unsigned long)area->addr; + unsigned long start = ULONG_MAX, end = 0; + int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; + int i; + + /* + * The below block can be removed when all architectures that have + * direct map permissions also have set_direct_map_() implementations. + * This is concerned with resetting the direct map any an vm alias with + * execute permissions, without leaving a RW+X window. + */ + if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) { + set_memory_nx(addr, area->nr_pages); + set_memory_rw(addr, area->nr_pages); + } + + remove_vm_area(area->addr); + + /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */ + if (!flush_reset) + return; + + /* + * If not deallocating pages, just do the flush of the VM area and + * return. 
+ */ + if (!deallocate_pages) { + vm_unmap_aliases(); + return; + } + + /* + * If execution gets here, flush the vm mapping and reset the direct + * map. Find the start and end range of the direct mappings to make sure + * the vm_unmap_aliases() flush includes the direct map. + */ + for (i = 0; i < area->nr_pages; i++) { + if (page_address(area->pages[i])) { + start = min(addr, start); + end = max(addr, end); + } + } + + /* + * Set direct map to something invalid so that it won't be cached if + * there are any accesses after the TLB flush, then flush the TLB and + * reset the direct map permissions to the default. + */ + set_area_direct_map(area, set_direct_map_invalid_noflush); + _vm_unmap_aliases(start, end, 1); + set_area_direct_map(area, set_direct_map_default_noflush); +} + static void __vunmap(const void *addr, int deallocate_pages) { struct vm_struct *area; @@ -1526,7 +1599,8 @@ static void __vunmap(const void *addr, int deallocate_pages) debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); - remove_vm_area(addr); + vm_remove_mappings(area, deallocate_pages); + if (deallocate_pages) { int i; @@ -1961,8 +2035,9 @@ EXPORT_SYMBOL(vzalloc_node); */ void *vmalloc_exec(unsigned long size) { - return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC, - NUMA_NO_NODE, __builtin_return_address(0)); + return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, + GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, + NUMA_NO_NODE, __builtin_return_address(0)); } #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) diff --git a/mm/vmscan.c b/mm/vmscan.c index a815f73ee4d5..fd9de504e516 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -493,7 +493,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, total_scan += delta; if (total_scan < 0) { - pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", + pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n", shrinker->scan_objects, total_scan); total_scan = freeable; next_deferred = nr; |
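
For the mm/mmu_gather.c consolidation, the caller-visible flow is unchanged apart from the renames; below is a hedged sketch of how an unmap path drives the API after this series, loosely modelled on zap_pte_range(). unmap_range_sketch() is a hypothetical name and the page-table walk itself is elided:

/*
 * Sketch only: page-table teardown with the consolidated mmu_gather API.
 */
#include <linux/mm.h>
#include <asm/tlb.h>

static void unmap_range_sketch(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* arch_tlb_gather_mmu() is gone */
	tlb_change_page_size(&tlb, PAGE_SIZE);	/* was tlb_remove_check_page_size_change() */

	/*
	 * ... walk the page tables, clear PTEs and queue the pages:
	 *
	 *	if (__tlb_remove_page_size(&tlb, page, PAGE_SIZE))
	 *		tlb_flush_mmu(&tlb);	// batch full: flush TLB, free pages
	 */

	tlb_finish_mmu(&tlb, start, end);	/* final flush, frees the batches */
}
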
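
The mm/vmalloc.c hunks add VM_FLUSH_RESET_PERMS handling so that executable mappings no longer need their permissions reset by hand before freeing; a sketch of the intended usage, mirroring the new vmalloc_exec() (alloc_exec_sketch() is a hypothetical wrapper):

/*
 * Sketch only: allocate an executable mapping that __vunmap() will clean
 * up safely (direct-map reset + lazy-alias flush) when it is vfree()d.
 */
#include <linux/vmalloc.h>

static void *alloc_exec_sketch(unsigned long size)
{
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC,
				    VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
				    __builtin_return_address(0));
}

/* Freeing is now just vfree(ptr); no manual set_memory_nx()/set_memory_rw(). */
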
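
Finally, the %pF/%pf to %pS/%ps conversions in mm/memblock.c, mm/memory.c and mm/vmscan.c track the retirement of the old function-descriptor printk specifiers; the new form for a plain return address looks like the following one-liner (the message text is hypothetical):

	/* Sketch: %pS resolves a raw address to a symbol, replacing %pF. */
	pr_debug("freed from %pS\n", (void *)_RET_IP_);
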