author	Shakeel Butt <[email protected]>	2021-08-13 16:54:31 -0700
committer	Linus Torvalds <[email protected]>	2021-08-13 14:09:31 -1000
commit	1ed7ce574c136569f55fb5c32e69e382c77ba500 (patch)
tree	604fc31ae873564e5fd9537e3f067bd0ec41c69e
parent	340caf178ddc2efb0294afaf54c715f7928c258e (diff)
slub: fix kmalloc_pagealloc_invalid_free unit test
The unit test kmalloc_pagealloc_invalid_free makes sure that, for a
higher-order slub allocation which goes to the page allocator, the free
is called with the correct address, i.e. the virtual address of the head
page.

Commit f227f0faf63b ("slub: fix unreclaimable slab stat for bulk free")
unified the free code paths for page-allocator-based slub allocations,
but instead of using the address passed by the caller, it extracted the
address from the page. This made the unit test
kmalloc_pagealloc_invalid_free moot: the address recomputed from the
head page is always valid, so KASAN never sees the bogus pointer. So,
fix this by using the address passed by the caller.

Should we fix this? I think yes, because developers expect KASAN to
catch these types of programming bugs.

Link: https://lkml.kernel.org/r/[email protected]
Fixes: f227f0faf63b ("slub: fix unreclaimable slab stat for bulk free")
Signed-off-by: Shakeel Butt <[email protected]>
Reported-by: Nathan Chancellor <[email protected]>
Tested-by: Nathan Chancellor <[email protected]>
Acked-by: Roman Gushchin <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
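For context, the test this patch un-breaks allocates an object large
enough to bypass the slab caches and then frees it at an offset pointer,
expecting KASAN to report the invalid free. A minimal sketch, paraphrased
from lib/test_kasan.c of this era (the exact constant and assertion
macros are recalled, not quoted verbatim):

/* Sketch of the KASAN KUnit test exercised here. */
static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	/* Too big for the slab caches: served by the page allocator. */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* ptr + 1 is not the start of the allocation; KASAN must complain. */
	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

With the fix below, kfree_hook() receives ptr + 1 itself rather than a
recomputed head-page address, so the KASAN check fires as the test
expects.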
-rw-r--r--	mm/slub.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 1583354fbf48..5c2a13d66e71 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3236,12 +3236,12 @@ struct detached_freelist {
 	struct kmem_cache *s;
 };
 
-static inline void free_nonslab_page(struct page *page)
+static inline void free_nonslab_page(struct page *page, void *object)
 {
 	unsigned int order = compound_order(page);
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
-	kfree_hook(page_address(page));
+	kfree_hook(object);
 	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
 	__free_pages(page, order);
 }
@@ -3282,7 +3282,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!PageSlab(page))) {
-			free_nonslab_page(page);
+			free_nonslab_page(page, object);
 			p[size] = NULL; /* mark object processed */
 			return size;
 		}
@@ -4258,7 +4258,7 @@ void kfree(const void *x)
 
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
-		free_nonslab_page(page);
+		free_nonslab_page(page, object);
 		return;
 	}
 	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
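For reference, this is free_nonslab_page() as it reads after the patch,
reconstructed purely from the hunks above (a sketch, not an authoritative
excerpt of mm/slub.c):

static inline void free_nonslab_page(struct page *page, void *object)
{
	unsigned int order = compound_order(page);

	VM_BUG_ON_PAGE(!PageCompound(page), page);
	/* Use the caller-supplied pointer so KASAN can validate it. */
	kfree_hook(object);
	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
	__free_pages(page, order);
}

Passing object through instead of recomputing page_address(page) keeps
the debug hooks operating on exactly the pointer the caller freed, which
is what allows an off-by-one free to be detected.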