diff options
Diffstat (limited to 'mm/slub.c')
| -rw-r--r-- | mm/slub.c | 36 | 
1 file changed, 16 insertions, 20 deletions
diff --git a/mm/slub.c b/mm/slub.c index e72e802fc569..d11389710b12 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -93,9 +93,7 @@   * minimal so we rely on the page allocators per cpu caches for   * fast frees and allocs.   * - * Overloading of page flags that are otherwise used for LRU management. - * - * PageActive 		The slab is frozen and exempt from list processing. + * page->frozen		The slab is frozen and exempt from list processing.   * 			This means that the slab is dedicated to a purpose   * 			such as satisfying allocations for a specific   * 			processor. Objects may be freed in the slab while @@ -111,7 +109,7 @@   * 			free objects in addition to the regular freelist   * 			that requires the slab lock.   * - * PageError		Slab requires special handling due to debug + * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug   * 			options set. This moves	slab handling out of   * 			the fast path and disables lockless freelists.   */ @@ -736,6 +734,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,  {  	u8 *fault;  	u8 *end; +	u8 *addr = page_address(page);  	metadata_access_enable();  	fault = memchr_inv(start, value, bytes); @@ -748,8 +747,9 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,  		end--;  	slab_bug(s, "%s overwritten", what); -	pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n", -					fault, end - 1, fault[0], value); +	pr_err("INFO: 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n", +					fault, end - 1, fault - addr, +					fault[0], value);  	print_trailer(s, page, object);  	restore_bytes(s, what, value, fault, end); @@ -844,7 +844,8 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)  	while (end > fault && end[-1] == POISON_INUSE)  		end--; -	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); +	slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu", +			fault, end - 1, fault - start);  	print_section(KERN_ERR, "Padding ", pad, remainder);  	restore_bytes(s, "slab padding", POISON_INUSE, fault, end); @@ -4383,31 +4384,26 @@ static int count_total(struct page *page)  #endif  #ifdef CONFIG_SLUB_DEBUG -static int validate_slab(struct kmem_cache *s, struct page *page, +static void validate_slab(struct kmem_cache *s, struct page *page,  						unsigned long *map)  {  	void *p;  	void *addr = page_address(page); -	if (!check_slab(s, page) || -			!on_freelist(s, page, NULL)) -		return 0; +	if (!check_slab(s, page) || !on_freelist(s, page, NULL)) +		return;  	/* Now we know that a valid freelist exists */  	bitmap_zero(map, page->objects);  	get_map(s, page, map);  	for_each_object(p, s, addr, page->objects) { -		if (test_bit(slab_index(p, s, addr), map)) -			if (!check_object(s, page, p, SLUB_RED_INACTIVE)) -				return 0; -	} +		u8 val = test_bit(slab_index(p, s, addr), map) ? +			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; -	for_each_object(p, s, addr, page->objects) -		if (!test_bit(slab_index(p, s, addr), map)) -			if (!check_object(s, page, p, SLUB_RED_ACTIVE)) -				return 0; -	return 1; +		if (!check_object(s, page, p, val)) +			break; +	}  }  static void validate_slab_slab(struct kmem_cache *s, struct page *page,  |