author		Vlastimil Babka <[email protected]>	2020-08-06 23:18:55 -0700
committer	Linus Torvalds <[email protected]>	2020-08-07 11:33:22 -0700
commit		59052e89fc89e3e6bef0151052e093566e446851 (patch)
tree		84cbd5b1732112096ec4985d730f6f0e0cd5ea51
parent		ca0cab65ea2b8c1527dc48c8dfd38ae055f5f241 (diff)
mm, slub: introduce kmem_cache_debug_flags()
There are a few places that call kmem_cache_debug(s) (which tests if any of the debug flags are enabled for a cache) immediately followed by a test for a specific flag. The compiler can probably eliminate the extra check, but we can make the code nicer by introducing kmem_cache_debug_flags(), which works like kmem_cache_debug() (including the static key check) but tests for specific flag(s). The next patches will add more users.

[[email protected]: change return from int to bool, per Kees. Add VM_WARN_ON_ONCE() for invalid flags, per Roman]
Link: http://lkml.kernel.org/r/[email protected]

Signed-off-by: Vlastimil Babka <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Acked-by: Roman Gushchin <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Acked-by: Kees Cook <[email protected]>
Cc: Jann Horn <[email protected]>
Cc: Vijayanand Jitta <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Pekka Enberg <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
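For illustration, the pattern this helper replaces, taken from the fixup_red_left() hunk in the diff below (a before/after sketch of the same change, not an additional modification):

Before:

	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
		p += s->red_left_pad;

After:

	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;

The combined form keeps the fast-path benefit: when slub_debug is not enabled for any cache, static_branch_unlikely() keeps the flag test off the hot path entirely.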
-rw-r--r--	mm/slub.c	21
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 8adab4c5296d..97074631a2d1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -122,18 +122,29 @@ DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
 #endif
 #endif
 
-static inline int kmem_cache_debug(struct kmem_cache *s)
+/*
+ * Returns true if any of the specified slub_debug flags is enabled for the
+ * cache. Use only for flags parsed by setup_slub_debug() as it also enables
+ * the static key.
+ */
+static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
 {
+	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
 #ifdef CONFIG_SLUB_DEBUG
 	if (static_branch_unlikely(&slub_debug_enabled))
-		return s->flags & SLAB_DEBUG_FLAGS;
+		return s->flags & flags;
 #endif
-	return 0;
+	return false;
+}
+
+static inline bool kmem_cache_debug(struct kmem_cache *s)
+{
+	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
 }
 
 void *fixup_red_left(struct kmem_cache *s, void *p)
 {
-	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
+	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
 		p += s->red_left_pad;
 
 	return p;
@@ -4060,7 +4071,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	offset = (ptr - page_address(page)) % s->size;
 
 	/* Adjust for redzone and reject if within the redzone. */
-	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
 		if (offset < s->red_left_pad)
 			usercopy_abort("SLUB object in left red zone",
 				       s->name, to_user, offset, n);
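
The message notes that subsequent patches will add more users. A hypothetical future caller might look like this (the function name and body are illustrative only; SLAB_STORE_USER is one of the flags covered by SLAB_DEBUG_FLAGS):

	static void example_debug_hook(struct kmem_cache *s, void *object)
	{
		/* One call covers both the static key and the specific flag. */
		if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) {
			/* ... debug-only processing for this cache ... */
		}
	}

Passing a flag outside SLAB_DEBUG_FLAGS would trigger the new VM_WARN_ON_ONCE(), since such flags are not parsed by setup_slub_debug() and thus do not guarantee the static key is enabled.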