slub: convert SLAB_DEBUG_FREE to SLAB_CONSISTENCY_CHECKS

SLAB_DEBUG_FREE allows expensive consistency checks at free to be turned
on or off.  Expand its use to be able to turn off all consistency
checks.  This gives a nice speed up if you only want features such as
poisoning or tracing.

Credit to Mathias Krause for the original work which inspired this
series.

Signed-off-by: Laura Abbott <labbott@fedoraproject.org>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Laura Abbott 2016-03-15 14:55:06 -07:00 committed by Linus Torvalds
parent 804aa132d3
commit becfda68ab
5 changed files with 69 additions and 44 deletions

View File

@ -35,8 +35,8 @@ slub_debug=<Debug-Options>,<slab name>
Enable options only for select slabs Enable options only for select slabs
Possible debug options are Possible debug options are
F Sanity checks on (enables SLAB_DEBUG_FREE. Sorry F Sanity checks on (enables SLAB_CONSISTENCY_CHECKS
SLAB legacy issues) Sorry SLAB legacy issues)
Z Red zoning Z Red zoning
P Poisoning (object and padding) P Poisoning (object and padding)
U User tracking (free and alloc) U User tracking (free and alloc)

View File

@ -20,7 +20,7 @@
* Flags to pass to kmem_cache_create(). * Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set. * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/ */
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ #define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */

View File

@ -125,7 +125,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG) #elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
SLAB_TRACE | SLAB_DEBUG_FREE) SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else #else
#define SLAB_DEBUG_FLAGS (0) #define SLAB_DEBUG_FLAGS (0)
#endif #endif
@ -311,7 +311,8 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
* to not do even the assignment. In that case, slab_equal_or_root * to not do even the assignment. In that case, slab_equal_or_root
* will also be a constant. * will also be a constant.
*/ */
if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE)) if (!memcg_kmem_enabled() &&
!unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
return s; return s;
page = virt_to_head_page(x); page = virt_to_head_page(x);

100
mm/slub.c
View File

@ -160,7 +160,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
*/ */
#define MAX_PARTIAL 10 #define MAX_PARTIAL 10
#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \ #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
SLAB_POISON | SLAB_STORE_USER) SLAB_POISON | SLAB_STORE_USER)
/* /*
@ -1007,20 +1007,32 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
init_tracking(s, object); init_tracking(s, object);
} }
static noinline int alloc_debug_processing(struct kmem_cache *s, static inline int alloc_consistency_checks(struct kmem_cache *s,
struct page *page, struct page *page,
void *object, unsigned long addr) void *object, unsigned long addr)
{ {
if (!check_slab(s, page)) if (!check_slab(s, page))
goto bad; return 0;
if (!check_valid_pointer(s, page, object)) { if (!check_valid_pointer(s, page, object)) {
object_err(s, page, object, "Freelist Pointer check fails"); object_err(s, page, object, "Freelist Pointer check fails");
goto bad; return 0;
} }
if (!check_object(s, page, object, SLUB_RED_INACTIVE)) if (!check_object(s, page, object, SLUB_RED_INACTIVE))
goto bad; return 0;
return 1;
}
static noinline int alloc_debug_processing(struct kmem_cache *s,
struct page *page,
void *object, unsigned long addr)
{
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
if (!alloc_consistency_checks(s, page, object, addr))
goto bad;
}
/* Success perform special debug activities for allocs */ /* Success perform special debug activities for allocs */
if (s->flags & SLAB_STORE_USER) if (s->flags & SLAB_STORE_USER)
@ -1043,6 +1055,38 @@ static noinline int alloc_debug_processing(struct kmem_cache *s,
return 0; return 0;
} }
static inline int free_consistency_checks(struct kmem_cache *s,
struct page *page, void *object, unsigned long addr)
{
if (!check_valid_pointer(s, page, object)) {
slab_err(s, page, "Invalid object pointer 0x%p", object);
return 0;
}
if (on_freelist(s, page, object)) {
object_err(s, page, object, "Object already free");
return 0;
}
if (!check_object(s, page, object, SLUB_RED_ACTIVE))
return 0;
if (unlikely(s != page->slab_cache)) {
if (!PageSlab(page)) {
slab_err(s, page, "Attempt to free object(0x%p) "
"outside of slab", object);
} else if (!page->slab_cache) {
pr_err("SLUB <none>: no slab for object 0x%p.\n",
object);
dump_stack();
} else
object_err(s, page, object,
"page slab pointer corrupt.");
return 0;
}
return 1;
}
/* Supports checking bulk free of a constructed freelist */ /* Supports checking bulk free of a constructed freelist */
static noinline int free_debug_processing( static noinline int free_debug_processing(
struct kmem_cache *s, struct page *page, struct kmem_cache *s, struct page *page,
@ -1058,37 +1102,17 @@ static noinline int free_debug_processing(
spin_lock_irqsave(&n->list_lock, flags); spin_lock_irqsave(&n->list_lock, flags);
slab_lock(page); slab_lock(page);
if (!check_slab(s, page)) if (s->flags & SLAB_CONSISTENCY_CHECKS) {
goto out; if (!check_slab(s, page))
goto out;
}
next_object: next_object:
cnt++; cnt++;
if (!check_valid_pointer(s, page, object)) { if (s->flags & SLAB_CONSISTENCY_CHECKS) {
slab_err(s, page, "Invalid object pointer 0x%p", object); if (!free_consistency_checks(s, page, object, addr))
goto out; goto out;
}
if (on_freelist(s, page, object)) {
object_err(s, page, object, "Object already free");
goto out;
}
if (!check_object(s, page, object, SLUB_RED_ACTIVE))
goto out;
if (unlikely(s != page->slab_cache)) {
if (!PageSlab(page)) {
slab_err(s, page, "Attempt to free object(0x%p) "
"outside of slab", object);
} else if (!page->slab_cache) {
pr_err("SLUB <none>: no slab for object 0x%p.\n",
object);
dump_stack();
} else
object_err(s, page, object,
"page slab pointer corrupt.");
goto out;
} }
if (s->flags & SLAB_STORE_USER) if (s->flags & SLAB_STORE_USER)
@ -1145,7 +1169,7 @@ static int __init setup_slub_debug(char *str)
for (; *str && *str != ','; str++) { for (; *str && *str != ','; str++) {
switch (tolower(*str)) { switch (tolower(*str)) {
case 'f': case 'f':
slub_debug |= SLAB_DEBUG_FREE; slub_debug |= SLAB_CONSISTENCY_CHECKS;
break; break;
case 'z': case 'z':
slub_debug |= SLAB_RED_ZONE; slub_debug |= SLAB_RED_ZONE;
@ -1449,7 +1473,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
int order = compound_order(page); int order = compound_order(page);
int pages = 1 << order; int pages = 1 << order;
if (kmem_cache_debug(s)) { if (s->flags & SLAB_CONSISTENCY_CHECKS) {
void *p; void *p;
slab_pad_check(s, page); slab_pad_check(s, page);
@ -4769,16 +4793,16 @@ SLAB_ATTR_RO(total_objects);
static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{ {
return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
} }
static ssize_t sanity_checks_store(struct kmem_cache *s, static ssize_t sanity_checks_store(struct kmem_cache *s,
const char *buf, size_t length) const char *buf, size_t length)
{ {
s->flags &= ~SLAB_DEBUG_FREE; s->flags &= ~SLAB_CONSISTENCY_CHECKS;
if (buf[0] == '1') { if (buf[0] == '1') {
s->flags &= ~__CMPXCHG_DOUBLE; s->flags &= ~__CMPXCHG_DOUBLE;
s->flags |= SLAB_DEBUG_FREE; s->flags |= SLAB_CONSISTENCY_CHECKS;
} }
return length; return length;
} }
@ -5313,7 +5337,7 @@ static char *create_unique_id(struct kmem_cache *s)
*p++ = 'd'; *p++ = 'd';
if (s->flags & SLAB_RECLAIM_ACCOUNT) if (s->flags & SLAB_RECLAIM_ACCOUNT)
*p++ = 'a'; *p++ = 'a';
if (s->flags & SLAB_DEBUG_FREE) if (s->flags & SLAB_CONSISTENCY_CHECKS)
*p++ = 'F'; *p++ = 'F';
if (!(s->flags & SLAB_NOTRACK)) if (!(s->flags & SLAB_NOTRACK))
*p++ = 't'; *p++ = 't';

View File

@ -135,7 +135,7 @@ static void usage(void)
"\nValid debug options (FZPUT may be combined)\n" "\nValid debug options (FZPUT may be combined)\n"
"a / A Switch on all debug options (=FZUP)\n" "a / A Switch on all debug options (=FZUP)\n"
"- Switch off all debug options\n" "- Switch off all debug options\n"
"f / F Sanity Checks (SLAB_DEBUG_FREE)\n" "f / F Sanity Checks (SLAB_CONSISTENCY_CHECKS)\n"
"z / Z Redzoning\n" "z / Z Redzoning\n"
"p / P Poisoning\n" "p / P Poisoning\n"
"u / U Tracking\n" "u / U Tracking\n"