Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 38 ++++++++++++++++++++++++++++++++++----
1 file changed, 34 insertions(+), 4 deletions(-)
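The patch below hooks kmemcheck, the kernel's detector of reads from uninitialized memory, into the SLUB allocator: it allocates shadow pages alongside eligible slabs, updates each object's shadow state on allocation and free, and exempts anything marked SLAB_NOTRACK or __GFP_NOTRACK. Brief notes and illustrative C snippets follow each hunk; the snippets are explanatory sketches under stated assumptions, not part of the patch.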
diff --git a/mm/slub.c b/mm/slub.c
index 30354bfeb43..15960a09abb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -18,6 +18,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemtrace.h>
+#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/kmemleak.h>
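The only change in this hunk is the new include, which declares the kmemcheck hooks used throughout the rest of the patch. When CONFIG_KMEMCHECK is disabled, the header is expected to stub every hook out so the hot paths cost nothing; a minimal sketch of that pattern, with signatures inferred from the call sites in this patch:

/* Sketch of the compiled-out case: empty static inlines let the
 * compiler discard the call sites added in this patch entirely. */
#include <linux/types.h>
#include <linux/mm_types.h>

struct kmem_cache;

#ifndef CONFIG_KMEMCHECK
#define kmemcheck_enabled 0

static inline void kmemcheck_alloc_shadow(struct page *page, int order,
					  gfp_t flags, int node)
{
}

static inline void kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags,
					void *object, size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}
#endif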
@@ -147,7 +148,7 @@
SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
- SLAB_CACHE_DMA)
+ SLAB_CACHE_DMA | SLAB_NOTRACK)
#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
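SLUB_MERGE_SAME names the flags on which two caches must agree before SLUB will merge them into one; listing SLAB_NOTRACK here keeps an untracked cache from being folded into a tracked one (or vice versa), which would silently change what kmemcheck sees. A cache opts out of tracking at creation time; an illustrative sketch (struct foo and its cache are hypothetical):

/* Hypothetical cache that kmemcheck should leave alone. */
#include <linux/slab.h>

struct foo {
	int bar;
};

static struct kmem_cache *foo_cache;

static int foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_NOTRACK, NULL);
	return foo_cache ? 0 : -ENOMEM;
}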
@@ -1071,6 +1072,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
{
int order = oo_order(oo);
+ flags |= __GFP_NOTRACK;
+
if (node == -1)
return alloc_pages(flags, order);
else
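Backing pages for slabs are always allocated with __GFP_NOTRACK: the object-level hooks added later in this patch do the tracking, so page-granularity tracking of the same memory would be redundant. Any caller can use the flag the same way to leave a page allocation untracked; a minimal sketch:

/* Illustrative only: grab and release one untracked page. */
#include <linux/gfp.h>

static void notrack_page_demo(void)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);

	if (page)
		__free_pages(page, 0);
}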
@@ -1098,6 +1101,24 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
}
+
+ if (kmemcheck_enabled
+ && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
+ {
+ int pages = 1 << oo_order(oo);
+
+ kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+ /*
+ * Objects from caches that have a constructor don't get
+ * cleared when they're allocated, so we need to do it here.
+ */
+ if (s->ctor)
+ kmemcheck_mark_uninitialized_pages(page, pages);
+ else
+ kmemcheck_mark_unallocated_pages(page, pages);
+ }
+
page->objects = oo_objects(oo);
mod_zone_page_state(page_zone(page),
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
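kmemcheck_alloc_shadow() allocates shadow pages that record a per-byte state for the new slab, and the hunk's own comment explains the split that follows: objects from ctor caches are not cleared on allocation, so their pages start out "uninitialized", while ordinary slabs start "unallocated" until an object is handed out. A sketch of the per-byte states this implies, with names assumed from kmemcheck's shadow implementation rather than taken from this patch:

/* Assumed shadow states; one such value is kept per tracked byte. */
enum kmemcheck_shadow {
	KMEMCHECK_SHADOW_UNALLOCATED,	/* no object here: any access is a bug */
	KMEMCHECK_SHADOW_UNINITIALIZED,	/* allocated: reads warn until written */
	KMEMCHECK_SHADOW_INITIALIZED,	/* written: reads are fine */
	KMEMCHECK_SHADOW_FREED,		/* freed: any access is use-after-free */
};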
@@ -1171,6 +1192,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
__ClearPageSlubDebug(page);
}
+ kmemcheck_free_shadow(page, compound_order(page));
+
mod_zone_page_state(page_zone(page),
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1626,7 +1649,9 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
if (unlikely((gfpflags & __GFP_ZERO) && object))
memset(object, 0, objsize);
+ kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+
return object;
}
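On the allocation fast path, kmemcheck_slab_alloc() moves the object's shadow bytes into an allocated state; since the hook runs after the __GFP_ZERO memset, a zeroed object need not be flagged as uninitialized. This is the hook that lets kmemcheck warn about reads of never-written kmalloc() memory; an illustrative bug (the demo function is hypothetical):

#include <linux/kernel.h>
#include <linux/slab.h>

static void kmemcheck_read_demo(void)
{
	int *p = kmalloc(sizeof(*p), GFP_KERNEL);
	/* no warning for *q: __GFP_ZERO already initialized it */
	int *q = kzalloc(sizeof(*q), GFP_KERNEL);

	if (p) {
		pr_info("%d\n", *p);	/* read of uninitialized memory */
		kfree(p);
	}
	kfree(q);
}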
@@ -1759,6 +1784,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
kmemleak_free_recursive(x, s->flags);
local_irq_save(flags);
c = get_cpu_slab(s, smp_processor_id());
+ kmemcheck_slab_free(s, object, c->objsize);
debug_check_no_locks_freed(object, c->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(object, c->objsize);
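The free path mirrors it: kmemcheck_slab_free() marks the object's bytes freed before the slab takes the object back, so a later access through a stale pointer can be reported. Illustratively (again a hypothetical demo function):

/* Illustrative use-after-free of the kind kmemcheck aims to catch. */
#include <linux/slab.h>

static void kmemcheck_uaf_demo(void)
{
	char *p = kmalloc(32, GFP_KERNEL);

	kfree(p);
	if (p)
		p[0] = 'x';	/* access to freed memory: kmemcheck would warn */
}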
@@ -2633,7 +2659,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
if (!s || !text || !kmem_cache_open(s, flags, text,
realsize, ARCH_KMALLOC_MINALIGN,
- SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+ SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED,
+ NULL)) {
kfree(s);
kfree(text);
goto unlock_out;
@@ -2727,9 +2754,10 @@ EXPORT_SYMBOL(__kmalloc);
static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
- struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
- get_order(size));
+ struct page *page;
+ flags |= __GFP_COMP | __GFP_NOTRACK;
+ page = alloc_pages_node(node, flags, get_order(size));
if (page)
return page_address(page);
else
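kmalloc_large_node() services oversized allocations straight from the page allocator, so none of the slab hooks above ever run for them; they are exempted wholesale with __GFP_NOTRACK rather than tracked halfway. Callers can pass the flag themselves to exempt a single allocation, for instance a buffer a device fills before the CPU reads it; a sketch (the size and use case are hypothetical):

/* Hypothetical: buffer written by hardware, not by the CPU. */
#include <linux/slab.h>

static void *notrack_buf_alloc(void)
{
	return kmalloc(4096, GFP_KERNEL | __GFP_NOTRACK);
}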
@@ -4412,6 +4440,8 @@ static char *create_unique_id(struct kmem_cache *s)
*p++ = 'a';
if (s->flags & SLAB_DEBUG_FREE)
*p++ = 'F';
+ if (!(s->flags & SLAB_NOTRACK))
+ *p++ = 't';
if (p != name + 1)
*p++ = '-';
p += sprintf(p, "%07d", s->size);