author    Christoph Lameter <clameter@sgi.com>  2007-05-06 14:49:42 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-07 12:12:54 -0700
commit    643b113849d8faa68c9f01c3c9d929bfbffd50bd
tree      d8eea2326ccee49892acaa970bf015ee69a31e8a
parent    77c5e2d01af871f4bfbe08feefa3d5118cb1001b
slub: enable tracking of full slabs
If slab tracking is on, build a list of full slabs so that we can verify
the integrity of all slabs and are also able to build a list of alloc/free
callers.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
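The point of the new list is that, together with the partial list, it makes every slab with live objects reachable from the per-node kmem_cache_node, so debug code can walk them all. Below is a minimal sketch of such an integrity walk; it is not part of this patch, and validate_full_list is an invented name, though get_node(), slab_lock()/slab_unlock() and n->list_lock all exist in mm/slub.c at this point:

/*
 * Hypothetical debug walk over the new per-node full list (illustrative
 * only, not from this commit).  Every slab on n->full should have all
 * objects allocated, i.e. a NULL freelist.
 */
static void validate_full_list(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->full, lru) {
		slab_lock(page);
		if (page->freelist)
			printk(KERN_ERR "SLUB %s: slab on full list has "
					"free objects\n", s->name);
		slab_unlock(page);
	}
	spin_unlock_irqrestore(&n->list_lock, flags);
}

Without the full list, a slab whose objects are all allocated sits on no list at all and is only rediscovered when one of its objects is freed, which is why SLAB_STORE_USER debugging needs the extra tracking.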
-rw-r--r--  include/linux/slub_def.h |  1
-rw-r--r--  mm/slub.c                | 41
2 files changed, 41 insertions(+), 1 deletion(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f8e0c86c48a..ea27065e80e 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -16,6 +16,7 @@ struct kmem_cache_node {
 	unsigned long nr_partial;
 	atomic_long_t nr_slabs;
 	struct list_head partial;
+	struct list_head full;
 };
 
 /*
diff --git a/mm/slub.c b/mm/slub.c
index cfc5301afe4..c4f40d373d1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -661,6 +661,40 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	return search == NULL;
 }
 
+/*
+ * Tracking of fully allocated slabs for debugging
+ */
+static void add_full(struct kmem_cache *s, struct page *page)
+{
+	struct kmem_cache_node *n;
+
+	VM_BUG_ON(!irqs_disabled());
+
+	VM_BUG_ON(!irqs_disabled());
+
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	n = get_node(s, page_to_nid(page));
+	spin_lock(&n->list_lock);
+	list_add(&page->lru, &n->full);
+	spin_unlock(&n->list_lock);
+}
+
+static void remove_full(struct kmem_cache *s, struct page *page)
+{
+	struct kmem_cache_node *n;
+
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	n = get_node(s, page_to_nid(page));
+
+	spin_lock(&n->list_lock);
+	list_del(&page->lru);
+	spin_unlock(&n->list_lock);
+}
+
 static int alloc_object_checks(struct kmem_cache *s, struct page *page,
 							void *object)
 {
@@ -1090,6 +1124,8 @@ static void putback_slab(struct kmem_cache *s, struct page *page)
 	if (page->inuse) {
 		if (page->freelist)
 			add_partial(s, page);
+		else if (PageError(page))
+			add_full(s, page);
 		slab_unlock(page);
 	} else {
 		slab_unlock(page);
@@ -1302,7 +1338,7 @@ out_unlock:
 slab_empty:
 	if (prior)
 		/*
-		 * Partially used slab that is on the partial list.
+		 * Slab on the partial list.
 		 */
 		remove_partial(s, page);
 
@@ -1314,6 +1350,8 @@ slab_empty:
 debug:
 	if (!free_object_checks(s, page, x))
 		goto out_unlock;
+	if (!PageActive(page) && !page->freelist)
+		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, x, TRACK_FREE, addr);
 	goto checks_ok;
@@ -1466,6 +1504,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
+	INIT_LIST_HEAD(&n->full);
 }
 
 #ifdef CONFIG_NUMA
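The second use the commit message mentions, building a list of alloc/free callers, follows the same pattern. Here is a hedged sketch of what reading those callers off a full slab could look like; the function and its object-iteration loop are assumptions for illustration, while get_track(), TRACK_ALLOC and the objects/size fields of struct kmem_cache do exist in mm/slub.c of this era:

/*
 * Hypothetical sketch (not from this commit): print the allocation
 * caller recorded for each object of one slab.  The track data is only
 * present under SLAB_STORE_USER, the same flag that gates
 * add_full()/remove_full() above.
 */
static void print_alloc_callers(struct kmem_cache *s, struct page *page)
{
	void *addr = page_address(page);
	void *p;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	for (p = addr; p < addr + s->objects * s->size; p += s->size) {
		struct track *t = get_track(s, p, TRACK_ALLOC);

		if (t->addr)
			printk(KERN_INFO "SLUB %s: object %p allocated "
					"from %p\n", s->name, p, t->addr);
	}
}

SLUB did in fact grow this kind of reporting shortly afterwards, in the form of the alloc_calls and free_calls files in sysfs.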