From 6e40e73097a5e4ad1b9f92fa21757343fdd6a682 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Sat, 10 Feb 2007 01:42:52 -0800
Subject: [PATCH] slab: remove broken PageSlab check from kfree_debugcheck

The PageSlab debug check in kfree_debugcheck() is broken for compound
pages. It is also redundant as we already do BUG_ON for non-slab pages in
page_get_cache() and page_get_slab() which are always called before we
free any actual objects.

Signed-off-by: Pekka Enberg
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 8 --------
 1 file changed, 8 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index c6100628a6e..aea1cd25314 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2814,19 +2814,11 @@ failed:
  */
 static void kfree_debugcheck(const void *objp)
 {
-	struct page *page;
-
 	if (!virt_addr_valid(objp)) {
 		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
 		       (unsigned long)objp);
 		BUG();
 	}
-	page = virt_to_page(objp);
-	if (!PageSlab(page)) {
-		printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
-		       (unsigned long)objp);
-		BUG();
-	}
 }
 
 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
--
cgit v1.2.3

From 8c8cc2c10c21943dd3499a2df7aac835093b37f3 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Sat, 10 Feb 2007 01:42:53 -0800
Subject: [PATCH] slab: cache alloc cleanups

Clean up __cache_alloc and __cache_alloc_node functions a bit. We no
longer need to do NUMA_BUILD tricks and the UMA allocation path is much
simpler. No functional changes in this patch.

Note: saves few kernel text bytes on x86 NUMA build due to using gotos in
__cache_alloc_node() and moving __GFP_THISNODE check in to
fallback_alloc().

Cc: Andy Whitcroft
Cc: Christoph Hellwig
Cc: Manfred Spraul
Acked-by: Christoph Lameter
Cc: Paul Jackson
Signed-off-by: Pekka Enberg
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 200 ++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 110 insertions(+), 90 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index aea1cd25314..b595323e24a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3189,35 +3189,6 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	return objp;
 }
 
-static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
-						gfp_t flags, void *caller)
-{
-	unsigned long save_flags;
-	void *objp = NULL;
-
-	cache_alloc_debugcheck_before(cachep, flags);
-
-	local_irq_save(save_flags);
-
-	if (unlikely(NUMA_BUILD &&
-			current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
-		objp = alternate_node_alloc(cachep, flags);
-
-	if (!objp)
-		objp = ____cache_alloc(cachep, flags);
-	/*
-	 * We may just have run out of memory on the local node.
-	 * ____cache_alloc_node() knows how to locate memory on other nodes
-	 */
-	if (NUMA_BUILD && !objp)
-		objp = ____cache_alloc_node(cachep, flags, numa_node_id());
-	local_irq_restore(save_flags);
-	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
-					caller);
-	prefetchw(objp);
-	return objp;
-}
-
 #ifdef CONFIG_NUMA
 /*
  * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
@@ -3249,14 +3220,20 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
  * allocator to do its reclaim / fallback magic. We then insert the
  * slab into the proper nodelist and then allocate from it.
  */
-void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
+static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 {
-	struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy))
-					->node_zonelists[gfp_zone(flags)];
+	struct zonelist *zonelist;
+	gfp_t local_flags;
 	struct zone **z;
 	void *obj = NULL;
 	int nid;
-	gfp_t local_flags = (flags & GFP_LEVEL_MASK);
+
+	if (flags & __GFP_THISNODE)
+		return NULL;
+
+	zonelist = &NODE_DATA(slab_node(current->mempolicy))
+			->node_zonelists[gfp_zone(flags)];
+	local_flags = (flags & GFP_LEVEL_MASK);
 
 retry:
 	/*
@@ -3366,16 +3343,110 @@ must_grow:
 	if (x)
 		goto retry;
 
-	if (!(flags & __GFP_THISNODE))
-		/* Unable to grow the cache. Fall back to other nodes. */
-		return fallback_alloc(cachep, flags);
-
-	return NULL;
+	return fallback_alloc(cachep, flags);
 
 done:
 	return obj;
 }
-#endif
+
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ * @caller: return address of caller, used for debug information
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
+static __always_inline void *
+__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+		   void *caller)
+{
+	unsigned long save_flags;
+	void *ptr;
+
+	cache_alloc_debugcheck_before(cachep, flags);
+	local_irq_save(save_flags);
+
+	if (unlikely(nodeid == -1))
+		nodeid = numa_node_id();
+
+	if (unlikely(!cachep->nodelists[nodeid])) {
+		/* Node not bootstrapped yet */
+		ptr = fallback_alloc(cachep, flags);
+		goto out;
+	}
+
+	if (nodeid == numa_node_id()) {
+		/*
+		 * Use the locally cached objects if possible.
+		 * However ____cache_alloc does not allow fallback
+		 * to other nodes. It may fail while we still have
+		 * objects on other nodes available.
+		 */
+		ptr = ____cache_alloc(cachep, flags);
+		if (ptr)
+			goto out;
+	}
+	/* ___cache_alloc_node can fall back to other nodes */
+	ptr = ____cache_alloc_node(cachep, flags, nodeid);
+  out:
+	local_irq_restore(save_flags);
+	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+
+	return ptr;
+}
+
+static __always_inline void *
+__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
+{
+	void *objp;
+
+	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
+		objp = alternate_node_alloc(cache, flags);
+		if (objp)
+			goto out;
+	}
+	objp = ____cache_alloc(cache, flags);
+
+	/*
+	 * We may just have run out of memory on the local node.
+	 * ____cache_alloc_node() knows how to locate memory on other nodes
+	 */
+	if (!objp)
+		objp = ____cache_alloc_node(cache, flags, numa_node_id());
+
+  out:
+	return objp;
+}
+#else
+
+static __always_inline void *
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	return ____cache_alloc(cachep, flags);
+}
+
+#endif /* CONFIG_NUMA */
+
+static __always_inline void *
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+{
+	unsigned long save_flags;
+	void *objp;
+
+	cache_alloc_debugcheck_before(cachep, flags);
+	local_irq_save(save_flags);
+	objp = __do_cache_alloc(cachep, flags);
+	local_irq_restore(save_flags);
+	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+	prefetchw(objp);
+
+	return objp;
+}
 
 /*
  * Caller needs to acquire correct kmem_list's list_lock
@@ -3574,57 +3645,6 @@ out:
 }
 
 #ifdef CONFIG_NUMA
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
-static __always_inline void *
-__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
-		int nodeid, void *caller)
-{
-	unsigned long save_flags;
-	void *ptr = NULL;
-
-	cache_alloc_debugcheck_before(cachep, flags);
-	local_irq_save(save_flags);
-
-	if (unlikely(nodeid == -1))
-		nodeid = numa_node_id();
-
-	if (likely(cachep->nodelists[nodeid])) {
-		if (nodeid == numa_node_id()) {
-			/*
-			 * Use the locally cached objects if possible.
-			 * However ____cache_alloc does not allow fallback
-			 * to other nodes. It may fail while we still have
-			 * objects on other nodes available.
-			 */
-			ptr = ____cache_alloc(cachep, flags);
-		}
-		if (!ptr) {
-			/* ___cache_alloc_node can fall back to other nodes */
-			ptr = ____cache_alloc_node(cachep, flags, nodeid);
-		}
-	} else {
-		/* Node not bootstrapped yet */
-		if (!(flags & __GFP_THISNODE))
-			ptr = fallback_alloc(cachep, flags);
-	}
-
-	local_irq_restore(save_flags);
-	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
-
-	return ptr;
-}
-
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	return __cache_alloc_node(cachep, flags, nodeid,
--
cgit v1.2.3

From 7c5cae368a6c44eac0e817ae130301b65ff446dc Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sat, 10 Feb 2007 01:42:55 -0800
Subject: [PATCH] slab: use parameter passed to cache_reap to determine pointer to work structure

Use the pointer passed to cache_reap to determine the work pointer and
consolidate exit paths.

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index b595323e24a..caa8f87e04e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4029,18 +4029,17 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  * If we cannot acquire the cache chain mutex then just give up - we'll try
  * again on the next iteration.
  */
-static void cache_reap(struct work_struct *unused)
+static void cache_reap(struct work_struct *w)
 {
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;
 	int node = numa_node_id();
+	struct delayed_work *work =
+		container_of(w, struct delayed_work, work);
 
-	if (!mutex_trylock(&cache_chain_mutex)) {
+	if (!mutex_trylock(&cache_chain_mutex))
 		/* Give up. Setup the next iteration. */
-		schedule_delayed_work(&__get_cpu_var(reap_work),
-				round_jiffies_relative(REAPTIMEOUT_CPUC));
-		return;
-	}
+		goto out;
 
 	list_for_each_entry(searchp, &cache_chain, next) {
 		check_irq_on();
@@ -4083,9 +4082,9 @@ next:
 	mutex_unlock(&cache_chain_mutex);
 	next_reap_node();
 	refresh_cpu_vm_stats(smp_processor_id());
+out:
 	/* Set up the next iteration */
-	schedule_delayed_work(&__get_cpu_var(reap_work),
-		round_jiffies_relative(REAPTIMEOUT_CPUC));
+	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
 }
 
 #ifdef CONFIG_PROC_FS
--
cgit v1.2.3

From 4b51d66989218aad731a721b5b28c79bf5388c09 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sat, 10 Feb 2007 01:43:10 -0800
Subject: [PATCH] optional ZONE_DMA: optional ZONE_DMA in the VM

Make ZONE_DMA optional in core code.

- ifdef all code for ZONE_DMA and related definitions following the
  example for ZONE_DMA32 and ZONE_HIGHMEM.

- Without ZONE_DMA, ZONE_HIGHMEM and ZONE_DMA32 we get to a ZONES_SHIFT
  of 0.

- Modify the VM statistics to work correctly without a DMA zone.

- Modify slab to not create DMA slabs if there is no ZONE_DMA.

[akpm@osdl.org: cleanup]
[jdike@addtoit.com: build fix]
[apw@shadowen.org: Simplify calculation of the number of bits we need for ZONES_SHIFT]
Signed-off-by: Christoph Lameter
Cc: Andi Kleen
Cc: "Luck, Tony"
Cc: Kyle McMartin
Cc: Matthew Wilcox
Cc: James Bottomley
Cc: Paul Mundt
Signed-off-by: Andy Whitcroft
Signed-off-by: Jeff Dike
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index caa8f87e04e..348396d691a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -793,8 +793,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	 * has cs_{dma,}cachep==NULL. Thus no special case
 	 * for large kmalloc calls required.
 	 */
+#ifdef CONFIG_ZONE_DMA
 	if (unlikely(gfpflags & GFP_DMA))
 		return csizep->cs_dmacachep;
+#endif
 	return csizep->cs_cachep;
 }
 
@@ -1493,13 +1495,15 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL, NULL);
 		}
-
-		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
+#ifdef CONFIG_ZONE_DMA
+		sizes->cs_dmacachep = kmem_cache_create(
+					names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
 						SLAB_PANIC,
 					NULL, NULL);
+#endif
 		sizes++;
 		names++;
 	}
@@ -2321,7 +2325,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
 	cachep->gfpflags = 0;
-	if (flags & SLAB_CACHE_DMA)
+	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
 		cachep->gfpflags |= GFP_DMA;
 	cachep->buffer_size = size;
 	cachep->reciprocal_buffer_size = reciprocal_value(size);
@@ -2643,10 +2647,12 @@ static void cache_init_objs(struct kmem_cache *cachep,
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
-	if (flags & GFP_DMA)
-		BUG_ON(!(cachep->gfpflags & GFP_DMA));
-	else
-		BUG_ON(cachep->gfpflags & GFP_DMA);
+	if (CONFIG_ZONE_DMA_FLAG) {
+		if (flags & GFP_DMA)
+			BUG_ON(!(cachep->gfpflags & GFP_DMA));
+		else
+			BUG_ON(cachep->gfpflags & GFP_DMA);
+	}
 }
 
 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
--
cgit v1.2.3

From 898552c9d807fe59f3ecaf9c300c109358375c12 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Sat, 10 Feb 2007 01:44:57 -0800
Subject: [PATCH] lockdep: also check for freed locks in kmem_cache_free()

kmem_cache_free() was missing the check for freeing held locks.

Signed-off-by: Ingo Molnar
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 348396d691a..196df70eb8c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3751,6 +3751,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	BUG_ON(virt_to_cache(objp) != cachep);
 
 	local_irq_save(flags);
+	debug_check_no_locks_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
 }
--
cgit v1.2.3

From 72fd4a35a824331d7a0f4168d7576502d95d34b3 Mon Sep 17 00:00:00 2001
From: "Robert P. J. Day"
Date: Sat, 10 Feb 2007 01:45:59 -0800
Subject: [PATCH] Numerous fixes to kernel-doc info in source files.

A variety of (mostly) innocuous fixes to the embedded kernel-doc content
in source files, including:

  * make multi-line initial descriptions single line
  * denote some function names, constants and structs as such
  * change erroneous opening '/*' to '/**' in a few places
  * reword some text for clarity

Signed-off-by: Robert P. J. Day
Cc: "Randy.Dunlap"
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 196df70eb8c..70784b848b6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2520,7 +2520,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * kmem_cache_destroy - delete a cache
  * @cachep: the cache to destroy
  *
- * Remove a struct kmem_cache object from the slab cache.
+ * Remove a &struct kmem_cache object from the slab cache.
  *
  * It is expected this function will be called by a module when it is
  * unloaded. This will remove the cache completely, and avoid a duplicate
--
cgit v1.2.3
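
For context, a minimal usage sketch of the slab interfaces touched by this series, written against the era-specific API visible in the hunks above: the six-argument kmem_cache_create() seen in kmem_cache_init(), kmem_cache_alloc_node() with the node-fallback behaviour described in the kernel-doc added in the "cache alloc cleanups" patch, and kmem_cache_free()/kmem_cache_destroy(). This is not part of any commit above; struct foo, foo_cache and the module names are hypothetical.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/topology.h>

struct foo {
	int bar;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	struct foo *obj;

	/* Six-argument kmem_cache_create() of this era: name, object size,
	 * alignment, flags, constructor, destructor. */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!foo_cache)
		return -ENOMEM;

	/* Ask for an object on the local node; without __GFP_THISNODE the
	 * allocator may fall back to other nodes, per the kernel-doc above. */
	obj = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, numa_node_id());
	if (obj)
		kmem_cache_free(foo_cache, obj);

	return 0;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");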