From b773ad73690b5f34eee0c76f4273ac6fcbd88f82 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Tue, 4 Mar 2008 11:10:17 -0800
Subject: slub statistics: Fix check for DEACTIVATE_REMOTE_FREES

The remote frees are in the freelist of the page and not in the percpu
freelist.

Reviewed-by: Pekka Enberg
Signed-off-by: Christoph Lameter
---
 mm/slub.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/slub.c b/mm/slub.c
index 0863fd38a5c..a96e11c77fd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1368,7 +1368,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	struct page *page = c->page;
 	int tail = 1;
 
-	if (c->freelist)
+	if (page->freelist)
 		stat(c, DEACTIVATE_REMOTE_FREES);
 	/*
 	 * Merge cpu freelist into slab freelist. Typically we get here
--
cgit v1.2.3

From 9ac33b2b749e9539e84bbb1a41f97b066c4bd757 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Tue, 4 Mar 2008 12:24:22 -0800
Subject: slab numa fallback logic: Do not pass unfiltered flags to page allocator

The NUMA fallback logic should be passing local_flags to kmem_getpages()
and not simply the flags passed in.

Reviewed-by: Pekka Enberg
Signed-off-by: Christoph Lameter
---
 mm/slab.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/slab.c b/mm/slab.c
index 473e6c2eaef..5d16c8a3049 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3280,7 +3280,7 @@ retry:
 		if (local_flags & __GFP_WAIT)
 			local_irq_enable();
 		kmem_flagcheck(cache, flags);
-		obj = kmem_getpages(cache, flags, -1);
+		obj = kmem_getpages(cache, local_flags, -1);
 		if (local_flags & __GFP_WAIT)
 			local_irq_disable();
 		if (obj) {
--
cgit v1.2.3

From 1c61fc40fc264059ff41a614ed2d899127288281 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Wed, 5 Mar 2008 13:58:17 -0800
Subject: slab - use angle brackets for include of kmalloc_sizes.h

Make them all use angle brackets and the directory name.

Acked-by: Pekka Enberg
Signed-off-by: Joe Perches
Signed-off-by: Christoph Lameter
---
 include/linux/slab_def.h | 4 ++--
 mm/slab.c                | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index fcc48096ee6..39c3a5eb8eb 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -41,7 +41,7 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 	{
 		extern void __you_cannot_kmalloc_that_much(void);
@@ -75,7 +75,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 	{
 		extern void __you_cannot_kmalloc_that_much(void);
diff --git a/mm/slab.c b/mm/slab.c
index 5d16c8a3049..f7faff72cf5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -333,7 +333,7 @@ static __always_inline int index_of(const size_t size)
 			return i; \
 		else \
 			i++;
-#include "linux/kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 		__bad_size();
 	} else
--
cgit v1.2.3

From b6210386787728b84db25adc4f1eba70440a4c73 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Wed, 5 Mar 2008 14:05:56 -0800
Subject: slub: Do not cross cacheline boundaries for very small objects

SLUB should pack even small objects nicely into cachelines if that is
what has been asked for. Use the same algorithm as SLAB for this.

The effect of this patch for a system with a cacheline size of 64
bytes is that the 24 byte sized slab caches will now put exactly 2
objects into a cacheline instead of 3 with some overlap into the next
cacheline. This reduces the object density in a 4k slab from 170 to
128 objects (same as SLAB).

Signed-off-by: Nick Piggin
Signed-off-by: Christoph Lameter
---
 mm/slub.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index a96e11c77fd..96d63eb3ab1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1856,12 +1856,15 @@ static unsigned long calculate_alignment(unsigned long flags,
 	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
 	 */
-	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > cache_line_size() / 2)
-		return max_t(unsigned long, align, cache_line_size());
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}
 
 	if (align < ARCH_SLAB_MINALIGN)
-		return ARCH_SLAB_MINALIGN;
+		align = ARCH_SLAB_MINALIGN;
 
 	return ALIGN(align, sizeof(void *));
 }
--
cgit v1.2.3
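The halving loop above is easy to check against the numbers in the commit
message. The following standalone userspace sketch reproduces the arithmetic;
it is an illustration only, not kernel code: CACHE_LINE, MINALIGN and
calc_align are assumed stand-ins for cache_line_size(), ARCH_SLAB_MINALIGN
and calculate_alignment().

#include <stdio.h>

#define CACHE_LINE 64UL	/* assumed 64-byte cacheline, as in the commit message */
#define MINALIGN   8UL	/* stand-in for ARCH_SLAB_MINALIGN */

static unsigned long calc_align(unsigned long align, unsigned long size)
{
	unsigned long ralign = CACHE_LINE;

	/* Halve the alignment while two or more objects still fit in it,
	 * so small objects pack densely but never straddle a cacheline. */
	while (size <= ralign / 2)
		ralign /= 2;
	if (ralign > align)
		align = ralign;
	if (align < MINALIGN)
		align = MINALIGN;
	/* Round up to a multiple of sizeof(void *), as ALIGN() does. */
	return (align + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
}

int main(void)
{
	unsigned long a = calc_align(sizeof(void *), 24);

	/* Prints 32: each 24-byte object occupies a 32-byte slot, so two
	 * objects share a 64-byte line and a 4k slab holds 4096/32 = 128. */
	printf("alignment for 24-byte objects: %lu\n", a);
	return 0;
}

With the old code, a 24-byte SLAB_HWCACHE_ALIGN cache fell through to
pointer-sized alignment (24-byte slots, 4096/24 = 170 objects per 4k slab),
so objects regularly crossed cacheline boundaries.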
From 6d2144d355d2a532e5cc3fc12a6ba2a8d4ef15e4 Mon Sep 17 00:00:00 2001
From: Joe Korty
Date: Wed, 5 Mar 2008 15:04:59 -0800
Subject: slab: NUMA slab allocator migration bugfix

The NUMA slab allocator (specifically, cache_alloc_refill) is not
refreshing its local copies of what cpu and what numa node it is on,
when it drops and reacquires the irq block that it inherited from its
caller. As a result those values become invalid if an attempt to
migrate the process to another numa node occurred while the irq block
had been dropped.

The solution is to make cache_alloc_refill reload these variables
whenever it drops and reacquires the irq block.

The error is very difficult to hit. When it does occur, one gets the
following oops + stack traceback bits in check_spinlock_acquired:

	kernel BUG at mm/slab.c:2417
	cache_alloc_refill+0xe6
	kmem_cache_alloc+0xd0
	...

This patch was developed against 2.6.23, ported to and compile-tested
only against 2.6.25-rc4.

Signed-off-by: Joe Korty
Signed-off-by: Christoph Lameter
---
 mm/slab.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index f7faff72cf5..e6c698f5567 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2964,11 +2964,10 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	struct array_cache *ac;
 	int node;
 
-	node = numa_node_id();
-
+retry:
 	check_irq_off();
+	node = numa_node_id();
 	ac = cpu_cache_get(cachep);
-retry:
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
 		/*
--
cgit v1.2.3
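The pattern behind this fix is general: anything derived from the current cpu
or node (numa_node_id(), cpu_cache_get()) is only stable while irqs are off,
and must be resampled after any window in which the task could have migrated.
Below is a minimal userspace sketch of that idea; all names in it are
hypothetical stand-ins, and the explicit node comparison exists only to make
the staleness observable, whereas the real patch simply moves the
numa_node_id() call inside the existing retry path.

#include <stdio.h>

static int task_node;			/* stand-in for numa_node_id() */

static void window_with_irqs_enabled(void)
{
	/* While irqs are enabled the scheduler may migrate the task;
	 * simulate a move to numa node 1. */
	task_node = 1;
}

/* Buggy shape: the node is sampled once and used after the window. */
static void refill_buggy(void)
{
	int node = task_node;

	window_with_irqs_enabled();
	printf("buggy: allocating for node %d while on node %d\n",
	       node, task_node);	/* mismatch -> wrong-node allocation */
}

/* Fixed shape: resample the node after every migration window. */
static void refill_fixed(void)
{
	int node;
retry:
	node = task_node;		/* reloaded on each retry pass */
	window_with_irqs_enabled();
	if (node != task_node)
		goto retry;		/* stale value: take the retry path */
	printf("fixed: allocating for node %d\n", node);
}

int main(void)
{
	refill_buggy();
	task_node = 0;
	refill_fixed();
	return 0;
}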
From 989a7241df87526bfef0396567e71ebe53a84ae4 Mon Sep 17 00:00:00 2001
From: Itaru Kitayama
Date: Wed, 5 Mar 2008 15:07:30 -0800
Subject: slub: fix typo in Documentation/vm/slub.txt

slub_debug=,dentry is correct, not dentry_cache.

Signed-off-by: Itaru Kitayama
Signed-off-by: Christoph Lameter
---
 Documentation/vm/slub.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index dcf8bcf846d..7c13f22a0c9 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -50,14 +50,14 @@ F.e. in order to boot just with sanity checks and red zoning one would specify:
 
 Trying to find an issue in the dentry cache? Try
 
-slub_debug=,dentry_cache
+slub_debug=,dentry
 
 to only enable debugging on the dentry cache.
 
 Red zoning and tracking may realign the slab.  We can just apply sanity checks
 to the dentry cache with
 
-slub_debug=F,dentry_cache
+slub_debug=F,dentry
 
 In case you forgot to enable debugging on the kernel command line: It is
 possible to enable debugging manually when the kernel is up. Look at the
--
cgit v1.2.3
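For reference, the single-letter codes that may appear before the comma select
individual debug features; Documentation/vm/slub.txt of this era lists F
(sanity checks), Z (red zoning), P (poisoning), U (user tracking) and T
(tracing). They can be combined and scoped to a single cache, for example

	slub_debug=FZ,dentry

which enables sanity checks plus red zoning for the dentry cache only, while a
bare slub_debug enables full debugging for all slabs.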