author		Ingo Molnar <mingo@elte.hu>	2008-07-11 10:46:50 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-07-11 10:46:50 +0200
commit		0c81b2a1448bc6a2a9b2d6469fb0669fb4b25e5b (patch)
tree		6f82579cae6d6e39fa9f837a3c349ded51e19d14 /mm/slub.c
parent		0729fbf3bc70870370b4f43d652f05a468dc68b8 (diff)
parent		70ff05554f91a1edda1f11684da1dbde09e2feea (diff)
Merge branch 'linus' into core/rcu
Conflicts:
	include/linux/rculist.h
	kernel/rcupreempt.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 0987d1cd943..1a427c0ae83 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5,7 +5,7 @@
  * The allocator synchronizes using per slab locks and only
  * uses a centralized lock to manage a pool of partial slabs.
  *
- * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
+ * (C) 2007 SGI, Christoph Lameter
  */
 
 #include <linux/mm.h>
@@ -2995,8 +2995,6 @@ void __init kmem_cache_init(void)
 		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
 		caches++;
-	}
-	if (KMALLOC_MIN_SIZE <= 128) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
 		caches++;
@@ -3026,6 +3024,16 @@ void __init kmem_cache_init(void)
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
 
+	if (KMALLOC_MIN_SIZE == 128) {
+		/*
+		 * The 192 byte sized cache is not used if the alignment
+		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
+		 * instead.
+		 */
+		for (i = 128 + 8; i <= 192; i += 8)
+			size_index[(i - 1) / 8] = 8;
+	}
+
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
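To make the effect of the added hunk concrete, here is a small, self-contained sketch of how SLUB's size_index[] table routes small kmalloc() requests to a cache index: index n normally selects the 2^n byte cache, so 8 means kmalloc-256, while indices 1 and 2 are the odd-sized 96 and 192 byte caches. The table contents mirror the ones in slub.c, but the standalone program, its main() and the printf output are illustrative additions rather than kernel code; treat it as a sketch of the idea, not the real lookup path.

#include <stdio.h>

#define KMALLOC_MIN_SIZE	128	/* assume a 128 byte minimum alignment */
#define KMALLOC_SHIFT_LOW	7	/* 2^7 == 128: smallest power-of-two cache */

/* Maps (size - 1) / 8 for request sizes 1..192 to a kmalloc cache index. */
static unsigned char size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6,		/* 1..64 bytes */
	1, 1, 1, 1,			/* 65..96 bytes -> kmalloc-96 */
	7, 7, 7, 7,			/* 97..128 bytes -> kmalloc-128 */
	2, 2, 2, 2, 2, 2, 2, 2,		/* 129..192 bytes -> kmalloc-192 */
};

int main(void)
{
	int i;

	/* What kmem_cache_init() already did: round every request below the
	 * minimum alignment up to the 128 byte cache. */
	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;

	/* The new hunk: with 128 byte alignment the 192 byte cache is not
	 * used, so send 136..192 byte requests to kmalloc-256 (index 8). */
	if (KMALLOC_MIN_SIZE == 128)
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[(i - 1) / 8] = 8;

	/* A 150 byte request now resolves to index 8 (kmalloc-256)
	 * instead of index 2 (kmalloc-192). */
	printf("size 150 -> cache index %d\n", size_index[(150 - 1) / 8]);
	return 0;
}

The point of the redirect: with a 128 byte minimum alignment, a 192 byte object would have to be padded out to the next 128 byte boundary (256 bytes) anyway, so a separate kmalloc-192 cache buys nothing in that configuration; steering those sizes to kmalloc-256 avoids keeping an effectively duplicate cache around.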