Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	67
1 file changed, 31 insertions, 36 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 2e71a328aa0..a453383333f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -137,6 +137,7 @@
/* Shouldn't this be in a header file somewhere? */
#define BYTES_PER_WORD sizeof(void *)
+#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
#ifndef cache_line_size
#define cache_line_size() L1_CACHE_BYTES
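As a standalone illustration (not part of the patch), the userspace sketch below shows what REDZONE_ALIGN evaluates to; MAX() stands in for the kernel's type-checked max(), and the printed values depend on the ABI.

#include <stdio.h>

/* Userspace mirror of the macro added above; MAX() stands in for the
 * kernel's max(), __alignof__ is the GCC extension the kernel relies on. */
#define BYTES_PER_WORD	sizeof(void *)
#define MAX(a, b)	((a) > (b) ? (a) : (b))
#define REDZONE_ALIGN	MAX(BYTES_PER_WORD, __alignof__(unsigned long long))

int main(void)
{
	/* On 64-bit both operands are 8; on 32-bit x86 BYTES_PER_WORD is 4
	 * and __alignof__(unsigned long long) is usually 8, so the 64-bit
	 * red-zone markers still get the alignment they need. */
	printf("word = %zu, long long align = %zu, REDZONE_ALIGN = %zu\n",
	       BYTES_PER_WORD, (size_t)__alignof__(unsigned long long),
	       (size_t)REDZONE_ALIGN);
	return 0;
}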
@@ -547,7 +548,7 @@ static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
if (cachep->flags & SLAB_STORE_USER)
return (unsigned long long *)(objp + cachep->buffer_size -
sizeof(unsigned long long) -
- BYTES_PER_WORD);
+ REDZONE_ALIGN);
return (unsigned long long *) (objp + cachep->buffer_size -
sizeof(unsigned long long));
}
@@ -774,7 +775,6 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
*/
BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
- WARN_ON_ONCE(size == 0);
while (size > csizep->cs_size)
csizep++;
@@ -929,7 +929,7 @@ static void next_reap_node(void)
* the CPUs getting into lockstep and contending for the global cache chain
* lock.
*/
-static void __devinit start_cpu_timer(int cpu)
+static void __cpuinit start_cpu_timer(int cpu)
{
struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
@@ -2179,7 +2179,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* above the next power of two: caches with object sizes just above a
* power of two have a significant amount of internal fragmentation.
*/
- if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
+ if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
+ 2 * sizeof(unsigned long long)))
flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
if (!(flags & SLAB_DESTROY_BY_RCU))
flags |= SLAB_POISON;
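The fls() comparison above asks whether adding the debug fields (REDZONE_ALIGN plus two 64-bit red-zone markers, 24 bytes on a 64-bit build) would push the object into the next power-of-two size class. A rough userspace sketch of that check, with the 24-byte overhead assumed:

#include <stdio.h>

/* Minimal fls(): index of the highest set bit, 1-based, 0 for 0.
 * The kernel provides its own; redefined here so the sketch is standalone. */
static int fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* Assumed 64-bit layout: REDZONE_ALIGN (8) + two 8-byte markers. */
	unsigned int overhead = 8 + 2 * sizeof(unsigned long long);
	unsigned int sizes[] = { 4095, 5000, 8184 };

	for (int i = 0; i < 3; i++) {
		unsigned int size = sizes[i];
		int debug = size < 4096 ||
			    fls(size - 1) == fls(size - 1 + overhead);

		printf("size %u: %s\n", size, debug ?
		       "SLAB_RED_ZONE | SLAB_STORE_USER enabled" :
		       "skipped: would cross into the next power-of-two class");
	}
	return 0;
}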
@@ -2220,12 +2221,20 @@ kmem_cache_create (const char *name, size_t size, size_t align,
}
/*
- * Redzoning and user store require word alignment. Note this will be
- * overridden by architecture or caller mandated alignment if either
- * is greater than BYTES_PER_WORD.
+ * Redzoning and user store require word alignment or possibly larger.
+ * Note this will be overridden by architecture or caller mandated
+ * alignment if either is greater than BYTES_PER_WORD.
*/
- if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
- ralign = __alignof__(unsigned long long);
+ if (flags & SLAB_STORE_USER)
+ ralign = BYTES_PER_WORD;
+
+ if (flags & SLAB_RED_ZONE) {
+ ralign = REDZONE_ALIGN;
+ /* If redzoning, ensure that the second redzone is suitably
+ * aligned, by adjusting the object size accordingly. */
+ size += REDZONE_ALIGN - 1;
+ size &= ~(REDZONE_ALIGN - 1);
+ }
/* 2) arch mandated alignment */
if (ralign < ARCH_SLAB_MINALIGN) {
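The two lines that grow and then mask `size` are the usual round-up-to-an-alignment-boundary idiom; a small sketch with REDZONE_ALIGN assumed to be 8, as on a 64-bit build:

#include <stdio.h>

int main(void)
{
	/* REDZONE_ALIGN assumed to be 8. */
	size_t redzone_align = 8;
	size_t sizes[] = { 20, 24, 100 };

	for (int i = 0; i < 3; i++) {
		size_t size = sizes[i];

		/* Same two steps as the patch: overshoot by align - 1,
		 * then clear the low bits. */
		size += redzone_align - 1;
		size &= ~(redzone_align - 1);
		printf("object %zu -> %zu, so the second red zone starts "
		       "on an 8-byte boundary\n", sizes[i], size);
	}
	return 0;
}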
@@ -2262,9 +2271,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
}
if (flags & SLAB_STORE_USER) {
/* user store requires one word storage behind the end of
- * the real object.
+ * the real object. But if the second red zone needs to be
+ * aligned to 64 bits, we must allow that much space.
*/
- size += BYTES_PER_WORD;
+ if (flags & SLAB_RED_ZONE)
+ size += REDZONE_ALIGN;
+ else
+ size += BYTES_PER_WORD;
}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
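Putting the pieces of kmem_cache_create() together for a cache with both SLAB_RED_ZONE and SLAB_STORE_USER: round the object up to REDZONE_ALIGN, add the two red-zone markers (unchanged code outside this hunk), then a full REDZONE_ALIGN slot for the caller address instead of a bare word. A worked sketch with 64-bit values assumed; the fixed overhead is the same 24 bytes the earlier fls() check accounts for.

#include <stdio.h>

int main(void)
{
	/* Assumed 64-bit values: BYTES_PER_WORD = 8, REDZONE_ALIGN = 8. */
	size_t redzone_align = 8;
	size_t obj = 100;	/* hypothetical object size */
	size_t size = obj;

	/* 1) round up so the second red zone is 64-bit aligned */
	size = (size + redzone_align - 1) & ~(redzone_align - 1);

	/* 2) two red-zone markers, one on each side of the object
	 *    (added by code outside this hunk) */
	size += 2 * sizeof(unsigned long long);

	/* 3) user-store slot: REDZONE_ALIGN bytes when red zones are on,
	 *    a single word otherwise */
	size += redzone_align;

	printf("object %zu -> %zu bytes; fixed overhead %zu\n",
	       obj, size, 2 * sizeof(unsigned long long) + redzone_align);
	return 0;
}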
@@ -3539,7 +3552,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
check_irq_off();
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
- if (use_alien_caches && cache_free_alien(cachep, objp))
+ if (cache_free_alien(cachep, objp))
return;
if (likely(ac->avail < ac->limit)) {
@@ -4144,26 +4157,17 @@ static void print_slabinfo_header(struct seq_file *m)
static void *s_start(struct seq_file *m, loff_t *pos)
{
loff_t n = *pos;
- struct list_head *p;
mutex_lock(&cache_chain_mutex);
if (!n)
print_slabinfo_header(m);
- p = cache_chain.next;
- while (n--) {
- p = p->next;
- if (p == &cache_chain)
- return NULL;
- }
- return list_entry(p, struct kmem_cache, next);
+
+ return seq_list_start(&cache_chain, *pos);
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
- struct kmem_cache *cachep = p;
- ++*pos;
- return cachep->next.next == &cache_chain ?
- NULL : list_entry(cachep->next.next, struct kmem_cache, next);
+ return seq_list_next(p, &cache_chain, pos);
}
static void s_stop(struct seq_file *m, void *p)
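s_start()/s_next() (and leaks_start() further down) now delegate the open-coded list walk to the generic seq_file list helpers; those hand back list_head pointers, which is why s_show() and leaks_show() switch to list_entry(). A rough userspace sketch of how the helpers behave, not the kernel implementation:

#include <stddef.h>
#include <stdio.h>

/* Doubly linked list node, shaped like the kernel's struct list_head. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Rough equivalents of the helpers the patch switches to: hand back the
 * pos-th element of the list, or NULL once it is exhausted. */
static struct list_head *seq_list_start(struct list_head *head, long pos)
{
	struct list_head *lh;

	for (lh = head->next; lh != head; lh = lh->next)
		if (pos-- == 0)
			return lh;
	return NULL;
}

static struct list_head *seq_list_next(void *v, struct list_head *head,
				       long *ppos)
{
	struct list_head *lh = ((struct list_head *)v)->next;

	++*ppos;
	return lh == head ? NULL : lh;
}

/* Stand-in for struct kmem_cache with its embedded list node; the caller
 * recovers the container the way s_show() now does with list_entry(). */
struct cache { const char *name; struct list_head next; };

int main(void)
{
	struct cache a = { .name = "cache-a" }, b = { .name = "cache-b" };
	struct list_head chain = { &a.next, &b.next };
	long pos = 0;

	/* Wire up chain -> a -> b -> chain by hand for the demo. */
	a.next.next = &b.next;	a.next.prev = &chain;
	b.next.next = &chain;	b.next.prev = &a.next;

	for (struct list_head *p = seq_list_start(&chain, pos); p;
	     p = seq_list_next(p, &chain, &pos))
		printf("%s\n", container_of(p, struct cache, next)->name);
	return 0;
}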
@@ -4173,7 +4177,7 @@ static void s_stop(struct seq_file *m, void *p)
static int s_show(struct seq_file *m, void *p)
{
- struct kmem_cache *cachep = p;
+ struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
struct slab *slabp;
unsigned long active_objs;
unsigned long num_objs;
@@ -4342,17 +4346,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
static void *leaks_start(struct seq_file *m, loff_t *pos)
{
- loff_t n = *pos;
- struct list_head *p;
-
mutex_lock(&cache_chain_mutex);
- p = cache_chain.next;
- while (n--) {
- p = p->next;
- if (p == &cache_chain)
- return NULL;
- }
- return list_entry(p, struct kmem_cache, next);
+ return seq_list_start(&cache_chain, *pos);
}
static inline int add_caller(unsigned long *n, unsigned long v)
@@ -4417,7 +4412,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
static int leaks_show(struct seq_file *m, void *p)
{
- struct kmem_cache *cachep = p;
+ struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
struct slab *slabp;
struct kmem_list3 *l3;
const char *name;