Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--	include/linux/slub_def.h	21
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aac..e37b6aa8a9f 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -46,7 +46,6 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
-	unsigned long min_partial;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
@@ -89,6 +88,7 @@ struct kmem_cache {
 	void (*ctor)(void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
+	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SLUB_DEBUG
@@ -121,10 +121,23 @@ struct kmem_cache {
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
 /*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
+
+/*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -212,7 +225,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -234,7 +247,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
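
To make the new constants concrete: with the common 4 KiB page size (PAGE_SHIFT = 12), SLUB_MAX_SIZE works out to 8192 bytes and SLUB_PAGE_SHIFT to 14, so kmalloc_caches[] covers the power-of-two caches up to 2^13 = 8192 = SLUB_MAX_SIZE, and only requests above that fall through to kmalloc_large() and the page allocator. The following is a minimal user-space sketch of that dispatch, not kernel code: PAGE_SHIFT = 12 is an assumption, and kmalloc_index_model() is a hypothetical stand-in for the kernel's kmalloc_index(), which additionally special-cases the 96- and 192-byte caches.

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed: 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SLUB_MAX_SIZE	(2 * PAGE_SIZE)		/* 8192 */
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)	/* array size 14, indices 0..13 */

/*
 * Round size up to a power of two and return its log2: the index into
 * kmalloc_caches[] (ignoring the real 96/192-byte special cases).
 */
static int kmalloc_index_model(unsigned long size)
{
	int idx = 0;

	while ((1UL << idx) < size)
		idx++;
	return idx;
}

int main(void)
{
	unsigned long sizes[] = { 8, 192, 4096, 8192, 8193, 16384 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		if (sizes[i] > SLUB_MAX_SIZE)
			printf("%5lu bytes -> kmalloc_large() / page allocator\n",
			       sizes[i]);
		else
			printf("%5lu bytes -> kmalloc_caches[%d]\n",
			       sizes[i], kmalloc_index_model(sizes[i]));
	}
	return 0;
}

Under the old size > PAGE_SIZE cutoff, the 8192-byte request above would already have gone to the page allocator; after this patch it still lands in kmalloc_caches[13], i.e. on the SLUB fastpath.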