Diffstat (limited to 'mm')
-rw-r--r--  mm/memory_hotplug.c |  8
-rw-r--r--  mm/page_alloc.c     | 30
-rw-r--r--  mm/shmem.c          |  2
-rw-r--r--  mm/slab.c           | 46
-rw-r--r--  mm/sparse.c         |  9
-rw-r--r--  mm/vmscan.c         |  2
6 files changed, 59 insertions(+), 38 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1ae2b2cc3a5..70df5c0d957 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -91,8 +91,8 @@ static void grow_zone_span(struct zone *zone,
if (start_pfn < zone->zone_start_pfn)
zone->zone_start_pfn = start_pfn;
- if (end_pfn > old_zone_end_pfn)
- zone->spanned_pages = end_pfn - zone->zone_start_pfn;
+ zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
+ zone->zone_start_pfn;
zone_span_writeunlock(zone);
}
@@ -106,8 +106,8 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
if (start_pfn < pgdat->node_start_pfn)
pgdat->node_start_pfn = start_pfn;
- if (end_pfn > old_pgdat_end_pfn)
- pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;
+ pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
+ pgdat->node_start_pfn;
}
int online_pages(unsigned long pfn, unsigned long nr_pages)
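The two memory_hotplug.c hunks fold the old "grow only if end_pfn is larger" branch into a single expression: max() of the old and new end pfn, minus the (possibly lowered) start pfn, covers growth in either direction. A minimal userspace sketch of the same arithmetic, with illustrative pfn values and the zone structure and span locking elided:

#include <stdio.h>

static unsigned long zone_start_pfn = 1024;	/* illustrative values */
static unsigned long spanned_pages  = 2048;

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

/* Same shape as the patched grow_zone_span(). */
static void grow_span(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_end_pfn = zone_start_pfn + spanned_pages;

	if (start_pfn < zone_start_pfn)
		zone_start_pfn = start_pfn;

	/* One expression handles both "grow downward" and "grow upward". */
	spanned_pages = max_ul(old_end_pfn, end_pfn) - zone_start_pfn;
}

int main(void)
{
	grow_span(512, 1536);	/* grows downward; the end stays put */
	printf("start=%lu spanned=%lu\n", zone_start_pfn, spanned_pages);
	return 0;
}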
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ea77c999047..253a450c400 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
#include <linux/mempolicy.h>
#include <asm/tlbflush.h>
+#include <asm/div64.h>
#include "internal.h"
/*
@@ -950,7 +951,7 @@ restart:
goto got_pg;
do {
- if (cpuset_zone_allowed(*z, gfp_mask))
+ if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL))
wakeup_kswapd(*z, order);
} while (*(++z));
@@ -969,7 +970,8 @@ restart:
alloc_flags |= ALLOC_HARDER;
if (gfp_mask & __GFP_HIGH)
alloc_flags |= ALLOC_HIGH;
- alloc_flags |= ALLOC_CPUSET;
+ if (wait)
+ alloc_flags |= ALLOC_CPUSET;
/*
* Go through the zonelist again. Let __GFP_HIGH and allocations
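Both cpuset hunks tighten when allocations may escape a cpuset: kswapd is woken only for zones the task could allocate from under the hardwall rules, and ALLOC_CPUSET is now applied only to __GFP_WAIT allocations, so atomic allocations can dip outside the cpuset rather than fail. A toy model of the flag selection, with made-up flag values standing in for the real gfp/alloc constants:

#include <stdio.h>
#include <stdbool.h>

/* Made-up stand-ins for the kernel's flag values. */
#define __GFP_WAIT	0x01u
#define __GFP_HIGH	0x02u

#define ALLOC_HIGH	0x10u
#define ALLOC_CPUSET	0x20u

/* Same shape as the patched flag setup in __alloc_pages(). */
static unsigned int pick_alloc_flags(unsigned int gfp_mask)
{
	unsigned int alloc_flags = 0;
	bool wait = gfp_mask & __GFP_WAIT;

	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	/* The patched behaviour: only blocking allocations are confined
	 * to the cpuset; atomic ones may fall back outside it. */
	if (wait)
		alloc_flags |= ALLOC_CPUSET;
	return alloc_flags;
}

int main(void)
{
	printf("atomic high: %#x\n", pick_alloc_flags(__GFP_HIGH));
	printf("blocking:    %#x\n", pick_alloc_flags(__GFP_WAIT));
	return 0;
}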
@@ -2123,14 +2125,22 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
/* ia64 gets its own node_mem_map, before this, without bootmem */
if (!pgdat->node_mem_map) {
- unsigned long size;
+ unsigned long size, start, end;
struct page *map;
- size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+ /*
+ * The zone's endpoints aren't required to be MAX_ORDER
+ * aligned but the node_mem_map endpoints must be in order
+ * for the buddy allocator to function correctly.
+ */
+ start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
+ end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+ end = ALIGN(end, MAX_ORDER_NR_PAGES);
+ size = (end - start) * sizeof(struct page);
map = alloc_remap(pgdat->node_id, size);
if (!map)
map = alloc_bootmem_node(pgdat, size);
- pgdat->node_mem_map = map;
+ pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
}
#ifdef CONFIG_FLATMEM
/*
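The alloc_node_mem_map() hunk rounds the map's start down and its end up to MAX_ORDER boundaries, then offsets node_mem_map so pfn-to-page arithmetic still lands on the node's first pfn. The rounding can be sketched standalone, with a made-up MAX_ORDER_NR_PAGES (the real value is arch- and config-dependent, but always a power of two):

#include <stdio.h>

#define MAX_ORDER_NR_PAGES 1024UL	/* illustrative only */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long node_start_pfn = 1000, node_spanned_pages = 5000;

	/* Round down/up to MAX_ORDER boundaries, as the patch does, so the
	 * buddy allocator can always address fully aligned blocks. */
	unsigned long start = node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	unsigned long end = ALIGN(node_start_pfn + node_spanned_pages,
				  MAX_ORDER_NR_PAGES);

	printf("map covers pfns [%lu, %lu): %lu pages\n",
	       start, end, end - start);
	/* node_mem_map = map + (node_start_pfn - start) keeps the map
	 * pointing at the node's own first struct page. */
	return 0;
}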
@@ -2566,9 +2576,11 @@ void setup_per_zone_pages_min(void)
}
for_each_zone(zone) {
- unsigned long tmp;
+ u64 tmp;
+
spin_lock_irqsave(&zone->lru_lock, flags);
- tmp = (pages_min * zone->present_pages) / lowmem_pages;
+ tmp = (u64)pages_min * zone->present_pages;
+ do_div(tmp, lowmem_pages);
if (is_highmem(zone)) {
/*
* __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -2595,8 +2607,8 @@ void setup_per_zone_pages_min(void)
zone->pages_min = tmp;
}
- zone->pages_low = zone->pages_min + tmp / 4;
- zone->pages_high = zone->pages_min + tmp / 2;
+ zone->pages_low = zone->pages_min + (tmp >> 2);
+ zone->pages_high = zone->pages_min + (tmp >> 1);
spin_unlock_irqrestore(&zone->lru_lock, flags);
}
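The last two hunks make the per-zone watermark math safe on 32-bit: the product pages_min * present_pages can overflow an unsigned long, so it is computed in a u64 and divided with do_div() (a raw 64-bit division in the kernel would pull in libgcc helpers on some 32-bit architectures), and the tmp/4 and tmp/2 become shifts now that tmp is 64-bit. A userspace sketch with illustrative numbers, using plain division in place of do_div():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pages_min = 1448, present_pages = 224000;
	uint64_t lowmem_pages = 224000;		/* all values illustrative */

	uint64_t tmp = pages_min * present_pages;	/* 64-bit: no overflow */
	tmp /= lowmem_pages;			/* stands in for do_div(tmp, ...) */

	printf("pages_min  = %llu\n", (unsigned long long)tmp);
	printf("pages_low  = %llu\n", (unsigned long long)(tmp + (tmp >> 2)));
	printf("pages_high = %llu\n", (unsigned long long)(tmp + (tmp >> 1)));
	return 0;
}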
diff --git a/mm/shmem.c b/mm/shmem.c
index 4c5e68e4e9a..1e43c8a865b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1780,6 +1780,7 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
if (!simple_empty(dentry))
return -ENOTEMPTY;
+ dentry->d_inode->i_nlink--;
dir->i_nlink--;
return shmem_unlink(dir, dentry);
}
@@ -2102,6 +2103,7 @@ static int shmem_fill_super(struct super_block *sb,
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = TMPFS_MAGIC;
sb->s_op = &shmem_ops;
+ sb->s_time_gran = 1;
inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
if (!inode)
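The second shmem hunk sets s_time_gran to 1, declaring nanosecond timestamp granularity for tmpfs inodes. The first fixes rmdir's link accounting: the removed directory's own count must drop too, not just the parent's. A rough toy model of the bookkeeping, with VFS details elided and the classic Unix convention that an empty directory holds nlink == 2 ("." plus the parent's entry):

#include <stdio.h>

struct toy_inode { int i_nlink; };

static void toy_unlink(struct toy_inode *victim)
{
	victim->i_nlink--;	/* the parent's directory entry goes away */
}

/* Same ordering as the patched shmem_rmdir(); the victim must end at 0. */
static void toy_rmdir(struct toy_inode *dir, struct toy_inode *victim)
{
	victim->i_nlink--;	/* the added line: drop the "." self-link */
	dir->i_nlink--;		/* victim's ".." no longer pins the parent */
	toy_unlink(victim);	/* as before, drops the final link */
}

int main(void)
{
	struct toy_inode parent = { 3 };	/* ".", "..", child's ".." */
	struct toy_inode child  = { 2 };	/* ".", parent's entry */

	toy_rmdir(&parent, &child);
	printf("parent=%d child=%d\n", parent.i_nlink, child.i_nlink); /* 2 0 */
	return 0;
}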
diff --git a/mm/slab.c b/mm/slab.c
index c32af7e7581..f1b644eb39d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -207,11 +207,6 @@ typedef unsigned int kmem_bufctl_t;
#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
-/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in cache_grow().
- */
-static unsigned long offslab_limit;
-
/*
* struct slab
*
@@ -700,6 +695,14 @@ static enum {
FULL
} g_cpucache_up;
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+ return g_cpucache_up == FULL;
+}
+
static DEFINE_PER_CPU(struct work_struct, reap_work);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
@@ -1348,12 +1351,6 @@ void __init kmem_cache_init(void)
NULL, NULL);
}
- /* Inc off-slab bufctl limit until the ceiling is hit. */
- if (!(OFF_SLAB(sizes->cs_cachep))) {
- offslab_limit = sizes->cs_size - sizeof(struct slab);
- offslab_limit /= sizeof(kmem_bufctl_t);
- }
-
sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
@@ -1772,6 +1769,7 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
static size_t calculate_slab_order(struct kmem_cache *cachep,
size_t size, size_t align, unsigned long flags)
{
+ unsigned long offslab_limit;
size_t left_over = 0;
int gfporder;
@@ -1783,9 +1781,18 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
if (!num)
continue;
- /* More than offslab_limit objects will cause problems */
- if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
- break;
+ if (flags & CFLGS_OFF_SLAB) {
+ /*
+ * Max number of objs-per-slab for caches which
+ * use off-slab slabs. Needed to avoid a possible
+ * looping condition in cache_grow().
+ */
+ offslab_limit = size - sizeof(struct slab);
+ offslab_limit /= sizeof(kmem_bufctl_t);
+
+ if (num > offslab_limit)
+ break;
+ }
/* Found something acceptable - save it away */
cachep->num = num;
@@ -2192,11 +2199,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
check_irq_on();
for_each_online_node(node) {
l3 = cachep->nodelists[node];
- if (l3) {
+ if (l3 && l3->alien)
+ drain_alien_cache(cachep, l3->alien);
+ }
+
+ for_each_online_node(node) {
+ l3 = cachep->nodelists[node];
+ if (l3)
drain_array(cachep, l3, l3->shared, 1, node);
- if (l3->alien)
- drain_alien_cache(cachep, l3->alien);
- }
}
}
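The slab.c changes replace the boot-time global offslab_limit with a per-cache computation in calculate_slab_order(), add slab_is_available() for early-boot callers (used by sparse.c below), and split drain_cpu_caches() so alien caches are drained on every node before any shared arrays. The limit computation itself is simple enough to sketch standalone, with illustrative sizes standing in for struct slab and kmem_bufctl_t:

#include <stdio.h>

/* Illustrative sizes; the real ones depend on arch and config. */
#define SLAB_STRUCT_SIZE 32UL	/* stand-in for sizeof(struct slab) */
#define BUFCTL_SIZE	 4UL	/* stand-in for sizeof(kmem_bufctl_t) */

/* Same formula the patch computes per cache: how many bufctl entries fit
 * after the struct slab header in an object of the given size. */
static unsigned long offslab_limit(unsigned long size)
{
	return (size - SLAB_STRUCT_SIZE) / BUFCTL_SIZE;
}

int main(void)
{
	printf("size  128 -> at most %lu objects\n", offslab_limit(128));
	printf("size 4096 -> at most %lu objects\n", offslab_limit(4096));
	return 0;
}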
diff --git a/mm/sparse.c b/mm/sparse.c
index d7c32de99ee..100040c0dfb 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,7 @@ static struct mem_section *sparse_index_alloc(int nid)
unsigned long array_size = SECTIONS_PER_ROOT *
sizeof(struct mem_section);
- if (system_state == SYSTEM_RUNNING)
+ if (slab_is_available())
section = kmalloc_node(array_size, GFP_KERNEL, nid);
else
section = alloc_bootmem_node(NODE_DATA(nid), array_size);
@@ -87,11 +87,8 @@ int __section_nr(struct mem_section* ms)
unsigned long root_nr;
struct mem_section* root;
- for (root_nr = 0;
- root_nr < NR_MEM_SECTIONS;
- root_nr += SECTIONS_PER_ROOT) {
- root = __nr_to_section(root_nr);
-
+ for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
+ root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
if (!root)
continue;
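sparse_index_alloc() now asks the slab allocator directly whether it is ready instead of inferring that from system_state, and __section_nr() iterates root indices rather than stepping section numbers by SECTIONS_PER_ROOT. The two loop shapes visit the same roots; a toy comparison with made-up constants:

#include <stdio.h>

#define SECTIONS_PER_ROOT 16UL	/* made-up values for illustration */
#define NR_SECTION_ROOTS   4UL
#define NR_MEM_SECTIONS   (NR_SECTION_ROOTS * SECTIONS_PER_ROOT)

int main(void)
{
	unsigned long root_nr;

	/* Removed style: step section numbers by SECTIONS_PER_ROOT. */
	for (root_nr = 0; root_nr < NR_MEM_SECTIONS;
	     root_nr += SECTIONS_PER_ROOT)
		printf("%lu ", root_nr);
	printf("\n");

	/* New style: iterate root indices, scale back to section numbers. */
	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++)
		printf("%lu ", root_nr * SECTIONS_PER_ROOT);
	printf("\n");
	return 0;
}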
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4649a63a8cb..440a733fe2e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1061,7 +1061,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
loop_again:
total_scanned = 0;
nr_reclaimed = 0;
- sc.may_writepage = !laptop_mode,
+ sc.may_writepage = !laptop_mode;
sc.nr_mapped = read_page_state(nr_mapped);
inc_page_state(pageoutrun);
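The vmscan.c change is a one-character typo fix: a trailing comma had turned two statements into a single comma-expression. It compiled and even behaved identically here, which is exactly why it lingered. A minimal reproduction of the pattern:

#include <stdio.h>

int main(void)
{
	int may_writepage, nr_mapped;

	/* Meant as two statements; the comma operator fuses them into one
	 * expression. Both assignments still happen, so nothing breaks,
	 * and the typo goes unnoticed. */
	may_writepage = 1,
	nr_mapped = 42;

	printf("%d %d\n", may_writepage, nr_mapped);
	return 0;
}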