Diffstat (limited to 'mm')

 mm/filemap_xip.c    |   9
 mm/hugetlb.c        |  13
 mm/memory.c         |   8
 mm/page-writeback.c |   9
 mm/page_alloc.c     |   4
 mm/quicklist.c      |  12
 mm/slab.c           |   2
 mm/slub.c           | 116
 8 files changed, 140 insertions(+), 33 deletions(-)
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index e233fff61b4..f874ae818ad 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -25,14 +25,15 @@ static struct page *__xip_sparse_page;
static struct page *xip_sparse_page(void)
{
if (!__xip_sparse_page) {
- unsigned long zeroes = get_zeroed_page(GFP_HIGHUSER);
- if (zeroes) {
+ struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+
+ if (page) {
static DEFINE_SPINLOCK(xip_alloc_lock);
spin_lock(&xip_alloc_lock);
if (!__xip_sparse_page)
- __xip_sparse_page = virt_to_page(zeroes);
+ __xip_sparse_page = page;
else
- free_page(zeroes);
+ __free_page(page);
spin_unlock(&xip_alloc_lock);
}
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7224a4f0710..e0fda156f02 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -418,9 +418,14 @@ static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
if (free_huge_pages > resv_huge_pages)
page = dequeue_huge_page(vma, addr);
spin_unlock(&hugetlb_lock);
- if (!page)
+ if (!page) {
page = alloc_buddy_huge_page(vma, addr);
- return page ? page : ERR_PTR(-VM_FAULT_OOM);
+ if (!page) {
+ hugetlb_put_quota(vma->vm_file->f_mapping, 1);
+ return ERR_PTR(-VM_FAULT_OOM);
+ }
+ }
+ return page;
}
static struct page *alloc_huge_page(struct vm_area_struct *vma,
@@ -1206,8 +1211,10 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
if (hugetlb_get_quota(inode->i_mapping, chg))
return -ENOSPC;
ret = hugetlb_acct_memory(chg);
- if (ret < 0)
+ if (ret < 0) {
+ hugetlb_put_quota(inode->i_mapping, chg);
return ret;
+ }
region_add(&inode->i_mapping->private_list, from, to);
return 0;
}
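
The two hugetlb.c hunks release the hugetlbfs quota that was already charged when a later step fails, instead of leaking it. The charge itself is taken when a hugetlbfs file is mmap()ed, which the userspace sketch below exercises; the /dev/hugepages mount point and the 2 MiB huge page size are assumptions for the demo, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define HUGETLB_FILE "/dev/hugepages/demo"  /* hypothetical hugetlbfs path */
#define MAP_LEN (2UL * 1024 * 1024)         /* assumes 2 MiB huge pages    */

int main(void)
{
	int fd = open(HUGETLB_FILE, O_CREAT | O_RDWR, 0600);
	char *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* hugetlbfs charges quota and reserves pages here, at mmap() time */
	p = mmap(NULL, MAP_LEN, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");	/* ENOMEM when pages or quota run out */
		close(fd);
		unlink(HUGETLB_FILE);
		return 1;
	}
	p[0] = 1;		/* touch the page so it is actually faulted in */
	munmap(p, MAP_LEN);
	close(fd);
	unlink(HUGETLB_FILE);
	return 0;
}
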
diff --git a/mm/memory.c b/mm/memory.c
index 4bf0b6d0eb2..4b0144b24c1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -392,6 +392,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
return NULL;
}
+#ifdef CONFIG_DEBUG_VM
/*
* Add some anal sanity checks for now. Eventually,
* we should just do "return pfn_to_page(pfn)", but
@@ -402,6 +403,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
print_bad_pte(vma, pte, addr);
return NULL;
}
+#endif
/*
* NOTE! We still have PageReserved() pages in the page
@@ -1668,6 +1670,9 @@ gotten:
unlock:
pte_unmap_unlock(page_table, ptl);
if (dirty_page) {
+ if (vma->vm_file)
+ file_update_time(vma->vm_file);
+
/*
* Yes, Virginia, this is actually required to prevent a race
* with clear_page_dirty_for_io() from clearing the page dirty
@@ -2341,6 +2346,9 @@ out_unlocked:
if (anon)
page_cache_release(vmf.page);
else if (dirty_page) {
+ if (vma->vm_file)
+ file_update_time(vma->vm_file);
+
set_page_dirty_balance(dirty_page, page_mkwrite);
put_page(dirty_page);
}
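
Besides wrapping the extra pfn sanity checks in CONFIG_DEBUG_VM, the memory.c hunks call file_update_time() on the fault paths that dirty a shared file mapping, so writes made through mmap() show up in the file's timestamps. A small userspace check of that behaviour (the /tmp path is an arbitrary choice for the demo):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/tmp/mmap-mtime-demo";
	struct stat before, after;
	char *p;
	int fd = open(path, O_CREAT | O_RDWR | O_TRUNC, 0600);

	if (fd < 0 || ftruncate(fd, 4096) < 0) {
		perror("setup");
		return 1;
	}
	stat(path, &before);
	sleep(1);			/* make a timestamp change observable */

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xab, 4096);		/* dirty the file through the mapping */
	msync(p, 4096, MS_SYNC);
	munmap(p, 4096);

	stat(path, &after);
	printf("mtime %s\n",
	       after.st_mtime > before.st_mtime ? "advanced" : "unchanged");
	close(fd);
	unlink(path);
	return 0;
}
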
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d55cfcae2ef..3d3848fa632 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -558,7 +558,6 @@ static void background_writeout(unsigned long _min_pages)
global_page_state(NR_UNSTABLE_NFS) < background_thresh
&& min_pages <= 0)
break;
- wbc.more_io = 0;
wbc.encountered_congestion = 0;
wbc.nr_to_write = MAX_WRITEBACK_PAGES;
wbc.pages_skipped = 0;
@@ -566,9 +565,8 @@ static void background_writeout(unsigned long _min_pages)
min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
/* Wrote less than expected */
- if (wbc.encountered_congestion || wbc.more_io)
- congestion_wait(WRITE, HZ/10);
- else
+ congestion_wait(WRITE, HZ/10);
+ if (!wbc.encountered_congestion)
break;
}
}
@@ -633,12 +631,11 @@ static void wb_kupdate(unsigned long arg)
global_page_state(NR_UNSTABLE_NFS) +
(inodes_stat.nr_inodes - inodes_stat.nr_unused);
while (nr_to_write > 0) {
- wbc.more_io = 0;
wbc.encountered_congestion = 0;
wbc.nr_to_write = MAX_WRITEBACK_PAGES;
writeback_inodes(&wbc);
if (wbc.nr_to_write > 0) {
- if (wbc.encountered_congestion || wbc.more_io)
+ if (wbc.encountered_congestion)
congestion_wait(WRITE, HZ/10);
else
break; /* All the old data is written */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d73bfad1c32..b2838c24e58 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2566,7 +2566,7 @@ static void __meminit zone_init_free_lists(struct pglist_data *pgdat,
memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
#endif
-static int __devinit zone_batchsize(struct zone *zone)
+static int zone_batchsize(struct zone *zone)
{
int batch;
@@ -3438,7 +3438,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
mem_map = NODE_DATA(0)->node_mem_map;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
- mem_map -= pgdat->node_start_pfn;
+ mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
}
#endif
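
The alloc_node_mem_map() fix matters on FLATMEM configurations whose first valid pfn (ARCH_PFN_OFFSET) is non-zero: pfn_to_page() there expands to mem_map + (pfn - ARCH_PFN_OFFSET), so the global mem_map has to be biased by the difference between node_start_pfn and ARCH_PFN_OFFSET rather than by node_start_pfn alone. A standalone sketch of that arithmetic, using made-up pfn values:

#include <assert.h>
#include <stdio.h>

#define ARCH_PFN_OFFSET	0x100UL		/* hypothetical first valid pfn */

struct page { int dummy; };

int main(void)
{
	struct page node_mem_map[16];		/* stands in for the node's map */
	unsigned long node_start_pfn = 0x104;	/* hypothetical node start pfn  */
	struct page *mem_map;

	/* FLATMEM pfn_to_page(): mem_map + (pfn - ARCH_PFN_OFFSET) */
	mem_map = node_mem_map - (node_start_pfn - ARCH_PFN_OFFSET);

	/* node_start_pfn must resolve to the first entry of node_mem_map */
	assert(mem_map + (node_start_pfn - ARCH_PFN_OFFSET) == &node_mem_map[0]);
	printf("pfn_to_page arithmetic checks out\n");
	return 0;
}
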
diff --git a/mm/quicklist.c b/mm/quicklist.c
index ae8189c2799..3f703f7cb39 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -26,9 +26,17 @@ DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
static unsigned long max_pages(unsigned long min_pages)
{
unsigned long node_free_pages, max;
+ struct zone *zones = NODE_DATA(numa_node_id())->node_zones;
+
+ node_free_pages =
+#ifdef CONFIG_ZONE_DMA
+ zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
+#endif
+#ifdef CONFIG_ZONE_DMA32
+ zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
+#endif
+ zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);
- node_free_pages = node_page_state(numa_node_id(),
- NR_FREE_PAGES);
max = node_free_pages / FRACTION_OF_NODE_MEM;
return max(max, min_pages);
}
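
max_pages() now sums only the zones a GFP_KERNEL allocation can come from (DMA, DMA32, Normal); node_page_state(..., NR_FREE_PAGES) also counted highmem pages the quicklists can never hand out. A rough userspace analogue that sums the same zones from /proc/zoneinfo; the "Node N, zone NAME" / "pages free COUNT" layout is an assumption about that file's format:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/zoneinfo", "r");
	char line[256], zone[32] = "";
	unsigned long pages, total = 0;

	if (!f) {
		perror("/proc/zoneinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "Node %*d, zone %31s", zone) == 1)
			continue;
		if (sscanf(line, " pages free %lu", &pages) == 1 &&
		    (!strcmp(zone, "DMA") || !strcmp(zone, "DMA32") ||
		     !strcmp(zone, "Normal")))
			total += pages;
	}
	fclose(f);
	printf("free pages usable for GFP_KERNEL (approx.): %lu\n", total);
	return 0;
}
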
diff --git a/mm/slab.c b/mm/slab.c
index 2e338a5f7b1..aebb9f68557 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4105,7 +4105,7 @@ out:
schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
}
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_SLABINFO
static void print_slabinfo_header(struct seq_file *m)
{
diff --git a/mm/slub.c b/mm/slub.c
index b9f37cb0f2e..474945ecd89 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -172,7 +172,7 @@ static inline void ClearSlabDebug(struct page *page)
* Mininum number of partial slabs. These will be left on the partial
* lists even if they are empty. kmem_cache_shrink may reclaim them.
*/
-#define MIN_PARTIAL 2
+#define MIN_PARTIAL 5
/*
* Maximum number of desirable partial slabs.
@@ -1613,7 +1613,7 @@ checks_ok:
* then add it.
*/
if (unlikely(!prior))
- add_partial(get_node(s, page_to_nid(page)), page);
+ add_partial_tail(get_node(s, page_to_nid(page)), page);
out_unlock:
slab_unlock(page);
@@ -3076,6 +3076,19 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
return slab_alloc(s, gfpflags, node, caller);
}
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+ unsigned long flags;
+ unsigned long x = 0;
+ struct page *page;
+
+ spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, lru)
+ x += page->inuse;
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ return x;
+}
+
#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int validate_slab(struct kmem_cache *s, struct page *page,
unsigned long *map)
@@ -3458,19 +3471,6 @@ static int list_locations(struct kmem_cache *s, char *buf,
return n;
}
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
- unsigned long flags;
- unsigned long x = 0;
- struct page *page;
-
- spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
- x += page->inuse;
- spin_unlock_irqrestore(&n->list_lock, flags);
- return x;
-}
-
enum slab_stat_type {
SL_FULL,
SL_PARTIAL,
@@ -4123,3 +4123,89 @@ static int __init slab_sysfs_init(void)
__initcall(slab_sysfs_init);
#endif
+
+/*
+ * The /proc/slabinfo ABI
+ */
+#ifdef CONFIG_SLABINFO
+
+ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+ size_t count, loff_t *ppos)
+{
+ return -EINVAL;
+}
+
+
+static void print_slabinfo_header(struct seq_file *m)
+{
+ seq_puts(m, "slabinfo - version: 2.1\n");
+ seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
+ "<objperslab> <pagesperslab>");
+ seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
+ seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
+ seq_putc(m, '\n');
+}
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t n = *pos;
+
+ down_read(&slub_lock);
+ if (!n)
+ print_slabinfo_header(m);
+
+ return seq_list_start(&slab_caches, *pos);
+}
+
+static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ return seq_list_next(p, &slab_caches, pos);
+}
+
+static void s_stop(struct seq_file *m, void *p)
+{
+ up_read(&slub_lock);
+}
+
+static int s_show(struct seq_file *m, void *p)
+{
+ unsigned long nr_partials = 0;
+ unsigned long nr_slabs = 0;
+ unsigned long nr_inuse = 0;
+ unsigned long nr_objs;
+ struct kmem_cache *s;
+ int node;
+
+ s = list_entry(p, struct kmem_cache, list);
+
+ for_each_online_node(node) {
+ struct kmem_cache_node *n = get_node(s, node);
+
+ if (!n)
+ continue;
+
+ nr_partials += n->nr_partial;
+ nr_slabs += atomic_long_read(&n->nr_slabs);
+ nr_inuse += count_partial(n);
+ }
+
+ nr_objs = nr_slabs * s->objects;
+ nr_inuse += (nr_slabs - nr_partials) * s->objects;
+
+ seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
+ nr_objs, s->size, s->objects, (1 << s->order));
+ seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
+ seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
+ 0UL);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+const struct seq_operations slabinfo_op = {
+ .start = s_start,
+ .next = s_next,
+ .stop = s_stop,
+ .show = s_show,
+};
+
+#endif /* CONFIG_SLABINFO */
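
The new block restores /proc/slabinfo for SLUB in the same version 2.1 format SLAB prints, with the tunables reported as zeros since SLUB has none to adjust per cache. A minimal userspace reader for that format, which skips the two header lines and prints the first few columns:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/slabinfo", "r");
	char line[512], name[64];
	unsigned long active, total;
	unsigned int objsize;

	if (!f) {
		perror("/proc/slabinfo");	/* needs CONFIG_SLABINFO; often root-only */
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%63s %lu %lu %u",
			   name, &active, &total, &objsize) != 4)
			continue;		/* header or unexpected line */
		printf("%-20s active=%lu total=%lu objsize=%u\n",
		       name, active, total, objsize);
	}
	fclose(f);
	return 0;
}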