Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c         |  5
-rw-r--r--  mm/memory_hotplug.c  |  3
-rw-r--r--  mm/migrate.c         |  5
-rw-r--r--  mm/mlock.c           | 18
-rw-r--r--  mm/mmap.c            |  2
-rw-r--r--  mm/page_alloc.c      |  4
-rw-r--r--  mm/page_cgroup.c     |  4
-rw-r--r--  mm/vmalloc.c         | 21
-rw-r--r--  mm/vmscan.c          | 44
9 files changed, 40 insertions(+), 66 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d143ab67be4..6058b53dcb8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1796,6 +1796,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page, unsigned long address)
{
+ struct hstate *h = hstate_vma(vma);
struct vm_area_struct *iter_vma;
struct address_space *mapping;
struct prio_tree_iter iter;
@@ -1805,7 +1806,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
* vm_pgoff is in PAGE_SIZE units, hence the different calculation
* from page cache lookup which is in HPAGE_SIZE units.
*/
- address = address & huge_page_mask(hstate_vma(vma));
+ address = address & huge_page_mask(h);
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
+ (vma->vm_pgoff >> PAGE_SHIFT);
mapping = (struct address_space *)page_private(page);
@@ -1824,7 +1825,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
*/
if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
unmap_hugepage_range(iter_vma,
- address, address + HPAGE_SIZE,
+ address, address + huge_page_size(h),
page);
}
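[Editor's note] The hugetlb.c change makes unmap_ref_private() take the huge page mask and the unmap range length from the VMA's own hstate rather than the global HPAGE_SIZE, so the copy-on-write unmap path stays correct once multiple huge page sizes exist. A minimal userspace sketch of deriving both values from one per-region size, with an invented `region` struct standing in for the hstate (this is not the kernel API):

/* Standalone sketch: derive the unmap range from a per-region huge page
 * size instead of a single global constant. The struct and helper names
 * are illustrative only, not the kernel's hstate API. */
#include <assert.h>
#include <stdio.h>

struct region { unsigned long start; unsigned long page_size; };

static unsigned long huge_mask(const struct region *r)
{
	return ~(r->page_size - 1);	/* analogous to huge_page_mask(h) */
}

int main(void)
{
	struct region r = { .start = 0x40000000UL, .page_size = 1UL << 21 }; /* 2 MB */
	unsigned long address = 0x40234567UL;

	address &= huge_mask(&r);		   /* round down to the huge page boundary */
	unsigned long end = address + r.page_size; /* range length from the same size */

	printf("unmap [%lx, %lx)\n", address, end);
	assert(end - address == r.page_size);
	return 0;
}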
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6837a101437..b5b2b15085a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -22,7 +22,6 @@
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
-#include <linux/cpuset.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
@@ -498,8 +497,6 @@ int add_memory(int nid, u64 start, u64 size)
/* we online node here. we can't roll back from here. */
node_set_online(nid);
- cpuset_track_online_nodes();
-
if (new_pgdat) {
ret = register_one_node(nid);
/*
diff --git a/mm/migrate.c b/mm/migrate.c
index 385db89f0c3..1e0d6b237f4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -522,15 +522,12 @@ static int writeout(struct address_space *mapping, struct page *page)
remove_migration_ptes(page, page);
rc = mapping->a_ops->writepage(page, &wbc);
- if (rc < 0)
- /* I/O Error writing */
- return -EIO;
if (rc != AOP_WRITEPAGE_ACTIVATE)
/* unlocked. Relock */
lock_page(page);
- return -EAGAIN;
+ return (rc < 0) ? -EIO : -EAGAIN;
}
/*
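[Editor's note] The migrate.c rework ensures writeout() relocks the page even when ->writepage() returns an error; previously the early `return -EIO` skipped the relock. The result code is then mapped to -EIO or -EAGAIN in one place. A standalone model of the new control flow, using invented helpers (fake_writepage, writeout_sketch) rather than the kernel API:

/* Standalone sketch of the writeout() error-path change: relock the page
 * first, then translate the writepage result, instead of returning -EIO
 * before the relock. All names and state here are made up for illustration. */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>

#define AOP_WRITEPAGE_ACTIVATE 0x80000	/* same idea as the kernel constant */

static bool page_locked;

static int fake_writepage(int simulated_rc)
{
	/* writepage unlocks the page unless it asks to re-activate it */
	page_locked = (simulated_rc == AOP_WRITEPAGE_ACTIVATE);
	return simulated_rc;
}

static int writeout_sketch(int simulated_rc)
{
	int rc = fake_writepage(simulated_rc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		page_locked = true;	/* "unlocked. Relock" now happens on every path */

	return (rc < 0) ? -EIO : -EAGAIN;
}

int main(void)
{
	assert(writeout_sketch(-EIO) == -EIO && page_locked);  /* error: still relocked */
	assert(writeout_sketch(0) == -EAGAIN && page_locked);  /* written: retry later */
	return 0;
}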
diff --git a/mm/mlock.c b/mm/mlock.c
index 008ea70b7af..1ada366570c 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -66,14 +66,10 @@ void __clear_page_mlock(struct page *page)
putback_lru_page(page);
} else {
/*
- * Page not on the LRU yet. Flush all pagevecs and retry.
+ * We lost the race. the page already moved to evictable list.
*/
- lru_add_drain_all();
- if (!isolate_lru_page(page))
- putback_lru_page(page);
- else if (PageUnevictable(page))
+ if (PageUnevictable(page))
count_vm_event(UNEVICTABLE_PGSTRANDED);
-
}
}
@@ -166,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long addr = start;
struct page *pages[16]; /* 16 gives a reasonable batch */
int nr_pages = (end - start) / PAGE_SIZE;
- int ret;
+ int ret = 0;
int gup_flags = 0;
VM_BUG_ON(start & ~PAGE_MASK);
@@ -187,8 +183,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
if (vma->vm_flags & VM_WRITE)
gup_flags |= GUP_FLAGS_WRITE;
- lru_add_drain_all(); /* push cached pages to LRU */
-
while (nr_pages > 0) {
int i;
@@ -251,8 +245,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
ret = 0;
}
- lru_add_drain_all(); /* to update stats */
-
return ret; /* count entire vma as locked_vm */
}
@@ -546,6 +538,8 @@ asmlinkage long sys_mlock(unsigned long start, size_t len)
if (!can_do_mlock())
return -EPERM;
+ lru_add_drain_all(); /* flush pagevec */
+
down_write(&current->mm->mmap_sem);
len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
start &= PAGE_MASK;
@@ -612,6 +606,8 @@ asmlinkage long sys_mlockall(int flags)
if (!can_do_mlock())
goto out;
+ lru_add_drain_all(); /* flush pagevec */
+
down_write(&current->mm->mmap_sem);
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
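[Editor's note] The mlock.c hunks move lru_add_drain_all(), which schedules work on every CPU, out of __mlock_vma_pages_range() (which runs with mmap_sem held) and up to the entry of sys_mlock()/sys_mlockall(), before the semaphore is taken, so the drain happens once per syscall rather than per range and never under the lock. A minimal userspace model of that hoisting, with invented names (drain_all, lock_range, do_mlock_sketch) and a pthread mutex standing in for mmap_sem:

/* Standalone model of the mlock change: the expensive all-CPU pagevec drain
 * is done once, before the lock is taken, instead of inside the per-range
 * helper that runs under the lock. Everything here is invented for
 * illustration; it is not the kernel code. */
#include <assert.h>
#include <pthread.h>

static pthread_mutex_t mmap_sem_model = PTHREAD_MUTEX_INITIALIZER;
static int drain_calls;

static void drain_all(void)		/* stands in for lru_add_drain_all() */
{
	drain_calls++;
}

static void lock_range(void)		/* stands in for __mlock_vma_pages_range() */
{
	/* no drain here any more: this runs with the lock held */
}

static void do_mlock_sketch(int nr_ranges)
{
	drain_all();			/* flush pagevecs once, before the lock */
	pthread_mutex_lock(&mmap_sem_model);
	for (int i = 0; i < nr_ranges; i++)
		lock_range();
	pthread_mutex_unlock(&mmap_sem_model);
}

int main(void)
{
	do_mlock_sketch(8);
	assert(drain_calls == 1);	/* one drain per syscall, not per range */
	return 0;
}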
diff --git a/mm/mmap.c b/mm/mmap.c
index de14ac21e5b..d4855a682ab 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1704,7 +1704,7 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
vma = find_vma_prev(mm, addr, &prev);
if (vma && (vma->vm_start <= addr))
return vma;
- if (expand_stack(prev, addr))
+ if (!prev || expand_stack(prev, addr))
return NULL;
if (prev->vm_flags & VM_LOCKED) {
if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
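[Editor's note] The mmap.c fix guards against find_vma_prev() returning a NULL prev (addr lies below the first VMA), in which case expand_stack(prev, addr) would dereference a NULL pointer; the short-circuit `!prev ||` test bails out first. A small standalone sketch of the same guard, using invented stand-in types (fake_vma, expand_stack_sketch):

/* Standalone sketch of the find_extend_vma() fix: short-circuit the NULL
 * check so the expand helper is never called with a NULL vma. The structs
 * and helpers are illustrative stand-ins, not the kernel's. */
#include <assert.h>
#include <stddef.h>

struct fake_vma { unsigned long vm_start; unsigned long vm_end; };

static int expand_stack_sketch(struct fake_vma *vma, unsigned long addr)
{
	if (addr < vma->vm_start)	/* would oops if vma were NULL */
		vma->vm_start = addr;
	return 0;
}

static struct fake_vma *find_extend_sketch(struct fake_vma *prev, unsigned long addr)
{
	if (!prev || expand_stack_sketch(prev, addr))	/* !prev is tested first */
		return NULL;
	return prev;
}

int main(void)
{
	struct fake_vma v = { .vm_start = 0x2000, .vm_end = 0x3000 };

	assert(find_extend_sketch(NULL, 0x1000) == NULL);	/* no crash on NULL prev */
	assert(find_extend_sketch(&v, 0x1000) == &v);
	assert(v.vm_start == 0x1000);
	return 0;
}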
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 54069e64e3a..d8ac0147456 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1561,6 +1561,10 @@ nofail_alloc:
/* We now go into synchronous reclaim */
cpuset_memory_pressure_bump();
+ /*
+ * The task's cpuset might have expanded its set of allowable nodes
+ */
+ cpuset_update_task_memory_state();
p->flags |= PF_MEMALLOC;
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index f59d797dc5a..1223d927904 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -165,7 +165,7 @@ int online_page_cgroup(unsigned long start_pfn,
unsigned long start, end, pfn;
int fail = 0;
- start = start_pfn & (PAGES_PER_SECTION - 1);
+ start = start_pfn & ~(PAGES_PER_SECTION - 1);
end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
@@ -188,7 +188,7 @@ int offline_page_cgroup(unsigned long start_pfn,
{
unsigned long start, end, pfn;
- start = start_pfn & (PAGES_PER_SECTION - 1);
+ start = start_pfn & ~(PAGES_PER_SECTION - 1);
end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
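[Editor's note] The page_cgroup.c fix is a mask inversion: for a power-of-two section size, `pfn & (PAGES_PER_SECTION - 1)` extracts the offset within a section, while the intended `pfn & ~(PAGES_PER_SECTION - 1)` rounds the pfn down to its section boundary, which is what the loop start needs. A tiny standalone demonstration (PAGES_PER_SECTION_SKETCH is an assumed example value, not the kernel constant):

/* Standalone sketch of the mask fix: offset-within-section versus
 * round-down-to-section-boundary. */
#include <assert.h>

#define PAGES_PER_SECTION_SKETCH (1UL << 14)	/* example: 16384 pages per section */

int main(void)
{
	unsigned long start_pfn = 3 * PAGES_PER_SECTION_SKETCH + 100;

	unsigned long offset   = start_pfn &  (PAGES_PER_SECTION_SKETCH - 1); /* old, wrong */
	unsigned long boundary = start_pfn & ~(PAGES_PER_SECTION_SKETCH - 1); /* new, right */

	assert(offset == 100);				  /* tiny pfn, far below the real range */
	assert(boundary == 3 * PAGES_PER_SECTION_SKETCH); /* section-aligned loop start */
	return 0;
}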
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ba6b0f5f7fa..30f826d484f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -324,14 +324,14 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
BUG_ON(size & ~PAGE_MASK);
- addr = ALIGN(vstart, align);
-
va = kmalloc_node(sizeof(struct vmap_area),
gfp_mask & GFP_RECLAIM_MASK, node);
if (unlikely(!va))
return ERR_PTR(-ENOMEM);
retry:
+ addr = ALIGN(vstart, align);
+
spin_lock(&vmap_area_lock);
/* XXX: could have a last_hole cache */
n = vmap_area_root.rb_node;
@@ -362,7 +362,7 @@ retry:
goto found;
}
- while (addr + size >= first->va_start && addr + size <= vend) {
+ while (addr + size > first->va_start && addr + size <= vend) {
addr = ALIGN(first->va_end + PAGE_SIZE, align);
n = rb_next(&first->rb_node);
@@ -522,13 +522,24 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
}
/*
+ * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
+ * is already purging.
+ */
+static void try_purge_vmap_area_lazy(void)
+{
+ unsigned long start = ULONG_MAX, end = 0;
+
+ __purge_vmap_area_lazy(&start, &end, 0, 0);
+}
+
+/*
* Kick off a purge of the outstanding lazy areas.
*/
static void purge_vmap_area_lazy(void)
{
unsigned long start = ULONG_MAX, end = 0;
- __purge_vmap_area_lazy(&start, &end, 0, 0);
+ __purge_vmap_area_lazy(&start, &end, 1, 0);
}
/*
@@ -539,7 +550,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
va->flags |= VM_LAZY_FREE;
atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
- purge_vmap_area_lazy();
+ try_purge_vmap_area_lazy();
}
static struct vmap_area *find_vmap_area(unsigned long addr)
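[Editor's note] The vmalloc.c changes do three things: restart the address search from vstart on each retry (the purge may have freed space below the previous cursor), have the blocking purge_vmap_area_lazy() pass sync=1 while the new try_purge_vmap_area_lazy() keeps the non-blocking behaviour for the lazy-free fast path, and tighten the hole-search overlap test from `>=` to `>`. The last one matters because vmap areas are half-open ranges: a candidate that ends exactly where the next area starts does not collide with it. A minimal standalone check of that condition (struct and values are illustrative only):

/* Standalone sketch of the overlap-test fix: with half-open ranges
 * [start, end), "addr + size > next->va_start" is the correct collision
 * test; ">=" would reject a hole that fits exactly. */
#include <assert.h>
#include <stdbool.h>

struct area { unsigned long va_start, va_end; };	/* half-open [va_start, va_end) */

static bool overlaps_next(unsigned long addr, unsigned long size,
			  const struct area *next)
{
	return addr + size > next->va_start;	/* fixed test: strict '>' */
}

int main(void)
{
	struct area next = { .va_start = 0x3000, .va_end = 0x4000 };

	/* Candidate [0x2000, 0x3000) butts up against next but does not overlap:
	 * the old ">=" test would have skipped this perfectly usable hole. */
	assert(!overlaps_next(0x2000, 0x1000, &next));
	assert(overlaps_next(0x2800, 0x1000, &next));	/* genuine overlap */
	return 0;
}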
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3b5860294bb..7ea1440b53d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -623,6 +623,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* Try to allocate it some swap space here.
*/
if (PageAnon(page) && !PageSwapCache(page)) {
+ if (!(sc->gfp_mask & __GFP_IO))
+ goto keep_locked;
switch (try_to_munlock(page)) {
case SWAP_FAIL: /* shouldn't happen */
case SWAP_AGAIN:
@@ -634,6 +636,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
}
if (!add_to_swap(page, GFP_ATOMIC))
goto activate_locked;
+ may_enter_fs = 1;
}
#endif /* CONFIG_SWAP */
@@ -1386,9 +1389,9 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
file_prio = 200 - sc->swappiness;
/*
- * anon recent_rotated[0]
- * %anon = 100 * ----------- / ----------------- * IO cost
- * anon + file rotate_sum
+ * The amount of pressure on anon vs file pages is inversely
+ * proportional to the fraction of recently scanned pages on
+ * each list that were recently referenced and in active use.
*/
ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
ap /= zone->recent_rotated[0] + 1;
@@ -2368,39 +2371,6 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
return 1;
}
-static void show_page_path(struct page *page)
-{
- char buf[256];
- if (page_is_file_cache(page)) {
- struct address_space *mapping = page->mapping;
- struct dentry *dentry;
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
- spin_lock(&mapping->i_mmap_lock);
- dentry = d_find_alias(mapping->host);
- printk(KERN_INFO "rescued: %s %lu\n",
- dentry_path(dentry, buf, 256), pgoff);
- spin_unlock(&mapping->i_mmap_lock);
- } else {
-#if defined(CONFIG_MM_OWNER) && defined(CONFIG_MMU)
- struct anon_vma *anon_vma;
- struct vm_area_struct *vma;
-
- anon_vma = page_lock_anon_vma(page);
- if (!anon_vma)
- return;
-
- list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- printk(KERN_INFO "rescued: anon %s\n",
- vma->vm_mm->owner->comm);
- break;
- }
- page_unlock_anon_vma(anon_vma);
-#endif
- }
-}
-
-
/**
* check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
* @page: page to check evictability and move to appropriate lru list
@@ -2421,8 +2391,6 @@ retry:
if (page_evictable(page, NULL)) {
enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
- show_page_path(page);
-
__dec_zone_state(zone, NR_UNEVICTABLE);
list_move(&page->lru, &zone->lru[l].list);
__inc_zone_state(zone, NR_INACTIVE_ANON + l);
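[Editor's note] The vmscan.c hunks skip swap allocation when the caller did not pass __GFP_IO, set may_enter_fs once a page has swap backing, drop the show_page_path() debugging helper, and replace the ASCII formula in get_scan_ratio() with a prose description. The arithmetic itself is unchanged: pressure on a list scales with its priority and with the share of recently scanned pages that were not rotated back as active. A standalone sketch that runs the same calculation on invented sample numbers:

/* Standalone sketch of the get_scan_ratio() arithmetic. All figures below
 * are made-up sample values, not kernel defaults or measured data. */
#include <stdio.h>

int main(void)
{
	unsigned long swappiness = 60;
	unsigned long anon_prio = swappiness;		/* 60 */
	unsigned long file_prio = 200 - swappiness;	/* 140 */

	/* recent_scanned / recent_rotated per list, sample values */
	unsigned long anon_scanned = 1000, anon_rotated = 800; /* anon mostly still in use */
	unsigned long file_scanned = 1000, file_rotated = 100; /* file mostly reclaimable */

	unsigned long ap = (anon_prio + 1) * (anon_scanned + 1) / (anon_rotated + 1);
	unsigned long fp = (file_prio + 1) * (file_scanned + 1) / (file_rotated + 1);

	printf("anon share %% = %lu\n", 100 * ap / (ap + fp));	/* small: anon heavily rotated */
	printf("file share %% = %lu\n", 100 * fp / (ap + fp));
	return 0;
}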