author    Christoph Lameter <clameter@sgi.com>    2006-06-25 05:46:49 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>   2006-06-25 10:00:55 -0700
commit    e6a1530d692d6a60cdf15dfbcfea07f5324d7b9f (patch)
tree      bb34a4d745eb7f7e8d3de40b171fac17822ee8ac
parent    7b2259b3e53f128c10a9fded0965e69d4a949847 (diff)
[PATCH] Allow migration of mlocked pages
Hugh clarified the role of VM_LOCKED. So we can now implement page
migration for mlocked pages.

Allow the migration of mlocked pages. This means that try_to_unmap must
unmap mlocked pages in the migration case.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
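For readers skimming the diff below, the core behavioral change is in
try_to_unmap_one(): after this patch, the VM_LOCKED and recently-referenced
checks only block unmapping when we are not migrating. A minimal user-space
sketch of that decision follows (illustrative only, not kernel code; the
names migration, vm_locked and pte_young stand in for the migration
argument, the VM_LOCKED vma flag and the ptep_clear_flush_young() result):

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the reordered check in try_to_unmap_one().
 * Before the patch a VM_LOCKED vma always made the unmap fail;
 * after it, migration skips both the mlock test and the
 * "recently referenced" test, so migration ptes can replace
 * the ptes of an mlocked page.
 */
static bool unmap_fails(bool migration, bool vm_locked, bool pte_young)
{
	return !migration && (vm_locked || pte_young);
}

int main(void)
{
	printf("reclaim, mlocked page:   %s\n",
	       unmap_fails(false, true, false) ? "SWAP_FAIL" : "unmapped");
	printf("migration, mlocked page: %s\n",
	       unmap_fails(true, true, false) ? "SWAP_FAIL" : "unmapped");
	return 0;
}

Because the new condition tests !migration first, ptep_clear_flush_young()
is not called at all on the migration path, so the referenced bit is left
untouched there.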
-rw-r--r--   mm/migrate.c   10
-rw-r--r--   mm/rmap.c       9
2 files changed, 8 insertions, 11 deletions
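On the mm/migrate.c side, the SWAP_FAIL / -EPERM branch goes away:
try_to_unmap() is now called unconditionally and success is judged purely
by page_mapped(). A rough stand-alone sketch of the resulting flow (an
illustration under the assumption that rc enters this block holding a
nonzero error code, as unmap_and_move() arranges earlier; the helpers here
are stubs, not the kernel functions):

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the kernel helpers that the hunk below touches. */
static void try_to_unmap(bool *mapped)   { *mapped = false; }  /* pretend the unmap succeeds */
static int  move_to_new_page(void)       { return 0; }         /* pretend the copy succeeds  */
static void remove_migration_ptes(void)  { puts("restore original ptes"); }

int main(void)
{
	int rc = -1;         /* models the error code rc already holds on entry */
	bool mapped = true;  /* models page_mapped(page) */

	try_to_unmap(&mapped);           /* always attempted, even for mlocked pages */
	if (!mapped)
		rc = move_to_new_page(); /* only move the data once it is fully unmapped */

	if (rc)                          /* any remaining error: back out migration ptes */
		remove_migration_ptes();

	printf("rc = %d\n", rc);
	return 0;
}

The practical consequence is that a page which stays mapped simply leaves rc
set, so remove_migration_ptes() undoes the partial work and the failure is
reported as retryable rather than as the old permanent -EPERM.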
diff --git a/mm/migrate.c b/mm/migrate.c
index 0576c053598..3f1e0c2c942 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -616,15 +616,13 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	/*
 	 * Establish migration ptes or remove ptes
 	 */
-	if (try_to_unmap(page, 1) != SWAP_FAIL) {
-		if (!page_mapped(page))
-			rc = move_to_new_page(newpage, page);
-	} else
-		/* A vma has VM_LOCKED set -> permanent failure */
-		rc = -EPERM;
+	try_to_unmap(page, 1);
+	if (!page_mapped(page))
+		rc = move_to_new_page(newpage, page);
 	if (rc)
 		remove_migration_ptes(page, page);
+
 unlock:
 	unlock_page(page);
diff --git a/mm/rmap.c b/mm/rmap.c
index 882a85826bb..e76909e880c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -562,9 +562,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * If it's recently referenced (perhaps page_referenced
 	 * skipped over this mm) then we should reactivate it.
 	 */
-	if ((vma->vm_flags & VM_LOCKED) ||
-			(ptep_clear_flush_young(vma, address, pte)
-			&& !migration)) {
+	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
+			(ptep_clear_flush_young(vma, address, pte)))) {
 		ret = SWAP_FAIL;
 		goto out_unmap;
 	}
@@ -771,7 +770,7 @@ static int try_to_unmap_file(struct page *page, int migration)
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (vma->vm_flags & VM_LOCKED)
+		if ((vma->vm_flags & VM_LOCKED) && !migration)
 			continue;
 		cursor = (unsigned long) vma->vm_private_data;
 		if (cursor > max_nl_cursor)
@@ -805,7 +804,7 @@ static int try_to_unmap_file(struct page *page, int migration)
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (vma->vm_flags & VM_LOCKED)
+			if ((vma->vm_flags & VM_LOCKED) && !migration)
 				continue;
 			cursor = (unsigned long) vma->vm_private_data;
 			while ( cursor < max_nl_cursor &&