author     Christoph Lameter <clameter@sgi.com>    2006-01-08 01:00:55 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-01-08 20:12:42 -0800
commit     d498471133ff1f9586a06820beaeebc575fe2814 (patch)
tree       c7cde93441692e3b32d83dfbf85858ab2d85e481 /mm
parent     ee27497df36823f2793212cad0997c044eb0e1eb (diff)
[PATCH] SwapMig: Extend parameters for migrate_pages()
Extend the parameters of migrate_pages() to allow the caller control over the
fate of successfully migrated or impossible-to-migrate pages.

Swap migration and direct migration will have the same interface after this
patch so that patches can be independently applied to the policy layer and the
core migration code.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
 -rw-r--r--  mm/mempolicy.c | 27
 -rw-r--r--  mm/vmscan.c    | 17
 2 files changed, 30 insertions, 14 deletions
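The heart of the change is the four-parameter form of migrate_pages(): the
caller supplies the list of pages to migrate, an optional list of target
pages, and two empty lists that receive the successfully moved and the failed
pages, so the caller decides what happens to both afterwards. A minimal
sketch of the calling convention, modelled on the swap_pages() helper added
in mm/mempolicy.c below (the name example_migrate_to_swap is illustrative
only, not part of the patch):

static int example_migrate_to_swap(struct list_head *pagelist)
{
	LIST_HEAD(moved);	/* receives pages that were migrated */
	LIST_HEAD(failed);	/* receives pages that could not be migrated */
	int nr_failed;

	/*
	 * A NULL 'to' list keeps the existing swap-out behaviour; the
	 * direct migration patchset extends this to real target pages.
	 */
	nr_failed = migrate_pages(pagelist, NULL, &moved, &failed);

	/*
	 * The caller controls the fate of both result lists; here both
	 * are simply put back on the LRU.
	 */
	putback_lru_pages(&failed);
	putback_lru_pages(&moved);

	return nr_failed;	/* number of pages that were not migrated */
}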
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 20d5ad39fa4..30bdafba52d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -429,6 +429,19 @@ static int contextualize_policy(int mode, nodemask_t *nodes)
 	return mpol_check_policy(mode, nodes);
 }
 
+static int swap_pages(struct list_head *pagelist)
+{
+	LIST_HEAD(moved);
+	LIST_HEAD(failed);
+	int n;
+
+	n = migrate_pages(pagelist, NULL, &moved, &failed);
+	putback_lru_pages(&failed);
+	putback_lru_pages(&moved);
+
+	return n;
+}
+
 long do_mbind(unsigned long start, unsigned long len,
 		unsigned long mode, nodemask_t *nmask, unsigned long flags)
 {
@@ -481,10 +494,13 @@ long do_mbind(unsigned long start, unsigned long len,
 			(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ? &pagelist : NULL);
 	err = PTR_ERR(vma);
 	if (!IS_ERR(vma)) {
+		int nr_failed = 0;
+
 		err = mbind_range(vma, start, end, new);
 		if (!list_empty(&pagelist))
-			migrate_pages(&pagelist, NULL);
-		if (!err && !list_empty(&pagelist) && (flags & MPOL_MF_STRICT))
+			nr_failed = swap_pages(&pagelist);
+
+		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
 			err = -EIO;
 	}
 	if (!list_empty(&pagelist))
@@ -635,11 +651,12 @@ int do_migrate_pages(struct mm_struct *mm,
 	down_read(&mm->mmap_sem);
 	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nodes,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
+
 	if (!list_empty(&pagelist)) {
-		migrate_pages(&pagelist, NULL);
-		if (!list_empty(&pagelist))
-			count = putback_lru_pages(&pagelist);
+		count = swap_pages(&pagelist);
+		putback_lru_pages(&pagelist);
 	}
+
 	up_read(&mm->mmap_sem);
 	return count;
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 73ba4046ed2..5eecb514cce 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -670,10 +670,10 @@ retry:
  * list. The direct migration patchset
  * extends this function to avoid the use of swap.
  */
-int migrate_pages(struct list_head *l, struct list_head *t)
+int migrate_pages(struct list_head *from, struct list_head *to,
+		struct list_head *moved, struct list_head *failed)
 {
 	int retry;
-	LIST_HEAD(failed);
 	int nr_failed = 0;
 	int pass = 0;
 	struct page *page;
@@ -686,12 +686,12 @@ int migrate_pages(struct list_head *l, struct list_head *t)
 redo:
 	retry = 0;
 
-	list_for_each_entry_safe(page, page2, l, lru) {
+	list_for_each_entry_safe(page, page2, from, lru) {
 		cond_resched();
 
 		if (page_count(page) == 1) {
 			/* page was freed from under us. So we are done. */
-			move_to_lru(page);
+			list_move(&page->lru, moved);
 			continue;
 		}
 		/*
@@ -722,7 +722,7 @@ redo:
 		if (PageAnon(page) && !PageSwapCache(page)) {
 			if (!add_to_swap(page, GFP_KERNEL)) {
 				unlock_page(page);
-				list_move(&page->lru, &failed);
+				list_move(&page->lru, failed);
 				nr_failed++;
 				continue;
 			}
@@ -732,8 +732,10 @@ redo:
 		 * Page is properly locked and writeback is complete.
 		 * Try to migrate the page.
 		 */
-		if (!swap_page(page))
+		if (!swap_page(page)) {
+			list_move(&page->lru, moved);
 			continue;
+		}
 retry_later:
 		retry++;
 	}
@@ -743,9 +745,6 @@ retry_later:
 	if (!swapwrite)
 		current->flags &= ~PF_SWAPWRITE;
 
-	if (!list_empty(&failed))
-		list_splice(&failed, l);
-
 	return nr_failed + retry;
 }