| author | KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> | 2008-02-07 00:14:10 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-02-07 08:42:19 -0800 |
| commit | ae41be374293e70e1ed441d986afcc6e744ef9d9 (patch) | |
| tree | d8e2143820bbf3ed2f1f79ed99ee430284567b93 | |
| parent | 9175e0311ec9e6d1bf1f6dfecf9268baf08765e6 (diff) | |
bugfix for memory cgroup controller: migration under memory controller fix
While using the memory control cgroup, page migration under it works as follows.
==
1. uncharge all refs at try_to_unmap().
2. charge refs again at remove_migration_ptes().
==
This is simple but has the following problems.
==
The page is uncharged and charged back again if it is *mapped*.
- This means that the cgroup before migration can be different from the one
  after migration.
- If the page is not mapped but is charged as page cache, the charge is just
  ignored (because it is not mapped, it will not be uncharged before migration).
  This is a memory leak.
==
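To make the page-cache case concrete, here is a minimal userspace toy model of the pre-patch accounting (plain C; the struct and helper names are invented for illustration, this is not kernel code). Because the charge only follows the page through the unmap/remap steps, an unmapped page-cache page keeps its charge stranded on the old page:

```c
#include <stdio.h>
#include <stdbool.h>

struct toy_page {
	bool mapped;   /* does some pte point at this page? */
	int  charge;   /* bytes accounted to the cgroup for this page */
};

/* pre-patch scheme: the charge only moves via the unmap/remap steps */
static void migrate_prepatch(struct toy_page *oldp, struct toy_page *newp)
{
	if (oldp->mapped) {
		oldp->charge = 0;      /* 1. uncharge at try_to_unmap()        */
		newp->charge = 4096;   /* 2. charge again when ptes are redone */
		newp->mapped = true;
		oldp->mapped = false;
	}
	/* unmapped page cache: neither step runs, oldp->charge is stranded */
}

int main(void)
{
	/* a page-cache page that is charged but not mapped */
	struct toy_page oldp = { .mapped = false, .charge = 4096 };
	struct toy_page newp = { 0 };

	migrate_prepatch(&oldp, &newp);

	/* the old page will be freed, but its charge was never uncharged or
	 * transferred: this is the leak the patch addresses */
	printf("old charge: %d, new charge: %d\n", oldp.charge, newp.charge);
	return 0;
}
```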
This patch tries to keep the memory cgroup association across page migration
by holding one extra refcount on the page_cgroup during it. 3 functions are added.
mem_cgroup_prepare_migration() --- increase refcnt of page->page_cgroup
mem_cgroup_end_migration() --- decrease refcnt of page->page_cgroup
mem_cgroup_page_migration() --- copy page->page_cgroup from old page to
new page.
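For reference, this is how the three hooks are wired into the migration path, condensed from the mm/migrate.c hunks in the diff below (schematic only, not the literal patched function; locking, RCU and error handling are omitted):

```c
/* Schematic condensation of unmap_and_move()/move_to_new_page() after this patch. */
static int unmap_and_move_sketch(struct page *page, struct page *newpage)
{
	int rc = -EAGAIN;
	/* pin page->page_cgroup so a concurrent uncharge cannot free it */
	int charge = mem_cgroup_prepare_migration(page);

	/* replace the ptes with migration ptes */
	try_to_unmap(page, 1);

	if (!page_mapped(page)) {
		/* on success this calls mem_cgroup_page_migration(page, newpage)
		 * before remove_migration_ptes(page, newpage) */
		rc = move_to_new_page(newpage, page);
	}

	if (rc) {
		/* migration failed: map the old page back in */
		remove_migration_ptes(page, page);
		if (charge)
			mem_cgroup_end_migration(page);    /* drop the extra ref on the old page */
	} else if (charge) {
		/* the page_cgroup now points at newpage; drop the extra ref there */
		mem_cgroup_end_migration(newpage);
	}
	return rc;
}
```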
During migration,
- the old page is under PG_locked.
- the new page is under PG_locked, too.
- neither the old page nor the new page is on the LRU.
These 3 facts guarantee that the page_cgroup migration has no race.
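The no-race argument also leans on the extra reference: mem_cgroup_prepare_migration() only pins the page_cgroup if its refcount is still non-zero, so a final uncharge either completes entirely before migration (and the copy is skipped) or is deferred until mem_cgroup_end_migration() drops the extra reference. Below is a small userspace model of that atomic_inc_not_zero() pattern (C11 atomics; inc_not_zero() is a local helper mirroring the kernel primitive, not kernel code):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* take a reference only if the object is still live (refcount != 0) */
static bool inc_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);
	while (old != 0) {
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;   /* pinned: the object cannot be freed under us */
	}
	return false;              /* already dead; caller must not touch it */
}

int main(void)
{
	atomic_int ref_cnt = 1;    /* a page_cgroup with one remaining charge */

	if (inc_not_zero(&ref_cnt))
		printf("migration pinned the page_cgroup, ref_cnt=%d\n",
		       atomic_load(&ref_cnt));

	/* mem_cgroup_end_migration() corresponds to dropping this extra reference */
	atomic_fetch_sub(&ref_cnt, 1);
	return 0;
}
```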
Tested and worked well on an x86_64/fake-NUMA box.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| -rw-r--r-- | include/linux/memcontrol.h | 19 |
| -rw-r--r-- | mm/memcontrol.c | 43 |
| -rw-r--r-- | mm/migrate.c | 13 |

3 files changed, 72 insertions, 3 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 42536c737d9..4ec712967f7 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -60,6 +60,10 @@ static inline void mem_cgroup_uncharge_page(struct page *page)
 	mem_cgroup_uncharge(page_get_page_cgroup(page));
 }
 
+extern int mem_cgroup_prepare_migration(struct page *page);
+extern void mem_cgroup_end_migration(struct page *page);
+extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
+
 #else /* CONFIG_CGROUP_MEM_CONT */
 static inline void mm_init_cgroup(struct mm_struct *mm,
 					struct task_struct *p)
@@ -117,6 +121,21 @@ static inline int task_in_mem_cgroup(struct task_struct *task,
 	return 1;
 }
 
+static inline int mem_cgroup_prepare_migration(struct page *page)
+{
+	return 0;
+}
+
+static inline void mem_cgroup_end_migration(struct page *page)
+{
+}
+
+static inline void
+mem_cgroup_page_migration(struct page *page, struct page *newpage)
+{
+}
+
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3270ce7375d..128f45c16fa 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -492,6 +492,49 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
 		}
 	}
 }
+/*
+ * Returns non-zero if a page (under migration) has valid page_cgroup member.
+ * Refcnt of page_cgroup is incremented.
+ */
+
+int mem_cgroup_prepare_migration(struct page *page)
+{
+	struct page_cgroup *pc;
+	int ret = 0;
+	lock_page_cgroup(page);
+	pc = page_get_page_cgroup(page);
+	if (pc && atomic_inc_not_zero(&pc->ref_cnt))
+		ret = 1;
+	unlock_page_cgroup(page);
+	return ret;
+}
+
+void mem_cgroup_end_migration(struct page *page)
+{
+	struct page_cgroup *pc = page_get_page_cgroup(page);
+	mem_cgroup_uncharge(pc);
+}
+/*
+ * We know both *page* and *newpage* are now not-on-LRU and Pg_locked.
+ * And no race with uncharge() routines because page_cgroup for *page*
+ * has extra one reference by mem_cgroup_prepare_migration.
+ */
+
+void mem_cgroup_page_migration(struct page *page, struct page *newpage)
+{
+	struct page_cgroup *pc;
+retry:
+	pc = page_get_page_cgroup(page);
+	if (!pc)
+		return;
+	if (clear_page_cgroup(page, pc) != pc)
+		goto retry;
+	pc->page = newpage;
+	lock_page_cgroup(newpage);
+	page_assign_page_cgroup(newpage, pc);
+	unlock_page_cgroup(newpage);
+	return;
+}
 
 int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
 {
diff --git a/mm/migrate.c b/mm/migrate.c
index 76379414469..a73504ff5ab 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -593,9 +593,10 @@ static int move_to_new_page(struct page *newpage, struct page *page)
 	else
 		rc = fallback_migrate_page(mapping, newpage, page);
 
-	if (!rc)
+	if (!rc) {
+		mem_cgroup_page_migration(page, newpage);
 		remove_migration_ptes(page, newpage);
-	else
+	} else
 		newpage->mapping = NULL;
 
 	unlock_page(newpage);
@@ -614,6 +615,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
 	int rcu_locked = 0;
+	int charge = 0;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -673,14 +675,19 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		goto rcu_unlock;
 	}
 
+	charge = mem_cgroup_prepare_migration(page);
 	/* Establish migration ptes or remove ptes */
 	try_to_unmap(page, 1);
 
 	if (!page_mapped(page))
 		rc = move_to_new_page(newpage, page);
 
-	if (rc)
+	if (rc) {
 		remove_migration_ptes(page, page);
+		if (charge)
+			mem_cgroup_end_migration(page);
+	} else if (charge)
+		mem_cgroup_end_migration(newpage);
 rcu_unlock:
 	if (rcu_locked)
 		rcu_read_unlock();