author		KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2008-04-29 01:00:22 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-29 08:06:10 -0700
commit		3eae90c3cdd4e762d0f4f5e939c98780fccded57 (patch)
tree		d5b6f138e8464827b7b882fc6acea3916aca8957 /mm
parent		29f2a4dac856e9433a502b05b40e8e90385d8e27 (diff)
memcg: remove redundant function calls
__mem_cgroup_remove_list()/__mem_cgroup_add_list() call page_cgroup_zoneinfo()
internally, so the zoneinfo lookup is done twice: once by the caller before
taking the lock (to find the lru_lock) and once again inside the helper after
the lock is held.

	mz = page_cgroup_zoneinfo(pc);
	lock();
	mz = page_cgroup_zoneinfo(pc);
	....
	unlock();

The address of mz never changes between the two calls, so the second lookup is
redundant.  Pass mz to the list helpers instead of recomputing it there.
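After the change the caller looks up mz once and passes it down; a minimal
sketch of the resulting pattern, mirroring the charge-path hunk in the diff
below (same names, nothing new introduced):

	mz = page_cgroup_zoneinfo(pc);			/* single lookup */
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);			/* mz passed in, no re-lookup */
	spin_unlock_irqrestore(&mz->lru_lock, flags);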
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f891876efee..395fd8e4166 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -275,10 +275,10 @@ static void unlock_page_cgroup(struct page *page)
 	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-static void __mem_cgroup_remove_list(struct page_cgroup *pc)
+static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
+					struct page_cgroup *pc)
 {
 	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
 	if (from)
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
@@ -289,10 +289,10 @@ static void __mem_cgroup_remove_list(struct page_cgroup *pc)
 	list_del_init(&pc->lru);
 }
 
-static void __mem_cgroup_add_list(struct page_cgroup *pc)
+static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
+					struct page_cgroup *pc)
 {
 	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
 	if (!to) {
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
@@ -618,7 +618,7 @@ retry:
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_add_list(pc);
+	__mem_cgroup_add_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	unlock_page_cgroup(page);
@@ -674,7 +674,7 @@ void mem_cgroup_uncharge_page(struct page *page)
 	if (--(pc->ref_cnt) == 0) {
 		mz = page_cgroup_zoneinfo(pc);
 		spin_lock_irqsave(&mz->lru_lock, flags);
-		__mem_cgroup_remove_list(pc);
+		__mem_cgroup_remove_list(mz, pc);
 		spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 		page_assign_page_cgroup(page, NULL);
@@ -736,7 +736,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_remove_list(pc);
+	__mem_cgroup_remove_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	page_assign_page_cgroup(page, NULL);
@@ -748,7 +748,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_add_list(pc);
+	__mem_cgroup_add_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	unlock_page_cgroup(newpage);