author | KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> | 2008-07-25 01:47:20 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-25 10:53:37 -0700
commit | 628f42355389cfb596ca3a5a5f64fb9054a2a06a
tree | a1a19788e554f4345f80ae835edcb5ad2402533a
parent | 12b9804419cfb1c1bdac413f6c373af3b88d154b
memcg: limit change shrink usage
Shrink memory usage when the limit is changed: if the new limit is below the current usage, reclaim pages from the group until the usage fits, giving up with -EBUSY after repeated unproductive reclaim passes or with -EINTR when a signal is pending.
[akpm@linux-foundation.org: coding-style fixes]
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | Documentation/controllers/memory.txt | 3
-rw-r--r-- | mm/memcontrol.c | 48
2 files changed, 45 insertions, 6 deletions
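mem_cgroup_resize_limit() in the diff below spins on res_counter_set_limit(), which is not shown in this patch. Here is a minimal sketch of the contract the loop assumes, with hypothetical names and the locking stripped out: the limit update is refused while current usage still exceeds the requested value, and each refusal is answered with another reclaim pass.

```c
#include <errno.h>

/*
 * Hypothetical stand-in for the res_counter limit helper -- NOT the kernel
 * code.  The real res_counter_set_limit() takes a spinlock around the same
 * check; what matters here is the contract: the limit cannot be set below
 * the current usage, so the caller must reclaim and retry.
 */
struct fake_counter {
        unsigned long long usage;
        unsigned long long limit;
};

static int fake_set_limit(struct fake_counter *cnt, unsigned long long limit)
{
        if (cnt->usage > limit)
                return -EBUSY;  /* still over the requested limit: reclaim, then retry */
        cnt->limit = limit;
        return 0;
}
```

The loop in the patch gives up after MEM_CGROUP_RECLAIM_RETRIES passes that free nothing, or earlier if a signal is pending.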
```diff
diff --git a/Documentation/controllers/memory.txt b/Documentation/controllers/memory.txt
index 866b9cd9a95..9b53d582736 100644
--- a/Documentation/controllers/memory.txt
+++ b/Documentation/controllers/memory.txt
@@ -242,8 +242,7 @@ rmdir() if there are no tasks.
 1. Add support for accounting huge pages (as a separate controller)
 2. Make per-cgroup scanner reclaim not-shared pages first
 3. Teach controller to account for shared-pages
-4. Start reclamation when the limit is lowered
-5. Start reclamation in the background when the limit is
+4. Start reclamation in the background when the limit is
    not yet hit but the usage is getting closer
 
 Summary
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0c035647d36..fba566c5132 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -812,6 +812,30 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 	return 0;
 }
 
+int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
+{
+
+	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+	int progress;
+	int ret = 0;
+
+	while (res_counter_set_limit(&memcg->res, val)) {
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+		if (!retry_count) {
+			ret = -EBUSY;
+			break;
+		}
+		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
+		if (!progress)
+			retry_count--;
+	}
+	return ret;
+}
+
+
 /*
  * This routine traverse page_cgroup in given list and drop them all.
  * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
@@ -896,13 +920,29 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
 	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
 				    cft->private);
 }
-
+/*
+ * The user of this function is...
+ * RES_LIMIT.
+ */
 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
 			    const char *buffer)
 {
-	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
-				 cft->private, buffer,
-				 res_counter_memparse_write_strategy);
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+	unsigned long long val;
+	int ret;
+
+	switch (cft->private) {
+	case RES_LIMIT:
+		/* This function does all necessary parse...reuse it */
+		ret = res_counter_memparse_write_strategy(buffer, &val);
+		if (!ret)
+			ret = mem_cgroup_resize_limit(memcg, val);
+		break;
+	default:
+		ret = -EINVAL; /* should be BUG() ? */
+		break;
+	}
+	return ret;
 }
 
 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
```
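For completeness, a hypothetical user-space sketch of how the new path is exercised: after this patch, writing a value below the current usage to memory.limit_in_bytes triggers reclaim instead of failing outright. The mount point and group name below are assumptions for illustration only; the EBUSY/EINTR handling mirrors the return codes introduced by mem_cgroup_resize_limit().

```c
/*
 * Hypothetical test program, not part of the patch.  Assumes the memory
 * controller is mounted at /cgroups and a group named "0" already exists;
 * adjust the path for your setup.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/cgroups/0/memory.limit_in_bytes";
	const char *new_limit = "64M";	/* memparse accepts K/M/G suffixes */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, new_limit, strlen(new_limit)) < 0) {
		if (errno == EBUSY)
			fprintf(stderr, "could not shrink usage below the new limit\n");
		else if (errno == EINTR)
			fprintf(stderr, "reclaim interrupted by a signal\n");
		else
			perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	printf("limit updated\n");
	return 0;
}
```

Bounding the kernel-side loop with MEM_CGROUP_RECLAIM_RETRIES and checking signal_pending() on each iteration keeps the write from blocking indefinitely: it either succeeds, fails quickly with EBUSY, or returns EINTR so the caller can retry.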