From f7402e0361d4472535e07cfca648f2fa81d85cd2 Mon Sep 17 00:00:00 2001
From: Adrian Bunk
Date: Mon, 29 Oct 2007 21:18:10 +0100
Subject: sched: make kernel/sched.c:account_guest_time() static

account_guest_time() can become static.

Signed-off-by: Adrian Bunk
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index b4fbbc44045..74dbb4020cf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3355,7 +3355,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in virtual machine since the last update
  */
-void account_guest_time(struct task_struct *p, cputime_t cputime)
+static void account_guest_time(struct task_struct *p, cputime_t cputime)
 {
 	cputime64_t tmp;
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
--
cgit v1.2.3


From 73a2bcb0edb9ffb0b007b3546b430e2c6e415eee Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 29 Oct 2007 21:18:11 +0100
Subject: sched: keep utime/stime monotonic

keep utime/stime monotonic.

cpustats use utime/stime as a ratio against sum_exec_runtime; as a
consequence, when the ratio changes faster than time accumulates,
either value can appear to go backwards.

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 fs/proc/array.c       | 3 ++-
 include/linux/sched.h | 1 +
 kernel/fork.c         | 1 +
 3 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/fs/proc/array.c b/fs/proc/array.c
index 63c95afb561..d80baaabf83 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -358,7 +358,8 @@ static cputime_t task_utime(struct task_struct *p)
 	}
 	utime = (clock_t)temp;
 
-	return clock_t_to_cputime(utime);
+	p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
+	return p->prev_utime;
 }
 
 static cputime_t task_stime(struct task_struct *p)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3c07d595979..b0b1fe6e0b1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1009,6 +1009,7 @@ struct task_struct {
 	unsigned int rt_priority;
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
+	cputime_t prev_utime;
 	unsigned long nvcsw, nivcsw; /* context switch counts */
 	struct timespec start_time;		/* monotonic time */
 	struct timespec real_start_time;	/* boot based time */
diff --git a/kernel/fork.c b/kernel/fork.c
index ddafdfac945..a65bfc47177 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1056,6 +1056,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->gtime = cputime_zero;
 	p->utimescaled = cputime_zero;
 	p->stimescaled = cputime_zero;
+	p->prev_utime = cputime_zero;
 
 #ifdef CONFIG_TASK_XACCT
 	p->rchar = 0;		/* I/O counter: bytes read */
--
cgit v1.2.3
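
The clamp added above is easiest to see with concrete numbers. The following
stand-alone C sketch is not kernel code -- the sampled values and types are
made up for illustration -- but it shows how clamping each new ratio-based
estimate against the previously reported value (the role of p->prev_utime)
keeps the reported time monotonic even when the raw estimate dips:

	#include <stdio.h>

	/*
	 * Simplified stand-in for the ratio-based utime estimate
	 * (sum_exec_runtime scaled by utime / (utime + stime)); the raw
	 * value can shrink when the ratio changes faster than time accrues.
	 */
	static const unsigned long long raw_estimate[] = { 100, 140, 130, 180, 170 };

	int main(void)
	{
		unsigned long long prev_utime = 0;	/* plays the role of p->prev_utime */
		size_t i;

		for (i = 0; i < sizeof(raw_estimate) / sizeof(raw_estimate[0]); i++) {
			/* same idea as p->prev_utime = max(p->prev_utime, utime) */
			if (raw_estimate[i] > prev_utime)
				prev_utime = raw_estimate[i];
			printf("raw=%llu reported=%llu\n", raw_estimate[i], prev_utime);
		}
		return 0;
	}
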

From 7bae49d498de87f0da0c20c67adaa278eac84566 Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Mon, 29 Oct 2007 21:18:11 +0100
Subject: sched: fix incorrect assumption that cpu 0 exists

This patch:

commit 9b5b77512dce239fa168183fa71896712232e95a
Author: Srivatsa Vaddagiri
Date:   Mon Oct 15 17:00:09 2007 +0200

    sched: clean up code under CONFIG_FAIR_GROUP_SCHED

introduced an assumption of the existence of CPU0 via this line:

	cfs_rq = tg->cfs_rq[0];

If you have no CPU0, that will be NULL.  The fix seems to be just to
take whatever cfs_rq comes out of the for_each_possible_cpu() loop,
since they're all equally good for the destruction operation.

Signed-off-by: James Bottomley
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 74dbb4020cf..235952b100e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7041,7 +7041,7 @@ static void free_sched_group(struct rcu_head *rhp)
 /* Destroy runqueue etc associated with a task group */
 void sched_destroy_group(struct task_group *tg)
 {
-	struct cfs_rq *cfs_rq;
+	struct cfs_rq *cfs_rq = NULL;
 	int i;
 
 	for_each_possible_cpu(i) {
@@ -7049,7 +7049,7 @@ void sched_destroy_group(struct task_group *tg)
 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
 	}
 
-	cfs_rq = tg->cfs_rq[0];
+	BUG_ON(!cfs_rq);
 
 	/* wait for possible concurrent references to cfs_rqs complete */
 	call_rcu(&cfs_rq->rcu, free_sched_group);
--
cgit v1.2.3


From ae8393e508e5f17add66964688c49bf0bfe4fcf9 Mon Sep 17 00:00:00 2001
From: Srivatsa Vaddagiri
Date: Mon, 29 Oct 2007 21:18:11 +0100
Subject: sched: move rcu_head to task_group struct

Peter Zijlstra noticed that the rcu_head object need not be present
in every cfs_rq of a group.  Move it to the task_group structure
instead.

Signed-off-by: Srivatsa Vaddagiri
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 235952b100e..470480790f3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -172,6 +172,7 @@ struct task_group {
 	unsigned long shares;
 	/* spinlock to serialize modification to shares */
 	spinlock_t lock;
+	struct rcu_head rcu;
 };
 
 /* Default task group's sched entity on each cpu */
@@ -258,7 +259,6 @@ struct cfs_rq {
 	 */
 	struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
 	struct task_group *tg;	/* group that "owns" this runqueue */
-	struct rcu_head rcu;
 #endif
 };
 
@@ -7019,8 +7019,8 @@ err:
 /* rcu callback to free various structures associated with a task group */
 static void free_sched_group(struct rcu_head *rhp)
 {
-	struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu);
-	struct task_group *tg = cfs_rq->tg;
+	struct task_group *tg = container_of(rhp, struct task_group, rcu);
+	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
 	int i;
 
@@ -7052,7 +7052,7 @@ void sched_destroy_group(struct task_group *tg)
 	BUG_ON(!cfs_rq);
 
 	/* wait for possible concurrent references to cfs_rqs complete */
-	call_rcu(&cfs_rq->rcu, free_sched_group);
+	call_rcu(&tg->rcu, free_sched_group);
 }
 
 /* change task's runqueue when it moves between groups.
--
cgit v1.2.3
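
The key to the change above is that call_rcu() only hands the callback a
pointer to the embedded rcu_head, so the callback has to recover the owning
object with container_of(). Here is a minimal user-space sketch of just that
recovery step -- the rcu_head and task_group definitions below are
stripped-down stand-ins, and the callback is invoked directly instead of going
through the kernel's RCU machinery:

	#include <stdio.h>
	#include <stddef.h>

	/* stripped-down stand-ins for the kernel structures */
	struct rcu_head {
		void (*func)(struct rcu_head *head);
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct task_group {
		unsigned long shares;
		struct rcu_head rcu;	/* one rcu_head per group, not per cfs_rq */
	};

	static void free_sched_group(struct rcu_head *rhp)
	{
		/* recover the owning task_group from the embedded rcu_head */
		struct task_group *tg = container_of(rhp, struct task_group, rcu);

		printf("freeing task_group with shares=%lu\n", tg->shares);
	}

	int main(void)
	{
		struct task_group tg = { .shares = 1024 };

		/*
		 * In the kernel this would be call_rcu(&tg->rcu, free_sched_group);
		 * here the callback is called directly to show the recovery step.
		 */
		free_sched_group(&tg.rcu);
		return 0;
	}
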

From fe5c7cc22897b809a2fbe05bea71963853df7f17 Mon Sep 17 00:00:00 2001
From: Paul Menage
Date: Mon, 29 Oct 2007 21:18:11 +0100
Subject: sched: report CPU usage in CFS cgroup directories

Adds a cpu.usage file to the CFS cgroup that reports CPU usage in
milliseconds for that cgroup's tasks.

[ mingo@elte.hu: style cleanups. ]

Signed-off-by: Paul Menage
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 38 +++++++++++++++++++++++++++++++++-----
 1 file changed, 33 insertions(+), 5 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 470480790f3..6c4abfbd68e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7211,15 +7211,43 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
 	return (u64) tg->shares;
 }
 
-static struct cftype cpu_shares = {
-	.name = "shares",
-	.read_uint = cpu_shares_read_uint,
-	.write_uint = cpu_shares_write_uint,
+static u64 cpu_usage_read(struct cgroup *cgrp, struct cftype *cft)
+{
+	struct task_group *tg = cgroup_tg(cgrp);
+	unsigned long flags;
+	u64 res = 0;
+	int i;
+
+	for_each_possible_cpu(i) {
+		/*
+		 * Lock to prevent races with updating 64-bit counters
+		 * on 32-bit arches.
+		 */
+		spin_lock_irqsave(&cpu_rq(i)->lock, flags);
+		res += tg->se[i]->sum_exec_runtime;
+		spin_unlock_irqrestore(&cpu_rq(i)->lock, flags);
+	}
+	/* Convert from ns to ms */
+	do_div(res, 1000000);
+
+	return res;
+}
+
+static struct cftype cpu_files[] = {
+	{
+		.name = "shares",
+		.read_uint = cpu_shares_read_uint,
+		.write_uint = cpu_shares_write_uint,
+	},
+	{
+		.name = "usage",
+		.read_uint = cpu_usage_read,
+	},
 };
 
 static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
 {
-	return cgroup_add_file(cont, ss, &cpu_shares);
+	return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
 }
 
 struct cgroup_subsys cpu_cgroup_subsys = {
--
cgit v1.2.3
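
The new cpu.usage file is read like any other cgroup control file. As a rough
user-space illustration -- the /cgroup mount point and the group name
"mygroup" are assumptions made for this example, not anything mandated by the
patch -- a reader might look like this:

	#include <stdio.h>

	int main(void)
	{
		/*
		 * Assumed layout: the cpu cgroup subsystem is mounted at
		 * /cgroup and a group named "mygroup" already exists.
		 */
		const char *path = "/cgroup/mygroup/cpu.usage";
		unsigned long long usage_ms;
		FILE *f = fopen(path, "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fscanf(f, "%llu", &usage_ms) != 1) {
			fprintf(stderr, "unexpected contents in %s\n", path);
			fclose(f);
			return 1;
		}
		fclose(f);

		/* cpu.usage reports the group's cumulative CPU time in ms */
		printf("cgroup has used %llu ms of CPU time\n", usage_ms);
		return 0;
	}
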

From 8eb172d9418c9387234a2c9a344131c46b5eea5b Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 29 Oct 2007 21:18:11 +0100
Subject: sched: fix style of swap() macro in kernel/sched_fair.c

fix style of swap() macro in kernel/sched_fair.c.

( this macro should eventually move to a general header, as ext3 uses
  a similar construct too. )

Signed-off-by: Ingo Molnar
---
 kernel/sched_fair.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9971831b560..01859f662ab 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1025,7 +1025,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
 	}
 }
 
-#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
+#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
 
 /*
  * Share the fairness runtime between parent and child, thus the
--
cgit v1.2.3


From 38605cae99d386332df6822a22dba7bfdc8fae1c Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 29 Oct 2007 21:18:11 +0100
Subject: sched: fix style in kernel/sched.c

fallout of recent commits: small coding style fixes.

Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 6c4abfbd68e..3f6bd111290 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5365,7 +5365,7 @@ static struct ctl_table sd_ctl_dir[] = {
 		.procname	= "sched_domain",
 		.mode		= 0555,
 	},
-	{0,},
+	{0, },
 };
 
 static struct ctl_table sd_ctl_root[] = {
@@ -5375,7 +5375,7 @@ static struct ctl_table sd_ctl_root[] = {
 		.mode		= 0555,
 		.child		= sd_ctl_dir,
 	},
-	{0,},
+	{0, },
 };
 
 static struct ctl_table *sd_alloc_ctl_entry(int n)
@@ -7251,13 +7251,13 @@ static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
 }
 
 struct cgroup_subsys cpu_cgroup_subsys = {
-	.name 		= "cpu",
-	.create 	= cpu_cgroup_create,
-	.destroy 	= cpu_cgroup_destroy,
-	.can_attach 	= cpu_cgroup_can_attach,
-	.attach 	= cpu_cgroup_attach,
-	.populate 	= cpu_cgroup_populate,
-	.subsys_id 	= cpu_cgroup_subsys_id,
+	.name		= "cpu",
+	.create		= cpu_cgroup_create,
+	.destroy	= cpu_cgroup_destroy,
+	.can_attach	= cpu_cgroup_can_attach,
+	.attach		= cpu_cgroup_attach,
+	.populate	= cpu_cgroup_populate,
+	.subsys_id	= cpu_cgroup_subsys_id,
 	.early_init	= 1,
 };
--
cgit v1.2.3