-rw-r--r--  fs/proc/array.c       |  3
-rw-r--r--  include/linux/sched.h |  1
-rw-r--r--  kernel/fork.c         |  1
-rw-r--r--  kernel/sched.c        | 70
-rw-r--r--  kernel/sched_fair.c   |  2
5 files changed, 54 insertions(+), 23 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 63c95afb561..d80baaabf83 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -358,7 +358,8 @@ static cputime_t task_utime(struct task_struct *p)
 	}
 	utime = (clock_t)temp;

-	return clock_t_to_cputime(utime);
+	p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
+	return p->prev_utime;
 }

 static cputime_t task_stime(struct task_struct *p)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3c07d595979..b0b1fe6e0b1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1009,6 +1009,7 @@ struct task_struct {
 	unsigned int rt_priority;
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
+	cputime_t prev_utime;
 	unsigned long nvcsw, nivcsw; /* context switch counts */
 	struct timespec start_time;		/* monotonic time */
 	struct timespec real_start_time;	/* boot based time */
diff --git a/kernel/fork.c b/kernel/fork.c
index ddafdfac945..a65bfc47177 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1056,6 +1056,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->gtime = cputime_zero;
 	p->utimescaled = cputime_zero;
 	p->stimescaled = cputime_zero;
+	p->prev_utime = cputime_zero;

 #ifdef CONFIG_TASK_XACCT
 	p->rchar = 0;		/* I/O counter: bytes read */
diff --git a/kernel/sched.c b/kernel/sched.c
index b4fbbc44045..3f6bd111290 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -172,6 +172,7 @@ struct task_group {
 	unsigned long shares;
 	/* spinlock to serialize modification to shares */
 	spinlock_t lock;
+	struct rcu_head rcu;
 };

 /* Default task group's sched entity on each cpu */
@@ -258,7 +259,6 @@ struct cfs_rq {
 	 */
 	struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
 	struct task_group *tg;	/* group that "owns" this runqueue */
-	struct rcu_head rcu;
 #endif
 };

@@ -3355,7 +3355,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in virtual machine since the last update
  */
-void account_guest_time(struct task_struct *p, cputime_t cputime)
+static void account_guest_time(struct task_struct *p, cputime_t cputime)
 {
 	cputime64_t tmp;
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
@@ -5365,7 +5365,7 @@ static struct ctl_table sd_ctl_dir[] = {
 		.procname	= "sched_domain",
 		.mode		= 0555,
 	},
-	{0,},
+	{0, },
 };

 static struct ctl_table sd_ctl_root[] = {
@@ -5375,7 +5375,7 @@ static struct ctl_table sd_ctl_root[] = {
 		.mode		= 0555,
 		.child		= sd_ctl_dir,
 	},
-	{0,},
+	{0, },
 };

 static struct ctl_table *sd_alloc_ctl_entry(int n)
@@ -7019,8 +7019,8 @@ err:
 /* rcu callback to free various structures associated with a task group */
 static void free_sched_group(struct rcu_head *rhp)
 {
-	struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu);
-	struct task_group *tg = cfs_rq->tg;
+	struct task_group *tg = container_of(rhp, struct task_group, rcu);
+	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
 	int i;

@@ -7041,7 +7041,7 @@ static void free_sched_group(struct rcu_head *rhp)
 /* Destroy runqueue etc associated with a task group */
 void sched_destroy_group(struct task_group *tg)
 {
-	struct cfs_rq *cfs_rq;
+	struct cfs_rq *cfs_rq = NULL;
 	int i;

 	for_each_possible_cpu(i) {
@@ -7049,10 +7049,10 @@ void sched_destroy_group(struct task_group *tg)
 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
 	}

-	cfs_rq = tg->cfs_rq[0];
+	BUG_ON(!cfs_rq);

 	/* wait for possible concurrent references to cfs_rqs complete */
-	call_rcu(&cfs_rq->rcu, free_sched_group);
+	call_rcu(&tg->rcu, free_sched_group);
 }

 /* change task's runqueue when it moves between groups.
@@ -7211,25 +7211,53 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
 	return (u64) tg->shares;
 }

-static struct cftype cpu_shares = {
-	.name = "shares",
-	.read_uint = cpu_shares_read_uint,
-	.write_uint = cpu_shares_write_uint,
+static u64 cpu_usage_read(struct cgroup *cgrp, struct cftype *cft)
+{
+	struct task_group *tg = cgroup_tg(cgrp);
+	unsigned long flags;
+	u64 res = 0;
+	int i;
+
+	for_each_possible_cpu(i) {
+		/*
+		 * Lock to prevent races with updating 64-bit counters
+		 * on 32-bit arches.
+		 */
+		spin_lock_irqsave(&cpu_rq(i)->lock, flags);
+		res += tg->se[i]->sum_exec_runtime;
+		spin_unlock_irqrestore(&cpu_rq(i)->lock, flags);
+	}
+	/* Convert from ns to ms */
+	do_div(res, 1000000);
+
+	return res;
+}
+
+static struct cftype cpu_files[] = {
+	{
+		.name = "shares",
+		.read_uint = cpu_shares_read_uint,
+		.write_uint = cpu_shares_write_uint,
+	},
+	{
+		.name = "usage",
+		.read_uint = cpu_usage_read,
+	},
 };

 static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
 {
-	return cgroup_add_file(cont, ss, &cpu_shares);
+	return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
 }

 struct cgroup_subsys cpu_cgroup_subsys = {
-	.name = "cpu",
-	.create = cpu_cgroup_create,
-	.destroy = cpu_cgroup_destroy,
-	.can_attach = cpu_cgroup_can_attach,
-	.attach = cpu_cgroup_attach,
-	.populate = cpu_cgroup_populate,
-	.subsys_id = cpu_cgroup_subsys_id,
+	.name		= "cpu",
+	.create		= cpu_cgroup_create,
+	.destroy	= cpu_cgroup_destroy,
+	.can_attach	= cpu_cgroup_can_attach,
+	.attach		= cpu_cgroup_attach,
+	.populate	= cpu_cgroup_populate,
+	.subsys_id	= cpu_cgroup_subsys_id,
 	.early_init	= 1,
 };
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9971831b560..01859f662ab 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1025,7 +1025,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
 	}
 }

-#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
+#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

 /*
  * Share the fairness runtime between parent and child, thus the
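
Notes on the changes above:

The fs/proc/array.c hunk makes the utime exported through /proc monotonic. task_utime() derives utime from the scheduler's sum_exec_runtime, and successive reads of that split estimate can jitter backwards; clamping each result through p->prev_utime with max() guarantees that a later read never reports less than an earlier one. A minimal sketch of the same idea, as standalone userspace C rather than kernel code (all names below are illustrative):

/*
 * Sketch of the clamp applied in task_utime(): remember the largest
 * value ever returned and never report anything smaller, so a jittery
 * estimate becomes monotonically non-decreasing.
 */
#include <stdio.h>

static unsigned long prev_utime;	/* mirrors p->prev_utime */

static unsigned long report_utime(unsigned long estimate)
{
	/* same effect as: prev_utime = max(prev_utime, estimate) */
	if (estimate > prev_utime)
		prev_utime = estimate;
	return prev_utime;
}

int main(void)
{
	/* a sampled estimate that briefly goes backwards */
	unsigned long samples[] = { 100, 105, 103, 110 };
	int i;

	/* reported values never regress: 100 105 105 110 */
	for (i = 0; i < 4; i++)
		printf("%lu\n", report_utime(samples[i]));
	return 0;
}

In kernel/sched.c, moving the rcu_head from the per-cpu cfs_rq into struct task_group lets free_sched_group() recover the group directly via container_of(rhp, struct task_group, rcu) instead of going through cfs_rq->tg, and sched_destroy_group() now queues exactly one RCU callback per group rather than borrowing tg->cfs_rq[0]'s. The new cpu_usage_read() sums the group's per-cpu sched_entity sum_exec_runtime under each runqueue lock (needed for a consistent 64-bit read on 32-bit architectures) and reports the total in milliseconds. Assuming the registered cftype name "usage" gets the usual subsystem-name prefix, each cpu-controller cgroup directory should expose a read-only cpu.usage file next to cpu.shares; where those appear depends on where the cgroup filesystem is mounted.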