commit     eba1ed4b7e52720e3099325874811c38a5ec1562
tree       4ce145dd10b6cc248a2bc3463822dff23f9dd3c5
parent     a4b29ba2f72673aaa60ba11ced74d579771dd578
author     Ingo Molnar <mingo@elte.hu>    2007-10-15 17:00:02 +0200
committer  Ingo Molnar <mingo@elte.hu>    2007-10-15 17:00:02 +0200
sched: debug: track maximum 'slice'
track the maximum amount of time a task has executed while
the CPU load was at least 2x. (i.e. at least two nice-0
tasks were runnable)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
 include/linux/sched.h |  1 +
 kernel/sched.c        |  1 +
 kernel/sched_debug.c  |  2 ++
 kernel/sched_fair.c   | 11 +++++++++++
 4 files changed, 15 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 833f7dc2b8d..9761b165d56 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -921,6 +921,7 @@ struct sched_entity {
 	u64			block_start;
 	u64			block_max;
 	u64			exec_max;
+	u64			slice_max;
 
 	unsigned long		wait_runtime_overruns;
 	unsigned long		wait_runtime_underruns;
diff --git a/kernel/sched.c b/kernel/sched.c
index e92b185e371..282d037c730 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1603,6 +1603,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.sleep_max			= 0;
 	p->se.block_max			= 0;
 	p->se.exec_max			= 0;
+	p->se.slice_max			= 0;
 	p->se.wait_max			= 0;
 	p->se.wait_runtime_overruns	= 0;
 	p->se.wait_runtime_underruns	= 0;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 94915f1fd9d..fd080f686f1 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -254,6 +254,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	P(se.sleep_max);
 	P(se.block_max);
 	P(se.exec_max);
+	P(se.slice_max);
 	P(se.wait_max);
 	P(se.wait_runtime_overruns);
 	P(se.wait_runtime_underruns);
@@ -282,6 +283,7 @@ void proc_sched_set_task(struct task_struct *p)
 	p->se.sleep_max			= 0;
 	p->se.block_max			= 0;
 	p->se.exec_max			= 0;
+	p->se.slice_max			= 0;
 	p->se.wait_max			= 0;
 	p->se.wait_runtime_overruns	= 0;
 	p->se.wait_runtime_underruns	= 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0990b20fdcf..5c15d8ae92c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -739,6 +739,17 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_stats_wait_end(cfs_rq, se);
 	update_stats_curr_start(cfs_rq, se);
 	set_cfs_rq_curr(cfs_rq, se);
+#ifdef CONFIG_SCHEDSTATS
+	/*
+	 * Track our maximum slice length, if the CPU's load is at
+	 * least twice that of our own weight (i.e. dont track it
+	 * when there are only lesser-weight tasks around):
+	 */
+	if (rq_of(cfs_rq)->ls.load.weight >= 2*se->load.weight) {
+		se->slice_max = max(se->slice_max,
+			se->sum_exec_runtime - se->prev_sum_exec_runtime);
+	}
+#endif
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
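
Illustration only, not part of the commit: a minimal user-space sketch of the same bookkeeping the sched_fair.c hunk adds. struct entity, update_slice_max() and the literal weight values here are hypothetical stand-ins for the kernel's sched_entity and runqueue load fields; the point is just the rule: record the longest observed slice, but only while the runqueue's total weight is at least twice the entity's own weight.

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical, simplified stand-ins for sched_entity / rq fields. */
	struct entity {
		uint64_t load_weight;		/* this task's weight			*/
		uint64_t sum_exec_runtime;	/* total runtime so far			*/
		uint64_t prev_sum_exec_runtime;	/* runtime when it last became current	*/
		uint64_t slice_max;		/* longest slice seen under 2x load	*/
	};

	/*
	 * Mirrors the patched set_next_entity() logic: the slice just ended is
	 * sum_exec_runtime - prev_sum_exec_runtime; only fold it into slice_max
	 * when the runqueue carries at least twice this entity's own weight.
	 */
	static void update_slice_max(struct entity *se, uint64_t rq_load_weight)
	{
		uint64_t slice = se->sum_exec_runtime - se->prev_sum_exec_runtime;

		if (rq_load_weight >= 2 * se->load_weight && slice > se->slice_max)
			se->slice_max = slice;

		se->prev_sum_exec_runtime = se->sum_exec_runtime;
	}

	int main(void)
	{
		struct entity se = { .load_weight = 1024 };	/* nice-0-like weight */

		se.sum_exec_runtime = 3000000;			/* ran for 3 ms */
		update_slice_max(&se, 2048);			/* ~two nice-0 tasks runnable */
		printf("slice_max = %llu ns\n", (unsigned long long)se.slice_max);
		return 0;
	}

If this reading of the sched_debug.c hunk is right, the real counter is reported alongside the other per-task *_max statistics printed by proc_sched_show_task() and is reset together with them by proc_sched_set_task().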