author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-08-02 11:15:27 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-08-02 11:15:27 -0700
commit	370504cf7c68b953de55c41d5e0be97d30f3cf00 (patch)
tree	1941a38f78083dca4852070c229363d81bbb9aae /kernel/sched_fair.c
parent	160d6aaf60d75b71a48223b5bdc29285e18cff07 (diff)
parent	94c18227d1e3f02de5b345bd3cd5c960214dc9c8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  [PATCH] sched: reduce task_struct size
  [PATCH] sched: reduce debug code
  [PATCH] sched: use schedstat_set() API
  [PATCH] sched: add schedstat_set() API
  [PATCH] sched: move load-calculation functions
  [PATCH] sched: ->task_new cleanup
  [PATCH] sched: uninline inc/dec_nr_running()
  [PATCH] sched: uninline calc_delta_mine()
  [PATCH] sched: calc_delta_mine(): use fixed limit
  [PATCH] sched: tidy up left over smpnice code
  [PATCH] sched: remove cache_hot_time
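
The schedstat_set() helper used by the hunks below is introduced by the "[PATCH] sched: add schedstat_set() API" change in this merge; it lands outside kernel/sched_fair.c, so its definition does not appear in this diff. A minimal sketch of its likely shape, assuming the usual schedstats convention of compiling away to a no-op when CONFIG_SCHEDSTATS is disabled:

#ifdef CONFIG_SCHEDSTATS
/* schedstats enabled: update the statistics-only field */
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else
/* schedstats disabled: the field is not maintained, do nothing */
# define schedstat_set(var, val)	do { } while (0)
#endif

This is what lets __update_curr() and the wait-statistics helpers below replace their open-coded #ifdef CONFIG_SCHEDSTATS blocks with single-line calls.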
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	21
1 file changed, 5 insertions(+), 16 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6971db0a716..6f579ff5a9b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -292,10 +292,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, u64 now)
return;
delta_exec = curr->delta_exec;
-#ifdef CONFIG_SCHEDSTATS
- if (unlikely(delta_exec > curr->exec_max))
- curr->exec_max = delta_exec;
-#endif
+ schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
curr->sum_exec_runtime += delta_exec;
cfs_rq->exec_clock += delta_exec;
@@ -352,7 +349,7 @@ static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
se->wait_start_fair = cfs_rq->fair_clock;
- se->wait_start = now;
+ schedstat_set(se->wait_start, now);
}
/*
@@ -425,13 +422,7 @@ __update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
unsigned long delta_fair = se->delta_fair_run;
-#ifdef CONFIG_SCHEDSTATS
- {
- s64 delta_wait = now - se->wait_start;
- if (unlikely(delta_wait > se->wait_max))
- se->wait_max = delta_wait;
- }
-#endif
+ schedstat_set(se->wait_max, max(se->wait_max, now - se->wait_start));
if (unlikely(se->load.weight != NICE_0_LOAD))
delta_fair = calc_weighted(delta_fair, se->load.weight,
@@ -456,7 +447,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
}
se->wait_start_fair = 0;
- se->wait_start = 0;
+ schedstat_set(se->wait_start, 0);
}
static inline void
@@ -1041,11 +1032,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
* monopolize the CPU. Note: the parent runqueue is locked,
* the child is not running yet.
*/
-static void task_new_fair(struct rq *rq, struct task_struct *p)
+static void task_new_fair(struct rq *rq, struct task_struct *p, u64 now)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
struct sched_entity *se = &p->se;
- u64 now = rq_clock(rq);
sched_info_queued(p);
@@ -1072,7 +1062,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
p->se.wait_runtime = -(sysctl_sched_granularity / 2);
__enqueue_entity(cfs_rq, se);
- inc_nr_running(p, rq, now);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
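
For context on the "->task_new cleanup" and "uninline inc/dec_nr_running()" entries above: task_new_fair() now receives the timestamp from its caller instead of reading rq_clock() itself, and no longer calls inc_nr_running() on its own. A hedged sketch of the caller side in the core scheduler (not part of this file's diff, and not the exact upstream hunk):

	/* core scheduler: sample the clock once, pass it to the class hook,
	 * then account the new runnable task centrally */
	u64 now = rq_clock(rq);

	p->sched_class->task_new(rq, p, now);	/* e.g. task_new_fair() above */
	inc_nr_running(p, rq, now);		/* moved out of the class hook */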