author    Gregory Haskins <ghaskins@novell.com>  2008-01-25 21:08:12 +0100
committer Ingo Molnar <mingo@elte.hu>            2008-01-25 21:08:12 +0100
commit    a22d7fc187ed996b66d8439db27b2303f79a8e7b (patch)
tree      44845eaac2aa44b185d0663d689fea29d94ea5ff /kernel/sched_rt.c
parent    6e1254d2c41215da27025add8900ed187bca121d (diff)
sched: wake-balance fixes
We have logic to detect whether the system has migratable tasks, but we
are not using it when deciding whether to push tasks away. Add support
for taking this information into account.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
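The gating idea is easiest to see in isolation. Below is a minimal,
self-contained C sketch of the technique, not the kernel code: the
struct rt_rq_model type and the update_overload()/try_push() helpers
are illustrative stand-ins for the kernel's rt_rq state and push path.
A queue is only worth pushing from when it holds more than one RT task
and at least one of them is allowed to migrate; caching that as a flag
lets the push path bail out cheaply.

#include <stdio.h>

/* Simplified stand-in for the per-CPU RT run-queue state. */
struct rt_rq_model {
	int rt_nr_running;   /* RT tasks queued on this CPU       */
	int rt_nr_migratory; /* of those, how many may migrate    */
	int overloaded;      /* cached: is pushing worth trying?  */
};

/*
 * Recompute the overload flag: pushing can only help when more
 * than one RT task is queued and at least one may run elsewhere.
 */
static void update_overload(struct rt_rq_model *rq)
{
	rq->overloaded = (rq->rt_nr_running > 1) &&
	                 (rq->rt_nr_migratory > 0);
}

/* Push path: bail out early unless the cached flag says it can help. */
static int try_push(struct rt_rq_model *rq)
{
	if (!rq->overloaded)
		return 0; /* nothing migratable to push */
	/* ... find the next-highest RT task and push it away ... */
	return 1;
}

int main(void)
{
	struct rt_rq_model rq = { .rt_nr_running = 2, .rt_nr_migratory = 0 };

	update_overload(&rq);
	printf("pinned tasks only -> push attempted? %d\n", try_push(&rq));

	rq.rt_nr_migratory = 1;
	update_overload(&rq);
	printf("one migratable    -> push attempted? %d\n", try_push(&rq));
	return 0;
}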
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a9d7d440816..87d7b3ff386 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -16,6 +16,7 @@ static inline cpumask_t *rt_overload(void)
 }
 static inline void rt_set_overload(struct rq *rq)
 {
+	rq->rt.overloaded = 1;
 	cpu_set(rq->cpu, rt_overload_mask);
 	/*
 	 * Make sure the mask is visible before we set
@@ -32,6 +33,7 @@ static inline void rt_clear_overload(struct rq *rq)
 	/* the order here really doesn't matter */
 	atomic_dec(&rto_count);
 	cpu_clear(rq->cpu, rt_overload_mask);
+	rq->rt.overloaded = 0;
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -448,6 +450,9 @@ static int push_rt_task(struct rq *rq)
 
 	assert_spin_locked(&rq->lock);
 
+	if (!rq->rt.overloaded)
+		return 0;
+
 	next_task = pick_next_highest_task_rt(rq, -1);
 	if (!next_task)
 		return 0;
@@ -675,7 +680,7 @@ static void schedule_tail_balance_rt(struct rq *rq)
 	 * the lock was owned by prev, we need to release it
 	 * first via finish_lock_switch and then reaquire it here.
 	 */
-	if (unlikely(rq->rt.rt_nr_running > 1)) {
+	if (unlikely(rq->rt.overloaded)) {
 		spin_lock_irq(&rq->lock);
 		push_rt_tasks(rq);
 		spin_unlock_irq(&rq->lock);
@@ -687,7 +692,8 @@ static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
 {
 	if (unlikely(rt_task(p)) &&
 	    !task_running(rq, p) &&
-	    (p->prio >= rq->curr->prio))
+	    (p->prio >= rq->rt.highest_prio) &&
+	    rq->rt.overloaded)
 		push_rt_tasks(rq);
 }