author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-06-19 14:22:26 +0200
committer Ingo Molnar <mingo@elte.hu>                2008-06-20 10:26:00 +0200
commit    eff6549b957d15d1ad168d90b8c1eb643b9c163f (patch)
tree      016e025635dabb070edf1f48fe1cee5308e18fd4 /kernel/sched_rt.c
parent    b79f3833d81d54fc71d98c8064dc45f33a755a8a (diff)
sched: rt: move some code around
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Daniel K." <dk@uw.no>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
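The patch is pure code motion, with no functional change: balance_runtime() moves below do_balance_runtime() so the forward declaration of the latter can be dropped, and do_sched_rt_period_timer() moves below the CONFIG_SMP runtime-distribution block so it sits next to the balancing code it calls. The lock choreography in balance_runtime() is worth noting: the caller holds rt_rq->rt_runtime_lock, but do_balance_runtime() needs to take the peer runqueues' locks, so the local lock is dropped for the duration of the call and retaken afterwards. Below is a minimal standalone sketch of that pattern, using pthreads rather than kernel spinlocks and hypothetical names throughout (fake_rt_rq, do_balance, balance):

/*
 * Sketch only, not kernel code. Demonstrates dropping a held lock
 * before calling a helper that must acquire other locks, then
 * retaking it. All identifiers here are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_rt_rq {
	pthread_mutex_t runtime_lock;
	unsigned long long rt_time;	/* runtime consumed this period */
	unsigned long long rt_runtime;	/* budget allowed per period */
};

/* Stand-in for do_balance_runtime(): would lock peer runqueues,
 * which is why the caller must not hold runtime_lock here. */
static int do_balance(struct fake_rt_rq *rt_rq)
{
	rt_rq->rt_runtime += 1000;	/* pretend we borrowed budget */
	return 1;
}

/* Caller holds runtime_lock, mirroring the kernel convention. */
static int balance(struct fake_rt_rq *rt_rq)
{
	int more = 0;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		pthread_mutex_unlock(&rt_rq->runtime_lock);
		more = do_balance(rt_rq);
		pthread_mutex_lock(&rt_rq->runtime_lock);
	}
	return more;
}

int main(void)
{
	struct fake_rt_rq rq = {
		.runtime_lock = PTHREAD_MUTEX_INITIALIZER,
		.rt_time = 5000,
		.rt_runtime = 4000,
	};

	pthread_mutex_lock(&rq.runtime_lock);
	printf("more=%d runtime=%llu\n", balance(&rq), rq.rt_runtime);
	pthread_mutex_unlock(&rq.runtime_lock);
	return 0;
}

In the kernel, the helper retakes rt_runtime_lock itself before touching the budget fields; the sketch elides that and stays single-threaded, so the unlocked update is safe here.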
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  119
1 file changed, 57 insertions(+), 62 deletions(-)
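For context on the second moved function before reading the diff below: do_sched_rt_period_timer() implements per-period budget replenishment. On each period-timer firing it walks the CPUs in the period mask and, per runqueue, subtracts up to 'overrun' periods' worth of budget from the consumed time (rt_time), unthrottling the queue once rt_time drops below one period's budget (rt_runtime). A minimal standalone sketch of just that arithmetic, with hypothetical names and no locking or per-CPU iteration:

#include <stdio.h>

/* Hypothetical mirror of the replenishment arithmetic in
 * do_sched_rt_period_timer(): refund 'overrun' periods of budget
 * and report whether a throttled queue may run again. */
struct budget {
	unsigned long long rt_time;	/* runtime consumed */
	unsigned long long rt_runtime;	/* budget per period */
	int throttled;
};

static int replenish(struct budget *b, int overrun)
{
	unsigned long long refund = (unsigned long long)overrun * b->rt_runtime;

	/* rt_time -= min(rt_time, overrun * runtime), as in the patch */
	b->rt_time -= (b->rt_time < refund) ? b->rt_time : refund;

	if (b->throttled && b->rt_time < b->rt_runtime) {
		b->throttled = 0;
		return 1;	/* enqueue: tasks may run again */
	}
	return 0;
}

int main(void)
{
	/* A throttled queue that overran by 2.5 periods; the timer
	 * fires after two missed periods (overrun == 2). */
	struct budget b = { .rt_time = 2500, .rt_runtime = 1000, .throttled = 1 };

	printf("enqueue=%d rt_time=%llu\n", replenish(&b, 2), b.rt_time);
	return 0;
}

The idle return value of the real function (no remaining rt_time and no runnable tasks on any walked queue) tells the caller whether the period timer needs to be restarted; the sketch omits that bookkeeping.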
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 87b2e3bf947..61d52112289 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -229,68 +229,6 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 #endif
 
 #ifdef CONFIG_SMP
-static int do_balance_runtime(struct rt_rq *rt_rq);
-
-static int balance_runtime(struct rt_rq *rt_rq)
-{
-	int more = 0;
-
-	if (rt_rq->rt_time > rt_rq->rt_runtime) {
-		spin_unlock(&rt_rq->rt_runtime_lock);
-		more = do_balance_runtime(rt_rq);
-		spin_lock(&rt_rq->rt_runtime_lock);
-	}
-
-	return more;
-}
-#else
-static inline int balance_runtime(struct rt_rq *rt_rq)
-{
-	return 0;
-}
-#endif
-
-static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
-{
-	int i, idle = 1;
-	cpumask_t span;
-
-	if (rt_b->rt_runtime == RUNTIME_INF)
-		return 1;
-
-	span = sched_rt_period_mask();
-	for_each_cpu_mask(i, span) {
-		int enqueue = 0;
-		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
-		struct rq *rq = rq_of_rt_rq(rt_rq);
-
-		spin_lock(&rq->lock);
-		if (rt_rq->rt_time) {
-			u64 runtime;
-
-			spin_lock(&rt_rq->rt_runtime_lock);
-			if (rt_rq->rt_throttled)
-				balance_runtime(rt_rq);
-			runtime = rt_rq->rt_runtime;
-			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
-			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
-				rt_rq->rt_throttled = 0;
-				enqueue = 1;
-			}
-			if (rt_rq->rt_time || rt_rq->rt_nr_running)
-				idle = 0;
-			spin_unlock(&rt_rq->rt_runtime_lock);
-		}
-
-		if (enqueue)
-			sched_rt_rq_enqueue(rt_rq);
-		spin_unlock(&rq->lock);
-	}
-
-	return idle;
-}
-
-#ifdef CONFIG_SMP
 static int do_balance_runtime(struct rt_rq *rt_rq)
 {
 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
@@ -425,8 +363,65 @@ static void enable_runtime(struct rq *rq)
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
 
+static int balance_runtime(struct rt_rq *rt_rq)
+{
+	int more = 0;
+
+	if (rt_rq->rt_time > rt_rq->rt_runtime) {
+		spin_unlock(&rt_rq->rt_runtime_lock);
+		more = do_balance_runtime(rt_rq);
+		spin_lock(&rt_rq->rt_runtime_lock);
+	}
+
+	return more;
+}
+#else
+static inline int balance_runtime(struct rt_rq *rt_rq)
+{
+	return 0;
+}
 #endif
 
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+{
+	int i, idle = 1;
+	cpumask_t span;
+
+	if (rt_b->rt_runtime == RUNTIME_INF)
+		return 1;
+
+	span = sched_rt_period_mask();
+	for_each_cpu_mask(i, span) {
+		int enqueue = 0;
+		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
+		struct rq *rq = rq_of_rt_rq(rt_rq);
+
+		spin_lock(&rq->lock);
+		if (rt_rq->rt_time) {
+			u64 runtime;
+
+			spin_lock(&rt_rq->rt_runtime_lock);
+			if (rt_rq->rt_throttled)
+				balance_runtime(rt_rq);
+			runtime = rt_rq->rt_runtime;
+			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
+			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
+				rt_rq->rt_throttled = 0;
+				enqueue = 1;
+			}
+			if (rt_rq->rt_time || rt_rq->rt_nr_running)
+				idle = 0;
+			spin_unlock(&rt_rq->rt_runtime_lock);
+		}
+
+		if (enqueue)
+			sched_rt_rq_enqueue(rt_rq);
+		spin_unlock(&rq->lock);
+	}
+
+	return idle;
+}
+
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 {
 #ifdef CONFIG_RT_GROUP_SCHED