path: root/kernel/sched.c
author	Steve French <sfrench@us.ibm.com>	2008-03-01 18:29:55 +0000
committer	Steve French <sfrench@us.ibm.com>	2008-03-01 18:29:55 +0000
commit	0dbd888936a23514716b8d944775bc56f731363a (patch)
tree	a2c60cdc45bdcbed47680731fa8188bffe58c098 /kernel/sched.c
parent	0b442d2c28479332610c46e1a74e5638ab63a97d (diff)
parent	d395991c117d43bfca97101a931a41d062a93852 (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f28f19e65b5..f06950c8a6c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -668,6 +668,8 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
*/
unsigned int sysctl_sched_rt_period = 1000000;
+static __read_mostly int scheduler_running;
+
/*
* part of the period that we allow rt tasks to run in us.
* default: 0.95s
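
A note on the annotation used for the new flag: __read_mostly asks the linker to place the variable in a read-mostly data section, keeping this almost-never-written flag off cache lines that are dirtied frequently. On architectures that support it, the macro is roughly the following (paraphrased from the era's asm/cache.h; the exact section name varies by arch):

#define __read_mostly	__attribute__((__section__(".data.read_mostly")))
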
@@ -689,14 +691,16 @@ unsigned long long cpu_clock(int cpu)
unsigned long flags;
struct rq *rq;
- local_irq_save(flags);
- rq = cpu_rq(cpu);
/*
* Only call sched_clock() if the scheduler has already been
* initialized (some code might call cpu_clock() very early):
*/
- if (rq->idle)
- update_rq_clock(rq);
+ if (unlikely(!scheduler_running))
+ return 0;
+
+ local_irq_save(flags);
+ rq = cpu_rq(cpu);
+ update_rq_clock(rq);
now = rq->clock;
local_irq_restore(flags);
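
The cpu_clock() hunk replaces an indirect "is the scheduler up yet?" test (rq->idle being set) with an explicit flag, and moves the early-out ahead of the irq manipulation so that nothing touches the runqueue before sched_init() has run. Reconstructed from the hunk, the resulting function reads roughly as follows (the now declaration and the return are assumed from the surrounding, elided context):

unsigned long long cpu_clock(int cpu)
{
	unsigned long long now;
	unsigned long flags;
	struct rq *rq;

	/*
	 * Only call sched_clock() if the scheduler has already been
	 * initialized (some code might call cpu_clock() very early):
	 */
	if (unlikely(!scheduler_running))
		return 0;

	local_irq_save(flags);
	rq = cpu_rq(cpu);
	update_rq_clock(rq);	/* samples sched_clock() into rq->clock */
	now = rq->clock;
	local_irq_restore(flags);

	return now;
}
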
@@ -1831,6 +1835,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
long old_state;
struct rq *rq;
+ smp_wmb();
rq = task_rq_lock(p, &flags);
old_state = p->state;
if (!(old_state & state))
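
The lone smp_wmb() at the top of try_to_wake_up() gives the wake_up() family write-barrier semantics: stores the waker made before issuing the wakeup may not be reordered past it, so a woken sleeper is guaranteed to see them. A minimal sketch of the pattern this protects; the names done, wq, producer and consumer are illustrative, not from this diff:

static int done;
static DECLARE_WAIT_QUEUE_HEAD(wq);

static void producer(void)
{
	done = 1;	/* must be visible before the wakeup...          */
	wake_up(&wq);	/* ...which try_to_wake_up()'s smp_wmb() ensures */
}

static void consumer(void)
{
	wait_event(wq, done);	/* once woken, sees done == 1 */
}
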
@@ -3766,7 +3771,7 @@ void scheduler_tick(void)
#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
-void add_preempt_count(int val)
+void __kprobes add_preempt_count(int val)
{
/*
* Underflow?
@@ -3782,7 +3787,7 @@ void add_preempt_count(int val)
}
EXPORT_SYMBOL(add_preempt_count);
-void sub_preempt_count(int val)
+void __kprobes sub_preempt_count(int val)
{
/*
* Underflow?
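
Both halves of the preempt-count bookkeeping gain a __kprobes annotation, which places them in the .kprobes.text section; the kprobes core refuses to plant probes in that section. The motivation is recursion: the kprobes trap handlers themselves adjust the preempt count, so a probe on add_preempt_count() or sub_preempt_count() would re-enter the probed function. The annotation itself is roughly this (paraphrased from the era's include/linux/kprobes.h):

#define __kprobes	__attribute__((__section__(".kprobes.text")))
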
@@ -3884,7 +3889,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
asmlinkage void __sched schedule(void)
{
struct task_struct *prev, *next;
- long *switch_count;
+ unsigned long *switch_count;
struct rq *rq;
int cpu;
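
The switch_count change is a plain type fix: schedule() points switch_count at one of the per-task context-switch counters before dereferencing it, and both counters are unsigned long. The relevant fields, as declared in the era's include/linux/sched.h:

struct task_struct {
	/* ... */
	unsigned long nvcsw, nivcsw;	/* voluntary / involuntary context
					 * switch counts; schedule() aims
					 * switch_count at one of these */
	/* ... */
};
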
@@ -7283,6 +7288,8 @@ void __init sched_init(void)
* During early bootup we pretend to be a normal task:
*/
current->sched_class = &fair_sched_class;
+
+ scheduler_running = 1;
}
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
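
Taken together, the two scheduler_running hunks form a small boot-order handshake: the flag starts out zero in .bss, sched_init() publishes it only once the runqueues are usable, and cpu_clock() degrades to returning 0 for any caller that races ahead of that point. As a generic sketch of the pattern (the names here are hypothetical, not the kernel's):

static __read_mostly int subsys_ready;

unsigned long long subsys_query(void)
{
	if (unlikely(!subsys_ready))
		return 0;		/* safe default before init */
	return do_real_query();		/* hypothetical helper */
}

void __init subsys_init(void)
{
	/* ... initialize state ... */
	subsys_ready = 1;	/* publish only after state is valid */
}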