author    Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-11-09 15:27:54 -0800
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-11-09 15:27:54 -0800
commit    a70a93229943c177f0062490b4f8e44be4cef685 (patch)
tree      24cc6f087307f18cda2f55ad91c7649dd5388b86 /arch
parent    a80b824f0b63fa3a8c269903828beb0837d738e7 (diff)
parent    e6fe6649b4ec11aa3075e394b4d8743eebe1f64c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: proper prototype for kernel/sched.c:migration_init()
  sched: avoid large irq-latencies in smp-balancing
  sched: fix copy_namespace() <-> sched_fork() dependency in do_fork
  sched: clean up the wakeup preempt check, #2
  sched: clean up the wakeup preempt check
  sched: wakeup preemption fix
  sched: remove PREEMPT_RESTRICT
  sched: turn off PREEMPT_RESTRICT
  KVM: fix !SMP build error
  x86: make nmi_cpu_busy() always defined
  x86: make ipi_handler() always defined
  sched: cleanup, use NSEC_PER_MSEC and NSEC_PER_SEC
  sched: reintroduce SMP tunings again
  sched: restore deterministic CPU accounting on powerpc
  sched: fix delay accounting regression
  sched: reintroduce the sched_min_granularity tunable
  sched: documentation: place_entity() comments
  sched: fix vslice
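
Editor's note: the arch-side hunks below all belong to "sched: restore deterministic CPU accounting on powerpc". Before this merge, powerpc and s390 each duplicated the whole per-tick path (local timers, RCU callbacks, scheduler_tick(), POSIX CPU timers) inside their CONFIG_VIRT_CPU_ACCOUNTING code; after it, the architecture only supplies account_process_tick(), and the generic tick code drives the rest. The following hedged user-space sketch shows the resulting split; every function body here is an illustrative stub, and only the account_process_tick() signature is taken from the diff itself.

#include <stdio.h>

struct task_struct { int pid; };

/* arch hook: charge the elapsed (virtual) CPU time to the task */
static void account_process_tick(struct task_struct *tsk, int user_tick)
{
	printf("charge tick to pid %d (user_tick=%d)\n", tsk->pid, user_tick);
}

/* generic helpers the arch code no longer has to call itself (stubs) */
static void run_local_timers(void)                        { puts("run_local_timers"); }
static void scheduler_tick(void)                          { puts("scheduler_tick"); }
static void run_posix_cpu_timers(struct task_struct *tsk) { printf("posix cpu timers, pid %d\n", tsk->pid); }

/* roughly the shape of the generic per-tick path that now replaces the
 * per-arch account_process_time()/account_tick_vtime() copies removed below */
static void update_process_times_sketch(struct task_struct *curr, int user_tick)
{
	account_process_tick(curr, user_tick);
	run_local_timers();
	scheduler_tick();
	run_posix_cpu_timers(curr);
}

int main(void)
{
	struct task_struct task = { .pid = 42 };
	update_process_times_sketch(&task, 1);
	return 0;
}
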
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/process.c      2
-rw-r--r--  arch/powerpc/kernel/time.c        25
-rw-r--r--  arch/s390/kernel/time.c            4
-rw-r--r--  arch/s390/kernel/vtime.c           8
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c    6
-rw-r--r--  arch/x86/kernel/nmi_32.c           4
6 files changed, 7 insertions, 42 deletions
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b9d88374f14..41e13f4cc6e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -350,7 +350,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
local_irq_save(flags);
account_system_vtime(current);
- account_process_vtime(current);
+ account_process_tick(current, 0);
calculate_steal_time();
last = _switch(old_thread, new_thread);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 99ebcd3884d..4beb6329dfb 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -259,7 +259,7 @@ void account_system_vtime(struct task_struct *tsk)
* user and system time records.
* Must be called with interrupts disabled.
*/
-void account_process_vtime(struct task_struct *tsk)
+void account_process_tick(struct task_struct *tsk, int user_tick)
{
cputime_t utime, utimescaled;
@@ -274,18 +274,6 @@ void account_process_vtime(struct task_struct *tsk)
account_user_time_scaled(tsk, utimescaled);
}
-static void account_process_time(struct pt_regs *regs)
-{
- int cpu = smp_processor_id();
-
- account_process_vtime(current);
- run_local_timers();
- if (rcu_pending(cpu))
- rcu_check_callbacks(cpu, user_mode(regs));
- scheduler_tick();
- run_posix_cpu_timers(current);
-}
-
/*
* Stuff for accounting stolen time.
*/
@@ -375,7 +363,6 @@ static void snapshot_purr(void)
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
-#define account_process_time(regs) update_process_times(user_mode(regs))
#define calculate_steal_time() do { } while (0)
#endif
@@ -599,16 +586,6 @@ void timer_interrupt(struct pt_regs * regs)
get_lppaca()->int_dword.fields.decr_int = 0;
#endif
- /*
- * We cannot disable the decrementer, so in the period
- * between this cpu's being marked offline in cpu_online_map
- * and calling stop-self, it is taking timer interrupts.
- * Avoid calling into the scheduler rebalancing code if this
- * is the case.
- */
- if (!cpu_is_offline(cpu))
- account_process_time(regs);
-
if (evt->event_handler)
evt->event_handler(evt);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a963fe81359..22b800ce212 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -145,12 +145,8 @@ void account_ticks(u64 time)
do_timer(ticks);
#endif
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- account_tick_vtime(current);
-#else
while (ticks--)
update_process_times(user_mode(get_irq_regs()));
-#endif
s390_do_profile();
}
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 84ff78de6ba..c5f05b3fb2c 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -32,7 +32,7 @@ static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
-void account_tick_vtime(struct task_struct *tsk)
+void account_process_tick(struct task_struct *tsk, int user_tick)
{
cputime_t cputime;
__u64 timer, clock;
@@ -64,12 +64,6 @@ void account_tick_vtime(struct task_struct *tsk)
S390_lowcore.steal_clock -= cputime << 12;
account_steal_time(tsk, cputime);
}
-
- run_local_timers();
- if (rcu_pending(smp_processor_id()))
- rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
- scheduler_tick();
- run_posix_cpu_timers(tsk);
}
/*
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 9abbdf7562c..3b20613325d 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -139,13 +139,12 @@ struct set_mtrr_data {
mtrr_type smp_type;
};
-#ifdef CONFIG_SMP
-
static void ipi_handler(void *info)
/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
[RETURNS] Nothing.
*/
{
+#ifdef CONFIG_SMP
struct set_mtrr_data *data = info;
unsigned long flags;
@@ -168,9 +167,8 @@ static void ipi_handler(void *info)
atomic_dec(&data->count);
local_irq_restore(flags);
-}
-
#endif
+}
static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
return type1 == MTRR_TYPE_UNCACHABLE ||
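
Editor's note: the mtrr/main.c hunk above, and the nmi_32.c hunk that follows for nmi_cpu_busy(), both implement the "make ipi_handler()/nmi_cpu_busy() always defined" items from the merge log: rather than compiling the helper out entirely on !CONFIG_SMP, the #ifdef moves inside the function body so the symbol exists in every configuration. A standalone, hedged illustration of that pattern (user-space C, not the kernel code; CONFIG_SMP here stands in for the Kconfig symbol, build with -DCONFIG_SMP to take the SMP branch):

#include <stdio.h>

/* Illustrative only: keep the function defined in every configuration and
 * guard just its body, so callers (or code taking its address) build both
 * with and without CONFIG_SMP. */
static void ipi_handler(void *info)
{
#ifdef CONFIG_SMP
	printf("SMP build: handling IPI with data %p\n", info);
#else
	(void)info;	/* UP build: nothing to do, but the symbol still exists */
	puts("UP build: no-op");
#endif
}

int main(void)
{
	ipi_handler(NULL);
	return 0;
}
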
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index f803ed0ed1c..600fd404e44 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -51,13 +51,13 @@ static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
static int endflag __initdata = 0;
-#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
* the CPU is idle. To make sure the NMI watchdog really ticks on all
* CPUs during the test make them busy.
*/
static __init void nmi_cpu_busy(void *data)
{
+#ifdef CONFIG_SMP
local_irq_enable_in_hardirq();
/* Intentionally don't use cpu_relax here. This is
to make sure that the performance counter really ticks,
@@ -67,8 +67,8 @@ static __init void nmi_cpu_busy(void *data)
care if they get somewhat less cycles. */
while (endflag == 0)
mb();
-}
#endif
+}
static int __init check_nmi_watchdog(void)
{