author    Venki Pallipadi <venkatesh.pallipadi@intel.com>    2007-07-21 17:10:44 +0200
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-07-21 18:37:10 -0700
commit    22293e5806f58a9682267139678a5cc117fd3dcf (patch)
tree      63f1961afb05a4757f9716ab2339f40b1f28f3b2
parent    459029541d857258dfa9ad29e443d287a74c36fe (diff)
x86: round_jiffies() for i386 and x86-64 non-critical/corrected MCE polling
This helps to reduce the frequency at which the CPU must be taken out of a lower-power state.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Acked-by: Tim Hockin <thockin@hockin.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
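For context, round_jiffies_relative() rounds a relative timeout so that the resulting timer expires on a whole-second boundary; independently scheduled timers then tend to fire together, and the CPU is taken out of its low-power state once per second rather than once per timer. A minimal sketch of the pattern this patch applies, using hypothetical poll_fn and POLL_RATE names rather than anything from the patch itself:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#define POLL_RATE (15 * HZ)		/* hypothetical polling period */

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* ... perform the periodic check here ... */

	/*
	 * Re-arm with the delay rounded to a whole second so this
	 * timer can expire alongside other rounded timers instead
	 * of waking the CPU on its own.
	 */
	schedule_delayed_work(&poll_work, round_jiffies_relative(POLL_RATE));
}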
-rw-r--r--    arch/i386/kernel/cpu/mcheck/non-fatal.c    4    ++--
-rw-r--r--    arch/x86_64/kernel/mce.c    9    ++++++---
2 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 6b5d3518a1c..bf39409b383 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -57,7 +57,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
static void mce_work_fn(struct work_struct *work)
{
on_each_cpu(mce_checkregs, NULL, 1, 1);
- schedule_delayed_work(&mce_work, MCE_RATE);
+ schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
}
static int __init init_nonfatal_mce_checker(void)
@@ -82,7 +82,7 @@ static int __init init_nonfatal_mce_checker(void)
/*
* Check for non-fatal errors every MCE_RATE s
*/
- schedule_delayed_work(&mce_work, MCE_RATE);
+ schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
printk(KERN_INFO "Machine check exception polling timer started.\n");
return 0;
}
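The relative variant is the right call here: schedule_delayed_work() takes a delay measured from now, whereas round_jiffies() operates on an absolute jiffies value. A hand-written illustration of the distinction (not code from this patch):

	/* absolute: round a deadline expressed in jiffies */
	unsigned long when  = round_jiffies(jiffies + 15 * HZ);
	/* relative: round a delay to pass to schedule_delayed_work() */
	unsigned long delay = round_jiffies_relative(15 * HZ);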
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 7c8ab423abe..4d8450ee363 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -375,7 +375,8 @@ static void mcheck_timer(struct work_struct *work)
if (mce_notify_user()) {
next_interval = max(next_interval/2, HZ/100);
} else {
- next_interval = min(next_interval*2, check_interval*HZ);
+ next_interval = min(next_interval*2,
+ (int)round_jiffies_relative(check_interval*HZ));
}
schedule_delayed_work(&mcheck_work, next_interval);
@@ -428,7 +429,8 @@ static __init int periodic_mcheck_init(void)
{
next_interval = check_interval * HZ;
if (next_interval)
- schedule_delayed_work(&mcheck_work, next_interval);
+ schedule_delayed_work(&mcheck_work,
+ round_jiffies_relative(next_interval));
idle_notifier_register(&mce_idle_notifier);
return 0;
}
@@ -720,7 +722,8 @@ static void mce_restart(void)
on_each_cpu(mce_init, NULL, 1, 1);
next_interval = check_interval * HZ;
if (next_interval)
- schedule_delayed_work(&mcheck_work, next_interval);
+ schedule_delayed_work(&mcheck_work,
+ round_jiffies_relative(next_interval));
}
static struct sysdev_class mce_sysclass = {
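On the x86-64 side the patch keeps the adaptive backoff intact around the new rounding: mcheck_timer() halves the interval (down to HZ/100) when mce_notify_user() reports fresh errors, and doubles it (up to the rounded check_interval * HZ ceiling) when things are quiet. A standalone sketch of that policy, with hypothetical errors_seen() and poll_work names standing in for the real machinery:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static int check_interval = 5 * 60;	/* seconds between slow polls */
static int next_interval;		/* current delay, in jiffies */

static void mce_poll_tick(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, mce_poll_tick);

static void mce_poll_tick(struct work_struct *work)
{
	if (errors_seen()) {		/* hypothetical predicate */
		/* errors arriving: poll up to 100 times per second */
		next_interval = max(next_interval / 2, HZ / 100);
	} else {
		/*
		 * Quiet: back off toward the configured ceiling,
		 * rounded so the slow path lines up with other
		 * second-aligned timers.
		 */
		next_interval = min(next_interval * 2,
				    (int)round_jiffies_relative(check_interval * HZ));
	}

	schedule_delayed_work(&poll_work, next_interval);
}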