Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/common/sharpsl_pm.c | 9
-rw-r--r--  arch/arm/mach-at91/pm.c | 7
-rw-r--r--  arch/arm/mach-omap1/pm.c | 29
-rw-r--r--  arch/arm/mach-omap2/pm.c | 35
-rw-r--r--  arch/arm/mach-pnx4008/pm.c | 6
-rw-r--r--  arch/arm/mach-pxa/pm.c | 4
-rw-r--r--  arch/arm/mach-pxa/pxa25x.c | 4
-rw-r--r--  arch/arm/mach-pxa/pxa27x.c | 2
-rw-r--r--  arch/arm/mach-sa1100/pm.c | 6
-rw-r--r--  arch/arm/nwfpe/fpopcode.h | 6
-rw-r--r--  arch/arm/plat-s3c24xx/pm.c | 6
-rw-r--r--  arch/blackfin/mach-common/pm.c | 59
-rw-r--r--  arch/ia64/kernel/time.c | 5
-rw-r--r--  arch/ia64/sn/kernel/xpnet.c | 13
-rw-r--r--  arch/mips/Kconfig | 24
-rw-r--r--  arch/mips/au1000/Kconfig | 1
-rw-r--r--  arch/mips/jazz/irq.c | 6
-rw-r--r--  arch/mips/jazz/setup.c | 8
-rw-r--r--  arch/mips/jmr3927/rbhma3100/setup.c | 66
-rw-r--r--  arch/mips/kernel/Makefile | 2
-rw-r--r--  arch/mips/kernel/cevt-r4k.c | 272
-rw-r--r--  arch/mips/kernel/head.S | 16
-rw-r--r--  arch/mips/kernel/time.c | 262
-rw-r--r--  arch/mips/kernel/traps.c | 11
-rw-r--r--  arch/mips/pmc-sierra/Kconfig | 2
-rw-r--r--  arch/mips/sgi-ip27/ip27-irq.c | 5
-rw-r--r--  arch/mips/sgi-ip27/ip27-timer.c | 155
-rw-r--r--  arch/mips/sgi-ip32/ip32-setup.c | 6
-rw-r--r--  arch/mips/vr41xx/Kconfig | 6
-rw-r--r--  arch/powerpc/configs/pmac32_defconfig | 1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 1
-rw-r--r--  arch/powerpc/kernel/time.c | 32
-rw-r--r--  arch/powerpc/platforms/52xx/lite5200_pm.c | 34
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pm.c | 15
-rw-r--r--  arch/sh/boards/hp6xx/pm.c | 6
-rw-r--r--  arch/sparc/kernel/of_device.c | 20
-rw-r--r--  arch/sparc64/kernel/irq.c | 8
-rw-r--r--  arch/sparc64/kernel/of_device.c | 20
-rw-r--r--  arch/sparc64/kernel/pci_common.c | 7
-rw-r--r--  arch/sparc64/lib/atomic.S | 38
-rw-r--r--  arch/sparc64/lib/bitops.S | 30
-rw-r--r--  arch/um/drivers/slip_kern.c | 4
-rw-r--r--  arch/um/drivers/slirp_kern.c | 4
-rw-r--r--  arch/x86/ia32/ia32_binfmt.c | 1
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S | 12
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S | 32
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 66
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c | 10
-rw-r--r--  arch/x86/kernel/mce_64.c | 39
-rw-r--r--  arch/x86/kernel/msr.c | 35
-rw-r--r--  arch/x86/kernel/suspend_64.c | 101
-rw-r--r--  arch/x86/kernel/suspend_asm_64.S | 49
-rw-r--r--  arch/x86/kernel/vsyscall_64.c | 23
-rw-r--r--  arch/x86_64/Kconfig | 5
54 files changed, 901 insertions, 725 deletions
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 111a7fa5deb..5bba5255b11 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/apm-emulation.h>
+#include <linux/suspend.h>
#include <asm/hardware.h>
#include <asm/mach-types.h>
@@ -765,9 +766,9 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
info->battery_life = sharpsl_pm.battstat.mainbat_percent;
}
-static struct pm_ops sharpsl_pm_ops = {
+static struct platform_suspend_ops sharpsl_pm_ops = {
.enter = corgi_pxa_pm_enter,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
};
static int __init sharpsl_pm_probe(struct platform_device *pdev)
@@ -799,7 +800,7 @@ static int __init sharpsl_pm_probe(struct platform_device *pdev)
apm_get_power_status = sharpsl_apm_get_power_status;
- pm_set_ops(&sharpsl_pm_ops);
+ suspend_set_ops(&sharpsl_pm_ops);
mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250));
@@ -808,7 +809,7 @@ static int __init sharpsl_pm_probe(struct platform_device *pdev)
static int sharpsl_pm_remove(struct platform_device *pdev)
{
- pm_set_ops(NULL);
+ suspend_set_ops(NULL);
device_remove_file(&pdev->dev, &dev_attr_battery_percentage);
device_remove_file(&pdev->dev, &dev_attr_battery_voltage);
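The conversion repeated throughout this series has one shape: include <linux/suspend.h>, describe the platform's sleep support in a struct platform_suspend_ops, and register it with suspend_set_ops() instead of the old pm_set_ops(). A minimal sketch of the converted form, for a hypothetical "mydev" platform that supports suspend-to-RAM only (the names are illustrative, not taken from any file in this diff):

#include <linux/init.h>
#include <linux/suspend.h>

static int mydev_suspend_enter(suspend_state_t state)
{
        /*
         * Put the SoC into the requested sleep state; only PM_SUSPEND_MEM
         * can reach this point because of the .valid callback below.
         */
        return 0;
}

static struct platform_suspend_ops mydev_suspend_ops = {
        .enter  = mydev_suspend_enter,
        .valid  = suspend_valid_only_mem,       /* advertise mem sleep only */
};

static int __init mydev_pm_init(void)
{
        suspend_set_ops(&mydev_suspend_ops);
        return 0;
}
late_initcall(mydev_pm_init);
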
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index ddf9184d561..98cb6148291 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -10,10 +10,9 @@
* (at your option) any later version.
*/
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
-#include <linux/pm.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/module.h>
@@ -199,7 +198,7 @@ error:
}
-static struct pm_ops at91_pm_ops ={
+static struct platform_suspend_ops at91_pm_ops ={
.valid = at91_pm_valid_state,
.set_target = at91_pm_set_target,
.enter = at91_pm_enter,
@@ -220,7 +219,7 @@ static int __init at91_pm_init(void)
/* Disable SDRAM low-power mode. Cannot be used with self-refresh. */
at91_sys_write(AT91_SDRAMC_LPR, 0);
- pm_set_ops(&at91_pm_ops);
+ suspend_set_ops(&at91_pm_ops);
return 0;
}
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 089b8208de0..3bf01e28df3 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -35,10 +35,9 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
-#include <linux/pm.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/module.h>
@@ -600,27 +599,15 @@ static void (*saved_idle)(void) = NULL;
/*
* omap_pm_prepare - Do preliminary suspend work.
- * @state: suspend state we're entering.
*
*/
-static int omap_pm_prepare(suspend_state_t state)
+static int omap_pm_prepare(void)
{
- int error = 0;
-
/* We cannot sleep in idle until we have resumed */
saved_idle = pm_idle;
pm_idle = NULL;
- switch (state)
- {
- case PM_SUSPEND_STANDBY:
- case PM_SUSPEND_MEM:
- break;
- default:
- return -EINVAL;
- }
-
- return error;
+ return 0;
}
@@ -648,16 +635,14 @@ static int omap_pm_enter(suspend_state_t state)
/**
* omap_pm_finish - Finish up suspend sequence.
- * @state: State we're coming out of.
*
* This is called after we wake back up (or if entering the sleep state
* failed).
*/
-static int omap_pm_finish(suspend_state_t state)
+static void omap_pm_finish(void)
{
pm_idle = saved_idle;
- return 0;
}
@@ -674,11 +659,11 @@ static struct irqaction omap_wakeup_irq = {
-static struct pm_ops omap_pm_ops ={
+static struct platform_suspend_ops omap_pm_ops ={
.prepare = omap_pm_prepare,
.enter = omap_pm_enter,
.finish = omap_pm_finish,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
};
static int __init omap_pm_init(void)
@@ -735,7 +720,7 @@ static int __init omap_pm_init(void)
else if (cpu_is_omap16xx())
omap_writel(OMAP1610_IDLECT3_VAL, OMAP1610_IDLECT3);
- pm_set_ops(&omap_pm_ops);
+ suspend_set_ops(&omap_pm_ops);
#if defined(DEBUG) && defined(CONFIG_PROC_FS)
omap_pm_init_proc();
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 6f4a5436d0c..baf7d82b458 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -16,10 +16,9 @@
* published by the Free Software Foundation.
*/
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
-#include <linux/pm.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/module.h>
@@ -71,28 +70,12 @@ void omap2_pm_idle(void)
local_irq_enable();
}
-static int omap2_pm_prepare(suspend_state_t state)
+static int omap2_pm_prepare(void)
{
- int error = 0;
-
/* We cannot sleep in idle until we have resumed */
saved_idle = pm_idle;
pm_idle = NULL;
-
- switch (state)
- {
- case PM_SUSPEND_STANDBY:
- case PM_SUSPEND_MEM:
- break;
-
- case PM_SUSPEND_DISK:
- return -ENOTSUPP;
-
- default:
- return -EINVAL;
- }
-
- return error;
+ return 0;
}
#define INT0_WAKE_MASK (OMAP_IRQ_BIT(INT_24XX_GPIO_BANK1) | \
@@ -353,9 +336,6 @@ static int omap2_pm_enter(suspend_state_t state)
case PM_SUSPEND_MEM:
ret = omap2_pm_suspend();
break;
- case PM_SUSPEND_DISK:
- ret = -ENOTSUPP;
- break;
default:
ret = -EINVAL;
}
@@ -363,17 +343,16 @@ static int omap2_pm_enter(suspend_state_t state)
return ret;
}
-static int omap2_pm_finish(suspend_state_t state)
+static void omap2_pm_finish(void)
{
pm_idle = saved_idle;
- return 0;
}
-static struct pm_ops omap_pm_ops = {
+static struct platform_suspend_ops omap_pm_ops = {
.prepare = omap2_pm_prepare,
.enter = omap2_pm_enter,
.finish = omap2_pm_finish,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
};
int __init omap2_pm_init(void)
@@ -397,7 +376,7 @@ int __init omap2_pm_init(void)
omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
omap24xx_cpu_suspend_sz);
- pm_set_ops(&omap_pm_ops);
+ suspend_set_ops(&omap_pm_ops);
pm_idle = omap2_pm_idle;
pmdomain_init();
diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
index 2a137f33f75..40116d25434 100644
--- a/arch/arm/mach-pnx4008/pm.c
+++ b/arch/arm/mach-pnx4008/pm.c
@@ -15,7 +15,7 @@
#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/clk.h>
@@ -117,7 +117,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
(state == PM_SUSPEND_MEM);
}
-static struct pm_ops pnx4008_pm_ops = {
+static struct platform_suspend_ops pnx4008_pm_ops = {
.enter = pnx4008_pm_enter,
.valid = pnx4008_pm_valid,
};
@@ -146,7 +146,7 @@ static int __init pnx4008_pm_init(void)
return -ENOMEM;
}
- pm_set_ops(&pnx4008_pm_ops);
+ suspend_set_ops(&pnx4008_pm_ops);
return 0;
}
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index b59a81a8e7d..a941c71c7d0 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -86,7 +86,7 @@ static int pxa_pm_valid(suspend_state_t state)
return -EINVAL;
}
-static struct pm_ops pxa_pm_ops = {
+static struct platform_suspend_ops pxa_pm_ops = {
.valid = pxa_pm_valid,
.enter = pxa_pm_enter,
};
@@ -104,7 +104,7 @@ static int __init pxa_pm_init(void)
return -ENOMEM;
}
- pm_set_ops(&pxa_pm_ops);
+ suspend_set_ops(&pxa_pm_ops);
return 0;
}
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 0d6a72504ca..dcd81f8d083 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -20,7 +20,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <asm/hardware.h>
#include <asm/arch/irqs.h>
@@ -215,7 +215,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state)
static struct pxa_cpu_pm_fns pxa25x_cpu_pm_fns = {
.save_size = SLEEP_SAVE_SIZE,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
.save = pxa25x_cpu_pm_save,
.restore = pxa25x_cpu_pm_restore,
.enter = pxa25x_cpu_pm_enter,
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 2d7fc39732e..d0f2b597db1 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -14,7 +14,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <asm/hardware.h>
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 01a37d3c072..246c573e725 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -122,14 +122,14 @@ unsigned long sleep_phys_sp(void *sp)
return virt_to_phys(sp);
}
-static struct pm_ops sa11x0_pm_ops = {
+static struct platform_suspend_ops sa11x0_pm_ops = {
.enter = sa11x0_pm_enter,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
};
static int __init sa11x0_pm_init(void)
{
- pm_set_ops(&sa11x0_pm_ops);
+ suspend_set_ops(&sa11x0_pm_ops);
return 0;
}
diff --git a/arch/arm/nwfpe/fpopcode.h b/arch/arm/nwfpe/fpopcode.h
index ec78e3517fc..0090b19bbe6 100644
--- a/arch/arm/nwfpe/fpopcode.h
+++ b/arch/arm/nwfpe/fpopcode.h
@@ -369,20 +369,20 @@ TABLE 5
#define getRoundingMode(opcode) ((opcode & MASK_ROUNDING_MODE) >> 5)
#ifdef CONFIG_FPE_NWFPE_XP
-static inline __attribute_pure__ floatx80 getExtendedConstant(const unsigned int nIndex)
+static inline floatx80 __pure getExtendedConstant(const unsigned int nIndex)
{
extern const floatx80 floatx80Constant[];
return floatx80Constant[nIndex];
}
#endif
-static inline __attribute_pure__ float64 getDoubleConstant(const unsigned int nIndex)
+static inline float64 __pure getDoubleConstant(const unsigned int nIndex)
{
extern const float64 float64Constant[];
return float64Constant[nIndex];
}
-static inline __attribute_pure__ float32 getSingleConstant(const unsigned int nIndex)
+static inline float32 __pure getSingleConstant(const unsigned int nIndex)
{
extern const float32 float32Constant[];
return float32Constant[nIndex];
diff --git a/arch/arm/plat-s3c24xx/pm.c b/arch/arm/plat-s3c24xx/pm.c
index eab1850616d..4fdb3117744 100644
--- a/arch/arm/plat-s3c24xx/pm.c
+++ b/arch/arm/plat-s3c24xx/pm.c
@@ -612,9 +612,9 @@ static int s3c2410_pm_enter(suspend_state_t state)
return 0;
}
-static struct pm_ops s3c2410_pm_ops = {
+static struct platform_suspend_ops s3c2410_pm_ops = {
.enter = s3c2410_pm_enter,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
};
/* s3c2410_pm_init
@@ -628,6 +628,6 @@ int __init s3c2410_pm_init(void)
{
printk("S3C2410 Power Management, (c) 2004 Simtec Electronics\n");
- pm_set_ops(&s3c2410_pm_ops);
+ suspend_set_ops(&s3c2410_pm_ops);
return 0;
}
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index b1030272220..dac51fb06f2 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -32,7 +32,7 @@
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/io.h>
@@ -89,28 +89,15 @@ void bfin_pm_suspend_standby_enter(void)
#endif /* CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR */
}
-
/*
- * bfin_pm_prepare - Do preliminary suspend work.
- * @state: suspend state we're entering.
+ * bfin_pm_valid - Tell the PM core that we only support the standby sleep
+ * state
+ * @state: suspend state we're checking.
*
*/
-static int bfin_pm_prepare(suspend_state_t state)
+static int bfin_pm_valid(suspend_state_t state)
{
- int error = 0;
-
- switch (state) {
- case PM_SUSPEND_STANDBY:
- break;
-
- case PM_SUSPEND_MEM:
- return -ENOTSUPP;
-
- default:
- return -EINVAL;
- }
-
- return error;
+ return (state == PM_SUSPEND_STANDBY);
}
/*
@@ -135,44 +122,14 @@ static int bfin_pm_enter(suspend_state_t state)
return 0;
}
-/*
- * bfin_pm_finish - Finish up suspend sequence.
- * @state: State we're coming out of.
- *
- * This is called after we wake back up (or if entering the sleep state
- * failed).
- */
-static int bfin_pm_finish(suspend_state_t state)
-{
- switch (state) {
- case PM_SUSPEND_STANDBY:
- break;
-
- case PM_SUSPEND_MEM:
- return -ENOTSUPP;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int bfin_pm_valid(suspend_state_t state)
-{
- return (state == PM_SUSPEND_STANDBY);
-}
-
-struct pm_ops bfin_pm_ops = {
- .prepare = bfin_pm_prepare,
+struct platform_suspend_ops bfin_pm_ops = {
.enter = bfin_pm_enter,
- .finish = bfin_pm_finish,
.valid = bfin_pm_valid,
};
static int __init bfin_pm_init(void)
{
- pm_set_ops(&bfin_pm_ops);
+ suspend_set_ops(&bfin_pm_ops);
return 0;
}
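With .valid in place, the per-state switch statements that prepare() and finish() used to carry become dead weight: the suspend core checks .valid before it starts a transition, so enter() is never handed a state the platform rejected. A sketch of a .valid callback for a hypothetical platform that accepts both standby and suspend-to-RAM (compare pnx4008_pm_valid() earlier in this diff; <linux/suspend.h> is assumed to be included):

static int mydev_pm_valid(suspend_state_t state)
{
        /*
         * Anything other than standby or mem is refused up front,
         * so the remaining callbacks never see it.
         */
        return state == PM_SUSPEND_STANDBY || state == PM_SUSPEND_MEM;
}
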
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 98cfc90cab1..2bb84214e5f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -371,6 +371,11 @@ ia64_setup_printk_clock(void)
ia64_printk_clock = ia64_itc_printk_clock;
}
+/* IA64 doesn't cache the timezone */
+void update_vsyscall_tz(void)
+{
+}
+
void update_vsyscall(struct timespec *wall, struct clocksource *c)
{
unsigned long flags;
diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c
index e58fcadff2e..a5df672d839 100644
--- a/arch/ia64/sn/kernel/xpnet.c
+++ b/arch/ia64/sn/kernel/xpnet.c
@@ -269,8 +269,9 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
skb->protocol = eth_type_trans(skb, xpnet_device);
skb->ip_summed = CHECKSUM_UNNECESSARY;
- dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p "
- "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n",
+ dev_dbg(xpnet, "passing skb to network layer\n"
+ KERN_DEBUG "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p "
+ "skb->end=0x%p skb->len=%d\n",
(void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
skb_end_pointer(skb), skb->len);
@@ -576,10 +577,10 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
msg->buf_pa = __pa(start_addr);
- dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa="
- "0x%lx, msg->size=%u, msg->leadin_ignore=%u, "
- "msg->tailout_ignore=%u\n", dest_partid,
- XPC_NET_CHANNEL, msg->buf_pa, msg->size,
+ dev_dbg(xpnet, "sending XPC message to %d:%d\n"
+ KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, "
+ "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
+ dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
msg->leadin_ignore, msg->tailout_ignore);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 235d4514e0a..cb027580cd1 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -21,6 +21,7 @@ config MACH_ALCHEMY
config BASLER_EXCITE
bool "Basler eXcite smart camera"
+ select CEVT_R4K
select DMA_COHERENT
select HW_HAS_PCI
select IRQ_CPU
@@ -47,6 +48,7 @@ config BASLER_EXCITE_PROTOTYPE
config BCM47XX
bool "BCM47XX based boards"
+ select CEVT_R4K
select DMA_NONCOHERENT
select HW_HAS_PCI
select IRQ_CPU
@@ -63,6 +65,7 @@ config BCM47XX
config MIPS_COBALT
bool "Cobalt Server"
+ select CEVT_R4K
select DMA_NONCOHERENT
select HW_HAS_PCI
select I8253
@@ -80,6 +83,7 @@ config MIPS_COBALT
config MACH_DECSTATION
bool "DECstations"
select BOOT_ELF32
+ select CEVT_R4K
select DMA_NONCOHERENT
select NO_IOPORT
select IRQ_CPU
@@ -111,6 +115,7 @@ config MACH_JAZZ
select ARC
select ARC32
select ARCH_MAY_HAVE_PC_FDC
+ select CEVT_R4K
select GENERIC_ISA_DMA
select IRQ_CPU
select I8253
@@ -130,6 +135,7 @@ config MACH_JAZZ
config LASAT
bool "LASAT Networks platforms"
+ select CEVT_R4K
select DMA_NONCOHERENT
select SYS_HAS_EARLY_PRINTK
select HW_HAS_PCI
@@ -146,6 +152,7 @@ config LASAT
config LEMOTE_FULONG
bool "Lemote Fulong mini-PC"
select ARCH_SPARSEMEM_ENABLE
+ select CEVT_R4K
select SYS_HAS_CPU_LOONGSON2
select DMA_NONCOHERENT
select BOOT_ELF32
@@ -170,6 +177,7 @@ config LEMOTE_FULONG
config MIPS_ATLAS
bool "MIPS Atlas board"
select BOOT_ELF32
+ select CEVT_R4K
select DMA_NONCOHERENT
select SYS_HAS_EARLY_PRINTK
select IRQ_CPU
@@ -200,6 +208,7 @@ config MIPS_MALTA
bool "MIPS Malta board"
select ARCH_MAY_HAVE_PC_FDC
select BOOT_ELF32
+ select CEVT_R4K
select DMA_NONCOHERENT
select GENERIC_ISA_DMA
select IRQ_CPU
@@ -230,6 +239,7 @@ config MIPS_MALTA
config MIPS_SEAD
bool "MIPS SEAD board"
+ select CEVT_R4K
select IRQ_CPU
select DMA_NONCOHERENT
select SYS_HAS_EARLY_PRINTK
@@ -248,6 +258,7 @@ config MIPS_SEAD
config MIPS_SIM
bool 'MIPS simulator (MIPSsim)'
+ select CEVT_R4K
select DMA_NONCOHERENT
select SYS_HAS_EARLY_PRINTK
select IRQ_CPU
@@ -265,6 +276,7 @@ config MIPS_SIM
config MARKEINS
bool "NEC EMMA2RH Mark-eins"
+ select CEVT_R4K
select DMA_NONCOHERENT
select HW_HAS_PCI
select IRQ_CPU
@@ -279,6 +291,7 @@ config MARKEINS
config MACH_VR41XX
bool "NEC VR4100 series based machines"
+ select CEVT_R4K
select SYS_HAS_CPU_VR41XX
select GENERIC_HARDIRQS_NO__DO_IRQ
@@ -315,6 +328,7 @@ config PMC_MSP
config PMC_YOSEMITE
bool "PMC-Sierra Yosemite eval board"
+ select CEVT_R4K
select DMA_COHERENT
select HW_HAS_PCI
select IRQ_CPU
@@ -335,6 +349,7 @@ config PMC_YOSEMITE
config QEMU
bool "Qemu"
+ select CEVT_R4K
select DMA_COHERENT
select GENERIC_ISA_DMA
select HAVE_STD_PC_SERIAL_PORT
@@ -365,6 +380,7 @@ config SGI_IP22
select ARC
select ARC32
select BOOT_ELF32
+ select CEVT_R4K
select DMA_NONCOHERENT
select HW_HAS_EISA
select I8253
@@ -409,6 +425,7 @@ config SGI_IP32
select ARC
select ARC32
select BOOT_ELF32
+ select CEVT_R4K
select DMA_NONCOHERENT
select HW_HAS_PCI
select IRQ_CPU
@@ -536,6 +553,7 @@ config SNI_RM
select ARC32 if CPU_LITTLE_ENDIAN
select ARCH_MAY_HAVE_PC_FDC
select BOOT_ELF32
+ select CEVT_R4K
select DMA_NONCOHERENT
select GENERIC_ISA_DMA
select HW_HAS_EISA
@@ -577,6 +595,7 @@ config TOSHIBA_JMR3927
config TOSHIBA_RBTX4927
bool "Toshiba RBTX49[23]7 board"
+ select CEVT_R4K
select DMA_NONCOHERENT
select HAS_TXX9_SERIAL
select HW_HAS_PCI
@@ -597,6 +616,7 @@ config TOSHIBA_RBTX4927
config TOSHIBA_RBTX4938
bool "Toshiba RBTX4938 board"
+ select CEVT_R4K
select DMA_NONCOHERENT
select HAS_TXX9_SERIAL
select HW_HAS_PCI
@@ -616,6 +636,7 @@ config TOSHIBA_RBTX4938
config WR_PPMC
bool "Wind River PPMC board"
+ select CEVT_R4K
select IRQ_CPU
select BOOT_ELF32
select DMA_NONCOHERENT
@@ -708,6 +729,9 @@ config ARCH_MAY_HAVE_PC_FDC
config BOOT_RAW
bool
+config CEVT_R4K
+ bool
+
config CFE
bool
diff --git a/arch/mips/au1000/Kconfig b/arch/mips/au1000/Kconfig
index a23d4154da0..b36cec58a9a 100644
--- a/arch/mips/au1000/Kconfig
+++ b/arch/mips/au1000/Kconfig
@@ -137,6 +137,7 @@ config SOC_AU1200
config SOC_AU1X00
bool
select 64BIT_PHYS_ADDR
+ select CEVT_R4K
select IRQ_CPU
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_32BIT_KERNEL
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index 835b056cea3..ae25b480723 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -4,7 +4,7 @@
* for more details.
*
* Copyright (C) 1992 Linus Torvalds
- * Copyright (C) 1994 - 2001, 2003 Ralf Baechle
+ * Copyright (C) 1994 - 2001, 2003, 07 Ralf Baechle
*/
#include <linux/clockchips.h>
#include <linux/init.h>
@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <asm/irq_cpu.h>
+#include <asm/i8253.h>
#include <asm/i8259.h>
#include <asm/io.h>
#include <asm/jazz.h>
@@ -136,7 +137,7 @@ static struct irqaction r4030_timer_irqaction = {
.name = "timer",
};
-void __init plat_timer_setup(struct irqaction *ignored)
+void __init plat_time_init(void)
{
struct irqaction *irq = &r4030_timer_irqaction;
@@ -152,4 +153,5 @@ void __init plat_timer_setup(struct irqaction *ignored)
setup_irq(JAZZ_TIMER_IRQ, irq);
clockevents_register_device(&r4030_clockevent);
+ setup_pit_timer();
}
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index cfc7dce78da..a7857973ca0 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 1997, 1998, 2001 by Ralf Baechle
+ * Copyright (C) 1996, 1997, 1998, 2001, 07 by Ralf Baechle
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2007 by Thomas Bogendoerfer
*/
@@ -25,7 +25,6 @@
#include <linux/serial_8250.h>
#include <asm/bootinfo.h>
-#include <asm/i8253.h>
#include <asm/irq.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
@@ -64,11 +63,6 @@ static struct resource jazz_io_resources[] = {
}
};
-void __init plat_time_init(void)
-{
- setup_pit_timer();
-}
-
void __init plat_mem_setup(void)
{
int i;
diff --git a/arch/mips/jmr3927/rbhma3100/setup.c b/arch/mips/jmr3927/rbhma3100/setup.c
index 0c7aee1682c..edb9e59248e 100644
--- a/arch/mips/jmr3927/rbhma3100/setup.c
+++ b/arch/mips/jmr3927/rbhma3100/setup.c
@@ -1,15 +1,4 @@
-/***********************************************************************
- *
- * Copyright 2001 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- * ahennessy@mvista.com
- *
- * Based on arch/mips/ddb5xxx/ddb5477/setup.c
- *
- * Setup file for JMR3927.
- *
- * Copyright (C) 2000-2001 Toshiba Corporation
- *
+/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -30,9 +19,15 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
- ***********************************************************************
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ahennessy@mvista.com
+ *
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
*/
+#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kdev_t.h>
@@ -104,27 +99,60 @@ static cycle_t jmr3927_hpt_read(void)
return jiffies * (JMR3927_TIMER_CLK / HZ) + jmr3927_tmrptr->trr;
}
-static void jmr3927_timer_ack(void)
+static void jmr3927_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do here */
+}
+
+struct clock_event_device jmr3927_clock_event_device = {
+ .name = "MIPS",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .shift = 32,
+ .rating = 300,
+ .cpumask = CPU_MASK_CPU0,
+ .irq = JMR3927_IRQ_TICK,
+ .set_mode = jmr3927_set_mode,
+};
+
+static irqreturn_t jmr3927_timer_interrupt(int irq, void *dev_id)
{
+ struct clock_event_device *cd = &jmr3927_clock_event_device;
+
jmr3927_tmrptr->tisr = 0; /* ack interrupt */
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
}
+static struct irqaction jmr3927_timer_irqaction = {
+ .handler = jmr3927_timer_interrupt,
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .name = "jmr3927-timer",
+};
+
void __init plat_time_init(void)
{
+ struct clock_event_device *cd;
+
clocksource_mips.read = jmr3927_hpt_read;
- mips_timer_ack = jmr3927_timer_ack;
mips_hpt_frequency = JMR3927_TIMER_CLK;
-}
-void __init plat_timer_setup(struct irqaction *irq)
-{
jmr3927_tmrptr->cpra = JMR3927_TIMER_CLK / HZ;
jmr3927_tmrptr->itmr = TXx927_TMTITMR_TIIE | TXx927_TMTITMR_TZCE;
jmr3927_tmrptr->ccdr = JMR3927_TIMER_CCD;
jmr3927_tmrptr->tcr =
TXx927_TMTCR_TCE | TXx927_TMTCR_CCDE | TXx927_TMTCR_TMODE_ITVL;
- setup_irq(JMR3927_IRQ_TICK, irq);
+ cd = &jmr3927_clock_event_device;
+ /* Calculate the min / max delta */
+ cd->mult = div_sc((unsigned long) JMR3927_IMCLK, NSEC_PER_SEC, 32);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ clockevents_register_device(cd);
+
+ setup_irq(JMR3927_IRQ_TICK, &jmr3927_timer_irqaction);
}
#define DO_WRITE_THROUGH
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 95a356ef391..a3afa39faae 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -8,6 +8,8 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
time.o topology.o traps.o unaligned.o
+obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
+
binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
irix5sys.o sysirix.o
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
new file mode 100644
index 00000000000..08b84d476c8
--- /dev/null
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -0,0 +1,272 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+
+#include <asm/time.h>
+
+static int mips_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned int cnt;
+ int res;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ {
+ unsigned long flags, vpflags;
+ local_irq_save(flags);
+ vpflags = dvpe();
+#endif
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
+ res = ((long)(read_c0_count() - cnt ) > 0) ? -ETIME : 0;
+#ifdef CONFIG_MIPS_MT_SMTC
+ evpe(vpflags);
+ local_irq_restore(flags);
+ }
+#endif
+ return res;
+}
+
+static void mips_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do ... */
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+static int cp0_timer_irq_installed;
+
+/*
+ * Timer ack for an R4k-compatible timer of a known frequency.
+ */
+static void c0_timer_ack(void)
+{
+ write_c0_compare(read_c0_compare());
+}
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+static inline int handle_perf_irq(int r2)
+{
+ /*
+ * The performance counter overflow interrupt may be shared with the
+ * timer interrupt (cp0_perfcount_irq < 0). If it is and a
+ * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+ * and we can't reliably determine if a counter interrupt has also
+ * happened (!r2) then don't check for a timer interrupt.
+ */
+ return (cp0_perfcount_irq < 0) &&
+ perf_irq() == IRQ_HANDLED &&
+ !r2;
+}
+
+static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+{
+ const int r2 = cpu_has_mips_r2;
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
+
+ /*
+ * Suckage alert:
+ * Before R2 of the architecture there was no way to see if a
+ * performance counter interrupt was pending, so we have to run
+ * the performance counter interrupt handler anyway.
+ */
+ if (handle_perf_irq(r2))
+ goto out;
+
+ /*
+ * The same applies to performance counter interrupts. But with the
+ * above we now know that the reason we got here must be a timer
+ * interrupt. Being the paranoiacs we are we check anyway.
+ */
+ if (!r2 || (read_c0_cause() & (1 << 30))) {
+ c0_timer_ack();
+#ifdef CONFIG_MIPS_MT_SMTC
+ if (cpu_data[cpu].vpe_id)
+ goto out;
+ cpu = 0;
+#endif
+ cd = &per_cpu(mips_clockevent_device, cpu);
+ cd->event_handler(cd);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+static struct irqaction c0_compare_irqaction = {
+ .handler = c0_compare_interrupt,
+#ifdef CONFIG_MIPS_MT_SMTC
+ .flags = IRQF_DISABLED,
+#else
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+#endif
+ .name = "timer",
+};
+
+#ifdef CONFIG_MIPS_MT_SMTC
+DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+
+static void smtc_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+}
+
+static void mips_broadcast(cpumask_t mask)
+{
+ unsigned int cpu;
+
+ for_each_cpu_mask(cpu, mask)
+ smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+}
+
+static void setup_smtc_dummy_clockevent_device(void)
+{
+	//uint64_t mips_freq = mips_hpt_frequency;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+
+ cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
+
+ cd->name = "SMTC";
+ cd->features = CLOCK_EVT_FEAT_DUMMY;
+
+ /* Calculate the min / max delta */
+ cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+ cd->shift = 0; //32;
+ cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
+
+ cd->rating = 200;
+ cd->irq = 17; //-1;
+// if (cpu)
+// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
+// else
+ cd->cpumask = cpumask_of_cpu(cpu);
+
+ cd->set_mode = smtc_set_mode;
+
+ cd->broadcast = mips_broadcast;
+
+ clockevents_register_device(cd);
+}
+#endif
+
+static void mips_event_handler(struct clock_event_device *dev)
+{
+}
+
+/*
+ * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
+ */
+static int c0_compare_int_pending(void)
+{
+ return (read_c0_cause() >> cp0_compare_irq) & 0x100;
+}
+
+static int c0_compare_int_usable(void)
+{
+ const unsigned int delta = 0x300000;
+ unsigned int cnt;
+
+ /*
+ * IP7 already pending? Try to clear it by acking the timer.
+ */
+ if (c0_compare_int_pending()) {
+ write_c0_compare(read_c0_compare());
+ irq_disable_hazard();
+ if (c0_compare_int_pending())
+ return 0;
+ }
+
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
+
+ while ((long)(read_c0_count() - cnt) <= 0)
+ ; /* Wait for expiry */
+
+ if (!c0_compare_int_pending())
+ return 0;
+
+ write_c0_compare(read_c0_compare());
+ irq_disable_hazard();
+ if (c0_compare_int_pending())
+ return 0;
+
+ /*
+ * Feels like a real count / compare timer.
+ */
+ return 1;
+}
+
+void __cpuinit mips_clockevent_init(void)
+{
+ uint64_t mips_freq = mips_hpt_frequency;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+ unsigned int irq = MIPS_CPU_IRQ_BASE + 7;
+
+ if (!cpu_has_counter)
+ return;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ setup_smtc_dummy_clockevent_device();
+
+ /*
+ * On SMTC we only register VPE0's compare interrupt as clockevent
+ * device.
+ */
+ if (cpu)
+ return;
+#endif
+
+ if (!c0_compare_int_usable())
+ return;
+
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ cd->name = "MIPS";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+
+ /* Calculate the min / max delta */
+ cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+ cd->shift = 32;
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+
+ cd->rating = 300;
+ cd->irq = irq;
+#ifdef CONFIG_MIPS_MT_SMTC
+ cd->cpumask = CPU_MASK_ALL;
+#else
+ cd->cpumask = cpumask_of_cpu(cpu);
+#endif
+ cd->set_next_event = mips_next_event;
+ cd->set_mode = mips_set_mode;
+ cd->event_handler = mips_event_handler;
+
+ clockevents_register_device(cd);
+
+ if (!cp0_timer_irq_installed) {
+#ifdef CONFIG_MIPS_MT_SMTC
+#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
+ setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
+#else
+ setup_irq(irq, &c0_compare_irqaction);
+#endif /* CONFIG_MIPS_MT_SMTC */
+ cp0_timer_irq_installed = 1;
+ }
+}
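The mult/shift pair computed above is the standard clockevents fixed-point conversion: mult is roughly (frequency << shift) / NSEC_PER_SEC, and clockevent_delta2ns() inverts it to find how many nanoseconds a given count/compare delta covers. A standalone sketch of that arithmetic, assuming a hypothetical 100 MHz CP0 count rate; the helpers here are reimplemented for illustration, not the kernel's own:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* same computation as the kernel's div_sc(freq, NSEC_PER_SEC, 32) */
static uint64_t div_sc32(uint64_t freq)
{
        return (freq << 32) / NSEC_PER_SEC;
}

/* same idea as clockevent_delta2ns(): count/compare cycles -> nanoseconds */
static uint64_t delta2ns(uint32_t cycles, uint64_t mult)
{
        return ((uint64_t)cycles << 32) / mult;
}

int main(void)
{
        uint64_t mult = div_sc32(100000000);    /* assumed 100 MHz count rate */

        printf("mult         = %llu\n", (unsigned long long)mult);
        printf("max_delta_ns = %llu\n",         /* 0x7fffffff cycles, ~21.5 s */
               (unsigned long long)delta2ns(0x7fffffff, mult));
        printf("min_delta_ns = %llu\n",         /* 0x300 cycles, ~7.7 us */
               (unsigned long long)delta2ns(0x300, mult));
        return 0;
}
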
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index bf164a562ac..23676873106 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -27,16 +27,6 @@
#include <kernel-entry-init.h>
- .macro ARC64_TWIDDLE_PC
-#if defined(CONFIG_ARC64) || defined(CONFIG_MAPPED_KERNEL)
- /* We get launched at a XKPHYS address but the kernel is linked to
- run at a KSEG0 address, so jump there. */
- PTR_LA t0, \@f
- jr t0
-\@:
-#endif
- .endm
-
/*
* inputs are the text nasid in t1, data nasid in t2.
*/
@@ -157,7 +147,11 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
setup_c0_status_pri
- ARC64_TWIDDLE_PC
+ /* We might not get launched at the address the kernel is linked to,
+ so we jump there. */
+ PTR_LA t0, 0f
+ jr t0
+0:
#ifdef CONFIG_MIPS_MT_SMTC
/*
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index e4b5e647b14..ea7cfe766a8 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -72,14 +72,6 @@ int update_persistent_clock(struct timespec now)
return rtc_mips_set_mmss(now.tv_sec);
}
-/* how many counter cycles in a jiffy */
-static unsigned long cycles_per_jiffy __read_mostly;
-
-/*
- * Null timer ack for systems not needing one (e.g. i8254).
- */
-static void null_timer_ack(void) { /* nothing */ }
-
/*
* Null high precision timer functions for systems lacking one.
*/
@@ -89,14 +81,6 @@ static cycle_t null_hpt_read(void)
}
/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
- write_c0_compare(read_c0_compare());
-}
-
-/*
* High precision timer functions for a R4k-compatible timer.
*/
static cycle_t c0_hpt_read(void)
@@ -105,7 +89,6 @@ static cycle_t c0_hpt_read(void)
}
int (*mips_timer_state)(void);
-void (*mips_timer_ack)(void);
/*
* local_timer_interrupt() does profiling and process accounting
@@ -135,35 +118,6 @@ int (*perf_irq)(void) = null_perf_irq;
EXPORT_SYMBOL(perf_irq);
/*
- * Timer interrupt
- */
-int cp0_compare_irq;
-
-/*
- * Performance counter IRQ or -1 if shared with timer
- */
-int cp0_perfcount_irq;
-EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
-
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
- /*
- * The performance counter overflow interrupt may be shared with the
- * timer interrupt (cp0_perfcount_irq < 0). If it is and a
- * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
- * and we can't reliably determine if a counter interrupt has also
- * happened (!r2) then don't check for a timer interrupt.
- */
- return (cp0_perfcount_irq < 0) &&
- perf_irq() == IRQ_HANDLED &&
- !r2;
-}
-
-/*
* time_init() - it does the following things.
*
* 1) plat_time_init() -
@@ -228,84 +182,6 @@ struct clocksource clocksource_mips = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static int mips_next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- unsigned int cnt;
- int res;
-
-#ifdef CONFIG_MIPS_MT_SMTC
- {
- unsigned long flags, vpflags;
- local_irq_save(flags);
- vpflags = dvpe();
-#endif
- cnt = read_c0_count();
- cnt += delta;
- write_c0_compare(cnt);
- res = ((long)(read_c0_count() - cnt ) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
- evpe(vpflags);
- local_irq_restore(flags);
- }
-#endif
- return res;
-}
-
-static void mips_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- /* Nothing to do ... */
-}
-
-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- const int r2 = cpu_has_mips_r2;
- struct clock_event_device *cd;
- int cpu = smp_processor_id();
-
- /*
- * Suckage alert:
- * Before R2 of the architecture there was no way to see if a
- * performance counter interrupt was pending, so we have to run
- * the performance counter interrupt handler anyway.
- */
- if (handle_perf_irq(r2))
- goto out;
-
- /*
- * The same applies to performance counter interrupts. But with the
- * above we now know that the reason we got here must be a timer
- * interrupt. Being the paranoiacs we are we check anyway.
- */
- if (!r2 || (read_c0_cause() & (1 << 30))) {
- c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
- if (cpu_data[cpu].vpe_id)
- goto out;
- cpu = 0;
-#endif
- cd = &per_cpu(mips_clockevent_device, cpu);
- cd->event_handler(cd);
- }
-
-out:
- return IRQ_HANDLED;
-}
-
-static struct irqaction timer_irqaction = {
- .handler = timer_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
- .flags = IRQF_DISABLED,
-#else
- .flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
- .name = "timer",
-};
-
static void __init init_mips_clocksource(void)
{
u64 temp;
@@ -345,8 +221,6 @@ static void smtc_set_mode(enum clock_event_mode mode,
{
}
-int dummycnt[NR_CPUS];
-
static void mips_broadcast(cpumask_t mask)
{
unsigned int cpu;
@@ -387,113 +261,6 @@ static void setup_smtc_dummy_clockevent_device(void)
}
#endif
-static void mips_event_handler(struct clock_event_device *dev)
-{
-}
-
-/*
- * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
- */
-static int c0_compare_int_pending(void)
-{
- return (read_c0_cause() >> cp0_compare_irq) & 0x100;
-}
-
-static int c0_compare_int_usable(void)
-{
- const unsigned int delta = 0x300000;
- unsigned int cnt;
-
- /*
- * IP7 already pending? Try to clear it by acking the timer.
- */
- if (c0_compare_int_pending()) {
- write_c0_compare(read_c0_compare());
- irq_disable_hazard();
- if (c0_compare_int_pending())
- return 0;
- }
-
- cnt = read_c0_count();
- cnt += delta;
- write_c0_compare(cnt);
-
- while ((long)(read_c0_count() - cnt) <= 0)
- ; /* Wait for expiry */
-
- if (!c0_compare_int_pending())
- return 0;
-
- write_c0_compare(read_c0_compare());
- irq_disable_hazard();
- if (c0_compare_int_pending())
- return 0;
-
- /*
- * Feels like a real count / compare timer.
- */
- return 1;
-}
-
-void __cpuinit mips_clockevent_init(void)
-{
- uint64_t mips_freq = mips_hpt_frequency;
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *cd;
- unsigned int irq = MIPS_CPU_IRQ_BASE + 7;
-
- if (!cpu_has_counter)
- return;
-
-#ifdef CONFIG_MIPS_MT_SMTC
- setup_smtc_dummy_clockevent_device();
-
- /*
- * On SMTC we only register VPE0's compare interrupt as clockevent
- * device.
- */
- if (cpu)
- return;
-#endif
-
- if (!c0_compare_int_usable())
- return;
-
- cd = &per_cpu(mips_clockevent_device, cpu);
-
- cd->name = "MIPS";
- cd->features = CLOCK_EVT_FEAT_ONESHOT;
-
- /* Calculate the min / max delta */
- cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
- cd->shift = 32;
- cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
- cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
-
- cd->rating = 300;
- cd->irq = irq;
-#ifdef CONFIG_MIPS_MT_SMTC
- cd->cpumask = CPU_MASK_ALL;
-#else
- cd->cpumask = cpumask_of_cpu(cpu);
-#endif
- cd->set_next_event = mips_next_event;
- cd->set_mode = mips_set_mode;
- cd->event_handler = mips_event_handler;
-
- clockevents_register_device(cd);
-
- if (!cp0_timer_irq_installed) {
-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
- setup_irq_smtc(irq, &timer_irqaction, CPUCTR_IMASKBIT);
-#else
- setup_irq(irq, &timer_irqaction);
-#endif /* CONFIG_MIPS_MT_SMTC */
- cp0_timer_irq_installed = 1;
- }
-}
-
void __init time_init(void)
{
plat_time_init();
@@ -512,14 +279,6 @@ void __init time_init(void)
if (!clocksource_mips.read) {
/* No external high precision timer -- use R4k. */
clocksource_mips.read = c0_hpt_read;
-
- if (!mips_timer_state) {
- /* No external timer interrupt -- use R4k. */
- mips_timer_ack = c0_timer_ack;
- /* Calculate cache parameters. */
- cycles_per_jiffy =
- (mips_hpt_frequency + HZ / 2) / HZ;
- }
}
if (!mips_hpt_frequency)
mips_hpt_frequency = calibrate_hpt();
@@ -528,29 +287,8 @@ void __init time_init(void)
printk("Using %u.%03u MHz high precision timer.\n",
((mips_hpt_frequency + 500) / 1000) / 1000,
((mips_hpt_frequency + 500) / 1000) % 1000);
-
-#ifdef CONFIG_IRQ_CPU
- setup_irq(MIPS_CPU_IRQ_BASE + 7, &timer_irqaction);
-#endif
}
- if (!mips_timer_ack)
- /* No timer interrupt ack (e.g. i8254). */
- mips_timer_ack = null_timer_ack;
-
- /*
- * Call board specific timer interrupt setup.
- *
- * this pointer must be setup in machine setup routine.
- *
- * Even if a machine chooses to use a low-level timer interrupt,
- * it still needs to setup the timer_irqaction.
- * In that case, it might be better to set timer_irqaction.handler
- * to be NULL function so that we are sure the high-level code
- * is not invoked accidentally.
- */
- plat_timer_setup(&timer_irqaction);
-
init_mips_clocksource();
mips_clockevent_init();
}
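After this rework time_init() drives the whole sequence: it calls plat_time_init(), falls back to the CP0 count register when no clocksource was supplied, calibrates mips_hpt_frequency if the board did not set it, then registers the clocksource and calls mips_clockevent_init(), which on CEVT_R4K platforms installs the count/compare clockevent from cevt-r4k.c. For a board whose only timer is the R4k-style count/compare pair, the per-platform hook shrinks to something like the sketch below (the frequency is a placeholder, not taken from any board in this diff):

void __init plat_time_init(void)
{
        /*
         * Nothing to wire up by hand any more: time_init() picks the CP0
         * count register as the clocksource and cevt-r4k.c installs the
         * clockevent and its interrupt.  Only the input clock is needed.
         */
        mips_hpt_frequency = 100000000;         /* placeholder, board specific */
}
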
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index bbf01b81a4f..7b78d137259 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1336,6 +1336,17 @@ extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);
+/*
+ * Timer interrupt
+ */
+int cp0_compare_irq;
+
+/*
+ * Performance counter IRQ or -1 if shared with timer
+ */
+int cp0_perfcount_irq;
+EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
+
void __init per_cpu_trap_init(void)
{
unsigned int cpu = smp_processor_id();
diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig
index abbd0bbfabd..6b293ce0935 100644
--- a/arch/mips/pmc-sierra/Kconfig
+++ b/arch/mips/pmc-sierra/Kconfig
@@ -4,11 +4,13 @@ choice
config PMC_MSP4200_EVAL
bool "PMC-Sierra MSP4200 Eval Board"
+ select CEVT_R4K
select IRQ_MSP_SLP
select HW_HAS_PCI
config PMC_MSP4200_GW
bool "PMC-Sierra MSP4200 VoIP Gateway"
+ select CEVT_R4K
select IRQ_MSP_SLP
select HW_HAS_PCI
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 856649cf9f1..1bb692a3b31 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -374,14 +374,13 @@ int __devinit request_bridge_irq(struct bridge_controller *bc)
return irq;
}
-extern void ip27_rt_timer_interrupt(void);
-
asmlinkage void plat_irq_dispatch(void)
{
unsigned long pending = read_c0_cause() & read_c0_status();
+ extern unsigned int rt_timer_irq;
if (pending & CAUSEF_IP4)
- ip27_rt_timer_interrupt();
+ do_IRQ(rt_timer_irq);
else if (pending & CAUSEF_IP2) /* PI_INT_PEND_0 or CC_PEND_{A|B} */
ip27_do_irq_mask0();
else if (pending & CAUSEF_IP3) /* PI_INT_PEND_1 */
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index b7b3479b6bc..d467bf4f6c3 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -3,6 +3,7 @@
* Copytight (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/bcd.h>
+#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -25,22 +26,8 @@
#include <asm/sn/sn0/ip27.h>
#include <asm/sn/sn0/hub.h>
-/*
- * This is a hack; we really need to figure these values out dynamically
- *
- * Since 800 ns works very well with various HUB frequencies, such as
- * 360, 380, 390 and 400 MHZ, we use 800 ns rtc cycle time.
- *
- * Ralf: which clock rate is used to feed the counter?
- */
-#define NSEC_PER_CYCLE 800
-#define CYCLES_PER_SEC (NSEC_PER_SEC/NSEC_PER_CYCLE)
-#define CYCLES_PER_JIFFY (CYCLES_PER_SEC/HZ)
-
#define TICK_SIZE (tick_nsec / 1000)
-static unsigned long ct_cur[NR_CPUS]; /* What counter should be at next timer irq */
-
#if 0
static int set_rtc_mmss(unsigned long nowtime)
{
@@ -86,36 +73,6 @@ static int set_rtc_mmss(unsigned long nowtime)
}
#endif
-static unsigned int rt_timer_irq;
-
-void ip27_rt_timer_interrupt(void)
-{
- int cpu = smp_processor_id();
- int cpuA = cputoslice(cpu) == 0;
- unsigned int irq = rt_timer_irq;
-
- irq_enter();
- write_seqlock(&xtime_lock);
-
-again:
- LOCAL_HUB_S(cpuA ? PI_RT_PEND_A : PI_RT_PEND_B, 0); /* Ack */
- ct_cur[cpu] += CYCLES_PER_JIFFY;
- LOCAL_HUB_S(cpuA ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, ct_cur[cpu]);
-
- if (LOCAL_HUB_L(PI_RT_COUNT) >= ct_cur[cpu])
- goto again;
-
- kstat_this_cpu.irqs[irq]++; /* kstat only for bootcpu? */
-
- if (cpu == 0)
- do_timer(1);
-
- update_process_times(user_mode(get_irq_regs()));
-
- write_sequnlock(&xtime_lock);
- irq_exit();
-}
-
/* Includes for ioc3_init(). */
#include <asm/sn/types.h>
#include <asm/sn/sn0/addrs.h>
@@ -154,6 +111,46 @@ unsigned long read_persistent_clock(void)
return mktime(year, month, date, hour, min, sec);
}
+static int rt_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ int slice = cputoslice(cpu) == 0;
+ unsigned long cnt;
+
+ cnt = LOCAL_HUB_L(PI_RT_COUNT);
+ cnt += delta;
+ LOCAL_HUB_S(slice ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, cnt);
+
+ return LOCAL_HUB_L(PI_RT_COUNT) >= cnt ? -ETIME : 0;
+}
+
+static void rt_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* The only mode supported */
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_RESUME:
+ /* Nothing to do */
+ break;
+ }
+}
+
+struct clock_event_device rt_clock_event_device = {
+ .name = "HUB-RT",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+
+ .rating = 300,
+ .set_next_event = rt_set_next_event,
+ .set_mode = rt_set_mode,
+};
+
static void enable_rt_irq(unsigned int irq)
{
}
@@ -171,6 +168,20 @@ static struct irq_chip rt_irq_type = {
.eoi = enable_rt_irq,
};
+unsigned int rt_timer_irq;
+
+static irqreturn_t ip27_rt_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = &rt_clock_event_device;
+ unsigned int cpu = smp_processor_id();
+ int slice = cputoslice(cpu) == 0;
+
+ LOCAL_HUB_S(slice ? PI_RT_PEND_A : PI_RT_PEND_B, 0); /* Ack */
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
static struct irqaction rt_irqaction = {
.handler = (irq_handler_t) ip27_rt_timer_interrupt,
.flags = IRQF_DISABLED,
@@ -178,26 +189,43 @@ static struct irqaction rt_irqaction = {
.name = "timer"
};
-void __init plat_timer_setup(struct irqaction *irq)
+/*
+ * This is a hack; we really need to figure these values out dynamically
+ *
+ * Since 800 ns works very well with various HUB frequencies, such as
+ * 360, 380, 390 and 400 MHZ, we use 800 ns rtc cycle time.
+ *
+ * Ralf: which clock rate is used to feed the counter?
+ */
+#define NSEC_PER_CYCLE 800
+#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE)
+
+static void __init ip27_rt_clock_event_init(void)
{
- int irqno = allocate_irqno();
+ struct clock_event_device *cd = &rt_clock_event_device;
+ unsigned int cpu = smp_processor_id();
+ int irq = allocate_irqno();
- if (irqno < 0)
+ if (irq < 0)
panic("Can't allocate interrupt number for timer interrupt");
- set_irq_chip_and_handler(irqno, &rt_irq_type, handle_percpu_irq);
+ rt_timer_irq = irq;
- /* over-write the handler, we use our own way */
- irq->handler = no_action;
+ cd->irq = irq,
+ cd->cpumask = cpumask_of_cpu(cpu),
- /* setup irqaction */
- irq_desc[irqno].status |= IRQ_PER_CPU;
-
- rt_timer_irq = irqno;
/*
- * Only needed to get /proc/interrupt to display timer irq stats
+ * Calculate the min / max delta
*/
- setup_irq(irqno, &rt_irqaction);
+ cd->mult =
+ div_sc((unsigned long) CYCLES_PER_SEC, NSEC_PER_SEC, 32);
+ cd->shift = 32;
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ clockevents_register_device(cd);
+
+ set_irq_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq);
+ setup_irq(irq, &rt_irqaction);
}
static cycle_t hub_rt_read(void)
@@ -206,7 +234,7 @@ static cycle_t hub_rt_read(void)
}
struct clocksource ht_rt_clocksource = {
- .name = "HUB",
+ .name = "HUB-RT",
.rating = 200,
.read = hub_rt_read,
.mask = CLOCKSOURCE_MASK(52),
@@ -214,11 +242,17 @@ struct clocksource ht_rt_clocksource = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-void __init plat_time_init(void)
+static void __init ip27_rt_clocksource_init(void)
{
clocksource_register(&ht_rt_clocksource);
}
+void __init plat_time_init(void)
+{
+ ip27_rt_clock_event_init();
+ ip27_rt_clocksource_init();
+}
+
void __init cpu_time_init(void)
{
lboard_t *board;
@@ -248,17 +282,12 @@ void __init hub_rtc_init(cnodeid_t cnode)
* node and timeouts will not happen there.
*/
if (get_compact_nodeid() == cnode) {
- int cpu = smp_processor_id();
LOCAL_HUB_S(PI_RT_EN_A, 1);
LOCAL_HUB_S(PI_RT_EN_B, 1);
LOCAL_HUB_S(PI_PROF_EN_A, 0);
LOCAL_HUB_S(PI_PROF_EN_B, 0);
- ct_cur[cpu] = CYCLES_PER_JIFFY;
- LOCAL_HUB_S(PI_RT_COMPARE_A, ct_cur[cpu]);
LOCAL_HUB_S(PI_RT_COUNT, 0);
LOCAL_HUB_S(PI_RT_PEND_A, 0);
- LOCAL_HUB_S(PI_RT_COMPARE_B, ct_cur[cpu]);
- LOCAL_HUB_S(PI_RT_COUNT, 0);
LOCAL_HUB_S(PI_RT_PEND_B, 0);
}
}
diff --git a/arch/mips/sgi-ip32/ip32-setup.c b/arch/mips/sgi-ip32/ip32-setup.c
index fc75bfcb0c0..1024bf40bd9 100644
--- a/arch/mips/sgi-ip32/ip32-setup.c
+++ b/arch/mips/sgi-ip32/ip32-setup.c
@@ -80,12 +80,6 @@ void __init plat_time_init(void)
printk("%d MHz CPU detected\n", mips_hpt_frequency * 2 / 1000000);
}
-void __init plat_timer_setup(struct irqaction *irq)
-{
- irq->handler = no_action;
- setup_irq(MIPS_CPU_IRQ_BASE + 7, irq);
-}
-
void __init plat_mem_setup(void)
{
board_be_init = ip32_be_init;
diff --git a/arch/mips/vr41xx/Kconfig b/arch/mips/vr41xx/Kconfig
index 8f4d3e74c23..eeb089f20c0 100644
--- a/arch/mips/vr41xx/Kconfig
+++ b/arch/mips/vr41xx/Kconfig
@@ -5,6 +5,7 @@ choice
config CASIO_E55
bool "CASIO CASSIOPEIA E-10/15/55/65"
+ select CEVT_R4K
select DMA_NONCOHERENT
select IRQ_CPU
select ISA
@@ -13,6 +14,7 @@ config CASIO_E55
config IBM_WORKPAD
bool "IBM WorkPad z50"
+ select CEVT_R4K
select DMA_NONCOHERENT
select IRQ_CPU
select ISA
@@ -21,6 +23,7 @@ config IBM_WORKPAD
config NEC_CMBVR4133
bool "NEC CMB-VR4133"
+ select CEVT_R4K
select DMA_NONCOHERENT
select IRQ_CPU
select HW_HAS_PCI
@@ -29,6 +32,7 @@ config NEC_CMBVR4133
config TANBAC_TB022X
bool "TANBAC VR4131 multichip module and TANBAC VR4131DIMM"
+ select CEVT_R4K
select DMA_NONCOHERENT
select IRQ_CPU
select HW_HAS_PCI
@@ -43,6 +47,7 @@ config TANBAC_TB022X
config VICTOR_MPC30X
bool "Victor MP-C303/304"
+ select CEVT_R4K
select DMA_NONCOHERENT
select IRQ_CPU
select HW_HAS_PCI
@@ -52,6 +57,7 @@ config VICTOR_MPC30X
config ZAO_CAPCELLA
bool "ZAO Networks Capcella"
+ select CEVT_R4K
select DMA_NONCOHERENT
select IRQ_CPU
select HW_HAS_PCI
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 95b823b60c9..8e5988c4a16 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -209,7 +209,6 @@ CONFIG_PM=y
# CONFIG_PM_LEGACY is not set
CONFIG_PM_DEBUG=y
# CONFIG_PM_VERBOSE is not set
-# CONFIG_DISABLE_CONSOLE_SUSPEND is not set
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_HIBERNATION=y
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0ae5d57b936..2c8e756d19a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -141,6 +141,7 @@ int main(void)
DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
+ DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 863a5d6d9b1..9eb3284deac 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -212,23 +212,44 @@ static u64 read_purr(void)
}
/*
+ * Read the SPURR on systems that have it, otherwise the purr
+ */
+static u64 read_spurr(u64 purr)
+{
+ if (cpu_has_feature(CPU_FTR_SPURR))
+ return mfspr(SPRN_SPURR);
+ return purr;
+}
+
+/*
* Account time for a transition between system, hard irq
* or soft irq state.
*/
void account_system_vtime(struct task_struct *tsk)
{
- u64 now, delta;
+ u64 now, nowscaled, delta, deltascaled;
unsigned long flags;
local_irq_save(flags);
now = read_purr();
delta = now - get_paca()->startpurr;
get_paca()->startpurr = now;
+ nowscaled = read_spurr(now);
+ deltascaled = nowscaled - get_paca()->startspurr;
+ get_paca()->startspurr = nowscaled;
if (!in_interrupt()) {
+ /* deltascaled includes both user and system time.
+ * Hence scale it based on the purr ratio to estimate
+ * the system time */
+ deltascaled = deltascaled * get_paca()->system_time /
+ (get_paca()->system_time + get_paca()->user_time);
delta += get_paca()->system_time;
get_paca()->system_time = 0;
}
account_system_time(tsk, 0, delta);
+ get_paca()->purrdelta = delta;
+ account_system_time_scaled(tsk, deltascaled);
+ get_paca()->spurrdelta = deltascaled;
local_irq_restore(flags);
}
@@ -240,11 +261,17 @@ void account_system_vtime(struct task_struct *tsk)
*/
void account_process_vtime(struct task_struct *tsk)
{
- cputime_t utime;
+ cputime_t utime, utimescaled;
utime = get_paca()->user_time;
get_paca()->user_time = 0;
account_user_time(tsk, utime);
+
+ /* Estimate the scaled utime by scaling the real utime based
+ * on the last spurr to purr ratio */
+ utimescaled = utime * get_paca()->spurrdelta / get_paca()->purrdelta;
+ get_paca()->spurrdelta = get_paca()->purrdelta = 0;
+ account_user_time_scaled(tsk, utimescaled);
}
static void account_process_time(struct pt_regs *regs)
@@ -266,6 +293,7 @@ struct cpu_purr_data {
int initialized; /* thread is running */
u64 tb; /* last TB value read */
u64 purr; /* last PURR value read */
+ u64 spurr; /* last SPURR value read */
};
/*
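The SPURR accounting added above boils down to two proportions: the scaled system time is the SPURR delta pro-rated by the PURR-based system/user split, and the scaled user time is the raw user time multiplied by the most recent spurrdelta/purrdelta ratio. A small standalone sketch of that arithmetic with hypothetical sample values; the real code works on the per-CPU paca fields:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* hypothetical deltas since the last accounting point, in ticks */
        uint64_t purr_delta  = 1000;    /* raw PURR cycles in system context */
        uint64_t spurr_delta = 800;     /* SPURR cycles, scaled by dispatch rate */
        uint64_t system_time = 600;     /* PURR cycles charged as system time */
        uint64_t user_time   = 400;     /* PURR cycles charged as user time */

        /* system share of the scaled delta, as in account_system_vtime() */
        uint64_t sys_scaled = spurr_delta * system_time /
                              (system_time + user_time);

        /*
         * user time scaled by the last spurr/purr ratio, as in
         * account_process_vtime()
         */
        uint64_t user_scaled = user_time * spurr_delta / purr_delta;

        printf("scaled system time: %llu\n", (unsigned long long)sys_scaled);
        printf("scaled user time:   %llu\n", (unsigned long long)user_scaled);
        return 0;
}
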
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
index f26afcd4175..ffa14aff524 100644
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -1,5 +1,5 @@
#include <linux/init.h>
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
@@ -18,6 +18,8 @@ static void __iomem *sram;
static const int sram_size = 0x4000; /* 16 kBytes */
static void __iomem *mbar;
+static suspend_state_t lite5200_pm_target_state;
+
static int lite5200_pm_valid(suspend_state_t state)
{
switch (state) {
@@ -29,13 +31,22 @@ static int lite5200_pm_valid(suspend_state_t state)
}
}
-static int lite5200_pm_prepare(suspend_state_t state)
+static int lite5200_pm_set_target(suspend_state_t state)
+{
+ if (lite5200_pm_valid(state)) {
+ lite5200_pm_target_state = state;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int lite5200_pm_prepare(void)
{
/* deep sleep? let mpc52xx code handle that */
- if (state == PM_SUSPEND_STANDBY)
- return mpc52xx_pm_prepare(state);
+ if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
+ return mpc52xx_pm_prepare();
- if (state != PM_SUSPEND_MEM)
+ if (lite5200_pm_target_state != PM_SUSPEND_MEM)
return -EINVAL;
/* map registers */
@@ -190,17 +201,16 @@ static int lite5200_pm_enter(suspend_state_t state)
return 0;
}
-static int lite5200_pm_finish(suspend_state_t state)
+static void lite5200_pm_finish(void)
{
/* deep sleep? let mpc52xx code handle that */
- if (state == PM_SUSPEND_STANDBY) {
- return mpc52xx_pm_finish(state);
- }
- return 0;
+ if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
+ mpc52xx_pm_finish();
}
-static struct pm_ops lite5200_pm_ops = {
+static struct platform_suspend_ops lite5200_pm_ops = {
.valid = lite5200_pm_valid,
+ .set_target = lite5200_pm_set_target,
.prepare = lite5200_pm_prepare,
.enter = lite5200_pm_enter,
.finish = lite5200_pm_finish,
@@ -208,6 +218,6 @@ static struct pm_ops lite5200_pm_ops = {
int __init lite5200_pm_init(void)
{
- pm_set_ops(&lite5200_pm_ops);
+ suspend_set_ops(&lite5200_pm_ops);
return 0;
}
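The new .set_target callback exists because prepare() and finish() lost their state argument: the core passes the chosen state to set_target() before prepare() runs, and the platform caches it for the later callbacks, which is exactly what lite5200 does above. A minimal sketch of the same pattern for a hypothetical board; these hooks would be wired into a platform_suspend_ops and registered with suspend_set_ops() as shown earlier in the diff:

#include <linux/suspend.h>

static suspend_state_t board_target_state;

static int board_pm_set_target(suspend_state_t state)
{
        board_target_state = state;     /* remembered for prepare()/finish() */
        return 0;
}

static int board_pm_prepare(void)
{
        if (board_target_state != PM_SUSPEND_MEM)
                return -EINVAL;
        /* state-specific setup (register mappings etc.) would go here */
        return 0;
}

static void board_pm_finish(void)
{
        /* undo whatever prepare() set up for the cached target state */
        board_target_state = PM_SUSPEND_ON;
}
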
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index ee2e7639c63..7ffa7babf25 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -1,5 +1,5 @@
#include <linux/init.h>
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/io.h>
#include <asm/time.h>
#include <asm/cacheflush.h>
@@ -57,11 +57,8 @@ int mpc52xx_set_wakeup_gpio(u8 pin, u8 level)
return 0;
}
-int mpc52xx_pm_prepare(suspend_state_t state)
+int mpc52xx_pm_prepare(void)
{
- if (state != PM_SUSPEND_STANDBY)
- return -EINVAL;
-
/* map the whole register space */
mbar = mpc52xx_find_and_map("mpc5200");
if (!mbar) {
@@ -166,18 +163,16 @@ int mpc52xx_pm_enter(suspend_state_t state)
return 0;
}
-int mpc52xx_pm_finish(suspend_state_t state)
+void mpc52xx_pm_finish(void)
{
/* call board resume code */
if (mpc52xx_suspend.board_resume_finish)
mpc52xx_suspend.board_resume_finish(mbar);
iounmap(mbar);
-
- return 0;
}
-static struct pm_ops mpc52xx_pm_ops = {
+static struct platform_suspend_ops mpc52xx_pm_ops = {
.valid = mpc52xx_pm_valid,
.prepare = mpc52xx_pm_prepare,
.enter = mpc52xx_pm_enter,
@@ -186,6 +181,6 @@ static struct pm_ops mpc52xx_pm_ops = {
int __init mpc52xx_pm_init(void)
{
- pm_set_ops(&mpc52xx_pm_ops);
+ suspend_set_ops(&mpc52xx_pm_ops);
return 0;
}
diff --git a/arch/sh/boards/hp6xx/pm.c b/arch/sh/boards/hp6xx/pm.c
index 8143d1b948e..d22f6eac9cc 100644
--- a/arch/sh/boards/hp6xx/pm.c
+++ b/arch/sh/boards/hp6xx/pm.c
@@ -67,14 +67,14 @@ static int hp6x0_pm_enter(suspend_state_t state)
return 0;
}
-static struct pm_ops hp6x0_pm_ops = {
+static struct platform_suspend_ops hp6x0_pm_ops = {
.enter = hp6x0_pm_enter,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
};
static int __init hp6x0_pm_init(void)
{
- pm_set_ops(&hp6x0_pm_ops);
+ suspend_set_ops(&hp6x0_pm_ops);
return 0;
}
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index fb2caef79ce..3ea000d15e3 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -585,24 +585,6 @@ static int __init of_debug(char *str)
__setup("of_debug=", of_debug);
-int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus)
-{
- /* initialize common driver fields */
- if (!drv->driver.name)
- drv->driver.name = drv->name;
- if (!drv->driver.owner)
- drv->driver.owner = drv->owner;
- drv->driver.bus = bus;
-
- /* register with core */
- return driver_register(&drv->driver);
-}
-
-void of_unregister_driver(struct of_platform_driver *drv)
-{
- driver_unregister(&drv->driver);
-}
-
struct of_device* of_platform_device_create(struct device_node *np,
const char *bus_id,
struct device *parent,
@@ -628,6 +610,4 @@ struct of_device* of_platform_device_create(struct device_node *np,
return dev;
}
-EXPORT_SYMBOL(of_register_driver);
-EXPORT_SYMBOL(of_unregister_driver);
EXPORT_SYMBOL(of_platform_device_create);
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index f3922e5a89f..2c3bea22815 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -877,7 +877,7 @@ void __cpuinit sun4v_register_mondo_queues(int this_cpu)
static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
unsigned long size = PAGE_ALIGN(qmask + 1);
- void *p = __alloc_bootmem_low(size, size, 0);
+ void *p = __alloc_bootmem(size, size, 0);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
prom_halt();
@@ -889,7 +889,7 @@ static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
unsigned long size = PAGE_ALIGN(qmask + 1);
- void *p = __alloc_bootmem_low(size, size, 0);
+ void *p = __alloc_bootmem(size, size, 0);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
@@ -906,7 +906,7 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
- page = alloc_bootmem_low_pages(PAGE_SIZE);
+ page = alloc_bootmem_pages(PAGE_SIZE);
if (!page) {
prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
prom_halt();
@@ -953,7 +953,7 @@ void __init init_IRQ(void)
kill_prom_timer();
size = sizeof(struct ino_bucket) * NUM_IVECS;
- ivector_table = alloc_bootmem_low(size);
+ ivector_table = alloc_bootmem(size);
if (!ivector_table) {
prom_printf("Fatal error, cannot allocate ivector_table\n");
prom_halt();
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 42d779866fb..fc5c0cc793b 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -869,26 +869,6 @@ static int __init of_debug(char *str)
__setup("of_debug=", of_debug);
-int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus)
-{
- /* initialize common driver fields */
- if (!drv->driver.name)
- drv->driver.name = drv->name;
- if (!drv->driver.owner)
- drv->driver.owner = drv->owner;
- drv->driver.bus = bus;
-
- /* register with core */
- return driver_register(&drv->driver);
-}
-EXPORT_SYMBOL(of_register_driver);
-
-void of_unregister_driver(struct of_platform_driver *drv)
-{
- driver_unregister(&drv->driver);
-}
-EXPORT_SYMBOL(of_unregister_driver);
-
struct of_device* of_platform_device_create(struct device_node *np,
const char *bus_id,
struct device *parent,
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
index c76bfbb7da0..923e0bcc3bf 100644
--- a/arch/sparc64/kernel/pci_common.c
+++ b/arch/sparc64/kernel/pci_common.c
@@ -396,6 +396,13 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
saw_mem = saw_io = 0;
pbm_ranges = of_get_property(pbm->prom_node, "ranges", &i);
+ if (!pbm_ranges) {
+ prom_printf("PCI: Fatal error, missing PBM ranges property "
+ " for %s\n",
+ pbm->name);
+ prom_halt();
+ }
+
num_pbm_ranges = i / sizeof(*pbm_ranges);
for (i = 0; i < num_pbm_ranges; i++) {
diff --git a/arch/sparc64/lib/atomic.S b/arch/sparc64/lib/atomic.S
index 9633750167d..70ac4186f62 100644
--- a/arch/sparc64/lib/atomic.S
+++ b/arch/sparc64/lib/atomic.S
@@ -1,10 +1,10 @@
-/* $Id: atomic.S,v 1.4 2001/11/18 00:12:56 davem Exp $
- * atomic.S: These things are too big to do inline.
+/* atomic.S: These things are too big to do inline.
*
- * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
*/
#include <asm/asi.h>
+#include <asm/backoff.h>
.text
@@ -16,27 +16,31 @@
.globl atomic_add
.type atomic_add,#function
atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
add %g1, %o0, %g7
cas [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %icc, 1b
+ bne,pn %icc, 2f
nop
retl
nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_add, .-atomic_add
.globl atomic_sub
.type atomic_sub,#function
atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
sub %g1, %o0, %g7
cas [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %icc, 1b
+ bne,pn %icc, 2f
nop
retl
nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_sub, .-atomic_sub
/* On SMP we need to use memory barriers to ensure
@@ -60,89 +64,101 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
.globl atomic_add_ret
.type atomic_add_ret,#function
atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
ATOMIC_PRE_BARRIER
1: lduw [%o1], %g1
add %g1, %o0, %g7
cas [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %icc, 1b
+ bne,pn %icc, 2f
add %g7, %o0, %g7
sra %g7, 0, %o0
ATOMIC_POST_BARRIER
retl
nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_add_ret, .-atomic_add_ret
.globl atomic_sub_ret
.type atomic_sub_ret,#function
atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
ATOMIC_PRE_BARRIER
1: lduw [%o1], %g1
sub %g1, %o0, %g7
cas [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %icc, 1b
+ bne,pn %icc, 2f
sub %g7, %o0, %g7
sra %g7, 0, %o0
ATOMIC_POST_BARRIER
retl
nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_sub_ret, .-atomic_sub_ret
.globl atomic64_add
.type atomic64_add,#function
atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
add %g1, %o0, %g7
casx [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %xcc, 1b
+ bne,pn %xcc, 2f
nop
retl
nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_add, .-atomic64_add
.globl atomic64_sub
.type atomic64_sub,#function
atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
sub %g1, %o0, %g7
casx [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %xcc, 1b
+ bne,pn %xcc, 2f
nop
retl
nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_sub, .-atomic64_sub
.globl atomic64_add_ret
.type atomic64_add_ret,#function
atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
ATOMIC_PRE_BARRIER
1: ldx [%o1], %g1
add %g1, %o0, %g7
casx [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %xcc, 1b
+ bne,pn %xcc, 2f
add %g7, %o0, %g7
mov %g7, %o0
ATOMIC_POST_BARRIER
retl
nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_add_ret, .-atomic64_add_ret
.globl atomic64_sub_ret
.type atomic64_sub_ret,#function
atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
ATOMIC_PRE_BARRIER
1: ldx [%o1], %g1
sub %g1, %o0, %g7
casx [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %xcc, 1b
+ bne,pn %xcc, 2f
sub %g7, %o0, %g7
mov %g7, %o0
ATOMIC_POST_BARRIER
retl
nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_sub_ret, .-atomic64_sub_ret
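Every primitive above now branches to a numbered 2: label on a failed compare-and-swap and spins in BACKOFF_SPIN before retrying at 1:, so heavily contended CPUs wait progressively longer between attempts. A rough C equivalent of that retry-with-exponential-backoff shape, using GCC's __atomic builtins instead of the sparc64 cas instruction (the backoff limit is arbitrary, not the kernel's value):

	static void atomic_add_backoff(int inc, int *ptr)
	{
		unsigned int backoff = 1;

		for (;;) {
			int old = __atomic_load_n(ptr, __ATOMIC_RELAXED);

			if (__atomic_compare_exchange_n(ptr, &old, old + inc,
							0, __ATOMIC_RELAXED,
							__ATOMIC_RELAXED))
				return;
			/* lost the race: spin a while, then double the delay */
			for (volatile unsigned int i = 0; i < backoff; i++)
				;
			if (backoff < 4096)
				backoff <<= 1;
		}
	}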
diff --git a/arch/sparc64/lib/bitops.S b/arch/sparc64/lib/bitops.S
index 892431a8213..6b015a6eefb 100644
--- a/arch/sparc64/lib/bitops.S
+++ b/arch/sparc64/lib/bitops.S
@@ -1,10 +1,10 @@
-/* $Id: bitops.S,v 1.3 2001/11/18 00:12:56 davem Exp $
- * bitops.S: Sparc64 atomic bit operations.
+/* bitops.S: Sparc64 atomic bit operations.
*
- * Copyright (C) 2000 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2000, 2007 David S. Miller (davem@davemloft.net)
*/
#include <asm/asi.h>
+#include <asm/backoff.h>
.text
@@ -29,6 +29,7 @@
.globl test_and_set_bit
.type test_and_set_bit,#function
test_and_set_bit: /* %o0=nr, %o1=addr */
+ BACKOFF_SETUP(%o3)
BITOP_PRE_BARRIER
srlx %o0, 6, %g1
mov 1, %o2
@@ -40,18 +41,20 @@ test_and_set_bit: /* %o0=nr, %o1=addr */
or %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
- bne,pn %xcc, 1b
+ bne,pn %xcc, 2f
and %g7, %o2, %g2
clr %o0
movrne %g2, 1, %o0
BITOP_POST_BARRIER
retl
nop
+2: BACKOFF_SPIN(%o3, %o4, 1b)
.size test_and_set_bit, .-test_and_set_bit
.globl test_and_clear_bit
.type test_and_clear_bit,#function
test_and_clear_bit: /* %o0=nr, %o1=addr */
+ BACKOFF_SETUP(%o3)
BITOP_PRE_BARRIER
srlx %o0, 6, %g1
mov 1, %o2
@@ -63,18 +66,20 @@ test_and_clear_bit: /* %o0=nr, %o1=addr */
andn %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
- bne,pn %xcc, 1b
+ bne,pn %xcc, 2f
and %g7, %o2, %g2
clr %o0
movrne %g2, 1, %o0
BITOP_POST_BARRIER
retl
nop
+2: BACKOFF_SPIN(%o3, %o4, 1b)
.size test_and_clear_bit, .-test_and_clear_bit
.globl test_and_change_bit
.type test_and_change_bit,#function
test_and_change_bit: /* %o0=nr, %o1=addr */
+ BACKOFF_SETUP(%o3)
BITOP_PRE_BARRIER
srlx %o0, 6, %g1
mov 1, %o2
@@ -86,18 +91,20 @@ test_and_change_bit: /* %o0=nr, %o1=addr */
xor %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
- bne,pn %xcc, 1b
+ bne,pn %xcc, 2f
and %g7, %o2, %g2
clr %o0
movrne %g2, 1, %o0
BITOP_POST_BARRIER
retl
nop
+2: BACKOFF_SPIN(%o3, %o4, 1b)
.size test_and_change_bit, .-test_and_change_bit
.globl set_bit
.type set_bit,#function
set_bit: /* %o0=nr, %o1=addr */
+ BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
sllx %g1, 3, %g3
@@ -108,15 +115,17 @@ set_bit: /* %o0=nr, %o1=addr */
or %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
- bne,pn %xcc, 1b
+ bne,pn %xcc, 2f
nop
retl
nop
+2: BACKOFF_SPIN(%o3, %o4, 1b)
.size set_bit, .-set_bit
.globl clear_bit
.type clear_bit,#function
clear_bit: /* %o0=nr, %o1=addr */
+ BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
sllx %g1, 3, %g3
@@ -127,15 +136,17 @@ clear_bit: /* %o0=nr, %o1=addr */
andn %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
- bne,pn %xcc, 1b
+ bne,pn %xcc, 2f
nop
retl
nop
+2: BACKOFF_SPIN(%o3, %o4, 1b)
.size clear_bit, .-clear_bit
.globl change_bit
.type change_bit,#function
change_bit: /* %o0=nr, %o1=addr */
+ BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
sllx %g1, 3, %g3
@@ -146,8 +157,9 @@ change_bit: /* %o0=nr, %o1=addr */
xor %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
- bne,pn %xcc, 1b
+ bne,pn %xcc, 2f
nop
retl
nop
+2: BACKOFF_SPIN(%o3, %o4, 1b)
.size change_bit, .-change_bit
diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c
index ae67e7158e7..6b4a0f9e38d 100644
--- a/arch/um/drivers/slip_kern.c
+++ b/arch/um/drivers/slip_kern.c
@@ -31,10 +31,8 @@ void slip_init(struct net_device *dev, void *data)
slip_proto_init(&spri->slip);
dev->init = NULL;
- dev->header_cache_update = NULL;
- dev->hard_header_cache = NULL;
- dev->hard_header = NULL;
dev->hard_header_len = 0;
+ dev->header_ops = NULL;
dev->addr_len = 0;
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 256;
diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c
index 240ee650865..d987af277db 100644
--- a/arch/um/drivers/slirp_kern.c
+++ b/arch/um/drivers/slirp_kern.c
@@ -34,9 +34,7 @@ void slirp_init(struct net_device *dev, void *data)
dev->init = NULL;
dev->hard_header_len = 0;
- dev->header_cache_update = NULL;
- dev->hard_header_cache = NULL;
- dev->hard_header = NULL;
+ dev->header_ops = NULL;
dev->addr_len = 0;
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 256;
diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c
index 118b9f9ff49..5027650eb27 100644
--- a/arch/x86/ia32/ia32_binfmt.c
+++ b/arch/x86/ia32/ia32_binfmt.c
@@ -289,7 +289,6 @@ static void elf32_init(struct pt_regs *regs)
static ctl_table abi_table2[] = {
{
- .ctl_name = 99,
.procname = "vsyscall32",
.data = &sysctl_vsyscall32,
.maxlen = sizeof(int),
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index f22ba8534d2..a97313b1270 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -11,7 +11,7 @@
#
# If physical address of wakeup_code is 0x12345, BIOS should call us with
# cs = 0x1234, eip = 0x05
-#
+#
#define BEEP \
inb $97, %al; \
@@ -52,7 +52,6 @@ wakeup_code:
BEEP
1:
mov $(wakeup_stack - wakeup_code), %sp # Private stack is needed for ASUS board
- movw $0x0e00 + 'S', %fs:(0x12)
pushl $0 # Kill any dangerous flags
popfl
@@ -90,9 +89,6 @@ wakeup_code:
# make sure %cr4 is set correctly (features, etc)
movl real_save_cr4 - wakeup_code, %eax
movl %eax, %cr4
- movw $0xb800, %ax
- movw %ax,%fs
- movw $0x0e00 + 'i', %fs:(0x12)
# need a gdt -- use lgdtl to force 32-bit operands, in case
# the GDT is located past 16 megabytes.
@@ -102,8 +98,6 @@ wakeup_code:
movl %eax, %cr0
jmp 1f
1:
- movw $0x0e00 + 'n', %fs:(0x14)
-
movl real_magic - wakeup_code, %eax
cmpl $0x12345678, %eax
jne bogus_real_magic
@@ -122,13 +116,11 @@ real_save_cr4: .long 0
real_magic: .long 0
video_mode: .long 0
realmode_flags: .long 0
-beep_flags: .long 0
real_efer_save_restore: .long 0
real_save_efer_edx: .long 0
real_save_efer_eax: .long 0
bogus_real_magic:
- movw $0x0e00 + 'B', %fs:(0x12)
jmp bogus_real_magic
/* This code uses an extended set of video mode numbers. These include:
@@ -194,7 +186,6 @@ wakeup_pmode_return:
movw %ax, %es
movw %ax, %fs
movw %ax, %gs
- movw $0x0e00 + 'u', 0xb8016
# reload the gdt, as we need the full 32 bit address
lgdt saved_gdt
@@ -218,7 +209,6 @@ wakeup_pmode_return:
jmp *%eax
bogus_magic:
- movw $0x0e00 + 'B', 0xb8018
jmp bogus_magic
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 8b4357e1efe..55608ec2ed7 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -41,7 +41,6 @@ wakeup_code:
# Running in *copy* of this code, somewhere in low 1MB.
- movb $0xa1, %al ; outb %al, $0x80
cli
cld
# setup data segment
@@ -65,11 +64,6 @@ wakeup_code:
cmpl $0x12345678, %eax
jne bogus_real_magic
- call verify_cpu # Verify the cpu supports long
- # mode
- testl %eax, %eax
- jnz no_longmode
-
testl $1, realmode_flags - wakeup_code
jz 1f
lcall $0xc000,$3
@@ -84,12 +78,6 @@ wakeup_code:
call mode_set
1:
- movw $0xb800, %ax
- movw %ax,%fs
- movw $0x0e00 + 'L', %fs:(0x10)
-
- movb $0xa2, %al ; outb %al, $0x80
-
mov %ds, %ax # Find 32bit wakeup_code addr
	movzx %ax, %esi # (Convert %ds:gdt to a linear ptr)
shll $4, %esi
@@ -117,14 +105,10 @@ wakeup_32_vector:
.code32
wakeup_32:
# Running in this code, but at low address; paging is not yet turned on.
- movb $0xa5, %al ; outb %al, $0x80
movl $__KERNEL_DS, %eax
movl %eax, %ds
- movw $0x0e00 + 'i', %ds:(0xb8012)
- movb $0xa8, %al ; outb %al, $0x80;
-
/*
* Prepare for entering 64bits mode
*/
@@ -200,16 +184,11 @@ wakeup_long64:
*/
lgdt cpu_gdt_descr
- movw $0x0e00 + 'n', %ds:(0xb8014)
- movb $0xa9, %al ; outb %al, $0x80
-
movq saved_magic, %rax
movq $0x123456789abcdef0, %rdx
cmpq %rdx, %rax
jne bogus_64_magic
- movw $0x0e00 + 'u', %ds:(0xb8016)
-
nop
nop
movw $__KERNEL_DS, %ax
@@ -220,13 +199,11 @@ wakeup_long64:
movw %ax, %gs
movq saved_rsp, %rsp
- movw $0x0e00 + 'x', %ds:(0xb8018)
movq saved_rbx, %rbx
movq saved_rdi, %rdi
movq saved_rsi, %rsi
movq saved_rbp, %rbp
- movw $0x0e00 + '!', %ds:(0xb801a)
movq saved_rip, %rax
jmp *%rax
@@ -256,21 +233,12 @@ realmode_flags: .quad 0
.code16
bogus_real_magic:
- movb $0xba,%al ; outb %al,$0x80
jmp bogus_real_magic
.code64
bogus_64_magic:
- movb $0xb3,%al ; outb %al,$0x80
jmp bogus_64_magic
-.code16
-no_longmode:
- movb $0xbc,%al ; outb %al,$0x80
- jmp no_longmode
-
-#include "../verify_cpu_64.S"
-
/* This code uses an extended set of video mode numbers. These include:
* Aliases for standard modes
* NORMAL_VGA (-1)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 1826395ebee..297a2411694 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -499,6 +499,11 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {
static void free_cache_attributes(unsigned int cpu)
{
+ int i;
+
+ for (i = 0; i < num_cache_leaves; i++)
+ cache_remove_shared_cpu_map(cpu, i);
+
kfree(cpuid4_info[cpu]);
cpuid4_info[cpu] = NULL;
}
@@ -506,8 +511,8 @@ static void free_cache_attributes(unsigned int cpu)
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
struct _cpuid4_info *this_leaf;
- unsigned long j;
- int retval;
+ unsigned long j;
+ int retval;
cpumask_t oldmask;
if (num_cache_leaves == 0)
@@ -524,19 +529,26 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
goto out;
/* Do cpuid and store the results */
- retval = 0;
for (j = 0; j < num_cache_leaves; j++) {
this_leaf = CPUID4_INFO_IDX(cpu, j);
retval = cpuid4_cache_lookup(j, this_leaf);
- if (unlikely(retval < 0))
+ if (unlikely(retval < 0)) {
+ int i;
+
+ for (i = 0; i < j; i++)
+ cache_remove_shared_cpu_map(cpu, i);
break;
+ }
cache_shared_cpu_map_setup(cpu, j);
}
set_cpus_allowed(current, oldmask);
out:
- if (retval)
- free_cache_attributes(cpu);
+ if (retval) {
+ kfree(cpuid4_info[cpu]);
+ cpuid4_info[cpu] = NULL;
+ }
+
return retval;
}
@@ -669,7 +681,7 @@ static struct kobj_type ktype_percpu_entry = {
.sysfs_ops = &sysfs_ops,
};
-static void cpuid4_cache_sysfs_exit(unsigned int cpu)
+static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
kfree(cache_kobject[cpu]);
kfree(index_kobject[cpu]);
@@ -680,13 +692,14 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu)
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
+ int err;
if (num_cache_leaves == 0)
return -ENOENT;
- detect_cache_attributes(cpu);
- if (cpuid4_info[cpu] == NULL)
- return -ENOENT;
+ err = detect_cache_attributes(cpu);
+ if (err)
+ return err;
/* Allocate all required memory */
cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
@@ -705,13 +718,15 @@ err_out:
return -ENOMEM;
}
+static cpumask_t cache_dev_map = CPU_MASK_NONE;
+
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long i, j;
struct _index_kobject *this_object;
- int retval = 0;
+ int retval;
retval = cpuid4_cache_sysfs_init(cpu);
if (unlikely(retval < 0))
@@ -721,6 +736,10 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
kobject_set_name(cache_kobject[cpu], "%s", "cache");
cache_kobject[cpu]->ktype = &ktype_percpu_entry;
retval = kobject_register(cache_kobject[cpu]);
+ if (retval < 0) {
+ cpuid4_cache_sysfs_exit(cpu);
+ return retval;
+ }
for (i = 0; i < num_cache_leaves; i++) {
this_object = INDEX_KOBJECT_PTR(cpu,i);
@@ -740,6 +759,9 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
break;
}
}
+ if (!retval)
+ cpu_set(cpu, cache_dev_map);
+
return retval;
}
@@ -750,13 +772,14 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
if (cpuid4_info[cpu] == NULL)
return;
- for (i = 0; i < num_cache_leaves; i++) {
- cache_remove_shared_cpu_map(cpu, i);
+ if (!cpu_isset(cpu, cache_dev_map))
+ return;
+ cpu_clear(cpu, cache_dev_map);
+
+ for (i = 0; i < num_cache_leaves; i++)
kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
- }
kobject_unregister(cache_kobject[cpu]);
cpuid4_cache_sysfs_exit(cpu);
- return;
}
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
@@ -781,7 +804,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
- .notifier_call = cacheinfo_cpu_callback,
+ .notifier_call = cacheinfo_cpu_callback,
};
static int __cpuinit cache_sysfs_init(void)
@@ -791,14 +814,15 @@ static int __cpuinit cache_sysfs_init(void)
if (num_cache_leaves == 0)
return 0;
- register_hotcpu_notifier(&cacheinfo_cpu_notifier);
-
for_each_online_cpu(i) {
- struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
+ int err;
+ struct sys_device *sys_dev = get_cpu_sysdev(i);
- cache_add_dev(sys_dev);
+ err = cache_add_dev(sys_dev);
+ if (err)
+ return err;
}
-
+ register_hotcpu_notifier(&cacheinfo_cpu_notifier);
return 0;
}
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 494d320d909..24885be5c48 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -131,17 +131,19 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
{
unsigned int cpu = (unsigned long)hcpu;
struct sys_device *sys_dev;
- int err;
+ int err = 0;
sys_dev = get_cpu_sysdev(cpu);
switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
mutex_lock(&therm_cpu_lock);
err = thermal_throttle_add_dev(sys_dev);
mutex_unlock(&therm_cpu_lock);
WARN_ON(err);
break;
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
mutex_lock(&therm_cpu_lock);
@@ -149,7 +151,7 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
mutex_unlock(&therm_cpu_lock);
break;
}
- return NOTIFY_OK;
+ return err ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
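The thermal-throttle notifier now allocates its sysfs device in CPU_UP_PREPARE, where a failure can still veto the CPU coming online, tears it down for CPU_UP_CANCELED as well as CPU_DEAD, and reports failure as NOTIFY_BAD. A compact standalone model of that error-propagation shape (constants and helper names are invented for illustration, not the kernel notifier API):

	#include <stdio.h>

	enum { UP_PREPARE, UP_CANCELED, DEAD };
	enum { NOTIFY_OK, NOTIFY_BAD };

	static int add_dev(unsigned int cpu)	 { return cpu == 2 ? -1 : 0; }
	static void remove_dev(unsigned int cpu) { (void)cpu; }

	static int cpu_callback(int action, unsigned int cpu)
	{
		int err = 0;

		switch (action) {
		case UP_PREPARE:
			err = add_dev(cpu);	/* may fail: veto the bring-up */
			break;
		case UP_CANCELED:
		case DEAD:
			remove_dev(cpu);	/* undo whatever UP_PREPARE did */
			break;
		}
		return err ? NOTIFY_BAD : NOTIFY_OK;
	}

	int main(void)
	{
		printf("cpu1=%d cpu2=%d\n",
		       cpu_callback(UP_PREPARE, 1), cpu_callback(UP_PREPARE, 2));
		return 0;
	}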
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c
index 8ca8f864896..66e6b797b2c 100644
--- a/arch/x86/kernel/mce_64.c
+++ b/arch/x86/kernel/mce_64.c
@@ -802,16 +802,29 @@ static __cpuinit int mce_create_device(unsigned int cpu)
if (!mce_available(&cpu_data[cpu]))
return -EIO;
+ memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
per_cpu(device_mce,cpu).id = cpu;
per_cpu(device_mce,cpu).cls = &mce_sysclass;
err = sysdev_register(&per_cpu(device_mce,cpu));
+ if (err)
+ return err;
+
+ for (i = 0; mce_attributes[i]; i++) {
+ err = sysdev_create_file(&per_cpu(device_mce,cpu),
+ mce_attributes[i]);
+ if (err)
+ goto error;
+ }
- if (!err) {
- for (i = 0; mce_attributes[i]; i++)
- sysdev_create_file(&per_cpu(device_mce,cpu),
- mce_attributes[i]);
+ return 0;
+error:
+ while (i--) {
+ sysdev_remove_file(&per_cpu(device_mce,cpu),
+ mce_attributes[i]);
}
+ sysdev_unregister(&per_cpu(device_mce,cpu));
+
return err;
}
@@ -823,7 +836,6 @@ static void mce_remove_device(unsigned int cpu)
sysdev_remove_file(&per_cpu(device_mce,cpu),
mce_attributes[i]);
sysdev_unregister(&per_cpu(device_mce,cpu));
- memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
@@ -831,18 +843,21 @@ static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
+ int err = 0;
switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- mce_create_device(cpu);
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ err = mce_create_device(cpu);
break;
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
mce_remove_device(cpu);
break;
}
- return NOTIFY_OK;
+ return err ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block mce_cpu_notifier = {
@@ -857,9 +872,13 @@ static __init int mce_init_device(void)
if (!mce_available(&boot_cpu_data))
return -EIO;
err = sysdev_class_register(&mce_sysclass);
+ if (err)
+ return err;
for_each_online_cpu(i) {
- mce_create_device(i);
+ err = mce_create_device(i);
+ if (err)
+ return err;
}
register_hotcpu_notifier(&mce_cpu_notifier);
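mce_create_device() now unwinds on failure: if creating the i-th attribute file fails, the files created so far are removed in reverse order and the sysdev is unregistered before the error is returned. The same unwind-in-reverse pattern, reduced to a standalone sketch with made-up resource names:

	#include <stdio.h>

	#define NRES 4

	static int create_res(int i)	{ return i == 2 ? -1 : 0; } /* fail at #2 */
	static void destroy_res(int i)	{ printf("undo %d\n", i); }

	static int create_all(void)
	{
		int i, err = 0;

		for (i = 0; i < NRES; i++) {
			err = create_res(i);
			if (err)
				goto error;
		}
		return 0;
	error:
		while (i--)		/* tear down only what was created */
			destroy_res(i);
		return err;
	}

	int main(void)
	{
		printf("create_all: %d\n", create_all());
		return 0;
	}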
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index df85c9c1360..e18e516cf54 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -133,37 +133,42 @@ static const struct file_operations msr_fops = {
.open = msr_open,
};
-static int __cpuinit msr_device_create(int i)
+static int __cpuinit msr_device_create(int cpu)
{
- int err = 0;
struct device *dev;
- dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, i), "msr%d",i);
- if (IS_ERR(dev))
- err = PTR_ERR(dev);
- return err;
+ dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu),
+ "msr%d", cpu);
+ return IS_ERR(dev) ? PTR_ERR(dev) : 0;
+}
+
+static void msr_device_destroy(int cpu)
+{
+ device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
}
static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
+ int err = 0;
switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- msr_device_create(cpu);
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ err = msr_device_create(cpu);
break;
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+ msr_device_destroy(cpu);
break;
}
- return NOTIFY_OK;
+ return err ? NOTIFY_BAD : NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata msr_class_cpu_notifier =
-{
+static struct notifier_block __cpuinitdata msr_class_cpu_notifier = {
.notifier_call = msr_class_cpu_callback,
};
@@ -196,7 +201,7 @@ static int __init msr_init(void)
out_class:
i = 0;
for_each_online_cpu(i)
- device_destroy(msr_class, MKDEV(MSR_MAJOR, i));
+ msr_device_destroy(i);
class_destroy(msr_class);
out_chrdev:
unregister_chrdev(MSR_MAJOR, "cpu/msr");
@@ -208,7 +213,7 @@ static void __exit msr_exit(void)
{
int cpu = 0;
for_each_online_cpu(cpu)
- device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+ msr_device_destroy(cpu);
class_destroy(msr_class);
unregister_chrdev(MSR_MAJOR, "cpu/msr");
unregister_hotcpu_notifier(&msr_class_cpu_notifier);
diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c
index 573c0a6e0ac..f8fafe527ff 100644
--- a/arch/x86/kernel/suspend_64.c
+++ b/arch/x86/kernel/suspend_64.c
@@ -150,8 +150,22 @@ void fix_processor_context(void)
/* Defined in arch/x86_64/kernel/suspend_asm.S */
extern int restore_image(void);
+/*
+ * Address to jump to in the last phase of restore in order to get to the image
+ * kernel's text (this value is passed in the image header).
+ */
+unsigned long restore_jump_address;
+
+/*
+ * Value of the cr3 register from before the hibernation (this value is passed
+ * in the image header).
+ */
+unsigned long restore_cr3;
+
pgd_t *temp_level4_pgt;
+void *relocated_restore_code;
+
static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
long i, j;
@@ -175,7 +189,7 @@ static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long en
if (paddr >= end)
break;
- pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | paddr;
+ pe = __PAGE_KERNEL_LARGE_EXEC | paddr;
pe &= __supported_pte_mask;
set_pmd(pmd, __pmd(pe));
}
@@ -183,25 +197,42 @@ static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long en
return 0;
}
+static int res_kernel_text_pud_init(pud_t *pud, unsigned long start)
+{
+ pmd_t *pmd;
+ unsigned long paddr;
+
+ pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+ if (!pmd)
+ return -ENOMEM;
+ set_pud(pud + pud_index(start), __pud(__pa(pmd) | _KERNPG_TABLE));
+ for (paddr = 0; paddr < KERNEL_TEXT_SIZE; pmd++, paddr += PMD_SIZE) {
+ unsigned long pe;
+
+ pe = __PAGE_KERNEL_LARGE_EXEC | _PAGE_GLOBAL | paddr;
+ pe &= __supported_pte_mask;
+ set_pmd(pmd, __pmd(pe));
+ }
+
+ return 0;
+}
+
static int set_up_temporary_mappings(void)
{
unsigned long start, end, next;
+ pud_t *pud;
int error;
temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
if (!temp_level4_pgt)
return -ENOMEM;
- /* It is safe to reuse the original kernel mapping */
- set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
- init_level4_pgt[pgd_index(__START_KERNEL_map)]);
-
/* Set up the direct mapping from scratch */
start = (unsigned long)pfn_to_kaddr(0);
end = (unsigned long)pfn_to_kaddr(end_pfn);
for (; start < end; start = next) {
- pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+ pud = (pud_t *)get_safe_page(GFP_ATOMIC);
if (!pud)
return -ENOMEM;
next = start + PGDIR_SIZE;
@@ -212,7 +243,17 @@ static int set_up_temporary_mappings(void)
set_pgd(temp_level4_pgt + pgd_index(start),
mk_kernel_pgd(__pa(pud)));
}
- return 0;
+
+ /* Set up the kernel text mapping from scratch */
+ pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+ if (!pud)
+ return -ENOMEM;
+ error = res_kernel_text_pud_init(pud, __START_KERNEL_map);
+ if (!error)
+ set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
+ __pgd(__pa(pud) | _PAGE_TABLE));
+
+ return error;
}
int swsusp_arch_resume(void)
@@ -222,6 +263,13 @@ int swsusp_arch_resume(void)
/* We have got enough memory and from now on we cannot recover */
if ((error = set_up_temporary_mappings()))
return error;
+
+ relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
+ if (!relocated_restore_code)
+ return -ENOMEM;
+ memcpy(relocated_restore_code, &core_restore_code,
+ &restore_registers - &core_restore_code);
+
restore_image();
return 0;
}
@@ -236,4 +284,43 @@ int pfn_is_nosave(unsigned long pfn)
unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
+
+struct restore_data_record {
+ unsigned long jump_address;
+ unsigned long cr3;
+ unsigned long magic;
+};
+
+#define RESTORE_MAGIC 0x0123456789ABCDEFUL
+
+/**
+ * arch_hibernation_header_save - populate the architecture specific part
+ * of a hibernation image header
+ * @addr: address to save the data at
+ */
+int arch_hibernation_header_save(void *addr, unsigned int max_size)
+{
+ struct restore_data_record *rdr = addr;
+
+ if (max_size < sizeof(struct restore_data_record))
+ return -EOVERFLOW;
+ rdr->jump_address = restore_jump_address;
+ rdr->cr3 = restore_cr3;
+ rdr->magic = RESTORE_MAGIC;
+ return 0;
+}
+
+/**
+ * arch_hibernation_header_restore - read the architecture specific data
+ * from the hibernation image header
+ * @addr: address to read the data from
+ */
+int arch_hibernation_header_restore(void *addr)
+{
+ struct restore_data_record *rdr = addr;
+
+ restore_jump_address = rdr->jump_address;
+ restore_cr3 = rdr->cr3;
+ return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
+}
#endif /* CONFIG_HIBERNATION */
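The new header hooks round-trip the restore jump address and cr3 value through the hibernation image header and reject an image whose magic does not match. A self-contained sketch of that round trip; buffer handling is simplified and only the magic check mirrors the code above:

	#include <stdio.h>
	#include <string.h>

	#define RESTORE_MAGIC	0x0123456789ABCDEFUL

	struct restore_data_record {
		unsigned long jump_address;
		unsigned long cr3;
		unsigned long magic;
	};

	static int header_save(void *addr, size_t max_size,
			       unsigned long jump, unsigned long cr3)
	{
		struct restore_data_record rdr = {
			.jump_address = jump, .cr3 = cr3, .magic = RESTORE_MAGIC,
		};

		if (max_size < sizeof(rdr))
			return -1;			/* like -EOVERFLOW */
		memcpy(addr, &rdr, sizeof(rdr));
		return 0;
	}

	static int header_restore(const void *addr,
				  unsigned long *jump, unsigned long *cr3)
	{
		struct restore_data_record rdr;

		memcpy(&rdr, addr, sizeof(rdr));
		*jump = rdr.jump_address;
		*cr3 = rdr.cr3;
		return rdr.magic == RESTORE_MAGIC ? 0 : -1;	/* like -EINVAL */
	}

	int main(void)
	{
		unsigned char buf[64];
		unsigned long jump, cr3;

		header_save(buf, sizeof(buf), 0xffffffff80200000UL, 0x1000UL);
		printf("restore: %d\n", header_restore(buf, &jump, &cr3));
		return 0;
	}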
diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S
index 16d183f67bc..48344b666d2 100644
--- a/arch/x86/kernel/suspend_asm_64.S
+++ b/arch/x86/kernel/suspend_asm_64.S
@@ -2,8 +2,8 @@
*
* Distribute under GPLv2.
*
- * swsusp_arch_resume may not use any stack, nor any variable that is
- * not "NoSave" during copying pages:
+ * swsusp_arch_resume must not use any stack or any nonlocal variables while
+ * copying pages:
*
 * It's rewriting one kernel image with another. What is the stack in the "old"
 * image could very well be a data page in the "new" image, and overwriting
@@ -36,6 +36,13 @@ ENTRY(swsusp_arch_suspend)
movq %r15, saved_context_r15(%rip)
pushfq ; popq saved_context_eflags(%rip)
+ /* save the address of restore_registers */
+ movq $restore_registers, %rax
+ movq %rax, restore_jump_address(%rip)
+ /* save cr3 */
+ movq %cr3, %rax
+ movq %rax, restore_cr3(%rip)
+
call swsusp_save
ret
@@ -54,7 +61,17 @@ ENTRY(restore_image)
movq %rcx, %cr3;
movq %rax, %cr4; # turn PGE back on
+ /* prepare to jump to the image kernel */
+ movq restore_jump_address(%rip), %rax
+ movq restore_cr3(%rip), %rbx
+
+ /* prepare to copy image data to their original locations */
movq restore_pblist(%rip), %rdx
+ movq relocated_restore_code(%rip), %rcx
+ jmpq *%rcx
+
+ /* code below has been relocated to a safe page */
+ENTRY(core_restore_code)
loop:
testq %rdx, %rdx
jz done
@@ -62,7 +79,7 @@ loop:
/* get addresses from the pbe and copy the page */
movq pbe_address(%rdx), %rsi
movq pbe_orig_address(%rdx), %rdi
- movq $512, %rcx
+ movq $(PAGE_SIZE >> 3), %rcx
rep
movsq
@@ -70,10 +87,22 @@ loop:
movq pbe_next(%rdx), %rdx
jmp loop
done:
+ /* jump to the restore_registers address from the image header */
+ jmpq *%rax
+ /*
+ * NOTE: This assumes that the boot kernel's text mapping covers the
+ * image kernel's page containing restore_registers and the address of
+ * this page is the same as in the image kernel's text mapping (it
+ * should always be true, because the text mapping is linear, starting
+ * from 0, and is supposed to cover the entire kernel text for every
+ * kernel).
+ *
+ * code below belongs to the image kernel
+ */
+
+ENTRY(restore_registers)
/* go back to the original page tables */
- movq $(init_level4_pgt - __START_KERNEL_map), %rax
- addq phys_base(%rip), %rax
- movq %rax, %cr3
+ movq %rbx, %cr3
/* Flush TLB, including "global" things (vmalloc) */
movq mmu_cr4_features(%rip), %rax
@@ -84,12 +113,9 @@ done:
movq %rcx, %cr3
movq %rax, %cr4; # turn PGE back on
- movl $24, %eax
- movl %eax, %ds
-
movq saved_context_esp(%rip), %rsp
movq saved_context_ebp(%rip), %rbp
- /* Don't restore %rax, it must be 0 anyway */
+ /* restore GPRs (we don't restore %rax, it must be 0 anyway) */
movq saved_context_ebx(%rip), %rbx
movq saved_context_ecx(%rip), %rcx
movq saved_context_edx(%rip), %rdx
@@ -107,4 +133,7 @@ done:
xorq %rax, %rax
+ /* tell the hibernation core that we've just restored the memory */
+ movq %rax, in_suspend(%rip)
+
ret
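The page copy loop above now moves $(PAGE_SIZE >> 3) quadwords instead of a hard-coded 512, and runs from a relocated safe page before jumping to restore_registers in the image kernel. In C terms, assuming 4 KiB pages, the per-page copy is simply:

	#include <stdint.h>

	#define PAGE_SIZE 4096UL

	/* Copy one page eight bytes at a time, as the rep movsq above does. */
	static void copy_page_quadwords(uint64_t *dst, const uint64_t *src)
	{
		unsigned long i;

		for (i = 0; i < (PAGE_SIZE >> 3); i++)
			dst[i] = src[i];
	}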
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 8a67e282cb5..585541ca1a7 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -64,6 +64,16 @@ struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
.sysctl_enabled = 1,
};
+void update_vsyscall_tz(void)
+{
+ unsigned long flags;
+
+ write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ /* sys_tz has changed */
+ vsyscall_gtod_data.sys_tz = sys_tz;
+ write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+}
+
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
unsigned long flags;
@@ -77,7 +87,6 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
vsyscall_gtod_data.clock.shift = clock->shift;
vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
- vsyscall_gtod_data.sys_tz = sys_tz;
vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
@@ -163,7 +172,7 @@ time_t __vsyscall(1) vtime(time_t *t)
if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
return time_syscall(t);
- vgettimeofday(&tv, 0);
+ vgettimeofday(&tv, NULL);
result = tv.tv_sec;
if (t)
*t = result;
@@ -257,18 +266,10 @@ out:
return ret;
}
-static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- return -ENOSYS;
-}
-
static ctl_table kernel_table2[] = {
- { .ctl_name = 99, .procname = "vsyscall64",
+ { .procname = "vsyscall64",
.data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
.mode = 0644,
- .strategy = vsyscall_sysctl_nostrat,
.proc_handler = vsyscall_sysctl_change },
{}
};
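update_vsyscall_tz() lets a timezone change take the vsyscall seqlock on its own, without rewriting the clocksource fields, while readers retry until they observe a stable, even sequence count. A minimal C11 sketch of that seqlock idea (memory ordering simplified to sequentially consistent; this is not the kernel's seqlock_t):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint seq;
	static int tz_minuteswest;	/* datum guarded by the sequence count */

	static void write_tz(int mw)
	{
		atomic_fetch_add(&seq, 1);	/* odd: write in progress */
		tz_minuteswest = mw;
		atomic_fetch_add(&seq, 1);	/* even again: write complete */
	}

	static int read_tz(void)
	{
		unsigned int start;
		int val;

		do {
			start = atomic_load(&seq);
			val = tz_minuteswest;
		} while ((start & 1) || start != atomic_load(&seq));
		return val;
	}

	int main(void)
	{
		write_tz(-60);
		printf("minuteswest=%d\n", read_tz());
		return 0;
	}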
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 43fafe9e9c0..78cb68f2ebb 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -716,6 +716,11 @@ menu "Power management options"
source kernel/power/Kconfig
+config ARCH_HIBERNATION_HEADER
+ bool
+ depends on HIBERNATION
+ default y
+
source "drivers/acpi/Kconfig"
source "arch/x86/kernel/cpufreq/Kconfig"