Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/mach-pxa/pxa3xx.c   223
-rw-r--r--  arch/arm/mach-pxa/standby.S   80
2 files changed, 303 insertions, 0 deletions
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index fcb2359b386..0b2a15ed399 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -19,6 +19,7 @@
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
+#include <linux/io.h>
#include <asm/hardware.h>
#include <asm/arch/pxa3xx-regs.h>
@@ -201,6 +202,225 @@ static struct clk pxa3xx_clks[] = {
PXA3xx_CKEN("MMCCLK", MMC3, 19500000, 0, &pxa3xx_device_mci3.dev),
};
+#ifdef CONFIG_PM
+#define SLEEP_SAVE_SIZE 4
+
+#define ISRAM_START 0x5c000000
+#define ISRAM_SIZE SZ_256K
+
+static void __iomem *sram;
+static unsigned long wakeup_src;
+
+static void pxa3xx_cpu_pm_save(unsigned long *sleep_save)
+{
+ pr_debug("PM: CKENA=%08x CKENB=%08x\n", CKENA, CKENB);
+
+ if (CKENA & (1 << CKEN_USBH)) {
+ printk(KERN_ERR "PM: USB host clock not stopped?\n");
+ CKENA &= ~(1 << CKEN_USBH);
+ }
+// CKENA |= 1 << (CKEN_ISC & 31);
+
+ /*
+ * Low power modes require the HSIO2 clock to be enabled.
+ */
+ CKENB |= 1 << (CKEN_HSIO2 & 31);
+}
+
+static void pxa3xx_cpu_pm_restore(unsigned long *sleep_save)
+{
+ CKENB &= ~(1 << (CKEN_HSIO2 & 31));
+}
+
+/*
+ * Enter a standby mode (S0D1C2 or S0D2C2). Upon wakeup, the dynamic
+ * memory controller has to be reinitialised, so we place some code
+ * in the SRAM to perform this function.
+ *
+ * We disable FIQs across the standby - otherwise, we might receive a
+ * FIQ while the SDRAM is unavailable.
+ */
+static void pxa3xx_cpu_standby(unsigned int pwrmode)
+{
+ extern const char pm_enter_standby_start[], pm_enter_standby_end[];
+ void (*fn)(unsigned int) = (void __force *)(sram + 0x8000);
+
+ memcpy_toio(sram + 0x8000, pm_enter_standby_start,
+ pm_enter_standby_end - pm_enter_standby_start);
+
+ AD2D0SR = ~0;
+ AD2D1SR = ~0;
+ AD2D0ER = wakeup_src;
+ AD2D1ER = 0;
+ ASCR = ASCR;
+ ARSR = ARSR;
+
+ local_fiq_disable();
+ fn(pwrmode);
+ local_fiq_enable();
+
+ AD2D0ER = 0;
+ AD2D1ER = 0;
+
+ printk("PM: AD2D0SR=%08x ASCR=%08x\n", AD2D0SR, ASCR);
+}
+
+static void pxa3xx_cpu_pm_enter(suspend_state_t state)
+{
+ /*
+ * Don't sleep if no wakeup sources are defined
+ */
+ if (wakeup_src == 0)
+ return;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2);
+ break;
+
+ case PM_SUSPEND_MEM:
+ break;
+ }
+}
+
+static int pxa3xx_cpu_pm_valid(suspend_state_t state)
+{
+ return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
+}
+
+static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = {
+ .save_size = SLEEP_SAVE_SIZE,
+ .save = pxa3xx_cpu_pm_save,
+ .restore = pxa3xx_cpu_pm_restore,
+ .valid = pxa3xx_cpu_pm_valid,
+ .enter = pxa3xx_cpu_pm_enter,
+};
+
+static void __init pxa3xx_init_pm(void)
+{
+ sram = ioremap(ISRAM_START, ISRAM_SIZE);
+ if (!sram) {
+ printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n");
+ return;
+ }
+
+ /*
+ * Since we copy wakeup code into the SRAM, we need to ensure
+ * that it is preserved over the low power modes. Note: bit 8
+ * is undocumented in the developer manual, but must be set.
+ */
+ AD1R |= ADXR_L2 | ADXR_R0;
+ AD2R |= ADXR_L2 | ADXR_R0;
+ AD3R |= ADXR_L2 | ADXR_R0;
+
+ /*
+ * Clear the resume enable registers.
+ */
+ AD1D0ER = 0;
+ AD2D0ER = 0;
+ AD2D1ER = 0;
+ AD3ER = 0;
+
+ pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
+}
+
+static int pxa3xx_set_wake(unsigned int irq, unsigned int on)
+{
+ unsigned long flags, mask = 0;
+
+ switch (irq) {
+ case IRQ_SSP3:
+ mask = ADXER_MFP_WSSP3;
+ break;
+ case IRQ_MSL:
+ mask = ADXER_WMSL0;
+ break;
+ case IRQ_USBH2:
+ case IRQ_USBH1:
+ mask = ADXER_WUSBH;
+ break;
+ case IRQ_KEYPAD:
+ mask = ADXER_WKP;
+ break;
+ case IRQ_AC97:
+ mask = ADXER_MFP_WAC97;
+ break;
+ case IRQ_USIM:
+ mask = ADXER_WUSIM0;
+ break;
+ case IRQ_SSP2:
+ mask = ADXER_MFP_WSSP2;
+ break;
+ case IRQ_I2C:
+ mask = ADXER_MFP_WI2C;
+ break;
+ case IRQ_STUART:
+ mask = ADXER_MFP_WUART3;
+ break;
+ case IRQ_BTUART:
+ mask = ADXER_MFP_WUART2;
+ break;
+ case IRQ_FFUART:
+ mask = ADXER_MFP_WUART1;
+ break;
+ case IRQ_MMC:
+ mask = ADXER_MFP_WMMC1;
+ break;
+ case IRQ_SSP:
+ mask = ADXER_MFP_WSSP1;
+ break;
+ case IRQ_RTCAlrm:
+ mask = ADXER_WRTC;
+ break;
+ case IRQ_SSP4:
+ mask = ADXER_MFP_WSSP4;
+ break;
+ case IRQ_TSI:
+ mask = ADXER_WTSI;
+ break;
+ case IRQ_USIM2:
+ mask = ADXER_WUSIM1;
+ break;
+ case IRQ_MMC2:
+ mask = ADXER_MFP_WMMC2;
+ break;
+ case IRQ_NAND:
+ mask = ADXER_MFP_WFLASH;
+ break;
+ case IRQ_USB2:
+ mask = ADXER_WUSB2;
+ break;
+ case IRQ_WAKEUP0:
+ mask = ADXER_WEXTWAKE0;
+ break;
+ case IRQ_WAKEUP1:
+ mask = ADXER_WEXTWAKE1;
+ break;
+ case IRQ_MMC3:
+ mask = ADXER_MFP_GEN12;
+ break;
+ }
+
+ local_irq_save(flags);
+ if (on)
+ wakeup_src |= mask;
+ else
+ wakeup_src &= ~mask;
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static void pxa3xx_init_irq_pm(void)
+{
+ pxa_init_irq_set_wake(pxa3xx_set_wake);
+}
+
+#else
+static inline void pxa3xx_init_pm(void) {}
+static inline void pxa3xx_init_irq_pm(void) {}
+#endif
+
void __init pxa3xx_init_irq(void)
{
/* enable CP6 access */
@@ -212,6 +432,7 @@ void __init pxa3xx_init_irq(void)
pxa_init_irq_low();
pxa_init_irq_high();
pxa_init_irq_gpio(128);
+ pxa3xx_init_irq_pm();
}
/*
@@ -241,6 +462,8 @@ static int __init pxa3xx_init(void)
if ((ret = pxa_init_dma(32)))
return ret;
+ pxa3xx_init_pm();
+
return platform_add_devices(devices, ARRAY_SIZE(devices));
}
return 0;
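
A note on the clock handling in pxa3xx_cpu_pm_save()/pxa3xx_cpu_pm_restore() above: the PXA3xx splits its clock enable bits across two registers, CKENA (clocks 0-31) and CKENB (clocks 32-63), which is why the code masks the CKEN_* number with 31 to get the bit position within the register that holds it. A minimal sketch of that idiom, assuming the CKENA/CKENB accessors from <asm/arch/pxa3xx-regs.h>; example_cken_enable() is a hypothetical helper and not part of this patch:

#include <asm/arch/pxa3xx-regs.h>	/* CKENA/CKENB register accessors */

/* Hypothetical illustration of the "& 31" idiom used above. */
static void example_cken_enable(unsigned int cken)
{
	if (cken < 32)
		CKENA |= 1 << cken;		/* clocks 0..31 live in CKENA */
	else
		CKENB |= 1 << (cken & 31);	/* clocks 32..63 live in CKENB */
}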
diff --git a/arch/arm/mach-pxa/standby.S b/arch/arm/mach-pxa/standby.S
index 434a6ab0eca..167412e6bec 100644
--- a/arch/arm/mach-pxa/standby.S
+++ b/arch/arm/mach-pxa/standby.S
@@ -32,3 +32,83 @@ ENTRY(pxa_cpu_standby)
mov pc, lr
#endif
+
+#ifdef CONFIG_PXA3xx
+
+#define MDCNFG 0x0000
+#define MDCNFG_DMCEN (1 << 30)
+#define DDR_HCAL 0x0060
+#define DDR_HCAL_HCRNG 0x1f
+#define DDR_HCAL_HCPROG (1 << 28)
+#define DDR_HCAL_HCEN (1 << 31)
+#define DMCIER 0x0070
+#define DMCIER_EDLP (1 << 29)
+#define DMCISR 0x0078
+#define RCOMP 0x0100
+#define RCOMP_SWEVAL (1 << 31)
+
+ENTRY(pm_enter_standby_start)
+ mov r1, #0xf6000000 @ DMEMC_REG_BASE (MDCNFG)
+ add r1, r1, #0x00100000
+
+ /*
+ * Preload the TLB entry for accessing the dynamic memory
+ * controller registers. Note that page table lookups will
+ * fail until the dynamic memory controller has been
+ * reinitialised - and that includes MMU page table walks.
+ * This also means that only the dynamic memory controller
+ * can be reliably accessed in the code following standby.
+ */
+ ldr r2, [r1] @ Dummy read MDCNFG
+
+ mcr p14, 0, r0, c7, c0, 0
+ .rept 8
+ nop
+ .endr
+
+ ldr r0, [r1, #DDR_HCAL] @ Clear (and wait for) HCEN
+ bic r0, r0, #DDR_HCAL_HCEN
+ str r0, [r1, #DDR_HCAL]
+1: ldr r0, [r1, #DDR_HCAL]
+ tst r0, #DDR_HCAL_HCEN
+ bne 1b
+
+ ldr r0, [r1, #RCOMP] @ Initiate RCOMP
+ orr r0, r0, #RCOMP_SWEVAL
+ str r0, [r1, #RCOMP]
+
+ mov r0, #~0 @ Clear interrupts
+ str r0, [r1, #DMCISR]
+
+ ldr r0, [r1, #DMCIER] @ set DMCIER[EDLP]
+ orr r0, r0, #DMCIER_EDLP
+ str r0, [r1, #DMCIER]
+
+ ldr r0, [r1, #DDR_HCAL] @ clear HCRNG, set HCPROG, HCEN
+ bic r0, r0, #DDR_HCAL_HCRNG
+ orr r0, r0, #DDR_HCAL_HCEN | DDR_HCAL_HCPROG
+ str r0, [r1, #DDR_HCAL]
+
+1: ldr r0, [r1, #DMCISR]
+ tst r0, #DMCIER_EDLP
+ beq 1b
+
+ ldr r0, [r1, #MDCNFG] @ set MDCNFG[DMCEN]
+ orr r0, r0, #MDCNFG_DMCEN
+ str r0, [r1, #MDCNFG]
+1: ldr r0, [r1, #MDCNFG]
+ tst r0, #MDCNFG_DMCEN
+ beq 1b
+
+ ldr r0, [r1, #DDR_HCAL] @ set DDR_HCAL[HCRNG]
+ orr r0, r0, #2 @ HCRNG
+ str r0, [r1, #DDR_HCAL]
+
+ ldr r0, [r1, #DMCIER] @ disable DMCIER[EDLP] again
+ bic r0, r0, #DMCIER_EDLP
+ str r0, [r1, #DMCIER]
+
+ mov pc, lr
+ENTRY(pm_enter_standby_end)
+
+#endif
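
Usage note: pxa3xx_cpu_pm_enter() above bails out early when wakeup_src is zero, and wakeup_src is only populated through pxa3xx_set_wake(), which the generic IRQ layer reaches via the hook registered by pxa_init_irq_set_wake(). A driver therefore has to arm at least one wakeup source before standby does anything. A minimal sketch using the standard enable_irq_wake() call; example_arm_keypad_wake() is a hypothetical helper and not part of this patch:

#include <linux/interrupt.h>	/* enable_irq_wake() */
#include <asm/irq.h>		/* IRQ_KEYPAD */

/* Hypothetical illustration: mark the keypad interrupt as a wakeup source,
 * so pxa3xx_set_wake() sets ADXER_WKP in wakeup_src before suspend. */
static int example_arm_keypad_wake(void)
{
	return enable_irq_wake(IRQ_KEYPAD);
}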