Diffstat (limited to 'arch/arm/plat-omap')
-rw-r--r--  arch/arm/plat-omap/Kconfig     |   2
-rw-r--r--  arch/arm/plat-omap/clock.c     |   4
-rw-r--r--  arch/arm/plat-omap/cpu-omap.c  |  18
-rw-r--r--  arch/arm/plat-omap/devices.c   |   2
-rw-r--r--  arch/arm/plat-omap/dma.c       |  84
-rw-r--r--  arch/arm/plat-omap/dmtimer.c   | 428
-rw-r--r--  arch/arm/plat-omap/gpio.c      | 103
-rw-r--r--  arch/arm/plat-omap/sram.c      |   9
-rw-r--r--  arch/arm/plat-omap/timer32k.c  | 121
9 files changed, 541 insertions(+), 230 deletions(-)
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index ec49495e651..ec752e16d61 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -91,7 +91,7 @@ config OMAP_32K_TIMER_HZ
config OMAP_DM_TIMER
bool "Use dual-mode timer"
- depends on ARCH_OMAP16XX
+ depends on ARCH_OMAP16XX || ARCH_OMAP24XX
help
Select this option if you want to use OMAP Dual-Mode timers.
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 32ec04c58bc..dcd9d81201f 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -28,9 +28,9 @@
#include <asm/arch/clock.h>
-LIST_HEAD(clocks);
+static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
-DEFINE_SPINLOCK(clockfw_lock);
+static DEFINE_SPINLOCK(clockfw_lock);
static struct clk_functions *arch_clock;
diff --git a/arch/arm/plat-omap/cpu-omap.c b/arch/arm/plat-omap/cpu-omap.c
index 98edc9fdd6d..a0c71dca237 100644
--- a/arch/arm/plat-omap/cpu-omap.c
+++ b/arch/arm/plat-omap/cpu-omap.c
@@ -25,6 +25,14 @@
#include <asm/io.h>
#include <asm/system.h>
+#define VERY_HI_RATE 900000000
+
+#ifdef CONFIG_ARCH_OMAP1
+#define MPU_CLK "mpu"
+#else
+#define MPU_CLK "virt_prcm_set"
+#endif
+
/* TODO: Add support for SDRAM timing changes */
int omap_verify_speed(struct cpufreq_policy *policy)
@@ -36,7 +44,7 @@ int omap_verify_speed(struct cpufreq_policy *policy)
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
- mpu_clk = clk_get(NULL, "mpu");
+ mpu_clk = clk_get(NULL, MPU_CLK);
if (IS_ERR(mpu_clk))
return PTR_ERR(mpu_clk);
policy->min = clk_round_rate(mpu_clk, policy->min * 1000) / 1000;
@@ -56,7 +64,7 @@ unsigned int omap_getspeed(unsigned int cpu)
if (cpu)
return 0;
- mpu_clk = clk_get(NULL, "mpu");
+ mpu_clk = clk_get(NULL, MPU_CLK);
if (IS_ERR(mpu_clk))
return 0;
rate = clk_get_rate(mpu_clk) / 1000;
@@ -73,7 +81,7 @@ static int omap_target(struct cpufreq_policy *policy,
struct cpufreq_freqs freqs;
int ret = 0;
- mpu_clk = clk_get(NULL, "mpu");
+ mpu_clk = clk_get(NULL, MPU_CLK);
if (IS_ERR(mpu_clk))
return PTR_ERR(mpu_clk);
@@ -93,7 +101,7 @@ static int __init omap_cpu_init(struct cpufreq_policy *policy)
{
struct clk * mpu_clk;
- mpu_clk = clk_get(NULL, "mpu");
+ mpu_clk = clk_get(NULL, MPU_CLK);
if (IS_ERR(mpu_clk))
return PTR_ERR(mpu_clk);
@@ -102,7 +110,7 @@ static int __init omap_cpu_init(struct cpufreq_policy *policy)
policy->cur = policy->min = policy->max = omap_getspeed(0);
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000;
- policy->cpuinfo.max_freq = clk_round_rate(mpu_clk, 216000000) / 1000;
+ policy->cpuinfo.max_freq = clk_round_rate(mpu_clk, VERY_HI_RATE) / 1000;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
clk_put(mpu_clk);
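
The cpu-omap.c change works because clk_round_rate() returns the nearest rate the clock can actually provide, so asking for the deliberately unreachable VERY_HI_RATE (900 MHz) yields the true hardware maximum instead of the old hard-coded 216 MHz, and the MPU_CLK alias picks the right clock name per architecture. A minimal sketch of that probing pattern, assuming only the MPU_CLK and VERY_HI_RATE definitions added by this patch (the example_* name is illustrative, not from the patch):

#include <linux/clk.h>
#include <linux/err.h>

/* Probe the supported MPU frequency range (in kHz) through the clock
 * framework rather than hard-coding limits. */
static int example_get_freq_range(unsigned int *min_khz, unsigned int *max_khz)
{
	struct clk *mpu_clk;

	mpu_clk = clk_get(NULL, MPU_CLK);	/* "mpu" on OMAP1, "virt_prcm_set" on OMAP2 */
	if (IS_ERR(mpu_clk))
		return PTR_ERR(mpu_clk);

	*min_khz = clk_round_rate(mpu_clk, 0) / 1000;		/* lowest supported rate */
	*max_khz = clk_round_rate(mpu_clk, VERY_HI_RATE) / 1000;	/* highest supported rate */

	clk_put(mpu_clk);
	return 0;
}
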
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 5d5d6eb222d..ee15b408912 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -105,7 +105,7 @@ static void omap_init_kp(void)
omap_cfg_reg(E20_1610_KBR3);
omap_cfg_reg(E19_1610_KBR4);
omap_cfg_reg(N19_1610_KBR5);
- } else if (machine_is_omap_perseus2()) {
+ } else if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
omap_cfg_reg(E2_730_KBR0);
omap_cfg_reg(J7_730_KBR1);
omap_cfg_reg(E1_730_KBR2);
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 5dac4230360..c5d0214ef19 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -43,6 +43,7 @@
#define OMAP_DMA_ACTIVE 0x01
#define OMAP_DMA_CCR_EN (1 << 7)
+#define OMAP2_DMA_CSR_CLEAR_MASK 0xffe
#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
@@ -166,18 +167,24 @@ void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
if (cpu_is_omap24xx() && dma_trigger) {
u32 val = OMAP_DMA_CCR_REG(lch);
+ val &= ~(3 << 19);
if (dma_trigger > 63)
val |= 1 << 20;
if (dma_trigger > 31)
val |= 1 << 19;
+ val &= ~(0x1f);
val |= (dma_trigger & 0x1f);
if (sync_mode & OMAP_DMA_SYNC_FRAME)
val |= 1 << 5;
+ else
+ val &= ~(1 << 5);
if (sync_mode & OMAP_DMA_SYNC_BLOCK)
val |= 1 << 18;
+ else
+ val &= ~(1 << 18);
if (src_or_dst_synch)
val |= 1 << 24; /* source synch */
@@ -286,22 +293,39 @@ void omap_set_dma_src_data_pack(int lch, int enable)
void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
+ unsigned int burst = 0;
OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 7);
switch (burst_mode) {
case OMAP_DMA_DATA_BURST_DIS:
break;
case OMAP_DMA_DATA_BURST_4:
- OMAP_DMA_CSDP_REG(lch) |= (0x02 << 7);
+ if (cpu_is_omap24xx())
+ burst = 0x1;
+ else
+ burst = 0x2;
break;
case OMAP_DMA_DATA_BURST_8:
- /* not supported by current hardware
+ if (cpu_is_omap24xx()) {
+ burst = 0x2;
+ break;
+ }
+ /* not supported by current hardware on OMAP1
* w |= (0x03 << 7);
* fall through
*/
+ case OMAP_DMA_DATA_BURST_16:
+ if (cpu_is_omap24xx()) {
+ burst = 0x3;
+ break;
+ }
+ /* OMAP1 doesn't support burst 16
+ * fall through
+ */
default:
BUG();
}
+ OMAP_DMA_CSDP_REG(lch) |= (burst << 7);
}
/* Note that dest_port is only for OMAP1 */
@@ -348,30 +372,49 @@ void omap_set_dma_dest_data_pack(int lch, int enable)
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
+ unsigned int burst = 0;
OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 14);
switch (burst_mode) {
case OMAP_DMA_DATA_BURST_DIS:
break;
case OMAP_DMA_DATA_BURST_4:
- OMAP_DMA_CSDP_REG(lch) |= (0x02 << 14);
+ if (cpu_is_omap24xx())
+ burst = 0x1;
+ else
+ burst = 0x2;
break;
case OMAP_DMA_DATA_BURST_8:
- OMAP_DMA_CSDP_REG(lch) |= (0x03 << 14);
+ if (cpu_is_omap24xx())
+ burst = 0x2;
+ else
+ burst = 0x3;
break;
+ case OMAP_DMA_DATA_BURST_16:
+ if (cpu_is_omap24xx()) {
+ burst = 0x3;
+ break;
+ }
+ /* OMAP1 doesn't support burst 16
+ * fall through
+ */
default:
printk(KERN_ERR "Invalid DMA burst mode\n");
BUG();
return;
}
+ OMAP_DMA_CSDP_REG(lch) |= (burst << 14);
}
static inline void omap_enable_channel_irq(int lch)
{
u32 status;
- /* Read CSR to make sure it's cleared. */
- status = OMAP_DMA_CSR_REG(lch);
+ /* Clear CSR */
+ if (cpu_class_is_omap1())
+ status = OMAP_DMA_CSR_REG(lch);
+ else if (cpu_is_omap24xx())
+ OMAP_DMA_CSR_REG(lch) = OMAP2_DMA_CSR_CLEAR_MASK;
/* Enable some nice interrupts. */
OMAP_DMA_CICR_REG(lch) = dma_chan[lch].enabled_irqs;
@@ -470,11 +513,13 @@ int omap_request_dma(int dev_id, const char *dev_name,
chan->dev_name = dev_name;
chan->callback = callback;
chan->data = data;
- chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ |
- OMAP_DMA_BLOCK_IRQ;
+ chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
- if (cpu_is_omap24xx())
- chan->enabled_irqs |= OMAP2_DMA_TRANS_ERR_IRQ;
+ if (cpu_class_is_omap1())
+ chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
+ else if (cpu_is_omap24xx())
+ chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
+ OMAP2_DMA_TRANS_ERR_IRQ;
if (cpu_is_omap16xx()) {
/* If the sync device is set, configure it dynamically. */
@@ -494,7 +539,7 @@ int omap_request_dma(int dev_id, const char *dev_name,
omap_enable_channel_irq(free_ch);
/* Clear the CSR register and IRQ status register */
- OMAP_DMA_CSR_REG(free_ch) = 0x0;
+ OMAP_DMA_CSR_REG(free_ch) = OMAP2_DMA_CSR_CLEAR_MASK;
omap_writel(~0x0, OMAP_DMA4_IRQSTATUS_L0);
}
@@ -534,7 +579,7 @@ void omap_free_dma(int lch)
omap_writel(val, OMAP_DMA4_IRQENABLE_L0);
/* Clear the CSR register and IRQ status register */
- OMAP_DMA_CSR_REG(lch) = 0x0;
+ OMAP_DMA_CSR_REG(lch) = OMAP2_DMA_CSR_CLEAR_MASK;
val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
val |= 1 << lch;
@@ -798,7 +843,7 @@ static int omap1_dma_handle_ch(int ch)
"%d (CSR %04x)\n", ch, csr);
return 0;
}
- if (unlikely(csr & OMAP_DMA_TOUT_IRQ))
+ if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
printk(KERN_WARNING "DMA timeout with device %d\n",
dma_chan[ch].dev_id);
if (unlikely(csr & OMAP_DMA_DROP_IRQ))
@@ -846,20 +891,21 @@ static int omap2_dma_handle_ch(int ch)
return 0;
if (unlikely(dma_chan[ch].dev_id == -1))
return 0;
- /* REVISIT: According to 24xx TRM, there's no TOUT_IE */
- if (unlikely(status & OMAP_DMA_TOUT_IRQ))
- printk(KERN_INFO "DMA timeout with device %d\n",
- dma_chan[ch].dev_id);
if (unlikely(status & OMAP_DMA_DROP_IRQ))
printk(KERN_INFO
"DMA synchronization event drop occurred with device "
"%d\n", dma_chan[ch].dev_id);
-
if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ))
printk(KERN_INFO "DMA transaction error with device %d\n",
dma_chan[ch].dev_id);
+ if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
+ printk(KERN_INFO "DMA secure error with device %d\n",
+ dma_chan[ch].dev_id);
+ if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
+ printk(KERN_INFO "DMA misaligned error with device %d\n",
+ dma_chan[ch].dev_id);
- OMAP_DMA_CSR_REG(ch) = 0x20;
+ OMAP_DMA_CSR_REG(ch) = OMAP2_DMA_CSR_CLEAR_MASK;
val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
/* ch in this function is from 0-31 while in register it is 1-32 */
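
The burst-mode hunks above matter to callers because the same logical burst size maps to different CSDP encodings on OMAP1 and OMAP2 (a 4-element burst is 0x2 on OMAP1 but 0x1 on OMAP2), and OMAP_DMA_DATA_BURST_16 is accepted only on OMAP2; on OMAP1 it still lands in BUG(). A hedged usage sketch against the updated API; the device id, channel name and example_* names are illustrative, not taken from the patch:

#include <asm/hardware.h>
#include <asm/arch/dma.h>

static void example_dma_cb(int lch, u16 ch_status, void *data)
{
	/* completion/error handling would go here */
}

static int example_dma_setup(void)
{
	int lch, err;

	/* TOUT vs. MISALIGNED/TRANS_ERR interrupts are now chosen per
	 * CPU class inside omap_request_dma(), so the caller needs no
	 * special casing here. */
	err = omap_request_dma(OMAP_DMA_NO_DEVICE, "example",
			       example_dma_cb, NULL, &lch);
	if (err)
		return err;

	/* 16-element bursts exist only on OMAP2; fall back on OMAP1. */
	if (cpu_is_omap24xx())
		omap_set_dma_dest_burst_mode(lch, OMAP_DMA_DATA_BURST_16);
	else
		omap_set_dma_dest_burst_mode(lch, OMAP_DMA_DATA_BURST_4);

	return lch;
}
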
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index eba3cb52ad8..804a5353437 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -4,7 +4,8 @@
* OMAP Dual-Mode Timers
*
* Copyright (C) 2005 Nokia Corporation
- * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com>
+ * OMAP2 support by Juha Yrjola
+ * API improvements and OMAP2 clock framework support by Timo Teras
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -26,15 +27,17 @@
*/
#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <asm/hardware.h>
#include <asm/arch/dmtimer.h>
#include <asm/io.h>
#include <asm/arch/irqs.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-#define OMAP_TIMER_COUNT 8
+/* register offsets */
#define OMAP_TIMER_ID_REG 0x00
#define OMAP_TIMER_OCP_CFG_REG 0x10
#define OMAP_TIMER_SYS_STAT_REG 0x14
@@ -50,52 +53,196 @@
#define OMAP_TIMER_CAPTURE_REG 0x3c
#define OMAP_TIMER_IF_CTRL_REG 0x40
+/* timer control reg bits */
+#define OMAP_TIMER_CTRL_GPOCFG (1 << 14)
+#define OMAP_TIMER_CTRL_CAPTMODE (1 << 13)
+#define OMAP_TIMER_CTRL_PT (1 << 12)
+#define OMAP_TIMER_CTRL_TCM_LOWTOHIGH (0x1 << 8)
+#define OMAP_TIMER_CTRL_TCM_HIGHTOLOW (0x2 << 8)
+#define OMAP_TIMER_CTRL_TCM_BOTHEDGES (0x3 << 8)
+#define OMAP_TIMER_CTRL_SCPWM (1 << 7)
+#define OMAP_TIMER_CTRL_CE (1 << 6) /* compare enable */
+#define OMAP_TIMER_CTRL_PRE (1 << 5) /* prescaler enable */
+#define OMAP_TIMER_CTRL_PTV_SHIFT 2 /* how much to shift the prescaler value */
+#define OMAP_TIMER_CTRL_AR (1 << 1) /* auto-reload enable */
+#define OMAP_TIMER_CTRL_ST (1 << 0) /* start timer */
+
+struct omap_dm_timer {
+ unsigned long phys_base;
+ int irq;
+#ifdef CONFIG_ARCH_OMAP2
+ struct clk *iclk, *fclk;
+#endif
+ void __iomem *io_base;
+ unsigned reserved:1;
+};
-static struct dmtimer_info_struct {
- struct list_head unused_timers;
- struct list_head reserved_timers;
-} dm_timer_info;
+#ifdef CONFIG_ARCH_OMAP1
static struct omap_dm_timer dm_timers[] = {
- { .base=0xfffb1400, .irq=INT_1610_GPTIMER1 },
- { .base=0xfffb1c00, .irq=INT_1610_GPTIMER2 },
- { .base=0xfffb2400, .irq=INT_1610_GPTIMER3 },
- { .base=0xfffb2c00, .irq=INT_1610_GPTIMER4 },
- { .base=0xfffb3400, .irq=INT_1610_GPTIMER5 },
- { .base=0xfffb3c00, .irq=INT_1610_GPTIMER6 },
- { .base=0xfffb4400, .irq=INT_1610_GPTIMER7 },
- { .base=0xfffb4c00, .irq=INT_1610_GPTIMER8 },
- { .base=0x0 },
+ { .phys_base = 0xfffb1400, .irq = INT_1610_GPTIMER1 },
+ { .phys_base = 0xfffb1c00, .irq = INT_1610_GPTIMER2 },
+ { .phys_base = 0xfffb2400, .irq = INT_1610_GPTIMER3 },
+ { .phys_base = 0xfffb2c00, .irq = INT_1610_GPTIMER4 },
+ { .phys_base = 0xfffb3400, .irq = INT_1610_GPTIMER5 },
+ { .phys_base = 0xfffb3c00, .irq = INT_1610_GPTIMER6 },
+ { .phys_base = 0xfffb4400, .irq = INT_1610_GPTIMER7 },
+ { .phys_base = 0xfffb4c00, .irq = INT_1610_GPTIMER8 },
};
+#elif defined(CONFIG_ARCH_OMAP2)
+
+static struct omap_dm_timer dm_timers[] = {
+ { .phys_base = 0x48028000, .irq = INT_24XX_GPTIMER1 },
+ { .phys_base = 0x4802a000, .irq = INT_24XX_GPTIMER2 },
+ { .phys_base = 0x48078000, .irq = INT_24XX_GPTIMER3 },
+ { .phys_base = 0x4807a000, .irq = INT_24XX_GPTIMER4 },
+ { .phys_base = 0x4807c000, .irq = INT_24XX_GPTIMER5 },
+ { .phys_base = 0x4807e000, .irq = INT_24XX_GPTIMER6 },
+ { .phys_base = 0x48080000, .irq = INT_24XX_GPTIMER7 },
+ { .phys_base = 0x48082000, .irq = INT_24XX_GPTIMER8 },
+ { .phys_base = 0x48084000, .irq = INT_24XX_GPTIMER9 },
+ { .phys_base = 0x48086000, .irq = INT_24XX_GPTIMER10 },
+ { .phys_base = 0x48088000, .irq = INT_24XX_GPTIMER11 },
+ { .phys_base = 0x4808a000, .irq = INT_24XX_GPTIMER12 },
+};
+
+static const char *dm_source_names[] = {
+ "sys_ck",
+ "func_32k_ck",
+ "alt_ck"
+};
+static struct clk *dm_source_clocks[3];
+
+#else
+
+#error OMAP architecture not supported!
+
+#endif
+
+static const int dm_timer_count = ARRAY_SIZE(dm_timers);
static spinlock_t dm_timer_lock;
+static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, int reg)
+{
+ return readl(timer->io_base + reg);
+}
-inline void omap_dm_timer_write_reg(struct omap_dm_timer *timer, int reg, u32 value)
+static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, int reg, u32 value)
{
- omap_writel(value, timer->base + reg);
+ writel(value, timer->io_base + reg);
while (omap_dm_timer_read_reg(timer, OMAP_TIMER_WRITE_PEND_REG))
;
}
-u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, int reg)
+static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
{
- return omap_readl(timer->base + reg);
+ int c;
+
+ c = 0;
+ while (!(omap_dm_timer_read_reg(timer, OMAP_TIMER_SYS_STAT_REG) & 1)) {
+ c++;
+ if (c > 100000) {
+ printk(KERN_ERR "Timer failed to reset\n");
+ return;
+ }
+ }
}
-int omap_dm_timers_active(void)
+static void omap_dm_timer_reset(struct omap_dm_timer *timer)
+{
+ u32 l;
+
+ if (timer != &dm_timers[0]) {
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
+ omap_dm_timer_wait_for_reset(timer);
+ }
+ omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_SYS_CLK);
+
+ /* Set to smart-idle mode */
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_OCP_CFG_REG);
+ l |= 0x02 << 3;
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_REG, l);
+}
+
+static void omap_dm_timer_prepare(struct omap_dm_timer *timer)
+{
+#ifdef CONFIG_ARCH_OMAP2
+ clk_enable(timer->iclk);
+ clk_enable(timer->fclk);
+#endif
+ omap_dm_timer_reset(timer);
+}
+
+struct omap_dm_timer *omap_dm_timer_request(void)
+{
+ struct omap_dm_timer *timer = NULL;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dm_timer_lock, flags);
+ for (i = 0; i < dm_timer_count; i++) {
+ if (dm_timers[i].reserved)
+ continue;
+
+ timer = &dm_timers[i];
+ timer->reserved = 1;
+ break;
+ }
+ spin_unlock_irqrestore(&dm_timer_lock, flags);
+
+ if (timer != NULL)
+ omap_dm_timer_prepare(timer);
+
+ return timer;
+}
+
+struct omap_dm_timer *omap_dm_timer_request_specific(int id)
{
struct omap_dm_timer *timer;
+ unsigned long flags;
- for (timer = &dm_timers[0]; timer->base; ++timer)
- if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
- OMAP_TIMER_CTRL_ST)
- return 1;
+ spin_lock_irqsave(&dm_timer_lock, flags);
+ if (id <= 0 || id > dm_timer_count || dm_timers[id-1].reserved) {
+ spin_unlock_irqrestore(&dm_timer_lock, flags);
+ printk("BUG: warning at %s:%d/%s(): unable to get timer %d\n",
+ __FILE__, __LINE__, __FUNCTION__, id);
+ dump_stack();
+ return NULL;
+ }
- return 0;
+ timer = &dm_timers[id-1];
+ timer->reserved = 1;
+ spin_unlock_irqrestore(&dm_timer_lock, flags);
+
+ omap_dm_timer_prepare(timer);
+
+ return timer;
}
+void omap_dm_timer_free(struct omap_dm_timer *timer)
+{
+ omap_dm_timer_reset(timer);
+#ifdef CONFIG_ARCH_OMAP2
+ clk_disable(timer->iclk);
+ clk_disable(timer->fclk);
+#endif
+ WARN_ON(!timer->reserved);
+ timer->reserved = 0;
+}
+
+int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
+{
+ return timer->irq;
+}
+
+#if defined(CONFIG_ARCH_OMAP1)
+
+struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
+{
+ BUG();
+}
/**
* omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
@@ -103,184 +250,229 @@ int omap_dm_timers_active(void)
*/
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
- int n;
+ int i;
/* If ARMXOR cannot be idled this function call is unnecessary */
if (!(inputmask & (1 << 1)))
return inputmask;
/* If any active timer is using ARMXOR return modified mask */
- for (n = 0; dm_timers[n].base; ++n)
- if (omap_dm_timer_read_reg(&dm_timers[n], OMAP_TIMER_CTRL_REG)&
- OMAP_TIMER_CTRL_ST) {
- if (((omap_readl(MOD_CONF_CTRL_1)>>(n*2)) & 0x03) == 0)
+ for (i = 0; i < dm_timer_count; i++) {
+ u32 l;
+
+ l = omap_dm_timer_read_reg(&dm_timers[i], OMAP_TIMER_CTRL_REG);
+ if (l & OMAP_TIMER_CTRL_ST) {
+ if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
inputmask &= ~(1 << 1);
else
inputmask &= ~(1 << 2);
}
+ }
return inputmask;
}
+#elif defined(CONFIG_ARCH_OMAP2)
-void omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
+struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
{
- int n = (timer - dm_timers) << 1;
- u32 l;
+ return timer->fclk;
+}
- l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
- l |= source << n;
- omap_writel(l, MOD_CONF_CTRL_1);
+__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
+{
+ BUG();
}
+#endif
-static void omap_dm_timer_reset(struct omap_dm_timer *timer)
+void omap_dm_timer_trigger(struct omap_dm_timer *timer)
{
- /* Reset and set posted mode */
- omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_REG, 0x02);
-
- omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_ARMXOR);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
}
+void omap_dm_timer_start(struct omap_dm_timer *timer)
+{
+ u32 l;
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ if (!(l & OMAP_TIMER_CTRL_ST)) {
+ l |= OMAP_TIMER_CTRL_ST;
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ }
+}
-struct omap_dm_timer * omap_dm_timer_request(void)
+void omap_dm_timer_stop(struct omap_dm_timer *timer)
{
- struct omap_dm_timer *timer = NULL;
- unsigned long flags;
+ u32 l;
- spin_lock_irqsave(&dm_timer_lock, flags);
- if (!list_empty(&dm_timer_info.unused_timers)) {
- timer = (struct omap_dm_timer *)
- dm_timer_info.unused_timers.next;
- list_move_tail((struct list_head *)timer,
- &dm_timer_info.reserved_timers);
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ if (l & OMAP_TIMER_CTRL_ST) {
+ l &= ~0x1;
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
}
- spin_unlock_irqrestore(&dm_timer_lock, flags);
-
- return timer;
}
+#ifdef CONFIG_ARCH_OMAP1
-void omap_dm_timer_free(struct omap_dm_timer *timer)
+void omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
{
- unsigned long flags;
-
- omap_dm_timer_reset(timer);
+ int n = (timer - dm_timers) << 1;
+ u32 l;
- spin_lock_irqsave(&dm_timer_lock, flags);
- list_move_tail((struct list_head *)timer, &dm_timer_info.unused_timers);
- spin_unlock_irqrestore(&dm_timer_lock, flags);
+ l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
+ l |= source << n;
+ omap_writel(l, MOD_CONF_CTRL_1);
}
-void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
- unsigned int value)
-{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_INT_EN_REG, value);
-}
+#else
-unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
+void omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
{
- return omap_dm_timer_read_reg(timer, OMAP_TIMER_STAT_REG);
-}
+ if (source < 0 || source >= 3)
+ return;
-void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
-{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, value);
+ clk_disable(timer->fclk);
+ clk_set_parent(timer->fclk, dm_source_clocks[source]);
+ clk_enable(timer->fclk);
+
+ /* When the functional clock disappears, too quick writes seem to
+ * cause an abort. */
+ __delay(15000);
}
-void omap_dm_timer_enable_autoreload(struct omap_dm_timer *timer)
+#endif
+
+void omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
+ unsigned int load)
{
u32 l;
+
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- l |= OMAP_TIMER_CTRL_AR;
+ if (autoreload)
+ l |= OMAP_TIMER_CTRL_AR;
+ else
+ l &= ~OMAP_TIMER_CTRL_AR;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
}
-void omap_dm_timer_trigger(struct omap_dm_timer *timer)
-{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 1);
-}
-
-void omap_dm_timer_set_trigger(struct omap_dm_timer *timer, unsigned int value)
+void omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
+ unsigned int match)
{
u32 l;
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- l |= value & 0x3;
+ if (enable)
+ l |= OMAP_TIMER_CTRL_CE;
+ else
+ l &= ~OMAP_TIMER_CTRL_CE;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
}
-void omap_dm_timer_start(struct omap_dm_timer *timer)
+
+void omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
+ int toggle, int trigger)
{
u32 l;
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- l |= OMAP_TIMER_CTRL_ST;
+ l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
+ OMAP_TIMER_CTRL_PT | (0x03 << 10));
+ if (def_on)
+ l |= OMAP_TIMER_CTRL_SCPWM;
+ if (toggle)
+ l |= OMAP_TIMER_CTRL_PT;
+ l |= trigger << 10;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
}
-void omap_dm_timer_stop(struct omap_dm_timer *timer)
+void omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler)
{
u32 l;
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- l &= ~0x1;
+ l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
+ if (prescaler >= 0x00 && prescaler <= 0x07) {
+ l |= OMAP_TIMER_CTRL_PRE;
+ l |= prescaler << 2;
+ }
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
}
-unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
+void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
+ unsigned int value)
{
- return omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_INT_EN_REG, value);
}
-void omap_dm_timer_reset_counter(struct omap_dm_timer *timer)
+unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, 0);
+ return omap_dm_timer_read_reg(timer, OMAP_TIMER_STAT_REG);
}
-void omap_dm_timer_set_load(struct omap_dm_timer *timer, unsigned int load)
+void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, value);
}
-void omap_dm_timer_set_match(struct omap_dm_timer *timer, unsigned int match)
+unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
+ return omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
}
-void omap_dm_timer_enable_compare(struct omap_dm_timer *timer)
+void omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
{
- u32 l;
-
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- l |= OMAP_TIMER_CTRL_CE;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ return omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);
}
+int omap_dm_timers_active(void)
+{
+ int i;
+
+ for (i = 0; i < dm_timer_count; i++) {
+ struct omap_dm_timer *timer;
+
+ timer = &dm_timers[i];
+ if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
+ OMAP_TIMER_CTRL_ST)
+ return 1;
+ }
+ return 0;
+}
-static inline void __dm_timer_init(void)
+int omap_dm_timer_init(void)
{
struct omap_dm_timer *timer;
+ int i;
+
+ if (!(cpu_is_omap16xx() || cpu_is_omap24xx()))
+ return -ENODEV;
spin_lock_init(&dm_timer_lock);
- INIT_LIST_HEAD(&dm_timer_info.unused_timers);
- INIT_LIST_HEAD(&dm_timer_info.reserved_timers);
-
- timer = &dm_timers[0];
- while (timer->base) {
- list_add_tail((struct list_head *)timer, &dm_timer_info.unused_timers);
- omap_dm_timer_reset(timer);
- timer++;
+#ifdef CONFIG_ARCH_OMAP2
+ for (i = 0; i < ARRAY_SIZE(dm_source_names); i++) {
+ dm_source_clocks[i] = clk_get(NULL, dm_source_names[i]);
+ BUG_ON(dm_source_clocks[i] == NULL);
+ }
+#endif
+
+ for (i = 0; i < dm_timer_count; i++) {
+#ifdef CONFIG_ARCH_OMAP2
+ char clk_name[16];
+#endif
+
+ timer = &dm_timers[i];
+ timer->io_base = (void __iomem *) io_p2v(timer->phys_base);
+#ifdef CONFIG_ARCH_OMAP2
+ sprintf(clk_name, "gpt%d_ick", i + 1);
+ timer->iclk = clk_get(NULL, clk_name);
+ sprintf(clk_name, "gpt%d_fck", i + 1);
+ timer->fclk = clk_get(NULL, clk_name);
+#endif
}
-}
-static int __init omap_dm_timer_init(void)
-{
- if (cpu_is_omap16xx())
- __dm_timer_init();
return 0;
}
-
-arch_initcall(omap_dm_timer_init);
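
With the allocator rewritten, the dual-mode timer interface is now request/program/free rather than list manipulation, and omap_dm_timer_init() is no longer an arch_initcall but is expected to be called explicitly (the timer32k.c hunk below does exactly that). A minimal usage sketch, assuming only functions and constants that appear in this patch; the tick count and example_* names are illustrative:

#include <linux/errno.h>
#include <asm/arch/dmtimer.h>

static struct omap_dm_timer *example_timer;

/* Grab any free dual-mode timer, clock it from the 32 kHz source and
 * start it as an auto-reloading tick of 'ticks' counts. */
static int example_timer_setup(unsigned int ticks)
{
	example_timer = omap_dm_timer_request();
	if (example_timer == NULL)
		return -EBUSY;

	omap_dm_timer_set_source(example_timer, OMAP_TIMER_SRC_32_KHZ);
	/* The counter counts up and interrupts on overflow, hence the
	 * 0xffffffff - ticks reload value (same idiom as timer32k.c). */
	omap_dm_timer_set_load(example_timer, 1, 0xffffffff - ticks);
	omap_dm_timer_set_int_enable(example_timer, OMAP_TIMER_INT_OVERFLOW);
	omap_dm_timer_start(example_timer);

	return omap_dm_timer_get_irq(example_timer);	/* caller wires up its handler */
}

static void example_timer_teardown(void)
{
	omap_dm_timer_stop(example_timer);
	omap_dm_timer_free(example_timer);
}
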
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index d3c8ea7eecf..e75a2ca70ba 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -537,6 +537,49 @@ static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
_clear_gpio_irqbank(bank, 1 << get_gpio_index(gpio));
}
+static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
+{
+ void __iomem *reg = bank->base;
+ int inv = 0;
+ u32 l;
+ u32 mask;
+
+ switch (bank->method) {
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_GPIO_MASKIT;
+ mask = 0xffff;
+ inv = 1;
+ break;
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_INT_MASK;
+ mask = 0xffff;
+ inv = 1;
+ break;
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_IRQENABLE1;
+ mask = 0xffff;
+ break;
+ case METHOD_GPIO_730:
+ reg += OMAP730_GPIO_INT_MASK;
+ mask = 0xffffffff;
+ inv = 1;
+ break;
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_IRQENABLE1;
+ mask = 0xffffffff;
+ break;
+ default:
+ BUG();
+ return 0;
+ }
+
+ l = __raw_readl(reg);
+ if (inv)
+ l = ~l;
+ l &= mask;
+ return l;
+}
+
static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask, int enable)
{
void __iomem *reg = bank->base;
@@ -736,6 +779,8 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
u32 isr;
unsigned int gpio_irq;
struct gpio_bank *bank;
+ u32 retrigger = 0;
+ int unmasked = 0;
desc->chip->ack(irq);
@@ -760,18 +805,22 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
#endif
while(1) {
u32 isr_saved, level_mask = 0;
+ u32 enabled;
- isr_saved = isr = __raw_readl(isr_reg);
+ enabled = _get_gpio_irqbank_mask(bank);
+ isr_saved = isr = __raw_readl(isr_reg) & enabled;
if (cpu_is_omap15xx() && (bank->method == METHOD_MPUIO))
isr &= 0x0000ffff;
- if (cpu_is_omap24xx())
+ if (cpu_is_omap24xx()) {
level_mask =
__raw_readl(bank->base +
OMAP24XX_GPIO_LEVELDETECT0) |
__raw_readl(bank->base +
OMAP24XX_GPIO_LEVELDETECT1);
+ level_mask &= enabled;
+ }
/* clear edge sensitive interrupts before handler(s) are
called so that we don't miss any interrupt occurred while
@@ -782,19 +831,54 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
/* if there is only edge sensitive GPIO pin interrupts
configured, we could unmask GPIO bank interrupt immediately */
- if (!level_mask)
+ if (!level_mask && !unmasked) {
+ unmasked = 1;
desc->chip->unmask(irq);
+ }
+ isr |= retrigger;
+ retrigger = 0;
if (!isr)
break;
gpio_irq = bank->virtual_irq_start;
for (; isr != 0; isr >>= 1, gpio_irq++) {
struct irqdesc *d;
+ int irq_mask;
if (!(isr & 1))
continue;
d = irq_desc + gpio_irq;
+ /* Don't run the handler if it's already running
+ * or was disabled lazily.
+ */
+ if (unlikely((d->disable_depth || d->running))) {
+ irq_mask = 1 <<
+ (gpio_irq - bank->virtual_irq_start);
+ /* The unmasking will be done by
+ * enable_irq in case it is disabled or
+ * after returning from the handler if
+ * it's already running.
+ */
+ _enable_gpio_irqbank(bank, irq_mask, 0);
+ if (!d->disable_depth) {
+ /* Level triggered interrupts
+ * won't ever be reentered
+ */
+ BUG_ON(level_mask & irq_mask);
+ d->pending = 1;
+ }
+ continue;
+ }
+ d->running = 1;
desc_handle_irq(gpio_irq, d, regs);
+ d->running = 0;
+ if (unlikely(d->pending && !d->disable_depth)) {
+ irq_mask = 1 <<
+ (gpio_irq - bank->virtual_irq_start);
+ d->pending = 0;
+ _enable_gpio_irqbank(bank, irq_mask, 1);
+ retrigger |= irq_mask;
+ }
}
if (cpu_is_omap24xx()) {
@@ -804,13 +888,14 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
_enable_gpio_irqbank(bank, isr_saved & level_mask, 1);
}
- /* if bank has any level sensitive GPIO pin interrupt
- configured, we must unmask the bank interrupt only after
- handler(s) are executed in order to avoid spurious bank
- interrupt */
- if (level_mask)
- desc->chip->unmask(irq);
}
+ /* if bank has any level sensitive GPIO pin interrupt
+ configured, we must unmask the bank interrupt only after
+ handler(s) are executed in order to avoid spurious bank
+ interrupt */
+ if (!unmasked)
+ desc->chip->unmask(irq);
+
}
static void gpio_ack_irq(unsigned int irq)
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index b7bf09b1b41..aebd06faf2c 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -158,14 +158,12 @@ static struct map_desc omap_sram_io_desc[] __initdata = {
{ /* .length gets filled in at runtime */
.virtual = OMAP1_SRAM_VA,
.pfn = __phys_to_pfn(OMAP1_SRAM_PA),
- .type = MT_DEVICE
+ .type = MT_MEMORY
}
};
/*
- * In order to use last 2kB of SRAM on 1611b, we must round the size
- * up to multiple of PAGE_SIZE. We cannot use ioremap for SRAM, as
- * clock init needs SRAM early.
+ * Note that we cannot use ioremap for SRAM, as clock init needs SRAM early.
*/
void __init omap_map_sram(void)
{
@@ -185,8 +183,7 @@ void __init omap_map_sram(void)
omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
}
- omap_sram_io_desc[0].length = (omap_sram_size + PAGE_SIZE-1)/PAGE_SIZE;
- omap_sram_io_desc[0].length *= PAGE_SIZE;
+ omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */
iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
diff --git a/arch/arm/plat-omap/timer32k.c b/arch/arm/plat-omap/timer32k.c
index 3461a6c9665..f028e182215 100644
--- a/arch/arm/plat-omap/timer32k.c
+++ b/arch/arm/plat-omap/timer32k.c
@@ -7,6 +7,7 @@
* Partial timer rewrite and additional dynamic tick timer support by
* Tony Lindgren <tony@atomide.com> and
* Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ * OMAP Dual-mode timer framework support by Timo Teras
*
* MPU timer code based on the older MPU timer code for OMAP
* Copyright (C) 2000 RidgeRun, Inc.
@@ -79,18 +80,6 @@ struct sys_timer omap_timer;
#define OMAP1_32K_TIMER_TVR 0x00
#define OMAP1_32K_TIMER_TCR 0x04
-/* 24xx specific defines */
-#define OMAP2_GP_TIMER_BASE 0x48028000
-#define CM_CLKSEL_WKUP 0x48008440
-#define GP_TIMER_TIDR 0x00
-#define GP_TIMER_TISR 0x18
-#define GP_TIMER_TIER 0x1c
-#define GP_TIMER_TCLR 0x24
-#define GP_TIMER_TCRR 0x28
-#define GP_TIMER_TLDR 0x2c
-#define GP_TIMER_TTGR 0x30
-#define GP_TIMER_TSICR 0x40
-
#define OMAP_32K_TICKS_PER_HZ (32768 / HZ)
/*
@@ -102,54 +91,64 @@ struct sys_timer omap_timer;
#define JIFFIES_TO_HW_TICKS(nr_jiffies, clock_rate) \
(((nr_jiffies) * (clock_rate)) / HZ)
+#if defined(CONFIG_ARCH_OMAP1)
+
static inline void omap_32k_timer_write(int val, int reg)
{
- if (cpu_class_is_omap1())
- omap_writew(val, OMAP1_32K_TIMER_BASE + reg);
-
- if (cpu_is_omap24xx())
- omap_writel(val, OMAP2_GP_TIMER_BASE + reg);
+ omap_writew(val, OMAP1_32K_TIMER_BASE + reg);
}
static inline unsigned long omap_32k_timer_read(int reg)
{
- if (cpu_class_is_omap1())
- return omap_readl(OMAP1_32K_TIMER_BASE + reg) & 0xffffff;
+ return omap_readl(OMAP1_32K_TIMER_BASE + reg) & 0xffffff;
+}
- if (cpu_is_omap24xx())
- return omap_readl(OMAP2_GP_TIMER_BASE + reg);
+static inline void omap_32k_timer_start(unsigned long load_val)
+{
+ omap_32k_timer_write(load_val, OMAP1_32K_TIMER_TVR);
+ omap_32k_timer_write(0x0f, OMAP1_32K_TIMER_CR);
}
-/*
- * The 32KHz synchronized timer is an additional timer on 16xx.
- * It is always running.
- */
-static inline unsigned long omap_32k_sync_timer_read(void)
+static inline void omap_32k_timer_stop(void)
{
- return omap_readl(TIMER_32K_SYNCHRONIZED);
+ omap_32k_timer_write(0x0, OMAP1_32K_TIMER_CR);
}
+#define omap_32k_timer_ack_irq()
+
+#elif defined(CONFIG_ARCH_OMAP2)
+
+#include <asm/arch/dmtimer.h>
+
+static struct omap_dm_timer *gptimer;
+
static inline void omap_32k_timer_start(unsigned long load_val)
{
- if (cpu_class_is_omap1()) {
- omap_32k_timer_write(load_val, OMAP1_32K_TIMER_TVR);
- omap_32k_timer_write(0x0f, OMAP1_32K_TIMER_CR);
- }
-
- if (cpu_is_omap24xx()) {
- omap_32k_timer_write(0xffffffff - load_val, GP_TIMER_TCRR);
- omap_32k_timer_write((1 << 1), GP_TIMER_TIER);
- omap_32k_timer_write((1 << 1) | 1, GP_TIMER_TCLR);
- }
+ omap_dm_timer_set_load(gptimer, 1, 0xffffffff - load_val);
+ omap_dm_timer_set_int_enable(gptimer, OMAP_TIMER_INT_OVERFLOW);
+ omap_dm_timer_start(gptimer);
}
static inline void omap_32k_timer_stop(void)
{
- if (cpu_class_is_omap1())
- omap_32k_timer_write(0x0, OMAP1_32K_TIMER_CR);
+ omap_dm_timer_stop(gptimer);
+}
- if (cpu_is_omap24xx())
- omap_32k_timer_write(0x0, GP_TIMER_TCLR);
+static inline void omap_32k_timer_ack_irq(void)
+{
+ u32 status = omap_dm_timer_read_status(gptimer);
+ omap_dm_timer_write_status(gptimer, status);
+}
+
+#endif
+
+/*
+ * The 32KHz synchronized timer is an additional timer on 16xx.
+ * It is always running.
+ */
+static inline unsigned long omap_32k_sync_timer_read(void)
+{
+ return omap_readl(TIMER_32K_SYNCHRONIZED);
}
/*
@@ -203,11 +202,7 @@ static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id,
write_seqlock_irqsave(&xtime_lock, flags);
- if (cpu_is_omap24xx()) {
- u32 status = omap_32k_timer_read(GP_TIMER_TISR);
- omap_32k_timer_write(status, GP_TIMER_TISR);
- }
-
+ omap_32k_timer_ack_irq();
now = omap_32k_sync_timer_read();
while ((signed long)(now - omap_32k_last_tick)
@@ -269,9 +264,6 @@ static struct irqaction omap_32k_timer_irq = {
.handler = omap_32k_timer_interrupt,
};
-static struct clk * gpt1_ick;
-static struct clk * gpt1_fck;
-
static __init void omap_init_32k_timer(void)
{
#ifdef CONFIG_NO_IDLE_HZ
@@ -280,31 +272,19 @@ static __init void omap_init_32k_timer(void)
if (cpu_class_is_omap1())
setup_irq(INT_OS_TIMER, &omap_32k_timer_irq);
- if (cpu_is_omap24xx())
- setup_irq(37, &omap_32k_timer_irq);
omap_timer.offset = omap_32k_timer_gettimeoffset;
omap_32k_last_tick = omap_32k_sync_timer_read();
/* REVISIT: Check 24xx TIOCP_CFG settings after idle works */
if (cpu_is_omap24xx()) {
- omap_32k_timer_write(0, GP_TIMER_TCLR);
- omap_writel(0, CM_CLKSEL_WKUP); /* 32KHz clock source */
-
- gpt1_ick = clk_get(NULL, "gpt1_ick");
- if (IS_ERR(gpt1_ick))
- printk(KERN_ERR "Could not get gpt1_ick\n");
- else
- clk_enable(gpt1_ick);
-
- gpt1_fck = clk_get(NULL, "gpt1_fck");
- if (IS_ERR(gpt1_fck))
- printk(KERN_ERR "Could not get gpt1_fck\n");
- else
- clk_enable(gpt1_fck);
-
- mdelay(100); /* Wait for clocks to stabilize */
-
- omap_32k_timer_write(0x7, GP_TIMER_TISR);
+ gptimer = omap_dm_timer_request_specific(1);
+ BUG_ON(gptimer == NULL);
+
+ omap_dm_timer_set_source(gptimer, OMAP_TIMER_SRC_32_KHZ);
+ setup_irq(omap_dm_timer_get_irq(gptimer), &omap_32k_timer_irq);
+ omap_dm_timer_set_int_enable(gptimer,
+ OMAP_TIMER_INT_CAPTURE | OMAP_TIMER_INT_OVERFLOW |
+ OMAP_TIMER_INT_MATCH);
}
omap_32k_timer_start(OMAP_32K_TIMER_TICK_PERIOD);
@@ -317,6 +297,9 @@ static __init void omap_init_32k_timer(void)
*/
static void __init omap_timer_init(void)
{
+#ifdef CONFIG_OMAP_DM_TIMER
+ omap_dm_timer_init();
+#endif
omap_init_32k_timer();
}