author     Lars-Peter Clausen <lars@metafoo.de>    2009-10-04 13:47:44 +0200
committer  Lars-Peter Clausen <lars@metafoo.de>    2010-05-18 01:01:23 +0200
commit     09cb945ad762e9a7f258287942918dd4a3badae4 (patch)
tree       49e87aaf6f424e3d924560ca72d2c0a67cdecf74 /drivers
parent     e40152ee1e1c7a63f4777791863215e3faa37a86 (diff)
Add glamo driver.

Add the Smedia Glamo 336x/337x multi-function device driver under
drivers/mfd/glamo/: a core driver (clock/power management, IRQ
demultiplexing, GPIO) plus framebuffer and SD/MMC cell drivers, wired
into the MFD Kconfig and Makefile.
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/mfd/Kconfig             |    1
-rw-r--r--  drivers/mfd/Makefile            |    3
-rw-r--r--  drivers/mfd/glamo/Kconfig       |   42
-rw-r--r--  drivers/mfd/glamo/Makefile      |   11
-rw-r--r--  drivers/mfd/glamo/glamo-core.c  | 1289
-rw-r--r--  drivers/mfd/glamo/glamo-core.h  |   61
-rw-r--r--  drivers/mfd/glamo/glamo-fb.c    |  982
-rw-r--r--  drivers/mfd/glamo/glamo-gpio.c  |  285
-rw-r--r--  drivers/mfd/glamo/glamo-mci.c   | 1023
-rw-r--r--  drivers/mfd/glamo/glamo-regs.h  |  630
10 files changed, 4326 insertions(+), 1 deletion(-)
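The core driver below binds against a platform device named "glamo3362" and takes its register window from a MEM resource, the chip interrupt line from IRQ resource 0, the base of the demultiplexed virtual IRQ range from IRQ resource 1, and board parameters from platform data. As an illustration only (this sketch is not part of the patch), a board file might wire the chip up roughly as follows; the bus address, IRQ numbers and clock rate are placeholders, and struct glamo_platform_data is assumed to carry at least the osci_clock_rate, glamo_external_reset and fb_data members that the driver dereferences.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/mfd/glamo.h>

/* Toggle whatever GPIO the board wires to the Glamo reset pin. */
static void board_glamo_reset(int level)
{
	/* gpio_set_value(BOARD_GPIO_GLAMO_RESET, level); */
}

static struct resource board_glamo_resources[] = {
	{
		/* register + VRAM window; the core driver carves it up */
		.start	= 0x08000000,		/* placeholder bus address */
		.end	= 0x08000000 + 0x01000000 - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		/* IRQ resource 0: the external INT# line of the chip */
		.start	= 50,			/* placeholder interrupt number */
		.end	= 50,
		.flags	= IORESOURCE_IRQ,
	}, {
		/* IRQ resource 1: first of the 9 virtual irqs used by the demuxer */
		.start	= 200,			/* placeholder irq_base */
		.end	= 200 + 8,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct glamo_platform_data board_glamo_pdata = {
	.osci_clock_rate	= 32768,	/* Hz; feeds the PLL rate calculation */
	.glamo_external_reset	= board_glamo_reset,
	/* .fb_data should point at a struct glamo_fb_platform_data carrying
	 * the panel modes, width and height for the glamo-fb cell. */
};

static struct platform_device board_glamo_device = {
	.name		= "glamo3362",
	.id		= -1,
	.resource	= board_glamo_resources,
	.num_resources	= ARRAY_SIZE(board_glamo_resources),
	.dev		= {
		.platform_data = &board_glamo_pdata,
	},
};

Registering board_glamo_device with platform_device_register() before the driver probes lets mfd_add_devices() spawn the glamo-fb, glamo-mci and glamo-gpio cells described in the patch.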
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 2a5a0b78f84..d72b9853290 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -398,6 +398,7 @@ config LPC_SCH
 	help
 	  LPC bridge function of the Intel SCH provides support for
 	  System Management Bus and General Purpose I/O.
 
+source "drivers/mfd/glamo/Kconfig"
 endmenu
 
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 22715add99a..b0c59712244 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_MFD_88PM860X)	+= 88pm860x.o
 obj-$(CONFIG_MFD_SM501)		+= sm501.o
 obj-$(CONFIG_MFD_ASIC3)		+= asic3.o tmio_core.o
 obj-$(CONFIG_MFD_SH_MOBILE_SDHI)	+= sh_mobile_sdhi.o
+obj-$(CONFIG_MFD_GLAMO)		+= glamo/
 
 obj-$(CONFIG_HTC_EGPIO)		+= htc-egpio.o
 obj-$(CONFIG_HTC_PASIC3)	+= htc-pasic3.o
@@ -62,4 +63,4 @@ obj-$(CONFIG_AB3100_OTP)	+= ab3100-otp.o
 obj-$(CONFIG_AB4500_CORE)	+= ab4500-core.o
 obj-$(CONFIG_MFD_TIMBERDALE)	+= timberdale.o
 obj-$(CONFIG_PMIC_ADP5520)	+= adp5520.o
-obj-$(CONFIG_LPC_SCH)		+= lpc_sch.o
\ No newline at end of file
+obj-$(CONFIG_LPC_SCH)		+= lpc_sch.o
diff --git a/drivers/mfd/glamo/Kconfig b/drivers/mfd/glamo/Kconfig
new file mode 100644
index 00000000000..3aa4831a4ff
--- /dev/null
+++ b/drivers/mfd/glamo/Kconfig
@@ -0,0 +1,42 @@
+config MFD_GLAMO
+	bool "Smedia Glamo 336x/337x support"
+	select MFD_CORE
+	help
+	  This enables the core driver for the Smedia Glamo 336x/337x
+	  multi-function device. It includes irq_chip demultiplex as
+	  well as clock / power management and GPIO support.
+
+config MFD_GLAMO_FB
+	tristate "Smedia Glamo 336x/337x framebuffer support"
+	depends on FB && MFD_GLAMO
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	help
+	  Frame buffer driver for the LCD controller in the Smedia Glamo
+	  336x/337x.
+
+	  This driver is also available as a module ( = code which can be
+	  inserted and removed from the running kernel whenever you want). The
+	  module will be called glamofb. If you want to compile it as a module,
+	  say M here and read <file:Documentation/modules.txt>.
+
+	  If unsure, say N.
+
+config MFD_GLAMO_GPIO
+	tristate "Glamo GPIO support"
+	depends on MFD_GLAMO
+
+	help
+	  Enable a bitbanging SPI adapter driver for the Smedia Glamo.
+
+config MFD_GLAMO_MCI
+	tristate "Glamo S3C SD/MMC Card Interface support"
+	depends on MFD_GLAMO && MMC && REGULATOR
+	select CRC7
+	help
+	  This selects a driver for the MCI interface found in
+	  the S-Media GLAMO chip, as used in Openmoko
+	  neo1973 GTA-02.
+
+	  If unsure, say N.
diff --git a/drivers/mfd/glamo/Makefile b/drivers/mfd/glamo/Makefile
new file mode 100644
index 00000000000..ebf26f7e473
--- /dev/null
+++ b/drivers/mfd/glamo/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the Smedia Glamo framebuffer driver
+#
+
+obj-$(CONFIG_MFD_GLAMO)		+= glamo-core.o
+obj-$(CONFIG_MFD_GLAMO_GPIO)	+= glamo-gpio.o
+obj-$(CONFIG_MFD_GLAMO_SPI)	+= glamo-spi.o
+
+obj-$(CONFIG_MFD_GLAMO_FB)	+= glamo-fb.o
+obj-$(CONFIG_MFD_GLAMO_MCI)	+= glamo-mci.o
+
diff --git a/drivers/mfd/glamo/glamo-core.c b/drivers/mfd/glamo/glamo-core.c
new file mode 100644
index 00000000000..23073fedea2
--- /dev/null
+++ b/drivers/mfd/glamo/glamo-core.c
@@ -0,0 +1,1289 @@
+/* Smedia Glamo 3362 driver
+ *
+ * (C) 2007 by Openmoko, Inc.
+ * Author: Harald Welte <laforge@openmoko.org>
+ * (C) 2009, Lars-Peter Clausen <lars@metafoo.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/platform_device.h> +#include <linux/kernel_stat.h> +#include <linux/spinlock.h> +#include <linux/mfd/core.h> +#include <linux/mfd/glamo.h> +#include <linux/io.h> +#include <linux/slab.h> + +#include <linux/pm.h> + +#include "glamo-regs.h" +#include "glamo-core.h" + +#define GLAMO_MEM_REFRESH_COUNT 0x100 + +#define GLAMO_NR_IRQS 9 + +#define GLAMO_IRQ_HOSTBUS 0 +#define GLAMO_IRQ_JPEG 1 +#define GLAMO_IRQ_MPEG 2 +#define GLAMO_IRQ_MPROC1 3 +#define GLAMO_IRQ_MPROC0 4 +#define GLAMO_IRQ_CMDQUEUE 5 +#define GLAMO_IRQ_2D 6 +#define GLAMO_IRQ_MMC 7 +#define GLAMO_IRQ_RISC 8 + +/* + * Glamo internal settings + * + * We run the memory interface from the faster PLLB on 2.6.28 kernels and + * above. Couple of GTA02 users report trouble with memory bus when they + * upgraded from 2.6.24. So this parameter allows reversion to 2.6.24 + * scheme if their Glamo chip needs it. + * + * you can override the faster default on kernel commandline using + * + * glamo3362.slow_memory=1 + * + * for example + */ + +static int slow_memory; +module_param(slow_memory, int, 0644); + +struct reg_range { + int start; + int count; + char *name; + unsigned dump:1; +}; + +static const struct reg_range reg_range[] = { + { 0x0000, 0x76, "General", 1 }, + { 0x0200, 0x18, "Host Bus", 1 }, + { 0x0300, 0x38, "Memory", 1 }, +/* { 0x0400, 0x100, "Sensor", 0 }, */ +/* { 0x0500, 0x300, "ISP", 0 }, */ +/* { 0x0800, 0x400, "JPEG", 0 }, */ +/* { 0x0c00, 0xcc, "MPEG", 0 }, */ + { 0x1100, 0xb2, "LCD 1", 1 }, + { 0x1200, 0x64, "LCD 2", 1 }, + { 0x1400, 0x42, "MMC", 1 }, +/* { 0x1500, 0x080, "MPU 0", 0 }, + { 0x1580, 0x080, "MPU 1", 0 }, + { 0x1600, 0x080, "Cmd Queue", 0 }, + { 0x1680, 0x080, "RISC CPU", 0 },*/ + { 0x1700, 0x400, "2D Unit", 1 }, +/* { 0x1b00, 0x900, "3D Unit", 0 }, */ +}; + +static inline void __reg_write(struct glamo_core *glamo, + uint16_t reg, uint16_t val) +{ + writew(val, glamo->base + reg); +} + +void glamo_reg_write(struct glamo_core *glamo, + uint16_t reg, uint16_t val) +{ + spin_lock(&glamo->lock); + __reg_write(glamo, reg, val); + spin_unlock(&glamo->lock); +} +EXPORT_SYMBOL_GPL(glamo_reg_write); + + +static inline uint16_t __reg_read(struct glamo_core *glamo, + uint16_t reg) +{ + return readw(glamo->base + reg); +} + +uint16_t glamo_reg_read(struct glamo_core *glamo, uint16_t reg) +{ + uint16_t val; + spin_lock(&glamo->lock); + val = __reg_read(glamo, reg); + spin_unlock(&glamo->lock); + + return val; +} +EXPORT_SYMBOL_GPL(glamo_reg_read); + +static void __reg_set_bit_mask(struct glamo_core *glamo, + uint16_t reg, uint16_t mask, + uint16_t val) +{ + uint16_t tmp; + + val &= mask; + + tmp = __reg_read(glamo, reg); + tmp &= ~mask; + tmp |= val; + __reg_write(glamo, reg, tmp); +} + +static void reg_set_bit_mask(struct glamo_core *glamo, + uint16_t reg, uint16_t mask, + uint16_t val) +{ + spin_lock(&glamo->lock); + __reg_set_bit_mask(glamo, reg, mask, val); + spin_unlock(&glamo->lock); +} + +static inline void __reg_set_bit(struct glamo_core *glamo, + uint16_t reg, uint16_t bit) +{ + uint16_t tmp; + tmp = 
__reg_read(glamo, reg); + tmp |= bit; + __reg_write(glamo, reg, tmp); +} + +static inline void __reg_clear_bit(struct glamo_core *glamo, + uint16_t reg, uint16_t bit) +{ + uint16_t tmp; + tmp = __reg_read(glamo, reg); + tmp &= ~bit; + __reg_write(glamo, reg, tmp); +} + +static void __reg_write_batch(struct glamo_core *glamo, uint16_t reg, + uint16_t count, uint16_t *values) +{ + uint16_t end; + for (end = reg + count * 2; reg < end; reg += 2, ++values) + __reg_write(glamo, reg, *values); +} + +static void __reg_read_batch(struct glamo_core *glamo, uint16_t reg, + uint16_t count, uint16_t *values) +{ + uint16_t end; + for (end = reg + count * 2; reg < end; reg += 2, ++values) + *values = __reg_read(glamo, reg); +} + +void glamo_reg_write_batch(struct glamo_core *glamo, uint16_t reg, + uint16_t count, uint16_t *values) +{ + spin_lock(&glamo->lock); + __reg_write_batch(glamo, reg, count, values); + spin_unlock(&glamo->lock); +} +EXPORT_SYMBOL(glamo_reg_write_batch); + +void glamo_reg_read_batch(struct glamo_core *glamo, uint16_t reg, + uint16_t count, uint16_t *values) +{ + spin_lock(&glamo->lock); + __reg_read_batch(glamo, reg, count, values); + spin_unlock(&glamo->lock); +} +EXPORT_SYMBOL(glamo_reg_read_batch); + +/*********************************************************************** + * resources of sibling devices + ***********************************************************************/ + +static struct resource glamo_fb_resources[] = { + { + .name = "glamo-fb-regs", + .start = GLAMO_REGOFS_LCD, + .end = GLAMO_REGOFS_MMC - 1, + .flags = IORESOURCE_MEM, + }, { + .name = "glamo-fb-mem", + .start = GLAMO_OFFSET_FB, + .end = GLAMO_OFFSET_FB + GLAMO_FB_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource glamo_mmc_resources[] = { + { + .name = "glamo-mmc-regs", + .start = GLAMO_REGOFS_MMC, + .end = GLAMO_REGOFS_MPROC0 - 1, + .flags = IORESOURCE_MEM + }, { + .name = "glamo-mmc-mem", + .start = GLAMO_OFFSET_FB + GLAMO_FB_SIZE, + .end = GLAMO_OFFSET_FB + GLAMO_FB_SIZE + + GLAMO_MMC_BUFFER_SIZE - 1, + .flags = IORESOURCE_MEM + }, { + .start = GLAMO_IRQ_MMC, + .end = GLAMO_IRQ_MMC, + .flags = IORESOURCE_IRQ, + }, +}; + +enum glamo_cells { + GLAMO_CELL_FB, + GLAMO_CELL_MMC, + GLAMO_CELL_GPIO, +}; + +static const struct mfd_cell glamo_cells[] = { + [GLAMO_CELL_FB] = { + .name = "glamo-fb", + .num_resources = ARRAY_SIZE(glamo_fb_resources), + .resources = glamo_fb_resources, + }, + [GLAMO_CELL_MMC] = { + .name = "glamo-mci", + .num_resources = ARRAY_SIZE(glamo_mmc_resources), + .resources = glamo_mmc_resources, + }, + [GLAMO_CELL_GPIO] = { + .name = "glamo-gpio", + }, +}; + +/*********************************************************************** + * IRQ demultiplexer + ***********************************************************************/ +#define glamo_irq_bit(glamo, x) BIT(x - glamo->irq_base) + +static inline struct glamo_core *irq_to_glamo(unsigned int irq) +{ + return (struct glamo_core *)get_irq_chip_data(irq); +} + +static void glamo_ack_irq(unsigned int irq) +{ + struct glamo_core *glamo = irq_to_glamo(irq); + /* clear interrupt source */ + __reg_write(glamo, GLAMO_REG_IRQ_CLEAR, glamo_irq_bit(glamo, irq)); +} + +static void glamo_mask_irq(unsigned int irq) +{ + struct glamo_core *glamo = irq_to_glamo(irq); + + /* clear bit in enable register */ + __reg_clear_bit(glamo, GLAMO_REG_IRQ_ENABLE, glamo_irq_bit(glamo, irq)); +} + +static void glamo_unmask_irq(unsigned int irq) +{ + struct glamo_core *glamo = irq_to_glamo(irq); + + /* set bit in enable register */ + 
__reg_set_bit(glamo, GLAMO_REG_IRQ_ENABLE, glamo_irq_bit(glamo, irq)); +} + +static struct irq_chip glamo_irq_chip = { + .name = "glamo", + .ack = glamo_ack_irq, + .mask = glamo_mask_irq, + .unmask = glamo_unmask_irq, +}; + +static void glamo_irq_demux_handler(unsigned int irq, struct irq_desc *desc) +{ + struct glamo_core *glamo = get_irq_desc_data(desc); + desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); + + if (unlikely(desc->status & IRQ_INPROGRESS)) { + desc->status |= (IRQ_PENDING | IRQ_MASKED); + desc->chip->mask(irq); + desc->chip->ack(irq); + return; + } + kstat_incr_irqs_this_cpu(irq, desc); + + desc->chip->ack(irq); + desc->status |= IRQ_INPROGRESS; + + do { + uint16_t irqstatus; + int i; + + if (unlikely((desc->status & + (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == + (IRQ_PENDING | IRQ_MASKED))) { + /* dealing with pending IRQ, unmasking */ + desc->chip->unmask(irq); + desc->status &= ~IRQ_MASKED; + } + + desc->status &= ~IRQ_PENDING; + + /* read IRQ status register */ + irqstatus = __reg_read(glamo, GLAMO_REG_IRQ_STATUS); + for (i = 0; i < 9; ++i) { + if (irqstatus & BIT(i)) + generic_handle_irq(glamo->irq_base + i); + } + + } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); + + desc->status &= ~IRQ_INPROGRESS; +} + +/* +sysfs +*/ + +static ssize_t regs_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct glamo_core *glamo = dev_get_drvdata(dev); + unsigned int reg; + unsigned int val; + + sscanf(buf, "%u %u", ®, &val); + printk(KERN_INFO"reg 0x%02x <-- 0x%04x\n", + reg, val); + + glamo_reg_write(glamo, reg, val); + + return count; +} + +static ssize_t regs_read(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct glamo_core *glamo = dev_get_drvdata(dev); + int i, n; + char *end = buf; + const struct reg_range *rr = reg_range; + + spin_lock(&glamo->lock); + + for (i = 0; i < ARRAY_SIZE(reg_range); ++i, ++rr) { + if (!rr->dump) + continue; + end += sprintf(end, "\n%s\n", rr->name); + for (n = rr->start; n < rr->start + rr->count; n += 2) { + if ((n & 15) == 0) + end += sprintf(end, "\n%04X: ", n); + end += sprintf(end, "%04x ", __reg_read(glamo, n)); + } + end += sprintf(end, "\n"); + } + spin_unlock(&glamo->lock); + + return end - buf; +} + +static DEVICE_ATTR(regs, 0644, regs_read, regs_write); + +struct glamo_engine_reg_set { + uint16_t reg; + uint16_t mask_suspended; + uint16_t mask_enabled; +}; + +struct glamo_engine_desc { + const char *name; + uint16_t hostbus; + const struct glamo_engine_reg_set *regs; + int num_regs; +}; + +static const struct glamo_engine_reg_set glamo_lcd_regs[] = { + { GLAMO_REG_CLOCK_LCD, + GLAMO_CLOCK_LCD_EN_M5CLK | + GLAMO_CLOCK_LCD_DG_M5CLK | + GLAMO_CLOCK_LCD_EN_DMCLK, + + GLAMO_CLOCK_LCD_EN_DHCLK | + GLAMO_CLOCK_LCD_EN_DCLK + }, + { GLAMO_REG_CLOCK_GEN5_1, + GLAMO_CLOCK_GEN51_EN_DIV_DMCLK, + + GLAMO_CLOCK_GEN51_EN_DIV_DHCLK | + GLAMO_CLOCK_GEN51_EN_DIV_DCLK + } +}; + +static const struct glamo_engine_reg_set glamo_mmc_regs[] = { + { GLAMO_REG_CLOCK_MMC, + GLAMO_CLOCK_MMC_EN_M9CLK | + GLAMO_CLOCK_MMC_DG_M9CLK, + + GLAMO_CLOCK_MMC_EN_TCLK | + GLAMO_CLOCK_MMC_DG_TCLK + }, + { GLAMO_REG_CLOCK_GEN5_1, + 0, + GLAMO_CLOCK_GEN51_EN_DIV_TCLK + } +}; + +static const struct glamo_engine_reg_set glamo_2d_regs[] = { + { GLAMO_REG_CLOCK_2D, + GLAMO_CLOCK_2D_EN_M7CLK | + GLAMO_CLOCK_2D_DG_M7CLK, + + GLAMO_CLOCK_2D_EN_GCLK | + GLAMO_CLOCK_2D_DG_GCLK + }, + { GLAMO_REG_CLOCK_GEN5_1, + 0, + GLAMO_CLOCK_GEN51_EN_DIV_GCLK, + } +}; + +static const struct glamo_engine_reg_set 
glamo_cmdq_regs[] = { + { GLAMO_REG_CLOCK_2D, + GLAMO_CLOCK_2D_EN_M6CLK, + 0 + }, +}; + +#define GLAMO_ENGINE(xname, xhostbus, xregs) { \ + .name = xname, \ + .hostbus = xhostbus, \ + .num_regs = ARRAY_SIZE(xregs), \ + .regs = xregs, \ +} + +static const struct glamo_engine_desc glamo_engines[] = { + [GLAMO_ENGINE_LCD] = GLAMO_ENGINE("LCD", GLAMO_HOSTBUS2_MMIO_EN_LCD, + glamo_lcd_regs), + [GLAMO_ENGINE_MMC] = GLAMO_ENGINE("MMC", GLAMO_HOSTBUS2_MMIO_EN_MMC, + glamo_mmc_regs), + [GLAMO_ENGINE_2D] = GLAMO_ENGINE("2D", GLAMO_HOSTBUS2_MMIO_EN_2D, + glamo_2d_regs), + [GLAMO_ENGINE_CMDQ] = GLAMO_ENGINE("CMDQ", GLAMO_HOSTBUS2_MMIO_EN_CQ, + glamo_cmdq_regs), +}; + +static inline const char *glamo_engine_name(enum glamo_engine engine) +{ + return glamo_engines[engine].name; +} + +/*********************************************************************** + * 'engine' support + ***********************************************************************/ + +int __glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine) +{ + int i; + const struct glamo_engine_desc *engine_desc = &glamo_engines[engine]; + const struct glamo_engine_reg_set *reg; + + switch (engine) { + case GLAMO_ENGINE_LCD: + case GLAMO_ENGINE_MMC: + case GLAMO_ENGINE_2D: + case GLAMO_ENGINE_CMDQ: + break; + default: + return -EINVAL; + } + + reg = engine_desc->regs; + + __reg_set_bit(glamo, GLAMO_REG_HOSTBUS(2), + engine_desc->hostbus); + for (i = engine_desc->num_regs; i; --i, ++reg) + __reg_set_bit(glamo, reg->reg, + reg->mask_suspended | reg->mask_enabled); + + return 0; +} + +int glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine) +{ + int ret = 0; + + spin_lock(&glamo->lock); + + if (glamo->engine_state[engine] != GLAMO_ENGINE_ENABLED) { + ret = __glamo_engine_enable(glamo, engine); + if (!ret) + glamo->engine_state[engine] = GLAMO_ENGINE_ENABLED; + } + + spin_unlock(&glamo->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(glamo_engine_enable); + +int __glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine) +{ + int i; + const struct glamo_engine_desc *engine_desc = &glamo_engines[engine]; + const struct glamo_engine_reg_set *reg; + + switch (engine) { + case GLAMO_ENGINE_LCD: + case GLAMO_ENGINE_MMC: + case GLAMO_ENGINE_2D: + case GLAMO_ENGINE_CMDQ: + break; + default: + return -EINVAL; + } + + reg = engine_desc->regs; + + __reg_clear_bit(glamo, GLAMO_REG_HOSTBUS(2), + engine_desc->hostbus); + for (i = engine_desc->num_regs; i; --i, ++reg) + __reg_clear_bit(glamo, reg->reg, + reg->mask_suspended | reg->mask_enabled); + + return 0; +} +int glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine) +{ + int ret = 0; + + spin_lock(&glamo->lock); + + if (glamo->engine_state[engine] != GLAMO_ENGINE_DISABLED) { + ret = __glamo_engine_disable(glamo, engine); + if (!ret) + glamo->engine_state[engine] = GLAMO_ENGINE_DISABLED; + } + + spin_unlock(&glamo->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(glamo_engine_disable); + +int __glamo_engine_suspend(struct glamo_core *glamo, enum glamo_engine engine) +{ + int i; + const struct glamo_engine_desc *engine_desc = &glamo_engines[engine]; + const struct glamo_engine_reg_set *reg; + + switch (engine) { + case GLAMO_ENGINE_LCD: + case GLAMO_ENGINE_MMC: + case GLAMO_ENGINE_2D: + case GLAMO_ENGINE_CMDQ: + break; + default: + return -EINVAL; + } + + reg = engine_desc->regs; + + __reg_set_bit(glamo, GLAMO_REG_HOSTBUS(2), + engine_desc->hostbus); + for (i = engine_desc->num_regs; i; --i, ++reg) { + __reg_set_bit(glamo, reg->reg, 
reg->mask_suspended); + __reg_clear_bit(glamo, reg->reg, reg->mask_enabled); + } + + return 0; +} + +int glamo_engine_suspend(struct glamo_core *glamo, enum glamo_engine engine) +{ + int ret = 0; + + spin_lock(&glamo->lock); + + if (glamo->engine_state[engine] != GLAMO_ENGINE_SUSPENDED) { + ret = __glamo_engine_suspend(glamo, engine); + if (!ret) + glamo->engine_state[engine] = GLAMO_ENGINE_SUSPENDED; + } + + spin_unlock(&glamo->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(glamo_engine_suspend); + +static const struct glamo_script reset_regs[] = { + [GLAMO_ENGINE_LCD] = { + GLAMO_REG_CLOCK_LCD, GLAMO_CLOCK_LCD_RESET + }, + [GLAMO_ENGINE_MMC] = { + GLAMO_REG_CLOCK_MMC, GLAMO_CLOCK_MMC_RESET + }, + [GLAMO_ENGINE_CMDQ] = { + GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_CQ_RESET + }, + [GLAMO_ENGINE_2D] = { + GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_RESET + }, + [GLAMO_ENGINE_JPEG] = { + GLAMO_REG_CLOCK_JPEG, GLAMO_CLOCK_JPEG_RESET + }, +}; + +void glamo_engine_reset(struct glamo_core *glamo, enum glamo_engine engine) +{ + uint16_t reg = reset_regs[engine].reg; + uint16_t val = reset_regs[engine].val; + + if (engine >= ARRAY_SIZE(reset_regs)) { + dev_warn(&glamo->pdev->dev, "unknown engine %u ", engine); + return; + } + + spin_lock(&glamo->lock); + __reg_set_bit(glamo, reg, val); + __reg_clear_bit(glamo, reg, val); + spin_unlock(&glamo->lock); +} +EXPORT_SYMBOL_GPL(glamo_engine_reset); + +int glamo_pll_rate(struct glamo_core *glamo, + enum glamo_pll pll) +{ + uint16_t reg; + unsigned int osci = glamo->pdata->osci_clock_rate; + + switch (pll) { + case GLAMO_PLL1: + reg = __reg_read(glamo, GLAMO_REG_PLL_GEN1); + break; + case GLAMO_PLL2: + reg = __reg_read(glamo, GLAMO_REG_PLL_GEN3); + break; + default: + return -EINVAL; + } + + return (int)osci * (int)reg; +} +EXPORT_SYMBOL_GPL(glamo_pll_rate); + +int glamo_engine_reclock(struct glamo_core *glamo, + enum glamo_engine engine, + int hz) +{ + int pll; + uint16_t reg, mask, div; + + if (!hz) + return -EINVAL; + + switch (engine) { + case GLAMO_ENGINE_LCD: + pll = GLAMO_PLL1; + reg = GLAMO_REG_CLOCK_GEN7; + mask = 0xff; + break; + case GLAMO_ENGINE_MMC: + pll = GLAMO_PLL1; + reg = GLAMO_REG_CLOCK_GEN8; + mask = 0xff; + break; + default: + dev_warn(&glamo->pdev->dev, + "reclock of engine 0x%x not supported\n", engine); + return -EINVAL; + break; + } + + pll = glamo_pll_rate(glamo, pll); + + div = pll / hz; + + if (div != 0 && pll / div <= hz) + --div; + + if (div > mask) + div = mask; + + dev_dbg(&glamo->pdev->dev, + "PLL %d, kHZ %d, div %d\n", pll, hz / 1000, div); + + reg_set_bit_mask(glamo, reg, mask, div); + mdelay(5); /* wait some time to stabilize */ + + return pll / (div + 1); +} +EXPORT_SYMBOL_GPL(glamo_engine_reclock); + +/*********************************************************************** + * script support + ***********************************************************************/ + +#define GLAMO_SCRIPT_END 0xffff +#define GLAMO_SCRIPT_WAIT 0xfffe +#define GLAMO_SCRIPT_LOCK_PLL 0xfffd + +/* + * couple of people reported artefacts with 2.6.28 changes, this + * allows reversion to 2.6.24 settings +*/ +static const uint16_t reg_0x200[] = { +0xe03, /* 0 waits on Async BB R & W, Use PLL 2 for mem bus */ +0xef0, /* 3 waits on Async BB R & W, Use PLL 1 for mem bus */ +0xea0, /* 2 waits on Async BB R & W, Use PLL 1 for mem bus */ +0xe50, /* 1 waits on Async BB R & W, Use PLL 1 for mem bus */ +0xe00, /* 0 waits on Async BB R & W, Use PLL 1 for mem bus */ +0xef3, /* 3 waits on Async BB R & W, Use PLL 2 for mem bus */ +0xea3, /* 2 waits on Async BB R & W, Use 
PLL 2 for mem bus */ +0xe53, /* 1 waits on Async BB R & W, Use PLL 2 for mem bus */ +}; + +static int glamo_run_script(struct glamo_core *glamo, + const struct glamo_script *script, int len, + int may_sleep) +{ + int i; + uint16_t status; + const struct glamo_script *line = script; + + for (i = 0; i < len; ++i, ++line) { + switch (line->reg) { + case GLAMO_SCRIPT_END: + return 0; + case GLAMO_SCRIPT_WAIT: + if (may_sleep) + msleep(line->val); + else + mdelay(line->val * 4); + break; + case GLAMO_SCRIPT_LOCK_PLL: + /* spin until PLLs lock */ + do { + status = __reg_read(glamo, GLAMO_REG_PLL_GEN5); + } while ((status & 3) != 3); + break; + case 0x200: + __reg_write(glamo, line->reg, + reg_0x200[slow_memory & 0x8]); + break; + default: + __reg_write(glamo, line->reg, line->val); + break; + } + } + + return 0; +} + +static const struct glamo_script glamo_init_script[] = { + { GLAMO_REG_CLOCK_HOST, 0x1000 }, + { GLAMO_SCRIPT_WAIT, 2 }, + { GLAMO_REG_CLOCK_MEMORY, 0x1000 }, + { GLAMO_REG_CLOCK_MEMORY, 0x2000 }, + { GLAMO_REG_CLOCK_LCD, 0x1000 }, + { GLAMO_REG_CLOCK_MMC, 0x1000 }, + { GLAMO_REG_CLOCK_ISP, 0x1000 }, + { GLAMO_REG_CLOCK_ISP, 0x3000 }, + { GLAMO_REG_CLOCK_JPEG, 0x1000 }, + { GLAMO_REG_CLOCK_3D, 0x1000 }, + { GLAMO_REG_CLOCK_3D, 0x3000 }, + { GLAMO_REG_CLOCK_2D, 0x1000 }, + { GLAMO_REG_CLOCK_2D, 0x3000 }, + { GLAMO_REG_CLOCK_RISC1, 0x1000 }, + { GLAMO_REG_CLOCK_MPEG, 0x1000 }, + { GLAMO_REG_CLOCK_MPEG, 0x3000 }, + { GLAMO_REG_CLOCK_MPROC, 0x1000 /*0x100f*/ }, + { GLAMO_SCRIPT_WAIT, 2 }, + { GLAMO_REG_CLOCK_HOST, 0x0000 }, + { GLAMO_REG_CLOCK_MEMORY, 0x0000 }, + { GLAMO_REG_CLOCK_LCD, 0x0000 }, + { GLAMO_REG_CLOCK_MMC, 0x0000 }, + { GLAMO_REG_PLL_GEN1, 0x05db }, /* 48MHz */ + { GLAMO_REG_PLL_GEN3, 0x0aba }, /* 90MHz */ + { GLAMO_SCRIPT_LOCK_PLL, 0 }, + /* + * b9 of this register MUST be zero to get any interrupts on INT# + * the other set bits enable all the engine interrupt sources + */ + { GLAMO_REG_IRQ_ENABLE, 0x0100 }, + { GLAMO_REG_CLOCK_GEN6, 0x2000 }, + { GLAMO_REG_CLOCK_GEN7, 0x0101 }, + { GLAMO_REG_CLOCK_GEN8, 0x0100 }, + { GLAMO_REG_CLOCK_HOST, 0x000d }, + /* + * b7..b4 = 0 = no wait states on read or write + * b0 = 1 select PLL2 for Host interface, b1 = enable it + */ + { GLAMO_REG_HOSTBUS(0), 0x0e03 /* this is replaced by script parser */ }, + { GLAMO_REG_HOSTBUS(1), 0x07ff }, /* TODO: Disable all */ + { GLAMO_REG_HOSTBUS(10), 0x0000 }, + { GLAMO_REG_HOSTBUS(11), 0x4000 }, + { GLAMO_REG_HOSTBUS(12), 0xf00e }, + + /* S-Media recommended "set tiling mode to 512 mode for memory access + * more efficiency when 640x480" */ + { GLAMO_REG_MEM_TYPE, 0x0c74 }, /* 8MB, 16 word pg wr+rd */ + { GLAMO_REG_MEM_GEN, 0xafaf }, /* 63 grants min + max */ + + { GLAMO_REG_MEM_TIMING1, 0x0108 }, + { GLAMO_REG_MEM_TIMING2, 0x0010 }, /* Taa = 3 MCLK */ + { GLAMO_REG_MEM_TIMING3, 0x0000 }, + { GLAMO_REG_MEM_TIMING4, 0x0000 }, /* CE1# delay fall/rise */ + { GLAMO_REG_MEM_TIMING5, 0x0000 }, /* UB# LB# */ + { GLAMO_REG_MEM_TIMING6, 0x0000 }, /* OE# */ + { GLAMO_REG_MEM_TIMING7, 0x0000 }, /* WE# */ + { GLAMO_REG_MEM_TIMING8, 0x1002 }, /* MCLK delay, was 0x1000 */ + { GLAMO_REG_MEM_TIMING9, 0x6006 }, + { GLAMO_REG_MEM_TIMING10, 0x00ff }, + { GLAMO_REG_MEM_TIMING11, 0x0001 }, + { GLAMO_REG_MEM_POWER1, 0x0020 }, + { GLAMO_REG_MEM_POWER2, 0x0000 }, + { GLAMO_REG_MEM_DRAM1, 0x0000 }, + { GLAMO_SCRIPT_WAIT, 1 }, + { GLAMO_REG_MEM_DRAM1, 0xc100 }, + { GLAMO_SCRIPT_WAIT, 1 }, + { GLAMO_REG_MEM_DRAM1, 0xe100 }, + { GLAMO_REG_MEM_DRAM2, 0x01d6 }, + { GLAMO_REG_CLOCK_MEMORY, 0x000b }, +}; + +/* Find out if we 
can support this version of the Glamo chip */ +static int __devinit glamo_supported(struct glamo_core *glamo) +{ + uint16_t dev_id, rev_id; + + dev_id = __reg_read(glamo, GLAMO_REG_DEVICE_ID); + rev_id = __reg_read(glamo, GLAMO_REG_REVISION_ID); + + switch (dev_id) { + case 0x3650: + switch (rev_id) { + case GLAMO_CORE_REV_A2: + break; + case GLAMO_CORE_REV_A0: + case GLAMO_CORE_REV_A1: + case GLAMO_CORE_REV_A3: + dev_warn(&glamo->pdev->dev, "untested core revision " + "%04x, your mileage may vary\n", rev_id); + break; + default: + dev_warn(&glamo->pdev->dev, "unknown glamo revision " + "%04x, your mileage may vary\n", rev_id); + } + break; + default: + dev_err(&glamo->pdev->dev, "unsupported Glamo device %04x\n", + dev_id); + return 0; + } + + dev_dbg(&glamo->pdev->dev, "Detected Glamo core %04x Revision %04x " + "(%uHz CPU / %uHz Memory)\n", dev_id, rev_id, + glamo_pll_rate(glamo, GLAMO_PLL1), + glamo_pll_rate(glamo, GLAMO_PLL2)); + + return 1; +} + +static int __devinit glamo_probe(struct platform_device *pdev) +{ + int ret = 0, irq, irq_base; + struct glamo_core *glamo; + struct resource *mem; + + glamo = kmalloc(GFP_KERNEL, sizeof(*glamo)); + if (!glamo) + return -ENOMEM; + + spin_lock_init(&glamo->lock); + + glamo->pdev = pdev; + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + glamo->irq = platform_get_irq(pdev, 0); + glamo->irq_base = irq_base = platform_get_irq(pdev, 1); + glamo->pdata = pdev->dev.platform_data; + + if (glamo->irq < 0) { + ret = glamo->irq; + dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret); + goto err_free; + } + + if (irq_base < 0) { + ret = glamo->irq; + dev_err(&pdev->dev, "Failed to get glamo irq base: %d\n", ret); + goto err_free; + } + + if (!mem) { + dev_err(&pdev->dev, "Failed to get platform memory\n"); + ret = -ENOENT; + goto err_free; + } + + if (!glamo->pdata) { + dev_err(&pdev->dev, "Missing platform data\n"); + ret = -ENOENT; + goto err_free; + } + + /* only request the generic, hostbus and memory controller registers */ + glamo->mem = request_mem_region(mem->start, GLAMO_REGOFS_VIDCAP, + pdev->name); + + if (!glamo->mem) { + dev_err(&pdev->dev, "Failed to request io memory region\n"); + ret = -ENOENT; + goto err_free; + } + + glamo->base = ioremap(glamo->mem->start, resource_size(glamo->mem)); + if (!glamo->base) { + dev_err(&pdev->dev, "Failed to ioremap() memory region\n"); + goto err_release_mem_region; + } + + /* confirm it isn't insane version */ + if (!glamo_supported(glamo)) { + dev_err(&pdev->dev, + "This version of the Glamo is not supported\n"); + goto err_iounmap; + } + + platform_set_drvdata(pdev, glamo); + + /* sysfs */ + ret = device_create_file(&pdev->dev, &dev_attr_regs); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to create sysfs file\n"); + goto err_iounmap; + } + + /* init the chip with canned register set */ + glamo_run_script(glamo, glamo_init_script, + ARRAY_SIZE(glamo_init_script), 1); + + /* + * finally set the mfd interrupts up + */ + for (irq = irq_base; irq < irq_base + GLAMO_NR_IRQS; ++irq) { +#ifdef CONFIG_ARM + set_irq_flags(irq, IRQF_VALID); +#else + set_irq_noprobe(irq); +#endif + set_irq_chip_data(irq, glamo); + set_irq_chip_and_handler(irq, &glamo_irq_chip, + handle_level_irq); + } + + set_irq_type(glamo->irq, IRQ_TYPE_EDGE_FALLING); + set_irq_data(glamo->irq, glamo); + set_irq_chained_handler(glamo->irq, glamo_irq_demux_handler); + glamo->irq_works = 1; + + ret = mfd_add_devices(&pdev->dev, pdev->id, glamo_cells, + ARRAY_SIZE(glamo_cells), mem, glamo->irq_base); + + if (ret) { + 
dev_err(&pdev->dev, "Failed to add child devices: %d\n", ret); + goto err_free_irqs; + } + + dev_info(&glamo->pdev->dev, "Glamo core PLL1: %uHz, PLL2: %uHz\n", + glamo_pll_rate(glamo, GLAMO_PLL1), + glamo_pll_rate(glamo, GLAMO_PLL2)); + + return 0; + +err_free_irqs: + disable_irq(glamo->irq); + set_irq_chained_handler(glamo->irq, NULL); + set_irq_chip_data(glamo->irq, NULL); + + for (irq = irq_base; irq < irq_base + GLAMO_NR_IRQS; ++irq) { + set_irq_chip(irq, NULL); +#ifdef CONFIG_ARM + set_irq_flags(irq, 0); +#else + set_irq_probe(irq); +#endif + set_irq_chip_data(irq, NULL); + } +err_iounmap: + iounmap(glamo->base); +err_release_mem_region: + release_mem_region(glamo->mem->start, resource_size(glamo->mem)); +err_free: + platform_set_drvdata(pdev, NULL); + kfree(glamo); + + return ret; +} + +static int __devexit glamo_remove(struct platform_device *pdev) +{ + struct glamo_core *glamo = platform_get_drvdata(pdev); + int irq; + int irq_base = glamo->irq_base; + + mfd_remove_devices(&pdev->dev); + + disable_irq(glamo->irq); + set_irq_chained_handler(glamo->irq, NULL); + set_irq_chip_data(glamo->irq, NULL); + + for (irq = irq_base; irq < irq_base + GLAMO_NR_IRQS; ++irq) { +#ifdef CONFIG_ARM + set_irq_flags(irq, 0); +#else + set_irq_noprobe(); +#endif + set_irq_chip(irq, NULL); + set_irq_chip_data(irq, NULL); + } + + platform_set_drvdata(pdev, NULL); + iounmap(glamo->base); + release_mem_region(glamo->mem->start, resource_size(glamo->mem)); + kfree(glamo); + + return 0; +} + +#ifdef CONFIG_PM +#if 0 +static struct glamo_script glamo_resume_script[] = { + + { GLAMO_REG_PLL_GEN1, 0x05db }, /* 48MHz */ + { GLAMO_REG_PLL_GEN3, 0x0aba }, /* 90MHz */ + { GLAMO_REG_DFT_GEN6, 1 }, + { 0xfffe, 100 }, + { 0xfffd, 0 }, + { 0x200, 0x0e03 }, + + /* + * b9 of this register MUST be zero to get any interrupts on INT# + * the other set bits enable all the engine interrupt sources + */ + { GLAMO_REG_IRQ_ENABLE, 0x01ff }, + { GLAMO_REG_CLOCK_HOST, 0x0018 }, + { GLAMO_REG_CLOCK_GEN5_1, 0x18b1 }, + + { GLAMO_REG_MEM_DRAM1, 0x0000 }, + { 0xfffe, 1 }, + { GLAMO_REG_MEM_DRAM1, 0xc100 }, + { 0xfffe, 1 }, + { GLAMO_REG_MEM_DRAM1, 0xe100 }, + { GLAMO_REG_MEM_DRAM2, 0x01d6 }, + { GLAMO_REG_CLOCK_MEMORY, 0x000b }, +}; +#endif + +#if 0 +static void glamo_power(struct glamo_core *glamo) +{ + unsigned long flags; + + spin_lock_irqsave(&glamo->lock, flags); + + /* +Power management +static const REG_VALUE_MASK_TYPE reg_powerOn[] = +{ + { REG_GEN_DFT6, REG_BIT_ALL, REG_DATA(1u << 0) }, + { REG_GEN_PLL3, 0u, REG_DATA(1u << 13) }, + { REG_GEN_MEM_CLK, REG_BIT_ALL, REG_BIT_EN_MOCACLK }, + { REG_MEM_DRAM2, 0u, REG_BIT_EN_DEEP_POWER_DOWN }, + { REG_MEM_DRAM1, 0u, REG_BIT_SELF_REFRESH } +}; + +static const REG_VALUE_MASK_TYPE reg_powerStandby[] = +{ + { REG_MEM_DRAM1, REG_BIT_ALL, REG_BIT_SELF_REFRESH }, + { REG_GEN_MEM_CLK, 0u, REG_BIT_EN_MOCACLK }, + { REG_GEN_PLL3, REG_BIT_ALL, REG_DATA(1u << 13) }, + { REG_GEN_DFT5, REG_BIT_ALL, REG_DATA(1u << 0) } +}; + +static const REG_VALUE_MASK_TYPE reg_powerSuspend[] = +{ + { REG_MEM_DRAM2, REG_BIT_ALL, REG_BIT_EN_DEEP_POWER_DOWN }, + { REG_GEN_MEM_CLK, 0u, REG_BIT_EN_MOCACLK }, + { REG_GEN_PLL3, REG_BIT_ALL, REG_DATA(1u << 13) }, + { REG_GEN_DFT5, REG_BIT_ALL, REG_DATA(1u << 0) } +}; +*/ + switch (new_state) { + case GLAMO_POWER_ON: + + /* + * glamo state on resume is nondeterministic in some + * fundamental way, it has also been observed that the + * Glamo reset pin can get asserted by, eg, touching it with + * a scope probe. 
So the only answer is to roll with it and + * force an external reset on the Glamo during resume. + */ + + + break; + + case GLAMO_POWER_SUSPEND: + + break; + } + spin_unlock_irqrestore(&glamo->lock, flags); +} +#endif + +static int glamo_suspend(struct device *dev) +{ + struct glamo_core *glamo = dev_get_drvdata(dev); + int n; + + spin_lock(&glamo->lock); + + glamo->saved_irq_mask = __reg_read(glamo, GLAMO_REG_IRQ_ENABLE); + + /* nuke interrupts */ + __reg_write(glamo, GLAMO_REG_IRQ_ENABLE, 0x200); + + /* take down each engine before we kill mem and pll */ + for (n = 0; n < __NUM_GLAMO_ENGINES; n++) { + if (glamo->engine_state != GLAMO_ENGINE_DISABLED) + __glamo_engine_disable(glamo, n); + } + + /* enable self-refresh */ + + __reg_write(glamo, GLAMO_REG_MEM_DRAM1, + GLAMO_MEM_DRAM1_EN_DRAM_REFRESH | + GLAMO_MEM_DRAM1_EN_GATE_CKE | + GLAMO_MEM_DRAM1_SELF_REFRESH | + GLAMO_MEM_REFRESH_COUNT); + __reg_write(glamo, GLAMO_REG_MEM_DRAM1, + GLAMO_MEM_DRAM1_EN_MODEREG_SET | + GLAMO_MEM_DRAM1_EN_DRAM_REFRESH | + GLAMO_MEM_DRAM1_EN_GATE_CKE | + GLAMO_MEM_DRAM1_SELF_REFRESH | + GLAMO_MEM_REFRESH_COUNT); + + /* force RAM into deep powerdown */ + __reg_write(glamo, GLAMO_REG_MEM_DRAM2, + GLAMO_MEM_DRAM2_DEEP_PWRDOWN | + (7 << 6) | /* tRC */ + (1 << 4) | /* tRP */ + (1 << 2) | /* tRCD */ + 2); /* CAS latency */ + + /* disable clocks to memory */ + __reg_write(glamo, GLAMO_REG_CLOCK_MEMORY, 0); + + /* all dividers from OSCI */ + __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1, 0x400, 0x400); + + /* PLL2 into bypass */ + __reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 12, 1 << 12); + + __reg_write(glamo, GLAMO_BASIC_MMC_EN_TCLK_DLYA1, 0x0e00); + + /* kill PLLS 1 then 2 */ + __reg_write(glamo, GLAMO_REG_DFT_GEN5, 0x0001); + __reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 13, 1 << 13); + + spin_unlock(&glamo->lock); + + return 0; +} + +static int glamo_resume(struct device *dev) +{ + struct glamo_core *glamo = dev_get_drvdata(dev); + int n; + + (glamo->pdata->glamo_external_reset)(0); + udelay(10); + (glamo->pdata->glamo_external_reset)(1); + mdelay(5); + + spin_lock(&glamo->lock); + + glamo_run_script(glamo, glamo_init_script, + ARRAY_SIZE(glamo_init_script), 0); + + + for (n = 0; n < __NUM_GLAMO_ENGINES; n++) { + switch (glamo->engine_state[n]) { + case GLAMO_ENGINE_SUSPENDED: + __glamo_engine_suspend(glamo, n); + break; + case GLAMO_ENGINE_ENABLED: + __glamo_engine_enable(glamo, n); + break; + default: + break; + } + } + + __reg_write(glamo, GLAMO_REG_IRQ_ENABLE, glamo->saved_irq_mask); + + spin_unlock(&glamo->lock); + + return 0; +} + +static struct dev_pm_ops glamo_pm_ops = { + .suspend = glamo_suspend, + .resume = glamo_resume, + .poweroff = glamo_suspend, + .restore = glamo_resume, +}; + +#define GLAMO_PM_OPS (&glamo_pm_ops) + +#else +#define GLAMO_PM_OPS NULL +#endif + +static struct platform_driver glamo_driver = { + .probe = glamo_probe, + .remove = __devexit_p(glamo_remove), + .driver = { + .name = "glamo3362", + .owner = THIS_MODULE, + .pm = GLAMO_PM_OPS, + }, +}; + +static int __devinit glamo_init(void) +{ + return platform_driver_register(&glamo_driver); +} +module_init(glamo_init); + +static void __exit glamo_exit(void) +{ + platform_driver_unregister(&glamo_driver); +} +module_exit(glamo_exit); + +MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>"); +MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); +MODULE_DESCRIPTION("Smedia Glamo 3362 core/resource driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:glamo3362"); diff --git a/drivers/mfd/glamo/glamo-core.h 
b/drivers/mfd/glamo/glamo-core.h new file mode 100644 index 00000000000..17017b03fb5 --- /dev/null +++ b/drivers/mfd/glamo/glamo-core.h @@ -0,0 +1,61 @@ +#ifndef __GLAMO_CORE_H +#define __GLAMO_CORE_H + +#include <linux/mfd/glamo.h> + +/* for the time being, we put the on-screen framebuffer into the lowest + * VRAM space. This should make the code easily compatible with the various + * 2MB/4MB/8MB variants of the Smedia chips */ +#define GLAMO_OFFSET_VRAM 0x800000 +#define GLAMO_OFFSET_FB (GLAMO_OFFSET_VRAM) + +/* we only allocate the minimum possible size for the framebuffer to make + * sure we have sufficient memory for other functions of the chip */ +/*#define GLAMO_FB_SIZE (640*480*4) *//* == 0x12c000 */ +#define GLAMO_INTERNAL_RAM_SIZE 0x800000 +#define GLAMO_MMC_BUFFER_SIZE (64 * 1024) +#define GLAMO_FB_SIZE (GLAMO_INTERNAL_RAM_SIZE - GLAMO_MMC_BUFFER_SIZE) + +enum glamo_pll { + GLAMO_PLL1, + GLAMO_PLL2, +}; + +enum glamo_engine_state { + GLAMO_ENGINE_DISABLED, + GLAMO_ENGINE_SUSPENDED, + GLAMO_ENGINE_ENABLED, +}; + +struct glamo_core { + int irq; + int irq_base; + int irq_works; /* 0 means PCB does not support Glamo IRQ */ + struct resource *mem; + void __iomem *base; + struct platform_device *pdev; + struct glamo_platform_data *pdata; + enum glamo_engine_state engine_state[__NUM_GLAMO_ENGINES]; + spinlock_t lock; + uint16_t saved_irq_mask; +}; + +struct glamo_script { + uint16_t reg; + uint16_t val; +}; + +int glamo_pll_rate(struct glamo_core *glamo, enum glamo_pll pll); + +int glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine); +int glamo_engine_suspend(struct glamo_core *glamo, enum glamo_engine engine); +int glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine); +void glamo_engine_reset(struct glamo_core *glamo, enum glamo_engine engine); +int glamo_engine_reclock(struct glamo_core *glamo, + enum glamo_engine engine, int ps); + +void glamo_reg_read_batch(struct glamo_core *glamo, uint16_t reg, + uint16_t count, uint16_t *values); +void glamo_reg_write_batch(struct glamo_core *glamo, uint16_t reg, + uint16_t count, uint16_t *values); +#endif /* __GLAMO_CORE_H */ diff --git a/drivers/mfd/glamo/glamo-fb.c b/drivers/mfd/glamo/glamo-fb.c new file mode 100644 index 00000000000..0eac3ac442b --- /dev/null +++ b/drivers/mfd/glamo/glamo-fb.c @@ -0,0 +1,982 @@ +/* Smedia Glamo 336x/337x driver + * + * (C) 2007-2008 by Openmoko, Inc. + * Author: Harald Welte <laforge@openmoko.org> + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/delay.h> +#include <linux/fb.h> +#include <linux/console.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/io.h> +#include <linux/mfd/glamo.h> + +#include <asm/div64.h> + +#ifdef CONFIG_PM +#include <linux/pm.h> +#endif + +#include <linux/glamofb.h> + +#include "glamo-regs.h" +#include "glamo-core.h" + +static void glamofb_program_mode(struct glamofb_handle *glamo); + +struct glamofb_handle { + struct glamo_core *core; + struct fb_info *fb; + struct device *dev; + struct resource *reg; + struct resource *fb_res; + char __iomem *base; + struct glamo_fb_platform_data *mach_info; + char __iomem *cursor_addr; + int cursor_on; + u_int32_t pseudo_pal[16]; + spinlock_t lock_cmd; + int blank_mode; + int mode_set; /* 0 if the current display mode hasn't been set on the glamo */ + int output_enabled; /* 0 if the video output is disabled */ +}; + +static void glamo_output_enable(struct glamofb_handle *gfb) +{ + struct glamo_core *gcore = gfb->core; + + if (gfb->output_enabled) + return; + + /* enable the pixel clock if off */ + glamo_engine_enable(gcore, GLAMO_ENGINE_LCD); + + gfb->output_enabled = 1; + if (!gfb->mode_set) + glamofb_program_mode(gfb); +} + +static void glamo_output_disable(struct glamofb_handle *gfb) +{ + struct glamo_core *gcore = gfb->core; + + if (!gfb->output_enabled) + return; + + /* enable the pixel clock if off */ + glamo_engine_suspend(gcore, GLAMO_ENGINE_LCD); + + gfb->output_enabled = 0; +} + + +static int reg_read(struct glamofb_handle *glamo, + u_int16_t reg) +{ + int i = 0; + + for (i = 0; i != 2; i++) + nop(); + + return readw(glamo->base + reg); +} + +static void reg_write(struct glamofb_handle *glamo, + uint16_t reg, uint16_t val) +{ + int i = 0; + + for (i = 0; i != 2; i++) + nop(); + + writew(val, glamo->base + reg); +} + +static struct glamo_script glamo_regs[] = { + { GLAMO_REG_LCD_MODE1, 0x0020 }, + /* no display rotation, no hardware cursor, no dither, no gamma, + * no retrace flip, vsync low-active, hsync low active, + * no TVCLK, no partial display, hw dest color from fb, + * no partial display mode, LCD1, software flip, */ + { GLAMO_REG_LCD_MODE2, 0x9020 }, + /* video flip, no ptr, no ptr, dhclk off, + * normal mode, no cpuif, + * res, serial msb first, single fb, no fr ctrl, + * cpu if bits all zero, no crc + * 0000 0000 0010 0000 */ + { GLAMO_REG_LCD_MODE3, 0x0b40 }, + /* src data rgb565, res, 18bit rgb666 + * 000 01 011 0100 0000 */ + { GLAMO_REG_LCD_POLARITY, 0x440c }, + /* DE high active, no cpu/lcd if, cs0 force low, a0 low active, + * np cpu if, 9bit serial data, sclk rising edge latch data + * 01 00 0 100 0 000 01 0 0 */ + /* The following values assume 640*480@16bpp */ + { GLAMO_REG_LCD_A_BASE1, 0x0000 }, /* display A base address 15:0 */ + { GLAMO_REG_LCD_A_BASE2, 0x0000 }, /* display A base address 22:16 */ + { GLAMO_REG_LCD_CURSOR_BASE1, 0xC000 }, /* cursor base address 15:0 */ + { GLAMO_REG_LCD_CURSOR_BASE2, 0x0012 }, /* cursor base address 22:16 */ + { GLAMO_REG_LCD_COMMAND2, 0x0000 }, /* display page A */ +}; + +static int glamofb_run_script(struct glamofb_handle *glamo, + struct glamo_script *script, int len) +{ + int i; + + for (i = 0; i < len; 
i++) { + struct glamo_script *line = &script[i]; + + if (line->reg == 0xffff) + return 0; + else if (line->reg == 0xfffe) + msleep(line->val); + else + reg_write(glamo, script[i].reg, script[i].val); + } + + return 0; +} + +static int glamofb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + struct glamofb_handle *glamo = info->par; + + if (var->bits_per_pixel != 16) + var->bits_per_pixel = 16; + + var->height = glamo->mach_info->height; + var->width = glamo->mach_info->width; + + /* FIXME: set rgb positions */ + switch (var->bits_per_pixel) { + case 16: + switch (reg_read(glamo, GLAMO_REG_LCD_MODE3) & 0xc000) { + case GLAMO_LCD_SRC_RGB565: + var->red.offset = 11; + var->green.offset = 5; + var->blue.offset = 0; + var->red.length = 5; + var->green.length = 6; + var->blue.length = 5; + var->transp.length = 0; + break; + case GLAMO_LCD_SRC_ARGB1555: + var->transp.offset = 15; + var->red.offset = 10; + var->green.offset = 5; + var->blue.offset = 0; + var->transp.length = 1; + var->red.length = 5; + var->green.length = 5; + var->blue.length = 5; + break; + case GLAMO_LCD_SRC_ARGB4444: + var->transp.offset = 12; + var->red.offset = 8; + var->green.offset = 4; + var->blue.offset = 0; + var->transp.length = 4; + var->red.length = 4; + var->green.length = 4; + var->blue.length = 4; + break; + } + break; + case 24: + case 32: + default: + /* The Smedia Glamo doesn't support anything but 16bit color */ + printk(KERN_ERR + "Smedia driver does not [yet?] support 24/32bpp\n"); + return -EINVAL; + } + + return 0; +} + +static void reg_set_bit_mask(struct glamofb_handle *glamo, + uint16_t reg, uint16_t mask, + uint16_t val) +{ + u_int16_t tmp; + + val &= mask; + + tmp = reg_read(glamo, reg); + tmp &= ~mask; + tmp |= val; + reg_write(glamo, reg, tmp); +} + +#define GLAMO_LCD_WIDTH_MASK 0x03FF +#define GLAMO_LCD_HEIGHT_MASK 0x03FF +#define GLAMO_LCD_PITCH_MASK 0x07FE +#define GLAMO_LCD_HV_TOTAL_MASK 0x03FF +#define GLAMO_LCD_HV_RETR_START_MASK 0x03FF +#define GLAMO_LCD_HV_RETR_END_MASK 0x03FF +#define GLAMO_LCD_HV_RETR_DISP_START_MASK 0x03FF +#define GLAMO_LCD_HV_RETR_DISP_END_MASK 0x03FF + +/* the caller has to enxure lock_cmd is held and we are in cmd mode */ +static void __rotate_lcd(struct glamofb_handle *glamo, __u32 rotation) +{ + int glamo_rot; + + switch (rotation) { + case FB_ROTATE_CW: + glamo_rot = GLAMO_LCD_ROT_MODE_90; + break; + case FB_ROTATE_UD: + glamo_rot = GLAMO_LCD_ROT_MODE_180; + break; + case FB_ROTATE_CCW: + glamo_rot = GLAMO_LCD_ROT_MODE_270; + break; + default: + glamo_rot = GLAMO_LCD_ROT_MODE_0; + break; + } + + reg_set_bit_mask(glamo, + GLAMO_REG_LCD_WIDTH, + GLAMO_LCD_ROT_MODE_MASK, + glamo_rot); + reg_set_bit_mask(glamo, + GLAMO_REG_LCD_MODE1, + GLAMO_LCD_MODE1_ROTATE_EN, + (glamo_rot != GLAMO_LCD_ROT_MODE_0) ? 
+ GLAMO_LCD_MODE1_ROTATE_EN : 0); +} + +static void glamofb_program_mode(struct glamofb_handle *gfb) +{ + int sync, bp, disp, fp, total; + unsigned long flags; + struct glamo_core *gcore = gfb->core; + struct fb_var_screeninfo *var = &gfb->fb->var; + + dev_dbg(&gcore->pdev->dev, + "glamofb_program_mode spin_lock_irqsave\n"); + spin_lock_irqsave(&gfb->lock_cmd, flags); + + if (glamofb_cmd_mode(gfb, 1)) + goto out_unlock; + + if (var->pixclock) + glamo_engine_reclock(gcore, GLAMO_ENGINE_LCD, + (1000000000UL / gfb->fb->var.pixclock) * 1000); + + reg_set_bit_mask(gfb, + GLAMO_REG_LCD_WIDTH, + GLAMO_LCD_WIDTH_MASK, + var->xres); + reg_set_bit_mask(gfb, + GLAMO_REG_LCD_HEIGHT, + GLAMO_LCD_HEIGHT_MASK, + var->yres); + reg_set_bit_mask(gfb, + GLAMO_REG_LCD_PITCH, + GLAMO_LCD_PITCH_MASK, + gfb->fb->fix.line_length); + + /* honour the rotation request */ + __rotate_lcd(gfb, var->rotate); + + /* update scannout timings */ + sync = 0; + bp = sync + var->hsync_len; + disp = bp + var->left_margin; + fp = disp + var->xres; + total = fp + var->right_margin; + + reg_set_bit_mask(gfb, GLAMO_REG_LCD_HORIZ_TOTAL, + GLAMO_LCD_HV_TOTAL_MASK, total); + reg_set_bit_mask(gfb, GLAMO_REG_LCD_HORIZ_RETR_START, + GLAMO_LCD_HV_RETR_START_MASK, sync); + reg_set_bit_mask(gfb, GLAMO_REG_LCD_HORIZ_RETR_END, + GLAMO_LCD_HV_RETR_END_MASK, bp); + reg_set_bit_mask(gfb, GLAMO_REG_LCD_HORIZ_DISP_START, + GLAMO_LCD_HV_RETR_DISP_START_MASK, disp); + reg_set_bit_mask(gfb, GLAMO_REG_LCD_HORIZ_DISP_END, + GLAMO_LCD_HV_RETR_DISP_END_MASK, fp); + + sync = 0; + bp = sync + var->vsync_len; + disp = bp + var->upper_margin; + fp = disp + var->yres; + total = fp + var->lower_margin; + + reg_set_bit_mask(gfb, GLAMO_REG_LCD_VERT_TOTAL, + GLAMO_LCD_HV_TOTAL_MASK, total); + reg_set_bit_mask(gfb, GLAMO_REG_LCD_VERT_RETR_START, + GLAMO_LCD_HV_RETR_START_MASK, sync); + reg_set_bit_mask(gfb, GLAMO_REG_LCD_VERT_RETR_END, + GLAMO_LCD_HV_RETR_END_MASK, bp); + reg_set_bit_mask(gfb, GLAMO_REG_LCD_VERT_DISP_START, + GLAMO_LCD_HV_RETR_DISP_START_MASK, disp); + reg_set_bit_mask(gfb, GLAMO_REG_LCD_VERT_DISP_END, + GLAMO_LCD_HV_RETR_DISP_END_MASK, fp); + + glamofb_cmd_mode(gfb, 0); + + gfb->mode_set = 1; + +out_unlock: + dev_dbg(&gcore->pdev->dev, + "glamofb_program_mode spin_unlock_irqrestore\n"); + spin_unlock_irqrestore(&gfb->lock_cmd, flags); +} + + +static int glamofb_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return 0; +} + +static struct fb_videomode *glamofb_find_mode(struct fb_info *info, + struct fb_var_screeninfo *var) +{ + struct glamofb_handle *glamo = info->par; + struct glamo_fb_platform_data *pdata = glamo->mach_info; + struct fb_videomode *mode; + int i; + + for (i = pdata->num_modes, mode = pdata->modes; i; --i, ++mode) { + if (mode->xres == var->xres && + mode->yres == var->yres) + return mode; + } + + return NULL; +} + +static int glamofb_set_par(struct fb_info *info) +{ + struct glamofb_handle *glamo = info->par; + struct fb_var_screeninfo *var = &info->var; + struct fb_videomode *mode; + + mode = glamofb_find_mode(info, var); + if (!mode) + return -EINVAL; + + fb_videomode_to_var(var, mode); + + info->mode = mode; + + glamo->mode_set = 0; + + switch (var->rotate) { + case FB_ROTATE_CW: + case FB_ROTATE_CCW: + info->fix.line_length = (var->yres * var->bits_per_pixel) / 8; + /* FIXME: Limit pixelclock */ + var->pixclock *= 2; + break; + default: + info->fix.line_length = (var->xres * var->bits_per_pixel) / 8; + break; + } + + if (glamo->output_enabled) + glamofb_program_mode(glamo); + + return 0; +} + +static 
int glamofb_blank(int blank_mode, struct fb_info *info) +{ + struct glamofb_handle *gfb = info->par; + + dev_dbg(gfb->dev, "glamofb_blank(%u)\n", blank_mode); + + switch (blank_mode) { + case FB_BLANK_VSYNC_SUSPEND: + case FB_BLANK_HSYNC_SUSPEND: + /* FIXME: add pdata hook/flag to indicate whether + * we should already switch off pixel clock here */ + break; + case FB_BLANK_POWERDOWN: + /* disable the pixel clock */ + glamo_output_disable(gfb); + gfb->blank_mode = blank_mode; + break; + case FB_BLANK_UNBLANK: + case FB_BLANK_NORMAL: + glamo_output_enable(gfb); + gfb->blank_mode = blank_mode; + break; + } + + /* FIXME: once we have proper clock management in glamo-core, + * we can determine if other units need MCLK1 or the PLL, and + * disable it if not used. */ + return 0; +} + +static inline unsigned int chan_to_field(unsigned int chan, + struct fb_bitfield *bf) +{ + chan &= 0xffff; + chan >>= 16 - bf->length; + return chan << bf->offset; +} + +static int glamofb_setcolreg(unsigned regno, + unsigned red, unsigned green, unsigned blue, + unsigned transp, struct fb_info *info) +{ + struct glamofb_handle *glamo = info->par; + unsigned int val; + + switch (glamo->fb->fix.visual) { + case FB_VISUAL_TRUECOLOR: + case FB_VISUAL_DIRECTCOLOR: + /* true-colour, use pseuo-palette */ + + if (regno < 16) { + u32 *pal = glamo->fb->pseudo_palette; + + val = chan_to_field(red, &glamo->fb->var.red); + val |= chan_to_field(green, &glamo->fb->var.green); + val |= chan_to_field(blue, &glamo->fb->var.blue); + + pal[regno] = val; + }; + break; + default: + return 1; /* unknown type */ + } + + return 0; +} + +static int glamofb_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) +{ + struct glamofb_handle *gfb = (struct glamofb_handle *)info->par; + struct glamo_core *gcore = gfb->core; + int retval = -ENOTTY; + + switch (cmd) { + case GLAMOFB_ENGINE_ENABLE: + retval = glamo_engine_enable(gcore, arg); + break; + case GLAMOFB_ENGINE_DISABLE: + retval = glamo_engine_disable(gcore, arg); + break; + case GLAMOFB_ENGINE_RESET: + glamo_engine_reset(gcore, arg); + retval = 0; + break; + default: + break; + } + + return retval; +} + + +#ifdef CONFIG_MFD_GLAMO_HWACCEL +static inline void glamofb_vsync_wait(struct glamofb_handle *glamo, + int line, int size, int range) +{ + int count[2]; + + do { + count[0] = reg_read(glamo, GLAMO_REG_LCD_STATUS2) & 0x3ff; + count[1] = reg_read(glamo, GLAMO_REG_LCD_STATUS2) & 0x3ff; + } while (count[0] != count[1] || + (line < count[0] + range && + size > count[0] - range) || + count[0] < range * 2); +} + +/* + * Enable/disable the hardware cursor mode altogether + * (for blinking and such, use glamofb_cursor()). + */ +static void glamofb_cursor_onoff(struct glamofb_handle *glamo, int on) +{ + int y, size; + + if (glamo->cursor_on) { + y = reg_read(glamo, GLAMO_REG_LCD_CURSOR_Y_POS); + size = reg_read(glamo, GLAMO_REG_LCD_CURSOR_Y_SIZE); + + glamofb_vsync_wait(glamo, y, size, 30); + } + + reg_set_bit_mask(glamo, GLAMO_REG_LCD_MODE1, + GLAMO_LCD_MODE1_CURSOR_EN, + on ? GLAMO_LCD_MODE1_CURSOR_EN : 0); + glamo->cursor_on = on; + + /* Hide the cursor by default */ + reg_write(glamo, GLAMO_REG_LCD_CURSOR_X_SIZE, 0); +} + +static int glamofb_cursor(struct fb_info *info, struct fb_cursor *cursor) +{ + struct glamofb_handle *glamo = info->par; + unsigned long flags; + + spin_lock_irqsave(&glamo->lock_cmd, flags); + + reg_write(glamo, GLAMO_REG_LCD_CURSOR_X_SIZE, + cursor->enable ? 
cursor->image.width : 0); + + if (cursor->set & FB_CUR_SETPOS) { + reg_write(glamo, GLAMO_REG_LCD_CURSOR_X_POS, + cursor->image.dx); + reg_write(glamo, GLAMO_REG_LCD_CURSOR_Y_POS, + cursor->image.dy); + } + + if (cursor->set & FB_CUR_SETCMAP) { + uint16_t fg = glamo->pseudo_pal[cursor->image.fg_color]; + uint16_t bg = glamo->pseudo_pal[cursor->image.bg_color]; + + reg_write(glamo, GLAMO_REG_LCD_CURSOR_FG_COLOR, fg); + reg_write(glamo, GLAMO_REG_LCD_CURSOR_BG_COLOR, bg); + reg_write(glamo, GLAMO_REG_LCD_CURSOR_DST_COLOR, fg); + } + + if (cursor->set & FB_CUR_SETHOT) + reg_write(glamo, GLAMO_REG_LCD_CURSOR_PRESET, + (cursor->hot.x << 8) | cursor->hot.y); + + if ((cursor->set & FB_CUR_SETSIZE) || + (cursor->set & (FB_CUR_SETIMAGE | FB_CUR_SETSHAPE))) { + int x, y, pitch, op; + const uint8_t *pcol = cursor->image.data; + const uint8_t *pmsk = cursor->mask; + uint8_t __iomem *dst = glamo->cursor_addr; + uint8_t dcol = 0; + uint8_t dmsk = 0; + uint8_t byte = 0; + + if (cursor->image.depth > 1) { + spin_unlock_irqrestore(&glamo->lock_cmd, flags); + return -EINVAL; + } + + pitch = ((cursor->image.width + 7) >> 2) & ~1; + reg_write(glamo, GLAMO_REG_LCD_CURSOR_PITCH, + pitch); + reg_write(glamo, GLAMO_REG_LCD_CURSOR_Y_SIZE, + cursor->image.height); + + for (y = 0; y < cursor->image.height; y++) { + byte = 0; + for (x = 0; x < cursor->image.width; x++) { + if ((x % 8) == 0) { + dcol = *pcol++; + dmsk = *pmsk++; + } else { + dcol >>= 1; + dmsk >>= 1; + } + + if (cursor->rop == ROP_COPY) + op = (dmsk & 1) ? + (dcol & 1) ? 1 : 3 : 0; + else + op = ((dmsk & 1) << 1) | + ((dcol & 1) << 0); + byte |= op << ((x & 3) << 1); + + if (x % 4 == 3) { + writeb(byte, dst + x / 4); + byte = 0; + } + } + if (x % 4) { + writeb(byte, dst + x / 4); + byte = 0; + } + + dst += pitch; + } + } + + spin_unlock_irqrestore(&glamo->lock_cmd, flags); + + return 0; +} +#endif + +static inline int glamofb_cmdq_empty(struct glamofb_handle *gfb) +{ + /* DGCMdQempty -- 1 == command queue is empty */ + return reg_read(gfb, GLAMO_REG_LCD_STATUS1) & (1 << 15); +} + +/* call holding gfb->lock_cmd when locking, until you unlock */ +int glamofb_cmd_mode(struct glamofb_handle *gfb, int on) +{ + int timeout = 2000000; + + dev_dbg(gfb->dev, "glamofb_cmd_mode(gfb=%p, on=%d)\n", gfb, on); + if (on) { + dev_dbg(gfb->dev, "%s: waiting for cmdq empty: ", + __func__); + while (!glamofb_cmdq_empty(gfb) && (timeout--)) + cpu_relax(); + if (timeout < 0) { + printk(KERN_ERR "glamofb cmd_queue never got empty\n"); + return -EIO; + } + dev_dbg(gfb->dev, "empty!\n"); + + /* display the entire frame then switch to command */ + reg_write(gfb, GLAMO_REG_LCD_COMMAND1, + GLAMO_LCD_CMD_TYPE_DISP | + GLAMO_LCD_CMD_DATA_FIRE_VSYNC); + + /* wait until lcd idle */ + dev_dbg(gfb->dev, "waiting for lcd idle: "); + timeout = 2000000; + while (!(reg_read(gfb, GLAMO_REG_LCD_STATUS2) & (1 << 12)) && + (timeout--)) + cpu_relax(); + if (timeout < 0) { + printk(KERN_ERR"*************" + "glamofb lcd never idle" + "*************\n"); + return -EIO; + } + + mdelay(100); + + dev_dbg(gfb->dev, "cmd mode entered\n"); + + } else { + /* RGB interface needs vsync/hsync */ + if (reg_read(gfb, GLAMO_REG_LCD_MODE3) & GLAMO_LCD_MODE3_RGB) + reg_write(gfb, GLAMO_REG_LCD_COMMAND1, + GLAMO_LCD_CMD_TYPE_DISP | + GLAMO_LCD_CMD_DATA_DISP_SYNC); + + reg_write(gfb, GLAMO_REG_LCD_COMMAND1, + GLAMO_LCD_CMD_TYPE_DISP | + GLAMO_LCD_CMD_DATA_DISP_FIRE); + } + + return 0; +} +EXPORT_SYMBOL_GPL(glamofb_cmd_mode); + + +int glamofb_cmd_write(struct glamofb_handle *gfb, u_int16_t val) +{ + int timeout = 
200000; + + dev_dbg(gfb->dev, "%s: waiting for cmdq empty\n", __func__); + while ((!glamofb_cmdq_empty(gfb)) && (timeout--)) + yield(); + if (timeout < 0) { + printk(KERN_ERR"*************" + "glamofb cmd_queue never got empty" + "*************\n"); + return 1; + } + dev_dbg(gfb->dev, "idle, writing 0x%04x\n", val); + + reg_write(gfb, GLAMO_REG_LCD_COMMAND1, val); + + return 0; +} +EXPORT_SYMBOL_GPL(glamofb_cmd_write); + +static struct fb_ops glamofb_ops = { + .owner = THIS_MODULE, + .fb_check_var = glamofb_check_var, + .fb_pan_display = glamofb_pan_display, + .fb_set_par = glamofb_set_par, + .fb_blank = glamofb_blank, + .fb_setcolreg = glamofb_setcolreg, + .fb_ioctl = glamofb_ioctl, +#ifdef CONFIG_MFD_GLAMO_HWACCEL + .fb_cursor = glamofb_cursor, +#endif + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, +}; + +static int glamofb_init_regs(struct glamofb_handle *glamo) +{ + struct fb_info *info = glamo->fb; + + glamofb_check_var(&info->var, info); + glamofb_run_script(glamo, glamo_regs, ARRAY_SIZE(glamo_regs)); + glamofb_set_par(info); + + return 0; +} + +static int __init glamofb_probe(struct platform_device *pdev) +{ + int rc = -EIO; + struct fb_info *fbinfo; + struct glamofb_handle *glamofb; + struct glamo_core *core = dev_get_drvdata(pdev->dev.parent); + struct glamo_fb_platform_data *mach_info; + + printk(KERN_INFO "SMEDIA Glamo frame buffer driver (C) 2007 " + "Openmoko, Inc.\n"); + + if (!core->pdata || !core->pdata->fb_data) + return -ENOENT; + + + fbinfo = framebuffer_alloc(sizeof(struct glamofb_handle), &pdev->dev); + if (!fbinfo) + return -ENOMEM; + + + glamofb = fbinfo->par; + glamofb->fb = fbinfo; + glamofb->dev = &pdev->dev; + + glamofb->blank_mode = FB_BLANK_POWERDOWN; + + strcpy(fbinfo->fix.id, "SMedia Glamo"); + + glamofb->reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "glamo-fb-regs"); + if (!glamofb->reg) { + dev_err(&pdev->dev, "platform device with no registers?\n"); + rc = -ENOENT; + goto out_free; + } + + glamofb->fb_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "glamo-fb-mem"); + if (!glamofb->fb_res) { + dev_err(&pdev->dev, "platform device with no memory ?\n"); + rc = -ENOENT; + goto out_free; + } + + glamofb->reg = request_mem_region(glamofb->reg->start, + resource_size(glamofb->reg), + pdev->name); + if (!glamofb->reg) { + dev_err(&pdev->dev, "failed to request mmio region\n"); + goto out_free; + } + + glamofb->fb_res = request_mem_region(glamofb->fb_res->start, + resource_size(glamofb->fb_res), + pdev->name); + if (!glamofb->fb_res) { + dev_err(&pdev->dev, "failed to request vram region\n"); + goto out_release_reg; + } + + /* we want to remap only the registers required for this core + * driver. 
*/ + glamofb->base = ioremap_nocache(glamofb->reg->start, + resource_size(glamofb->reg)); + if (!glamofb->base) { + dev_err(&pdev->dev, "failed to ioremap() mmio memory\n"); + goto out_release_fb; + } + + fbinfo->fix.smem_start = (unsigned long) glamofb->fb_res->start; + fbinfo->fix.smem_len = (__u32) resource_size(glamofb->fb_res); + + fbinfo->screen_base = ioremap(glamofb->fb_res->start, + resource_size(glamofb->fb_res)); + if (!fbinfo->screen_base) { + dev_err(&pdev->dev, "failed to ioremap() vram memory\n"); + goto out_release_fb; + } + glamofb->cursor_addr = fbinfo->screen_base + 0x12C000; + + platform_set_drvdata(pdev, glamofb); + + mach_info = core->pdata->fb_data; + glamofb->core = core; + glamofb->mach_info = mach_info; + + fbinfo->fix.visual = FB_VISUAL_TRUECOLOR; + fbinfo->fix.type = FB_TYPE_PACKED_PIXELS; + fbinfo->fix.type_aux = 0; + fbinfo->fix.xpanstep = 0; + fbinfo->fix.ypanstep = 0; + fbinfo->fix.ywrapstep = 0; + fbinfo->fix.accel = FB_ACCEL_GLAMO; + + + fbinfo->fbops = &glamofb_ops; + fbinfo->flags = FBINFO_FLAG_DEFAULT; + fbinfo->pseudo_palette = &glamofb->pseudo_pal; + + fbinfo->mode = mach_info->modes; + fb_videomode_to_var(&fbinfo->var, fbinfo->mode); + fbinfo->var.bits_per_pixel = 16; + fbinfo->var.nonstd = 0; + fbinfo->var.activate = FB_ACTIVATE_NOW; + fbinfo->var.height = mach_info->height; + fbinfo->var.width = mach_info->width; + fbinfo->var.accel_flags = 0; + fbinfo->var.vmode = FB_VMODE_NONINTERLACED; + + glamo_engine_enable(core, GLAMO_ENGINE_LCD); + glamo_engine_reset(core, GLAMO_ENGINE_LCD); + glamofb->output_enabled = 1; + glamofb->mode_set = 1; + + dev_info(&pdev->dev, "spin_lock_init\n"); + spin_lock_init(&glamofb->lock_cmd); + glamofb_init_regs(glamofb); +#ifdef CONFIG_MFD_GLAMO_HWACCEL + glamofb_cursor_onoff(glamofb, 1); +#endif + + fb_videomode_to_modelist(mach_info->modes, mach_info->num_modes, + &fbinfo->modelist); + + rc = register_framebuffer(fbinfo); + if (rc < 0) { + dev_err(&pdev->dev, "failed to register framebuffer\n"); + goto out_unmap_fb; + } + + printk(KERN_INFO "fb%d: %s frame buffer device\n", + fbinfo->node, fbinfo->fix.id); + + return 0; + +out_unmap_fb: + iounmap(fbinfo->screen_base); + iounmap(glamofb->base); +out_release_fb: + release_mem_region(glamofb->fb_res->start, + resource_size(glamofb->fb_res)); +out_release_reg: + release_mem_region(glamofb->reg->start, + resource_size(glamofb->reg)); +out_free: + framebuffer_release(fbinfo); + return rc; +} + +static int glamofb_remove(struct platform_device *pdev) +{ + struct glamofb_handle *glamofb = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + iounmap(glamofb->base); + release_mem_region(glamofb->reg->start, resource_size(glamofb->reg)); + + framebuffer_release(glamofb->fb); + + return 0; +} + +#ifdef CONFIG_PM + +static int glamofb_suspend(struct device *dev) +{ + struct glamofb_handle *gfb = dev_get_drvdata(dev); + + acquire_console_sem(); + fb_set_suspend(gfb->fb, 1); + release_console_sem(); + + /* seriously -- nobody is allowed to touch glamo memory when we + * are suspended or we lock on nWAIT + */ + /* iounmap(gfb->fb->screen_base); */ + + return 0; +} + +static int glamofb_resume(struct device *dev) +{ + struct glamofb_handle *gfb = dev_get_drvdata(dev); + + /* OK let's allow framebuffer ops again */ + /* gfb->fb->screen_base = ioremap(gfb->fb_res->start, + resource_size(gfb->fb_res)); */ + glamo_engine_enable(gfb->core, GLAMO_ENGINE_LCD); + glamo_engine_reset(gfb->core, GLAMO_ENGINE_LCD); + + glamofb_init_regs(gfb); +#ifdef CONFIG_MFD_GLAMO_HWACCEL + 
glamofb_cursor_onoff(gfb, 1); +#endif + + acquire_console_sem(); + fb_set_suspend(gfb->fb, 0); + release_console_sem(); + + return 0; +} + +static struct dev_pm_ops glamofb_pm_ops = { + .suspend = glamofb_suspend, + .resume = glamofb_resume, +}; + +#define GLAMOFB_PM_OPS (&glamofb_pm_ops) + +#else +#define GLAMOFB_PM_OPS NULL +#endif + +static struct platform_driver glamofb_driver = { + .probe = glamofb_probe, + .remove = glamofb_remove, + .driver = { + .name = "glamo-fb", + .owner = THIS_MODULE, + .pm = GLAMOFB_PM_OPS + }, +}; + +static int __devinit glamofb_init(void) +{ + return platform_driver_register(&glamofb_driver); +} + +static void __exit glamofb_cleanup(void) +{ + platform_driver_unregister(&glamofb_driver); +} + +module_init(glamofb_init); +module_exit(glamofb_cleanup); + +MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>"); +MODULE_DESCRIPTION("Smedia Glamo 336x/337x framebuffer driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/glamo/glamo-gpio.c b/drivers/mfd/glamo/glamo-gpio.c new file mode 100644 index 00000000000..30ee6ed1d9a --- /dev/null +++ b/drivers/mfd/glamo/glamo-gpio.c @@ -0,0 +1,285 @@ +/* Smedia Glamo 336x/337x gpio driver + * + * (C) 2009 Lars-Peter Clausen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <linux/io.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include <linux/gpio.h> +#include <linux/mfd/glamo.h> + +#include "glamo-core.h" +#include "glamo-regs.h" + +#define GLAMO_NR_GPIO 21 +#define GLAMO_NR_GPIO_REGS DIV_ROUND_UP(GLAMO_NR_GPIO, 4) + +#define GLAMO_REG_GPIO(x) (((x) * 2) + GLAMO_REG_GPIO_GEN1) + +struct glamo_gpio { + struct glamo_core *glamo; + struct gpio_chip chip; + uint16_t saved_regs[GLAMO_NR_GPIO_REGS]; +}; + +#define REG_OF_GPIO(gpio) (GLAMO_REG_GPIO(gpio >> 2)) +#define NUM_OF_GPIO(gpio) (gpio & 0x3) +#define DIRECTION_BIT(gpio) (1 << (NUM_OF_GPIO(gpio) + 0)) +#define OUTPUT_BIT(gpio) (1 << (NUM_OF_GPIO(gpio) + 4)) +#define INPUT_BIT(gpio) (1 << (NUM_OF_GPIO(gpio) + 8)) +#define FUNC_BIT(gpio) (1 << (NUM_OF_GPIO(gpio) + 12)) + + +static inline struct glamo_core *chip_to_glamo(struct gpio_chip *chip) +{ + return container_of(chip, struct glamo_gpio, chip)->glamo; +} + +static void glamo_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +{ + struct glamo_core *glamo = chip_to_glamo(chip); + unsigned int reg = REG_OF_GPIO(offset); + u_int16_t tmp; + + spin_lock(&glamo->lock); + tmp = readw(glamo->base + reg); + if (value) + tmp |= OUTPUT_BIT(offset); + else + tmp &= ~OUTPUT_BIT(offset); + writew(tmp, glamo->base + reg); + spin_unlock(&glamo->lock); +} + +static int glamo_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + struct glamo_core *glamo = chip_to_glamo(chip); + return !!(readw(glamo->base + 
REG_OF_GPIO(offset)) & INPUT_BIT(offset)); +} + +static int glamo_gpio_request(struct gpio_chip *chip, unsigned offset) +{ + struct glamo_core *glamo = chip_to_glamo(chip); + unsigned int reg = REG_OF_GPIO(offset); + u_int16_t tmp; + + spin_lock(&glamo->lock); + tmp = readw(glamo->base + reg); + if ((tmp & FUNC_BIT(offset)) == 0) { + tmp |= FUNC_BIT(offset); + writew(tmp, glamo->base + reg); + } + spin_unlock(&glamo->lock); + + return 0; +} + +static void glamo_gpio_free(struct gpio_chip *chip, unsigned offset) +{ + struct glamo_core *glamo = chip_to_glamo(chip); + unsigned int reg = REG_OF_GPIO(offset); + u_int16_t tmp; + + spin_lock(&glamo->lock); + tmp = readw(glamo->base + reg); + if ((tmp & FUNC_BIT(offset)) == 1) { + tmp &= ~FUNC_BIT(offset); + writew(tmp, glamo->base + reg); + } + spin_unlock(&glamo->lock); +} + +static int glamo_gpio_direction_output(struct gpio_chip *chip, unsigned offset, + int value) +{ + struct glamo_core *glamo = chip_to_glamo(chip); + unsigned int reg = REG_OF_GPIO(offset); + u_int16_t tmp; + + spin_lock(&glamo->lock); + tmp = readw(glamo->base + reg); + tmp &= ~DIRECTION_BIT(offset); + + if (value) + tmp |= OUTPUT_BIT(offset); + else + tmp &= ~OUTPUT_BIT(offset); + + writew(tmp, glamo->base + reg); + spin_unlock(&glamo->lock); + + return 0; +} + +static int glamo_gpio_direction_input(struct gpio_chip *chip, unsigned offset) +{ + struct glamo_core *glamo = chip_to_glamo(chip); + unsigned int reg = REG_OF_GPIO(offset); + u_int16_t tmp; + + spin_lock(&glamo->lock); + tmp = readw(glamo->base + reg); + if ((tmp & DIRECTION_BIT(offset)) == 0) { + tmp |= DIRECTION_BIT(offset); + writew(tmp, glamo->base + reg); + } + spin_unlock(&glamo->lock); + + return 0; +} + +static const struct __devinit gpio_chip glamo_gpio_chip = { + .label = "glamo", + .request = glamo_gpio_request, + .free = glamo_gpio_free, + .direction_input = glamo_gpio_direction_input, + .get = glamo_gpio_get, + .direction_output = glamo_gpio_direction_output, + .set = glamo_gpio_set, + .base = -1, + .ngpio = GLAMO_NR_GPIO, + .can_sleep = 0, + .owner = THIS_MODULE, +}; + +static int __devinit glamo_gpio_probe(struct platform_device *pdev) +{ + struct glamo_platform_data *pdata = pdev->dev.parent->platform_data; + struct glamo_gpio *glamo_gpio; + int ret; + + glamo_gpio = kzalloc(sizeof(struct glamo_gpio), GFP_KERNEL); + if (!glamo_gpio) + return -ENOMEM; + + glamo_gpio->glamo = dev_get_drvdata(pdev->dev.parent); + glamo_gpio->chip = glamo_gpio_chip; + glamo_gpio->chip.dev = &pdev->dev; + if (pdata && pdata->gpio_data) + glamo_gpio->chip.base = pdata->gpio_data->base; + + ret = gpiochip_add(&glamo_gpio->chip); + + if (ret) { + dev_err(&pdev->dev, "Could not register gpio chip: %d\n", ret); + goto err; + } + + platform_set_drvdata(pdev, glamo_gpio); + + if (pdata && pdata->gpio_data && pdata->gpio_data->registered) + pdata->gpio_data->registered(&pdev->dev); + + return 0; +err: + kfree(glamo_gpio); + return ret; +} + +static int __devexit glamo_gpio_remove(struct platform_device *pdev) +{ + struct glamo_gpio *glamo_gpio = platform_get_drvdata(pdev); + int ret; + + ret = gpiochip_remove(&glamo_gpio->chip); + if (!ret) + goto done; + + platform_set_drvdata(pdev, NULL); + kfree(glamo_gpio); + +done: + return ret; +} + +#ifdef CONFIG_PM + +static int glamo_gpio_suspend(struct device *dev) +{ + struct glamo_gpio *glamo_gpio = dev_get_drvdata(dev); + struct glamo_core *glamo = glamo_gpio->glamo; + uint16_t *saved_regs = glamo_gpio->saved_regs; + int i; + + spin_lock(&glamo->lock); + for (i = 0; i < 
GLAMO_NR_GPIO / 4; ++i) + saved_regs[i] = readw(glamo->base + GLAMO_REG_GPIO(i)); + spin_unlock(&glamo->lock); + + return 0; +} + +static int glamo_gpio_resume(struct device *dev) +{ + struct glamo_gpio *glamo_gpio = dev_get_drvdata(dev); + struct glamo_core *glamo = glamo_gpio->glamo; + uint16_t *saved_regs = glamo_gpio->saved_regs; + int i; + + spin_lock(&glamo->lock); + for (i = 0; i < GLAMO_NR_GPIO_REGS; ++i) + writew(saved_regs[i], glamo->base + GLAMO_REG_GPIO(i)); + spin_unlock(&glamo->lock); + return 0; +} + +static const struct dev_pm_ops glamo_pm_ops = { + .suspend = glamo_gpio_suspend, + .resume = glamo_gpio_resume, + .freeze = glamo_gpio_suspend, + .thaw = glamo_gpio_resume, +}; + +#define GLAMO_GPIO_PM_OPS (&glamo_pm_ops) + +#else +#define GLAMO_GPIO_PM_OPS NULL +#endif + +static struct platform_driver glamo_gpio_driver = { + .driver = { + .name = "glamo-gpio", + .owner = THIS_MODULE, + .pm = GLAMO_GPIO_PM_OPS, + }, + .probe = glamo_gpio_probe, + .remove = __devexit_p(glamo_gpio_remove), +}; + +static int __devinit glamo_gpio_init(void) +{ + return platform_driver_register(&glamo_gpio_driver); +} +module_init(glamo_gpio_init); + +static void __exit glamo_gpio_exit(void) +{ + platform_driver_unregister(&glamo_gpio_driver); +} +module_exit(glamo_gpio_exit); + +MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); +MODULE_DESCRIPTION("GPIO interface for the Glamo multimedia device"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:glamo-gpio"); diff --git a/drivers/mfd/glamo/glamo-mci.c b/drivers/mfd/glamo/glamo-mci.c new file mode 100644 index 00000000000..f49fa7167aa --- /dev/null +++ b/drivers/mfd/glamo/glamo-mci.c @@ -0,0 +1,1023 @@ +/* + * linux/drivers/mmc/host/glamo-mmc.c - Glamo MMC driver + * + * Copyright (C) 2007 Openmoko, Inc, Andy Green <andy@openmoko.com> + * Copyright (C) 2009, Lars-Peter Clausen <lars@metafoo.de> + * Based on S3C MMC driver that was: + * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sd.h> +#include <linux/mmc/host.h> +#include <linux/platform_device.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/crc7.h> +#include <linux/scatterlist.h> +#include <linux/io.h> +#include <linux/regulator/consumer.h> +#include <linux/err.h> +#include <linux/mfd/glamo.h> + +#include "glamo-core.h" +#include "glamo-regs.h" + +struct glamo_mci_host { + struct glamo_mmc_platform_data *pdata; + struct platform_device *pdev; + struct glamo_core *core; + struct mmc_host *mmc; + struct resource *mmio_mem; + struct resource *data_mem; + void __iomem *mmio_base; + uint16_t __iomem *data_base; + + unsigned int irq; + + struct regulator *regulator; + struct mmc_request *mrq; + + unsigned int clk_rate; + + unsigned short vdd; + char power_mode; + + unsigned char request_counter; + + struct timer_list disable_timer; + + struct work_struct irq_work; + struct work_struct read_work; + + unsigned clk_enabled:1; +}; + +static void glamo_mci_send_request(struct mmc_host *mmc, + struct mmc_request *mrq); +static void glamo_mci_send_command(struct glamo_mci_host *host, + struct mmc_command *cmd); + +/* + * Max SD clock rate + * + * held at /(3 + 1) due to concerns of 100R recommended series resistor + * allows 16MHz @ 4-bit --> 8MBytes/sec raw + * + * you can override this on kernel commandline using + * + * glamo_mci.sd_max_clk=10000000 + * + * for example + */ + +static int sd_max_clk = 21000000; +module_param(sd_max_clk, int, 0644); + +/* + * Slow SD clock rate + * + * you can override this on kernel commandline using + * + * glamo_mci.sd_slow_ratio=8 + * + * for example + * + * platform callback is used to decide effective clock rate, if not + * defined then max is used, if defined and returns nonzero, rate is + * divided by this factor + */ + +static int sd_slow_ratio = 8; +module_param(sd_slow_ratio, int, 0644); + +/* + * Post-power SD clock rate + * + * you can override this on kernel commandline using + * + * glamo_mci.sd_post_power_clock=1000000 + * + * for example + * + * After changing power to card, clock is held at this rate until first bulk + * transfer completes + */ + +static int sd_post_power_clock = 1000000; +module_param(sd_post_power_clock, int, 0644); + + +static inline void glamo_reg_write(struct glamo_mci_host *glamo, + uint16_t reg, uint16_t val) +{ + writew(val, glamo->mmio_base + reg); +} + +static inline uint16_t glamo_reg_read(struct glamo_mci_host *glamo, + uint16_t reg) +{ + return readw(glamo->mmio_base + reg); +} + +static void glamo_reg_set_bit_mask(struct glamo_mci_host *glamo, + uint16_t reg, uint16_t mask, + uint16_t val) +{ + uint16_t tmp; + + val &= mask; + + tmp = glamo_reg_read(glamo, reg); + tmp &= ~mask; + tmp |= val; + glamo_reg_write(glamo, reg, tmp); +} + +static void glamo_mci_reset(struct glamo_mci_host *host) +{ + glamo_engine_reset(host->core, GLAMO_ENGINE_MMC); + + glamo_reg_write(host, GLAMO_REG_MMC_WDATADS1, + (uint16_t)(host->data_mem->start)); + glamo_reg_write(host, GLAMO_REG_MMC_WDATADS2, + (uint16_t)(host->data_mem->start >> 16)); + + glamo_reg_write(host, GLAMO_REG_MMC_RDATADS1, + (uint16_t)(host->data_mem->start)); + glamo_reg_write(host, GLAMO_REG_MMC_RDATADS2, + (uint16_t)(host->data_mem->start >> 16)); + +} + +static void glamo_mci_clock_disable(struct glamo_mci_host *host) +{ + glamo_engine_suspend(host->core, GLAMO_ENGINE_MMC); +} + +static void glamo_mci_clock_enable(struct 
glamo_mci_host *host) +{ + del_timer_sync(&host->disable_timer); + + glamo_engine_enable(host->core, GLAMO_ENGINE_MMC); +} + +static void glamo_mci_disable_timer(unsigned long data) +{ + struct glamo_mci_host *host = (struct glamo_mci_host *)data; + glamo_mci_clock_disable(host); +} + + +static void do_pio_read(struct glamo_mci_host *host, struct mmc_data *data) +{ + struct sg_mapping_iter miter; + uint16_t __iomem *from_ptr = host->data_base; + + dev_dbg(&host->pdev->dev, "pio_read():\n"); + + sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_TO_SG); + + while (sg_miter_next(&miter)) { + memcpy(miter.addr, from_ptr, miter.length); + from_ptr += miter.length >> 1; + + data->bytes_xfered += miter.length; + } + + sg_miter_stop(&miter); + + dev_dbg(&host->pdev->dev, "pio_read(): " + "complete (no more data).\n"); +} + +static void do_pio_write(struct glamo_mci_host *host, struct mmc_data *data) +{ + struct sg_mapping_iter miter; + uint16_t __iomem *to_ptr = host->data_base; + + dev_dbg(&host->pdev->dev, "pio_write():\n"); + sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_FROM_SG); + + while (sg_miter_next(&miter)) { + memcpy(to_ptr, miter.addr, miter.length); + to_ptr += miter.length >> 1; + + data->bytes_xfered += miter.length; + } + + sg_miter_stop(&miter); + dev_dbg(&host->pdev->dev, "pio_write(): complete\n"); +} + +static int glamo_mci_set_card_clock(struct glamo_mci_host *host, int freq) +{ + int real_rate = 0; + + if (freq) { + glamo_mci_clock_enable(host); + real_rate = glamo_engine_reclock(host->core, GLAMO_ENGINE_MMC, + freq); + } else { + glamo_mci_clock_disable(host); + } + + return real_rate; +} + +static int glamo_mci_wait_idle(struct glamo_mci_host *host, + unsigned long timeout) +{ + uint16_t status; + do { + status = glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1); + } while (!(status & GLAMO_STAT1_MMC_IDLE) && + time_is_after_jiffies(timeout)); + + if (time_is_before_eq_jiffies(timeout)) { + glamo_mci_reset(host); + return -ETIMEDOUT; + } + + return 0; +} + +static void glamo_mci_request_done(struct glamo_mci_host *host, struct +mmc_request *mrq) +{ + mod_timer(&host->disable_timer, jiffies + HZ / 16); + mmc_request_done(host->mmc, mrq); +} + + +static void glamo_mci_irq_worker(struct work_struct *work) +{ + struct glamo_mci_host *host = container_of(work, struct glamo_mci_host, + irq_work); + struct mmc_request *mrq; + struct mmc_command *cmd; + uint16_t status; + + if (!host->mrq || !host->mrq->cmd) + return; + + mrq = host->mrq; + cmd = mrq->cmd; + +#if 0 + if (cmd->data->flags & MMC_DATA_READ) + return; +#endif + + status = glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1); + dev_dbg(&host->pdev->dev, "status = 0x%04x\n", status); + + /* we ignore a data timeout report if we are also told the data came */ + if (status & GLAMO_STAT1_MMC_RB_DRDY) + status &= ~GLAMO_STAT1_MMC_DTOUT; + + if (status & (GLAMO_STAT1_MMC_RTOUT | GLAMO_STAT1_MMC_DTOUT)) + cmd->error = -ETIMEDOUT; + else if (status & (GLAMO_STAT1_MMC_BWERR | GLAMO_STAT1_MMC_BRERR)) + cmd->error = -EILSEQ; + + if (cmd->error) { + dev_info(&host->pdev->dev, "Error after cmd: 0x%x\n", status); + goto done; + } + + /* issue STOP if we have been given one to use */ + if (mrq->stop) + glamo_mci_send_command(host, mrq->stop); + + if (cmd->data->flags & MMC_DATA_READ) + do_pio_read(host, cmd->data); + + if (mrq->stop) + mrq->stop->error = glamo_mci_wait_idle(host, jiffies + HZ); + +done: + host->mrq = NULL; + glamo_mci_request_done(host, cmd->mrq); +} + +static void glamo_mci_read_worker(struct work_struct *work) +{ 
+ struct glamo_mci_host *host = container_of(work, struct glamo_mci_host, + read_work); + struct mmc_command *cmd; + uint16_t status; + uint16_t blocks_ready; + size_t data_read = 0; + size_t data_ready; + struct scatterlist *sg; + uint16_t __iomem *from_ptr = host->data_base; + void *sg_pointer; + + + cmd = host->mrq->cmd; + sg = cmd->data->sg; + do { + status = glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1); + + if (status & (GLAMO_STAT1_MMC_RTOUT | GLAMO_STAT1_MMC_DTOUT)) + cmd->error = -ETIMEDOUT; + if (status & (GLAMO_STAT1_MMC_BWERR | GLAMO_STAT1_MMC_BRERR)) + cmd->error = -EILSEQ; + if (cmd->error) { + dev_info(&host->pdev->dev, "Error after cmd: 0x%x\n", + status); + goto done; + } + + blocks_ready = glamo_reg_read(host, GLAMO_REG_MMC_RB_BLKCNT); + data_ready = blocks_ready * cmd->data->blksz; + + if (data_ready == data_read) + yield(); + + while (sg && data_read + sg->length <= data_ready) { + sg_pointer = page_address(sg_page(sg)) + sg->offset; + memcpy(sg_pointer, from_ptr, sg->length); + from_ptr += sg->length >> 1; + + data_read += sg->length; + sg = sg_next(sg); + } + + } while (sg); + cmd->data->bytes_xfered = data_read; + + do { + status = glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1); + } while (!(status & GLAMO_STAT1_MMC_IDLE)); + + if (host->mrq->stop) + glamo_mci_send_command(host, host->mrq->stop); + + do { + status = glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1); + } while (!(status & GLAMO_STAT1_MMC_IDLE)); +done: + host->mrq = NULL; + glamo_mci_request_done(host, cmd->mrq); +} + +static irqreturn_t glamo_mci_irq(int irq, void *devid) +{ + struct glamo_mci_host *host = (struct glamo_mci_host *)devid; + schedule_work(&host->irq_work); + + return IRQ_HANDLED; +} + +static void glamo_mci_send_command(struct glamo_mci_host *host, + struct mmc_command *cmd) +{ + uint8_t u8a[6]; + uint16_t fire = 0; + unsigned int timeout = 1000000; + uint16_t *reg_resp = (uint16_t *)(host->mmio_base + GLAMO_REG_MMC_CMD_RSP1); + uint16_t status; + int triggers_int = 1; + + /* if we can't do it, reject as busy */ + if (!(glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1) & + GLAMO_STAT1_MMC_IDLE)) { + cmd->error = -EBUSY; + return; + } + + /* create an array in wire order for CRC computation */ + u8a[0] = 0x40 | (cmd->opcode & 0x3f); + u8a[1] = (uint8_t)(cmd->arg >> 24); + u8a[2] = (uint8_t)(cmd->arg >> 16); + u8a[3] = (uint8_t)(cmd->arg >> 8); + u8a[4] = (uint8_t)cmd->arg; + u8a[5] = (crc7(0, u8a, 5) << 1) | 0x01; + + /* issue the wire-order array including CRC in register order */ + glamo_reg_write(host, GLAMO_REG_MMC_CMD_REG1, ((u8a[4] << 8) | u8a[5])); + glamo_reg_write(host, GLAMO_REG_MMC_CMD_REG2, ((u8a[2] << 8) | u8a[3])); + glamo_reg_write(host, GLAMO_REG_MMC_CMD_REG3, ((u8a[0] << 8) | u8a[1])); + + /* command index toggle */ + fire |= (host->request_counter & 1) << 12; + + /* set type of command */ + switch (mmc_cmd_type(cmd)) { + case MMC_CMD_BC: + fire |= GLAMO_FIRE_MMC_CMDT_BNR; + break; + case MMC_CMD_BCR: + fire |= GLAMO_FIRE_MMC_CMDT_BR; + break; + case MMC_CMD_AC: + fire |= GLAMO_FIRE_MMC_CMDT_AND; + break; + case MMC_CMD_ADTC: + fire |= GLAMO_FIRE_MMC_CMDT_AD; + break; + } + /* + * if it expects a response, set the type expected + * + * R1, Length : 48bit, Normal response + * R1b, Length : 48bit, same R1, but added card busy status + * R2, Length : 136bit (really 128 bits with CRC snipped) + * R3, Length : 48bit (OCR register value) + * R4, Length : 48bit, SDIO_OP_CONDITION, Reverse SDIO Card + * R5, Length : 48bit, IO_RW_DIRECTION, Reverse SDIO Card + * R6, Length : 48bit (RCA 
register) + * R7, Length : 48bit (interface condition, VHS(voltage supplied), + * check pattern, CRC7) + */ + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: /* same index as R6 and R7 */ + fire |= GLAMO_FIRE_MMC_RSPT_R1; + break; + case MMC_RSP_R1B: + fire |= GLAMO_FIRE_MMC_RSPT_R1b; + break; + case MMC_RSP_R2: + fire |= GLAMO_FIRE_MMC_RSPT_R2; + break; + case MMC_RSP_R3: + fire |= GLAMO_FIRE_MMC_RSPT_R3; + break; + /* R4 and R5 supported by chip not defined in linux/mmc/core.h (sdio) */ + } + /* + * From the command index, set up the command class in the host ctrllr + * + * missing guys present on chip but couldn't figure out how to use yet: + * 0x0 "stream read" + * 0x9 "cancel running command" + */ + switch (cmd->opcode) { + case MMC_READ_SINGLE_BLOCK: + fire |= GLAMO_FIRE_MMC_CC_SBR; /* single block read */ + break; + case MMC_SWITCH: /* 64 byte payload */ + case SD_APP_SEND_SCR: + case MMC_READ_MULTIPLE_BLOCK: + /* we will get an interrupt off this */ + if (!cmd->mrq->stop) { + /* multiblock no stop */ + fire |= GLAMO_FIRE_MMC_CC_MBRNS; + } else { + /* multiblock with stop */ + fire |= GLAMO_FIRE_MMC_CC_MBRS; + } + break; + case MMC_WRITE_BLOCK: + fire |= GLAMO_FIRE_MMC_CC_SBW; /* single block write */ + break; + case MMC_WRITE_MULTIPLE_BLOCK: + if (cmd->mrq->stop) { + /* multiblock with stop */ + fire |= GLAMO_FIRE_MMC_CC_MBWS; + } else { + /* multiblock NO stop-- 'RESERVED'? */ + fire |= GLAMO_FIRE_MMC_CC_MBWNS; + } + break; + case MMC_STOP_TRANSMISSION: + fire |= GLAMO_FIRE_MMC_CC_STOP; /* STOP */ + triggers_int = 0; + break; + default: + fire |= GLAMO_FIRE_MMC_CC_BASIC; /* "basic command" */ + triggers_int = 0; + break; + } + + if (cmd->data) + host->mrq = cmd->mrq; + + /* always largest timeout */ + glamo_reg_write(host, GLAMO_REG_MMC_TIMEOUT, 0xfff); + + /* Generate interrupt on txfer */ + glamo_reg_set_bit_mask(host, GLAMO_REG_MMC_BASIC, 0xff36, + 0x0800 | + GLAMO_BASIC_MMC_NO_CLK_RD_WAIT | + GLAMO_BASIC_MMC_EN_COMPL_INT | + GLAMO_BASIC_MMC_EN_DATA_PUPS | + GLAMO_BASIC_MMC_EN_CMD_PUP); + + /* send the command out on the wire */ + /* dev_info(&host->pdev->dev, "Using FIRE %04X\n", fire); */ + glamo_reg_write(host, GLAMO_REG_MMC_CMD_FIRE, fire); + + /* we are deselecting card? because it isn't going to ack then... */ + if ((cmd->opcode == 7) && (cmd->arg == 0)) + return; + + /* + * we must spin until response is ready or timed out + * -- we don't get interrupts unless there is a bulk rx + */ + do + status = glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1); + while (((((status >> 15) & 1) != (host->request_counter & 1)) || + (!(status & (GLAMO_STAT1_MMC_RB_RRDY | + GLAMO_STAT1_MMC_RTOUT | + GLAMO_STAT1_MMC_DTOUT | + GLAMO_STAT1_MMC_BWERR | + GLAMO_STAT1_MMC_BRERR)))) && (timeout--)); + + if ((status & (GLAMO_STAT1_MMC_RTOUT | + GLAMO_STAT1_MMC_DTOUT)) || + (timeout == 0)) { + cmd->error = -ETIMEDOUT; + } else if (status & (GLAMO_STAT1_MMC_BWERR | GLAMO_STAT1_MMC_BRERR)) { + cmd->error = -EILSEQ; + } + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + cmd->resp[3] = readw(&reg_resp[0]) | + (readw(&reg_resp[1]) << 16); + cmd->resp[2] = readw(&reg_resp[2]) | + (readw(&reg_resp[3]) << 16); + cmd->resp[1] = readw(&reg_resp[4]) | + (readw(&reg_resp[5]) << 16); + cmd->resp[0] = readw(&reg_resp[6]) | + (readw(&reg_resp[7]) << 16); + } else { + cmd->resp[0] = (readw(&reg_resp[0]) >> 8) | + (readw(&reg_resp[1]) << 8) | + (readw(&reg_resp[2]) << 24); + } + } + +#if 0 + /* We'll only get an interrupt when all data has been transferred.
+ By starting to copy data when it's avaiable we can increase + throughput by up to 30%. */ + if (cmd->data && (cmd->data->flags & MMC_DATA_READ)) + schedule_work(&host->read_work); +#endif + +} + +static int glamo_mci_prepare_pio(struct glamo_mci_host *host, + struct mmc_data *data) +{ + /* set up the block info */ + glamo_reg_write(host, GLAMO_REG_MMC_DATBLKLEN, data->blksz); + glamo_reg_write(host, GLAMO_REG_MMC_DATBLKCNT, data->blocks); + + data->bytes_xfered = 0; + + /* if write, prep the write into the shared RAM before the command */ + if (data->flags & MMC_DATA_WRITE) + do_pio_write(host, data); + + dev_dbg(&host->pdev->dev, "(blksz=%d, count=%d)\n", + data->blksz, data->blocks); + return 0; +} + +static int glamo_mci_irq_poll(struct glamo_mci_host *host, + struct mmc_command *cmd) +{ + int timeout = 1000000; + uint16_t status; + /* + * if the glamo INT# line isn't wired (*cough* it can happen) + * I'm afraid we have to spin on the IRQ status bit and "be + * our own INT# line" + */ + /* + * we have faith we will get an "interrupt"... + * but something insane like suspend problems can mean + * we spin here forever, so we timeout after a LONG time + */ + do { + status = glamo_reg_read(host, GLAMO_REG_IRQ_STATUS); + } while ((--timeout) && !(status & GLAMO_IRQ_MMC)); + + if (timeout <= 0) { + if (cmd->data->error) + cmd->data->error = -ETIMEDOUT; + dev_err(&host->pdev->dev, "Payload timeout\n"); + return -ETIMEDOUT; + } + /* ack this interrupt source */ + writew(GLAMO_IRQ_MMC, host->core->base + + GLAMO_REG_IRQ_CLEAR); + + /* yay we are an interrupt controller! -- call the ISR + * it will stop clock to card + */ + glamo_mci_irq(host->irq, host); + + return 0; +} + +static void glamo_mci_send_request(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct glamo_mci_host *host = mmc_priv(mmc); + struct mmc_command *cmd = mrq->cmd; + + glamo_mci_clock_enable(host); + host->request_counter++; + if (cmd->data) { + if (glamo_mci_prepare_pio(host, cmd->data)) { + cmd->error = -EIO; + cmd->data->error = -EIO; + goto done; + } + } + + dev_dbg(&host->pdev->dev, "cmd 0x%x, " + "arg 0x%x data=%p mrq->stop=%p flags 0x%x\n", + cmd->opcode, cmd->arg, cmd->data, cmd->mrq->stop, + cmd->flags); + + glamo_mci_send_command(host, cmd); + + /* + * if we don't have bulk data to take care of, we're done + */ + if (!cmd->data || cmd->error) + goto done; + + + if (!host->core->irq_works) { + if (glamo_mci_irq_poll(host, mrq->cmd)) + goto done; + } + + /* + * Otherwise can can use the interrupt as async completion -- + * if there is read data coming, or we wait for write data to complete, + * exit without mmc_request_done() as the payload interrupt + * will service it + */ + dev_dbg(&host->pdev->dev, "Waiting for payload data\n"); + return; +done: + if (!cmd->error) + cmd->error = glamo_mci_wait_idle(host, jiffies + 2 * HZ); + glamo_mci_request_done(host, mrq); +} + +static void glamo_mci_set_power_mode(struct glamo_mci_host *host, + unsigned char power_mode) +{ + int ret; + + if (power_mode == host->power_mode) + return; + + switch (power_mode) { + case MMC_POWER_UP: + if (host->power_mode == MMC_POWER_OFF) { + ret = regulator_enable(host->regulator); + if (ret) + dev_err(&host->pdev->dev, + "Failed to enable regulator: %d\n", + ret); + } + break; + case MMC_POWER_ON: + break; + case MMC_POWER_OFF: + default: + glamo_engine_disable(host->core, GLAMO_ENGINE_MMC); + + ret = regulator_disable(host->regulator); + if (ret) + dev_warn(&host->pdev->dev, + "Failed to disable regulator: %d\n", + ret); + break; + } 
+ host->power_mode = power_mode; +} + +static void glamo_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct glamo_mci_host *host = mmc_priv(mmc); + int bus_width = 0; + int rate; + int sd_drive; + int ret; + + /* Set power */ + glamo_mci_set_power_mode(host, ios->power_mode); + + if (host->vdd != ios->vdd) { + ret = mmc_regulator_set_ocr(host->regulator, ios->vdd); + if (ret) + dev_err(&host->pdev->dev, + "Failed to set regulator voltage: %d\n", ret); + else + host->vdd = ios->vdd; + } + rate = glamo_mci_set_card_clock(host, ios->clock); + + if ((ios->power_mode == MMC_POWER_ON) || + (ios->power_mode == MMC_POWER_UP)) { + dev_info(&host->pdev->dev, + "powered (vdd = %hu) clk: %dkHz div=%hu (req: %ukHz). " + "Bus width=%d\n", ios->vdd, + rate / 1000, 0, + ios->clock / 1000, (int)ios->bus_width); + } else { + dev_info(&host->pdev->dev, "glamo_mci_set_ios: power down.\n"); + } + + /* set bus width */ + if (ios->bus_width == MMC_BUS_WIDTH_4) + bus_width = GLAMO_BASIC_MMC_EN_4BIT_DATA; + + sd_drive = (rate * 4) / host->clk_rate; + if (sd_drive > 3) + sd_drive = 3; + + glamo_reg_set_bit_mask(host, GLAMO_REG_MMC_BASIC, + GLAMO_BASIC_MMC_EN_4BIT_DATA | 0xb0, + bus_width | sd_drive << 6); +} + + +/* + * no physical write protect supported by us + */ +static int glamo_mci_get_ro(struct mmc_host *mmc) +{ + return 0; +} + +static struct mmc_host_ops glamo_mci_ops = { + .request = glamo_mci_send_request, + .set_ios = glamo_mci_set_ios, + .get_ro = glamo_mci_get_ro, +}; + +static int glamo_mci_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct glamo_mci_host *host; + struct glamo_core *core = dev_get_drvdata(pdev->dev.parent); + int ret; + + dev_info(&pdev->dev, "glamo_mci driver (C)2007 Openmoko, Inc\n"); + + mmc = mmc_alloc_host(sizeof(struct glamo_mci_host), &pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto probe_out; + } + + host = mmc_priv(mmc); + host->mmc = mmc; + host->pdev = pdev; + if (core->pdata) + host->pdata = core->pdata->mmc_data; + host->power_mode = MMC_POWER_OFF; + host->clk_enabled = 0; + host->core = core; + host->irq = platform_get_irq(pdev, 0); + + INIT_WORK(&host->irq_work, glamo_mci_irq_worker); + INIT_WORK(&host->read_work, glamo_mci_read_worker); + + host->regulator = regulator_get(pdev->dev.parent, "SD_3V3"); + if (IS_ERR(host->regulator)) { + dev_err(&pdev->dev, "Cannot proceed without regulator.\n"); + ret = PTR_ERR(host->regulator); + goto probe_free_host; + } + + host->mmio_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!host->mmio_mem) { + dev_err(&pdev->dev, + "failed to get io memory region resouce.\n"); + ret = -ENOENT; + goto probe_regulator_put; + } + + host->mmio_mem = request_mem_region(host->mmio_mem->start, + resource_size(host->mmio_mem), + pdev->name); + + if (!host->mmio_mem) { + dev_err(&pdev->dev, "failed to request io memory region.\n"); + ret = -ENOENT; + goto probe_regulator_put; + } + + host->mmio_base = ioremap(host->mmio_mem->start, + resource_size(host->mmio_mem)); + if (!host->mmio_base) { + dev_err(&pdev->dev, "failed to ioremap() io memory region.\n"); + ret = -EINVAL; + goto probe_free_mem_region_mmio; + } + + + /* Get ahold of our data buffer we use for data in and out on MMC */ + host->data_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!host->data_mem) { + dev_err(&pdev->dev, + "failed to get io memory region resource.\n"); + ret = -ENOENT; + goto probe_iounmap_mmio; + } + + host->data_mem = request_mem_region(host->data_mem->start, + resource_size(host->data_mem), + pdev->name); 
+ + if (!host->data_mem) { + dev_err(&pdev->dev, "failed to request io memory region.\n"); + ret = -ENOENT; + goto probe_iounmap_mmio; + } + host->data_base = ioremap(host->data_mem->start, + resource_size(host->data_mem)); + + if (host->data_base == 0) { + dev_err(&pdev->dev, "failed to ioremap() io memory region.\n"); + ret = -EINVAL; + goto probe_free_mem_region_data; + } + + ret = request_irq(host->irq, glamo_mci_irq, IRQF_SHARED, + pdev->name, host); + if (ret) { + dev_err(&pdev->dev, "failed to register irq.\n"); + goto probe_iounmap_data; + } + + + host->vdd = 0; + host->clk_rate = glamo_pll_rate(host->core, GLAMO_PLL1); + + /* explain our host controller capabilities */ + mmc->ops = &glamo_mci_ops; + mmc->ocr_avail = mmc_regulator_get_ocrmask(host->regulator); + mmc->caps = MMC_CAP_4_BIT_DATA | + MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_SD_HIGHSPEED; + mmc->f_min = host->clk_rate / 256; + mmc->f_max = sd_max_clk; + + mmc->max_blk_count = (1 << 16) - 1; /* GLAMO_REG_MMC_RB_BLKCNT */ + mmc->max_blk_size = (1 << 12) - 1; /* GLAMO_REG_MMC_RB_BLKLEN */ + mmc->max_req_size = resource_size(host->data_mem); + mmc->max_seg_size = mmc->max_req_size; + mmc->max_phys_segs = 128; + mmc->max_hw_segs = 128; + + if (mmc->ocr_avail < 0) { + dev_warn(&pdev->dev, + "Failed to get ocr list for regulator: %d.\n", + mmc->ocr_avail); + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + } + + platform_set_drvdata(pdev, mmc); + + glamo_engine_enable(host->core, GLAMO_ENGINE_MMC); + glamo_mci_reset(host); + glamo_engine_disable(host->core, GLAMO_ENGINE_MMC); + + setup_timer(&host->disable_timer, glamo_mci_disable_timer, + (unsigned long)host); + + ret = mmc_add_host(mmc); + if (ret) { + dev_err(&pdev->dev, "failed to add mmc host.\n"); + goto probe_freeirq; + } + + return 0; + +probe_freeirq: + free_irq(host->irq, host); +probe_iounmap_data: + iounmap(host->data_base); +probe_free_mem_region_data: + release_mem_region(host->data_mem->start, + resource_size(host->data_mem)); +probe_iounmap_mmio: + iounmap(host->mmio_base); +probe_free_mem_region_mmio: + release_mem_region(host->mmio_mem->start, + resource_size(host->mmio_mem)); +probe_regulator_put: + regulator_put(host->regulator); +probe_free_host: + mmc_free_host(mmc); +probe_out: + return ret; +} + +static int glamo_mci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct glamo_mci_host *host = mmc_priv(mmc); + + free_irq(host->irq, host); + + mmc_remove_host(mmc); + iounmap(host->mmio_base); + iounmap(host->data_base); + release_mem_region(host->mmio_mem->start, + resource_size(host->mmio_mem)); + release_mem_region(host->data_mem->start, + resource_size(host->data_mem)); + + regulator_put(host->regulator); + + mmc_free_host(mmc); + + glamo_engine_disable(host->core, GLAMO_ENGINE_MMC); + return 0; +} + + +#ifdef CONFIG_PM + +static int glamo_mci_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct glamo_mci_host *host = mmc_priv(mmc); + int ret; + + cancel_work_sync(&host->irq_work); + + ret = mmc_suspend_host(mmc, PMSG_SUSPEND); + + return ret; +} + +static int glamo_mci_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct glamo_mci_host *host = mmc_priv(mmc); + int ret; + + + glamo_mci_reset(host); + glamo_engine_enable(host->core, GLAMO_ENGINE_MMC); + mdelay(10); + + ret = mmc_resume_host(host->mmc); + + return 0; +} + +static struct dev_pm_ops glamo_mci_pm_ops = { + .suspend = glamo_mci_suspend, + .resume = glamo_mci_resume, + .freeze = 
glamo_mci_suspend, + .thaw = glamo_mci_resume, +}; +#define GLAMO_MCI_PM_OPS (&glamo_mci_pm_ops) + +#else /* CONFIG_PM */ +#define GLAMO_MCI_PM_OPS NULL +#endif /* CONFIG_PM */ + + +static struct platform_driver glamo_mci_driver = { + .probe = glamo_mci_probe, + .remove = glamo_mci_remove, + .driver = { + .name = "glamo-mci", + .owner = THIS_MODULE, + .pm = GLAMO_MCI_PM_OPS, + }, +}; + +static int __init glamo_mci_init(void) +{ + platform_driver_register(&glamo_mci_driver); + return 0; +} +module_init(glamo_mci_init); + +static void __exit glamo_mci_exit(void) +{ + platform_driver_unregister(&glamo_mci_driver); +} +module_exit(glamo_mci_exit); + +MODULE_DESCRIPTION("Glamo MMC/SD Card Interface driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Andy Green <andy@openmoko.com>"); diff --git a/drivers/mfd/glamo/glamo-regs.h b/drivers/mfd/glamo/glamo-regs.h new file mode 100644 index 00000000000..59848e1122e --- /dev/null +++ b/drivers/mfd/glamo/glamo-regs.h @@ -0,0 +1,630 @@ +#ifndef _GLAMO_REGS_H +#define _GLAMO_REGS_H + +/* Smedia Glamo 336x/337x driver + * + * (C) 2007 by Openmoko, Inc. + * Author: Harald Welte <laforge@openmoko.org> + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +enum glamo_regster_offsets { + GLAMO_REGOFS_GENERIC = 0x0000, + GLAMO_REGOFS_HOSTBUS = 0x0200, + GLAMO_REGOFS_MEMORY = 0x0300, + GLAMO_REGOFS_VIDCAP = 0x0400, + GLAMO_REGOFS_ISP = 0x0500, + GLAMO_REGOFS_JPEG = 0x0800, + GLAMO_REGOFS_MPEG = 0x0c00, + GLAMO_REGOFS_LCD = 0x1100, + GLAMO_REGOFS_MMC = 0x1400, + GLAMO_REGOFS_MPROC0 = 0x1500, + GLAMO_REGOFS_MPROC1 = 0x1580, + GLAMO_REGOFS_CMDQUEUE = 0x1600, + GLAMO_REGOFS_RISC = 0x1680, + GLAMO_REGOFS_2D = 0x1700, + GLAMO_REGOFS_3D = 0x1b00, + GLAMO_REGOFS_END = 0x2400, +}; + + +enum glamo_register_generic { + GLAMO_REG_GCONF1 = 0x0000, + GLAMO_REG_GCONF2 = 0x0002, +#define GLAMO_REG_DEVICE_ID GLAMO_REG_GCONF2 + GLAMO_REG_GCONF3 = 0x0004, +#define GLAMO_REG_REVISION_ID GLAMO_REG_GCONF3 + GLAMO_REG_IRQ_GEN1 = 0x0006, +#define GLAMO_REG_IRQ_ENABLE GLAMO_REG_IRQ_GEN1 + GLAMO_REG_IRQ_GEN2 = 0x0008, +#define GLAMO_REG_IRQ_SET GLAMO_REG_IRQ_GEN2 + GLAMO_REG_IRQ_GEN3 = 0x000a, +#define GLAMO_REG_IRQ_CLEAR GLAMO_REG_IRQ_GEN3 + GLAMO_REG_IRQ_GEN4 = 0x000c, +#define GLAMO_REG_IRQ_STATUS GLAMO_REG_IRQ_GEN4 + GLAMO_REG_CLOCK_HOST = 0x0010, + GLAMO_REG_CLOCK_MEMORY = 0x0012, + GLAMO_REG_CLOCK_LCD = 0x0014, + GLAMO_REG_CLOCK_MMC = 0x0016, + GLAMO_REG_CLOCK_ISP = 0x0018, + GLAMO_REG_CLOCK_JPEG = 0x001a, + GLAMO_REG_CLOCK_3D = 0x001c, + GLAMO_REG_CLOCK_2D = 0x001e, + GLAMO_REG_CLOCK_RISC1 = 0x0020, /* 3365 only? */ + GLAMO_REG_CLOCK_RISC2 = 0x0022, /* 3365 only? 
*/ + GLAMO_REG_CLOCK_MPEG = 0x0024, + GLAMO_REG_CLOCK_MPROC = 0x0026, + + GLAMO_REG_CLOCK_GEN5_1 = 0x0030, + GLAMO_REG_CLOCK_GEN5_2 = 0x0032, + GLAMO_REG_CLOCK_GEN6 = 0x0034, + GLAMO_REG_CLOCK_GEN7 = 0x0036, + GLAMO_REG_CLOCK_GEN8 = 0x0038, + GLAMO_REG_CLOCK_GEN9 = 0x003a, + GLAMO_REG_CLOCK_GEN10 = 0x003c, + GLAMO_REG_CLOCK_GEN11 = 0x003e, + GLAMO_REG_PLL_GEN1 = 0x0040, + GLAMO_REG_PLL_GEN2 = 0x0042, + GLAMO_REG_PLL_GEN3 = 0x0044, + GLAMO_REG_PLL_GEN4 = 0x0046, + GLAMO_REG_PLL_GEN5 = 0x0048, + GLAMO_REG_GPIO_GEN1 = 0x0050, + GLAMO_REG_GPIO_GEN2 = 0x0052, + GLAMO_REG_GPIO_GEN3 = 0x0054, + GLAMO_REG_GPIO_GEN4 = 0x0056, + GLAMO_REG_GPIO_GEN5 = 0x0058, + GLAMO_REG_GPIO_GEN6 = 0x005a, + GLAMO_REG_GPIO_GEN7 = 0x005c, + GLAMO_REG_GPIO_GEN8 = 0x005e, + GLAMO_REG_GPIO_GEN9 = 0x0060, + GLAMO_REG_GPIO_GEN10 = 0x0062, + GLAMO_REG_DFT_GEN1 = 0x0070, + GLAMO_REG_DFT_GEN2 = 0x0072, + GLAMO_REG_DFT_GEN3 = 0x0074, + GLAMO_REG_DFT_GEN4 = 0x0076, + + GLAMO_REG_DFT_GEN5 = 0x01e0, + GLAMO_REG_DFT_GEN6 = 0x01f0, +}; + +#define GLAMO_REG_HOSTBUS(x) (GLAMO_REGOFS_HOSTBUS-2+(x*2)) + +#define REG_MEM(x) (GLAMO_REGOFS_MEMORY+(x)) +#define GLAMO_REG_MEM_TIMING(x) (GLAMO_REG_MEM_TIMING1-2+(x*2)) + +enum glamo_register_mem { + GLAMO_REG_MEM_TYPE = REG_MEM(0x00), + GLAMO_REG_MEM_GEN = REG_MEM(0x02), + GLAMO_REG_MEM_TIMING1 = REG_MEM(0x04), + GLAMO_REG_MEM_TIMING2 = REG_MEM(0x06), + GLAMO_REG_MEM_TIMING3 = REG_MEM(0x08), + GLAMO_REG_MEM_TIMING4 = REG_MEM(0x0a), + GLAMO_REG_MEM_TIMING5 = REG_MEM(0x0c), + GLAMO_REG_MEM_TIMING6 = REG_MEM(0x0e), + GLAMO_REG_MEM_TIMING7 = REG_MEM(0x10), + GLAMO_REG_MEM_TIMING8 = REG_MEM(0x12), + GLAMO_REG_MEM_TIMING9 = REG_MEM(0x14), + GLAMO_REG_MEM_TIMING10 = REG_MEM(0x16), + GLAMO_REG_MEM_TIMING11 = REG_MEM(0x18), + GLAMO_REG_MEM_POWER1 = REG_MEM(0x1a), + GLAMO_REG_MEM_POWER2 = REG_MEM(0x1c), + GLAMO_REG_MEM_LCD_BUF1 = REG_MEM(0x1e), + GLAMO_REG_MEM_LCD_BUF2 = REG_MEM(0x20), + GLAMO_REG_MEM_LCD_BUF3 = REG_MEM(0x22), + GLAMO_REG_MEM_LCD_BUF4 = REG_MEM(0x24), + GLAMO_REG_MEM_BIST1 = REG_MEM(0x26), + GLAMO_REG_MEM_BIST2 = REG_MEM(0x28), + GLAMO_REG_MEM_BIST3 = REG_MEM(0x2a), + GLAMO_REG_MEM_BIST4 = REG_MEM(0x2c), + GLAMO_REG_MEM_BIST5 = REG_MEM(0x2e), + GLAMO_REG_MEM_MAH1 = REG_MEM(0x30), + GLAMO_REG_MEM_MAH2 = REG_MEM(0x32), + GLAMO_REG_MEM_DRAM1 = REG_MEM(0x34), + GLAMO_REG_MEM_DRAM2 = REG_MEM(0x36), + GLAMO_REG_MEM_CRC = REG_MEM(0x38), +}; + +#define GLAMO_MEM_TYPE_MASK 0x03 + +enum glamo_reg_mem_dram1 { + /* b0 - b10 == refresh period, 1 -> 2048 clocks */ + GLAMO_MEM_DRAM1_EN_GATE_CLK = (1 << 11), + GLAMO_MEM_DRAM1_SELF_REFRESH = (1 << 12), + GLAMO_MEM_DRAM1_EN_GATE_CKE = (1 << 13), + GLAMO_MEM_DRAM1_EN_DRAM_REFRESH = (1 << 14), + GLAMO_MEM_DRAM1_EN_MODEREG_SET = (1 << 15), +}; + +enum glamo_reg_mem_dram2 { + GLAMO_MEM_DRAM2_DEEP_PWRDOWN = (1 << 12), +}; + +enum glamo_irq_index { + GLAMO_IRQIDX_HOSTBUS = 0, + GLAMO_IRQIDX_JPEG = 1, + GLAMO_IRQIDX_MPEG = 2, + GLAMO_IRQIDX_MPROC1 = 3, + GLAMO_IRQIDX_MPROC0 = 4, + GLAMO_IRQIDX_CMDQUEUE = 5, + GLAMO_IRQIDX_2D = 6, + GLAMO_IRQIDX_MMC = 7, + GLAMO_IRQIDX_RISC = 8, +}; + +enum glamo_irq { + GLAMO_IRQ_HOSTBUS = (1 << GLAMO_IRQIDX_HOSTBUS), + GLAMO_IRQ_JPEG = (1 << GLAMO_IRQIDX_JPEG), + GLAMO_IRQ_MPEG = (1 << GLAMO_IRQIDX_MPEG), + GLAMO_IRQ_MPROC1 = (1 << GLAMO_IRQIDX_MPROC1), + GLAMO_IRQ_MPROC0 = (1 << GLAMO_IRQIDX_MPROC0), + GLAMO_IRQ_CMDQUEUE = (1 << GLAMO_IRQIDX_CMDQUEUE), + GLAMO_IRQ_2D = (1 << GLAMO_IRQIDX_2D), + GLAMO_IRQ_MMC = (1 << GLAMO_IRQIDX_MMC), + GLAMO_IRQ_RISC = (1 << GLAMO_IRQIDX_RISC), +}; + +enum glamo_reg_clock_host { + 
GLAMO_CLOCK_HOST_DG_BCLK = 0x0001, + GLAMO_CLOCK_HOST_DG_M0CLK = 0x0004, + GLAMO_CLOCK_HOST_RESET = 0x1000, +}; + +enum glamo_reg_clock_mem { + GLAMO_CLOCK_MEM_DG_M1CLK = 0x0001, + GLAMO_CLOCK_MEM_EN_M1CLK = 0x0002, + GLAMO_CLOCK_MEM_DG_MOCACLK = 0x0004, + GLAMO_CLOCK_MEM_EN_MOCACLK = 0x0008, + GLAMO_CLOCK_MEM_RESET = 0x1000, + GLAMO_CLOCK_MOCA_RESET = 0x2000, +}; + +enum glamo_reg_clock_lcd { + GLAMO_CLOCK_LCD_DG_DCLK = 0x0001, + GLAMO_CLOCK_LCD_EN_DCLK = 0x0002, + GLAMO_CLOCK_LCD_DG_DMCLK = 0x0004, + GLAMO_CLOCK_LCD_EN_DMCLK = 0x0008, + GLAMO_CLOCK_LCD_EN_DHCLK = 0x0020, + GLAMO_CLOCK_LCD_DG_M5CLK = 0x0040, + GLAMO_CLOCK_LCD_EN_M5CLK = 0x0080, + GLAMO_CLOCK_LCD_RESET = 0x1000, +}; + +enum glamo_reg_clock_mmc { + GLAMO_CLOCK_MMC_DG_TCLK = 0x0001, + GLAMO_CLOCK_MMC_EN_TCLK = 0x0002, + GLAMO_CLOCK_MMC_DG_M9CLK = 0x0004, + GLAMO_CLOCK_MMC_EN_M9CLK = 0x0008, + GLAMO_CLOCK_MMC_RESET = 0x1000, +}; + +enum glamo_reg_basic_mmc { + /* set to disable CRC error rejection */ + GLAMO_BASIC_MMC_DISABLE_CRC = 0x0001, + /* enable completion interrupt */ + GLAMO_BASIC_MMC_EN_COMPL_INT = 0x0002, + /* stop MMC clock while enforced idle waiting for data from card */ + GLAMO_BASIC_MMC_NO_CLK_RD_WAIT = 0x0004, + /* 0 = 1-bit bus to card, 1 = use 4-bit bus (has to be negotiated) */ + GLAMO_BASIC_MMC_EN_4BIT_DATA = 0x0008, + /* enable 75K pullups on D3..D0 */ + GLAMO_BASIC_MMC_EN_DATA_PUPS = 0x0010, + /* enable 75K pullup on CMD */ + GLAMO_BASIC_MMC_EN_CMD_PUP = 0x0020, + /* IO drive strength 00=weak -> 11=strongest */ + GLAMO_BASIC_MMC_EN_DR_STR0 = 0x0040, + GLAMO_BASIC_MMC_EN_DR_STR1 = 0x0080, + /* TCLK delay stage A, 0000 = 500ps --> 1111 = 8ns */ + GLAMO_BASIC_MMC_EN_TCLK_DLYA0 = 0x0100, + GLAMO_BASIC_MMC_EN_TCLK_DLYA1 = 0x0200, + GLAMO_BASIC_MMC_EN_TCLK_DLYA2 = 0x0400, + GLAMO_BASIC_MMC_EN_TCLK_DLYA3 = 0x0800, + /* TCLK delay stage B (cumulative), 0000 = 500ps --> 1111 = 8ns */ + GLAMO_BASIC_MMC_EN_TCLK_DLYB0 = 0x1000, + GLAMO_BASIC_MMC_EN_TCLK_DLYB1 = 0x2000, + GLAMO_BASIC_MMC_EN_TCLK_DLYB2 = 0x4000, + GLAMO_BASIC_MMC_EN_TCLK_DLYB3 = 0x8000, +}; + +enum glamo_reg_stat1_mmc { + /* command "counter" (really: toggle) */ + GLAMO_STAT1_MMC_CMD_CTR = 0x8000, + /* engine is idle */ + GLAMO_STAT1_MMC_IDLE = 0x4000, + /* readback response is ready */ + GLAMO_STAT1_MMC_RB_RRDY = 0x0200, + /* readback data is ready */ + GLAMO_STAT1_MMC_RB_DRDY = 0x0100, + /* no response timeout */ + GLAMO_STAT1_MMC_RTOUT = 0x0020, + /* no data timeout */ + GLAMO_STAT1_MMC_DTOUT = 0x0010, + /* CRC error on block write */ + GLAMO_STAT1_MMC_BWERR = 0x0004, + /* CRC error on block read */ + GLAMO_STAT1_MMC_BRERR = 0x0002 +}; + +enum glamo_reg_fire_mmc { + /* command "counter" (really: toggle) + * the STAT1 register reflects this so you can ensure you don't look + * at status for previous command + */ + GLAMO_FIRE_MMC_CMD_CTR = 0x8000, + /* sets kind of response expected */ + GLAMO_FIRE_MMC_RES_MASK = 0x0700, + /* sets command type */ + GLAMO_FIRE_MMC_TYP_MASK = 0x00C0, + /* sets command class */ + GLAMO_FIRE_MMC_CLS_MASK = 0x000F, +}; + +enum glamo_fire_mmc_response_types { + GLAMO_FIRE_MMC_RSPT_R1 = 0x0000, + GLAMO_FIRE_MMC_RSPT_R1b = 0x0100, + GLAMO_FIRE_MMC_RSPT_R2 = 0x0200, + GLAMO_FIRE_MMC_RSPT_R3 = 0x0300, + GLAMO_FIRE_MMC_RSPT_R4 = 0x0400, + GLAMO_FIRE_MMC_RSPT_R5 = 0x0500, +}; + +enum glamo_fire_mmc_command_types { + /* broadcast, no response */ + GLAMO_FIRE_MMC_CMDT_BNR = 0x0000, + /* broadcast, with response */ + GLAMO_FIRE_MMC_CMDT_BR = 0x0040, + /* addressed, no data */ + GLAMO_FIRE_MMC_CMDT_AND = 0x0080, + /* addressed, with 
data */ + GLAMO_FIRE_MMC_CMDT_AD = 0x00C0, +}; + +enum glamo_fire_mmc_command_class { + /* "Stream Read" */ + GLAMO_FIRE_MMC_CC_STRR = 0x0000, + /* Single Block Read */ + GLAMO_FIRE_MMC_CC_SBR = 0x0001, + /* Multiple Block Read With Stop */ + GLAMO_FIRE_MMC_CC_MBRS = 0x0002, + /* Multiple Block Read No Stop */ + GLAMO_FIRE_MMC_CC_MBRNS = 0x0003, + /* RESERVED for "Stream Write" */ + GLAMO_FIRE_MMC_CC_STRW = 0x0004, + /* "Stream Write" */ + GLAMO_FIRE_MMC_CC_SBW = 0x0005, + /* RESERVED for Multiple Block Write With Stop */ + GLAMO_FIRE_MMC_CC_MBWS = 0x0006, + /* Multiple Block Write No Stop */ + GLAMO_FIRE_MMC_CC_MBWNS = 0x0007, + /* STOP command */ + GLAMO_FIRE_MMC_CC_STOP = 0x0008, + /* Cancel on Running Command */ + GLAMO_FIRE_MMC_CC_CANCL = 0x0009, + /* "Basic Command" */ + GLAMO_FIRE_MMC_CC_BASIC = 0x000a, +}; + +/* these are offsets from the start of the MMC register region */ +enum glamo_register_mmc { + /* MMC command, b15..8 = cmd arg b7..0; b7..1 = CRC; b0 = end bit */ + GLAMO_REG_MMC_CMD_REG1 = 0x00, + /* MMC command, b15..0 = cmd arg b23 .. 8 */ + GLAMO_REG_MMC_CMD_REG2 = 0x02, + /* MMC command, b15=start, b14=transmission, + * b13..8=cmd idx, b7..0=cmd arg b31..24 + */ + GLAMO_REG_MMC_CMD_REG3 = 0x04, + GLAMO_REG_MMC_CMD_FIRE = 0x06, + GLAMO_REG_MMC_CMD_RSP1 = 0x10, + GLAMO_REG_MMC_CMD_RSP2 = 0x12, + GLAMO_REG_MMC_CMD_RSP3 = 0x14, + GLAMO_REG_MMC_CMD_RSP4 = 0x16, + GLAMO_REG_MMC_CMD_RSP5 = 0x18, + GLAMO_REG_MMC_CMD_RSP6 = 0x1a, + GLAMO_REG_MMC_CMD_RSP7 = 0x1c, + GLAMO_REG_MMC_CMD_RSP8 = 0x1e, + GLAMO_REG_MMC_RB_STAT1 = 0x20, + GLAMO_REG_MMC_RB_BLKCNT = 0x22, + GLAMO_REG_MMC_RB_BLKLEN = 0x24, + GLAMO_REG_MMC_BASIC = 0x30, + GLAMO_REG_MMC_RDATADS1 = 0x34, + GLAMO_REG_MMC_RDATADS2 = 0x36, + GLAMO_REG_MMC_WDATADS1 = 0x38, + GLAMO_REG_MMC_WDATADS2 = 0x3a, + GLAMO_REG_MMC_DATBLKCNT = 0x3c, + GLAMO_REG_MMC_DATBLKLEN = 0x3e, + GLAMO_REG_MMC_TIMEOUT = 0x40, + +}; + +enum glamo_reg_clock_isp { + GLAMO_CLOCK_ISP_DG_I1CLK = 0x0001, + GLAMO_CLOCK_ISP_EN_I1CLK = 0x0002, + GLAMO_CLOCK_ISP_DG_CCLK = 0x0004, + GLAMO_CLOCK_ISP_EN_CCLK = 0x0008, + GLAMO_CLOCK_ISP_EN_SCLK = 0x0020, + GLAMO_CLOCK_ISP_DG_M2CLK = 0x0040, + GLAMO_CLOCK_ISP_EN_M2CLK = 0x0080, + GLAMO_CLOCK_ISP_DG_M15CLK = 0x0100, + GLAMO_CLOCK_ISP_EN_M15CLK = 0x0200, + GLAMO_CLOCK_ISP1_RESET = 0x1000, + GLAMO_CLOCK_ISP2_RESET = 0x2000, +}; + +enum glamo_reg_clock_jpeg { + GLAMO_CLOCK_JPEG_DG_JCLK = 0x0001, + GLAMO_CLOCK_JPEG_EN_JCLK = 0x0002, + GLAMO_CLOCK_JPEG_DG_M3CLK = 0x0004, + GLAMO_CLOCK_JPEG_EN_M3CLK = 0x0008, + GLAMO_CLOCK_JPEG_RESET = 0x1000, +}; + +enum glamo_reg_clock_2d { + GLAMO_CLOCK_2D_DG_GCLK = 0x0001, + GLAMO_CLOCK_2D_EN_GCLK = 0x0002, + GLAMO_CLOCK_2D_DG_M7CLK = 0x0004, + GLAMO_CLOCK_2D_EN_M7CLK = 0x0008, + GLAMO_CLOCK_2D_DG_M6CLK = 0x0010, + GLAMO_CLOCK_2D_EN_M6CLK = 0x0020, + GLAMO_CLOCK_2D_RESET = 0x1000, + GLAMO_CLOCK_2D_CQ_RESET = 0x2000, +}; + +enum glamo_reg_clock_3d { + GLAMO_CLOCK_3D_DG_ECLK = 0x0001, + GLAMO_CLOCK_3D_EN_ECLK = 0x0002, + GLAMO_CLOCK_3D_DG_RCLK = 0x0004, + GLAMO_CLOCK_3D_EN_RCLK = 0x0008, + GLAMO_CLOCK_3D_DG_M8CLK = 0x0010, + GLAMO_CLOCK_3D_EN_M8CLK = 0x0020, + GLAMO_CLOCK_3D_BACK_RESET = 0x1000, + GLAMO_CLOCK_3D_FRONT_RESET = 0x2000, +}; + +enum glamo_reg_clock_mpeg { + GLAMO_CLOCK_MPEG_DG_X0CLK = 0x0001, + GLAMO_CLOCK_MPEG_EN_X0CLK = 0x0002, + GLAMO_CLOCK_MPEG_DG_X1CLK = 0x0004, + GLAMO_CLOCK_MPEG_EN_X1CLK = 0x0008, + GLAMO_CLOCK_MPEG_DG_X2CLK = 0x0010, + GLAMO_CLOCK_MPEG_EN_X2CLK = 0x0020, + GLAMO_CLOCK_MPEG_DG_X3CLK = 0x0040, + GLAMO_CLOCK_MPEG_EN_X3CLK = 0x0080, + 
GLAMO_CLOCK_MPEG_DG_X4CLK = 0x0100, + GLAMO_CLOCK_MPEG_EN_X4CLK = 0x0200, + GLAMO_CLOCK_MPEG_DG_X6CLK = 0x0400, + GLAMO_CLOCK_MPEG_EN_X6CLK = 0x0800, + GLAMO_CLOCK_MPEG_ENC_RESET = 0x1000, + GLAMO_CLOCK_MPEG_DEC_RESET = 0x2000, +}; + +enum glamo_reg_clock51 { + GLAMO_CLOCK_GEN51_EN_DIV_MCLK = 0x0001, + GLAMO_CLOCK_GEN51_EN_DIV_SCLK = 0x0002, + GLAMO_CLOCK_GEN51_EN_DIV_JCLK = 0x0004, + GLAMO_CLOCK_GEN51_EN_DIV_DCLK = 0x0008, + GLAMO_CLOCK_GEN51_EN_DIV_DMCLK = 0x0010, + GLAMO_CLOCK_GEN51_EN_DIV_DHCLK = 0x0020, + GLAMO_CLOCK_GEN51_EN_DIV_GCLK = 0x0040, + GLAMO_CLOCK_GEN51_EN_DIV_TCLK = 0x0080, + /* FIXME: higher bits */ +}; + +enum glamo_reg_hostbus2 { + GLAMO_HOSTBUS2_MMIO_EN_ISP = 0x0001, + GLAMO_HOSTBUS2_MMIO_EN_JPEG = 0x0002, + GLAMO_HOSTBUS2_MMIO_EN_MPEG = 0x0004, + GLAMO_HOSTBUS2_MMIO_EN_LCD = 0x0008, + GLAMO_HOSTBUS2_MMIO_EN_MMC = 0x0010, + GLAMO_HOSTBUS2_MMIO_EN_MICROP0 = 0x0020, + GLAMO_HOSTBUS2_MMIO_EN_MICROP1 = 0x0040, + GLAMO_HOSTBUS2_MMIO_EN_CQ = 0x0080, + GLAMO_HOSTBUS2_MMIO_EN_RISC = 0x0100, + GLAMO_HOSTBUS2_MMIO_EN_2D = 0x0200, + GLAMO_HOSTBUS2_MMIO_EN_3D = 0x0400, +}; + +/* LCD Controller */ + +#define REG_LCD(x) (x) +enum glamo_reg_lcd { + GLAMO_REG_LCD_MODE1 = REG_LCD(0x00), + GLAMO_REG_LCD_MODE2 = REG_LCD(0x02), + GLAMO_REG_LCD_MODE3 = REG_LCD(0x04), + GLAMO_REG_LCD_WIDTH = REG_LCD(0x06), + GLAMO_REG_LCD_HEIGHT = REG_LCD(0x08), + GLAMO_REG_LCD_POLARITY = REG_LCD(0x0a), + GLAMO_REG_LCD_A_BASE1 = REG_LCD(0x0c), + GLAMO_REG_LCD_A_BASE2 = REG_LCD(0x0e), + GLAMO_REG_LCD_B_BASE1 = REG_LCD(0x10), + GLAMO_REG_LCD_B_BASE2 = REG_LCD(0x12), + GLAMO_REG_LCD_C_BASE1 = REG_LCD(0x14), + GLAMO_REG_LCD_C_BASE2 = REG_LCD(0x16), + GLAMO_REG_LCD_PITCH = REG_LCD(0x18), + /* RES */ + GLAMO_REG_LCD_HORIZ_TOTAL = REG_LCD(0x1c), + /* RES */ + GLAMO_REG_LCD_HORIZ_RETR_START = REG_LCD(0x20), + /* RES */ + GLAMO_REG_LCD_HORIZ_RETR_END = REG_LCD(0x24), + /* RES */ + GLAMO_REG_LCD_HORIZ_DISP_START = REG_LCD(0x28), + /* RES */ + GLAMO_REG_LCD_HORIZ_DISP_END = REG_LCD(0x2c), + /* RES */ + GLAMO_REG_LCD_VERT_TOTAL = REG_LCD(0x30), + /* RES */ + GLAMO_REG_LCD_VERT_RETR_START = REG_LCD(0x34), + /* RES */ + GLAMO_REG_LCD_VERT_RETR_END = REG_LCD(0x38), + /* RES */ + GLAMO_REG_LCD_VERT_DISP_START = REG_LCD(0x3c), + /* RES */ + GLAMO_REG_LCD_VERT_DISP_END = REG_LCD(0x40), + /* RES */ + GLAMO_REG_LCD_POL = REG_LCD(0x44), + GLAMO_REG_LCD_DATA_START = REG_LCD(0x46), + GLAMO_REG_LCD_FRATE_CONTRO = REG_LCD(0x48), + GLAMO_REG_LCD_DATA_CMD_HDR = REG_LCD(0x4a), + GLAMO_REG_LCD_SP_START = REG_LCD(0x4c), + GLAMO_REG_LCD_SP_END = REG_LCD(0x4e), + GLAMO_REG_LCD_CURSOR_BASE1 = REG_LCD(0x50), + GLAMO_REG_LCD_CURSOR_BASE2 = REG_LCD(0x52), + GLAMO_REG_LCD_CURSOR_PITCH = REG_LCD(0x54), + GLAMO_REG_LCD_CURSOR_X_SIZE = REG_LCD(0x56), + GLAMO_REG_LCD_CURSOR_Y_SIZE = REG_LCD(0x58), + GLAMO_REG_LCD_CURSOR_X_POS = REG_LCD(0x5a), + GLAMO_REG_LCD_CURSOR_Y_POS = REG_LCD(0x5c), + GLAMO_REG_LCD_CURSOR_PRESET = REG_LCD(0x5e), + GLAMO_REG_LCD_CURSOR_FG_COLOR = REG_LCD(0x60), + /* RES */ + GLAMO_REG_LCD_CURSOR_BG_COLOR = REG_LCD(0x64), + /* RES */ + GLAMO_REG_LCD_CURSOR_DST_COLOR = REG_LCD(0x68), + /* RES */ + GLAMO_REG_LCD_STATUS1 = REG_LCD(0x80), + GLAMO_REG_LCD_STATUS2 = REG_LCD(0x82), + GLAMO_REG_LCD_STATUS3 = REG_LCD(0x84), + GLAMO_REG_LCD_STATUS4 = REG_LCD(0x86), + /* RES */ + GLAMO_REG_LCD_COMMAND1 = REG_LCD(0xa0), + GLAMO_REG_LCD_COMMAND2 = REG_LCD(0xa2), + /* RES */ + GLAMO_REG_LCD_WFORM_DELAY1 = REG_LCD(0xb0), + GLAMO_REG_LCD_WFORM_DELAY2 = REG_LCD(0xb2), + /* RES */ + GLAMO_REG_LCD_GAMMA_CORR = REG_LCD(0x100), + /* RES */ + 
GLAMO_REG_LCD_GAMMA_R_ENTRY01 = REG_LCD(0x110), + GLAMO_REG_LCD_GAMMA_R_ENTRY23 = REG_LCD(0x112), + GLAMO_REG_LCD_GAMMA_R_ENTRY45 = REG_LCD(0x114), + GLAMO_REG_LCD_GAMMA_R_ENTRY67 = REG_LCD(0x116), + GLAMO_REG_LCD_GAMMA_R_ENTRY8 = REG_LCD(0x118), + /* RES */ + GLAMO_REG_LCD_GAMMA_G_ENTRY01 = REG_LCD(0x130), + GLAMO_REG_LCD_GAMMA_G_ENTRY23 = REG_LCD(0x132), + GLAMO_REG_LCD_GAMMA_G_ENTRY45 = REG_LCD(0x134), + GLAMO_REG_LCD_GAMMA_G_ENTRY67 = REG_LCD(0x136), + GLAMO_REG_LCD_GAMMA_G_ENTRY8 = REG_LCD(0x138), + /* RES */ + GLAMO_REG_LCD_GAMMA_B_ENTRY01 = REG_LCD(0x150), + GLAMO_REG_LCD_GAMMA_B_ENTRY23 = REG_LCD(0x152), + GLAMO_REG_LCD_GAMMA_B_ENTRY45 = REG_LCD(0x154), + GLAMO_REG_LCD_GAMMA_B_ENTRY67 = REG_LCD(0x156), + GLAMO_REG_LCD_GAMMA_B_ENTRY8 = REG_LCD(0x158), + /* RES */ + GLAMO_REG_LCD_SRAM_DRIVING1 = REG_LCD(0x160), + GLAMO_REG_LCD_SRAM_DRIVING2 = REG_LCD(0x162), + GLAMO_REG_LCD_SRAM_DRIVING3 = REG_LCD(0x164), +}; + +enum glamo_reg_lcd_mode1 { + GLAMO_LCD_MODE1_PWRSAVE = 0x0001, + GLAMO_LCD_MODE1_PARTIAL_PRT = 0x0002, + GLAMO_LCD_MODE1_HWFLIP = 0x0004, + GLAMO_LCD_MODE1_LCD2 = 0x0008, + /* RES */ + GLAMO_LCD_MODE1_PARTIAL_MODE = 0x0020, + GLAMO_LCD_MODE1_CURSOR_DSTCOLOR = 0x0040, + GLAMO_LCD_MODE1_PARTIAL_ENABLE = 0x0080, + GLAMO_LCD_MODE1_TVCLK_IN_ENABLE = 0x0100, + GLAMO_LCD_MODE1_HSYNC_HIGH_ACT = 0x0200, + GLAMO_LCD_MODE1_VSYNC_HIGH_ACT = 0x0400, + GLAMO_LCD_MODE1_HSYNC_FLIP = 0x0800, + GLAMO_LCD_MODE1_GAMMA_COR_EN = 0x1000, + GLAMO_LCD_MODE1_DITHER_EN = 0x2000, + GLAMO_LCD_MODE1_CURSOR_EN = 0x4000, + GLAMO_LCD_MODE1_ROTATE_EN = 0x8000, +}; + +enum glamo_reg_lcd_mode2 { + GLAMO_LCD_MODE2_CRC_CHECK_EN = 0x0001, + GLAMO_LCD_MODE2_DCMD_PER_LINE = 0x0002, + GLAMO_LCD_MODE2_NOUSE_BDEF = 0x0004, + GLAMO_LCD_MODE2_OUT_POS_MODE = 0x0008, + GLAMO_LCD_MODE2_FRATE_CTRL_EN = 0x0010, + GLAMO_LCD_MODE2_SINGLE_BUFFER = 0x0020, + GLAMO_LCD_MODE2_SER_LSB_TO_MSB = 0x0040, + /* FIXME */ +}; + +enum glamo_reg_lcd_mode3 { + /* LCD color source data format */ + GLAMO_LCD_SRC_RGB565 = 0x0000, + GLAMO_LCD_SRC_ARGB1555 = 0x4000, + GLAMO_LCD_SRC_ARGB4444 = 0x8000, + /* interface type */ + GLAMO_LCD_MODE3_LCD = 0x1000, + GLAMO_LCD_MODE3_RGB = 0x0800, + GLAMO_LCD_MODE3_CPU = 0x0000, + /* mode */ + GLAMO_LCD_MODE3_RGB332 = 0x0000, + GLAMO_LCD_MODE3_RGB444 = 0x0100, + GLAMO_LCD_MODE3_RGB565 = 0x0200, + GLAMO_LCD_MODE3_RGB666 = 0x0300, + /* depth */ + GLAMO_LCD_MODE3_6BITS = 0x0000, + GLAMO_LCD_MODE3_8BITS = 0x0010, + GLAMO_LCD_MODE3_9BITS = 0x0020, + GLAMO_LCD_MODE3_16BITS = 0x0030, + GLAMO_LCD_MODE3_18BITS = 0x0040, +}; + +enum glamo_lcd_rot_mode { + GLAMO_LCD_ROT_MODE_0 = 0x0000, + GLAMO_LCD_ROT_MODE_180 = 0x2000, + GLAMO_LCD_ROT_MODE_MIRROR = 0x4000, + GLAMO_LCD_ROT_MODE_FLIP = 0x6000, + GLAMO_LCD_ROT_MODE_90 = 0x8000, + GLAMO_LCD_ROT_MODE_270 = 0xa000, +}; +#define GLAMO_LCD_ROT_MODE_MASK 0xe000 + +enum glamo_lcd_cmd_type { + GLAMO_LCD_CMD_TYPE_DISP = 0x0000, + GLAMO_LCD_CMD_TYPE_PARALLEL = 0x4000, + GLAMO_LCD_CMD_TYPE_SERIAL = 0x8000, + GLAMO_LCD_CMD_TYPE_SERIAL_DIRECT = 0xc000, +}; +#define GLAMO_LCD_CMD_TYPE_MASK 0xc000 + +enum glamo_lcd_cmds { + GLAMO_LCD_CMD_DATA_DISP_FIRE = 0x00, + GLAMO_LCD_CMD_DATA_DISP_SYNC = 0x01, /* RGB only */ + /* switch to command mode, no display */ + GLAMO_LCD_CMD_DATA_FIRE_NO_DISP = 0x02, + /* display until VSYNC, switch to command */ + GLAMO_LCD_CMD_DATA_FIRE_VSYNC = 0x11, + /* display until HSYNC, switch to command */ + GLAMO_LCD_CMD_DATA_FIRE_HSYNC = 0x12, + /* display until VSYNC, 1 black frame, VSYNC, switch to command */ + GLAMO_LCD_CMD_DATA_FIRE_VSYNC_B = 0x13, + /* 
don't care about display and switch to command */ + GLAMO_LCD_CMD_DATA_FIRE_FREE = 0x14, /* RGB only */ + /* don't care about display, keep data display but disable data, + * and switch to command */ + GLAMO_LCD_CMD_DATA_FIRE_FREE_D = 0x15, /* RGB only */ +}; + +enum glamo_core_revisions { + GLAMO_CORE_REV_A0 = 0x0000, + GLAMO_CORE_REV_A1 = 0x0001, + GLAMO_CORE_REV_A2 = 0x0002, + GLAMO_CORE_REV_A3 = 0x0003, +}; + +#endif /* _GLAMO_REGS_H */
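The GPIO driver in this patch packs four GPIOs into each 16-bit GPIO_GENn register, one nibble per role: direction bits in bits 0..3, output levels in bits 4..7, sampled input levels in bits 8..11 and function-select bits in bits 12..15. The sketch below is illustrative only and not part of the patch: plain userspace C that mirrors the REG_OF_GPIO()/DIRECTION_BIT()/OUTPUT_BIT()/INPUT_BIT()/FUNC_BIT() macros from glamo-gpio.c (with GLAMO_REG_GPIO_GEN1 = 0x0050 taken from glamo-regs.h) and prints which register offset and bit masks a given GPIO number maps to.

/*
 * Illustrative only -- not part of the patch. Mirrors the bit-layout
 * macros from glamo-gpio.c so the register mapping can be checked on
 * a desktop; the GPIO_GEN1 offset comes from glamo-regs.h above.
 */
#include <stdio.h>

#define GLAMO_REG_GPIO_GEN1 0x0050
#define GLAMO_REG_GPIO(x)   (((x) * 2) + GLAMO_REG_GPIO_GEN1)

#define REG_OF_GPIO(gpio)   (GLAMO_REG_GPIO((gpio) >> 2)) /* 4 GPIOs per 16-bit register */
#define NUM_OF_GPIO(gpio)   ((gpio) & 0x3)
#define DIRECTION_BIT(gpio) (1 << (NUM_OF_GPIO(gpio) + 0))  /* set by direction_input() */
#define OUTPUT_BIT(gpio)    (1 << (NUM_OF_GPIO(gpio) + 4))  /* output level */
#define INPUT_BIT(gpio)     (1 << (NUM_OF_GPIO(gpio) + 8))  /* sampled input level */
#define FUNC_BIT(gpio)      (1 << (NUM_OF_GPIO(gpio) + 12)) /* set in request(), cleared in free() */

int main(void)
{
	unsigned int gpio = 10; /* arbitrary example; the chip exposes GPIOs 0..20 */

	/* For gpio 10 this prints reg 0x0054, dir 0x0004, out 0x0040, in 0x0400, func 0x4000 */
	printf("GPIO %u -> reg 0x%04x: dir 0x%04x, out 0x%04x, in 0x%04x, func 0x%04x\n",
	       gpio, REG_OF_GPIO(gpio), DIRECTION_BIT(gpio), OUTPUT_BIT(gpio),
	       INPUT_BIT(gpio), FUNC_BIT(gpio));
	return 0;
}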