Diffstat (limited to 'drivers/mmc')
31 files changed, 2929 insertions, 510 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 85f0e8cd875..1f552c6e757 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -85,7 +85,14 @@ static void mmc_blk_put(struct mmc_blk_data *md) mutex_lock(&open_lock); md->usage--; if (md->usage == 0) { + int devmaj = MAJOR(disk_devt(md->disk)); int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT; + + if (!devmaj) + devidx = md->disk->first_minor >> MMC_SHIFT; + + blk_cleanup_queue(md->queue.queue); + __clear_bit(devidx, dev_use); put_disk(md->disk); @@ -613,6 +620,7 @@ static int mmc_blk_probe(struct mmc_card *card) return 0; out: + mmc_cleanup_queue(&md->queue); mmc_blk_put(md); return err; diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 49e582356c6..c5a7a855f4b 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -90,9 +90,10 @@ static void mmc_request(struct request_queue *q) struct request *req; if (!mq) { - printk(KERN_ERR "MMC: killing requests for dead queue\n"); - while ((req = blk_fetch_request(q)) != NULL) + while ((req = blk_fetch_request(q)) != NULL) { + req->cmd_flags |= REQ_QUIET; __blk_end_request_all(req, -EIO); + } return; } @@ -223,17 +224,18 @@ void mmc_cleanup_queue(struct mmc_queue *mq) struct request_queue *q = mq->queue; unsigned long flags; - /* Mark that we should start throwing out stragglers */ - spin_lock_irqsave(q->queue_lock, flags); - q->queuedata = NULL; - spin_unlock_irqrestore(q->queue_lock, flags); - /* Make sure the queue isn't suspended, as that will deadlock */ mmc_queue_resume(mq); /* Then terminate our worker thread */ kthread_stop(mq->thread); + /* Empty the queue */ + spin_lock_irqsave(q->queue_lock, flags); + q->queuedata = NULL; + blk_start_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); + if (mq->bounce_sg) kfree(mq->bounce_sg); mq->bounce_sg = NULL; @@ -245,8 +247,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq) kfree(mq->bounce_buf); mq->bounce_buf = NULL; - blk_cleanup_queue(mq->queue); - mq->card = NULL; } EXPORT_SYMBOL(mmc_cleanup_queue); diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c index b8e7c5ae981..f53755533e7 100644 --- a/drivers/mmc/card/sdio_uart.c +++ b/drivers/mmc/card/sdio_uart.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/sched.h> #include <linux/mutex.h> #include <linux/seq_file.h> #include <linux/serial_reg.h> @@ -73,11 +74,10 @@ struct uart_icount { }; struct sdio_uart_port { + struct tty_port port; struct kref kref; struct tty_struct *tty; unsigned int index; - unsigned int opened; - struct mutex open_lock; struct sdio_func *func; struct mutex func_lock; struct task_struct *in_sdio_uart_irq; @@ -87,6 +87,7 @@ struct sdio_uart_port { struct uart_icount icount; unsigned int uartclk; unsigned int mctrl; + unsigned int rx_mctrl; unsigned int read_status_mask; unsigned int ignore_status_mask; unsigned char x_char; @@ -102,7 +103,6 @@ static int sdio_uart_add_port(struct sdio_uart_port *port) int index, ret = -EBUSY; kref_init(&port->kref); - mutex_init(&port->open_lock); mutex_init(&port->func_lock); spin_lock_init(&port->write_lock); @@ -151,6 +151,7 @@ static void sdio_uart_port_put(struct sdio_uart_port *port) static void sdio_uart_port_remove(struct sdio_uart_port *port) { struct sdio_func *func; + struct tty_struct *tty; BUG_ON(sdio_uart_table[port->index] != port); @@ -165,15 +166,19 @@ static void sdio_uart_port_remove(struct sdio_uart_port *port) * give up on that port ASAP. 
* Beware: the lock ordering is critical. */ - mutex_lock(&port->open_lock); + mutex_lock(&port->port.mutex); mutex_lock(&port->func_lock); func = port->func; sdio_claim_host(func); port->func = NULL; mutex_unlock(&port->func_lock); - if (port->opened) - tty_hangup(port->tty); - mutex_unlock(&port->open_lock); + tty = tty_port_tty_get(&port->port); + /* tty_hangup is async so is this safe as is ?? */ + if (tty) { + tty_hangup(tty); + tty_kref_put(tty); + } + mutex_unlock(&port->port.mutex); sdio_release_irq(func); sdio_disable_func(func); sdio_release_host(func); @@ -217,6 +222,8 @@ static unsigned int sdio_uart_get_mctrl(struct sdio_uart_port *port) unsigned char status; unsigned int ret; + /* FIXME: What stops this losing the delta bits and breaking + sdio_uart_check_modem_status ? */ status = sdio_in(port, UART_MSR); ret = 0; @@ -391,7 +398,7 @@ static void sdio_uart_stop_rx(struct sdio_uart_port *port) static void sdio_uart_receive_chars(struct sdio_uart_port *port, unsigned int *status) { - struct tty_struct *tty = port->tty; + struct tty_struct *tty = tty_port_tty_get(&port->port); unsigned int ch, flag; int max_count = 256; @@ -428,24 +435,30 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port, } if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0) - tty_insert_flip_char(tty, ch, flag); + if (tty) + tty_insert_flip_char(tty, ch, flag); /* * Overrun is special. Since it's reported immediately, * it doesn't affect the current character. */ if (*status & ~port->ignore_status_mask & UART_LSR_OE) - tty_insert_flip_char(tty, 0, TTY_OVERRUN); + if (tty) + tty_insert_flip_char(tty, 0, TTY_OVERRUN); *status = sdio_in(port, UART_LSR); } while ((*status & UART_LSR_DR) && (max_count-- > 0)); - tty_flip_buffer_push(tty); + if (tty) { + tty_flip_buffer_push(tty); + tty_kref_put(tty); + } } static void sdio_uart_transmit_chars(struct sdio_uart_port *port) { struct circ_buf *xmit = &port->xmit; int count; + struct tty_struct *tty; if (port->x_char) { sdio_out(port, UART_TX, port->x_char); @@ -453,8 +466,13 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port) port->x_char = 0; return; } - if (circ_empty(xmit) || port->tty->stopped || port->tty->hw_stopped) { + + tty = tty_port_tty_get(&port->port); + + if (tty == NULL || circ_empty(xmit) || + tty->stopped || tty->hw_stopped) { sdio_uart_stop_tx(port); + tty_kref_put(tty); return; } @@ -468,15 +486,17 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port) } while (--count > 0); if (circ_chars_pending(xmit) < WAKEUP_CHARS) - tty_wakeup(port->tty); + tty_wakeup(tty); if (circ_empty(xmit)) sdio_uart_stop_tx(port); + tty_kref_put(tty); } static void sdio_uart_check_modem_status(struct sdio_uart_port *port) { int status; + struct tty_struct *tty; status = sdio_in(port, UART_MSR); @@ -487,25 +507,39 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port) port->icount.rng++; if (status & UART_MSR_DDSR) port->icount.dsr++; - if (status & UART_MSR_DDCD) + if (status & UART_MSR_DDCD) { port->icount.dcd++; + /* DCD raise - wake for open */ + if (status & UART_MSR_DCD) + wake_up_interruptible(&port->port.open_wait); + else { + /* DCD drop - hang up if tty attached */ + tty = tty_port_tty_get(&port->port); + if (tty) { + tty_hangup(tty); + tty_kref_put(tty); + } + } + } if (status & UART_MSR_DCTS) { port->icount.cts++; - if (port->tty->termios->c_cflag & CRTSCTS) { + tty = tty_port_tty_get(&port->port); + if (tty && (tty->termios->c_cflag & CRTSCTS)) { int cts = (status & UART_MSR_CTS); - if 
(port->tty->hw_stopped) { + if (tty->hw_stopped) { if (cts) { - port->tty->hw_stopped = 0; + tty->hw_stopped = 0; sdio_uart_start_tx(port); - tty_wakeup(port->tty); + tty_wakeup(tty); } } else { if (!cts) { - port->tty->hw_stopped = 1; + tty->hw_stopped = 1; sdio_uart_stop_tx(port); } } } + tty_kref_put(tty); } } @@ -542,8 +576,62 @@ static void sdio_uart_irq(struct sdio_func *func) port->in_sdio_uart_irq = NULL; } -static int sdio_uart_startup(struct sdio_uart_port *port) +static int uart_carrier_raised(struct tty_port *tport) +{ + struct sdio_uart_port *port = + container_of(tport, struct sdio_uart_port, port); + unsigned int ret = sdio_uart_claim_func(port); + if (ret) /* Missing hardware shoudn't block for carrier */ + return 1; + ret = sdio_uart_get_mctrl(port); + sdio_uart_release_func(port); + if (ret & TIOCM_CAR) + return 1; + return 0; +} + +/** + * uart_dtr_rts - port helper to set uart signals + * @tport: tty port to be updated + * @onoff: set to turn on DTR/RTS + * + * Called by the tty port helpers when the modem signals need to be + * adjusted during an open, close and hangup. + */ + +static void uart_dtr_rts(struct tty_port *tport, int onoff) +{ + struct sdio_uart_port *port = + container_of(tport, struct sdio_uart_port, port); + int ret = sdio_uart_claim_func(port); + if (ret) + return; + if (onoff == 0) + sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); + else + sdio_uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS); + sdio_uart_release_func(port); +} + +/** + * sdio_uart_activate - start up hardware + * @tport: tty port to activate + * @tty: tty bound to this port + * + * Activate a tty port. The port locking guarantees us this will be + * run exactly once per set of opens, and if successful will see the + * shutdown method run exactly once to match. Start up and shutdown are + * protected from each other by the internal locking and will not run + * at the same time even during a hangup event. + * + * If we successfully start up the port we take an extra kref as we + * will keep it around until shutdown when the kref is dropped. + */ + +static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty) { + struct sdio_uart_port *port = + container_of(tport, struct sdio_uart_port, port); unsigned long page; int ret; @@ -551,7 +639,7 @@ static int sdio_uart_startup(struct sdio_uart_port *port) * Set the TTY IO error marker - we will only clear this * once we have successfully opened the port. */ - set_bit(TTY_IO_ERROR, &port->tty->flags); + set_bit(TTY_IO_ERROR, &tty->flags); /* Initialise and allocate the transmit buffer. 
*/ page = __get_free_page(GFP_KERNEL); @@ -592,19 +680,19 @@ static int sdio_uart_startup(struct sdio_uart_port *port) */ sdio_out(port, UART_LCR, UART_LCR_WLEN8); - port->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE | UART_IER_UUE; + port->ier = UART_IER_RLSI|UART_IER_RDI|UART_IER_RTOIE|UART_IER_UUE; port->mctrl = TIOCM_OUT2; - sdio_uart_change_speed(port, port->tty->termios, NULL); + sdio_uart_change_speed(port, tty->termios, NULL); - if (port->tty->termios->c_cflag & CBAUD) + if (tty->termios->c_cflag & CBAUD) sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR); - if (port->tty->termios->c_cflag & CRTSCTS) + if (tty->termios->c_cflag & CRTSCTS) if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS)) - port->tty->hw_stopped = 1; + tty->hw_stopped = 1; - clear_bit(TTY_IO_ERROR, &port->tty->flags); + clear_bit(TTY_IO_ERROR, &tty->flags); /* Kick the IRQ handler once while we're still holding the host lock */ sdio_uart_irq(port->func); @@ -621,8 +709,20 @@ err1: return ret; } -static void sdio_uart_shutdown(struct sdio_uart_port *port) +/** + * sdio_uart_shutdown - stop hardware + * @tport: tty port to shut down + * + * Deactivate a tty port. The port locking guarantees us this will be + * run only if a successful matching activate already ran. The two are + * protected from each other by the internal locking and will not run + * at the same time even during a hangup event. + */ + +static void sdio_uart_shutdown(struct tty_port *tport) { + struct sdio_uart_port *port = + container_of(tport, struct sdio_uart_port, port); int ret; ret = sdio_uart_claim_func(port); @@ -631,12 +731,6 @@ static void sdio_uart_shutdown(struct sdio_uart_port *port) sdio_uart_stop_rx(port); - /* TODO: wait here for TX FIFO to drain */ - - /* Turn off DTR and RTS early. */ - if (port->tty->termios->c_cflag & HUPCL) - sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); - /* Disable interrupts from this port */ sdio_release_irq(port->func); port->ier = 0; @@ -661,77 +755,70 @@ skip: free_page((unsigned long)port->xmit.buf); } -static int sdio_uart_open(struct tty_struct *tty, struct file *filp) +/** + * sdio_uart_install - install method + * @driver: the driver in use (sdio_uart in our case) + * @tty: the tty being bound + * + * Look up and bind the tty and the driver together. Initialize + * any needed private data (in our case the termios) + */ + +static int sdio_uart_install(struct tty_driver *driver, struct tty_struct *tty) { - struct sdio_uart_port *port; - int ret; + int idx = tty->index; + struct sdio_uart_port *port = sdio_uart_port_get(idx); + int ret = tty_init_termios(tty); + + if (ret == 0) { + tty_driver_kref_get(driver); + tty->count++; + /* This is the ref sdio_uart_port get provided */ + tty->driver_data = port; + driver->ttys[idx] = tty; + } else + sdio_uart_port_put(port); + return ret; +} - port = sdio_uart_port_get(tty->index); - if (!port) - return -ENODEV; +/** + * sdio_uart_cleanup - called on the last tty kref drop + * @tty: the tty being destroyed + * + * Called asynchronously when the last reference to the tty is dropped. + * We cannot destroy the tty->driver_data port kref until this point + */ - mutex_lock(&port->open_lock); +static void sdio_uart_cleanup(struct tty_struct *tty) +{ + struct sdio_uart_port *port = tty->driver_data; + tty->driver_data = NULL; /* Bug trap */ + sdio_uart_port_put(port); +} - /* - * Make sure not to mess up with a dead port - * which has not been closed yet. 
- */ - if (tty->driver_data && tty->driver_data != port) { - mutex_unlock(&port->open_lock); - sdio_uart_port_put(port); - return -EBUSY; - } +/* + * Open/close/hangup is now entirely boilerplate + */ - if (!port->opened) { - tty->driver_data = port; - port->tty = tty; - ret = sdio_uart_startup(port); - if (ret) { - tty->driver_data = NULL; - port->tty = NULL; - mutex_unlock(&port->open_lock); - sdio_uart_port_put(port); - return ret; - } - } - port->opened++; - mutex_unlock(&port->open_lock); - return 0; +static int sdio_uart_open(struct tty_struct *tty, struct file *filp) +{ + struct sdio_uart_port *port = tty->driver_data; + return tty_port_open(&port->port, tty, filp); } static void sdio_uart_close(struct tty_struct *tty, struct file * filp) { struct sdio_uart_port *port = tty->driver_data; + tty_port_close(&port->port, tty, filp); +} - if (!port) - return; - - mutex_lock(&port->open_lock); - BUG_ON(!port->opened); - - /* - * This is messy. The tty layer calls us even when open() - * returned an error. Ignore this close request if tty->count - * is larger than port->count. - */ - if (tty->count > port->opened) { - mutex_unlock(&port->open_lock); - return; - } - - if (--port->opened == 0) { - tty->closing = 1; - sdio_uart_shutdown(port); - tty_ldisc_flush(tty); - port->tty = NULL; - tty->driver_data = NULL; - tty->closing = 0; - } - mutex_unlock(&port->open_lock); - sdio_uart_port_put(port); +static void sdio_uart_hangup(struct tty_struct *tty) +{ + struct sdio_uart_port *port = tty->driver_data; + tty_port_hangup(&port->port); } -static int sdio_uart_write(struct tty_struct * tty, const unsigned char *buf, +static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct sdio_uart_port *port = tty->driver_data; @@ -756,7 +843,7 @@ static int sdio_uart_write(struct tty_struct * tty, const unsigned char *buf, } spin_unlock(&port->write_lock); - if ( !(port->ier & UART_IER_THRI)) { + if (!(port->ier & UART_IER_THRI)) { int err = sdio_uart_claim_func(port); if (!err) { sdio_uart_start_tx(port); @@ -843,17 +930,12 @@ static void sdio_uart_unthrottle(struct tty_struct *tty) sdio_uart_release_func(port); } -static void sdio_uart_set_termios(struct tty_struct *tty, struct ktermios *old_termios) +static void sdio_uart_set_termios(struct tty_struct *tty, + struct ktermios *old_termios) { struct sdio_uart_port *port = tty->driver_data; unsigned int cflag = tty->termios->c_cflag; -#define RELEVANT_IFLAG(iflag) ((iflag) & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) - - if ((cflag ^ old_termios->c_cflag) == 0 && - RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0) - return; - if (sdio_uart_claim_func(port) != 0) return; @@ -928,7 +1010,7 @@ static int sdio_uart_tiocmset(struct tty_struct *tty, struct file *file, int result; result = sdio_uart_claim_func(port); - if(!result) { + if (!result) { sdio_uart_update_mctrl(port, set, clear); sdio_uart_release_func(port); } @@ -946,7 +1028,7 @@ static int sdio_uart_proc_show(struct seq_file *m, void *v) struct sdio_uart_port *port = sdio_uart_port_get(i); if (port) { seq_printf(m, "%d: uart:SDIO", i); - if(capable(CAP_SYS_ADMIN)) { + if (capable(CAP_SYS_ADMIN)) { seq_printf(m, " tx:%d rx:%d", port->icount.tx, port->icount.rx); if (port->icount.frame) @@ -994,6 +1076,13 @@ static const struct file_operations sdio_uart_proc_fops = { .release = single_release, }; +static const struct tty_port_operations sdio_uart_port_ops = { + .dtr_rts = uart_dtr_rts, + .carrier_raised = uart_carrier_raised, + .shutdown = 
sdio_uart_shutdown, + .activate = sdio_uart_activate, +}; + static const struct tty_operations sdio_uart_ops = { .open = sdio_uart_open, .close = sdio_uart_close, @@ -1004,9 +1093,12 @@ static const struct tty_operations sdio_uart_ops = { .throttle = sdio_uart_throttle, .unthrottle = sdio_uart_unthrottle, .set_termios = sdio_uart_set_termios, + .hangup = sdio_uart_hangup, .break_ctl = sdio_uart_break_ctl, .tiocmget = sdio_uart_tiocmget, .tiocmset = sdio_uart_tiocmset, + .install = sdio_uart_install, + .cleanup = sdio_uart_cleanup, .proc_fops = &sdio_uart_proc_fops, }; @@ -1043,7 +1135,7 @@ static int sdio_uart_probe(struct sdio_func *func, } if (!tpl) { printk(KERN_WARNING - "%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n", + "%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n", sdio_func_id(func)); kfree(port); return -EINVAL; @@ -1068,13 +1160,16 @@ static int sdio_uart_probe(struct sdio_func *func, port->func = func; sdio_set_drvdata(func, port); + tty_port_init(&port->port); + port->port.ops = &sdio_uart_port_ops; ret = sdio_uart_add_port(port); if (ret) { kfree(port); } else { struct device *dev; - dev = tty_register_device(sdio_uart_tty_driver, port->index, &func->dev); + dev = tty_register_device(sdio_uart_tty_driver, + port->index, &func->dev); if (IS_ERR(dev)) { sdio_uart_port_remove(port); ret = PTR_ERR(dev); diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index ab37a6d9d32..bb22ffd76ef 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -3,7 +3,7 @@ # config MMC_UNSAFE_RESUME - bool "Allow unsafe resume (DANGEROUS)" + bool "Assume MMC/SD cards are non-removable (DANGEROUS)" help If you say Y here, the MMC layer will assume that all cards stayed in their respective slots during the suspend. The @@ -14,3 +14,5 @@ config MMC_UNSAFE_RESUME This option is usually just for embedded systems which use a MMC/SD card for rootfs. Most people should say N here. + This option sets a default which can be overridden by the + module parameter "removable=0" or "removable=1". diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 7dab2e5f4bc..30acd526582 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -48,6 +48,22 @@ int use_spi_crc = 1; module_param(use_spi_crc, bool, 0); /* + * We normally treat cards as removed during suspend if they are not + * known to be on a non-removable bus, to avoid the risk of writing + * back data to a different card after resume. Allow this to be + * overridden if necessary. + */ +#ifdef CONFIG_MMC_UNSAFE_RESUME +int mmc_assume_removable; +#else +int mmc_assume_removable = 1; +#endif +module_param_named(removable, mmc_assume_removable, bool, 0644); +MODULE_PARM_DESC( + removable, + "MMC/SD cards are removable and may be removed during suspend"); + +/* * Internal function. Schedule delayed work in the MMC work queue. 
*/ static int mmc_schedule_delayed_work(struct delayed_work *work, diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 67ae6abc423..a811c52a165 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -54,7 +54,9 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr); int mmc_attach_sd(struct mmc_host *host, u32 ocr); int mmc_attach_sdio(struct mmc_host *host, u32 ocr); +/* Module parameters */ extern int use_spi_crc; +extern int mmc_assume_removable; /* Debugfs information for hosts and cards */ void mmc_add_host_debugfs(struct mmc_host *host); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index bfefce365ae..0eac6c81490 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -207,7 +207,7 @@ static int mmc_read_ext_csd(struct mmc_card *card) } card->ext_csd.rev = ext_csd[EXT_CSD_REV]; - if (card->ext_csd.rev > 3) { + if (card->ext_csd.rev > 5) { printk(KERN_ERR "%s: unrecognised EXT_CSD structure " "version %d\n", mmc_hostname(card->host), card->ext_csd.rev); @@ -602,25 +602,6 @@ static int mmc_awake(struct mmc_host *host) return err; } -#ifdef CONFIG_MMC_UNSAFE_RESUME - -static const struct mmc_bus_ops mmc_ops = { - .awake = mmc_awake, - .sleep = mmc_sleep, - .remove = mmc_remove, - .detect = mmc_detect, - .suspend = mmc_suspend, - .resume = mmc_resume, - .power_restore = mmc_power_restore, -}; - -static void mmc_attach_bus_ops(struct mmc_host *host) -{ - mmc_attach_bus(host, &mmc_ops); -} - -#else - static const struct mmc_bus_ops mmc_ops = { .awake = mmc_awake, .sleep = mmc_sleep, @@ -645,15 +626,13 @@ static void mmc_attach_bus_ops(struct mmc_host *host) { const struct mmc_bus_ops *bus_ops; - if (host->caps & MMC_CAP_NONREMOVABLE) + if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable) bus_ops = &mmc_ops_unsafe; else bus_ops = &mmc_ops; mmc_attach_bus(host, bus_ops); } -#endif - /* * Starting point for MMC card init. */ diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 10b2a4d20f5..fdd414eded0 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -606,23 +606,6 @@ static void mmc_sd_power_restore(struct mmc_host *host) mmc_release_host(host); } -#ifdef CONFIG_MMC_UNSAFE_RESUME - -static const struct mmc_bus_ops mmc_sd_ops = { - .remove = mmc_sd_remove, - .detect = mmc_sd_detect, - .suspend = mmc_sd_suspend, - .resume = mmc_sd_resume, - .power_restore = mmc_sd_power_restore, -}; - -static void mmc_sd_attach_bus_ops(struct mmc_host *host) -{ - mmc_attach_bus(host, &mmc_sd_ops); -} - -#else - static const struct mmc_bus_ops mmc_sd_ops = { .remove = mmc_sd_remove, .detect = mmc_sd_detect, @@ -643,15 +626,13 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host) { const struct mmc_bus_ops *bus_ops; - if (host->caps & MMC_CAP_NONREMOVABLE) + if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable) bus_ops = &mmc_sd_ops_unsafe; else bus_ops = &mmc_sd_ops; mmc_attach_bus(host, bus_ops); } -#endif - /* * Starting point for SD card init. */ diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index cdb845b68ab..06b64085a35 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -516,7 +516,8 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr) * The number of functions on the card is encoded inside * the ocr. */ - card->sdio_funcs = funcs = (ocr & 0x70000000) >> 28; + funcs = (ocr & 0x70000000) >> 28; + card->sdio_funcs = 0; /* * If needed, disconnect card detection pull-up resistor. 
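The sdio.c change just above, together with the loop change in the next hunk, stops taking the function count from the OCR on faith: card->sdio_funcs now starts at zero and is incremented only as each function is successfully initialized, so teardown after a partial initialization walks only slots that were actually populated (which is also why the sdio_remove_func() comment below notes it may be reached from error paths). A minimal sketch of the idea, assuming the usual card->sdio_func[] bookkeeping; example_sdio_teardown is a hypothetical helper, not the in-tree mmc_sdio_remove():

    static void example_sdio_teardown(struct mmc_card *card)
    {
            int i;

            /* Only slots counted in sdio_funcs were ever set up. */
            for (i = 0; i < card->sdio_funcs; i++) {
                    if (card->sdio_func[i]) {
                            sdio_remove_func(card->sdio_func[i]);
                            card->sdio_func[i] = NULL;
                    }
            }
            card->sdio_funcs = 0;
    }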
@@ -528,7 +529,7 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr) /* * Initialize (but don't add) all present functions. */ - for (i = 0;i < funcs;i++) { + for (i = 0; i < funcs; i++, card->sdio_funcs++) { err = sdio_init_func(host->card, i + 1); if (err) goto remove; diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index d37464e296a..9e060c87e64 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -248,12 +248,15 @@ int sdio_add_func(struct sdio_func *func) /* * Unregister a SDIO function with the driver model, and * (eventually) free it. + * This function can be called through error paths where sdio_add_func() was + * never executed (because a failure occurred at an earlier point). */ void sdio_remove_func(struct sdio_func *func) { - if (sdio_func_present(func)) - device_del(&func->dev); + if (!sdio_func_present(func)) + return; + device_del(&func->dev); put_device(&func->dev); } diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index f85dcd53650..9538389783c 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -97,26 +97,56 @@ static const unsigned char speed_val[16] = static const unsigned int speed_unit[8] = { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 }; -/* FUNCE tuples with these types get passed to SDIO drivers */ -static const unsigned char funce_type_whitelist[] = { - 4 /* CISTPL_FUNCE_LAN_NODE_ID used in Broadcom cards */ + +typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *, + const unsigned char *, unsigned); + +struct cis_tpl { + unsigned char code; + unsigned char min_size; + tpl_parse_t *parse; }; -static int cistpl_funce_whitelisted(unsigned char type) +static int cis_tpl_parse(struct mmc_card *card, struct sdio_func *func, + const char *tpl_descr, + const struct cis_tpl *tpl, int tpl_count, + unsigned char code, + const unsigned char *buf, unsigned size) { - int i; + int i, ret; - for (i = 0; i < ARRAY_SIZE(funce_type_whitelist); i++) { - if (funce_type_whitelist[i] == type) - return 1; + /* look for a matching code in the table */ + for (i = 0; i < tpl_count; i++, tpl++) { + if (tpl->code == code) + break; } - return 0; + if (i < tpl_count) { + if (size >= tpl->min_size) { + if (tpl->parse) + ret = tpl->parse(card, func, buf, size); + else + ret = -EILSEQ; /* known tuple, not parsed */ + } else { + /* invalid tuple */ + ret = -EINVAL; + } + if (ret && ret != -EILSEQ && ret != -ENOENT) { + printk(KERN_ERR "%s: bad %s tuple 0x%02x (%u bytes)\n", + mmc_hostname(card->host), tpl_descr, code, size); + } + } else { + /* unknown tuple */ + ret = -ENOENT; + } + + return ret; } -static int cistpl_funce_common(struct mmc_card *card, +static int cistpl_funce_common(struct mmc_card *card, struct sdio_func *func, const unsigned char *buf, unsigned size) { - if (size < 0x04 || buf[0] != 0) + /* Only valid for the common CIS (function 0) */ + if (func) return -EINVAL; /* TPLFE_FN0_BLK_SIZE */ @@ -129,20 +159,24 @@ static int cistpl_funce_common(struct mmc_card *card, return 0; } -static int cistpl_funce_func(struct sdio_func *func, +static int cistpl_funce_func(struct mmc_card *card, struct sdio_func *func, const unsigned char *buf, unsigned size) { unsigned vsn; unsigned min_size; - /* let SDIO drivers take care of whitelisted FUNCE tuples */ - if (cistpl_funce_whitelisted(buf[0])) - return -EILSEQ; + /* Only valid for the individual function's CIS (1-7) */ + if (!func) + return -EINVAL; + /* + * This tuple has a different length depending on the SDIO spec + * version. 
+ */ vsn = func->card->cccr.sdio_vsn; min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42; - if (size < min_size || buf[0] != 1) + if (size < min_size) return -EINVAL; /* TPLFE_MAX_BLK_SIZE */ @@ -157,39 +191,32 @@ static int cistpl_funce_func(struct sdio_func *func, return 0; } +/* + * Known TPLFE_TYPEs table for CISTPL_FUNCE tuples. + * + * Note that, unlike PCMCIA, CISTPL_FUNCE tuples are not parsed depending + * on the TPLFID_FUNCTION value of the previous CISTPL_FUNCID as on SDIO + * TPLFID_FUNCTION is always hardcoded to 0x0C. + */ +static const struct cis_tpl cis_tpl_funce_list[] = { + { 0x00, 4, cistpl_funce_common }, + { 0x01, 0, cistpl_funce_func }, + { 0x04, 1+1+6, /* CISTPL_FUNCE_LAN_NODE_ID */ }, +}; + static int cistpl_funce(struct mmc_card *card, struct sdio_func *func, const unsigned char *buf, unsigned size) { - int ret; - - /* - * There should be two versions of the CISTPL_FUNCE tuple, - * one for the common CIS (function 0) and a version used by - * the individual function's CIS (1-7). Yet, the later has a - * different length depending on the SDIO spec version. - */ - if (func) - ret = cistpl_funce_func(func, buf, size); - else - ret = cistpl_funce_common(card, buf, size); - - if (ret && ret != -EILSEQ) { - printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u " - "type %u\n", mmc_hostname(card->host), size, buf[0]); - } + if (size < 1) + return -EINVAL; - return ret; + return cis_tpl_parse(card, func, "CISTPL_FUNCE", + cis_tpl_funce_list, + ARRAY_SIZE(cis_tpl_funce_list), + buf[0], buf, size); } -typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *, - const unsigned char *, unsigned); - -struct cis_tpl { - unsigned char code; - unsigned char min_size; - tpl_parse_t *parse; -}; - +/* Known TPL_CODEs table for CIS tuples */ static const struct cis_tpl cis_tpl_list[] = { { 0x15, 3, cistpl_vers_1 }, { 0x20, 4, cistpl_manfid }, @@ -268,46 +295,38 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) break; } - for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++) - if (cis_tpl_list[i].code == tpl_code) - break; - if (i < ARRAY_SIZE(cis_tpl_list)) { - const struct cis_tpl *tpl = cis_tpl_list + i; - if (tpl_link < tpl->min_size) { - printk(KERN_ERR - "%s: bad CIS tuple 0x%02x" - " (length = %u, expected >= %u)\n", - mmc_hostname(card->host), - tpl_code, tpl_link, tpl->min_size); - ret = -EINVAL; - } else if (tpl->parse) { - ret = tpl->parse(card, func, - this->data, tpl_link); - } + /* Try to parse the CIS tuple */ + ret = cis_tpl_parse(card, func, "CIS", + cis_tpl_list, ARRAY_SIZE(cis_tpl_list), + tpl_code, this->data, tpl_link); + if (ret == -EILSEQ || ret == -ENOENT) { /* - * We don't need the tuple anymore if it was - * successfully parsed by the SDIO core or if it is - * not going to be parsed by SDIO drivers. + * The tuple is unknown or known but not parsed. + * Queue the tuple for the function driver. 
*/ - if (!ret || ret != -EILSEQ) - kfree(this); - } else { - /* unknown tuple */ - ret = -EILSEQ; - } - - if (ret == -EILSEQ) { - /* this tuple is unknown to the core or whitelisted */ this->next = NULL; this->code = tpl_code; this->size = tpl_link; *prev = this; prev = &this->next; - printk(KERN_DEBUG - "%s: queuing CIS tuple 0x%02x length %u\n", - mmc_hostname(card->host), tpl_code, tpl_link); + + if (ret == -ENOENT) { + /* warn about unknown tuples */ + printk(KERN_WARNING "%s: queuing unknown" + " CIS tuple 0x%02x (%u bytes)\n", + mmc_hostname(card->host), + tpl_code, tpl_link); + } + /* keep on analyzing tuples */ ret = 0; + } else { + /* + * We don't need the tuple anymore if it was + * successfully parsed by the SDIO core or if it is + * not going to be queued for a driver. + */ + kfree(this); } ptr += tpl_link; diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index e04b751680d..ce1d28884e2 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -44,6 +44,19 @@ config MMC_SDHCI_IO_ACCESSORS This is silent Kconfig symbol that is selected by the drivers that need to overwrite SDHCI IO memory accessors. +config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER + bool + select MMC_SDHCI_IO_ACCESSORS + help + This option is selected by drivers running on big endian hosts + and performing I/O to a SDHCI controller through a bus that + implements a hardware byte swapper using a 32-bit datum. + This endian mapping mode is called "data invariance" and + has the effect of scrambling the addresses and formats of data + accessed in sizes other than the datum size. + + This is the case for the Freescale eSDHC and Nintendo Wii SDHCI. + config MMC_SDHCI_PCI tristate "SDHCI support on PCI bus" depends on MMC_SDHCI && PCI @@ -75,11 +88,29 @@ config MMC_RICOH_MMC config MMC_SDHCI_OF tristate "SDHCI support on OpenFirmware platforms" depends on MMC_SDHCI && PPC_OF - select MMC_SDHCI_IO_ACCESSORS help This selects the OF support for Secure Digital Host Controller - Interfaces. So far, only the Freescale eSDHC controller is known - to exist on OF platforms. + Interfaces. + + If unsure, say N. + +config MMC_SDHCI_OF_ESDHC + bool "SDHCI OF support for the Freescale eSDHC controller" + depends on MMC_SDHCI_OF + select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER + help + This selects the Freescale eSDHC controller support. + + If unsure, say N. + +config MMC_SDHCI_OF_HLWD + bool "SDHCI OF support for the Nintendo Wii SDHCI controllers" + depends on MMC_SDHCI_OF + select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER + help + This selects the Secure Digital Host Controller Interface (SDHCI) + found in the "Hollywood" chipset of the Nintendo Wii video game + console. If unsure, say N. @@ -251,6 +282,14 @@ config MMC_MVSDIO To compile this driver as a module, choose M here: the module will be called mvsdio. +config MMC_DAVINCI + tristate "TI DAVINCI Multimedia Card Interface support" + depends on ARCH_DAVINCI + help + This selects the TI DAVINCI Multimedia card Interface. + If you have an DAVINCI board with a Multimedia Card slot, + say Y or M here. If unsure, say N. + config MMC_SPI tristate "MMC/SD/SDIO over SPI" depends on SPI_MASTER && !HIGHMEM && HAS_DMA @@ -357,3 +396,22 @@ config MMC_VIA_SDMMC If you have a controller with this interface, say Y or M here. If unsure, say N. 
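The MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER help text above describes the "data invariance" mapping: accesses of the datum size (32 bits) pass through the byte swapper with the correct value, while 8- and 16-bit accesses land on swapped byte lanes unless the driver swizzles the register offset itself. A minimal sketch of that accessor pattern, assuming a big-endian host, <linux/io.h>, and hypothetical names (the sdhci-of helpers built from sdhci-of-core.c follow the same idea but differ in detail):

    static u32 swab_bus_readl(void __iomem *base, int reg)
    {
            return ioread32be(base + reg);          /* datum-sized: value comes through intact */
    }

    static u16 swab_bus_readw(void __iomem *base, int reg)
    {
            return ioread16be(base + (reg ^ 0x2));  /* select the right half-word */
    }

    static u8 swab_bus_readb(void __iomem *base, int reg)
    {
            return ioread8(base + (reg ^ 0x3));     /* select the right byte lane */
    }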
+ +config SDH_BFIN + tristate "Blackfin Secure Digital Host support" + depends on MMC && ((BF54x && !BF544) || (BF51x && !BF512)) + help + If you say yes here you will get support for the Blackfin on-chip + Secure Digital Host interface. This includes support for MMC and + SD cards. + + To compile this driver as a module, choose M here: the + module will be called bfin_sdh. + + If unsure, say N. + +config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND + bool "Blackfin EZkit Missing SDH_CMD Pull Up Resistor Workaround" + depends on SDH_BFIN + help + If you say yes here SD-Cards may work on the EZkit. diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index abcb0400e06..3d253dd4240 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -13,7 +13,6 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o obj-$(CONFIG_MMC_SDHCI) += sdhci.o obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o -obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o obj-$(CONFIG_MMC_WBSD) += wbsd.o @@ -25,6 +24,7 @@ obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o obj-$(CONFIG_MMC_MSM7X00A) += msm_sdcc.o obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o +obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o obj-$(CONFIG_MMC_SPI) += mmc_spi.o ifeq ($(CONFIG_OF),y) obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o @@ -34,6 +34,12 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o obj-$(CONFIG_MMC_CB710) += cb710-mmc.o obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o +obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o + +obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o +sdhci-of-y := sdhci-of-core.o +sdhci-of-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o +sdhci-of-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o ifeq ($(CONFIG_CB710_DEBUG),y) CFLAGS-cb710-mmc += -DDEBUG diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index fc25586b7ee..8072128e933 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -25,6 +25,8 @@ #include <linux/stat.h> #include <linux/mmc/host.h> + +#include <mach/atmel-mci.h> #include <linux/atmel-mci.h> #include <asm/io.h> @@ -92,6 +94,7 @@ struct atmel_mci_dma { * @need_clock_update: Update the clock rate before the next request. * @need_reset: Reset controller before next request. * @mode_reg: Value of the MR register. + * @cfg_reg: Value of the CFG register. * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus * rate and timeout calculations. * @mapbase: Physical address of the MMIO registers. @@ -155,6 +158,7 @@ struct atmel_mci { bool need_clock_update; bool need_reset; u32 mode_reg; + u32 cfg_reg; unsigned long bus_hz; unsigned long mapbase; struct clk *mck; @@ -223,6 +227,19 @@ static bool mci_has_rwproof(void) } /* + * The new MCI2 module isn't 100% compatible with the old MCI module, + * and it has a few nice features which we want to use... + */ +static inline bool atmci_is_mci2(void) +{ + if (cpu_is_at91sam9g45()) + return true; + + return false; +} + + +/* * The debugfs stuff below is mostly optimized away when * CONFIG_DEBUG_FS is not set. 
*/ @@ -357,12 +374,33 @@ static int atmci_regs_show(struct seq_file *s, void *v) buf[MCI_BLKR / 4], buf[MCI_BLKR / 4] & 0xffff, (buf[MCI_BLKR / 4] >> 16) & 0xffff); + if (atmci_is_mci2()) + seq_printf(s, "CSTOR:\t0x%08x\n", buf[MCI_CSTOR / 4]); /* Don't read RSPR and RDR; it will consume the data there */ atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]); atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]); + if (atmci_is_mci2()) { + u32 val; + + val = buf[MCI_DMA / 4]; + seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n", + val, val & 3, + ((val >> 4) & 3) ? + 1 << (((val >> 4) & 3) + 1) : 1, + val & MCI_DMAEN ? " DMAEN" : ""); + + val = buf[MCI_CFG / 4]; + seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n", + val, + val & MCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "", + val & MCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "", + val & MCI_CFG_HSMODE ? " HSMODE" : "", + val & MCI_CFG_LSYNC ? " LSYNC" : ""); + } + kfree(buf); return 0; @@ -557,6 +595,10 @@ static void atmci_dma_complete(void *arg) dev_vdbg(&host->pdev->dev, "DMA complete\n"); + if (atmci_is_mci2()) + /* Disable DMA hardware handshaking on MCI */ + mci_writel(host, DMA, mci_readl(host, DMA) & ~MCI_DMAEN); + atmci_dma_cleanup(host); /* @@ -592,7 +634,7 @@ static void atmci_dma_complete(void *arg) } static int -atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) +atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) { struct dma_chan *chan; struct dma_async_tx_descriptor *desc; @@ -624,6 +666,9 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) if (!chan) return -ENODEV; + if (atmci_is_mci2()) + mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN); + if (data->flags & MMC_DATA_READ) direction = DMA_FROM_DEVICE; else @@ -641,10 +686,6 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) host->dma.data_desc = desc; desc->callback = atmci_dma_complete; desc->callback_param = host; - desc->tx_submit(desc); - - /* Go! */ - chan->device->device_issue_pending(chan); return 0; unmap_exit: @@ -652,13 +693,26 @@ unmap_exit: return -ENOMEM; } +static void atmci_submit_data(struct atmel_mci *host) +{ + struct dma_chan *chan = host->data_chan; + struct dma_async_tx_descriptor *desc = host->dma.data_desc; + + if (chan) { + desc->tx_submit(desc); + chan->device->device_issue_pending(chan); + } +} + #else /* CONFIG_MMC_ATMELMCI_DMA */ -static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) +static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) { return -ENOSYS; } +static void atmci_submit_data(struct atmel_mci *host) {} + static void atmci_stop_dma(struct atmel_mci *host) { /* Data transfer was stopped by the interrupt handler */ @@ -672,7 +726,7 @@ static void atmci_stop_dma(struct atmel_mci *host) * Returns a mask of interrupt flags to be enabled after the whole * request has been prepared. 
*/ -static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data) +static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data) { u32 iflags; @@ -683,7 +737,7 @@ static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data) host->data = data; iflags = ATMCI_DATA_ERROR_FLAGS; - if (atmci_submit_data_dma(host, data)) { + if (atmci_prepare_data_dma(host, data)) { host->data_chan = NULL; /* @@ -729,6 +783,8 @@ static void atmci_start_request(struct atmel_mci *host, mci_writel(host, CR, MCI_CR_SWRST); mci_writel(host, CR, MCI_CR_MCIEN); mci_writel(host, MR, host->mode_reg); + if (atmci_is_mci2()) + mci_writel(host, CFG, host->cfg_reg); host->need_reset = false; } mci_writel(host, SDCR, slot->sdc_reg); @@ -744,6 +800,7 @@ static void atmci_start_request(struct atmel_mci *host, while (!(mci_readl(host, SR) & MCI_CMDRDY)) cpu_relax(); } + iflags = 0; data = mrq->data; if (data) { atmci_set_timeout(host, slot, data); @@ -753,15 +810,17 @@ static void atmci_start_request(struct atmel_mci *host, | MCI_BLKLEN(data->blksz)); dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n", MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); + + iflags |= atmci_prepare_data(host, data); } - iflags = MCI_CMDRDY; + iflags |= MCI_CMDRDY; cmd = mrq->cmd; cmdflags = atmci_prepare_command(slot->mmc, cmd); atmci_start_command(host, cmd, cmdflags); if (data) - iflags |= atmci_submit_data(host, data); + atmci_submit_data(host); if (mrq->stop) { host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); @@ -857,6 +916,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) clk_enable(host->mck); mci_writel(host, CR, MCI_CR_SWRST); mci_writel(host, CR, MCI_CR_MCIEN); + if (atmci_is_mci2()) + mci_writel(host, CFG, host->cfg_reg); } /* @@ -1095,6 +1156,8 @@ static void atmci_detect_change(unsigned long data) mci_writel(host, CR, MCI_CR_SWRST); mci_writel(host, CR, MCI_CR_MCIEN); mci_writel(host, MR, host->mode_reg); + if (atmci_is_mci2()) + mci_writel(host, CFG, host->cfg_reg); host->data = NULL; host->cmd = NULL; @@ -1584,14 +1647,47 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, #ifdef CONFIG_MMC_ATMELMCI_DMA static bool filter(struct dma_chan *chan, void *slave) { - struct dw_dma_slave *dws = slave; + struct mci_dma_data *sl = slave; - if (dws->dma_dev == chan->device->dev) { - chan->private = dws; + if (sl && find_slave_dev(sl) == chan->device->dev) { + chan->private = slave_data_ptr(sl); return true; - } else + } else { return false; + } } + +static void atmci_configure_dma(struct atmel_mci *host) +{ + struct mci_platform_data *pdata; + + if (host == NULL) + return; + + pdata = host->pdev->dev.platform_data; + + if (pdata && find_slave_dev(pdata->dma_slave)) { + dma_cap_mask_t mask; + + setup_dma_addr(pdata->dma_slave, + host->mapbase + MCI_TDR, + host->mapbase + MCI_RDR); + + /* Try to grab a DMA channel */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + host->dma.chan = + dma_request_channel(mask, filter, pdata->dma_slave); + } + if (!host->dma.chan) + dev_notice(&host->pdev->dev, "DMA not available, using PIO\n"); + else + dev_info(&host->pdev->dev, + "Using %s for DMA transfers\n", + dma_chan_name(host->dma.chan)); +} +#else +static void atmci_configure_dma(struct atmel_mci *host) {} #endif static int __init atmci_probe(struct platform_device *pdev) @@ -1645,22 +1741,7 @@ static int __init atmci_probe(struct platform_device *pdev) if (ret) goto err_request_irq; -#ifdef CONFIG_MMC_ATMELMCI_DMA - if (pdata->dma_slave.dma_dev) { - 
struct dw_dma_slave *dws = &pdata->dma_slave; - dma_cap_mask_t mask; - - dws->tx_reg = regs->start + MCI_TDR; - dws->rx_reg = regs->start + MCI_RDR; - - /* Try to grab a DMA channel */ - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - host->dma.chan = dma_request_channel(mask, filter, dws); - } - if (!host->dma.chan) - dev_notice(&pdev->dev, "DMA not available, using PIO\n"); -#endif /* CONFIG_MMC_ATMELMCI_DMA */ + atmci_configure_dma(host); platform_set_drvdata(pdev, host); diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c new file mode 100644 index 00000000000..3343a57355c --- /dev/null +++ b/drivers/mmc/host/bfin_sdh.c @@ -0,0 +1,639 @@ +/* + * bfin_sdh.c - Analog Devices Blackfin SDH Controller + * + * Copyright (C) 2007-2009 Analog Device Inc. + * + * Licensed under the GPL-2 or later. + */ + +#define DRIVER_NAME "bfin-sdh" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/mmc/host.h> +#include <linux/proc_fs.h> + +#include <asm/cacheflush.h> +#include <asm/dma.h> +#include <asm/portmux.h> +#include <asm/bfin_sdh.h> + +#if defined(CONFIG_BF51x) +#define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL +#define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL +#define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL +#define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL +#define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT +#define bfin_write_SDH_COMMAND bfin_write_RSI_COMMAND +#define bfin_write_SDH_DATA_TIMER bfin_write_RSI_DATA_TIMER +#define bfin_read_SDH_RESPONSE0 bfin_read_RSI_RESPONSE0 +#define bfin_read_SDH_RESPONSE1 bfin_read_RSI_RESPONSE1 +#define bfin_read_SDH_RESPONSE2 bfin_read_RSI_RESPONSE2 +#define bfin_read_SDH_RESPONSE3 bfin_read_RSI_RESPONSE3 +#define bfin_write_SDH_DATA_LGTH bfin_write_RSI_DATA_LGTH +#define bfin_read_SDH_DATA_CTL bfin_read_RSI_DATA_CTL +#define bfin_write_SDH_DATA_CTL bfin_write_RSI_DATA_CTL +#define bfin_read_SDH_DATA_CNT bfin_read_RSI_DATA_CNT +#define bfin_write_SDH_STATUS_CLR bfin_write_RSI_STATUS_CLR +#define bfin_read_SDH_E_STATUS bfin_read_RSI_E_STATUS +#define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS +#define bfin_read_SDH_STATUS bfin_read_RSI_STATUS +#define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0 +#define bfin_read_SDH_CFG bfin_read_RSI_CFG +#define bfin_write_SDH_CFG bfin_write_RSI_CFG +#endif + +struct dma_desc_array { + unsigned long start_addr; + unsigned short cfg; + unsigned short x_count; + short x_modify; +} __packed; + +struct sdh_host { + struct mmc_host *mmc; + spinlock_t lock; + struct resource *res; + void __iomem *base; + int irq; + int stat_irq; + int dma_ch; + int dma_dir; + struct dma_desc_array *sg_cpu; + dma_addr_t sg_dma; + int dma_len; + + unsigned int imask; + unsigned int power_mode; + unsigned int clk_div; + + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; +}; + +static struct bfin_sd_host *get_sdh_data(struct platform_device *pdev) +{ + return pdev->dev.platform_data; +} + +static void sdh_stop_clock(struct sdh_host *host) +{ + bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() & ~CLK_E); + SSYNC(); +} + +static void sdh_enable_stat_irq(struct sdh_host *host, unsigned int mask) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->imask |= mask; + bfin_write_SDH_MASK0(mask); + SSYNC(); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void 
sdh_disable_stat_irq(struct sdh_host *host, unsigned int mask) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->imask &= ~mask; + bfin_write_SDH_MASK0(host->imask); + SSYNC(); + spin_unlock_irqrestore(&host->lock, flags); +} + +static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) +{ + unsigned int length; + unsigned int data_ctl; + unsigned int dma_cfg; + struct scatterlist *sg; + + dev_dbg(mmc_dev(host->mmc), "%s enter flags: 0x%x\n", __func__, data->flags); + host->data = data; + data_ctl = 0; + dma_cfg = 0; + + length = data->blksz * data->blocks; + bfin_write_SDH_DATA_LGTH(length); + + if (data->flags & MMC_DATA_STREAM) + data_ctl |= DTX_MODE; + + if (data->flags & MMC_DATA_READ) + data_ctl |= DTX_DIR; + /* Only supports power-of-2 block size */ + if (data->blksz & (data->blksz - 1)) + return -EINVAL; + data_ctl |= ((ffs(data->blksz) - 1) << 4); + + bfin_write_SDH_DATA_CTL(data_ctl); + + bfin_write_SDH_DATA_TIMER(0xFFFF); + SSYNC(); + + if (data->flags & MMC_DATA_READ) { + host->dma_dir = DMA_FROM_DEVICE; + dma_cfg |= WNR; + } else + host->dma_dir = DMA_TO_DEVICE; + + sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END)); + host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); +#if defined(CONFIG_BF54x) + dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN; + { + int i; + for_each_sg(data->sg, sg, host->dma_len, i) { + host->sg_cpu[i].start_addr = sg_dma_address(sg); + host->sg_cpu[i].cfg = dma_cfg; + host->sg_cpu[i].x_count = sg_dma_len(sg) / 4; + host->sg_cpu[i].x_modify = 4; + dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, " + "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n", + i, host->sg_cpu[i].start_addr, + host->sg_cpu[i].cfg, host->sg_cpu[i].x_count, + host->sg_cpu[i].x_modify); + } + } + flush_dcache_range((unsigned int)host->sg_cpu, + (unsigned int)host->sg_cpu + + host->dma_len * sizeof(struct dma_desc_array)); + /* Set the last descriptor to stop mode */ + host->sg_cpu[host->dma_len - 1].cfg &= ~(DMAFLOW | NDSIZE); + host->sg_cpu[host->dma_len - 1].cfg |= DI_EN; + + set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma); + set_dma_x_count(host->dma_ch, 0); + set_dma_x_modify(host->dma_ch, 0); + set_dma_config(host->dma_ch, dma_cfg); +#elif defined(CONFIG_BF51x) + /* RSI DMA doesn't work in array mode */ + dma_cfg |= WDSIZE_32 | DMAEN; + set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0])); + set_dma_x_count(host->dma_ch, length / 4); + set_dma_x_modify(host->dma_ch, 4); + set_dma_config(host->dma_ch, dma_cfg); +#endif + bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E); + + SSYNC(); + + dev_dbg(mmc_dev(host->mmc), "%s exit\n", __func__); + return 0; +} + +static void sdh_start_cmd(struct sdh_host *host, struct mmc_command *cmd) +{ + unsigned int sdh_cmd; + unsigned int stat_mask; + + dev_dbg(mmc_dev(host->mmc), "%s enter cmd: 0x%p\n", __func__, cmd); + WARN_ON(host->cmd != NULL); + host->cmd = cmd; + + sdh_cmd = 0; + stat_mask = 0; + + sdh_cmd |= cmd->opcode; + + if (cmd->flags & MMC_RSP_PRESENT) { + sdh_cmd |= CMD_RSP; + stat_mask |= CMD_RESP_END; + } else { + stat_mask |= CMD_SENT; + } + + if (cmd->flags & MMC_RSP_136) + sdh_cmd |= CMD_L_RSP; + + stat_mask |= CMD_CRC_FAIL | CMD_TIME_OUT; + + sdh_enable_stat_irq(host, stat_mask); + + bfin_write_SDH_ARGUMENT(cmd->arg); + bfin_write_SDH_COMMAND(sdh_cmd | CMD_E); + bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() | CLK_E); + SSYNC(); +} + +static void sdh_finish_request(struct 
sdh_host *host, struct mmc_request *mrq) +{ + dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); + host->mrq = NULL; + host->cmd = NULL; + host->data = NULL; + mmc_request_done(host->mmc, mrq); +} + +static int sdh_cmd_done(struct sdh_host *host, unsigned int stat) +{ + struct mmc_command *cmd = host->cmd; + int ret = 0; + + dev_dbg(mmc_dev(host->mmc), "%s enter cmd: %p\n", __func__, cmd); + if (!cmd) + return 0; + + host->cmd = NULL; + + if (cmd->flags & MMC_RSP_PRESENT) { + cmd->resp[0] = bfin_read_SDH_RESPONSE0(); + if (cmd->flags & MMC_RSP_136) { + cmd->resp[1] = bfin_read_SDH_RESPONSE1(); + cmd->resp[2] = bfin_read_SDH_RESPONSE2(); + cmd->resp[3] = bfin_read_SDH_RESPONSE3(); + } + } + if (stat & CMD_TIME_OUT) + cmd->error = -ETIMEDOUT; + else if (stat & CMD_CRC_FAIL && cmd->flags & MMC_RSP_CRC) + cmd->error = -EILSEQ; + + sdh_disable_stat_irq(host, (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)); + + if (host->data && !cmd->error) { + if (host->data->flags & MMC_DATA_WRITE) { + ret = sdh_setup_data(host, host->data); + if (ret) + return 0; + } + + sdh_enable_stat_irq(host, DAT_END | RX_OVERRUN | TX_UNDERRUN | DAT_TIME_OUT); + } else + sdh_finish_request(host, host->mrq); + + return 1; +} + +static int sdh_data_done(struct sdh_host *host, unsigned int stat) +{ + struct mmc_data *data = host->data; + + dev_dbg(mmc_dev(host->mmc), "%s enter stat: 0x%x\n", __func__, stat); + if (!data) + return 0; + + disable_dma(host->dma_ch); + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + host->dma_dir); + + if (stat & DAT_TIME_OUT) + data->error = -ETIMEDOUT; + else if (stat & DAT_CRC_FAIL) + data->error = -EILSEQ; + else if (stat & (RX_OVERRUN | TX_UNDERRUN)) + data->error = -EIO; + + if (!data->error) + data->bytes_xfered = data->blocks * data->blksz; + else + data->bytes_xfered = 0; + + sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN); + bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \ + DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN); + bfin_write_SDH_DATA_CTL(0); + SSYNC(); + + host->data = NULL; + if (host->mrq->stop) { + sdh_stop_clock(host); + sdh_start_cmd(host, host->mrq->stop); + } else { + sdh_finish_request(host, host->mrq); + } + + return 1; +} + +static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sdh_host *host = mmc_priv(mmc); + int ret = 0; + + dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd); + WARN_ON(host->mrq != NULL); + + host->mrq = mrq; + host->data = mrq->data; + + if (mrq->data && mrq->data->flags & MMC_DATA_READ) { + ret = sdh_setup_data(host, mrq->data); + if (ret) + return; + } + + sdh_start_cmd(host, mrq->cmd); +} + +static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdh_host *host; + unsigned long flags; + u16 clk_ctl = 0; + u16 pwr_ctl = 0; + u16 cfg; + host = mmc_priv(mmc); + + spin_lock_irqsave(&host->lock, flags); + if (ios->clock) { + unsigned long sys_clk, ios_clk; + unsigned char clk_div; + ios_clk = 2 * ios->clock; + sys_clk = get_sclk(); + clk_div = sys_clk / ios_clk; + if (sys_clk % ios_clk == 0) + clk_div -= 1; + clk_div = min_t(unsigned char, clk_div, 0xFF); + clk_ctl |= clk_div; + clk_ctl |= CLK_E; + host->clk_div = clk_div; + } else + sdh_stop_clock(host); + + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) +#ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND + pwr_ctl |= ROD_CTL; +#else + pwr_ctl |= SD_CMD_OD | ROD_CTL; +#endif + + if (ios->bus_width == MMC_BUS_WIDTH_4) 
{ + cfg = bfin_read_SDH_CFG(); + cfg &= ~PD_SDDAT3; + cfg |= PUP_SDDAT3; + /* Enable 4 bit SDIO */ + cfg |= (SD4E | MWE); + bfin_write_SDH_CFG(cfg); + clk_ctl |= WIDE_BUS; + } else { + cfg = bfin_read_SDH_CFG(); + cfg |= MWE; + bfin_write_SDH_CFG(cfg); + } + + bfin_write_SDH_CLK_CTL(clk_ctl); + + host->power_mode = ios->power_mode; + if (ios->power_mode == MMC_POWER_ON) + pwr_ctl |= PWR_ON; + + bfin_write_SDH_PWR_CTL(pwr_ctl); + SSYNC(); + + spin_unlock_irqrestore(&host->lock, flags); + + dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n", + host->clk_div, + host->clk_div ? get_sclk() / (2 * (host->clk_div + 1)) : 0, + ios->clock); +} + +static const struct mmc_host_ops sdh_ops = { + .request = sdh_request, + .set_ios = sdh_set_ios, +}; + +static irqreturn_t sdh_dma_irq(int irq, void *devid) +{ + struct sdh_host *host = devid; + + dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__, + get_dma_curr_irqstat(host->dma_ch)); + clear_dma_irqstat(host->dma_ch); + SSYNC(); + + return IRQ_HANDLED; +} + +static irqreturn_t sdh_stat_irq(int irq, void *devid) +{ + struct sdh_host *host = devid; + unsigned int status; + int handled = 0; + + dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); + status = bfin_read_SDH_E_STATUS(); + if (status & SD_CARD_DET) { + mmc_detect_change(host->mmc, 0); + bfin_write_SDH_E_STATUS(SD_CARD_DET); + } + status = bfin_read_SDH_STATUS(); + if (status & (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)) { + handled |= sdh_cmd_done(host, status); + bfin_write_SDH_STATUS_CLR(CMD_SENT_STAT | CMD_RESP_END_STAT | \ + CMD_TIMEOUT_STAT | CMD_CRC_FAIL_STAT); + SSYNC(); + } + + status = bfin_read_SDH_STATUS(); + if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN)) + handled |= sdh_data_done(host, status); + + dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__); + + return IRQ_RETVAL(handled); +} + +static int __devinit sdh_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct sdh_host *host; + struct bfin_sd_host *drv_data = get_sdh_data(pdev); + int ret; + + if (!drv_data) { + dev_err(&pdev->dev, "missing platform driver data\n"); + ret = -EINVAL; + goto out; + } + + mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto out; + } + + mmc->ops = &sdh_ops; + mmc->max_phys_segs = 32; + mmc->max_seg_size = 1 << 16; + mmc->max_blk_size = 1 << 11; + mmc->max_blk_count = 1 << 11; + mmc->max_req_size = PAGE_SIZE; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->f_max = get_sclk(); + mmc->f_min = mmc->f_max >> 9; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL; + host = mmc_priv(mmc); + host->mmc = mmc; + + spin_lock_init(&host->lock); + host->irq = drv_data->irq_int0; + host->dma_ch = drv_data->dma_chan; + + ret = request_dma(host->dma_ch, DRIVER_NAME "DMA"); + if (ret) { + dev_err(&pdev->dev, "unable to request DMA channel\n"); + goto out1; + } + + ret = set_dma_callback(host->dma_ch, sdh_dma_irq, host); + if (ret) { + dev_err(&pdev->dev, "unable to request DMA irq\n"); + goto out2; + } + + host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL); + if (host->sg_cpu == NULL) { + ret = -ENOMEM; + goto out2; + } + + platform_set_drvdata(pdev, mmc); + mmc_add_host(mmc); + + ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host); + if (ret) { + dev_err(&pdev->dev, "unable to request status irq\n"); + goto out3; + } + + ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME); + if (ret) { + 
dev_err(&pdev->dev, "unable to request peripheral pins\n"); + goto out4; + } +#if defined(CONFIG_BF54x) + /* Secure Digital Host shares DMA with Nand controller */ + bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); +#endif + + bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); + SSYNC(); + + /* Disable card inserting detection pin. set MMC_CAP_NEES_POLL, and + * mmc stack will do the detection. + */ + bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); + SSYNC(); + + return 0; + +out4: + free_irq(host->irq, host); +out3: + mmc_remove_host(mmc); + dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); +out2: + free_dma(host->dma_ch); +out1: + mmc_free_host(mmc); + out: + return ret; +} + +static int __devexit sdh_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + if (mmc) { + struct sdh_host *host = mmc_priv(mmc); + + mmc_remove_host(mmc); + + sdh_stop_clock(host); + free_irq(host->irq, host); + free_dma(host->dma_ch); + dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); + + mmc_free_host(mmc); + } + + return 0; +} + +#ifdef CONFIG_PM +static int sdh_suspend(struct platform_device *dev, pm_message_t state) +{ + struct mmc_host *mmc = platform_get_drvdata(dev); + struct bfin_sd_host *drv_data = get_sdh_data(dev); + int ret = 0; + + if (mmc) + ret = mmc_suspend_host(mmc, state); + + bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON); + peripheral_free_list(drv_data->pin_req); + + return ret; +} + +static int sdh_resume(struct platform_device *dev) +{ + struct mmc_host *mmc = platform_get_drvdata(dev); + struct bfin_sd_host *drv_data = get_sdh_data(dev); + int ret = 0; + + ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME); + if (ret) { + dev_err(&dev->dev, "unable to request peripheral pins\n"); + return ret; + } + + bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON); +#if defined(CONFIG_BF54x) + /* Secure Digital Host shares DMA with Nand controller */ + bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); +#endif + bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); + SSYNC(); + + bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); + SSYNC(); + + if (mmc) + ret = mmc_resume_host(mmc); + + return ret; +} +#else +# define sdh_suspend NULL +# define sdh_resume NULL +#endif + +static struct platform_driver sdh_driver = { + .probe = sdh_probe, + .remove = __devexit_p(sdh_remove), + .suspend = sdh_suspend, + .resume = sdh_resume, + .driver = { + .name = DRIVER_NAME, + }, +}; + +static int __init sdh_init(void) +{ + return platform_driver_register(&sdh_driver); +} +module_init(sdh_init); + +static void __exit sdh_exit(void) +{ + platform_driver_unregister(&sdh_driver); +} +module_exit(sdh_exit); + +MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver"); +MODULE_AUTHOR("Cliff Cai, Roy Huang"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c new file mode 100644 index 00000000000..dd45e7c3517 --- /dev/null +++ b/drivers/mmc/host/davinci_mmc.c @@ -0,0 +1,1349 @@ +/* + * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver + * + * Copyright (C) 2006 Texas Instruments. 
+ * Original author: Purushotam Kumar + * Copyright (C) 2009 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/cpufreq.h> +#include <linux/mmc/host.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/mmc/mmc.h> + +#include <mach/mmc.h> +#include <mach/edma.h> + +/* + * Register Definitions + */ +#define DAVINCI_MMCCTL 0x00 /* Control Register */ +#define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */ +#define DAVINCI_MMCST0 0x08 /* Status Register 0 */ +#define DAVINCI_MMCST1 0x0C /* Status Register 1 */ +#define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */ +#define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */ +#define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */ +#define DAVINCI_MMCBLEN 0x1C /* Block Length Register */ +#define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */ +#define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */ +#define DAVINCI_MMCDRR 0x28 /* Data Receive Register */ +#define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */ +#define DAVINCI_MMCCMD 0x30 /* Command Register */ +#define DAVINCI_MMCARGHL 0x34 /* Argument Register */ +#define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */ +#define DAVINCI_MMCRSP23 0x3C /* Response Register 0 and 1 */ +#define DAVINCI_MMCRSP45 0x40 /* Response Register 0 and 1 */ +#define DAVINCI_MMCRSP67 0x44 /* Response Register 0 and 1 */ +#define DAVINCI_MMCDRSP 0x48 /* Data Response Register */ +#define DAVINCI_MMCETOK 0x4C +#define DAVINCI_MMCCIDX 0x50 /* Command Index Register */ +#define DAVINCI_MMCCKC 0x54 +#define DAVINCI_MMCTORC 0x58 +#define DAVINCI_MMCTODC 0x5C +#define DAVINCI_MMCBLNC 0x60 +#define DAVINCI_SDIOCTL 0x64 +#define DAVINCI_SDIOST0 0x68 +#define DAVINCI_SDIOEN 0x6C +#define DAVINCI_SDIOST 0x70 +#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ + +/* DAVINCI_MMCCTL definitions */ +#define MMCCTL_DATRST (1 << 0) +#define MMCCTL_CMDRST (1 << 1) +#define MMCCTL_WIDTH_4_BIT (1 << 2) +#define MMCCTL_DATEG_DISABLED (0 << 6) +#define MMCCTL_DATEG_RISING (1 << 6) +#define MMCCTL_DATEG_FALLING (2 << 6) +#define MMCCTL_DATEG_BOTH (3 << 6) +#define MMCCTL_PERMDR_LE (0 << 9) +#define MMCCTL_PERMDR_BE (1 << 9) +#define MMCCTL_PERMDX_LE (0 << 10) +#define MMCCTL_PERMDX_BE (1 << 10) + +/* DAVINCI_MMCCLK definitions */ +#define MMCCLK_CLKEN (1 << 8) +#define MMCCLK_CLKRT_MASK (0xFF << 0) + +/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */ +#define MMCST0_DATDNE BIT(0) /* data done */ +#define MMCST0_BSYDNE BIT(1) /* busy done */ +#define MMCST0_RSPDNE BIT(2) /* command done */ +#define MMCST0_TOUTRD BIT(3) /* data read timeout */ +#define MMCST0_TOUTRS BIT(4) 
/* command response timeout */ +#define MMCST0_CRCWR BIT(5) /* data write CRC error */ +#define MMCST0_CRCRD BIT(6) /* data read CRC error */ +#define MMCST0_CRCRS BIT(7) /* command response CRC error */ +#define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */ +#define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/ +#define MMCST0_DATED BIT(11) /* DAT3 edge detect */ +#define MMCST0_TRNDNE BIT(12) /* transfer done */ + +/* DAVINCI_MMCST1 definitions */ +#define MMCST1_BUSY (1 << 0) + +/* DAVINCI_MMCCMD definitions */ +#define MMCCMD_CMD_MASK (0x3F << 0) +#define MMCCMD_PPLEN (1 << 7) +#define MMCCMD_BSYEXP (1 << 8) +#define MMCCMD_RSPFMT_MASK (3 << 9) +#define MMCCMD_RSPFMT_NONE (0 << 9) +#define MMCCMD_RSPFMT_R1456 (1 << 9) +#define MMCCMD_RSPFMT_R2 (2 << 9) +#define MMCCMD_RSPFMT_R3 (3 << 9) +#define MMCCMD_DTRW (1 << 11) +#define MMCCMD_STRMTP (1 << 12) +#define MMCCMD_WDATX (1 << 13) +#define MMCCMD_INITCK (1 << 14) +#define MMCCMD_DCLR (1 << 15) +#define MMCCMD_DMATRIG (1 << 16) + +/* DAVINCI_MMCFIFOCTL definitions */ +#define MMCFIFOCTL_FIFORST (1 << 0) +#define MMCFIFOCTL_FIFODIR_WR (1 << 1) +#define MMCFIFOCTL_FIFODIR_RD (0 << 1) +#define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */ +#define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */ +#define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */ +#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ +#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ + + +/* MMCSD Init clock in Hz in opendrain mode */ +#define MMCSD_INIT_CLOCK 200000 + +/* + * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, + * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only + * for drivers with max_hw_segs == 1, making the segments bigger (64KB) + * than the page or two that's otherwise typical. NR_SG == 16 gives at + * least the same throughput boost, using EDMA transfer linkage instead + * of spending CPU time copying pages. + */ +#define MAX_CCNT ((1 << 16) - 1) + +#define NR_SG 16 + +static unsigned rw_threshold = 32; +module_param(rw_threshold, uint, S_IRUGO); +MODULE_PARM_DESC(rw_threshold, + "Read/Write threshold. Default = 32"); + +static unsigned __initdata use_dma = 1; +module_param(use_dma, uint, 0); +MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); + +struct mmc_davinci_host { + struct mmc_command *cmd; + struct mmc_data *data; + struct mmc_host *mmc; + struct clk *clk; + unsigned int mmc_input_clk; + void __iomem *base; + struct resource *mem_res; + int irq; + unsigned char bus_mode; + +#define DAVINCI_MMC_DATADIR_NONE 0 +#define DAVINCI_MMC_DATADIR_READ 1 +#define DAVINCI_MMC_DATADIR_WRITE 2 + unsigned char data_dir; + + /* buffer is used during PIO of one scatterlist segment, and + * is updated along with buffer_bytes_left. bytes_left applies + * to all N blocks of the PIO transfer. + */ + u8 *buffer; + u32 buffer_bytes_left; + u32 bytes_left; + + u32 rxdma, txdma; + bool use_dma; + bool do_dma; + + /* Scatterlist DMA uses one or more parameter RAM entries: + * the main one (associated with rxdma or txdma) plus zero or + * more links. The entries for a given transfer differ only + * by memory buffer (address, length) and link field. + */ + struct edmacc_param tx_template; + struct edmacc_param rx_template; + unsigned n_link; + u32 links[NR_SG - 1]; + + /* For PIO we walk scatterlists one segment at a time. 
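 * host->sg points at the scatterlist entry currently being copied;
 * mmc_davinci_sg_to_buf() below reloads buffer and buffer_bytes_left from
 * it, and davinci_fifo_data_trans() advances to the next entry once the
 * current one runs dry, so a single FIFO pass never crosses a segment.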
*/ + unsigned int sg_len; + struct scatterlist *sg; + + /* Version of the MMC/SD controller */ + u8 version; + /* for ns in one cycle calculation */ + unsigned ns_in_one_cycle; +#ifdef CONFIG_CPU_FREQ + struct notifier_block freq_transition; +#endif +}; + + +/* PIO only */ +static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) +{ + host->buffer_bytes_left = sg_dma_len(host->sg); + host->buffer = sg_virt(host->sg); + if (host->buffer_bytes_left > host->bytes_left) + host->buffer_bytes_left = host->bytes_left; +} + +static void davinci_fifo_data_trans(struct mmc_davinci_host *host, + unsigned int n) +{ + u8 *p; + unsigned int i; + + if (host->buffer_bytes_left == 0) { + host->sg = sg_next(host->data->sg); + mmc_davinci_sg_to_buf(host); + } + + p = host->buffer; + if (n > host->buffer_bytes_left) + n = host->buffer_bytes_left; + host->buffer_bytes_left -= n; + host->bytes_left -= n; + + /* NOTE: we never transfer more than rw_threshold bytes + * to/from the fifo here; there's no I/O overlap. + * This also assumes that access width( i.e. ACCWD) is 4 bytes + */ + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { + for (i = 0; i < (n >> 2); i++) { + writel(*((u32 *)p), host->base + DAVINCI_MMCDXR); + p = p + 4; + } + if (n & 3) { + iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); + p = p + (n & 3); + } + } else { + for (i = 0; i < (n >> 2); i++) { + *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR); + p = p + 4; + } + if (n & 3) { + ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); + p = p + (n & 3); + } + } + host->buffer = p; +} + +static void mmc_davinci_start_command(struct mmc_davinci_host *host, + struct mmc_command *cmd) +{ + u32 cmd_reg = 0; + u32 im_val; + + dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n", + cmd->opcode, cmd->arg, + ({ char *s; + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + s = ", R1/R5/R6/R7 response"; + break; + case MMC_RSP_R1B: + s = ", R1b response"; + break; + case MMC_RSP_R2: + s = ", R2 response"; + break; + case MMC_RSP_R3: + s = ", R3/R4 response"; + break; + default: + s = ", (R? response)"; + break; + }; s; })); + host->cmd = cmd; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1B: + /* There's some spec confusion about when R1B is + * allowed, but if the card doesn't issue a BUSY + * then it's harmless for us to allow it. 
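 * (MMCCMD_BSYEXP, "busy expected", presumably just tells the controller
 * to watch for the card's busy phase, cf. MMCST0_BSYDNE; the response
 * format itself is plain 48-bit R1, hence the fall-through below.)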
+ */ + cmd_reg |= MMCCMD_BSYEXP; + /* FALLTHROUGH */ + case MMC_RSP_R1: /* 48 bits, CRC */ + cmd_reg |= MMCCMD_RSPFMT_R1456; + break; + case MMC_RSP_R2: /* 136 bits, CRC */ + cmd_reg |= MMCCMD_RSPFMT_R2; + break; + case MMC_RSP_R3: /* 48 bits, no CRC */ + cmd_reg |= MMCCMD_RSPFMT_R3; + break; + default: + cmd_reg |= MMCCMD_RSPFMT_NONE; + dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n", + mmc_resp_type(cmd)); + break; + } + + /* Set command index */ + cmd_reg |= cmd->opcode; + + /* Enable EDMA transfer triggers */ + if (host->do_dma) + cmd_reg |= MMCCMD_DMATRIG; + + if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL && + host->data_dir == DAVINCI_MMC_DATADIR_READ) + cmd_reg |= MMCCMD_DMATRIG; + + /* Setting whether command involves data transfer or not */ + if (cmd->data) + cmd_reg |= MMCCMD_WDATX; + + /* Setting whether stream or block transfer */ + if (cmd->flags & MMC_DATA_STREAM) + cmd_reg |= MMCCMD_STRMTP; + + /* Setting whether data read or write */ + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) + cmd_reg |= MMCCMD_DTRW; + + if (host->bus_mode == MMC_BUSMODE_PUSHPULL) + cmd_reg |= MMCCMD_PPLEN; + + /* set Command timeout */ + writel(0x1FFF, host->base + DAVINCI_MMCTOR); + + /* Enable interrupt (calculate here, defer until FIFO is stuffed). */ + im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS; + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { + im_val |= MMCST0_DATDNE | MMCST0_CRCWR; + + if (!host->do_dma) + im_val |= MMCST0_DXRDY; + } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) { + im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD; + + if (!host->do_dma) + im_val |= MMCST0_DRRDY; + } + + /* + * Before non-DMA WRITE commands the controller needs priming: + * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size + */ + if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)) + davinci_fifo_data_trans(host, rw_threshold); + + writel(cmd->arg, host->base + DAVINCI_MMCARGHL); + writel(cmd_reg, host->base + DAVINCI_MMCCMD); + writel(im_val, host->base + DAVINCI_MMCIM); +} + +/*----------------------------------------------------------------------*/ + +/* DMA infrastructure */ + +static void davinci_abort_dma(struct mmc_davinci_host *host) +{ + int sync_dev; + + if (host->data_dir == DAVINCI_MMC_DATADIR_READ) + sync_dev = host->rxdma; + else + sync_dev = host->txdma; + + edma_stop(sync_dev); + edma_clean_channel(sync_dev); +} + +static void +mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data); + +static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data) +{ + if (DMA_COMPLETE != ch_status) { + struct mmc_davinci_host *host = data; + + /* Currently means: DMA Event Missed, or "null" transfer + * request was seen. In the future, TC errors (like bad + * addresses) might be presented too. + */ + dev_warn(mmc_dev(host->mmc), "DMA %s error\n", + (host->data->flags & MMC_DATA_WRITE) + ? 
"write" : "read"); + host->data->error = -EIO; + mmc_davinci_xfer_done(host, host->data); + } +} + +/* Set up tx or rx template, to be modified and updated later */ +static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host, + bool tx, struct edmacc_param *template) +{ + unsigned sync_dev; + const u16 acnt = 4; + const u16 bcnt = rw_threshold >> 2; + const u16 ccnt = 0; + u32 src_port = 0; + u32 dst_port = 0; + s16 src_bidx, dst_bidx; + s16 src_cidx, dst_cidx; + + /* + * A-B Sync transfer: each DMA request is for one "frame" of + * rw_threshold bytes, broken into "acnt"-size chunks repeated + * "bcnt" times. Each segment needs "ccnt" such frames; since + * we tell the block layer our mmc->max_seg_size limit, we can + * trust (later) that it's within bounds. + * + * The FIFOs are read/written in 4-byte chunks (acnt == 4) and + * EDMA will optimize memory operations to use larger bursts. + */ + if (tx) { + sync_dev = host->txdma; + + /* src_prt, ccnt, and link to be set up later */ + src_bidx = acnt; + src_cidx = acnt * bcnt; + + dst_port = host->mem_res->start + DAVINCI_MMCDXR; + dst_bidx = 0; + dst_cidx = 0; + } else { + sync_dev = host->rxdma; + + src_port = host->mem_res->start + DAVINCI_MMCDRR; + src_bidx = 0; + src_cidx = 0; + + /* dst_prt, ccnt, and link to be set up later */ + dst_bidx = acnt; + dst_cidx = acnt * bcnt; + } + + /* + * We can't use FIFO mode for the FIFOs because MMC FIFO addresses + * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT + * parameter is ignored. + */ + edma_set_src(sync_dev, src_port, INCR, W8BIT); + edma_set_dest(sync_dev, dst_port, INCR, W8BIT); + + edma_set_src_index(sync_dev, src_bidx, src_cidx); + edma_set_dest_index(sync_dev, dst_bidx, dst_cidx); + + edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC); + + edma_read_slot(sync_dev, template); + + /* don't bother with irqs or chaining */ + template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12; +} + +static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host, + struct mmc_data *data) +{ + struct edmacc_param *template; + int channel, slot; + unsigned link; + struct scatterlist *sg; + unsigned sg_len; + unsigned bytes_left = host->bytes_left; + const unsigned shift = ffs(rw_threshold) - 1;; + + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { + template = &host->tx_template; + channel = host->txdma; + } else { + template = &host->rx_template; + channel = host->rxdma; + } + + /* We know sg_len and ccnt will never be out of range because + * we told the mmc layer which in turn tells the block layer + * to ensure that it only hands us one scatterlist segment + * per EDMA PARAM entry. Update the PARAM + * entries needed for each segment of this scatterlist. + */ + for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len; + sg_len-- != 0 && bytes_left; + sg = sg_next(sg), slot = host->links[link++]) { + u32 buf = sg_dma_address(sg); + unsigned count = sg_dma_len(sg); + + template->link_bcntrld = sg_len + ? 
(EDMA_CHAN_SLOT(host->links[link]) << 5) + : 0xffff; + + if (count > bytes_left) + count = bytes_left; + bytes_left -= count; + + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) + template->src = buf; + else + template->dst = buf; + template->ccnt = count >> shift; + + edma_write_slot(slot, template); + } + + if (host->version == MMC_CTLR_VERSION_2) + edma_clear_event(channel); + + edma_start(channel); +} + +static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, + struct mmc_data *data) +{ + int i; + int mask = rw_threshold - 1; + + host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + ((data->flags & MMC_DATA_WRITE) + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE)); + + /* no individual DMA segment should need a partial FIFO */ + for (i = 0; i < host->sg_len; i++) { + if (sg_dma_len(data->sg + i) & mask) { + dma_unmap_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, + (data->flags & MMC_DATA_WRITE) + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + return -1; + } + } + + host->do_dma = 1; + mmc_davinci_send_dma_request(host, data); + + return 0; +} + +static void __init_or_module +davinci_release_dma_channels(struct mmc_davinci_host *host) +{ + unsigned i; + + if (!host->use_dma) + return; + + for (i = 0; i < host->n_link; i++) + edma_free_slot(host->links[i]); + + edma_free_channel(host->txdma); + edma_free_channel(host->rxdma); +} + +static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) +{ + int r, i; + + /* Acquire master DMA write channel */ + r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host, + EVENTQ_DEFAULT); + if (r < 0) { + dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", + "tx", r); + return r; + } + mmc_davinci_dma_setup(host, true, &host->tx_template); + + /* Acquire master DMA read channel */ + r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host, + EVENTQ_DEFAULT); + if (r < 0) { + dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", + "rx", r); + goto free_master_write; + } + mmc_davinci_dma_setup(host, false, &host->rx_template); + + /* Allocate parameter RAM slots, which will later be bound to a + * channel as needed to handle a scatterlist. + */ + for (i = 0; i < ARRAY_SIZE(host->links); i++) { + r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); + if (r < 0) { + dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", + r); + break; + } + host->links[i] = r; + } + host->n_link = i; + + return 0; + +free_master_write: + edma_free_channel(host->txdma); + + return r; +} + +/*----------------------------------------------------------------------*/ + +static void +mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) +{ + int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0; + int timeout; + struct mmc_data *data = req->data; + + if (host->version == MMC_CTLR_VERSION_2) + fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0; + + host->data = data; + if (data == NULL) { + host->data_dir = DAVINCI_MMC_DATADIR_NONE; + writel(0, host->base + DAVINCI_MMCBLEN); + writel(0, host->base + DAVINCI_MMCNBLK); + return; + } + + dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n", + (data->flags & MMC_DATA_STREAM) ? "stream" : "block", + (data->flags & MMC_DATA_WRITE) ? 
"write" : "read", + data->blocks, data->blksz); + dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", + data->timeout_clks, data->timeout_ns); + timeout = data->timeout_clks + + (data->timeout_ns / host->ns_in_one_cycle); + if (timeout > 0xffff) + timeout = 0xffff; + + writel(timeout, host->base + DAVINCI_MMCTOD); + writel(data->blocks, host->base + DAVINCI_MMCNBLK); + writel(data->blksz, host->base + DAVINCI_MMCBLEN); + + /* Configure the FIFO */ + switch (data->flags & MMC_DATA_WRITE) { + case MMC_DATA_WRITE: + host->data_dir = DAVINCI_MMC_DATADIR_WRITE; + writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST, + host->base + DAVINCI_MMCFIFOCTL); + writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR, + host->base + DAVINCI_MMCFIFOCTL); + break; + + default: + host->data_dir = DAVINCI_MMC_DATADIR_READ; + writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST, + host->base + DAVINCI_MMCFIFOCTL); + writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD, + host->base + DAVINCI_MMCFIFOCTL); + break; + } + + host->buffer = NULL; + host->bytes_left = data->blocks * data->blksz; + + /* For now we try to use DMA whenever we won't need partial FIFO + * reads or writes, either for the whole transfer (as tested here) + * or for any individual scatterlist segment (tested when we call + * start_dma_transfer). + * + * While we *could* change that, unusual block sizes are rarely + * used. The occasional fallback to PIO should't hurt. + */ + if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0 + && mmc_davinci_start_dma_transfer(host, data) == 0) { + /* zero this to ensure we take no PIO paths */ + host->bytes_left = 0; + } else { + /* Revert to CPU Copy */ + host->sg_len = data->sg_len; + host->sg = host->data->sg; + mmc_davinci_sg_to_buf(host); + } +} + +static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req) +{ + struct mmc_davinci_host *host = mmc_priv(mmc); + unsigned long timeout = jiffies + msecs_to_jiffies(900); + u32 mmcst1 = 0; + + /* Card may still be sending BUSY after a previous operation, + * typically some kind of write. If so, we can't proceed yet. + */ + while (time_before(jiffies, timeout)) { + mmcst1 = readl(host->base + DAVINCI_MMCST1); + if (!(mmcst1 & MMCST1_BUSY)) + break; + cpu_relax(); + } + if (mmcst1 & MMCST1_BUSY) { + dev_err(mmc_dev(host->mmc), "still BUSY? bad ... 
\n"); + req->cmd->error = -ETIMEDOUT; + mmc_request_done(mmc, req); + return; + } + + host->do_dma = 0; + mmc_davinci_prepare_data(host, req); + mmc_davinci_start_command(host, req->cmd); +} + +static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, + unsigned int mmc_req_freq) +{ + unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0; + + mmc_pclk = host->mmc_input_clk; + if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq)) + mmc_push_pull_divisor = ((unsigned int)mmc_pclk + / (2 * mmc_req_freq)) - 1; + else + mmc_push_pull_divisor = 0; + + mmc_freq = (unsigned int)mmc_pclk + / (2 * (mmc_push_pull_divisor + 1)); + + if (mmc_freq > mmc_req_freq) + mmc_push_pull_divisor = mmc_push_pull_divisor + 1; + /* Convert ns to clock cycles */ + if (mmc_req_freq <= 400000) + host->ns_in_one_cycle = (1000000) / (((mmc_pclk + / (2 * (mmc_push_pull_divisor + 1)))/1000)); + else + host->ns_in_one_cycle = (1000000) / (((mmc_pclk + / (2 * (mmc_push_pull_divisor + 1)))/1000000)); + + return mmc_push_pull_divisor; +} + +static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios) +{ + unsigned int open_drain_freq = 0, mmc_pclk = 0; + unsigned int mmc_push_pull_freq = 0; + struct mmc_davinci_host *host = mmc_priv(mmc); + + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { + u32 temp; + + /* Ignoring the init clock value passed for fixing the inter + * operability with different cards. + */ + open_drain_freq = ((unsigned int)mmc_pclk + / (2 * MMCSD_INIT_CLOCK)) - 1; + + if (open_drain_freq > 0xFF) + open_drain_freq = 0xFF; + + temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; + temp |= open_drain_freq; + writel(temp, host->base + DAVINCI_MMCCLK); + + /* Convert ns to clock cycles */ + host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000); + } else { + u32 temp; + mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); + + if (mmc_push_pull_freq > 0xFF) + mmc_push_pull_freq = 0xFF; + + temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN; + writel(temp, host->base + DAVINCI_MMCCLK); + + udelay(10); + + temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; + temp |= mmc_push_pull_freq; + writel(temp, host->base + DAVINCI_MMCCLK); + + writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); + + udelay(10); + } +} + +static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + unsigned int mmc_pclk = 0; + struct mmc_davinci_host *host = mmc_priv(mmc); + + mmc_pclk = host->mmc_input_clk; + dev_dbg(mmc_dev(host->mmc), + "clock %dHz busmode %d powermode %d Vdd %04x\n", + ios->clock, ios->bus_mode, ios->power_mode, + ios->vdd); + if (ios->bus_width == MMC_BUS_WIDTH_4) { + dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n"); + writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_WIDTH_4_BIT, + host->base + DAVINCI_MMCCTL); + } else { + dev_dbg(mmc_dev(host->mmc), "Disabling 4 bit mode\n"); + writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_WIDTH_4_BIT, + host->base + DAVINCI_MMCCTL); + } + + calculate_clk_divider(mmc, ios); + + host->bus_mode = ios->bus_mode; + if (ios->power_mode == MMC_POWER_UP) { + unsigned long timeout = jiffies + msecs_to_jiffies(50); + bool lose = true; + + /* Send clock cycles, poll completion */ + writel(0, host->base + DAVINCI_MMCARGHL); + writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD); + while (time_before(jiffies, timeout)) { + u32 tmp = readl(host->base + DAVINCI_MMCST0); + + if (tmp & MMCST0_RSPDNE) { + lose = false; + break; + } + cpu_relax(); + } + if (lose) + 
dev_warn(mmc_dev(host->mmc), "powerup timeout\n"); + } + + /* FIXME on power OFF, reset things ... */ +} + +static void +mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) +{ + host->data = NULL; + + if (host->do_dma) { + davinci_abort_dma(host); + + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + (data->flags & MMC_DATA_WRITE) + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + host->do_dma = false; + } + host->data_dir = DAVINCI_MMC_DATADIR_NONE; + + if (!data->stop || (host->cmd && host->cmd->error)) { + mmc_request_done(host->mmc, data->mrq); + writel(0, host->base + DAVINCI_MMCIM); + } else + mmc_davinci_start_command(host, data->stop); +} + +static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, + struct mmc_command *cmd) +{ + host->cmd = NULL; + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + /* response type 2 */ + cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01); + cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23); + cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45); + cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); + } else { + /* response types 1, 1b, 3, 4, 5, 6 */ + cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); + } + } + + if (host->data == NULL || cmd->error) { + if (cmd->error == -ETIMEDOUT) + cmd->mrq->cmd->retries = 0; + mmc_request_done(host->mmc, cmd->mrq); + writel(0, host->base + DAVINCI_MMCIM); + } +} + +static void +davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) +{ + u32 temp; + + /* reset command and data state machines */ + temp = readl(host->base + DAVINCI_MMCCTL); + writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST, + host->base + DAVINCI_MMCCTL); + + temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); + udelay(10); + writel(temp, host->base + DAVINCI_MMCCTL); +} + +static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) +{ + struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; + unsigned int status, qstatus; + int end_command = 0; + int end_transfer = 0; + struct mmc_data *data = host->data; + + if (host->cmd == NULL && host->data == NULL) { + status = readl(host->base + DAVINCI_MMCST0); + dev_dbg(mmc_dev(host->mmc), + "Spurious interrupt 0x%04x\n", status); + /* Disable the interrupt from mmcsd */ + writel(0, host->base + DAVINCI_MMCIM); + return IRQ_NONE; + } + + status = readl(host->base + DAVINCI_MMCST0); + qstatus = status; + + /* handle FIFO first when using PIO for data. + * bytes_left will decrease to zero as I/O progress and status will + * read zero over iteration because this controller status + * register(MMCST0) reports any status only once and it is cleared + * by read. So, it is not unbouned loop even in the case of + * non-dma. 
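 * qstatus below therefore accumulates every bit observed across those
 * re-reads, so events that arrive while the FIFO is being drained are
 * still handled once the PIO loop exits.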
+ */ + while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { + davinci_fifo_data_trans(host, rw_threshold); + status = readl(host->base + DAVINCI_MMCST0); + if (!status) + break; + qstatus |= status; + } + + if (qstatus & MMCST0_DATDNE) { + /* All blocks sent/received, and CRC checks passed */ + if (data != NULL) { + if ((host->do_dma == 0) && (host->bytes_left > 0)) { + /* if datasize < rw_threshold + * no RX ints are generated + */ + davinci_fifo_data_trans(host, host->bytes_left); + } + end_transfer = 1; + data->bytes_xfered = data->blocks * data->blksz; + } else { + dev_err(mmc_dev(host->mmc), + "DATDNE with no host->data\n"); + } + } + + if (qstatus & MMCST0_TOUTRD) { + /* Read data timeout */ + data->error = -ETIMEDOUT; + end_transfer = 1; + + dev_dbg(mmc_dev(host->mmc), + "read data timeout, status %x\n", + qstatus); + + davinci_abort_data(host, data); + } + + if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) { + /* Data CRC error */ + data->error = -EILSEQ; + end_transfer = 1; + + /* NOTE: this controller uses CRCWR to report both CRC + * errors and timeouts (on writes). MMCDRSP values are + * only weakly documented, but 0x9f was clearly a timeout + * case and the two three-bit patterns in various SD specs + * (101, 010) aren't part of it ... + */ + if (qstatus & MMCST0_CRCWR) { + u32 temp = readb(host->base + DAVINCI_MMCDRSP); + + if (temp == 0x9f) + data->error = -ETIMEDOUT; + } + dev_dbg(mmc_dev(host->mmc), "data %s %s error\n", + (qstatus & MMCST0_CRCWR) ? "write" : "read", + (data->error == -ETIMEDOUT) ? "timeout" : "CRC"); + + davinci_abort_data(host, data); + } + + if (qstatus & MMCST0_TOUTRS) { + /* Command timeout */ + if (host->cmd) { + dev_dbg(mmc_dev(host->mmc), + "CMD%d timeout, status %x\n", + host->cmd->opcode, qstatus); + host->cmd->error = -ETIMEDOUT; + if (data) { + end_transfer = 1; + davinci_abort_data(host, data); + } else + end_command = 1; + } + } + + if (qstatus & MMCST0_CRCRS) { + /* Command CRC error */ + dev_dbg(mmc_dev(host->mmc), "Command CRC error\n"); + if (host->cmd) { + host->cmd->error = -EILSEQ; + end_command = 1; + } + } + + if (qstatus & MMCST0_RSPDNE) { + /* End of command phase */ + end_command = (int) host->cmd; + } + + if (end_command) + mmc_davinci_cmd_done(host, host->cmd); + if (end_transfer) + mmc_davinci_xfer_done(host, data); + return IRQ_HANDLED; +} + +static int mmc_davinci_get_cd(struct mmc_host *mmc) +{ + struct platform_device *pdev = to_platform_device(mmc->parent); + struct davinci_mmc_config *config = pdev->dev.platform_data; + + if (!config || !config->get_cd) + return -ENOSYS; + return config->get_cd(pdev->id); +} + +static int mmc_davinci_get_ro(struct mmc_host *mmc) +{ + struct platform_device *pdev = to_platform_device(mmc->parent); + struct davinci_mmc_config *config = pdev->dev.platform_data; + + if (!config || !config->get_ro) + return -ENOSYS; + return config->get_ro(pdev->id); +} + +static struct mmc_host_ops mmc_davinci_ops = { + .request = mmc_davinci_request, + .set_ios = mmc_davinci_set_ios, + .get_cd = mmc_davinci_get_cd, + .get_ro = mmc_davinci_get_ro, +}; + +/*----------------------------------------------------------------------*/ + +#ifdef CONFIG_CPU_FREQ +static int mmc_davinci_cpufreq_transition(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct mmc_davinci_host *host; + unsigned int mmc_pclk; + struct mmc_host *mmc; + unsigned long flags; + + host = container_of(nb, struct mmc_davinci_host, freq_transition); + mmc = host->mmc; + mmc_pclk = clk_get_rate(host->clk); + + if 
(val == CPUFREQ_POSTCHANGE) { + spin_lock_irqsave(&mmc->lock, flags); + host->mmc_input_clk = mmc_pclk; + calculate_clk_divider(mmc, &mmc->ios); + spin_unlock_irqrestore(&mmc->lock, flags); + } + + return 0; +} + +static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) +{ + host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition; + + return cpufreq_register_notifier(&host->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +} + +static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) +{ + cpufreq_unregister_notifier(&host->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +} +#else +static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) +{ + return 0; +} + +static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) +{ +} +#endif +static void __init init_mmcsd_host(struct mmc_davinci_host *host) +{ + /* DAT line portion is diabled and in reset state */ + writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST, + host->base + DAVINCI_MMCCTL); + + /* CMD line portion is diabled and in reset state */ + writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST, + host->base + DAVINCI_MMCCTL); + + udelay(10); + + writel(0, host->base + DAVINCI_MMCCLK); + writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); + + writel(0x1FFF, host->base + DAVINCI_MMCTOR); + writel(0xFFFF, host->base + DAVINCI_MMCTOD); + + writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST, + host->base + DAVINCI_MMCCTL); + writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST, + host->base + DAVINCI_MMCCTL); + + udelay(10); +} + +static int __init davinci_mmcsd_probe(struct platform_device *pdev) +{ + struct davinci_mmc_config *pdata = pdev->dev.platform_data; + struct mmc_davinci_host *host = NULL; + struct mmc_host *mmc = NULL; + struct resource *r, *mem = NULL; + int ret = 0, irq = 0; + size_t mem_size; + + /* REVISIT: when we're fully converted, fail if pdata is NULL */ + + ret = -ENODEV; + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (!r || irq == NO_IRQ) + goto out; + + ret = -EBUSY; + mem_size = resource_size(r); + mem = request_mem_region(r->start, mem_size, pdev->name); + if (!mem) + goto out; + + ret = -ENOMEM; + mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev); + if (!mmc) + goto out; + + host = mmc_priv(mmc); + host->mmc = mmc; /* Important */ + + r = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (!r) + goto out; + host->rxdma = r->start; + + r = platform_get_resource(pdev, IORESOURCE_DMA, 1); + if (!r) + goto out; + host->txdma = r->start; + + host->mem_res = mem; + host->base = ioremap(mem->start, mem_size); + if (!host->base) + goto out; + + ret = -ENXIO; + host->clk = clk_get(&pdev->dev, "MMCSDCLK"); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + goto out; + } + clk_enable(host->clk); + host->mmc_input_clk = clk_get_rate(host->clk); + + init_mmcsd_host(host); + + host->use_dma = use_dma; + host->irq = irq; + + if (host->use_dma && davinci_acquire_dma_channels(host) != 0) + host->use_dma = 0; + + /* REVISIT: someday, support IRQ-driven card detection. 
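 * A board that wants card detection today supplies it through platform
 * data instead; a minimal sketch (the config fields are from this driver,
 * the GPIO number and function names are hypothetical):
 *
 *	static int evm_mmc_get_cd(int module)
 *	{
 *		return !gpio_get_value(EVM_MMC_CD_GPIO);
 *	}
 *
 *	static struct davinci_mmc_config evm_mmc_config = {
 *		.get_cd	= evm_mmc_get_cd,
 *		.wires	= 4,
 *	};
 *
 * mmc_davinci_get_cd() above simply forwards to such a hook.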
*/ + mmc->caps |= MMC_CAP_NEEDS_POLL; + + if (!pdata || pdata->wires == 4 || pdata->wires == 0) + mmc->caps |= MMC_CAP_4_BIT_DATA; + + host->version = pdata->version; + + mmc->ops = &mmc_davinci_ops; + mmc->f_min = 312500; + mmc->f_max = 25000000; + if (pdata && pdata->max_freq) + mmc->f_max = pdata->max_freq; + if (pdata && pdata->caps) + mmc->caps |= pdata->caps; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + /* With no iommu coalescing pages, each phys_seg is a hw_seg. + * Each hw_seg uses one EDMA parameter RAM slot, always one + * channel and then usually some linked slots. + */ + mmc->max_hw_segs = 1 + host->n_link; + mmc->max_phys_segs = mmc->max_hw_segs; + + /* EDMA limit per hw segment (one or two MBytes) */ + mmc->max_seg_size = MAX_CCNT * rw_threshold; + + /* MMC/SD controller limits for multiblock requests */ + mmc->max_blk_size = 4095; /* BLEN is 12 bits */ + mmc->max_blk_count = 65535; /* NBLK is 16 bits */ + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + + dev_dbg(mmc_dev(host->mmc), "max_phys_segs=%d\n", mmc->max_phys_segs); + dev_dbg(mmc_dev(host->mmc), "max_hw_segs=%d\n", mmc->max_hw_segs); + dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); + dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); + dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); + + platform_set_drvdata(pdev, host); + + ret = mmc_davinci_cpufreq_register(host); + if (ret) { + dev_err(&pdev->dev, "failed to register cpufreq\n"); + goto cpu_freq_fail; + } + + ret = mmc_add_host(mmc); + if (ret < 0) + goto out; + + ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host); + if (ret) + goto out; + + rename_region(mem, mmc_hostname(mmc)); + + dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", + host->use_dma ? "DMA" : "PIO", + (mmc->caps & MMC_CAP_4_BIT_DATA) ? 
4 : 1); + + return 0; + +out: + mmc_davinci_cpufreq_deregister(host); +cpu_freq_fail: + if (host) { + davinci_release_dma_channels(host); + + if (host->clk) { + clk_disable(host->clk); + clk_put(host->clk); + } + + if (host->base) + iounmap(host->base); + } + + if (mmc) + mmc_free_host(mmc); + + if (mem) + release_resource(mem); + + dev_dbg(&pdev->dev, "probe err %d\n", ret); + + return ret; +} + +static int __exit davinci_mmcsd_remove(struct platform_device *pdev) +{ + struct mmc_davinci_host *host = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + if (host) { + mmc_davinci_cpufreq_deregister(host); + + mmc_remove_host(host->mmc); + free_irq(host->irq, host); + + davinci_release_dma_channels(host); + + clk_disable(host->clk); + clk_put(host->clk); + + iounmap(host->base); + + release_resource(host->mem_res); + + mmc_free_host(host->mmc); + } + + return 0; +} + +#ifdef CONFIG_PM +static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg) +{ + struct mmc_davinci_host *host = platform_get_drvdata(pdev); + + return mmc_suspend_host(host->mmc, msg); +} + +static int davinci_mmcsd_resume(struct platform_device *pdev) +{ + struct mmc_davinci_host *host = platform_get_drvdata(pdev); + + return mmc_resume_host(host->mmc); +} +#else +#define davinci_mmcsd_suspend NULL +#define davinci_mmcsd_resume NULL +#endif + +static struct platform_driver davinci_mmcsd_driver = { + .driver = { + .name = "davinci_mmc", + .owner = THIS_MODULE, + }, + .remove = __exit_p(davinci_mmcsd_remove), + .suspend = davinci_mmcsd_suspend, + .resume = davinci_mmcsd_resume, +}; + +static int __init davinci_mmcsd_init(void) +{ + return platform_driver_probe(&davinci_mmcsd_driver, + davinci_mmcsd_probe); +} +module_init(davinci_mmcsd_init); + +static void __exit davinci_mmcsd_exit(void) +{ + platform_driver_unregister(&davinci_mmcsd_driver); +} +module_exit(davinci_mmcsd_exit); + +MODULE_AUTHOR("Texas Instruments India"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); + diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 90d168ad03b..84c103a7ee1 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -2,6 +2,7 @@ * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver * * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. + * Copyright (C) 2010 ST-Ericsson AB. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -34,9 +35,6 @@ #define DRIVER_NAME "mmci-pl18x" -#define DBG(host,fmt,args...) 
\ - pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args) - static unsigned int fmax = 515633; /* @@ -105,8 +103,8 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) void __iomem *base; int blksz_bits; - DBG(host, "blksz %04x blks %04x flags %08x\n", - data->blksz, data->blocks, data->flags); + dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n", + data->blksz, data->blocks, data->flags); host->data = data; host->size = data->blksz; @@ -155,7 +153,7 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) { void __iomem *base = host->base; - DBG(host, "op %02x arg %08x flags %08x\n", + dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", cmd->opcode, cmd->arg, cmd->flags); if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { @@ -184,8 +182,20 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, { if (status & MCI_DATABLOCKEND) { host->data_xfered += data->blksz; +#ifdef CONFIG_ARCH_U300 + /* + * On the U300 some signal or other is + * badly routed so that a data write does + * not properly terminate with a MCI_DATAEND + * status flag. This quirk will make writes + * work again. + */ + if (data->flags & MMC_DATA_WRITE) + status |= MCI_DATAEND; +#endif } if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { + dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status); if (status & MCI_DATACRCFAIL) data->error = -EILSEQ; else if (status & MCI_DATATIMEOUT) @@ -307,7 +317,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) status = readl(base + MMCISTATUS); - DBG(host, "irq1 %08x\n", status); + dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); do { unsigned long flags; @@ -401,7 +411,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) status &= readl(host->base + MMCIMASK0); writel(status, host->base + MMCICLEAR); - DBG(host, "irq0 %08x\n", status); + dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); data = host->data; if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN| @@ -428,8 +438,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) WARN_ON(host->mrq != NULL); if (mrq->data && !is_power_of_2(mrq->data->blksz)) { - printk(KERN_ERR "%s: Unsupported block size (%d bytes)\n", - mmc_hostname(mmc), mrq->data->blksz); + dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n", + mrq->data->blksz); mrq->cmd->error = -EINVAL; mmc_request_done(mmc, mrq); return; @@ -582,8 +592,8 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) host->hw_designer = amba_manf(dev); host->hw_revision = amba_rev(dev); - DBG(host, "designer ID = 0x%02x\n", host->hw_designer); - DBG(host, "revision = 0x%01x\n", host->hw_revision); + dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); + dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision); host->clk = clk_get(&dev->dev, NULL); if (IS_ERR(host->clk)) { @@ -608,7 +618,8 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) if (ret < 0) goto clk_disable; host->mclk = clk_get_rate(host->clk); - DBG(host, "eventual mclk rate: %u Hz\n", host->mclk); + dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", + host->mclk); } host->base = ioremap(dev->res.start, resource_size(&dev->res)); if (!host->base) { @@ -619,6 +630,8 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) mmc->ops = &mmci_ops; mmc->f_min = (host->mclk + 511) / 512; mmc->f_max = min(host->mclk, fmax); + 
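(Worked numbers for the f_min/f_max lines just above, assuming a typical 48 MHz MCLK: f_min = (48000000 + 511) / 512, about 93.8 kHz, i.e. MCLK/512 rounded up to match the largest PL180 clock divider, while f_max is clamped to the driver's fmax limit of 515633 Hz.)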
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); + #ifdef CONFIG_REGULATOR /* If we're using the regulator framework, try to fetch a regulator */ host->vcc = regulator_get(&dev->dev, "vmmc"); @@ -712,7 +725,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) mmc_add_host(mmc); - printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n", + dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n", mmc_hostname(mmc), amba_rev(dev), amba_config(dev), (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]); diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index dba4600bcdb..b31946e0b4c 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -38,10 +38,9 @@ #include <asm/div64.h> #include <asm/sizes.h> -#include <asm/mach/mmc.h> +#include <mach/mmc.h> #include <mach/msm_iomap.h> #include <mach/dma.h> -#include <mach/htc_pwrsink.h> #include "msm_sdcc.h" @@ -775,13 +774,11 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_OFF: - htc_pwrsink_set(PWRSINK_SDCARD, 0); break; case MMC_POWER_UP: pwr |= MCI_PWR_UP; break; case MMC_POWER_ON: - htc_pwrsink_set(PWRSINK_SDCARD, 100); pwr |= MCI_PWR_ON; break; } diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 88671529c45..60a2b69e54f 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -679,17 +679,17 @@ static int mxcmci_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct mxcmci_host *host = NULL; - struct resource *r; + struct resource *iores, *r; int ret = 0, irq; printk(KERN_INFO "i.MX SDHC driver\n"); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (!r || irq < 0) + if (!iores || irq < 0) return -EINVAL; - r = request_mem_region(r->start, resource_size(r), pdev->name); + r = request_mem_region(iores->start, resource_size(iores), pdev->name); if (!r) return -EBUSY; @@ -809,7 +809,7 @@ out_iounmap: out_free: mmc_free_host(mmc); out_release_mem: - release_mem_region(host->res->start, resource_size(host->res)); + release_mem_region(iores->start, resource_size(iores)); return ret; } diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 0c44d560bf1..0c7a63c1f12 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c @@ -22,6 +22,8 @@ #include <linux/mmc/core.h> #include <linux/mmc/host.h> +MODULE_LICENSE("GPL"); + enum { CD_GPIO = 0, WP_GPIO, diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 5f970e253e5..c6d7e8ecadb 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -1459,8 +1459,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev) goto err_ioremap; host->iclk = clk_get(&pdev->dev, "ick"); - if (IS_ERR(host->iclk)) + if (IS_ERR(host->iclk)) { + ret = PTR_ERR(host->iclk); goto err_free_mmc_host; + } clk_enable(host->iclk); host->fclk = clk_get(&pdev->dev, "fck"); @@ -1500,10 +1502,8 @@ err_free_irq: err_free_fclk: clk_put(host->fclk); err_free_iclk: - if (host->iclk != NULL) { - clk_disable(host->iclk); - clk_put(host->iclk); - } + clk_disable(host->iclk); + clk_put(host->iclk); err_free_mmc_host: iounmap(host->virt_base); err_ioremap: diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index bb47ff465c0..0d783f3e79e 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -828,7 +828,7 @@ static 
int pxamci_resume(struct device *dev) return ret; } -static struct dev_pm_ops pxamci_pm_ops = { +static const struct dev_pm_ops pxamci_pm_ops = { .suspend = pxamci_suspend, .resume = pxamci_resume, }; diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 941a4d35ef8..d96e1abf2d6 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -820,7 +820,7 @@ fail_request: static void finalize_request(struct s3cmci_host *host) { struct mmc_request *mrq = host->mrq; - struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd; + struct mmc_command *cmd; int debug_as_failure = 0; if (host->complete_what != COMPLETION_FINALIZE) @@ -828,6 +828,7 @@ static void finalize_request(struct s3cmci_host *host) if (!mrq) return; + cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd; if (cmd->data && (cmd->error == 0) && (cmd->data->error == 0)) { @@ -1302,10 +1303,8 @@ static int s3cmci_get_ro(struct mmc_host *mmc) if (pdata->no_wprotect) return 0; - ret = s3c2410_gpio_getpin(pdata->gpio_wprotect); - - if (pdata->wprotect_invert) - ret = !ret; + ret = gpio_get_value(pdata->gpio_wprotect) ? 1 : 0; + ret ^= pdata->wprotect_invert; return ret; } @@ -1654,7 +1653,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev) goto probe_free_irq; } - host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect); + host->irq_cd = gpio_to_irq(host->pdata->gpio_detect); if (host->irq_cd >= 0) { if (request_irq(host->irq_cd, s3cmci_irq_cd, @@ -1892,7 +1891,7 @@ static int s3cmci_resume(struct device *dev) return mmc_resume_host(mmc); } -static struct dev_pm_ops s3cmci_pm = { +static const struct dev_pm_ops s3cmci_pm = { .suspend = s3cmci_suspend, .resume = s3cmci_resume, }; diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of-core.c index 01ab916c280..55e33135edb 100644 --- a/drivers/mmc/host/sdhci-of.c +++ b/drivers/mmc/host/sdhci-of-core.c @@ -22,62 +22,37 @@ #include <linux/of_platform.h> #include <linux/mmc/host.h> #include <asm/machdep.h> +#include "sdhci-of.h" #include "sdhci.h" -struct sdhci_of_data { - unsigned int quirks; - struct sdhci_ops ops; -}; - -struct sdhci_of_host { - unsigned int clock; - u16 xfer_mode_shadow; -}; +#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER /* - * Ops and quirks for the Freescale eSDHC controller. + * These accessors are designed for big endian hosts doing I/O to + * little endian controllers incorporating a 32-bit hardware byte swapper. 
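 * A 32-bit access therefore needs no fixup, but narrower accesses land on
 * swapped byte lanes: e.g. the 8-bit SDHCI_HOST_CONTROL register at offset
 * 0x28 is reached at 0x28 ^ 0x3 = 0x2b, and the 16-bit SDHCI_TRANSFER_MODE
 * register at 0x0c is reached at 0x0c ^ 0x2 = 0x0e, which is what the xors
 * in the accessors below implement.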
*/ -#define ESDHC_DMA_SYSCTL 0x40c -#define ESDHC_DMA_SNOOP 0x00000040 - -#define ESDHC_SYSTEM_CONTROL 0x2c -#define ESDHC_CLOCK_MASK 0x0000fff0 -#define ESDHC_PREDIV_SHIFT 8 -#define ESDHC_DIVIDER_SHIFT 4 -#define ESDHC_CLOCK_PEREN 0x00000004 -#define ESDHC_CLOCK_HCKEN 0x00000002 -#define ESDHC_CLOCK_IPGEN 0x00000001 - -#define ESDHC_HOST_CONTROL_RES 0x05 - -static u32 esdhc_readl(struct sdhci_host *host, int reg) +u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg) { return in_be32(host->ioaddr + reg); } -static u16 esdhc_readw(struct sdhci_host *host, int reg) +u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg) { - u16 ret; - - if (unlikely(reg == SDHCI_HOST_VERSION)) - ret = in_be16(host->ioaddr + reg); - else - ret = in_be16(host->ioaddr + (reg ^ 0x2)); - return ret; + return in_be16(host->ioaddr + (reg ^ 0x2)); } -static u8 esdhc_readb(struct sdhci_host *host, int reg) +u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg) { return in_8(host->ioaddr + (reg ^ 0x3)); } -static void esdhc_writel(struct sdhci_host *host, u32 val, int reg) +void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg) { out_be32(host->ioaddr + reg, val); } -static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) +void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg) { struct sdhci_of_host *of_host = sdhci_priv(host); int base = reg & ~0x3; @@ -92,106 +67,21 @@ static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) of_host->xfer_mode_shadow = val; return; case SDHCI_COMMAND: - esdhc_writel(host, val << 16 | of_host->xfer_mode_shadow, - SDHCI_TRANSFER_MODE); + sdhci_be32bs_writel(host, val << 16 | of_host->xfer_mode_shadow, + SDHCI_TRANSFER_MODE); return; - case SDHCI_BLOCK_SIZE: - /* - * Two last DMA bits are reserved, and first one is used for - * non-standard blksz of 4096 bytes that we don't support - * yet. So clear the DMA boundary bits. - */ - val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); - /* fall through */ } clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift); } -static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) +void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg) { int base = reg & ~0x3; int shift = (reg & 0x3) * 8; - /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). 
*/ - if (reg == SDHCI_HOST_CONTROL) - val &= ~ESDHC_HOST_CONTROL_RES; - clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift); } - -static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) -{ - int pre_div = 2; - int div = 1; - - clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN | - ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); - - if (clock == 0) - goto out; - - while (host->max_clk / pre_div / 16 > clock && pre_div < 256) - pre_div *= 2; - - while (host->max_clk / pre_div / div > clock && div < 16) - div++; - - dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", - clock, host->max_clk / pre_div / div); - - pre_div >>= 1; - div--; - - setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN | - ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | - div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT); - mdelay(100); -out: - host->clock = clock; -} - -static int esdhc_enable_dma(struct sdhci_host *host) -{ - setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP); - return 0; -} - -static unsigned int esdhc_get_max_clock(struct sdhci_host *host) -{ - struct sdhci_of_host *of_host = sdhci_priv(host); - - return of_host->clock; -} - -static unsigned int esdhc_get_min_clock(struct sdhci_host *host) -{ - struct sdhci_of_host *of_host = sdhci_priv(host); - - return of_host->clock / 256 / 16; -} - -static struct sdhci_of_data sdhci_esdhc = { - .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 | - SDHCI_QUIRK_BROKEN_CARD_DETECTION | - SDHCI_QUIRK_NO_BUSY_IRQ | - SDHCI_QUIRK_NONSTANDARD_CLOCK | - SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | - SDHCI_QUIRK_PIO_NEEDS_DELAY | - SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | - SDHCI_QUIRK_NO_CARD_NO_RESET, - .ops = { - .readl = esdhc_readl, - .readw = esdhc_readw, - .readb = esdhc_readb, - .writel = esdhc_writel, - .writew = esdhc_writew, - .writeb = esdhc_writeb, - .set_clock = esdhc_set_clock, - .enable_dma = esdhc_enable_dma, - .get_max_clock = esdhc_get_max_clock, - .get_min_clock = esdhc_get_min_clock, - }, -}; +#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */ #ifdef CONFIG_PM @@ -301,9 +191,14 @@ static int __devexit sdhci_of_remove(struct of_device *ofdev) } static const struct of_device_id sdhci_of_match[] = { +#ifdef CONFIG_MMC_SDHCI_OF_ESDHC { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, }, { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, }, { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, }, +#endif +#ifdef CONFIG_MMC_SDHCI_OF_HLWD + { .compatible = "nintendo,hollywood-sdhci", .data = &sdhci_hlwd, }, +#endif { .compatible = "generic-sdhci", }, {}, }; diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c new file mode 100644 index 00000000000..d5b11a17e64 --- /dev/null +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -0,0 +1,143 @@ +/* + * Freescale eSDHC controller driver. + * + * Copyright (c) 2007 Freescale Semiconductor, Inc. + * Copyright (c) 2009 MontaVista Software, Inc. + * + * Authors: Xiaobo Xie <X.Xie@freescale.com> + * Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/mmc/host.h> +#include "sdhci-of.h" +#include "sdhci.h" + +/* + * Ops and quirks for the Freescale eSDHC controller. 
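 * Illustrative numbers for esdhc_set_clock() below, assuming a 133 MHz
 * base clock and a 25 MHz target: the two loops settle on pre_div = 2 and
 * div = 3 (roughly 22.2 MHz), and the values actually written to
 * ESDHC_SYSTEM_CONTROL are pre_div >> 1 = 1 and div - 1 = 2.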
+ */ + +#define ESDHC_DMA_SYSCTL 0x40c +#define ESDHC_DMA_SNOOP 0x00000040 + +#define ESDHC_SYSTEM_CONTROL 0x2c +#define ESDHC_CLOCK_MASK 0x0000fff0 +#define ESDHC_PREDIV_SHIFT 8 +#define ESDHC_DIVIDER_SHIFT 4 +#define ESDHC_CLOCK_PEREN 0x00000004 +#define ESDHC_CLOCK_HCKEN 0x00000002 +#define ESDHC_CLOCK_IPGEN 0x00000001 + +#define ESDHC_HOST_CONTROL_RES 0x05 + +static u16 esdhc_readw(struct sdhci_host *host, int reg) +{ + u16 ret; + + if (unlikely(reg == SDHCI_HOST_VERSION)) + ret = in_be16(host->ioaddr + reg); + else + ret = sdhci_be32bs_readw(host, reg); + return ret; +} + +static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) +{ + if (reg == SDHCI_BLOCK_SIZE) { + /* + * Two last DMA bits are reserved, and first one is used for + * non-standard blksz of 4096 bytes that we don't support + * yet. So clear the DMA boundary bits. + */ + val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); + } + sdhci_be32bs_writew(host, val, reg); +} + +static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) +{ + /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */ + if (reg == SDHCI_HOST_CONTROL) + val &= ~ESDHC_HOST_CONTROL_RES; + sdhci_be32bs_writeb(host, val, reg); +} + +static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) +{ + int pre_div = 2; + int div = 1; + + clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN | + ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); + + if (clock == 0) + goto out; + + while (host->max_clk / pre_div / 16 > clock && pre_div < 256) + pre_div *= 2; + + while (host->max_clk / pre_div / div > clock && div < 16) + div++; + + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", + clock, host->max_clk / pre_div / div); + + pre_div >>= 1; + div--; + + setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN | + ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | + div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT); + mdelay(100); +out: + host->clock = clock; +} + +static int esdhc_enable_dma(struct sdhci_host *host) +{ + setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP); + return 0; +} + +static unsigned int esdhc_get_max_clock(struct sdhci_host *host) +{ + struct sdhci_of_host *of_host = sdhci_priv(host); + + return of_host->clock; +} + +static unsigned int esdhc_get_min_clock(struct sdhci_host *host) +{ + struct sdhci_of_host *of_host = sdhci_priv(host); + + return of_host->clock / 256 / 16; +} + +struct sdhci_of_data sdhci_esdhc = { + .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 | + SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_NO_BUSY_IRQ | + SDHCI_QUIRK_NONSTANDARD_CLOCK | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_PIO_NEEDS_DELAY | + SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | + SDHCI_QUIRK_NO_CARD_NO_RESET, + .ops = { + .readl = sdhci_be32bs_readl, + .readw = esdhc_readw, + .readb = sdhci_be32bs_readb, + .writel = sdhci_be32bs_writel, + .writew = esdhc_writew, + .writeb = esdhc_writeb, + .set_clock = esdhc_set_clock, + .enable_dma = esdhc_enable_dma, + .get_max_clock = esdhc_get_max_clock, + .get_min_clock = esdhc_get_min_clock, + }, +}; diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c new file mode 100644 index 00000000000..35117f3ed75 --- /dev/null +++ b/drivers/mmc/host/sdhci-of-hlwd.c @@ -0,0 +1,65 @@ +/* + * drivers/mmc/host/sdhci-of-hlwd.c + * + * Nintendo Wii Secure Digital Host Controller Interface. 
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
new file mode 100644
index 00000000000..35117f3ed75
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -0,0 +1,65 @@
+/*
+ * drivers/mmc/host/sdhci-of-hlwd.c
+ *
+ * Nintendo Wii Secure Digital Host Controller Interface.
+ * Copyright (C) 2009 The GameCube Linux Team
+ * Copyright (C) 2009 Albert Herranz
+ *
+ * Based on sdhci-of-esdhc.c
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ *	    Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/mmc/host.h>
+#include "sdhci-of.h"
+#include "sdhci.h"
+
+/*
+ * Ops and quirks for the Nintendo Wii SDHCI controllers.
+ */
+
+/*
+ * We need a small delay after each write, or things go horribly wrong.
+ */
+#define SDHCI_HLWD_WRITE_DELAY	5 /* usecs */
+
+static void sdhci_hlwd_writel(struct sdhci_host *host, u32 val, int reg)
+{
+	sdhci_be32bs_writel(host, val, reg);
+	udelay(SDHCI_HLWD_WRITE_DELAY);
+}
+
+static void sdhci_hlwd_writew(struct sdhci_host *host, u16 val, int reg)
+{
+	sdhci_be32bs_writew(host, val, reg);
+	udelay(SDHCI_HLWD_WRITE_DELAY);
+}
+
+static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+	sdhci_be32bs_writeb(host, val, reg);
+	udelay(SDHCI_HLWD_WRITE_DELAY);
+}
+
+struct sdhci_of_data sdhci_hlwd = {
+	.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
+		  SDHCI_QUIRK_32BIT_DMA_SIZE,
+	.ops = {
+		.readl = sdhci_be32bs_readl,
+		.readw = sdhci_be32bs_readw,
+		.readb = sdhci_be32bs_readb,
+		.writel = sdhci_hlwd_writel,
+		.writew = sdhci_hlwd_writew,
+		.writeb = sdhci_hlwd_writeb,
+	},
+};
diff --git a/drivers/mmc/host/sdhci-of.h b/drivers/mmc/host/sdhci-of.h
new file mode 100644
index 00000000000..ad09ad9915d
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of.h
@@ -0,0 +1,42 @@
+/*
+ * OpenFirmware bindings for Secure Digital Host Controller Interface.
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ *	    Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef __SDHCI_OF_H
+#define __SDHCI_OF_H
+
+#include <linux/types.h>
+#include "sdhci.h"
+
+struct sdhci_of_data {
+	unsigned int quirks;
+	struct sdhci_ops ops;
+};
+
+struct sdhci_of_host {
+	unsigned int clock;
+	u16 xfer_mode_shadow;
+};
+
+extern u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg);
+extern u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg);
+extern u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg);
+extern void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg);
+extern void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg);
+extern void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg);
+
+extern struct sdhci_of_data sdhci_esdhc;
+extern struct sdhci_of_data sdhci_hlwd;
+
+#endif /* __SDHCI_OF_H */
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index e0356644d1a..5c3a1767770 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -285,6 +285,73 @@ static const struct sdhci_pci_fixes sdhci_jmicron = {
 	.resume		= jmicron_resume,
 };
 
+/* SysKonnect CardBus2SDIO extra registers */
+#define SYSKT_CTRL		0x200
+#define SYSKT_RDFIFO_STAT	0x204
+#define SYSKT_WRFIFO_STAT	0x208
+#define SYSKT_POWER_DATA	0x20c
+#define SYSKT_POWER_330		0xef
+#define SYSKT_POWER_300		0xf8
+#define SYSKT_POWER_184		0xcc
+#define SYSKT_POWER_CMD		0x20d
+#define SYSKT_POWER_START	(1 << 7)
+#define SYSKT_POWER_STATUS	0x20e
+#define SYSKT_POWER_STATUS_OK	(1 << 0)
+#define SYSKT_BOARD_REV		0x210
+#define SYSKT_CHIP_REV		0x211
+#define SYSKT_CONF_DATA		0x212
+#define SYSKT_CONF_DATA_1V8	(1 << 2)
+#define SYSKT_CONF_DATA_2V5	(1 << 1)
+#define SYSKT_CONF_DATA_3V3	(1 << 0)
+
+static int syskt_probe(struct sdhci_pci_chip *chip)
+{
+	if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
+		chip->pdev->class &= ~0x0000FF;
+		chip->pdev->class |= PCI_SDHCI_IFDMA;
+	}
+	return 0;
+}
+
+static int syskt_probe_slot(struct sdhci_pci_slot *slot)
+{
+	int tm, ps;
+
+	u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
+	u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
+	dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
+					 "board rev %d.%d, chip rev %d.%d\n",
+					 board_rev >> 4, board_rev & 0xf,
+					 chip_rev >> 4, chip_rev & 0xf);
+	if (chip_rev >= 0x20)
+		slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
+
+	writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
+	writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
+	udelay(50);
+	tm = 10;  /* Wait max 1 ms */
+	do {
+		ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
+		if (ps & SYSKT_POWER_STATUS_OK)
+			break;
+		udelay(100);
+	} while (--tm);
+	if (!tm) {
+		dev_err(&slot->chip->pdev->dev,
+			"power regulator never stabilized");
+		writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_syskt = {
+	.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
+	.probe = syskt_probe,
+	.probe_slot = syskt_probe_slot,
+};
+
 static int via_probe(struct sdhci_pci_chip *chip)
 {
 	if (chip->pdev->revision == 0x10)
@@ -363,6 +430,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
 	},
 
 	{
+		.vendor		= PCI_VENDOR_ID_SYSKONNECT,
+		.device		= 0x8000,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_syskt,
+	},
+
+	{
 		.vendor		= PCI_VENDOR_ID_VIA,
 		.device		= 0x95d0,
 		.subvendor	= PCI_ANY_ID,
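The SysKonnect slot probe above raises card power by writing SYSKT_POWER_330 and then SYSKT_POWER_START, and afterwards polls SYSKT_POWER_STATUS in 100 microsecond steps, giving the regulator at most 10 polls (about 1 ms) to report SYSKT_POWER_STATUS_OK before the probe fails with -ENODEV. A minimal sketch of that timing budget follows; read_power_status() and sleep_us() are hypothetical stand-ins for the driver's readw() and udelay() calls.

/* Illustrative only: the 1 ms power-good polling budget, with stubbed I/O. */
#include <stdio.h>

#define SYSKT_POWER_STATUS_OK	(1 << 0)

static unsigned int read_power_status(void)
{
	return SYSKT_POWER_STATUS_OK;	/* stub: pretend power came up */
}

static void sleep_us(unsigned int usecs)
{
	(void)usecs;			/* stub: no real delay needed here */
}

static int wait_power_ok(void)
{
	int tm = 10;			/* 10 polls x 100 us = 1 ms budget */

	do {
		if (read_power_status() & SYSKT_POWER_STATUS_OK)
			return 0;	/* regulator stabilized */
		sleep_us(100);
	} while (--tm);

	return -1;			/* give up, as the driver does with -ENODEV */
}

int main(void)
{
	printf("power %s\n", wait_power_ok() ? "failed" : "ok");
	return 0;
}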
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index ce5f1d73dc0..842f46f9428 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -8,6 +8,8 @@
  * the Free Software Foundation; either version 2 of the License, or (at
  * your option) any later version.
  */
+#ifndef __SDHCI_H
+#define __SDHCI_H
 
 #include <linux/scatterlist.h>
 #include <linux/compiler.h>
@@ -408,3 +410,5 @@ extern void sdhci_remove_host(struct sdhci_host *host, int dead);
 extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
 extern int sdhci_resume_host(struct sdhci_host *host);
 #endif
+
+#endif /* __SDHCI_H */
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 91991b460c4..e22c3fa3516 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -46,7 +46,9 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
 		clk |= 0x100;
 	}
 
-	sd_config_write8(host, CNF_SD_CLK_MODE, clk >> 22);
+	if (host->set_clk_div)
+		host->set_clk_div(host->pdev, (clk>>22) & 1);
+
 	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
 }
 
@@ -427,12 +429,13 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	/* Power sequence - OFF -> ON -> UP */
 	switch (ios->power_mode) {
 	case MMC_POWER_OFF: /* power down SD bus */
-		sd_config_write8(host, CNF_PWR_CTL_2, 0x00);
+		if (host->set_pwr)
+			host->set_pwr(host->pdev, 0);
 		tmio_mmc_clk_stop(host);
 		break;
 	case MMC_POWER_ON: /* power up SD bus */
-
-		sd_config_write8(host, CNF_PWR_CTL_2, 0x02);
+		if (host->set_pwr)
+			host->set_pwr(host->pdev, 1);
 		break;
 	case MMC_POWER_UP: /* start bus clock */
 		tmio_mmc_clk_start(host);
@@ -485,21 +488,15 @@ static int tmio_mmc_resume(struct platform_device *dev)
 {
 	struct mfd_cell	*cell = (struct mfd_cell *)dev->dev.platform_data;
 	struct mmc_host *mmc = platform_get_drvdata(dev);
-	struct tmio_mmc_host *host = mmc_priv(mmc);
 	int ret = 0;
 
 	/* Tell the MFD core we are ready to be enabled */
-	if (cell->enable) {
-		ret = cell->enable(dev);
+	if (cell->resume) {
+		ret = cell->resume(dev);
 		if (ret)
 			goto out;
 	}
 
-	/* Enable the MMC/SD Control registers */
-	sd_config_write16(host, CNF_CMD, SDCREN);
-	sd_config_write32(host, CNF_CTL_BASE,
-		(dev->resource[0].start >> host->bus_shift) & 0xfffe);
-
 	mmc_resume_host(mmc);
 
 out:
@@ -514,17 +511,16 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
 {
 	struct mfd_cell	*cell = (struct mfd_cell *)dev->dev.platform_data;
 	struct tmio_mmc_data *pdata;
-	struct resource *res_ctl, *res_cnf;
+	struct resource *res_ctl;
 	struct tmio_mmc_host *host;
 	struct mmc_host *mmc;
 	int ret = -EINVAL;
 
-	if (dev->num_resources != 3)
+	if (dev->num_resources != 2)
 		goto out;
 
 	res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
-	res_cnf = platform_get_resource(dev, IORESOURCE_MEM, 1);
-	if (!res_ctl || !res_cnf)
+	if (!res_ctl)
 		goto out;
 
 	pdata = cell->driver_data;
@@ -539,8 +535,12 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
 
 	host = mmc_priv(mmc);
 	host->mmc = mmc;
+	host->pdev = dev;
 	platform_set_drvdata(dev, mmc);
 
+	host->set_pwr = pdata->set_pwr;
+	host->set_clk_div = pdata->set_clk_div;
+
 	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
 	host->bus_shift = resource_size(res_ctl) >> 10;
 
@@ -548,10 +548,6 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
 	if (!host->ctl)
 		goto host_free;
 
-	host->cnf = ioremap(res_cnf->start, resource_size(res_cnf));
-	if (!host->cnf)
-		goto unmap_ctl;
-
 	mmc->ops = &tmio_mmc_ops;
 	mmc->caps = MMC_CAP_4_BIT_DATA;
 	mmc->f_max = pdata->hclk;
@@ -562,23 +558,9 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
 	if (cell->enable) {
 		ret = cell->enable(dev);
 		if (ret)
-			goto unmap_cnf;
+			goto unmap_ctl;
 	}
 
-	/* Enable the MMC/SD Control registers */
-	sd_config_write16(host, CNF_CMD, SDCREN);
-	sd_config_write32(host, CNF_CTL_BASE,
-		(dev->resource[0].start >> host->bus_shift) & 0xfffe);
-
-	/* Disable SD power during suspend */
-	sd_config_write8(host, CNF_PWR_CTL_3, 0x01);
-
-	/* The below is required but why? FIXME */
-	sd_config_write8(host, CNF_STOP_CLK_CTL, 0x1f);
-
-	/* Power down SD bus*/
-	sd_config_write8(host, CNF_PWR_CTL_2, 0x00);
-
 	tmio_mmc_clk_stop(host);
 	reset(host);
 
@@ -586,14 +568,14 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
 	if (ret >= 0)
 		host->irq = ret;
 	else
-		goto unmap_cnf;
+		goto unmap_ctl;
 
 	disable_mmc_irqs(host, TMIO_MASK_ALL);
 
 	ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
-		IRQF_TRIGGER_FALLING, "tmio-mmc", host);
+		IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
 	if (ret)
-		goto unmap_cnf;
+		goto unmap_ctl;
 
 	mmc_add_host(mmc);
 
@@ -605,8 +587,6 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
 
 	return 0;
 
-unmap_cnf:
-	iounmap(host->cnf);
 unmap_ctl:
 	iounmap(host->ctl);
 host_free:
@@ -626,7 +606,6 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
 		mmc_remove_host(mmc);
 		free_irq(host->irq, host);
 		iounmap(host->ctl);
-		iounmap(host->cnf);
 		mmc_free_host(mmc);
 	}
 
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 9fa99859497..692dc23363b 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -11,26 +11,6 @@
 
 #include <linux/highmem.h>
 
-#define CNF_CMD			0x04
-#define CNF_CTL_BASE		0x10
-#define CNF_INT_PIN		0x3d
-#define CNF_STOP_CLK_CTL	0x40
-#define CNF_GCLK_CTL		0x41
-#define CNF_SD_CLK_MODE		0x42
-#define CNF_PIN_STATUS		0x44
-#define CNF_PWR_CTL_1		0x48
-#define CNF_PWR_CTL_2		0x49
-#define CNF_PWR_CTL_3		0x4a
-#define CNF_CARD_DETECT_MODE	0x4c
-#define CNF_SD_SLOT		0x50
-#define CNF_EXT_GCLK_CTL_1	0xf0
-#define CNF_EXT_GCLK_CTL_2	0xf1
-#define CNF_EXT_GCLK_CTL_3	0xf9
-#define CNF_SD_LED_EN_1		0xfa
-#define CNF_SD_LED_EN_2		0xfe
-
-#define SDCREN 0x2	/* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
-
 #define CTL_SD_CMD 0x00
 #define CTL_ARG_REG 0x04
 #define CTL_STOP_INTERNAL_ACTION 0x08
@@ -110,7 +90,6 @@ struct tmio_mmc_host {
-	void __iomem *cnf;
 	void __iomem *ctl;
 	unsigned long bus_shift;
 	struct mmc_command      *cmd;
@@ -119,10 +98,16 @@ struct tmio_mmc_host {
 	struct mmc_host         *mmc;
 	int                     irq;
 
+	/* Callbacks for clock / power control */
+	void (*set_pwr)(struct platform_device *host, int state);
+	void (*set_clk_div)(struct platform_device *host, int state);
+
 	/* pio related stuff */
 	struct scatterlist      *sg_ptr;
 	unsigned int            sg_len;
 	unsigned int            sg_off;
+
+	struct platform_device *pdev;
 };
 
 #include <linux/io.h>
@@ -163,25 +148,6 @@ static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
 	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
 }
 
-static inline void sd_config_write8(struct tmio_mmc_host *host, int addr,
-		u8 val)
-{
-	writeb(val, host->cnf + (addr << host->bus_shift));
-}
-
-static inline void sd_config_write16(struct tmio_mmc_host *host, int addr,
-		u16 val)
-{
-	writew(val, host->cnf + (addr << host->bus_shift));
-}
-
-static inline void sd_config_write32(struct tmio_mmc_host *host, int addr,
-		u32 val)
-{
-	writew(val, host->cnf + (addr << host->bus_shift));
-	writew(val >> 16, host->cnf + ((addr + 2) << host->bus_shift));
-}
-
 #include <linux/scatterlist.h>
 #include <linux/blkdev.h>
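With the CNF register pokes gone, tmio_mmc now relies on two callbacks handed in through its platform data: set_pwr() replaces the CNF_PWR_CTL_2 writes and set_clk_div() replaces the CNF_SD_CLK_MODE write (bit 22 of the computed clock word). A rough sketch of how an MFD or board driver might supply them is shown below; it assumes struct tmio_mmc_data (carrying the hclk, set_pwr and set_clk_div fields that the probe code reads) is declared in <linux/mfd/tmio.h>, and the callback bodies are placeholders rather than code taken from this patch.

/* Hypothetical glue code, for illustration only. */
#include <linux/platform_device.h>
#include <linux/mfd/tmio.h>

static void example_mmc_set_pwr(struct platform_device *pdev, int state)
{
	/* Placeholder: switch the SD power rail on (state != 0) or off,
	 * e.g. via a GPIO or a register in the MFD's configuration space. */
}

static void example_mmc_set_clk_div(struct platform_device *pdev, int state)
{
	/* Placeholder: select the SD clock divider requested by
	 * tmio_mmc_set_clock(); state carries (clk >> 22) & 1. */
}

static struct tmio_mmc_data example_mmc_data = {
	.hclk		= 24000000,	/* assumed 24 MHz input clock */
	.set_pwr	= example_mmc_set_pwr,
	.set_clk_div	= example_mmc_set_clk_div,
};

The parent MFD driver would then point its cell's driver_data at example_mmc_data, which is exactly what tmio_mmc_probe() reads back as pdata before copying the callbacks into struct tmio_mmc_host.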