Diffstat (limited to 'drivers'): 110 files changed, 3462 insertions, 1327 deletions
diff --git a/drivers/acorn/char/defkeymap-l7200.c b/drivers/acorn/char/defkeymap-l7200.c index 28a5fbc6aa1..93d80a1c36f 100644 --- a/drivers/acorn/char/defkeymap-l7200.c +++ b/drivers/acorn/char/defkeymap-l7200.c @@ -347,40 +347,40 @@ char *func_table[MAX_NR_FUNC] = { }; struct kbdiacruc accent_table[MAX_DIACR] = { - {'`', 'A', '\300'}, {'`', 'a', '\340'}, - {'\'', 'A', '\301'}, {'\'', 'a', '\341'}, - {'^', 'A', '\302'}, {'^', 'a', '\342'}, - {'~', 'A', '\303'}, {'~', 'a', '\343'}, - {'"', 'A', '\304'}, {'"', 'a', '\344'}, - {'O', 'A', '\305'}, {'o', 'a', '\345'}, - {'0', 'A', '\305'}, {'0', 'a', '\345'}, - {'A', 'A', '\305'}, {'a', 'a', '\345'}, - {'A', 'E', '\306'}, {'a', 'e', '\346'}, - {',', 'C', '\307'}, {',', 'c', '\347'}, - {'`', 'E', '\310'}, {'`', 'e', '\350'}, - {'\'', 'E', '\311'}, {'\'', 'e', '\351'}, - {'^', 'E', '\312'}, {'^', 'e', '\352'}, - {'"', 'E', '\313'}, {'"', 'e', '\353'}, - {'`', 'I', '\314'}, {'`', 'i', '\354'}, - {'\'', 'I', '\315'}, {'\'', 'i', '\355'}, - {'^', 'I', '\316'}, {'^', 'i', '\356'}, - {'"', 'I', '\317'}, {'"', 'i', '\357'}, - {'-', 'D', '\320'}, {'-', 'd', '\360'}, - {'~', 'N', '\321'}, {'~', 'n', '\361'}, - {'`', 'O', '\322'}, {'`', 'o', '\362'}, - {'\'', 'O', '\323'}, {'\'', 'o', '\363'}, - {'^', 'O', '\324'}, {'^', 'o', '\364'}, - {'~', 'O', '\325'}, {'~', 'o', '\365'}, - {'"', 'O', '\326'}, {'"', 'o', '\366'}, - {'/', 'O', '\330'}, {'/', 'o', '\370'}, - {'`', 'U', '\331'}, {'`', 'u', '\371'}, - {'\'', 'U', '\332'}, {'\'', 'u', '\372'}, - {'^', 'U', '\333'}, {'^', 'u', '\373'}, - {'"', 'U', '\334'}, {'"', 'u', '\374'}, - {'\'', 'Y', '\335'}, {'\'', 'y', '\375'}, - {'T', 'H', '\336'}, {'t', 'h', '\376'}, - {'s', 's', '\337'}, {'"', 'y', '\377'}, - {'s', 'z', '\337'}, {'i', 'j', '\377'}, + {'`', 'A', 0300}, {'`', 'a', 0340}, + {'\'', 'A', 0301}, {'\'', 'a', 0341}, + {'^', 'A', 0302}, {'^', 'a', 0342}, + {'~', 'A', 0303}, {'~', 'a', 0343}, + {'"', 'A', 0304}, {'"', 'a', 0344}, + {'O', 'A', 0305}, {'o', 'a', 0345}, + {'0', 'A', 0305}, {'0', 'a', 0345}, + {'A', 'A', 0305}, {'a', 'a', 0345}, + {'A', 'E', 0306}, {'a', 'e', 0346}, + {',', 'C', 0307}, {',', 'c', 0347}, + {'`', 'E', 0310}, {'`', 'e', 0350}, + {'\'', 'E', 0311}, {'\'', 'e', 0351}, + {'^', 'E', 0312}, {'^', 'e', 0352}, + {'"', 'E', 0313}, {'"', 'e', 0353}, + {'`', 'I', 0314}, {'`', 'i', 0354}, + {'\'', 'I', 0315}, {'\'', 'i', 0355}, + {'^', 'I', 0316}, {'^', 'i', 0356}, + {'"', 'I', 0317}, {'"', 'i', 0357}, + {'-', 'D', 0320}, {'-', 'd', 0360}, + {'~', 'N', 0321}, {'~', 'n', 0361}, + {'`', 'O', 0322}, {'`', 'o', 0362}, + {'\'', 'O', 0323}, {'\'', 'o', 0363}, + {'^', 'O', 0324}, {'^', 'o', 0364}, + {'~', 'O', 0325}, {'~', 'o', 0365}, + {'"', 'O', 0326}, {'"', 'o', 0366}, + {'/', 'O', 0330}, {'/', 'o', 0370}, + {'`', 'U', 0331}, {'`', 'u', 0371}, + {'\'', 'U', 0332}, {'\'', 'u', 0372}, + {'^', 'U', 0333}, {'^', 'u', 0373}, + {'"', 'U', 0334}, {'"', 'u', 0374}, + {'\'', 'Y', 0335}, {'\'', 'y', 0375}, + {'T', 'H', 0336}, {'t', 'h', 0376}, + {'s', 's', 0337}, {'"', 'y', 0377}, + {'s', 'z', 0337}, {'i', 'j', 0377}, }; unsigned int accent_table_size = 68; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index fbc24358ada..4fbcce758b0 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -113,7 +113,7 @@ int atapi_enabled = 1; module_param(atapi_enabled, int, 0444); MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); -int atapi_dmadir = 0; +static int atapi_dmadir = 0; module_param(atapi_dmadir, int, 0444); 
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); @@ -6567,6 +6567,8 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) ata_lpm_enable(host); rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); + if (rc == 0) + host->dev->power.power_state = mesg; return rc; } @@ -6585,6 +6587,7 @@ void ata_host_resume(struct ata_host *host) { ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); + host->dev->power.power_state = PMSG_ON; /* reenable link pm */ ata_lpm_disable(host); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 0562b0a49f3..8f0e8f2bc62 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -862,9 +862,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, struct request_queue *q = sdev->request_queue; void *buf; - /* set the min alignment */ + /* set the min alignment and padding */ blk_queue_update_dma_alignment(sdev->request_queue, ATA_DMA_PAD_SZ - 1); + blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1); /* configure draining */ buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); @@ -1694,12 +1695,17 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args, u8 *rbuf; unsigned int buflen, rc; struct scsi_cmnd *cmd = args->cmd; + unsigned long flags; + + local_irq_save(flags); buflen = ata_scsi_rbuf_get(cmd, &rbuf); memset(rbuf, 0, buflen); rc = actor(args, rbuf, buflen); ata_scsi_rbuf_put(cmd, rbuf); + local_irq_restore(flags); + if (rc == 0) cmd->result = SAM_STAT_GOOD; args->done(cmd); @@ -2473,6 +2479,9 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { u8 *buf = NULL; unsigned int buflen; + unsigned long flags; + + local_irq_save(flags); buflen = ata_scsi_rbuf_get(cmd, &buf); @@ -2490,6 +2499,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) } ata_scsi_rbuf_put(cmd, buf); + + local_irq_restore(flags); } cmd->result = SAM_STAT_GOOD; @@ -2528,7 +2539,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) } qc->tf.command = ATA_CMD_PACKET; - qc->nbytes = scsi_bufflen(scmd); + qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; /* check whether ATAPI DMA is safe */ if (!using_pio && ata_check_atapi_dma(qc)) @@ -2539,7 +2550,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) * want to set it properly, and for DMA where it is * effectively meaningless. */ - nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024); + nbytes = min(scmd->request->data_len, (unsigned int)63 * 1024); /* Most ATAPI devices which honor transfer chunk size don't * behave according to the spec when odd chunk size which @@ -2865,7 +2876,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) * TODO: find out if we need to do more here to * cover scatter/gather case. 
*/ - qc->nbytes = scsi_bufflen(scmd); + qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; /* request result TF and be quiet about device error */ qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 6036dedfe37..aa884f71a12 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -56,7 +56,6 @@ enum { extern unsigned int ata_print_id; extern struct workqueue_struct *ata_aux_wq; extern int atapi_enabled; -extern int atapi_dmadir; extern int atapi_passthru16; extern int libata_fua; extern int libata_noacpi; diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 69f651e0bc9..840d1c4a785 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -45,6 +45,8 @@ #include <linux/interrupt.h> #include <linux/device.h> #include <scsi/scsi_host.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi.h> #include <linux/libata.h> #ifdef CONFIG_PPC_OF @@ -59,6 +61,7 @@ enum { /* ap->flags bits */ K2_FLAG_SATA_8_PORTS = (1 << 24), K2_FLAG_NO_ATAPI_DMA = (1 << 25), + K2_FLAG_BAR_POS_3 = (1 << 26), /* Taskfile registers offsets */ K2_SATA_TF_CMD_OFFSET = 0x00, @@ -88,8 +91,10 @@ enum { /* Port stride */ K2_SATA_PORT_OFFSET = 0x100, - board_svw4 = 0, - board_svw8 = 1, + chip_svw4 = 0, + chip_svw8 = 1, + chip_svw42 = 2, /* bar 3 */ + chip_svw43 = 3, /* bar 5 */ }; static u8 k2_stat_check_status(struct ata_port *ap); @@ -97,10 +102,25 @@ static u8 k2_stat_check_status(struct ata_port *ap); static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) { + u8 cmnd = qc->scsicmd->cmnd[0]; + if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) return -1; /* ATAPI DMA not supported */ + else { + switch (cmnd) { + case READ_10: + case READ_12: + case READ_16: + case WRITE_10: + case WRITE_12: + case WRITE_16: + return 0; + + default: + return -1; + } - return 0; + } } static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) @@ -354,7 +374,7 @@ static const struct ata_port_operations k2_sata_ops = { }; static const struct ata_port_info k2_port_info[] = { - /* board_svw4 */ + /* chip_svw4 */ { .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA, @@ -363,7 +383,7 @@ static const struct ata_port_info k2_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &k2_sata_ops, }, - /* board_svw8 */ + /* chip_svw8 */ { .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA | @@ -373,6 +393,24 @@ static const struct ata_port_info k2_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &k2_sata_ops, }, + /* chip_svw42 */ + { + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_MMIO | K2_FLAG_BAR_POS_3, + .pio_mask = 0x1f, + .mwdma_mask = 0x07, + .udma_mask = ATA_UDMA6, + .port_ops = &k2_sata_ops, + }, + /* chip_svw43 */ + { + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_MMIO, + .pio_mask = 0x1f, + .mwdma_mask = 0x07, + .udma_mask = ATA_UDMA6, + .port_ops = &k2_sata_ops, + }, }; static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base) @@ -402,7 +440,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en { &k2_port_info[ent->driver_data], NULL }; struct ata_host *host; void __iomem *mmio_base; - int n_ports, i, rc; + int n_ports, i, rc, bar_pos; if (!printed_version++) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); @@ -416,6 +454,9 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en if (!host) return -ENOMEM; + bar_pos = 5; + if (ppi[0]->flags & 
K2_FLAG_BAR_POS_3) + bar_pos = 3; /* * If this driver happens to only be useful on Apple's K2, then * we should check that here as it has a normal Serverworks ID @@ -428,17 +469,23 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en * Check if we have resources mapped at all (second function may * have been disabled by firmware) */ - if (pci_resource_len(pdev, 5) == 0) + if (pci_resource_len(pdev, bar_pos) == 0) { + /* In IDE mode we need to pin the device to ensure that + pcim_release does not clear the busmaster bit in config + space, clearing causes busmaster DMA to fail on + ports 3 & 4 */ + pcim_pin_device(pdev); return -ENODEV; + } /* Request and iomap PCI regions */ - rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME); + rc = pcim_iomap_regions(pdev, 1 << bar_pos, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) return rc; host->iomap = pcim_iomap_table(pdev); - mmio_base = host->iomap[5]; + mmio_base = host->iomap[bar_pos]; /* different controllers have different number of ports - currently 4 or 8 */ /* All ports are on the same function. Multi-function device is no @@ -483,11 +530,13 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en * controller * */ static const struct pci_device_id k2_sata_pci_tbl[] = { - { PCI_VDEVICE(SERVERWORKS, 0x0240), board_svw4 }, - { PCI_VDEVICE(SERVERWORKS, 0x0241), board_svw4 }, - { PCI_VDEVICE(SERVERWORKS, 0x0242), board_svw8 }, - { PCI_VDEVICE(SERVERWORKS, 0x024a), board_svw4 }, - { PCI_VDEVICE(SERVERWORKS, 0x024b), board_svw4 }, + { PCI_VDEVICE(SERVERWORKS, 0x0240), chip_svw4 }, + { PCI_VDEVICE(SERVERWORKS, 0x0241), chip_svw4 }, + { PCI_VDEVICE(SERVERWORKS, 0x0242), chip_svw8 }, + { PCI_VDEVICE(SERVERWORKS, 0x024a), chip_svw4 }, + { PCI_VDEVICE(SERVERWORKS, 0x024b), chip_svw4 }, + { PCI_VDEVICE(SERVERWORKS, 0x0410), chip_svw42 }, + { PCI_VDEVICE(SERVERWORKS, 0x0411), chip_svw43 }, { } }; diff --git a/drivers/base/core.c b/drivers/base/core.c index 9c0070b5bd3..7de543d1d0b 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -621,7 +621,8 @@ static struct kobject *get_device_parent(struct device *dev, static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) { /* see if we live in a "glue" directory */ - if (!dev->class || glue_dir->kset != &dev->class->class_dirs) + if (!glue_dir || !dev->class || + glue_dir->kset != &dev->class->class_dirs) return; kobject_put(glue_dir); @@ -770,17 +771,10 @@ int device_add(struct device *dev) struct class_interface *class_intf; int error; - error = pm_sleep_lock(); - if (error) { - dev_warn(dev, "Suspicious %s during suspend\n", __FUNCTION__); - dump_stack(); - return error; - } - dev = get_device(dev); if (!dev || !strlen(dev->bus_id)) { error = -EINVAL; - goto Error; + goto Done; } pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__); @@ -843,11 +837,9 @@ int device_add(struct device *dev) } Done: put_device(dev); - pm_sleep_unlock(); return error; BusError: device_pm_remove(dev); - dpm_sysfs_remove(dev); PMError: if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index ee9d1c8db0d..d887d5cb5be 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -48,7 +48,6 @@ */ LIST_HEAD(dpm_active); -static LIST_HEAD(dpm_locked); static LIST_HEAD(dpm_off); static LIST_HEAD(dpm_off_irq); static LIST_HEAD(dpm_destroy); @@ -81,28 +80,6 @@ void device_pm_add(struct device *dev) */ void device_pm_remove(struct device 
*dev) { - /* - * If this function is called during a suspend, it will be blocked, - * because we're holding the device's semaphore at that time, which may - * lead to a deadlock. In that case we want to print a warning. - * However, it may also be called by unregister_dropped_devices() with - * the device's semaphore released, in which case the warning should - * not be printed. - */ - if (down_trylock(&dev->sem)) { - if (down_read_trylock(&pm_sleep_rwsem)) { - /* No suspend in progress, wait on dev->sem */ - down(&dev->sem); - up_read(&pm_sleep_rwsem); - } else { - /* Suspend in progress, we may deadlock */ - dev_warn(dev, "Suspicious %s during suspend\n", - __FUNCTION__); - dump_stack(); - /* The user has been warned ... */ - down(&dev->sem); - } - } pr_debug("PM: Removing info for %s:%s\n", dev->bus ? dev->bus->name : "No Bus", kobject_name(&dev->kobj)); @@ -110,7 +87,6 @@ void device_pm_remove(struct device *dev) dpm_sysfs_remove(dev); list_del_init(&dev->power.entry); mutex_unlock(&dpm_list_mtx); - up(&dev->sem); } /** @@ -230,6 +206,8 @@ static int resume_device(struct device *dev) TRACE_DEVICE(dev); TRACE_RESUME(0); + down(&dev->sem); + if (dev->bus && dev->bus->resume) { dev_dbg(dev,"resuming\n"); error = dev->bus->resume(dev); @@ -245,6 +223,8 @@ static int resume_device(struct device *dev) error = dev->class->resume(dev); } + up(&dev->sem); + TRACE_RESUME(error); return error; } @@ -266,7 +246,7 @@ static void dpm_resume(void) struct list_head *entry = dpm_off.next; struct device *dev = to_device(entry); - list_move_tail(entry, &dpm_locked); + list_move_tail(entry, &dpm_active); mutex_unlock(&dpm_list_mtx); resume_device(dev); mutex_lock(&dpm_list_mtx); @@ -275,25 +255,6 @@ static void dpm_resume(void) } /** - * unlock_all_devices - Release each device's semaphore - * - * Go through the dpm_off list. Put each device on the dpm_active - * list and unlock it. - */ -static void unlock_all_devices(void) -{ - mutex_lock(&dpm_list_mtx); - while (!list_empty(&dpm_locked)) { - struct list_head *entry = dpm_locked.prev; - struct device *dev = to_device(entry); - - list_move(entry, &dpm_active); - up(&dev->sem); - } - mutex_unlock(&dpm_list_mtx); -} - -/** * unregister_dropped_devices - Unregister devices scheduled for removal * * Unregister all devices on the dpm_destroy list. 
@@ -305,7 +266,6 @@ static void unregister_dropped_devices(void) struct list_head *entry = dpm_destroy.next; struct device *dev = to_device(entry); - up(&dev->sem); mutex_unlock(&dpm_list_mtx); /* This also removes the device from the list */ device_unregister(dev); @@ -324,7 +284,6 @@ void device_resume(void) { might_sleep(); dpm_resume(); - unlock_all_devices(); unregister_dropped_devices(); up_write(&pm_sleep_rwsem); } @@ -388,18 +347,15 @@ int device_power_down(pm_message_t state) struct list_head *entry = dpm_off.prev; struct device *dev = to_device(entry); - list_del_init(&dev->power.entry); error = suspend_device_late(dev, state); if (error) { printk(KERN_ERR "Could not power down device %s: " "error %d\n", kobject_name(&dev->kobj), error); - if (list_empty(&dev->power.entry)) - list_add(&dev->power.entry, &dpm_off); break; } - if (list_empty(&dev->power.entry)) - list_add(&dev->power.entry, &dpm_off_irq); + if (!list_empty(&dev->power.entry)) + list_move(&dev->power.entry, &dpm_off_irq); } if (!error) @@ -419,6 +375,8 @@ static int suspend_device(struct device *dev, pm_message_t state) { int error = 0; + down(&dev->sem); + if (dev->power.power_state.event) { dev_dbg(dev, "PM: suspend %d-->%d\n", dev->power.power_state.event, state.event); @@ -441,6 +399,9 @@ static int suspend_device(struct device *dev, pm_message_t state) error = dev->bus->suspend(dev, state); suspend_report_result(dev->bus->suspend, error); } + + up(&dev->sem); + return error; } @@ -461,13 +422,13 @@ static int dpm_suspend(pm_message_t state) int error = 0; mutex_lock(&dpm_list_mtx); - while (!list_empty(&dpm_locked)) { - struct list_head *entry = dpm_locked.prev; + while (!list_empty(&dpm_active)) { + struct list_head *entry = dpm_active.prev; struct device *dev = to_device(entry); - list_del_init(&dev->power.entry); mutex_unlock(&dpm_list_mtx); error = suspend_device(dev, state); + mutex_lock(&dpm_list_mtx); if (error) { printk(KERN_ERR "Could not suspend device %s: " "error %d%s\n", @@ -476,14 +437,10 @@ static int dpm_suspend(pm_message_t state) (error == -EAGAIN ? " (please convert to suspend_late)" : "")); - mutex_lock(&dpm_list_mtx); - if (list_empty(&dev->power.entry)) - list_add(&dev->power.entry, &dpm_locked); break; } - mutex_lock(&dpm_list_mtx); - if (list_empty(&dev->power.entry)) - list_add(&dev->power.entry, &dpm_off); + if (!list_empty(&dev->power.entry)) + list_move(&dev->power.entry, &dpm_off); } mutex_unlock(&dpm_list_mtx); @@ -491,36 +448,6 @@ static int dpm_suspend(pm_message_t state) } /** - * lock_all_devices - Acquire every device's semaphore - * - * Go through the dpm_active list. Carefully lock each device's - * semaphore and put it in on the dpm_locked list. - */ -static void lock_all_devices(void) -{ - mutex_lock(&dpm_list_mtx); - while (!list_empty(&dpm_active)) { - struct list_head *entry = dpm_active.next; - struct device *dev = to_device(entry); - - /* Required locking order is dev->sem first, - * then dpm_list_mutex. Hence this awkward code. - */ - get_device(dev); - mutex_unlock(&dpm_list_mtx); - down(&dev->sem); - mutex_lock(&dpm_list_mtx); - - if (list_empty(entry)) - up(&dev->sem); /* Device was removed */ - else - list_move_tail(entry, &dpm_locked); - put_device(dev); - } - mutex_unlock(&dpm_list_mtx); -} - -/** * device_suspend - Save state and stop all devices in system. 
* @state: new power management state * @@ -533,7 +460,6 @@ int device_suspend(pm_message_t state) might_sleep(); down_write(&pm_sleep_rwsem); - lock_all_devices(); error = dpm_suspend(state); if (error) device_resume(); diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c index f25e7c6b2d2..40bca48abc1 100644 --- a/drivers/base/transport_class.c +++ b/drivers/base/transport_class.c @@ -126,9 +126,7 @@ static int transport_setup_classdev(struct attribute_container *cont, } /** - * transport_setup_device - declare a new dev for transport class association - * but don't make it visible yet. - * + * transport_setup_device - declare a new dev for transport class association but don't make it visible yet. * @dev: the generic device representing the entity being added * * Usually, dev represents some component in the HBA system (either diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 9715be3f248..55bd35c0f08 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -33,6 +33,7 @@ #include <linux/blkpg.h> #include <linux/timer.h> #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/init.h> #include <linux/hdreg.h> #include <linux/spinlock.h> @@ -131,7 +132,6 @@ static struct board_type products[] = { /*define how many times we will try a command because of bus resets */ #define MAX_CMD_RETRIES 3 -#define READ_AHEAD 1024 #define MAX_CTLR 32 /* Originally cciss driver only supports 8 major numbers */ @@ -174,8 +174,6 @@ static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, static void fail_all_cmds(unsigned long ctlr); #ifdef CONFIG_PROC_FS -static int cciss_proc_get_info(char *buffer, char **start, off_t offset, - int length, int *eof, void *data); static void cciss_procinit(int i); #else static void cciss_procinit(int i) @@ -240,24 +238,46 @@ static inline CommandList_struct *removeQ(CommandList_struct **Qptr, */ #define ENG_GIG 1000000000 #define ENG_GIG_FACTOR (ENG_GIG/512) +#define ENGAGE_SCSI "engage scsi" static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", "UNKNOWN" }; static struct proc_dir_entry *proc_cciss; -static int cciss_proc_get_info(char *buffer, char **start, off_t offset, - int length, int *eof, void *data) +static void cciss_seq_show_header(struct seq_file *seq) { - off_t pos = 0; - off_t len = 0; - int size, i, ctlr; - ctlr_info_t *h = (ctlr_info_t *) data; - drive_info_struct *drv; - unsigned long flags; - sector_t vol_sz, vol_sz_frac; + ctlr_info_t *h = seq->private; + + seq_printf(seq, "%s: HP %s Controller\n" + "Board ID: 0x%08lx\n" + "Firmware Version: %c%c%c%c\n" + "IRQ: %d\n" + "Logical drives: %d\n" + "Current Q depth: %d\n" + "Current # commands on controller: %d\n" + "Max Q depth since init: %d\n" + "Max # commands on controller since init: %d\n" + "Max SG entries since init: %d\n", + h->devname, + h->product_name, + (unsigned long)h->board_id, + h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], + h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], + h->num_luns, + h->Qdepth, h->commands_outstanding, + h->maxQsinceinit, h->max_outstanding, h->maxSG); - ctlr = h->ctlr; +#ifdef CONFIG_CISS_SCSI_TAPE + cciss_seq_tape_report(seq, h->ctlr); +#endif /* CONFIG_CISS_SCSI_TAPE */ +} + +static void *cciss_seq_start(struct seq_file *seq, loff_t *pos) +{ + ctlr_info_t *h = seq->private; + unsigned ctlr = h->ctlr; + unsigned long flags; /* prevent displaying bogus info during configuration * or deconfiguration of a logical volume @@ -265,115 +285,155 @@ static int 
cciss_proc_get_info(char *buffer, char **start, off_t offset, spin_lock_irqsave(CCISS_LOCK(ctlr), flags); if (h->busy_configuring) { spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); - return -EBUSY; + return ERR_PTR(-EBUSY); } h->busy_configuring = 1; spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); - size = sprintf(buffer, "%s: HP %s Controller\n" - "Board ID: 0x%08lx\n" - "Firmware Version: %c%c%c%c\n" - "IRQ: %d\n" - "Logical drives: %d\n" - "Max sectors: %d\n" - "Current Q depth: %d\n" - "Current # commands on controller: %d\n" - "Max Q depth since init: %d\n" - "Max # commands on controller since init: %d\n" - "Max SG entries since init: %d\n\n", - h->devname, - h->product_name, - (unsigned long)h->board_id, - h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], - h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], - h->num_luns, - h->cciss_max_sectors, - h->Qdepth, h->commands_outstanding, - h->maxQsinceinit, h->max_outstanding, h->maxSG); - - pos += size; - len += size; - cciss_proc_tape_report(ctlr, buffer, &pos, &len); - for (i = 0; i <= h->highest_lun; i++) { - - drv = &h->drv[i]; - if (drv->heads == 0) - continue; + if (*pos == 0) + cciss_seq_show_header(seq); - vol_sz = drv->nr_blocks; - vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); - vol_sz_frac *= 100; - sector_div(vol_sz_frac, ENG_GIG_FACTOR); + return pos; +} + +static int cciss_seq_show(struct seq_file *seq, void *v) +{ + sector_t vol_sz, vol_sz_frac; + ctlr_info_t *h = seq->private; + unsigned ctlr = h->ctlr; + loff_t *pos = v; + drive_info_struct *drv = &h->drv[*pos]; + + if (*pos > h->highest_lun) + return 0; + + if (drv->heads == 0) + return 0; + + vol_sz = drv->nr_blocks; + vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); + vol_sz_frac *= 100; + sector_div(vol_sz_frac, ENG_GIG_FACTOR); + + if (drv->raid_level > 5) + drv->raid_level = RAID_UNKNOWN; + seq_printf(seq, "cciss/c%dd%d:" + "\t%4u.%02uGB\tRAID %s\n", + ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac, + raid_label[drv->raid_level]); + return 0; +} + +static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + ctlr_info_t *h = seq->private; + + if (*pos > h->highest_lun) + return NULL; + *pos += 1; + + return pos; +} + +static void cciss_seq_stop(struct seq_file *seq, void *v) +{ + ctlr_info_t *h = seq->private; + + /* Only reset h->busy_configuring if we succeeded in setting + * it during cciss_seq_start. 
*/ + if (v == ERR_PTR(-EBUSY)) + return; - if (drv->raid_level > 5) - drv->raid_level = RAID_UNKNOWN; - size = sprintf(buffer + len, "cciss/c%dd%d:" - "\t%4u.%02uGB\tRAID %s\n", - ctlr, i, (int)vol_sz, (int)vol_sz_frac, - raid_label[drv->raid_level]); - pos += size; - len += size; - } - - *eof = 1; - *start = buffer + offset; - len -= offset; - if (len > length) - len = length; h->busy_configuring = 0; - return len; } -static int -cciss_proc_write(struct file *file, const char __user *buffer, - unsigned long count, void *data) +static struct seq_operations cciss_seq_ops = { + .start = cciss_seq_start, + .show = cciss_seq_show, + .next = cciss_seq_next, + .stop = cciss_seq_stop, +}; + +static int cciss_seq_open(struct inode *inode, struct file *file) { - unsigned char cmd[80]; - int len; -#ifdef CONFIG_CISS_SCSI_TAPE - ctlr_info_t *h = (ctlr_info_t *) data; - int rc; + int ret = seq_open(file, &cciss_seq_ops); + struct seq_file *seq = file->private_data; + + if (!ret) + seq->private = PDE(inode)->data; + + return ret; +} + +static ssize_t +cciss_proc_write(struct file *file, const char __user *buf, + size_t length, loff_t *ppos) +{ + int err; + char *buffer; + +#ifndef CONFIG_CISS_SCSI_TAPE + return -EINVAL; #endif - if (count > sizeof(cmd) - 1) + if (!buf || length > PAGE_SIZE - 1) return -EINVAL; - if (copy_from_user(cmd, buffer, count)) - return -EFAULT; - cmd[count] = '\0'; - len = strlen(cmd); // above 3 lines ensure safety - if (len && cmd[len - 1] == '\n') - cmd[--len] = '\0'; -# ifdef CONFIG_CISS_SCSI_TAPE - if (strcmp("engage scsi", cmd) == 0) { + + buffer = (char *)__get_free_page(GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + err = -EFAULT; + if (copy_from_user(buffer, buf, length)) + goto out; + buffer[length] = '\0'; + +#ifdef CONFIG_CISS_SCSI_TAPE + if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { + struct seq_file *seq = file->private_data; + ctlr_info_t *h = seq->private; + int rc; + rc = cciss_engage_scsi(h->ctlr); if (rc != 0) - return -rc; - return count; - } + err = -rc; + else + err = length; + } else +#endif /* CONFIG_CISS_SCSI_TAPE */ + err = -EINVAL; /* might be nice to have "disengage" too, but it's not safely possible. (only 1 module use count, lock issues.) */ -# endif - return -EINVAL; + +out: + free_page((unsigned long)buffer); + return err; } -/* - * Get us a file in /proc/cciss that says something about each controller. - * Create /proc/cciss if it doesn't exist yet. - */ +static struct file_operations cciss_proc_fops = { + .owner = THIS_MODULE, + .open = cciss_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, + .write = cciss_proc_write, +}; + static void __devinit cciss_procinit(int i) { struct proc_dir_entry *pde; - if (proc_cciss == NULL) { + if (proc_cciss == NULL) proc_cciss = proc_mkdir("cciss", proc_root_driver); - if (!proc_cciss) - return; - } + if (!proc_cciss) + return; + pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP | + S_IROTH, proc_cciss, + &cciss_proc_fops); + if (!pde) + return; - pde = create_proc_read_entry(hba[i]->devname, - S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH, - proc_cciss, cciss_proc_get_info, hba[i]); - pde->write_proc = cciss_proc_write; + pde->data = hba[i]; } #endif /* CONFIG_PROC_FS */ @@ -1341,7 +1401,6 @@ geo_inq: disk->private_data = &h->drv[drv_index]; /* Set up queue information */ - disk->queue->backing_dev_info.ra_pages = READ_AHEAD; blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask); /* This is a hardware imposed limit. 
*/ @@ -3434,7 +3493,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, } drv->queue = q; - q->backing_dev_info.ra_pages = READ_AHEAD; blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); /* This is a hardware imposed limit. */ diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 55178e9973a..45ac09300eb 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -1404,21 +1404,18 @@ cciss_engage_scsi(int ctlr) } static void -cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len) +cciss_seq_tape_report(struct seq_file *seq, int ctlr) { unsigned long flags; - int size; - - *pos = *pos -1; *len = *len - 1; // cut off the last trailing newline CPQ_TAPE_LOCK(ctlr, flags); - size = sprintf(buffer + *len, + seq_printf(seq, "Sequential access devices: %d\n\n", ccissscsi[ctlr].ndevices); CPQ_TAPE_UNLOCK(ctlr, flags); - *pos += size; *len += size; } + /* Need at least one of these error handlers to keep ../scsi/hosts.c from * complaining. Doing a host- or bus-reset can't do anything good here. * Despite what it might say in scsi_error.c, there may well be commands @@ -1498,6 +1495,5 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd) #define cciss_scsi_setup(cntl_num) #define cciss_unregister_scsi(ctlr) #define cciss_register_scsi(ctlr) -#define cciss_proc_tape_report(ctlr, buffer, pos, len) #endif /* CONFIG_CISS_SCSI_TAPE */ diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 674cd66dcab..18feb1c7c33 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -849,7 +849,8 @@ static int pkt_flush_cache(struct pktcdvd_device *pd) /* * speed is given as the normal factor, e.g. 4 for 4x */ -static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed) +static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, + unsigned write_speed, unsigned read_speed) { struct packet_command cgc; struct request_sense sense; @@ -1776,7 +1777,8 @@ static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, return pkt_generic_packet(pd, &cgc); } -static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written) +static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, + long *last_written) { disc_information di; track_information ti; @@ -1813,7 +1815,7 @@ static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written) /* * write mode select package based on pd->settings */ -static int pkt_set_write_settings(struct pktcdvd_device *pd) +static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) { struct packet_command cgc; struct request_sense sense; @@ -1972,7 +1974,7 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) return 1; } -static int pkt_probe_settings(struct pktcdvd_device *pd) +static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) { struct packet_command cgc; unsigned char buf[12]; @@ -2071,7 +2073,8 @@ static int pkt_probe_settings(struct pktcdvd_device *pd) /* * enable/disable write caching on drive */ -static int pkt_write_caching(struct pktcdvd_device *pd, int set) +static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd, + int set) { struct packet_command cgc; struct request_sense sense; @@ -2116,7 +2119,8 @@ static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag) /* * Returns drive maximum write speed */ -static int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned 
*write_speed) +static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, + unsigned *write_speed) { struct packet_command cgc; struct request_sense sense; @@ -2177,7 +2181,8 @@ static char us_clv_to_speed[16] = { /* * reads the maximum media speed from ATIP */ -static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed) +static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, + unsigned *speed) { struct packet_command cgc; struct request_sense sense; @@ -2249,7 +2254,7 @@ static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed) } } -static int pkt_perform_opc(struct pktcdvd_device *pd) +static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) { struct packet_command cgc; struct request_sense sense; diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index db259e60289..12f5baea439 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -1152,8 +1152,8 @@ clean_up_and_return: /* This code is similar to that in open_for_data. The routine is called whenever an audio play operation is requested. */ -int check_for_audio_disc(struct cdrom_device_info * cdi, - struct cdrom_device_ops * cdo) +static int check_for_audio_disc(struct cdrom_device_info * cdi, + struct cdrom_device_ops * cdo) { int ret; tracktype tracks; diff --git a/drivers/char/defkeymap.c_shipped b/drivers/char/defkeymap.c_shipped index 0aa419a6176..d2208dfe3f6 100644 --- a/drivers/char/defkeymap.c_shipped +++ b/drivers/char/defkeymap.c_shipped @@ -223,40 +223,40 @@ char *func_table[MAX_NR_FUNC] = { }; struct kbdiacruc accent_table[MAX_DIACR] = { - {'`', 'A', '\300'}, {'`', 'a', '\340'}, - {'\'', 'A', '\301'}, {'\'', 'a', '\341'}, - {'^', 'A', '\302'}, {'^', 'a', '\342'}, - {'~', 'A', '\303'}, {'~', 'a', '\343'}, - {'"', 'A', '\304'}, {'"', 'a', '\344'}, - {'O', 'A', '\305'}, {'o', 'a', '\345'}, - {'0', 'A', '\305'}, {'0', 'a', '\345'}, - {'A', 'A', '\305'}, {'a', 'a', '\345'}, - {'A', 'E', '\306'}, {'a', 'e', '\346'}, - {',', 'C', '\307'}, {',', 'c', '\347'}, - {'`', 'E', '\310'}, {'`', 'e', '\350'}, - {'\'', 'E', '\311'}, {'\'', 'e', '\351'}, - {'^', 'E', '\312'}, {'^', 'e', '\352'}, - {'"', 'E', '\313'}, {'"', 'e', '\353'}, - {'`', 'I', '\314'}, {'`', 'i', '\354'}, - {'\'', 'I', '\315'}, {'\'', 'i', '\355'}, - {'^', 'I', '\316'}, {'^', 'i', '\356'}, - {'"', 'I', '\317'}, {'"', 'i', '\357'}, - {'-', 'D', '\320'}, {'-', 'd', '\360'}, - {'~', 'N', '\321'}, {'~', 'n', '\361'}, - {'`', 'O', '\322'}, {'`', 'o', '\362'}, - {'\'', 'O', '\323'}, {'\'', 'o', '\363'}, - {'^', 'O', '\324'}, {'^', 'o', '\364'}, - {'~', 'O', '\325'}, {'~', 'o', '\365'}, - {'"', 'O', '\326'}, {'"', 'o', '\366'}, - {'/', 'O', '\330'}, {'/', 'o', '\370'}, - {'`', 'U', '\331'}, {'`', 'u', '\371'}, - {'\'', 'U', '\332'}, {'\'', 'u', '\372'}, - {'^', 'U', '\333'}, {'^', 'u', '\373'}, - {'"', 'U', '\334'}, {'"', 'u', '\374'}, - {'\'', 'Y', '\335'}, {'\'', 'y', '\375'}, - {'T', 'H', '\336'}, {'t', 'h', '\376'}, - {'s', 's', '\337'}, {'"', 'y', '\377'}, - {'s', 'z', '\337'}, {'i', 'j', '\377'}, + {'`', 'A', 0300}, {'`', 'a', 0340}, + {'\'', 'A', 0301}, {'\'', 'a', 0341}, + {'^', 'A', 0302}, {'^', 'a', 0342}, + {'~', 'A', 0303}, {'~', 'a', 0343}, + {'"', 'A', 0304}, {'"', 'a', 0344}, + {'O', 'A', 0305}, {'o', 'a', 0345}, + {'0', 'A', 0305}, {'0', 'a', 0345}, + {'A', 'A', 0305}, {'a', 'a', 0345}, + {'A', 'E', 0306}, {'a', 'e', 0346}, + {',', 'C', 0307}, {',', 'c', 0347}, + {'`', 'E', 0310}, {'`', 'e', 0350}, + {'\'', 'E', 0311}, {'\'', 'e', 0351}, + {'^', 'E', 0312}, {'^', 'e', 
0352}, + {'"', 'E', 0313}, {'"', 'e', 0353}, + {'`', 'I', 0314}, {'`', 'i', 0354}, + {'\'', 'I', 0315}, {'\'', 'i', 0355}, + {'^', 'I', 0316}, {'^', 'i', 0356}, + {'"', 'I', 0317}, {'"', 'i', 0357}, + {'-', 'D', 0320}, {'-', 'd', 0360}, + {'~', 'N', 0321}, {'~', 'n', 0361}, + {'`', 'O', 0322}, {'`', 'o', 0362}, + {'\'', 'O', 0323}, {'\'', 'o', 0363}, + {'^', 'O', 0324}, {'^', 'o', 0364}, + {'~', 'O', 0325}, {'~', 'o', 0365}, + {'"', 'O', 0326}, {'"', 'o', 0366}, + {'/', 'O', 0330}, {'/', 'o', 0370}, + {'`', 'U', 0331}, {'`', 'u', 0371}, + {'\'', 'U', 0332}, {'\'', 'u', 0372}, + {'^', 'U', 0333}, {'^', 'u', 0373}, + {'"', 'U', 0334}, {'"', 'u', 0374}, + {'\'', 'Y', 0335}, {'\'', 'y', 0375}, + {'T', 'H', 0336}, {'t', 'h', 0376}, + {'s', 's', 0337}, {'"', 'y', 0377}, + {'s', 'z', 0337}, {'i', 'j', 0377}, }; unsigned int accent_table_size = 68; diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index 85d596a3c18..eba2883b630 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c @@ -1527,7 +1527,7 @@ static int __devinit reset_card(struct pci_dev *pdev, msleep(10); portcount = inw(base + 0x2); - if (!inw(base + 0xe) & 0x1 || (portcount != 0 && portcount != 4 && + if (!(inw(base + 0xe) & 0x1) || (portcount != 0 && portcount != 4 && portcount != 8 && portcount != 16)) { dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n", card + 1); diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c index ff35230058d..d793e68b3e0 100644 --- a/drivers/char/pcmcia/ipwireless/network.c +++ b/drivers/char/pcmcia/ipwireless/network.c @@ -377,13 +377,16 @@ void ipwireless_network_packet_received(struct ipw_network *network, for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) { struct ipw_tty *tty = network->associated_ttys[channel_idx][i]; + if (!tty) + continue; + /* * If it's associated with a tty (other than the RAS channel * when we're online), then send the data to that tty. The RAS * channel's data is handled above - it always goes through * ppp_generic. 
*/ - if (tty && channel_idx == IPW_CHANNEL_RAS + if (channel_idx == IPW_CHANNEL_RAS && (network->ras_control_lines & IPW_CONTROL_LINE_DCD) != 0 && ipwireless_tty_is_modem(tty)) { diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index 78b151c4d20..5c3142b6f1f 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -110,8 +110,8 @@ static int rtc_has_irq = 1; #define hpet_set_rtc_irq_bit(arg) 0 #define hpet_rtc_timer_init() do { } while (0) #define hpet_rtc_dropped_irq() 0 -#define hpet_register_irq_handler(h) 0 -#define hpet_unregister_irq_handler(h) 0 +#define hpet_register_irq_handler(h) ({ 0; }) +#define hpet_unregister_irq_handler(h) ({ 0; }) #ifdef RTC_IRQ static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) { diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index c0e08c7bca2..5ff83df67b4 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c @@ -2109,7 +2109,6 @@ static void sx_throttle(struct tty_struct * tty) sx_out(bp, CD186x_CAR, port_No(port)); spin_unlock_irqrestore(&bp->lock, flags); if (I_IXOFF(tty)) { - spin_unlock_irqrestore(&bp->lock, flags); sx_wait_CCR(bp); spin_lock_irqsave(&bp->lock, flags); sx_out(bp, CD186x_CCR, CCR_SSCH2); diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 367be917506..9b58b894f82 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -702,6 +702,7 @@ void redraw_screen(struct vc_data *vc, int is_switch) if (is_switch) { set_leds(); compute_shiftstate(); + notify_update(vc); } } diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c index dfea2bde162..f577daedb63 100644 --- a/drivers/char/xilinx_hwicap/buffer_icap.c +++ b/drivers/char/xilinx_hwicap/buffer_icap.c @@ -73,8 +73,8 @@ #define XHI_BUFFER_START 0 /** - * buffer_icap_get_status: Get the contents of the status register. - * @parameter base_address: is the base address of the device + * buffer_icap_get_status - Get the contents of the status register. + * @base_address: is the base address of the device * * The status register contains the ICAP status and the done bit. * @@ -94,9 +94,9 @@ static inline u32 buffer_icap_get_status(void __iomem *base_address) } /** - * buffer_icap_get_bram: Reads data from the storage buffer bram. - * @parameter base_address: contains the base address of the component. - * @parameter offset: The word offset from which the data should be read. + * buffer_icap_get_bram - Reads data from the storage buffer bram. + * @base_address: contains the base address of the component. + * @offset: The word offset from which the data should be read. * * A bram is used as a configuration memory cache. One frame of data can * be stored in this "storage buffer". 
@@ -108,8 +108,8 @@ static inline u32 buffer_icap_get_bram(void __iomem *base_address, } /** - * buffer_icap_busy: Return true if the icap device is busy - * @parameter base_address: is the base address of the device + * buffer_icap_busy - Return true if the icap device is busy + * @base_address: is the base address of the device * * The queries the low order bit of the status register, which * indicates whether the current configuration or readback operation @@ -121,8 +121,8 @@ static inline bool buffer_icap_busy(void __iomem *base_address) } /** - * buffer_icap_busy: Return true if the icap device is not busy - * @parameter base_address: is the base address of the device + * buffer_icap_busy - Return true if the icap device is not busy + * @base_address: is the base address of the device * * The queries the low order bit of the status register, which * indicates whether the current configuration or readback operation @@ -134,9 +134,9 @@ static inline bool buffer_icap_done(void __iomem *base_address) } /** - * buffer_icap_set_size: Set the size register. - * @parameter base_address: is the base address of the device - * @parameter data: The size in bytes. + * buffer_icap_set_size - Set the size register. + * @base_address: is the base address of the device + * @data: The size in bytes. * * The size register holds the number of 8 bit bytes to transfer between * bram and the icap (or icap to bram). @@ -148,9 +148,9 @@ static inline void buffer_icap_set_size(void __iomem *base_address, } /** - * buffer_icap_mSetoffsetReg: Set the bram offset register. - * @parameter base_address: contains the base address of the device. - * @parameter data: is the value to be written to the data register. + * buffer_icap_set_offset - Set the bram offset register. + * @base_address: contains the base address of the device. + * @data: is the value to be written to the data register. * * The bram offset register holds the starting bram address to transfer * data from during configuration or write data to during readback. @@ -162,9 +162,9 @@ static inline void buffer_icap_set_offset(void __iomem *base_address, } /** - * buffer_icap_set_rnc: Set the RNC (Readback not Configure) register. - * @parameter base_address: contains the base address of the device. - * @parameter data: is the value to be written to the data register. + * buffer_icap_set_rnc - Set the RNC (Readback not Configure) register. + * @base_address: contains the base address of the device. + * @data: is the value to be written to the data register. * * The RNC register determines the direction of the data transfer. It * controls whether a configuration or readback take place. Writing to @@ -178,10 +178,10 @@ static inline void buffer_icap_set_rnc(void __iomem *base_address, } /** - * buffer_icap_set_bram: Write data to the storage buffer bram. - * @parameter base_address: contains the base address of the component. - * @parameter offset: The word offset at which the data should be written. - * @parameter data: The value to be written to the bram offset. + * buffer_icap_set_bram - Write data to the storage buffer bram. + * @base_address: contains the base address of the component. + * @offset: The word offset at which the data should be written. + * @data: The value to be written to the bram offset. * * A bram is used as a configuration memory cache. One frame of data can * be stored in this "storage buffer". 
@@ -193,10 +193,10 @@ static inline void buffer_icap_set_bram(void __iomem *base_address, } /** - * buffer_icap_device_read: Transfer bytes from ICAP to the storage buffer. - * @parameter drvdata: a pointer to the drvdata. - * @parameter offset: The storage buffer start address. - * @parameter count: The number of words (32 bit) to read from the + * buffer_icap_device_read - Transfer bytes from ICAP to the storage buffer. + * @drvdata: a pointer to the drvdata. + * @offset: The storage buffer start address. + * @count: The number of words (32 bit) to read from the * device (ICAP). **/ static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, @@ -227,10 +227,10 @@ static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, }; /** - * buffer_icap_device_write: Transfer bytes from ICAP to the storage buffer. - * @parameter drvdata: a pointer to the drvdata. - * @parameter offset: The storage buffer start address. - * @parameter count: The number of words (32 bit) to read from the + * buffer_icap_device_write - Transfer bytes from ICAP to the storage buffer. + * @drvdata: a pointer to the drvdata. + * @offset: The storage buffer start address. + * @count: The number of words (32 bit) to read from the * device (ICAP). **/ static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, @@ -261,8 +261,8 @@ static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, }; /** - * buffer_icap_reset: Reset the logic of the icap device. - * @parameter drvdata: a pointer to the drvdata. + * buffer_icap_reset - Reset the logic of the icap device. + * @drvdata: a pointer to the drvdata. * * Writing to the status register resets the ICAP logic in an internal * version of the core. For the version of the core published in EDK, @@ -274,10 +274,10 @@ void buffer_icap_reset(struct hwicap_drvdata *drvdata) } /** - * buffer_icap_set_configuration: Load a partial bitstream from system memory. - * @parameter drvdata: a pointer to the drvdata. - * @parameter data: Kernel address of the partial bitstream. - * @parameter size: the size of the partial bitstream in 32 bit words. + * buffer_icap_set_configuration - Load a partial bitstream from system memory. + * @drvdata: a pointer to the drvdata. + * @data: Kernel address of the partial bitstream. + * @size: the size of the partial bitstream in 32 bit words. **/ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, u32 size) @@ -333,10 +333,10 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, }; /** - * buffer_icap_get_configuration: Read configuration data from the device. - * @parameter drvdata: a pointer to the drvdata. - * @parameter data: Address of the data representing the partial bitstream - * @parameter size: the size of the partial bitstream in 32 bit words. + * buffer_icap_get_configuration - Read configuration data from the device. + * @drvdata: a pointer to the drvdata. + * @data: Address of the data representing the partial bitstream + * @size: the size of the partial bitstream in 32 bit words. **/ int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data, u32 size) diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c index 0988314694a..6f45dbd4712 100644 --- a/drivers/char/xilinx_hwicap/fifo_icap.c +++ b/drivers/char/xilinx_hwicap/fifo_icap.c @@ -94,9 +94,9 @@ /** - * fifo_icap_fifo_write: Write data to the write FIFO. - * @parameter drvdata: a pointer to the drvdata. 
- * @parameter data: the 32-bit value to be written to the FIFO. + * fifo_icap_fifo_write - Write data to the write FIFO. + * @drvdata: a pointer to the drvdata. + * @data: the 32-bit value to be written to the FIFO. * * This function will silently fail if the fifo is full. **/ @@ -108,8 +108,8 @@ static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata, } /** - * fifo_icap_fifo_read: Read data from the Read FIFO. - * @parameter drvdata: a pointer to the drvdata. + * fifo_icap_fifo_read - Read data from the Read FIFO. + * @drvdata: a pointer to the drvdata. * * This function will silently fail if the fifo is empty. **/ @@ -121,9 +121,9 @@ static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata) } /** - * fifo_icap_set_read_size: Set the the size register. - * @parameter drvdata: a pointer to the drvdata. - * @parameter data: the size of the following read transaction, in words. + * fifo_icap_set_read_size - Set the the size register. + * @drvdata: a pointer to the drvdata. + * @data: the size of the following read transaction, in words. **/ static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, u32 data) @@ -132,8 +132,8 @@ static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, } /** - * fifo_icap_start_config: Initiate a configuration (write) to the device. - * @parameter drvdata: a pointer to the drvdata. + * fifo_icap_start_config - Initiate a configuration (write) to the device. + * @drvdata: a pointer to the drvdata. **/ static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) { @@ -142,8 +142,8 @@ static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) } /** - * fifo_icap_start_readback: Initiate a readback from the device. - * @parameter drvdata: a pointer to the drvdata. + * fifo_icap_start_readback - Initiate a readback from the device. + * @drvdata: a pointer to the drvdata. **/ static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) { @@ -152,8 +152,8 @@ static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) } /** - * fifo_icap_busy: Return true if the ICAP is still processing a transaction. - * @parameter drvdata: a pointer to the drvdata. + * fifo_icap_busy - Return true if the ICAP is still processing a transaction. + * @drvdata: a pointer to the drvdata. **/ static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) { @@ -163,8 +163,8 @@ static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) } /** - * fifo_icap_write_fifo_vacancy: Query the write fifo available space. - * @parameter drvdata: a pointer to the drvdata. + * fifo_icap_write_fifo_vacancy - Query the write fifo available space. + * @drvdata: a pointer to the drvdata. * * Return the number of words that can be safely pushed into the write fifo. **/ @@ -175,8 +175,8 @@ static inline u32 fifo_icap_write_fifo_vacancy( } /** - * fifo_icap_read_fifo_occupancy: Query the read fifo available data. - * @parameter drvdata: a pointer to the drvdata. + * fifo_icap_read_fifo_occupancy - Query the read fifo available data. + * @drvdata: a pointer to the drvdata. * * Return the number of words that can be safely read from the read fifo. **/ @@ -187,11 +187,11 @@ static inline u32 fifo_icap_read_fifo_occupancy( } /** - * fifo_icap_set_configuration: Send configuration data to the ICAP. - * @parameter drvdata: a pointer to the drvdata. 
- * @parameter frame_buffer: a pointer to the data to be written to the + * fifo_icap_set_configuration - Send configuration data to the ICAP. + * @drvdata: a pointer to the drvdata. + * @frame_buffer: a pointer to the data to be written to the * ICAP device. - * @parameter num_words: the number of words (32 bit) to write to the ICAP + * @num_words: the number of words (32 bit) to write to the ICAP * device. * This function writes the given user data to the Write FIFO in @@ -266,10 +266,10 @@ int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata, } /** - * fifo_icap_get_configuration: Read configuration data from the device. - * @parameter drvdata: a pointer to the drvdata. - * @parameter data: Address of the data representing the partial bitstream - * @parameter size: the size of the partial bitstream in 32 bit words. + * fifo_icap_get_configuration - Read configuration data from the device. + * @drvdata: a pointer to the drvdata. + * @data: Address of the data representing the partial bitstream + * @size: the size of the partial bitstream in 32 bit words. * * This function reads the specified number of words from the ICAP device in * the polled mode. @@ -335,8 +335,8 @@ int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata, } /** - * buffer_icap_reset: Reset the logic of the icap device. - * @parameter drvdata: a pointer to the drvdata. + * buffer_icap_reset - Reset the logic of the icap device. + * @drvdata: a pointer to the drvdata. * * This function forces the software reset of the complete HWICAP device. * All the registers will return to the default value and the FIFO is also @@ -360,8 +360,8 @@ void fifo_icap_reset(struct hwicap_drvdata *drvdata) } /** - * fifo_icap_flush_fifo: This function flushes the FIFOs in the device. - * @parameter drvdata: a pointer to the drvdata. + * fifo_icap_flush_fifo - This function flushes the FIFOs in the device. + * @drvdata: a pointer to the drvdata. */ void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata) { diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index 24f6aef0fd3..2284fa2a5a5 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c @@ -84,7 +84,7 @@ #include <linux/init.h> #include <linux/poll.h> #include <linux/proc_fs.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include <linux/sysctl.h> #include <linux/version.h> #include <linux/fs.h> @@ -119,6 +119,7 @@ module_param(xhwicap_minor, int, S_IRUGO); /* An array, which is set to true when the device is registered. */ static bool probed_devices[HWICAP_DEVICES]; +static struct mutex icap_sem; static struct class *icap_class; @@ -199,14 +200,14 @@ static const struct config_registers v5_config_registers = { }; /** - * hwicap_command_desync: Send a DESYNC command to the ICAP port. - * @parameter drvdata: a pointer to the drvdata. + * hwicap_command_desync - Send a DESYNC command to the ICAP port. + * @drvdata: a pointer to the drvdata. * * This command desynchronizes the ICAP After this command, a * bitstream containing a NULL packet, followed by a SYNCH packet is * required before the ICAP will recognize commands. */ -int hwicap_command_desync(struct hwicap_drvdata *drvdata) +static int hwicap_command_desync(struct hwicap_drvdata *drvdata) { u32 buffer[4]; u32 index = 0; @@ -228,51 +229,18 @@ int hwicap_command_desync(struct hwicap_drvdata *drvdata) } /** - * hwicap_command_capture: Send a CAPTURE command to the ICAP port. 
- * @parameter drvdata: a pointer to the drvdata. - * - * This command captures all of the flip flop states so they will be - * available during readback. One can use this command instead of - * enabling the CAPTURE block in the design. - */ -int hwicap_command_capture(struct hwicap_drvdata *drvdata) -{ - u32 buffer[7]; - u32 index = 0; - - /* - * Create the data to be written to the ICAP. - */ - buffer[index++] = XHI_DUMMY_PACKET; - buffer[index++] = XHI_SYNC_PACKET; - buffer[index++] = XHI_NOOP_PACKET; - buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1; - buffer[index++] = XHI_CMD_GCAPTURE; - buffer[index++] = XHI_DUMMY_PACKET; - buffer[index++] = XHI_DUMMY_PACKET; - - /* - * Write the data to the FIFO and intiate the transfer of data - * present in the FIFO to the ICAP device. - */ - return drvdata->config->set_configuration(drvdata, - &buffer[0], index); - -} - -/** - * hwicap_get_configuration_register: Query a configuration register. - * @parameter drvdata: a pointer to the drvdata. - * @parameter reg: a constant which represents the configuration + * hwicap_get_configuration_register - Query a configuration register. + * @drvdata: a pointer to the drvdata. + * @reg: a constant which represents the configuration * register value to be returned. * Examples: XHI_IDCODE, XHI_FLR. - * @parameter RegData: returns the value of the register. + * @reg_data: returns the value of the register. * * Sends a query packet to the ICAP and then receives the response. * The icap is left in Synched state. */ -int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, - u32 reg, u32 *RegData) +static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, + u32 reg, u32 *reg_data) { int status; u32 buffer[6]; @@ -300,14 +268,14 @@ int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, /* * Read the configuration register */ - status = drvdata->config->get_configuration(drvdata, RegData, 1); + status = drvdata->config->get_configuration(drvdata, reg_data, 1); if (status) return status; return 0; } -int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) +static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) { int status; u32 idcode; @@ -344,7 +312,7 @@ int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) } static ssize_t -hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) +hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct hwicap_drvdata *drvdata = file->private_data; ssize_t bytes_to_read = 0; @@ -353,8 +321,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) u32 bytes_remaining; int status; - if (down_interruptible(&drvdata->sem)) - return -ERESTARTSYS; + status = mutex_lock_interruptible(&drvdata->sem); + if (status) + return status; if (drvdata->read_buffer_in_use) { /* If there are leftover bytes in the buffer, just */ @@ -370,8 +339,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) goto error; } drvdata->read_buffer_in_use -= bytes_to_read; - memcpy(drvdata->read_buffer + bytes_to_read, - drvdata->read_buffer, 4 - bytes_to_read); + memmove(drvdata->read_buffer, + drvdata->read_buffer + bytes_to_read, + 4 - bytes_to_read); } else { /* Get new data from the ICAP, and return was was requested. 
*/ kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); @@ -414,18 +384,20 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) status = -EFAULT; goto error; } - memcpy(kbuf, drvdata->read_buffer, bytes_remaining); + memcpy(drvdata->read_buffer, + kbuf, + bytes_remaining); drvdata->read_buffer_in_use = bytes_remaining; free_page((unsigned long)kbuf); } status = bytes_to_read; error: - up(&drvdata->sem); + mutex_unlock(&drvdata->sem); return status; } static ssize_t -hwicap_write(struct file *file, const char *buf, +hwicap_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct hwicap_drvdata *drvdata = file->private_data; @@ -435,8 +407,9 @@ hwicap_write(struct file *file, const char *buf, ssize_t len; ssize_t status; - if (down_interruptible(&drvdata->sem)) - return -ERESTARTSYS; + status = mutex_lock_interruptible(&drvdata->sem); + if (status) + return status; left += drvdata->write_buffer_in_use; @@ -465,7 +438,7 @@ hwicap_write(struct file *file, const char *buf, memcpy(kbuf, drvdata->write_buffer, drvdata->write_buffer_in_use); if (copy_from_user( - (((char *)kbuf) + (drvdata->write_buffer_in_use)), + (((char *)kbuf) + drvdata->write_buffer_in_use), buf + written, len - (drvdata->write_buffer_in_use))) { free_page((unsigned long)kbuf); @@ -508,7 +481,7 @@ hwicap_write(struct file *file, const char *buf, free_page((unsigned long)kbuf); status = written; error: - up(&drvdata->sem); + mutex_unlock(&drvdata->sem); return status; } @@ -519,8 +492,9 @@ static int hwicap_open(struct inode *inode, struct file *file) drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); - if (down_interruptible(&drvdata->sem)) - return -ERESTARTSYS; + status = mutex_lock_interruptible(&drvdata->sem); + if (status) + return status; if (drvdata->is_open) { status = -EBUSY; @@ -539,7 +513,7 @@ static int hwicap_open(struct inode *inode, struct file *file) drvdata->is_open = 1; error: - up(&drvdata->sem); + mutex_unlock(&drvdata->sem); return status; } @@ -549,8 +523,7 @@ static int hwicap_release(struct inode *inode, struct file *file) int i; int status = 0; - if (down_interruptible(&drvdata->sem)) - return -ERESTARTSYS; + mutex_lock(&drvdata->sem); if (drvdata->write_buffer_in_use) { /* Flush write buffer. 
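The locking conversion running through this file follows one pattern: the struct semaphore that was used as a mutex becomes a struct mutex, mutex_init() replaces init_MUTEX() at probe time, and the return value of mutex_lock_interruptible() (0 or -EINTR) is handed straight back to the caller instead of a hard-coded -ERESTARTSYS. A minimal sketch of that shape, with made-up names:

#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/errno.h>

struct example_dev {
	struct mutex lock;	/* was: struct semaphore sem */
	u32 value;
};
/* at probe time: mutex_init(&edev->lock); */

static int example_read_value(struct example_dev *edev, u32 *out)
{
	int ret;

	ret = mutex_lock_interruptible(&edev->lock);
	if (ret)		/* -EINTR if a signal arrived while waiting */
		return ret;

	*out = edev->value;
	mutex_unlock(&edev->lock);
	return 0;
}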
*/ @@ -569,7 +542,7 @@ static int hwicap_release(struct inode *inode, struct file *file) error: drvdata->is_open = 0; - up(&drvdata->sem); + mutex_unlock(&drvdata->sem); return status; } @@ -592,31 +565,36 @@ static int __devinit hwicap_setup(struct device *dev, int id, dev_info(dev, "Xilinx icap port driver\n"); + mutex_lock(&icap_sem); + if (id < 0) { for (id = 0; id < HWICAP_DEVICES; id++) if (!probed_devices[id]) break; } if (id < 0 || id >= HWICAP_DEVICES) { + mutex_unlock(&icap_sem); dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); return -EINVAL; } if (probed_devices[id]) { + mutex_unlock(&icap_sem); dev_err(dev, "cannot assign to %s%i; it is already in use\n", DRIVER_NAME, id); return -EBUSY; } probed_devices[id] = 1; + mutex_unlock(&icap_sem); devt = MKDEV(xhwicap_major, xhwicap_minor + id); - drvdata = kmalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); + drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); if (!drvdata) { dev_err(dev, "Couldn't allocate device private record\n"); - return -ENOMEM; + retval = -ENOMEM; + goto failed0; } - memset((void *)drvdata, 0, sizeof(struct hwicap_drvdata)); dev_set_drvdata(dev, (void *)drvdata); if (!regs_res) { @@ -648,7 +626,7 @@ static int __devinit hwicap_setup(struct device *dev, int id, drvdata->config = config; drvdata->config_regs = config_regs; - init_MUTEX(&drvdata->sem); + mutex_init(&drvdata->sem); drvdata->is_open = 0; dev_info(dev, "ioremap %lx to %p with size %x\n", @@ -663,7 +641,7 @@ static int __devinit hwicap_setup(struct device *dev, int id, goto failed3; } /* devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */ - class_device_create(icap_class, NULL, devt, NULL, DRIVER_NAME); + device_create(icap_class, dev, devt, "%s%d", DRIVER_NAME, id); return 0; /* success */ failed3: @@ -675,6 +653,11 @@ static int __devinit hwicap_setup(struct device *dev, int id, failed1: kfree(drvdata); + failed0: + mutex_lock(&icap_sem); + probed_devices[id] = 0; + mutex_unlock(&icap_sem); + return retval; } @@ -699,14 +682,16 @@ static int __devexit hwicap_remove(struct device *dev) if (!drvdata) return 0; - class_device_destroy(icap_class, drvdata->devt); + device_destroy(icap_class, drvdata->devt); cdev_del(&drvdata->cdev); iounmap(drvdata->base_address); release_mem_region(drvdata->mem_start, drvdata->mem_size); kfree(drvdata); dev_set_drvdata(dev, NULL); - probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0; + mutex_lock(&icap_sem); + probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0; + mutex_unlock(&icap_sem); return 0; /* success */ } @@ -821,28 +806,29 @@ static struct of_platform_driver hwicap_of_driver = { }; /* Registration helpers to keep the number of #ifdefs to a minimum */ -static inline int __devinit hwicap_of_register(void) +static inline int __init hwicap_of_register(void) { pr_debug("hwicap: calling of_register_platform_driver()\n"); return of_register_platform_driver(&hwicap_of_driver); } -static inline void __devexit hwicap_of_unregister(void) +static inline void __exit hwicap_of_unregister(void) { of_unregister_platform_driver(&hwicap_of_driver); } #else /* CONFIG_OF */ /* CONFIG_OF not enabled; do nothing helpers */ -static inline int __devinit hwicap_of_register(void) { return 0; } -static inline void __devexit hwicap_of_unregister(void) { } +static inline int __init hwicap_of_register(void) { return 0; } +static inline void __exit hwicap_of_unregister(void) { } #endif /* CONFIG_OF */ -static int __devinit hwicap_module_init(void) +static int __init hwicap_module_init(void) { dev_t devt; int retval; 
icap_class = class_create(THIS_MODULE, "xilinx_config"); + mutex_init(&icap_sem); if (xhwicap_major) { devt = MKDEV(xhwicap_major, xhwicap_minor); @@ -883,7 +869,7 @@ static int __devinit hwicap_module_init(void) return retval; } -static void __devexit hwicap_module_cleanup(void) +static void __exit hwicap_module_cleanup(void) { dev_t devt = MKDEV(xhwicap_major, xhwicap_minor); diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h index ae771cac162..405fee7e189 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h @@ -48,9 +48,9 @@ struct hwicap_drvdata { u8 write_buffer[4]; u32 read_buffer_in_use; /* Always in [0,3] */ u8 read_buffer[4]; - u32 mem_start; /* phys. address of the control registers */ - u32 mem_end; /* phys. address of the control registers */ - u32 mem_size; + resource_size_t mem_start;/* phys. address of the control registers */ + resource_size_t mem_end; /* phys. address of the control registers */ + resource_size_t mem_size; void __iomem *base_address;/* virt. address of the control registers */ struct device *dev; @@ -61,7 +61,7 @@ struct hwicap_drvdata { const struct config_registers *config_regs; void *private_data; bool is_open; - struct semaphore sem; + struct mutex sem; }; struct hwicap_driver_config { @@ -164,29 +164,29 @@ struct config_registers { #define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL /** - * hwicap_type_1_read: Generates a Type 1 read packet header. - * @parameter: Register is the address of the register to be read back. + * hwicap_type_1_read - Generates a Type 1 read packet header. + * @reg: is the address of the register to be read back. * * Generates a Type 1 read packet header, which is used to indirectly * read registers in the configuration logic. This packet must then * be sent through the icap device, and a return packet received with * the information. **/ -static inline u32 hwicap_type_1_read(u32 Register) +static inline u32 hwicap_type_1_read(u32 reg) { return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | - (Register << XHI_REGISTER_SHIFT) | + (reg << XHI_REGISTER_SHIFT) | (XHI_OP_READ << XHI_OP_SHIFT); } /** - * hwicap_type_1_write: Generates a Type 1 write packet header - * @parameter: Register is the address of the register to be read back. + * hwicap_type_1_write - Generates a Type 1 write packet header + * @reg: is the address of the register to be read back. **/ -static inline u32 hwicap_type_1_write(u32 Register) +static inline u32 hwicap_type_1_write(u32 reg) { return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | - (Register << XHI_REGISTER_SHIFT) | + (reg << XHI_REGISTER_SHIFT) | (XHI_OP_WRITE << XHI_OP_SHIFT); } diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index a703deffb79..27340a7b19d 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -4,7 +4,7 @@ menuconfig DMADEVICES bool "DMA Engine support" - depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX + depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC depends on !HIGHMEM64G help DMA engines can do asynchronous data transfers without @@ -37,6 +37,23 @@ config INTEL_IOP_ADMA help Enable support for the Intel(R) IOP Series RAID engines. +config FSL_DMA + bool "Freescale MPC85xx/MPC83xx DMA support" + depends on PPC + select DMA_ENGINE + ---help--- + Enable support for the Freescale DMA engine. Now, it support + MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. + The MPC8349, MPC8360 is also supported. 
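The new FSL_DMA symbol is consumed in the usual two places: the Makefile hunk just below adds fsldma.o under obj-$(CONFIG_FSL_DMA), and the companion FSL_DMA_SELFTEST bool that follows is only ever seen by the C preprocessor (the channel probe later in this patch wraps the self test in #ifdef CONFIG_FSL_DMA_SELFTEST). A hedged sketch of how such a bool typically gates code, with illustrative names:

#ifdef CONFIG_FSL_DMA_SELFTEST
static int example_chan_selftest(void)
{
	/* the real test pushes a small memcpy through the channel
	 * and compares source and destination buffers */
	return 0;
}
#else
static inline int example_chan_selftest(void)
{
	return 0;	/* compiled out: probe succeeds without testing */
}
#endif

static int example_chan_probe(void)
{
	/* ...register and IRQ setup would go here... */
	return example_chan_selftest();
}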
+ +config FSL_DMA_SELFTEST + bool "Enable the self test for each DMA channel" + depends on FSL_DMA + default y + ---help--- + Enable the self test for each DMA channel. A self test will be + performed after the channel probed to ensure the DMA works well. + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index b152cd84e12..c8036d94590 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_NET_DMA) += iovlock.o obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o +obj-$(CONFIG_FSL_DMA) += fsldma.o diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c new file mode 100644 index 00000000000..cc9a68158d9 --- /dev/null +++ b/drivers/dma/fsldma.c @@ -0,0 +1,1067 @@ +/* + * Freescale MPC85xx, MPC83xx DMA Engine support + * + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: + * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 + * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 + * + * Description: + * DMA engine driver for Freescale MPC8540 DMA controller, which is + * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc. + * The support for MPC8349 DMA contorller is also added. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/dmaengine.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/of_platform.h> + +#include "fsldma.h" + +static void dma_init(struct fsl_dma_chan *fsl_chan) +{ + /* Reset the channel */ + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32); + + switch (fsl_chan->feature & FSL_DMA_IP_MASK) { + case FSL_DMA_IP_85XX: + /* Set the channel to below modes: + * EIE - Error interrupt enable + * EOSIE - End of segments interrupt enable (basic mode) + * EOLNIE - End of links interrupt enable + */ + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE + | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); + break; + case FSL_DMA_IP_83XX: + /* Set the channel to below modes: + * EOTIE - End-of-transfer interrupt enable + */ + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE, + 32); + break; + } + +} + +static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val) +{ + DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); +} + +static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan) +{ + return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); +} + +static void set_desc_cnt(struct fsl_dma_chan *fsl_chan, + struct fsl_dma_ld_hw *hw, u32 count) +{ + hw->count = CPU_TO_DMA(fsl_chan, count, 32); +} + +static void set_desc_src(struct fsl_dma_chan *fsl_chan, + struct fsl_dma_ld_hw *hw, dma_addr_t src) +{ + u64 snoop_bits; + + snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) + ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; + hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); +} + +static void set_desc_dest(struct fsl_dma_chan *fsl_chan, + struct fsl_dma_ld_hw *hw, dma_addr_t dest) +{ + u64 snoop_bits; + + snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) + ? 
((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; + hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64); +} + +static void set_desc_next(struct fsl_dma_chan *fsl_chan, + struct fsl_dma_ld_hw *hw, dma_addr_t next) +{ + u64 snoop_bits; + + snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) + ? FSL_DMA_SNEN : 0; + hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); +} + +static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) +{ + DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64); +} + +static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan) +{ + return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN; +} + +static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) +{ + DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64); +} + +static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan) +{ + return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64); +} + +static int dma_is_idle(struct fsl_dma_chan *fsl_chan) +{ + u32 sr = get_sr(fsl_chan); + return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); +} + +static void dma_start(struct fsl_dma_chan *fsl_chan) +{ + u32 mr_set = 0;; + + if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { + DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); + mr_set |= FSL_DMA_MR_EMP_EN; + } else + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) + & ~FSL_DMA_MR_EMP_EN, 32); + + if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) + mr_set |= FSL_DMA_MR_EMS_EN; + else + mr_set |= FSL_DMA_MR_CS; + + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) + | mr_set, 32); +} + +static void dma_halt(struct fsl_dma_chan *fsl_chan) +{ + int i = 0; + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA, + 32); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS + | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32); + + while (!dma_is_idle(fsl_chan) && (i++ < 100)) + udelay(10); + if (i >= 100 && !dma_is_idle(fsl_chan)) + dev_err(fsl_chan->dev, "DMA halt timeout!\n"); +} + +static void set_ld_eol(struct fsl_dma_chan *fsl_chan, + struct fsl_desc_sw *desc) +{ + desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, + DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL, + 64); +} + +static void append_ld_queue(struct fsl_dma_chan *fsl_chan, + struct fsl_desc_sw *new_desc) +{ + struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev); + + if (list_empty(&fsl_chan->ld_queue)) + return; + + /* Link to the new descriptor physical address and + * Enable End-of-segment interrupt for + * the last link descriptor. + * (the previous node's next link descriptor) + * + * For FSL_DMA_IP_83xx, the snoop enable bit need be set. + */ + queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, + new_desc->async_tx.phys | FSL_DMA_EOSIE | + (((fsl_chan->feature & FSL_DMA_IP_MASK) + == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64); +} + +/** + * fsl_chan_set_src_loop_size - Set source address hold transfer size + * @fsl_chan : Freescale DMA channel + * @size : Address loop size, 0 for disable loop + * + * The set source address hold transfer size. The source + * address hold or loop transfer size is when the DMA transfer + * data from source address (SA), if the loop size is 4, the DMA will + * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, + * SA + 1 ... and so on. 
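The comment above describes the source address hold ("loop") mode in prose; with a loop size of 4 the engine keeps re-reading the same four byte addresses, i.e. the offset into the source is taken modulo the loop size. A purely illustrative calculation of that access pattern, outside the driver:

#include <stdio.h>

int main(void)
{
	unsigned long sa = 0x1000;	/* programmed source address (example) */
	unsigned int loop = 4;		/* address loop size */
	unsigned int i;

	/* first eight addresses the engine would read from */
	for (i = 0; i < 8; i++)
		printf("read #%u -> 0x%lx\n", i, sa + (i % loop));

	return 0;
}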
+ */ +static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) +{ + switch (size) { + case 0: + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & + (~FSL_DMA_MR_SAHE), 32); + break; + case 1: + case 2: + case 4: + case 8: + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | + FSL_DMA_MR_SAHE | (__ilog2(size) << 14), + 32); + break; + } +} + +/** + * fsl_chan_set_dest_loop_size - Set destination address hold transfer size + * @fsl_chan : Freescale DMA channel + * @size : Address loop size, 0 for disable loop + * + * The set destination address hold transfer size. The destination + * address hold or loop transfer size is when the DMA transfer + * data to destination address (TA), if the loop size is 4, the DMA will + * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, + * TA + 1 ... and so on. + */ +static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) +{ + switch (size) { + case 0: + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & + (~FSL_DMA_MR_DAHE), 32); + break; + case 1: + case 2: + case 4: + case 8: + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | + FSL_DMA_MR_DAHE | (__ilog2(size) << 16), + 32); + break; + } +} + +/** + * fsl_chan_toggle_ext_pause - Toggle channel external pause status + * @fsl_chan : Freescale DMA channel + * @size : Pause control size, 0 for disable external pause control. + * The maximum is 1024. + * + * The Freescale DMA channel can be controlled by the external + * signal DREQ#. The pause control size is how many bytes are allowed + * to transfer before pausing the channel, after which a new assertion + * of DREQ# resumes channel operation. + */ +static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size) +{ + if (size > 1024) + return; + + if (size) { + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) + | ((__ilog2(size) << 24) & 0x0f000000), + 32); + fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; + } else + fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; +} + +/** + * fsl_chan_toggle_ext_start - Toggle channel external start status + * @fsl_chan : Freescale DMA channel + * @enable : 0 is disabled, 1 is enabled. + * + * If enable the external start, the channel can be started by an + * external DMA start pin. So the dma_start() does not start the + * transfer immediately. The DMA channel will wait for the + * control pin asserted. 
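The loop-size setters above accept only 1, 2, 4 or 8 because the mode register stores log2 of the size shifted into the relevant field (bit 14 for SAHE, bit 16 for DAHE), and the external-pause helper encodes its byte count the same way at bit 24. A quick stand-alone check of what value lands in the field, using a local stand-in for the kernel's __ilog2():

#include <stdio.h>

/* local stand-in for the kernel's __ilog2(): index of the highest set bit */
static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	static const unsigned int sizes[] = { 1, 2, 4, 8 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("size %u -> field value %u\n",
		       sizes[i], ilog2_u32(sizes[i]));
	return 0;
}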
+ */ +static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) +{ + if (enable) + fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; + else + fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; +} + +static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); + struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); + unsigned long flags; + dma_cookie_t cookie; + + /* cookie increment and adding to ld_queue must be atomic */ + spin_lock_irqsave(&fsl_chan->desc_lock, flags); + + cookie = fsl_chan->common.cookie; + cookie++; + if (cookie < 0) + cookie = 1; + desc->async_tx.cookie = cookie; + fsl_chan->common.cookie = desc->async_tx.cookie; + + append_ld_queue(fsl_chan, desc); + list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev); + + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + + return cookie; +} + +/** + * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. + * @fsl_chan : Freescale DMA channel + * + * Return - The descriptor allocated. NULL for failed. + */ +static struct fsl_desc_sw *fsl_dma_alloc_descriptor( + struct fsl_dma_chan *fsl_chan) +{ + dma_addr_t pdesc; + struct fsl_desc_sw *desc_sw; + + desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); + if (desc_sw) { + memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); + dma_async_tx_descriptor_init(&desc_sw->async_tx, + &fsl_chan->common); + desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; + INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); + desc_sw->async_tx.phys = pdesc; + } + + return desc_sw; +} + + +/** + * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. + * @fsl_chan : Freescale DMA channel + * + * This function will create a dma pool for descriptor allocation. + * + * Return - The number of descriptors allocated. + */ +static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); + LIST_HEAD(tmp_list); + + /* We need the descriptor to be aligned to 32bytes + * for meeting FSL DMA specification requirement. + */ + fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", + fsl_chan->dev, sizeof(struct fsl_desc_sw), + 32, 0); + if (!fsl_chan->desc_pool) { + dev_err(fsl_chan->dev, "No memory for channel %d " + "descriptor dma pool.\n", fsl_chan->id); + return 0; + } + + return 1; +} + +/** + * fsl_dma_free_chan_resources - Free all resources of the channel. 
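fsl_dma_alloc_chan_resources() above leans on the dmapool API so that every link descriptor comes back 32-byte aligned together with the bus address the hardware chains on. A condensed sketch of that alloc/free pairing with invented names (the alignment value mirrors the driver's stated requirement):

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>

struct example_desc {		/* hypothetical hardware descriptor */
	u64 src;
	u64 dst;
	u64 next;
	u32 count;
	u32 reserved;
} __attribute__((aligned(32)));

static int example_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	struct example_desc *desc;
	dma_addr_t phys;

	pool = dma_pool_create("example_desc_pool", dev,
			       sizeof(*desc), 32 /* align */, 0);
	if (!pool)
		return -ENOMEM;

	/* one call yields both the CPU pointer and the bus address that
	 * would be written into the previous descriptor's next field */
	desc = dma_pool_alloc(pool, GFP_KERNEL, &phys);
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	dma_pool_free(pool, desc, phys);
	dma_pool_destroy(pool);
	return 0;
}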
+ * @fsl_chan : Freescale DMA channel + */ +static void fsl_dma_free_chan_resources(struct dma_chan *chan) +{ + struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); + struct fsl_desc_sw *desc, *_desc; + unsigned long flags; + + dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); + spin_lock_irqsave(&fsl_chan->desc_lock, flags); + list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { +#ifdef FSL_DMA_LD_DEBUG + dev_dbg(fsl_chan->dev, + "LD %p will be released.\n", desc); +#endif + list_del(&desc->node); + /* free link descriptor */ + dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); + } + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + dma_pool_destroy(fsl_chan->desc_pool); +} + +static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( + struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, + size_t len, unsigned long flags) +{ + struct fsl_dma_chan *fsl_chan; + struct fsl_desc_sw *first = NULL, *prev = NULL, *new; + size_t copy; + LIST_HEAD(link_chain); + + if (!chan) + return NULL; + + if (!len) + return NULL; + + fsl_chan = to_fsl_chan(chan); + + do { + + /* Allocate the link descriptor from DMA pool */ + new = fsl_dma_alloc_descriptor(fsl_chan); + if (!new) { + dev_err(fsl_chan->dev, + "No free memory for link descriptor\n"); + return NULL; + } +#ifdef FSL_DMA_LD_DEBUG + dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); +#endif + + copy = min(len, FSL_DMA_BCR_MAX_CNT); + + set_desc_cnt(fsl_chan, &new->hw, copy); + set_desc_src(fsl_chan, &new->hw, dma_src); + set_desc_dest(fsl_chan, &new->hw, dma_dest); + + if (!first) + first = new; + else + set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); + + new->async_tx.cookie = 0; + new->async_tx.ack = 1; + + prev = new; + len -= copy; + dma_src += copy; + dma_dest += copy; + + /* Insert the link descriptor to the LD ring */ + list_add_tail(&new->node, &first->async_tx.tx_list); + } while (len); + + new->async_tx.ack = 0; /* client is in control of this ack */ + new->async_tx.cookie = -EBUSY; + + /* Set End-of-link to the last link descriptor of new list*/ + set_ld_eol(fsl_chan, new); + + return first ? &first->async_tx : NULL; +} + +/** + * fsl_dma_update_completed_cookie - Update the completed cookie. + * @fsl_chan : Freescale DMA channel + */ +static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan) +{ + struct fsl_desc_sw *cur_desc, *desc; + dma_addr_t ld_phy; + + ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK; + + if (ld_phy) { + cur_desc = NULL; + list_for_each_entry(desc, &fsl_chan->ld_queue, node) + if (desc->async_tx.phys == ld_phy) { + cur_desc = desc; + break; + } + + if (cur_desc && cur_desc->async_tx.cookie) { + if (dma_is_idle(fsl_chan)) + fsl_chan->completed_cookie = + cur_desc->async_tx.cookie; + else + fsl_chan->completed_cookie = + cur_desc->async_tx.cookie - 1; + } + } +} + +/** + * fsl_chan_ld_cleanup - Clean up link descriptors + * @fsl_chan : Freescale DMA channel + * + * This function clean up the ld_queue of DMA channel. + * If 'in_intr' is set, the function will move the link descriptor to + * the recycle list. Otherwise, free it directly. 
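All of the cleanup logic above reduces to the dmaengine cookie protocol: tx_submit() hands out monotonically increasing cookies, the completion path advances completed_cookie, and dma_async_is_complete() classifies a cookie against (last_complete, last_used). A hedged sketch of how a client might poll that, through the device_is_tx_complete hook this driver installs later in the patch (names outside the dmaengine API are invented):

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* cookie is the value returned by tx->tx_submit(tx) for this transfer */
static int example_wait_for_copy(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	dma_cookie_t done, used;

	do {
		status = chan->device->device_is_tx_complete(chan, cookie,
							     &done, &used);
		cpu_relax();	/* crude busy-wait, for illustration only */
	} while (status == DMA_IN_PROGRESS);

	return status == DMA_SUCCESS ? 0 : -EIO;
}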
+ */ +static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) +{ + struct fsl_desc_sw *desc, *_desc; + unsigned long flags; + + spin_lock_irqsave(&fsl_chan->desc_lock, flags); + + fsl_dma_update_completed_cookie(fsl_chan); + dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", + fsl_chan->completed_cookie); + list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { + dma_async_tx_callback callback; + void *callback_param; + + if (dma_async_is_complete(desc->async_tx.cookie, + fsl_chan->completed_cookie, fsl_chan->common.cookie) + == DMA_IN_PROGRESS) + break; + + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + + /* Remove from ld_queue list */ + list_del(&desc->node); + + dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n", + desc); + dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); + + /* Run the link descriptor callback function */ + if (callback) { + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", + desc); + callback(callback_param); + spin_lock_irqsave(&fsl_chan->desc_lock, flags); + } + } + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); +} + +/** + * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. + * @fsl_chan : Freescale DMA channel + */ +static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) +{ + struct list_head *ld_node; + dma_addr_t next_dest_addr; + unsigned long flags; + + if (!dma_is_idle(fsl_chan)) + return; + + dma_halt(fsl_chan); + + /* If there are some link descriptors + * not transfered in queue. We need to start it. + */ + spin_lock_irqsave(&fsl_chan->desc_lock, flags); + + /* Find the first un-transfer desciptor */ + for (ld_node = fsl_chan->ld_queue.next; + (ld_node != &fsl_chan->ld_queue) + && (dma_async_is_complete( + to_fsl_desc(ld_node)->async_tx.cookie, + fsl_chan->completed_cookie, + fsl_chan->common.cookie) == DMA_SUCCESS); + ld_node = ld_node->next); + + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + + if (ld_node != &fsl_chan->ld_queue) { + /* Get the ld start address from ld_queue */ + next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; + dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%016llx\n", + (u64)next_dest_addr); + set_cdar(fsl_chan, next_dest_addr); + dma_start(fsl_chan); + } else { + set_cdar(fsl_chan, 0); + set_ndar(fsl_chan, 0); + } +} + +/** + * fsl_dma_memcpy_issue_pending - Issue the DMA start command + * @fsl_chan : Freescale DMA channel + */ +static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) +{ + struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); + +#ifdef FSL_DMA_LD_DEBUG + struct fsl_desc_sw *ld; + unsigned long flags; + + spin_lock_irqsave(&fsl_chan->desc_lock, flags); + if (list_empty(&fsl_chan->ld_queue)) { + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + return; + } + + dev_dbg(fsl_chan->dev, "--memcpy issue--\n"); + list_for_each_entry(ld, &fsl_chan->ld_queue, node) { + int i; + dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n", + fsl_chan->id, ld->async_tx.phys); + for (i = 0; i < 8; i++) + dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n", + i, *(((u32 *)&ld->hw) + i)); + } + dev_dbg(fsl_chan->dev, "----------------\n"); + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); +#endif + + fsl_chan_xfer_ld_queue(fsl_chan); +} + +static void fsl_dma_dependency_added(struct dma_chan *chan) +{ + struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); + + fsl_chan_ld_cleanup(fsl_chan); +} + +/** + * fsl_dma_is_complete - Determine the DMA 
status + * @fsl_chan : Freescale DMA channel + */ +static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, + dma_cookie_t cookie, + dma_cookie_t *done, + dma_cookie_t *used) +{ + struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); + dma_cookie_t last_used; + dma_cookie_t last_complete; + + fsl_chan_ld_cleanup(fsl_chan); + + last_used = chan->cookie; + last_complete = fsl_chan->completed_cookie; + + if (done) + *done = last_complete; + + if (used) + *used = last_used; + + return dma_async_is_complete(cookie, last_complete, last_used); +} + +static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) +{ + struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; + dma_addr_t stat; + + stat = get_sr(fsl_chan); + dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", + fsl_chan->id, stat); + set_sr(fsl_chan, stat); /* Clear the event register */ + + stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); + if (!stat) + return IRQ_NONE; + + if (stat & FSL_DMA_SR_TE) + dev_err(fsl_chan->dev, "Transfer Error!\n"); + + /* If the link descriptor segment transfer finishes, + * we will recycle the used descriptor. + */ + if (stat & FSL_DMA_SR_EOSI) { + dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); + dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, " + "nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan), + (u64)get_ndar(fsl_chan)); + stat &= ~FSL_DMA_SR_EOSI; + } + + /* If it current transfer is the end-of-transfer, + * we should clear the Channel Start bit for + * prepare next transfer. + */ + if (stat & (FSL_DMA_SR_EOLNI | FSL_DMA_SR_EOCDI)) { + dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); + stat &= ~FSL_DMA_SR_EOLNI; + fsl_chan_xfer_ld_queue(fsl_chan); + } + + if (stat) + dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", + stat); + + dev_dbg(fsl_chan->dev, "event: Exit\n"); + tasklet_schedule(&fsl_chan->tasklet); + return IRQ_HANDLED; +} + +static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) +{ + struct fsl_dma_device *fdev = (struct fsl_dma_device *)data; + u32 gsr; + int ch_nr; + + gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base) + : in_le32(fdev->reg_base); + ch_nr = (32 - ffs(gsr)) / 8; + + return fdev->chan[ch_nr] ? 
fsl_dma_chan_do_interrupt(irq, + fdev->chan[ch_nr]) : IRQ_NONE; +} + +static void dma_do_tasklet(unsigned long data) +{ + struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; + fsl_chan_ld_cleanup(fsl_chan); +} + +static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan) +{ + if (fsl_chan) + dev_info(fsl_chan->dev, "selftest: callback is ok!\n"); +} + +static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) +{ + struct dma_chan *chan; + int err = 0; + dma_addr_t dma_dest, dma_src; + dma_cookie_t cookie; + u8 *src, *dest; + int i; + size_t test_size; + struct dma_async_tx_descriptor *tx1, *tx2, *tx3; + + test_size = 4096; + + src = kmalloc(test_size * 2, GFP_KERNEL); + if (!src) { + dev_err(fsl_chan->dev, + "selftest: Cannot alloc memory for test!\n"); + err = -ENOMEM; + goto out; + } + + dest = src + test_size; + + for (i = 0; i < test_size; i++) + src[i] = (u8) i; + + chan = &fsl_chan->common; + + if (fsl_dma_alloc_chan_resources(chan) < 1) { + dev_err(fsl_chan->dev, + "selftest: Cannot alloc resources for DMA\n"); + err = -ENODEV; + goto out; + } + + /* TX 1 */ + dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2, + DMA_TO_DEVICE); + dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2, + DMA_FROM_DEVICE); + tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0); + async_tx_ack(tx1); + + cookie = fsl_dma_tx_submit(tx1); + fsl_dma_memcpy_issue_pending(chan); + msleep(2); + + if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { + dev_err(fsl_chan->dev, "selftest: Time out!\n"); + err = -ENODEV; + goto out; + } + + /* Test free and re-alloc channel resources */ + fsl_dma_free_chan_resources(chan); + + if (fsl_dma_alloc_chan_resources(chan) < 1) { + dev_err(fsl_chan->dev, + "selftest: Cannot alloc resources for DMA\n"); + err = -ENODEV; + goto free_resources; + } + + /* Continue to test + * TX 2 + */ + dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2, + test_size / 4, DMA_TO_DEVICE); + dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2, + test_size / 4, DMA_FROM_DEVICE); + tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); + async_tx_ack(tx2); + + /* TX 3 */ + dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4, + test_size / 4, DMA_TO_DEVICE); + dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4, + test_size / 4, DMA_FROM_DEVICE); + tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); + async_tx_ack(tx3); + + /* Test exchanging the prepared tx sort */ + cookie = fsl_dma_tx_submit(tx3); + cookie = fsl_dma_tx_submit(tx2); + +#ifdef FSL_DMA_CALLBACKTEST + if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *) + dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) { + tx3->callback = fsl_dma_callback_test; + tx3->callback_param = fsl_chan; + } +#endif + fsl_dma_memcpy_issue_pending(chan); + msleep(2); + + if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { + dev_err(fsl_chan->dev, "selftest: Time out!\n"); + err = -ENODEV; + goto free_resources; + } + + err = memcmp(src, dest, test_size); + if (err) { + for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size); + i++); + dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%d is " + "error! 
src 0x%x, dest 0x%x\n", + i, test_size, *(src + i), *(dest + i)); + } + +free_resources: + fsl_dma_free_chan_resources(chan); +out: + kfree(src); + return err; +} + +static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, + const struct of_device_id *match) +{ + struct fsl_dma_device *fdev; + struct fsl_dma_chan *new_fsl_chan; + int err; + + fdev = dev_get_drvdata(dev->dev.parent); + BUG_ON(!fdev); + + /* alloc channel */ + new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL); + if (!new_fsl_chan) { + dev_err(&dev->dev, "No free memory for allocating " + "dma channels!\n"); + err = -ENOMEM; + goto err; + } + + /* get dma channel register base */ + err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg); + if (err) { + dev_err(&dev->dev, "Can't get %s property 'reg'\n", + dev->node->full_name); + goto err; + } + + new_fsl_chan->feature = *(u32 *)match->data; + + if (!fdev->feature) + fdev->feature = new_fsl_chan->feature; + + /* If the DMA device's feature is different than its channels', + * report the bug. + */ + WARN_ON(fdev->feature != new_fsl_chan->feature); + + new_fsl_chan->dev = &dev->dev; + new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, + new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); + + new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; + if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) { + dev_err(&dev->dev, "There is no %d channel!\n", + new_fsl_chan->id); + err = -EINVAL; + goto err; + } + fdev->chan[new_fsl_chan->id] = new_fsl_chan; + tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, + (unsigned long)new_fsl_chan); + + /* Init the channel */ + dma_init(new_fsl_chan); + + /* Clear cdar registers */ + set_cdar(new_fsl_chan, 0); + + switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { + case FSL_DMA_IP_85XX: + new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; + new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; + case FSL_DMA_IP_83XX: + new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; + new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; + } + + spin_lock_init(&new_fsl_chan->desc_lock); + INIT_LIST_HEAD(&new_fsl_chan->ld_queue); + + new_fsl_chan->common.device = &fdev->common; + + /* Add the channel to DMA device channel list */ + list_add_tail(&new_fsl_chan->common.device_node, + &fdev->common.channels); + fdev->common.chancnt++; + + new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0); + if (new_fsl_chan->irq != NO_IRQ) { + err = request_irq(new_fsl_chan->irq, + &fsl_dma_chan_do_interrupt, IRQF_SHARED, + "fsldma-channel", new_fsl_chan); + if (err) { + dev_err(&dev->dev, "DMA channel %s request_irq error " + "with return %d\n", dev->node->full_name, err); + goto err; + } + } + +#ifdef CONFIG_FSL_DMA_SELFTEST + err = fsl_dma_self_test(new_fsl_chan); + if (err) + goto err; +#endif + + dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, + match->compatible, new_fsl_chan->irq); + + return 0; +err: + dma_halt(new_fsl_chan); + iounmap(new_fsl_chan->reg_base); + free_irq(new_fsl_chan->irq, new_fsl_chan); + list_del(&new_fsl_chan->common.device_node); + kfree(new_fsl_chan); + return err; +} + +const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN; +const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN; + +static struct of_device_id of_fsl_dma_chan_ids[] = { + { + .compatible = "fsl,mpc8540-dma-channel", + .data = (void *)&mpc8540_dma_ip_feature, + }, + { + .compatible = "fsl,mpc8349-dma-channel", + .data = (void 
*)&mpc8349_dma_ip_feature, + }, + {} +}; + +static struct of_platform_driver of_fsl_dma_chan_driver = { + .name = "of-fsl-dma-channel", + .match_table = of_fsl_dma_chan_ids, + .probe = of_fsl_dma_chan_probe, +}; + +static __init int of_fsl_dma_chan_init(void) +{ + return of_register_platform_driver(&of_fsl_dma_chan_driver); +} + +static int __devinit of_fsl_dma_probe(struct of_device *dev, + const struct of_device_id *match) +{ + int err; + unsigned int irq; + struct fsl_dma_device *fdev; + + fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); + if (!fdev) { + dev_err(&dev->dev, "No enough memory for 'priv'\n"); + err = -ENOMEM; + goto err; + } + fdev->dev = &dev->dev; + INIT_LIST_HEAD(&fdev->common.channels); + + /* get DMA controller register base */ + err = of_address_to_resource(dev->node, 0, &fdev->reg); + if (err) { + dev_err(&dev->dev, "Can't get %s property 'reg'\n", + dev->node->full_name); + goto err; + } + + dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " + "controller at 0x%08x...\n", + match->compatible, fdev->reg.start); + fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end + - fdev->reg.start + 1); + + dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); + dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); + fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; + fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; + fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; + fdev->common.device_is_tx_complete = fsl_dma_is_complete; + fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; + fdev->common.device_dependency_added = fsl_dma_dependency_added; + fdev->common.dev = &dev->dev; + + irq = irq_of_parse_and_map(dev->node, 0); + if (irq != NO_IRQ) { + err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED, + "fsldma-device", fdev); + if (err) { + dev_err(&dev->dev, "DMA device request_irq error " + "with return %d\n", err); + goto err; + } + } + + dev_set_drvdata(&(dev->dev), fdev); + of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev); + + dma_async_device_register(&fdev->common); + return 0; + +err: + iounmap(fdev->reg_base); + kfree(fdev); + return err; +} + +static struct of_device_id of_fsl_dma_ids[] = { + { .compatible = "fsl,mpc8540-dma", }, + { .compatible = "fsl,mpc8349-dma", }, + {} +}; + +static struct of_platform_driver of_fsl_dma_driver = { + .name = "of-fsl-dma", + .match_table = of_fsl_dma_ids, + .probe = of_fsl_dma_probe, +}; + +static __init int of_fsl_dma_init(void) +{ + return of_register_platform_driver(&of_fsl_dma_driver); +} + +subsys_initcall(of_fsl_dma_chan_init); +subsys_initcall(of_fsl_dma_init); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h new file mode 100644 index 00000000000..ba78c42121b --- /dev/null +++ b/drivers/dma/fsldma.h @@ -0,0 +1,189 @@ +/* + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: + * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 + * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ +#ifndef __DMA_FSLDMA_H +#define __DMA_FSLDMA_H + +#include <linux/device.h> +#include <linux/dmapool.h> +#include <linux/dmaengine.h> + +/* Define data structures needed by Freescale + * MPC8540 and MPC8349 DMA controller. 
+ */ +#define FSL_DMA_MR_CS 0x00000001 +#define FSL_DMA_MR_CC 0x00000002 +#define FSL_DMA_MR_CA 0x00000008 +#define FSL_DMA_MR_EIE 0x00000040 +#define FSL_DMA_MR_XFE 0x00000020 +#define FSL_DMA_MR_EOLNIE 0x00000100 +#define FSL_DMA_MR_EOLSIE 0x00000080 +#define FSL_DMA_MR_EOSIE 0x00000200 +#define FSL_DMA_MR_CDSM 0x00000010 +#define FSL_DMA_MR_CTM 0x00000004 +#define FSL_DMA_MR_EMP_EN 0x00200000 +#define FSL_DMA_MR_EMS_EN 0x00040000 +#define FSL_DMA_MR_DAHE 0x00002000 +#define FSL_DMA_MR_SAHE 0x00001000 + +/* Special MR definition for MPC8349 */ +#define FSL_DMA_MR_EOTIE 0x00000080 + +#define FSL_DMA_SR_CH 0x00000020 +#define FSL_DMA_SR_CB 0x00000004 +#define FSL_DMA_SR_TE 0x00000080 +#define FSL_DMA_SR_EOSI 0x00000002 +#define FSL_DMA_SR_EOLSI 0x00000001 +#define FSL_DMA_SR_EOCDI 0x00000001 +#define FSL_DMA_SR_EOLNI 0x00000008 + +#define FSL_DMA_SATR_SBPATMU 0x20000000 +#define FSL_DMA_SATR_STRANSINT_RIO 0x00c00000 +#define FSL_DMA_SATR_SREADTYPE_SNOOP_READ 0x00050000 +#define FSL_DMA_SATR_SREADTYPE_BP_IORH 0x00020000 +#define FSL_DMA_SATR_SREADTYPE_BP_NREAD 0x00040000 +#define FSL_DMA_SATR_SREADTYPE_BP_MREAD 0x00070000 + +#define FSL_DMA_DATR_DBPATMU 0x20000000 +#define FSL_DMA_DATR_DTRANSINT_RIO 0x00c00000 +#define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE 0x00050000 +#define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH 0x00010000 + +#define FSL_DMA_EOL ((u64)0x1) +#define FSL_DMA_SNEN ((u64)0x10) +#define FSL_DMA_EOSIE 0x8 +#define FSL_DMA_NLDA_MASK (~(u64)0x1f) + +#define FSL_DMA_BCR_MAX_CNT 0x03ffffffu + +#define FSL_DMA_DGSR_TE 0x80 +#define FSL_DMA_DGSR_CH 0x20 +#define FSL_DMA_DGSR_PE 0x10 +#define FSL_DMA_DGSR_EOLNI 0x08 +#define FSL_DMA_DGSR_CB 0x04 +#define FSL_DMA_DGSR_EOSI 0x02 +#define FSL_DMA_DGSR_EOLSI 0x01 + +struct fsl_dma_ld_hw { + u64 __bitwise src_addr; + u64 __bitwise dst_addr; + u64 __bitwise next_ln_addr; + u32 __bitwise count; + u32 __bitwise reserve; +} __attribute__((aligned(32))); + +struct fsl_desc_sw { + struct fsl_dma_ld_hw hw; + struct list_head node; + struct dma_async_tx_descriptor async_tx; + struct list_head *ld; + void *priv; +} __attribute__((aligned(32))); + +struct fsl_dma_chan_regs { + u32 __bitwise mr; /* 0x00 - Mode Register */ + u32 __bitwise sr; /* 0x04 - Status Register */ + u64 __bitwise cdar; /* 0x08 - Current descriptor address register */ + u64 __bitwise sar; /* 0x10 - Source Address Register */ + u64 __bitwise dar; /* 0x18 - Destination Address Register */ + u32 __bitwise bcr; /* 0x20 - Byte Count Register */ + u64 __bitwise ndar; /* 0x24 - Next Descriptor Address Register */ +}; + +struct fsl_dma_chan; +#define FSL_DMA_MAX_CHANS_PER_DEVICE 4 + +struct fsl_dma_device { + void __iomem *reg_base; /* DGSR register base */ + struct resource reg; /* Resource for register */ + struct device *dev; + struct dma_device common; + struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; + u32 feature; /* The same as DMA channels */ +}; + +/* Define macros for fsl_dma_chan->feature property */ +#define FSL_DMA_LITTLE_ENDIAN 0x00000000 +#define FSL_DMA_BIG_ENDIAN 0x00000001 + +#define FSL_DMA_IP_MASK 0x00000ff0 +#define FSL_DMA_IP_85XX 0x00000010 +#define FSL_DMA_IP_83XX 0x00000020 + +#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 +#define FSL_DMA_CHAN_START_EXT 0x00002000 + +struct fsl_dma_chan { + struct fsl_dma_chan_regs __iomem *reg_base; + dma_cookie_t completed_cookie; /* The maximum cookie completed */ + spinlock_t desc_lock; /* Descriptor operation lock */ + struct list_head ld_queue; /* Link descriptors queue */ + struct dma_chan common; /* DMA common channel */ + 
struct dma_pool *desc_pool; /* Descriptors pool */ + struct device *dev; /* Channel device */ + struct resource reg; /* Resource for register */ + int irq; /* Channel IRQ */ + int id; /* Raw id of this channel */ + struct tasklet_struct tasklet; + u32 feature; + + void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size); + void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); + void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); + void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); +}; + +#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) +#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) +#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) + +#ifndef __powerpc64__ +static u64 in_be64(const u64 __iomem *addr) +{ + return ((u64)in_be32((u32 *)addr) << 32) | (in_be32((u32 *)addr + 1)); +} + +static void out_be64(u64 __iomem *addr, u64 val) +{ + out_be32((u32 *)addr, val >> 32); + out_be32((u32 *)addr + 1, (u32)val); +} + +/* There is no asm instructions for 64 bits reverse loads and stores */ +static u64 in_le64(const u64 __iomem *addr) +{ + return ((u64)in_le32((u32 *)addr + 1) << 32) | (in_le32((u32 *)addr)); +} + +static void out_le64(u64 __iomem *addr, u64 val) +{ + out_le32((u32 *)addr + 1, val >> 32); + out_le32((u32 *)addr, (u32)val); +} +#endif + +#define DMA_IN(fsl_chan, addr, width) \ + (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ + in_be##width(addr) : in_le##width(addr)) +#define DMA_OUT(fsl_chan, addr, val, width) \ + (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ + out_be##width(addr, val) : out_le##width(addr, val)) + +#define DMA_TO_CPU(fsl_chan, d, width) \ + (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ + be##width##_to_cpu(d) : le##width##_to_cpu(d)) +#define CPU_TO_DMA(fsl_chan, c, width) \ + (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? 
\ + cpu_to_be##width(c) : cpu_to_le##width(c)) + +#endif /* __DMA_FSLDMA_H */ diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index dff38accc5c..4017d9e7acd 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c @@ -714,6 +714,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( new->len = len; new->dst = dma_dest; new->src = dma_src; + new->async_tx.ack = 0; return &new->async_tx; } else return NULL; @@ -741,6 +742,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( new->len = len; new->dst = dma_dest; new->src = dma_src; + new->async_tx.ack = 0; return &new->async_tx; } else return NULL; diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index 3e9719948a8..a03462750b9 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/errno.h> +#include <linux/delay.h> #include <linux/device.h> #include <linux/mutex.h> #include <linux/crc-itu-t.h> @@ -214,17 +215,29 @@ static void fw_card_bm_work(struct work_struct *work) { struct fw_card *card = container_of(work, struct fw_card, work.work); - struct fw_device *root; + struct fw_device *root_device; + struct fw_node *root_node, *local_node; struct bm_data bmd; unsigned long flags; int root_id, new_root_id, irm_id, gap_count, generation, grace; int do_reset = 0; spin_lock_irqsave(&card->lock, flags); + local_node = card->local_node; + root_node = card->root_node; + + if (local_node == NULL) { + spin_unlock_irqrestore(&card->lock, flags); + return; + } + fw_node_get(local_node); + fw_node_get(root_node); generation = card->generation; - root = card->root_node->data; - root_id = card->root_node->node_id; + root_device = root_node->data; + if (root_device) + fw_device_get(root_device); + root_id = root_node->node_id; grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); if (card->bm_generation + 1 == generation || @@ -243,14 +256,14 @@ fw_card_bm_work(struct work_struct *work) irm_id = card->irm_node->node_id; if (!card->irm_node->link_on) { - new_root_id = card->local_node->node_id; + new_root_id = local_node->node_id; fw_notify("IRM has link off, making local node (%02x) root.\n", new_root_id); goto pick_me; } bmd.lock.arg = cpu_to_be32(0x3f); - bmd.lock.data = cpu_to_be32(card->local_node->node_id); + bmd.lock.data = cpu_to_be32(local_node->node_id); spin_unlock_irqrestore(&card->lock, flags); @@ -267,12 +280,12 @@ fw_card_bm_work(struct work_struct *work) * Another bus reset happened. Just return, * the BM work has been rescheduled. */ - return; + goto out; } if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f) /* Somebody else is BM, let them do the work. */ - return; + goto out; spin_lock_irqsave(&card->lock, flags); if (bmd.rcode != RCODE_COMPLETE) { @@ -282,7 +295,7 @@ fw_card_bm_work(struct work_struct *work) * do a bus reset and pick the local node as * root, and thus, IRM. */ - new_root_id = card->local_node->node_id; + new_root_id = local_node->node_id; fw_notify("BM lock failed, making local node (%02x) root.\n", new_root_id); goto pick_me; @@ -295,7 +308,7 @@ fw_card_bm_work(struct work_struct *work) */ spin_unlock_irqrestore(&card->lock, flags); schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10)); - return; + goto out; } /* @@ -305,20 +318,20 @@ fw_card_bm_work(struct work_struct *work) */ card->bm_generation = generation; - if (root == NULL) { + if (root_device == NULL) { /* * Either link_on is false, or we failed to read the * config rom. 
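The fw-card.c changes above all enforce one rule: any pointer pulled out of the card topology under card->lock (local_node, root_node, root_device) is pinned with its own get before the lock is dropped and released on every exit path, so a concurrent bus reset cannot free it while the work item still uses it. The same shape on a generic kref-counted object, with invented names:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct thing {
	struct kref ref;
	int value;
};

static void thing_release(struct kref *ref)
{
	kfree(container_of(ref, struct thing, ref));
}

/* @lock protects *@slot; another thread may replace and release the
 * object stored there as soon as the lock is dropped. */
static int use_current_thing(spinlock_t *lock, struct thing **slot)
{
	struct thing *t;
	unsigned long flags;
	int value;

	spin_lock_irqsave(lock, flags);
	t = *slot;
	if (!t) {
		spin_unlock_irqrestore(lock, flags);
		return -ENOENT;
	}
	kref_get(&t->ref);	/* pin it before letting go of the lock */
	spin_unlock_irqrestore(lock, flags);

	value = t->value;	/* safe: we hold our own reference */

	kref_put(&t->ref, thing_release);
	return value;
}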
In either case, pick another root. */ - new_root_id = card->local_node->node_id; - } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) { + new_root_id = local_node->node_id; + } else if (atomic_read(&root_device->state) != FW_DEVICE_RUNNING) { /* * If we haven't probed this device yet, bail out now * and let's try again once that's done. */ spin_unlock_irqrestore(&card->lock, flags); - return; - } else if (root->config_rom[2] & BIB_CMC) { + goto out; + } else if (root_device->config_rom[2] & BIB_CMC) { /* * FIXME: I suppose we should set the cmstr bit in the * STATE_CLEAR register of this node, as described in @@ -332,7 +345,7 @@ fw_card_bm_work(struct work_struct *work) * successfully read the config rom, but it's not * cycle master capable. */ - new_root_id = card->local_node->node_id; + new_root_id = local_node->node_id; } pick_me: @@ -341,8 +354,8 @@ fw_card_bm_work(struct work_struct *work) * the typically much larger 1394b beta repeater delays though. */ if (!card->beta_repeaters_present && - card->root_node->max_hops < ARRAY_SIZE(gap_count_table)) - gap_count = gap_count_table[card->root_node->max_hops]; + root_node->max_hops < ARRAY_SIZE(gap_count_table)) + gap_count = gap_count_table[root_node->max_hops]; else gap_count = 63; @@ -364,6 +377,11 @@ fw_card_bm_work(struct work_struct *work) fw_send_phy_config(card, new_root_id, generation, gap_count); fw_core_initiate_bus_reset(card, 1); } + out: + if (root_device) + fw_device_put(root_device); + fw_node_put(root_node); + fw_node_put(local_node); } static void @@ -381,6 +399,7 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, static atomic_t index = ATOMIC_INIT(-1); kref_init(&card->kref); + atomic_set(&card->device_count, 0); card->index = atomic_inc_return(&index); card->driver = driver; card->device = device; @@ -511,8 +530,14 @@ fw_core_remove_card(struct fw_card *card) card->driver = &dummy_driver; fw_destroy_nodes(card); - flush_scheduled_work(); + /* + * Wait for all device workqueue jobs to finish. Otherwise the + * firewire-core module could be unloaded before the jobs ran. + */ + while (atomic_read(&card->device_count) > 0) + msleep(100); + cancel_delayed_work_sync(&card->work); fw_flush_transactions(card); del_timer_sync(&card->flush_timer); diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 7e73cbaa412..46bc197a047 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -109,15 +109,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file) struct client *client; unsigned long flags; - device = fw_device_from_devt(inode->i_rdev); + device = fw_device_get_by_devt(inode->i_rdev); if (device == NULL) return -ENODEV; client = kzalloc(sizeof(*client), GFP_KERNEL); - if (client == NULL) + if (client == NULL) { + fw_device_put(device); return -ENOMEM; + } - client->device = fw_device_get(device); + client->device = device; INIT_LIST_HEAD(&client->event_list); INIT_LIST_HEAD(&client->resource_list); spin_lock_init(&client->lock); @@ -644,6 +646,10 @@ static int ioctl_create_iso_context(struct client *client, void *buffer) struct fw_cdev_create_iso_context *request = buffer; struct fw_iso_context *context; + /* We only support one context at this time. 
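The open() path above now obtains its device via fw_device_get_by_devt(), which (as defined further down) takes the reference while idr_rwsem is still held read-locked, and the kzalloc() failure branch drops that reference again, so every exit owns exactly what it took. A generic sketch of that lookup-with-reference shape, with illustrative names:

#include <linux/idr.h>
#include <linux/rwsem.h>
#include <linux/device.h>

static DEFINE_IDR(example_idr);
static DECLARE_RWSEM(example_idr_rwsem);

struct example_obj {
	struct device dev;
	int payload;
};

static struct example_obj *example_get_by_id(int id)
{
	struct example_obj *obj;

	down_read(&example_idr_rwsem);
	obj = idr_find(&example_idr, id);
	if (obj)
		get_device(&obj->dev);	/* taken before the lock is released */
	up_read(&example_idr_rwsem);

	return obj;	/* caller does put_device(&obj->dev) when finished */
}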
*/ + if (client->iso_context != NULL) + return -EBUSY; + if (request->channel > 63) return -EINVAL; @@ -790,8 +796,9 @@ static int ioctl_start_iso(struct client *client, void *buffer) { struct fw_cdev_start_iso *request = buffer; - if (request->handle != 0) + if (client->iso_context == NULL || request->handle != 0) return -EINVAL; + if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { if (request->tags == 0 || request->tags > 15) return -EINVAL; @@ -808,7 +815,7 @@ static int ioctl_stop_iso(struct client *client, void *buffer) { struct fw_cdev_stop_iso *request = buffer; - if (request->handle != 0) + if (client->iso_context == NULL || request->handle != 0) return -EINVAL; return fw_iso_context_stop(client->iso_context); diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index de9066e69ad..870125a3638 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c @@ -150,21 +150,10 @@ struct bus_type fw_bus_type = { }; EXPORT_SYMBOL(fw_bus_type); -struct fw_device *fw_device_get(struct fw_device *device) -{ - get_device(&device->device); - - return device; -} - -void fw_device_put(struct fw_device *device) -{ - put_device(&device->device); -} - static void fw_device_release(struct device *dev) { struct fw_device *device = fw_device(dev); + struct fw_card *card = device->card; unsigned long flags; /* @@ -176,9 +165,9 @@ static void fw_device_release(struct device *dev) spin_unlock_irqrestore(&device->card->lock, flags); fw_node_put(device->node); - fw_card_put(device->card); kfree(device->config_rom); kfree(device); + atomic_dec(&card->device_count); } int fw_device_enable_phys_dma(struct fw_device *device) @@ -358,12 +347,9 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fw_device *device = fw_device(dev); - u64 guid; - - guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4]; - return snprintf(buf, PAGE_SIZE, "0x%016llx\n", - (unsigned long long)guid); + return snprintf(buf, PAGE_SIZE, "0x%08x%08x\n", + device->config_rom[3], device->config_rom[4]); } static struct device_attribute fw_device_attributes[] = { @@ -610,12 +596,14 @@ static DECLARE_RWSEM(idr_rwsem); static DEFINE_IDR(fw_device_idr); int fw_cdev_major; -struct fw_device *fw_device_from_devt(dev_t devt) +struct fw_device *fw_device_get_by_devt(dev_t devt) { struct fw_device *device; down_read(&idr_rwsem); device = idr_find(&fw_device_idr, MINOR(devt)); + if (device) + fw_device_get(device); up_read(&idr_rwsem); return device; @@ -627,13 +615,14 @@ static void fw_device_shutdown(struct work_struct *work) container_of(work, struct fw_device, work.work); int minor = MINOR(device->device.devt); - down_write(&idr_rwsem); - idr_remove(&fw_device_idr, minor); - up_write(&idr_rwsem); - fw_device_cdev_remove(device); device_for_each_child(&device->device, NULL, shutdown_unit); device_unregister(&device->device); + + down_write(&idr_rwsem); + idr_remove(&fw_device_idr, minor); + up_write(&idr_rwsem); + fw_device_put(device); } static struct device_type fw_device_type = { @@ -668,7 +657,8 @@ static void fw_device_init(struct work_struct *work) */ if (read_bus_info_block(device, device->generation) < 0) { - if (device->config_rom_retries < MAX_RETRIES) { + if (device->config_rom_retries < MAX_RETRIES && + atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { device->config_rom_retries++; schedule_delayed_work(&device->work, RETRY_DELAY); } else { @@ -682,10 +672,13 @@ static void fw_device_init(struct work_struct *work) } err = 
-ENOMEM; + + fw_device_get(device); down_write(&idr_rwsem); if (idr_pre_get(&fw_device_idr, GFP_KERNEL)) err = idr_get_new(&fw_device_idr, device, &minor); up_write(&idr_rwsem); + if (err < 0) goto error; @@ -717,13 +710,22 @@ static void fw_device_init(struct work_struct *work) */ if (atomic_cmpxchg(&device->state, FW_DEVICE_INITIALIZING, - FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) + FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) { fw_device_shutdown(&device->work.work); - else - fw_notify("created new fw device %s " - "(%d config rom retries, S%d00)\n", - device->device.bus_id, device->config_rom_retries, - 1 << device->max_speed); + } else { + if (device->config_rom_retries) + fw_notify("created device %s: GUID %08x%08x, S%d00, " + "%d config ROM retries\n", + device->device.bus_id, + device->config_rom[3], device->config_rom[4], + 1 << device->max_speed, + device->config_rom_retries); + else + fw_notify("created device %s: GUID %08x%08x, S%d00\n", + device->device.bus_id, + device->config_rom[3], device->config_rom[4], + 1 << device->max_speed); + } /* * Reschedule the IRM work if we just finished reading the @@ -741,7 +743,9 @@ static void fw_device_init(struct work_struct *work) idr_remove(&fw_device_idr, minor); up_write(&idr_rwsem); error: - put_device(&device->device); + fw_device_put(device); /* fw_device_idr's reference */ + + put_device(&device->device); /* our reference */ } static int update_unit(struct device *dev, void *data) @@ -791,7 +795,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) */ device_initialize(&device->device); atomic_set(&device->state, FW_DEVICE_INITIALIZING); - device->card = fw_card_get(card); + atomic_inc(&card->device_count); + device->card = card; device->node = fw_node_get(node); device->node_id = node->node_id; device->generation = card->generation; diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index 0854fe2bc11..78ecd3991b7 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h @@ -76,14 +76,26 @@ fw_device_is_shutdown(struct fw_device *device) return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; } -struct fw_device *fw_device_get(struct fw_device *device); -void fw_device_put(struct fw_device *device); +static inline struct fw_device * +fw_device_get(struct fw_device *device) +{ + get_device(&device->device); + + return device; +} + +static inline void +fw_device_put(struct fw_device *device) +{ + put_device(&device->device); +} + +struct fw_device *fw_device_get_by_devt(dev_t devt); int fw_device_enable_phys_dma(struct fw_device *device); void fw_device_cdev_update(struct fw_device *device); void fw_device_cdev_remove(struct fw_device *device); -struct fw_device *fw_device_from_devt(dev_t devt); extern int fw_cdev_major; struct fw_unit { diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c index 19ece9b6d74..03069a454c0 100644 --- a/drivers/firewire/fw-sbp2.c +++ b/drivers/firewire/fw-sbp2.c @@ -28,14 +28,15 @@ * and many others. 
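guid_show() above now prints the two 32-bit config-ROM words back to back instead of assembling a u64 first; with zero-padded %08x fields the output is byte-for-byte the same as the old %016llx form, and the new "created device ... GUID %08x%08x" messages reuse the same trick. A standalone check of that equivalence (the GUID value is made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int hi = 0x0001f200;	/* stands in for config_rom[3] */
	unsigned int lo = 0x000a3d02;	/* stands in for config_rom[4] */
	unsigned long long guid = ((unsigned long long)hi << 32) | lo;
	char old_form[32], new_form[32];

	snprintf(old_form, sizeof(old_form), "0x%016llx", guid);
	snprintf(new_form, sizeof(new_form), "0x%08x%08x", hi, lo);

	printf("%s\n%s\n%s\n", old_form, new_form,
	       strcmp(old_form, new_form) == 0 ? "identical" : "different");
	return 0;
}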
*/ +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> #include <linux/kernel.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/mod_devicetable.h> -#include <linux/device.h> #include <linux/scatterlist.h> -#include <linux/dma-mapping.h> -#include <linux/blkdev.h> #include <linux/string.h> #include <linux/stringify.h> #include <linux/timer.h> @@ -47,9 +48,9 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> -#include "fw-transaction.h" -#include "fw-topology.h" #include "fw-device.h" +#include "fw-topology.h" +#include "fw-transaction.h" /* * So far only bridges from Oxford Semiconductor are known to support @@ -82,6 +83,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " * Avoids access beyond actual disk limits on devices with an off-by-one bug. * Don't use this with devices which don't have this bug. * + * - delay inquiry + * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. + * * - override internal blacklist * Instead of adding to the built-in blacklist, use only the workarounds * specified in the module load parameter. @@ -91,6 +95,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " #define SBP2_WORKAROUND_INQUIRY_36 0x2 #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 +#define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 +#define SBP2_INQUIRY_DELAY 12 #define SBP2_WORKAROUND_OVERRIDE 0x100 static int sbp2_param_workarounds; @@ -100,6 +106,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) + ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) ", or a combination)"); @@ -115,7 +122,6 @@ static const char sbp2_driver_name[] = "sbp2"; struct sbp2_logical_unit { struct sbp2_target *tgt; struct list_head link; - struct scsi_device *sdev; struct fw_address_handler address_handler; struct list_head orb_list; @@ -132,6 +138,8 @@ struct sbp2_logical_unit { int generation; int retries; struct delayed_work work; + bool has_sdev; + bool blocked; }; /* @@ -141,16 +149,18 @@ struct sbp2_logical_unit { struct sbp2_target { struct kref kref; struct fw_unit *unit; + const char *bus_id; + struct list_head lu_list; u64 management_agent_address; int directory_id; int node_id; int address_high; - - unsigned workarounds; - struct list_head lu_list; - + unsigned int workarounds; unsigned int mgt_orb_timeout; + + int dont_block; /* counter for each logical unit */ + int blocked; /* ditto */ }; /* @@ -160,7 +170,7 @@ struct sbp2_target { */ #define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ #define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ -#define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */ +#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ #define SBP2_ORB_NULL 0x80000000 #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 @@ -297,7 +307,7 @@ struct sbp2_command_orb { static const struct { u32 firmware_revision; u32 model; - unsigned workarounds; + unsigned int workarounds; } sbp2_workarounds_table[] = { /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { .firmware_revision = 0x002800, @@ -305,6 +315,11 @@ static const struct { .workarounds = SBP2_WORKAROUND_INQUIRY_36 | 
SBP2_WORKAROUND_MODE_SENSE_8, }, + /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { + .firmware_revision = 0x002800, + .model = 0x000000, + .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY, + }, /* Initio bridges, actually only needed for some older ones */ { .firmware_revision = 0x000200, .model = ~0, @@ -501,6 +516,9 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, unsigned int timeout; int retval = -ENOMEM; + if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device)) + return 0; + orb = kzalloc(sizeof(*orb), GFP_ATOMIC); if (orb == NULL) return -ENOMEM; @@ -553,20 +571,20 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, retval = -EIO; if (sbp2_cancel_orbs(lu) == 0) { - fw_error("orb reply timed out, rcode=0x%02x\n", - orb->base.rcode); + fw_error("%s: orb reply timed out, rcode=0x%02x\n", + lu->tgt->bus_id, orb->base.rcode); goto out; } if (orb->base.rcode != RCODE_COMPLETE) { - fw_error("management write failed, rcode 0x%02x\n", - orb->base.rcode); + fw_error("%s: management write failed, rcode 0x%02x\n", + lu->tgt->bus_id, orb->base.rcode); goto out; } if (STATUS_GET_RESPONSE(orb->status) != 0 || STATUS_GET_SBP_STATUS(orb->status) != 0) { - fw_error("error status: %d:%d\n", + fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id, STATUS_GET_RESPONSE(orb->status), STATUS_GET_SBP_STATUS(orb->status)); goto out; @@ -590,29 +608,158 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, static void complete_agent_reset_write(struct fw_card *card, int rcode, - void *payload, size_t length, void *data) + void *payload, size_t length, void *done) { - struct fw_transaction *t = data; + complete(done); +} - kfree(t); +static void sbp2_agent_reset(struct sbp2_logical_unit *lu) +{ + struct fw_device *device = fw_device(lu->tgt->unit->device.parent); + DECLARE_COMPLETION_ONSTACK(done); + struct fw_transaction t; + static u32 z; + + fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST, + lu->tgt->node_id, lu->generation, device->max_speed, + lu->command_block_agent_address + SBP2_AGENT_RESET, + &z, sizeof(z), complete_agent_reset_write, &done); + wait_for_completion(&done); +} + +static void +complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) +{ + kfree(data); } -static int sbp2_agent_reset(struct sbp2_logical_unit *lu) +static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) { struct fw_device *device = fw_device(lu->tgt->unit->device.parent); struct fw_transaction *t; - static u32 zero; + static u32 z; - t = kzalloc(sizeof(*t), GFP_ATOMIC); + t = kmalloc(sizeof(*t), GFP_ATOMIC); if (t == NULL) - return -ENOMEM; + return; fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, lu->tgt->node_id, lu->generation, device->max_speed, lu->command_block_agent_address + SBP2_AGENT_RESET, - &zero, sizeof(zero), complete_agent_reset_write, t); + &z, sizeof(z), complete_agent_reset_write_no_wait, t); +} - return 0; +static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation) +{ + struct fw_card *card = fw_device(lu->tgt->unit->device.parent)->card; + unsigned long flags; + + /* serialize with comparisons of lu->generation and card->generation */ + spin_lock_irqsave(&card->lock, flags); + lu->generation = generation; + spin_unlock_irqrestore(&card->lock, flags); +} + +static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) +{ + /* + * We may access dont_block without taking card->lock here: + * All callers of sbp2_allow_block() 
and all callers of sbp2_unblock() + * are currently serialized against each other. + * And a wrong result in sbp2_conditionally_block()'s access of + * dont_block is rather harmless, it simply misses its first chance. + */ + --lu->tgt->dont_block; +} + +/* + * Blocks lu->tgt if all of the following conditions are met: + * - Login, INQUIRY, and high-level SCSI setup of all of the target's + * logical units have been finished (indicated by dont_block == 0). + * - lu->generation is stale. + * + * Note, scsi_block_requests() must be called while holding card->lock, + * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to + * unblock the target. + */ +static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) +{ + struct sbp2_target *tgt = lu->tgt; + struct fw_card *card = fw_device(tgt->unit->device.parent)->card; + struct Scsi_Host *shost = + container_of((void *)tgt, struct Scsi_Host, hostdata[0]); + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + if (!tgt->dont_block && !lu->blocked && + lu->generation != card->generation) { + lu->blocked = true; + if (++tgt->blocked == 1) { + scsi_block_requests(shost); + fw_notify("blocked %s\n", lu->tgt->bus_id); + } + } + spin_unlock_irqrestore(&card->lock, flags); +} + +/* + * Unblocks lu->tgt as soon as all its logical units can be unblocked. + * Note, it is harmless to run scsi_unblock_requests() outside the + * card->lock protected section. On the other hand, running it inside + * the section might clash with shost->host_lock. + */ +static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) +{ + struct sbp2_target *tgt = lu->tgt; + struct fw_card *card = fw_device(tgt->unit->device.parent)->card; + struct Scsi_Host *shost = + container_of((void *)tgt, struct Scsi_Host, hostdata[0]); + unsigned long flags; + bool unblock = false; + + spin_lock_irqsave(&card->lock, flags); + if (lu->blocked && lu->generation == card->generation) { + lu->blocked = false; + unblock = --tgt->blocked == 0; + } + spin_unlock_irqrestore(&card->lock, flags); + + if (unblock) { + scsi_unblock_requests(shost); + fw_notify("unblocked %s\n", lu->tgt->bus_id); + } +} + +/* + * Prevents future blocking of tgt and unblocks it. + * Note, it is harmless to run scsi_unblock_requests() outside the + * card->lock protected section. On the other hand, running it inside + * the section might clash with shost->host_lock. 
+ */ +static void sbp2_unblock(struct sbp2_target *tgt) +{ + struct fw_card *card = fw_device(tgt->unit->device.parent)->card; + struct Scsi_Host *shost = + container_of((void *)tgt, struct Scsi_Host, hostdata[0]); + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + ++tgt->dont_block; + spin_unlock_irqrestore(&card->lock, flags); + + scsi_unblock_requests(shost); +} + +static int sbp2_lun2int(u16 lun) +{ + struct scsi_lun eight_bytes_lun; + + memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); + eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff; + eight_bytes_lun.scsi_lun[1] = lun & 0xff; + + return scsilun_to_int(&eight_bytes_lun); } static void sbp2_release_target(struct kref *kref) @@ -621,26 +768,31 @@ static void sbp2_release_target(struct kref *kref) struct sbp2_logical_unit *lu, *next; struct Scsi_Host *shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]); + struct scsi_device *sdev; struct fw_device *device = fw_device(tgt->unit->device.parent); - list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { - if (lu->sdev) - scsi_remove_device(lu->sdev); + /* prevent deadlocks */ + sbp2_unblock(tgt); - if (!fw_device_is_shutdown(device)) - sbp2_send_management_orb(lu, tgt->node_id, - lu->generation, SBP2_LOGOUT_REQUEST, - lu->login_id, NULL); + list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { + sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun)); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } + sbp2_send_management_orb(lu, tgt->node_id, lu->generation, + SBP2_LOGOUT_REQUEST, lu->login_id, NULL); fw_core_remove_address_handler(&lu->address_handler); list_del(&lu->link); kfree(lu); } scsi_remove_host(shost); - fw_notify("released %s\n", tgt->unit->device.bus_id); + fw_notify("released %s\n", tgt->bus_id); put_device(&tgt->unit->device); scsi_host_put(shost); + fw_device_put(device); } static struct workqueue_struct *sbp2_wq; @@ -666,33 +818,42 @@ static void sbp2_login(struct work_struct *work) { struct sbp2_logical_unit *lu = container_of(work, struct sbp2_logical_unit, work.work); - struct Scsi_Host *shost = - container_of((void *)lu->tgt, struct Scsi_Host, hostdata[0]); + struct sbp2_target *tgt = lu->tgt; + struct fw_device *device = fw_device(tgt->unit->device.parent); + struct Scsi_Host *shost; struct scsi_device *sdev; - struct scsi_lun eight_bytes_lun; - struct fw_unit *unit = lu->tgt->unit; - struct fw_device *device = fw_device(unit->device.parent); struct sbp2_login_response response; int generation, node_id, local_node_id; + if (fw_device_is_shutdown(device)) + goto out; + generation = device->generation; smp_rmb(); /* node_id must not be older than generation */ node_id = device->node_id; local_node_id = device->card->node_id; + /* If this is a re-login attempt, log out, or we might be rejected. */ + if (lu->has_sdev) + sbp2_send_management_orb(lu, device->node_id, generation, + SBP2_LOGOUT_REQUEST, lu->login_id, NULL); + if (sbp2_send_management_orb(lu, node_id, generation, SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { - if (lu->retries++ < 5) + if (lu->retries++ < 5) { sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); - else - fw_error("failed to login to %s LUN %04x\n", - unit->device.bus_id, lu->lun); + } else { + fw_error("%s: failed to login to LUN %04x\n", + tgt->bus_id, lu->lun); + /* Let any waiting I/O fail from now on. 
*/ + sbp2_unblock(lu->tgt); + } goto out; } - lu->generation = generation; - lu->tgt->node_id = node_id; - lu->tgt->address_high = local_node_id << 16; + tgt->node_id = node_id; + tgt->address_high = local_node_id << 16; + sbp2_set_generation(lu, generation); /* Get command block agent offset and login id. */ lu->command_block_agent_address = @@ -700,8 +861,8 @@ static void sbp2_login(struct work_struct *work) response.command_block_agent.low; lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); - fw_notify("logged in to %s LUN %04x (%d retries)\n", - unit->device.bus_id, lu->lun, lu->retries); + fw_notify("%s: logged in to LUN %04x (%d retries)\n", + tgt->bus_id, lu->lun, lu->retries); #if 0 /* FIXME: The linux1394 sbp2 does this last step. */ @@ -711,26 +872,58 @@ static void sbp2_login(struct work_struct *work) PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); sbp2_agent_reset(lu); - memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); - eight_bytes_lun.scsi_lun[0] = (lu->lun >> 8) & 0xff; - eight_bytes_lun.scsi_lun[1] = lu->lun & 0xff; + /* This was a re-login. */ + if (lu->has_sdev) { + sbp2_cancel_orbs(lu); + sbp2_conditionally_unblock(lu); + goto out; + } - sdev = __scsi_add_device(shost, 0, 0, - scsilun_to_int(&eight_bytes_lun), lu); - if (IS_ERR(sdev)) { - sbp2_send_management_orb(lu, node_id, generation, - SBP2_LOGOUT_REQUEST, lu->login_id, NULL); - /* - * Set this back to sbp2_login so we fall back and - * retry login on bus reset. - */ - PREPARE_DELAYED_WORK(&lu->work, sbp2_login); - } else { - lu->sdev = sdev; + if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) + ssleep(SBP2_INQUIRY_DELAY); + + shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]); + sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu); + /* + * FIXME: We are unable to perform reconnects while in sbp2_login(). + * Therefore __scsi_add_device() will get into trouble if a bus reset + * happens in parallel. It will either fail or leave us with an + * unusable sdev. As a workaround we check for this and retry the + * whole login and SCSI probing. + */ + + /* Reported error during __scsi_add_device() */ + if (IS_ERR(sdev)) + goto out_logout_login; + + /* Unreported error during __scsi_add_device() */ + smp_rmb(); /* get current card generation */ + if (generation != device->card->generation) { + scsi_remove_device(sdev); scsi_device_put(sdev); + goto out_logout_login; } + + /* No error during __scsi_add_device() */ + lu->has_sdev = true; + scsi_device_put(sdev); + sbp2_allow_block(lu); + goto out; + + out_logout_login: + smp_rmb(); /* generation may have changed */ + generation = device->generation; + smp_rmb(); /* node_id must not be older than generation */ + + sbp2_send_management_orb(lu, device->node_id, generation, + SBP2_LOGOUT_REQUEST, lu->login_id, NULL); + /* + * If a bus reset happened, sbp2_update will have requeued + * lu->work already. Reset the work from reconnect to login. 
+ */ + PREPARE_DELAYED_WORK(&lu->work, sbp2_login); out: - sbp2_target_put(lu->tgt); + sbp2_target_put(tgt); } static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) @@ -751,10 +944,12 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) return -ENOMEM; } - lu->tgt = tgt; - lu->sdev = NULL; - lu->lun = lun_entry & 0xffff; - lu->retries = 0; + lu->tgt = tgt; + lu->lun = lun_entry & 0xffff; + lu->retries = 0; + lu->has_sdev = false; + lu->blocked = false; + ++tgt->dont_block; INIT_LIST_HEAD(&lu->orb_list); INIT_DELAYED_WORK(&lu->work, sbp2_login); @@ -813,7 +1008,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, if (timeout > tgt->mgt_orb_timeout) fw_notify("%s: config rom contains %ds " "management ORB timeout, limiting " - "to %ds\n", tgt->unit->device.bus_id, + "to %ds\n", tgt->bus_id, timeout / 1000, tgt->mgt_orb_timeout / 1000); break; @@ -836,12 +1031,12 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, u32 firmware_revision) { int i; - unsigned w = sbp2_param_workarounds; + unsigned int w = sbp2_param_workarounds; if (w) fw_notify("Please notify linux1394-devel@lists.sourceforge.net " "if you need the workarounds parameter for %s\n", - tgt->unit->device.bus_id); + tgt->bus_id); if (w & SBP2_WORKAROUND_OVERRIDE) goto out; @@ -863,8 +1058,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, if (w) fw_notify("Workarounds for %s: 0x%x " "(firmware_revision 0x%06x, model_id 0x%06x)\n", - tgt->unit->device.bus_id, - w, firmware_revision, model); + tgt->bus_id, w, firmware_revision, model); tgt->workarounds = w; } @@ -888,6 +1082,7 @@ static int sbp2_probe(struct device *dev) tgt->unit = unit; kref_init(&tgt->kref); INIT_LIST_HEAD(&tgt->lu_list); + tgt->bus_id = unit->device.bus_id; if (fw_device_enable_phys_dma(device) < 0) goto fail_shost_put; @@ -895,6 +1090,8 @@ static int sbp2_probe(struct device *dev) if (scsi_add_host(shost, &unit->device) < 0) goto fail_shost_put; + fw_device_get(device); + /* Initialize to values that won't match anything in our table. */ firmware_revision = 0xff000000; model = 0xff000000; @@ -938,10 +1135,13 @@ static void sbp2_reconnect(struct work_struct *work) { struct sbp2_logical_unit *lu = container_of(work, struct sbp2_logical_unit, work.work); - struct fw_unit *unit = lu->tgt->unit; - struct fw_device *device = fw_device(unit->device.parent); + struct sbp2_target *tgt = lu->tgt; + struct fw_device *device = fw_device(tgt->unit->device.parent); int generation, node_id, local_node_id; + if (fw_device_is_shutdown(device)) + goto out; + generation = device->generation; smp_rmb(); /* node_id must not be older than generation */ node_id = device->node_id; @@ -950,10 +1150,17 @@ static void sbp2_reconnect(struct work_struct *work) if (sbp2_send_management_orb(lu, node_id, generation, SBP2_RECONNECT_REQUEST, lu->login_id, NULL) < 0) { - if (lu->retries++ >= 5) { - fw_error("failed to reconnect to %s\n", - unit->device.bus_id); - /* Fall back and try to log in again. */ + /* + * If reconnect was impossible even though we are in the + * current generation, fall back and try to log in again. + * + * We could check for "Function rejected" status, but + * looking at the bus generation as simpler and more general. 
+ */ + smp_rmb(); /* get current card generation */ + if (generation == device->card->generation || + lu->retries++ >= 5) { + fw_error("%s: failed to reconnect\n", tgt->bus_id); lu->retries = 0; PREPARE_DELAYED_WORK(&lu->work, sbp2_login); } @@ -961,17 +1168,18 @@ static void sbp2_reconnect(struct work_struct *work) goto out; } - lu->generation = generation; - lu->tgt->node_id = node_id; - lu->tgt->address_high = local_node_id << 16; + tgt->node_id = node_id; + tgt->address_high = local_node_id << 16; + sbp2_set_generation(lu, generation); - fw_notify("reconnected to %s LUN %04x (%d retries)\n", - unit->device.bus_id, lu->lun, lu->retries); + fw_notify("%s: reconnected to LUN %04x (%d retries)\n", + tgt->bus_id, lu->lun, lu->retries); sbp2_agent_reset(lu); sbp2_cancel_orbs(lu); + sbp2_conditionally_unblock(lu); out: - sbp2_target_put(lu->tgt); + sbp2_target_put(tgt); } static void sbp2_update(struct fw_unit *unit) @@ -986,6 +1194,7 @@ static void sbp2_update(struct fw_unit *unit) * Iteration over tgt->lu_list is therefore safe here. */ list_for_each_entry(lu, &tgt->lu_list, link) { + sbp2_conditionally_block(lu); lu->retries = 0; sbp2_queue_work(lu, 0); } @@ -1063,7 +1272,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) if (status != NULL) { if (STATUS_GET_DEAD(*status)) - sbp2_agent_reset(orb->lu); + sbp2_agent_reset_no_wait(orb->lu); switch (STATUS_GET_RESPONSE(*status)) { case SBP2_STATUS_REQUEST_COMPLETE: @@ -1089,6 +1298,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) * or when sending the write (less likely). */ result = DID_BUS_BUSY << 16; + sbp2_conditionally_block(orb->lu); } dma_unmap_single(device->card->device, orb->base.request_bus, @@ -1197,7 +1407,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) struct sbp2_logical_unit *lu = cmd->device->hostdata; struct fw_device *device = fw_device(lu->tgt->unit->device.parent); struct sbp2_command_orb *orb; - unsigned max_payload; + unsigned int max_payload; int retval = SCSI_MLQUEUE_HOST_BUSY; /* @@ -1275,6 +1485,10 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) { struct sbp2_logical_unit *lu = sdev->hostdata; + /* (Re-)Adding logical units via the SCSI stack is not supported. 
*/ + if (!lu) + return -ENOSYS; + sdev->allow_restart = 1; /* @@ -1319,7 +1533,7 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd) { struct sbp2_logical_unit *lu = cmd->device->hostdata; - fw_notify("sbp2_scsi_abort\n"); + fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id); sbp2_agent_reset(lu); sbp2_cancel_orbs(lu); diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c index 172c1867e9a..e47bb040197 100644 --- a/drivers/firewire/fw-topology.c +++ b/drivers/firewire/fw-topology.c @@ -383,6 +383,7 @@ void fw_destroy_nodes(struct fw_card *card) card->color++; if (card->local_node != NULL) for_each_fw_node(card, card->local_node, report_lost_node); + card->local_node = NULL; spin_unlock_irqrestore(&card->lock, flags); } diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index fa7967b5740..09cb7287045 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -26,6 +26,7 @@ #include <linux/fs.h> #include <linux/dma-mapping.h> #include <linux/firewire-constants.h> +#include <asm/atomic.h> #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) @@ -219,6 +220,7 @@ extern struct bus_type fw_bus_type; struct fw_card { const struct fw_card_driver *driver; struct device *device; + atomic_t device_count; struct kref kref; int node_id; diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 310e497b583..c8d0e871599 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -670,8 +670,8 @@ static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector, * and attempt to recover if there are problems. Returns 0 if everything's * ok; nonzero if the request has been terminated. */ -static -int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw) +static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, + int len, int ireason, int rw) { /* * ireason == 0: the drive wants to receive data from us @@ -701,6 +701,9 @@ int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw) drive->name, __FUNCTION__, ireason); } + if (rq->cmd_type == REQ_TYPE_ATA_PC) + rq->cmd_flags |= REQ_FAILED; + cdrom_end_request(drive, 0); return -1; } @@ -1071,11 +1074,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) /* * check which way to transfer data */ - if (blk_fs_request(rq) || blk_pc_request(rq)) { - if (ide_cd_check_ireason(drive, len, ireason, write)) - return ide_stopped; + if (ide_cd_check_ireason(drive, rq, len, ireason, write)) + return ide_stopped; - if (blk_fs_request(rq) && write == 0) { + if (blk_fs_request(rq)) { + if (write == 0) { int nskip; if (ide_cd_check_transfer_size(drive, len)) { @@ -1101,16 +1104,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) if (ireason == 0) { write = 1; xferfunc = HWIF(drive)->atapi_output_bytes; - } else if (ireason == 2 || (ireason == 1 && - (blk_fs_request(rq) || blk_pc_request(rq)))) { + } else { write = 0; xferfunc = HWIF(drive)->atapi_input_bytes; - } else { - printk(KERN_ERR "%s: %s: The drive " - "appears confused (ireason = 0x%02x). 
" - "Trying to recover by ending request.\n", - drive->name, __FUNCTION__, ireason); - goto end_request; } /* @@ -1182,11 +1178,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) else rq->data += blen; } + if (!write && blk_sense_request(rq)) + rq->sense_len += blen; } - if (write && blk_sense_request(rq)) - rq->sense_len += thislen; - /* * pad, if necessary */ @@ -1931,6 +1926,7 @@ static const struct cd_list_entry ide_cd_quirks_list[] = { { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, + { "Optiarc DVD RW AD-5200A", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, { NULL, NULL, 0 } }; diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 8f5bed47105..39501d13025 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -867,7 +867,7 @@ static void idedisk_setup (ide_drive_t *drive) /* Only print cache size when it was specified */ if (id->buf_size) - printk (" w/%dKiB Cache", id->buf_size/2); + printk(KERN_CONT " w/%dKiB Cache", id->buf_size / 2); printk(KERN_CONT ", CHS=%d/%d/%d\n", drive->bios_cyl, drive->bios_head, drive->bios_sect); @@ -949,7 +949,8 @@ static void ide_device_shutdown(ide_drive_t *drive) return; } - printk("Shutdown: %s\n", drive->name); + printk(KERN_INFO "Shutdown: %s\n", drive->name); + drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND); } diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index d0e7b537353..2de99e4be5c 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c @@ -1,9 +1,13 @@ /* + * IDE DMA support (including IDE PCI BM-DMA). + * * Copyright (C) 1995-1998 Mark Lord * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz * * May be copied or modified under the terms of the GNU General Public License + * + * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies). */ /* @@ -11,49 +15,6 @@ */ /* - * This module provides support for the bus-master IDE DMA functions - * of various PCI chipsets, including the Intel PIIX (i82371FB for - * the 430 FX chipset), the PIIX3 (i82371SB for the 430 HX/VX and - * 440 chipsets), and the PIIX4 (i82371AB for the 430 TX chipset) - * ("PIIX" stands for "PCI ISA IDE Xcellerator"). - * - * Pretty much the same code works for other IDE PCI bus-mastering chipsets. - * - * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies). - * - * By default, DMA support is prepared for use, but is currently enabled only - * for drives which already have DMA enabled (UltraDMA or mode 2 multi/single), - * or which are recognized as "good" (see table below). Drives with only mode0 - * or mode1 (multi/single) DMA should also work with this chipset/driver - * (eg. MC2112A) but are not enabled by default. - * - * Use "hdparm -i" to view modes supported by a given drive. - * - * The hdparm-3.5 (or later) utility can be used for manually enabling/disabling - * DMA support, but must be (re-)compiled against this kernel version or later. - * - * To enable DMA, use "hdparm -d1 /dev/hd?" on a per-drive basis after booting. - * If problems arise, ide.c will disable DMA operation after a few retries. - * This error recovery mechanism works and has been extremely well exercised. - * - * IDE drives, depending on their vintage, may support several different modes - * of DMA operation. 
The boot-time modes are indicated with a "*" in - * the "hdparm -i" listing, and can be changed with *knowledgeable* use of - * the "hdparm -X" feature. There is seldom a need to do this, as drives - * normally power-up with their "best" PIO/DMA modes enabled. - * - * Testing has been done with a rather extensive number of drives, - * with Quantum & Western Digital models generally outperforming the pack, - * and Fujitsu & Conner (and some Seagate which are really Conner) drives - * showing more lackluster throughput. - * - * Keep an eye on /var/adm/messages for "DMA disabled" messages. - * - * Some people have reported trouble with Intel Zappa motherboards. - * This can be fixed by upgrading the AMI BIOS to version 1.00.04.BS0, - * available from ftp://ftp.intel.com/pub/bios/10004bs0.exe - * (thanks to Glen Morrell <glen@spin.Stanford.edu> for researching this). - * * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for * fixing the problem with the BIOS on some Acer motherboards. * @@ -65,11 +26,6 @@ * * Most importantly, thanks to Robert Bringman <rob@mars.trion.com> * for supplying a Promise UDMA board & WD UDMA drive for this work! - * - * And, yes, Intel Zappa boards really *do* use both PIIX IDE ports. - * - * ATA-66/100 and recovery functions, I forgot the rest...... - * */ #include <linux/module.h> diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 4a2cb286822..194ecb0049e 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -756,7 +756,8 @@ static int ide_probe_port(ide_hwif_t *hwif) BUG_ON(hwif->present); - if (hwif->noprobe) + if (hwif->noprobe || + (hwif->drives[0].noprobe && hwif->drives[1].noprobe)) return -EACCES; /* diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 0598ecfd5f3..43e0e055777 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -3765,6 +3765,11 @@ static int ide_tape_probe(ide_drive_t *drive) g->fops = &idetape_block_ops; ide_register_region(g); + printk(KERN_WARNING "It is possible that this driver does not have any" + " users anymore and, as a result, it will be REMOVED soon." + " Please notify Bart <bzolnier@gmail.com> or Boris" + " <petkovbb@gmail.com> in case you still need it.\n"); + return 0; out_free_tape: diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 477833f0daf..fa16bc30bbc 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -590,11 +590,6 @@ void ide_unregister(unsigned int index, int init_default, int restore) hwif->extra_ports = 0; } - /* - * Note that we only release the standard ports, - * and do not even try to handle any extra ports - * allocated for weird IDE interface chipsets. - */ ide_hwif_release_regions(hwif); /* copy original settings */ @@ -1036,10 +1031,9 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device drive->nice1 = (arg >> IDE_NICE_1) & 1; return 0; case HDIO_DRIVE_RESET: - { - unsigned long flags; - if (!capable(CAP_SYS_ADMIN)) return -EACCES; - + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + /* * Abort the current command on the * group if there is one, taking @@ -1058,17 +1052,15 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device ide_abort(drive, "drive reset"); BUG_ON(HWGROUP(drive)->handler); - + /* Ensure nothing gets queued after we drop the lock. 
Reset will clear the busy */ - + HWGROUP(drive)->busy = 1; spin_unlock_irqrestore(&ide_lock, flags); (void) ide_do_reset(drive); return 0; - } - case HDIO_GET_BUSSTATE: if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -1449,7 +1441,7 @@ static int __init ide_setup(char *s) case -1: /* "noprobe" */ hwif->noprobe = 1; - goto done; + goto obsolete_option; case 1: /* base */ vals[1] = vals[0] + 0x206; /* default ctl */ diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c index bba29df5f21..2f4f47ad602 100644 --- a/drivers/ide/legacy/qd65xx.c +++ b/drivers/ide/legacy/qd65xx.c @@ -334,43 +334,6 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif) hwif->drives[1].drive_data = t2; } -/* - * qd_unsetup: - * - * called to unsetup an ata channel : back to default values, unlinks tuning - */ -/* -static void __exit qd_unsetup(ide_hwif_t *hwif) -{ - u8 config = hwif->config_data; - int base = hwif->select_data; - void *set_pio_mode = (void *)hwif->set_pio_mode; - - if (hwif->chipset != ide_qd65xx) - return; - - printk(KERN_NOTICE "%s: back to defaults\n", hwif->name); - - hwif->selectproc = NULL; - hwif->set_pio_mode = NULL; - - if (set_pio_mode == (void *)qd6500_set_pio_mode) { - // will do it for both - outb(QD6500_DEF_DATA, QD_TIMREG(&hwif->drives[0])); - } else if (set_pio_mode == (void *)qd6580_set_pio_mode) { - if (QD_CONTROL(hwif) & QD_CONTR_SEC_DISABLED) { - outb(QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0])); - outb(QD6580_DEF_DATA2, QD_TIMREG(&hwif->drives[1])); - } else { - outb(hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0])); - } - } else { - printk(KERN_WARNING "Unknown qd65xx tuning fonction !\n"); - printk(KERN_WARNING "keeping settings !\n"); - } -} -*/ - static const struct ide_port_info qd65xx_port_info __initdata = { .chipset = ide_qd65xx, .host_flags = IDE_HFLAG_IO_32BIT | @@ -444,6 +407,8 @@ static int __init qd_probe(int base) printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n", config, control, QD_ID3); + outb(QD_DEF_CONTR, QD_CONTROL_PORT); + if (control & QD_CONTR_SEC_DISABLED) { /* secondary disabled */ @@ -460,8 +425,6 @@ static int __init qd_probe(int base) ide_device_add(idx, &qd65xx_port_info); - outb(QD_DEF_CONTR, QD_CONTROL_PORT); - return 1; } else { ide_hwif_t *mate; @@ -487,8 +450,6 @@ static int __init qd_probe(int base) ide_device_add(idx, &qd65xx_port_info); - outb(QD_DEF_CONTR, QD_CONTROL_PORT); - return 0; /* no other qd65xx possible */ } } diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c index bd24dad3cfc..ec667982809 100644 --- a/drivers/ide/pci/cmd640.c +++ b/drivers/ide/pci/cmd640.c @@ -787,7 +787,8 @@ static int __init cmd640x_init(void) /* * Try to enable the secondary interface, if not already enabled */ - if (cmd_hwif1->noprobe) { + if (cmd_hwif1->noprobe || + (cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe)) { port2 = "not probed"; } else { b = get_cmd640_reg(CNTRL); diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index d0f7bb8b8ad..6357bb6269a 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c @@ -1570,10 +1570,12 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic if (rev < 3) info = &hpt36x; else { - static const struct hpt_info *hpt37x_info[] = - { &hpt370, &hpt370a, &hpt372, &hpt372n }; - - info = hpt37x_info[min_t(u8, rev, 6) - 3]; + switch (min_t(u8, rev, 6)) { + case 3: info = &hpt370; break; + case 4: info = &hpt370a; break; + case 5: info = &hpt372; break; + case 6: info = &hpt372n; 
break; + } idx++; } break; @@ -1626,7 +1628,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic return ide_setup_pci_device(dev, &d); } -static const struct pci_device_id hpt366_pci_tbl[] = { +static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = { { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 }, { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 }, diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 28e155a9e2a..9e2b1964d71 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -183,6 +183,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " * Avoids access beyond actual disk limits on devices with an off-by-one bug. * Don't use this with devices which don't have this bug. * + * - delay inquiry + * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. + * * - override internal blacklist * Instead of adding to the built-in blacklist, use only the workarounds * specified in the module load parameter. @@ -195,6 +198,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) + ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) ", or a combination)"); @@ -357,6 +361,11 @@ static const struct { .workarounds = SBP2_WORKAROUND_INQUIRY_36 | SBP2_WORKAROUND_MODE_SENSE_8, }, + /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { + .firmware_revision = 0x002800, + .model_id = 0x000000, + .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY, + }, /* Initio bridges, actually only needed for some older ones */ { .firmware_revision = 0x000200, .model_id = SBP2_ROM_VALUE_WILDCARD, @@ -914,6 +923,9 @@ static int sbp2_start_device(struct sbp2_lu *lu) sbp2_agent_reset(lu, 1); sbp2_max_speed_and_size(lu); + if (lu->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) + ssleep(SBP2_INQUIRY_DELAY); + error = scsi_add_device(lu->shost, 0, lu->ud->id, 0); if (error) { SBP2_ERR("scsi_add_device failed"); @@ -1962,6 +1974,9 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev) { struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0]; + if (sdev->lun != 0 || sdev->id != lu->ud->id || sdev->channel != 0) + return -ENODEV; + lu->sdev = sdev; sdev->allow_restart = 1; diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index d2ecb0d8a1b..80d8e097b06 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h @@ -343,6 +343,8 @@ enum sbp2lu_state_types { #define SBP2_WORKAROUND_INQUIRY_36 0x2 #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 +#define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 +#define SBP2_INQUIRY_DELAY 12 #define SBP2_WORKAROUND_OVERRIDE 0x100 #endif /* SBP2_H */ diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c index 73bfd1656f8..b8797c66676 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_mem.c +++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c @@ -136,14 +136,8 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list, /* Find largest page shift we can use to cover buffers */ for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift)) - if (num_phys_buf > 1) { - if ((1ULL << *shift) & mask) - break; - } else - if (1ULL << *shift >= - buffer_list[0].size + - (buffer_list[0].addr & ((1ULL 
<< *shift) - 1))) - break; + if ((1ULL << *shift) & mask) + break; buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1); buffer_list[0].addr &= ~0ull << *shift; diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 7f8853b44ee..b2112f5a422 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -567,12 +567,12 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i /* Init the adapter */ nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev); - nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; if (!nesdev->nesadapter) { printk(KERN_ERR PFX "Unable to initialize adapter.\n"); ret = -ENOMEM; goto bail5; } + nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; /* nesdev->base_doorbell_index = nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index fd57e8a1582..a48b288618e 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h @@ -285,6 +285,21 @@ struct nes_device { }; +static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad) +{ + u32 crc_value; + crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad)); + + /* + * With commit ef19454b ("[LIB] crc32c: Keep intermediate crc + * state in cpu order"), behavior of crc32c changes on + * big-endian platforms. Our algorithm expects the previous + * behavior; otherwise we have RDMA connection establishment + * issue on big-endian. + */ + return cpu_to_le32(crc_value); +} + static inline void set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) { diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index bd5cfeaac20..39adb267fb1 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -370,11 +370,11 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, int ret = 0; u32 was_timer_set; + if (!cm_node) + return -EINVAL; new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); if (!new_send) return -1; - if (!cm_node) - return -EINVAL; /* new_send->timetosend = currenttime */ new_send->retrycount = NES_DEFAULT_RETRYS; @@ -947,6 +947,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); kfree(listener); + listener = NULL; ret = 0; cm_listens_destroyed++; } else { @@ -2319,6 +2320,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) struct iw_cm_event cm_event; struct nes_hw_qp_wqe *wqe; struct nes_v4_quad nes_quad; + u32 crc_value; int ret; ibqp = nes_get_qp(cm_id->device, conn_param->qpn); @@ -2435,8 +2437,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; /* Produce hash key */ - nesqp->hte_index = cpu_to_be32( - crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); + crc_value = get_crc_value(&nes_quad); + nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); @@ -2750,6 +2752,7 @@ void cm_event_connected(struct nes_cm_event *event) struct iw_cm_event cm_event; struct nes_hw_qp_wqe *wqe; struct nes_v4_quad nes_quad; + u32 crc_value; int ret; /* get all our handles */ @@ -2827,8 +2830,8 @@ void cm_event_connected(struct nes_cm_event *event) nes_quad.TcpPorts[1] 
= cm_id->local_addr.sin_port; /* Produce hash key */ - nesqp->hte_index = cpu_to_be32( - crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); + crc_value = get_crc_value(&nes_quad); + nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 7c4c0fbf0ab..49e53e4c1eb 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -156,15 +156,14 @@ static void nes_nic_tune_timer(struct nes_device *nesdev) spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); - if (shared_timer->cq_count_old < cq_count) { - if (cq_count > shared_timer->threshold_low) - shared_timer->cq_direction_downward=0; - } - if (shared_timer->cq_count_old >= cq_count) + if (shared_timer->cq_count_old <= cq_count) + shared_timer->cq_direction_downward = 0; + else shared_timer->cq_direction_downward++; shared_timer->cq_count_old = cq_count; if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) { - if (cq_count <= shared_timer->threshold_low) { + if (cq_count <= shared_timer->threshold_low && + shared_timer->threshold_low > 4) { shared_timer->threshold_low = shared_timer->threshold_low/2; shared_timer->cq_direction_downward=0; nesdev->currcq_count = 0; @@ -1728,7 +1727,6 @@ int nes_napi_isr(struct nes_device *nesdev) nesdev->int_req &= ~NES_INT_TIMER; nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); - nesadapter->tune_timer.timer_in_use_old = 0; } nesdev->deepcq_count = 0; return 1; @@ -1867,7 +1865,6 @@ void nes_dpc(unsigned long param) nesdev->int_req &= ~NES_INT_TIMER; nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); - nesdev->nesadapter->tune_timer.timer_in_use_old = 0; } else { nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); } diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index 1e10df550c9..b7e2844f096 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h @@ -962,7 +962,7 @@ struct nes_arp_entry { #define DEFAULT_JUMBO_NES_QL_LOW 12 #define DEFAULT_JUMBO_NES_QL_TARGET 40 #define DEFAULT_JUMBO_NES_QL_HIGH 128 -#define NES_NIC_CQ_DOWNWARD_TREND 8 +#define NES_NIC_CQ_DOWNWARD_TREND 16 struct nes_hw_tune_timer { //u16 cq_count; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 4dafbe16e82..a651e9d9f0e 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -929,7 +929,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev, NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db); nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n", nespd->mmap_db_index, nespd->pd_id); - if (nespd->mmap_db_index > NES_MAX_USER_DB_REGIONS) { + if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) { nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n"); nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); kfree(nespd); @@ -1327,7 +1327,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, (long long unsigned int)req.user_wqe_buffers); nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); kfree(nesqp->allocated_buffer); - return ERR_PTR(-ENOMEM); + return 
ERR_PTR(-EFAULT); } } @@ -1674,6 +1674,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, } nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n", (unsigned long)req.user_cq_buffer, entries); + err = 1; list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) { if (nespbl->user_base == (unsigned long )req.user_cq_buffer) { list_del(&nespbl->list); @@ -1686,7 +1687,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, if (err) { nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); kfree(nescq); - return ERR_PTR(err); + return ERR_PTR(-EFAULT); } pbl_entries = nespbl->pbl_size >> 3; @@ -1831,9 +1832,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, spin_unlock_irqrestore(&nesdev->cqp.lock, flags); } } - nes_debug(NES_DBG_CQ, "iWARP CQ%u create timeout expired, major code = 0x%04X," - " minor code = 0x%04X\n", - nescq->hw_cq.cq_number, cqp_request->major_code, cqp_request->minor_code); if (!context) pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, nescq->hw_cq.cq_pbase); diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 8b10d9f23be..c5263d63aca 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -42,14 +42,14 @@ config INPUT_M68K_BEEP config INPUT_APANEL tristate "Fujitsu Lifebook Application Panel buttons" - depends on X86 - select I2C_I801 + depends on X86 && I2C && LEDS_CLASS select INPUT_POLLDEV select CHECK_SIGNATURE help Say Y here for support of the Application Panel buttons, used on Fujitsu Lifebook. These are attached to the mainboard through - an SMBus interface managed by the I2C Intel ICH (i801) driver. + an SMBus interface managed by the I2C Intel ICH (i801) driver, + which you should also build for this kernel. To compile this driver as a module, choose M here: the module will be called apanel. 
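The get_crc_value() helper in the nes.h hunk above carries the reasoning in its comment: since commit ef19454b the crc32c() result is kept in CPU byte order, while the driver's connection hashing expects the previous, endian-independent (little-endian) representation, hence the cpu_to_le32() conversion. Below is a minimal userspace sketch of that idea only, not the driver's code: crc32c_sw(), put_le32() and the sample four-tuple bytes are illustrative stand-ins for the kernel's crc32c(), cpu_to_le32() and struct nes_v4_quad, and the seed/final-XOR conventions are the generic CRC-32C ones rather than the driver's exact call pattern.

#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC-32C (Castagnoli, reflected polynomial 0x82F63B78).
 * Slow but dependency-free; enough to illustrate the point. */
static uint32_t crc32c_sw(const uint8_t *p, size_t len)
{
    uint32_t crc = ~0u;

    while (len--) {
        crc ^= *p++;
        for (int k = 0; k < 8; k++)
            crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
    }
    return ~crc;
}

/* Userspace stand-in for cpu_to_le32(): store the CRC in a fixed
 * little-endian byte layout so big- and little-endian hosts end up
 * with byte-for-byte identical values. */
static void put_le32(uint8_t out[4], uint32_t v)
{
    out[0] = (uint8_t)(v & 0xff);
    out[1] = (uint8_t)((v >> 8) & 0xff);
    out[2] = (uint8_t)((v >> 16) & 0xff);
    out[3] = (uint8_t)((v >> 24) & 0xff);
}

int main(void)
{
    /* Stand-in for the connection four-tuple; in the driver the quad
     * already holds wire-format (big-endian) addresses and ports, so
     * its raw bytes are identical on every host. */
    static const uint8_t quad[12] = {
        0xc0, 0xa8, 0x00, 0x01,  /* 192.168.0.1 */
        0xc0, 0xa8, 0x00, 0x02,  /* 192.168.0.2 */
        0x04, 0xd2, 0x10, 0xe1,  /* ports 1234, 4321 */
    };
    uint8_t stable[4];
    uint32_t crc = crc32c_sw(quad, sizeof(quad));

    put_le32(stable, crc);
    printf("crc32c = 0x%08x, stored bytes: %02x %02x %02x %02x\n",
           crc, stable[0], stable[1], stable[2], stable[3]);
    return 0;
}

Built with e.g. gcc -std=c99 -Wall, the "stored bytes" line comes out identical on big- and little-endian machines; only that fixed-order byte representation, not the host's in-memory layout of the 32-bit value, is safe to hand to hardware or to a hash shared with it, which is the kind of stability the nes.h comment is after.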
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c index 7993e01f9fc..76043dedba5 100644 --- a/drivers/isdn/hisax/hisax_fcpcipnp.c +++ b/drivers/isdn/hisax/hisax_fcpcipnp.c @@ -725,23 +725,6 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter) switch (adapter->type) { case AVM_FRITZ_PCIV2: - retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED, - "fcpcipnp", adapter); - break; - case AVM_FRITZ_PCI: - retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED, - "fcpcipnp", adapter); - break; - case AVM_FRITZ_PNP: - retval = request_irq(adapter->irq, fcpci_irq, 0, - "fcpcipnp", adapter); - break; - } - if (retval) - goto err_region; - - switch (adapter->type) { - case AVM_FRITZ_PCIV2: case AVM_FRITZ_PCI: val = inl(adapter->io); break; @@ -796,6 +779,23 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter) switch (adapter->type) { case AVM_FRITZ_PCIV2: + retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED, + "fcpcipnp", adapter); + break; + case AVM_FRITZ_PCI: + retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED, + "fcpcipnp", adapter); + break; + case AVM_FRITZ_PNP: + retval = request_irq(adapter->irq, fcpci_irq, 0, + "fcpcipnp", adapter); + break; + } + if (retval) + goto err_region; + + switch (adapter->type) { + case AVM_FRITZ_PCIV2: fcpci2_init(adapter); isacsx_setup(&adapter->isac); break; diff --git a/drivers/isdn/i4l/isdn_ttyfax.c b/drivers/isdn/i4l/isdn_ttyfax.c index f93de4a3035..78f7660c1d0 100644 --- a/drivers/isdn/i4l/isdn_ttyfax.c +++ b/drivers/isdn/i4l/isdn_ttyfax.c @@ -906,7 +906,8 @@ isdn_tty_cmd_FCLASS2(char **p, modem_info * info) sprintf(rs, "\r\n0-2"); isdn_tty_at_cout(rs, info); } else { - if ((f->phase != ISDN_FAX_PHASE_D) || (!info->faxonline & 1)) + if ((f->phase != ISDN_FAX_PHASE_D) || + (!(info->faxonline & 1))) PARSE_ERROR1; par = isdn_getnum(p); if ((par < 0) || (par > 2)) diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index 655ef9a3f4d..a335c85a736 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c @@ -1289,7 +1289,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card) } break; case ISDN_CMD_CLREAZ: - if (!card->flags & ISDNLOOP_FLAGS_RUNNING) + if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) return -ENODEV; if (card->leased) break; @@ -1333,7 +1333,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card) } break; case ISDN_CMD_SETL3: - if (!card->flags & ISDNLOOP_FLAGS_RUNNING) + if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) return -ENODEV; return 0; default: @@ -1380,7 +1380,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel) isdnloop_card *card = isdnloop_findcard(id); if (card) { - if (!card->flags & ISDNLOOP_FLAGS_RUNNING) + if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) return -ENODEV; return (isdnloop_writecmd(buf, len, 1, card)); } diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 7aeceedcf7d..831aed9c56f 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1047,6 +1047,11 @@ void bitmap_daemon_work(struct bitmap *bitmap) if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) return; bitmap->daemon_lastrun = jiffies; + if (bitmap->allclean) { + bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; + return; + } + bitmap->allclean = 1; for (j = 0; j < bitmap->chunks; j++) { bitmap_counter_t *bmc; @@ -1068,8 +1073,10 @@ void bitmap_daemon_work(struct bitmap *bitmap) clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); 
spin_unlock_irqrestore(&bitmap->lock, flags); - if (need_write) + if (need_write) { write_page(bitmap, page, 0); + bitmap->allclean = 0; + } continue; } @@ -1098,6 +1105,9 @@ void bitmap_daemon_work(struct bitmap *bitmap) /* if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc); */ + if (*bmc) + bitmap->allclean = 0; + if (*bmc == 2) { *bmc=1; /* maybe clear the bit next time */ set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); @@ -1132,6 +1142,8 @@ void bitmap_daemon_work(struct bitmap *bitmap) } } + if (bitmap->allclean == 0) + bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ; } static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, @@ -1226,6 +1238,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect sectors -= blocks; else sectors = 0; } + bitmap->allclean = 0; return 0; } @@ -1296,6 +1309,7 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, } } spin_unlock_irq(&bitmap->lock); + bitmap->allclean = 0; return rv; } @@ -1332,6 +1346,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int ab } unlock: spin_unlock_irqrestore(&bitmap->lock, flags); + bitmap->allclean = 0; } void bitmap_close_sync(struct bitmap *bitmap) @@ -1399,7 +1414,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); } spin_unlock_irq(&bitmap->lock); - + bitmap->allclean = 0; } /* dirty the memory and file bits for bitmap chunks "s" to "e" */ diff --git a/drivers/md/md.c b/drivers/md/md.c index 7da6ec244e1..827824a9f3e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1105,7 +1105,11 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; if (rdev->sb_size & bmask) - rdev-> sb_size = (rdev->sb_size | bmask)+1; + rdev->sb_size = (rdev->sb_size | bmask) + 1; + + if (minor_version + && rdev->data_offset < sb_offset + (rdev->sb_size/512)) + return -EINVAL; if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) rdev->desc_nr = -1; @@ -1137,7 +1141,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) else ret = 0; } - if (minor_version) + if (minor_version) rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; else rdev->size = rdev->sb_offset; @@ -1499,7 +1503,8 @@ static void export_rdev(mdk_rdev_t * rdev) free_disk_sb(rdev); list_del_init(&rdev->same_set); #ifndef MODULE - md_autodetect_dev(rdev->bdev->bd_dev); + if (test_bit(AutoDetected, &rdev->flags)) + md_autodetect_dev(rdev->bdev->bd_dev); #endif unlock_rdev(rdev); kobject_put(&rdev->kobj); @@ -1996,9 +2001,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) char *e; unsigned long long size = simple_strtoull(buf, &e, 10); unsigned long long oldsize = rdev->size; + mddev_t *my_mddev = rdev->mddev; + if (e==buf || (*e && *e != '\n')) return -EINVAL; - if (rdev->mddev->pers) + if (my_mddev->pers) return -EBUSY; rdev->size = size; if (size > oldsize && rdev->mddev->external) { @@ -2011,7 +2018,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) int overlap = 0; struct list_head *tmp, *tmp2; - mddev_unlock(rdev->mddev); + mddev_unlock(my_mddev); for_each_mddev(mddev, tmp) { mdk_rdev_t *rdev2; @@ -2031,7 +2038,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) break; } } - mddev_lock(rdev->mddev); + mddev_lock(my_mddev); if (overlap) { /* 
Someone else could have slipped in a size * change here, but doing so is just silly. @@ -2043,8 +2050,8 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) return -EBUSY; } } - if (size < rdev->mddev->size || rdev->mddev->size == 0) - rdev->mddev->size = size; + if (size < my_mddev->size || my_mddev->size == 0) + my_mddev->size = size; return len; } @@ -2065,10 +2072,21 @@ rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); + mddev_t *mddev = rdev->mddev; + ssize_t rv; if (!entry->show) return -EIO; - return entry->show(rdev, page); + + rv = mddev ? mddev_lock(mddev) : -EBUSY; + if (!rv) { + if (rdev->mddev == NULL) + rv = -EBUSY; + else + rv = entry->show(rdev, page); + mddev_unlock(mddev); + } + return rv; } static ssize_t @@ -2077,15 +2095,19 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, { struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); - int rv; + ssize_t rv; + mddev_t *mddev = rdev->mddev; if (!entry->store) return -EIO; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - rv = mddev_lock(rdev->mddev); + rv = mddev ? mddev_lock(mddev): -EBUSY; if (!rv) { - rv = entry->store(rdev, page, length); + if (rdev->mddev == NULL) + rv = -EBUSY; + else + rv = entry->store(rdev, page, length); mddev_unlock(rdev->mddev); } return rv; @@ -5351,6 +5373,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi) mddev->ro = 0; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); + md_wakeup_thread(mddev->sync_thread); } atomic_inc(&mddev->writes_pending); if (mddev->in_sync) { @@ -6021,6 +6044,7 @@ static void autostart_arrays(int part) MD_BUG(); continue; } + set_bit(AutoDetected, &rdev->flags); list_add(&rdev->same_set, &pending_raid_disks); i_passed++; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 5c7fef091ce..ff61b309129 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -592,6 +592,37 @@ static int raid1_congested(void *data, int bits) } +static int flush_pending_writes(conf_t *conf) +{ + /* Any writes that have been queued but are awaiting + * bitmap updates get flushed here. + * We return 1 if any requests were actually submitted. + */ + int rv = 0; + + spin_lock_irq(&conf->device_lock); + + if (conf->pending_bio_list.head) { + struct bio *bio; + bio = bio_list_get(&conf->pending_bio_list); + blk_remove_plug(conf->mddev->queue); + spin_unlock_irq(&conf->device_lock); + /* flush any pending bitmap writes to + * disk before proceeding w/ I/O */ + bitmap_unplug(conf->mddev->bitmap); + + while (bio) { /* submit pending writes */ + struct bio *next = bio->bi_next; + bio->bi_next = NULL; + generic_make_request(bio); + bio = next; + } + rv = 1; + } else + spin_unlock_irq(&conf->device_lock); + return rv; +} + /* Barriers.... * Sometimes we need to suspend IO while we do something else, * either some resync/recovery, or reconfigure the array. @@ -673,15 +704,23 @@ static void freeze_array(conf_t *conf) /* stop syncio and normal IO and wait for everything to * go quite. * We increment barrier and nr_waiting, and then - * wait until barrier+nr_pending match nr_queued+2 + * wait until nr_pending match nr_queued+1 + * This is called in the context of one normal IO request + * that has failed. 
Thus any sync request that might be pending + * will be blocked by nr_pending, and we need to wait for + * pending IO requests to complete or be queued for re-try. + * Thus the number queued (nr_queued) plus this request (1) + * must match the number of pending IOs (nr_pending) before + * we continue. */ spin_lock_irq(&conf->resync_lock); conf->barrier++; conf->nr_waiting++; wait_event_lock_irq(conf->wait_barrier, - conf->barrier+conf->nr_pending == conf->nr_queued+2, + conf->nr_pending == conf->nr_queued+1, conf->resync_lock, - raid1_unplug(conf->mddev->queue)); + ({ flush_pending_writes(conf); + raid1_unplug(conf->mddev->queue); })); spin_unlock_irq(&conf->resync_lock); } static void unfreeze_array(conf_t *conf) @@ -907,6 +946,9 @@ static int make_request(struct request_queue *q, struct bio * bio) blk_plug_device(mddev->queue); spin_unlock_irqrestore(&conf->device_lock, flags); + /* In case raid1d snuck into freeze_array */ + wake_up(&conf->wait_barrier); + if (do_sync) md_wakeup_thread(mddev->thread); #if 0 @@ -1473,28 +1515,14 @@ static void raid1d(mddev_t *mddev) for (;;) { char b[BDEVNAME_SIZE]; - spin_lock_irqsave(&conf->device_lock, flags); - - if (conf->pending_bio_list.head) { - bio = bio_list_get(&conf->pending_bio_list); - blk_remove_plug(mddev->queue); - spin_unlock_irqrestore(&conf->device_lock, flags); - /* flush any pending bitmap writes to disk before proceeding w/ I/O */ - bitmap_unplug(mddev->bitmap); - while (bio) { /* submit pending writes */ - struct bio *next = bio->bi_next; - bio->bi_next = NULL; - generic_make_request(bio); - bio = next; - } - unplug = 1; + unplug += flush_pending_writes(conf); - continue; - } - - if (list_empty(head)) + spin_lock_irqsave(&conf->device_lock, flags); + if (list_empty(head)) { + spin_unlock_irqrestore(&conf->device_lock, flags); break; + } r1_bio = list_entry(head->prev, r1bio_t, retry_list); list_del(head->prev); conf->nr_queued--; @@ -1590,7 +1618,6 @@ static void raid1d(mddev_t *mddev) } } } - spin_unlock_irqrestore(&conf->device_lock, flags); if (unplug) unplug_slaves(mddev); } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 017f58113c3..32389d2f18f 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -537,7 +537,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) current_distance = abs(r10_bio->devs[slot].addr - conf->mirrors[disk].head_position); - /* Find the disk whose head is closest */ + /* Find the disk whose head is closest, + * or - for far > 1 - find the closest to partition beginning */ for (nslot = slot; nslot < conf->copies; nslot++) { int ndisk = r10_bio->devs[nslot].devnum; @@ -557,8 +558,13 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) slot = nslot; break; } - new_distance = abs(r10_bio->devs[nslot].addr - - conf->mirrors[ndisk].head_position); + + /* for far > 1 always use the lowest address */ + if (conf->far_copies > 1) + new_distance = r10_bio->devs[nslot].addr; + else + new_distance = abs(r10_bio->devs[nslot].addr - + conf->mirrors[ndisk].head_position); if (new_distance < current_distance) { current_distance = new_distance; disk = ndisk; @@ -629,7 +635,36 @@ static int raid10_congested(void *data, int bits) return ret; } - +static int flush_pending_writes(conf_t *conf) +{ + /* Any writes that have been queued but are awaiting + * bitmap updates get flushed here. + * We return 1 if any requests were actually submitted. 
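/* A sketch of the selection metric introduced by the read_balance()
 * change above; the helper name is illustrative.  For 'far' layouts
 * with more than one far copy the copy nearest the start of the device
 * wins, otherwise the usual seek distance from the current head
 * position is used, and the smallest value is chosen. */
#include <linux/types.h>

static sector_t read_balance_metric(sector_t addr, sector_t head_position,
				    int far_copies)
{
	if (far_copies > 1)
		return addr;
	return addr >= head_position ? addr - head_position
				     : head_position - addr;
}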
+ */ + int rv = 0; + + spin_lock_irq(&conf->device_lock); + + if (conf->pending_bio_list.head) { + struct bio *bio; + bio = bio_list_get(&conf->pending_bio_list); + blk_remove_plug(conf->mddev->queue); + spin_unlock_irq(&conf->device_lock); + /* flush any pending bitmap writes to disk + * before proceeding w/ I/O */ + bitmap_unplug(conf->mddev->bitmap); + + while (bio) { /* submit pending writes */ + struct bio *next = bio->bi_next; + bio->bi_next = NULL; + generic_make_request(bio); + bio = next; + } + rv = 1; + } else + spin_unlock_irq(&conf->device_lock); + return rv; +} /* Barriers.... * Sometimes we need to suspend IO while we do something else, * either some resync/recovery, or reconfigure the array. @@ -712,15 +747,23 @@ static void freeze_array(conf_t *conf) /* stop syncio and normal IO and wait for everything to * go quiet. * We increment barrier and nr_waiting, and then - * wait until barrier+nr_pending match nr_queued+2 + * wait until nr_pending match nr_queued+1 + * This is called in the context of one normal IO request + * that has failed. Thus any sync request that might be pending + * will be blocked by nr_pending, and we need to wait for + * pending IO requests to complete or be queued for re-try. + * Thus the number queued (nr_queued) plus this request (1) + * must match the number of pending IOs (nr_pending) before + * we continue. */ spin_lock_irq(&conf->resync_lock); conf->barrier++; conf->nr_waiting++; wait_event_lock_irq(conf->wait_barrier, - conf->barrier+conf->nr_pending == conf->nr_queued+2, + conf->nr_pending == conf->nr_queued+1, conf->resync_lock, - raid10_unplug(conf->mddev->queue)); + ({ flush_pending_writes(conf); + raid10_unplug(conf->mddev->queue); })); spin_unlock_irq(&conf->resync_lock); } @@ -892,6 +935,9 @@ static int make_request(struct request_queue *q, struct bio * bio) blk_plug_device(mddev->queue); spin_unlock_irqrestore(&conf->device_lock, flags); + /* In case raid10d snuck in to freeze_array */ + wake_up(&conf->wait_barrier); + if (do_sync) md_wakeup_thread(mddev->thread); @@ -1464,28 +1510,14 @@ static void raid10d(mddev_t *mddev) for (;;) { char b[BDEVNAME_SIZE]; - spin_lock_irqsave(&conf->device_lock, flags); - if (conf->pending_bio_list.head) { - bio = bio_list_get(&conf->pending_bio_list); - blk_remove_plug(mddev->queue); - spin_unlock_irqrestore(&conf->device_lock, flags); - /* flush any pending bitmap writes to disk before proceeding w/ I/O */ - bitmap_unplug(mddev->bitmap); - - while (bio) { /* submit pending writes */ - struct bio *next = bio->bi_next; - bio->bi_next = NULL; - generic_make_request(bio); - bio = next; - } - unplug = 1; - - continue; - } + unplug += flush_pending_writes(conf); - if (list_empty(head)) + spin_lock_irqsave(&conf->device_lock, flags); + if (list_empty(head)) { + spin_unlock_irqrestore(&conf->device_lock, flags); break; + } r10_bio = list_entry(head->prev, r10bio_t, retry_list); list_del(head->prev); conf->nr_queued--; @@ -1548,7 +1580,6 @@ static void raid10d(mddev_t *mddev) } } } - spin_unlock_irqrestore(&conf->device_lock, flags); if (unplug) unplug_slaves(mddev); } @@ -1787,6 +1818,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i if (j == conf->copies) { /* Cannot recover, so abort the recovery */ put_buf(r10_bio); + if (rb2) + atomic_dec(&rb2->remaining); r10_bio = rb2; if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery)) printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n", diff --git a/drivers/message/fusion/mptbase.c 
b/drivers/message/fusion/mptbase.c index 0c303c84b37..6b6df867958 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -632,8 +632,7 @@ mpt_deregister(u8 cb_idx) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_event_register - Register protocol-specific event callback - * handler. + * mpt_event_register - Register protocol-specific event callback handler. * @cb_idx: previously registered (via mpt_register) callback handle * @ev_cbfunc: callback function * @@ -654,8 +653,7 @@ mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_event_deregister - Deregister protocol-specific event callback - * handler. + * mpt_event_deregister - Deregister protocol-specific event callback handler * @cb_idx: previously registered callback handle * * Each protocol-specific driver should call this routine @@ -765,11 +763,13 @@ mpt_device_driver_deregister(u8 cb_idx) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_get_msg_frame - Obtain a MPT request frame from the pool (of 1024) - * allocated per MPT adapter. + * mpt_get_msg_frame - Obtain an MPT request frame from the pool * @cb_idx: Handle of registered MPT protocol driver * @ioc: Pointer to MPT adapter structure * + * Obtain an MPT request frame from the pool (of 1024) that are + * allocated per MPT adapter. + * * Returns pointer to a MPT request frame or %NULL if none are available * or IOC is not active. */ @@ -834,13 +834,12 @@ mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_put_msg_frame - Send a protocol specific MPT request frame - * to a IOC. + * mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC * @cb_idx: Handle of registered MPT protocol driver * @ioc: Pointer to MPT adapter structure * @mf: Pointer to MPT request frame * - * This routine posts a MPT request frame to the request post FIFO of a + * This routine posts an MPT request frame to the request post FIFO of a * specific MPT adapter. */ void @@ -868,13 +867,15 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) } /** - * mpt_put_msg_frame_hi_pri - Send a protocol specific MPT request frame - * to a IOC using hi priority request queue. + * mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame * @cb_idx: Handle of registered MPT protocol driver * @ioc: Pointer to MPT adapter structure * @mf: Pointer to MPT request frame * - * This routine posts a MPT request frame to the request post FIFO of a + * Send a protocol-specific MPT request frame to an IOC using + * hi-priority request queue. + * + * This routine posts an MPT request frame to the request post FIFO of a * specific MPT adapter. **/ void diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index af1de0ccee2..0c252f60c4c 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -1533,7 +1533,7 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx) * * Remark: Currently invoked from a non-interrupt thread (_bh). * - * Remark: With old EH code, at most 1 SCSI TaskMgmt function per IOC + * Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC * will be active. * * Returns 0 for SUCCESS, or %FAILED. 
@@ -2537,14 +2537,12 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR /** * mptscsih_get_scsi_lookup - * - * retrieves scmd entry from ScsiLookup[] array list - * * @ioc: Pointer to MPT_ADAPTER structure * @i: index into the array * - * Returns the scsi_cmd pointer + * retrieves scmd entry from ScsiLookup[] array list * + * Returns the scsi_cmd pointer **/ static struct scsi_cmnd * mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) @@ -2561,14 +2559,12 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) /** * mptscsih_getclear_scsi_lookup - * - * retrieves and clears scmd entry from ScsiLookup[] array list - * * @ioc: Pointer to MPT_ADAPTER structure * @i: index into the array * - * Returns the scsi_cmd pointer + * retrieves and clears scmd entry from ScsiLookup[] array list * + * Returns the scsi_cmd pointer **/ static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index afd82966f9a..13bac53db69 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -48,31 +48,13 @@ struct sm501_devdata { unsigned int pdev_id; unsigned int irq; void __iomem *regs; + unsigned int rev; }; #define MHZ (1000 * 1000) #ifdef DEBUG -static const unsigned int misc_div[] = { - [0] = 1, - [1] = 2, - [2] = 4, - [3] = 8, - [4] = 16, - [5] = 32, - [6] = 64, - [7] = 128, - [8] = 3, - [9] = 6, - [10] = 12, - [11] = 24, - [12] = 48, - [13] = 96, - [14] = 192, - [15] = 384, -}; - -static const unsigned int px_div[] = { +static const unsigned int div_tab[] = { [0] = 1, [1] = 2, [2] = 4, @@ -101,12 +83,12 @@ static const unsigned int px_div[] = { static unsigned long decode_div(unsigned long pll2, unsigned long val, unsigned int lshft, unsigned int selbit, - unsigned long mask, const unsigned int *dtab) + unsigned long mask) { if (val & selbit) pll2 = 288 * MHZ; - return pll2 / dtab[(val >> lshft) & mask]; + return pll2 / div_tab[(val >> lshft) & mask]; } #define fmt_freq(x) ((x) / MHZ), ((x) % MHZ), (x) @@ -141,10 +123,10 @@ static void sm501_dump_clk(struct sm501_devdata *sm) } sdclk0 = (misct & (1<<12)) ? pll2 : 288 * MHZ; - sdclk0 /= misc_div[((misct >> 8) & 0xf)]; + sdclk0 /= div_tab[((misct >> 8) & 0xf)]; sdclk1 = (misct & (1<<20)) ? pll2 : 288 * MHZ; - sdclk1 /= misc_div[((misct >> 16) & 0xf)]; + sdclk1 /= div_tab[((misct >> 16) & 0xf)]; dev_dbg(sm->dev, "MISCT=%08lx, PM0=%08lx, PM1=%08lx\n", misct, pm0, pm1); @@ -158,19 +140,19 @@ static void sm501_dump_clk(struct sm501_devdata *sm) "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", (pmc & 3 ) == 0 ? '*' : '-', - fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31, px_div)), - fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15, misc_div)), - fmt_freq(decode_div(pll2, pm0, 8, 1<<12, 15, misc_div)), - fmt_freq(decode_div(pll2, pm0, 0, 1<<4, 15, misc_div))); + fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31)), + fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15)), + fmt_freq(decode_div(pll2, pm0, 8, 1<<12, 15)), + fmt_freq(decode_div(pll2, pm0, 0, 1<<4, 15))); dev_dbg(sm->dev, "PM1[%c]: " "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", (pmc & 3 ) == 1 ? 
'*' : '-', - fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31, px_div)), - fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15, misc_div)), - fmt_freq(decode_div(pll2, pm1, 8, 1<<12, 15, misc_div)), - fmt_freq(decode_div(pll2, pm1, 0, 1<<4, 15, misc_div))); + fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31)), + fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15)), + fmt_freq(decode_div(pll2, pm1, 8, 1<<12, 15)), + fmt_freq(decode_div(pll2, pm1, 0, 1<<4, 15))); } static void sm501_dump_regs(struct sm501_devdata *sm) @@ -436,46 +418,108 @@ struct sm501_clock { unsigned long mclk; int divider; int shift; + unsigned int m, n, k; }; +/* sm501_calc_clock + * + * Calculates the nearest discrete clock frequency that + * can be achieved with the specified input clock. + * the maximum divisor is 3 or 5 + */ + +static int sm501_calc_clock(unsigned long freq, + struct sm501_clock *clock, + int max_div, + unsigned long mclk, + long *best_diff) +{ + int ret = 0; + int divider; + int shift; + long diff; + + /* try dividers 1 and 3 for CRT and for panel, + try divider 5 for panel only.*/ + + for (divider = 1; divider <= max_div; divider += 2) { + /* try all 8 shift values.*/ + for (shift = 0; shift < 8; shift++) { + /* Calculate difference to requested clock */ + diff = sm501fb_round_div(mclk, divider << shift) - freq; + if (diff < 0) + diff = -diff; + + /* If it is less than the current, use it */ + if (diff < *best_diff) { + *best_diff = diff; + + clock->mclk = mclk; + clock->divider = divider; + clock->shift = shift; + ret = 1; + } + } + } + + return ret; +} + +/* sm501_calc_pll + * + * Calculates the nearest discrete clock frequency that can be + * achieved using the programmable PLL. + * the maximum divisor is 3 or 5 + */ + +static unsigned long sm501_calc_pll(unsigned long freq, + struct sm501_clock *clock, + int max_div) +{ + unsigned long mclk; + unsigned int m, n, k; + long best_diff = 999999999; + + /* + * The SM502 datasheet doesn't specify the min/max values for M and N. + * N = 1 at least doesn't work in practice. + */ + for (m = 2; m <= 255; m++) { + for (n = 2; n <= 127; n++) { + for (k = 0; k <= 1; k++) { + mclk = (24000000UL * m / n) >> k; + + if (sm501_calc_clock(freq, clock, max_div, + mclk, &best_diff)) { + clock->m = m; + clock->n = n; + clock->k = k; + } + } + } + } + + /* Return best clock. */ + return clock->mclk / (clock->divider << clock->shift); +} + /* sm501_select_clock * - * selects nearest discrete clock frequency the SM501 can achive + * Calculates the nearest discrete clock frequency that can be + * achieved using the 288MHz and 336MHz PLLs. * the maximum divisor is 3 or 5 */ + static unsigned long sm501_select_clock(unsigned long freq, struct sm501_clock *clock, int max_div) { unsigned long mclk; - int divider; - int shift; - long diff; long best_diff = 999999999; /* Try 288MHz and 336MHz clocks. */ for (mclk = 288000000; mclk <= 336000000; mclk += 48000000) { - /* try dividers 1 and 3 for CRT and for panel, - try divider 5 for panel only.*/ - - for (divider = 1; divider <= max_div; divider += 2) { - /* try all 8 shift values.*/ - for (shift = 0; shift < 8; shift++) { - /* Calculate difference to requested clock */ - diff = sm501fb_round_div(mclk, divider << shift) - freq; - if (diff < 0) - diff = -diff; - - /* If it is less than the current, use it */ - if (diff < best_diff) { - best_diff = diff; - - clock->mclk = mclk; - clock->divider = divider; - clock->shift = shift; - } - } - } + sm501_calc_clock(freq, clock, max_div, mclk, &best_diff); } /* Return best clock. 
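/* A stand-alone model of the sm501_calc_pll() search above, assuming a
 * 24MHz reference, max_div = 5 and plain truncating division in place
 * of sm501fb_round_div(); the requested frequency and all names are
 * illustrative only. */
#include <stdio.h>

int main(void)
{
	unsigned long req = 25175000;	/* e.g. a VGA pixel clock */
	unsigned long best = 0;
	long long best_diff = 999999999;
	unsigned int m, n, k;
	int divider, shift;

	for (m = 2; m <= 255; m++)
		for (n = 2; n <= 127; n++)
			for (k = 0; k <= 1; k++) {
				unsigned long mclk = (24000000ULL * m / n) >> k;

				for (divider = 1; divider <= 5; divider += 2)
					for (shift = 0; shift < 8; shift++) {
						unsigned long f = mclk / (divider << shift);
						long long diff = (long long)f - (long long)req;

						if (diff < 0)
							diff = -diff;
						if (diff < best_diff) {
							best_diff = diff;
							best = f;
						}
					}
			}

	printf("closest achievable clock: %lu Hz (off by %lld Hz)\n",
	       best, best_diff);
	return 0;
}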
*/ @@ -497,6 +541,7 @@ unsigned long sm501_set_clock(struct device *dev, unsigned long gate = readl(sm->regs + SM501_CURRENT_GATE); unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK); unsigned char reg; + unsigned int pll_reg = 0; unsigned long sm501_freq; /* the actual frequency acheived */ struct sm501_clock to; @@ -511,14 +556,28 @@ unsigned long sm501_set_clock(struct device *dev, * requested frequency the value must be multiplied by * 2. This clock also has an additional pre divisor */ - sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2); - reg=to.shift & 0x07;/* bottom 3 bits are shift */ - if (to.divider == 3) - reg |= 0x08; /* /3 divider required */ - else if (to.divider == 5) - reg |= 0x10; /* /5 divider required */ - if (to.mclk != 288000000) - reg |= 0x20; /* which mclk pll is source */ + if (sm->rev >= 0xC0) { + /* SM502 -> use the programmable PLL */ + sm501_freq = (sm501_calc_pll(2 * req_freq, + &to, 5) / 2); + reg = to.shift & 0x07;/* bottom 3 bits are shift */ + if (to.divider == 3) + reg |= 0x08; /* /3 divider required */ + else if (to.divider == 5) + reg |= 0x10; /* /5 divider required */ + reg |= 0x40; /* select the programmable PLL */ + pll_reg = 0x20000 | (to.k << 15) | (to.n << 8) | to.m; + } else { + sm501_freq = (sm501_select_clock(2 * req_freq, + &to, 5) / 2); + reg = to.shift & 0x07;/* bottom 3 bits are shift */ + if (to.divider == 3) + reg |= 0x08; /* /3 divider required */ + else if (to.divider == 5) + reg |= 0x10; /* /5 divider required */ + if (to.mclk != 288000000) + reg |= 0x20; /* which mclk pll is source */ + } break; case SM501_CLOCK_V2XCLK: @@ -579,6 +638,10 @@ unsigned long sm501_set_clock(struct device *dev, } writel(mode, sm->regs + SM501_POWER_MODE_CONTROL); + + if (pll_reg) + writel(pll_reg, sm->regs + SM501_PROGRAMMABLE_PLL_CONTROL); + sm501_sync_regs(sm); dev_info(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n", @@ -599,15 +662,24 @@ EXPORT_SYMBOL_GPL(sm501_set_clock); * finds the closest available frequency for a given clock */ -unsigned long sm501_find_clock(int clksrc, +unsigned long sm501_find_clock(struct device *dev, + int clksrc, unsigned long req_freq) { + struct sm501_devdata *sm = dev_get_drvdata(dev); unsigned long sm501_freq; /* the frequency achiveable by the 501 */ struct sm501_clock to; switch (clksrc) { case SM501_CLOCK_P2XCLK: - sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2); + if (sm->rev >= 0xC0) { + /* SM502 -> use the programmable PLL */ + sm501_freq = (sm501_calc_pll(2 * req_freq, + &to, 5) / 2); + } else { + sm501_freq = (sm501_select_clock(2 * req_freq, + &to, 5) / 2); + } break; case SM501_CLOCK_V2XCLK: @@ -914,6 +986,8 @@ static int sm501_init_dev(struct sm501_devdata *sm) dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n", sm->regs, devid, (unsigned long)mem_avail >> 20, sm->irq); + sm->rev = devid & SM501_DEVICEID_REVMASK; + sm501_dump_gate(sm); ret = device_create_file(sm->dev, &dev_attr_dbg_regs); diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c index bb269d0c677..6cb781262f9 100644 --- a/drivers/misc/thinkpad_acpi.c +++ b/drivers/misc/thinkpad_acpi.c @@ -1078,7 +1078,8 @@ static int hotkey_get_tablet_mode(int *status) if (!acpi_evalf(hkey_handle, &s, "MHKG", "d")) return -EIO; - return ((s & TP_HOTKEY_TABLET_MASK) != 0); + *status = ((s & TP_HOTKEY_TABLET_MASK) != 0); + return 0; } /* diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 0fbf1bbbaee..d7a3ea88edd 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -1253,7 +1253,7 @@ static 
void __inline__ fec_request_intrs(struct net_device *dev) /* Setup interrupt handlers. */ for (idp = id; idp->name; idp++) { - if (request_irq(idp->irq, idp->handler, 0, idp->name, dev) != 0) + if (request_irq(idp->irq, idp->handler, IRQF_DISABLED, idp->name, dev) != 0) printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq); } @@ -1382,7 +1382,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) /* Setup interrupt handlers. */ for (idp = id; idp->name; idp++) { - if (request_irq(b+idp->irq, fec_enet_interrupt, 0, idp->name, dev) != 0) + if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name, dev) != 0) printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); } @@ -1553,7 +1553,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) /* Setup interrupt handlers. */ for (idp = id; idp->name; idp++) { - if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) + if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0) printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); } @@ -1680,7 +1680,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) /* Setup interrupt handlers. */ for (idp = id; idp->name; idp++) { - if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) + if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0) printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); } diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig index 1d3b84b4af3..553a9905299 100644 --- a/drivers/parisc/Kconfig +++ b/drivers/parisc/Kconfig @@ -103,6 +103,11 @@ config IOMMU_SBA depends on PCI_LBA default PCI_LBA +config IOMMU_HELPER + bool + depends on IOMMU_SBA || IOMMU_CCIO + default y + #config PCI_EPIC # bool "EPIC/SAGA PCI support" # depends on PCI diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index d08b284de19..60d338cd800 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -43,6 +43,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/scatterlist.h> +#include <linux/iommu-helper.h> #include <asm/byteorder.h> #include <asm/cache.h> /* for L1_CACHE_BYTES */ @@ -302,13 +303,17 @@ static int ioc_count; */ #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \ for(; res_ptr < res_end; ++res_ptr) { \ - if(0 == (*res_ptr & mask)) { \ - *res_ptr |= mask; \ - res_idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \ - ioc->res_hint = res_idx + (size >> 3); \ - goto resource_found; \ - } \ - } + int ret;\ + unsigned int idx;\ + idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \ + ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size);\ + if ((0 == (*res_ptr & mask)) && !ret) { \ + *res_ptr |= mask; \ + res_idx = idx;\ + ioc->res_hint = res_idx + (size >> 3); \ + goto resource_found; \ + } \ + } #define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \ u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \ @@ -341,10 +346,11 @@ static int ioc_count; * of available pages for the requested size. 
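/* A sketch of the segment-boundary test that CCIO_SEARCH_LOOP above and
 * sba_search_bitmap() further down now apply to every candidate range;
 * the helper name is illustrative.  ccio passes a shift of 0, while the
 * SBA code biases the page index by its IO virtual base instead. */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

static bool iovp_range_fits(struct device *dev, unsigned long first_page,
			    unsigned int pages_needed, unsigned int iovp_shift)
{
	/* Segment boundary mask converted into IOVP-sized pages. */
	unsigned long boundary_size =
		ALIGN(dma_get_seg_boundary(dev) + 1, 1UL << iovp_shift)
			>> iovp_shift;

	/* A non-zero return means the span would cross a boundary. */
	return !iommu_is_span_boundary(first_page, pages_needed, 0,
				       boundary_size);
}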
*/ static int -ccio_alloc_range(struct ioc *ioc, size_t size) +ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size) { unsigned int pages_needed = size >> IOVP_SHIFT; unsigned int res_idx; + unsigned long boundary_size; #ifdef CCIO_SEARCH_TIME unsigned long cr_start = mfctl(16); #endif @@ -360,6 +366,9 @@ ccio_alloc_range(struct ioc *ioc, size_t size) ** ggg sacrifices another 710 to the computer gods. */ + boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 1 << IOVP_SHIFT); + boundary_size >>= IOVP_SHIFT; + if (pages_needed <= 8) { /* * LAN traffic will not thrash the TLB IFF the same NIC @@ -760,7 +769,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size, ioc->msingle_pages += size >> IOVP_SHIFT; #endif - idx = ccio_alloc_range(ioc, size); + idx = ccio_alloc_range(ioc, dev, size); iovp = (dma_addr_t)MKIOVP(idx); pdir_start = &(ioc->pdir_base[idx]); diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h index 97ba8286c59..a9c46cc2db3 100644 --- a/drivers/parisc/iommu-helpers.h +++ b/drivers/parisc/iommu-helpers.h @@ -96,8 +96,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents, static inline unsigned int iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, - struct scatterlist *startsg, int nents, - int (*iommu_alloc_range)(struct ioc *, size_t)) + struct scatterlist *startsg, int nents, + int (*iommu_alloc_range)(struct ioc *, struct device *, size_t)) { struct scatterlist *contig_sg; /* contig chunk head */ unsigned long dma_offset, dma_len; /* start/len of DMA stream */ @@ -166,7 +166,7 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE); sg_dma_address(contig_sg) = PIDE_FLAG - | (iommu_alloc_range(ioc, dma_len) << IOVP_SHIFT) + | (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT) | dma_offset; n_mappings++; } diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index d06627c3f35..e834127a850 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -29,6 +29,7 @@ #include <linux/string.h> #include <linux/pci.h> #include <linux/scatterlist.h> +#include <linux/iommu-helper.h> #include <asm/byteorder.h> #include <asm/io.h> @@ -313,6 +314,12 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) #define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n))) #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) +unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr, + unsigned int bitshiftcnt) +{ + return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3) + + bitshiftcnt; +} /** * sba_search_bitmap - find free space in IO PDIR resource bitmap @@ -324,19 +331,36 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) * Cool perf optimization: search for log2(size) bits at a time. 
*/ static SBA_INLINE unsigned long -sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) +sba_search_bitmap(struct ioc *ioc, struct device *dev, + unsigned long bits_wanted) { unsigned long *res_ptr = ioc->res_hint; unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); - unsigned long pide = ~0UL; + unsigned long pide = ~0UL, tpide; + unsigned long boundary_size; + unsigned long shift; + int ret; + + boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 1 << IOVP_SHIFT); + boundary_size >>= IOVP_SHIFT; + +#if defined(ZX1_SUPPORT) + BUG_ON(ioc->ibase & ~IOVP_MASK); + shift = ioc->ibase >> IOVP_SHIFT; +#else + shift = 0; +#endif if (bits_wanted > (BITS_PER_LONG/2)) { /* Search word at a time - no mask needed */ for(; res_ptr < res_end; ++res_ptr) { - if (*res_ptr == 0) { + tpide = ptr_to_pide(ioc, res_ptr, 0); + ret = iommu_is_span_boundary(tpide, bits_wanted, + shift, + boundary_size); + if ((*res_ptr == 0) && !ret) { *res_ptr = RESMAP_MASK(bits_wanted); - pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); - pide <<= 3; /* convert to bit address */ + pide = tpide; break; } } @@ -365,11 +389,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) { DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); WARN_ON(mask == 0); - if(((*res_ptr) & mask) == 0) { + tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt); + ret = iommu_is_span_boundary(tpide, bits_wanted, + shift, + boundary_size); + if ((((*res_ptr) & mask) == 0) && !ret) { *res_ptr |= mask; /* mark resources busy! */ - pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); - pide <<= 3; /* convert to bit address */ - pide += bitshiftcnt; + pide = tpide; break; } mask >>= o; @@ -404,7 +430,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) * resource bit map. 
*/ static int -sba_alloc_range(struct ioc *ioc, size_t size) +sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size) { unsigned int pages_needed = size >> IOVP_SHIFT; #ifdef SBA_COLLECT_STATS @@ -412,9 +438,9 @@ sba_alloc_range(struct ioc *ioc, size_t size) #endif unsigned long pide; - pide = sba_search_bitmap(ioc, pages_needed); + pide = sba_search_bitmap(ioc, dev, pages_needed); if (pide >= (ioc->res_size << 3)) { - pide = sba_search_bitmap(ioc, pages_needed); + pide = sba_search_bitmap(ioc, dev, pages_needed); if (pide >= (ioc->res_size << 3)) panic("%s: I/O MMU @ %p is out of mapping resources\n", __FILE__, ioc->ioc_hpa); @@ -710,7 +736,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, ioc->msingle_calls++; ioc->msingle_pages += size >> IOVP_SHIFT; #endif - pide = sba_alloc_range(ioc, size); + pide = sba_alloc_range(ioc, dev, size); iovp = (dma_addr_t) pide << IOVP_SHIFT; DBG_RUN("%s() 0x%p -> 0x%lx\n", diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index ef5a6a245f5..6a9403d79e0 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -145,13 +145,15 @@ void pci_bus_add_devices(struct pci_bus *bus) child_bus = dev->subordinate; child_bus->dev.parent = child_bus->bridge; retval = device_register(&child_bus->dev); - if (!retval) + if (retval) + dev_err(&dev->dev, "Error registering pci_bus," + " continuing...\n"); + else retval = device_create_file(&child_bus->dev, &dev_attr_cpuaffinity); if (retval) - dev_err(&dev->dev, "Error registering pci_bus" - " device bridge symlink," - " continuing...\n"); + dev_err(&dev->dev, "Error creating cpuaffinity" + " file, continuing...\n"); } } } diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c index a590ef68215..4d4a6447840 100644 --- a/drivers/pci/hotplug-pci.c +++ b/drivers/pci/hotplug-pci.c @@ -4,7 +4,7 @@ #include "pci.h" -unsigned int pci_do_scan_bus(struct pci_bus *bus) +unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus) { unsigned int max; diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index cf22f9e01e0..5e50008d118 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -1085,7 +1085,7 @@ static int acpiphp_bus_trim(acpi_handle handle) * This function should be called per *physical slot*, * not per each slot object in ACPI namespace. 
*/ -static int enable_device(struct acpiphp_slot *slot) +static int __ref enable_device(struct acpiphp_slot *slot) { struct pci_dev *dev; struct pci_bus *bus = slot->bridge->pci_bus; diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index 5e9be44817c..b3515fc4cd3 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c @@ -250,7 +250,7 @@ int cpci_led_off(struct slot* slot) * Device configuration functions */ -int cpci_configure_slot(struct slot* slot) +int __ref cpci_configure_slot(struct slot *slot) { struct pci_bus *parent; int fn; diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 6eba9b2cfb9..698975a6a21 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -711,7 +711,8 @@ static int hpc_power_off_slot(struct slot * slot) retval = pcie_write_cmd(slot, slot_cmd, cmd_mask); if (retval) { err("%s: Write command failed!\n", __FUNCTION__); - return -1; + retval = -1; + goto out; } dbg("%s: SLOTCTRL %x write cmd %x\n", __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); @@ -722,7 +723,7 @@ static int hpc_power_off_slot(struct slot * slot) * removed from the slot/adapter. */ msleep(1000); - + out: if (changed) pcie_unmask_bad_dllp(ctrl); diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index dd50713966d..9372a840b63 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c @@ -167,7 +167,7 @@ static void program_fw_provided_values(struct pci_dev *dev) } } -static int pciehp_add_bridge(struct pci_dev *dev) +static int __ref pciehp_add_bridge(struct pci_dev *dev) { struct pci_bus *parent = dev->bus; int pass, busnr, start = parent->secondary; diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c index 0a6b25ef194..a69a2152089 100644 --- a/drivers/pci/hotplug/shpchp_pci.c +++ b/drivers/pci/hotplug/shpchp_pci.c @@ -96,7 +96,7 @@ static void program_fw_provided_values(struct pci_dev *dev) } } -int shpchp_configure_device(struct slot *p_slot) +int __ref shpchp_configure_device(struct slot *p_slot) { struct pci_dev *dev; struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 4d23b9fb551..2db2e4bb0d1 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -286,7 +286,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) } } -void pci_read_bridge_bases(struct pci_bus *child) +void __devinit pci_read_bridge_bases(struct pci_bus *child) { struct pci_dev *dev = child->self; u8 io_base_lo, io_limit_lo; @@ -472,7 +472,7 @@ static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max) * them, we proceed to assigning numbers to the remaining buses in * order to avoid overlaps between old and new bus numbers. 
*/ -int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass) +int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) { struct pci_bus *child; int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); @@ -1008,7 +1008,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) return nr; } -unsigned int pci_scan_child_bus(struct pci_bus *bus) +unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) { unsigned int devfn, pass, max = bus->secondary; struct pci_dev *dev; @@ -1116,7 +1116,7 @@ err_out: return NULL; } -struct pci_bus *pci_scan_bus_parented(struct device *parent, +struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent, int bus, struct pci_ops *ops, void *sysdata) { struct pci_bus *b; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index bbad4a9f264..e9a333d9855 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1652,9 +1652,8 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) pci_write_config_byte(dev, 0x75, 0x1); pci_write_config_byte(dev, 0x77, 0x0); - printk(KERN_INFO - "PCI: VIA CX700 PCI parking/caching fixup on %s\n", - pci_name(dev)); + dev_info(&dev->dev, + "Disabling VIA CX700 PCI parking/caching\n"); } } } @@ -1726,32 +1725,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2 quirk_msi_ht_cap); -/* - * Force enable MSI mapping capability on HT bridges - */ -static void __devinit quirk_msi_ht_cap_enable(struct pci_dev *dev) -{ - int pos, ttl = 48; - - pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); - while (pos && ttl--) { - u8 flags; - - if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { - printk(KERN_INFO "PCI: Enabling HT MSI Mapping on %s\n", - pci_name(dev)); - - pci_write_config_byte(dev, pos + HT_MSI_FLAGS, - flags | HT_MSI_FLAGS_ENABLE); - } - pos = pci_find_next_ht_capability(dev, pos, - HT_CAPTYPE_MSI_MAPPING); - } -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, - PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, - quirk_msi_ht_cap_enable); - /* The nVidia CK804 chipset may have 2 HT MSI mappings. * MSI are supported if the MSI capability set in any of these mappings. 
*/ @@ -1778,9 +1751,8 @@ static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_msi_ht_cap); -/* - * Force enable MSI mapping capability on HT bridges */ -static inline void ht_enable_msi_mapping(struct pci_dev *dev) +/* Force enable MSI mapping capability on HT bridges */ +static void __devinit ht_enable_msi_mapping(struct pci_dev *dev) { int pos, ttl = 48; @@ -1799,6 +1771,9 @@ static inline void ht_enable_msi_mapping(struct pci_dev *dev) HT_CAPTYPE_MSI_MAPPING); } } +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, + PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, + ht_enable_msi_mapping); static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) { @@ -1830,7 +1805,7 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { - dev_info(&dev->dev, "Quirk disabling HT MSI mapping"); + dev_info(&dev->dev, "Disabling HT MSI mapping"); pci_write_config_byte(dev, pos + HT_MSI_FLAGS, flags & ~HT_MSI_FLAGS_ENABLE); } diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index a98b2470b9e..bd5c0e03139 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -242,8 +242,7 @@ void pci_remove_rom(struct pci_dev *pdev) #endif /* 0 */ /** - * pci_cleanup_rom - internal routine for freeing the ROM copy created - * by pci_map_rom_copy called from remove.c + * pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy * @pdev: pointer to pci device struct * * Free the copied ROM if we allocated one. diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c index 5480119ff9d..3ce9f3defc1 100644 --- a/drivers/rapidio/rio-driver.c +++ b/drivers/rapidio/rio-driver.c @@ -78,8 +78,7 @@ void rio_dev_put(struct rio_dev *rdev) } /** - * rio_device_probe - Tell if a RIO device structure has a matching RIO - * device id structure + * rio_device_probe - Tell if a RIO device structure has a matching RIO device id structure * @id: the RIO device id structure to match against * @dev: the RIO device structure to match against * @@ -137,7 +136,7 @@ static int rio_device_remove(struct device *dev) * rio_register_driver - register a new RIO driver * @rdrv: the RIO driver structure to register * - * Adds a &struct rio_driver to the list of registered drivers + * Adds a &struct rio_driver to the list of registered drivers. * Returns a negative value on error, otherwise 0. If no error * occurred, the driver remains registered even if no device * was claimed during registration. @@ -167,8 +166,7 @@ void rio_unregister_driver(struct rio_driver *rdrv) } /** - * rio_match_bus - Tell if a RIO device structure has a matching RIO - * driver device id structure + * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure * @dev: the standard device structure to match against * @drv: the standard driver structure containing the ids to match against * diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 6402d699072..82f5ad9c3af 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -250,6 +250,15 @@ config RTC_DRV_TWL92330 platforms. The support is integrated with the rest of the Menelaus driver; it's not separate module. +config RTC_DRV_S35390A + tristate "Seiko Instruments S-35390A" + help + If you say yes here you will get support for the Seiko + Instruments S-35390A. + + This driver can also be built as a module. If so the module + will be called rtc-s35390a. 
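/* The rtc-s35390a driver added below has to cope with register bytes
 * that arrive with their bits in reverse order; a minimal sketch of the
 * decode, using the same bitrev8()/BCD2BIN() helpers the driver uses
 * (the function name here is illustrative). */
#include <linux/types.h>
#include <linux/bitrev.h>
#include <linux/bcd.h>

static int s35390a_decode_bcd(u8 raw)
{
	/* Undo the wire bit order first, then treat the byte as BCD. */
	return BCD2BIN(bitrev8(raw));
}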
+ endif # I2C comment "SPI RTC drivers" diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index ec703f34ab8..872f1218ff9 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o +obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c new file mode 100644 index 00000000000..e8abc90c32c --- /dev/null +++ b/drivers/rtc/rtc-s35390a.c @@ -0,0 +1,316 @@ +/* + * Seiko Instruments S-35390A RTC Driver + * + * Copyright (c) 2007 Byron Bradley + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/module.h> +#include <linux/rtc.h> +#include <linux/i2c.h> +#include <linux/bitrev.h> +#include <linux/bcd.h> +#include <linux/slab.h> + +#define S35390A_CMD_STATUS1 0 +#define S35390A_CMD_STATUS2 1 +#define S35390A_CMD_TIME1 2 + +#define S35390A_BYTE_YEAR 0 +#define S35390A_BYTE_MONTH 1 +#define S35390A_BYTE_DAY 2 +#define S35390A_BYTE_WDAY 3 +#define S35390A_BYTE_HOURS 4 +#define S35390A_BYTE_MINS 5 +#define S35390A_BYTE_SECS 6 + +#define S35390A_FLAG_POC 0x01 +#define S35390A_FLAG_BLD 0x02 +#define S35390A_FLAG_24H 0x40 +#define S35390A_FLAG_RESET 0x80 +#define S35390A_FLAG_TEST 0x01 + +struct s35390a { + struct i2c_client *client[8]; + struct rtc_device *rtc; + int twentyfourhour; +}; + +static int s35390a_set_reg(struct s35390a *s35390a, int reg, char *buf, int len) +{ + struct i2c_client *client = s35390a->client[reg]; + struct i2c_msg msg[] = { + { client->addr, 0, len, buf }, + }; + + if ((i2c_transfer(client->adapter, msg, 1)) != 1) + return -EIO; + + return 0; +} + +static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) +{ + struct i2c_client *client = s35390a->client[reg]; + struct i2c_msg msg[] = { + { client->addr, I2C_M_RD, len, buf }, + }; + + if ((i2c_transfer(client->adapter, msg, 1)) != 1) + return -EIO; + + return 0; +} + +static int s35390a_reset(struct s35390a *s35390a) +{ + char buf[1]; + + if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0) + return -EIO; + + if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD))) + return 0; + + buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H); + buf[0] &= 0xf0; + return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); +} + +static int s35390a_disable_test_mode(struct s35390a *s35390a) +{ + char buf[1]; + + if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)) < 0) + return -EIO; + + if (!(buf[0] & S35390A_FLAG_TEST)) + return 0; + + buf[0] &= ~S35390A_FLAG_TEST; + return s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)); +} + +static char s35390a_hr2reg(struct s35390a *s35390a, int hour) +{ + if (s35390a->twentyfourhour) + return BIN2BCD(hour); + + if (hour < 12) + return BIN2BCD(hour); + + return 0x40 | BIN2BCD(hour - 12); +} + +static int s35390a_reg2hr(struct s35390a *s35390a, char reg) +{ + unsigned hour; + + if (s35390a->twentyfourhour) + return BCD2BIN(reg & 0x3f); + + hour = BCD2BIN(reg & 0x3f); + if (reg & 0x40) + hour += 12; + + return hour; 
+} + +static int s35390a_set_datetime(struct i2c_client *client, struct rtc_time *tm) +{ + struct s35390a *s35390a = i2c_get_clientdata(client); + int i, err; + char buf[7]; + + dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d mday=%d, " + "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, + tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, + tm->tm_wday); + + buf[S35390A_BYTE_YEAR] = BIN2BCD(tm->tm_year - 100); + buf[S35390A_BYTE_MONTH] = BIN2BCD(tm->tm_mon + 1); + buf[S35390A_BYTE_DAY] = BIN2BCD(tm->tm_mday); + buf[S35390A_BYTE_WDAY] = BIN2BCD(tm->tm_wday); + buf[S35390A_BYTE_HOURS] = s35390a_hr2reg(s35390a, tm->tm_hour); + buf[S35390A_BYTE_MINS] = BIN2BCD(tm->tm_min); + buf[S35390A_BYTE_SECS] = BIN2BCD(tm->tm_sec); + + /* This chip expects the bits of each byte to be in reverse order */ + for (i = 0; i < 7; ++i) + buf[i] = bitrev8(buf[i]); + + err = s35390a_set_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); + + return err; +} + +static int s35390a_get_datetime(struct i2c_client *client, struct rtc_time *tm) +{ + struct s35390a *s35390a = i2c_get_clientdata(client); + char buf[7]; + int i, err; + + err = s35390a_get_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); + if (err < 0) + return err; + + /* This chip returns the bits of each byte in reverse order */ + for (i = 0; i < 7; ++i) + buf[i] = bitrev8(buf[i]); + + tm->tm_sec = BCD2BIN(buf[S35390A_BYTE_SECS]); + tm->tm_min = BCD2BIN(buf[S35390A_BYTE_MINS]); + tm->tm_hour = s35390a_reg2hr(s35390a, buf[S35390A_BYTE_HOURS]); + tm->tm_wday = BCD2BIN(buf[S35390A_BYTE_WDAY]); + tm->tm_mday = BCD2BIN(buf[S35390A_BYTE_DAY]); + tm->tm_mon = BCD2BIN(buf[S35390A_BYTE_MONTH]) - 1; + tm->tm_year = BCD2BIN(buf[S35390A_BYTE_YEAR]) + 100; + + dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, mday=%d, " + "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, + tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, + tm->tm_wday); + + return rtc_valid_tm(tm); +} + +static int s35390a_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + return s35390a_get_datetime(to_i2c_client(dev), tm); +} + +static int s35390a_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + return s35390a_set_datetime(to_i2c_client(dev), tm); +} + +static const struct rtc_class_ops s35390a_rtc_ops = { + .read_time = s35390a_rtc_read_time, + .set_time = s35390a_rtc_set_time, +}; + +static struct i2c_driver s35390a_driver; + +static int s35390a_probe(struct i2c_client *client) +{ + int err; + unsigned int i; + struct s35390a *s35390a; + struct rtc_time tm; + char buf[1]; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + err = -ENODEV; + goto exit; + } + + s35390a = kzalloc(sizeof(struct s35390a), GFP_KERNEL); + if (!s35390a) { + err = -ENOMEM; + goto exit; + } + + s35390a->client[0] = client; + i2c_set_clientdata(client, s35390a); + + /* This chip uses multiple addresses, use dummy devices for them */ + for (i = 1; i < 8; ++i) { + s35390a->client[i] = i2c_new_dummy(client->adapter, + client->addr + i, "rtc-s35390a"); + if (!s35390a->client[i]) { + dev_err(&client->dev, "Address %02x unavailable\n", + client->addr + i); + err = -EBUSY; + goto exit_dummy; + } + } + + err = s35390a_reset(s35390a); + if (err < 0) { + dev_err(&client->dev, "error resetting chip\n"); + goto exit_dummy; + } + + err = s35390a_disable_test_mode(s35390a); + if (err < 0) { + dev_err(&client->dev, "error disabling test mode\n"); + goto exit_dummy; + } + + err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); + if 
(err < 0) { + dev_err(&client->dev, "error checking 12/24 hour mode\n"); + goto exit_dummy; + } + if (buf[0] & S35390A_FLAG_24H) + s35390a->twentyfourhour = 1; + else + s35390a->twentyfourhour = 0; + + if (s35390a_get_datetime(client, &tm) < 0) + dev_warn(&client->dev, "clock needs to be set\n"); + + s35390a->rtc = rtc_device_register(s35390a_driver.driver.name, + &client->dev, &s35390a_rtc_ops, THIS_MODULE); + + if (IS_ERR(s35390a->rtc)) { + err = PTR_ERR(s35390a->rtc); + goto exit_dummy; + } + return 0; + +exit_dummy: + for (i = 1; i < 8; ++i) + if (s35390a->client[i]) + i2c_unregister_device(s35390a->client[i]); + kfree(s35390a); + i2c_set_clientdata(client, NULL); + +exit: + return err; +} + +static int s35390a_remove(struct i2c_client *client) +{ + unsigned int i; + + struct s35390a *s35390a = i2c_get_clientdata(client); + for (i = 1; i < 8; ++i) + if (s35390a->client[i]) + i2c_unregister_device(s35390a->client[i]); + + rtc_device_unregister(s35390a->rtc); + kfree(s35390a); + i2c_set_clientdata(client, NULL); + + return 0; +} + +static struct i2c_driver s35390a_driver = { + .driver = { + .name = "rtc-s35390a", + }, + .probe = s35390a_probe, + .remove = s35390a_remove, +}; + +static int __init s35390a_rtc_init(void) +{ + return i2c_add_driver(&s35390a_driver); +} + +static void __exit s35390a_rtc_exit(void) +{ + i2c_del_driver(&s35390a_driver); +} + +MODULE_AUTHOR("Byron Bradley <byron.bbradley@gmail.com>"); +MODULE_DESCRIPTION("S35390A RTC driver"); +MODULE_LICENSE("GPL"); + +module_init(s35390a_rtc_init); +module_exit(s35390a_rtc_exit); diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c index 389346cda6c..07c7f31081b 100644 --- a/drivers/s390/char/defkeymap.c +++ b/drivers/s390/char/defkeymap.c @@ -151,8 +151,8 @@ char *func_table[MAX_NR_FUNC] = { }; struct kbdiacruc accent_table[MAX_DIACR] = { - {'^', 'c', '\003'}, {'^', 'd', '\004'}, - {'^', 'z', '\032'}, {'^', '\012', '\000'}, + {'^', 'c', 0003}, {'^', 'd', 0004}, + {'^', 'z', 0032}, {'^', 0012, 0000}, }; unsigned int accent_table_size = 4; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index fecba05b4e7..e5c6f6af876 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -757,7 +757,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd) "Notifying upper driver of completion " "(result %x)\n", cmd->result)); - good_bytes = scsi_bufflen(cmd); + good_bytes = scsi_bufflen(cmd) + cmd->request->extra_len; if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { drv = scsi_cmd_to_driver(cmd); if (drv->done) diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 1dc165ad17f..e67c14e31ba 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1577,8 +1577,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, } /** - * scsi_scan_target - scan a target id, possibly including all LUNs on the - * target. + * scsi_scan_target - scan a target id, possibly including all LUNs on the target. * @parent: host to scan * @channel: channel to scan * @id: target id to scan diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c index 6f09cbd7fc4..97c68d021d2 100644 --- a/drivers/serial/8250_pnp.c +++ b/drivers/serial/8250_pnp.c @@ -91,6 +91,8 @@ static const struct pnp_device_id pnp_dev_table[] = { /* Archtek America Corp. 
*/ /* Archtek SmartLink Modem 3334BT Plug & Play */ { "GVC000F", 0 }, + /* Archtek SmartLink Modem 3334BRV 33.6K Data Fax Voice */ + { "GVC0303", 0 }, /* Hayes */ /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */ { "HAY0001", 0 }, diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index b82595cf13e..cf627cd1b4c 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -686,7 +686,7 @@ config UART0_RTS_PIN config SERIAL_BFIN_UART1 bool "Enable UART1" - depends on SERIAL_BFIN && (BF534 || BF536 || BF537 || BF54x) + depends on SERIAL_BFIN && (!BF531 && !BF532 && !BF533 && !BF561) help Enable UART1 @@ -699,14 +699,14 @@ config BFIN_UART1_CTSRTS config UART1_CTS_PIN int "UART1 CTS pin" - depends on BFIN_UART1_CTSRTS && (BF53x || BF561) + depends on BFIN_UART1_CTSRTS && !BF54x default -1 help Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. config UART1_RTS_PIN int "UART1 RTS pin" - depends on BFIN_UART1_CTSRTS && (BF53x || BF561) + depends on BFIN_UART1_CTSRTS && !BF54x default -1 help Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c index ac2a3ef28d5..0aa345b9a38 100644 --- a/drivers/serial/bfin_5xx.c +++ b/drivers/serial/bfin_5xx.c @@ -1,30 +1,11 @@ /* - * File: drivers/serial/bfin_5xx.c - * Based on: Based on drivers/serial/sa1100.c - * Author: Aubrey Li <aubrey.li@analog.com> + * Blackfin On-Chip Serial Driver * - * Created: - * Description: Driver for blackfin 5xx serial ports + * Copyright 2006-2007 Analog Devices Inc. * - * Modified: - * Copyright 2006 Analog Devices Inc. + * Enter bugs at http://blackfin.uclinux.org/ * - * Bugs: Enter bugs at http://blackfin.uclinux.org/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see the file COPYING, or write - * to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * Licensed under the GPL-2 or later. 
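/* DMA_RX_FLUSH_JIFFIES below is now written as HZ / 50 so the receive
 * flush timer fires roughly every 20ms regardless of the configured
 * tick rate; assuming HZ is a multiple of 50, an equivalent expression
 * using the generic helper (macro name illustrative) is: */
#include <linux/jiffies.h>

#define DMA_RX_FLUSH_INTERVAL	msecs_to_jiffies(20)	/* == HZ / 50 */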
*/ #if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) @@ -67,14 +48,12 @@ #define DMA_RX_XCOUNT 512 #define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT) -#define DMA_RX_FLUSH_JIFFIES 5 +#define DMA_RX_FLUSH_JIFFIES (HZ / 50) #ifdef CONFIG_SERIAL_BFIN_DMA static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart); #else -static void bfin_serial_do_work(struct work_struct *work); static void bfin_serial_tx_chars(struct bfin_serial_port *uart); -static void local_put_char(struct bfin_serial_port *uart, char ch); #endif static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); @@ -85,23 +64,26 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); static void bfin_serial_stop_tx(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + struct circ_buf *xmit = &uart->port.info->xmit; +#if !defined(CONFIG_BF54x) && !defined(CONFIG_SERIAL_BFIN_DMA) + unsigned short ier; +#endif while (!(UART_GET_LSR(uart) & TEMT)) - continue; + cpu_relax(); #ifdef CONFIG_SERIAL_BFIN_DMA disable_dma(uart->tx_dma_channel); + xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); + uart->port.icount.tx += uart->tx_count; + uart->tx_count = 0; + uart->tx_done = 1; #else #ifdef CONFIG_BF54x - /* Waiting for Transmission Finished */ - while (!(UART_GET_LSR(uart) & TFI)) - continue; /* Clear TFI bit */ UART_PUT_LSR(uart, TFI); UART_CLEAR_IER(uart, ETBEI); #else - unsigned short ier; - ier = UART_GET_IER(uart); ier &= ~ETBEI; UART_PUT_IER(uart, ier); @@ -117,7 +99,8 @@ static void bfin_serial_start_tx(struct uart_port *port) struct bfin_serial_port *uart = (struct bfin_serial_port *)port; #ifdef CONFIG_SERIAL_BFIN_DMA - bfin_serial_dma_tx_chars(uart); + if (uart->tx_done) + bfin_serial_dma_tx_chars(uart); #else #ifdef CONFIG_BF54x UART_SET_IER(uart, ETBEI); @@ -209,34 +192,27 @@ int kgdb_get_debug_char(void) } #endif -#ifdef CONFIG_SERIAL_BFIN_PIO -static void local_put_char(struct bfin_serial_port *uart, char ch) -{ - unsigned short status; - int flags = 0; - - spin_lock_irqsave(&uart->port.lock, flags); - - do { - status = UART_GET_LSR(uart); - } while (!(status & THRE)); - - UART_PUT_CHAR(uart, ch); - SSYNC(); - - spin_unlock_irqrestore(&uart->port.lock, flags); -} +#if ANOMALY_05000230 && defined(CONFIG_SERIAL_BFIN_PIO) +# define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold) +# define UART_SET_ANOMALY_THRESHOLD(uart, v) ((uart)->anomaly_threshold = (v)) +#else +# define UART_GET_ANOMALY_THRESHOLD(uart) 0 +# define UART_SET_ANOMALY_THRESHOLD(uart, v) +#endif +#ifdef CONFIG_SERIAL_BFIN_PIO static void bfin_serial_rx_chars(struct bfin_serial_port *uart) { struct tty_struct *tty = uart->port.info->tty; unsigned int status, ch, flg; - static int in_break = 0; + static struct timeval anomaly_start = { .tv_sec = 0 }; #ifdef CONFIG_KGDB_UART struct pt_regs *regs = get_irq_regs(); #endif status = UART_GET_LSR(uart); + UART_CLEAR_LSR(uart); + ch = UART_GET_CHAR(uart); uart->port.icount.rx++; @@ -262,28 +238,56 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) #endif if (ANOMALY_05000230) { - /* The BF533 family of processors have a nice misbehavior where - * they continuously generate characters for a "single" break. + /* The BF533 (and BF561) family of processors have a nice anomaly + * where they continuously generate characters for a "single" break. * We have to basically ignore this flood until the "next" valid - * character comes across. All other Blackfin families operate - * properly though. 
+ * character comes across. Due to the nature of the flood, it is + * not possible to reliably catch bytes that are sent too quickly + * after this break. So application code talking to the Blackfin + * which sends a break signal must allow at least 1.5 character + * times after the end of the break for things to stabilize. This + * timeout was picked as it must absolutely be larger than 1 + * character time +/- some percent. So 1.5 sounds good. All other + * Blackfin families operate properly. Woo. * Note: While Anomaly 05000230 does not directly address this, * the changes that went in for it also fixed this issue. + * That anomaly was fixed in 0.5+ silicon. I like bunnies. */ - if (in_break) { - if (ch != 0) { - in_break = 0; - ch = UART_GET_CHAR(uart); - if (bfin_revid() < 5) - return; - } else - return; + if (anomaly_start.tv_sec) { + struct timeval curr; + suseconds_t usecs; + + if ((~ch & (~ch + 1)) & 0xff) + goto known_good_char; + + do_gettimeofday(&curr); + if (curr.tv_sec - anomaly_start.tv_sec > 1) + goto known_good_char; + + usecs = 0; + if (curr.tv_sec != anomaly_start.tv_sec) + usecs += USEC_PER_SEC; + usecs += curr.tv_usec - anomaly_start.tv_usec; + + if (usecs > UART_GET_ANOMALY_THRESHOLD(uart)) + goto known_good_char; + + if (ch) + anomaly_start.tv_sec = 0; + else + anomaly_start = curr; + + return; + + known_good_char: + anomaly_start.tv_sec = 0; } } if (status & BI) { if (ANOMALY_05000230) - in_break = 1; + if (bfin_revid() < 5) + do_gettimeofday(&anomaly_start); uart->port.icount.brk++; if (uart_handle_break(&uart->port)) goto ignore_char; @@ -324,7 +328,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart) UART_PUT_CHAR(uart, uart->port.x_char); uart->port.icount.tx++; uart->port.x_char = 0; - return; } /* * Check the modem control lines before @@ -337,9 +340,12 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart) return; } - local_put_char(uart, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - uart->port.icount.tx++; + while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) { + UART_PUT_CHAR(uart, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + uart->port.icount.tx++; + SSYNC(); + } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&uart->port); @@ -352,21 +358,11 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id) { struct bfin_serial_port *uart = dev_id; -#ifdef CONFIG_BF54x - unsigned short status; - spin_lock(&uart->port.lock); - status = UART_GET_LSR(uart); - while ((UART_GET_IER(uart) & ERBFI) && (status & DR)) { - bfin_serial_rx_chars(uart); - status = UART_GET_LSR(uart); - } - spin_unlock(&uart->port.lock); -#else spin_lock(&uart->port.lock); - while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_RX_READY) + while (UART_GET_LSR(uart) & DR) bfin_serial_rx_chars(uart); spin_unlock(&uart->port.lock); -#endif + return IRQ_HANDLED; } @@ -374,25 +370,16 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id) { struct bfin_serial_port *uart = dev_id; -#ifdef CONFIG_BF54x - unsigned short status; spin_lock(&uart->port.lock); - status = UART_GET_LSR(uart); - while ((UART_GET_IER(uart) & ETBEI) && (status & THRE)) { + if (UART_GET_LSR(uart) & THRE) bfin_serial_tx_chars(uart); - status = UART_GET_LSR(uart); - } spin_unlock(&uart->port.lock); -#else - spin_lock(&uart->port.lock); - while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_TX_READY) - bfin_serial_tx_chars(uart); - spin_unlock(&uart->port.lock); -#endif + return IRQ_HANDLED; } 
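/* A sketch of the elapsed-time check bfin_serial_rx_chars() above uses
 * while working around anomaly 05000230; it assumes the two samples are
 * at most about a second apart, which the caller guarantees by bailing
 * out when tv_sec differs by more than one.  The helper name is
 * illustrative. */
#include <linux/time.h>

static suseconds_t break_flood_usecs(const struct timeval *start,
				     const struct timeval *now)
{
	suseconds_t usecs = 0;

	if (now->tv_sec != start->tv_sec)
		usecs += USEC_PER_SEC;
	return usecs + (now->tv_usec - start->tv_usec);
}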
+#endif - +#ifdef CONFIG_SERIAL_BFIN_CTSRTS static void bfin_serial_do_work(struct work_struct *work) { struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue); @@ -406,33 +393,27 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) { struct circ_buf *xmit = &uart->port.info->xmit; unsigned short ier; - int flags = 0; - - if (!uart->tx_done) - return; uart->tx_done = 0; + if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { + uart->tx_count = 0; + uart->tx_done = 1; + return; + } + if (uart->port.x_char) { UART_PUT_CHAR(uart, uart->port.x_char); uart->port.icount.tx++; uart->port.x_char = 0; - uart->tx_done = 1; - return; } + /* * Check the modem control lines before * transmitting anything. */ bfin_serial_mctrl_check(uart); - if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { - bfin_serial_stop_tx(&uart->port); - uart->tx_done = 1; - return; - } - - spin_lock_irqsave(&uart->port.lock, flags); uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) uart->tx_count = UART_XMIT_SIZE - xmit->tail; @@ -448,6 +429,7 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) set_dma_x_count(uart->tx_dma_channel, uart->tx_count); set_dma_x_modify(uart->tx_dma_channel, 1); enable_dma(uart->tx_dma_channel); + #ifdef CONFIG_BF54x UART_SET_IER(uart, ETBEI); #else @@ -455,7 +437,6 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) ier |= ETBEI; UART_PUT_IER(uart, ier); #endif - spin_unlock_irqrestore(&uart->port.lock, flags); } static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) @@ -464,7 +445,11 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) int i, flg, status; status = UART_GET_LSR(uart); - uart->port.icount.rx += CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, UART_XMIT_SIZE);; + UART_CLEAR_LSR(uart); + + uart->port.icount.rx += + CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, + UART_XMIT_SIZE); if (status & BI) { uart->port.icount.brk++; @@ -490,10 +475,12 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) else flg = TTY_NORMAL; - for (i = uart->rx_dma_buf.head; i < uart->rx_dma_buf.tail; i++) { - if (uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) - goto dma_ignore_char; - uart_insert_char(&uart->port, status, OE, uart->rx_dma_buf.buf[i], flg); + for (i = uart->rx_dma_buf.tail; i != uart->rx_dma_buf.head; i++) { + if (i >= UART_XMIT_SIZE) + i = 0; + if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) + uart_insert_char(&uart->port, status, OE, + uart->rx_dma_buf.buf[i], flg); } dma_ignore_char: @@ -503,23 +490,23 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) { int x_pos, pos; - int flags = 0; - - bfin_serial_dma_tx_chars(uart); - spin_lock_irqsave(&uart->port.lock, flags); - x_pos = DMA_RX_XCOUNT - get_dma_curr_xcount(uart->rx_dma_channel); + uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel); + x_pos = get_dma_curr_xcount(uart->rx_dma_channel); + uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows; + if (uart->rx_dma_nrows == DMA_RX_YCOUNT) + uart->rx_dma_nrows = 0; + x_pos = DMA_RX_XCOUNT - x_pos; if (x_pos == DMA_RX_XCOUNT) x_pos = 0; pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; - - if (pos>uart->rx_dma_buf.tail) { - uart->rx_dma_buf.tail = pos; + if (pos != uart->rx_dma_buf.tail) { + uart->rx_dma_buf.head = pos; 
bfin_serial_dma_rx_chars(uart); - uart->rx_dma_buf.head = uart->rx_dma_buf.tail; + uart->rx_dma_buf.tail = uart->rx_dma_buf.head; } - spin_unlock_irqrestore(&uart->port.lock, flags); + uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; add_timer(&(uart->rx_dma_timer)); } @@ -532,8 +519,8 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) spin_lock(&uart->port.lock); if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { - clear_dma_irqstat(uart->tx_dma_channel); disable_dma(uart->tx_dma_channel); + clear_dma_irqstat(uart->tx_dma_channel); #ifdef CONFIG_BF54x UART_CLEAR_IER(uart, ETBEI); #else @@ -541,15 +528,13 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) ier &= ~ETBEI; UART_PUT_IER(uart, ier); #endif - xmit->tail = (xmit->tail+uart->tx_count) &(UART_XMIT_SIZE -1); - uart->port.icount.tx+=uart->tx_count; + xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); + uart->port.icount.tx += uart->tx_count; if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&uart->port); - if (uart_circ_empty(xmit)) - bfin_serial_stop_tx(&uart->port); - uart->tx_done = 1; + bfin_serial_dma_tx_chars(uart); } spin_unlock(&uart->port.lock); @@ -561,18 +546,15 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) struct bfin_serial_port *uart = dev_id; unsigned short irqstat; - uart->rx_dma_nrows++; - if (uart->rx_dma_nrows == DMA_RX_YCOUNT) { - uart->rx_dma_nrows = 0; - uart->rx_dma_buf.tail = DMA_RX_XCOUNT*DMA_RX_YCOUNT; - bfin_serial_dma_rx_chars(uart); - uart->rx_dma_buf.head = uart->rx_dma_buf.tail = 0; - } spin_lock(&uart->port.lock); irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); clear_dma_irqstat(uart->rx_dma_channel); - spin_unlock(&uart->port.lock); + + del_timer(&(uart->rx_dma_timer)); + uart->rx_dma_timer.expires = jiffies; + add_timer(&(uart->rx_dma_timer)); + return IRQ_HANDLED; } #endif @@ -599,7 +581,11 @@ static unsigned int bfin_serial_get_mctrl(struct uart_port *port) if (uart->cts_pin < 0) return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; +# ifdef BF54x + if (UART_GET_MSR(uart) & CTS) +# else if (gpio_get_value(uart->cts_pin)) +# endif return TIOCM_DSR | TIOCM_CAR; else #endif @@ -614,9 +600,17 @@ static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) return; if (mctrl & TIOCM_RTS) +# ifdef BF54x + UART_PUT_MCR(uart, UART_GET_MCR(uart) & ~MRTS); +# else gpio_set_value(uart->rts_pin, 0); +# endif else +# ifdef BF54x + UART_PUT_MCR(uart, UART_GET_MCR(uart) | MRTS); +# else gpio_set_value(uart->rts_pin, 1); +# endif #endif } @@ -627,22 +621,17 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart) { #ifdef CONFIG_SERIAL_BFIN_CTSRTS unsigned int status; -# ifdef CONFIG_SERIAL_BFIN_DMA struct uart_info *info = uart->port.info; struct tty_struct *tty = info->tty; status = bfin_serial_get_mctrl(&uart->port); + uart_handle_cts_change(&uart->port, status & TIOCM_CTS); if (!(status & TIOCM_CTS)) { tty->hw_stopped = 1; + schedule_work(&uart->cts_workqueue); } else { tty->hw_stopped = 0; } -# else - status = bfin_serial_get_mctrl(&uart->port); - uart_handle_cts_change(&uart->port, status & TIOCM_CTS); - if (!(status & TIOCM_CTS)) - schedule_work(&uart->cts_workqueue); -# endif #endif } @@ -743,6 +732,7 @@ static void bfin_serial_shutdown(struct uart_port *port) disable_dma(uart->rx_dma_channel); free_dma(uart->rx_dma_channel); del_timer(&(uart->rx_dma_timer)); + dma_free_coherent(NULL, PAGE_SIZE, uart->rx_dma_buf.buf, 0); #else #ifdef CONFIG_KGDB_UART if (uart->port.line != 
CONFIG_KGDB_UART_PORT) @@ -814,6 +804,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, quot = uart_get_divisor(port, baud); spin_lock_irqsave(&uart->port.lock, flags); + UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15); + do { lsr = UART_GET_LSR(uart); } while (!(lsr & TEMT)); @@ -956,10 +948,9 @@ static void __init bfin_serial_init_ports(void) bfin_serial_ports[i].rx_dma_channel = bfin_serial_resource[i].uart_rx_dma_channel; init_timer(&(bfin_serial_ports[i].rx_dma_timer)); -#else - INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work); #endif #ifdef CONFIG_SERIAL_BFIN_CTSRTS + INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work); bfin_serial_ports[i].cts_pin = bfin_serial_resource[i].uart_cts_pin; bfin_serial_ports[i].rts_pin = diff --git a/drivers/serial/m32r_sio.c b/drivers/serial/m32r_sio.c index 348ee2c19b5..c2bb11c02bd 100644 --- a/drivers/serial/m32r_sio.c +++ b/drivers/serial/m32r_sio.c @@ -421,7 +421,7 @@ static void transmit_chars(struct uart_sio_port *up) up->port.icount.tx++; if (uart_circ_empty(xmit)) break; - while (!serial_in(up, UART_LSR) & UART_LSR_THRE); + while (!(serial_in(up, UART_LSR) & UART_LSR_THRE)); } while (--count > 0); diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 9ce12cb2ceb..a8c116b80bf 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -41,6 +41,7 @@ #include <linux/delay.h> #include <linux/console.h> #include <linux/platform_device.h> +#include <linux/serial_sci.h> #ifdef CONFIG_CPU_FREQ #include <linux/notifier.h> @@ -54,7 +55,6 @@ #include <asm/kgdb.h> #endif -#include <asm/sci.h> #include "sh-sci.h" struct sci_port { diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index 9cfcfd8dad5..617efb1640b 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c @@ -1,7 +1,7 @@ /* * Core maple bus functionality * - * Copyright (C) 2007 Adrian McMenamin + * Copyright (C) 2007, 2008 Adrian McMenamin * * Based on 2.4 code by: * @@ -18,7 +18,6 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/device.h> -#include <linux/module.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/io.h> @@ -54,7 +53,7 @@ static struct device maple_bus; static int subdevice_map[MAPLE_PORTS]; static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; static unsigned long maple_pnp_time; -static int started, scanning, liststatus, realscan; +static int started, scanning, liststatus, fullscan; static struct kmem_cache *maple_queue_cache; struct maple_device_specify { @@ -62,6 +61,9 @@ struct maple_device_specify { int unit; }; +static bool checked[4]; +static struct maple_device *baseunits[4]; + /** * maple_driver_register - register a device driver * automatically makes the driver bus a maple bus @@ -309,11 +311,9 @@ static void maple_attach_driver(struct maple_device *mdev) else break; - if (realscan) { - printk(KERN_INFO "Maple device detected: %s\n", - mdev->product_name); - printk(KERN_INFO "Maple device: %s\n", mdev->product_licence); - } + printk(KERN_INFO "Maple device detected: %s\n", + mdev->product_name); + printk(KERN_INFO "Maple device: %s\n", mdev->product_licence); function = be32_to_cpu(mdev->devinfo.function); @@ -323,10 +323,9 @@ static void maple_attach_driver(struct maple_device *mdev) mdev->driver = &maple_dummy_driver; sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port); } else { - if (realscan) - printk(KERN_INFO - "Maple bus at (%d, %d): Function 0x%lX\n", - mdev->port, mdev->unit, 
function); + printk(KERN_INFO + "Maple bus at (%d, %d): Function 0x%lX\n", + mdev->port, mdev->unit, function); matched = bus_for_each_drv(&maple_bus_type, NULL, mdev, @@ -334,9 +333,8 @@ static void maple_attach_driver(struct maple_device *mdev) if (matched == 0) { /* Driver does not exist yet */ - if (realscan) - printk(KERN_INFO - "No maple driver found.\n"); + printk(KERN_INFO + "No maple driver found.\n"); mdev->driver = &maple_dummy_driver; } sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port, @@ -472,9 +470,12 @@ static void maple_response_none(struct maple_device *mdev, maple_detach_driver(mdev); return; } - if (!started) { - printk(KERN_INFO "No maple devices attached to port %d\n", - mdev->port); + if (!started || !fullscan) { + if (checked[mdev->port] == false) { + checked[mdev->port] = true; + printk(KERN_INFO "No maple devices attached" + " to port %d\n", mdev->port); + } return; } maple_clean_submap(mdev); @@ -485,8 +486,14 @@ static void maple_response_devinfo(struct maple_device *mdev, char *recvbuf) { char submask; - if ((!started) || (scanning == 2)) { - maple_attach_driver(mdev); + if (!started || (scanning == 2) || !fullscan) { + if ((mdev->unit == 0) && (checked[mdev->port] == false)) { + checked[mdev->port] = true; + maple_attach_driver(mdev); + } else { + if (mdev->unit != 0) + maple_attach_driver(mdev); + } return; } if (mdev->unit == 0) { @@ -505,6 +512,7 @@ static void maple_dma_handler(struct work_struct *work) struct maple_device *dev; char *recvbuf; enum maple_code code; + int i; if (!maple_dma_done()) return; @@ -557,6 +565,19 @@ static void maple_dma_handler(struct work_struct *work) } else scanning = 0; + if (!fullscan) { + fullscan = 1; + for (i = 0; i < MAPLE_PORTS; i++) { + if (checked[i] == false) { + fullscan = 0; + dev = baseunits[i]; + dev->mq->command = + MAPLE_COMMAND_DEVINFO; + dev->mq->length = 0; + maple_add_packet(dev->mq); + } + } + } if (started == 0) started = 1; } @@ -694,7 +715,9 @@ static int __init maple_bus_init(void) /* setup maple ports */ for (i = 0; i < MAPLE_PORTS; i++) { + checked[i] = false; mdev[i] = maple_alloc_dev(i, 0); + baseunits[i] = mdev[i]; if (!mdev[i]) { while (i-- > 0) maple_free_dev(mdev[i]); @@ -703,12 +726,9 @@ static int __init maple_bus_init(void) mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; mdev[i]->mq->length = 0; maple_add_packet(mdev[i]->mq); - /* delay aids hardware detection */ - mdelay(5); subdevice_map[i] = 0; } - realscan = 1; /* setup maplebus hardware */ maplebus_dma_reset(); /* initial detection */ diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index 253ed5682a6..a86315a0c5b 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c @@ -42,6 +42,7 @@ struct mpc52xx_psc_spi { /* driver internal data */ struct mpc52xx_psc __iomem *psc; + struct mpc52xx_psc_fifo __iomem *fifo; unsigned int irq; u8 bits_per_word; u8 busy; @@ -139,6 +140,7 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi, { struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); struct mpc52xx_psc __iomem *psc = mps->psc; + struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; unsigned rb = 0; /* number of bytes receieved */ unsigned sb = 0; /* number of bytes sent */ unsigned char *rx_buf = (unsigned char *)t->rx_buf; @@ -190,11 +192,11 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi, out_8(&psc->mode, 0); } else { out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); - out_be16(&psc->rfalarm, rfalarm); + out_be16(&fifo->rfalarm, rfalarm); } 
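[Editor's note] The hunk above moves the rfalarm (and, just below, rfnum) accesses from the PSC register struct to a separate FIFO register struct; later in this patch the fifo pointer is derived by simply stepping past the PSC block, since on the MPC5200 the FIFO registers sit immediately after the PSC registers. Below is a minimal sketch of that adjacent-block pointer arithmetic, using made-up fake_psc/fake_fifo layouts rather than the real mpc52xx definitions.

#include <stdio.h>
#include <stdint.h>

/* Illustrative layouts only, not the real MPC52xx register structs */
struct fake_psc {
        uint8_t mode;
        uint8_t pad[0x3f];      /* pretend the PSC block is 0x40 bytes */
};

struct fake_fifo {
        uint16_t rfalarm;
        uint16_t rfnum;
};

int main(void)
{
        uint8_t regs[0x80];     /* stand-in for the ioremap()ed region */
        struct fake_psc  *psc  = (struct fake_psc *)regs;
        /* FIFO block starts immediately after the PSC block */
        struct fake_fifo *fifo =
                (struct fake_fifo *)((uint8_t *)psc + sizeof(*psc));

        printf("psc at %p, fifo at %p (offset 0x%zx)\n",
               (void *)psc, (void *)fifo, sizeof(*psc));
        return 0;
}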
out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY); wait_for_completion(&mps->done); - recv_at_once = in_be16(&psc->rfnum); + recv_at_once = in_be16(&fifo->rfnum); dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once); send_at_once = recv_at_once; @@ -331,6 +333,7 @@ static void mpc52xx_psc_spi_cleanup(struct spi_device *spi) static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) { struct mpc52xx_psc __iomem *psc = mps->psc; + struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; u32 mclken_div; int ret = 0; @@ -346,7 +349,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) /* Disable interrupts, interrupts are based on alarm level */ out_be16(&psc->mpc52xx_psc_imr, 0); out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); - out_8(&psc->rfcntl, 0); + out_8(&fifo->rfcntl, 0); out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); /* Configure 8bit codec mode as a SPI master and use EOF flags */ @@ -419,6 +422,8 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, ret = -EFAULT; goto free_master; } + /* On the 5200, fifo regs are immediately ajacent to the psc regs */ + mps->fifo = ((void __iomem *)mps->psc) + sizeof(struct mpc52xx_psc); ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi", mps); diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index 5c33cdb9cac..a2b0aa48b8e 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig @@ -87,12 +87,13 @@ config USB_DYNAMIC_MINORS If you are unsure about this, say N here. config USB_SUSPEND - bool "USB selective suspend/resume and wakeup (EXPERIMENTAL)" - depends on USB && PM && EXPERIMENTAL + bool "USB selective suspend/resume and wakeup" + depends on USB && PM help If you say Y here, you can use driver calls or the sysfs - "power/state" file to suspend or resume individual USB - peripherals. + "power/level" file to suspend or resume individual USB + peripherals and to enable or disable autosuspend (see + Documentation/usb/power-management.txt for more details). Also, USB "remote wakeup" signaling is supported, whereby some USB devices (like keyboards and network adapters) can wake up diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index f90ab5e94c5..d9d1eb19f2a 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -28,35 +28,38 @@ * devices is broken... 
*/ static const struct usb_device_id usb_quirk_list[] = { - /* Action Semiconductor flash disk */ - { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255}, - /* CBM - Flash disk */ { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, + /* HP 5300/5370C scanner */ - { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, + { USB_DEVICE(0x03f0, 0x0701), .driver_info = + USB_QUIRK_STRING_FETCH_255 }, /* Creative SB Audigy 2 NX */ { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Philips PSC805 audio device */ + { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Roland SC-8820 */ { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, /* Edirol SD-20 */ { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, - /* INTEL VALUE SSD */ - { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, - /* M-Systems Flash Disk Pioneers */ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, - /* Philips PSC805 audio device */ - { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Action Semiconductor flash disk */ + { USB_DEVICE(0x10d6, 0x2200), .driver_info = + USB_QUIRK_STRING_FETCH_255 }, /* SKYMEDI USB_DRIVE */ { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME }, + /* INTEL VALUE SSD */ + { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, + { } /* terminating entry must be last */ }; diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 4e984060c98..1f0db51190c 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -99,8 +99,7 @@ struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev, EXPORT_SYMBOL_GPL(usb_ifnum_to_if); /** - * usb_altnum_to_altsetting - get the altsetting structure with a given - * alternate setting number. + * usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number. * @intf: the interface containing the altsetting in question * @altnum: the desired alternate setting number * @@ -234,7 +233,7 @@ static int ksuspend_usb_init(void) * singlethreaded. Its job doesn't justify running on more * than one CPU. 
*/ - ksuspend_usb_wq = create_singlethread_workqueue("ksuspend_usbd"); + ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd"); if (!ksuspend_usb_wq) return -ENOMEM; return 0; @@ -442,8 +441,7 @@ EXPORT_SYMBOL_GPL(usb_put_intf); */ /** - * usb_lock_device_for_reset - cautiously acquire the lock for a - * usb device structure + * usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure * @udev: device that's being locked * @iface: interface bound to the driver making the request (optional) * diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 4f6bfa100f2..2c32bd08ee7 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c @@ -92,7 +92,6 @@ struct printer_dev { u8 *current_rx_buf; u8 printer_status; u8 reset_printer; - struct class_device *printer_class_dev; struct cdev printer_cdev; struct device *pdev; u8 printer_cdev_open; diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c index 4402d6f042d..096c41cc40d 100644 --- a/drivers/usb/gadget/pxa2xx_udc.c +++ b/drivers/usb/gadget/pxa2xx_udc.c @@ -103,6 +103,12 @@ static const char ep0name [] = "ep0"; #error "Can't configure both IXP and PXA" #endif +/* IXP doesn't yet support <linux/clk.h> */ +#define clk_get(dev,name) NULL +#define clk_enable(clk) do { } while (0) +#define clk_disable(clk) do { } while (0) +#define clk_put(clk) do { } while (0) + #endif #include "pxa2xx_udc.h" @@ -934,20 +940,31 @@ static void udc_disable(struct pxa2xx_udc *); /* We disable the UDC -- and its 48 MHz clock -- whenever it's not * in active use. */ -static int pullup(struct pxa2xx_udc *udc, int is_active) +static int pullup(struct pxa2xx_udc *udc) { - is_active = is_active && udc->vbus && udc->pullup; + int is_active = udc->vbus && udc->pullup && !udc->suspended; DMSG("%s\n", is_active ? "active" : "inactive"); - if (is_active) - udc_enable(udc); - else { - if (udc->gadget.speed != USB_SPEED_UNKNOWN) { - DMSG("disconnect %s\n", udc->driver - ? udc->driver->driver.name - : "(no driver)"); - stop_activity(udc, udc->driver); + if (is_active) { + if (!udc->active) { + udc->active = 1; + /* Enable clock for USB device */ + clk_enable(udc->clk); + udc_enable(udc); } - udc_disable(udc); + } else { + if (udc->active) { + if (udc->gadget.speed != USB_SPEED_UNKNOWN) { + DMSG("disconnect %s\n", udc->driver + ? udc->driver->driver.name + : "(no driver)"); + stop_activity(udc, udc->driver); + } + udc_disable(udc); + /* Disable clock for USB device */ + clk_disable(udc->clk); + udc->active = 0; + } + } return 0; } @@ -958,9 +975,9 @@ static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active) struct pxa2xx_udc *udc; udc = container_of(_gadget, struct pxa2xx_udc, gadget); - udc->vbus = is_active = (is_active != 0); + udc->vbus = (is_active != 0); DMSG("vbus %s\n", is_active ? 
"supplied" : "inactive"); - pullup(udc, is_active); + pullup(udc); return 0; } @@ -975,9 +992,8 @@ static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active) if (!udc->mach->gpio_pullup && !udc->mach->udc_command) return -EOPNOTSUPP; - is_active = (is_active != 0); - udc->pullup = is_active; - pullup(udc, is_active); + udc->pullup = (is_active != 0); + pullup(udc); return 0; } @@ -997,7 +1013,7 @@ static const struct usb_gadget_ops pxa2xx_udc_ops = { #ifdef CONFIG_USB_GADGET_DEBUG_FS static int -udc_seq_show(struct seq_file *m, void *d) +udc_seq_show(struct seq_file *m, void *_d) { struct pxa2xx_udc *dev = m->private; unsigned long flags; @@ -1146,11 +1162,6 @@ static void udc_disable(struct pxa2xx_udc *dev) udc_clear_mask_UDCCR(UDCCR_UDE); -#ifdef CONFIG_ARCH_PXA - /* Disable clock for USB device */ - clk_disable(dev->clk); -#endif - ep0_idle (dev); dev->gadget.speed = USB_SPEED_UNKNOWN; } @@ -1191,11 +1202,6 @@ static void udc_enable (struct pxa2xx_udc *dev) { udc_clear_mask_UDCCR(UDCCR_UDE); -#ifdef CONFIG_ARCH_PXA - /* Enable clock for USB device */ - clk_enable(dev->clk); -#endif - /* try to clear these bits before we enable the udc */ udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); @@ -1286,7 +1292,7 @@ fail: * for set_configuration as well as eventual disconnect. */ DMSG("registered gadget driver '%s'\n", driver->driver.name); - pullup(dev, 1); + pullup(dev); dump_state(dev); return 0; } @@ -1329,7 +1335,8 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) return -EINVAL; local_irq_disable(); - pullup(dev, 0); + dev->pullup = 0; + pullup(dev); stop_activity(dev, driver); local_irq_enable(); @@ -2131,13 +2138,11 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev) if (irq < 0) return -ENODEV; -#ifdef CONFIG_ARCH_PXA dev->clk = clk_get(&pdev->dev, "UDCCLK"); if (IS_ERR(dev->clk)) { retval = PTR_ERR(dev->clk); goto err_clk; } -#endif pr_debug("%s: IRQ %d%s%s\n", driver_name, irq, dev->has_cfr ? 
"" : " (!cfr)", @@ -2250,10 +2255,8 @@ lubbock_fail0: if (dev->mach->gpio_vbus) gpio_free(dev->mach->gpio_vbus); err_gpio_vbus: -#ifdef CONFIG_ARCH_PXA clk_put(dev->clk); err_clk: -#endif return retval; } @@ -2269,7 +2272,9 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) if (dev->driver) return -EBUSY; - udc_disable(dev); + dev->pullup = 0; + pullup(dev); + remove_debug_files(dev); if (dev->got_irq) { @@ -2289,9 +2294,7 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) if (dev->mach->gpio_pullup) gpio_free(dev->mach->gpio_pullup); -#ifdef CONFIG_ARCH_PXA clk_put(dev->clk); -#endif platform_set_drvdata(pdev, NULL); the_controller = NULL; @@ -2317,10 +2320,15 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) { struct pxa2xx_udc *udc = platform_get_drvdata(dev); + unsigned long flags; if (!udc->mach->gpio_pullup && !udc->mach->udc_command) WARN("USB host won't detect disconnect!\n"); - pullup(udc, 0); + udc->suspended = 1; + + local_irq_save(flags); + pullup(udc); + local_irq_restore(flags); return 0; } @@ -2328,8 +2336,12 @@ static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) static int pxa2xx_udc_resume(struct platform_device *dev) { struct pxa2xx_udc *udc = platform_get_drvdata(dev); + unsigned long flags; - pullup(udc, 1); + udc->suspended = 0; + local_irq_save(flags); + pullup(udc); + local_irq_restore(flags); return 0; } diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa2xx_udc.h index b67e3ff5e4e..e2c19e88c87 100644 --- a/drivers/usb/gadget/pxa2xx_udc.h +++ b/drivers/usb/gadget/pxa2xx_udc.h @@ -119,7 +119,9 @@ struct pxa2xx_udc { has_cfr : 1, req_pending : 1, req_std : 1, - req_config : 1; + req_config : 1, + suspended : 1, + active : 1; #define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200)) struct timer_list timer; diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 776a97f3391..2e49de820b1 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -319,10 +319,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) if (likely (last->urb != urb)) { ehci_urb_done(ehci, last->urb, last_status); count++; + last_status = -EINPROGRESS; } ehci_qtd_free (ehci, last); last = NULL; - last_status = -EINPROGRESS; } /* ignore urbs submitted during completions we reported */ diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index 0130fd8571e..d7071c85575 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c @@ -911,8 +911,7 @@ static int isp116x_hub_status_data(struct usb_hcd *hcd, char *buf) buf[0] = 0; for (i = 0; i < ports; i++) { - u32 status = isp116x->rhport[i] = - isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1); + u32 status = isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1); if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC | RH_PS_PRSC)) { @@ -1031,7 +1030,9 @@ static int isp116x_hub_control(struct usb_hcd *hcd, DBG("GetPortStatus\n"); if (!wIndex || wIndex > ports) goto error; - tmp = isp116x->rhport[--wIndex]; + spin_lock_irqsave(&isp116x->lock, flags); + tmp = isp116x_read_reg32(isp116x, (--wIndex) ? 
HCRHPORT2 : HCRHPORT1); + spin_unlock_irqrestore(&isp116x->lock, flags); *(__le32 *) buf = cpu_to_le32(tmp); DBG("GetPortStatus: port[%d] %08x\n", wIndex + 1, tmp); break; @@ -1080,8 +1081,6 @@ static int isp116x_hub_control(struct usb_hcd *hcd, spin_lock_irqsave(&isp116x->lock, flags); isp116x_write_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1, tmp); - isp116x->rhport[wIndex] = - isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1); spin_unlock_irqrestore(&isp116x->lock, flags); break; case SetPortFeature: @@ -1095,24 +1094,22 @@ static int isp116x_hub_control(struct usb_hcd *hcd, spin_lock_irqsave(&isp116x->lock, flags); isp116x_write_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS); + spin_unlock_irqrestore(&isp116x->lock, flags); break; case USB_PORT_FEAT_POWER: DBG("USB_PORT_FEAT_POWER\n"); spin_lock_irqsave(&isp116x->lock, flags); isp116x_write_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS); + spin_unlock_irqrestore(&isp116x->lock, flags); break; case USB_PORT_FEAT_RESET: DBG("USB_PORT_FEAT_RESET\n"); root_port_reset(isp116x, wIndex); - spin_lock_irqsave(&isp116x->lock, flags); break; default: goto error; } - isp116x->rhport[wIndex] = - isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1); - spin_unlock_irqrestore(&isp116x->lock, flags); break; default: diff --git a/drivers/usb/host/isp116x.h b/drivers/usb/host/isp116x.h index b91e2edd9c5..595b90a9984 100644 --- a/drivers/usb/host/isp116x.h +++ b/drivers/usb/host/isp116x.h @@ -270,7 +270,6 @@ struct isp116x { u32 rhdesca; u32 rhdescb; u32 rhstatus; - u32 rhport[2]; /* async schedule: control, bulk */ struct list_head async; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 76db2fef465..91dc433dbcf 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -92,6 +92,7 @@ struct ftdi_sio_quirk { }; static int ftdi_jtag_probe (struct usb_serial *serial); +static int ftdi_mtxorb_hack_setup (struct usb_serial *serial); static void ftdi_USB_UIRT_setup (struct ftdi_private *priv); static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv); @@ -99,6 +100,10 @@ static struct ftdi_sio_quirk ftdi_jtag_quirk = { .probe = ftdi_jtag_probe, }; +static struct ftdi_sio_quirk ftdi_mtxorb_hack_quirk = { + .probe = ftdi_mtxorb_hack_setup, +}; + static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { .port_probe = ftdi_USB_UIRT_setup, }; @@ -161,6 +166,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, + { USB_DEVICE(MTXORB_VK_VID, MTXORB_VK_PID), + .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, @@ -274,6 +281,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, @@ -1088,6 +1096,23 @@ static int ftdi_jtag_probe(struct usb_serial *serial) return 0; } +/* + * The Matrix Orbital VK204-25-USB has an invalid IN endpoint. + * We have to correct it if we want to read from it. 
+ */ +static int ftdi_mtxorb_hack_setup(struct usb_serial *serial) +{ + struct usb_host_endpoint *ep = serial->dev->ep_in[1]; + struct usb_endpoint_descriptor *ep_desc = &ep->desc; + + if (ep->enabled && ep_desc->wMaxPacketSize == 0) { + ep_desc->wMaxPacketSize = 0x40; + info("Fixing invalid wMaxPacketSize on read pipe"); + } + + return 0; +} + /* ftdi_shutdown is called from usbserial:usb_serial_disconnect * it is called when the usb device is disconnected * diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 6eee2ab914e..e1eb742abcd 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h @@ -102,6 +102,13 @@ * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ +/* + * The following are the values for the Matrix Orbital VK204-25-USB + * display, which use the FT232RL. + */ +#define MTXORB_VK_VID 0x1b3d +#define MTXORB_VK_PID 0x0158 + /* Interbiometrics USB I/O Board */ /* Developed for Interbiometrics by Rudolf Gugler */ #define INTERBIOMETRICS_VID 0x1209 diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 869ecd374cb..aeeb9cb2099 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -110,11 +110,20 @@ /* vendor id and device id defines */ +/* The native mos7840/7820 component */ #define USB_VENDOR_ID_MOSCHIP 0x9710 #define MOSCHIP_DEVICE_ID_7840 0x7840 #define MOSCHIP_DEVICE_ID_7820 0x7820 +/* The native component can have its vendor/device id's overridden + * in vendor-specific implementations. Such devices can be handled + * by making a change here, in moschip_port_id_table, and in + * moschip_id_table_combined + */ +#define USB_VENDOR_ID_BANDB 0x0856 +#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 +#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 -/* Interrupt Rotinue Defines */ +/* Interrupt Routine Defines */ #define SERIAL_IIR_RLS 0x06 #define SERIAL_IIR_MS 0x00 @@ -159,12 +168,16 @@ static struct usb_device_id moschip_port_id_table[] = { {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, {} /* terminating entry */ }; static __devinitdata struct usb_device_id moschip_id_table_combined[] = { {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, {} /* terminating entry */ }; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index af2674c5741..828a4377ec6 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -120,6 +120,9 @@ static int option_send_setup(struct usb_serial_port *port); #define ANYDATA_PRODUCT_ADU_E100A 0x6501 #define ANYDATA_PRODUCT_ADU_500A 0x6502 +#define AXESSTEL_VENDOR_ID 0x1726 +#define AXESSTEL_PRODUCT_MV110H 0x1000 + #define BANDRICH_VENDOR_ID 0x1A8D #define BANDRICH_PRODUCT_C100_1 0x1002 #define BANDRICH_PRODUCT_C100_2 0x1003 @@ -192,6 +195,7 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, + { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, { 
USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c index 958f5b17847..b9b8ede61fb 100644 --- a/drivers/usb/storage/protocol.c +++ b/drivers/usb/storage/protocol.c @@ -170,7 +170,6 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer, if (!sg) sg = scsi_sglist(srb); - buflen = min(buflen, scsi_bufflen(srb)); /* This loop handles a single s-g list entry, which may * include multiple pages. Find the initial page structure @@ -232,6 +231,7 @@ void usb_stor_set_xfer_buf(unsigned char *buffer, unsigned int offset = 0; struct scatterlist *sg = NULL; + buflen = min(buflen, scsi_bufflen(srb)); buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset, TO_XFER_BUF); if (buflen < scsi_bufflen(srb)) diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c index e83dfba7e63..742b5c656d6 100644 --- a/drivers/video/sm501fb.c +++ b/drivers/video/sm501fb.c @@ -237,12 +237,14 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, /* check we can fit these values into the registers */ - if (var->hsync_len > 255 || var->vsync_len > 255) + if (var->hsync_len > 255 || var->vsync_len > 63) return -EINVAL; - if ((var->xres + var->right_margin) >= 4096) + /* hdisplay end and hsync start */ + if ((var->xres + var->right_margin) > 4096) return -EINVAL; + /* vdisplay end and vsync start */ if ((var->yres + var->lower_margin) > 2048) return -EINVAL; @@ -281,19 +283,21 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, var->blue.length = var->bits_per_pixel; var->blue.offset = 0; var->transp.length = 0; + var->transp.offset = 0; break; case 16: if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) { - var->red.offset = 11; - var->green.offset = 5; - var->blue.offset = 0; - } else { var->blue.offset = 11; var->green.offset = 5; var->red.offset = 0; + } else { + var->red.offset = 11; + var->green.offset = 5; + var->blue.offset = 0; } + var->transp.offset = 0; var->red.length = 5; var->green.length = 6; @@ -397,7 +401,7 @@ static int sm501fb_set_par_common(struct fb_info *info, break; case 16: - info->fix.visual = FB_VISUAL_DIRECTCOLOR; + info->fix.visual = FB_VISUAL_TRUECOLOR; break; case 32: @@ -613,6 +617,7 @@ static int sm501fb_set_par_crt(struct fb_info *info) case 16: control |= SM501_DC_CRT_CONTROL_16BPP; + sm501fb_setup_gamma(fbi, SM501_DC_CRT_PALETTE); break; case 32: @@ -750,6 +755,7 @@ static int sm501fb_set_par_pnl(struct fb_info *info) case 16: control |= SM501_DC_PANEL_CONTROL_16BPP; + sm501fb_setup_gamma(fbi, SM501_DC_PANEL_PALETTE); break; case 32: diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c index 70fb4ee2b42..919ce75db9e 100644 --- a/drivers/video/tridentfb.c +++ b/drivers/video/tridentfb.c @@ -564,19 +564,46 @@ static inline void write3CE(int reg, unsigned char val) t_outb(val, 0x3CF); } -static inline void enable_mmio(void) +static void enable_mmio(void) { + unsigned char tmp; + /* Goto New Mode */ outb(0x0B, 0x3C4); inb(0x3C5); /* Unprotect registers */ outb(NewMode1, 0x3C4); + tmp = inb(0x3C5); outb(0x80, 0x3C5); /* Enable MMIO */ outb(PCIReg, 0x3D4); outb(inb(0x3D5) | 0x01, 0x3D5); + + t_outb(NewMode1, 0x3C4); + t_outb(tmp, 0x3C5); +} + +static void disable_mmio(void) +{ + unsigned char tmp; + + /* Goto New Mode */ + t_outb(0x0B, 0x3C4); + t_inb(0x3C5); + + /* Unprotect registers */ + t_outb(NewMode1, 0x3C4); + tmp = t_inb(0x3C5); + 
t_outb(0x80, 0x3C5); + + /* Disable MMIO */ + t_outb(PCIReg, 0x3D4); + t_outb(t_inb(0x3D5) & ~0x01, 0x3D5); + + outb(NewMode1, 0x3C4); + outb(tmp, 0x3C5); } #define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F) @@ -1239,9 +1266,9 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); if (!default_par.io_virt) { - release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); debug("ioremap failed\n"); - return -1; + err = -1; + goto out_unmap1; } enable_mmio(); @@ -1252,25 +1279,21 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) { debug("request_mem_region failed!\n"); + disable_mmio(); err = -1; - goto out_unmap; + goto out_unmap1; } fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start, tridentfb_fix.smem_len); if (!fb_info.screen_base) { - release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); debug("ioremap failed\n"); err = -1; - goto out_unmap; + goto out_unmap2; } output("%s board found\n", pci_name(dev)); -#if 0 - output("Trident board found : mem = %X, io = %X, mem_v = %X, io_v = %X\n", - tridentfb_fix.smem_start, tridentfb_fix.mmio_start, fb_info.screen_base, default_par.io_virt); -#endif displaytype = get_displaytype(); if (flatpanel) @@ -1288,9 +1311,12 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) { err = -EINVAL; - goto out_unmap; + goto out_unmap2; } - fb_alloc_cmap(&fb_info.cmap, 256, 0); + err = fb_alloc_cmap(&fb_info.cmap, 256, 0); + if (err < 0) + goto out_unmap2; + if (defaultaccel && acc) default_var.accel_flags |= FB_ACCELF_TEXT; else @@ -1300,19 +1326,24 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, fb_info.device = &dev->dev; if (register_framebuffer(&fb_info) < 0) { printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n"); + fb_dealloc_cmap(&fb_info.cmap); err = -EINVAL; - goto out_unmap; + goto out_unmap2; } output("fb%d: %s frame buffer device %dx%d-%dbpp\n", fb_info.node, fb_info.fix.id, default_var.xres, default_var.yres, default_var.bits_per_pixel); return 0; -out_unmap: - if (default_par.io_virt) - iounmap(default_par.io_virt); +out_unmap2: if (fb_info.screen_base) iounmap(fb_info.screen_base); + release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); + disable_mmio(); +out_unmap1: + if (default_par.io_virt) + iounmap(default_par.io_virt); + release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); return err; } @@ -1323,7 +1354,7 @@ static void __devexit trident_pci_remove(struct pci_dev *dev) iounmap(par->io_virt); iounmap(fb_info.screen_base); release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); - release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); + release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); } /* List of boards that we are trying to support */ diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c index 688e435b4d9..10211e49300 100644 --- a/drivers/w1/masters/ds1wm.c +++ b/drivers/w1/masters/ds1wm.c @@ -17,6 +17,7 @@ #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/clk.h> +#include <linux/err.h> #include <linux/delay.h> #include <linux/ds1wm.h> @@ -102,12 +103,12 @@ struct ds1wm_data { static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, u8 val) { - 
__raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); + __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); } static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) { - return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); + return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); } @@ -149,8 +150,8 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data) timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); ds1wm_data->reset_complete = NULL; if (!timeleft) { - dev_dbg(&ds1wm_data->pdev->dev, "reset failed\n"); - return 1; + dev_err(&ds1wm_data->pdev->dev, "reset failed\n"); + return 1; } /* Wait for the end of the reset. According to the specs, the time @@ -167,11 +168,11 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data) (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); if (!ds1wm_data->slave_present) { - dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); - return 1; - } + dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); + return 1; + } - return 0; + return 0; } static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) @@ -334,7 +335,7 @@ static int ds1wm_probe(struct platform_device *pdev) if (!pdev) return -ENODEV; - ds1wm_data = kzalloc(sizeof (*ds1wm_data), GFP_KERNEL); + ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL); if (!ds1wm_data) return -ENOMEM; @@ -374,8 +375,8 @@ static int ds1wm_probe(struct platform_device *pdev) goto err1; ds1wm_data->clk = clk_get(&pdev->dev, "ds1wm"); - if (!ds1wm_data->clk) { - ret = -ENOENT; + if (IS_ERR(ds1wm_data->clk)) { + ret = PTR_ERR(ds1wm_data->clk); goto err2; } |
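[Editor's note] The closing ds1wm hunk swaps the NULL test on clk_get() for IS_ERR()/PTR_ERR(), matching the way the clk API reports failure through an error-encoded pointer rather than NULL. Below is a standalone sketch of that error-pointer convention, with user-space stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR() and a hypothetical fake_clk_get() in place of the real API.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* User-space stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() */
#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(intptr_t)(err))
#define IS_ERR(ptr)     ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(ptr)    ((long)(intptr_t)(ptr))

struct clk { int dummy; };

/* Hypothetical getter: fails the same way clk_get() does in the kernel */
static struct clk *fake_clk_get(const char *name)
{
        (void)name;
        return ERR_PTR(-ENOENT);        /* pretend the clock does not exist */
}

int main(void)
{
        struct clk *clk = fake_clk_get("ds1wm");
        long ret = 0;

        if (IS_ERR(clk)) {              /* a NULL check would miss this failure */
                ret = PTR_ERR(clk);
                fprintf(stderr, "clk_get failed: %ld\n", ret);
        }
        return ret ? 1 : 0;
}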