author    David S. Miller <davem@davemloft.net>  2008-02-23 20:38:20 -0800
committer David S. Miller <davem@davemloft.net>  2008-02-23 20:38:20 -0800
commit    8d3c202be23c5a915f7053ebd4e96f44700c6a62 (patch)
tree      e0f017aff86d3ad0b858fe85f44e11096087ed00 /drivers/ata
parent    1b04ab4597725f75f94942da9aa40daa7b9a4bd9 (diff)
parent    038eb0ea04b245351be34b0ae76b55eee4603989 (diff)
Merge branch 'master' of ../linux-2.6/
Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/ahci.c            7
-rw-r--r--  drivers/ata/ata_piix.c        4
-rw-r--r--  drivers/ata/libata-core.c   633
-rw-r--r--  drivers/ata/libata-eh.c       8
-rw-r--r--  drivers/ata/libata-scsi.c    74
-rw-r--r--  drivers/ata/libata.h          1
-rw-r--r--  drivers/ata/pata_acpi.c       4
-rw-r--r--  drivers/ata/pata_amd.c        7
-rw-r--r--  drivers/ata/pata_cs5536.c    10
-rw-r--r--  drivers/ata/pata_icside.c     8
-rw-r--r--  drivers/ata/pata_jmicron.c    3
-rw-r--r--  drivers/ata/pata_legacy.c    46
-rw-r--r--  drivers/ata/pata_marvell.c    4
-rw-r--r--  drivers/ata/pata_scc.c        2
-rw-r--r--  drivers/ata/sata_fsl.c       13
-rw-r--r--  drivers/ata/sata_mv.c        19
-rw-r--r--  drivers/ata/sata_promise.c    2
-rw-r--r--  drivers/ata/sata_sil24.c      5
-rw-r--r--  drivers/ata/sata_via.c        4
19 files changed, 524 insertions(+), 330 deletions(-)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 29e71bddd6f..6dd12f7019a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1932,7 +1932,7 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
u32 ctl;
- if (mesg.event == PM_EVENT_SUSPEND) {
+ if (mesg.event & PM_EVENT_SLEEP) {
/* AHCI spec rev1.1 section 8.3.3:
* Software must disable interrupts prior to requesting a
* transition of the HBA to D3 state.
@@ -1975,16 +1975,11 @@ static int ahci_port_start(struct ata_port *ap)
struct ahci_port_priv *pp;
void *mem;
dma_addr_t mem_dma;
- int rc;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
- rc = ata_pad_alloc(ap, dev);
- if (rc)
- return rc;
-
mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
GFP_KERNEL);
if (!mem)
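
The first ahci.c hunk above switches the suspend check from an exact PM_EVENT_SUSPEND comparison to a PM_EVENT_SLEEP bitmask test, so the interrupt-disable step required by AHCI spec rev 1.1 section 8.3.3 presumably also runs for hibernation; the second hunk drops the per-port DMA pad allocation, matching the removal of the ATAPI padding machinery from libata-core.c later in this diff. A minimal sketch of the bitmask test, with the event values assumed from include/linux/pm.h of this kernel generation (not part of the patch):

	/* Illustrative only; constants assumed from include/linux/pm.h of this era. */
	#define PM_EVENT_SUSPEND	0x0002
	#define PM_EVENT_HIBERNATE	0x0004
	#define PM_EVENT_SLEEP		(PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)

	static int sleep_transition(unsigned int pm_event)
	{
		/* matches suspend-to-RAM as well as hibernation */
		return (pm_event & PM_EVENT_SLEEP) != 0;
	}

With the old equality test, a hibernation event would not match and the controller could be put into D3 with interrupts still enabled.
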
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 9c2515f67de..fae8404254c 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1339,7 +1339,7 @@ static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
* cycles and power trying to do something to the sleeping
* beauty.
*/
- if (piix_broken_suspend() && mesg.event == PM_EVENT_SUSPEND) {
+ if (piix_broken_suspend() && (mesg.event & PM_EVENT_SLEEP)) {
pci_save_state(pdev);
/* mark its power state as "unknown", since we don't
@@ -1652,7 +1652,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
u8 tmp;
pci_read_config_byte(pdev, PIIX_SCC, &tmp);
if (tmp == PIIX_AHCI_DEVICE) {
- int rc = piix_disable_ahci(pdev);
+ rc = piix_disable_ahci(pdev);
if (rc)
return rc;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 004dae4ea5b..4cf8662df99 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -87,6 +87,28 @@ static struct workqueue_struct *ata_wq;
struct workqueue_struct *ata_aux_wq;
+struct ata_force_param {
+ const char *name;
+ unsigned int cbl;
+ int spd_limit;
+ unsigned long xfer_mask;
+ unsigned int horkage_on;
+ unsigned int horkage_off;
+};
+
+struct ata_force_ent {
+ int port;
+ int device;
+ struct ata_force_param param;
+};
+
+static struct ata_force_ent *ata_force_tbl;
+static int ata_force_tbl_size;
+
+static char ata_force_param_buf[PAGE_SIZE] __initdata;
+module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0444);
+MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
+
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
@@ -130,6 +152,179 @@ MODULE_VERSION(DRV_VERSION);
/**
+ * ata_force_cbl - force cable type according to libata.force
+ * @ap: ATA port of interest
+ *
+ * Force cable type according to libata.force and whine about it.
+ * The last entry which has matching port number is used, so it
+ * can be specified as part of device force parameters. For
+ * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
+ * same effect.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_force_cbl(struct ata_port *ap)
+{
+ int i;
+
+ for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+ if (fe->port != -1 && fe->port != ap->print_id)
+ continue;
+
+ if (fe->param.cbl == ATA_CBL_NONE)
+ continue;
+
+ ap->cbl = fe->param.cbl;
+ ata_port_printk(ap, KERN_NOTICE,
+ "FORCE: cable set to %s\n", fe->param.name);
+ return;
+ }
+}
+
+/**
+ * ata_force_spd_limit - force SATA spd limit according to libata.force
+ * @link: ATA link of interest
+ *
+ * Force SATA spd limit according to libata.force and whine about
+ * it. When only the port part is specified (e.g. 1:), the limit
+ * applies to all links connected to both the host link and all
+ * fan-out ports connected via PMP. If the device part is
+ * specified as 0 (e.g. 1.00:), it specifies the first fan-out
+ * link not the host link. Device number 15 always points to the
+ * host link whether PMP is attached or not.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_spd_limit(struct ata_link *link)
+{
+ int linkno, i;
+
+ if (ata_is_host_link(link))
+ linkno = 15;
+ else
+ linkno = link->pmp;
+
+ for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+ if (fe->port != -1 && fe->port != link->ap->print_id)
+ continue;
+
+ if (fe->device != -1 && fe->device != linkno)
+ continue;
+
+ if (!fe->param.spd_limit)
+ continue;
+
+ link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
+ ata_link_printk(link, KERN_NOTICE,
+ "FORCE: PHY spd limit set to %s\n", fe->param.name);
+ return;
+ }
+}
+
+/**
+ * ata_force_xfermask - force xfermask according to libata.force
+ * @dev: ATA device of interest
+ *
+ * Force xfer_mask according to libata.force and whine about it.
+ * For consistency with link selection, device number 15 selects
+ * the first device connected to the host link.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_xfermask(struct ata_device *dev)
+{
+ int devno = dev->link->pmp + dev->devno;
+ int alt_devno = devno;
+ int i;
+
+ /* allow n.15 for the first device attached to host port */
+ if (ata_is_host_link(dev->link) && devno == 0)
+ alt_devno = 15;
+
+ for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+ unsigned long pio_mask, mwdma_mask, udma_mask;
+
+ if (fe->port != -1 && fe->port != dev->link->ap->print_id)
+ continue;
+
+ if (fe->device != -1 && fe->device != devno &&
+ fe->device != alt_devno)
+ continue;
+
+ if (!fe->param.xfer_mask)
+ continue;
+
+ ata_unpack_xfermask(fe->param.xfer_mask,
+ &pio_mask, &mwdma_mask, &udma_mask);
+ if (udma_mask)
+ dev->udma_mask = udma_mask;
+ else if (mwdma_mask) {
+ dev->udma_mask = 0;
+ dev->mwdma_mask = mwdma_mask;
+ } else {
+ dev->udma_mask = 0;
+ dev->mwdma_mask = 0;
+ dev->pio_mask = pio_mask;
+ }
+
+ ata_dev_printk(dev, KERN_NOTICE,
+ "FORCE: xfer_mask set to %s\n", fe->param.name);
+ return;
+ }
+}
+
+/**
+ * ata_force_horkage - force horkage according to libata.force
+ * @dev: ATA device of interest
+ *
+ * Force horkage according to libata.force and whine about it.
+ * For consistency with link selection, device number 15 selects
+ * the first device connected to the host link.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_horkage(struct ata_device *dev)
+{
+ int devno = dev->link->pmp + dev->devno;
+ int alt_devno = devno;
+ int i;
+
+ /* allow n.15 for the first device attached to host port */
+ if (ata_is_host_link(dev->link) && devno == 0)
+ alt_devno = 15;
+
+ for (i = 0; i < ata_force_tbl_size; i++) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+ if (fe->port != -1 && fe->port != dev->link->ap->print_id)
+ continue;
+
+ if (fe->device != -1 && fe->device != devno &&
+ fe->device != alt_devno)
+ continue;
+
+ if (!(~dev->horkage & fe->param.horkage_on) &&
+ !(dev->horkage & fe->param.horkage_off))
+ continue;
+
+ dev->horkage |= fe->param.horkage_on;
+ dev->horkage &= ~fe->param.horkage_off;
+
+ ata_dev_printk(dev, KERN_NOTICE,
+ "FORCE: horkage modified (%s)\n", fe->param.name);
+ }
+}
+
+/**
* ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
* @tf: Taskfile to convert
* @pmp: Port multiplier port
@@ -2067,6 +2262,7 @@ int ata_dev_configure(struct ata_device *dev)
/* set horkage */
dev->horkage |= ata_dev_blacklisted(dev);
+ ata_force_horkage(dev);
/* let ACPI work its magic */
rc = ata_acpi_on_devcfg(dev);
@@ -3150,6 +3346,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
mode_mask = ATA_DMA_MASK_CFA;
ata_dev_xfermask(dev);
+ ata_force_xfermask(dev);
pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
@@ -4190,6 +4387,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* Devices which report 1 sector over size HPA */
{ "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
{ "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
+ { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
/* Devices which get the IVB wrong */
{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
@@ -4492,30 +4690,13 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct scatterlist *sg = qc->sg;
int dir = qc->dma_dir;
- void *pad_buf = NULL;
WARN_ON(sg == NULL);
- VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem);
+ VPRINTK("unmapping %u sg elements\n", qc->n_elem);
- /* if we padded the buffer out to 32-bit bound, and data
- * xfer direction is from-device, we must copy from the
- * pad buffer back into the supplied buffer
- */
- if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
- pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
-
- if (qc->mapped_n_elem)
- dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir);
- /* restore last sg */
- if (qc->last_sg)
- *qc->last_sg = qc->saved_last_sg;
- if (pad_buf) {
- struct scatterlist *psg = &qc->extra_sg[1];
- void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
- memcpy(addr + psg->offset, pad_buf, qc->pad_len);
- kunmap_atomic(addr, KM_IRQ0);
- }
+ if (qc->n_elem)
+ dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
qc->flags &= ~ATA_QCFLAG_DMAMAP;
qc->sg = NULL;
@@ -4658,43 +4839,6 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
}
/**
- * atapi_qc_may_overflow - Check whether data transfer may overflow
- * @qc: ATA command in question
- *
- * ATAPI commands which transfer variable length data to host
- * might overflow due to application error or hardare bug. This
- * function checks whether overflow should be drained and ignored
- * for @qc.
- *
- * LOCKING:
- * None.
- *
- * RETURNS:
- * 1 if @qc may overflow; otherwise, 0.
- */
-static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
-{
- if (qc->tf.protocol != ATAPI_PROT_PIO &&
- qc->tf.protocol != ATAPI_PROT_DMA)
- return 0;
-
- if (qc->tf.flags & ATA_TFLAG_WRITE)
- return 0;
-
- switch (qc->cdb[0]) {
- case READ_10:
- case READ_12:
- case WRITE_10:
- case WRITE_12:
- case GPCMD_READ_CD:
- case GPCMD_READ_CD_MSF:
- return 0;
- }
-
- return 1;
-}
-
-/**
* ata_std_qc_defer - Check whether a qc needs to be deferred
* @qc: ATA command in question
*
@@ -4781,97 +4925,6 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
qc->cursg = qc->sg;
}
-static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
- unsigned int *n_elem_extra,
- unsigned int *nbytes_extra)
-{
- struct ata_port *ap = qc->ap;
- unsigned int n_elem = qc->n_elem;
- struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL;
-
- *n_elem_extra = 0;
- *nbytes_extra = 0;
-
- /* needs padding? */
- qc->pad_len = qc->nbytes & 3;
-
- if (likely(!qc->pad_len))
- return n_elem;
-
- /* locate last sg and save it */
- lsg = sg_last(qc->sg, n_elem);
- qc->last_sg = lsg;
- qc->saved_last_sg = *lsg;
-
- sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg));
-
- if (qc->pad_len) {
- struct scatterlist *psg = &qc->extra_sg[1];
- void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
- unsigned int offset;
-
- WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
-
- memset(pad_buf, 0, ATA_DMA_PAD_SZ);
-
- /* psg->page/offset are used to copy to-be-written
- * data in this function or read data in ata_sg_clean.
- */
- offset = lsg->offset + lsg->length - qc->pad_len;
- sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
- qc->pad_len, offset_in_page(offset));
-
- if (qc->tf.flags & ATA_TFLAG_WRITE) {
- void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
- memcpy(pad_buf, addr + psg->offset, qc->pad_len);
- kunmap_atomic(addr, KM_IRQ0);
- }
-
- sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
- sg_dma_len(psg) = ATA_DMA_PAD_SZ;
-
- /* Trim the last sg entry and chain the original and
- * padding sg lists.
- *
- * Because chaining consumes one sg entry, one extra
- * sg entry is allocated and the last sg entry is
- * copied to it if the length isn't zero after padded
- * amount is removed.
- *
- * If the last sg entry is completely replaced by
- * padding sg entry, the first sg entry is skipped
- * while chaining.
- */
- lsg->length -= qc->pad_len;
- if (lsg->length) {
- copy_lsg = &qc->extra_sg[0];
- tsg = &qc->extra_sg[0];
- } else {
- n_elem--;
- tsg = &qc->extra_sg[1];
- }
-
- esg = &qc->extra_sg[1];
-
- (*n_elem_extra)++;
- (*nbytes_extra) += 4 - qc->pad_len;
- }
-
- if (copy_lsg)
- sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset);
-
- sg_chain(lsg, 1, tsg);
- sg_mark_end(esg);
-
- /* sglist can't start with chaining sg entry, fast forward */
- if (qc->sg == lsg) {
- qc->sg = tsg;
- qc->cursg = tsg;
- }
-
- return n_elem;
-}
-
/**
* ata_sg_setup - DMA-map the scatter-gather table associated with a command.
* @qc: Command with scatter-gather table to be mapped.
@@ -4888,26 +4941,17 @@ static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- unsigned int n_elem, n_elem_extra, nbytes_extra;
+ unsigned int n_elem;
VPRINTK("ENTER, ata%u\n", ap->print_id);
- n_elem = ata_sg_setup_extra(qc, &n_elem_extra, &nbytes_extra);
+ n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
+ if (n_elem < 1)
+ return -1;
- if (n_elem) {
- n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir);
- if (n_elem < 1) {
- /* restore last sg */
- if (qc->last_sg)
- *qc->last_sg = qc->saved_last_sg;
- return -1;
- }
- DPRINTK("%d sg elements mapped\n", n_elem);
- }
+ DPRINTK("%d sg elements mapped\n", n_elem);
- qc->n_elem = qc->mapped_n_elem = n_elem;
- qc->n_elem += n_elem_extra;
- qc->nbytes += nbytes_extra;
+ qc->n_elem = n_elem;
qc->flags |= ATA_QCFLAG_DMAMAP;
return 0;
@@ -5145,46 +5189,22 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
*/
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
- int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+ int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
struct ata_port *ap = qc->ap;
- struct ata_eh_info *ehi = &qc->dev->link->eh_info;
+ struct ata_device *dev = qc->dev;
+ struct ata_eh_info *ehi = &dev->link->eh_info;
struct scatterlist *sg;
struct page *page;
unsigned char *buf;
- unsigned int offset, count;
+ unsigned int offset, count, consumed;
next_sg:
sg = qc->cursg;
if (unlikely(!sg)) {
- /*
- * The end of qc->sg is reached and the device expects
- * more data to transfer. In order not to overrun qc->sg
- * and fulfill length specified in the byte count register,
- * - for read case, discard trailing data from the device
- * - for write case, padding zero data to the device
- */
- u16 pad_buf[1] = { 0 };
- unsigned int i;
-
- if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
- ata_ehi_push_desc(ehi, "too much trailing data "
- "buf=%u cur=%u bytes=%u",
- qc->nbytes, qc->curbytes, bytes);
- return -1;
- }
-
- /* overflow is exptected for misc ATAPI commands */
- if (bytes && !atapi_qc_may_overflow(qc))
- ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
- "trailing data (cdb=%02x nbytes=%u)\n",
- bytes, qc->cdb[0], qc->nbytes);
-
- for (i = 0; i < (bytes + 1) / 2; i++)
- ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
-
- qc->curbytes += bytes;
-
- return 0;
+ ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
+ "buf=%u cur=%u bytes=%u",
+ qc->nbytes, qc->curbytes, bytes);
+ return -1;
}
page = sg_page(sg);
@@ -5210,18 +5230,16 @@ next_sg:
buf = kmap_atomic(page, KM_IRQ0);
/* do the actual data transfer */
- ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
+ consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
kunmap_atomic(buf, KM_IRQ0);
local_irq_restore(flags);
} else {
buf = page_address(page);
- ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
+ consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
}
- bytes -= count;
- if ((count & 1) && bytes)
- bytes--;
+ bytes -= min(bytes, consumed);
qc->curbytes += count;
qc->cursg_ofs += count;
@@ -5230,9 +5248,11 @@ next_sg:
qc->cursg_ofs = 0;
}
+ /* consumed can be larger than count only for the last transfer */
+ WARN_ON(qc->cursg && count != consumed);
+
if (bytes)
goto next_sg;
-
return 0;
}
@@ -5250,6 +5270,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *dev = qc->dev;
+ struct ata_eh_info *ehi = &dev->link->eh_info;
unsigned int ireason, bc_lo, bc_hi, bytes;
int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
@@ -5267,26 +5288,28 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
/* shall be cleared to zero, indicating xfer of data */
if (unlikely(ireason & (1 << 0)))
- goto err_out;
+ goto atapi_check;
/* make sure transfer direction matches expected */
i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
if (unlikely(do_write != i_write))
- goto err_out;
+ goto atapi_check;
if (unlikely(!bytes))
- goto err_out;
+ goto atapi_check;
VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
- if (__atapi_pio_bytes(qc, bytes))
+ if (unlikely(__atapi_pio_bytes(qc, bytes)))
goto err_out;
ata_altstatus(ap); /* flush */
return;
-err_out:
- ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
+ atapi_check:
+ ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
+ ireason, bytes);
+ err_out:
qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
}
@@ -5971,9 +5994,6 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
*/
BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
- /* ata_sg_setup() may update nbytes */
- qc->raw_nbytes = qc->nbytes;
-
if (ata_is_dma(prot) || (ata_is_pio(prot) &&
(ap->flags & ATA_FLAG_PIO_DMA)))
if (ata_sg_setup(qc))
@@ -6582,19 +6602,12 @@ void ata_host_resume(struct ata_host *host)
int ata_port_start(struct ata_port *ap)
{
struct device *dev = ap->dev;
- int rc;
ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
GFP_KERNEL);
if (!ap->prd)
return -ENOMEM;
- rc = ata_pad_alloc(ap, dev);
- if (rc)
- return rc;
-
- DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
- (unsigned long long)ap->prd_dma);
return 0;
}
@@ -6681,7 +6694,8 @@ void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
*/
int sata_link_init_spd(struct ata_link *link)
{
- u32 scontrol, spd;
+ u32 scontrol;
+ u8 spd;
int rc;
rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
@@ -6692,6 +6706,8 @@ int sata_link_init_spd(struct ata_link *link)
if (spd)
link->hw_sata_spd_limit &= (1 << spd) - 1;
+ ata_force_spd_limit(link);
+
link->sata_spd_limit = link->hw_sata_spd_limit;
return 0;
@@ -7086,7 +7102,6 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
DPRINTK("probe begin\n");
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- int rc;
/* probe */
if (ap->ops->error_handler) {
@@ -7353,7 +7368,7 @@ void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
pci_save_state(pdev);
pci_disable_device(pdev);
- if (mesg.event == PM_EVENT_SUSPEND)
+ if (mesg.event & PM_EVENT_SLEEP)
pci_set_power_state(pdev, PCI_D3hot);
}
@@ -7403,10 +7418,187 @@ int ata_pci_device_resume(struct pci_dev *pdev)
#endif /* CONFIG_PCI */
+static int __init ata_parse_force_one(char **cur,
+ struct ata_force_ent *force_ent,
+ const char **reason)
+{
+ /* FIXME: Currently, there's no way to tag init const data and
+ * using __initdata causes build failure on some versions of
+ * gcc. Once __initdataconst is implemented, add const to the
+ * following structure.
+ */
+ static struct ata_force_param force_tbl[] __initdata = {
+ { "40c", .cbl = ATA_CBL_PATA40 },
+ { "80c", .cbl = ATA_CBL_PATA80 },
+ { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
+ { "unk", .cbl = ATA_CBL_PATA_UNK },
+ { "ign", .cbl = ATA_CBL_PATA_IGN },
+ { "sata", .cbl = ATA_CBL_SATA },
+ { "1.5Gbps", .spd_limit = 1 },
+ { "3.0Gbps", .spd_limit = 2 },
+ { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
+ { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
+ { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
+ { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
+ { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
+ { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
+ { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
+ { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
+ { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
+ { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
+ { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
+ { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
+ { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
+ { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
+ { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
+ { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
+ { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
+ { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
+ { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
+ { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
+ { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
+ { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
+ { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
+ { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
+ { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
+ { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
+ { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
+ { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
+ { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
+ { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
+ { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
+ { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
+ { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
+ { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
+ { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
+ { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
+ };
+ char *start = *cur, *p = *cur;
+ char *id, *val, *endp;
+ const struct ata_force_param *match_fp = NULL;
+ int nr_matches = 0, i;
+
+ /* find where this param ends and update *cur */
+ while (*p != '\0' && *p != ',')
+ p++;
+
+ if (*p == '\0')
+ *cur = p;
+ else
+ *cur = p + 1;
+
+ *p = '\0';
+
+ /* parse */
+ p = strchr(start, ':');
+ if (!p) {
+ val = strstrip(start);
+ goto parse_val;
+ }
+ *p = '\0';
+
+ id = strstrip(start);
+ val = strstrip(p + 1);
+
+ /* parse id */
+ p = strchr(id, '.');
+ if (p) {
+ *p++ = '\0';
+ force_ent->device = simple_strtoul(p, &endp, 10);
+ if (p == endp || *endp != '\0') {
+ *reason = "invalid device";
+ return -EINVAL;
+ }
+ }
+
+ force_ent->port = simple_strtoul(id, &endp, 10);
+ if (p == endp || *endp != '\0') {
+ *reason = "invalid port/link";
+ return -EINVAL;
+ }
+
+ parse_val:
+ /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
+ for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
+ const struct ata_force_param *fp = &force_tbl[i];
+
+ if (strncasecmp(val, fp->name, strlen(val)))
+ continue;
+
+ nr_matches++;
+ match_fp = fp;
+
+ if (strcasecmp(val, fp->name) == 0) {
+ nr_matches = 1;
+ break;
+ }
+ }
+
+ if (!nr_matches) {
+ *reason = "unknown value";
+ return -EINVAL;
+ }
+ if (nr_matches > 1) {
+ *reason = "ambigious value";
+ return -EINVAL;
+ }
+
+ force_ent->param = *match_fp;
+
+ return 0;
+}
+
+static void __init ata_parse_force_param(void)
+{
+ int idx = 0, size = 1;
+ int last_port = -1, last_device = -1;
+ char *p, *cur, *next;
+
+ /* calculate maximum number of params and allocate force_tbl */
+ for (p = ata_force_param_buf; *p; p++)
+ if (*p == ',')
+ size++;
+
+ ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
+ if (!ata_force_tbl) {
+ printk(KERN_WARNING "ata: failed to extend force table, "
+ "libata.force ignored\n");
+ return;
+ }
+
+ /* parse and populate the table */
+ for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
+ const char *reason = "";
+ struct ata_force_ent te = { .port = -1, .device = -1 };
+
+ next = cur;
+ if (ata_parse_force_one(&next, &te, &reason)) {
+ printk(KERN_WARNING "ata: failed to parse force "
+ "parameter \"%s\" (%s)\n",
+ cur, reason);
+ continue;
+ }
+
+ if (te.port == -1) {
+ te.port = last_port;
+ te.device = last_device;
+ }
+
+ ata_force_tbl[idx++] = te;
+
+ last_port = te.port;
+ last_device = te.device;
+ }
+
+ ata_force_tbl_size = idx;
+}
static int __init ata_init(void)
{
ata_probe_timeout *= HZ;
+
+ ata_parse_force_param();
+
ata_wq = create_workqueue("ata");
if (!ata_wq)
return -ENOMEM;
@@ -7423,6 +7615,7 @@ static int __init ata_init(void)
static void __exit ata_exit(void)
{
+ kfree(ata_force_tbl);
destroy_workqueue(ata_wq);
destroy_workqueue(ata_aux_wq);
}
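
The force-parameter machinery added to libata-core.c above is driven by the libata.force module parameter described in the MODULE_PARM_DESC near the top of the file: a comma-separated list of entries of the form [port[.device]:]value, where the last matching entry wins. For instance, booting with "libata.force=40c" would force a 40-wire cable type on every port, while "libata.force=1.00:noncq" would set the NONCQ horkage on device 0 behind port 1. As a rough illustration (not taken from the patch), the second example would end up in the force table as something like:

	/* Illustrative only; field values assumed from the force_tbl above. */
	static const struct ata_force_ent example_entry = {
		.port	= 1,
		.device	= 0,
		.param	= { .name = "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
	};

ata_force_horkage() then applies such an entry when the matching device is configured.
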
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 4e31071acc0..698ce2cea52 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2393,9 +2393,11 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
}
/* PDIAG- should have been released, ask cable type if post-reset */
- if (ata_is_host_link(link) && ap->ops->cable_detect &&
- (ehc->i.flags & ATA_EHI_DID_RESET))
- ap->cbl = ap->ops->cable_detect(ap);
+ if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
+ if (ap->ops->cable_detect)
+ ap->cbl = ap->ops->cable_detect(ap);
+ ata_force_cbl(ap);
+ }
/* Configure new devices forward such that user doesn't see
* device detection messages backwards.
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index c02c490122d..f888babc828 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -826,30 +826,61 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
sdev->max_device_blocked = 1;
}
-static void ata_scsi_dev_config(struct scsi_device *sdev,
- struct ata_device *dev)
+/**
+ * atapi_drain_needed - Check whether data transfer may overflow
+ * @rq: request to be checked
+ *
+ * ATAPI commands which transfer variable length data to host
+ * might overflow due to application error or hardware bug. This
+ * function checks whether overflow should be drained and ignored
+ * for @rq.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * 1 if draining is needed; otherwise, 0.
+ */
+static int atapi_drain_needed(struct request *rq)
+{
+ if (likely(!blk_pc_request(rq)))
+ return 0;
+
+ if (!rq->data_len || (rq->cmd_flags & REQ_RW))
+ return 0;
+
+ return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
+}
+
+static int ata_scsi_dev_config(struct scsi_device *sdev,
+ struct ata_device *dev)
{
/* configure max sectors */
blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
- /* SATA DMA transfers must be multiples of 4 byte, so
- * we need to pad ATAPI transfers using an extra sg.
- * Decrement max hw segments accordingly.
- */
if (dev->class == ATA_DEV_ATAPI) {
struct request_queue *q = sdev->request_queue;
- blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
+ void *buf;
/* set the min alignment */
blk_queue_update_dma_alignment(sdev->request_queue,
ATA_DMA_PAD_SZ - 1);
- } else
+
+ /* configure draining */
+ buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
+ if (!buf) {
+ ata_dev_printk(dev, KERN_ERR,
+ "drain buffer allocation failed\n");
+ return -ENOMEM;
+ }
+
+ blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
+ } else {
/* ATA devices must be sector aligned */
blk_queue_update_dma_alignment(sdev->request_queue,
ATA_SECT_SIZE - 1);
-
- if (dev->class == ATA_DEV_ATA)
sdev->manage_start_stop = 1;
+ }
if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -861,6 +892,8 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
depth = min(ATA_MAX_QUEUE - 1, depth);
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
}
+
+ return 0;
}
/**
@@ -879,13 +912,14 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
+ int rc = 0;
ata_scsi_sdev_config(sdev);
if (dev)
- ata_scsi_dev_config(sdev, dev);
+ rc = ata_scsi_dev_config(sdev, dev);
- return 0;
+ return rc;
}
/**
@@ -905,6 +939,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
void ata_scsi_slave_destroy(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct request_queue *q = sdev->request_queue;
unsigned long flags;
struct ata_device *dev;
@@ -920,6 +955,10 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
ata_port_schedule_eh(ap);
}
spin_unlock_irqrestore(ap->lock, flags);
+
+ kfree(q->dma_drain_buffer);
+ q->dma_drain_buffer = NULL;
+ q->dma_drain_size = 0;
}
/**
@@ -1862,7 +1901,7 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
* spin_lock_irqsave(host lock)
*/
-unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
+static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen)
{
u8 pbuf[60];
@@ -2500,7 +2539,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
* want to set it properly, and for DMA where it is
* effectively meaningless.
*/
- nbytes = min(qc->nbytes, (unsigned int)63 * 1024);
+ nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024);
/* Most ATAPI devices which honor transfer chunk size don't
* behave according to the spec when odd chunk size which
@@ -3555,7 +3594,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
* @ap: Port to initialize
*
* Called just after data structures for each port are
- * initialized. Allocates DMA pad.
+ * initialized.
*
* May be used as the port_start() entry in ata_port_operations.
*
@@ -3564,7 +3603,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
*/
int ata_sas_port_start(struct ata_port *ap)
{
- return ata_pad_alloc(ap, ap->dev);
+ return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_start);
@@ -3572,8 +3611,6 @@ EXPORT_SYMBOL_GPL(ata_sas_port_start);
* ata_port_stop - Undo ata_sas_port_start()
* @ap: Port to shut down
*
- * Frees the DMA pad.
- *
* May be used as the port_stop() entry in ata_port_operations.
*
* LOCKING:
@@ -3582,7 +3619,6 @@ EXPORT_SYMBOL_GPL(ata_sas_port_start);
void ata_sas_port_stop(struct ata_port *ap)
{
- ata_pad_free(ap, ap->dev);
}
EXPORT_SYMBOL_GPL(ata_sas_port_stop);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 409ffb9af16..6036dedfe37 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -61,6 +61,7 @@ extern int atapi_passthru16;
extern int libata_fua;
extern int libata_noacpi;
extern int libata_allow_tpm;
+extern void ata_force_cbl(struct ata_port *ap);
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 244098a80ce..bdc3b9d7395 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -77,8 +77,8 @@ static int pacpi_cable_detect(struct ata_port *ap)
static void pacpi_error_handler(struct ata_port *ap)
{
- return ata_bmdma_drive_eh(ap, pacpi_pre_reset, ata_std_softreset,
- NULL, ata_std_postreset);
+ ata_bmdma_drive_eh(ap, pacpi_pre_reset, ata_std_softreset, NULL,
+ ata_std_postreset);
}
/**
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index ea567e2b170..4b8d9b592ca 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -146,9 +146,8 @@ static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
static void amd_error_handler(struct ata_port *ap)
{
- return ata_bmdma_drive_eh(ap, amd_pre_reset,
- ata_std_softreset, NULL,
- ata_std_postreset);
+ ata_bmdma_drive_eh(ap, amd_pre_reset, ata_std_softreset, NULL,
+ ata_std_postreset);
}
static int amd_cable_detect(struct ata_port *ap)
@@ -506,7 +505,6 @@ static struct ata_port_operations amd133_port_ops = {
static struct ata_port_operations nv100_port_ops = {
.set_piomode = nv100_set_piomode,
.set_dmamode = nv100_set_dmamode,
- .mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
@@ -541,7 +539,6 @@ static struct ata_port_operations nv100_port_ops = {
static struct ata_port_operations nv133_port_ops = {
.set_piomode = nv133_set_piomode,
.set_dmamode = nv133_set_dmamode,
- .mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index d753e568588..1c4ff9b52b5 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -40,7 +40,7 @@
#include <asm/msr.h>
#define DRV_NAME "pata_cs5536"
-#define DRV_VERSION "0.0.6"
+#define DRV_VERSION "0.0.7"
enum {
CFG = 0,
@@ -85,7 +85,7 @@ static const u8 pci_reg[4] = {
PCI_IDE_CFG, PCI_IDE_DTC, PCI_IDE_CAST, PCI_IDE_ETC,
};
-static inline int cs5536_read(struct pci_dev *pdev, int reg, int *val)
+static inline int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
{
if (unlikely(use_msr)) {
u32 dummy;
@@ -153,8 +153,8 @@ static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
struct ata_device *pair = ata_dev_pair(adev);
int mode = adev->pio_mode - XFER_PIO_0;
int cmdmode = mode;
- int dshift = ap->port_no ? IDE_D1_SHIFT : IDE_D0_SHIFT;
- int cshift = ap->port_no ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
+ int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
+ int cshift = adev->devno ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
u32 dtc, cast, etc;
if (pair)
@@ -201,7 +201,7 @@ static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev)
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 dtc, etc;
int mode = adev->dma_mode;
- int dshift = ap->port_no ? IDE_D1_SHIFT : IDE_D0_SHIFT;
+ int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
if (mode >= XFER_UDMA_0) {
cs5536_read(pdev, ETC, &etc);
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 5b8586dac63..f97068be2d7 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -304,12 +304,6 @@ static int icside_dma_init(struct pata_icside_info *info)
}
-static int pata_icside_port_start(struct ata_port *ap)
-{
- /* No PRD to alloc */
- return ata_pad_alloc(ap, ap->dev);
-}
-
static struct scsi_host_template pata_icside_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
@@ -389,8 +383,6 @@ static struct ata_port_operations pata_icside_port_ops = {
.irq_clear = ata_dummy_noret,
.irq_on = ata_irq_on,
- .port_start = pata_icside_port_start,
-
.bmdma_stop = pata_icside_bmdma_stop,
.bmdma_status = pata_icside_bmdma_status,
};
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 5b8174d9406..00bbbbd50e9 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -115,7 +115,8 @@ static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline)
static void jmicron_error_handler(struct ata_port *ap)
{
- return ata_bmdma_drive_eh(ap, jmicron_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+ ata_bmdma_drive_eh(ap, jmicron_pre_reset, ata_std_softreset, NULL,
+ ata_std_postreset);
}
/* No PIO or DMA methods needed for this device */
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 6c59969fd50..50fe08ebe23 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -774,14 +774,14 @@ static struct ata_port_operations opti82c46x_port_ops = {
static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_timing t;
- struct legacy_data *qdi = ap->host->private_data;
+ struct legacy_data *ld_qdi = ap->host->private_data;
int active, recovery;
u8 timing;
/* Get the timing data in cycles */
ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
- if (qdi->fast) {
+ if (ld_qdi->fast) {
active = 8 - FIT(t.active, 1, 8);
recovery = 18 - FIT(t.recover, 3, 18);
} else {
@@ -790,9 +790,9 @@ static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
timing = (recovery << 4) | active | 0x08;
- qdi->clock[adev->devno] = timing;
+ ld_qdi->clock[adev->devno] = timing;
- outb(timing, qdi->timing);
+ outb(timing, ld_qdi->timing);
}
/**
@@ -808,14 +808,14 @@ static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_timing t;
- struct legacy_data *qdi = ap->host->private_data;
+ struct legacy_data *ld_qdi = ap->host->private_data;
int active, recovery;
u8 timing;
/* Get the timing data in cycles */
ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
- if (qdi->fast) {
+ if (ld_qdi->fast) {
active = 8 - FIT(t.active, 1, 8);
recovery = 18 - FIT(t.recover, 3, 18);
} else {
@@ -824,12 +824,12 @@ static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
timing = (recovery << 4) | active | 0x08;
- qdi->clock[adev->devno] = timing;
+ ld_qdi->clock[adev->devno] = timing;
- outb(timing, qdi->timing + 2 * ap->port_no);
+ outb(timing, ld_qdi->timing + 2 * ap->port_no);
/* Clear the FIFO */
if (adev->class != ATA_DEV_ATA)
- outb(0x5F, qdi->timing + 3);
+ outb(0x5F, ld_qdi->timing + 3);
}
/**
@@ -845,14 +845,14 @@ static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_timing t;
- struct legacy_data *qdi = ap->host->private_data;
+ struct legacy_data *ld_qdi = ap->host->private_data;
int active, recovery;
u8 timing;
/* Get the timing data in cycles */
ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
- if (qdi->fast) {
+ if (ld_qdi->fast) {
active = 8 - FIT(t.active, 1, 8);
recovery = 18 - FIT(t.recover, 3, 18);
} else {
@@ -860,11 +860,11 @@ static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
recovery = 15 - FIT(t.recover, 0, 15);
}
timing = (recovery << 4) | active | 0x08;
- qdi->clock[adev->devno] = timing;
- outb(timing, qdi->timing + 2 * adev->devno);
+ ld_qdi->clock[adev->devno] = timing;
+ outb(timing, ld_qdi->timing + 2 * adev->devno);
/* Clear the FIFO */
if (adev->class != ATA_DEV_ATA)
- outb(0x5F, qdi->timing + 3);
+ outb(0x5F, ld_qdi->timing + 3);
}
/**
@@ -879,12 +879,12 @@ static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
- struct legacy_data *qdi = ap->host->private_data;
+ struct legacy_data *ld_qdi = ap->host->private_data;
- if (qdi->clock[adev->devno] != qdi->last) {
+ if (ld_qdi->clock[adev->devno] != ld_qdi->last) {
if (adev->pio_mode) {
- qdi->last = qdi->clock[adev->devno];
- outb(qdi->clock[adev->devno], qdi->timing +
+ ld_qdi->last = ld_qdi->clock[adev->devno];
+ outb(ld_qdi->clock[adev->devno], ld_qdi->timing +
2 * ap->port_no);
}
}
@@ -1037,12 +1037,12 @@ static u8 winbond_readcfg(unsigned long port, u8 reg)
static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_timing t;
- struct legacy_data *winbond = ap->host->private_data;
+ struct legacy_data *ld_winbond = ap->host->private_data;
int active, recovery;
u8 reg;
int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
- reg = winbond_readcfg(winbond->timing, 0x81);
+ reg = winbond_readcfg(ld_winbond->timing, 0x81);
/* Get the timing data in cycles */
if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
@@ -1053,7 +1053,7 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
active = (FIT(t.active, 3, 17) - 1) & 0x0F;
recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F;
timing = (active << 4) | recovery;
- winbond_writecfg(winbond->timing, timing, reg);
+ winbond_writecfg(ld_winbond->timing, timing, reg);
/* Load the setup timing */
@@ -1063,7 +1063,7 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
if (!ata_pio_need_iordy(adev))
reg |= 0x02; /* IORDY off */
reg |= (FIT(t.setup, 0, 3) << 6);
- winbond_writecfg(winbond->timing, timing + 1, reg);
+ winbond_writecfg(ld_winbond->timing, timing + 1, reg);
}
static int winbond_port(struct platform_device *dev,
@@ -1278,8 +1278,6 @@ static __init int legacy_init_one(struct legacy_probe *probe)
}
}
fail:
- if (host)
- ata_host_detach(host);
platform_device_unregister(pdev);
return ret;
}
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 9afc8a32b22..a81f25d8723 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -85,8 +85,8 @@ static int marvell_cable_detect(struct ata_port *ap)
static void marvell_error_handler(struct ata_port *ap)
{
- return ata_bmdma_drive_eh(ap, marvell_pre_reset, ata_std_softreset,
- NULL, ata_std_postreset);
+ ata_bmdma_drive_eh(ap, marvell_pre_reset, ata_std_softreset, NULL,
+ ata_std_postreset);
}
/* No PIO or DMA methods needed for this device */
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 55055b27524..6c016deeaed 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -1007,6 +1007,8 @@ static const struct ata_port_operations scc_pata_ops = {
.qc_issue = ata_qc_issue_prot,
.freeze = scc_bmdma_freeze,
+ .thaw = ata_bmdma_thaw,
+
.error_handler = scc_error_handler,
.post_internal_cmd = scc_bmdma_stop,
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index efcb66b6cce..9323dd0c7d8 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -601,21 +601,9 @@ static int sata_fsl_port_start(struct ata_port *ap)
if (!pp)
return -ENOMEM;
- /*
- * allocate per command dma alignment pad buffer, which is used
- * internally by libATA to ensure that all transfers ending on
- * unaligned boundaries are padded, to align on Dword boundaries
- */
- retval = ata_pad_alloc(ap, dev);
- if (retval) {
- kfree(pp);
- return retval;
- }
-
mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
GFP_KERNEL);
if (!mem) {
- ata_pad_free(ap, dev);
kfree(pp);
return -ENOMEM;
}
@@ -694,7 +682,6 @@ static void sata_fsl_port_stop(struct ata_port *ap)
dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ,
pp->cmdslot, pp->cmdslot_paddr);
- ata_pad_free(ap, dev);
kfree(pp);
}
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 04b571764af..6ebebde8454 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -870,7 +870,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
struct mv_host_priv *hpriv = ap->host->private_data;
int hard_port = mv_hardport_from_port(ap->port_no);
void __iomem *hc_mmio = mv_hc_base_from_port(
- ap->host->iomap[MV_PRIMARY_BAR], hard_port);
+ mv_host_base(ap->host), hard_port);
u32 hc_irq_cause, ipending;
/* clear EDMA event indicators, if any */
@@ -1158,17 +1158,13 @@ static int mv_port_start(struct ata_port *ap)
struct mv_port_priv *pp;
void __iomem *port_mmio = mv_ap_base(ap);
unsigned long flags;
- int tag, rc;
+ int tag;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
ap->private_data = pp;
- rc = ata_pad_alloc(ap, dev);
- if (rc)
- return rc;
-
pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
if (!pp->crqb)
return -ENOMEM;
@@ -1542,7 +1538,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
eh_freeze_mask = EDMA_EH_FREEZE_5;
if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
- struct mv_port_priv *pp = ap->private_data;
+ pp = ap->private_data;
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
ata_ehi_push_desc(ehi, "EDMA self-disable");
}
@@ -1550,7 +1546,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
eh_freeze_mask = EDMA_EH_FREEZE;
if (edma_err_cause & EDMA_ERR_SELF_DIS) {
- struct mv_port_priv *pp = ap->private_data;
+ pp = ap->private_data;
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
ata_ehi_push_desc(ehi, "EDMA self-disable");
}
@@ -2951,7 +2947,8 @@ static int mv_platform_probe(struct platform_device *pdev)
hpriv->n_ports = n_ports;
host->iomap = NULL;
- hpriv->base = ioremap(res->start, res->end - res->start + 1);
+ hpriv->base = devm_ioremap(&pdev->dev, res->start,
+ res->end - res->start + 1);
hpriv->base -= MV_SATAHC0_REG_BASE;
rc = mv_create_dma_pools(hpriv, &pdev->dev);
@@ -2983,11 +2980,8 @@ static int __devexit mv_platform_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ata_host *host = dev_get_drvdata(dev);
- struct mv_host_priv *hpriv = host->private_data;
- void __iomem *base = hpriv->base;
ata_host_detach(host);
- iounmap(base);
return 0;
}
@@ -3198,6 +3192,7 @@ MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:sata_mv");
#ifdef CONFIG_PCI
module_param(msi, int, 0444);
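
The sata_mv platform hunks above replace a bare ioremap() in mv_platform_probe(), and the matching iounmap() in mv_platform_remove(), with a device-managed mapping that the driver core releases automatically when the device is unbound. A minimal, hypothetical sketch of that pattern (names are illustrative, not from the patch):

	#include <linux/platform_device.h>
	#include <linux/io.h>

	/* Illustrative only; not part of the patch. */
	static int example_platform_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -EINVAL;

		/*
		 * The mapping is tied to &pdev->dev: it is torn down on unbind,
		 * so the remove path needs no explicit iounmap().
		 */
		base = devm_ioremap(&pdev->dev, res->start,
				    res->end - res->start + 1);
		if (!base)
			return -ENOMEM;

		return 0;
	}
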
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index a07d319f6e8..f251a5f569d 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -543,7 +543,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
idx = 0;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
u32 addr, offset;
- u32 sg_len, len;
+ u32 sg_len;
/* determine if physical DMA addr spans 64K boundary.
* Note h/w doesn't support 64-bit, so we unconditionally
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index b4b1f91ea69..df7988df790 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1234,7 +1234,6 @@ static int sil24_port_start(struct ata_port *ap)
union sil24_cmd_block *cb;
size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
dma_addr_t cb_dma;
- int rc;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
@@ -1247,10 +1246,6 @@ static int sil24_port_start(struct ata_port *ap)
return -ENOMEM;
memset(cb, 0, cb_size);
- rc = ata_pad_alloc(ap, dev);
- if (rc)
- return rc;
-
pp->cmd_block = cb;
pp->cmd_block_dma = cb_dma;
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 30caa033719..0d03f44824f 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -333,8 +333,8 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
static void vt6420_error_handler(struct ata_port *ap)
{
- return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset,
- NULL, ata_std_postreset);
+ ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset, NULL,
+ ata_std_postreset);
}
static int vt6421_pata_cable_detect(struct ata_port *ap)