author    Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-04-19 17:17:34 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>    2008-04-19 17:17:34 +0100
commit    cf816ecb533ab96b883dfdc0db174598b5b5c4d2 (patch)
tree      1b7705db288ae2917105e624b01fdf81e0882bf1 /drivers/ata/sata_mv.c
parent    adf6d34e460387ee3e8f1e1875d52bff51212c7d (diff)
parent    15f7d677ccff6f0f5de8a1ee43a792567e9f9de9 (diff)
Merge branch 'merge-fixes' into devel
Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r--    drivers/ata/sata_mv.c    632
1 file changed, 252 insertions(+), 380 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 83584b6e1ba..d52ce118832 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1,6 +1,7 @@
/*
* sata_mv.c - Marvell SATA support
*
+ * Copyright 2008: Marvell Corporation, all rights reserved.
* Copyright 2005: EMC Corporation, all rights reserved.
* Copyright 2005 Red Hat, Inc. All rights reserved.
*
@@ -39,7 +40,9 @@
5) Investigate problems with PCI Message Signalled Interrupts (MSI).
- 6) Add port multiplier support (intermediate)
+ 6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
+
+ 7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
8) Develop a low-power-consumption strategy, and implement it.
@@ -61,7 +64,6 @@
*/
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -132,7 +134,7 @@ enum {
MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
/* SoC integrated controllers, no PCI interface */
- MV_FLAG_SOC = (1 << 28),
+ MV_FLAG_SOC = (1 << 28),
MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
@@ -142,6 +144,7 @@ enum {
CRQB_FLAG_READ = (1 << 0),
CRQB_TAG_SHIFT = 1,
CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
+ CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
CRQB_CMD_ADDR_SHIFT = 8,
CRQB_CMD_CS = (0x2 << 11),
@@ -200,7 +203,7 @@ enum {
TWSI_INT = (1 << 24),
HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
- HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
+ HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
HC_MAIN_RSVD),
@@ -224,13 +227,24 @@ enum {
SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
SATA_ACTIVE_OFS = 0x350,
SATA_FIS_IRQ_CAUSE_OFS = 0x364,
+
+ LTMODE_OFS = 0x30c,
+ LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
+
PHY_MODE3 = 0x310,
PHY_MODE4 = 0x314,
PHY_MODE2 = 0x330,
+ SATA_IFCTL_OFS = 0x344,
+ SATA_IFSTAT_OFS = 0x34c,
+ VENDOR_UNIQUE_FIS_OFS = 0x35c,
+
+ FIS_CFG_OFS = 0x360,
+ FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
+
MV5_PHY_MODE = 0x74,
MV5_LT_MODE = 0x30,
MV5_PHY_CTL = 0x0C,
- SATA_INTERFACE_CTL = 0x050,
+ SATA_INTERFACE_CFG = 0x050,
MV_M2_PREAMP_MASK = 0x7e0,
@@ -241,6 +255,8 @@ enum {
EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
+ EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
+ EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
EDMA_ERR_IRQ_MASK_OFS = 0xc,
@@ -283,7 +299,9 @@ enum {
EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
EDMA_ERR_LNK_CTRL_RX_1 |
EDMA_ERR_LNK_CTRL_RX_3 |
- EDMA_ERR_LNK_CTRL_TX,
+ EDMA_ERR_LNK_CTRL_TX |
+ /* temporary, until we fix hotplug: */
+ (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
EDMA_ERR_PRD_PAR |
@@ -299,6 +317,7 @@ enum {
EDMA_ERR_LNK_DATA_RX |
EDMA_ERR_LNK_DATA_TX |
EDMA_ERR_TRANS_PROTO,
+
EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
EDMA_ERR_PRD_PAR |
EDMA_ERR_DEV_DCON |
@@ -345,7 +364,6 @@ enum {
/* Port private flags (pp_flags) */
MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
- MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
@@ -465,7 +483,6 @@ struct mv_hw_ops {
void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
-static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
@@ -475,7 +492,8 @@ static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
-static void mv_error_handler(struct ata_port *ap);
+static int mv_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);
@@ -508,72 +526,46 @@ static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
-static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no);
-static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
- void __iomem *port_mmio, int want_ncq);
-static int __mv_stop_dma(struct ata_port *ap);
+static int mv_stop_edma(struct ata_port *ap);
+static int mv_stop_edma_engine(void __iomem *port_mmio);
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
+
+static void mv_pmp_select(struct ata_port *ap, int pmp);
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int mv_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
* because we have to allow room for worst case splitting of
* PRDs for 64K boundaries in mv_fill_sg().
*/
static struct scsi_host_template mv5_sht = {
- .module = THIS_MODULE,
- .name = DRV_NAME,
- .ioctl = ata_scsi_ioctl,
- .queuecommand = ata_scsi_queuecmd,
- .can_queue = ATA_DEF_QUEUE,
- .this_id = ATA_SHT_THIS_ID,
+ ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = MV_MAX_SG_CT / 2,
- .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
- .emulated = ATA_SHT_EMULATED,
- .use_clustering = 1,
- .proc_name = DRV_NAME,
.dma_boundary = MV_DMA_BOUNDARY,
- .slave_configure = ata_scsi_slave_config,
- .slave_destroy = ata_scsi_slave_destroy,
- .bios_param = ata_std_bios_param,
};
static struct scsi_host_template mv6_sht = {
- .module = THIS_MODULE,
- .name = DRV_NAME,
- .ioctl = ata_scsi_ioctl,
- .queuecommand = ata_scsi_queuecmd,
- .change_queue_depth = ata_scsi_change_queue_depth,
+ ATA_NCQ_SHT(DRV_NAME),
.can_queue = MV_MAX_Q_DEPTH - 1,
- .this_id = ATA_SHT_THIS_ID,
.sg_tablesize = MV_MAX_SG_CT / 2,
- .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
- .emulated = ATA_SHT_EMULATED,
- .use_clustering = 1,
- .proc_name = DRV_NAME,
.dma_boundary = MV_DMA_BOUNDARY,
- .slave_configure = ata_scsi_slave_config,
- .slave_destroy = ata_scsi_slave_destroy,
- .bios_param = ata_std_bios_param,
};
-static const struct ata_port_operations mv5_ops = {
- .tf_load = ata_tf_load,
- .tf_read = ata_tf_read,
- .check_status = ata_check_status,
- .exec_command = ata_exec_command,
- .dev_select = ata_std_dev_select,
-
- .cable_detect = ata_cable_sata,
+static struct ata_port_operations mv5_ops = {
+ .inherits = &ata_sff_port_ops,
.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
- .data_xfer = ata_data_xfer,
-
- .irq_clear = mv_irq_clear,
- .irq_on = ata_irq_on,
- .error_handler = mv_error_handler,
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
+ .hardreset = mv_hardreset,
+ .error_handler = ata_std_error_handler, /* avoid SFF EH */
+ .post_internal_cmd = ATA_OP_NULL,
.scr_read = mv5_scr_read,
.scr_write = mv5_scr_write,
@@ -582,61 +574,24 @@ static const struct ata_port_operations mv5_ops = {
.port_stop = mv_port_stop,
};
-static const struct ata_port_operations mv6_ops = {
+static struct ata_port_operations mv6_ops = {
+ .inherits = &mv5_ops,
+ .qc_defer = sata_pmp_qc_defer_cmd_switch,
.dev_config = mv6_dev_config,
- .tf_load = ata_tf_load,
- .tf_read = ata_tf_read,
- .check_status = ata_check_status,
- .exec_command = ata_exec_command,
- .dev_select = ata_std_dev_select,
-
- .cable_detect = ata_cable_sata,
-
- .qc_prep = mv_qc_prep,
- .qc_issue = mv_qc_issue,
- .data_xfer = ata_data_xfer,
-
- .irq_clear = mv_irq_clear,
- .irq_on = ata_irq_on,
-
- .error_handler = mv_error_handler,
- .freeze = mv_eh_freeze,
- .thaw = mv_eh_thaw,
- .qc_defer = ata_std_qc_defer,
-
.scr_read = mv_scr_read,
.scr_write = mv_scr_write,
- .port_start = mv_port_start,
- .port_stop = mv_port_stop,
+ .pmp_hardreset = mv_pmp_hardreset,
+ .pmp_softreset = mv_softreset,
+ .softreset = mv_softreset,
+ .error_handler = sata_pmp_error_handler,
};
-static const struct ata_port_operations mv_iie_ops = {
- .tf_load = ata_tf_load,
- .tf_read = ata_tf_read,
- .check_status = ata_check_status,
- .exec_command = ata_exec_command,
- .dev_select = ata_std_dev_select,
-
- .cable_detect = ata_cable_sata,
-
+static struct ata_port_operations mv_iie_ops = {
+ .inherits = &mv6_ops,
+ .qc_defer = ata_std_qc_defer, /* FIS-based switching */
+ .dev_config = ATA_OP_NULL,
.qc_prep = mv_qc_prep_iie,
- .qc_issue = mv_qc_issue,
- .data_xfer = ata_data_xfer,
-
- .irq_clear = mv_irq_clear,
- .irq_on = ata_irq_on,
-
- .error_handler = mv_error_handler,
- .freeze = mv_eh_freeze,
- .thaw = mv_eh_thaw,
- .qc_defer = ata_std_qc_defer,
-
- .scr_read = mv_scr_read,
- .scr_write = mv_scr_write,
-
- .port_start = mv_port_start,
- .port_stop = mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
@@ -660,6 +615,7 @@ static const struct ata_port_info mv_port_info[] = {
},
{ /* chip_604x */
.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+ ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
ATA_FLAG_NCQ,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
@@ -667,6 +623,7 @@ static const struct ata_port_info mv_port_info[] = {
},
{ /* chip_608x */
.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+ ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
@@ -674,6 +631,7 @@ static const struct ata_port_info mv_port_info[] = {
},
{ /* chip_6042 */
.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+ ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
ATA_FLAG_NCQ,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
@@ -681,16 +639,19 @@ static const struct ata_port_info mv_port_info[] = {
},
{ /* chip_7042 */
.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+ ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
ATA_FLAG_NCQ,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
{ /* chip_soc */
- .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
- .pio_mask = 0x1f, /* pio0-4 */
- .udma_mask = ATA_UDMA6,
- .port_ops = &mv_iie_ops,
+ .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+ ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+ ATA_FLAG_NCQ | MV_FLAG_SOC,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &mv_iie_ops,
},
};
@@ -789,6 +750,14 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
+static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
+{
+ void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
+ unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
+
+ return hc_mmio + ofs;
+}
+
static inline void __iomem *mv_host_base(struct ata_host *host)
{
struct mv_host_priv *hpriv = host->private_data;
@@ -805,10 +774,6 @@ static inline int mv_get_hc_count(unsigned long port_flags)
return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
-static void mv_irq_clear(struct ata_port *ap)
-{
-}
-
static void mv_set_edma_ptrs(void __iomem *port_mmio,
struct mv_host_priv *hpriv,
struct mv_port_priv *pp)
@@ -868,7 +833,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
if (want_ncq != using_ncq)
- __mv_stop_dma(ap);
+ mv_stop_edma(ap);
}
if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
struct mv_host_priv *hpriv = ap->host->private_data;
@@ -889,7 +854,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
hc_mmio + HC_IRQ_CAUSE_OFS);
}
- mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
+ mv_edma_cfg(ap, want_ncq);
/* clear FIS IRQ Cause */
writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
@@ -903,58 +868,42 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
}
/**
- * __mv_stop_dma - Disable eDMA engine
- * @ap: ATA channel to manipulate
- *
- * Verify the local cache of the eDMA state is accurate with a
- * WARN_ON.
+ * mv_stop_edma_engine - Disable eDMA engine
+ * @port_mmio: io base address
*
* LOCKING:
* Inherited from caller.
*/
-static int __mv_stop_dma(struct ata_port *ap)
+static int mv_stop_edma_engine(void __iomem *port_mmio)
{
- void __iomem *port_mmio = mv_ap_base(ap);
- struct mv_port_priv *pp = ap->private_data;
- u32 reg;
- int i, err = 0;
+ int i;
- if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
- /* Disable EDMA if active. The disable bit auto clears.
- */
- writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
- pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
- } else {
- WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
- }
+ /* Disable eDMA. The disable bit auto clears. */
+ writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
- /* now properly wait for the eDMA to stop */
- for (i = 1000; i > 0; i--) {
- reg = readl(port_mmio + EDMA_CMD_OFS);
+ /* Wait for the chip to confirm eDMA is off. */
+ for (i = 10000; i > 0; i--) {
+ u32 reg = readl(port_mmio + EDMA_CMD_OFS);
if (!(reg & EDMA_EN))
- break;
-
- udelay(100);
- }
-
- if (reg & EDMA_EN) {
- ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
- err = -EIO;
+ return 0;
+ udelay(10);
}
-
- return err;
+ return -EIO;
}
-static int mv_stop_dma(struct ata_port *ap)
+static int mv_stop_edma(struct ata_port *ap)
{
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&ap->host->lock, flags);
- rc = __mv_stop_dma(ap);
- spin_unlock_irqrestore(&ap->host->lock, flags);
+ void __iomem *port_mmio = mv_ap_base(ap);
+ struct mv_port_priv *pp = ap->private_data;
- return rc;
+ if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
+ return 0;
+ pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+ if (mv_stop_edma_engine(port_mmio)) {
+ ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
+ return -EIO;
+ }
+ return 0;
}
#ifdef ATA_DEBUG
@@ -1078,18 +1027,50 @@ static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
static void mv6_dev_config(struct ata_device *adev)
{
/*
+ * Deal with Gen-II ("mv6") hardware quirks/restrictions:
+ *
+ * Gen-II does not support NCQ over a port multiplier
+ * (no FIS-based switching).
+ *
* We don't have hob_nsect when doing NCQ commands on Gen-II.
* See mv_qc_prep() for more info.
*/
- if (adev->flags & ATA_DFLAG_NCQ)
- if (adev->max_sectors > ATA_MAX_SECTORS)
+ if (adev->flags & ATA_DFLAG_NCQ) {
+ if (sata_pmp_attached(adev->link->ap))
+ adev->flags &= ~ATA_DFLAG_NCQ;
+ else if (adev->max_sectors > ATA_MAX_SECTORS)
adev->max_sectors = ATA_MAX_SECTORS;
+ }
}
-static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
- void __iomem *port_mmio, int want_ncq)
+static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
+{
+ u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
+ /*
+ * Various bit settings required for operation
+ * in FIS-based switching (fbs) mode on GenIIe:
+ */
+ old_fcfg = readl(port_mmio + FIS_CFG_OFS);
+ old_ltmode = readl(port_mmio + LTMODE_OFS);
+ if (enable_fbs) {
+ new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
+ new_ltmode = old_ltmode | LTMODE_BIT8;
+ } else { /* disable fbs */
+ new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
+ new_ltmode = old_ltmode & ~LTMODE_BIT8;
+ }
+ if (new_fcfg != old_fcfg)
+ writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
+ if (new_ltmode != old_ltmode)
+ writelfl(new_ltmode, port_mmio + LTMODE_OFS);
+}
+
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
u32 cfg;
+ struct mv_port_priv *pp = ap->private_data;
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = mv_ap_base(ap);
/* set up non-NCQ EDMA configuration */
cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
@@ -1105,6 +1086,13 @@ static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
cfg |= (1 << 22); /* enab 4-entry host queue cache */
cfg |= (1 << 18); /* enab early completion */
cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
+
+ if (want_ncq && sata_pmp_attached(ap)) {
+ cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
+ mv_config_fbs(port_mmio, 1);
+ } else {
+ mv_config_fbs(port_mmio, 0);
+ }
}
if (want_ncq) {
@@ -1160,8 +1148,6 @@ static int mv_port_start(struct ata_port *ap)
struct device *dev = ap->host->dev;
struct mv_host_priv *hpriv = ap->host->private_data;
struct mv_port_priv *pp;
- void __iomem *port_mmio = mv_ap_base(ap);
- unsigned long flags;
int tag;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
@@ -1194,18 +1180,6 @@ static int mv_port_start(struct ata_port *ap)
pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
}
}
-
- spin_lock_irqsave(&ap->host->lock, flags);
-
- mv_edma_cfg(pp, hpriv, port_mmio, 0);
- mv_set_edma_ptrs(port_mmio, hpriv, pp);
-
- spin_unlock_irqrestore(&ap->host->lock, flags);
-
- /* Don't turn on EDMA here...do it before DMA commands only. Else
- * we'll be unable to send non-data, PIO, etc due to restricted access
- * to shadow regs.
- */
return 0;
out_port_free_dma_mem:
@@ -1224,7 +1198,7 @@ out_port_free_dma_mem:
*/
static void mv_port_stop(struct ata_port *ap)
{
- mv_stop_dma(ap);
+ mv_stop_edma(ap);
mv_port_free_dma_mem(ap);
}
@@ -1310,6 +1284,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
flags |= CRQB_FLAG_READ;
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
+ flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
/* get current queue index from software */
in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
@@ -1394,14 +1369,14 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
(qc->tf.protocol != ATA_PROT_NCQ))
return;
- /* Fill in Gen IIE command request block
- */
+ /* Fill in Gen IIE command request block */
if (!(qc->tf.flags & ATA_TFLAG_WRITE))
flags |= CRQB_FLAG_READ;
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
flags |= qc->tag << CRQB_HOSTQ_SHIFT;
+ flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
/* get current queue index from software */
in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
@@ -1459,12 +1434,14 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
if ((qc->tf.protocol != ATA_PROT_DMA) &&
(qc->tf.protocol != ATA_PROT_NCQ)) {
- /* We're about to send a non-EDMA capable command to the
+ /*
+ * We're about to send a non-EDMA capable command to the
* port. Turn off EDMA so there won't be problems accessing
* shadow block, etc registers.
*/
- __mv_stop_dma(ap);
- return ata_qc_issue_prot(qc);
+ mv_stop_edma(ap);
+ mv_pmp_select(ap, qc->dev->link->pmp);
+ return ata_sff_qc_issue(qc);
}
mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
@@ -1486,10 +1463,10 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
* @reset_allowed: bool: 0 == don't trigger from reset here
*
* In most cases, just clear the interrupt and move on. However,
- * some cases require an eDMA reset, which is done right before
- * the COMRESET in mv_phy_reset(). The SERR case requires a
- * clear of pending errors in the SATA SERROR register. Finally,
- * if the port disabled DMA, update our cached copy to match.
+ * some cases require an eDMA reset, which also performs a COMRESET.
+ * The SERR case requires a clear of pending errors in the SATA
+ * SERROR register. Finally, if the port disabled DMA,
+ * update our cached copy to match.
*
* LOCKING:
* Inherited from caller.
@@ -1528,14 +1505,14 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
EDMA_ERR_INTRL_PAR)) {
err_mask |= AC_ERR_ATA_BUS;
- action |= ATA_EH_HARDRESET;
+ action |= ATA_EH_RESET;
ata_ehi_push_desc(ehi, "parity error");
}
if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
"dev disconnect" : "dev connect");
- action |= ATA_EH_HARDRESET;
+ action |= ATA_EH_RESET;
}
if (IS_GEN_I(hpriv)) {
@@ -1559,7 +1536,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
sata_scr_read(&ap->link, SCR_ERROR, &serr);
sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
err_mask = AC_ERR_ATA_BUS;
- action |= ATA_EH_HARDRESET;
+ action |= ATA_EH_RESET;
}
}
@@ -1568,7 +1545,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
if (!err_mask) {
err_mask = AC_ERR_OTHER;
- action |= ATA_EH_HARDRESET;
+ action |= ATA_EH_RESET;
}
ehi->serror |= serr;
@@ -1727,9 +1704,9 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
pp = ap->private_data;
shift = port << 1; /* (port * 2) */
- if (port >= MV_PORTS_PER_HC) {
+ if (port >= MV_PORTS_PER_HC)
shift++; /* skip bit 8 in the HC Main IRQ reg */
- }
+
have_err_bits = ((PORT0_ERR << shift) & relevant);
if (unlikely(have_err_bits)) {
@@ -1784,7 +1761,7 @@ static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
ata_ehi_push_desc(ehi,
"PCI err cause 0x%08x", err_cause);
err_mask = AC_ERR_HOST_BUS;
- ehi->action = ATA_EH_HARDRESET;
+ ehi->action = ATA_EH_RESET;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc)
qc->err_mask |= err_mask;
@@ -1818,6 +1795,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
void __iomem *mmio = hpriv->base;
u32 irq_stat, irq_mask;
+ /* Note to self: &host->lock == &ap->host->lock == ap->lock */
spin_lock(&host->lock);
irq_stat = readl(hpriv->main_cause_reg_addr);
@@ -1851,14 +1829,6 @@ out_unlock:
return IRQ_RETVAL(handled);
}
-static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
-{
- void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
- unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
-
- return hc_mmio + ofs;
-}
-
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
unsigned int ofs;
@@ -1984,9 +1954,12 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
{
void __iomem *port_mmio = mv_port_base(mmio, port);
- writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
-
- mv_channel_reset(hpriv, mmio, port);
+ /*
+ * The datasheet warns against setting ATA_RST when EDMA is active
+ * (but doesn't say what the problem might be). So we first try
+ * to disable the EDMA engine before doing the ATA_RST operation.
+ */
+ mv_reset_channel(hpriv, mmio, port);
ZERO(0x028); /* command */
writel(0x11f, port_mmio + EDMA_CFG_OFS);
@@ -2136,6 +2109,13 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
rc = 1;
}
+ /*
+ * Temporary: wait 3 seconds before port-probing can happen,
+ * so that we don't miss finding sleepy SilXXXX port-multipliers.
+ * This can go away once hotplug is fully/correctly implemented.
+ */
+ if (rc == 0)
+ msleep(3000);
done:
return rc;
}
@@ -2204,14 +2184,15 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
m4 = readl(port_mmio + PHY_MODE4);
if (hp_flags & MV_HP_ERRATA_60X1B2)
- tmp = readl(port_mmio + 0x310);
+ tmp = readl(port_mmio + PHY_MODE3);
+ /* workaround for errata FEr SATA#10 (part 1) */
m4 = (m4 & ~(1 << 1)) | (1 << 0);
writel(m4, port_mmio + PHY_MODE4);
if (hp_flags & MV_HP_ERRATA_60X1B2)
- writel(tmp, port_mmio + 0x310);
+ writel(tmp, port_mmio + PHY_MODE3);
}
/* Revert values of pre-emphasis and signal amps to the saved ones */
@@ -2259,9 +2240,12 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
{
void __iomem *port_mmio = mv_port_base(mmio, port);
- writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
-
- mv_channel_reset(hpriv, mmio, port);
+ /*
+ * The datasheet warns against setting ATA_RST when EDMA is active
+ * (but doesn't say what the problem might be). So we first try
+ * to disable the EDMA engine before doing the ATA_RST operation.
+ */
+ mv_reset_channel(hpriv, mmio, port);
ZERO(0x028); /* command */
writel(0x101f, port_mmio + EDMA_CFG_OFS);
@@ -2318,25 +2302,39 @@ static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
return;
}
-static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
+static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
+{
+ u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
+
+ ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
+ if (want_gen2i)
+ ifctl |= (1 << 7); /* enable gen2i speed */
+ writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
+}
+
+/*
+ * Caller must ensure that EDMA is not active,
+ * by first doing mv_stop_edma() where needed.
+ */
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no)
{
void __iomem *port_mmio = mv_port_base(mmio, port_no);
+ mv_stop_edma_engine(port_mmio);
writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
- if (IS_GEN_II(hpriv)) {
- u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
- ifctl |= (1 << 7); /* enable gen2i speed */
- ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
- writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
+ if (!IS_GEN_I(hpriv)) {
+ /* Enable 3.0gb/s link speed */
+ mv_setup_ifctl(port_mmio, 1);
}
-
- udelay(25); /* allow reset propagation */
-
- /* Spec never mentions clearing the bit. Marvell's driver does
- * clear the bit, however.
+ /*
+ * Strobing ATA_RST here causes a hard reset of the SATA transport,
+ * link, and physical layers. It resets all SATA interface registers
+ * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
*/
+ writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
+ udelay(25); /* allow reset propagation */
writelfl(0, port_mmio + EDMA_CMD_OFS);
hpriv->ops->phy_errata(hpriv, mmio, port_no);
@@ -2345,136 +2343,32 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
mdelay(1);
}
-/**
- * mv_phy_reset - Perform eDMA reset followed by COMRESET
- * @ap: ATA channel to manipulate
- *
- * Part of this is taken from __sata_phy_reset and modified to
- * not sleep since this routine gets called from interrupt level.
- *
- * LOCKING:
- * Inherited from caller. This is coded to safe to call at
- * interrupt level, i.e. it does not sleep.
- */
-static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
- unsigned long deadline)
+static void mv_pmp_select(struct ata_port *ap, int pmp)
{
- struct mv_port_priv *pp = ap->private_data;
- struct mv_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = mv_ap_base(ap);
- int retry = 5;
- u32 sstatus;
-
- VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
-
-#ifdef DEBUG
- {
- u32 sstatus, serror, scontrol;
-
- mv_scr_read(ap, SCR_STATUS, &sstatus);
- mv_scr_read(ap, SCR_ERROR, &serror);
- mv_scr_read(ap, SCR_CONTROL, &scontrol);
- DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
- "SCtrl 0x%08x\n", sstatus, serror, scontrol);
- }
-#endif
-
- /* Issue COMRESET via SControl */
-comreset_retry:
- sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
- msleep(1);
-
- sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
- msleep(20);
+ if (sata_pmp_supported(ap)) {
+ void __iomem *port_mmio = mv_ap_base(ap);
+ u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
+ int old = reg & 0xf;
- do {
- sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
- if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
- break;
-
- msleep(1);
- } while (time_before(jiffies, deadline));
-
- /* work around errata */
- if (IS_GEN_II(hpriv) &&
- (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
- (retry-- > 0))
- goto comreset_retry;
-
-#ifdef DEBUG
- {
- u32 sstatus, serror, scontrol;
-
- mv_scr_read(ap, SCR_STATUS, &sstatus);
- mv_scr_read(ap, SCR_ERROR, &serror);
- mv_scr_read(ap, SCR_CONTROL, &scontrol);
- DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
- "SCtrl 0x%08x\n", sstatus, serror, scontrol);
- }
-#endif
-
- if (ata_link_offline(&ap->link)) {
- *class = ATA_DEV_NONE;
- return;
- }
-
- /* even after SStatus reflects that device is ready,
- * it seems to take a while for link to be fully
- * established (and thus Status no longer 0x80/0x7F),
- * so we poll a bit for that, here.
- */
- retry = 20;
- while (1) {
- u8 drv_stat = ata_check_status(ap);
- if ((drv_stat != 0x80) && (drv_stat != 0x7f))
- break;
- msleep(500);
- if (retry-- <= 0)
- break;
- if (time_after(jiffies, deadline))
- break;
+ if (old != pmp) {
+ reg = (reg & ~0xf) | pmp;
+ writelfl(reg, port_mmio + SATA_IFCTL_OFS);
+ }
}
-
- /* FIXME: if we passed the deadline, the following
- * code probably produces an invalid result
- */
-
- /* finally, read device signature from TF registers */
- *class = ata_dev_try_classify(ap->link.device, 1, NULL);
-
- writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
-
- WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
-
- VPRINTK("EXIT\n");
}
-static int mv_prereset(struct ata_link *link, unsigned long deadline)
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
{
- struct ata_port *ap = link->ap;
- struct mv_port_priv *pp = ap->private_data;
- struct ata_eh_context *ehc = &link->eh_context;
- int rc;
-
- rc = mv_stop_dma(ap);
- if (rc)
- ehc->i.action |= ATA_EH_HARDRESET;
-
- if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
- pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
- ehc->i.action |= ATA_EH_HARDRESET;
- }
-
- /* if we're about to do hardreset, nothing more to do */
- if (ehc->i.action & ATA_EH_HARDRESET)
- return 0;
-
- if (ata_link_online(link))
- rc = ata_wait_ready(ap, deadline);
- else
- rc = -ENODEV;
+ mv_pmp_select(link->ap, sata_srst_pmp(link));
+ return sata_std_hardreset(link, class, deadline);
+}
- return rc;
+static int mv_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ mv_pmp_select(link->ap, sata_srst_pmp(link));
+ return ata_sff_softreset(link, class, deadline);
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
@@ -2482,43 +2376,34 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
{
struct ata_port *ap = link->ap;
struct mv_host_priv *hpriv = ap->host->private_data;
+ struct mv_port_priv *pp = ap->private_data;
void __iomem *mmio = hpriv->base;
+ int rc, attempts = 0, extra = 0;
+ u32 sstatus;
+ bool online;
- mv_stop_dma(ap);
-
- mv_channel_reset(hpriv, mmio, ap->port_no);
-
- mv_phy_reset(ap, class, deadline);
-
- return 0;
-}
-
-static void mv_postreset(struct ata_link *link, unsigned int *classes)
-{
- struct ata_port *ap = link->ap;
- u32 serr;
-
- /* print link status */
- sata_print_link_status(link);
-
- /* clear SError */
- sata_scr_read(link, SCR_ERROR, &serr);
- sata_scr_write_flush(link, SCR_ERROR, serr);
+ mv_reset_channel(hpriv, mmio, ap->port_no);
+ pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
- /* bail out if no device is present */
- if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
- DPRINTK("EXIT, no device\n");
- return;
- }
+ /* Workaround for errata FEr SATA#10 (part 2) */
+ do {
+ const unsigned long *timing =
+ sata_ehc_deb_timing(&link->eh_context);
- /* set up device control */
- iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
-}
+ rc = sata_link_hardreset(link, timing, deadline + extra,
+ &online, NULL);
+ if (rc)
+ return rc;
+ sata_scr_read(link, SCR_STATUS, &sstatus);
+ if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
+ /* Force 1.5gb/s link speed and try again */
+ mv_setup_ifctl(mv_ap_base(ap), 0);
+ if (time_after(jiffies + HZ, deadline))
+ extra = HZ; /* only extend it once, max */
+ }
+ } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
-static void mv_error_handler(struct ata_port *ap)
-{
- ata_do_eh(ap, mv_prereset, ata_std_softreset,
- mv_hardreset, mv_postreset);
+ return rc;
}
static void mv_eh_freeze(struct ata_port *ap)
@@ -2812,19 +2697,6 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
hpriv->ops->enable_leds(hpriv, mmio);
for (port = 0; port < host->n_ports; port++) {
- if (IS_GEN_II(hpriv)) {
- void __iomem *port_mmio = mv_port_base(mmio, port);
-
- u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
- ifctl |= (1 << 7); /* enable gen2i speed */
- ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
- writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
- }
-
- hpriv->ops->phy_errata(hpriv, mmio, port);
- }
-
- for (port = 0; port < host->n_ports; port++) {
struct ata_port *ap = host->ports[port];
void __iomem *port_mmio = mv_port_base(mmio, port);
@@ -3223,7 +3095,7 @@ MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
-MODULE_ALIAS("platform:sata_mv");
+MODULE_ALIAS("platform:" DRV_NAME);
#ifdef CONFIG_PCI
module_param(msi, int, 0444);