Diffstat (limited to 'drivers/ata/sata_mv.c')
 drivers/ata/sata_mv.c | 46 ++++++++++++++++++++++++++++++++++++----------
 1 file changed, 38 insertions(+), 8 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9463c71dd38e..a9fd9709c262 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1898,19 +1898,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
  *	LOCKING:
  *	Inherited from caller.
  */
-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+static void mv_bmdma_stop_ap(struct ata_port *ap)
 {
-	struct ata_port *ap = qc->ap;
 	void __iomem *port_mmio = mv_ap_base(ap);
 	u32 cmd;
 
 	/* clear start/stop bit */
 	cmd = readl(port_mmio + BMDMA_CMD);
-	cmd &= ~ATA_DMA_START;
-	writelfl(cmd, port_mmio + BMDMA_CMD);
+	if (cmd & ATA_DMA_START) {
+		cmd &= ~ATA_DMA_START;
+		writelfl(cmd, port_mmio + BMDMA_CMD);
+
+		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+		ata_sff_dma_pause(ap);
+	}
+}
 
-	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_sff_dma_pause(ap);
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	mv_bmdma_stop_ap(qc->ap);
 }
 
 /**
@@ -1934,8 +1940,21 @@ static u8 mv_bmdma_status(struct ata_port *ap)
 	reg = readl(port_mmio + BMDMA_STATUS);
 	if (reg & ATA_DMA_ACTIVE)
 		status = ATA_DMA_ACTIVE;
-	else
+	else if (reg & ATA_DMA_ERR)
 		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
+	else {
+		/*
+		 * Just because DMA_ACTIVE is 0 (DMA completed),
+		 * this does _not_ mean the device is "done".
+		 * So we should not yet be signalling ATA_DMA_INTR
+		 * in some cases.  Eg. DSM/TRIM, and perhaps others.
+		 */
+		mv_bmdma_stop_ap(ap);
+		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
+			status = 0;
+		else
+			status = ATA_DMA_INTR;
+	}
 	return status;
 }
 
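Note (not part of the patch): the new else-branch above is the heart of the fix. A cleared ATA_DMA_ACTIVE bit only says the BMDMA engine has stopped, not that the drive has finished the command; for DSM/TRIM the device can stay BSY well after the data transfer. As a hedged illustration, using only the generic flag constants from <linux/ata.h>, the branch structure reduces to the standalone helper below (names here are illustrative, not from sata_mv.c):

#include <linux/ata.h>

/*
 * Illustrative helper only: mirrors the precedence of the three
 * outcomes in mv_bmdma_status() on plain values.
 */
static inline u8 bmdma_status_from_regs(u32 bmdma_reg, u8 alt_status)
{
	if (bmdma_reg & ATA_DMA_ACTIVE)
		return ATA_DMA_ACTIVE;		/* transfer still in flight */

	if (bmdma_reg & ATA_DMA_ERR)		/* engine stopped on error */
		return (bmdma_reg & ATA_DMA_ERR) | ATA_DMA_INTR;

	/*
	 * Engine idle, no error: the device may still be completing the
	 * command internally (e.g. DSM/TRIM), so only claim an interrupt
	 * once BSY has dropped in the alternate status register.
	 */
	return (alt_status & ATA_BUSY) ? 0 : ATA_DMA_INTR;
}

Leaving ATA_DMA_INTR unset while the device is still busy is what defers command completion until a later interrupt or poll, instead of completing the qc while the drive is mid-TRIM.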
@@ -1995,6 +2014,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 
 	switch (tf->protocol) {
 	case ATA_PROT_DMA:
+		if (tf->command == ATA_CMD_DSM)
+			return;
+		/* fall-thru */
 	case ATA_PROT_NCQ:
 		break;	/* continue below */
 	case ATA_PROT_PIO:
@@ -2094,6 +2116,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	if ((tf->protocol != ATA_PROT_DMA) &&
 	    (tf->protocol != ATA_PROT_NCQ))
 		return;
+	if (tf->command == ATA_CMD_DSM)
+		return;	/* use bmdma for this */
 
 	/* Fill in Gen IIE command request block */
 	if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2260,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
 	}
 
 	if (qc->tf.flags & ATA_TFLAG_POLLING)
-		ata_sff_queue_pio_task(ap, 0);
+		ata_sff_queue_pio_task(link, 0);
 	return 0;
 }
 
@@ -2289,6 +2313,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
+		if (qc->tf.command == ATA_CMD_DSM) {
+			if (!ap->ops->bmdma_setup)	/* no bmdma on GEN_I */
+				return AC_ERR_OTHER;
+			break;	/* use bmdma for this */
+		}
+		/* fall thru */
 	case ATA_PROT_NCQ:
 		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
 		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
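Note (not part of the patch): the qc_prep/qc_issue checks above key off the taskfile libata builds for TRIM, and steer it away from EDMA onto the plain BMDMA path; on Gen-I chips ap->ops->bmdma_setup is NULL, so the command is rejected with AC_ERR_OTHER instead. As a rough, assumption-labelled sketch of what such a taskfile looks like by the time it reaches these hooks (field choices are illustrative, not copied from libata's SCSI translation code):

#include <linux/ata.h>
#include <linux/libata.h>

/*
 * Illustrative only: approximate encoding of a non-NCQ DSM/TRIM
 * command.  sata_mv matches on tf->command == ATA_CMD_DSM under
 * ATA_PROT_DMA, because its EDMA engine cannot carry this command.
 */
static void sketch_trim_taskfile(struct ata_taskfile *tf, u16 range_sectors)
{
	tf->command  = ATA_CMD_DSM;	/* Data Set Management */
	tf->feature  = ATA_DSM_TRIM;	/* TRIM sub-operation */
	tf->nsect    = range_sectors;	/* 512-byte blocks of LBA ranges */
	tf->protocol = ATA_PROT_DMA;	/* DMA data-out, not NCQ */
	tf->flags   |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_WRITE;
}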