sata_nv: fix ADMA freeze/thaw/irq_clear issues
This patch fixes several problems in ADMA-capable controllers' freeze, thaw and irq_clear libata callbacks. Freeze and thaw did not switch the ADMA-specific interrupts on or off. More critically, the irq_clear function did not respect the hardware restriction that the notifier clear registers for both ports must be written at the same time, even when only one port is being cleared; this could cause timeouts on one port when error handling (e.g. as a result of hotplug) occurred on the other port. This patch also fixes two issues in the interrupt handler: ADMA status must not be checked on a port that has ADMA switched off because of an ATAPI device, and ADMA interrupts must still be checked even when the port is in port-register mode. Signed-off-by: Robert Hancock <hancockr@shaw.ca> Signed-off-by: Jeff Garzik <jeff@garzik.org>
This commit is contained in:
parent
7871e74acb
commit
53014e2526
|
@ -257,6 +257,8 @@ static void nv_adma_port_stop(struct ata_port *ap);
|
|||
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
|
||||
static int nv_adma_port_resume(struct ata_port *ap);
|
||||
#endif
|
||||
static void nv_adma_freeze(struct ata_port *ap);
|
||||
static void nv_adma_thaw(struct ata_port *ap);
|
||||
static void nv_adma_error_handler(struct ata_port *ap);
|
||||
static void nv_adma_host_stop(struct ata_host *host);
|
||||
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
|
||||
|
@ -444,8 +446,8 @@ static const struct ata_port_operations nv_adma_ops = {
|
|||
.bmdma_status = ata_bmdma_status,
|
||||
.qc_prep = nv_adma_qc_prep,
|
||||
.qc_issue = nv_adma_qc_issue,
|
||||
.freeze = nv_ck804_freeze,
|
||||
.thaw = nv_ck804_thaw,
|
||||
.freeze = nv_adma_freeze,
|
||||
.thaw = nv_adma_thaw,
|
||||
.error_handler = nv_adma_error_handler,
|
||||
.post_internal_cmd = nv_adma_post_internal_cmd,
|
||||
.data_xfer = ata_data_xfer,
|
||||
|
@ -815,8 +817,16 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
|
|||
u16 status;
|
||||
u32 gen_ctl;
|
||||
u32 notifier, notifier_error;
|
||||
|
||||
/* if ADMA is disabled, use standard ata interrupt handler */
|
||||
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
|
||||
u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
|
||||
>> (NV_INT_PORT_SHIFT * i);
|
||||
handled += nv_host_intr(ap, irq_stat);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* if in ATA register mode, use standard ata interrupt handler */
|
||||
/* if in ATA register mode, check for standard interrupts */
|
||||
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
|
||||
u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
|
||||
>> (NV_INT_PORT_SHIFT * i);
|
||||
|
@ -826,7 +836,6 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
|
|||
command is active, to prevent losing interrupts. */
|
||||
irq_stat |= NV_INT_DEV;
|
||||
handled += nv_host_intr(ap, irq_stat);
|
||||
continue;
|
||||
}
|
||||
|
||||
notifier = readl(mmio + NV_ADMA_NOTIFIER);
|
||||
|
@ -912,22 +921,77 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
|
|||
return IRQ_RETVAL(handled);
|
||||
}
|
||||
|
||||
static void nv_adma_freeze(struct ata_port *ap)
|
||||
{
|
||||
struct nv_adma_port_priv *pp = ap->private_data;
|
||||
void __iomem *mmio = pp->ctl_block;
|
||||
u16 tmp;
|
||||
|
||||
nv_ck804_freeze(ap);
|
||||
|
||||
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
|
||||
return;
|
||||
|
||||
/* clear any outstanding CK804 notifications */
|
||||
writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
|
||||
ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
|
||||
|
||||
/* Disable interrupt */
|
||||
tmp = readw(mmio + NV_ADMA_CTL);
|
||||
writew( tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
|
||||
mmio + NV_ADMA_CTL);
|
||||
readw( mmio + NV_ADMA_CTL ); /* flush posted write */
|
||||
}
|
||||
|
||||
static void nv_adma_thaw(struct ata_port *ap)
|
||||
{
|
||||
struct nv_adma_port_priv *pp = ap->private_data;
|
||||
void __iomem *mmio = pp->ctl_block;
|
||||
u16 tmp;
|
||||
|
||||
nv_ck804_thaw(ap);
|
||||
|
||||
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
|
||||
return;
|
||||
|
||||
/* Enable interrupt */
|
||||
tmp = readw(mmio + NV_ADMA_CTL);
|
||||
writew( tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
|
||||
mmio + NV_ADMA_CTL);
|
||||
readw( mmio + NV_ADMA_CTL ); /* flush posted write */
|
||||
}
|
||||
|
||||
static void nv_adma_irq_clear(struct ata_port *ap)
|
||||
{
|
||||
struct nv_adma_port_priv *pp = ap->private_data;
|
||||
void __iomem *mmio = pp->ctl_block;
|
||||
u16 status = readw(mmio + NV_ADMA_STAT);
|
||||
u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
|
||||
u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
|
||||
void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
|
||||
u32 notifier_clears[2];
|
||||
|
||||
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
|
||||
ata_bmdma_irq_clear(ap);
|
||||
return;
|
||||
}
|
||||
|
||||
/* clear any outstanding CK804 notifications */
|
||||
writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
|
||||
ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
|
||||
|
||||
/* clear ADMA status */
|
||||
writew(status, mmio + NV_ADMA_STAT);
|
||||
writel(notifier | notifier_error,
|
||||
pp->notifier_clear_block);
|
||||
|
||||
/** clear legacy status */
|
||||
iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
|
||||
writew(0xffff, mmio + NV_ADMA_STAT);
|
||||
|
||||
/* clear notifiers - note both ports need to be written with
|
||||
something even though we are only clearing on one */
|
||||
if (ap->port_no == 0) {
|
||||
notifier_clears[0] = 0xFFFFFFFF;
|
||||
notifier_clears[1] = 0;
|
||||
} else {
|
||||
notifier_clears[0] = 0;
|
||||
notifier_clears[1] = 0xFFFFFFFF;
|
||||
}
|
||||
pp = ap->host->ports[0]->private_data;
|
||||
writel(notifier_clears[0], pp->notifier_clear_block);
|
||||
pp = ap->host->ports[1]->private_data;
|
||||
writel(notifier_clears[1], pp->notifier_clear_block);
|
||||
}
|
||||
|
||||
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
|
||||
|
|
Loading…
Reference in New Issue