[libata] Add host lock to struct ata_port
Prepare for changes required to support SATA devices attached to SAS HBAs. For these devices we don't want to use host_set at all, since libata will not be the owner of struct scsi_host.

Signed-off-by: Brian King <brking@us.ibm.com>
(with slight merge modifications made by...)
Signed-off-by: Jeff Garzik <jeff@garzik.org>
parent 47005f255e
commit ba6a13083c
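The mechanism is a single level of indirection: struct ata_port gains a spinlock_t *lock, normal libata initialization points it at the existing host_set lock, and every locking site that used to dereference &ap->host_set->lock now takes ap->lock instead. The following is only a minimal sketch of that idea, not code from the patch: the structure definitions are simplified stand-ins, and init_port_sas() illustrates what a SAS LLDD could do later with the new pointer; it is not part of this commit.

#include <linux/spinlock.h>

/* Simplified stand-ins for the real libata structures (illustration only). */
struct ata_host_set { spinlock_t lock; };
struct ata_port     { spinlock_t *lock; unsigned long flags; };

/* Classic libata host: the port keeps borrowing the shared host_set lock,
 * which is what ata_host_init() does in this patch. */
static void init_port_classic(struct ata_port *ap, struct ata_host_set *host_set)
{
        ap->lock = &host_set->lock;
}

/* A SAS HBA that owns its own Scsi_Host could instead hand the port a lock
 * it already manages (hypothetical; enabled by, but not part of, this patch). */
static void init_port_sas(struct ata_port *ap, spinlock_t *hba_lock)
{
        ap->lock = hba_lock;
}

/* All former &ap->host_set->lock users simply take ap->lock now. */
static void mark_port_flag(struct ata_port *ap, unsigned long flag_bit)
{
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);
        ap->flags |= flag_bit;          /* stand-in for an ATA_FLAG_* update */
        spin_unlock_irqrestore(ap->lock, flags);
}

For plain SATA hosts nothing changes at runtime, since ap->lock still resolves to the same host_set spinlock; the gain is that a future SAS path can substitute its own lock without libata caring where it lives.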
@@ -715,7 +715,6 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
                        ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
                        ata_postreset_fn_t postreset)
 {
-        struct ata_host_set *host_set = ap->host_set;
         struct ata_eh_context *ehc = &ap->eh_context;
         struct ata_queued_cmd *qc;
         unsigned long flags;
@@ -726,7 +725,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
         qc = NULL;
 
         /* reset PIO HSM and stop DMA engine */
-        spin_lock_irqsave(&host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
 
         ap->hsm_task_state = HSM_ST_IDLE;
 
@@ -755,7 +754,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
         ata_chk_status(ap);
         ap->ops->irq_clear(ap);
 
-        spin_unlock_irqrestore(&host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         if (thaw)
                 ata_eh_thaw_port(ap);

@@ -933,9 +933,9 @@ void ata_port_flush_task(struct ata_port *ap)
 
         DPRINTK("ENTER\n");
 
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
         ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         DPRINTK("flush #1\n");
         flush_workqueue(ata_wq);
@@ -950,9 +950,9 @@ void ata_port_flush_task(struct ata_port *ap)
                 flush_workqueue(ata_wq);
         }
 
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
         ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         DPRINTK("EXIT\n");
 }
@@ -999,11 +999,11 @@ unsigned ata_exec_internal(struct ata_device *dev,
         unsigned int err_mask;
         int rc;
 
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
 
         /* no internal command while frozen */
         if (ap->flags & ATA_FLAG_FROZEN) {
-                spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                spin_unlock_irqrestore(ap->lock, flags);
                 return AC_ERR_SYSTEM;
         }
 
@@ -1052,14 +1052,14 @@ unsigned ata_exec_internal(struct ata_device *dev,
 
         ata_qc_issue(qc);
 
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
 
         ata_port_flush_task(ap);
 
         if (!rc) {
-                spin_lock_irqsave(&ap->host_set->lock, flags);
+                spin_lock_irqsave(ap->lock, flags);
 
                 /* We're racing with irq here. If we lose, the
                  * following test prevents us from completing the qc
@@ -1078,7 +1078,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
                                "qc timeout (cmd 0x%x)\n", command);
                 }
 
-                spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                spin_unlock_irqrestore(ap->lock, flags);
         }
 
         /* do post_internal_cmd */
@@ -1092,7 +1092,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
         }
 
         /* finish up */
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
 
         *tf = qc->result_tf;
         err_mask = qc->err_mask;
@@ -1118,7 +1118,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
                 ata_port_probe(ap);
         }
 
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         return err_mask;
 }
@@ -3912,7 +3912,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 
         if (ap->ops->error_handler) {
                 if (in_wq) {
-                        spin_lock_irqsave(&ap->host_set->lock, flags);
+                        spin_lock_irqsave(ap->lock, flags);
 
                         /* EH might have kicked in while host_set lock
                          * is released.
@@ -3926,7 +3926,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
                                         ata_port_freeze(ap);
                         }
 
-                        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                        spin_unlock_irqrestore(ap->lock, flags);
                 } else {
                         if (likely(!(qc->err_mask & AC_ERR_HSM)))
                                 ata_qc_complete(qc);
@@ -3935,10 +3935,10 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
                 }
         } else {
                 if (in_wq) {
-                        spin_lock_irqsave(&ap->host_set->lock, flags);
+                        spin_lock_irqsave(ap->lock, flags);
                         ata_irq_on(ap);
                         ata_qc_complete(qc);
-                        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                        spin_unlock_irqrestore(ap->lock, flags);
                 } else
                         ata_qc_complete(qc);
         }
@@ -4018,7 +4018,7 @@ fsm_start:
                  * hsm_task_state is changed. Hence, the following locking.
                  */
                 if (in_wq)
-                        spin_lock_irqsave(&ap->host_set->lock, flags);
+                        spin_lock_irqsave(ap->lock, flags);
 
                 if (qc->tf.protocol == ATA_PROT_PIO) {
                         /* PIO data out protocol.
@@ -4037,7 +4037,7 @@ fsm_start:
                         atapi_send_cdb(ap, qc);
 
                 if (in_wq)
-                        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                        spin_unlock_irqrestore(ap->lock, flags);
 
                 /* if polling, ata_pio_task() handles the rest.
                  * otherwise, interrupt handler takes over from here.
@@ -5130,9 +5130,9 @@ void ata_dev_init(struct ata_device *dev)
          * requests which occur asynchronously. Synchronize using
          * host_set lock.
          */
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
         dev->flags &= ~ATA_DFLAG_INIT_MASK;
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
                sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
@@ -5167,6 +5167,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
         host->unique_id = ata_unique_id++;
         host->max_cmd_len = 12;
 
+        ap->lock = &host_set->lock;
         ap->flags = ATA_FLAG_DISABLED;
         ap->id = host->unique_id;
         ap->host = host;
@@ -5388,7 +5389,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
                 ata_port_probe(ap);
 
                 /* kick EH for boot probing */
-                spin_lock_irqsave(&ap->host_set->lock, flags);
+                spin_lock_irqsave(ap->lock, flags);
 
                 ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
                 ap->eh_info.action |= ATA_EH_SOFTRESET;
@@ -5396,7 +5397,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
                 ap->flags |= ATA_FLAG_LOADING;
                 ata_port_schedule_eh(ap);
 
-                spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                spin_unlock_irqrestore(ap->lock, flags);
 
                 /* wait for EH to finish */
                 ata_port_wait_eh(ap);
@@ -5460,29 +5461,29 @@ void ata_port_detach(struct ata_port *ap)
                 return;
 
         /* tell EH we're leaving & flush EH */
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
         ap->flags |= ATA_FLAG_UNLOADING;
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         ata_port_wait_eh(ap);
 
         /* EH is now guaranteed to see UNLOADING, so no new device
          * will be attached. Disable all existing devices.
          */
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
 
         for (i = 0; i < ATA_MAX_DEVICES; i++)
                 ata_dev_disable(&ap->device[i]);
 
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         /* Final freeze & EH. All in-flight commands are aborted. EH
          * will be skipped and retrials will be terminated with bad
          * target.
          */
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
         ata_port_freeze(ap);    /* won't be thawed */
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         ata_port_wait_eh(ap);
 

@@ -128,7 +128,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
         }
 
         ret = EH_HANDLED;
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
         qc = ata_qc_from_tag(ap, ap->active_tag);
         if (qc) {
                 WARN_ON(qc->scsicmd != cmd);
@@ -136,7 +136,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
                 qc->err_mask |= AC_ERR_TIMEOUT;
                 ret = EH_NOT_HANDLED;
         }
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
  out:
         DPRINTK("EXIT, ret=%d\n", ret);
@@ -158,7 +158,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 void ata_scsi_error(struct Scsi_Host *host)
 {
         struct ata_port *ap = ata_shost_to_port(host);
-        spinlock_t *hs_lock = &ap->host_set->lock;
+        spinlock_t *ap_lock = ap->lock;
         int i, repeat_cnt = ATA_EH_MAX_REPEAT;
         unsigned long flags;
 
@@ -185,7 +185,7 @@ void ata_scsi_error(struct Scsi_Host *host)
                 struct scsi_cmnd *scmd, *tmp;
                 int nr_timedout = 0;
 
-                spin_lock_irqsave(hs_lock, flags);
+                spin_lock_irqsave(ap_lock, flags);
 
                 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
                         struct ata_queued_cmd *qc;
@@ -224,15 +224,15 @@ void ata_scsi_error(struct Scsi_Host *host)
                 if (nr_timedout)
                         __ata_port_freeze(ap);
 
-                spin_unlock_irqrestore(hs_lock, flags);
+                spin_unlock_irqrestore(ap_lock, flags);
         } else
-                spin_unlock_wait(hs_lock);
+                spin_unlock_wait(ap_lock);
 
  repeat:
         /* invoke error handler */
         if (ap->ops->error_handler) {
                 /* fetch & clear EH info */
-                spin_lock_irqsave(hs_lock, flags);
+                spin_lock_irqsave(ap_lock, flags);
 
                 memset(&ap->eh_context, 0, sizeof(ap->eh_context));
                 ap->eh_context.i = ap->eh_info;
@@ -241,7 +241,7 @@ void ata_scsi_error(struct Scsi_Host *host)
                 ap->flags |= ATA_FLAG_EH_IN_PROGRESS;
                 ap->flags &= ~ATA_FLAG_EH_PENDING;
 
-                spin_unlock_irqrestore(hs_lock, flags);
+                spin_unlock_irqrestore(ap_lock, flags);
 
                 /* invoke EH. if unloading, just finish failed qcs */
                 if (!(ap->flags & ATA_FLAG_UNLOADING))
@@ -253,14 +253,14 @@ void ata_scsi_error(struct Scsi_Host *host)
                  * recovered the port but before this point. Repeat
                  * EH in such case.
                  */
-                spin_lock_irqsave(hs_lock, flags);
+                spin_lock_irqsave(ap_lock, flags);
 
                 if (ap->flags & ATA_FLAG_EH_PENDING) {
                         if (--repeat_cnt) {
                                 ata_port_printk(ap, KERN_INFO,
                                         "EH pending after completion, "
                                         "repeating EH (cnt=%d)\n", repeat_cnt);
-                                spin_unlock_irqrestore(hs_lock, flags);
+                                spin_unlock_irqrestore(ap_lock, flags);
                                 goto repeat;
                         }
                         ata_port_printk(ap, KERN_ERR, "EH pending after %d "
@@ -270,14 +270,14 @@ void ata_scsi_error(struct Scsi_Host *host)
                 /* this run is complete, make sure EH info is clear */
                 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
 
-                /* Clear host_eh_scheduled while holding hs_lock such
+                /* Clear host_eh_scheduled while holding ap_lock such
                  * that if exception occurs after this point but
                  * before EH completion, SCSI midlayer will
                  * re-initiate EH.
                  */
                 host->host_eh_scheduled = 0;
 
-                spin_unlock_irqrestore(hs_lock, flags);
+                spin_unlock_irqrestore(ap_lock, flags);
         } else {
                 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
                 ap->ops->eng_timeout(ap);
@@ -289,7 +289,7 @@ void ata_scsi_error(struct Scsi_Host *host)
         scsi_eh_flush_done_q(&ap->eh_done_q);
 
         /* clean up */
-        spin_lock_irqsave(hs_lock, flags);
+        spin_lock_irqsave(ap_lock, flags);
 
         if (ap->flags & ATA_FLAG_LOADING) {
                 ap->flags &= ~ATA_FLAG_LOADING;
@@ -306,7 +306,7 @@ void ata_scsi_error(struct Scsi_Host *host)
         ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS;
         wake_up_all(&ap->eh_wait_q);
 
-        spin_unlock_irqrestore(hs_lock, flags);
+        spin_unlock_irqrestore(ap_lock, flags);
 
         DPRINTK("EXIT\n");
 }
@@ -326,17 +326,17 @@ void ata_port_wait_eh(struct ata_port *ap)
         DEFINE_WAIT(wait);
 
  retry:
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
 
         while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) {
                 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
-                spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                spin_unlock_irqrestore(ap->lock, flags);
                 schedule();
-                spin_lock_irqsave(&ap->host_set->lock, flags);
+                spin_lock_irqsave(ap->lock, flags);
         }
         finish_wait(&ap->eh_wait_q, &wait);
 
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         /* make sure SCSI EH is complete */
         if (scsi_host_in_recovery(ap->host)) {
@@ -368,7 +368,6 @@ void ata_port_wait_eh(struct ata_port *ap)
 static void ata_qc_timeout(struct ata_queued_cmd *qc)
 {
         struct ata_port *ap = qc->ap;
-        struct ata_host_set *host_set = ap->host_set;
         u8 host_stat = 0, drv_stat;
         unsigned long flags;
 
@@ -376,7 +375,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 
         ap->hsm_task_state = HSM_ST_IDLE;
 
-        spin_lock_irqsave(&host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
 
         switch (qc->tf.protocol) {
 
@@ -405,7 +404,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
                 break;
         }
 
-        spin_unlock_irqrestore(&host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         ata_eh_qc_complete(qc);
 
@@ -592,9 +591,9 @@ void ata_eh_freeze_port(struct ata_port *ap)
         if (!ap->ops->error_handler)
                 return;
 
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
         __ata_port_freeze(ap);
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 }
 
 /**
@@ -613,14 +612,14 @@ void ata_eh_thaw_port(struct ata_port *ap)
         if (!ap->ops->error_handler)
                 return;
 
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
 
         ap->flags &= ~ATA_FLAG_FROZEN;
 
         if (ap->ops->thaw)
                 ap->ops->thaw(ap);
 
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         DPRINTK("ata%u port thawed\n", ap->id);
 }
@@ -636,11 +635,11 @@ static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
         struct scsi_cmnd *scmd = qc->scsicmd;
         unsigned long flags;
 
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
         qc->scsidone = ata_eh_scsidone;
         __ata_qc_complete(qc);
         WARN_ON(ata_tag_valid(qc->tag));
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
 }
@@ -694,7 +693,7 @@ static void ata_eh_detach_dev(struct ata_device *dev)
 
         ata_dev_disable(dev);
 
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
 
         dev->flags &= ~ATA_DFLAG_DETACH;
 
@@ -703,7 +702,7 @@ static void ata_eh_detach_dev(struct ata_device *dev)
                 ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
         }
 
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 }
 
 static void ata_eh_clear_action(struct ata_device *dev,
@@ -749,10 +748,10 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
         ata_eh_clear_action(dev, &ap->eh_info, action);
         ap->flags |= ATA_FLAG_RECOVERED;
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 }
 
 /**
@@ -1625,9 +1624,9 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
                                 break;
                         }
 
-                        spin_lock_irqsave(&ap->host_set->lock, flags);
+                        spin_lock_irqsave(ap->lock, flags);
                         ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
-                        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                        spin_unlock_irqrestore(ap->lock, flags);
                 }
         }
 

@@ -752,7 +752,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
         if (!ap->ops->error_handler)
                 return;
 
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
         dev = __ata_scsi_find_dev(ap, sdev);
         if (dev && dev->sdev) {
                 /* SCSI device already in CANCEL state, no need to offline it */
@@ -760,7 +760,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
                 dev->flags |= ATA_DFLAG_DETACH;
                 ata_port_schedule_eh(ap);
         }
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 }
 
 /**
@@ -2684,7 +2684,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
         ap = ata_shost_to_port(shost);
 
         spin_unlock(shost->host_lock);
-        spin_lock(&ap->host_set->lock);
+        spin_lock(ap->lock);
 
         ata_scsi_dump_cdb(ap, cmd);
 
@@ -2696,7 +2696,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
                 done(cmd);
         }
 
-        spin_unlock(&ap->host_set->lock);
+        spin_unlock(ap->lock);
         spin_lock(shost->host_lock);
         return rc;
 }
@@ -2858,7 +2858,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
          * increments reference counts regardless of device state.
          */
         mutex_lock(&ap->host->scan_mutex);
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
 
         /* clearing dev->sdev is protected by host_set lock */
         sdev = dev->sdev;
@@ -2882,7 +2882,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
                 }
         }
 
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
         mutex_unlock(&ap->host->scan_mutex);
 
         if (sdev) {
@@ -2926,9 +2926,9 @@ void ata_scsi_hotplug(void *data)
                 if (!(dev->flags & ATA_DFLAG_DETACHED))
                         continue;
 
-                spin_lock_irqsave(&ap->host_set->lock, flags);
+                spin_lock_irqsave(ap->lock, flags);
                 dev->flags &= ~ATA_DFLAG_DETACHED;
-                spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                spin_unlock_irqrestore(ap->lock, flags);
 
                 ata_scsi_remove_dev(dev);
         }
@@ -2981,7 +2981,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
             (lun != SCAN_WILD_CARD && lun != 0))
                 return -EINVAL;
 
-        spin_lock_irqsave(&ap->host_set->lock, flags);
+        spin_lock_irqsave(ap->lock, flags);
 
         if (id == SCAN_WILD_CARD) {
                 ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
@@ -2999,7 +2999,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
         if (rc == 0)
                 ata_port_schedule_eh(ap);
 
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+        spin_unlock_irqrestore(ap->lock, flags);
 
         return rc;
 }

@@ -483,6 +483,7 @@ struct ata_eh_context {
 struct ata_port {
         struct Scsi_Host        *host;  /* our co-allocated scsi host */
         const struct ata_port_operations *ops;
+        spinlock_t              *lock;
         unsigned long           flags;  /* ATA_FLAG_xxx */
         unsigned int            id;     /* unique id req'd by scsi midlyr */
         unsigned int            port_no; /* unique port #; from zero */