isci: Callbacks to libsas occur under scic_lock and are synchronized.

This patch changes the callback mechanism so that callbacks to libsas occur
only while scic_lock is held; the abort-path cleanup of I/Os also checks that
IREQ_ABORT_PATH_ACTIVE is clear before proceeding.
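
The sketch below is not part of this patch; it is a minimal, self-contained
illustration of the wait_event()/wake_up_all() idiom the change relies on
(sample a per-request flag only under the host spinlock in the wait
predicate, clear the flag and wake waiters on the completion side). All
names here (struct example_host, struct example_request,
EXAMPLE_ABORT_ACTIVE, the example_* helpers) are invented for illustration
and do not exist in the isci driver.

/*
 * Minimal sketch of the completion/abort synchronization pattern.
 * Hypothetical types and names; not driver code.
 */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/bitops.h>

#define EXAMPLE_ABORT_ACTIVE	0	/* bit number in req->flags */

struct example_host {
	spinlock_t lock;		/* stands in for ihost->scic_lock */
	wait_queue_head_t eventq;	/* stands in for ihost->eventq */
};

struct example_request {
	unsigned long flags;
};

static void example_init(struct example_host *h, struct example_request *req)
{
	spin_lock_init(&h->lock);
	init_waitqueue_head(&h->eventq);
	req->flags = 0;
	set_bit(EXAMPLE_ABORT_ACTIVE, &req->flags);
}

/* Completion path: clear the abort flag and wake any waiters. */
static void example_complete(struct example_host *h, struct example_request *req)
{
	if (test_and_clear_bit(EXAMPLE_ABORT_ACTIVE, &req->flags))
		wake_up_all(&h->eventq);
}

/* wait_event() predicate: sample the flag only while holding the lock. */
static bool example_abort_done(struct example_host *h, struct example_request *req)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&h->lock, flags);
	done = !test_bit(EXAMPLE_ABORT_ACTIVE, &req->flags);
	spin_unlock_irqrestore(&h->lock, flags);
	return done;
}

/* Abort path: block until the completion side has cleared the flag. */
static void example_wait_for_abort(struct example_host *h, struct example_request *req)
{
	wait_event(h->eventq, example_abort_done(h, req));
}

The patch applies this same pattern with the driver's real state: the
completion and abort paths below test IREQ_ABORT_PATH_ACTIVE and the RNC
suspend count under scic_lock before deciding the wait is over.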

Signed-off-by: Jeff Skirvin <jeffrey.d.skirvin@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Author: Jeff Skirvin, 2012-03-08 22:42:03 -08:00 (committed by Dan Williams)
commit 033d19d298
parent 0c3ce38f1b
2 changed files with 40 additions and 13 deletions

drivers/scsi/isci/host.c

@@ -1105,8 +1105,6 @@ void isci_host_completion_routine(unsigned long data)
list_splice_init(&ihost->requests_to_complete,
&completed_request_list);
spin_unlock_irq(&ihost->scic_lock);
/* Process any completions in the list. */
list_for_each_safe(current_position, next_position,
&completed_request_list) {
@@ -1115,7 +1113,6 @@ void isci_host_completion_routine(unsigned long data)
completed_node);
task = isci_request_access_task(request);
/* Return the task to libsas */
if (task != NULL) {
@@ -1141,11 +1138,12 @@ void isci_host_completion_routine(unsigned long data)
}
}
}
if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &request->flags))
wake_up_all(&ihost->eventq);
spin_lock_irq(&ihost->scic_lock);
isci_free_tag(ihost, request->io_tag);
spin_unlock_irq(&ihost->scic_lock);
}
spin_unlock_irq(&ihost->scic_lock);
/* the coalesence timeout doubles at each encoding step, so
* update it based on the ilog2 value of the outstanding requests
@@ -2703,8 +2701,6 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
index = ISCI_TAG_TCI(ireq->io_tag);
clear_bit(IREQ_ACTIVE, &ireq->flags);
if (test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
wake_up_all(&ihost->eventq);
return SCI_SUCCESS;
default:
dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",

drivers/scsi/isci/remote_device.c

@@ -145,6 +145,39 @@ static bool isci_compare_suspendcount(
return localcount != idev->rnc.suspend_count;
}
static bool isci_check_reqterm(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq,
u32 localcount)
{
unsigned long flags;
bool res;
spin_lock_irqsave(&ihost->scic_lock, flags);
res = isci_compare_suspendcount(idev, localcount)
&& !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
return res;
}
static bool isci_check_devempty(
struct isci_host *ihost,
struct isci_remote_device *idev,
u32 localcount)
{
unsigned long flags;
bool res;
spin_lock_irqsave(&ihost->scic_lock, flags);
res = isci_compare_suspendcount(idev, localcount)
&& idev->started_request_count == 0;
spin_unlock_irqrestore(&ihost->scic_lock, flags);
return res;
}
enum sci_status isci_remote_device_terminate_requests(
struct isci_host *ihost,
struct isci_remote_device *idev,
@@ -179,17 +212,15 @@ enum sci_status isci_remote_device_terminate_requests(
sci_remote_device_terminate_req(ihost, idev, 0, ireq);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
wait_event(ihost->eventq,
(isci_compare_suspendcount(idev,
rnc_suspend_count)
&& !test_bit(IREQ_ACTIVE, &ireq->flags)));
isci_check_reqterm(ihost, idev, ireq,
rnc_suspend_count));
} else {
/* Terminate all TCs. */
sci_remote_device_terminate_requests(idev);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
wait_event(ihost->eventq,
(isci_compare_suspendcount(idev,
rnc_suspend_count)
&& idev->started_request_count == 0));
isci_check_devempty(ihost, idev,
rnc_suspend_count));
}
dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
__func__, idev);