scsi: Split scsi_internal_device_block()

Instead of passing a "wait" argument to scsi_internal_device_block(),
split this function into a function that waits and a function that
doesn't wait. This will make it easier to serialize SCSI device state
changes through a mutex.
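
As a quick orientation (this is only a condensed sketch of the new scsi_lib.c code in the diff below, not an additional change): the non-waiting variant performs the SDEV_BLOCK transition and stops the queue without sleeping, while the waiting variant wraps it and adds the wait for outstanding requests:

    static int scsi_internal_device_block(struct scsi_device *sdev)
    {
            struct request_queue *q = sdev->request_queue;
            int err;

            /* Transition to SDEV_BLOCK and stop the queue; does not sleep. */
            err = scsi_internal_device_block_nowait(sdev);
            if (err == 0) {
                    /* Wait for ongoing .queue_rq() / .queuecommand() calls. */
                    if (q->mq_ops)
                            blk_mq_quiesce_queue(q);
                    else
                            scsi_wait_for_queuecommand(sdev);
            }
            return err;
    }

Callers that must not sleep (e.g. the mpt3sas paths changed below) switch to scsi_internal_device_block_nowait(), while device_block() keeps the waiting behavior via scsi_internal_device_block().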

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Author:    Bart Van Assche, 2017-06-02 14:21:53 -07:00
Committed: Martin K. Petersen
Parent:    8e6882545d
Commit:    551eb598e5

3 changed files with 50 additions and 29 deletions


@@ -2859,7 +2859,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
 	    sas_device_priv_data->sas_target->handle);
 	sas_device_priv_data->block = 1;
 
-	r = scsi_internal_device_block(sdev, false);
+	r = scsi_internal_device_block_nowait(sdev);
 	if (r == -EINVAL)
 		sdev_printk(KERN_WARNING, sdev,
 		    "device_block failed with return(%d) for handle(0x%04x)\n",
@@ -2895,7 +2895,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
 		    "performing a block followed by an unblock\n",
 		    r, sas_device_priv_data->sas_target->handle);
 		sas_device_priv_data->block = 1;
-		r = scsi_internal_device_block(sdev, false);
+		r = scsi_internal_device_block_nowait(sdev);
 		if (r)
 			sdev_printk(KERN_WARNING, sdev, "retried device_block "
 			    "failed with return(%d) for handle(0x%04x)\n",


@@ -2944,28 +2944,20 @@ scsi_target_resume(struct scsi_target *starget)
 EXPORT_SYMBOL(scsi_target_resume);
 
 /**
- * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
+ * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
  * @sdev: device to block
- * @wait: Whether or not to wait until ongoing .queuecommand() /
- *	.queue_rq() calls have finished.
  *
- * Block request made by scsi lld's to temporarily stop all
- * scsi commands on the specified device. May sleep.
+ * Pause SCSI command processing on the specified device. Does not sleep.
  *
- * Returns zero if successful or error if not
+ * Returns zero if successful or a negative error code upon failure.
  *
  * Notes:
- *	This routine transitions the device to the SDEV_BLOCK state
- *	(which must be a legal transition). When the device is in this
- *	state, all commands are deferred until the scsi lld reenables
- *	the device with scsi_device_unblock or device_block_tmo fires.
- *
- * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
- * scsi_internal_device_block() has blocked a SCSI device and also
- * remove the rport mutex lock and unlock calls from srp_queuecommand().
+ * This routine transitions the device to the SDEV_BLOCK state (which must be
+ * a legal transition). When the device is in this state, command processing
+ * is paused until the device leaves the SDEV_BLOCK state. See also
+ * scsi_internal_device_unblock_nowait().
  */
-int
-scsi_internal_device_block(struct scsi_device *sdev, bool wait)
+int scsi_internal_device_block_nowait(struct scsi_device *sdev)
 {
 	struct request_queue *q = sdev->request_queue;
 	unsigned long flags;
@@ -2985,21 +2977,50 @@ scsi_internal_device_block(struct scsi_device *sdev, bool wait)
 	 * request queue.
 	 */
 	if (q->mq_ops) {
-		if (wait)
-			blk_mq_quiesce_queue(q);
-		else
-			blk_mq_stop_hw_queues(q);
+		blk_mq_stop_hw_queues(q);
 	} else {
 		spin_lock_irqsave(q->queue_lock, flags);
 		blk_stop_queue(q);
 		spin_unlock_irqrestore(q->queue_lock, flags);
-		if (wait)
-			scsi_wait_for_queuecommand(sdev);
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(scsi_internal_device_block);
+EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
+
+/**
+ * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
+ * @sdev: device to block
+ *
+ * Pause SCSI command processing on the specified device and wait until all
+ * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep.
+ *
+ * Returns zero if successful or a negative error code upon failure.
+ *
+ * Note:
+ * This routine transitions the device to the SDEV_BLOCK state (which must be
+ * a legal transition). When the device is in this state, command processing
+ * is paused until the device leaves the SDEV_BLOCK state. See also
+ * scsi_internal_device_unblock().
+ *
+ * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
+ * scsi_internal_device_block() has blocked a SCSI device and also
+ * remove the rport mutex lock and unlock calls from srp_queuecommand().
+ */
+static int scsi_internal_device_block(struct scsi_device *sdev)
+{
+	struct request_queue *q = sdev->request_queue;
+	int err;
+
+	err = scsi_internal_device_block_nowait(sdev);
+	if (err == 0) {
+		if (q->mq_ops)
+			blk_mq_quiesce_queue(q);
+		else
+			scsi_wait_for_queuecommand(sdev);
+	}
+	return err;
+}
 
 /**
  * scsi_internal_device_unblock - resume a device after a block request
@@ -3056,7 +3077,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
 static void
 device_block(struct scsi_device *sdev, void *data)
 {
-	scsi_internal_device_block(sdev, true);
+	scsi_internal_device_block(sdev);
 }
 
 static int


@@ -472,7 +472,7 @@ static inline int scsi_device_created(struct scsi_device *sdev)
 		sdev->sdev_state == SDEV_CREATED_BLOCK;
 }
 
-int scsi_internal_device_block(struct scsi_device *sdev, bool wait);
+int scsi_internal_device_block_nowait(struct scsi_device *sdev);
 int scsi_internal_device_unblock(struct scsi_device *sdev,
 				 enum scsi_device_state new_state);
 