blk-mq: Drop blk_mq_ops.timeout 'reserved' arg
With the new blk_mq_is_reserved_rq() API we can tell whether a request comes from the reserved pool, so stop passing the 'reserved' arg to the blk_mq_ops.timeout callback. Only a single callback implementation actually uses that arg, and it can call blk_mq_is_reserved_rq() instead. This will also allow us to stop passing the same 'reserved' flag around the blk-mq iter functions next.

Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Acked-by: Ulf Hansson <ulf.hansson@linaro.org> # For MMC
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/1657109034-206040-4-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 99e48cd685
commit 9bdb4833dd
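For reference, a minimal sketch of what a converted driver timeout callback looks like after this change; foo_timeout and foo_mq_ops are made-up names used only for illustration, while enum blk_eh_timer_return, its values and blk_mq_is_reserved_rq() come from the patch itself:

#include <linux/blk-mq.h>

/*
 * Illustrative timeout callback using the new single-argument prototype.
 * The reserved status is now queried from the request instead of being
 * passed in as a 'reserved' parameter.
 */
static enum blk_eh_timer_return foo_timeout(struct request *rq)
{
        /* Hypothetical policy: give reserved (internal) commands more time. */
        if (blk_mq_is_reserved_rq(rq))
                return BLK_EH_RESET_TIMER;

        /* Otherwise report the timeout as handled by the driver. */
        return BLK_EH_DONE;
}

static const struct blk_mq_ops foo_mq_ops = {
        /* .queue_rq and the other mandatory callbacks are omitted here. */
        .timeout        = foo_timeout,
};

As the hunks below show, only mtip32xx actually consumed the flag and now calls blk_mq_is_reserved_rq(); every other implementation simply drops the parameter.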
@@ -1427,13 +1427,13 @@ bool blk_mq_queue_inflight(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
 
-static void blk_mq_rq_timed_out(struct request *req, bool reserved)
+static void blk_mq_rq_timed_out(struct request *req)
 {
         req->rq_flags |= RQF_TIMED_OUT;
         if (req->q->mq_ops->timeout) {
                 enum blk_eh_timer_return ret;
 
-                ret = req->q->mq_ops->timeout(req, reserved);
+                ret = req->q->mq_ops->timeout(req);
                 if (ret == BLK_EH_DONE)
                         return;
                 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
@@ -1482,7 +1482,7 @@ static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved)
          * from blk_mq_check_expired().
          */
         if (blk_mq_req_expired(rq, next))
-                blk_mq_rq_timed_out(rq, reserved);
+                blk_mq_rq_timed_out(rq);
         return true;
 }
 
@@ -331,7 +331,7 @@ void bsg_remove_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(bsg_remove_queue);
 
-static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return bsg_timeout(struct request *rq)
 {
         struct bsg_set *bset =
                 container_of(rq->q->tag_set, struct bsg_set, tag_set);
@@ -3357,12 +3357,11 @@ static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
         return 0;
 }
 
-static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
-                                                 bool reserved)
+static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req)
 {
         struct driver_data *dd = req->q->queuedata;
 
-        if (reserved) {
+        if (blk_mq_is_reserved_rq(req)) {
                 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
 
                 cmd->status = BLK_STS_TIMEOUT;
@@ -393,8 +393,7 @@ static u32 req_to_nbd_cmd_type(struct request *req)
         }
 }
 
-static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
-                                                 bool reserved)
+static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
 {
         struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
         struct nbd_device *nbd = cmd->nbd;
@@ -1578,7 +1578,7 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
         return nr;
 }
 
-static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
+static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
 {
         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
         struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
@@ -116,8 +116,7 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
         }
 }
 
-static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
-                                                 bool reserved)
+static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
 {
         struct request_queue *q = req->q;
         struct mmc_queue *mq = q->queuedata;
@@ -862,8 +862,7 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
         }
 }
 
-static enum blk_eh_timer_return apple_nvme_timeout(struct request *req,
-                                                   bool reserved)
+static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
 {
         struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
         struct apple_nvme_queue *q = iod->q;
@@ -2565,8 +2565,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
         nvme_reset_ctrl(&ctrl->ctrl);
 }
 
-static enum blk_eh_timer_return
-nvme_fc_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
 {
         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
         struct nvme_fc_ctrl *ctrl = op->ctrl;
@@ -1344,7 +1344,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
                  "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
 }
 
-static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
+static enum blk_eh_timer_return nvme_timeout(struct request *req)
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
         struct nvme_queue *nvmeq = iod->nvmeq;
@@ -2013,8 +2013,7 @@ static void nvme_rdma_complete_timed_out(struct request *rq)
         nvmf_complete_timed_out_request(rq);
 }
 
-static enum blk_eh_timer_return
-nvme_rdma_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
 {
         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
         struct nvme_rdma_queue *queue = req->queue;
@@ -2321,8 +2321,7 @@ static void nvme_tcp_complete_timed_out(struct request *rq)
         nvmf_complete_timed_out_request(rq);
 }
 
-static enum blk_eh_timer_return
-nvme_tcp_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
 {
         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
         struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
@@ -3145,7 +3145,7 @@ out:
  *   BLK_EH_DONE if the request is handled or terminated
  *   by the driver.
  */
-enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
+enum blk_eh_timer_return dasd_times_out(struct request *req)
 {
         struct dasd_block *block = req->q->queuedata;
         struct dasd_device *device;
@@ -795,7 +795,7 @@ void dasd_free_device(struct dasd_device *);
 struct dasd_block *dasd_alloc_block(void);
 void dasd_free_block(struct dasd_block *);
 
-enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved);
+enum blk_eh_timer_return dasd_times_out(struct request *req);
 
 void dasd_enable_device(struct dasd_device *);
 void dasd_set_target_state(struct dasd_device *, int);
@@ -318,7 +318,6 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
 /**
  * scsi_timeout - Timeout function for normal scsi commands.
  * @req: request that is timing out.
- * @reserved: whether the request is a reserved request.
  *
  * Notes:
  *     We do not need to lock this. There is the potential for a race
@@ -326,7 +325,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
  *     normal completion function determines that the timer has already
  *     fired, then it mustn't do anything.
  */
-enum blk_eh_timer_return scsi_timeout(struct request *req, bool reserved)
+enum blk_eh_timer_return scsi_timeout(struct request *req)
 {
         struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
         enum blk_eh_timer_return rtn = BLK_EH_DONE;
@@ -72,8 +72,7 @@ extern void scsi_exit_devinfo(void);
 
 /* scsi_error.c */
 extern void scmd_eh_abort_handler(struct work_struct *work);
-extern enum blk_eh_timer_return scsi_timeout(struct request *req,
-                                             bool reserved);
+extern enum blk_eh_timer_return scsi_timeout(struct request *req);
 extern int scsi_error_handler(void *host);
 extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
 extern void scsi_eh_wakeup(struct Scsi_Host *shost);
@@ -575,7 +575,7 @@ struct blk_mq_ops {
         /**
          * @timeout: Called on request timeout.
          */
-        enum blk_eh_timer_return (*timeout)(struct request *, bool);
+        enum blk_eh_timer_return (*timeout)(struct request *);
 
         /**
          * @poll: Called to poll for completion of a specific tag.