block: change request end_io handler to pass back a return value
Every handler is simply converted to returning RQ_END_IO_NONE, so there should be no functional changes with this patch.

This is in preparation for allowing the end_io handler to pass ownership of the request back to the block layer, rather than retain ownership of it.

Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit de671d6116
parent 4b6a5d9cea
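For illustration, here is a minimal sketch of an end_io handler written against the new prototype. It is not part of this patch; the handler name and its private data struct are hypothetical, while enum rq_end_io_ret and its values come from the blk-mq.h hunk at the end of the diff below.

#include <linux/blk-mq.h>
#include <linux/completion.h>

/* Hypothetical per-request context, for illustration only. */
struct my_end_io_data {
	struct completion done;
};

static enum rq_end_io_ret my_end_io(struct request *rq, blk_status_t error)
{
	struct my_end_io_data *data = rq->end_io_data;

	complete(&data->done);

	/*
	 * RQ_END_IO_NONE keeps today's behaviour: the handler retains
	 * ownership of the request. Follow-up patches will let a handler
	 * return RQ_END_IO_FREE instead, handing the request back to the
	 * block layer, which then calls blk_mq_free_request() for it (see
	 * the __blk_mq_end_request() hunk below).
	 */
	return RQ_END_IO_NONE;
}
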
@@ -217,7 +217,8 @@ static void blk_flush_complete_seq(struct request *rq,
 	blk_kick_flush(q, fq, cmd_flags);
 }
 
-static void flush_end_io(struct request *flush_rq, blk_status_t error)
+static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
+				       blk_status_t error)
 {
 	struct request_queue *q = flush_rq->q;
 	struct list_head *running;
@@ -231,7 +232,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	if (!req_ref_put_and_test(flush_rq)) {
 		fq->rq_status = error;
 		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
-		return;
+		return RQ_END_IO_NONE;
 	}
 
 	blk_account_io_flush(flush_rq);
@@ -268,6 +269,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	}
 
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+	return RQ_END_IO_NONE;
 }
 
 bool is_flush_rq(struct request *rq)
@@ -353,7 +355,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	blk_flush_queue_rq(flush_rq, false);
 }
 
-static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
+static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
+					       blk_status_t error)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
@@ -375,6 +378,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 
 	blk_mq_sched_restart(hctx);
+	return RQ_END_IO_NONE;
 }
 
 /**

@@ -1001,7 +1001,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 
 	if (rq->end_io) {
 		rq_qos_done(rq->q, rq);
-		rq->end_io(rq, error);
+		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
+			blk_mq_free_request(rq);
 	} else {
 		blk_mq_free_request(rq);
 	}
@@ -1295,12 +1296,13 @@ struct blk_rq_wait {
 	blk_status_t ret;
 };
 
-static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
 {
 	struct blk_rq_wait *wait = rq->end_io_data;
 
 	wait->ret = ret;
 	complete(&wait->done);
+	return RQ_END_IO_NONE;
 }
 
 bool blk_rq_is_poll(struct request *rq)
@@ -1534,10 +1536,12 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 void blk_mq_put_rq_ref(struct request *rq)
 {
-	if (is_flush_rq(rq))
-		rq->end_io(rq, 0);
-	else if (req_ref_put_and_test(rq))
+	if (is_flush_rq(rq)) {
+		if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
+			blk_mq_free_request(rq);
+	} else if (req_ref_put_and_test(rq)) {
 		__blk_mq_free_request(rq);
+	}
 }
 
 static bool blk_mq_check_expired(struct request *rq, void *priv)

@@ -292,11 +292,13 @@ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
 	dm_complete_request(rq, error);
 }
 
-static void end_clone_request(struct request *clone, blk_status_t error)
+static enum rq_end_io_ret end_clone_request(struct request *clone,
+					    blk_status_t error)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
 	dm_complete_request(tio->orig, error);
+	return RQ_END_IO_NONE;
 }
 
 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,

@@ -1172,7 +1172,8 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
 	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
 }
 
-static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
+static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+						 blk_status_t status)
 {
 	struct nvme_ctrl *ctrl = rq->end_io_data;
 	unsigned long flags;
@@ -1184,7 +1185,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 		dev_err(ctrl->device,
 			"failed nvme_keep_alive_end_io error=%d\n",
 			status);
-		return;
+		return RQ_END_IO_NONE;
 	}
 
 	ctrl->comp_seen = false;
@@ -1195,6 +1196,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 	spin_unlock_irqrestore(&ctrl->lock, flags);
 	if (startka)
 		nvme_queue_keep_alive_work(ctrl);
+	return RQ_END_IO_NONE;
 }
 
 static void nvme_keep_alive_work(struct work_struct *work)

@@ -392,7 +392,8 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
 	io_uring_cmd_done(ioucmd, status, result);
 }
 
-static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
+static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+						blk_status_t err)
 {
 	struct io_uring_cmd *ioucmd = req->end_io_data;
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
@@ -411,6 +412,8 @@ static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
 		nvme_uring_task_cb(ioucmd);
 	else
 		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+	return RQ_END_IO_NONE;
 }
 
 static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,

@@ -1268,7 +1268,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
 	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
-static void abort_endio(struct request *req, blk_status_t error)
+static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
 {
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 
@@ -1276,6 +1276,7 @@ static void abort_endio(struct request *req, blk_status_t error)
 		 "Abort status: 0x%x", nvme_req(req)->status);
 	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
 	blk_mq_free_request(req);
+	return RQ_END_IO_NONE;
 }
 
 static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
@@ -2447,22 +2448,25 @@ out_unlock:
 	return result;
 }
 
-static void nvme_del_queue_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
+					     blk_status_t error)
 {
 	struct nvme_queue *nvmeq = req->end_io_data;
 
 	blk_mq_free_request(req);
 	complete(&nvmeq->delete_done);
+	return RQ_END_IO_NONE;
 }
 
-static void nvme_del_cq_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
+					  blk_status_t error)
 {
 	struct nvme_queue *nvmeq = req->end_io_data;
 
 	if (error)
 		set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
 
-	nvme_del_queue_end(req, error);
+	return nvme_del_queue_end(req, error);
 }
 
 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)

@@ -245,14 +245,15 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 	nvme_passthru_end(ctrl, effects, req->cmd, status);
 }
 
-static void nvmet_passthru_req_done(struct request *rq,
-				    blk_status_t blk_status)
+static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
+						  blk_status_t blk_status)
 {
 	struct nvmet_req *req = rq->end_io_data;
 
 	req->cqe->result = nvme_req(rq)->result;
 	nvmet_req_complete(req, nvme_req(rq)->status);
 	blk_mq_free_request(rq);
+	return RQ_END_IO_NONE;
 }
 
 static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)

@@ -2004,9 +2004,11 @@ maybe_retry:
 	}
 }
 
-static void eh_lock_door_done(struct request *req, blk_status_t status)
+static enum rq_end_io_ret eh_lock_door_done(struct request *req,
+					    blk_status_t status)
 {
 	blk_mq_free_request(req);
+	return RQ_END_IO_NONE;
 }
 
 /**

@@ -177,7 +177,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
 } Sg_device;
 
 /* tasklet or soft irq callback */
-static void sg_rq_end_io(struct request *rq, blk_status_t status);
+static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status);
 static int sg_start_req(Sg_request *srp, unsigned char *cmd);
 static int sg_finish_rem_req(Sg_request * srp);
 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
@@ -1311,7 +1311,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
  * This function is a "bottom half" handler that is called by the mid
  * level when a command is completed (or has failed).
  */
-static void
+static enum rq_end_io_ret
 sg_rq_end_io(struct request *rq, blk_status_t status)
 {
 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
@@ -1324,11 +1324,11 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
 	int result, resid, done = 1;
 
 	if (WARN_ON(srp->done != 0))
-		return;
+		return RQ_END_IO_NONE;
 
 	sfp = srp->parentfp;
 	if (WARN_ON(sfp == NULL))
-		return;
+		return RQ_END_IO_NONE;
 
 	sdp = sfp->parentdp;
 	if (unlikely(atomic_read(&sdp->detaching)))
@@ -1406,6 +1406,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
 		INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
 		schedule_work(&srp->ew.work);
 	}
+	return RQ_END_IO_NONE;
 }
 
 static const struct file_operations sg_fops = {

@@ -512,7 +512,8 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
 	atomic64_dec(&STp->stats->in_flight);
 }
 
-static void st_scsi_execute_end(struct request *req, blk_status_t status)
+static enum rq_end_io_ret st_scsi_execute_end(struct request *req,
+					      blk_status_t status)
 {
 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
 	struct st_request *SRpnt = req->end_io_data;
@@ -532,6 +533,7 @@ static void st_scsi_execute_end(struct request *req, blk_status_t status)
 
 	blk_rq_unmap_user(tmp);
 	blk_mq_free_request(req);
+	return RQ_END_IO_NONE;
 }
 
 static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,

@@ -39,7 +39,7 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
 }
 
 static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
-static void pscsi_req_done(struct request *, blk_status_t);
+static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t);
 
 /* pscsi_attach_hba():
  *
@@ -1002,7 +1002,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
 	return 0;
 }
 
-static void pscsi_req_done(struct request *req, blk_status_t status)
+static enum rq_end_io_ret pscsi_req_done(struct request *req,
+					 blk_status_t status)
 {
 	struct se_cmd *cmd = req->end_io_data;
 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
@@ -1029,6 +1030,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
 	}
 
 	blk_mq_free_request(req);
+	return RQ_END_IO_NONE;
 }
 
 static const struct target_backend_ops pscsi_ops = {

@@ -613,14 +613,17 @@ static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
 	srgn->srgn_state = HPB_SRGN_VALID;
 }
 
-static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
+static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req,
+						   blk_status_t error)
 {
 	struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
 
 	ufshpb_put_req(umap_req->hpb, umap_req);
+	return RQ_END_IO_NONE;
 }
 
-static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
+static enum rq_end_io_ret ufshpb_map_req_compl_fn(struct request *req,
+						  blk_status_t error)
 {
 	struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
 	struct ufshpb_lu *hpb = map_req->hpb;
@@ -636,6 +639,7 @@ static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
 
 	ufshpb_put_map_req(map_req->hpb, map_req);
+	return RQ_END_IO_NONE;
 }
 
 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)

@@ -14,7 +14,12 @@ struct blk_flush_queue;
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_DEFAULT_RQ	128
 
-typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+enum rq_end_io_ret {
+	RQ_END_IO_NONE,
+	RQ_END_IO_FREE,
+};
+
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
 
 /*
  * request flags */