nvmet: Open code nvmet_req_execute()
Now that nvmet_req_execute() does nothing, open code it.

Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
[split patch, update changelog]
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit be3f3114dd
parent e9061c3978
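The change retires a wrapper that had become a single indirect call: every caller already holds the request, so it can invoke the req->execute function pointer directly. Below is a minimal standalone sketch of the pattern, with illustrative names only (struct request, request_execute and do_read are invented for the example, not the kernel's):

#include <stdio.h>

/* A request that carries its handler as a function pointer, as nvmet_req does. */
struct request {
	void (*execute)(struct request *req);
};

/* An example handler; the kernel assigns these during command parsing. */
static void do_read(struct request *req)
{
	printf("executing request %p\n", (void *)req);
}

/* Before: a wrapper whose entire body forwards to the function pointer. */
static void request_execute(struct request *req)
{
	req->execute(req);
}

int main(void)
{
	struct request req = { .execute = do_read };

	request_execute(&req);	/* old style: call through the wrapper */
	req.execute(&req);	/* new style: open-coded direct call */
	return 0;
}

Open coding drops an exported symbol and one level of function call without changing behavior, as the diff below shows.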
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -942,12 +942,6 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
 }
 EXPORT_SYMBOL_GPL(nvmet_check_data_len);
 
-void nvmet_req_execute(struct nvmet_req *req)
-{
-	req->execute(req);
-}
-EXPORT_SYMBOL_GPL(nvmet_req_execute);
-
 int nvmet_req_alloc_sgl(struct nvmet_req *req)
 {
 	struct pci_dev *p2p_dev = NULL;
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2018,7 +2018,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 		}
 
 		/* data transfer complete, resume with nvmet layer */
-		nvmet_req_execute(&fod->req);
+		fod->req.execute(&fod->req);
 		break;
 
 	case NVMET_FCOP_READDATA:
@@ -2234,7 +2234,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	 * can invoke the nvmet_layer now. If read data, cmd completion will
 	 * push the data
 	 */
-	nvmet_req_execute(&fod->req);
+	fod->req.execute(&fod->req);
 	return;
 
 transport_error:
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -125,7 +125,7 @@ static void nvme_loop_execute_work(struct work_struct *work)
 	struct nvme_loop_iod *iod =
 		container_of(work, struct nvme_loop_iod, work);
 
-	nvmet_req_execute(&iod->req);
+	iod->req.execute(&iod->req);
 }
 
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -374,7 +374,6 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
 bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
-void nvmet_req_execute(struct nvmet_req *req);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 int nvmet_req_alloc_sgl(struct nvmet_req *req);
 void nvmet_req_free_sgl(struct nvmet_req *req);
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -603,7 +603,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 	}
 
-	nvmet_req_execute(&rsp->req);
+	rsp->req.execute(&rsp->req);
 }
 
 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -746,7 +746,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
 				queue->cm_id->port_num, &rsp->read_cqe, NULL))
 			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
 	} else {
-		nvmet_req_execute(&rsp->req);
+		rsp->req.execute(&rsp->req);
 	}
 
 	return true;
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -930,7 +930,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
 		goto out;
 	}
 
-	nvmet_req_execute(&queue->cmd->req);
+	queue->cmd->req.execute(&queue->cmd->req);
 out:
 	nvmet_prepare_receive_pdu(queue);
 	return ret;
@@ -1050,7 +1050,7 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
 			nvmet_tcp_prep_recv_ddgst(cmd);
 			return 0;
 		}
-		nvmet_req_execute(&cmd->req);
+		cmd->req.execute(&cmd->req);
 	}
 
 	nvmet_prepare_receive_pdu(queue);
@@ -1090,7 +1090,7 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
 
 	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
 	    cmd->rbytes_done == cmd->req.transfer_len)
-		nvmet_req_execute(&cmd->req);
+		cmd->req.execute(&cmd->req);
 	ret = 0;
 out:
 	nvmet_prepare_receive_pdu(queue);