nvme-fabrics: introduce init command check for a queue that is not alive
When the fabrics queue is not alive and fully functional, no commands should be allowed to pass but connect (which moves the queue to a fully functional state). Any other command should be failed, with either temporary status BLK_STS_RESOURCE or permanent status BLK_STS_IOERR. This is shared across all fabrics, hence move the check to fabrics library. Signed-off-by: Sagi Grimberg <sagi@grimberg.me> Signed-off-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
parent
1690102de5
commit
48832f8d58
|
@ -156,4 +156,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts);
|
||||||
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
|
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
|
||||||
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
|
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
|
||||||
|
|
||||||
|
static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
|
||||||
|
struct request *rq)
|
||||||
|
{
|
||||||
|
struct nvme_command *cmd = nvme_req(rq)->cmd;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We cannot accept any other command until the connect command has
|
||||||
|
* completed, so only allow connect to pass.
|
||||||
|
*/
|
||||||
|
if (!blk_rq_is_passthrough(rq) ||
|
||||||
|
cmd->common.opcode != nvme_fabrics_command ||
|
||||||
|
cmd->fabrics.fctype != nvme_fabrics_type_connect) {
|
||||||
|
/*
|
||||||
|
* Reconnecting state means transport disruption, which can take
|
||||||
|
* a long time and even might fail permanently, fail fast to
|
||||||
|
* give upper layers a chance to failover.
|
||||||
|
* Deleting state means that the ctrl will never accept commands
|
||||||
|
* again, fail it permanently.
|
||||||
|
*/
|
||||||
|
if (ctrl->state == NVME_CTRL_RECONNECTING ||
|
||||||
|
ctrl->state == NVME_CTRL_DELETING) {
|
||||||
|
nvme_req(rq)->status = NVME_SC_ABORT_REQ;
|
||||||
|
return BLK_STS_IOERR;
|
||||||
|
}
|
||||||
|
return BLK_STS_RESOURCE; /* try again later */
|
||||||
|
}
|
||||||
|
|
||||||
|
return BLK_STS_OK;
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* _NVME_FABRICS_H */
|
#endif /* _NVME_FABRICS_H */
|
||||||
|
|
|
@ -1591,31 +1591,11 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
|
||||||
* We cannot accept any other command until the Connect command has completed.
|
* We cannot accept any other command until the Connect command has completed.
|
||||||
*/
|
*/
|
||||||
static inline blk_status_t
|
static inline blk_status_t
|
||||||
nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
|
nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
|
||||||
{
|
{
|
||||||
if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
|
if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
|
||||||
struct nvme_command *cmd = nvme_req(rq)->cmd;
|
return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
|
||||||
|
return BLK_STS_OK;
|
||||||
if (!blk_rq_is_passthrough(rq) ||
|
|
||||||
cmd->common.opcode != nvme_fabrics_command ||
|
|
||||||
cmd->fabrics.fctype != nvme_fabrics_type_connect) {
|
|
||||||
/*
|
|
||||||
* reconnecting state means transport disruption, which
|
|
||||||
* can take a long time and even might fail permanently,
|
|
||||||
* fail fast to give upper layers a chance to failover.
|
|
||||||
* deleting state means that the ctrl will never accept
|
|
||||||
* commands again, fail it permanently.
|
|
||||||
*/
|
|
||||||
if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
|
|
||||||
queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
|
|
||||||
nvme_req(rq)->status = NVME_SC_ABORT_REQ;
|
|
||||||
return BLK_STS_IOERR;
|
|
||||||
}
|
|
||||||
return BLK_STS_RESOURCE; /* try again later */
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
|
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||||
|
@ -1634,7 +1614,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||||
|
|
||||||
WARN_ON_ONCE(rq->tag < 0);
|
WARN_ON_ONCE(rq->tag < 0);
|
||||||
|
|
||||||
ret = nvme_rdma_queue_is_ready(queue, rq);
|
ret = nvme_rdma_is_ready(queue, rq);
|
||||||
if (unlikely(ret))
|
if (unlikely(ret))
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue