blk-mq-tag: change busy_iter_fn to return whether to continue or not
We already have this functionality in sbitmap, but it is not exported to
blk-mq users of the tags busy iteration. Returning a bool from the
iteration callbacks is useful for stopping the iteration early, once the
caller no longer needs to find more requests.

Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
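As an illustration of what the new return value enables, here is a minimal
sketch of a callback that stops the tags iteration early. This is
hypothetical driver code, not part of this commit: the find_rq_data type
and both function names are made up for the example; only the
busy_tag_iter_fn signature and blk_mq_tagset_busy_iter() come from the
kernel API touched below.

struct find_rq_data {                   /* illustrative helper, not in-tree */
        unsigned int tag;
        struct request *rq;
};

/* busy_tag_iter_fn: capture the first request with a matching tag */
static bool find_rq_by_tag(struct request *rq, void *data, bool reserved)
{
        struct find_rq_data *fdata = data;

        if (rq->tag != fdata->tag)
                return true;            /* not the one we want, keep iterating */

        fdata->rq = rq;
        return false;                   /* found it, stop the iteration */
}

/* usage: blk_mq_tagset_busy_iter(tagset, find_rq_by_tag, &fdata); */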
commit 7baa85727d
parent c28445fa06
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -422,15 +422,18 @@ struct show_busy_params {
 
 /*
  * Note: the state of a request may change while this function is in progress,
- * e.g. due to a concurrent blk_mq_finish_request() call.
+ * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
+ * keep iterating requests.
  */
-static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
+static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
 {
        const struct show_busy_params *params = data;
 
        if (rq->mq_hctx == params->hctx)
                __blk_mq_debugfs_rq_show(params->m,
                                         list_entry_rq(&rq->queuelist));
+
+       return true;
 }
 
 static int hctx_busy_show(void *data, struct seq_file *m)
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -236,7 +236,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
         * test and set the bit before assigning ->rqs[].
         */
        if (rq && rq->q == hctx->queue)
-               iter_data->fn(hctx, rq, iter_data->data, reserved);
+               return iter_data->fn(hctx, rq, iter_data->data, reserved);
        return true;
 }
 
@@ -289,7 +289,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
         */
        rq = tags->rqs[bitnr];
        if (rq && blk_mq_request_started(rq))
-               iter_data->fn(rq, iter_data->data, reserved);
+               return iter_data->fn(rq, iter_data->data, reserved);
 
        return true;
 }
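These two hunks are where the new semantics take effect: bt_iter() and
bt_tags_iter() are themselves sbitmap callbacks, and sbitmap's iterator
already stops when a callback returns false (the functionality the commit
message refers to), so forwarding the driver callback's return value is
all that is needed. A self-contained sketch of that propagation pattern
(illustrative names, plain userspace C, not kernel code):

#include <stdbool.h>
#include <stdio.h>

typedef bool (item_fn)(int item, void *data);

/* Outer walker: stops as soon as the inner callback asks to, mirroring
 * how bt_iter() now forwards the busy_iter_fn result to sbitmap. */
static void walk(const int *items, int n, item_fn *fn, void *data)
{
        for (int i = 0; i < n; i++)
                if (!fn(items[i], data))
                        break;  /* callback returned false: stop early */
}

static bool find_first_even(int item, void *data)
{
        int *out = data;

        if (item % 2)
                return true;    /* keep going */
        *out = item;
        return false;           /* found one, stop */
}

int main(void)
{
        int items[] = { 3, 7, 8, 10 };
        int found = -1;

        walk(items, 4, find_first_even, &found);
        printf("first even: %d\n", found);      /* prints 8 */
        return 0;
}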
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -94,7 +94,7 @@ struct mq_inflight {
        unsigned int *inflight;
 };
 
-static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
+static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
                                  struct request *rq, void *priv,
                                  bool reserved)
 {
@@ -109,6 +109,8 @@ static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
                mi->inflight[0]++;
        if (mi->part->partno)
                mi->inflight[1]++;
+
+       return true;
 }
 
 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
@@ -120,7 +122,7 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
 }
 
-static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
+static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
                                     struct request *rq, void *priv,
                                     bool reserved)
 {
@@ -128,6 +130,8 @@ static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
 
        if (rq->part == mi->part)
                mi->inflight[rq_data_dir(rq)]++;
+
+       return true;
 }
 
 void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
@@ -821,7 +825,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
        return false;
 }
 
-static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
+static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                struct request *rq, void *priv, bool reserved)
 {
        unsigned long *next = priv;
@@ -831,7 +835,7 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
         * so we're not unnecessarilly synchronizing across CPUs.
         */
        if (!blk_mq_req_expired(rq, next))
-               return;
+               return true;
 
        /*
         * We have reason to believe the request may be expired. Take a
@@ -843,7 +847,7 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
         * timeout handler to posting a natural completion.
         */
        if (!refcount_inc_not_zero(&rq->ref))
-               return;
+               return true;
 
        /*
         * The request is now locked and cannot be reallocated underneath the
@@ -855,6 +859,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                blk_mq_rq_timed_out(rq, reserved);
        if (refcount_dec_and_test(&rq->ref))
                __blk_mq_free_request(rq);
+
+       return true;
 }
 
 static void blk_mq_timeout_work(struct work_struct *work)
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2720,7 +2720,7 @@ static void mtip_softirq_done_fn(struct request *rq)
        blk_mq_end_request(rq, cmd->status);
 }
 
-static void mtip_abort_cmd(struct request *req, void *data, bool reserved)
+static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
 {
        struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
        struct driver_data *dd = data;
@@ -2730,14 +2730,16 @@ static void mtip_abort_cmd(struct request *req, void *data, bool reserved)
        clear_bit(req->tag, dd->port->cmds_to_issue);
        cmd->status = BLK_STS_IOERR;
        mtip_softirq_done_fn(req);
+       return true;
 }
 
-static void mtip_queue_cmd(struct request *req, void *data, bool reserved)
+static bool mtip_queue_cmd(struct request *req, void *data, bool reserved)
 {
        struct driver_data *dd = data;
 
        set_bit(req->tag, dd->port->cmds_to_issue);
        blk_abort_request(req);
+       return true;
 }
 
 /*
@@ -3920,12 +3922,13 @@ protocol_init_error:
        return rv;
 }
 
-static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
+static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
 {
        struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
        cmd->status = BLK_STS_IOERR;
        blk_mq_complete_request(rq);
+       return true;
 }
 
 /*
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -734,12 +734,13 @@ static void recv_work(struct work_struct *work)
        kfree(args);
 }
 
-static void nbd_clear_req(struct request *req, void *data, bool reserved)
+static bool nbd_clear_req(struct request *req, void *data, bool reserved)
 {
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 
        cmd->status = BLK_STS_IOERR;
        blk_mq_complete_request(req);
+       return true;
 }
 
 static void nbd_clear_que(struct nbd_device *nbd)
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -382,11 +382,12 @@ static void skd_log_skreq(struct skd_device *skdev,
  * READ/WRITE REQUESTS
  *****************************************************************************
  */
-static void skd_inc_in_flight(struct request *rq, void *data, bool reserved)
+static bool skd_inc_in_flight(struct request *rq, void *data, bool reserved)
 {
        int *count = data;
 
        count++;
+       return true;
 }
 
 static int skd_in_flight(struct skd_device *skdev)
@@ -1887,13 +1888,13 @@ static void skd_isr_fwstate(struct skd_device *skdev)
                   skd_skdev_state_to_str(skdev->state), skdev->state);
 }
 
-static void skd_recover_request(struct request *req, void *data, bool reserved)
+static bool skd_recover_request(struct request *req, void *data, bool reserved)
 {
        struct skd_device *const skdev = data;
        struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
 
        if (skreq->state != SKD_REQ_STATE_BUSY)
-               return;
+               return true;
 
        skd_log_skreq(skdev, skreq, "recover");
 
@@ -1904,6 +1905,7 @@ static void skd_recover_request(struct request *req, void *data, bool reserved)
        skreq->state = SKD_REQ_STATE_IDLE;
        skreq->status = BLK_STS_IOERR;
        blk_mq_complete_request(req);
+       return true;
 }
 
 static void skd_recover_requests(struct skd_device *skdev)
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -268,14 +268,14 @@ void nvme_complete_rq(struct request *req)
 }
 EXPORT_SYMBOL_GPL(nvme_complete_rq);
 
-void nvme_cancel_request(struct request *req, void *data, bool reserved)
+bool nvme_cancel_request(struct request *req, void *data, bool reserved)
 {
        dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
                                "Cancelling I/O %d", req->tag);
 
        nvme_req(req)->status = NVME_SC_ABORT_REQ;
        blk_mq_complete_request(req);
-
+       return true;
 }
 EXPORT_SYMBOL_GPL(nvme_cancel_request);
 
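For context (not part of this diff), nvme_cancel_request() is invoked
through the tagset iterator, e.g. from the controller shutdown path in
the NVMe PCIe driver; a typical call site looks roughly like the line
below, and because the callback always returns true, every outstanding
request is still cancelled:

/* illustrative call site; dev is the PCIe driver's device structure */
blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);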
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2386,7 +2386,7 @@ nvme_fc_complete_rq(struct request *rq)
  * status. The done path will return the io request back to the block
  * layer with an error status.
  */
-static void
+static bool
 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
 {
        struct nvme_ctrl *nctrl = data;
@@ -2394,6 +2394,7 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
 
        __nvme_fc_abort_op(ctrl, op);
+       return true;
 }
 
 
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -408,7 +408,7 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
 }
 
 void nvme_complete_rq(struct request *req);
-void nvme_cancel_request(struct request *req, void *data, bool reserved);
+bool nvme_cancel_request(struct request *req, void *data, bool reserved);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                enum nvme_ctrl_state new_state);
 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -129,9 +129,9 @@ typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
 typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
                unsigned int);
 
-typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
+typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
                bool);
-typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
+typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
 typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
 typedef bool (busy_fn)(struct request_queue *);