commit 0a72e7f449
parent 76a86f9d02

block: add accessors for setting/querying request deadline

We reduce the resolution of request expiry, but since we're already using
jiffies for this where resolution depends on the kernel configuration and
since the timeout resolution is coarse anyway, that should be fine.

Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
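Why the coarser resolution is acceptable: the accessors keep bit 0 of the
stored value free, so an odd jiffies deadline is merely rounded down by one
tick. A standalone, compilable sketch of that behavior (the accessor bodies
mirror the patch below; the demo main() is ours, not part of the commit):

#include <stdio.h>

struct request {
	unsigned long __deadline;
};

/* Setter clears bit 0 so it stays free for use as a flag. */
static void blk_rq_set_deadline(struct request *rq, unsigned long time)
{
	rq->__deadline = time & ~0x1UL;
}

/* Getter masks bit 0 off again in case a flag was set in the meantime. */
static unsigned long blk_rq_deadline(struct request *rq)
{
	return rq->__deadline & ~0x1UL;
}

int main(void)
{
	struct request rq;
	unsigned long t;

	for (t = 1000; t <= 1003; t++) {
		blk_rq_set_deadline(&rq, t);
		/* Odd deadlines round down by one jiffy; even ones are exact. */
		printf("requested %lu -> stored %lu\n", t, blk_rq_deadline(&rq));
	}
	return 0;
}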
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -858,7 +858,7 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 	while (true) {
 		start = read_seqcount_begin(&rq->gstate_seq);
 		gstate = READ_ONCE(rq->gstate);
-		deadline = rq->deadline;
+		deadline = blk_rq_deadline(rq);
 		if (!read_seqcount_retry(&rq->gstate_seq, start))
 			break;
 		cond_resched();
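The hunk above snapshots rq->gstate and the deadline under rq->gstate_seq.
For reference, a minimal sketch of the seqcount read/retry pattern in play,
assuming kernel context and a hypothetical struct foo; the real reader also
calls cond_resched() between retries, and the real writers are serialized
elsewhere in blk-mq:

#include <linux/seqlock.h>
#include <linux/types.h>

/* Hypothetical structure: a seqcount protecting two related fields. */
struct foo {
	seqcount_t	seq;
	u64		gen;
	unsigned long	deadline;
};

/* Writer side: callers must already be serialized against each other. */
static void foo_update(struct foo *f, u64 gen, unsigned long deadline)
{
	write_seqcount_begin(&f->seq);
	f->gen = gen;
	f->deadline = deadline;
	write_seqcount_end(&f->seq);
}

/* Reader side: retry until both fields are read without a concurrent write. */
static void foo_read(struct foo *f, u64 *gen, unsigned long *deadline)
{
	unsigned int start;

	do {
		start = read_seqcount_begin(&f->seq);
		*gen = f->gen;
		*deadline = f->deadline;
	} while (read_seqcount_retry(&f->seq, start));
}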
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -112,7 +112,9 @@ static void blk_rq_timed_out(struct request *req)
 static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
 				 unsigned int *next_set)
 {
-	if (time_after_eq(jiffies, rq->deadline)) {
+	const unsigned long deadline = blk_rq_deadline(rq);
+
+	if (time_after_eq(jiffies, deadline)) {
 		list_del_init(&rq->timeout_list);
 
 		/*
@@ -120,8 +122,8 @@ static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
 		 */
 		if (!blk_mark_rq_complete(rq))
 			blk_rq_timed_out(rq);
-	} else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
-		*next_timeout = rq->deadline;
+	} else if (!*next_set || time_after(*next_timeout, deadline)) {
+		*next_timeout = deadline;
 		*next_set = 1;
 	}
 }
@@ -162,7 +164,7 @@ void blk_abort_request(struct request *req)
 		 * immediately and that scan sees the new timeout value.
 		 * No need for fancy synchronizations.
 		 */
-		req->deadline = jiffies;
+		blk_rq_set_deadline(req, jiffies);
 		mod_timer(&req->q->timeout, 0);
 	} else {
 		if (blk_mark_rq_complete(req))
@@ -213,7 +215,7 @@ void blk_add_timer(struct request *req)
 	if (!req->timeout)
 		req->timeout = q->rq_timeout;
 
-	req->deadline = jiffies + req->timeout;
+	blk_rq_set_deadline(req, jiffies + req->timeout);
 	req->rq_flags &= ~RQF_MQ_TIMEOUT_EXPIRED;
 
 	/*
@@ -228,7 +230,7 @@ void blk_add_timer(struct request *req)
 	 * than an existing one, modify the timer. Round up to next nearest
 	 * second.
 	 */
-	expiry = blk_rq_timeout(round_jiffies_up(req->deadline));
+	expiry = blk_rq_timeout(round_jiffies_up(blk_rq_deadline(req)));
 
 	if (!timer_pending(&q->timeout) ||
 	    time_before(expiry, q->timeout.expires)) {
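The expiry tests above depend on the wraparound-safe jiffies comparisons from
include/linux/jiffies.h. A small userspace model of time_after_eq() (the
kernel macro additionally typechecks its arguments) shows why the signed
subtraction matters once jiffies wraps:

#include <stdio.h>
#include <limits.h>

/* Userspace model of the kernel's wraparound-safe test (typecheck omitted). */
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long deadline = ULONG_MAX - 5;	/* set just before the wrap */
	unsigned long now = 10;			/* jiffies has since wrapped */

	/* Signed difference keeps the ordering correct across the wraparound. */
	printf("wrap-safe expired: %d\n", time_after_eq(now, deadline));	/* 1 */
	printf("naive     expired: %d\n", now >= deadline ? 1 : 0);		/* 0 */
	return 0;
}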
--- a/block/blk.h
+++ b/block/blk.h
@@ -236,6 +236,21 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
 	q->last_merge = NULL;
 }
 
+/*
+ * Steal a bit from this field for legacy IO path atomic IO marking. Note that
+ * setting the deadline clears the bottom bit, potentially clearing the
+ * completed bit. The user has to be OK with this (current ones are fine).
+ */
+static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
+{
+	rq->__deadline = time & ~0x1UL;
+}
+
+static inline unsigned long blk_rq_deadline(struct request *rq)
+{
+	return rq->__deadline & ~0x1UL;
+}
+
 /*
  * Internal io_context interface
  */
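The new comment says bit 0 of __deadline is stolen for "legacy IO path atomic
IO marking". This commit only reserves the bit; as an illustration of what
that enables, here is a hypothetical userspace sketch of an atomic completed
flag living in the low bit, with C11 atomics standing in for the kernel's
test_and_set_bit() (all names below are ours):

#include <stdatomic.h>
#include <stdio.h>

struct request {
	_Atomic unsigned long __deadline;	/* deadline above bit 0, flag in bit 0 */
};

#define RQ_COMPLETE_BIT	0x1UL			/* hypothetical flag name */

/* Returns 1 only for the caller that wins the race to complete the request. */
static int mark_rq_complete(struct request *rq)
{
	unsigned long old = atomic_fetch_or(&rq->__deadline, RQ_COMPLETE_BIT);

	return !(old & RQ_COMPLETE_BIT);
}

/* Setting a deadline clears the bottom bit, as the patch comment warns. */
static void set_deadline(struct request *rq, unsigned long time)
{
	atomic_store(&rq->__deadline, time & ~RQ_COMPLETE_BIT);
}

int main(void)
{
	struct request rq;

	set_deadline(&rq, 1001);	/* stored as 1000, flag clear */
	printf("first  completer: %d\n", mark_rq_complete(&rq));	/* 1 */
	printf("second completer: %d\n", mark_rq_complete(&rq));	/* 0 */
	return 0;
}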
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -257,7 +257,9 @@ struct request {
 	struct u64_stats_sync aborted_gstate_sync;
 	u64 aborted_gstate;
 
-	unsigned long deadline;
+	/* access through blk_rq_set_deadline, blk_rq_deadline */
+	unsigned long __deadline;
+
 	struct list_head timeout_list;
 
 	/*