blk-mq: blk_mq_tag_to_rq should handle flush request
The flush request is special: it borrows its tag from the parent request. Hence blk_mq_tag_to_rq() needs special handling to return the flush request for that tag.

Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 2230237500
parent da52f22fa9
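For reference, a sketch of the lookup this patch introduces, reconstructed from the diff below (not copied verbatim from the tree):

    struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag)
    {
    	struct request_queue *q = hctx->queue;

    	/*
    	 * The flush request borrows its tag from the parent request, so a
    	 * tag matching the in-flight flush must map back to q->flush_rq
    	 * instead of the regular tags->rqs[] slot.
    	 */
    	if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) &&
    	    q->flush_rq->tag == tag)
    		return q->flush_rq;

    	return hctx->tags->rqs[tag];
    }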
block/blk-flush.c
@@ -223,8 +223,10 @@ static void flush_end_io(struct request *flush_rq, int error)
 	struct request *rq, *n;
 	unsigned long flags = 0;
 
-	if (q->mq_ops)
+	if (q->mq_ops) {
 		spin_lock_irqsave(&q->mq_flush_lock, flags);
+		q->flush_rq->cmd_flags = 0;
+	}
 
 	running = &q->flush_queue[q->flush_running_idx];
 	BUG_ON(q->flush_pending_idx == q->flush_running_idx);
block/blk-mq.c
@@ -541,9 +541,15 @@ void blk_mq_kick_requeue_list(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
-struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
+struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 {
-	return tags->rqs[tag];
+	struct request_queue *q = hctx->queue;
+
+	if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) &&
+	    q->flush_rq->tag == tag)
+		return q->flush_rq;
+
+	return hctx->tags->rqs[tag];
 }
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
@@ -572,7 +578,7 @@ static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
 		if (tag >= hctx->tags->nr_tags)
 			break;
 
-		rq = blk_mq_tag_to_rq(hctx->tags, tag++);
+		rq = blk_mq_tag_to_rq(hctx, tag++);
 		if (rq->q != hctx->queue)
 			continue;
 		if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
include/linux/blk-mq.h
@@ -154,7 +154,7 @@ void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		gfp_t gfp, bool reserved);
-struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag);
 
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);