blk-mq: remove blk_mq_wait_for_tags
The current logic for blocking tag allocation is rather confusing: we first allocate and then immediately free a tag in blk_mq_wait_for_tags, just to then attempt a non-blocking allocation, repeating the whole cycle if someone else managed to grab the tag before us. Instead, change blk_mq_alloc_request_pinned to simply do a blocking tag allocation itself and use the request we get back from it. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
parent
5dee857720
commit
a3bd77567c
|
@@ -7,14 +7,6 @@
|
||||||
#include "blk-mq.h"
|
#include "blk-mq.h"
|
||||||
#include "blk-mq-tag.h"
|
#include "blk-mq-tag.h"
|
||||||
|
|
||||||
void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved)
|
|
||||||
{
|
|
||||||
int tag, zero = 0;
|
|
||||||
|
|
||||||
tag = blk_mq_get_tag(hctx, &zero, __GFP_WAIT, reserved);
|
|
||||||
blk_mq_put_tag(hctx, tag, &zero);
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
|
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
|
@@ -49,7 +49,6 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r
|
||||||
extern void blk_mq_free_tags(struct blk_mq_tags *tags);
|
extern void blk_mq_free_tags(struct blk_mq_tags *tags);
|
||||||
|
|
||||||
extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved);
|
extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved);
|
||||||
extern void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved);
|
|
||||||
extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
|
extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
|
||||||
extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
|
extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
|
||||||
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
|
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
|
||||||
|
|
|
@@ -264,31 +264,30 @@ __blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
|
static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
|
||||||
int rw, gfp_t gfp,
|
int rw, gfp_t gfp,
|
||||||
bool reserved)
|
bool reserved)
|
||||||
{
|
{
|
||||||
|
bool gfp_mask = gfp & ~__GFP_WAIT;
|
||||||
struct request *rq;
|
struct request *rq;
|
||||||
|
|
||||||
do {
|
do {
|
||||||
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
|
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
|
||||||
struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
|
struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
|
||||||
|
|
||||||
rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
|
rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp_mask,
|
||||||
reserved);
|
reserved);
|
||||||
if (rq)
|
if (rq)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
if (gfp & __GFP_WAIT) {
|
if (!(gfp & __GFP_WAIT)) {
|
||||||
__blk_mq_run_hw_queue(hctx);
|
|
||||||
blk_mq_put_ctx(ctx);
|
|
||||||
} else {
|
|
||||||
blk_mq_put_ctx(ctx);
|
blk_mq_put_ctx(ctx);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
blk_mq_wait_for_tags(hctx, reserved);
|
__blk_mq_run_hw_queue(hctx);
|
||||||
|
blk_mq_put_ctx(ctx);
|
||||||
|
gfp_mask = gfp;
|
||||||
} while (1);
|
} while (1);
|
||||||
|
|
||||||
return rq;
|
return rq;
|
||||||
|
|
Loading…
Reference in New Issue