diff --git a/block/blk-core.c b/block/blk-core.c
index dc1fa454ae30..4bfc0d504b2d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -850,6 +850,12 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 		return 0;
 
+	/*
+	 * As the requests that require a zone lock are not plugged in the
+	 * first place, directly accessing the plug instead of using
+	 * blk_mq_plug() should not have any consequences during flushing for
+	 * zoned devices.
+	 */
 	blk_flush_plug(current->plug, false);
 
 	if (bio_queue_enter(bio))
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b2b318df4f69..9dd3ec42613f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1215,6 +1215,12 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
 	WARN_ON(!blk_rq_is_passthrough(rq));
 
 	blk_account_io_start(rq);
+
+	/*
+	 * As plugging can be enabled for passthrough requests on a zoned
+	 * device, directly accessing the plug instead of using blk_mq_plug()
+	 * should not have any consequences.
+	 */
 	if (current->plug)
 		blk_add_rq_to_plug(current->plug, rq);
 	else
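
For reference, the rationale in both comments rests on which BIOs blk_mq_plug() refuses to plug. The snippet below is a simplified sketch of that decision as the comments describe it (writes to zoned devices are never plugged, everything else falls back to current->plug); it is an illustration, not the verbatim helper from block/blk-mq.h, and the _sketch suffix only marks it as such.

/* Simplified sketch of the blk_mq_plug() decision the comments refer to. */
static inline struct blk_plug *blk_mq_plug_sketch(struct bio *bio)
{
	/* Zoned write: never plug, so plug flushing cannot reorder zoned writes */
	if (bdev_is_zoned(bio->bi_bdev) && op_is_write(bio_op(bio)))
		return NULL;

	/* Reads and non-zoned writes: use the caller's plug, which may be NULL */
	return current->plug;
}

Since bio_poll() and blk_execute_rq_nowait() never see a BIO that the check above would filter out, dereferencing current->plug directly in those two paths is equivalent to going through the helper.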