block: move blk_exit_queue into __blk_release_queue
Commit 498f6650ae ("block: Fix a race between the cgroup code and request queue initialization") moved what blk_exit_queue() does into blk_cleanup_queue() to fix an issue caused by switching back the queue lock. However, now that the legacy request IO path has been killed, the driver queue lock is not used at all, and there is no switching back of the queue lock any more. The issue addressed by commit 498f6650ae therefore no longer exists, so move blk_exit_queue() into __blk_release_queue().

This patch basically reverts the following two commits:

498f6650ae ("block: Fix a race between the cgroup code and request queue initialization")
24ecc35853 ("block: Ensure that a request queue is dissociated from the cgroup controller")

Cc: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 47cdee29ef
parent 31cb1d64da
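The subtle part of blk_exit_queue(), preserved verbatim as it moves between files in the hunks below, is its internal ordering: the I/O scheduler exit code may access cgroup information, and the cgroup code may still dereference q->backing_dev_info, so the steps must run scheduler exit first, then blkcg_exit_queue(), then bdi_put(). The following is a minimal userspace sketch of that dependency-ordered teardown; every type and helper in it is a hypothetical stand-in for illustration, not a kernel API.

/*
 * Userspace sketch of the ordering blk_exit_queue() enforces.
 * All names here are hypothetical stand-ins, not kernel APIs; the point
 * is only the order: scheduler exit -> cgroup exit -> bdi put.
 */
#include <stdio.h>
#include <stdlib.h>

struct elevator { const char *name; };

struct queue {
	struct elevator *elevator;	/* I/O scheduler; may consult cgroup state */
	int cgroup_linked;		/* still visible to the cgroup controller? */
	int *bdi_refcnt;		/* cgroup code may still dereference this */
};

static void elevator_exit(struct queue *q)
{
	/* May still read cgroup state, so the cgroup link must be intact. */
	printf("exit scheduler %s (cgroup link=%d)\n",
	       q->elevator->name, q->cgroup_linked);
	free(q->elevator);
	q->elevator = NULL;
}

static void blkcg_exit(struct queue *q)
{
	/* May still dereference the bdi, so the bdi ref must be held. */
	printf("unlink from cgroup controller (bdi ref=%d)\n", *q->bdi_refcnt);
	q->cgroup_linked = 0;
}

static void bdi_put(struct queue *q)
{
	if (--*q->bdi_refcnt == 0)
		free(q->bdi_refcnt);
}

/* Mirrors blk_exit_queue(): each step depends on state the next one frees. */
static void queue_exit(struct queue *q)
{
	if (q->elevator)
		elevator_exit(q);	/* 1: before cgroup disassociation */
	blkcg_exit(q);			/* 2: before dropping the bdi ref */
	bdi_put(q);			/* 3: last */
}

int main(void)
{
	struct queue q = { .cgroup_linked = 1 };

	q.elevator = malloc(sizeof(*q.elevator));
	q.elevator->name = "mq-deadline";
	q.bdi_refcnt = malloc(sizeof(int));
	*q.bdi_refcnt = 1;
	queue_exit(&q);
	return 0;
}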
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -282,35 +282,6 @@ void blk_set_queue_dying(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
 
-/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
-void blk_exit_queue(struct request_queue *q)
-{
-	/*
-	 * Since the I/O scheduler exit code may access cgroup information,
-	 * perform I/O scheduler exit before disassociating from the block
-	 * cgroup controller.
-	 */
-	if (q->elevator) {
-		ioc_clear_queue(q);
-		elevator_exit(q, q->elevator);
-		q->elevator = NULL;
-	}
-
-	/*
-	 * Remove all references to @q from the block cgroup controller before
-	 * restoring @q->queue_lock to avoid that restoring this pointer causes
-	 * e.g. blkcg_print_blkgs() to crash.
-	 */
-	blkcg_exit_queue(q);
-
-	/*
-	 * Since the cgroup code may dereference the @q->backing_dev_info
-	 * pointer, only decrease its reference count after having removed the
-	 * association with the block cgroup controller.
-	 */
-	bdi_put(q->backing_dev_info);
-}
-
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -346,14 +317,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
 	blk_sync_queue(q);
 
-	/*
-	 * I/O scheduler exit is only safe after the sysfs scheduler attribute
-	 * has been removed.
-	 */
-	WARN_ON_ONCE(q->kobj.state_in_sysfs);
-
-	blk_exit_queue(q);
-
 	if (queue_is_mq(q))
 		blk_mq_exit_queue(q);
 
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -840,6 +840,36 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 	kmem_cache_free(blk_requestq_cachep, q);
 }
 
+/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
+static void blk_exit_queue(struct request_queue *q)
+{
+	/*
+	 * Since the I/O scheduler exit code may access cgroup information,
+	 * perform I/O scheduler exit before disassociating from the block
+	 * cgroup controller.
+	 */
+	if (q->elevator) {
+		ioc_clear_queue(q);
+		elevator_exit(q, q->elevator);
+		q->elevator = NULL;
+	}
+
+	/*
+	 * Remove all references to @q from the block cgroup controller before
+	 * restoring @q->queue_lock to avoid that restoring this pointer causes
+	 * e.g. blkcg_print_blkgs() to crash.
+	 */
+	blkcg_exit_queue(q);
+
+	/*
+	 * Since the cgroup code may dereference the @q->backing_dev_info
+	 * pointer, only decrease its reference count after having removed the
+	 * association with the block cgroup controller.
+	 */
+	bdi_put(q->backing_dev_info);
+}
+
+
 /**
  * __blk_release_queue - release a request queue
  * @work: pointer to the release_work member of the request queue to be released
@@ -860,23 +890,10 @@ static void __blk_release_queue(struct work_struct *work)
 		blk_stat_remove_callback(q, q->poll_cb);
 	blk_stat_free_callback(q->poll_cb);
 
-	if (!blk_queue_dead(q)) {
-		/*
-		 * Last reference was dropped without having called
-		 * blk_cleanup_queue().
-		 */
-		WARN_ONCE(blk_queue_init_done(q),
-			  "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
-			  q);
-		blk_exit_queue(q);
-	}
-
-	WARN(blk_queue_root_blkg(q),
-	     "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
-	     q);
-
 	blk_free_queue_stats(q->stats);
 
+	blk_exit_queue(q);
+
 	blk_queue_free_zone_bitmaps(q);
 
 	if (queue_is_mq(q))
--- a/block/blk.h
+++ b/block/blk.h
@@ -50,7 +50,6 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 		int node, int cmd_size, gfp_t flags);
 void blk_free_flush_queue(struct blk_flush_queue *q);
 
-void blk_exit_queue(struct request_queue *q);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		struct bio *bio);
 void blk_freeze_queue(struct request_queue *q);
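As the commit message notes, with the legacy request path gone there is no queue-lock switching left for blk_exit_queue() to race with, so the exit work can simply run from __blk_release_queue(), which executes exactly once when the last reference to the queue is dropped. Below is a minimal sketch of that release-on-final-put pattern, using hypothetical names and C11 atomics in place of the kernel's refcount machinery.

/*
 * Sketch of the release-on-last-put pattern __blk_release_queue() relies on.
 * Hypothetical names; C11 atomics stand in for kernel refcount_t/kobject.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
	atomic_int refcnt;
};

static struct queue *queue_alloc(void)
{
	struct queue *q = calloc(1, sizeof(*q));

	atomic_init(&q->refcnt, 1);
	return q;
}

static void queue_exit(struct queue *q)
{
	/* Stand-in for blk_exit_queue(): runs once, on the final put. */
	printf("exit work runs exactly once, at release\n");
}

static void queue_get(struct queue *q)
{
	atomic_fetch_add(&q->refcnt, 1);
}

static void queue_put(struct queue *q)
{
	/* Only the caller dropping the last reference releases the queue. */
	if (atomic_fetch_sub(&q->refcnt, 1) == 1) {
		queue_exit(q);
		free(q);
	}
}

int main(void)
{
	struct queue *q = queue_alloc();

	queue_get(q);	/* e.g. a concurrent user such as the cgroup code */
	queue_put(q);	/* cleanup path drops its reference: no release yet */
	queue_put(q);	/* last user drops: release and exit run once */
	return 0;
}

With the exit unconditionally in the release path, the checks that warned about releasing a queue without blk_cleanup_queue() lose their purpose, which is why the __blk_release_queue() hunk above drops them.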