block: cancel all throttled bios in del_gendisk()
Throttled bios can't be issued after del_gendisk() is done, so it's better to cancel them immediately rather than waiting for throttling to complete. For example, if a user thread is throttled with a low bps limit while issuing a large io and the device is deleted, the thread will wait a long time for the io to return. Signed-off-by: Yu Kuai <yukuai3@huawei.com> Signed-off-by: Ming Lei <ming.lei@redhat.com> Link: https://lore.kernel.org/r/20220318130144.1066064-4-ming.lei@redhat.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
0a9a25ca78
commit
8f9e7b65f8
|
@ -874,7 +874,8 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
|
||||||
bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
|
bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
|
||||||
|
|
||||||
/* If tg->bps = -1, then BW is unlimited */
|
/* If tg->bps = -1, then BW is unlimited */
|
||||||
if (bps_limit == U64_MAX && iops_limit == UINT_MAX) {
|
if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
|
||||||
|
tg->flags & THROTL_TG_CANCELING) {
|
||||||
if (wait)
|
if (wait)
|
||||||
*wait = 0;
|
*wait = 0;
|
||||||
return true;
|
return true;
|
||||||
|
@ -1776,6 +1777,39 @@ static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void blk_throtl_cancel_bios(struct request_queue *q)
|
||||||
|
{
|
||||||
|
struct cgroup_subsys_state *pos_css;
|
||||||
|
struct blkcg_gq *blkg;
|
||||||
|
|
||||||
|
spin_lock_irq(&q->queue_lock);
|
||||||
|
/*
|
||||||
|
* queue_lock is held, rcu lock is not needed here technically.
|
||||||
|
* However, rcu lock is still held to emphasize that following
|
||||||
|
* path need RCU protection and to prevent warning from lockdep.
|
||||||
|
*/
|
||||||
|
rcu_read_lock();
|
||||||
|
blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
|
||||||
|
struct throtl_grp *tg = blkg_to_tg(blkg);
|
||||||
|
struct throtl_service_queue *sq = &tg->service_queue;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Set the flag to make sure throtl_pending_timer_fn() won't
|
||||||
|
* stop until all throttled bios are dispatched.
|
||||||
|
*/
|
||||||
|
blkg_to_tg(blkg)->flags |= THROTL_TG_CANCELING;
|
||||||
|
/*
|
||||||
|
* Update disptime after setting the above flag to make sure
|
||||||
|
* throtl_select_dispatch() won't exit without dispatching.
|
||||||
|
*/
|
||||||
|
tg_update_disptime(tg);
|
||||||
|
|
||||||
|
throtl_schedule_pending_timer(sq, jiffies + 1);
|
||||||
|
}
|
||||||
|
rcu_read_unlock();
|
||||||
|
spin_unlock_irq(&q->queue_lock);
|
||||||
|
}
|
||||||
|
|
||||||
static bool throtl_can_upgrade(struct throtl_data *td,
|
static bool throtl_can_upgrade(struct throtl_data *td,
|
||||||
struct throtl_grp *this_tg)
|
struct throtl_grp *this_tg)
|
||||||
{
|
{
|
||||||
|
|
|
@ -56,6 +56,7 @@ enum tg_state_flags {
|
||||||
THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */
|
THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */
|
||||||
THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */
|
THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */
|
||||||
THROTL_TG_HAS_IOPS_LIMIT = 1 << 2, /* tg has iops limit */
|
THROTL_TG_HAS_IOPS_LIMIT = 1 << 2, /* tg has iops limit */
|
||||||
|
THROTL_TG_CANCELING = 1 << 3, /* starts to cancel bio */
|
||||||
};
|
};
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
|
@ -162,11 +163,13 @@ static inline int blk_throtl_init(struct request_queue *q) { return 0; }
|
||||||
static inline void blk_throtl_exit(struct request_queue *q) { }
|
static inline void blk_throtl_exit(struct request_queue *q) { }
|
||||||
static inline void blk_throtl_register_queue(struct request_queue *q) { }
|
static inline void blk_throtl_register_queue(struct request_queue *q) { }
|
||||||
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
|
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
|
||||||
|
static inline void blk_throtl_cancel_bios(struct request_queue *q) { }
|
||||||
#else /* CONFIG_BLK_DEV_THROTTLING */
|
#else /* CONFIG_BLK_DEV_THROTTLING */
|
||||||
int blk_throtl_init(struct request_queue *q);
|
int blk_throtl_init(struct request_queue *q);
|
||||||
void blk_throtl_exit(struct request_queue *q);
|
void blk_throtl_exit(struct request_queue *q);
|
||||||
void blk_throtl_register_queue(struct request_queue *q);
|
void blk_throtl_register_queue(struct request_queue *q);
|
||||||
bool __blk_throtl_bio(struct bio *bio);
|
bool __blk_throtl_bio(struct bio *bio);
|
||||||
|
void blk_throtl_cancel_bios(struct request_queue *q);
|
||||||
static inline bool blk_throtl_bio(struct bio *bio)
|
static inline bool blk_throtl_bio(struct bio *bio)
|
||||||
{
|
{
|
||||||
struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
|
struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
|
||||||
|
|
|
@ -25,6 +25,7 @@
|
||||||
#include <linux/pm_runtime.h>
|
#include <linux/pm_runtime.h>
|
||||||
#include <linux/badblocks.h>
|
#include <linux/badblocks.h>
|
||||||
#include <linux/part_stat.h>
|
#include <linux/part_stat.h>
|
||||||
|
#include "blk-throttle.h"
|
||||||
|
|
||||||
#include "blk.h"
|
#include "blk.h"
|
||||||
#include "blk-mq-sched.h"
|
#include "blk-mq-sched.h"
|
||||||
|
@ -627,6 +628,8 @@ void del_gendisk(struct gendisk *disk)
|
||||||
|
|
||||||
blk_mq_freeze_queue_wait(q);
|
blk_mq_freeze_queue_wait(q);
|
||||||
|
|
||||||
|
blk_throtl_cancel_bios(disk->queue);
|
||||||
|
|
||||||
blk_sync_queue(q);
|
blk_sync_queue(q);
|
||||||
blk_flush_integrity();
|
blk_flush_integrity();
|
||||||
/*
|
/*
|
||||||
|
|
Loading…
Reference in New Issue