From 3943b040f11ed0cc6d4585fd286a623ca8634547 Mon Sep 17 00:00:00 2001
From: Shan Hai
Date: Thu, 23 Aug 2018 02:02:56 +0800
Subject: [PATCH 1/5] bcache: release dc->writeback_lock properly in
 bch_writeback_thread()

The writeback thread would exit with the lock still held when the
cache device is detached via the sysfs interface. Fix it by releasing
the held lock before exiting the while-loop.

Fixes: fadd94e05c02 ("bcache: quit dc->writeback_thread when BCACHE_DEV_DETACHING is set")
Signed-off-by: Shan Hai
Signed-off-by: Coly Li
Tested-by: Shenghui Wang
Cc: stable@vger.kernel.org #4.17+
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/writeback.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6be05bd7ca67..08c3a9f9676c 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -685,8 +685,10 @@ static int bch_writeback_thread(void *arg)
 		 * data on cache. BCACHE_DEV_DETACHING flag is set in
 		 * bch_cached_dev_detach().
 		 */
-		if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+		if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
+			up_write(&dc->writeback_lock);
 			break;
+		}
 	}
 
 	up_write(&dc->writeback_lock);

From ffa358dcaae1f2f00926484e712e06daa8953cb4 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 20 Aug 2018 13:24:25 -0600
Subject: [PATCH 2/5] blk-wbt: move disable check into get_limit()

Check it in one place, instead of in multiple places.

Tested-by: Anchal Agarwal
Signed-off-by: Jens Axboe
---
 block/blk-wbt.c | 22 +++++++---------------
 1 file changed, 7 insertions(+), 15 deletions(-)

diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index bb93c7c2b182..1dd7edce7a99 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -449,6 +449,13 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 {
 	unsigned int limit;
 
+	/*
+	 * If we got disabled, just return UINT_MAX. This ensures that
+	 * we'll properly inc a new IO, and dec+wakeup at the end.
+	 */
+	if (!rwb_enabled(rwb))
+		return UINT_MAX;
+
 	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
 		return rwb->wb_background;
 
@@ -486,16 +493,6 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
 	DECLARE_WAITQUEUE(wait, current);
 
-	/*
-	 * inc it here even if disabled, since we'll dec it at completion.
-	 * this only happens if the task was sleeping in __wbt_wait(),
-	 * and someone turned it off at the same time.
-	 */
-	if (!rwb_enabled(rwb)) {
-		atomic_inc(&rqw->inflight);
-		return;
-	}
-
 	if (!waitqueue_active(&rqw->wait)
 	    && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
 		return;
@@ -504,11 +501,6 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 	do {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 
-		if (!rwb_enabled(rwb)) {
-			atomic_inc(&rqw->inflight);
-			break;
-		}
-
 		if (rq_wait_inc_below(rqw, get_limit(rwb, rw)))
 			break;
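The bug fixed by patch 1 is a classic shape: a loop that runs with a lock
held gains a new early exit, and that exit path skips the unlock that the
normal path performs. The sketch below shows the same pattern in plain
userspace C, with a pthreads rwlock standing in for the kernel's
rw_semaphore; the names and the simplified loop body are illustrative,
not taken from bcache.

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t writeback_lock = PTHREAD_RWLOCK_INITIALIZER;
static _Atomic bool detaching;

static void *writeback_thread(void *arg)
{
	for (;;) {
		pthread_rwlock_wrlock(&writeback_lock);
		/* ... write back dirty data while holding the lock ... */
		if (detaching) {
			/*
			 * Early exit: the lock must be dropped here too,
			 * mirroring the up_write() the patch adds before
			 * the break.
			 */
			pthread_rwlock_unlock(&writeback_lock);
			break;
		}
		pthread_rwlock_unlock(&writeback_lock);
	}
	return arg;
}

Patch 2's consolidation follows the same discipline from the other side:
by making get_limit() return UINT_MAX when throttling is disabled, the
inflight counter is still incremented and the completion-side decrement
and wakeup stay balanced, without sprinkling rwb_enabled() checks around.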
From b78820937b4762b7d30b807d7156bec1d89e4dd3 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 20 Aug 2018 13:20:50 -0600
Subject: [PATCH 3/5] blk-wbt: use wq_has_sleeper() for wq active check

We need the memory barrier before checking the list head; use the
appropriate helper for this. The matching queue-side memory barrier
is provided by set_current_state().

Tested-by: Anchal Agarwal
Signed-off-by: Jens Axboe
---
 block/blk-wbt.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 1dd7edce7a99..853e22492f4e 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -118,7 +118,7 @@ static void rwb_wake_all(struct rq_wb *rwb)
 	for (i = 0; i < WBT_NUM_RWQ; i++) {
 		struct rq_wait *rqw = &rwb->rq_wait[i];
 
-		if (waitqueue_active(&rqw->wait))
+		if (wq_has_sleeper(&rqw->wait))
 			wake_up_all(&rqw->wait);
 	}
 }
@@ -162,7 +162,7 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
 	if (inflight && inflight >= limit)
 		return;
 
-	if (waitqueue_active(&rqw->wait)) {
+	if (wq_has_sleeper(&rqw->wait)) {
 		int diff = limit - inflight;
 
 		if (!inflight || diff >= rwb->wb_background / 2)
@@ -493,8 +493,8 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
 	DECLARE_WAITQUEUE(wait, current);
 
-	if (!waitqueue_active(&rqw->wait)
-	    && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+	if (!wq_has_sleeper(&rqw->wait) &&
+	    rq_wait_inc_below(rqw, get_limit(rwb, rw)))
 		return;
 
 	add_wait_queue_exclusive(&rqw->wait, &wait);

From c45e6a037a536530bd25781ac7c989e52deb2a63 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 20 Aug 2018 13:22:27 -0600
Subject: [PATCH 4/5] blk-wbt: fix has-sleeper queueing check

We need to do this inside the loop as well, or we can allow new IO
to supersede previous IO.

Tested-by: Anchal Agarwal
Signed-off-by: Jens Axboe
---
 block/blk-wbt.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 853e22492f4e..c9358f1981fb 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -492,16 +492,17 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 {
 	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
 	DECLARE_WAITQUEUE(wait, current);
+	bool has_sleeper;
 
-	if (!wq_has_sleeper(&rqw->wait) &&
-	    rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+	has_sleeper = wq_has_sleeper(&rqw->wait);
+	if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
 		return;
 
 	add_wait_queue_exclusive(&rqw->wait, &wait);
 	do {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 
-		if (rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+		if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
 			break;
 
 		if (lock) {
@@ -510,6 +511,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 			spin_lock_irq(lock);
 		} else
 			io_schedule();
+		has_sleeper = false;
 	} while (1);
 
 	__set_current_state(TASK_RUNNING);
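Patches 3 and 4 both revolve around the lost-wakeup race inherent in
sleep/wakeup: the waiter stores "I am queued" and then loads the
condition, while the waker stores the condition and then loads "is
anyone queued". Unless both sides place a full barrier between their
store and their load, each can read a stale value and the wakeup is
lost. wq_has_sleeper() supplies the waker-side barrier that a bare
waitqueue_active() check lacks; set_current_state() supplies the
waiter-side one. A rough userspace analogue with C11 atomics follows;
all names here are invented for illustration, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool queued;     /* stands in for the wait queue's list head */
static atomic_bool condition;  /* stands in for "inflight below limit"     */

/* Waker side: publish the condition, full barrier, then look for sleepers. */
static void waker(void)
{
	atomic_store(&condition, true);
	/* seq_cst store+load orders like the smp_mb() in wq_has_sleeper() */
	if (atomic_load(&queued)) {
		/* ... wake_up_all() equivalent would go here ... */
	}
}

/* Waiter side: register, full barrier, then re-check before sleeping. */
static void waiter(void)
{
	atomic_store(&queued, true);
	/* in the kernel, set_current_state() provides this side's barrier */
	if (!atomic_load(&condition)) {
		/* ... sleep here; the barrier pairing guarantees at least
		 * one side observes the other's store, so the wakeup is
		 * never lost ... */
	}
}

Patch 4 then keeps the ordering fair: by rechecking has_sleeper inside
the loop and clearing it only after the first sleep, a task that arrives
while others are already queued cannot steal a freed-up slot ahead of
them on its first pass.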
From c125311d96b1bfcce0f5930a4f0fdfe39ea14f7c Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 23 Aug 2018 09:34:46 -0600
Subject: [PATCH 5/5] blk-wbt: don't maintain inflight counts if disabled

A previous commit removed the ability to have per-rq flags. We used
those flags to maintain inflight counts. Since we don't have those
anymore, we have to always maintain inflight counts, even if wbt is
disabled. This is clearly suboptimal.

Add a queue quiesce around changing the wbt latency settings from
sysfs to work around this. With that, we can reliably put the enabled
check in our bio_to_wbt_flags(), since we know the WBT_TRACKED flag
will be consistent for the lifetime of the request.

Fixes: c1c80384c8f ("block: remove external dependency on wbt_flags")
Reviewed-by: Josef Bacik
Signed-off-by: Jens Axboe
---
 block/blk-sysfs.c | 19 ++++++++++++++++++-
 block/blk-wbt.c   |  3 +++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index bb109bb0a055..3772671cf2bc 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -453,9 +453,26 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
 	else if (val >= 0)
 		val *= 1000ULL;
 
-	wbt_set_min_lat(q, val);
+	/*
+	 * Ensure that the queue is idled, in case the latency update
+	 * ends up either enabling or disabling wbt completely. We can't
+	 * have IO inflight if that happens.
+	 */
+	if (q->mq_ops) {
+		blk_mq_freeze_queue(q);
+		blk_mq_quiesce_queue(q);
+	} else
+		blk_queue_bypass_start(q);
 
+	wbt_set_min_lat(q, val);
 	wbt_update_limits(q);
+
+	if (q->mq_ops) {
+		blk_mq_unquiesce_queue(q);
+		blk_mq_unfreeze_queue(q);
+	} else
+		blk_queue_bypass_end(q);
+
 	return count;
 }

diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index c9358f1981fb..84507d3e9a98 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -540,6 +540,9 @@ static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
 {
 	enum wbt_flags flags = 0;
 
+	if (!rwb_enabled(rwb))
+		return 0;
+
 	if (bio_op(bio) == REQ_OP_READ) {
 		flags = WBT_READ;
 	} else if (wbt_should_throttle(rwb, bio)) {
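The freeze/quiesce bracket in patch 5 is an instance of a general
technique: when per-request state depends on a flag, block new requests
and drain the ones in flight before flipping the flag, so every request
sees one consistent value from start to finish. Below is a simplified
userspace rendering of that idea, with a mutex and condition variable
collapsing the roles of blk_mq_freeze_queue() and blk_mq_quiesce_queue()
into one; none of these names come from the block layer.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t change = PTHREAD_COND_INITIALIZER;
static int inflight;
static bool frozen;
static bool throttling;	/* the flag requests depend on, like WBT_TRACKED */

/* Request entry: wait out any freeze, then sample the flag exactly once. */
static bool request_start(void)
{
	pthread_mutex_lock(&lock);
	while (frozen)
		pthread_cond_wait(&change, &lock);
	inflight++;
	bool tracked = throttling;  /* consistent for this request's lifetime */
	pthread_mutex_unlock(&lock);
	return tracked;
}

static void request_end(void)
{
	pthread_mutex_lock(&lock);
	if (--inflight == 0)
		pthread_cond_broadcast(&change);  /* wake a waiting freezer */
	pthread_mutex_unlock(&lock);
}

/* Analogue of queue_wb_lat_store(): freeze, drain, flip the flag, thaw. */
static void set_throttling(bool enable)
{
	pthread_mutex_lock(&lock);
	frozen = true;
	while (inflight > 0)		/* drain, like blk_mq_freeze_queue() */
		pthread_cond_wait(&change, &lock);
	throttling = enable;		/* no request observes a half-switch */
	frozen = false;
	pthread_cond_broadcast(&change);	/* thaw blocked requesters */
	pthread_mutex_unlock(&lock);
}

With this bracket in place, the rwb_enabled() check added to
bio_to_wbt_flags() is safe: a request either starts entirely before the
switch and completes before it takes effect, or starts entirely after it,
so the WBT_TRACKED decision and the completion-side accounting always
agree.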