diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index bb109bb0a055..3772671cf2bc 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -453,9 +453,26 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
         else if (val >= 0)
                 val *= 1000ULL;
 
-        wbt_set_min_lat(q, val);
+        /*
+         * Ensure that the queue is idled, in case the latency update
+         * ends up either enabling or disabling wbt completely. We can't
+         * have IO inflight if that happens.
+         */
+        if (q->mq_ops) {
+                blk_mq_freeze_queue(q);
+                blk_mq_quiesce_queue(q);
+        } else
+                blk_queue_bypass_start(q);
 
+        wbt_set_min_lat(q, val);
         wbt_update_limits(q);
+
+        if (q->mq_ops) {
+                blk_mq_unquiesce_queue(q);
+                blk_mq_unfreeze_queue(q);
+        } else
+                blk_queue_bypass_end(q);
+
         return count;
 }
 
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index bb93c7c2b182..84507d3e9a98 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -118,7 +118,7 @@ static void rwb_wake_all(struct rq_wb *rwb)
         for (i = 0; i < WBT_NUM_RWQ; i++) {
                 struct rq_wait *rqw = &rwb->rq_wait[i];
 
-                if (waitqueue_active(&rqw->wait))
+                if (wq_has_sleeper(&rqw->wait))
                         wake_up_all(&rqw->wait);
         }
 }
@@ -162,7 +162,7 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
                 if (inflight && inflight >= limit)
                         return;
 
-                if (waitqueue_active(&rqw->wait)) {
+                if (wq_has_sleeper(&rqw->wait)) {
                         int diff = limit - inflight;
 
                         if (!inflight || diff >= rwb->wb_background / 2)
@@ -449,6 +449,13 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 {
         unsigned int limit;
 
+        /*
+         * If we got disabled, just return UINT_MAX. This ensures that
+         * we'll properly inc a new IO, and dec+wakeup at the end.
+         */
+        if (!rwb_enabled(rwb))
+                return UINT_MAX;
+
         if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
                 return rwb->wb_background;
 
@@ -485,31 +492,17 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 {
         struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
         DECLARE_WAITQUEUE(wait, current);
+        bool has_sleeper;
 
-        /*
-         * inc it here even if disabled, since we'll dec it at completion.
-         * this only happens if the task was sleeping in __wbt_wait(),
-         * and someone turned it off at the same time.
-         */
-        if (!rwb_enabled(rwb)) {
-                atomic_inc(&rqw->inflight);
-                return;
-        }
-
-        if (!waitqueue_active(&rqw->wait)
-            && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+        has_sleeper = wq_has_sleeper(&rqw->wait);
+        if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
                 return;
 
         add_wait_queue_exclusive(&rqw->wait, &wait);
         do {
                 set_current_state(TASK_UNINTERRUPTIBLE);
 
-                if (!rwb_enabled(rwb)) {
-                        atomic_inc(&rqw->inflight);
-                        break;
-                }
-
-                if (rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+                if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
                         break;
 
                 if (lock) {
@@ -518,6 +511,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                         spin_lock_irq(lock);
                 } else
                         io_schedule();
+                has_sleeper = false;
         } while (1);
 
         __set_current_state(TASK_RUNNING);
@@ -546,6 +540,9 @@ static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
 {
         enum wbt_flags flags = 0;
 
+        if (!rwb_enabled(rwb))
+                return 0;
+
         if (bio_op(bio) == REQ_OP_READ) {
                 flags = WBT_READ;
         } else if (wbt_should_throttle(rwb, bio)) {
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6be05bd7ca67..08c3a9f9676c 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -685,8 +685,10 @@ static int bch_writeback_thread(void *arg)
                          * data on cache. BCACHE_DEV_DETACHING flag is set in
                          * bch_cached_dev_detach().
                          */
-                        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+                        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
+                                up_write(&dc->writeback_lock);
                                 break;
+                        }
                 }
 
                 up_write(&dc->writeback_lock);
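
A note on the throttling primitive these wbt hunks depend on: a new IO is admitted only if the per-queue inflight counter can be incremented while staying below get_limit()'s return value. Moving the disable check into get_limit() and returning UINT_MAX means a disabled rwb always admits the IO, so the inc on submission and the dec+wakeup on completion stay paired. Below is a minimal userspace sketch of that compare-and-swap pattern using C11 atomics; inc_below() is a hypothetical stand-in for the kernel's rq_wait_inc_below(), not the kernel source itself.

#include <stdatomic.h>
#include <stdbool.h>

/* Claim one inflight slot iff the current count is below the limit. */
static bool inc_below(atomic_uint *inflight, unsigned int limit)
{
        unsigned int cur = atomic_load(inflight);

        do {
                if (cur >= limit)
                        return false;   /* over the limit: caller must sleep */
                /* on CAS failure, cur is reloaded with the latest value */
        } while (!atomic_compare_exchange_weak(inflight, &cur, cur + 1));

        return true;                    /* slot claimed; completion side decrements */
}

With limit == UINT_MAX (the disabled case) the cur >= limit test cannot fire until the counter would saturate, so every submitter gets a slot and the accounting stays balanced for the lifetime of each request.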