for-linus-20180825
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAluBbOMQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpqIBD/0e2/kyEE5UbIPeFK2bmrjDnbmo7Iq7Scb3
MQwYjYL1lQK2y0yu810Ews5acWiVxJtb0D5Hko/eBEiOE81qB9j+DpTB804ewYFR
IQwB9pKYhUVuwULHQDyEQTlUUHZba/cgqeHtF+20AmLhdv8NqpL7NfQ4d0XScHti
AAsxKcLJ5gWAafmLQB2ZVbjlmLib5rSTLdEjVSLAdKIsED9+g12/WwIzZsnK0s9Q
ozGasr3jPegPBPwpR0pepBBlwInwvdsCFlmzIkL1B8Nn0EWdfLY1qsgS47xJynOf
Wz/u9ROrHaKClfJuQ08ccqnl3JNUoHBKi06CGpZP+w1S8BmZpH6WAyMEkiLz/l0r
56US10YAC6hyq4Ectfa5aR3Ch/YnR+bVOsv6GVg4xqoXetr8sEHBM4i+iB05tsfL
fAoHE7RPB8UwMdbJnnh+OTKsykKZG+IARurWjKBrM0j+zSNcEiRebs+5JI7tgCee
MDRH+XOUzREbwcI5pggnz9QPavwf6j07clMfbbT6/C6CNjZJ4rAPwTvYEDnQVpos
bXh2ifonOKVp+pq6LHW2vxBCmE6+Xh3Do+q5GBQ5png4BIM9A+hJsJT/ACqzjAip
KYN0wdFPJu5eI2w8/X3fxgq2mrN7jUe/9x9HI+SnQ7cIkoQ7ZM7YcBrLtl6ignTN
wH2uJIhQMQ==
=KQSS
-----END PGP SIGNATURE-----

Merge tag 'for-linus-20180825' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few small fixes for this merge window:

   - Locking imbalance fix for bcache (Shan Hai)

   - A few small fixes for wbt. One is a cleanup/prep, one is a fix for
     an existing issue, and the last two are fixes for changes that went
     into this merge window (me)"

* tag 'for-linus-20180825' of git://git.kernel.dk/linux-block:
  blk-wbt: don't maintain inflight counts if disabled
  blk-wbt: fix has-sleeper queueing check
  blk-wbt: use wq_has_sleeper() for wq active check
  blk-wbt: move disable check into get_limit()
  bcache: release dc->writeback_lock properly in bch_writeback_thread()
commit b8dcdab36f
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
@@ -453,9 +453,26 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
         else if (val >= 0)
                 val *= 1000ULL;
 
-        wbt_set_min_lat(q, val);
+        /*
+         * Ensure that the queue is idled, in case the latency update
+         * ends up either enabling or disabling wbt completely. We can't
+         * have IO inflight if that happens.
+         */
+        if (q->mq_ops) {
+                blk_mq_freeze_queue(q);
+                blk_mq_quiesce_queue(q);
+        } else
+                blk_queue_bypass_start(q);
 
+        wbt_set_min_lat(q, val);
         wbt_update_limits(q);
+
+        if (q->mq_ops) {
+                blk_mq_unquiesce_queue(q);
+                blk_mq_unfreeze_queue(q);
+        } else
+                blk_queue_bypass_end(q);
+
         return count;
 }
 
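This change makes queue_wb_lat_store() freeze and quiesce the queue around the latency update, because writing min_lat_nsec can enable or disable wbt entirely and no I/O may be in flight across that flip. Below is a minimal userspace sketch of the same freeze-then-reconfigure pattern, using a pthread rwlock as a stand-in for blk_mq_freeze_queue()/blk_mq_unfreeze_queue(); all names are illustrative, not kernel APIs.

/*
 * In-flight "I/O" holds the lock shared; the configuration update takes
 * it exclusively, so nothing can be in flight while the switch flips.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t queue_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool throttling_enabled = true;

static void submit_io(int id)
{
        pthread_rwlock_rdlock(&queue_lock);     /* "enter" the queue */
        printf("io %d, throttling=%d\n", id, throttling_enabled);
        pthread_rwlock_unlock(&queue_lock);     /* "complete" */
}

static void set_min_latency(long nsec)
{
        pthread_rwlock_wrlock(&queue_lock);     /* freeze: wait out in-flight I/O */
        throttling_enabled = (nsec != 0);       /* safe: nothing is in flight */
        pthread_rwlock_unlock(&queue_lock);     /* unfreeze */
}

int main(void)
{
        submit_io(1);
        set_min_latency(0);     /* disable throttling with the queue idle */
        submit_io(2);
        return 0;
}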
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
@@ -118,7 +118,7 @@ static void rwb_wake_all(struct rq_wb *rwb)
         for (i = 0; i < WBT_NUM_RWQ; i++) {
                 struct rq_wait *rqw = &rwb->rq_wait[i];
 
-                if (waitqueue_active(&rqw->wait))
+                if (wq_has_sleeper(&rqw->wait))
                         wake_up_all(&rqw->wait);
         }
 }
@@ -162,7 +162,7 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
         if (inflight && inflight >= limit)
                 return;
 
-        if (waitqueue_active(&rqw->wait)) {
+        if (wq_has_sleeper(&rqw->wait)) {
                 int diff = limit - inflight;
 
                 if (!inflight || diff >= rwb->wb_background / 2)
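waitqueue_active() is a plain lockless test with no ordering guarantees, so a completion could check it before a waiter's enqueue became visible and skip a needed wakeup; wq_has_sleeper() issues a full memory barrier first, pairing with the barrier on the waiter side. The following C11 sketch models that ordering requirement with illustrative names; it is not the kernel waitqueue implementation.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool condition;
static atomic_int nr_sleepers;

/* Waiter: register on the queue first, then re-check before sleeping. */
static bool waiter_should_sleep(void)
{
        atomic_fetch_add(&nr_sleepers, 1);      /* "add_wait_queue()" */
        if (atomic_load(&condition)) {          /* seq_cst: ordered after the add */
                atomic_fetch_sub(&nr_sleepers, 1);
                return false;                   /* condition already set, don't sleep */
        }
        return true;
}

/* Waker: publish the condition first, then check whether anyone sleeps. */
static bool waker_sees_sleeper(void)
{
        atomic_store(&condition, true);         /* "I/O completed" */
        /*
         * A relaxed load here (the waitqueue_active() pattern) could be
         * reordered before the store above and miss a just-registered
         * waiter; the full barrier in wq_has_sleeper() forbids that.
         */
        return atomic_load(&nr_sleepers) > 0;   /* would call wake_up_all() */
}

int main(void)
{
        bool slept = waiter_should_sleep();
        bool woken = waker_sees_sleeper();

        return (slept && !woken) ? 1 : 0;       /* 1 would mean a lost wakeup */
}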
@@ -449,6 +449,13 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 {
         unsigned int limit;
 
+        /*
+         * If we got disabled, just return UINT_MAX. This ensures that
+         * we'll properly inc a new IO, and dec+wakeup at the end.
+         */
+        if (!rwb_enabled(rwb))
+                return UINT_MAX;
+
         if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
                 return rwb->wb_background;
 
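Moving the disable check into get_limit() keeps the inflight accounting balanced: when wbt is disabled the limit becomes UINT_MAX, so submission still increments the counter (the inc-below test cannot fail) and completion still decrements it and wakes any stragglers. A small sketch of that invariant follows; try_submit() and complete() are hypothetical stand-ins, not kernel functions.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint inflight;
static bool enabled = true;

static unsigned int get_limit(void)
{
        return enabled ? 16 : UINT_MAX; /* disabled: never block, keep counting */
}

static bool try_submit(void)
{
        unsigned int cur = atomic_load(&inflight);

        while (cur < get_limit()) {     /* mirrors rq_wait_inc_below() */
                if (atomic_compare_exchange_weak(&inflight, &cur, cur + 1))
                        return true;    /* counted, even when disabled */
        }
        return false;                   /* over the limit: caller must wait */
}

static void complete(void)
{
        atomic_fetch_sub(&inflight, 1); /* dec; the kernel also wakes waiters */
}

int main(void)
{
        enabled = false;                /* "wbt disabled" */
        if (try_submit())               /* still increments */
                complete();             /* still decrements */
        return 0;
}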
@@ -485,31 +492,17 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 {
         struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
         DECLARE_WAITQUEUE(wait, current);
+        bool has_sleeper;
 
-        /*
-         * inc it here even if disabled, since we'll dec it at completion.
-         * this only happens if the task was sleeping in __wbt_wait(),
-         * and someone turned it off at the same time.
-         */
-        if (!rwb_enabled(rwb)) {
-                atomic_inc(&rqw->inflight);
-                return;
-        }
-
-        if (!waitqueue_active(&rqw->wait)
-            && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+        has_sleeper = wq_has_sleeper(&rqw->wait);
+        if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
                 return;
 
         add_wait_queue_exclusive(&rqw->wait, &wait);
         do {
                 set_current_state(TASK_UNINTERRUPTIBLE);
 
-                if (!rwb_enabled(rwb)) {
-                        atomic_inc(&rqw->inflight);
-                        break;
-                }
-
-                if (rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+                if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
                         break;
 
                 if (lock) {
@@ -518,6 +511,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                         spin_lock_irq(lock);
                 } else
                         io_schedule();
+                has_sleeper = false;
         } while (1);
 
         __set_current_state(TASK_RUNNING);
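Together, the two __wbt_wait() hunks fix the queueing check: has_sleeper is sampled once with wq_has_sleeper() before the task queues itself, and is cleared after the first sleep, so a task that has already waited a round stops deferring to other sleepers and may claim a slot itself. Below is a condensed userspace rendition of the fixed loop shape; every helper is a hypothetical stand-in for the kernel primitive named in its comment.

#include <stdbool.h>

static bool queue_has_sleeper(void) { return false; }   /* wq_has_sleeper() */
static bool try_get_slot(void)      { return true;  }   /* rq_wait_inc_below() */
static void enqueue_self(void)      { }                 /* add_wait_queue_exclusive() */
static void sleep_until_woken(void) { }                 /* io_schedule() */
static void dequeue_self(void)      { }                 /* remove_wait_queue() */

static void wbt_wait_sketch(void)
{
        bool has_sleeper = queue_has_sleeper();

        /* Fast path: claim a slot only if nobody is already queued ahead. */
        if (!has_sleeper && try_get_slot())
                return;

        enqueue_self();
        for (;;) {
                if (!has_sleeper && try_get_slot())
                        break;          /* our turn: slot acquired */
                sleep_until_woken();
                has_sleeper = false;    /* waited one round: stop deferring */
        }
        dequeue_self();
}

int main(void)
{
        wbt_wait_sketch();
        return 0;
}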
@@ -546,6 +540,9 @@ static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
 {
         enum wbt_flags flags = 0;
 
+        if (!rwb_enabled(rwb))
+                return 0;
+
         if (bio_op(bio) == REQ_OP_READ) {
                 flags = WBT_READ;
         } else if (wbt_should_throttle(rwb, bio)) {
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
@@ -685,8 +685,10 @@ static int bch_writeback_thread(void *arg)
                  * data on cache. BCACHE_DEV_DETACHING flag is set in
                  * bch_cached_dev_detach().
                  */
-                if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+                if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
+                        up_write(&dc->writeback_lock);
                         break;
+                }
         }
 
         up_write(&dc->writeback_lock);
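The bcache fix addresses a classic early-exit lock imbalance: the detaching path broke out of the loop while still holding dc->writeback_lock, so the lock was never released. Every exit from a critical section, including break, must drop the lock it holds; a minimal pthread illustration of the same rule:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool detaching = true;

static void worker_loop(void)
{
        for (;;) {
                pthread_mutex_lock(&lock);
                if (detaching) {
                        /*
                         * Without this unlock we would exit still holding
                         * the lock, and every later locker would deadlock.
                         */
                        pthread_mutex_unlock(&lock);
                        break;
                }
                /* ... do guarded work ... */
                pthread_mutex_unlock(&lock);
        }
}

int main(void)
{
        worker_loop();
        return 0;
}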