blk-throttle: make blk_throtl_drain() ready for hierarchy

The current blk_throtl_drain() assumes that all active throtl_grps are
queued on throtl_data->service_queue, which won't be true once
hierarchy support is implemented.

This patch makes blk_throtl_drain() perform post-order walk of the
blkg hierarchy draining each associated throtl_grp, which guarantees
that all bios will eventually be pushed to the top-level service_queue
in throtl_data.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
This commit is contained in:
Tejun Heo 2013-05-14 13:52:37 -07:00
parent 6e1a5704cb
commit 2a12f0dcda
1 changed file with 40 additions and 11 deletions

View File

@@ -1299,6 +1299,28 @@ out:
return throttled; return throttled;
} }
/*
 * Drain every child tg queued on @parent_sq.  When this returns,
 * @parent_sq has no active children left and all bios previously held
 * by those children sit on @parent_sq->bio_lists[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;
		struct bio *bio;
		int rw;

		throtl_dequeue_tg(tg);

		/* push every pending bio, both directions, up to the parent */
		for (rw = READ; rw <= WRITE; rw++)
			while ((bio = bio_list_peek(&sq->bio_lists[rw])))
				tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
}
/** /**
* blk_throtl_drain - drain throttled bios * blk_throtl_drain - drain throttled bios
* @q: request_queue to drain throttled bios for * @q: request_queue to drain throttled bios for
@ -1309,27 +1331,34 @@ void blk_throtl_drain(struct request_queue *q)
__releases(q->queue_lock) __acquires(q->queue_lock) __releases(q->queue_lock) __acquires(q->queue_lock)
{ {
struct throtl_data *td = q->td; struct throtl_data *td = q->td;
struct throtl_service_queue *parent_sq = &td->service_queue; struct blkcg_gq *blkg;
struct throtl_grp *tg; struct cgroup *pos_cgrp;
struct bio *bio; struct bio *bio;
int rw; int rw;
queue_lockdep_assert_held(q); queue_lockdep_assert_held(q);
rcu_read_lock();
while ((tg = throtl_rb_first(parent_sq))) { /*
struct throtl_service_queue *sq = &tg->service_queue; * Drain each tg while doing post-order walk on the blkg tree, so
* that all bios are propagated to td->service_queue. It'd be
* better to walk service_queue tree directly but blkg walk is
* easier.
*/
blkg_for_each_descendant_post(blkg, pos_cgrp, td->queue->root_blkg)
tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
throtl_dequeue_tg(tg); tg_drain_bios(&td_root_tg(td)->service_queue);
while ((bio = bio_list_peek(&sq->bio_lists[READ]))) /* finally, transfer bios from top-level tg's into the td */
tg_dispatch_one_bio(tg, bio_data_dir(bio)); tg_drain_bios(&td->service_queue);
while ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
tg_dispatch_one_bio(tg, bio_data_dir(bio)); rcu_read_unlock();
}
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
/* all bios now should be in td->service_queue, issue them */
for (rw = READ; rw <= WRITE; rw++) for (rw = READ; rw <= WRITE; rw++)
while ((bio = bio_list_pop(&parent_sq->bio_lists[rw]))) while ((bio = bio_list_pop(&td->service_queue.bio_lists[rw])))
generic_make_request(bio); generic_make_request(bio);
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);