dm: convey that all flushes are processed as empty

Rename __clone_and_map_flush to __clone_and_map_empty_flush for added
clarity.

Simplify logic associated with REQ_FLUSH conditionals.

Introduce a BUG_ON() and add a few more helpful comments to the code
so that it is clear that all flushes are empty.

Cleanup __split_and_process_bio() so that an empty flush isn't processed
by a 'sector_count' focused while loop.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
This commit is contained in:
Mike Snitzer 2010-09-08 18:07:01 +02:00 committed by Jens Axboe
parent 05447420f9
commit b372d360df
1 changed file with 15 additions and 19 deletions

View File

@ -621,16 +621,17 @@ static void dec_pending(struct dm_io *io, int error)
if (io_error == DM_ENDIO_REQUEUE) if (io_error == DM_ENDIO_REQUEUE)
return; return;
if (!(bio->bi_rw & REQ_FLUSH) || !bio->bi_size) { if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
trace_block_bio_complete(md->queue, bio);
bio_endio(bio, io_error);
} else {
/* /*
* Preflush done for flush with data, reissue * Preflush done for flush with data, reissue
* without REQ_FLUSH. * without REQ_FLUSH.
*/ */
bio->bi_rw &= ~REQ_FLUSH; bio->bi_rw &= ~REQ_FLUSH;
queue_io(md, bio); queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
trace_block_bio_complete(md->queue, bio);
bio_endio(bio, io_error);
} }
} }
} }
@ -1132,16 +1133,15 @@ static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
__issue_target_request(ci, ti, request_nr, len); __issue_target_request(ci, ti, request_nr, len);
} }
static int __clone_and_map_flush(struct clone_info *ci) static int __clone_and_map_empty_flush(struct clone_info *ci)
{ {
unsigned target_nr = 0; unsigned target_nr = 0;
struct dm_target *ti; struct dm_target *ti;
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++))) while ((ti = dm_table_get_target(ci->map, target_nr++)))
__issue_target_requests(ci, ti, ti->num_flush_requests, 0); __issue_target_requests(ci, ti, ti->num_flush_requests, 0);
ci->sector_count = 0;
return 0; return 0;
} }
@ -1282,7 +1282,6 @@ static int __clone_and_map(struct clone_info *ci)
*/ */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{ {
bool is_flush = bio->bi_rw & REQ_FLUSH;
struct clone_info ci; struct clone_info ci;
int error = 0; int error = 0;
@ -1302,20 +1301,17 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
ci.sector = bio->bi_sector; ci.sector = bio->bi_sector;
ci.idx = bio->bi_idx; ci.idx = bio->bi_idx;
if (!is_flush) { start_io_acct(ci.io);
if (bio->bi_rw & REQ_FLUSH) {
ci.bio = &ci.md->flush_bio;
ci.sector_count = 0;
error = __clone_and_map_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
} else {
ci.bio = bio; ci.bio = bio;
ci.sector_count = bio_sectors(bio); ci.sector_count = bio_sectors(bio);
} else { while (ci.sector_count && !error)
ci.bio = &ci.md->flush_bio;
ci.sector_count = 1;
}
start_io_acct(ci.io);
while (ci.sector_count && !error) {
if (!is_flush)
error = __clone_and_map(&ci); error = __clone_and_map(&ci);
else
error = __clone_and_map_flush(&ci);
} }
/* drop the extra reference count */ /* drop the extra reference count */