dm writecache: split up writecache_map() to improve code readability

writecache_map() has grown too large and can be confusing to read given all the goto statements.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 99d26de2f6
commit cdd4d7832d
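The shape of the refactoring is easy to see in miniature before reading the full diff: per-direction helpers decide what should happen to a bio and report that decision as an enum value, and a single dispatcher acts on it. The sketch below is not the kernel code; it is a minimal, self-contained C program with hypothetical names (map_op, map_read, map_write, do_map) illustrating the same goto-elimination pattern this commit applies.

/*
 * Illustrative sketch only (all names hypothetical): instead of one
 * large function full of gotos, helpers return an action code and a
 * single switch at the end acts on it.
 */
#include <stdio.h>

enum map_op { MAP_SUBMIT, MAP_REMAP, MAP_ERROR };

/* Helper decides what to do with a read; it performs no I/O itself. */
static enum map_op map_read(int cache_hit)
{
	return cache_hit ? MAP_SUBMIT : MAP_REMAP;
}

/* Helper decides what to do with a write. */
static enum map_op map_write(int have_free_block)
{
	return have_free_block ? MAP_SUBMIT : MAP_ERROR;
}

/* Single dispatcher: the only place that acts on the decision. */
static int do_map(int is_read, int arg)
{
	enum map_op op = is_read ? map_read(arg) : map_write(arg);

	switch (op) {
	case MAP_SUBMIT:
		puts("complete the request here");
		return 0;
	case MAP_REMAP:
		puts("remap the request to another device");
		return 1;
	case MAP_ERROR:
		puts("fail the request");
		return -1;
	}
	return -1;
}

int main(void)
{
	do_map(1, 1);	/* read, cache hit -> submit */
	do_map(0, 0);	/* write, no space -> error  */
	return 0;
}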
@@ -1293,10 +1293,164 @@ static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
 	bio_list_add(&wc->flush_list, bio);
 }
 
-static int writecache_map(struct dm_target *ti, struct bio *bio)
+enum wc_map_op {
+	WC_MAP_SUBMIT,
+	WC_MAP_REMAP,
+	WC_MAP_REMAP_ORIGIN,
+	WC_MAP_RETURN,
+	WC_MAP_ERROR,
+};
+
+static enum wc_map_op writecache_map_read(struct dm_writecache *wc, struct bio *bio)
+{
+	enum wc_map_op map_op;
+	struct wc_entry *e;
+
+read_next_block:
+	e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
+	if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
+		if (WC_MODE_PMEM(wc)) {
+			bio_copy_block(wc, bio, memory_data(wc, e));
+			if (bio->bi_iter.bi_size)
+				goto read_next_block;
+			map_op = WC_MAP_SUBMIT;
+		} else {
+			dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
+			bio_set_dev(bio, wc->ssd_dev->bdev);
+			bio->bi_iter.bi_sector = cache_sector(wc, e);
+			if (!writecache_entry_is_committed(wc, e))
+				writecache_wait_for_ios(wc, WRITE);
+			map_op = WC_MAP_REMAP;
+		}
+	} else {
+		if (e) {
+			sector_t next_boundary =
+				read_original_sector(wc, e) - bio->bi_iter.bi_sector;
+			if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT)
+				dm_accept_partial_bio(bio, next_boundary);
+		}
+		map_op = WC_MAP_REMAP_ORIGIN;
+	}
+
+	return map_op;
+}
+
+static enum wc_map_op writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
+					      struct wc_entry *e, bool search_used)
+{
+	unsigned bio_size = wc->block_size;
+	sector_t start_cache_sec = cache_sector(wc, e);
+	sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
+
+	while (bio_size < bio->bi_iter.bi_size) {
+		if (!search_used) {
+			struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
+			if (!f)
+				break;
+			write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
+							(bio_size >> SECTOR_SHIFT), wc->seq_count);
+			writecache_insert_entry(wc, f);
+			wc->uncommitted_blocks++;
+		} else {
+			struct wc_entry *f;
+			struct rb_node *next = rb_next(&e->rb_node);
+			if (!next)
+				break;
+			f = container_of(next, struct wc_entry, rb_node);
+			if (f != e + 1)
+				break;
+			if (read_original_sector(wc, f) !=
+			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
+				break;
+			if (unlikely(f->write_in_progress))
+				break;
+			if (writecache_entry_is_committed(wc, f))
+				wc->overwrote_committed = true;
+			e = f;
+		}
+		bio_size += wc->block_size;
+		current_cache_sec += wc->block_size >> SECTOR_SHIFT;
+	}
+
+	bio_set_dev(bio, wc->ssd_dev->bdev);
+	bio->bi_iter.bi_sector = start_cache_sec;
+	dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
+
+	if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
+		wc->uncommitted_blocks = 0;
+		queue_work(wc->writeback_wq, &wc->flush_work);
+	} else {
+		writecache_schedule_autocommit(wc);
+	}
+
+	return WC_MAP_REMAP;
+}
+
+static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio *bio)
 {
 	struct wc_entry *e;
+
+	do {
+		bool found_entry = false;
+		bool search_used = false;
+		if (writecache_has_error(wc))
+			return WC_MAP_ERROR;
+		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
+		if (e) {
+			if (!writecache_entry_is_committed(wc, e)) {
+				search_used = true;
+				goto bio_copy;
+			}
+			if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
+				wc->overwrote_committed = true;
+				search_used = true;
+				goto bio_copy;
+			}
+			found_entry = true;
+		} else {
+			if (unlikely(wc->cleaner) ||
+			    (wc->metadata_only && !(bio->bi_opf & REQ_META)))
+				goto direct_write;
+		}
+		e = writecache_pop_from_freelist(wc, (sector_t)-1);
+		if (unlikely(!e)) {
+			if (!WC_MODE_PMEM(wc) && !found_entry) {
+direct_write:
+				e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
+				if (e) {
+					sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
+					BUG_ON(!next_boundary);
+					if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
+						dm_accept_partial_bio(bio, next_boundary);
+					}
+				}
+				return WC_MAP_REMAP_ORIGIN;
+			}
+			writecache_wait_on_freelist(wc);
+			continue;
+		}
+		write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
+		writecache_insert_entry(wc, e);
+		wc->uncommitted_blocks++;
+bio_copy:
+		if (WC_MODE_PMEM(wc))
+			bio_copy_block(wc, bio, memory_data(wc, e));
+		else
+			return writecache_bio_copy_ssd(wc, bio, e, search_used);
+	} while (bio->bi_iter.bi_size);
+
+	if (unlikely(bio->bi_opf & REQ_FUA || wc->uncommitted_blocks >= wc->autocommit_blocks))
+		writecache_flush(wc);
+	else
+		writecache_schedule_autocommit(wc);
+
+	return WC_MAP_SUBMIT;
+}
+
+static int writecache_map(struct dm_target *ti, struct bio *bio)
+{
 	struct dm_writecache *wc = ti->private;
+	enum wc_map_op map_op = WC_MAP_ERROR;
 
 	bio->bi_private = NULL;
@@ -1342,167 +1496,49 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
 		}
 	}
 
-	if (bio_data_dir(bio) == READ) {
-read_next_block:
-		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
-		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
-			if (WC_MODE_PMEM(wc)) {
-				bio_copy_block(wc, bio, memory_data(wc, e));
-				if (bio->bi_iter.bi_size)
-					goto read_next_block;
-				goto unlock_submit;
-			} else {
-				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
-				bio_set_dev(bio, wc->ssd_dev->bdev);
-				bio->bi_iter.bi_sector = cache_sector(wc, e);
-				if (!writecache_entry_is_committed(wc, e))
-					writecache_wait_for_ios(wc, WRITE);
-				goto unlock_remap;
-			}
-		} else {
-			if (e) {
-				sector_t next_boundary =
-					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
-				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
-					dm_accept_partial_bio(bio, next_boundary);
-				}
-			}
-			goto unlock_remap_origin;
-		}
-	} else {
-		do {
-			bool found_entry = false;
-			bool search_used = false;
-			if (writecache_has_error(wc))
-				goto unlock_error;
-			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
-			if (e) {
-				if (!writecache_entry_is_committed(wc, e)) {
-					search_used = true;
-					goto bio_copy;
-				}
-				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
-					wc->overwrote_committed = true;
-					search_used = true;
-					goto bio_copy;
-				}
-				found_entry = true;
-			} else {
-				if (unlikely(wc->cleaner) ||
-				    (wc->metadata_only && !(bio->bi_opf & REQ_META)))
-					goto direct_write;
-			}
-			e = writecache_pop_from_freelist(wc, (sector_t)-1);
-			if (unlikely(!e)) {
-				if (!WC_MODE_PMEM(wc) && !found_entry) {
-direct_write:
-					e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
-					if (e) {
-						sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
-						BUG_ON(!next_boundary);
-						if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
-							dm_accept_partial_bio(bio, next_boundary);
-						}
-					}
-					goto unlock_remap_origin;
-				}
-				writecache_wait_on_freelist(wc);
-				continue;
-			}
-			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
-			writecache_insert_entry(wc, e);
-			wc->uncommitted_blocks++;
-bio_copy:
-			if (WC_MODE_PMEM(wc)) {
-				bio_copy_block(wc, bio, memory_data(wc, e));
-			} else {
-				unsigned bio_size = wc->block_size;
-				sector_t start_cache_sec = cache_sector(wc, e);
-				sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
-
-				while (bio_size < bio->bi_iter.bi_size) {
-					if (!search_used) {
-						struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
-						if (!f)
-							break;
-						write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
-										(bio_size >> SECTOR_SHIFT), wc->seq_count);
-						writecache_insert_entry(wc, f);
-						wc->uncommitted_blocks++;
-					} else {
-						struct wc_entry *f;
-						struct rb_node *next = rb_next(&e->rb_node);
-						if (!next)
-							break;
-						f = container_of(next, struct wc_entry, rb_node);
-						if (f != e + 1)
-							break;
-						if (read_original_sector(wc, f) !=
-						    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
-							break;
-						if (unlikely(f->write_in_progress))
-							break;
-						if (writecache_entry_is_committed(wc, f))
-							wc->overwrote_committed = true;
-						e = f;
-					}
-					bio_size += wc->block_size;
-					current_cache_sec += wc->block_size >> SECTOR_SHIFT;
-				}
-
-				bio_set_dev(bio, wc->ssd_dev->bdev);
-				bio->bi_iter.bi_sector = start_cache_sec;
-				dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
-
-				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
-					wc->uncommitted_blocks = 0;
-					queue_work(wc->writeback_wq, &wc->flush_work);
-				} else {
-					writecache_schedule_autocommit(wc);
-				}
-				goto unlock_remap;
-			}
-		} while (bio->bi_iter.bi_size);
-
-		if (unlikely(bio->bi_opf & REQ_FUA ||
-			     wc->uncommitted_blocks >= wc->autocommit_blocks))
-			writecache_flush(wc);
-		else
-			writecache_schedule_autocommit(wc);
-		goto unlock_submit;
-	}
-
-unlock_remap_origin:
-	if (likely(wc->pause != 0)) {
-		if (bio_op(bio) == REQ_OP_WRITE) {
-			dm_iot_io_begin(&wc->iot, 1);
-			bio->bi_private = (void *)2;
-		}
-	}
-	bio_set_dev(bio, wc->dev->bdev);
-	wc_unlock(wc);
-	return DM_MAPIO_REMAPPED;
-
-unlock_remap:
-	/* make sure that writecache_end_io decrements bio_in_progress: */
-	bio->bi_private = (void *)1;
-	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
-	wc_unlock(wc);
-	return DM_MAPIO_REMAPPED;
-
-unlock_submit:
-	wc_unlock(wc);
-	bio_endio(bio);
-	return DM_MAPIO_SUBMITTED;
-
-unlock_return:
-	wc_unlock(wc);
-	return DM_MAPIO_SUBMITTED;
-
-unlock_error:
-	wc_unlock(wc);
-	bio_io_error(bio);
-	return DM_MAPIO_SUBMITTED;
+	if (bio_data_dir(bio) == READ)
+		map_op = writecache_map_read(wc, bio);
+	else
+		map_op = writecache_map_write(wc, bio);
+
+	switch (map_op) {
+	case WC_MAP_REMAP_ORIGIN:
+unlock_remap_origin:
+		if (likely(wc->pause != 0)) {
+			if (bio_op(bio) == REQ_OP_WRITE) {
+				dm_iot_io_begin(&wc->iot, 1);
+				bio->bi_private = (void *)2;
+			}
+		}
+		bio_set_dev(bio, wc->dev->bdev);
+		wc_unlock(wc);
+		return DM_MAPIO_REMAPPED;
+
+	case WC_MAP_REMAP:
+unlock_remap:
+		/* make sure that writecache_end_io decrements bio_in_progress: */
+		bio->bi_private = (void *)1;
+		atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
+		wc_unlock(wc);
+		return DM_MAPIO_REMAPPED;
+
+	case WC_MAP_SUBMIT:
+unlock_submit:
+		wc_unlock(wc);
+		bio_endio(bio);
+		return DM_MAPIO_SUBMITTED;
+
+	case WC_MAP_RETURN:
+unlock_return:
+		wc_unlock(wc);
+		return DM_MAPIO_SUBMITTED;
+
+	case WC_MAP_ERROR:
+unlock_error:
+		wc_unlock(wc);
+		bio_io_error(bio);
+		return DM_MAPIO_SUBMITTED;
+	}
 }
 
 static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
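A note on the resulting structure: the new wc_map_op values map onto the existing device-mapper return conventions — WC_MAP_REMAP_ORIGIN and WC_MAP_REMAP end in DM_MAPIO_REMAPPED, while WC_MAP_SUBMIT, WC_MAP_RETURN and WC_MAP_ERROR end in DM_MAPIO_SUBMITTED. The unlock_* labels are kept alongside the new case labels because the flush and discard handling earlier in writecache_map(), outside these hunks, still reaches them via goto; only the common read and write paths move to the enum-based dispatch in this commit.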