dm cache: submit writethrough writes in parallel to origin and cache
Discontinue issuing writethrough write IO in series to the origin and
then cache.

Use bio_clone_fast() to create a new origin clone bio that will be
mapped to the origin device and then bio_chain()'d to the bio that gets
remapped to the cache device.  The origin clone bio does _not_ have a
copy of the per_bio_data -- as such check_if_tick_bio_needed() will not
be called.

The cache bio (parent bio) will not complete until the origin bio has
completed -- this fulfills bio_clone_fast()'s requirements as well as
the requirement to not complete the original IO until the write IO has
completed to both the origin and cache device.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
commit 2df3bae9a6
parent 8e3c382777
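For readers less familiar with the block-layer pattern this change builds on, here is a minimal sketch of clone-and-chain submission against the bio API of this era (bio_clone_fast()/bio_chain()/submit_bio()). The helper name submit_write_to_both and its parameters are illustrative assumptions, not code from this patch; the actual change is in the diff below.

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/*
	 * Illustrative sketch (not from this patch): submit a write to a
	 * second device in parallel with the original bio.
	 */
	static void submit_write_to_both(struct bio *bio,
					 struct block_device *second_dev,
					 struct bio_set *bs)
	{
		/*
		 * Fast clone: shares the parent's data pages and copies its
		 * iterator; only a new struct bio is allocated, from the
		 * given bioset.  With a bioset and GFP_NOIO the allocation
		 * waits rather than fails.
		 */
		struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

		bio_set_dev(clone, second_dev);

		/*
		 * Chain the clone to the parent: this bumps the parent's
		 * __bi_remaining, so the parent's bi_end_io will not run
		 * until the clone has completed as well.
		 */
		bio_chain(clone, bio);
		submit_bio(clone);

		/* Caller now remaps and submits the parent to the first device. */
	}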
drivers/md/dm-cache-target.c

@@ -450,6 +450,7 @@ struct cache {
 	struct work_struct migration_worker;
 	struct delayed_work waker;
 	struct dm_bio_prison_v2 *prison;
+	struct bio_set *bs;
 
 	mempool_t *migration_pool;
 
@@ -868,16 +869,23 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 	spin_unlock_irqrestore(&cache->lock, flags);
 }
 
-static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
-					  dm_oblock_t oblock)
+static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+					    dm_oblock_t oblock, bool bio_has_pbd)
 {
-	// FIXME: this is called way too much.
-	check_if_tick_bio_needed(cache, bio);
+	if (bio_has_pbd)
+		check_if_tick_bio_needed(cache, bio);
 	remap_to_origin(cache, bio);
 	if (bio_data_dir(bio) == WRITE)
 		clear_discard(cache, oblock_to_dblock(cache, oblock));
 }
 
+static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+					  dm_oblock_t oblock)
+{
+	// FIXME: check_if_tick_bio_needed() is called way too much through this interface
+	__remap_to_origin_clear_discard(cache, bio, oblock, true);
+}
+
 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
 				 dm_oblock_t oblock, dm_cblock_t cblock)
 {
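The refactor above is preparation: check_if_tick_bio_needed() reads the bio's per_bio_data, so the new bio_has_pbd parameter gives callers a way to skip it. The origin clone introduced later in this patch is created with bio_clone_fast() and carries no per_bio_data, so it must take the bio_has_pbd=false path, while the existing remap_to_origin_clear_discard() interface keeps its old behavior via the true wrapper.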
@@ -971,23 +979,25 @@ static void writethrough_endio(struct bio *bio)
 }
 
 /*
- * FIXME: send in parallel, huge latency as is.
  * When running in writethrough mode we need to send writes to clean blocks
- * to both the cache and origin devices.  In future we'd like to clone the
- * bio and send them in parallel, but for now we're doing them in
- * series as this is easier.
+ * to both the cache and origin devices.  Clone the bio and send them in parallel.
  */
-static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
-				       dm_oblock_t oblock, dm_cblock_t cblock)
+static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
+				      dm_oblock_t oblock, dm_cblock_t cblock)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
+	struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, cache->bs);
 
-	pb->cache = cache;
-	pb->cblock = cblock;
-	dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
-	dm_bio_record(&pb->bio_details, bio);
+	BUG_ON(!origin_bio);
 
-	remap_to_origin_clear_discard(pb->cache, bio, oblock);
+	bio_chain(origin_bio, bio);
+	/*
+	 * Passing false to __remap_to_origin_clear_discard() skips
+	 * all code that might use per_bio_data (since clone doesn't have it)
+	 */
+	__remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
+	submit_bio(origin_bio);
+
+	remap_to_cache(cache, bio, cblock);
 }
 
 /*----------------------------------------------------------------
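Two details worth noting in remap_to_origin_and_cache(): bio_clone_fast() is called with GFP_NOIO against the target's dedicated bioset, and bioset-backed allocations that are allowed to sleep are satisfied from a mempool rather than failing, so the BUG_ON() is effectively a never-fires assertion. And because bio_chain() runs before submit_bio(), there is no window in which the parent (cache) bio could complete ahead of the origin clone -- the ordering the commit message requires.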
@@ -1873,7 +1883,7 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
 	} else {
 		if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
 		    !is_dirty(cache, cblock)) {
-			remap_to_origin_then_cache(cache, bio, block, cblock);
+			remap_to_origin_and_cache(cache, bio, block, cblock);
 			accounted_begin(cache, bio);
 		} else
 			remap_to_cache_dirty(cache, bio, block, cblock);
@@ -2132,6 +2142,9 @@ static void destroy(struct cache *cache)
 		kfree(cache->ctr_args[i]);
 	kfree(cache->ctr_args);
 
+	if (cache->bs)
+		bioset_free(cache->bs);
+
 	kfree(cache);
 }
 
@@ -2578,6 +2591,13 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	cache->features = ca->features;
 	ti->per_io_data_size = get_per_bio_data_size(cache);
 
+	if (writethrough_mode(cache)) {
+		/* Create bioset for writethrough bios issued to origin */
+		cache->bs = bioset_create(BIO_POOL_SIZE, 0, 0);
+		if (!cache->bs)
+			goto bad;
+	}
+
 	cache->callbacks.congested_fn = cache_is_congested;
 	dm_table_add_target_callbacks(ti->table, &cache->callbacks);
 
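Note that the bioset is created only in writethrough mode, matching the conditional bioset_free() in destroy() above; in other modes cache->bs remains NULL, and map_bio() never reaches remap_to_origin_and_cache(). BIO_POOL_SIZE is the block layer's small default bio pool size (defined in <linux/bio.h>), enough to guarantee forward progress for clone allocation under memory pressure.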