dm thin: requeue bios to DM core if no_free_space and in read-only mode
Now that we switch the pool to read-only mode when the data device runs out of space, active writers get IO errors once we resume after resizing the data device.

If no_free_space is set, save bios to the 'retry_on_resume_list' and requeue them on resume (once the data or metadata device may have been resized).

With this patch the resize_io test passes again (on slower storage):

    dmtest run --suite thin-provisioning -n /resize_io/

Later patches fix some subtle races associated with the pool mode transitions done as part of the pool's -ENOSPC handling. These races are exposed on fast storage (e.g. PCIe SSD).

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
commit 8c0f0e8c9f
parent 399caddfb1
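For context, the mechanism the commit message describes reduces to the sketch below: a bio that the read-only pool cannot service is parked on the pool's 'retry_on_resume_list' under pool->lock and re-issued after the pool is resumed. This is a minimal sketch of retry_on_resume() reconstructed from the hunk context in the diff below, not the verbatim kernel source; in particular, the dm_thin_endio_hook per-bio lookup used to reach the pool is an assumption for illustration.

static void retry_on_resume(struct bio *bio)
{
        /*
         * Sketch only: reach the owning pool via per-bio data.  The hook
         * structure used here is assumed, not shown in this patch.
         */
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        struct pool *pool = h->tc->pool;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        bio_list_add(&pool->retry_on_resume_list, bio);  /* held until resume */
        spin_unlock_irqrestore(&pool->lock, flags);
}

The new handle_unserviceable_bio() added in the diff below simply chooses between this path and bio_io_error(), based on whether no_free_space is set.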
@@ -164,7 +164,7 @@ struct pool {
 
         struct pool_features pf;
         bool low_water_triggered:1;  /* A dm event has been sent */
-        bool no_free_space:1;        /* A -ENOSPC warning has been issued */
+        bool no_free_space:1;        /* bios will be requeued if set */
 
         struct dm_bio_prison *prison;
         struct dm_kcopyd_client *copier;
@@ -982,6 +982,20 @@ static void retry_on_resume(struct bio *bio)
         spin_unlock_irqrestore(&pool->lock, flags);
 }
 
+static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
+{
+        /*
+         * When pool is read-only, no cell locking is needed because
+         * nothing is changing.
+         */
+        WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);
+
+        if (pool->no_free_space)
+                retry_on_resume(bio);
+        else
+                bio_io_error(bio);
+}
+
 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
         struct bio *bio;
@@ -991,7 +1005,7 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
         cell_release(pool, cell, &bios);
 
         while ((bio = bio_list_pop(&bios)))
-                retry_on_resume(bio);
+                handle_unserviceable_bio(pool, bio);
 }
 
 static void process_discard(struct thin_c *tc, struct bio *bio)
@@ -1245,7 +1259,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
         switch (r) {
         case 0:
                 if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
-                        bio_io_error(bio);
+                        handle_unserviceable_bio(tc->pool, bio);
                 else {
                         inc_all_io_entry(tc->pool, bio);
                         remap_and_issue(tc, bio, lookup_result.block);
@@ -1254,7 +1268,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
 
         case -ENODATA:
                 if (rw != READ) {
-                        bio_io_error(bio);
+                        handle_unserviceable_bio(tc->pool, bio);
                         break;
                 }
 
@@ -1565,9 +1579,9 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
                         /*
                          * This block isn't provisioned, and we have no way
-                         * of doing so.  Just error it.
+                         * of doing so.
                          */
-                        bio_io_error(bio);
+                        handle_unserviceable_bio(tc->pool, bio);
                         return DM_MAPIO_SUBMITTED;
                 }
                 /* fall through */
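For completeness, here is a hypothetical sketch of the resume-time counterpart, which is not part of this patch: once the data or metadata device may have been resized, the pool clears its space flags and splices the held bios back onto its normal deferred path so the worker re-processes them. The helper names and the deferred_bios field are assumptions for illustration; only pool->lock, low_water_triggered, no_free_space and retry_on_resume_list appear in this patch or its context.

static void __requeue_held_bios(struct pool *pool)
{
        /* Hand the held bios back to the pool's normal deferred path. */
        bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
        bio_list_init(&pool->retry_on_resume_list);
}

static void sketch_pool_resume(struct pool *pool)
{
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        pool->low_water_triggered = false;
        pool->no_free_space = false;  /* space may be available again */
        __requeue_held_bios(pool);
        spin_unlock_irqrestore(&pool->lock, flags);

        /* The pool worker then re-issues the requeued bios. */
}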