Merge tag 'for-6.3/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix DM thin to work as a swap device by using the 'limit_swap_bios'
   DM target flag (initially added to allow swap to dm-crypt) to
   throttle the amount of outstanding swap bios.

 - Fix DM crypt soft lockup warnings by calling cond_resched() from the
   CPU-intensive loop in dmcrypt_write().

 - Fix DM crypt to not access an uninitialized tasklet. This fix allows
   for consistent handling of IO completion by _not_ needlessly punting
   to a workqueue when tasklets are not needed.

 - Fix DM core's alloc_dev() initialization for DM stats to check for
   and propagate alloc_percpu() failure.

* tag 'for-6.3/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm stats: check for and propagate alloc_percpu failure
  dm crypt: avoid accessing uninitialized tasklet
  dm crypt: add cond_resched() to dmcrypt_write()
  dm thin: fix deadlock when swapping to thin device
commit 5ad4fe9613
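The first fix relies on the 'limit_swap_bios' machinery that DM core already implements (it was added for dm-crypt): when a target sets ti->limit_swap_bios, DM core bounds the number of in-flight swap bios with a per-device counting semaphore, so memory reclaim cannot pile unbounded swap I/O onto the target and deadlock. Below is a simplified sketch of that mechanism, paraphrased from drivers/md/dm.c as of this release; helper names and surrounding details are recalled from the source and should be treated as approximate, not verbatim.

/* Sketch paraphrased from drivers/md/dm.c -- not the verbatim source. */
static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	/* Throttle only swap I/O, and only for targets that opted in. */
	return unlikely(ti->limit_swap_bios) && unlikely(bio->bi_opf & REQ_SWAP);
}

static void __map_bio(struct bio *clone)
{
	/* ... */
	if (unlikely(swap_bios_limit(ti, clone))) {
		/*
		 * Counting semaphore caps outstanding swap bios: sleeps
		 * here once the cap is reached, and is released by up()
		 * when a throttled swap bio completes.
		 */
		down(&md->swap_bios_semaphore);
	}
	/* ... map and submit the clone ... */
}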
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
@@ -72,7 +72,9 @@ struct dm_crypt_io {
 	struct crypt_config *cc;
 	struct bio *base_bio;
 	u8 *integrity_metadata;
-	bool integrity_metadata_from_pool;
+	bool integrity_metadata_from_pool:1;
+	bool in_tasklet:1;
+
 	struct work_struct work;
 	struct tasklet_struct tasklet;
 
@@ -1730,6 +1732,7 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
 	io->ctx.r.req = NULL;
 	io->integrity_metadata = NULL;
 	io->integrity_metadata_from_pool = false;
+	io->in_tasklet = false;
 	atomic_set(&io->io_pending, 0);
 }
 
@@ -1776,14 +1779,13 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	 * our tasklet. In this case we need to delay bio_endio()
 	 * execution to after the tasklet is done and dequeued.
 	 */
-	if (tasklet_trylock(&io->tasklet)) {
-		tasklet_unlock(&io->tasklet);
-		bio_endio(base_bio);
+	if (io->in_tasklet) {
+		INIT_WORK(&io->work, kcryptd_io_bio_endio);
+		queue_work(cc->io_queue, &io->work);
 		return;
 	}
 
-	INIT_WORK(&io->work, kcryptd_io_bio_endio);
-	queue_work(cc->io_queue, &io->work);
+	bio_endio(base_bio);
 }
 
 /*
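Why the old code was broken: crypt_dec_pending() called tasklet_trylock() on io->tasklet unconditionally, but the tasklet is only tasklet_init()'ed in kcryptd_queue_crypt() when the request arrives with irqs disabled. On the common non-irq path the trylock therefore inspected uninitialized memory, and every completion was punted to a workqueue even when no tasklet was involved. The fix records the decision explicitly. A minimal sketch of the resulting invariant, condensed from the hunks above (not a compilable unit):

struct dm_crypt_io {
	/* ... */
	bool in_tasklet:1;		/* set before tasklet_init()/tasklet_schedule() */
	struct tasklet_struct tasklet;	/* contents valid only if in_tasklet */
};

/* completion path */
if (io->in_tasklet) {
	/* bio_endio() must not run inside the still-active tasklet; defer it */
	INIT_WORK(&io->work, kcryptd_io_bio_endio);
	queue_work(cc->io_queue, &io->work);
} else {
	bio_endio(io->base_bio);	/* common path now completes inline */
}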
@@ -1936,6 +1938,7 @@ pop_from_list:
 		io = crypt_io_from_node(rb_first(&write_tree));
 		rb_erase(&io->rb_node, &write_tree);
 		kcryptd_io_write(io);
+		cond_resched();
 	} while (!RB_EMPTY_ROOT(&write_tree));
 	blk_finish_plug(&plug);
 }
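This one-liner addresses the soft-lockup reports: dmcrypt_write() can drain a large red-black tree of pending writes without ever sleeping, and on non-preemptible kernels a loop that monopolizes a CPU for longer than the watchdog threshold (tens of seconds by default) trips the soft-lockup watchdog. cond_resched() inserts a voluntary preemption point on each iteration. The general pattern, as a sketch (more_work() and do_one_item() are placeholders, not kernel APIs):

/* Generic long-running kernel loop; placeholder helpers, not real APIs. */
while (more_work()) {
	do_one_item();
	cond_resched();	/* yield the CPU if another task needs it;
			 * effectively a no-op otherwise */
}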
@@ -2230,6 +2233,7 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 	 * it is being executed with irqs disabled.
 	 */
 	if (in_hardirq() || irqs_disabled()) {
+		io->in_tasklet = true;
 		tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
 		tasklet_schedule(&io->tasklet);
 		return;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
@@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_stat_shared *shared)
 		atomic_read(&shared->in_flight[WRITE]);
 }
 
-void dm_stats_init(struct dm_stats *stats)
+int dm_stats_init(struct dm_stats *stats)
 {
 	int cpu;
 	struct dm_stats_last_position *last;
@@ -197,11 +197,16 @@ void dm_stats_init(struct dm_stats *stats)
 	INIT_LIST_HEAD(&stats->list);
 	stats->precise_timestamps = false;
 	stats->last = alloc_percpu(struct dm_stats_last_position);
+	if (!stats->last)
+		return -ENOMEM;
+
 	for_each_possible_cpu(cpu) {
 		last = per_cpu_ptr(stats->last, cpu);
 		last->last_sector = (sector_t)ULLONG_MAX;
 		last->last_rw = UINT_MAX;
 	}
+
+	return 0;
 }
 
 void dm_stats_cleanup(struct dm_stats *stats)
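alloc_percpu() returns NULL on failure like any other allocator, so dm_stats_init() now reports -ENOMEM instead of silently leaving stats->last unset and letting a later per-cpu dereference crash. The release side already exists in dm_stats_cleanup(). A sketch of the resulting pairing (bodies condensed; the cleanup side is recalled from the source rather than shown in this diff):

int dm_stats_init(struct dm_stats *stats)
{
	stats->last = alloc_percpu(struct dm_stats_last_position);
	if (!stats->last)
		return -ENOMEM;	/* callers must now check the result */
	/* ... per-cpu initialization of stats->last ... */
	return 0;
}

void dm_stats_cleanup(struct dm_stats *stats)
{
	/* ... */
	free_percpu(stats->last);	/* NULL-safe, like kfree() */
}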
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
@@ -21,7 +21,7 @@ struct dm_stats_aux {
 	unsigned long long duration_ns;
 };
 
-void dm_stats_init(struct dm_stats *st);
+int dm_stats_init(struct dm_stats *st);
 void dm_stats_cleanup(struct dm_stats *st);
 
 struct mapped_device;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
@@ -3369,6 +3369,7 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	pt->low_water_blocks = low_water_blocks;
 	pt->adjusted_pf = pt->requested_pf = pf;
 	ti->num_flush_bios = 1;
+	ti->limit_swap_bios = true;
 
 	/*
 	 * Only need to enable discards if the pool should pass
@@ -4249,6 +4250,7 @@ static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad;
 
 	ti->num_flush_bios = 1;
+	ti->limit_swap_bios = true;
 	ti->flush_supported = true;
 	ti->accounts_remapped_io = true;
 	ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
@@ -2097,7 +2097,9 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->pending_io)
 		goto bad;
 
-	dm_stats_init(&md->stats);
+	r = dm_stats_init(&md->stats);
+	if (r < 0)
+		goto bad;
 
 	/* Populate the mapping, nobody knows we exist yet */
 	spin_lock(&_minor_lock);