Merge tag 'dm-4.8-fixes-4' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - a stable fix in both DM crypt and DM log-writes for too large bios
   (as generated by bcache)

 - two other stable fixes for DM log-writes

 - a stable fix for a DM crypt bug that could result in freeing pointers
   from uninitialized memory in the tfm allocation error path

 - a DM bufio cleanup to discontinue using create_singlethread_workqueue()

* tag 'dm-4.8-fixes-4' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm bufio: remove use of deprecated create_singlethread_workqueue()
  dm crypt: fix free of bad values after tfm allocation failure
  dm crypt: fix error with too large bios
  dm log writes: fix check of kthread_run() return value
  dm log writes: fix bug with too large bios
  dm log writes: move IO accounting earlier to fix error path
Linus Torvalds 2016-09-03 17:29:58 -07:00
commit 28e68154c5
3 changed files with 14 additions and 7 deletions

drivers/md/dm-bufio.c

@@ -1879,7 +1879,7 @@ static int __init dm_bufio_init(void)
 	__cache_size_refresh();
 	mutex_unlock(&dm_bufio_clients_lock);
 
-	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
+	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
 	if (!dm_bufio_wq)
 		return -ENOMEM;
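
For background: create_singlethread_workqueue() is a deprecated legacy wrapper, and dm-bufio does not need strictly ordered execution, so a plain WQ_MEM_RECLAIM workqueue is enough; the flag keeps the forward-progress guarantee (a rescuer thread) that the old single-threaded queue provided implicitly. A minimal, self-contained sketch of the replacement pattern; the module and names (example_wq, example_work_fn) are hypothetical and not part of dm-bufio:

#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical module demonstrating the alloc_workqueue() pattern;
 * example_wq and example_work_fn are illustrative names. */
static struct workqueue_struct *example_wq;

static void example_work_fn(struct work_struct *work)
{
	pr_info("example work ran\n");
}
static DECLARE_WORK(example_work, example_work_fn);

static int __init example_init(void)
{
	/* WQ_MEM_RECLAIM keeps a rescuer thread so queued work can still
	 * make progress under memory pressure, as the old
	 * create_singlethread_workqueue() did implicitly. */
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	/* drains any pending work before tearing the queue down */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");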

drivers/md/dm-crypt.c

@@ -1453,7 +1453,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
 	unsigned i;
 	int err;
 
-	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
+	cc->tfms = kzalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
 			   GFP_KERNEL);
 	if (!cc->tfms)
 		return -ENOMEM;
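
Why the switch from kmalloc() to kzalloc() matters here: if allocating one of the skciphers fails part-way through, the cleanup path walks the whole tfms array and frees every slot that looks valid; with kmalloc() the never-filled slots hold uninitialized garbage that could be handed to crypto_free_skcipher(). A rough sketch of that allocate/clean-up pattern, with hypothetical names (example_alloc_tfms()/example_free_tfms() mirror the idea but are not the dm-crypt functions):

#include <linux/slab.h>
#include <linux/err.h>
#include <crypto/skcipher.h>

static void example_free_tfms(struct crypto_skcipher **tfms, unsigned count)
{
	unsigned i;

	if (!tfms)
		return;
	/* Safe only because the array came from kzalloc(): slots that were
	 * never reached are NULL rather than uninitialized garbage. */
	for (i = 0; i < count; i++)
		if (tfms[i] && !IS_ERR(tfms[i]))
			crypto_free_skcipher(tfms[i]);
	kfree(tfms);
}

static struct crypto_skcipher **example_alloc_tfms(unsigned count,
						   const char *ciphermode)
{
	struct crypto_skcipher **tfms;
	unsigned i;

	tfms = kzalloc(count * sizeof(*tfms), GFP_KERNEL);
	if (!tfms)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
		if (IS_ERR(tfms[i])) {
			int err = PTR_ERR(tfms[i]);

			/* Partial failure: free only what was allocated. */
			example_free_tfms(tfms, count);
			return ERR_PTR(err);
		}
	}
	return tfms;
}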
@@ -1924,6 +1924,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_REMAPPED;
 	}
 
+	/*
+	 * Check if bio is too large, split as needed.
+	 */
+	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
+	    bio_data_dir(bio) == WRITE)
+		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
+
 	io = dm_per_bio_data(bio, cc->per_bio_data_size);
 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 	io->ctx.req = (struct skcipher_request *)(io + 1);
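
The split threshold works out to 1 MiB per bio; a worked calculation, assuming the usual constants of that era rather than anything stated in the diff:

/*
 * Assumed constants: BIO_MAX_PAGES = 256, PAGE_SHIFT = 12 (4 KiB pages),
 * SECTOR_SHIFT = 9 (512-byte sectors).
 *
 *   BIO_MAX_PAGES << PAGE_SHIFT                   = 256 * 4096   = 1 MiB
 *   (BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT = 1048576 / 512 = 2048 sectors
 *
 * So an oversized write bio is accepted only up to 2048 sectors (1 MiB),
 * and DM core resubmits the remainder via dm_accept_partial_bio().
 */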

drivers/md/dm-log-writes.c

@@ -259,12 +259,12 @@ static int log_one_block(struct log_writes_c *lc,
 		goto out;
 	sector++;
 
-	bio = bio_alloc(GFP_KERNEL, block->vec_cnt);
+	atomic_inc(&lc->io_blocks);
+	bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
 	if (!bio) {
 		DMERR("Couldn't alloc log bio");
 		goto error;
 	}
-	atomic_inc(&lc->io_blocks);
 	bio->bi_iter.bi_size = 0;
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_bdev = lc->logdev->bdev;
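
The two changes here cap each bio_alloc() request at BIO_MAX_PAGES and bump the in-flight counter before the allocation so the error path's accounting stays balanced. A sketch of the capped-and-chained write pattern, with hypothetical names (example_write_vecs() and friends are not part of dm-log-writes, and the sketch keeps its own simpler bookkeeping rather than reproducing the lc->io_blocks handling):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Completion callback for the sketch: drop the in-flight count and the bio. */
static void example_end_io(struct bio *bio)
{
	atomic_t *inflight = bio->bi_private;

	atomic_dec(inflight);
	bio_put(bio);
}

/* Allocate a write bio capped at BIO_MAX_PAGES vecs. */
static struct bio *example_alloc_bio(struct block_device *bdev, sector_t sector,
				     int vecs_left, atomic_t *inflight)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, min(vecs_left, BIO_MAX_PAGES));

	if (!bio)
		return NULL;
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_end_io = example_end_io;
	bio->bi_private = inflight;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	return bio;
}

/* Write an arbitrary number of bio_vecs, chaining extra bios as needed. */
static int example_write_vecs(struct block_device *bdev, sector_t sector,
			      struct bio_vec *vecs, int vec_cnt,
			      atomic_t *inflight)
{
	struct bio *bio;
	int i;

	atomic_inc(inflight);		/* account before the bio can complete */
	bio = example_alloc_bio(bdev, sector, vec_cnt, inflight);
	if (!bio) {
		atomic_dec(inflight);
		return -ENOMEM;
	}

	for (i = 0; i < vec_cnt; i++) {
		if (bio_add_page(bio, vecs[i].bv_page, vecs[i].bv_len,
				 vecs[i].bv_offset) != vecs[i].bv_len) {
			/* Current bio is full: submit it and start another. */
			sector += bio_sectors(bio);
			atomic_inc(inflight);
			submit_bio(bio);
			bio = example_alloc_bio(bdev, sector, vec_cnt - i,
						inflight);
			if (!bio) {
				atomic_dec(inflight);
				return -ENOMEM;
			}
			i--;	/* retry this vec against the new bio */
		}
	}
	submit_bio(bio);
	return 0;
}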
@@ -282,7 +282,7 @@ static int log_one_block(struct log_writes_c *lc,
 		if (ret != block->vecs[i].bv_len) {
 			atomic_inc(&lc->io_blocks);
 			submit_bio(bio);
-			bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
+			bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
 			if (!bio) {
 				DMERR("Couldn't alloc log bio");
 				goto error;
@@ -459,9 +459,9 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad;
 	}
 
-	ret = -EINVAL;
 	lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
-	if (!lc->log_kthread) {
+	if (IS_ERR(lc->log_kthread)) {
+		ret = PTR_ERR(lc->log_kthread);
 		ti->error = "Couldn't alloc kthread";
 		dm_put_device(ti, lc->dev);
 		dm_put_device(ti, lc->logdev);
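
kthread_run() never returns NULL on failure; it returns an ERR_PTR()-encoded errno, so the old NULL test could not catch a failed thread creation. A minimal sketch of the corrected pattern; example_thread_fn and example_start_thread are hypothetical names, not dm-log-writes code:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/err.h>

/* Hypothetical kernel thread that just idles until asked to stop. */
static int example_thread_fn(void *data)
{
	while (!kthread_should_stop())
		msleep_interruptible(100);
	return 0;
}

static int example_start_thread(struct task_struct **task)
{
	/* kthread_run() returns either a valid task_struct pointer or an
	 * ERR_PTR()-encoded errno; it never returns NULL, so failure must
	 * be detected with IS_ERR() and reported with PTR_ERR(). */
	*task = kthread_run(example_thread_fn, NULL, "example-thread");
	if (IS_ERR(*task)) {
		int err = PTR_ERR(*task);

		*task = NULL;
		return err;
	}
	return 0;
}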