diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 126ed21e6b17..357387fa10ca 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -36,7 +36,6 @@ struct dm_crypt_io {
 	struct work_struct work;
 	atomic_t pending;
 	int error;
-	int post_process;
 };
 
 /*
@@ -80,7 +79,8 @@ struct crypt_config {
 	mempool_t *page_pool;
 	struct bio_set *bs;
 
-	struct workqueue_struct *queue;
+	struct workqueue_struct *io_queue;
+	struct workqueue_struct *crypt_queue;
 	/*
 	 * crypto related data
 	 */
@@ -476,19 +476,36 @@ static void dec_pending(struct dm_crypt_io *io, int error)
 }
 
 /*
- * kcryptd:
+ * kcryptd/kcryptd_io:
  *
  * Needed because it would be very unwise to do decryption in an
  * interrupt context.
+ *
+ * kcryptd performs the actual encryption or decryption.
+ *
+ * kcryptd_io performs the IO submission.
+ *
+ * They must be separated as otherwise the final stages could be
+ * starved by new requests which can block in the first stages due
+ * to memory allocation.
  */
 static void kcryptd_do_work(struct work_struct *work);
+static void kcryptd_do_crypt(struct work_struct *work);
 
 static void kcryptd_queue_io(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 
 	INIT_WORK(&io->work, kcryptd_do_work);
-	queue_work(cc->queue, &io->work);
+	queue_work(cc->io_queue, &io->work);
+}
+
+static void kcryptd_queue_crypt(struct dm_crypt_io *io)
+{
+	struct crypt_config *cc = io->target->private;
+
+	INIT_WORK(&io->work, kcryptd_do_crypt);
+	queue_work(cc->crypt_queue, &io->work);
 }
 
 static void crypt_endio(struct bio *clone, int error)
@@ -511,8 +528,7 @@ static void crypt_endio(struct bio *clone, int error)
 	}
 
 	bio_put(clone);
-	io->post_process = 1;
-	kcryptd_queue_io(io);
+	kcryptd_queue_crypt(io);
 	return;
 
 out:
@@ -634,10 +650,16 @@ static void kcryptd_do_work(struct work_struct *work)
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
-	if (io->post_process)
-		process_read_endio(io);
-	else if (bio_data_dir(io->base_bio) == READ)
+	if (bio_data_dir(io->base_bio) == READ)
 		process_read(io);
+}
+
+static void kcryptd_do_crypt(struct work_struct *work)
+{
+	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+
+	if (bio_data_dir(io->base_bio) == READ)
+		process_read_endio(io);
 	else
 		process_write(io);
 }
@@ -870,16 +892,24 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	} else
 		cc->iv_mode = NULL;
 
-	cc->queue = create_singlethread_workqueue("kcryptd");
-	if (!cc->queue) {
+	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
+	if (!cc->io_queue) {
+		ti->error = "Couldn't create kcryptd io queue";
+		goto bad_io_queue;
+	}
+
+	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
+	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
-		goto bad_queue;
+		goto bad_crypt_queue;
 	}
 
 	ti->private = cc;
 	return 0;
 
-bad_queue:
+bad_crypt_queue:
+	destroy_workqueue(cc->io_queue);
+bad_io_queue:
 	kfree(cc->iv_mode);
 bad_iv_mode:
 	dm_put_device(ti, cc->dev);
@@ -905,7 +935,8 @@ static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = (struct crypt_config *) ti->private;
 
-	destroy_workqueue(cc->queue);
+	destroy_workqueue(cc->io_queue);
+	destroy_workqueue(cc->crypt_queue);
 
 	bioset_free(cc->bs);
 	mempool_destroy(cc->page_pool);
@@ -931,9 +962,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	io->target = ti;
 	io->base_bio = bio;
-	io->error = io->post_process = 0;
+	io->error = 0;
 	atomic_set(&io->pending, 0);
-	kcryptd_queue_io(io);
+
+	if (bio_data_dir(io->base_bio) == READ)
+		kcryptd_queue_io(io);
+	else
+		kcryptd_queue_crypt(io);
 
 	return DM_MAPIO_SUBMITTED;
 }
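
For reviewers unfamiliar with the pattern, here is a minimal sketch of the
same two-stage workqueue hand-off as a standalone module. All names in it
(pipeline_io, stage1/stage2) are illustrative and not part of this patch;
the point is that each stage owns its own single-threaded workqueue, so the
final stage keeps making progress even while the first stage blocks, e.g.
in memory allocation.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

/* Hypothetical per-request state; dm-crypt's equivalent is struct dm_crypt_io. */
struct pipeline_io {
	struct work_struct work;
	int seq;
};

static struct workqueue_struct *stage1_wq;	/* analogous to io_queue */
static struct workqueue_struct *stage2_wq;	/* analogous to crypt_queue */

static void stage2_fn(struct work_struct *work)
{
	struct pipeline_io *io = container_of(work, struct pipeline_io, work);

	printk(KERN_INFO "stage2: finishing request %d\n", io->seq);
	kfree(io);
}

static void stage1_fn(struct work_struct *work)
{
	struct pipeline_io *io = container_of(work, struct pipeline_io, work);

	printk(KERN_INFO "stage1: submitted request %d\n", io->seq);
	/*
	 * Re-initialise the (no longer pending) work item and hand it to
	 * the second queue, much as crypt_endio() above requeues io->work
	 * via kcryptd_queue_crypt().
	 */
	INIT_WORK(&io->work, stage2_fn);
	queue_work(stage2_wq, &io->work);
}

static int __init pipeline_init(void)
{
	struct pipeline_io *io;

	stage1_wq = create_singlethread_workqueue("stage1");
	if (!stage1_wq)
		return -ENOMEM;

	stage2_wq = create_singlethread_workqueue("stage2");
	if (!stage2_wq) {
		destroy_workqueue(stage1_wq);
		return -ENOMEM;
	}

	io = kmalloc(sizeof(*io), GFP_KERNEL);
	if (!io) {
		destroy_workqueue(stage2_wq);
		destroy_workqueue(stage1_wq);
		return -ENOMEM;
	}
	io->seq = 1;
	INIT_WORK(&io->work, stage1_fn);
	queue_work(stage1_wq, &io->work);

	return 0;
}

static void __exit pipeline_exit(void)
{
	/*
	 * destroy_workqueue() flushes pending work; destroying stage 1
	 * first lets anything it queued reach stage 2 before that queue
	 * is torn down.
	 */
	destroy_workqueue(stage1_wq);
	destroy_workqueue(stage2_wq);
}

module_init(pipeline_init);
module_exit(pipeline_exit);
MODULE_LICENSE("GPL");

With a single shared queue, a burst of new first-stage requests queued ahead
of completion work could delay it indefinitely; with two queues, completion
work competes only with other completion work.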