// SPDX-License-Identifier: GPL-2.0-or-later
 /* Algorithms supported by virtio crypto device
  *
  * Authors: Gonglei <arei.gonglei@huawei.com>
  *
  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
  */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"


struct virtio_crypto_skcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_skcipher *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};
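
/*
 * Per-request state, carved out of the skcipher request context area
 * (sized via crypto_skcipher_set_reqsize() in virtio_crypto_skcipher_init()).
 */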
struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_skcipher_ctx *skcipher_ctx;
	struct skcipher_request *skcipher_req;
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};

struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct skcipher_alg algo;
};
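
/*
 * active_devs counts how many virtio crypto devices currently back an
 * algorithm: the skcipher is registered with the crypto API when the
 * first capable device arrives and unregistered when the last one goes.
 */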

/*
 * The algs_lock protects the active_devs counters above and
 * crypto algorithm registration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err);
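
/*
 * Completion callback, invoked from the data virtqueue's used-buffer
 * notification: map the device status to an errno and finalize the request.
 */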
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct skcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->skcipher_req;
		virtio_crypto_skcipher_finalize_req(vc_sym_req,
						    ablk_req, error);
	}
}

static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}

static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
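
/*
 * Create one session on the device's control virtqueue. The request is
 * three scatterlist entries: the ctrl header (with session parameters)
 * and the key go device-readable, and the session_input carrying the
 * session id and status comes back device-writable.
 * virtio_crypto_ctrl_vq_request() waits for the host to complete the
 * request instead of busy polling the virtqueue.
 */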
static int virtio_crypto_alg_skcipher_init_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct virtio_crypto_session_input *input;
	struct virtio_crypto_sym_create_session_req *sym_create_session;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the key.
	 */
	uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req) {
		err = -ENOMEM;
		goto out;
	}

	/* Fill in the ctrl header */
	ctrl = &vc_ctrl_req->ctrl;
	ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	ctrl->header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	ctrl->header.queue_id = 0;

	input = &vc_ctrl_req->input;
	input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Fill in the cipher's parameters */
	sym_create_session = &ctrl->u.sym_create_session;
	sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	sym_create_session->u.cipher.para.algo = ctrl->header.algo;
	sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
	sym_create_session->u.cipher.para.op = cpu_to_le32(op);

	sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, input, sizeof(*input));
	sgs[num_out + num_in++] = &inhdr;
	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
	if (err < 0)
		goto out;

	if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Create session failed status: %u\n",
			le32_to_cpu(input->status));
		err = -EINVAL;
		goto out;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
	else
		ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);

	err = 0;
out:
	kfree(vc_ctrl_req);
	kfree_sensitive(cipher_key);
	return err;
}
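
/*
 * Tear down one session. Only a status byte comes back from the device,
 * so the request is two scatterlist entries: ctrl header out, status in.
 */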
static int virtio_crypto_alg_skcipher_close_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct virtio_crypto_inhdr *ctrl_status;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req)
		return -ENOMEM;

	ctrl_status = &vc_ctrl_req->ctrl_status;
	ctrl_status->status = VIRTIO_CRYPTO_ERR;
	/* Fill in the ctrl header */
	ctrl = &vc_ctrl_req->ctrl;
	ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	ctrl->header.queue_id = 0;

	destroy_session = &ctrl->u.destroy_session;

	if (encrypt)
		destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status back */
	sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
	sgs[num_out + num_in++] = &status_sg;
	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
	if (err < 0)
		goto out;

	if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
			ctrl_status->status, destroy_session->session_id);

		err = -EINVAL;
		goto out;
	}

	err = 0;
out:
	kfree(vc_ctrl_req);
	return err;
}
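
/*
 * The virtio crypto device uses separate sessions for the two directions,
 * so each key yields both an encryption and a decryption session.
 */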
static int virtio_crypto_alg_skcipher_init_sessions(
		struct virtio_crypto_skcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		return -EINVAL;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		return -EINVAL;

	/* Create encryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;
}
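
/*
 * setkey also binds the tfm to a backing device: the first key picks a
 * device near the current NUMA node; a later key (rekeying) keeps the
 * device but replaces both sessions.
 */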
/* Note: kernel crypto API realization */
static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
					 const uint8_t *key,
					 unsigned int keylen)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				      virtcrypto_get_dev_node(node,
				      VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions */
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		virtio_crypto_alg_skcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}
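
/*
 * Build and queue one symmetric data request: translate the skcipher
 * request into a virtio_crypto_op_data_req plus a scatterlist chain of
 * outhdr, IV and source buffers (device-readable), followed by the
 * destination buffers and status byte (device-writable).
 */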
static int
__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct skcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;
	struct scatterlist *sg;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (src_nents < 0) {
		pr_err("Invalid number of src SG.\n");
		return src_nents;
	}

	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
			src_nents, dst_nents);

	/* Why 3? outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->cryptlen);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	dst_len = min_t(unsigned int, req->cryptlen, dst_len);
	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
			req->cryptlen, dst_len);

	if (unlikely(req->cryptlen + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/* IV */

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
			dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->iv, ivsize);
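	/*
	 * For CBC decryption the output IV expected in req->iv on
	 * completion is the last ciphertext block, which an in-place
	 * operation would overwrite with plaintext, so stash it from
	 * req->src before submitting the request.
	 */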
	if (!vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->src,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);

	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
		sgs[num_out++] = sg;

	/* Destination data */
	for (sg = req->dst; sg; sg = sg_next(sg))
		sgs[num_out + num_in++] = sg;

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kfree_sensitive(iv);
free:
	kfree_sensitive(req_data);
	kfree(sgs);
	return err;
}
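
/*
 * Entry points from the crypto API. CBC requires whole blocks, so a
 * cryptlen that isn't a multiple of AES_BLOCK_SIZE is rejected up front;
 * the actual work is deferred to the per-queue crypto engine.
 */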
static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
	ctx->tfm = tfm;

	ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_skcipher_close_session(ctx, 1);
	virtio_crypto_alg_skcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}
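
/*
 * Called by the crypto engine to run one queued request; a successful
 * submission completes asynchronously via the dataq callback.
 */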
int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}
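
/*
 * Finalize on completion: for encryption, copy the last ciphertext block
 * from the destination into req->iv as the output IV for CBC chaining,
 * then scrub the IV copy and hand the result back to the engine.
 */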
static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err)
{
	if (vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);
	kfree_sensitive(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);

	crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
					 req, err);
}

static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "virtio_crypto_aes_cbc",
		.base.cra_priority	= 150,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct virtio_crypto_skcipher_ctx),
		.base.cra_module	= THIS_MODULE,
		.init			= virtio_crypto_skcipher_init,
		.exit			= virtio_crypto_skcipher_exit,
		.setkey			= virtio_crypto_skcipher_setkey,
		.decrypt		= virtio_crypto_skcipher_decrypt,
		.encrypt		= virtio_crypto_skcipher_encrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
} };
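
/*
 * Register/unregister run once per device probe/remove; active_devs
 * ensures each algorithm stays registered with the crypto API exactly
 * as long as at least one capable device is present.
 */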
int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 0) {
			ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
			 virtio_crypto_algs[i].algo.base.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (virtio_crypto_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 1)
			crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);

		virtio_crypto_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}