crypto: hisilicon/sec - add fallback tfm support for XTS mode
Add fallback tfm support to the hisi_sec driver. Because the hardware does not support a 192-bit key length in XTS mode, the driver needs to set up a soft fallback skcipher tfm for the user. Signed-off-by: Kai Ye <yekai13@huawei.com> Signed-off-by: Longfang Liu <liulongfang@huawei.com> Reported-by: kernel test robot <lkp@intel.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent
7b44c0eecd
commit
5652d55a76
|
@ -97,6 +97,10 @@ struct sec_cipher_ctx {
|
||||||
u8 c_mode;
|
u8 c_mode;
|
||||||
u8 c_alg;
|
u8 c_alg;
|
||||||
u8 c_key_len;
|
u8 c_key_len;
|
||||||
|
|
||||||
|
/* add software support */
|
||||||
|
bool fallback;
|
||||||
|
struct crypto_sync_skcipher *fbtfm;
|
||||||
};
|
};
|
||||||
|
|
||||||
/* SEC queue context which defines queue's relatives */
|
/* SEC queue context which defines queue's relatives */
|
||||||
|
|
|
@ -21,6 +21,7 @@
|
||||||
|
|
||||||
#define SEC_PRIORITY 4001
|
#define SEC_PRIORITY 4001
|
||||||
#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
|
#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
|
||||||
|
#define SEC_XTS_MID_KEY_SIZE (3 * AES_MIN_KEY_SIZE)
|
||||||
#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
|
#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
|
||||||
#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
|
#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
|
||||||
#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
|
#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
|
||||||
|
@ -81,6 +82,7 @@
|
||||||
#define MAX_INPUT_DATA_LEN 0xFFFE00
|
#define MAX_INPUT_DATA_LEN 0xFFFE00
|
||||||
#define BITS_MASK 0xFF
|
#define BITS_MASK 0xFF
|
||||||
#define BYTE_BITS 0x8
|
#define BYTE_BITS 0x8
|
||||||
|
#define SEC_XTS_NAME_SZ 0x3
|
||||||
|
|
||||||
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
|
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
|
||||||
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
|
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
|
||||||
|
@ -598,6 +600,26 @@ static void sec_auth_uninit(struct sec_ctx *ctx)
|
||||||
a_ctx->a_key, a_ctx->a_key_dma);
|
a_ctx->a_key, a_ctx->a_key_dma);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
|
||||||
|
{
|
||||||
|
const char *alg = crypto_tfm_alg_name(&tfm->base);
|
||||||
|
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||||
|
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
|
||||||
|
|
||||||
|
c_ctx->fallback = false;
|
||||||
|
if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
|
||||||
|
CRYPTO_ALG_NEED_FALLBACK);
|
||||||
|
if (IS_ERR(c_ctx->fbtfm)) {
|
||||||
|
pr_err("failed to alloc fallback tfm!\n");
|
||||||
|
return PTR_ERR(c_ctx->fbtfm);
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int sec_skcipher_init(struct crypto_skcipher *tfm)
|
static int sec_skcipher_init(struct crypto_skcipher *tfm)
|
||||||
{
|
{
|
||||||
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
|
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||||
|
@ -619,8 +641,14 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
|
||||||
if (ret)
|
if (ret)
|
||||||
goto err_cipher_init;
|
goto err_cipher_init;
|
||||||
|
|
||||||
|
ret = sec_skcipher_fbtfm_init(tfm);
|
||||||
|
if (ret)
|
||||||
|
goto err_fbtfm_init;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
err_fbtfm_init:
|
||||||
|
sec_cipher_uninit(ctx);
|
||||||
err_cipher_init:
|
err_cipher_init:
|
||||||
sec_ctx_base_uninit(ctx);
|
sec_ctx_base_uninit(ctx);
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -630,6 +658,9 @@ static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
|
||||||
{
|
{
|
||||||
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
|
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||||
|
|
||||||
|
if (ctx->c_ctx.fbtfm)
|
||||||
|
crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);
|
||||||
|
|
||||||
sec_cipher_uninit(ctx);
|
sec_cipher_uninit(ctx);
|
||||||
sec_ctx_base_uninit(ctx);
|
sec_ctx_base_uninit(ctx);
|
||||||
}
|
}
|
||||||
|
@ -669,6 +700,9 @@ static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
|
||||||
case SEC_XTS_MIN_KEY_SIZE:
|
case SEC_XTS_MIN_KEY_SIZE:
|
||||||
c_ctx->c_key_len = SEC_CKEY_128BIT;
|
c_ctx->c_key_len = SEC_CKEY_128BIT;
|
||||||
break;
|
break;
|
||||||
|
case SEC_XTS_MID_KEY_SIZE:
|
||||||
|
c_ctx->fallback = true;
|
||||||
|
break;
|
||||||
case SEC_XTS_MAX_KEY_SIZE:
|
case SEC_XTS_MAX_KEY_SIZE:
|
||||||
c_ctx->c_key_len = SEC_CKEY_256BIT;
|
c_ctx->c_key_len = SEC_CKEY_256BIT;
|
||||||
break;
|
break;
|
||||||
|
@ -740,7 +774,13 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||||
}
|
}
|
||||||
|
|
||||||
memcpy(c_ctx->c_key, key, keylen);
|
memcpy(c_ctx->c_key, key, keylen);
|
||||||
|
if (c_ctx->fallback) {
|
||||||
|
ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
|
||||||
|
if (ret) {
|
||||||
|
dev_err(dev, "failed to set fallback skcipher key!\n");
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1709,6 +1749,37 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
|
||||||
|
struct skcipher_request *sreq, bool encrypt)
|
||||||
|
{
|
||||||
|
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
|
||||||
|
struct device *dev = ctx->dev;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
|
||||||
|
|
||||||
|
if (!c_ctx->fbtfm) {
|
||||||
|
dev_err(dev, "failed to check fallback tfm\n");
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);
|
||||||
|
|
||||||
|
/* software need sync mode to do crypto */
|
||||||
|
skcipher_request_set_callback(subreq, sreq->base.flags,
|
||||||
|
NULL, NULL);
|
||||||
|
skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
|
||||||
|
sreq->cryptlen, sreq->iv);
|
||||||
|
if (encrypt)
|
||||||
|
ret = crypto_skcipher_encrypt(subreq);
|
||||||
|
else
|
||||||
|
ret = crypto_skcipher_decrypt(subreq);
|
||||||
|
|
||||||
|
skcipher_request_zero(subreq);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
|
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
|
||||||
{
|
{
|
||||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
|
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
|
||||||
|
@ -1716,8 +1787,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
|
||||||
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
|
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (!sk_req->cryptlen)
|
if (!sk_req->cryptlen) {
|
||||||
|
if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
|
||||||
|
return -EINVAL;
|
||||||
return 0;
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
req->flag = sk_req->base.flags;
|
req->flag = sk_req->base.flags;
|
||||||
req->c_req.sk_req = sk_req;
|
req->c_req.sk_req = sk_req;
|
||||||
|
@ -1728,6 +1802,9 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
|
||||||
if (unlikely(ret))
|
if (unlikely(ret))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (unlikely(ctx->c_ctx.fallback))
|
||||||
|
return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
|
||||||
|
|
||||||
return ctx->req_op->process(ctx, req);
|
return ctx->req_op->process(ctx, req);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1748,7 +1825,9 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
|
||||||
.cra_name = sec_cra_name,\
|
.cra_name = sec_cra_name,\
|
||||||
.cra_driver_name = "hisi_sec_"sec_cra_name,\
|
.cra_driver_name = "hisi_sec_"sec_cra_name,\
|
||||||
.cra_priority = SEC_PRIORITY,\
|
.cra_priority = SEC_PRIORITY,\
|
||||||
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
|
.cra_flags = CRYPTO_ALG_ASYNC |\
|
||||||
|
CRYPTO_ALG_ALLOCATES_MEMORY |\
|
||||||
|
CRYPTO_ALG_NEED_FALLBACK,\
|
||||||
.cra_blocksize = blk_size,\
|
.cra_blocksize = blk_size,\
|
||||||
.cra_ctxsize = sizeof(struct sec_ctx),\
|
.cra_ctxsize = sizeof(struct sec_ctx),\
|
||||||
.cra_module = THIS_MODULE,\
|
.cra_module = THIS_MODULE,\
|
||||||
|
|
Loading…
Reference in New Issue