// SPDX-License-Identifier: GPL-2.0-only
/*
 * aes-ce-glue.c - wrapper code for ARMv8 AES
 *
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/module.h>
#include <crypto/xts.h>

MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

/* defined in aes-ce-core.S */
asmlinkage u32 ce_aes_sub(u32 input);
asmlinkage void ce_aes_invert(void *dst, void *src);

asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks);
asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks);

asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks, u8 iv[]);
asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks, u8 iv[]);
asmlinkage void ce_aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
				       int rounds, int bytes, u8 const iv[]);
asmlinkage void ce_aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
				       int rounds, int bytes, u8 const iv[]);

asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks, u8 ctr[]);

asmlinkage void ce_aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
				   int rounds, int bytes, u8 iv[],
				   u32 const rk2[], int first);
asmlinkage void ce_aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
				   int rounds, int bytes, u8 iv[],
				   u32 const rk2[], int first);

struct aes_block {
	u8 b[AES_BLOCK_SIZE];
};

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
	 */
	return 6 + ctx->key_length / 4;
}

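/*
 * Worked example for the formula above: a 16-byte (128-bit) key gives
 * 6 + 16/4 = 10 rounds, a 24-byte key gives 12 and a 32-byte key gives 14,
 * matching the AES specification.
 */
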
static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
			    unsigned int key_len)
{
	/*
	 * The AES key schedule round constants
	 */
	static u8 const rcon[] = {
		0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
	};

	u32 kwords = key_len / sizeof(u32);
	struct aes_block *key_enc, *key_dec;
	int i, j;

	if (key_len != AES_KEYSIZE_128 &&
	    key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256)
		return -EINVAL;

	ctx->key_length = key_len;
	for (i = 0; i < kwords; i++)
		ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));

	kernel_neon_begin();
	for (i = 0; i < sizeof(rcon); i++) {
		u32 *rki = ctx->key_enc + (i * kwords);
		u32 *rko = rki + kwords;

		rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
		rko[0] = rko[0] ^ rki[0] ^ rcon[i];
		rko[1] = rko[0] ^ rki[1];
		rko[2] = rko[1] ^ rki[2];
		rko[3] = rko[2] ^ rki[3];

		if (key_len == AES_KEYSIZE_192) {
			if (i >= 7)
				break;
			rko[4] = rko[3] ^ rki[4];
			rko[5] = rko[4] ^ rki[5];
		} else if (key_len == AES_KEYSIZE_256) {
			if (i >= 6)
				break;
			rko[4] = ce_aes_sub(rko[3]) ^ rki[4];
			rko[5] = rko[4] ^ rki[5];
			rko[6] = rko[5] ^ rki[6];
			rko[7] = rko[6] ^ rki[7];
		}
	}

	/*
	 * Generate the decryption keys for the Equivalent Inverse Cipher.
	 * This involves reversing the order of the round keys, and applying
	 * the Inverse Mix Columns transformation on all but the first and
	 * the last one.
	 */
	key_enc = (struct aes_block *)ctx->key_enc;
	key_dec = (struct aes_block *)ctx->key_dec;
	j = num_rounds(ctx);

	key_dec[0] = key_enc[j];
	for (i = 1, j--; j > 0; i++, j--)
		ce_aes_invert(key_dec + i, key_enc + j);
	key_dec[i] = key_enc[0];

	kernel_neon_end();
	return 0;
}

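/*
 * A sketch of how the expansion loop above maps onto the FIPS-197 key
 * schedule, for the 128-bit case (kwords == 4): each pass derives one new
 * round key from the previous one. The first word applies ce_aes_sub() (the
 * byte-wise AES S-box) and ror32(..., 8) (a word rotation) to the last word
 * of the previous round key and XORs in the round constant; the remaining
 * words are running XORs. The 192/256-bit branches derive six or eight words
 * per pass instead, with an extra ce_aes_sub() step in the 256-bit case.
 */
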
static int ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			 unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	return ce_aes_expandkey(ctx, in_key, key_len);
}

struct crypto_aes_xts_ctx {
	struct crypto_aes_ctx key1;
	struct crypto_aes_ctx __aligned(8) key2;
};

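/*
 * xts_set_key() below splits the supplied key in half: key1 is expanded for
 * encrypting/decrypting the data blocks, while key2 (of which only the
 * encryption schedule is ever used) encrypts the IV to produce the per-block
 * tweak, as in the standard XTS construction.
 */
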
static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, in_key, key_len);
	if (ret)
		return ret;

	ret = ce_aes_expandkey(&ctx->key1, in_key, key_len / 2);
	if (!ret)
		ret = ce_aes_expandkey(&ctx->key2, &in_key[key_len / 2],
				       key_len / 2);
	return ret;
}

static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int blocks;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key_enc, num_rounds(ctx), blocks);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int blocks;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key_dec, num_rounds(ctx), blocks);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int cbc_encrypt_walk(struct skcipher_request *req,
			    struct skcipher_walk *walk)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int blocks;
	int err = 0;

	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
				   ctx->key_enc, num_rounds(ctx), blocks,
				   walk->iv);
		kernel_neon_end();
		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;
	return cbc_encrypt_walk(req, &walk);
}

static int cbc_decrypt_walk(struct skcipher_request *req,
			    struct skcipher_walk *walk)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int blocks;
	int err = 0;

	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
				   ctx->key_dec, num_rounds(ctx), blocks,
				   walk->iv);
		kernel_neon_end();
		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;
	return cbc_decrypt_walk(req, &walk);
}

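/*
 * The two cts_cbc_* routines below implement CBC with ciphertext stealing:
 * all but the last two blocks are handled as plain CBC via the helpers above,
 * and the final full block plus the partial tail are handed to the
 * ce_aes_cbc_cts_* assembly in one go. For example, a 36-byte request gives
 * cbc_blocks = DIV_ROUND_UP(36, 16) - 2 = 1, so 16 bytes go through regular
 * CBC and the remaining 20 bytes through the ciphertext-stealing path.
 */
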
static int cts_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false) ?:
		      cbc_encrypt_walk(&subreq, &walk);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_neon_begin();
	ce_aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
			       ctx->key_enc, num_rounds(ctx), walk.nbytes,
			       walk.iv);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

static int cts_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false) ?:
		      cbc_decrypt_walk(&subreq, &walk);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_neon_begin();
	ce_aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
			       ctx->key_dec, num_rounds(ctx), walk.nbytes,
			       walk.iv);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

static int ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err, blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key_enc, num_rounds(ctx), blocks,
				   walk.iv);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	if (walk.nbytes) {
		u8 __aligned(8) tail[AES_BLOCK_SIZE];
		unsigned int nbytes = walk.nbytes;
		u8 *tdst = walk.dst.virt.addr;
		u8 *tsrc = walk.src.virt.addr;

		/*
		 * Tell ce_aes_ctr_encrypt() to process a tail block.
		 */
		blocks = -1;

		kernel_neon_begin();
		ce_aes_ctr_encrypt(tail, NULL, ctx->key_enc, num_rounds(ctx),
				   blocks, walk.iv);
		kernel_neon_end();
		crypto_xor_cpy(tdst, tsrc, tail, nbytes);
		err = skcipher_walk_done(&walk, 0);
	}
	return err;
}

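/*
 * Tail handling above: when fewer than AES_BLOCK_SIZE bytes remain, passing
 * blocks == -1 makes the assembly emit one block of keystream into 'tail',
 * and crypto_xor_cpy() then XORs just the remaining bytes of it into the
 * destination. For example, a 20-byte request is processed as one full block
 * in the main loop followed by a 4-byte tail.
 */
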
static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned long flags;

	/*
	 * Temporarily disable interrupts to avoid races where
	 * cachelines are evicted when the CPU is interrupted
	 * to do something else.
	 */
	local_irq_save(flags);
	aes_encrypt(ctx, dst, src);
	local_irq_restore(flags);
}

static int ctr_encrypt_sync(struct skcipher_request *req)
{
	if (!crypto_simd_usable())
		return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);

	return ctr_encrypt(req);
}

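/*
 * xts_encrypt()/xts_decrypt() below require at least one full block and
 * support lengths that are not a multiple of AES_BLOCK_SIZE via ciphertext
 * stealing. When the walk maps everything in one step, the partial tail is
 * simply included in the byte count passed to the assembly. Only when the
 * data arrives piecemeal (walk.nbytes < walk.total) is the request split:
 * the full blocks are processed first through a sub-request, and a final
 * call then covers the last full block plus the tail together.
 */
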
static int xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = num_rounds(&ctx->key1);
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
					      AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   xts_blocks * AES_BLOCK_SIZE,
					   req->iv);
		req = &subreq;
		err = skcipher_walk_virt(&walk, req, false);
	} else {
		tail = 0;
	}

	for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
		int nbytes = walk.nbytes;

		if (walk.nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		kernel_neon_begin();
		ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key1.key_enc, rounds, nbytes, walk.iv,
				   ctx->key2.key_enc, first);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	if (err || likely(!tail))
		return err;

	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
	if (req->dst != req->src)
		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
				   req->iv);

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;

	kernel_neon_begin();
	ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
			   ctx->key1.key_enc, rounds, walk.nbytes, walk.iv,
			   ctx->key2.key_enc, first);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = num_rounds(&ctx->key1);
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
					      AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   xts_blocks * AES_BLOCK_SIZE,
					   req->iv);
		req = &subreq;
		err = skcipher_walk_virt(&walk, req, false);
	} else {
		tail = 0;
	}

	for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
		int nbytes = walk.nbytes;

		if (walk.nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		kernel_neon_begin();
		ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key1.key_dec, rounds, nbytes, walk.iv,
				   ctx->key2.key_enc, first);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	if (err || likely(!tail))
		return err;

	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
	if (req->dst != req->src)
		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
				   req->iv);

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;

	kernel_neon_begin();
	ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
			   ctx->key1.key_dec, rounds, walk.nbytes, walk.iv,
			   ctx->key2.key_enc, first);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

static struct skcipher_alg aes_algs[] = { {
	.base.cra_name		= "__ecb(aes)",
	.base.cra_driver_name	= "__ecb-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= ecb_encrypt,
	.decrypt		= ecb_decrypt,
}, {
	.base.cra_name		= "__cbc(aes)",
	.base.cra_driver_name	= "__cbc-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= cbc_encrypt,
	.decrypt		= cbc_decrypt,
}, {
	.base.cra_name		= "__cts(cbc(aes))",
	.base.cra_driver_name	= "__cts-cbc-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.walksize		= 2 * AES_BLOCK_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= cts_cbc_encrypt,
	.decrypt		= cts_cbc_decrypt,
}, {
	.base.cra_name		= "__ctr(aes)",
	.base.cra_driver_name	= "__ctr-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= ctr_encrypt,
	.decrypt		= ctr_encrypt,
}, {
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-ce-sync",
	.base.cra_priority	= 300 - 1,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= ctr_encrypt_sync,
	.decrypt		= ctr_encrypt_sync,
}, {
	.base.cra_name		= "__xts(aes)",
	.base.cra_driver_name	= "__xts-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_xts_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.walksize		= 2 * AES_BLOCK_SIZE,
	.setkey			= xts_set_key,
	.encrypt		= xts_encrypt,
	.decrypt		= xts_decrypt,
} };

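/*
 * The algorithms marked CRYPTO_ALG_INTERNAL above (the "__" prefixed ones)
 * are not meant to be called directly. aes_init() below wraps each of them
 * in a simd helper, registering the user-visible "ecb(aes)", "cbc(aes)",
 * etc. variants, which defer to an asynchronous helper whenever the NEON
 * unit is not usable in the calling context.
 */
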
static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];

static void aes_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++)
		simd_skcipher_free(aes_simd_algs[i]);

	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
	struct simd_skcipher_alg *simd;
	const char *basename;
	const char *algname;
	const char *drvname;
	int err;
	int i;

	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
			continue;

		algname = aes_algs[i].base.cra_name + 2;
		drvname = aes_algs[i].base.cra_driver_name + 2;
		basename = aes_algs[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto unregister_simds;

		aes_simd_algs[i] = simd;
	}

	return 0;

unregister_simds:
	aes_exit();
	return err;
}

module_cpu_feature_match(AES, aes_init);
module_exit(aes_exit);