// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/sha3.h>
#include <crypto/skcipher.h>
#include <crypto/sm3.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"

struct safexcel_ahash_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 alg;
	u8 key_sz;
	bool cbcmac;
	bool do_fallback;
	bool fb_init_done;
	bool fb_do_setkey;

	__le32 ipad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];
	__le32 opad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];

	struct crypto_cipher *kaes;
	struct crypto_ahash *fback;
	struct crypto_shash *shpre;
	struct shash_desc *shdesc;
};

struct safexcel_ahash_req {
	bool last_req;
	bool finish;
	bool hmac;
	bool needs_inv;
	bool hmac_zlen;
	bool len_is_le;
	bool not_first;
	bool xcbcmac;

	int nents;
	dma_addr_t result_dma;

	u32 digest;

	u8 state_sz;    /* expected state size, only set once */
	u8 block_sz;    /* block size, only set once */
	u8 digest_sz;   /* output digest size, only set once */
	__le32 state[SHA3_512_BLOCK_SIZE /
		     sizeof(__le32)] __aligned(sizeof(__le32));

	u64 len;
	u64 processed;

	u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32));
	dma_addr_t cache_dma;
	unsigned int cache_sz;

	u8 cache_next[HASH_CACHE_SIZE] __aligned(sizeof(u32));
};

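/*
 * Note: req->len counts all bytes accepted by update() so far, while
 * req->processed counts the bytes already handed to the engine; their
 * difference is what still sits in the driver's cache (see the comment
 * in safexcel_ahash_cache() further below).
 */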
static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
{
	return req->len - req->processed;
}

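/*
 * Sketch of the token program built below, as implied by the code:
 * token[0] directs input_length bytes into the hash engine, token[1]
 * optionally inserts padding up to a 16-byte boundary for CBC-MAC, and
 * token[2] inserts result_length digest bytes into the output packet;
 * the unused slots are turned into NOPs.
 */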
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
				u32 input_length, u32 result_length,
				bool cbcmac)
{
	struct safexcel_token *token =
		(struct safexcel_token *)cdesc->control_data.token;

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = input_length;
	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

	input_length &= 15;
	if (unlikely(cbcmac && input_length)) {
		token[0].stat = 0;
		token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
		token[1].packet_length = 16 - input_length;
		token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
		token[1].instructions = EIP197_TOKEN_INS_TYPE_HASH;
	} else {
		token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
		eip197_noop_token(&token[1]);
	}

	token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
	token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
			EIP197_TOKEN_STAT_LAST_PACKET;
	token[2].packet_length = result_length;
	token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;

	eip197_noop_token(&token[3]);
}

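/*
 * Context record layout assumed by the code below: the (inner) digest
 * lives at the start of ctx->base.ctxr->data, an optional digest count
 * word follows at data[state_sz >> 2], and for true single-pass HMAC
 * the outer digest is stored at that same word offset instead.
 */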
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	u64 count = 0;

	cdesc->control_data.control0 = ctx->alg;
	cdesc->control_data.control1 = 0;

	/*
	 * Copy the input digest if needed, and setup the context
	 * fields. Do this now as we need it to setup the first command
	 * descriptor.
	 */
	if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM)) {
		if (req->xcbcmac)
			memcpy(ctx->base.ctxr->data, ctx->ipad, ctx->key_sz);
		else
			memcpy(ctx->base.ctxr->data, req->state, req->state_sz);

		if (!req->finish && req->xcbcmac)
			cdesc->control_data.control0 |=
				CONTEXT_CONTROL_DIGEST_XCM |
				CONTEXT_CONTROL_TYPE_HASH_OUT |
				CONTEXT_CONTROL_NO_FINISH_HASH |
				CONTEXT_CONTROL_SIZE(req->state_sz /
						     sizeof(u32));
		else
			cdesc->control_data.control0 |=
				CONTEXT_CONTROL_DIGEST_XCM |
				CONTEXT_CONTROL_TYPE_HASH_OUT |
				CONTEXT_CONTROL_SIZE(req->state_sz /
						     sizeof(u32));
		return;
	} else if (!req->processed) {
		/* First - and possibly only - block of basic hash only */
		if (req->finish)
			cdesc->control_data.control0 |= req->digest |
				CONTEXT_CONTROL_TYPE_HASH_OUT |
				CONTEXT_CONTROL_RESTART_HASH |
				/* ensure it's not 0! */
				CONTEXT_CONTROL_SIZE(1);
		else
			cdesc->control_data.control0 |= req->digest |
				CONTEXT_CONTROL_TYPE_HASH_OUT |
				CONTEXT_CONTROL_RESTART_HASH |
				CONTEXT_CONTROL_NO_FINISH_HASH |
				/* ensure it's not 0! */
				CONTEXT_CONTROL_SIZE(1);
		return;
	}

	/* Hash continuation or HMAC, setup (inner) digest from state */
	memcpy(ctx->base.ctxr->data, req->state, req->state_sz);

	if (req->finish) {
		/* Compute digest count for hash/HMAC finish operations */
		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
		    req->hmac_zlen || (req->processed != req->block_sz)) {
			count = req->processed / EIP197_COUNTER_BLOCK_SIZE;

			/* This is a hardware limitation, as the
			 * counter must fit into a u32. This represents
			 * a fairly big amount of input data, so we
			 * shouldn't see this.
			 */
			if (unlikely(count & 0xffffffff00000000ULL)) {
				dev_warn(priv->dev,
					 "Input data is too big\n");
				return;
			}
		}

		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
		    /* Special case: zero length HMAC */
		    req->hmac_zlen ||
		    /* PE HW < 4.4 cannot do HMAC continue, fake using hash */
		    (req->processed != req->block_sz)) {
			/* Basic hash continue operation, need digest + cnt */
			cdesc->control_data.control0 |=
				CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) |
				CONTEXT_CONTROL_TYPE_HASH_OUT |
				CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
			/* For zero-len HMAC, don't finalize, already padded! */
			if (req->hmac_zlen)
				cdesc->control_data.control0 |=
					CONTEXT_CONTROL_NO_FINISH_HASH;
			cdesc->control_data.control1 |=
				CONTEXT_CONTROL_DIGEST_CNT;
			ctx->base.ctxr->data[req->state_sz >> 2] =
				cpu_to_le32(count);
			req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;

			/* Clear zero-length HMAC flag for next operation! */
			req->hmac_zlen = false;
		} else { /* HMAC */
			/* Need outer digest for HMAC finalization */
			memcpy(ctx->base.ctxr->data + (req->state_sz >> 2),
			       ctx->opad, req->state_sz);

			/* Single pass HMAC - no digest count */
			cdesc->control_data.control0 |=
				CONTEXT_CONTROL_SIZE(req->state_sz >> 1) |
				CONTEXT_CONTROL_TYPE_HASH_OUT |
				CONTEXT_CONTROL_DIGEST_HMAC;
		}
	} else { /* Hash continuation, do not finish yet */
		cdesc->control_data.control0 |=
			CONTEXT_CONTROL_SIZE(req->state_sz >> 2) |
			CONTEXT_CONTROL_DIGEST_PRECOMPUTED |
			CONTEXT_CONTROL_TYPE_HASH_OUT |
			CONTEXT_CONTROL_NO_FINISH_HASH;
	}
}

static int safexcel_ahash_enqueue(struct ahash_request *areq);

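/*
 * When the engine cannot do an HMAC continue (see the "fake using hash"
 * path below), the result handler re-enqueues the request once: the
 * inner hash result is copied into the cache, the precomputed outer
 * digest becomes the new state, and a second pass computes the outer
 * hash.
 */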
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	u64 cache_len;

	*ret = 0;

	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else {
		*ret = safexcel_rdesc_check_errors(priv, rdesc);
	}

	safexcel_complete(priv, ring);

	if (sreq->nents) {
		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
		sreq->nents = 0;
	}

	if (sreq->result_dma) {
		dma_unmap_single(priv->dev, sreq->result_dma, sreq->digest_sz,
				 DMA_FROM_DEVICE);
		sreq->result_dma = 0;
	}

	if (sreq->cache_dma) {
		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
				 DMA_TO_DEVICE);
		sreq->cache_dma = 0;
		sreq->cache_sz = 0;
	}

	if (sreq->finish) {
		if (sreq->hmac &&
		    (sreq->digest != CONTEXT_CONTROL_DIGEST_HMAC)) {
			/* Faking HMAC using hash - need to do outer hash */
			memcpy(sreq->cache, sreq->state,
			       crypto_ahash_digestsize(ahash));

			memcpy(sreq->state, ctx->opad, sreq->digest_sz);

			sreq->len = sreq->block_sz +
				    crypto_ahash_digestsize(ahash);
			sreq->processed = sreq->block_sz;
			sreq->hmac = 0;

			if (priv->flags & EIP197_TRC_CACHE)
				ctx->base.needs_inv = true;
			areq->nbytes = 0;
			safexcel_ahash_enqueue(areq);

			*should_complete = false; /* Not done yet */
			return 1;
		}

		if (unlikely(sreq->digest == CONTEXT_CONTROL_DIGEST_XCM &&
			     ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_CRC32)) {
			/* Undo final XOR with 0xffffffff ...*/
			*(__le32 *)areq->result = ~sreq->state[0];
		} else {
			memcpy(areq->result, sreq->state,
			       crypto_ahash_digestsize(ahash));
		}
	}

	cache_len = safexcel_queued_len(sreq);
	if (cache_len)
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;

	return 1;
}

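/*
 * Descriptor assembly, as done below: at most one command descriptor
 * covers the cached partial block, one more is added per source
 * scatterlist entry, and a single result descriptor receives the
 * (intermediate) digest of req->digest_sz bytes.
 */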
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	struct safexcel_token *dmmy;
	int i, extra = 0, n_cdesc = 0, ret = 0, cache_len, skip = 0;
	u64 queued, len;

	queued = safexcel_queued_len(req);
	if (queued <= HASH_CACHE_SIZE)
		cache_len = queued;
	else
		cache_len = queued - areq->nbytes;

	if (!req->finish && !req->last_req) {
		/* If this is not the last request and the queued data does not
		 * fit into full cache blocks, cache it for the next send call.
		 */
		extra = queued & (HASH_CACHE_SIZE - 1);

		/* If this is not the last request and the queued data
		 * is a multiple of a block, cache the last one for now.
		 */
		if (!extra)
			extra = HASH_CACHE_SIZE;

		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache_next, extra,
				   areq->nbytes - extra);

		queued -= extra;

		if (!queued) {
			*commands = 0;
			*results = 0;
			return 0;
		}

		extra = 0;
	}

	if (unlikely(req->xcbcmac && req->processed > AES_BLOCK_SIZE)) {
		if (unlikely(cache_len < AES_BLOCK_SIZE)) {
			/*
			 * Cache contains less than 1 full block, complete.
			 */
			extra = AES_BLOCK_SIZE - cache_len;
			if (queued > cache_len) {
				/* More data follows: borrow bytes */
				u64 tmp = queued - cache_len;

				skip = min_t(u64, tmp, extra);
				sg_pcopy_to_buffer(areq->src,
						   sg_nents(areq->src),
						   req->cache + cache_len,
						   skip, 0);
			}
			extra -= skip;
			memset(req->cache + cache_len + skip, 0, extra);
			if (!ctx->cbcmac && extra) {
				// 10- padding for XCBCMAC & CMAC
				req->cache[cache_len + skip] = 0x80;
				// HW will use K2 iso K3 - compensate!
				for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
					((__be32 *)req->cache)[i] ^=
						cpu_to_be32(le32_to_cpu(
							ctx->ipad[i] ^ ctx->ipad[i + 4]));
			}
			cache_len = AES_BLOCK_SIZE;
			queued = queued + extra;
		}

		/* XCBC continue: XOR previous result into 1st word */
		crypto_xor(req->cache, (const u8 *)req->state, AES_BLOCK_SIZE);
	}

	len = queued;
	/* Add a command descriptor for the cached data, if any */
	if (cache_len) {
		req->cache_dma = dma_map_single(priv->dev, req->cache,
						cache_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, req->cache_dma))
			return -EINVAL;

		req->cache_sz = cache_len;
		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
						 (cache_len == len),
						 req->cache_dma, cache_len,
						 len, ctx->base.ctxr_dma,
						 &dmmy);
		if (IS_ERR(first_cdesc)) {
			ret = PTR_ERR(first_cdesc);
			goto unmap_cache;
		}
		n_cdesc++;

		queued -= cache_len;
		if (!queued)
			goto send_command;
	}

	/* Now handle the current ahash request buffer(s) */
	req->nents = dma_map_sg(priv->dev, areq->src,
				sg_nents_for_len(areq->src,
						 areq->nbytes),
				DMA_TO_DEVICE);
	if (!req->nents) {
		ret = -ENOMEM;
		goto cdesc_rollback;
	}

	for_each_sg(areq->src, sg, req->nents, i) {
		int sglen = sg_dma_len(sg);

		if (unlikely(sglen <= skip)) {
			skip -= sglen;
			continue;
		}

		/* Do not overflow the request */
		if ((queued + skip) <= sglen)
			sglen = queued;
		else
			sglen -= skip;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
					   !(queued - sglen),
					   sg_dma_address(sg) + skip, sglen,
					   len, ctx->base.ctxr_dma, &dmmy);
		if (IS_ERR(cdesc)) {
			ret = PTR_ERR(cdesc);
			goto unmap_sg;
		}

		if (!n_cdesc)
			first_cdesc = cdesc;
		n_cdesc++;

		queued -= sglen;
		if (!queued)
			break;
		skip = 0;
	}

send_command:
	/* Setup the context options */
	safexcel_context_control(ctx, req, first_cdesc);

	/* Add the token */
	safexcel_hash_token(first_cdesc, len, req->digest_sz, ctx->cbcmac);

	req->result_dma = dma_map_single(priv->dev, req->state, req->digest_sz,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, req->result_dma)) {
		ret = -EINVAL;
		goto unmap_sg;
	}

	/* Add a result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
				   req->digest_sz);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto unmap_result;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);

	req->processed += len - extra;

	*commands = n_cdesc;
	*results = 1;
	return 0;

unmap_result:
	dma_unmap_single(priv->dev, req->result_dma, req->digest_sz,
			 DMA_FROM_DEVICE);
unmap_sg:
	if (req->nents) {
		dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
		req->nents = 0;
	}
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
	if (req->cache_dma) {
		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
				 DMA_TO_DEVICE);
		req->cache_dma = 0;
		req->cache_sz = 0;
	}

	return ret;
}

static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	int enq_ret;

	*ret = 0;

	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: invalidate: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else {
		*ret = safexcel_rdesc_check_errors(priv, rdesc);
	}

	safexcel_complete(priv, ring);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;
		return 1;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	*should_complete = false;

	return 1;
}

static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int err;

	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);

	if (req->needs_inv) {
		req->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}

static int safexcel_ahash_send_inv(struct crypto_async_request *async,
				   int ring, int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	int ret;

	ret = safexcel_invalidate_cache(async, ctx->priv,
					ctx->base.ctxr_dma, ring);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}

static int safexcel_ahash_send(struct crypto_async_request *async,
			       int ring, int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int ret;

	if (req->needs_inv)
		ret = safexcel_ahash_send_inv(async, ring, commands, results);
	else
		ret = safexcel_ahash_send_req(async, ring, commands, results);

	return ret;
}

static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, EIP197_AHASH_REQ_SIZE);

	/* create invalidation request */
	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_inv_complete, &result);

	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	rctx->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	wait_for_completion(&result.completion);

	if (result.error) {
		dev_warn(priv->dev, "hash: completion error (%d)\n",
			 result.error);
		return result.error;
	}

	return 0;
}

/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, aka. when there is at least 1 block size in the pipe.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	u64 cache_len;

	/* cache_len: everything accepted by the driver but not sent yet,
	 * tot sz handled by update() - last req sz - tot sz handled by send()
	 */
	cache_len = safexcel_queued_len(req);

	/*
	 * In case there aren't enough bytes to proceed (less than a
	 * block size), cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= HASH_CACHE_SIZE) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len,
				   areq->nbytes, 0);
		return 0;
	}

	/* We couldn't cache all the data */
	return -E2BIG;
}

static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	req->needs_inv = false;

	if (ctx->base.ctxr) {
		if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
		    /* invalidate for *any* non-XCBC continuation */
		    ((req->not_first && !req->xcbcmac) ||
		     /* invalidate if (i)digest changed */
		     memcmp(ctx->base.ctxr->data, req->state, req->state_sz) ||
		     /* invalidate for HMAC finish with odigest changed */
		     (req->finish && req->hmac &&
		      memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
			     ctx->opad, req->state_sz))))
			/*
			 * We're still setting needs_inv here, even though it is
			 * cleared right away, because the needs_inv flag can be
			 * set in other functions and we want to keep the same
			 * logic.
			 */
			ctx->base.needs_inv = true;

		if (ctx->base.needs_inv) {
			ctx->base.needs_inv = false;
			req->needs_inv = true;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}
	req->not_first = true;

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}

static int safexcel_ahash_update(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int ret;

	/* If the request is 0 length, do nothing */
	if (!areq->nbytes)
		return 0;

	/* Add request to the cache if it fits */
	ret = safexcel_ahash_cache(areq);

	/* Update total request length */
	req->len += areq->nbytes;

	/* If not all data could fit into the cache, go process the excess.
	 * Also go process immediately for an HMAC IV precompute, which
	 * will never be finished at all, but needs to be processed anyway.
	 */
	if ((ret && !req->finish) || req->last_req)
		return safexcel_ahash_enqueue(areq);

	return 0;
}

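/*
 * final() below special-cases every zero-length situation the engine
 * cannot handle itself: plain hashes return the precomputed
 * empty-message digest, zero-length CRC32/CBC-MAC/XCBC results are
 * produced in software, and a zero-length HMAC synthesizes the inner
 * hash padding block in the cache so the engine can still run the
 * outer hash.
 */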
static int safexcel_ahash_final(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	req->finish = true;

	if (unlikely(!req->len && !areq->nbytes)) {
		/*
		 * If we have an overall 0 length *hash* request:
		 * The HW cannot do 0 length hash, so we provide the correct
		 * result directly here.
		 */
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
			memcpy(areq->result, md5_zero_message_hash,
			       MD5_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			memcpy(areq->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
			memcpy(areq->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			memcpy(areq->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
			memcpy(areq->result, sha384_zero_message_hash,
			       SHA384_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
			memcpy(areq->result, sha512_zero_message_hash,
			       SHA512_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SM3) {
			memcpy(areq->result,
			       EIP197_SM3_ZEROM_HASH, SM3_DIGEST_SIZE);
		}

		return 0;
	} else if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM &&
			    ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5 &&
			    req->len == sizeof(u32) && !areq->nbytes)) {
		/* Zero length CRC32 */
		memcpy(areq->result, ctx->ipad, sizeof(u32));
		return 0;
	} else if (unlikely(ctx->cbcmac && req->len == AES_BLOCK_SIZE &&
			    !areq->nbytes)) {
		/* Zero length CBC MAC */
		memset(areq->result, 0, AES_BLOCK_SIZE);
		return 0;
	} else if (unlikely(req->xcbcmac && req->len == AES_BLOCK_SIZE &&
			    !areq->nbytes)) {
		/* Zero length (X)CBC/CMAC */
		int i;

		for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
			((__be32 *)areq->result)[i] =
				cpu_to_be32(le32_to_cpu(ctx->ipad[i + 4]));//K3
		areq->result[0] ^= 0x80;			// 10- padding
		crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
		return 0;
	} else if (unlikely(req->hmac &&
			    (req->len == req->block_sz) &&
			    !areq->nbytes)) {
		/*
		 * If we have an overall 0 length *HMAC* request:
		 * For HMAC, we need to finalize the inner digest
		 * and then perform the outer hash.
		 */

		/* generate pad block in the cache */
		/* start with a hash block of all zeroes */
		memset(req->cache, 0, req->block_sz);
		/* set the first byte to 0x80 to 'append a 1 bit' */
		req->cache[0] = 0x80;
		/* add the length in bits in the last 2 bytes */
		if (req->len_is_le) {
			/* Little endian length word (e.g. MD5) */
			req->cache[req->block_sz-8] = (req->block_sz << 3) &
						      255;
			req->cache[req->block_sz-7] = (req->block_sz >> 5);
		} else {
			/* Big endian length word (e.g. any SHA) */
			req->cache[req->block_sz-2] = (req->block_sz >> 5);
			req->cache[req->block_sz-1] = (req->block_sz << 3) &
						      255;
		}

		req->len += req->block_sz; /* plus 1 hash block */

		/* Set special zero-length HMAC flag */
		req->hmac_zlen = true;

		/* Finalize HMAC */
		req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	} else if (req->hmac) {
		/* Finalize HMAC */
		req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	}

	return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	req->finish = true;

	safexcel_ahash_update(areq);
	return safexcel_ahash_final(areq);
}

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_export_state *export = out;

	export->len = req->len;
	export->processed = req->processed;

	export->digest = req->digest;

	memcpy(export->state, req->state, req->state_sz);
	memcpy(export->cache, req->cache, HASH_CACHE_SIZE);

	return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	const struct safexcel_ahash_export_state *export = in;
	int ret;

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req->len = export->len;
	req->processed = export->processed;

	req->digest = export->digest;

	memcpy(req->cache, export->cache, HASH_CACHE_SIZE);
	memcpy(req->state, export->state, req->state_sz);

	return 0;
}

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(__crypto_ahash_alg(tfm->__crt_alg),
			     struct safexcel_alg_template, alg.ahash);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_ahash_send;
	ctx->base.handle_result = safexcel_handle_result;
	ctx->fb_do_setkey = false;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));
	return 0;
}

static int safexcel_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;
	req->digest_sz = SHA1_DIGEST_SIZE;
	req->block_sz = SHA1_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	if (priv->flags & EIP197_TRC_CACHE) {
		ret = safexcel_ahash_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}

struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA1,
	.alg.ahash = {
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "safexcel-sha1",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA1_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA1_BLOCK_SIZE;
	req->processed = SHA1_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;
	req->digest_sz = SHA1_DIGEST_SIZE;
	req->block_sz = SHA1_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_ahash_result {
	struct completion completion;
	int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

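/*
 * Standard HMAC key schedule (RFC 2104), as implemented below: keys
 * longer than a block are first hashed down to digest size, then the
 * zero-padded key is XORed with HMAC_IPAD_VALUE (0x36) resp.
 * HMAC_OPAD_VALUE (0x5c) to produce the ipad and opad blocks.
 */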
static int safexcel_hmac_init_pad(struct ahash_request *areq,
				  unsigned int blocksize, const u8 *key,
				  unsigned int keylen, u8 *ipad, u8 *opad)
{
	struct safexcel_ahash_result result;
	struct scatterlist sg;
	int ret, i;
	u8 *keydup;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   safexcel_ahash_complete, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(areq, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(areq);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Avoid leaking */
		memzero_explicit(keydup, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}

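/*
 * Precompute an intermediate digest by pushing one ipad/opad block
 * through the driver's own ahash: setting last_req makes update()
 * process the block without finalizing, and the resulting partial
 * state is then exported for later use as the HMAC (i/o)digest.
 */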
static int safexcel_hmac_init_iv(struct ahash_request *areq,
				 unsigned int blocksize, u8 *pad, void *state)
{
	struct safexcel_ahash_result result;
	struct safexcel_ahash_req *req;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(areq, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req = ahash_request_ctx(areq);
	req->hmac = true;
	req->last_req = true;

	ret = crypto_ahash_update(areq);
	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	return crypto_ahash_export(areq, state);
}

int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
			 void *istate, void *ostate)
{
	struct ahash_request *areq;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad, *opad;
	int ret;

	tfm = crypto_alloc_ahash(alg, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	areq = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!areq) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);
	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_request;
	}

	opad = ipad + blocksize;

	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
	kfree(ipad);
free_request:
	ahash_request_free(areq);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen, const char *alg,
				    unsigned int state_sz)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_ahash_export_state istate, ostate;
	int ret;

	ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr &&
	    (memcmp(ctx->ipad, istate.state, state_sz) ||
	     memcmp(ctx->opad, ostate.state, state_sz)))
		ctx->base.needs_inv = true;

	memcpy(ctx->ipad, &istate.state, state_sz);
	memcpy(ctx->opad, &ostate.state, state_sz);

	return 0;
}

static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
					SHA1_DIGEST_SIZE);
}

struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
|
|
|
|
.type = SAFEXCEL_ALG_TYPE_AHASH,
|
2019-08-30 15:40:54 +08:00
|
|
|
.algo_mask = SAFEXCEL_ALG_SHA1,
|
2017-05-24 22:10:34 +08:00
|
|
|
.alg.ahash = {
|
|
|
|
.init = safexcel_hmac_sha1_init,
|
|
|
|
.update = safexcel_ahash_update,
|
|
|
|
.final = safexcel_ahash_final,
|
|
|
|
.finup = safexcel_ahash_finup,
|
|
|
|
.digest = safexcel_hmac_sha1_digest,
|
|
|
|
.setkey = safexcel_hmac_sha1_setkey,
|
|
|
|
.export = safexcel_ahash_export,
|
|
|
|
.import = safexcel_ahash_import,
|
|
|
|
.halg = {
|
|
|
|
.digestsize = SHA1_DIGEST_SIZE,
|
|
|
|
.statesize = sizeof(struct safexcel_ahash_export_state),
|
|
|
|
.base = {
|
|
|
|
.cra_name = "hmac(sha1)",
|
|
|
|
.cra_driver_name = "safexcel-hmac-sha1",
|
2019-08-30 15:41:47 +08:00
|
|
|
.cra_priority = SAFEXCEL_CRA_PRIORITY,
|
2017-05-24 22:10:34 +08:00
|
|
|
.cra_flags = CRYPTO_ALG_ASYNC |
|
|
|
|
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
|
|
|
.cra_blocksize = SHA1_BLOCK_SIZE,
|
|
|
|
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
|
|
|
|
.cra_init = safexcel_ahash_cra_init,
|
|
|
|
.cra_exit = safexcel_ahash_cra_exit,
|
|
|
|
.cra_module = THIS_MODULE,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
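
/*
 * The basic hash init routines below all follow the same pattern: clear
 * the per-request context, then record the engine algorithm ID and the
 * state, digest and block sizes that the common request path uses when
 * building descriptors.
 */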
static int safexcel_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;
	req->digest_sz = SHA256_DIGEST_SIZE;
	req->block_sz = SHA256_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_256,
	.alg.ahash = {
		.init = safexcel_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha256_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "safexcel-sha256",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;
	req->digest_sz = SHA256_DIGEST_SIZE;
	req->block_sz = SHA256_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_256,
	.alg.ahash = {
		.init = safexcel_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha224_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "safexcel-sha224",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
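
/*
 * The HMAC init routines resume from the inner-hash midstate saved at
 * setkey time: req->state is preloaded from ctx->ipad and len/processed
 * start at one full block, since the key^ipad block has in effect already
 * been consumed.
 */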
static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
					SHA256_DIGEST_SIZE);
}

static int safexcel_hmac_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA256_BLOCK_SIZE;
	req->processed = SHA256_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;
	req->digest_sz = SHA256_DIGEST_SIZE;
	req->block_sz = SHA256_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_256,
	.alg.ahash = {
		.init = safexcel_hmac_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha224_digest,
		.setkey = safexcel_hmac_sha224_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "safexcel-hmac-sha224",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
					SHA256_DIGEST_SIZE);
}

static int safexcel_hmac_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA256_BLOCK_SIZE;
	req->processed = SHA256_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;
	req->digest_sz = SHA256_DIGEST_SIZE;
	req->block_sz = SHA256_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_256,
	.alg.ahash = {
		.init = safexcel_hmac_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha256_digest,
		.setkey = safexcel_hmac_sha256_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "safexcel-hmac-sha256",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha512_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->digest_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha512_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha512_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha512 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_sha512_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha512_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha512",
				.cra_driver_name = "safexcel-sha512",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
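
/*
 * SHA-384 runs on the SHA-512 engine: the internal midstate is a full
 * SHA512_DIGEST_SIZE bytes and only the final output is truncated to
 * SHA384_DIGEST_SIZE.
 */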
static int safexcel_sha384_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->digest_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha384_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha384_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha384 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_sha384_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha384_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA384_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha384",
				.cra_driver_name = "safexcel-sha384",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
					SHA512_DIGEST_SIZE);
}

static int safexcel_hmac_sha512_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA512_BLOCK_SIZE;
	req->processed = SHA512_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->digest_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha512_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_hmac_sha512_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha512_digest,
		.setkey = safexcel_hmac_sha512_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "safexcel-hmac-sha512",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
					SHA512_DIGEST_SIZE);
}

static int safexcel_hmac_sha384_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA512_BLOCK_SIZE;
	req->processed = SHA512_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->digest_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha384_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_hmac_sha384_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha384_digest,
		.setkey = safexcel_hmac_sha384_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA384_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "safexcel-hmac-sha384",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_md5_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = MD5_DIGEST_SIZE;
	req->digest_sz = MD5_DIGEST_SIZE;
	req->block_sz = MD5_HMAC_BLOCK_SIZE;

	return 0;
}

static int safexcel_md5_digest(struct ahash_request *areq)
{
	int ret = safexcel_md5_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_md5 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_MD5,
	.alg.ahash = {
		.init = safexcel_md5_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_md5_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "safexcel-md5",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
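
/*
 * HMAC-MD5 reuses the generic HMAC flow; the one MD5-specific twist is the
 * len_is_le flag set below, because MD5 encodes its length counter little
 * endian, unlike the SHA family.
 */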
static int safexcel_hmac_md5_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, MD5_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = MD5_HMAC_BLOCK_SIZE;
	req->processed = MD5_HMAC_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = MD5_DIGEST_SIZE;
	req->digest_sz = MD5_DIGEST_SIZE;
	req->block_sz = MD5_HMAC_BLOCK_SIZE;
	req->len_is_le = true; /* MD5 is little endian! ... */
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
					MD5_DIGEST_SIZE);
}

static int safexcel_hmac_md5_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_md5_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_md5 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_MD5,
	.alg.ahash = {
		.init = safexcel_hmac_md5_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_md5_digest,
		.setkey = safexcel_hmac_md5_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "safexcel-hmac-md5",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
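
/*
 * CRC32 is exposed through the hash interface: the optional 32-bit "key"
 * is the seed value (all zeroes by default), and the init routine loads
 * its complement as the starting state, following the usual convention of
 * keeping the CRC inverted while it is being computed.
 */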
static int safexcel_crc32_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret = safexcel_ahash_cra_init(tfm);

	/* Default 'key' is all zeroes */
	memset(ctx->ipad, 0, sizeof(u32));
	return ret;
}

static int safexcel_crc32_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from loaded key */
	req->state[0] = (__force __le32)le32_to_cpu(~ctx->ipad[0]);
	/* Set processed to non-zero to enable invalidation detection */
	req->len = sizeof(u32);
	req->processed = sizeof(u32);

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_CRC32;
	req->digest = CONTEXT_CONTROL_DIGEST_XCM;
	req->state_sz = sizeof(u32);
	req->digest_sz = sizeof(u32);
	req->block_sz = sizeof(u32);

	return 0;
}

static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	if (keylen != sizeof(u32))
		return -EINVAL;

	memcpy(ctx->ipad, key, sizeof(u32));
	return 0;
}

static int safexcel_crc32_digest(struct ahash_request *areq)
{
	return safexcel_crc32_init(areq) ?: safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_crc32 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = 0,
	.alg.ahash = {
		.init = safexcel_crc32_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_crc32_digest,
		.setkey = safexcel_crc32_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = sizeof(u32),
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "crc32",
				.cra_driver_name = "safexcel-crc32",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_OPTIONAL_KEY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_crc32_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
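
/*
 * CBCMAC is implemented as a degenerate case of the engine's XCBC
 * support: the AES key is stored after two all-zero subkey slots in
 * ctx->ipad and requests run in XCM digest mode, with ctx->cbcmac
 * telling the data path apart from real XCBC/CMAC.
 */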
static int safexcel_cbcmac_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from loaded keys */
	memcpy(req->state, ctx->ipad, ctx->key_sz);
	/* Set processed to non-zero to enable invalidation detection */
	req->len = AES_BLOCK_SIZE;
	req->processed = AES_BLOCK_SIZE;

	req->digest = CONTEXT_CONTROL_DIGEST_XCM;
	req->state_sz = ctx->key_sz;
	req->digest_sz = AES_BLOCK_SIZE;
	req->block_sz = AES_BLOCK_SIZE;
	req->xcbcmac = true;

	return 0;
}

static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int len)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct crypto_aes_ctx aes;
	int ret, i;

	ret = aes_expandkey(&aes, key, len);
	if (ret)
		return ret;

	memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE);
	for (i = 0; i < len / sizeof(u32); i++)
		ctx->ipad[i + 8] = (__force __le32)cpu_to_be32(aes.key_enc[i]);

	if (len == AES_KEYSIZE_192) {
		ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
		ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
	} else if (len == AES_KEYSIZE_256) {
		ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
		ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
	} else {
		ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
		ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
	}
	ctx->cbcmac = true;

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}

static int safexcel_cbcmac_digest(struct ahash_request *areq)
{
	return safexcel_cbcmac_init(areq) ?: safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_cbcmac = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = 0,
	.alg.ahash = {
		.init = safexcel_cbcmac_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_cbcmac_digest,
		.setkey = safexcel_cbcmac_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = AES_BLOCK_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "cbcmac(aes)",
				.cra_driver_name = "safexcel-cbcmac-aes",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
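
/*
 * AES-XCBC-MAC (RFC 3566) derives three subkeys by encrypting the constant
 * blocks 0x01..01, 0x02..02 and 0x03..03 under the user key: K1 keys the
 * CBC-MAC itself, while K2/K3 mask the final block. The setkey below
 * precomputes all three with a software AES cipher before handing them to
 * the engine.
 */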
static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				   unsigned int len)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct crypto_aes_ctx aes;
	u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)];
	int ret, i;

	ret = aes_expandkey(&aes, key, len);
	if (ret)
		return ret;

	/* precompute the XCBC key material */
	crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	ret = crypto_cipher_setkey(ctx->kaes, key, len);
	if (ret)
		return ret;

	crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
		"\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
	crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp,
		"\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
	crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
		"\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
	for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
		ctx->ipad[i] =
			cpu_to_le32((__force u32)cpu_to_be32(key_tmp[i]));

	crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	ret = crypto_cipher_setkey(ctx->kaes,
				   (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
				   AES_MIN_KEY_SIZE);
	if (ret)
		return ret;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
	ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
	ctx->cbcmac = false;

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}

static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_ahash_cra_init(tfm);
	ctx->kaes = crypto_alloc_cipher("aes", 0, 0);
	return PTR_ERR_OR_ZERO(ctx->kaes);
}

static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->kaes);
	safexcel_ahash_cra_exit(tfm);
}

struct safexcel_alg_template safexcel_alg_xcbcmac = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = 0,
	.alg.ahash = {
		.init = safexcel_cbcmac_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_cbcmac_digest,
		.setkey = safexcel_xcbcmac_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = AES_BLOCK_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "xcbc(aes)",
				.cra_driver_name = "safexcel-xcbc-aes",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_xcbcmac_cra_init,
				.cra_exit = safexcel_xcbcmac_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
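
/*
 * AES-CMAC (RFC 4493) differs from XCBC only in how the subkeys are
 * derived: L = E(K, 0^128) is doubled in GF(2^128) (left shift, XOR 0x87
 * on carry-out) to produce K1 and K2, while the original key keys the
 * cipher itself. That is why CMAC can reuse the XCBC context layout and
 * algorithm IDs below.
 */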
static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				unsigned int len)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct crypto_aes_ctx aes;
	__be64 consts[4];
	u64 _const[2];
	u8 msb_mask, gfmask;
	int ret, i;

	ret = aes_expandkey(&aes, key, len);
	if (ret)
		return ret;

	for (i = 0; i < len / sizeof(u32); i++)
		ctx->ipad[i + 8] =
			cpu_to_le32((__force u32)cpu_to_be32(aes.key_enc[i]));

	/* precompute the CMAC key material */
	crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	ret = crypto_cipher_setkey(ctx->kaes, key, len);
	if (ret)
		return ret;

	/* code below borrowed from crypto/cmac.c */
	/* encrypt the zero block */
	memset(consts, 0, AES_BLOCK_SIZE);
	crypto_cipher_encrypt_one(ctx->kaes, (u8 *)consts, (u8 *)consts);

	gfmask = 0x87;
	_const[0] = be64_to_cpu(consts[1]);
	_const[1] = be64_to_cpu(consts[0]);

	/* gf(2^128) multiply zero-ciphertext with u and u^2 */
	for (i = 0; i < 4; i += 2) {
		msb_mask = ((s64)_const[1] >> 63) & gfmask;
		_const[1] = (_const[1] << 1) | (_const[0] >> 63);
		_const[0] = (_const[0] << 1) ^ msb_mask;

		consts[i + 0] = cpu_to_be64(_const[1]);
		consts[i + 1] = cpu_to_be64(_const[0]);
	}
	/* end of code borrowed from crypto/cmac.c */

	for (i = 0; i < 2 * AES_BLOCK_SIZE / sizeof(u32); i++)
		ctx->ipad[i] = (__force __le32)cpu_to_be32(((u32 *)consts)[i]);

	if (len == AES_KEYSIZE_192) {
		ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
		ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
	} else if (len == AES_KEYSIZE_256) {
		ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
		ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
	} else {
		ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
		ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
	}
	ctx->cbcmac = false;

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}

struct safexcel_alg_template safexcel_alg_cmac = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = 0,
	.alg.ahash = {
		.init = safexcel_cbcmac_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_cbcmac_digest,
		.setkey = safexcel_cmac_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = AES_BLOCK_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "cmac(aes)",
				.cra_driver_name = "safexcel-cmac-aes",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_xcbcmac_cra_init,
				.cra_exit = safexcel_xcbcmac_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
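
/*
 * SM3 (GB/T 32905) is handled exactly like the SHA-256 class: a 32-byte
 * state and digest with a 64-byte block, so only the algorithm ID and
 * size constants differ.
 */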
static int safexcel_sm3_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SM3_DIGEST_SIZE;
	req->digest_sz = SM3_DIGEST_SIZE;
	req->block_sz = SM3_BLOCK_SIZE;

	return 0;
}

static int safexcel_sm3_digest(struct ahash_request *areq)
{
	int ret = safexcel_sm3_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sm3 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SM3,
	.alg.ahash = {
		.init = safexcel_sm3_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sm3_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SM3_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sm3",
				.cra_driver_name = "safexcel-sm3",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SM3_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sm3_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sm3",
					SM3_DIGEST_SIZE);
}

static int safexcel_hmac_sm3_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SM3_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SM3_BLOCK_SIZE;
	req->processed = SM3_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SM3_DIGEST_SIZE;
	req->digest_sz = SM3_DIGEST_SIZE;
	req->block_sz = SM3_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sm3_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sm3_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sm3 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SM3,
	.alg.ahash = {
		.init = safexcel_hmac_sm3_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sm3_digest,
		.setkey = safexcel_hmac_sm3_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SM3_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sm3)",
				.cra_driver_name = "safexcel-hmac-sm3",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SM3_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
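
/*
 * SHA-3 leans on a software fallback: the engine cannot hash a zero-length
 * message and cannot export, import or incrementally update its Keccak
 * state, so updates, ex/import and empty digests are routed to ctx->fback
 * via safexcel_sha3_fbcheck().
 */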
static int safexcel_sha3_224_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224;
	req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
	req->state_sz = SHA3_224_DIGEST_SIZE;
	req->digest_sz = SHA3_224_DIGEST_SIZE;
	req->block_sz = SHA3_224_BLOCK_SIZE;
	ctx->do_fallback = false;
	ctx->fb_init_done = false;
	return 0;
}

static int safexcel_sha3_fbcheck(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *subreq = ahash_request_ctx(req);
	int ret = 0;

	if (ctx->do_fallback) {
		ahash_request_set_tfm(subreq, ctx->fback);
		ahash_request_set_callback(subreq, req->base.flags,
					   req->base.complete, req->base.data);
		ahash_request_set_crypt(subreq, req->src, req->result,
					req->nbytes);
		if (!ctx->fb_init_done) {
			if (ctx->fb_do_setkey) {
				/* Set fallback cipher HMAC key */
				u8 key[SHA3_224_BLOCK_SIZE];

				memcpy(key, ctx->ipad,
				       crypto_ahash_blocksize(ctx->fback) / 2);
				memcpy(key +
				       crypto_ahash_blocksize(ctx->fback) / 2,
				       ctx->opad,
				       crypto_ahash_blocksize(ctx->fback) / 2);
				ret = crypto_ahash_setkey(ctx->fback, key,
					crypto_ahash_blocksize(ctx->fback));
				memzero_explicit(key,
					crypto_ahash_blocksize(ctx->fback));
				ctx->fb_do_setkey = false;
			}
			ret = ret ?: crypto_ahash_init(subreq);
			ctx->fb_init_done = true;
		}
	}
	return ret;
}

static int safexcel_sha3_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *subreq = ahash_request_ctx(req);

	ctx->do_fallback = true;
	return safexcel_sha3_fbcheck(req) ?: crypto_ahash_update(subreq);
}

static int safexcel_sha3_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *subreq = ahash_request_ctx(req);

	ctx->do_fallback = true;
	return safexcel_sha3_fbcheck(req) ?: crypto_ahash_final(subreq);
}

static int safexcel_sha3_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *subreq = ahash_request_ctx(req);

	ctx->do_fallback |= !req->nbytes;
	if (ctx->do_fallback)
		/* Update or ex/import happened or len 0, cannot use the HW */
		return safexcel_sha3_fbcheck(req) ?:
		       crypto_ahash_finup(subreq);
	else
		return safexcel_ahash_finup(req);
}

static int safexcel_sha3_digest_fallback(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *subreq = ahash_request_ctx(req);

	ctx->do_fallback = true;
	ctx->fb_init_done = false;
	return safexcel_sha3_fbcheck(req) ?: crypto_ahash_finup(subreq);
}

static int safexcel_sha3_224_digest(struct ahash_request *req)
{
	if (req->nbytes)
		return safexcel_sha3_224_init(req) ?: safexcel_ahash_finup(req);

	/* HW cannot do zero length hash, use fallback instead */
	return safexcel_sha3_digest_fallback(req);
}

static int safexcel_sha3_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *subreq = ahash_request_ctx(req);

	ctx->do_fallback = true;
	return safexcel_sha3_fbcheck(req) ?: crypto_ahash_export(subreq, out);
}

static int safexcel_sha3_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *subreq = ahash_request_ctx(req);

	ctx->do_fallback = true;
	return safexcel_sha3_fbcheck(req) ?: crypto_ahash_import(subreq, in);
	// return safexcel_ahash_import(req, in);
}
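
/*
 * statesize and reqsize are taken over from the fallback at cra_init time
 * so that exported state blobs remain compatible with the software SHA-3
 * implementation that actually produces and consumes them.
 */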
static int safexcel_sha3_cra_init(struct crypto_tfm *tfm)
|
|
|
|
{
|
|
|
|
struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
|
|
|
|
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
|
|
|
|
|
|
|
|
safexcel_ahash_cra_init(tfm);
|
|
|
|
|
|
|
|
/* Allocate fallback implementation */
|
|
|
|
ctx->fback = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
|
|
|
|
CRYPTO_ALG_ASYNC |
|
|
|
|
CRYPTO_ALG_NEED_FALLBACK);
|
|
|
|
if (IS_ERR(ctx->fback))
|
|
|
|
return PTR_ERR(ctx->fback);
|
|
|
|
|
|
|
|
/* Update statesize from fallback algorithm! */
|
|
|
|
crypto_hash_alg_common(ahash)->statesize =
|
|
|
|
crypto_ahash_statesize(ctx->fback);
|
|
|
|
crypto_ahash_set_reqsize(ahash, max(sizeof(struct safexcel_ahash_req),
|
|
|
|
sizeof(struct ahash_request) +
|
|
|
|
crypto_ahash_reqsize(ctx->fback)));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void safexcel_sha3_cra_exit(struct crypto_tfm *tfm)
|
|
|
|
{
|
|
|
|
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
|
|
|
|
|
|
|
|
crypto_free_ahash(ctx->fback);
|
|
|
|
safexcel_ahash_cra_exit(tfm);
|
|
|
|
}

struct safexcel_alg_template safexcel_alg_sha3_224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA3,
	.alg.ahash = {
		.init = safexcel_sha3_224_init,
		.update = safexcel_sha3_update,
		.final = safexcel_sha3_final,
		.finup = safexcel_sha3_finup,
		.digest = safexcel_sha3_224_digest,
		.export = safexcel_sha3_export,
		.import = safexcel_sha3_import,
		.halg = {
			.digestsize = SHA3_224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha3-224",
				.cra_driver_name = "safexcel-sha3-224",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA3_224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_sha3_cra_init,
				.cra_exit = safexcel_sha3_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
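
/*
 * Illustrative usage sketch (editorial addition, not part of the
 * driver): a one-shot sha3-224 digest through the generic ahash API,
 * which is how the template above is typically exercised. The function
 * name and buffers are hypothetical; it assumes <crypto/hash.h> and
 * <linux/scatterlist.h> are available and that @data is DMA-able (not
 * on the stack). Kept inside #if 0 so it is never built.
 */
#if 0
static int sha3_224_example(const u8 *data, unsigned int len,
			    u8 out[SHA3_224_DIGEST_SIZE])
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("sha3-224", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* A zero-length request would be routed to the fallback, see above */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}
#endif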

static int safexcel_sha3_256_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256;
	req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
	req->state_sz = SHA3_256_DIGEST_SIZE;
	req->digest_sz = SHA3_256_DIGEST_SIZE;
	req->block_sz = SHA3_256_BLOCK_SIZE;
	ctx->do_fallback = false;
	ctx->fb_init_done = false;
	return 0;
}

static int safexcel_sha3_256_digest(struct ahash_request *req)
{
	if (req->nbytes)
		return safexcel_sha3_256_init(req) ?: safexcel_ahash_finup(req);

	/* HW cannot do zero length hash, use fallback instead */
	return safexcel_sha3_digest_fallback(req);
}

struct safexcel_alg_template safexcel_alg_sha3_256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA3,
	.alg.ahash = {
		.init = safexcel_sha3_256_init,
		.update = safexcel_sha3_update,
		.final = safexcel_sha3_final,
		.finup = safexcel_sha3_finup,
		.digest = safexcel_sha3_256_digest,
		.export = safexcel_sha3_export,
		.import = safexcel_sha3_import,
		.halg = {
			.digestsize = SHA3_256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha3-256",
				.cra_driver_name = "safexcel-sha3-256",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA3_256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_sha3_cra_init,
				.cra_exit = safexcel_sha3_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha3_384_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384;
	req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
	req->state_sz = SHA3_384_DIGEST_SIZE;
	req->digest_sz = SHA3_384_DIGEST_SIZE;
	req->block_sz = SHA3_384_BLOCK_SIZE;
	ctx->do_fallback = false;
	ctx->fb_init_done = false;
	return 0;
}

static int safexcel_sha3_384_digest(struct ahash_request *req)
{
	if (req->nbytes)
		return safexcel_sha3_384_init(req) ?: safexcel_ahash_finup(req);

	/* HW cannot do zero length hash, use fallback instead */
	return safexcel_sha3_digest_fallback(req);
}

struct safexcel_alg_template safexcel_alg_sha3_384 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA3,
	.alg.ahash = {
		.init = safexcel_sha3_384_init,
		.update = safexcel_sha3_update,
		.final = safexcel_sha3_final,
		.finup = safexcel_sha3_finup,
		.digest = safexcel_sha3_384_digest,
		.export = safexcel_sha3_export,
		.import = safexcel_sha3_import,
		.halg = {
			.digestsize = SHA3_384_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha3-384",
				.cra_driver_name = "safexcel-sha3-384",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA3_384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_sha3_cra_init,
				.cra_exit = safexcel_sha3_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha3_512_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512;
	req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
	req->state_sz = SHA3_512_DIGEST_SIZE;
	req->digest_sz = SHA3_512_DIGEST_SIZE;
	req->block_sz = SHA3_512_BLOCK_SIZE;
	ctx->do_fallback = false;
	ctx->fb_init_done = false;
	return 0;
}

static int safexcel_sha3_512_digest(struct ahash_request *req)
{
	if (req->nbytes)
		return safexcel_sha3_512_init(req) ?: safexcel_ahash_finup(req);

	/* HW cannot do zero length hash, use fallback instead */
	return safexcel_sha3_digest_fallback(req);
}

struct safexcel_alg_template safexcel_alg_sha3_512 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA3,
	.alg.ahash = {
		.init = safexcel_sha3_512_init,
		.update = safexcel_sha3_update,
		.final = safexcel_sha3_final,
		.finup = safexcel_sha3_finup,
		.digest = safexcel_sha3_512_digest,
		.export = safexcel_sha3_export,
		.import = safexcel_sha3_import,
		.halg = {
			.digestsize = SHA3_512_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha3-512",
				.cra_driver_name = "safexcel-sha3-512",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA3_512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_sha3_cra_init,
				.cra_exit = safexcel_sha3_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha3_cra_init(struct crypto_tfm *tfm, const char *alg)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = safexcel_sha3_cra_init(tfm);
	if (ret)
		return ret;

	/* Allocate precalc basic digest implementation */
	ctx->shpre = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->shpre))
		return PTR_ERR(ctx->shpre);

	ctx->shdesc = kmalloc(sizeof(*ctx->shdesc) +
			      crypto_shash_descsize(ctx->shpre), GFP_KERNEL);
	if (!ctx->shdesc) {
		crypto_free_shash(ctx->shpre);
		return -ENOMEM;
	}
	ctx->shdesc->tfm = ctx->shpre;
	return 0;
}

static void safexcel_hmac_sha3_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(ctx->fback);
	crypto_free_shash(ctx->shpre);
	kfree(ctx->shdesc);
	safexcel_ahash_cra_exit(tfm);
}

static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	int ret = 0;

	if (keylen > crypto_ahash_blocksize(tfm)) {
		/*
		 * If the key is larger than the blocksize, then hash it
		 * first using our fallback cipher
		 */
		ret = crypto_shash_digest(ctx->shdesc, key, keylen,
					  (u8 *)ctx->ipad);
		keylen = crypto_shash_digestsize(ctx->shpre);

		/*
		 * If the digest is larger than half the blocksize, we need to
		 * move the rest to opad due to the way our HMAC infra works.
		 */
		if (keylen > crypto_ahash_blocksize(tfm) / 2)
			/* Buffers overlap, need to use memmove iso memcpy! */
			memmove(ctx->opad,
				(u8 *)ctx->ipad +
					crypto_ahash_blocksize(tfm) / 2,
				keylen - crypto_ahash_blocksize(tfm) / 2);
	} else {
		/*
		 * Copy the key to our ipad & opad buffers
		 * Note that ipad and opad each contain one half of the key,
		 * to match the existing HMAC driver infrastructure.
		 */
		if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
			memcpy(ctx->ipad, key, keylen);
		} else {
			memcpy(ctx->ipad, key,
			       crypto_ahash_blocksize(tfm) / 2);
			memcpy(ctx->opad,
			       key + crypto_ahash_blocksize(tfm) / 2,
			       keylen - crypto_ahash_blocksize(tfm) / 2);
		}
	}

	/* Pad key with zeroes */
	if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
		memset((u8 *)ctx->ipad + keylen, 0,
		       crypto_ahash_blocksize(tfm) / 2 - keylen);
		memset(ctx->opad, 0, crypto_ahash_blocksize(tfm) / 2);
	} else {
		memset((u8 *)ctx->opad + keylen -
		       crypto_ahash_blocksize(tfm) / 2, 0,
		       crypto_ahash_blocksize(tfm) - keylen);
	}

	/* If doing fallback, still need to set the new key! */
	ctx->fb_do_setkey = true;
	return ret;
}
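
/*
 * Worked example of the split above (editorial): for hmac(sha3-256) the
 * block size is SHA3_256_BLOCK_SIZE = 136 bytes, so half a block is 68
 * bytes:
 *
 *   keylen = 40:  key bytes 0..39 go into ipad; ipad[40..67] and all of
 *                 opad[0..67] are zeroed.
 *   keylen = 100: key bytes 0..67 go into ipad, bytes 68..99 into opad;
 *                 opad[32..67] is zeroed.
 *   keylen > 136: the key is first digested down to 32 bytes by the
 *                 shpre tfm and then handled like the 32-byte case.
 */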

static int safexcel_hmac_sha3_224_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Copy (half of) the key */
	memcpy(req->state, ctx->ipad, SHA3_224_BLOCK_SIZE / 2);
	/* Start of HMAC should have len == processed == blocksize */
	req->len = SHA3_224_BLOCK_SIZE;
	req->processed = SHA3_224_BLOCK_SIZE;
	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224;
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	req->state_sz = SHA3_224_BLOCK_SIZE / 2;
	req->digest_sz = SHA3_224_DIGEST_SIZE;
	req->block_sz = SHA3_224_BLOCK_SIZE;
	req->hmac = true;
	ctx->do_fallback = false;
	ctx->fb_init_done = false;
	return 0;
}
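
/*
 * Editorial note: loading (half of) the key into req->state and setting
 * len == processed == blocksize makes the engine treat the key block as
 * already absorbed, so the inner hash continues from the keyed state
 * rather than starting from an empty one.
 */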

static int safexcel_hmac_sha3_224_digest(struct ahash_request *req)
{
	if (req->nbytes)
		return safexcel_hmac_sha3_224_init(req) ?:
		       safexcel_ahash_finup(req);

	/* HW cannot do zero length HMAC, use fallback instead */
	return safexcel_sha3_digest_fallback(req);
}

static int safexcel_hmac_sha3_224_cra_init(struct crypto_tfm *tfm)
{
	return safexcel_hmac_sha3_cra_init(tfm, "sha3-224");
}

struct safexcel_alg_template safexcel_alg_hmac_sha3_224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA3,
	.alg.ahash = {
		.init = safexcel_hmac_sha3_224_init,
		.update = safexcel_sha3_update,
		.final = safexcel_sha3_final,
		.finup = safexcel_sha3_finup,
		.digest = safexcel_hmac_sha3_224_digest,
		.setkey = safexcel_hmac_sha3_setkey,
		.export = safexcel_sha3_export,
		.import = safexcel_sha3_import,
		.halg = {
			.digestsize = SHA3_224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha3-224)",
				.cra_driver_name = "safexcel-hmac-sha3-224",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA3_224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_hmac_sha3_224_cra_init,
				.cra_exit = safexcel_hmac_sha3_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha3_256_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Copy (half of) the key */
	memcpy(req->state, ctx->ipad, SHA3_256_BLOCK_SIZE / 2);
	/* Start of HMAC should have len == processed == blocksize */
	req->len = SHA3_256_BLOCK_SIZE;
	req->processed = SHA3_256_BLOCK_SIZE;
	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256;
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	req->state_sz = SHA3_256_BLOCK_SIZE / 2;
	req->digest_sz = SHA3_256_DIGEST_SIZE;
	req->block_sz = SHA3_256_BLOCK_SIZE;
	req->hmac = true;
	ctx->do_fallback = false;
	ctx->fb_init_done = false;
	return 0;
}

static int safexcel_hmac_sha3_256_digest(struct ahash_request *req)
{
	if (req->nbytes)
		return safexcel_hmac_sha3_256_init(req) ?:
		       safexcel_ahash_finup(req);

	/* HW cannot do zero length HMAC, use fallback instead */
	return safexcel_sha3_digest_fallback(req);
}

static int safexcel_hmac_sha3_256_cra_init(struct crypto_tfm *tfm)
{
	return safexcel_hmac_sha3_cra_init(tfm, "sha3-256");
}

struct safexcel_alg_template safexcel_alg_hmac_sha3_256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA3,
	.alg.ahash = {
		.init = safexcel_hmac_sha3_256_init,
		.update = safexcel_sha3_update,
		.final = safexcel_sha3_final,
		.finup = safexcel_sha3_finup,
		.digest = safexcel_hmac_sha3_256_digest,
		.setkey = safexcel_hmac_sha3_setkey,
		.export = safexcel_sha3_export,
		.import = safexcel_sha3_import,
		.halg = {
			.digestsize = SHA3_256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha3-256)",
				.cra_driver_name = "safexcel-hmac-sha3-256",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA3_256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_hmac_sha3_256_cra_init,
				.cra_exit = safexcel_hmac_sha3_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
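
/*
 * Illustrative usage sketch (editorial addition, not part of the
 * driver): keyed one-shot hmac(sha3-256) through the ahash API, tying
 * together the setkey and digest paths above. The function name is
 * hypothetical; a zero-length payload would transparently hit the
 * fallback path. Kept inside #if 0 so it is never built.
 */
#if 0
static int hmac_sha3_256_example(const u8 *key, unsigned int keylen,
				 const u8 *data, unsigned int len,
				 u8 mac[SHA3_256_DIGEST_SIZE])
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("hmac(sha3-256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Runs safexcel_hmac_sha3_setkey() when this driver is selected */
	ret = crypto_ahash_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, mac, len);
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}
#endif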

static int safexcel_hmac_sha3_384_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Copy (half of) the key */
	memcpy(req->state, ctx->ipad, SHA3_384_BLOCK_SIZE / 2);
	/* Start of HMAC should have len == processed == blocksize */
	req->len = SHA3_384_BLOCK_SIZE;
	req->processed = SHA3_384_BLOCK_SIZE;
	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384;
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	req->state_sz = SHA3_384_BLOCK_SIZE / 2;
	req->digest_sz = SHA3_384_DIGEST_SIZE;
	req->block_sz = SHA3_384_BLOCK_SIZE;
	req->hmac = true;
	ctx->do_fallback = false;
	ctx->fb_init_done = false;
	return 0;
}

static int safexcel_hmac_sha3_384_digest(struct ahash_request *req)
{
	if (req->nbytes)
		return safexcel_hmac_sha3_384_init(req) ?:
		       safexcel_ahash_finup(req);

	/* HW cannot do zero length HMAC, use fallback instead */
	return safexcel_sha3_digest_fallback(req);
}

static int safexcel_hmac_sha3_384_cra_init(struct crypto_tfm *tfm)
{
	return safexcel_hmac_sha3_cra_init(tfm, "sha3-384");
}

struct safexcel_alg_template safexcel_alg_hmac_sha3_384 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA3,
	.alg.ahash = {
		.init = safexcel_hmac_sha3_384_init,
		.update = safexcel_sha3_update,
		.final = safexcel_sha3_final,
		.finup = safexcel_sha3_finup,
		.digest = safexcel_hmac_sha3_384_digest,
		.setkey = safexcel_hmac_sha3_setkey,
		.export = safexcel_sha3_export,
		.import = safexcel_sha3_import,
		.halg = {
			.digestsize = SHA3_384_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha3-384)",
				.cra_driver_name = "safexcel-hmac-sha3-384",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA3_384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_hmac_sha3_384_cra_init,
				.cra_exit = safexcel_hmac_sha3_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha3_512_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Copy (half of) the key */
	memcpy(req->state, ctx->ipad, SHA3_512_BLOCK_SIZE / 2);
	/* Start of HMAC should have len == processed == blocksize */
	req->len = SHA3_512_BLOCK_SIZE;
	req->processed = SHA3_512_BLOCK_SIZE;
	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512;
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	req->state_sz = SHA3_512_BLOCK_SIZE / 2;
	req->digest_sz = SHA3_512_DIGEST_SIZE;
	req->block_sz = SHA3_512_BLOCK_SIZE;
	req->hmac = true;
	ctx->do_fallback = false;
	ctx->fb_init_done = false;
	return 0;
}

static int safexcel_hmac_sha3_512_digest(struct ahash_request *req)
{
	if (req->nbytes)
		return safexcel_hmac_sha3_512_init(req) ?:
		       safexcel_ahash_finup(req);

	/* HW cannot do zero length HMAC, use fallback instead */
	return safexcel_sha3_digest_fallback(req);
}

static int safexcel_hmac_sha3_512_cra_init(struct crypto_tfm *tfm)
{
	return safexcel_hmac_sha3_cra_init(tfm, "sha3-512");
}

struct safexcel_alg_template safexcel_alg_hmac_sha3_512 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA3,
	.alg.ahash = {
		.init = safexcel_hmac_sha3_512_init,
		.update = safexcel_sha3_update,
		.final = safexcel_sha3_final,
		.finup = safexcel_sha3_finup,
		.digest = safexcel_hmac_sha3_512_digest,
		.setkey = safexcel_hmac_sha3_setkey,
		.export = safexcel_sha3_export,
		.import = safexcel_sha3_import,
		.halg = {
			.digestsize = SHA3_512_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha3-512)",
				.cra_driver_name = "safexcel-hmac-sha3-512",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA3_512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_hmac_sha3_512_cra_init,
				.cra_exit = safexcel_hmac_sha3_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};