// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
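
/*
 * Illustrative sketch (not part of the driver): a job descriptor with the
 * layout above is built with the desc_constr.h helpers used throughout this
 * file - a header pointing at the shared descriptor, then a SEQ OUT PTR, then
 * a SEQ IN PTR. All helpers below appear later in this file; the function
 * itself is hypothetical.
 *
 *	static void example_build_jobdesc(u32 *desc, u32 *sh_desc,
 *					  dma_addr_t sh_desc_dma,
 *					  dma_addr_t src, int srclen,
 *					  dma_addr_t dst, int dstlen)
 *	{
 *		init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *				     HDR_SHARE_DEFER | HDR_REVERSE);
 *		append_seq_out_ptr(desc, dst, dstlen, 0);
 *		append_seq_in_ptr(desc, src, srclen, 0);
 *	}
 */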

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct crypto_engine_ctx enginectx;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req) ____cacheline_aligned;
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	struct ahash_edesc *edesc;
	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
			      void *context);
};
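
/*
 * caam_export_state - snapshot of the partial hash state (pending buffer,
 * running context and per-state methods) used by the ahash .export() and
 * .import() callbacks
 */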
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
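
/* true only for AES with the CMAC AAI, i.e. the cmac(aes) algorithms */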
static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
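
/* (Re)generate the update/update_first/final/digest shared descriptors and
 * sync them out to the device
 */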
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}
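
/* Counterpart of ahash_set_sh_desc() for xcbc(aes), built with
 * cnstr_shdsc_sk_hash()
 */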
static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
	return 0;
}
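
/* Counterpart of ahash_set_sh_desc() for cmac(aes) */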
static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

/* Digest the key if it is longer than the algorithm's block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	bool bklog;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[];
};
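
/* Unmap the source scatterlist, the S/G table and the pending buffer, if set */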
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}
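
/* Common completion handler for requests whose result is copied out of the
 * running context (caam_ctx)
 */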
static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
				  void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		req->base.complete(&req->base, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}
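
/* Common completion handler for update-type requests: save the bytes that
 * were held back (next_buflen) so a later update/final can prepend them
 */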
static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
				     void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		req->base.complete(&req->base, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	state->edesc = edesc;

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
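
/* crypto-engine .do_one_request() callback: submit the prepared descriptor
 * to the job ring
 */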
static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}
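
/* Hand the job either to crypto-engine (backloggable requests) or directly
 * to the job ring
 */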
static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	/*
	 * Only backlog requests are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
					ctx->ctx_len, DMA_BIDIRECTIONAL);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
|
|
|
|
|
|
|
|
static int ahash_digest(struct ahash_request *req)
|
|
|
|
{
|
|
|
|
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
2022-11-25 12:36:45 +08:00
|
|
|
struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
|
|
|
|
struct caam_hash_state *state = ahash_request_ctx_dma(req);
|
2012-06-23 08:48:47 +08:00
|
|
|
struct device *jrdev = ctx->jrdev;
|
2016-08-09 01:05:13 +08:00
|
|
|
u32 *desc;
|
2012-06-23 08:48:47 +08:00
|
|
|
int digestsize = crypto_ahash_digestsize(ahash);
|
2016-08-09 01:05:19 +08:00
|
|
|
int src_nents, mapped_nents;
|
2012-06-23 08:48:47 +08:00
|
|
|
struct ahash_edesc *edesc;
|
2016-09-15 21:24:02 +08:00
|
|
|
int ret;
|
2012-06-23 08:48:47 +08:00
|
|
|
|
2017-02-10 20:07:25 +08:00
|
|
|
state->buf_dma = 0;
|
|
|
|
|
crypto: caam - fix DMA API mapping leak
caamhash contains this weird code:
src_nents = sg_count(req->src, req->nbytes);
dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
...
edesc->src_nents = src_nents;
sg_count() returns zero when sg_nents_for_len() returns zero or one.
This means we don't need to use a hardware scatterlist. However,
setting src_nents to zero causes problems when we unmap:
if (edesc->src_nents)
dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
DMA_TO_DEVICE, edesc->chained);
as zero here means that we have no entries to unmap. This causes us
to leak DMA mappings, where we map one scatterlist entry and then
fail to unmap it.
This can be fixed in two ways: either by writing the number of entries
that were requested of dma_map_sg(), or by reworking the "no SG
required" case.
We adopt the re-work solution here - we replace sg_count() with
sg_nents_for_len(), so src_nents now contains the real number of
scatterlist entries, and we then change the test for using the
hardware scatterlist to src_nents > 1 rather than just non-zero.
This change passes my sshd, openssl tests hashing /bin and tcrypt
tests.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2016-08-09 01:04:31 +08:00
	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
				 DMA_FROM_DEVICE);
}
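
/*
 * Usage sketch (illustrative only, not part of this driver): a kernel
 * caller reaches ahash_digest() through the generic crypto API. The
 * "sha256-caam" driver name pins this driver; msg/msg_len and the
 * minimal error handling are assumptions of the example.
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	int err;
 *
 *	tfm = crypto_alloc_ahash("sha256-caam", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	sg_init_one(&sg, msg, msg_len);
 *	ahash_request_set_crypt(req, &sg, digest, msg_len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */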

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}
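
	/*
	 * Worked example (illustrative): for cmac(aes) (blocksize 16) with
	 * an empty buffer and req->nbytes == 48, in_len is 48, *next_buflen
	 * computes to 0 and to_hash to 48. Since that is a whole number of
	 * blocks, the branch above rewrites *next_buflen to 16 and to_hash
	 * to 32, keeping the final block buffered so it is still on hand
	 * when the last message block gets its special final-block
	 * treatment (XCBC/CMAC subkey handling) at finalization.
	 */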

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
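
/*
 * Once ahash_update_no_ctx() has enqueued its first job, a running
 * context exists in state->caam_ctx, so it repoints state->update,
 * state->finup and state->final at the *_ctx variants; only requests
 * whose data is still purely buffered stay on the no-ctx paths.
 */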

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;

}
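
/*
 * With no running context yet, the buffered bytes plus req->src make up
 * the whole message, which is why ahash_finup_no_ctx() reuses the
 * one-shot digest shared descriptor instead of a finalize descriptor.
 */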

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}
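
/*
 * The entry points below only dispatch through the per-request hooks
 * installed by ahash_init() (and rewritten as jobs complete), so each
 * request walks the first -> no-ctx -> ctx state machine without the
 * dispatchers having to know which stage it is in.
 */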

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
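
/*
 * Usage sketch (illustrative only, not part of this driver): the
 * export/import pair lets a caller checkpoint a partial hash and resume
 * it later, possibly on a different request. tfm/req/req2 setup and the
 * synchronous-wait style are assumptions of the example.
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	u8 *partial = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
 *	int err;
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_ahash_init(req), &wait);
 *	err = crypto_wait_req(crypto_ahash_update(req), &wait);
 *	err = crypto_ahash_export(req, partial);
 *	...
 *	err = crypto_ahash_import(req2, partial);
 *	err = crypto_wait_req(crypto_ahash_final(req2), &wait);
 */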

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};
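
/*
 * Each template above is instantiated twice by caam_algapi_hash_init()
 * below: once keyed (hmac_name/hmac_driver_name) and, except for the AES
 * MACs, once unkeyed (name/driver_name). Callers then reach this driver
 * through the generic names ("sha256", "hmac(sha256)") or pin it
 * explicitly via the "-caam" driver names.
 */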

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
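	/*
	 * The bare 32 and 64 above are intentional: MDHA keeps the full
	 * SHA-256 running state for SHA-224 and the full SHA-512 running
	 * state for SHA-384, so the running digest is larger than the
	 * truncated final digest for those two algorithms.
	 */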
	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
						      sh_desc_update);
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		if (priv->era >= 6) {
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					   OP_ALG_ALGSEL_SHIFT];
	}

	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}
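
	/*
	 * The shared descriptors sit contiguously in struct caam_hash_ctx,
	 * so a single dma_map_single_attrs() of the span from sh_desc_update
	 * up to (but not including) the key covers all of them; the
	 * sh_desc_*_dma handles computed below are simply offsets into this
	 * one mapping.
	 */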
	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key) -
					sh_desc_update_offset,
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first) -
					sh_desc_update_offset;
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin) -
					  sh_desc_update_offset;
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest) -
					     sh_desc_update_offset;

	ctx->enginectx.op.do_one_request = ahash_do_one_req;

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key) -
			       offsetof(struct caam_hash_ctx, sh_desc_update),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
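
/*
 * For example (values taken from driver_hash[] above): caam_hash_alloc()
 * with keyed == true turns the sha256 template into cra_name
 * "hmac(sha256)" / cra_driver_name "hmac-sha256-caam", while keyed ==
 * false yields "sha256" / "sha256-caam" with the setkey hook cleared.
 */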

int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}