/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 *                ---------------            ---------------
 *                | JobDesc #1  |----------->| ShareDesc   |
 *                | *(packet 1) |            | (hashKey)   |
 *                ---------------            | (operation) |
 *                                           ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 *                ---------------            ---------------
 *                | JobDesc #2  |----------->| ShareDesc   |
 *                | *(packet 2) |      |---->| (hashKey)   |
 *                ---------------      | |-->| (operation) |
 *                       .             | |   | (load ctx2) |
 *                       .             | |   ---------------
 *                ---------------      | |
 *                | JobDesc #3  |------| |
 *                | *(packet 3) |        |
 *                ---------------        |
 *                       .               |
 *                       .               |
 *                ---------------        |
 *                | JobDesc #4  |--------|
 *                | *(packet 4) |
 *                ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
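
/*
 * Illustrative sketch only (not part of the driver): how a job descriptor
 * with the layout shown above is put together using the descriptor
 * construction helpers from desc_constr.h that this file uses throughout.
 * The names sh_desc_dma, sh_len, dst_dma, src_dma, digestsize and nbytes
 * are hypothetical placeholders for values a caller would already hold.
 */
#if 0
static void example_build_job_desc(u32 *desc, dma_addr_t sh_desc_dma,
                                   int sh_len, dma_addr_t dst_dma,
                                   int digestsize, dma_addr_t src_dma,
                                   int nbytes)
{
        /* Header + ShareDesc pointer */
        init_job_desc_shared(desc, sh_desc_dma, sh_len,
                             HDR_SHARE_DEFER | HDR_REVERSE);
        /* SEQ_OUT_PTR: output buffer and length */
        append_seq_out_ptr(desc, dst_dma, digestsize, 0);
        /* SEQ_IN_PTR: input buffer and length */
        append_seq_in_ptr(desc, src_dma, nbytes, 0);
}
#endif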

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	struct device *jrdev;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
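
/*
 * Minimal sketch (assumed, not shown in this excerpt): how an export
 * routine might snapshot the per-request state into the
 * struct caam_export_state above. ahash_export is a hypothetical name
 * here; only the two structure definitions above are taken as given.
 */
#if 0
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int len = state->current_buf ? state->buflen_1 : state->buflen_0;

	/* Copy the pending partial block, running context and method state */
	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}
#endif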

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg)
{
	dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Only put the buffer in the link table if it contains data; a previously
 * used buffer may still be mapped and must be unmapped first.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash first (init) and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}

/* Digest the key if it is larger than the algorithm block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
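
/*
 * Illustrative only: the extended descriptor is allocated with its link
 * table appended in the same block (sec4_sg is a flexible array member),
 * and completion callbacks recover the edesc from the hw_desc pointer the
 * job ring hands back. sec4_sg_bytes and flags are placeholders; the
 * functions below use exactly these two idioms.
 */
#if 0
edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);

edesc = (struct ahash_edesc *)((char *)desc -
				offsetof(struct ahash_edesc, hw_desc));
#endif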

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes,
				GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto err;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto err;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto err;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;

 err:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto err;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto err;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto err;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto err;

	return -EINPROGRESS;

 err:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto err;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto err;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			       buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto err;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto err;

	return -EINPROGRESS;

 err:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}
	dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
	if (src_nents > 1)
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
	else
		sec4_sg_bytes = 0;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ahash_unmap(jrdev, edesc, req, digestsize);
			kfree(edesc);
			return -ENOMEM;
		}
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc), GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = 0;
	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
|
|
|
|
|
|
|
|
/* submit ahash update if it the first job descriptor after update */
|
|
|
|
static int ahash_update_no_ctx(struct ahash_request *req)
|
|
|
|
{
|
|
|
|
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
|
|
|
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
|
|
|
struct caam_hash_state *state = ahash_request_ctx(req);
|
|
|
|
struct device *jrdev = ctx->jrdev;
|
|
|
|
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
|
|
|
|
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
|
|
|
|
u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
|
|
|
|
int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
|
|
|
|
u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
|
|
|
|
int *next_buflen = state->current_buf ? &state->buflen_0 :
|
|
|
|
&state->buflen_1;
|
|
|
|
int in_len = *buflen + req->nbytes, to_hash;
|
|
|
|
int sec4_sg_bytes, src_nents;
|
|
|
|
struct ahash_edesc *edesc;
|
|
|
|
u32 *desc, *sh_desc = ctx->sh_desc_update_first;
|
|
|
|
dma_addr_t ptr = ctx->sh_desc_update_first_dma;
|
|
|
|
int ret = 0;
|
|
|
|
int sh_len;
|
|
|
|
|
|
|
|
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
|
|
|
|
to_hash = in_len - *next_buflen;
|
|
|
|
|
|
|
|
	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes,
				GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}
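
		/*
		 * Link-table layout for this job: entry 0 describes the
		 * software buffer holding the previous partial block,
		 * entries 1..src_nents describe req->src.
		 */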
		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1);
		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto err;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto err;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto err;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;

err:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
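
/*
 * Note the buf_0/buf_1 ping-pong in ahash_update_no_ctx(): the
 * sub-block tail is copied into next_buf and current_buf is toggled,
 * so the unhashed remainder becomes the active buffer for the next
 * request and a partial block is never sent to the engine.
 */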

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int sh_len;
	int ret = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
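
/*
 * Note that ahash_finup_no_ctx() runs on the digest shared descriptor
 * (ctx->sh_desc_digest): with no running context to restore, the
 * buffered remainder plus the final request data can be hashed in a
 * single pass.
 */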

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
			   &state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}
		dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		if (src_nents > 1)
			sec4_sg_bytes = src_nents *
					sizeof(struct sec4_sg_entry);
		else
			sec4_sg_bytes = 0;

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes,
				GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}
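
		/*
		 * With a single scatterlist entry the source can be
		 * handed to SEQ_IN_PTR directly (options == 0); a
		 * hardware link table is only built for src_nents > 1.
		 */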
		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		if (src_nents > 1) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			edesc->sec4_sg_dma = dma_map_single(jrdev,
							    edesc->sec4_sg,
							    sec4_sg_bytes,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
				dev_err(jrdev, "unable to map S/G table\n");
				ret = -ENOMEM;
				goto err;
			}
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto err;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto err;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;

err:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
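
/*
 * The per-request state is a small function-pointer state machine:
 * ahash_init() points update/finup/final at the "first"/"no_ctx"
 * entry points, and each submission path advances them to the "_ctx"
 * variants once a running context exists.  Assuming every update
 * carries at least one full block, a typical crypto API call sequence
 * dispatches as:
 *
 *   crypto_ahash_init(req)    -> ahash_init
 *   crypto_ahash_update(req)  -> ahash_update_first
 *   crypto_ahash_update(req)  -> ahash_update_ctx
 *   crypto_ahash_final(req)   -> ahash_final_ctx
 */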

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
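
/*
 * Export/import round-trip: only the active buffer is exported, and
 * import always restores it into buf_0 (the memset() leaves
 * current_buf at 0), so state exported from buf_1 resumes from buf_0.
 */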

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};
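
/*
 * Each template below is instantiated twice by caam_hash_alloc(): once
 * keyed (hmac_name/hmac_driver_name) and once unkeyed
 * (name/driver_name), which is why both name pairs are carried here.
 */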

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int ret = 0;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}
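
/*
 * ctx_len thus covers the algorithm's running digest plus the
 * HASH_MSG_LEN message-length field.  Note that SHA-224 and SHA-384
 * carry full SHA-256/SHA-512-sized running state (the literal 32 and
 * 64 in runninglen[] above), not their truncated digest sizes.
 */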

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
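
/*
 * Each unmap above is guarded by both a non-zero and a
 * !dma_mapping_error() check, presumably so that exit remains safe if
 * caam_hash_cra_init() failed partway through shared-descriptor setup.
 */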

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
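
/*
 * Defensive check above: hash_list is zeroed static storage until
 * caam_algapi_hash_init() runs INIT_LIST_HEAD(), so a NULL ->next
 * means there is nothing to unregister.
 */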

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
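
/*
 * Clearing .setkey for the unkeyed variants matters: it keeps the
 * crypto core from treating plain sha1/.../md5 as keyed hashes (only
 * the hmac instantiations should expose ahash_setkey).
 */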

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
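
/*
 * Capability gating above: no MD CHA instance means no hash support at
 * all, and a low-power MD block (CHA_ID_LS_MD_LP256) caps the running
 * digest at SHA-256 size, so the SHA-384/SHA-512 templates (and their
 * hmac variants) are skipped on those parts via the md_limit test.
 */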

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");