crypto: caam - refactor ahash_edesc_alloc

Change the parameters of the ahash_edesc_alloc() function:
- remove flags, since they can be computed in
ahash_edesc_alloc(), the only place they are needed;
- use ahash_request instead of caam_hash_ctx, to be
able to compute the gfp flags.

Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
Reviewed-by: Horia Geanta <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent c3f7394eb9
commit 2ba1e79831
1 changed file with 22 additions and 40 deletions
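For orientation before reading the diff: the refactor moves the gfp-flags
computation into ahash_edesc_alloc() itself, so callers only pass the
request and the shared descriptor. A minimal sketch of the new helper,
showing only what the first hunk below adds (the actual allocation and
scatter-table setup are elided):

    static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
                                                 int sg_num, u32 *sh_desc,
                                                 dma_addr_t sh_desc_dma)
    {
            struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
            struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
            /* Requests flagged as allowed to sleep may use GFP_KERNEL;
             * everything else must not block, so fall back to GFP_ATOMIC.
             */
            gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                           GFP_KERNEL : GFP_ATOMIC;
            ...
    }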


@@ -661,11 +661,14 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
  * Allocate an enhanced descriptor, which contains the hardware descriptor
  * and space for hardware scatter table containing sg_num entries.
  */
-static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
+static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
                                              int sg_num, u32 *sh_desc,
-                                             dma_addr_t sh_desc_dma,
-                                             gfp_t flags)
+                                             dma_addr_t sh_desc_dma)
 {
+        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+                       GFP_KERNEL : GFP_ATOMIC;
         struct ahash_edesc *edesc;
         unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
@@ -724,8 +727,6 @@ static int ahash_update_ctx(struct ahash_request *req)
         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
         struct caam_hash_state *state = ahash_request_ctx(req);
         struct device *jrdev = ctx->jrdev;
-        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-                       GFP_KERNEL : GFP_ATOMIC;
         u8 *buf = state->buf;
         int *buflen = &state->buflen;
         int *next_buflen = &state->next_buflen;
@@ -779,8 +780,8 @@ static int ahash_update_ctx(struct ahash_request *req)
                  * allocate space for base edesc and hw desc commands,
                  * link tables
                  */
-                edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
-                                          ctx->sh_desc_update_dma, flags);
+                edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
+                                          ctx->sh_desc_update_dma);
                 if (!edesc) {
                         dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                         return -ENOMEM;
@@ -854,8 +855,6 @@ static int ahash_final_ctx(struct ahash_request *req)
         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
         struct caam_hash_state *state = ahash_request_ctx(req);
         struct device *jrdev = ctx->jrdev;
-        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-                       GFP_KERNEL : GFP_ATOMIC;
         int buflen = state->buflen;
         u32 *desc;
         int sec4_sg_bytes;
@@ -867,8 +866,8 @@ static int ahash_final_ctx(struct ahash_request *req)
                         sizeof(struct sec4_sg_entry);
         /* allocate space for base edesc and hw desc commands, link tables */
-        edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
-                                  ctx->sh_desc_fin_dma, flags);
+        edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
+                                  ctx->sh_desc_fin_dma);
         if (!edesc)
                 return -ENOMEM;
@@ -920,8 +919,6 @@ static int ahash_finup_ctx(struct ahash_request *req)
         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
         struct caam_hash_state *state = ahash_request_ctx(req);
         struct device *jrdev = ctx->jrdev;
-        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-                       GFP_KERNEL : GFP_ATOMIC;
         int buflen = state->buflen;
         u32 *desc;
         int sec4_sg_src_index;
@@ -950,9 +947,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
         sec4_sg_src_index = 1 + (buflen ? 1 : 0);
         /* allocate space for base edesc and hw desc commands, link tables */
-        edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
-                                  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
-                                  flags);
+        edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
+                                  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
         if (!edesc) {
                 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                 return -ENOMEM;
@@ -1000,8 +996,6 @@ static int ahash_digest(struct ahash_request *req)
         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
         struct caam_hash_state *state = ahash_request_ctx(req);
         struct device *jrdev = ctx->jrdev;
-        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-                       GFP_KERNEL : GFP_ATOMIC;
         u32 *desc;
         int digestsize = crypto_ahash_digestsize(ahash);
         int src_nents, mapped_nents;
@@ -1028,9 +1022,8 @@ static int ahash_digest(struct ahash_request *req)
         }
         /* allocate space for base edesc and hw desc commands, link tables */
-        edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
-                                  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
-                                  flags);
+        edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
+                                  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
         if (!edesc) {
                 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                 return -ENOMEM;
@@ -1077,8 +1070,6 @@ static int ahash_final_no_ctx(struct ahash_request *req)
         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
         struct caam_hash_state *state = ahash_request_ctx(req);
         struct device *jrdev = ctx->jrdev;
-        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-                       GFP_KERNEL : GFP_ATOMIC;
         u8 *buf = state->buf;
         int buflen = state->buflen;
         u32 *desc;
@@ -1087,8 +1078,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
         int ret;
         /* allocate space for base edesc and hw desc commands, link tables */
-        edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
-                                  ctx->sh_desc_digest_dma, flags);
+        edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
+                                  ctx->sh_desc_digest_dma);
         if (!edesc)
                 return -ENOMEM;
@@ -1136,8 +1127,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
         struct caam_hash_state *state = ahash_request_ctx(req);
         struct device *jrdev = ctx->jrdev;
-        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-                       GFP_KERNEL : GFP_ATOMIC;
         u8 *buf = state->buf;
         int *buflen = &state->buflen;
         int *next_buflen = &state->next_buflen;
@@ -1190,10 +1179,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                  * allocate space for base edesc and hw desc commands,
                  * link tables
                  */
-                edesc = ahash_edesc_alloc(ctx, pad_nents,
+                edesc = ahash_edesc_alloc(req, pad_nents,
                                           ctx->sh_desc_update_first,
-                                          ctx->sh_desc_update_first_dma,
-                                          flags);
+                                          ctx->sh_desc_update_first_dma);
                 if (!edesc) {
                         dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                         return -ENOMEM;
@@ -1261,8 +1249,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
         struct caam_hash_state *state = ahash_request_ctx(req);
         struct device *jrdev = ctx->jrdev;
-        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-                       GFP_KERNEL : GFP_ATOMIC;
         int buflen = state->buflen;
         u32 *desc;
         int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
@@ -1292,9 +1278,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
                         sizeof(struct sec4_sg_entry);
         /* allocate space for base edesc and hw desc commands, link tables */
-        edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
-                                  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
-                                  flags);
+        edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
+                                  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
         if (!edesc) {
                 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                 return -ENOMEM;
@@ -1347,8 +1332,6 @@ static int ahash_update_first(struct ahash_request *req)
         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
         struct caam_hash_state *state = ahash_request_ctx(req);
         struct device *jrdev = ctx->jrdev;
-        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-                       GFP_KERNEL : GFP_ATOMIC;
         u8 *buf = state->buf;
         int *buflen = &state->buflen;
         int *next_buflen = &state->next_buflen;
@@ -1396,11 +1379,10 @@ static int ahash_update_first(struct ahash_request *req)
                  * allocate space for base edesc and hw desc commands,
                  * link tables
                  */
-                edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
+                edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
                                           mapped_nents : 0,
                                           ctx->sh_desc_update_first,
-                                          ctx->sh_desc_update_first_dma,
-                                          flags);
+                                          ctx->sh_desc_update_first_dma);
                 if (!edesc) {
                         dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                         return -ENOMEM;
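With the flags derived inside the helper, each of the eight ahash entry points
above drops its local gfp_t computation, and a call site needs only the request
plus the shared-descriptor pair, e.g. (taken from the ahash_final_ctx() hunk):

    edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin, ctx->sh_desc_fin_dma);

Keeping the CRYPTO_TFM_REQ_MAY_SLEEP check in one place is what accounts for the
22 additions against 40 deletions in this patch.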