crypto: caam/qi2 - fix DMA mapping of stack memory

Commits c19650d6ea ("crypto: caam - fix DMA mapping of stack memory") and
65055e2108 ("crypto: caam - fix hash context DMA unmap size") fixed the
ahash implementation in the caam/jr driver such that req->result is not
DMA-mapped (since it's not guaranteed to be DMA-able).

Apply a similar fix for the ahash implementation in the caam/qi2 driver.

Cc: <stable@vger.kernel.org> # v4.20+
Fixes: 3f16f6c9d6 ("crypto: caam/qi2 - add support for ahash algorithms")
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 07586d3ddf
commit 5965dc7452
drivers/crypto/caam/caamalg_qi2.c
@@ -2894,6 +2894,7 @@ struct caam_hash_state {
         struct caam_request caam_req;
         dma_addr_t buf_dma;
         dma_addr_t ctx_dma;
+        int ctx_dma_len;
         u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
         int buflen_0;
         u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
@@ -2967,6 +2968,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev,
                                    struct caam_hash_state *state, int ctx_len,
                                    struct dpaa2_sg_entry *qm_sg, u32 flag)
 {
+        state->ctx_dma_len = ctx_len;
         state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
         if (dma_mapping_error(dev, state->ctx_dma)) {
                 dev_err(dev, "unable to map ctx\n");
@@ -3205,14 +3207,12 @@ bad_free_key:
 }
 
 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
-                               struct ahash_request *req, int dst_len)
+                               struct ahash_request *req)
 {
         struct caam_hash_state *state = ahash_request_ctx(req);
 
         if (edesc->src_nents)
                 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
-        if (edesc->dst_dma)
-                dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
 
         if (edesc->qm_sg_bytes)
                 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
@@ -3227,18 +3227,15 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
 
 static inline void ahash_unmap_ctx(struct device *dev,
                                    struct ahash_edesc *edesc,
-                                   struct ahash_request *req, int dst_len,
-                                   u32 flag)
+                                   struct ahash_request *req, u32 flag)
 {
-        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
         struct caam_hash_state *state = ahash_request_ctx(req);
 
         if (state->ctx_dma) {
-                dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+                dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
                 state->ctx_dma = 0;
         }
-        ahash_unmap(dev, edesc, req, dst_len);
+        ahash_unmap(dev, edesc, req);
 }
 
 static void ahash_done(void *cbk_ctx, u32 status)
@@ -3259,16 +3256,13 @@ static void ahash_done(void *cbk_ctx, u32 status)
                 ecode = -EIO;
         }
 
-        ahash_unmap(ctx->dev, edesc, req, digestsize);
+        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+        memcpy(req->result, state->caam_ctx, digestsize);
         qi_cache_free(edesc);
 
         print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
                              DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                              ctx->ctx_len, 1);
-        if (req->result)
-                print_hex_dump_debug("result@" __stringify(__LINE__)": ",
-                                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-                                     digestsize, 1);
 
         req->base.complete(&req->base, ecode);
 }
@@ -3290,7 +3284,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
                 ecode = -EIO;
         }
 
-        ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
         switch_buf(state);
         qi_cache_free(edesc);
 
@@ -3323,16 +3317,13 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
                 ecode = -EIO;
         }
 
-        ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+        memcpy(req->result, state->caam_ctx, digestsize);
         qi_cache_free(edesc);
 
         print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
                              DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                              ctx->ctx_len, 1);
-        if (req->result)
-                print_hex_dump_debug("result@" __stringify(__LINE__)": ",
-                                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-                                     digestsize, 1);
 
         req->base.complete(&req->base, ecode);
 }
@@ -3354,7 +3345,7 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
                 ecode = -EIO;
         }
 
-        ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
         switch_buf(state);
         qi_cache_free(edesc);
 
@@ -3492,7 +3483,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 
         return ret;
 unmap_ctx:
-        ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
         qi_cache_free(edesc);
         return ret;
 }
@@ -3524,7 +3515,7 @@ static int ahash_final_ctx(struct ahash_request *req)
         sg_table = &edesc->sgt[0];
 
         ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
-                               DMA_TO_DEVICE);
+                               DMA_BIDIRECTIONAL);
         if (ret)
                 goto unmap_ctx;
 
@@ -3543,22 +3534,13 @@ static int ahash_final_ctx(struct ahash_request *req)
         }
         edesc->qm_sg_bytes = qm_sg_bytes;
 
-        edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
-                                        DMA_FROM_DEVICE);
-        if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-                dev_err(ctx->dev, "unable to map dst\n");
-                edesc->dst_dma = 0;
-                ret = -ENOMEM;
-                goto unmap_ctx;
-        }
-
         memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
         dpaa2_fl_set_final(in_fle, true);
         dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
         dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
         dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
         dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-        dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+        dpaa2_fl_set_addr(out_fle, state->ctx_dma);
         dpaa2_fl_set_len(out_fle, digestsize);
 
         req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3573,7 +3555,7 @@ static int ahash_final_ctx(struct ahash_request *req)
         return ret;
 
 unmap_ctx:
-        ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
         qi_cache_free(edesc);
         return ret;
 }
@@ -3626,7 +3608,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
         sg_table = &edesc->sgt[0];
 
         ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
-                               DMA_TO_DEVICE);
+                               DMA_BIDIRECTIONAL);
         if (ret)
                 goto unmap_ctx;
 
@@ -3645,22 +3627,13 @@ static int ahash_finup_ctx(struct ahash_request *req)
         }
         edesc->qm_sg_bytes = qm_sg_bytes;
 
-        edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
-                                        DMA_FROM_DEVICE);
-        if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-                dev_err(ctx->dev, "unable to map dst\n");
-                edesc->dst_dma = 0;
-                ret = -ENOMEM;
-                goto unmap_ctx;
-        }
-
         memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
         dpaa2_fl_set_final(in_fle, true);
         dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
         dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
         dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
         dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-        dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+        dpaa2_fl_set_addr(out_fle, state->ctx_dma);
         dpaa2_fl_set_len(out_fle, digestsize);
 
         req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3675,7 +3648,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
         return ret;
 
 unmap_ctx:
-        ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
         qi_cache_free(edesc);
         return ret;
 }
@@ -3744,18 +3717,19 @@ static int ahash_digest(struct ahash_request *req)
                 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
         }
 
-        edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+        state->ctx_dma_len = digestsize;
+        state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
                                         DMA_FROM_DEVICE);
-        if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-                dev_err(ctx->dev, "unable to map dst\n");
-                edesc->dst_dma = 0;
+        if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+                dev_err(ctx->dev, "unable to map ctx\n");
+                state->ctx_dma = 0;
                 goto unmap;
         }
 
         dpaa2_fl_set_final(in_fle, true);
         dpaa2_fl_set_len(in_fle, req->nbytes);
         dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-        dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+        dpaa2_fl_set_addr(out_fle, state->ctx_dma);
         dpaa2_fl_set_len(out_fle, digestsize);
 
         req_ctx->flc = &ctx->flc[DIGEST];
@@ -3769,7 +3743,7 @@ static int ahash_digest(struct ahash_request *req)
         return ret;
 
 unmap:
-        ahash_unmap(ctx->dev, edesc, req, digestsize);
+        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
         qi_cache_free(edesc);
         return ret;
 }
@@ -3804,11 +3778,12 @@ static int ahash_final_no_ctx(struct ahash_request *req)
                 }
         }
 
-        edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+        state->ctx_dma_len = digestsize;
+        state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
                                         DMA_FROM_DEVICE);
-        if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-                dev_err(ctx->dev, "unable to map dst\n");
-                edesc->dst_dma = 0;
+        if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+                dev_err(ctx->dev, "unable to map ctx\n");
+                state->ctx_dma = 0;
                 goto unmap;
         }
 
@@ -3826,7 +3801,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
                 dpaa2_fl_set_len(in_fle, buflen);
         }
         dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-        dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+        dpaa2_fl_set_addr(out_fle, state->ctx_dma);
         dpaa2_fl_set_len(out_fle, digestsize);
 
         req_ctx->flc = &ctx->flc[DIGEST];
@@ -3841,7 +3816,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
         return ret;
 
 unmap:
-        ahash_unmap(ctx->dev, edesc, req, digestsize);
+        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
         qi_cache_free(edesc);
         return ret;
 }
@@ -3921,6 +3896,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                 }
                 edesc->qm_sg_bytes = qm_sg_bytes;
 
+                state->ctx_dma_len = ctx->ctx_len;
                 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
                                                 ctx->ctx_len, DMA_FROM_DEVICE);
                 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -3969,7 +3945,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 
         return ret;
 unmap_ctx:
-        ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
         qi_cache_free(edesc);
         return ret;
 }
@@ -4034,11 +4010,12 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
         }
         edesc->qm_sg_bytes = qm_sg_bytes;
 
-        edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+        state->ctx_dma_len = digestsize;
+        state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
                                         DMA_FROM_DEVICE);
-        if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-                dev_err(ctx->dev, "unable to map dst\n");
-                edesc->dst_dma = 0;
+        if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+                dev_err(ctx->dev, "unable to map ctx\n");
+                state->ctx_dma = 0;
                 ret = -ENOMEM;
                 goto unmap;
         }
@@ -4049,7 +4026,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
         dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
         dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
         dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-        dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+        dpaa2_fl_set_addr(out_fle, state->ctx_dma);
         dpaa2_fl_set_len(out_fle, digestsize);
 
         req_ctx->flc = &ctx->flc[DIGEST];
@@ -4064,7 +4041,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 
         return ret;
 unmap:
-        ahash_unmap(ctx->dev, edesc, req, digestsize);
+        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
         qi_cache_free(edesc);
         return -ENOMEM;
 }
@@ -4151,6 +4128,7 @@ static int ahash_update_first(struct ahash_request *req)
                         scatterwalk_map_and_copy(next_buf, req->src, to_hash,
                                                  *next_buflen, 0);
 
+                state->ctx_dma_len = ctx->ctx_len;
                 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
                                                 ctx->ctx_len, DMA_FROM_DEVICE);
                 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -4194,7 +4172,7 @@ static int ahash_update_first(struct ahash_request *req)
 
         return ret;
 unmap_ctx:
-        ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
         qi_cache_free(edesc);
         return ret;
 }
@@ -4213,6 +4191,7 @@ static int ahash_init(struct ahash_request *req)
         state->final = ahash_final_no_ctx;
 
         state->ctx_dma = 0;
+        state->ctx_dma_len = 0;
         state->current_buf = 0;
         state->buf_dma = 0;
         state->buflen_0 = 0;
drivers/crypto/caam/caamalg_qi2.h
@@ -162,14 +162,12 @@ struct skcipher_edesc {
 
 /*
  * ahash_edesc - s/w-extended ahash descriptor
- * @dst_dma: I/O virtual address of req->result
  * @qm_sg_dma: I/O virtual address of h/w link table
  * @src_nents: number of segments in input scatterlist
  * @qm_sg_bytes: length of dma mapped qm_sg space
  * @sgt: pointer to h/w link table
  */
 struct ahash_edesc {
-        dma_addr_t dst_dma;
         dma_addr_t qm_sg_dma;
         int src_nents;
         int qm_sg_bytes;