crypto: ccree - don't map AEAD key and IV on stack
The AEAD authenc key and IVs might be passed to us on the stack. Copy them
to a slab buffer before mapping to guarantee a proper DMA mapping.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: stable@vger.kernel.org # v4.19+
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit e8662a6a5f
parent 874e163759
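Why this matters: dma_map_single() requires an address in the kernel's linear mapping, but with CONFIG_VMAP_STACK the stack is vmalloc()-backed, so DMA-mapping a caller's stack buffer is invalid. The sketch below shows the fix pattern in isolation; it is illustrative rather than ccree code, and map_user_buf() is a hypothetical name for what the patch open-codes in cc_get_plain_hmac_key() and cc_aead_chain_iv().

/*
 * Minimal sketch of the fix pattern (illustrative, not ccree code;
 * map_user_buf() is a hypothetical helper name). dma_map_single()
 * needs an address in the kernel linear mapping; a stack buffer under
 * CONFIG_VMAP_STACK is vmalloc()-backed and must not be mapped directly.
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

static int map_user_buf(struct device *dev, const u8 *src, unsigned int len,
			u8 **copy, dma_addr_t *dma)
{
	/* Duplicate into a slab buffer, which lives in the linear map. */
	*copy = kmemdup(src, len, GFP_KERNEL);
	if (!*copy)
		return -ENOMEM;

	*dma = dma_map_single(dev, *copy, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kzfree(*copy);	/* zeroize: the copy may hold key material */
		*copy = NULL;
		return -ENOMEM;
	}
	return 0;
}

The unmap path is symmetric: dma_unmap_single() followed by kzfree() of the copy, as the hunks below do for both the authenc key and the IV.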
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -424,7 +424,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
 /* This function prepers the user key so it can pass to the hmac processing
  * (copy to intenral buffer or hash in case of key longer than block
  */
-static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
 				 unsigned int keylen)
 {
 	dma_addr_t key_dma_addr = 0;
@@ -437,6 +437,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
 	unsigned int hashmode;
 	unsigned int idx = 0;
 	int rc = 0;
+	u8 *key = NULL;
 	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 	dma_addr_t padded_authkey_dma_addr =
 		ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -455,11 +456,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
 	}
 
 	if (keylen != 0) {
+
+		key = kmemdup(authkey, keylen, GFP_KERNEL);
+		if (!key)
+			return -ENOMEM;
+
 		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
 					      DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, key_dma_addr)) {
 			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 				key, keylen);
+			kzfree(key);
 			return -ENOMEM;
 		}
 		if (keylen > blocksize) {
@@ -542,6 +549,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
 	if (key_dma_addr)
 		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
 
+	kzfree(key);
+
 	return rc;
 }
 
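Note that both the error path and the common exit release the duplicated key with kzfree() rather than kfree(), so the copied key material is zeroized before the slab memory is returned to the allocator. (In later kernels kzfree() was renamed kfree_sensitive().)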
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -557,6 +557,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 	if (areq_ctx->gen_ctx.iv_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 				 hw_iv_size, DMA_BIDIRECTIONAL);
+		kzfree(areq_ctx->gen_ctx.iv);
 	}
 
 	/* Release pool */
@@ -607,19 +608,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 	struct device *dev = drvdata_to_dev(drvdata);
+	gfp_t flags = cc_gfp_flags(&req->base);
 	int rc = 0;
 
 	if (!req->iv) {
 		areq_ctx->gen_ctx.iv_dma_addr = 0;
+		areq_ctx->gen_ctx.iv = NULL;
 		goto chain_iv_exit;
 	}
 
-	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
-						       hw_iv_size,
-						       DMA_BIDIRECTIONAL);
+	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
+	if (!areq_ctx->gen_ctx.iv)
+		return -ENOMEM;
+
+	areq_ctx->gen_ctx.iv_dma_addr =
+		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
+			       DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 			hw_iv_size, req->iv);
+		kzfree(areq_ctx->gen_ctx.iv);
+		areq_ctx->gen_ctx.iv = NULL;
 		rc = -ENOMEM;
 		goto chain_iv_exit;
 	}
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -199,6 +199,7 @@ struct cc_alg_template {
 
 struct async_gen_req_ctx {
 	dma_addr_t iv_dma_addr;
+	u8 *iv;
 	enum drv_crypto_direction op_type;
 };
 
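For context, the sketch below (a hypothetical caller, not part of this patch) shows how an IV can legitimately reach the driver as stack memory through the normal AEAD API, which is why cc_aead_chain_iv() cannot assume req->iv is a valid DMA target:

/* Hypothetical caller (illustrative only): the IV is a local array,
 * i.e. stack memory, handed to the driver via req->iv. The request is
 * waited on synchronously, so the buffer stays alive for the duration,
 * but it is still not safe to DMA-map directly. */
#include <crypto/aead.h>
#include <linux/scatterlist.h>

static int encrypt_one(struct aead_request *req, struct scatterlist *src,
		       struct scatterlist *dst, unsigned int cryptlen,
		       unsigned int assoclen)
{
	DECLARE_CRYPTO_WAIT(wait);
	u8 iv[16] = {};		/* on-stack IV */

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);
	aead_request_set_ad(req, assoclen);
	return crypto_wait_req(crypto_aead_encrypt(req), &wait);
}

With the new iv field in async_gen_req_ctx, the kmemdup() in cc_aead_chain_iv() and the kzfree() in cc_unmap_aead_request() pair up across the request lifetime, so the duplicated IV is released (and zeroized) exactly once per request.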