crypto: sun4i-ss - do not allocate backup IV on requests
Instead of allocating memory on each request, it is easier to pre-allocate a buffer for the backup IV. This makes the error path simpler. Signed-off-by: Corentin Labbe <clabbe@baylibre.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent
22f7c2f8cf
commit
22d03a0aad
|
@ -20,7 +20,6 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
|
|||
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
|
||||
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
|
||||
u32 mode = ctx->mode;
|
||||
void *backup_iv = NULL;
|
||||
/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
|
||||
u32 rx_cnt = SS_RX_DEFAULT;
|
||||
u32 tx_cnt = 0;
|
||||
|
@ -48,10 +47,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
|
|||
}
|
||||
|
||||
if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
|
||||
backup_iv = kzalloc(ivsize, GFP_KERNEL);
|
||||
if (!backup_iv)
|
||||
return -ENOMEM;
|
||||
scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
|
||||
scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
|
||||
areq->cryptlen - ivsize, ivsize, 0);
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
|
||||
|
@ -134,8 +131,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
|
|||
|
||||
if (areq->iv) {
|
||||
if (mode & SS_DECRYPTION) {
|
||||
memcpy(areq->iv, backup_iv, ivsize);
|
||||
kfree_sensitive(backup_iv);
|
||||
memcpy(areq->iv, ctx->backup_iv, ivsize);
|
||||
memzero_explicit(ctx->backup_iv, ivsize);
|
||||
} else {
|
||||
scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
|
||||
ivsize, 0);
|
||||
|
@ -199,7 +196,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
|||
unsigned int ileft = areq->cryptlen;
|
||||
unsigned int oleft = areq->cryptlen;
|
||||
unsigned int todo;
|
||||
void *backup_iv = NULL;
|
||||
struct sg_mapping_iter mi, mo;
|
||||
unsigned long pi = 0, po = 0; /* progress for in and out */
|
||||
bool miter_err;
|
||||
|
@ -244,10 +240,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
|||
return sun4i_ss_cipher_poll_fallback(areq);
|
||||
|
||||
if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
|
||||
backup_iv = kzalloc(ivsize, GFP_KERNEL);
|
||||
if (!backup_iv)
|
||||
return -ENOMEM;
|
||||
scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
|
||||
scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
|
||||
areq->cryptlen - ivsize, ivsize, 0);
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
|
||||
|
@ -384,8 +378,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
|||
}
|
||||
if (areq->iv) {
|
||||
if (mode & SS_DECRYPTION) {
|
||||
memcpy(areq->iv, backup_iv, ivsize);
|
||||
kfree_sensitive(backup_iv);
|
||||
memcpy(areq->iv, ctx->backup_iv, ivsize);
|
||||
memzero_explicit(ctx->backup_iv, ivsize);
|
||||
} else {
|
||||
scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
|
||||
ivsize, 0);
|
||||
|
|
|
@ -183,6 +183,7 @@ struct sun4i_tfm_ctx {
|
|||
|
||||
struct sun4i_cipher_req_ctx {
|
||||
u32 mode;
|
||||
u8 backup_iv[AES_BLOCK_SIZE];
|
||||
struct skcipher_request fallback_req; // keep at the end
|
||||
};
|
||||
|
||||
|
|
Loading…
Reference in New Issue