Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

   - Fix pointer size when caam is used with an AArch64 boot loader on
     an AArch32 kernel.

   - Fix ahash state corruption in the marvell driver.

   - Fix buggy algif_aead tag handling.

   - Prevent mcryptd from being used with incompatible algorithms which
     can cause crashes"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: algif_aead - fix uninitialized variable warning
  crypto: mcryptd - Check mcryptd algorithm compatibility
  crypto: algif_aead - fix AEAD tag memory handling
  crypto: caam - fix pointer size for AArch64 boot loader, AArch32 kernel
  crypto: marvell - Don't corrupt state of an STD req for re-stepped ahash
  crypto: marvell - Don't copy hash operation twice into the SRAM
commit 045169816b
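For reference, the sizing rule that the algif_aead changes below enforce can be stated on its own: encryption needs an output buffer larger than the input by the tag length, decryption needs one smaller by the tag length, and recvmsg() must not proceed until at least the AAD (plus the tag when decrypting) has been received. The stand-alone sketch below only mirrors that arithmetic; the helper names and the 16-byte AES-GCM-style tag are illustrative assumptions, not kernel API.

/*
 * Illustrative only -- mirrors the sizing rules from the algif_aead fix.
 * The helper names and the 16-byte tag (AES-GCM style) are assumptions,
 * not part of the patch or of the AF_ALG interface.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Minimum data that must have been sent before recvmsg() may proceed:
 * the AAD, plus the tag when decrypting (the tag is part of the input).
 */
static bool aead_input_sufficient(size_t used, size_t assoclen,
				  size_t taglen, bool enc)
{
	return used >= assoclen + (enc ? 0 : taglen);
}

/* Minimum receive buffer: encryption emits the tag, decryption consumes it. */
static size_t aead_output_len(size_t used, size_t taglen, bool enc)
{
	return enc ? used + taglen : used - taglen;
}

int main(void)
{
	size_t assoclen = 32, ptlen = 64, taglen = 16;	/* e.g. AES-GCM */
	size_t used_enc = assoclen + ptlen;		/* AAD + plaintext */
	size_t used_dec = assoclen + ptlen + taglen;	/* AAD + CT + tag  */

	printf("encrypt: sufficient=%d, need %zu bytes of output\n",
	       aead_input_sufficient(used_enc, assoclen, taglen, true),
	       aead_output_len(used_enc, taglen, true));
	printf("decrypt: sufficient=%d, need %zu bytes of output\n",
	       aead_input_sufficient(used_dec, assoclen, taglen, false),
	       aead_output_len(used_dec, taglen, false));
	return 0;
}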
crypto/algif_aead.c
@@ -81,7 +81,11 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx)
 {
 	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
 
-	return ctx->used >= ctx->aead_assoclen + as;
+	/*
+	 * The minimum amount of memory needed for an AEAD cipher is
+	 * the AAD and in case of decryption the tag.
+	 */
+	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
 }
 
 static void aead_reset_ctx(struct aead_ctx *ctx)
@@ -416,7 +420,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
 	int err = -ENOMEM;
 	unsigned long used;
-	size_t outlen;
+	size_t outlen = 0;
 	size_t usedpages = 0;
 
 	lock_sock(sk);
@@ -426,12 +430,15 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 			goto unlock;
 	}
 
-	used = ctx->used;
-	outlen = used;
-
 	if (!aead_sufficient_data(ctx))
 		goto unlock;
 
+	used = ctx->used;
+	if (ctx->enc)
+		outlen = used + as;
+	else
+		outlen = used - as;
+
 	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
 	if (unlikely(!req))
 		goto unlock;
@@ -445,7 +452,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	aead_request_set_ad(req, ctx->aead_assoclen);
 	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				  aead_async_cb, sk);
-	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
+	used -= ctx->aead_assoclen;
 
 	/* take over all tx sgls from ctx */
 	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
@@ -461,7 +468,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	areq->tsgls = sgl->cur;
 
 	/* create rx sgls */
-	while (iov_iter_count(&msg->msg_iter)) {
+	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
 		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
 				      (outlen - usedpages));
 
@@ -491,16 +498,14 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 
 		last_rsgl = rsgl;
 
-		/* we do not need more iovecs as we have sufficient memory */
-		if (outlen <= usedpages)
-			break;
-
 		iov_iter_advance(&msg->msg_iter, err);
 	}
-	err = -EINVAL;
+
 	/* ensure output buffer is sufficiently large */
-	if (usedpages < outlen)
-		goto free;
+	if (usedpages < outlen) {
+		err = -EINVAL;
+		goto unlock;
+	}
 
 	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
 			       areq->iv);
@@ -571,6 +576,7 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 			goto unlock;
 	}
 
+	/* data length provided by caller via sendmsg/sendpage */
 	used = ctx->used;
 
 	/*
@@ -585,16 +591,27 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 	if (!aead_sufficient_data(ctx))
 		goto unlock;
 
-	outlen = used;
+	/*
+	 * Calculate the minimum output buffer size holding the result of the
+	 * cipher operation. When encrypting data, the receiving buffer is
+	 * larger by the tag length compared to the input buffer as the
+	 * encryption operation generates the tag. For decryption, the input
+	 * buffer provides the tag which is consumed resulting in only the
+	 * plaintext without a buffer for the tag returned to the caller.
+	 */
+	if (ctx->enc)
+		outlen = used + as;
+	else
+		outlen = used - as;
 
 	/*
 	 * The cipher operation input data is reduced by the associated data
 	 * length as this data is processed separately later on.
 	 */
-	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
+	used -= ctx->aead_assoclen;
 
 	/* convert iovecs of output buffers into scatterlists */
-	while (iov_iter_count(&msg->msg_iter)) {
+	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
 		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
 				      (outlen - usedpages));
 
@@ -621,16 +638,14 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 
 		last_rsgl = rsgl;
 
-		/* we do not need more iovecs as we have sufficient memory */
-		if (outlen <= usedpages)
-			break;
 		iov_iter_advance(&msg->msg_iter, err);
 	}
 
-	err = -EINVAL;
 	/* ensure output buffer is sufficiently large */
-	if (usedpages < outlen)
+	if (usedpages < outlen) {
+		err = -EINVAL;
 		goto unlock;
+	}
 
 	sg_mark_end(sgl->sg + sgl->cur - 1);
 	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
crypto/mcryptd.c
@@ -254,18 +254,22 @@ out_free_inst:
 	goto out;
 }
 
-static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
+static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
 					  u32 *mask)
 {
 	struct crypto_attr_type *algt;
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
-		return;
-	if ((algt->type & CRYPTO_ALG_INTERNAL))
-		*type |= CRYPTO_ALG_INTERNAL;
-	if ((algt->mask & CRYPTO_ALG_INTERNAL))
-		*mask |= CRYPTO_ALG_INTERNAL;
+		return false;
+
+	*type |= algt->type & CRYPTO_ALG_INTERNAL;
+	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
+
+	if (*type & *mask & CRYPTO_ALG_INTERNAL)
+		return true;
+	else
+		return false;
 }
 
 static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
@@ -492,7 +496,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
 	u32 mask = 0;
 	int err;
 
-	mcryptd_check_internal(tb, &type, &mask);
+	if (!mcryptd_check_internal(tb, &type, &mask))
+		return -EINVAL;
 
 	halg = ahash_attr_alg(tb[1], type, mask);
 	if (IS_ERR(halg))
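As a side note on the mcryptd change above: the reworked mcryptd_check_internal() only lets a template instantiation proceed when CRYPTO_ALG_INTERNAL is requested in both type and mask, otherwise mcryptd_create_hash() now fails with -EINVAL. The sketch below restates that gate outside the kernel purely for illustration; the flag value is assumed to match include/linux/crypto.h, and any single bit would serve for the demonstration.

/*
 * Stand-alone restatement of the compatibility gate added to
 * mcryptd_check_internal(); illustrative only, not kernel code.
 * CRYPTO_ALG_INTERNAL is assumed to be the kernel's 0x00002000.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CRYPTO_ALG_INTERNAL 0x00002000

static bool check_internal(uint32_t algt_type, uint32_t algt_mask,
			   uint32_t *type, uint32_t *mask)
{
	*type |= algt_type & CRYPTO_ALG_INTERNAL;
	*mask |= algt_mask & CRYPTO_ALG_INTERNAL;

	/*
	 * Only proceed when the caller explicitly asked for an internal
	 * (SIMD multibuffer) algorithm in both type and mask.
	 */
	return *type & *mask & CRYPTO_ALG_INTERNAL;
}

int main(void)
{
	uint32_t type = 0, mask = 0;

	/* A plain, non-internal request is now rejected (-EINVAL in the patch). */
	printf("plain request accepted: %d\n",
	       check_internal(0, 0, &type, &mask));

	type = mask = 0;
	printf("internal request accepted: %d\n",
	       check_internal(CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL,
			      &type, &mask));
	return 0;
}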
drivers/crypto/caam/ctrl.c
@@ -558,8 +558,9 @@ static int caam_probe(struct platform_device *pdev)
 	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
 	 * long pointers in master configuration register
 	 */
-	clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
-		      MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+	clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+		      MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
+		      MCFGR_WDENABLE | MCFGR_LARGE_BURST |
 		      (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
 
 	/*
drivers/crypto/marvell/hash.c
@@ -168,12 +168,11 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
 	mv_cesa_adjust_op(engine, &creq->op_tmpl);
 	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
 
-	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
-	for (i = 0; i < digsize / 4; i++)
-		writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
-
-	mv_cesa_adjust_op(engine, &creq->op_tmpl);
-	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
+	if (!sreq->offset) {
+		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+		for (i = 0; i < digsize / 4; i++)
+			writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
+	}
 
 	if (creq->cache_ptr)
 		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,