crypto: marvell - properly handle CRYPTO_TFM_REQ_MAY_BACKLOG-flagged requests

The mv_cesa_queue_req() function calls crypto_enqueue_request() to
enqueue a request. In the normal case (i.e. the queue isn't full), this
function returns -EINPROGRESS. The current Marvell CESA crypto driver
takes this into account and cleans up the request only if an error
occurred, i.e. if the return value is not -EINPROGRESS.
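
(Before this change, every call site of mv_cesa_queue_req() followed the
pattern below; this is a simplified sketch based on the ablkcipher call
sites in the diff, with the surrounding code elided.)

	ret = mv_cesa_queue_req(&req->base);
	if (ret && ret != -EINPROGRESS)
		mv_cesa_ablkcipher_cleanup(req);

	return ret;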

Unfortunately this causes problems with
CRYPTO_TFM_REQ_MAY_BACKLOG-flagged requests. When such a request is
passed to crypto_enqueue_request() and the queue is full,
crypto_enqueue_request() returns -EBUSY but keeps the request enqueued
on the backlog nonetheless. The Marvell CESA driver did not handle this
situation properly: it cleaned up the request anyway. When the request
was later taken off the backlog and actually processed, the kernel
crashed because the driver's internal data structures for this request
had already been cleaned up.
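
(Concretely, the failing sequence looks roughly like this; a sketch for
illustration, not literal driver code.)

	ret = mv_cesa_queue_req(&req->base);	 /* queue full + MAY_BACKLOG:
						  * returns -EBUSY, but the
						  * request stays on the
						  * backlog */
	if (ret && ret != -EINPROGRESS)		 /* -EBUSY looks like an error */
		mv_cesa_ablkcipher_cleanup(req); /* per-request state freed */
	/* backlogged request is processed later -> crash on the freed state */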

To avoid this situation, this commit adds a
mv_cesa_req_needs_cleanup() helper function which indicates whether a
request needs to be cleaned up after a call to
crypto_enqueue_request(). With this helper, the cleanup is done only in
the appropriate cases, and all call sites of mv_cesa_queue_req() are
updated to use it.
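
(With the helper, the call sites reduce to the pattern below, taken from
the diff that follows; mv_cesa_ahash_update() additionally drops its
early return so that both paths fall through to the final return.)

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;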

Reported-by: Vincent Donnefort <vdonnefort@gmail.com>
Fixes: db509a4533 ("crypto: marvell/cesa - add TDMA support")
Cc: <stable@vger.kernel.org> # v4.2+
Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Tested-by: Vincent Donnefort <vdonnefort@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Authored by Thomas Petazzoni on 2015-09-18 17:25:36 +02:00; committed by Herbert Xu
parent 84cba178a3
commit cfcd2271a9
3 changed files with 33 additions and 9 deletions

drivers/crypto/marvell/cesa.h

@@ -687,6 +687,33 @@ static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine)
 int mv_cesa_queue_req(struct crypto_async_request *req);
 
+/*
+ * Helper function that indicates whether a crypto request needs to be
+ * cleaned up or not after being enqueued using mv_cesa_queue_req().
+ */
+static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
+					    int ret)
+{
+	/*
+	 * The queue still had some space, the request was queued
+	 * normally, so there's no need to clean it up.
+	 */
+	if (ret == -EINPROGRESS)
+		return false;
+
+	/*
+	 * The queue had not space left, but since the request is
+	 * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
+	 * the backlog and will be processed later. There's no need to
+	 * clean it up.
+	 */
+	if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+		return false;
+
+	/* Request wasn't queued, we need to clean it up */
+	return true;
+}
+
 /* TDMA functions */
 static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,

drivers/crypto/marvell/cipher.c

@@ -189,7 +189,6 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
 {
 	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
 	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
-
 	creq->req.base.engine = engine;
 
 	if (creq->req.base.type == CESA_DMA_REQ)
@@ -431,7 +430,7 @@ static int mv_cesa_des_op(struct ablkcipher_request *req,
 		return ret;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ablkcipher_cleanup(req);
 
 	return ret;

@@ -551,7 +550,7 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,
 		return ret;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ablkcipher_cleanup(req);
 
 	return ret;

@@ -693,7 +692,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
 		return ret;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ablkcipher_cleanup(req);
 
 	return ret;

drivers/crypto/marvell/hash.c

@@ -739,10 +739,8 @@ static int mv_cesa_ahash_update(struct ahash_request *req)
 		return 0;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS) {
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ahash_cleanup(req);
-		return ret;
-	}
 
 	return ret;
 }

@@ -766,7 +764,7 @@ static int mv_cesa_ahash_final(struct ahash_request *req)
 		return 0;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ahash_cleanup(req);
 
 	return ret;

@@ -791,7 +789,7 @@ static int mv_cesa_ahash_finup(struct ahash_request *req)
 		return 0;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ahash_cleanup(req);
 
 	return ret;