crypto: inside-secure - keep the requests push/pop synced

This patch updates the Inside Secure SafeXcel driver to avoid being
out-of-sync between the number of requests sent and the one being
completed.

The number of requests acknowledged by the driver can be different from
the threshold that was configured if new requests were being pushed to
the h/w in the meantime. The driver wasn't taking those into account,
and the number of remaining requests to be handled (to reconfigure the
interrupt threshold) could be out-of-sync.

This patch fixes it by not taking into account the number of requests
left, but by taking into account the total number of requests being sent
to the hardware, so that new requests are being taken into account.

Fixes: dc7e28a328 ("crypto: inside-secure - dequeue all requests at once")
Suggested-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Antoine Tenart 2018-02-13 09:26:56 +01:00 committed by Herbert Xu
parent b7007dbccd
commit f7268c538b
2 changed files with 15 additions and 19 deletions

View File

@ -432,20 +432,18 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
}
/* Called with ring's lock taken */
static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
int ring, int reqs)
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
int ring)
{
int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);
int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
if (!coal)
return 0;
return;
/* Configure when we want an interrupt */
writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
return coal;
}
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
@ -521,13 +519,13 @@ finalize:
spin_lock_bh(&priv->ring[ring].egress_lock);
priv->ring[ring].requests += nreq;
if (!priv->ring[ring].busy) {
nreq -= safexcel_try_push_requests(priv, ring, nreq);
safexcel_try_push_requests(priv, ring);
priv->ring[ring].busy = true;
}
priv->ring[ring].requests_left += nreq;
spin_unlock_bh(&priv->ring[ring].egress_lock);
/* let the RDR know we have pending descriptors */
@ -631,7 +629,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
{
struct safexcel_request *sreq;
struct safexcel_context *ctx;
int ret, i, nreq, ndesc, tot_descs, done;
int ret, i, nreq, ndesc, tot_descs, handled = 0;
bool should_complete;
handle_results:
@ -667,6 +665,7 @@ handle_results:
kfree(sreq);
tot_descs += ndesc;
handled++;
}
acknowledge:
@ -685,11 +684,10 @@ acknowledge:
requests_left:
spin_lock_bh(&priv->ring[ring].egress_lock);
done = safexcel_try_push_requests(priv, ring,
priv->ring[ring].requests_left);
priv->ring[ring].requests -= handled;
safexcel_try_push_requests(priv, ring);
priv->ring[ring].requests_left -= done;
if (!done && !priv->ring[ring].requests_left)
if (!priv->ring[ring].requests)
priv->ring[ring].busy = false;
spin_unlock_bh(&priv->ring[ring].egress_lock);
@ -970,7 +968,7 @@ static int safexcel_probe(struct platform_device *pdev)
goto err_clk;
}
priv->ring[i].requests_left = 0;
priv->ring[i].requests = 0;
priv->ring[i].busy = false;
crypto_init_queue(&priv->ring[i].queue,

View File

@ -551,10 +551,8 @@ struct safexcel_crypto_priv {
struct crypto_queue queue;
spinlock_t queue_lock;
/* Number of requests in the engine that needs the threshold
* interrupt to be set up.
*/
int requests_left;
/* Number of requests in the engine. */
int requests;
/* The ring is currently handling at least one request */
bool busy;