crypto: inside-secure - keep the requests push/pop synced
This patch updates the Inside Secure SafeXcel driver to keep the number
of requests pushed to the hardware and the number of requests completed
in sync.
The number of requests acknowledged by the driver can differ from the
configured threshold if new requests were pushed to the h/w in the
meantime. The driver wasn't taking those into account, so the number of
remaining requests to handle (used to reconfigure the interrupt
threshold) could get out of sync.
This patch fixes it by tracking the total number of requests in flight
in the hardware, rather than the number of requests left to
acknowledge, so that requests pushed while others complete are always
accounted for.
Fixes: dc7e28a328 ("crypto: inside-secure - dequeue all requests at once")
Suggested-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
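
[Editor's note: to make the accounting change easier to follow, here is a
minimal, self-contained C model of the fixed logic. It is illustration
only, not driver code: ring_model, MAX_BATCH_SZ, on_dequeue and on_results
are made-up stand-ins for the driver's per-ring state, EIP197_MAX_BATCH_SZ,
safexcel_dequeue and safexcel_handle_result_descriptor. The key point is
that the interrupt threshold is always derived from the live in-flight
counter, so requests pushed while a batch completes are picked up on the
next rearm.]

/* Illustrative model of the patched accounting, not driver code. */
#include <stdio.h>

#define MAX_BATCH_SZ 8		/* stand-in for EIP197_MAX_BATCH_SZ */

struct ring_model {
	int requests;		/* total requests in flight, like priv->ring[ring].requests */
	int busy;		/* ring currently handling at least one request */
};

/* Mirrors the fixed safexcel_try_push_requests(): reads the live counter. */
static void try_push_requests(struct ring_model *r)
{
	int coal = r->requests < MAX_BATCH_SZ ? r->requests : MAX_BATCH_SZ;

	if (!coal)
		return;
	/* The real driver writes 'coal' into the RDR threshold register here. */
	printf("threshold set to %d\n", coal);
}

/* Dequeue path: count first, then arm the threshold if the ring was idle. */
static void on_dequeue(struct ring_model *r, int nreq)
{
	r->requests += nreq;
	if (!r->busy) {
		try_push_requests(r);
		r->busy = 1;
	}
}

/* Result path: drop what this interrupt handled, rearm for what remains. */
static void on_results(struct ring_model *r, int handled)
{
	r->requests -= handled;
	try_push_requests(r);
	if (!r->requests)
		r->busy = 0;
}

int main(void)
{
	struct ring_model ring = { 0, 0 };

	on_dequeue(&ring, 3);	/* idle ring: threshold armed to 3 */
	on_dequeue(&ring, 2);	/* pushed mid-flight: counter now 5, no rearm */
	on_results(&ring, 3);	/* 3 acknowledged: threshold rearmed to 2 */
	on_results(&ring, 2);	/* ring drains, busy cleared */
	return 0;
}

Running the model rearms the threshold to 2 after the first batch, i.e.
exactly the requests pushed mid-flight, which the old requests_left
bookkeeping could lose track of.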
commit f7268c538b
parent b7007dbccd
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -432,20 +432,18 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
 }
 
 /* Called with ring's lock taken */
-static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
-				      int ring, int reqs)
+static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
+				       int ring)
 {
-	int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);
+	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
 
 	if (!coal)
-		return 0;
+		return;
 
 	/* Configure when we want an interrupt */
 	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
 	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
 	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
-
-	return coal;
 }
 
 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
@@ -521,13 +519,13 @@ finalize:
 
 	spin_lock_bh(&priv->ring[ring].egress_lock);
 
+	priv->ring[ring].requests += nreq;
+
 	if (!priv->ring[ring].busy) {
-		nreq -= safexcel_try_push_requests(priv, ring, nreq);
+		safexcel_try_push_requests(priv, ring);
 		priv->ring[ring].busy = true;
 	}
 
-	priv->ring[ring].requests_left += nreq;
-
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	/* let the RDR know we have pending descriptors */
@@ -631,7 +629,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
 {
 	struct safexcel_request *sreq;
 	struct safexcel_context *ctx;
-	int ret, i, nreq, ndesc, tot_descs, done;
+	int ret, i, nreq, ndesc, tot_descs, handled = 0;
 	bool should_complete;
 
 handle_results:
@@ -667,6 +665,7 @@ handle_results:
 
 		kfree(sreq);
 		tot_descs += ndesc;
+		handled++;
 	}
 
 acknowledge:
@@ -685,11 +684,10 @@ acknowledge:
 requests_left:
 	spin_lock_bh(&priv->ring[ring].egress_lock);
 
-	done = safexcel_try_push_requests(priv, ring,
-					  priv->ring[ring].requests_left);
+	priv->ring[ring].requests -= handled;
+	safexcel_try_push_requests(priv, ring);
 
-	priv->ring[ring].requests_left -= done;
-
-	if (!done && !priv->ring[ring].requests_left)
+	if (!priv->ring[ring].requests)
 		priv->ring[ring].busy = false;
 
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
@@ -970,7 +968,7 @@ static int safexcel_probe(struct platform_device *pdev)
 			goto err_clk;
 	}
 
-	priv->ring[i].requests_left = 0;
+	priv->ring[i].requests = 0;
 	priv->ring[i].busy = false;
 
 	crypto_init_queue(&priv->ring[i].queue,
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -551,10 +551,8 @@ struct safexcel_crypto_priv {
 		struct crypto_queue queue;
 		spinlock_t queue_lock;
 
-		/* Number of requests in the engine that needs the threshold
-		 * interrupt to be set up.
-		 */
-		int requests_left;
+		/* Number of requests in the engine. */
+		int requests;
 
 		/* The ring is currently handling at least one request */
 		bool busy;