path: root/drivers/crypto/inside-secure/safexcel.c
author     Antoine Ténart <antoine.tenart@free-electrons.com>  2017-12-14 15:26:57 +0100
committer  Herbert Xu <herbert@gondor.apana.org.au>            2017-12-22 20:03:35 +1100
commit     8732b298fa1cd266d8962d45109ba5cfa4212f37 (patch)
tree       9ef229cefbd9dcc40e3485631fde604f455a1c18 /drivers/crypto/inside-secure/safexcel.c
parent     crypto: inside-secure - handle more result requests when counter is full (diff)
download   linux-dev-8732b298fa1cd266d8962d45109ba5cfa4212f37.tar.xz
           linux-dev-8732b298fa1cd266d8962d45109ba5cfa4212f37.zip
crypto: inside-secure - retry to proceed the request later on fail
The dequeueing function was putting a request back into the crypto queue when it failed (i.e. when not enough resources were available), which is not ideal as the request would then only be handled much later. This patch updates that logic by keeping a reference to the failed request so it can be processed first, as soon as enough resources are available.

Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
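A minimal user-space sketch of the stash-and-retry pattern described above (not the driver code; all names such as ring_state, try_send, dequeue_one and ring_dequeue are illustrative assumptions): a request that cannot be submitted for lack of resources is kept in a per-ring slot and retried, ahead of the queue, on the next dequeue call.

    /*
     * Sketch only: a request that cannot be submitted for lack of resources
     * is kept in a per-ring slot and retried first on the next dequeue call.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct request {
    	int id;
    	struct request *next;
    };

    struct ring_state {
    	struct request *queue;        /* pending requests (singly linked)  */
    	struct request *stalled_req;  /* request to retry first, if any    */
    	int free_slots;               /* crude stand-in for HW descriptors */
    };

    /* Pretend to program the hardware; fail when no descriptors are left. */
    static int try_send(struct ring_state *ring, struct request *req)
    {
    	if (!ring->free_slots)
    		return -1;
    	ring->free_slots--;
    	printf("submitted request %d\n", req->id);
    	return 0;
    }

    static struct request *dequeue_one(struct ring_state *ring)
    {
    	struct request *req = ring->queue;

    	if (req)
    		ring->queue = req->next;
    	return req;
    }

    /* Mirrors the patched flow: handle the stalled request before the queue. */
    static void ring_dequeue(struct ring_state *ring)
    {
    	struct request *req = ring->stalled_req;

    	ring->stalled_req = NULL;
    	if (!req)
    		req = dequeue_one(ring);

    	while (req) {
    		if (try_send(ring, req)) {
    			/* Out of resources: keep a reference, retry later. */
    			ring->stalled_req = req;
    			return;
    		}
    		free(req);
    		req = dequeue_one(ring);
    	}
    }

    int main(void)
    {
    	struct ring_state ring = { .free_slots = 1 };
    	struct request *a = calloc(1, sizeof(*a));
    	struct request *b = calloc(1, sizeof(*b));

    	a->id = 1;
    	b->id = 2;
    	a->next = b;
    	ring.queue = a;

    	ring_dequeue(&ring); /* submits request 1, stashes request 2 */
    	ring.free_slots = 1; /* resources freed, e.g. a result was processed */
    	ring_dequeue(&ring); /* retries and submits the stashed request 2 */
    	return 0;
    }

Run as written, the second request is only submitted on the second dequeue call, once a slot has been freed, which mirrors how the patched driver retries a request once descriptors become available again.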
Diffstat (limited to 'drivers/crypto/inside-secure/safexcel.c')
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c  32
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 8042922b4ed8..4c7f205d83f0 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -446,29 +446,36 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
struct safexcel_request *request;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
+ /* If a request wasn't properly dequeued because of a lack of resources,
+ * process it first.
+ */
+ req = priv->ring[ring].req;
+ backlog = priv->ring[ring].backlog;
+ if (req)
+ goto handle_req;
+
while (true) {
spin_lock_bh(&priv->ring[ring].queue_lock);
backlog = crypto_get_backlog(&priv->ring[ring].queue);
req = crypto_dequeue_request(&priv->ring[ring].queue);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!req)
+ if (!req) {
+ priv->ring[ring].req = NULL;
+ priv->ring[ring].backlog = NULL;
goto finalize;
+ }
+handle_req:
request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
- if (!request) {
- spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, req);
- spin_unlock_bh(&priv->ring[ring].queue_lock);
- goto finalize;
- }
+ if (!request)
+ goto request_failed;
ctx = crypto_tfm_ctx(req->tfm);
ret = ctx->send(req, ring, request, &commands, &results);
if (ret) {
kfree(request);
- req->complete(req, ret);
- goto finalize;
+ goto request_failed;
}
if (backlog)
@@ -483,6 +490,13 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
nreq++;
}
+request_failed:
+ /* Not enough resources to handle all the requests. Bail out and save
+ * the request and the backlog for the next dequeue call (per-ring).
+ */
+ priv->ring[ring].req = req;
+ priv->ring[ring].backlog = backlog;
+
finalize:
if (!nreq)
return;
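Note that this view is limited to safexcel.c: the priv->ring[ring].req and priv->ring[ring].backlog accesses above imply that the same commit also adds per-ring storage for the saved request and backlog, presumably in the driver's header, which this filtered diff does not show. A hypothetical declaration of that state, inferred only from the hunks above:

    /* Hypothetical sketch, inferred from the hunks above; the real fields
     * live in the driver's headers, not shown in this filtered diff.
     */
    struct crypto_async_request;

    struct ring_slot_sketch {
    	/* Request (and its backlog entry) that could not be sent because
    	 * the ring ran out of descriptors; retried first on the next
    	 * dequeue call.
    	 */
    	struct crypto_async_request *req;
    	struct crypto_async_request *backlog;
    };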