提交 8732b298 编写于 作者: A Antoine Ténart 提交者: Herbert Xu

crypto: inside-secure - retry to proceed the request later on fail

The dequeueing function was putting a request back into the crypto queue
on failure (when not enough resources are available), which is not
ideal as the request would then be handled much later. This patch updates
this logic by keeping a reference to the failed request and retrying
to process it later, once enough resources are available.
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
上级 7f77f5a4
...@@ -446,29 +446,36 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) ...@@ -446,29 +446,36 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
struct safexcel_request *request; struct safexcel_request *request;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results; int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
/* If a request wasn't properly dequeued because of a lack of resources,
 * process it first.
*/
req = priv->ring[ring].req;
backlog = priv->ring[ring].backlog;
if (req)
goto handle_req;
while (true) { while (true) {
spin_lock_bh(&priv->ring[ring].queue_lock); spin_lock_bh(&priv->ring[ring].queue_lock);
backlog = crypto_get_backlog(&priv->ring[ring].queue); backlog = crypto_get_backlog(&priv->ring[ring].queue);
req = crypto_dequeue_request(&priv->ring[ring].queue); req = crypto_dequeue_request(&priv->ring[ring].queue);
spin_unlock_bh(&priv->ring[ring].queue_lock); spin_unlock_bh(&priv->ring[ring].queue_lock);
if (!req) if (!req) {
priv->ring[ring].req = NULL;
priv->ring[ring].backlog = NULL;
goto finalize; goto finalize;
}
handle_req:
request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req)); request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
if (!request) { if (!request)
spin_lock_bh(&priv->ring[ring].queue_lock); goto request_failed;
crypto_enqueue_request(&priv->ring[ring].queue, req);
spin_unlock_bh(&priv->ring[ring].queue_lock);
goto finalize;
}
ctx = crypto_tfm_ctx(req->tfm); ctx = crypto_tfm_ctx(req->tfm);
ret = ctx->send(req, ring, request, &commands, &results); ret = ctx->send(req, ring, request, &commands, &results);
if (ret) { if (ret) {
kfree(request); kfree(request);
req->complete(req, ret); goto request_failed;
goto finalize;
} }
if (backlog) if (backlog)
...@@ -483,6 +490,13 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) ...@@ -483,6 +490,13 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
nreq++; nreq++;
} }
request_failed:
/* Not enough resources to handle all the requests. Bail out and save
* the request and the backlog for the next dequeue call (per-ring).
*/
priv->ring[ring].req = req;
priv->ring[ring].backlog = backlog;
finalize: finalize:
if (!nreq) if (!nreq)
return; return;
......
...@@ -499,6 +499,12 @@ struct safexcel_crypto_priv { ...@@ -499,6 +499,12 @@ struct safexcel_crypto_priv {
/* The ring is currently handling at least one request */ /* The ring is currently handling at least one request */
bool busy; bool busy;
/* Store for current requests when bailing out of the dequeueing
 * function when not enough resources are available.
*/
struct crypto_async_request *req;
struct crypto_async_request *backlog;
} ring[EIP197_MAX_RINGS]; } ring[EIP197_MAX_RINGS];
}; };
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册