Diffstat (limited to 'crypto/crypto_engine.c')
-rw-r--r--  crypto/crypto_engine.c | 200
1 file changed, 168 insertions(+), 32 deletions(-)
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index eb029ff1e05a..bb8e77077f02 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -9,6 +9,7 @@
#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"
@@ -22,32 +23,37 @@
* @err: error number
*/
static void crypto_finalize_request(struct crypto_engine *engine,
- struct crypto_async_request *req, int err)
+ struct crypto_async_request *req, int err)
{
unsigned long flags;
- bool finalize_cur_req = false;
+ bool finalize_req = false;
int ret;
struct crypto_engine_ctx *enginectx;
- spin_lock_irqsave(&engine->queue_lock, flags);
- if (engine->cur_req == req)
- finalize_cur_req = true;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
+ /*
+ * If the hardware cannot enqueue more requests
+ * and the retry mechanism is not supported,
+ * make sure we are completing the current request.
+ */
+ if (!engine->retry_support) {
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ if (engine->cur_req == req) {
+ finalize_req = true;
+ engine->cur_req = NULL;
+ }
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ }
- if (finalize_cur_req) {
+ if (finalize_req || engine->retry_support) {
enginectx = crypto_tfm_ctx(req->tfm);
- if (engine->cur_req_prepared &&
+ if (enginectx->op.prepare_request &&
enginectx->op.unprepare_request) {
ret = enginectx->op.unprepare_request(engine, req);
if (ret)
dev_err(engine->dev, "failed to unprepare request\n");
}
- spin_lock_irqsave(&engine->queue_lock, flags);
- engine->cur_req = NULL;
- engine->cur_req_prepared = false;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
}
-
+ lockdep_assert_in_softirq();
req->complete(req, err);
kthread_queue_work(engine->kworker, &engine->pump_requests);
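For context, the driver side of this completion path might look like the
following minimal sketch (the foo_* names and the foo_dev structure are
hypothetical, for illustration only): once the hardware signals completion,
the driver hands the request back through one of the public wrappers such as
crypto_finalize_skcipher_request(), which lands in crypto_finalize_request()
above.

	/* Hypothetical driver completion handler; foo_* names are illustrative. */
	static void foo_done_tasklet(unsigned long data)
	{
		struct foo_dev *fdev = (struct foo_dev *)data;
		struct skcipher_request *req = fdev->in_flight_req;
		int err = foo_read_status(fdev) ? -EIO : 0;

		/*
		 * Hand the request back to the engine: for non-retry hardware
		 * this clears engine->cur_req, runs ->unprepare_request() and
		 * queues the pump to fetch the next request.
		 */
		crypto_finalize_skcipher_request(fdev->engine, req, err);
	}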
@@ -74,7 +80,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
spin_lock_irqsave(&engine->queue_lock, flags);
/* Make sure we are not already running a request */
- if (engine->cur_req)
+ if (!engine->retry_support && engine->cur_req)
goto out;
/* If another context is idling then defer */
@@ -108,13 +114,21 @@ static void crypto_pump_requests(struct crypto_engine *engine,
goto out;
}
+start_request:
/* Get the first request from the engine queue to handle */
backlog = crypto_get_backlog(&engine->queue);
async_req = crypto_dequeue_request(&engine->queue);
if (!async_req)
goto out;
- engine->cur_req = async_req;
+ /*
+ * If hardware doesn't support the retry mechanism,
+ * keep track of the request we are processing now.
+ * We'll need it on completion (crypto_finalize_request).
+ */
+ if (!engine->retry_support)
+ engine->cur_req = async_req;
+
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -130,7 +144,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
ret = engine->prepare_crypt_hardware(engine);
if (ret) {
dev_err(engine->dev, "failed to prepare crypt hardware\n");
- goto req_err;
+ goto req_err_2;
}
}
@@ -141,28 +155,90 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (ret) {
dev_err(engine->dev, "failed to prepare request: %d\n",
ret);
- goto req_err;
+ goto req_err_2;
}
- engine->cur_req_prepared = true;
}
if (!enginectx->op.do_one_request) {
dev_err(engine->dev, "failed to do request\n");
ret = -EINVAL;
- goto req_err;
+ goto req_err_1;
}
+
ret = enginectx->op.do_one_request(engine, async_req);
- if (ret) {
- dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
- goto req_err;
+
+ /* Request unsuccessfully executed by hardware */
+ if (ret < 0) {
+ /*
+ * If hardware queue is full (-ENOSPC), requeue request
+ * regardless of backlog flag.
+ * Otherwise, unprepare and complete the request.
+ */
+ if (!engine->retry_support ||
+ (ret != -ENOSPC)) {
+ dev_err(engine->dev,
+ "Failed to do one request from queue: %d\n",
+ ret);
+ goto req_err_1;
+ }
+ /*
+ * If retry mechanism is supported,
+ * unprepare current request and
+ * enqueue it back into crypto-engine queue.
+ */
+ if (enginectx->op.unprepare_request) {
+ ret = enginectx->op.unprepare_request(engine,
+ async_req);
+ if (ret)
+ dev_err(engine->dev,
+ "failed to unprepare request\n");
+ }
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ /*
+ * If the hardware was unable to execute the request, enqueue it
+ * back in front of the crypto-engine queue, to keep the order
+ * of requests.
+ */
+ crypto_enqueue_request_head(&engine->queue, async_req);
+
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
+ goto out;
}
- return;
-req_err:
- crypto_finalize_request(engine, async_req, ret);
+ goto retry;
+
+req_err_1:
+ if (enginectx->op.unprepare_request) {
+ ret = enginectx->op.unprepare_request(engine, async_req);
+ if (ret)
+ dev_err(engine->dev, "failed to unprepare request\n");
+ }
+
+req_err_2:
+ async_req->complete(async_req, ret);
+
+retry:
+ /* If retry mechanism is supported, send new requests to engine */
+ if (engine->retry_support) {
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ goto start_request;
+ }
return;
out:
spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+ /*
+ * Batching requests is possible only if
+ * the hardware can enqueue multiple requests.
+ */
+ if (engine->do_batch_requests) {
+ ret = engine->do_batch_requests(engine);
+ if (ret)
+ dev_err(engine->dev, "failed to do batch requests: %d\n",
+ ret);
+ }
+
+ return;
}
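To illustrate the retry path added above, here is a sketch of the driver
side, under stated assumptions: the foo_* helpers and foo_tfm_ctx are
hypothetical, while the crypto_engine_ctx layout and the op callbacks are
the ones declared in include/crypto/engine.h. A driver whose hardware queue
can fill up returns -ENOSPC from ->do_one_request, so the engine requeues
the request at the head of its queue instead of failing it.

	/* Hypothetical driver callbacks; foo_* names are illustrative. */
	static int foo_do_one_request(struct crypto_engine *engine, void *areq)
	{
		struct skcipher_request *req =
			container_of(areq, struct skcipher_request, base);
		struct foo_dev *fdev = foo_dev_from_engine(engine);

		/* -ENOSPC triggers the requeue path in crypto_pump_requests() */
		if (foo_hw_queue_full(fdev))
			return -ENOSPC;

		return foo_submit_descriptor(fdev, req);
	}

	static int foo_init_tfm(struct crypto_skcipher *tfm)
	{
		struct foo_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

		/* struct crypto_engine_ctx must be first in the tfm context */
		ctx->enginectx.op.prepare_request = foo_prepare_request;
		ctx->enginectx.op.unprepare_request = foo_unprepare_request;
		ctx->enginectx.op.do_one_request = foo_do_one_request;
		return 0;
	}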
static void crypto_pump_work(struct kthread_work *work)
@@ -177,6 +253,7 @@ static void crypto_pump_work(struct kthread_work *work)
* crypto_transfer_request - transfer the new request into the engine queue
* @engine: the hardware engine
* @req: the request that needs to be listed into the engine queue
+ * @need_pump: indicates whether to queue the request pump to the kthread worker
*/
static int crypto_transfer_request(struct crypto_engine *engine,
struct crypto_async_request *req,
@@ -253,6 +330,19 @@ int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
+ * crypto_transfer_kpp_request_to_engine - transfer one kpp_request to list
+ * into the engine queue
+ * @engine: the hardware engine
+ * @req: the request that needs to be listed into the engine queue
+ */
+int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
+ struct kpp_request *req)
+{
+ return crypto_transfer_request_to_engine(engine, &req->base);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);
+
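A hypothetical kpp driver would call this new helper from its kpp_alg
handlers; a minimal sketch (foo_* names are illustrative):

	/* Hypothetical kpp handler; foo_* names are illustrative. */
	static int foo_dh_compute_shared_secret(struct kpp_request *req)
	{
		struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
		struct foo_dev *fdev = foo_dev_from_kpp(tfm);

		/* Queue the request; the engine pump calls ->do_one_request() */
		return crypto_transfer_kpp_request_to_engine(fdev->engine, req);
	}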
+/**
* crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
* to list into the engine queue
* @engine: the hardware engine
@@ -308,6 +398,19 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
+ * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
+ * @engine: the hardware engine
+ * @req: the request that needs to be finalized
+ * @err: error number
+ */
+void crypto_finalize_kpp_request(struct crypto_engine *engine,
+ struct kpp_request *req, int err)
+{
+ return crypto_finalize_request(engine, &req->base, err);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);
+
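The counterpart on the completion side of the same hypothetical driver would
be called once the hardware finishes the operation, e.g. from its interrupt
handler (a sketch, with illustrative foo_* names):

	/* Hypothetical completion path; foo_* names are illustrative. */
	static irqreturn_t foo_dh_done_irq(int irq, void *dev_id)
	{
		struct foo_dev *fdev = dev_id;
		int err = foo_read_status(fdev) ? -EIO : 0;

		crypto_finalize_kpp_request(fdev->engine, fdev->cur_kpp_req, err);
		return IRQ_HANDLED;
	}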
+/**
* crypto_finalize_skcipher_request - finalize one skcipher_request if
* the request is done
* @engine: the hardware engine
@@ -386,17 +489,28 @@ int crypto_engine_stop(struct crypto_engine *engine)
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
- * crypto_engine_alloc_init - allocate crypto hardware engine structure and
- * initialize it.
+ * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
+ * and initialize it by setting the maximum number of entries in the software
+ * crypto-engine queue.
* @dev: the device attached to one hardware engine
+ * @retry_support: whether hardware has support for retry mechanism
+ * @cbk_do_batch: pointer to a callback function to be invoked when executing
+ * a batch of requests.
+ * This has the form:
+ * callback(struct crypto_engine *engine)
+ * where:
+ * @engine: the crypto engine structure.
* @rt: whether this queue is set to run as a realtime task
+ * @qlen: maximum size of the crypto-engine queue
*
* This must be called from context that can sleep.
* Return: the crypto engine structure on success, else NULL.
*/
-struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
+ bool retry_support,
+ int (*cbk_do_batch)(struct crypto_engine *engine),
+ bool rt, int qlen)
{
- struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
struct crypto_engine *engine;
if (!dev)
@@ -411,12 +525,18 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
engine->running = false;
engine->busy = false;
engine->idling = false;
- engine->cur_req_prepared = false;
+ engine->retry_support = retry_support;
engine->priv_data = dev;
+ /*
+ * Batching requests is possible only if
+ * the hardware supports the retry mechanism.
+ */
+ engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;
+
snprintf(engine->name, sizeof(engine->name),
"%s-engine", dev_name(dev));
- crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
+ crypto_init_queue(&engine->queue, qlen);
spin_lock_init(&engine->queue_lock);
engine->kworker = kthread_create_worker(0, "%s", engine->name);
@@ -428,11 +548,27 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
if (engine->rt) {
dev_info(dev, "will run requests pump with realtime priority\n");
- sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
+ sched_set_fifo(engine->kworker->task);
}
return engine;
}
+EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
+
+/**
+ * crypto_engine_alloc_init - allocate crypto hardware engine structure and
+ * initialize it.
+ * @dev: the device attached to one hardware engine
+ * @rt: whether this queue is set to run as a realtime task
+ *
+ * This must be called from context that can sleep.
+ * Return: the crypto engine structure on success, else NULL.
+ */
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+{
+ return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
+ CRYPTO_ENGINE_MAX_QLEN);
+}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
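Probe-time usage of the extended allocator might look like the following
sketch (a hypothetical foo_* driver; FOO_RING_SIZE stands in for the
hardware ring depth): hardware that can hold several in-flight requests
passes retry_support = true together with a batch callback and a queue
length matching its ring.

	/* Hypothetical probe; foo_* names and FOO_RING_SIZE are illustrative. */
	static int foo_do_batch(struct crypto_engine *engine)
	{
		struct foo_dev *fdev = foo_dev_from_engine(engine);

		/* Ring the doorbell once for all descriptors queued by the pump */
		return foo_ring_doorbell(fdev);
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_dev *fdev;

		fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
		if (!fdev)
			return -ENOMEM;

		fdev->engine = crypto_engine_alloc_init_and_set(&pdev->dev,
								true, foo_do_batch,
								false, FOO_RING_SIZE);
		if (!fdev->engine)
			return -ENOMEM;

		return crypto_engine_start(fdev->engine);
	}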
/**