Diffstat (limited to 'drivers/crypto/qat/qat_common/qat_algs.c')
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 175
1 file changed, 99 insertions(+), 76 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index f998ed58457c..cad9c58caab1 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -17,7 +17,7 @@
#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
-#include "adf_transport.h"
+#include "qat_algs_send.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
@@ -46,19 +46,6 @@
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
-struct qat_alg_buf {
- u32 len;
- u32 resrvd;
- u64 addr;
-} __packed;
-
-struct qat_alg_buf_list {
- u64 resrvd;
- u32 num_bufs;
- u32 num_mapped_bufs;
- struct qat_alg_buf bufers[];
-} __packed __aligned(64);
-
/* Common content descriptor */
struct qat_alg_cd {
union {
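Note: the two descriptor structures are removed from this file. In the rest of the series they back a pre-allocated, per-request descriptor list: requests with at most QAT_MAX_BUFF_DESC entries reuse a list embedded in the request (the sgl_src/sgl_dst members and *_valid flags used further down in this diff), and only larger scatterlists still allocate. A minimal sketch of how the embedded list might be laid out; the qat_alg_fixed_buf_list name, the value of QAT_MAX_BUFF_DESC and the exact placement in struct qat_request_buffs are assumptions, only the member names used later in this diff come from the patch.

/*
 * Sketch only, not part of this diff: fixed-size buffer lists embedded
 * in the request so that small requests avoid kzalloc in the datapath.
 */
struct qat_alg_fixed_buf_list {
	struct qat_alg_buf_list sgl_hdr;
	struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
} __packed __aligned(64);

struct qat_request_buffs {
	/* ... existing bl/blp/blout/bloutp/sz/sz_out members ... */
	bool sgl_src_valid;
	bool sgl_dst_valid;
	struct qat_alg_fixed_buf_list sgl_src;
	struct qat_alg_fixed_buf_list sgl_dst;
};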
@@ -618,7 +605,7 @@ static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct qat_crypto_instance *inst = NULL;
- int node = get_current_node();
+ int node = numa_node_id();
struct device *dev;
int ret;
@@ -686,14 +673,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
dma_addr_t blpout = qat_req->buf.bloutp;
size_t sz = qat_req->buf.sz;
size_t sz_out = qat_req->buf.sz_out;
+ int bl_dma_dir;
int i;
+ bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
for (i = 0; i < bl->num_bufs; i++)
dma_unmap_single(dev, bl->bufers[i].addr,
- bl->bufers[i].len, DMA_BIDIRECTIONAL);
+ bl->bufers[i].len, bl_dma_dir);
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
- kfree(bl);
+
+ if (!qat_req->buf.sgl_src_valid)
+ kfree(bl);
+
if (blp != blpout) {
/* If out of place operation dma unmap only data */
int bufless = blout->num_bufs - blout->num_mapped_bufs;
@@ -701,17 +694,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
for (i = bufless; i < blout->num_bufs; i++) {
dma_unmap_single(dev, blout->bufers[i].addr,
blout->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
- kfree(blout);
+
+ if (!qat_req->buf.sgl_dst_valid)
+ kfree(blout);
}
}
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct scatterlist *sgl,
struct scatterlist *sglout,
- struct qat_crypto_request *qat_req)
+ struct qat_crypto_request *qat_req,
+ gfp_t flags)
{
struct device *dev = &GET_DEV(inst->accel_dev);
int i, sg_nctr = 0;
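Note: both the unmap path above and the map path below now derive the DMA direction from whether the operation is in place. If source and destination are the same buffer, mappings stay DMA_BIDIRECTIONAL; for out-of-place operations the source side is mapped DMA_TO_DEVICE and the destination side DMA_FROM_DEVICE, sparing the DMA API needless cache maintenance. The patch open-codes the test; a hypothetical helper expressing the same rule:

/* Illustrative only: direction rule that the patch open-codes */
static inline int qat_buf_dma_dir(bool in_place, bool for_output)
{
	if (in_place)
		return DMA_BIDIRECTIONAL;	/* same pages are read and written */

	return for_output ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}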
@@ -721,15 +717,27 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
dma_addr_t blp = DMA_MAPPING_ERROR;
dma_addr_t bloutp = DMA_MAPPING_ERROR;
struct scatterlist *sg;
- size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
+ size_t sz_out, sz = struct_size(bufl, bufers, n);
+ int node = dev_to_node(&GET_DEV(inst->accel_dev));
+ int bufl_dma_dir;
if (unlikely(!n))
return -EINVAL;
- bufl = kzalloc_node(sz, GFP_ATOMIC,
- dev_to_node(&GET_DEV(inst->accel_dev)));
- if (unlikely(!bufl))
- return -ENOMEM;
+ qat_req->buf.sgl_src_valid = false;
+ qat_req->buf.sgl_dst_valid = false;
+
+ if (n > QAT_MAX_BUFF_DESC) {
+ bufl = kzalloc_node(sz, flags, node);
+ if (unlikely(!bufl))
+ return -ENOMEM;
+ } else {
+ bufl = &qat_req->buf.sgl_src.sgl_hdr;
+ memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+ qat_req->buf.sgl_src_valid = true;
+ }
+
+ bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
for_each_sg(sgl, sg, n, i)
bufl->bufers[i].addr = DMA_MAPPING_ERROR;
@@ -742,7 +750,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
- DMA_BIDIRECTIONAL);
+ bufl_dma_dir);
bufl->bufers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
goto err_in;
@@ -760,12 +768,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct qat_alg_buf *bufers;
n = sg_nents(sglout);
- sz_out = struct_size(buflout, bufers, n + 1);
+ sz_out = struct_size(buflout, bufers, n);
sg_nctr = 0;
- buflout = kzalloc_node(sz_out, GFP_ATOMIC,
- dev_to_node(&GET_DEV(inst->accel_dev)));
- if (unlikely(!buflout))
- goto err_in;
+
+ if (n > QAT_MAX_BUFF_DESC) {
+ buflout = kzalloc_node(sz_out, flags, node);
+ if (unlikely(!buflout))
+ goto err_in;
+ } else {
+ buflout = &qat_req->buf.sgl_dst.sgl_hdr;
+ memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+ qat_req->buf.sgl_dst_valid = true;
+ }
bufers = buflout->bufers;
for_each_sg(sglout, sg, n, i)
@@ -779,7 +793,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
goto err_out;
bufers[y].len = sg->length;
@@ -809,8 +823,10 @@ err_out:
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
- DMA_BIDIRECTIONAL);
- kfree(buflout);
+ DMA_FROM_DEVICE);
+
+ if (!qat_req->buf.sgl_dst_valid)
+ kfree(buflout);
err_in:
if (!dma_mapping_error(dev, blp))
@@ -821,9 +837,10 @@ err_in:
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
bufl->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ bufl_dma_dir);
- kfree(bufl);
+ if (!qat_req->buf.sgl_src_valid)
+ kfree(bufl);
dev_err(dev, "Failed to map buf for dma\n");
return -ENOMEM;
@@ -925,8 +942,25 @@ void qat_alg_callback(void *resp)
struct icp_qat_fw_la_resp *qat_resp = resp;
struct qat_crypto_request *qat_req =
(void *)(__force long)qat_resp->opaque_data;
+ struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
qat_req->cb(qat_resp, qat_req);
+
+ qat_alg_send_backlog(backlog);
+}
+
+static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
+ struct qat_crypto_instance *inst,
+ struct crypto_async_request *base)
+{
+ struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+ alg_req->fw_req = (u32 *)&qat_req->req;
+ alg_req->tx_ring = inst->sym_tx;
+ alg_req->base = base;
+ alg_req->backlog = &inst->backlog;
+
+ return qat_alg_send_message(alg_req);
}
static int qat_alg_aead_dec(struct aead_request *areq)
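Note: qat_alg_send_sym_message() replaces the per-caller adf_send_message() retry loops. It fills a struct qat_alg_req with the firmware descriptor, the sym TX ring, the crypto_async_request and the per-instance backlog, then hands it to qat_alg_send_message() (added in qat_algs_send.c, not part of this file). The callers below only undo the DMA mappings when that returns -ENOSPC, and qat_alg_callback() re-submits backlogged requests after every completion. A rough sketch of what the dispatch is expected to do; the two helper names are assumptions:

/* Sketch only: dispatch between a retrying and a backlogging send path */
int qat_alg_send_message(struct qat_alg_req *req)
{
	if (req->base->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		/* if the ring is full, park on the backlog and return -EBUSY */
		return qat_alg_send_message_maybacklog(req);

	/* if the ring is still full after a few retries, give up with -ENOSPC */
	return qat_alg_send_message_retry(req);
}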
@@ -939,14 +973,15 @@ static int qat_alg_aead_dec(struct aead_request *areq)
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
int digst_size = crypto_aead_authsize(aead_tfm);
- int ret, ctr = 0;
+ gfp_t f = qat_algs_alloc_flags(&areq->base);
+ int ret;
u32 cipher_len;
cipher_len = areq->cryptlen - digst_size;
if (cipher_len % AES_BLOCK_SIZE != 0)
return -EINVAL;
- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
if (unlikely(ret))
return ret;
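Note: each request handler now derives its allocation flags from the request via qat_algs_alloc_flags() and threads them through to qat_alg_sgl_to_bufl(), so callers that are allowed to sleep no longer force GFP_ATOMIC allocations. The helper lives outside this file; a minimal sketch of the likely rule, keyed off CRYPTO_TFM_REQ_MAY_SLEEP:

/* Sketch only: assumed mapping from request flags to gfp flags */
static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
{
	return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}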
@@ -965,15 +1000,12 @@ static int qat_alg_aead_dec(struct aead_request *areq)
auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_aead_enc(struct aead_request *areq)
@@ -984,14 +1016,15 @@ static int qat_alg_aead_enc(struct aead_request *areq)
struct qat_crypto_request *qat_req = aead_request_ctx(areq);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
+ gfp_t f = qat_algs_alloc_flags(&areq->base);
struct icp_qat_fw_la_bulk_req *msg;
u8 *iv = areq->iv;
- int ret, ctr = 0;
+ int ret;
if (areq->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
if (unlikely(ret))
return ret;
@@ -1013,15 +1046,11 @@ static int qat_alg_aead_enc(struct aead_request *areq)
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + areq->cryptlen;
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
-
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
@@ -1042,7 +1071,7 @@ static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
{
struct qat_crypto_instance *inst = NULL;
struct device *dev;
- int node = get_current_node();
+ int node = numa_node_id();
int ret;
inst = qat_crypto_get_instance_node(node);
@@ -1173,13 +1202,14 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ gfp_t f = qat_algs_alloc_flags(&req->base);
struct icp_qat_fw_la_bulk_req *msg;
- int ret, ctr = 0;
+ int ret;
if (req->cryptlen == 0)
return 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
if (unlikely(ret))
return ret;
@@ -1198,15 +1228,11 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
qat_alg_set_req_iv(qat_req);
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
-
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
@@ -1242,13 +1268,14 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ gfp_t f = qat_algs_alloc_flags(&req->base);
struct icp_qat_fw_la_bulk_req *msg;
- int ret, ctr = 0;
+ int ret;
if (req->cryptlen == 0)
return 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
if (unlikely(ret))
return ret;
@@ -1268,15 +1295,11 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
qat_alg_set_req_iv(qat_req);
qat_alg_update_iv(qat_req);
- do {
- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
- } while (ret == -EAGAIN && ctr++ < 10);
-
- if (ret == -EAGAIN) {
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
- return -EBUSY;
- }
- return -EINPROGRESS;
+
+ return ret;
}
static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)