Diffstat (limited to 'drivers/crypto/qce')
-rw-r--r--  drivers/crypto/qce/Makefile      7
-rw-r--r--  drivers/crypto/qce/common.c    244
-rw-r--r--  drivers/crypto/qce/core.c        4
-rw-r--r--  drivers/crypto/qce/dma.c         6
-rw-r--r--  drivers/crypto/qce/dma.h         3
-rw-r--r--  drivers/crypto/qce/sha.c         2
-rw-r--r--  drivers/crypto/qce/skcipher.c   41
7 files changed, 170 insertions, 137 deletions
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 8caa04e1ec43..14ade8a7d664 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
qcrypto-objs := core.o \
common.o \
- dma.o \
- sha.o \
- skcipher.o
+ dma.o
+
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
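Note on the build split: core.o, common.o and dma.o remain unconditional parts of qcrypto.o whenever CONFIG_CRYPTO_DEV_QCE is enabled, while sha.o and skcipher.o are pulled in only when CONFIG_CRYPTO_DEV_QCE_SHA and CONFIG_CRYPTO_DEV_QCE_SKCIPHER are selected. The #ifdef blocks added to common.c and core.c below mirror the same split so the shared code never references handlers that were compiled out.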
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index da1188abc9ba..629e7f34dc09 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -45,52 +45,56 @@ qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
qce_write(qce, offset + i * sizeof(u32), 0);
}
-static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+static u32 qce_config_reg(struct qce_device *qce, int little)
{
- u32 cfg = 0;
+ u32 beats = (qce->burst_size >> 3) - 1;
+ u32 pipe_pair = qce->pipe_pair_id;
+ u32 config;
- if (IS_AES(flags)) {
- if (aes_key_size == AES_KEYSIZE_128)
- cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
- else if (aes_key_size == AES_KEYSIZE_256)
- cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
- }
+ config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+ config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+ BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+ config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+ config &= ~HIGH_SPD_EN_N_SHIFT;
- if (IS_AES(flags))
- cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
- else if (IS_DES(flags) || IS_3DES(flags))
- cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+ if (little)
+ config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
- if (IS_DES(flags))
- cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+ return config;
+}
- if (IS_3DES(flags))
- cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+ __be32 *d = dst;
+ const u8 *s = src;
+ unsigned int n;
- switch (flags & QCE_MODE_MASK) {
- case QCE_MODE_ECB:
- cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CBC:
- cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CTR:
- cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_XTS:
- cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CCM:
- cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
- cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
- break;
- default:
- return ~0;
+ n = len / sizeof(u32);
+ for (; n > 0; n--) {
+ *d = cpu_to_be32p((const __u32 *) s);
+ s += sizeof(__u32);
+ d++;
}
+}
- return cfg;
+static void qce_setup_config(struct qce_device *qce)
+{
+ u32 config;
+
+ /* get big endianness */
+ config = qce_config_reg(qce, 0);
+
+ /* clear status */
+ qce_write(qce, REG_STATUS, 0);
+ qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
u32 cfg = 0;
@@ -137,88 +141,6 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
}
-static u32 qce_config_reg(struct qce_device *qce, int little)
-{
- u32 beats = (qce->burst_size >> 3) - 1;
- u32 pipe_pair = qce->pipe_pair_id;
- u32 config;
-
- config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
- config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
- BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
- config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
- config &= ~HIGH_SPD_EN_N_SHIFT;
-
- if (little)
- config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
-
- return config;
-}
-
-void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
-{
- __be32 *d = dst;
- const u8 *s = src;
- unsigned int n;
-
- n = len / sizeof(u32);
- for (; n > 0; n--) {
- *d = cpu_to_be32p((const __u32 *) s);
- s += sizeof(__u32);
- d++;
- }
-}
-
-static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
-{
- u8 swap[QCE_AES_IV_LENGTH];
- u32 i, j;
-
- if (ivsize > QCE_AES_IV_LENGTH)
- return;
-
- memset(swap, 0, QCE_AES_IV_LENGTH);
-
- for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
- i < QCE_AES_IV_LENGTH; i++, j--)
- swap[i] = src[j];
-
- qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
-}
-
-static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
- unsigned int enckeylen, unsigned int cryptlen)
-{
- u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
- unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
- unsigned int xtsdusize;
-
- qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
- enckeylen / 2);
- qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
-
- /* xts du size 512B */
- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
-}
-
-static void qce_setup_config(struct qce_device *qce)
-{
- u32 config;
-
- /* get big endianness */
- config = qce_config_reg(qce, 0);
-
- /* clear status */
- qce_write(qce, REG_STATUS, 0);
- qce_write(qce, REG_CONFIG, config);
-}
-
-static inline void qce_crypto_go(struct qce_device *qce)
-{
- qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
-}
-
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
@@ -303,6 +225,87 @@ go_proc:
return 0;
}
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+ u32 cfg = 0;
+
+ if (IS_AES(flags)) {
+ if (aes_key_size == AES_KEYSIZE_128)
+ cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+ else if (aes_key_size == AES_KEYSIZE_256)
+ cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+ }
+
+ if (IS_AES(flags))
+ cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+ else if (IS_DES(flags) || IS_3DES(flags))
+ cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+ if (IS_DES(flags))
+ cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+ if (IS_3DES(flags))
+ cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+ switch (flags & QCE_MODE_MASK) {
+ case QCE_MODE_ECB:
+ cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CBC:
+ cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CTR:
+ cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_XTS:
+ cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CCM:
+ cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+ cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+ break;
+ default:
+ return ~0;
+ }
+
+ return cfg;
+}
+
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+ u8 swap[QCE_AES_IV_LENGTH];
+ u32 i, j;
+
+ if (ivsize > QCE_AES_IV_LENGTH)
+ return;
+
+ memset(swap, 0, QCE_AES_IV_LENGTH);
+
+ for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+ i < QCE_AES_IV_LENGTH; i++, j--)
+ swap[i] = src[j];
+
+ qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+ unsigned int enckeylen, unsigned int cryptlen)
+{
+ u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+ unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+ unsigned int xtsdusize;
+
+ qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+ enckeylen / 2);
+ qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+ /* xts du size 512B */
+ xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+ qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
@@ -384,15 +387,20 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
return 0;
}
+#endif
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
u32 offset)
{
switch (type) {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
return qce_setup_regs_skcipher(async_req, totallen, offset);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
+#endif
default:
return -EINVAL;
}
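Most of the common.c hunks are code movement: qce_config_reg(), qce_cpu_to_be32p_array(), qce_setup_config() and qce_crypto_go() are hoisted above the new conditional blocks because both the hash and the skcipher paths need them, while qce_encr_cfg(), qce_xts_swapiv() and qce_xtskey() move under CONFIG_CRYPTO_DEV_QCE_SKCIPHER and qce_auth_cfg()/qce_setup_regs_ahash() stay under CONFIG_CRYPTO_DEV_QCE_SHA; qce_start() then dispatches only to the handlers that were actually built. As an aside, the following is a plain userspace illustration (hypothetical names, not driver code) of what qce_cpu_to_be32p_array() does: each 32-bit word of the source buffer is read in CPU byte order and stored back big-endian, with any trailing partial word ignored.

    #include <arpa/inet.h>	/* htonl() */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical userspace stand-in for qce_cpu_to_be32p_array(): read
     * len/4 words from src in native byte order and store each one as a
     * big-endian 32-bit value; a trailing partial word is ignored. */
    static void be32_array_from_bytes(uint32_t *dst, const uint8_t *src, size_t len)
    {
        size_t n = len / sizeof(uint32_t);
        uint32_t v;

        while (n--) {
            memcpy(&v, src, sizeof(v));	/* native-order load */
            *dst++ = htonl(v);		/* big-endian store */
            src += sizeof(v);
        }
    }

    int main(void)
    {
        const uint8_t key[8] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
        uint32_t words[2];

        be32_array_from_bytes(words, key, sizeof(key));
        /* Prints 01020304 05060708 on either endianness: each output word
         * is the corresponding four input bytes read as a big-endian number. */
        printf("%08x %08x\n", words[0], words[1]);
        return 0;
    }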
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 0a44a6eeacf5..cb6d61eb7302 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,8 +22,12 @@
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
&skcipher_ops,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
&ahash_ops,
+#endif
};
static void qce_unregister_algs(struct qce_device *qce)
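The qce_ops[] table in core.c follows the same pattern: only the ops for algorithms that were compiled in per the Makefile change are registered, so a SHA-only or skcipher-only build does not reference missing symbols.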
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 40a59214d2e1..7da893dc00e7 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -47,7 +47,8 @@ void qce_dma_release(struct qce_dma_data *dma)
}
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
+ int max_ents)
{
struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
@@ -60,12 +61,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
if (!sg)
return ERR_PTR(-EINVAL);
- while (new_sgl && sg) {
+ while (new_sgl && sg && max_ents) {
sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
new_sgl->offset);
sg_last = sg;
sg = sg_next(sg);
new_sgl = sg_next(new_sgl);
+ max_ents--;
}
return sg_last;
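qce_sgtable_add() now takes a max_ents bound so the copy stops after a fixed number of entries instead of walking the entire source scatterlist; the skcipher path below uses it to add at most dst_nents - 1 entries for the payload and exactly one entry for the result buffer.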
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 1e25a9e0e6f8..ed25a0d9829e 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -42,6 +42,7 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add,
+ int max_ents);
#endif /* _DMA_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 95ab16fc8fd6..1ab62e7d5f3c 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -396,8 +396,6 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
- if (ret)
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
kfree(buf);
err_free_req:
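The sha.c hunk simply stops setting CRYPTO_TFM_RES_BAD_KEY_LEN when the HMAC key digest fails; returning the error from crypto_wait_req() is sufficient, in line with the crypto API cleanup that dropped the CRYPTO_TFM_RES_* flags.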
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index fee07323f8f9..4217b745f124 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -21,6 +21,7 @@ static void qce_skcipher_done(void *data)
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
+ struct qce_result_dump *result_buf = qce->dma.result_buf;
enum dma_data_direction dir_src, dir_dst;
u32 status;
int error;
@@ -45,6 +46,7 @@ static void qce_skcipher_done(void *data)
if (error < 0)
dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
+ memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
qce->async_req_done(tmpl->qce, error);
}
@@ -95,13 +97,13 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
}
- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
@@ -154,12 +156,13 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
int ret;
if (!key || !keylen)
return -EINVAL;
- switch (keylen) {
+ switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
break;
@@ -213,13 +216,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ int keylen;
int ret;
rctx->flags = tmpl->alg_flags;
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+ keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
- if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
- ctx->enc_keylen != AES_KEYSIZE_256) {
+ if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_256) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
@@ -252,7 +257,14 @@ static int qce_skcipher_init(struct crypto_skcipher *tfm)
memset(ctx, 0, sizeof(*ctx));
crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
+ return 0;
+}
+
+static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ qce_skcipher_init(tfm);
ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
@@ -270,6 +282,7 @@ struct qce_skcipher_def {
const char *name;
const char *drv_name;
unsigned int blocksize;
+ unsigned int chunksize;
unsigned int ivsize;
unsigned int min_keysize;
unsigned int max_keysize;
@@ -298,7 +311,8 @@ static const struct qce_skcipher_def skcipher_def[] = {
.flags = QCE_ALG_AES | QCE_MODE_CTR,
.name = "ctr(aes)",
.drv_name = "ctr-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
+ .chunksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -309,8 +323,8 @@ static const struct qce_skcipher_def skcipher_def[] = {
.drv_name = "xts-aes-qce",
.blocksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
},
{
.flags = QCE_ALG_DES | QCE_MODE_ECB,
@@ -368,6 +382,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
def->drv_name);
alg->base.cra_blocksize = def->blocksize;
+ alg->chunksize = def->chunksize;
alg->ivsize = def->ivsize;
alg->min_keysize = def->min_keysize;
alg->max_keysize = def->max_keysize;
@@ -379,14 +394,18 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->base.cra_priority = 300;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
alg->base.cra_alignmask = 0;
alg->base.cra_module = THIS_MODULE;
- alg->init = qce_skcipher_init;
- alg->exit = qce_skcipher_exit;
+ if (IS_AES(def->flags)) {
+ alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
+ alg->init = qce_skcipher_init_fallback;
+ alg->exit = qce_skcipher_exit;
+ } else {
+ alg->init = qce_skcipher_init;
+ }
INIT_LIST_HEAD(&tmpl->entry);
tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
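The skcipher.c changes tighten AES handling: the completion handler copies the updated counter/IV out of the result dump into rctx->iv; xts(aes) keys are two concatenated AES keys, so setkey and the fallback decision validate keylen / 2 and the advertised min/max key sizes are doubled; ctr(aes) is now registered with blocksize 1 (stream-cipher semantics) and chunksize AES_BLOCK_SIZE; and the sync-skcipher fallback is allocated only for the AES algorithms that can actually need it. A minimal userspace-style sketch of the key-length check, with hypothetical names and the AES sizes hard-coded, might look like:

    #include <stdbool.h>
    #include <stddef.h>

    #define AES_KEYSIZE_128 16
    #define AES_KEYSIZE_256 32

    /* Hypothetical helper mirroring the patched check: for XTS the supplied
     * key is two AES keys back to back, so only half of it counts when
     * validating against the key sizes the hardware handles directly
     * (other lengths, e.g. 192-bit AES, go through the software fallback). */
    static bool qce_aes_keylen_supported(bool is_xts, size_t keylen)
    {
        size_t eff_len = is_xts ? keylen / 2 : keylen;

        return eff_len == AES_KEYSIZE_128 || eff_len == AES_KEYSIZE_256;
    }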