Diffstat (limited to 'drivers/crypto/allwinner')
-rw-r--r--   drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c    22
-rw-r--r--   drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c      16
-rw-r--r--   drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h             1
-rw-r--r--   drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c    105
-rw-r--r--   drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c       54
-rw-r--r--   drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c      133
-rw-r--r--   drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c        6
-rw-r--r--   drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c        6
-rw-r--r--   drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h             19
-rw-r--r--   drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c    184
-rw-r--r--   drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c      102
-rw-r--r--   drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c      390
-rw-r--r--   drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c        6
-rw-r--r--   drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h             33
14 files changed, 800 insertions, 277 deletions
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index dec79fa3ebaf..10fe9f73a5fb 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -20,7 +20,6 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
u32 mode = ctx->mode;
- void *backup_iv = NULL;
/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
u32 rx_cnt = SS_RX_DEFAULT;
u32 tx_cnt = 0;
@@ -48,10 +47,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
}
if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
- backup_iv = kzalloc(ivsize, GFP_KERNEL);
- if (!backup_iv)
- return -ENOMEM;
- scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
+ scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
+ areq->cryptlen - ivsize, ivsize, 0);
}
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
@@ -134,8 +131,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
if (areq->iv) {
if (mode & SS_DECRYPTION) {
- memcpy(areq->iv, backup_iv, ivsize);
- kfree_sensitive(backup_iv);
+ memcpy(areq->iv, ctx->backup_iv, ivsize);
+ memzero_explicit(ctx->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
ivsize, 0);
@@ -199,7 +196,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
unsigned int ileft = areq->cryptlen;
unsigned int oleft = areq->cryptlen;
unsigned int todo;
- void *backup_iv = NULL;
struct sg_mapping_iter mi, mo;
unsigned long pi = 0, po = 0; /* progress for in and out */
bool miter_err;
@@ -244,10 +240,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
return sun4i_ss_cipher_poll_fallback(areq);
if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
- backup_iv = kzalloc(ivsize, GFP_KERNEL);
- if (!backup_iv)
- return -ENOMEM;
- scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
+ scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
+ areq->cryptlen - ivsize, ivsize, 0);
}
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
@@ -384,8 +378,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
}
if (areq->iv) {
if (mode & SS_DECRYPTION) {
- memcpy(areq->iv, backup_iv, ivsize);
- kfree_sensitive(backup_iv);
+ memcpy(areq->iv, ctx->backup_iv, ivsize);
+ memzero_explicit(ctx->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
ivsize, 0);
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 44b8fc4b786d..006e40133c28 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -235,7 +235,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
#endif
};
-static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
+static int sun4i_ss_debugfs_show(struct seq_file *seq, void *v)
{
unsigned int i;
@@ -266,19 +266,7 @@ static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
}
return 0;
}
-
-static int sun4i_ss_dbgfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sun4i_ss_dbgfs_read, inode->i_private);
-}
-
-static const struct file_operations sun4i_ss_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = sun4i_ss_dbgfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs);
/*
* Power management strategy: The device is suspended unless a TFM exists for
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 0fee6f4e2d90..ba59c7a48825 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -183,6 +183,7 @@ struct sun4i_tfm_ctx {
struct sun4i_cipher_req_ctx {
u32 mode;
+ u8 backup_iv[AES_BLOCK_SIZE];
struct skcipher_request fallback_req; // keep at the end
};
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 54ae8d16e493..74b4e910a38d 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -11,6 +11,7 @@
* You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -24,26 +25,62 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct scatterlist *sg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ce_alg_template *algt;
+ unsigned int todo, len;
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+
+ if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
+ sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
+ algt->stat_fb_maxsg++;
+ return true;
+ }
- if (sg_nents(areq->src) > MAX_SG || sg_nents(areq->dst) > MAX_SG)
+ if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) {
+ algt->stat_fb_leniv++;
return true;
+ }
- if (areq->cryptlen < crypto_skcipher_ivsize(tfm))
+ if (areq->cryptlen == 0) {
+ algt->stat_fb_len0++;
return true;
+ }
- if (areq->cryptlen == 0 || areq->cryptlen % 16)
+ if (areq->cryptlen % 16) {
+ algt->stat_fb_mod16++;
return true;
+ }
+ len = areq->cryptlen;
sg = areq->src;
while (sg) {
- if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+ algt->stat_fb_srcali++;
+ return true;
+ }
+ todo = min(len, sg->length);
+ if (todo % 4) {
+ algt->stat_fb_srclen++;
return true;
+ }
+ len -= todo;
sg = sg_next(sg);
}
+
+ len = areq->cryptlen;
sg = areq->dst;
while (sg) {
- if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+ algt->stat_fb_dstali++;
+ return true;
+ }
+ todo = min(len, sg->length);
+ if (todo % 4) {
+ algt->stat_fb_dstlen++;
return true;
+ }
+ len -= todo;
sg = sg_next(sg);
}
return false;
@@ -93,6 +130,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
int nr_sgs = 0;
int nr_sgd = 0;
int err = 0;
+ int ns = sg_nents_for_len(areq->src, areq->cryptlen);
+ int nd = sg_nents_for_len(areq->dst, areq->cryptlen);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
@@ -151,23 +190,13 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
rctx->ivlen = ivsize;
- rctx->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
- if (!rctx->bounce_iv) {
- err = -ENOMEM;
- goto theend_key;
- }
if (rctx->op_dir & CE_DECRYPTION) {
- rctx->backup_iv = kzalloc(ivsize, GFP_KERNEL);
- if (!rctx->backup_iv) {
- err = -ENOMEM;
- goto theend_key;
- }
offset = areq->cryptlen - ivsize;
- scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
+ scatterwalk_map_and_copy(chan->backup_iv, areq->src,
offset, ivsize, 0);
}
- memcpy(rctx->bounce_iv, areq->iv, ivsize);
- rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
+ memcpy(chan->bounce_iv, areq->iv, ivsize);
+ rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, rctx->ivlen,
DMA_TO_DEVICE);
if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
@@ -178,8 +207,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
}
if (areq->src == areq->dst) {
- nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
- DMA_BIDIRECTIONAL);
+ nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
@@ -187,15 +215,13 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
}
nr_sgd = nr_sgs;
} else {
- nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
- DMA_TO_DEVICE);
+ nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
goto theend_iv;
}
- nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
- DMA_FROM_DEVICE);
+ nr_sgd = dma_map_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
err = -EINVAL;
@@ -240,14 +266,11 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
theend_sgs:
if (areq->src == areq->dst) {
- dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
} else {
if (nr_sgs > 0)
- dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
- DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
- DMA_FROM_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
}
theend_iv:
@@ -256,16 +279,15 @@ theend_iv:
dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, rctx->backup_iv, ivsize);
- kfree_sensitive(rctx->backup_iv);
+ memcpy(areq->iv, chan->backup_iv, ivsize);
+ memzero_explicit(chan->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- kfree(rctx->bounce_iv);
+ memzero_explicit(chan->bounce_iv, ivsize);
}
-theend_key:
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
theend:
@@ -283,7 +305,9 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
flow = rctx->flow;
err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
@@ -319,13 +343,13 @@ static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_r
dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, rctx->backup_iv, ivsize);
- kfree_sensitive(rctx->backup_iv);
+ memcpy(areq->iv, chan->backup_iv, ivsize);
+ memzero_explicit(chan->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- kfree(rctx->bounce_iv);
+ memzero_explicit(chan->bounce_iv, ivsize);
}
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
@@ -395,10 +419,9 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm);
-
- dev_info(op->ce->dev, "Fallback for %s is %s\n",
- crypto_tfm_alg_driver_name(&sktfm->base),
- crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
+ memcpy(algt->fbname,
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
+ CRYPTO_MAX_ALG_NAME);
op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index d8623c7e0d1d..9f6594699835 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -283,7 +283,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
@@ -310,7 +310,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
@@ -336,7 +336,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
@@ -363,7 +363,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
@@ -595,19 +595,47 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
continue;
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
- seq_printf(seq, "%s %s %lu %lu\n",
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
ce_algs[i].alg.skcipher.base.cra_driver_name,
ce_algs[i].alg.skcipher.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
+ seq_printf(seq, "\tLast fallback is: %s\n",
+ ce_algs[i].fbname);
+ seq_printf(seq, "\tFallback due to 0 length: %lu\n",
+ ce_algs[i].stat_fb_len0);
+ seq_printf(seq, "\tFallback due to length !mod16: %lu\n",
+ ce_algs[i].stat_fb_mod16);
+ seq_printf(seq, "\tFallback due to length < IV: %lu\n",
+ ce_algs[i].stat_fb_leniv);
+ seq_printf(seq, "\tFallback due to source alignment: %lu\n",
+ ce_algs[i].stat_fb_srcali);
+ seq_printf(seq, "\tFallback due to dest alignment: %lu\n",
+ ce_algs[i].stat_fb_dstali);
+ seq_printf(seq, "\tFallback due to source length: %lu\n",
+ ce_algs[i].stat_fb_srclen);
+ seq_printf(seq, "\tFallback due to dest length: %lu\n",
+ ce_algs[i].stat_fb_dstlen);
+ seq_printf(seq, "\tFallback due to SG numbers: %lu\n",
+ ce_algs[i].stat_fb_maxsg);
break;
case CRYPTO_ALG_TYPE_AHASH:
- seq_printf(seq, "%s %s %lu %lu\n",
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
ce_algs[i].alg.hash.halg.base.cra_driver_name,
ce_algs[i].alg.hash.halg.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
+ seq_printf(seq, "\tLast fallback is: %s\n",
+ ce_algs[i].fbname);
+ seq_printf(seq, "\tFallback due to 0 length: %lu\n",
+ ce_algs[i].stat_fb_len0);
+ seq_printf(seq, "\tFallback due to length: %lu\n",
+ ce_algs[i].stat_fb_srclen);
+ seq_printf(seq, "\tFallback due to alignment: %lu\n",
+ ce_algs[i].stat_fb_srcali);
+ seq_printf(seq, "\tFallback due to SG numbers: %lu\n",
+ ce_algs[i].stat_fb_maxsg);
break;
case CRYPTO_ALG_TYPE_RNG:
- seq_printf(seq, "%s %s %lu %lu\n",
+ seq_printf(seq, "%s %s reqs=%lu bytes=%lu\n",
ce_algs[i].alg.rng.base.cra_driver_name,
ce_algs[i].alg.rng.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_bytes);
@@ -673,6 +701,18 @@ static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
err = -ENOMEM;
goto error_engine;
}
+ ce->chanlist[i].bounce_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ce->chanlist[i].bounce_iv) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ ce->chanlist[i].backup_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
+ GFP_KERNEL);
+ if (!ce->chanlist[i].backup_iv) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
}
return 0;
error_engine:
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 88194718a806..8b5b9b9d04c3 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -9,6 +9,7 @@
*
* You could find the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
@@ -49,9 +50,9 @@ int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
sizeof(struct sun8i_ce_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
- dev_info(op->ce->dev, "Fallback for %s is %s\n",
- crypto_tfm_alg_driver_name(tfm),
- crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
+ memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base),
+ CRYPTO_MAX_ALG_NAME);
+
err = pm_runtime_get_sync(op->ce->dev);
if (err < 0)
goto error_pm;
@@ -198,17 +199,32 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ce_alg_template *algt;
struct scatterlist *sg;
- if (areq->nbytes == 0)
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+
+ if (areq->nbytes == 0) {
+ algt->stat_fb_len0++;
return true;
+ }
/* we need to reserve one SG for padding one */
- if (sg_nents(areq->src) > MAX_SG - 1)
+ if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
+ algt->stat_fb_maxsg++;
return true;
+ }
sg = areq->src;
while (sg) {
- if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ if (sg->length % 4) {
+ algt->stat_fb_srclen++;
return true;
+ }
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+ algt->stat_fb_srcali++;
+ return true;
+ }
sg = sg_next(sg);
}
return false;
@@ -228,7 +244,7 @@ int sun8i_ce_hash_digest(struct ahash_request *areq)
if (sun8i_ce_hash_need_fallback(areq))
return sun8i_ce_hash_digest_fb(areq);
- nr_sgs = sg_nents(areq->src);
+ nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
if (nr_sgs > MAX_SG - 1)
return sun8i_ce_hash_digest_fb(areq);
@@ -247,6 +263,64 @@ int sun8i_ce_hash_digest(struct ahash_request *areq)
return crypto_transfer_hash_request_to_engine(engine, areq);
}
+static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
+{
+ u64 fill, min_fill, j, k;
+ __be64 *bebits;
+ __le64 *lebits;
+
+ j = padi;
+ buf[j++] = cpu_to_le32(0x80);
+
+ if (bs == 64) {
+ fill = 64 - (byte_count % 64);
+ min_fill = 2 * sizeof(u32) + sizeof(u32);
+ } else {
+ fill = 128 - (byte_count % 128);
+ min_fill = 4 * sizeof(u32) + sizeof(u32);
+ }
+
+ if (fill < min_fill)
+ fill += bs;
+
+ k = j;
+ j += (fill - min_fill) / sizeof(u32);
+ if (j * 4 > bufsize) {
+ pr_err("%s OVERFLOW %llu\n", __func__, j);
+ return 0;
+ }
+ for (; k < j; k++)
+ buf[k] = 0;
+
+ if (le) {
+ /* MD5 */
+ lebits = (__le64 *)&buf[j];
+ *lebits = cpu_to_le64(byte_count << 3);
+ j += 2;
+ } else {
+ if (bs == 64) {
+ /* sha1 sha224 sha256 */
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ } else {
+ /* sha384 sha512*/
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count >> 61);
+ j += 2;
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ }
+ }
+ if (j * 4 > bufsize) {
+ pr_err("%s OVERFLOW %llu\n", __func__, j);
+ return 0;
+ }
+
+ return j;
+}
+
int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
{
struct ahash_request *areq = container_of(breq, struct ahash_request, base);
@@ -265,14 +339,11 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
__le32 *bf;
void *buf = NULL;
int j, i, todo;
- int nbw = 0;
- u64 fill, min_fill;
- __be64 *bebits;
- __le64 *lebits;
void *result = NULL;
u64 bs;
int digestsize;
dma_addr_t addr_res, addr_pad;
+ int ns = sg_nents_for_len(areq->src, areq->nbytes);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
ce = algt->ce;
@@ -317,7 +388,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
cet->t_sym_ctl = 0;
cet->t_asym_ctl = 0;
- nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
@@ -347,44 +418,25 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
byte_count = areq->nbytes;
j = 0;
- bf[j++] = cpu_to_le32(0x80);
-
- if (bs == 64) {
- fill = 64 - (byte_count % 64);
- min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
- } else {
- fill = 128 - (byte_count % 128);
- min_fill = 4 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
- }
-
- if (fill < min_fill)
- fill += bs;
-
- j += (fill - min_fill) / sizeof(u32);
switch (algt->ce_algo_id) {
case CE_ID_HASH_MD5:
- lebits = (__le64 *)&bf[j];
- *lebits = cpu_to_le64(byte_count << 3);
- j += 2;
+ j = hash_pad(bf, 2 * bs, j, byte_count, true, bs);
break;
case CE_ID_HASH_SHA1:
case CE_ID_HASH_SHA224:
case CE_ID_HASH_SHA256:
- bebits = (__be64 *)&bf[j];
- *bebits = cpu_to_be64(byte_count << 3);
- j += 2;
+ j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
break;
case CE_ID_HASH_SHA384:
case CE_ID_HASH_SHA512:
- bebits = (__be64 *)&bf[j];
- *bebits = cpu_to_be64(byte_count >> 61);
- j += 2;
- bebits = (__be64 *)&bf[j];
- *bebits = cpu_to_be64(byte_count << 3);
- j += 2;
+ j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
break;
}
+ if (!j) {
+ err = -EINVAL;
+ goto theend;
+ }
addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
cet->t_src[i].addr = cpu_to_le32(addr_pad);
@@ -405,8 +457,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
- DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
@@ -414,6 +465,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
theend:
kfree(buf);
kfree(result);
+ local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index b3a9bbfb8831..b3cc43ea6c8a 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -108,11 +108,9 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
goto err_dst;
}
- err = pm_runtime_get_sync(ce->dev);
- if (err < 0) {
- pm_runtime_put_noidle(ce->dev);
+ err = pm_runtime_resume_and_get(ce->dev);
+ if (err < 0)
goto err_pm;
- }
mutex_lock(&ce->rnglock);
chan = &ce->chanlist[flow];
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index 19cd2e52f89d..c4b0a8b58842 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -54,11 +54,9 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa
goto err_dst;
}
- err = pm_runtime_get_sync(ce->dev);
- if (err < 0) {
- pm_runtime_put_noidle(ce->dev);
+ err = pm_runtime_resume_and_get(ce->dev);
+ if (err < 0)
goto err_pm;
- }
mutex_lock(&ce->rnglock);
chan = &ce->chanlist[flow];
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 624a5926f21f..8177aaba4434 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -186,6 +186,8 @@ struct ce_task {
* @status: set to 1 by interrupt if task is done
* @t_phy: Physical address of task
* @tl: pointer to the current ce_task for this flow
+ * @backup_iv: buffer which contain the next IV to store
+ * @bounce_iv: buffer which contain the IV
* @stat_req: number of request done by this flow
*/
struct sun8i_ce_flow {
@@ -195,6 +197,8 @@ struct sun8i_ce_flow {
dma_addr_t t_phy;
int timeout;
struct ce_task *tl;
+ void *backup_iv;
+ void *bounce_iv;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
unsigned long stat_req;
#endif
@@ -241,8 +245,6 @@ struct sun8i_ce_dev {
* struct sun8i_cipher_req_ctx - context for a skcipher request
* @op_dir: direction (encrypt vs decrypt) for this request
* @flow: the flow to use for this request
- * @backup_iv: buffer which contain the next IV to store
- * @bounce_iv: buffer which contain the IV
* @ivlen: size of bounce_iv
* @nr_sgs: The number of source SG (as given by dma_map_sg())
* @nr_sgd: The number of destination SG (as given by dma_map_sg())
@@ -253,8 +255,6 @@ struct sun8i_ce_dev {
struct sun8i_cipher_req_ctx {
u32 op_dir;
int flow;
- void *backup_iv;
- void *bounce_iv;
unsigned int ivlen;
int nr_sgs;
int nr_sgd;
@@ -333,11 +333,18 @@ struct sun8i_ce_alg_template {
struct ahash_alg hash;
struct rng_alg rng;
} alg;
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
unsigned long stat_req;
unsigned long stat_fb;
unsigned long stat_bytes;
-#endif
+ unsigned long stat_fb_maxsg;
+ unsigned long stat_fb_leniv;
+ unsigned long stat_fb_len0;
+ unsigned long stat_fb_mod16;
+ unsigned long stat_fb_srcali;
+ unsigned long stat_fb_srclen;
+ unsigned long stat_fb_dstali;
+ unsigned long stat_fb_dstlen;
+ char fbname[CRYPTO_MAX_ALG_NAME];
};
int sun8i_ce_enqueue(struct crypto_async_request *areq, u32 type);
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 9ef1c85c4aaa..910d6751644c 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -11,6 +11,7 @@
* You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -21,34 +22,53 @@
static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
struct scatterlist *in_sg = areq->src;
struct scatterlist *out_sg = areq->dst;
struct scatterlist *sg;
+ unsigned int todo, len;
- if (areq->cryptlen == 0 || areq->cryptlen % 16)
+ if (areq->cryptlen == 0 || areq->cryptlen % 16) {
+ algt->stat_fb_len++;
return true;
+ }
- if (sg_nents(areq->src) > 8 || sg_nents(areq->dst) > 8)
+ if (sg_nents_for_len(areq->src, areq->cryptlen) > 8 ||
+ sg_nents_for_len(areq->dst, areq->cryptlen) > 8) {
+ algt->stat_fb_sgnum++;
return true;
+ }
+ len = areq->cryptlen;
sg = areq->src;
while (sg) {
- if ((sg->length % 16) != 0)
- return true;
- if ((sg_dma_len(sg) % 16) != 0)
+ todo = min(len, sg->length);
+ if ((todo % 16) != 0) {
+ algt->stat_fb_sglen++;
return true;
- if (!IS_ALIGNED(sg->offset, 16))
+ }
+ if (!IS_ALIGNED(sg->offset, 16)) {
+ algt->stat_fb_align++;
return true;
+ }
+ len -= todo;
sg = sg_next(sg);
}
+ len = areq->cryptlen;
sg = areq->dst;
while (sg) {
- if ((sg->length % 16) != 0)
- return true;
- if ((sg_dma_len(sg) % 16) != 0)
+ todo = min(len, sg->length);
+ if ((todo % 16) != 0) {
+ algt->stat_fb_sglen++;
return true;
- if (!IS_ALIGNED(sg->offset, 16))
+ }
+ if (!IS_ALIGNED(sg->offset, 16)) {
+ algt->stat_fb_align++;
return true;
+ }
+ len -= todo;
sg = sg_next(sg);
}
@@ -92,6 +112,69 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
return err;
}
+static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ss_dev *ss = op->ss;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct scatterlist *sg = areq->src;
+ unsigned int todo, offset;
+ unsigned int len = areq->cryptlen;
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
+ int i = 0;
+ u32 a;
+ int err;
+
+ rctx->ivlen = ivsize;
+ if (rctx->op_dir & SS_DECRYPTION) {
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(sf->biv, areq->src, offset,
+ ivsize, 0);
+ }
+
+ /* we need to copy all IVs from source in case DMA is bi-directionnal */
+ while (sg && len) {
+ if (sg_dma_len(sg) == 0) {
+ sg = sg_next(sg);
+ continue;
+ }
+ if (i == 0)
+ memcpy(sf->iv[0], areq->iv, ivsize);
+ a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, a)) {
+ memzero_explicit(sf->iv[i], ivsize);
+ dev_err(ss->dev, "Cannot DMA MAP IV\n");
+ err = -EFAULT;
+ goto dma_iv_error;
+ }
+ rctx->p_iv[i] = a;
+ /* we need to setup all others IVs only in the decrypt way */
+ if (rctx->op_dir & SS_ENCRYPTION)
+ return 0;
+ todo = min(len, sg_dma_len(sg));
+ len -= todo;
+ i++;
+ if (i < MAX_SG) {
+ offset = sg->length - ivsize;
+ scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0);
+ }
+ rctx->niv = i;
+ sg = sg_next(sg);
+ }
+
+ return 0;
+dma_iv_error:
+ i--;
+ while (i >= 0) {
+ dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+ memzero_explicit(sf->iv[i], ivsize);
+ i--;
+ }
+ return err;
+}
+
static int sun8i_ss_cipher(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
@@ -100,12 +183,14 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ss_alg_template *algt;
+ struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
struct scatterlist *sg;
unsigned int todo, len, offset, ivsize;
- void *backup_iv = NULL;
int nr_sgs = 0;
int nr_sgd = 0;
int err = 0;
+ int nsgs = sg_nents_for_len(areq->src, areq->cryptlen);
+ int nsgd = sg_nents_for_len(areq->dst, areq->cryptlen);
int i;
algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
@@ -133,34 +218,12 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
- rctx->ivlen = ivsize;
- rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
- if (!rctx->biv) {
- err = -ENOMEM;
+ err = sun8i_ss_setup_ivs(areq);
+ if (err)
goto theend_key;
- }
- if (rctx->op_dir & SS_DECRYPTION) {
- backup_iv = kzalloc(ivsize, GFP_KERNEL);
- if (!backup_iv) {
- err = -ENOMEM;
- goto theend_key;
- }
- offset = areq->cryptlen - ivsize;
- scatterwalk_map_and_copy(backup_iv, areq->src, offset,
- ivsize, 0);
- }
- memcpy(rctx->biv, areq->iv, ivsize);
- rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ss->dev, rctx->p_iv)) {
- dev_err(ss->dev, "Cannot DMA MAP IV\n");
- err = -ENOMEM;
- goto theend_iv;
- }
}
if (areq->src == areq->dst) {
- nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
- DMA_BIDIRECTIONAL);
+ nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL);
if (nr_sgs <= 0 || nr_sgs > 8) {
dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
@@ -168,15 +231,13 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
}
nr_sgd = nr_sgs;
} else {
- nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
- DMA_TO_DEVICE);
+ nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE);
if (nr_sgs <= 0 || nr_sgs > 8) {
dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
goto theend_iv;
}
- nr_sgd = dma_map_sg(ss->dev, areq->dst, sg_nents(areq->dst),
- DMA_FROM_DEVICE);
+ nr_sgd = dma_map_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE);
if (nr_sgd <= 0 || nr_sgd > 8) {
dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd);
err = -EINVAL;
@@ -232,31 +293,26 @@ sgd_next:
theend_sgs:
if (areq->src == areq->dst) {
- dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
- DMA_TO_DEVICE);
- dma_unmap_sg(ss->dev, areq->dst, sg_nents(areq->dst),
- DMA_FROM_DEVICE);
+ dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE);
}
theend_iv:
- if (rctx->p_iv)
- dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen,
- DMA_TO_DEVICE);
-
if (areq->iv && ivsize > 0) {
- if (rctx->biv) {
- offset = areq->cryptlen - ivsize;
- if (rctx->op_dir & SS_DECRYPTION) {
- memcpy(areq->iv, backup_iv, ivsize);
- kfree_sensitive(backup_iv);
- } else {
- scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
- ivsize, 0);
- }
- kfree(rctx->biv);
+ for (i = 0; i < rctx->niv; i++) {
+ dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+ memzero_explicit(sf->iv[i], ivsize);
+ }
+
+ offset = areq->cryptlen - ivsize;
+ if (rctx->op_dir & SS_DECRYPTION) {
+ memcpy(areq->iv, sf->biv, ivsize);
+ memzero_explicit(sf->biv, ivsize);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
}
}
@@ -274,7 +330,9 @@ static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *ar
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = sun8i_ss_cipher(breq);
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
@@ -346,9 +404,9 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
crypto_skcipher_reqsize(op->fallback_tfm);
- dev_info(op->ss->dev, "Fallback for %s is %s\n",
- crypto_tfm_alg_driver_name(&sktfm->base),
- crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
+ memcpy(algt->fbname,
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
+ CRYPTO_MAX_ALG_NAME);
op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
op->enginectx.op.prepare_request = NULL;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 80e89066dbd1..ac2329e2b0e5 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -30,6 +30,8 @@
static const struct ss_variant ss_a80_variant = {
.alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
},
+ .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP,
+ },
.op_mode = { SS_OP_ECB, SS_OP_CBC,
},
.ss_clks = {
@@ -64,6 +66,7 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx
const char *name)
{
int flow = rctx->flow;
+ unsigned int ivlen = rctx->ivlen;
u32 v = SS_START;
int i;
@@ -102,15 +105,14 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx
mutex_lock(&ss->mlock);
writel(rctx->p_key, ss->base + SS_KEY_ADR_REG);
- if (i == 0) {
- if (rctx->p_iv)
- writel(rctx->p_iv, ss->base + SS_IV_ADR_REG);
- } else {
- if (rctx->biv) {
- if (rctx->op_dir == SS_ENCRYPTION)
- writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ if (ivlen) {
+ if (rctx->op_dir == SS_ENCRYPTION) {
+ if (i == 0)
+ writel(rctx->p_iv[0], ss->base + SS_IV_ADR_REG);
else
- writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - ivlen, ss->base + SS_IV_ADR_REG);
+ } else {
+ writel(rctx->p_iv[i], ss->base + SS_IV_ADR_REG);
}
}
@@ -407,6 +409,37 @@ static struct sun8i_ss_alg_template ss_algs[] = {
}
}
},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ss_algo_id = SS_ID_HASH_SHA1,
+ .alg.hash = {
+ .init = sun8i_ss_hash_init,
+ .update = sun8i_ss_hash_update,
+ .final = sun8i_ss_hash_final,
+ .finup = sun8i_ss_hash_finup,
+ .digest = sun8i_ss_hash_digest,
+ .export = sun8i_ss_hash_export,
+ .import = sun8i_ss_hash_import,
+ .setkey = sun8i_ss_hmac_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "hmac-sha1-sun8i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ss_hash_crainit,
+ .cra_exit = sun8i_ss_hash_craexit,
+ }
+ }
+ }
+},
#endif
};
@@ -428,6 +461,17 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
ss_algs[i].alg.skcipher.base.cra_driver_name,
ss_algs[i].alg.skcipher.base.cra_name,
ss_algs[i].stat_req, ss_algs[i].stat_fb);
+
+ seq_printf(seq, "\tLast fallback is: %s\n",
+ ss_algs[i].fbname);
+ seq_printf(seq, "\tFallback due to length: %lu\n",
+ ss_algs[i].stat_fb_len);
+ seq_printf(seq, "\tFallback due to SG length: %lu\n",
+ ss_algs[i].stat_fb_sglen);
+ seq_printf(seq, "\tFallback due to alignment: %lu\n",
+ ss_algs[i].stat_fb_align);
+ seq_printf(seq, "\tFallback due to SG numbers: %lu\n",
+ ss_algs[i].stat_fb_sgnum);
break;
case CRYPTO_ALG_TYPE_RNG:
seq_printf(seq, "%s %s reqs=%lu tsize=%lu\n",
@@ -440,6 +484,16 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
ss_algs[i].alg.hash.halg.base.cra_driver_name,
ss_algs[i].alg.hash.halg.base.cra_name,
ss_algs[i].stat_req, ss_algs[i].stat_fb);
+ seq_printf(seq, "\tLast fallback is: %s\n",
+ ss_algs[i].fbname);
+ seq_printf(seq, "\tFallback due to length: %lu\n",
+ ss_algs[i].stat_fb_len);
+ seq_printf(seq, "\tFallback due to SG length: %lu\n",
+ ss_algs[i].stat_fb_sglen);
+ seq_printf(seq, "\tFallback due to alignment: %lu\n",
+ ss_algs[i].stat_fb_align);
+ seq_printf(seq, "\tFallback due to SG numbers: %lu\n",
+ ss_algs[i].stat_fb_sgnum);
break;
}
}
@@ -462,7 +516,7 @@ static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
*/
static int allocate_flows(struct sun8i_ss_dev *ss)
{
- int i, err;
+ int i, j, err;
ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow),
GFP_KERNEL);
@@ -472,6 +526,36 @@ static int allocate_flows(struct sun8i_ss_dev *ss)
for (i = 0; i < MAXFLOW; i++) {
init_completion(&ss->flows[i].complete);
+ ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].biv) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
+
+ for (j = 0; j < MAX_SG; j++) {
+ ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].iv[j]) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ }
+
+ /* the padding could be up to two block. */
+ ss->flows[i].pad = devm_kmalloc(ss->dev, MAX_PAD_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].pad) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ ss->flows[i].result = devm_kmalloc(ss->dev, SHA256_DIGEST_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].result) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
+
ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true);
if (!ss->flows[i].engine) {
dev_err(ss->dev, "Cannot allocate engine\n");
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 3c073eb3db03..36a82b22953c 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -9,15 +9,104 @@
*
* You could find the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
+#include <crypto/hmac.h>
+#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ss.h"
+static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_shash *xtfm;
+ struct shash_desc *sdesc;
+ size_t len;
+ int ret = 0;
+
+ xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(xtfm))
+ return PTR_ERR(xtfm);
+
+ len = sizeof(*sdesc) + crypto_shash_descsize(xtfm);
+ sdesc = kmalloc(len, GFP_KERNEL);
+ if (!sdesc) {
+ ret = -ENOMEM;
+ goto err_hashkey_sdesc;
+ }
+ sdesc->tfm = xtfm;
+
+ ret = crypto_shash_init(sdesc);
+ if (ret) {
+ dev_err(tfmctx->ss->dev, "shash init error ret=%d\n", ret);
+ goto err_hashkey;
+ }
+ ret = crypto_shash_finup(sdesc, key, keylen, tfmctx->key);
+ if (ret)
+ dev_err(tfmctx->ss->dev, "shash finup error\n");
+err_hashkey:
+ kfree(sdesc);
+err_hashkey_sdesc:
+ crypto_free_shash(xtfm);
+ return ret;
+}
+
+int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash);
+ struct ahash_alg *alg = __crypto_ahash_alg(ahash->base.__crt_alg);
+ struct sun8i_ss_alg_template *algt;
+ int digestsize, i;
+ int bs = crypto_ahash_blocksize(ahash);
+ int ret;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ digestsize = algt->alg.hash.halg.digestsize;
+
+ if (keylen > bs) {
+ ret = sun8i_ss_hashkey(tfmctx, key, keylen);
+ if (ret)
+ return ret;
+ tfmctx->keylen = digestsize;
+ } else {
+ tfmctx->keylen = keylen;
+ memcpy(tfmctx->key, key, keylen);
+ }
+
+ tfmctx->ipad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
+ if (!tfmctx->ipad)
+ return -ENOMEM;
+ tfmctx->opad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
+ if (!tfmctx->opad) {
+ ret = -ENOMEM;
+ goto err_opad;
+ }
+
+ memset(tfmctx->key + tfmctx->keylen, 0, bs - tfmctx->keylen);
+ memcpy(tfmctx->ipad, tfmctx->key, tfmctx->keylen);
+ memcpy(tfmctx->opad, tfmctx->key, tfmctx->keylen);
+ for (i = 0; i < bs; i++) {
+ tfmctx->ipad[i] ^= HMAC_IPAD_VALUE;
+ tfmctx->opad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ ret = crypto_ahash_setkey(tfmctx->fallback_tfm, key, keylen);
+ if (!ret)
+ return 0;
+
+ memzero_explicit(tfmctx->key, keylen);
+ kfree_sensitive(tfmctx->opad);
+err_opad:
+ kfree_sensitive(tfmctx->ipad);
+ return ret;
+}
+
int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
{
struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
@@ -49,9 +138,8 @@ int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
sizeof(struct sun8i_ss_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
- dev_info(op->ss->dev, "Fallback for %s is %s\n",
- crypto_tfm_alg_driver_name(tfm),
- crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
+ memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), CRYPTO_MAX_ALG_NAME);
+
err = pm_runtime_get_sync(op->ss->dev);
if (err < 0)
goto error_pm;
@@ -66,6 +154,9 @@ void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
{
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+ kfree_sensitive(tfmctx->ipad);
+ kfree_sensitive(tfmctx->opad);
+
crypto_free_ahash(tfmctx->fallback_tfm);
pm_runtime_put_sync_suspend(tfmctx->ss->dev);
}
@@ -257,23 +348,48 @@ static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ss_alg_template *algt;
struct scatterlist *sg;
- if (areq->nbytes == 0)
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+
+ if (areq->nbytes == 0) {
+ algt->stat_fb_len++;
return true;
+ }
+
+ if (areq->nbytes >= MAX_PAD_SIZE - 64) {
+ algt->stat_fb_len++;
+ return true;
+ }
+
/* we need to reserve one SG for the padding one */
- if (sg_nents(areq->src) > MAX_SG - 1)
+ if (sg_nents(areq->src) > MAX_SG - 1) {
+ algt->stat_fb_sgnum++;
return true;
+ }
+
sg = areq->src;
while (sg) {
/* SS can operate hash only on full block size
* since SS support only MD5,sha1,sha224 and sha256, blocksize
* is always 64
- * TODO: handle request if last SG is not len%64
- * but this will need to copy data on a new SG of size=64
*/
- if (sg->length % 64 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ /* Only the last block could be bounced to the pad buffer */
+ if (sg->length % 64 && sg_next(sg)) {
+ algt->stat_fb_sglen++;
+ return true;
+ }
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+ algt->stat_fb_align++;
return true;
+ }
+ if (sg->length % 4) {
+ algt->stat_fb_sglen++;
+ return true;
+ }
sg = sg_next(sg);
}
return false;
@@ -287,21 +403,11 @@ int sun8i_ss_hash_digest(struct ahash_request *areq)
struct sun8i_ss_alg_template *algt;
struct sun8i_ss_dev *ss;
struct crypto_engine *engine;
- struct scatterlist *sg;
- int nr_sgs, e, i;
+ int e;
if (sun8i_ss_hash_need_fallback(areq))
return sun8i_ss_hash_digest_fb(areq);
- nr_sgs = sg_nents(areq->src);
- if (nr_sgs > MAX_SG - 1)
- return sun8i_ss_hash_digest_fb(areq);
-
- for_each_sg(areq->src, sg, nr_sgs, i) {
- if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
- return sun8i_ss_hash_digest_fb(areq);
- }
-
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
ss = algt->ss;
@@ -312,6 +418,64 @@ int sun8i_ss_hash_digest(struct ahash_request *areq)
return crypto_transfer_hash_request_to_engine(engine, areq);
}
+static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
+{
+ u64 fill, min_fill, j, k;
+ __be64 *bebits;
+ __le64 *lebits;
+
+ j = padi;
+ buf[j++] = cpu_to_le32(0x80);
+
+ if (bs == 64) {
+ fill = 64 - (byte_count % 64);
+ min_fill = 2 * sizeof(u32) + sizeof(u32);
+ } else {
+ fill = 128 - (byte_count % 128);
+ min_fill = 4 * sizeof(u32) + sizeof(u32);
+ }
+
+ if (fill < min_fill)
+ fill += bs;
+
+ k = j;
+ j += (fill - min_fill) / sizeof(u32);
+ if (j * 4 > bufsize) {
+ pr_err("%s OVERFLOW %llu\n", __func__, j);
+ return 0;
+ }
+ for (; k < j; k++)
+ buf[k] = 0;
+
+ if (le) {
+ /* MD5 */
+ lebits = (__le64 *)&buf[j];
+ *lebits = cpu_to_le64(byte_count << 3);
+ j += 2;
+ } else {
+ if (bs == 64) {
+ /* sha1 sha224 sha256 */
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ } else {
+ /* sha384 sha512*/
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count >> 61);
+ j += 2;
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ }
+ }
+ if (j * 4 > bufsize) {
+ pr_err("%s OVERFLOW %llu\n", __func__, j);
+ return 0;
+ }
+
+ return j;
+}
+
/* sun8i_ss_hash_run - run an ahash request
* Send the data of the request to the SS along with an extra SG with padding
*/
@@ -319,20 +483,26 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
{
struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
struct sun8i_ss_alg_template *algt;
struct sun8i_ss_dev *ss;
struct scatterlist *sg;
+ int bs = crypto_ahash_blocksize(tfm);
int nr_sgs, err, digestsize;
unsigned int len;
- u64 fill, min_fill, byte_count;
+ u64 byte_count;
void *pad, *result;
- int j, i, todo;
- __be64 *bebits;
- __le64 *lebits;
- dma_addr_t addr_res, addr_pad;
+ int j, i, k, todo;
+ dma_addr_t addr_res, addr_pad, addr_xpad;
__le32 *bf;
+ /* HMAC step:
+ * 0: normal hashing
+ * 1: IPAD
+ * 2: OPAD
+ */
+ int hmac = 0;
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
ss = algt->ss;
@@ -341,18 +511,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
if (digestsize == SHA224_DIGEST_SIZE)
digestsize = SHA256_DIGEST_SIZE;
- /* the padding could be up to two block. */
- pad = kzalloc(algt->alg.hash.halg.base.cra_blocksize * 2, GFP_KERNEL | GFP_DMA);
- if (!pad)
- return -ENOMEM;
+ result = ss->flows[rctx->flow].result;
+ pad = ss->flows[rctx->flow].pad;
bf = (__le32 *)pad;
- result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
- if (!result) {
- kfree(pad);
- return -ENOMEM;
- }
-
for (i = 0; i < MAX_SG; i++) {
rctx->t_dst[i].addr = 0;
rctx->t_dst[i].len = 0;
@@ -375,17 +537,33 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
if (dma_mapping_error(ss->dev, addr_res)) {
dev_err(ss->dev, "DMA map dest\n");
err = -EINVAL;
- goto theend;
+ goto err_dma_result;
}
+ j = 0;
len = areq->nbytes;
- for_each_sg(areq->src, sg, nr_sgs, i) {
- rctx->t_src[i].addr = sg_dma_address(sg);
+ sg = areq->src;
+ i = 0;
+ while (len > 0 && sg) {
+ if (sg_dma_len(sg) == 0) {
+ sg = sg_next(sg);
+ continue;
+ }
todo = min(len, sg_dma_len(sg));
- rctx->t_src[i].len = todo / 4;
- len -= todo;
- rctx->t_dst[i].addr = addr_res;
- rctx->t_dst[i].len = digestsize / 4;
+ /* only the last SG could be with a size not modulo64 */
+ if (todo % 64 == 0) {
+ rctx->t_src[i].addr = sg_dma_address(sg);
+ rctx->t_src[i].len = todo / 4;
+ rctx->t_dst[i].addr = addr_res;
+ rctx->t_dst[i].len = digestsize / 4;
+ len -= todo;
+ } else {
+ scatterwalk_map_and_copy(bf, sg, 0, todo, 0);
+ j += todo / 4;
+ len -= todo;
+ }
+ sg = sg_next(sg);
+ i++;
}
if (len > 0) {
dev_err(ss->dev, "remaining len %d\n", len);
@@ -393,55 +571,139 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
goto theend;
}
+ if (j > 0)
+ i--;
+
+retry:
byte_count = areq->nbytes;
- j = 0;
- bf[j++] = cpu_to_le32(0x80);
+ if (tfmctx->keylen && hmac == 0) {
+ hmac = 1;
+ /* shift all SG one slot up, to free slot 0 for IPAD */
+ for (k = 6; k >= 0; k--) {
+ rctx->t_src[k + 1].addr = rctx->t_src[k].addr;
+ rctx->t_src[k + 1].len = rctx->t_src[k].len;
+ rctx->t_dst[k + 1].addr = rctx->t_dst[k].addr;
+ rctx->t_dst[k + 1].len = rctx->t_dst[k].len;
+ }
+ addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE);
+ err = dma_mapping_error(ss->dev, addr_xpad);
+ if (err) {
+ dev_err(ss->dev, "Fail to create DMA mapping of ipad\n");
+ goto err_dma_xpad;
+ }
+ rctx->t_src[0].addr = addr_xpad;
+ rctx->t_src[0].len = bs / 4;
+ rctx->t_dst[0].addr = addr_res;
+ rctx->t_dst[0].len = digestsize / 4;
+ i++;
+ byte_count = areq->nbytes + bs;
+ }
+ if (tfmctx->keylen && hmac == 2) {
+ for (i = 0; i < MAX_SG; i++) {
+ rctx->t_src[i].addr = 0;
+ rctx->t_src[i].len = 0;
+ rctx->t_dst[i].addr = 0;
+ rctx->t_dst[i].len = 0;
+ }
- fill = 64 - (byte_count % 64);
- min_fill = 3 * sizeof(u32);
+ addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ss->dev, addr_res)) {
+ dev_err(ss->dev, "Fail to create DMA mapping of result\n");
+ err = -EINVAL;
+ goto err_dma_result;
+ }
+ addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE);
+ err = dma_mapping_error(ss->dev, addr_xpad);
+ if (err) {
+ dev_err(ss->dev, "Fail to create DMA mapping of opad\n");
+ goto err_dma_xpad;
+ }
+ rctx->t_src[0].addr = addr_xpad;
+ rctx->t_src[0].len = bs / 4;
- if (fill < min_fill)
- fill += 64;
+ memcpy(bf, result, digestsize);
+ j = digestsize / 4;
+ i = 1;
+ byte_count = digestsize + bs;
- j += (fill - min_fill) / sizeof(u32);
+ rctx->t_dst[0].addr = addr_res;
+ rctx->t_dst[0].len = digestsize / 4;
+ }
switch (algt->ss_algo_id) {
case SS_ID_HASH_MD5:
- lebits = (__le64 *)&bf[j];
- *lebits = cpu_to_le64(byte_count << 3);
- j += 2;
+ j = hash_pad(bf, 4096, j, byte_count, true, bs);
break;
case SS_ID_HASH_SHA1:
case SS_ID_HASH_SHA224:
case SS_ID_HASH_SHA256:
- bebits = (__be64 *)&bf[j];
- *bebits = cpu_to_be64(byte_count << 3);
- j += 2;
+ j = hash_pad(bf, 4096, j, byte_count, false, bs);
break;
}
+ if (!j) {
+ err = -EINVAL;
+ goto theend;
+ }
addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
- rctx->t_src[i].addr = addr_pad;
- rctx->t_src[i].len = j;
- rctx->t_dst[i].addr = addr_res;
- rctx->t_dst[i].len = digestsize / 4;
if (dma_mapping_error(ss->dev, addr_pad)) {
dev_err(ss->dev, "DMA error on padding SG\n");
err = -EINVAL;
- goto theend;
+ goto err_dma_pad;
}
+ rctx->t_src[i].addr = addr_pad;
+ rctx->t_src[i].len = j;
+ rctx->t_dst[i].addr = addr_res;
+ rctx->t_dst[i].len = digestsize / 4;
err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));
+ /*
+ * mini helper for checking dma map/unmap
+ * flow start for hmac = 0 (and HMAC = 1)
+ * HMAC = 0
+ * MAP src
+ * MAP res
+ *
+ * retry:
+ * if hmac then hmac = 1
+ * MAP xpad (ipad)
+ * if hmac == 2
+ * MAP res
+ * MAP xpad (opad)
+ * MAP pad
+ * ACTION!
+ * UNMAP pad
+ * if hmac
+ * UNMAP xpad
+ * UNMAP res
+ * if hmac < 2
+ * UNMAP SRC
+ *
+ * if hmac = 1 then hmac = 2 goto retry
+ */
+
dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
- DMA_TO_DEVICE);
+
+err_dma_pad:
+ if (hmac > 0)
+ dma_unmap_single(ss->dev, addr_xpad, bs, DMA_TO_DEVICE);
+err_dma_xpad:
dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+err_dma_result:
+ if (hmac < 2)
+ dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ if (hmac == 1 && !err) {
+ hmac = 2;
+ goto retry;
+ }
- memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+ if (!err)
+ memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
- kfree(pad);
- kfree(result);
+ local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
index 246a6782674c..dd677e9ed06f 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
@@ -112,11 +112,9 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
goto err_iv;
}
- err = pm_runtime_get_sync(ss->dev);
- if (err < 0) {
- pm_runtime_put_noidle(ss->dev);
+ err = pm_runtime_resume_and_get(ss->dev);
+ if (err < 0)
goto err_pm;
- }
err = 0;
mutex_lock(&ss->mlock);
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
index 28188685b910..df6f08f6092f 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -82,6 +82,8 @@
#define PRNG_DATA_SIZE (160 / 8)
#define PRNG_SEED_SIZE DIV_ROUND_UP(175, 8)
+#define MAX_PAD_SIZE 4096
+
/*
* struct ss_clock - Describe clocks used by sun8i-ss
* @name: Name of clock needed by this variant
@@ -121,11 +123,19 @@ struct sginfo {
* @complete: completion for the current task on this flow
* @status: set to 1 by interrupt if task is done
* @stat_req: number of request done by this flow
+ * @iv: list of IV to use for each step
+ * @biv: buffer which contain the backuped IV
+ * @pad: padding buffer for hash operations
+ * @result: buffer for storing the result of hash operations
*/
struct sun8i_ss_flow {
struct crypto_engine *engine;
struct completion complete;
int status;
+ u8 *iv[MAX_SG];
+ u8 *biv;
+ void *pad;
+ void *result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
unsigned long stat_req;
#endif
@@ -164,28 +174,28 @@ struct sun8i_ss_dev {
* @t_src: list of mapped SGs with their size
* @t_dst: list of mapped SGs with their size
* @p_key: DMA address of the key
- * @p_iv: DMA address of the IV
+ * @p_iv: DMA address of the IVs
+ * @niv: Number of IVs DMA mapped
* @method: current algorithm for this request
* @op_mode: op_mode for this request
* @op_dir: direction (encrypt vs decrypt) for this request
* @flow: the flow to use for this request
- * @ivlen: size of biv
+ * @ivlen: size of IVs
* @keylen: keylen for this request
- * @biv: buffer which contain the IV
* @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
struct sginfo t_src[MAX_SG];
struct sginfo t_dst[MAX_SG];
u32 p_key;
- u32 p_iv;
+ u32 p_iv[MAX_SG];
+ int niv;
u32 method;
u32 op_mode;
u32 op_dir;
int flow;
unsigned int ivlen;
unsigned int keylen;
- void *biv;
struct skcipher_request fallback_req; // keep at the end
};
@@ -229,6 +239,10 @@ struct sun8i_ss_hash_tfm_ctx {
struct crypto_engine_ctx enginectx;
struct crypto_ahash *fallback_tfm;
struct sun8i_ss_dev *ss;
+ u8 *ipad;
+ u8 *opad;
+ u8 key[SHA256_BLOCK_SIZE];
+ int keylen;
};
/*
@@ -269,11 +283,14 @@ struct sun8i_ss_alg_template {
struct rng_alg rng;
struct ahash_alg hash;
} alg;
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
unsigned long stat_req;
unsigned long stat_fb;
unsigned long stat_bytes;
-#endif
+ unsigned long stat_fb_len;
+ unsigned long stat_fb_sglen;
+ unsigned long stat_fb_align;
+ unsigned long stat_fb_sgnum;
+ char fbname[CRYPTO_MAX_ALG_NAME];
};
int sun8i_ss_enqueue(struct crypto_async_request *areq, u32 type);
@@ -306,3 +323,5 @@ int sun8i_ss_hash_update(struct ahash_request *areq);
int sun8i_ss_hash_finup(struct ahash_request *areq);
int sun8i_ss_hash_digest(struct ahash_request *areq);
int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq);
+int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen);