author     Antoine Ténart <antoine.tenart@free-electrons.com>  2017-12-26 17:21:17 +0100
committer  Herbert Xu <herbert@gondor.apana.org.au>  2018-01-05 18:43:06 +1100
commit     809778e02cd45d0625439fee67688f655627bb3c (patch)
tree       9b6ac57c9b59da9ba0572c803d89480c936ab2d6 /drivers/crypto
parent     crypto: inside-secure - avoid unmapping DMA memory that was not mapped (diff)
crypto: inside-secure - fix hash when length is a multiple of a block
This patch fixes the hash support in the SafeXcel driver when the update
size is a multiple of a block size, and when a final call is made just
after with a size of 0. In such cases the driver should cache the last
block from the update to avoid handling 0 length data on the final call
(that's a hardware limitation).

Cc: stable@vger.kernel.org
Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
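For reference, below is a minimal sketch (not part of the patch) of the update()/final() sequence the message describes, written against the generic kernel ahash API. The SafeXcel engine handles such a request when its sha256 registration is the highest-priority provider; the function name and buffer values here are illustrative only.

/* Hedged sketch: an ahash update() of exactly one block followed by a
 * final() with no new data, the sequence the commit message describes.
 * demo_block_multiple_hash() and its locals are illustrative. */
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static int demo_block_multiple_hash(void)
{
	u8 buf[SHA256_BLOCK_SIZE];		/* exactly one 64-byte block */
	u8 digest[SHA256_DIGEST_SIZE];
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	memset(buf, 0xab, sizeof(buf));
	sg_init_one(&sg, buf, sizeof(buf));
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* update() length is an exact multiple of the block size */
	ahash_request_set_crypt(req, &sg, digest, sizeof(buf));
	ret = crypto_wait_req(crypto_ahash_init(req), &wait);
	if (!ret)
		ret = crypto_wait_req(crypto_ahash_update(req), &wait);

	/* final() then arrives with no new data; without the caching added by
	 * this patch the driver could ask the engine to hash a 0-length block */
	if (!ret) {
		ahash_request_set_crypt(req, NULL, digest, 0);
		ret = crypto_wait_req(crypto_ahash_final(req), &wait);
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}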
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c  34
1 file changed, 24 insertions, 10 deletions
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index fbc03d6e7db7..122a2a58e98f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -189,17 +189,31 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	else
 		cache_len = queued - areq->nbytes;
 
-	/*
-	 * If this is not the last request and the queued data does not fit
-	 * into full blocks, cache it for the next send() call.
-	 */
-	extra = queued & (crypto_ahash_blocksize(ahash) - 1);
-	if (!req->last_req && extra) {
-		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
-				   req->cache_next, extra, areq->nbytes - extra);
+	if (!req->last_req) {
+		/* If this is not the last request and the queued data does not
+		 * fit into full blocks, cache it for the next send() call.
+		 */
+		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+		if (!extra)
+			/* If this is not the last request and the queued data
+			 * is a multiple of a block, cache the last one for now.
+			 */
+			extra = queued - crypto_ahash_blocksize(ahash);
 
-		queued -= extra;
-		len -= extra;
+		if (extra) {
+			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+					   req->cache_next, extra,
+					   areq->nbytes - extra);
+
+			queued -= extra;
+			len -= extra;
+
+			if (!queued) {
+				*commands = 0;
+				*results = 0;
+				return 0;
+			}
+		}
 	}
 
 	spin_lock_bh(&priv->ring[ring].egress_lock);
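To make the new branch concrete, here is a small standalone sketch of the caching arithmetic with illustrative values (a 64-byte block size and 128 queued bytes); it is not driver code, just the same computation in isolation.

#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 64;	/* e.g. the SHA-256 block size */
	unsigned int queued = 128;	/* cached + newly queued bytes, not the last request */

	/* leftover bytes that do not fill a whole block */
	unsigned int extra = queued & (blocksize - 1);	/* 0: exact block multiple */

	/* new behaviour: on an exact multiple, hold back the last full block */
	if (!extra)
		extra = queued - blocksize;		/* 64 */

	/* 64 bytes go to the engine now and 64 stay in the cache, so a later
	 * final() with no new data still has a full block to hand the hardware */
	printf("send now: %u, cache for final(): %u\n", queued - extra, extra);
	return 0;
}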