From 8a4e4eb27c85b6eb273bcedce9ad16f759e8d6cf Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld"
Date: Sun, 3 Feb 2019 01:43:52 +0100
Subject: chacha20poly1305: permit unaligned strides on certain platforms

The map allocations required to enforce alignment are mostly slower
than the unaligned paths themselves.

Reported-by: Louis Sautier
---
 src/crypto/zinc/chacha20poly1305.c | 32 ++++++++++++++------------------
 1 file changed, 14 insertions(+), 18 deletions(-)

diff --git a/src/crypto/zinc/chacha20poly1305.c b/src/crypto/zinc/chacha20poly1305.c
index 28b9880..0001c92 100644
--- a/src/crypto/zinc/chacha20poly1305.c
+++ b/src/crypto/zinc/chacha20poly1305.c
@@ -20,18 +20,14 @@
 
 static const u8 pad0[16] = { 0 };
 
-static struct crypto_alg chacha20_alg = {
-	.cra_blocksize = 1,
-	.cra_alignmask = sizeof(u32) - 1
-};
-static struct crypto_blkcipher chacha20_cipher = {
-	.base = {
-		.__crt_alg = &chacha20_alg
-	}
-};
-static struct blkcipher_desc chacha20_desc = {
-	.tfm = &chacha20_cipher
-};
+static struct blkcipher_desc desc = { .tfm = &(struct crypto_blkcipher){
+	.base = { .__crt_alg = &(struct crypto_alg){
+		.cra_blocksize = 1,
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+		.cra_alignmask = sizeof(u32) - 1
+#endif
+	} }
+} };
 
 static inline void
 __chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
@@ -114,7 +110,7 @@ bool chacha20poly1305_encrypt_sg(struct scatterlist *dst,
 
 	if (likely(src_len)) {
 		blkcipher_walk_init(&walk, dst, src, src_len);
-		ret = blkcipher_walk_virt_block(&chacha20_desc, &walk,
+		ret = blkcipher_walk_virt_block(&desc, &walk,
 						CHACHA20_BLOCK_SIZE);
 		while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
 			size_t chunk_len =
@@ -125,7 +121,7 @@ bool chacha20poly1305_encrypt_sg(struct scatterlist *dst,
 			poly1305_update(&poly1305_state, walk.dst.virt.addr,
 					chunk_len, simd_context);
 			simd_relax(simd_context);
-			ret = blkcipher_walk_done(&chacha20_desc, &walk,
+			ret = blkcipher_walk_done(&desc, &walk,
 						  walk.nbytes % CHACHA20_BLOCK_SIZE);
 		}
 		if (walk.nbytes) {
@@ -133,7 +129,7 @@ bool chacha20poly1305_encrypt_sg(struct scatterlist *dst,
 				 walk.src.virt.addr, walk.nbytes, simd_context);
 			poly1305_update(&poly1305_state, walk.dst.virt.addr,
 					walk.nbytes, simd_context);
-			ret = blkcipher_walk_done(&chacha20_desc, &walk, 0);
+			ret = blkcipher_walk_done(&desc, &walk, 0);
 		}
 	}
 	if (unlikely(ret))
@@ -257,7 +253,7 @@ bool chacha20poly1305_decrypt_sg(struct scatterlist *dst,
 	dst_len = src_len - POLY1305_MAC_SIZE;
 	if (likely(dst_len)) {
 		blkcipher_walk_init(&walk, dst, src, dst_len);
-		ret = blkcipher_walk_virt_block(&chacha20_desc, &walk,
+		ret = blkcipher_walk_virt_block(&desc, &walk,
 						CHACHA20_BLOCK_SIZE);
 		while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
 			size_t chunk_len =
@@ -268,7 +264,7 @@ bool chacha20poly1305_decrypt_sg(struct scatterlist *dst,
 			chacha20(&chacha20_state, walk.dst.virt.addr,
 				 walk.src.virt.addr, chunk_len, simd_context);
 			simd_relax(simd_context);
-			ret = blkcipher_walk_done(&chacha20_desc, &walk,
+			ret = blkcipher_walk_done(&desc, &walk,
 						  walk.nbytes % CHACHA20_BLOCK_SIZE);
 		}
 		if (walk.nbytes) {
@@ -276,7 +272,7 @@ bool chacha20poly1305_decrypt_sg(struct scatterlist *dst,
 				       walk.nbytes, simd_context);
 			chacha20(&chacha20_state, walk.dst.virt.addr,
 				 walk.src.virt.addr, walk.nbytes, simd_context);
-			ret = blkcipher_walk_done(&chacha20_desc, &walk, 0);
+			ret = blkcipher_walk_done(&desc, &walk, 0);
 		}
 	}
 	if (unlikely(ret))
--
cgit v1.2.3-59-g8ed1b
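
Editor's note: the sketch below is a minimal, self-contained userspace C
program (not kernel code and not the kernel crypto API) illustrating the
idea the patch relies on: only enforce a u32 alignment mask when the
platform lacks efficient unaligned access, because forcing alignment means
copying each stride through an aligned bounce buffer, which is usually
slower than just processing the data in place. The walk_strides() and
process_chunk() helpers are hypothetical stand-ins for the blkcipher walk
and the chacha20()/poly1305_update() calls; CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
is the real kernel symbol, here abused as a plain compile-time define.

/* align_stride_sketch.c -- illustrative only; build with:
 *   cc -O2 align_stride_sketch.c
 *   cc -O2 -DCONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS align_stride_sketch.c
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define STRIDE_ALIGNMASK 0				/* accept any source pointer as-is */
#else
#define STRIDE_ALIGNMASK (sizeof(uint32_t) - 1)		/* require u32 alignment */
#endif

/* Hypothetical per-chunk worker standing in for the chacha20()/poly1305_update()
 * calls in the real code. */
static void process_chunk(const uint8_t *chunk, size_t len)
{
	uint32_t acc = 0;
	size_t i;

	for (i = 0; i < len; ++i)
		acc = (acc << 1) ^ chunk[i];
	printf("processed %zu bytes, acc=%08" PRIx32 "\n", len, acc);
}

/* Walk the input in 64-byte strides.  Only when the alignmask rejects the
 * current pointer do we copy into an aligned bounce buffer first -- that
 * extra copy is the cost the patch avoids on platforms where unaligned
 * access is efficient. */
static void walk_strides(const uint8_t *src, size_t len)
{
	uint8_t bounce[64] __attribute__((aligned(4)));

	while (len) {
		size_t chunk = len < 64 ? len : 64;

		if ((uintptr_t)src & STRIDE_ALIGNMASK) {
			memcpy(bounce, src, chunk);	/* aligned bounce-buffer path */
			process_chunk(bounce, chunk);
		} else {
			process_chunk(src, chunk);	/* direct path */
		}
		src += chunk;
		len -= chunk;
	}
}

int main(void)
{
	uint8_t buf[131];
	size_t i;

	for (i = 0; i < sizeof(buf); ++i)
		buf[i] = (uint8_t)i;

	/* Deliberately misaligned start so the bounce path is exercised when
	 * STRIDE_ALIGNMASK is non-zero. */
	walk_strides(buf + 1, sizeof(buf) - 1);
	return 0;
}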