From c9f1fd4f2f74f322d5bdc4ec2c6a38ab7462967b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 2 Aug 2019 13:31:35 +1000 Subject: Revert "crypto: aegis128 - add support for SIMD acceleration" This reverts commit ecc8bc81f2fb3976737ef312f824ba6053aa3590 ("crypto: aegis128 - provide a SIMD implementation based on NEON intrinsics") and commit 7cdc0ddbf74a19cecb2f0e9efa2cae9d3c665189 ("crypto: aegis128 - add support for SIMD acceleration"). They cause compile errors on platforms other than ARM because the mechanism to selectively compile the SIMD code is broken. Reported-by: Heiko Carstens Reported-by: Stephen Rothwell Signed-off-by: Herbert Xu --- crypto/Kconfig | 5 - crypto/Makefile | 12 -- crypto/aegis128-core.c | 481 ------------------------------------------- crypto/aegis128-neon-inner.c | 149 -------------- crypto/aegis128-neon.c | 43 ---- crypto/aegis128.c | 447 ++++++++++++++++++++++++++++++++++++++++ 6 files changed, 447 insertions(+), 690 deletions(-) delete mode 100644 crypto/aegis128-core.c delete mode 100644 crypto/aegis128-neon-inner.c delete mode 100644 crypto/aegis128-neon.c create mode 100644 crypto/aegis128.c diff --git a/crypto/Kconfig b/crypto/Kconfig index 455a3354e291..8880c1fc51d8 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -306,11 +306,6 @@ config CRYPTO_AEGIS128 help Support for the AEGIS-128 dedicated AEAD algorithm. -config CRYPTO_AEGIS128_SIMD - bool "Support SIMD acceleration for AEGIS-128" - depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON) - default y - config CRYPTO_AEGIS128_AESNI_SSE2 tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)" depends on X86 && 64BIT diff --git a/crypto/Makefile b/crypto/Makefile index b3e16b4fb414..93375e124ff7 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -90,18 +90,6 @@ obj-$(CONFIG_CRYPTO_GCM) += gcm.o obj-$(CONFIG_CRYPTO_CCM) += ccm.o obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o -aegis128-y := aegis128-core.o - -ifeq ($(ARCH),arm) -CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv7-a -mfloat-abi=softfp -mfpu=crypto-neon-fp-armv8 -aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o -endif -ifeq ($(ARCH),arm64) -CFLAGS_aegis128-neon-inner.o += -ffreestanding -mcpu=generic+crypto -CFLAGS_REMOVE_aegis128-neon-inner.o += -mgeneral-regs-only -aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o -endif - obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des_generic.o diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c deleted file mode 100644 index f815b4685156..000000000000 --- a/crypto/aegis128-core.c +++ /dev/null @@ -1,481 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * The AEGIS-128 Authenticated-Encryption Algorithm - * - * Copyright (c) 2017-2018 Ondrej Mosnacek - * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "aegis.h" - -#define AEGIS128_NONCE_SIZE 16 -#define AEGIS128_STATE_BLOCKS 5 -#define AEGIS128_KEY_SIZE 16 -#define AEGIS128_MIN_AUTH_SIZE 8 -#define AEGIS128_MAX_AUTH_SIZE 16 - -struct aegis_state { - union aegis_block blocks[AEGIS128_STATE_BLOCKS]; -}; - -struct aegis_ctx { - union aegis_block key; -}; - -struct aegis128_ops { - int (*skcipher_walk_init)(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); - - void (*crypt_chunk)(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -}; - -static bool have_simd; - -bool crypto_aegis128_have_simd(void); -void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg); -void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); - -static void crypto_aegis128_update(struct aegis_state *state) -{ - union aegis_block tmp; - unsigned int i; - - tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1]; - for (i = AEGIS128_STATE_BLOCKS - 1; i > 0; i--) - crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], - &state->blocks[i]); - crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); -} - -static void crypto_aegis128_update_a(struct aegis_state *state, - const union aegis_block *msg) -{ - if (have_simd && crypto_simd_usable()) { - crypto_aegis128_update_simd(state, msg); - return; - } - - crypto_aegis128_update(state); - crypto_aegis_block_xor(&state->blocks[0], msg); -} - -static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg) -{ - if (have_simd && crypto_simd_usable()) { - crypto_aegis128_update_simd(state, msg); - return; - } - - crypto_aegis128_update(state); - crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE); -} - -static void crypto_aegis128_init(struct aegis_state *state, - const union aegis_block *key, - const u8 *iv) -{ - union aegis_block key_iv; - unsigned int i; - - key_iv = *key; - crypto_xor(key_iv.bytes, iv, AEGIS_BLOCK_SIZE); - - state->blocks[0] = key_iv; - state->blocks[1] = crypto_aegis_const[1]; - state->blocks[2] = crypto_aegis_const[0]; - state->blocks[3] = *key; - state->blocks[4] = *key; - - crypto_aegis_block_xor(&state->blocks[3], &crypto_aegis_const[0]); - crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]); - - for (i = 0; i < 5; i++) { - crypto_aegis128_update_a(state, key); - crypto_aegis128_update_a(state, &key_iv); - } -} - -static void crypto_aegis128_ad(struct aegis_state *state, - const u8 *src, unsigned int size) -{ - if (AEGIS_ALIGNED(src)) { - const union aegis_block *src_blk = - (const union aegis_block *)src; - - while (size >= AEGIS_BLOCK_SIZE) { - crypto_aegis128_update_a(state, src_blk); - - size -= AEGIS_BLOCK_SIZE; - src_blk++; - } - } else { - while (size >= AEGIS_BLOCK_SIZE) { - crypto_aegis128_update_u(state, src); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - } - } -} - -static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - union aegis_block tmp; - - if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { - while (size >= AEGIS_BLOCK_SIZE) { - union aegis_block *dst_blk = - (union aegis_block *)dst; - const union aegis_block *src_blk = - (const union aegis_block *)src; - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, 
&state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&tmp, src_blk); - - crypto_aegis128_update_a(state, src_blk); - - *dst_blk = tmp; - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } else { - while (size >= AEGIS_BLOCK_SIZE) { - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); - - crypto_aegis128_update_u(state, src); - - memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } - - if (size > 0) { - union aegis_block msg = {}; - memcpy(msg.bytes, src, size); - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - - crypto_aegis128_update_a(state, &msg); - - crypto_aegis_block_xor(&msg, &tmp); - - memcpy(dst, msg.bytes, size); - } -} - -static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - union aegis_block tmp; - - if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { - while (size >= AEGIS_BLOCK_SIZE) { - union aegis_block *dst_blk = - (union aegis_block *)dst; - const union aegis_block *src_blk = - (const union aegis_block *)src; - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&tmp, src_blk); - - crypto_aegis128_update_a(state, &tmp); - - *dst_blk = tmp; - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } else { - while (size >= AEGIS_BLOCK_SIZE) { - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); - - crypto_aegis128_update_a(state, &tmp); - - memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } - - if (size > 0) { - union aegis_block msg = {}; - memcpy(msg.bytes, src, size); - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&msg, &tmp); - - memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size); - - crypto_aegis128_update_a(state, &msg); - - memcpy(dst, msg.bytes, size); - } -} - -static void crypto_aegis128_process_ad(struct aegis_state *state, - struct scatterlist *sg_src, - unsigned int assoclen) -{ - struct scatter_walk walk; - union aegis_block buf; - unsigned int pos = 0; - - scatterwalk_start(&walk, sg_src); - while (assoclen != 0) { - unsigned int size = scatterwalk_clamp(&walk, assoclen); - unsigned int left = size; - void *mapped = scatterwalk_map(&walk); - const u8 *src = (const u8 *)mapped; - - if (pos + size >= AEGIS_BLOCK_SIZE) { - if (pos > 0) { - unsigned int fill = AEGIS_BLOCK_SIZE - pos; - memcpy(buf.bytes + pos, src, fill); - crypto_aegis128_update_a(state, &buf); - pos = 0; - left -= fill; - src += fill; - } - - crypto_aegis128_ad(state, src, left); - src += left & ~(AEGIS_BLOCK_SIZE - 1); - left &= 
AEGIS_BLOCK_SIZE - 1; - } - - memcpy(buf.bytes + pos, src, left); - - pos += left; - assoclen -= size; - scatterwalk_unmap(mapped); - scatterwalk_advance(&walk, size); - scatterwalk_done(&walk, 0, assoclen); - } - - if (pos > 0) { - memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos); - crypto_aegis128_update_a(state, &buf); - } -} - -static void crypto_aegis128_process_crypt(struct aegis_state *state, - struct aead_request *req, - const struct aegis128_ops *ops) -{ - struct skcipher_walk walk; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, - nbytes); - - skcipher_walk_done(&walk, walk.nbytes - nbytes); - } -} - -static void crypto_aegis128_final(struct aegis_state *state, - union aegis_block *tag_xor, - u64 assoclen, u64 cryptlen) -{ - u64 assocbits = assoclen * 8; - u64 cryptbits = cryptlen * 8; - - union aegis_block tmp; - unsigned int i; - - tmp.words64[0] = cpu_to_le64(assocbits); - tmp.words64[1] = cpu_to_le64(cryptbits); - - crypto_aegis_block_xor(&tmp, &state->blocks[3]); - - for (i = 0; i < 7; i++) - crypto_aegis128_update_a(state, &tmp); - - for (i = 0; i < AEGIS128_STATE_BLOCKS; i++) - crypto_aegis_block_xor(tag_xor, &state->blocks[i]); -} - -static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen) -{ - struct aegis_ctx *ctx = crypto_aead_ctx(aead); - - if (keylen != AEGIS128_KEY_SIZE) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE); - return 0; -} - -static int crypto_aegis128_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - if (authsize > AEGIS128_MAX_AUTH_SIZE) - return -EINVAL; - if (authsize < AEGIS128_MIN_AUTH_SIZE) - return -EINVAL; - return 0; -} - -static void crypto_aegis128_crypt(struct aead_request *req, - union aegis_block *tag_xor, - unsigned int cryptlen, - const struct aegis128_ops *ops) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct aegis_ctx *ctx = crypto_aead_ctx(tfm); - struct aegis_state state; - - crypto_aegis128_init(&state, &ctx->key, req->iv); - crypto_aegis128_process_ad(&state, req->src, req->assoclen); - crypto_aegis128_process_crypt(&state, req, ops); - crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen); -} - -static int crypto_aegis128_encrypt(struct aead_request *req) -{ - const struct aegis128_ops *ops = &(struct aegis128_ops){ - .skcipher_walk_init = skcipher_walk_aead_encrypt, - .crypt_chunk = crypto_aegis128_encrypt_chunk, - }; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - union aegis_block tag = {}; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen; - - if (have_simd && crypto_simd_usable()) - ops = &(struct aegis128_ops){ - .skcipher_walk_init = skcipher_walk_aead_encrypt, - .crypt_chunk = crypto_aegis128_encrypt_chunk_simd }; - - crypto_aegis128_crypt(req, &tag, cryptlen, ops); - - scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, - authsize, 1); - return 0; -} - -static int crypto_aegis128_decrypt(struct aead_request *req) -{ - const struct aegis128_ops *ops = &(struct aegis128_ops){ - .skcipher_walk_init = skcipher_walk_aead_decrypt, - .crypt_chunk = crypto_aegis128_decrypt_chunk, - }; - static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {}; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); 
- union aegis_block tag; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen - authsize; - - scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, - authsize, 0); - - if (have_simd && crypto_simd_usable()) - ops = &(struct aegis128_ops){ - .skcipher_walk_init = skcipher_walk_aead_decrypt, - .crypt_chunk = crypto_aegis128_decrypt_chunk_simd }; - - crypto_aegis128_crypt(req, &tag, cryptlen, ops); - - return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0; -} - -static struct aead_alg crypto_aegis128_alg = { - .setkey = crypto_aegis128_setkey, - .setauthsize = crypto_aegis128_setauthsize, - .encrypt = crypto_aegis128_encrypt, - .decrypt = crypto_aegis128_decrypt, - - .ivsize = AEGIS128_NONCE_SIZE, - .maxauthsize = AEGIS128_MAX_AUTH_SIZE, - .chunksize = AEGIS_BLOCK_SIZE, - - .base = { - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct aegis_ctx), - .cra_alignmask = 0, - - .cra_priority = 100, - - .cra_name = "aegis128", - .cra_driver_name = "aegis128-generic", - - .cra_module = THIS_MODULE, - } -}; - -static int __init crypto_aegis128_module_init(void) -{ - if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD)) - have_simd = crypto_aegis128_have_simd(); - - return crypto_register_aead(&crypto_aegis128_alg); -} - -static void __exit crypto_aegis128_module_exit(void) -{ - crypto_unregister_aead(&crypto_aegis128_alg); -} - -subsys_initcall(crypto_aegis128_module_init); -module_exit(crypto_aegis128_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ondrej Mosnacek "); -MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm"); -MODULE_ALIAS_CRYPTO("aegis128"); -MODULE_ALIAS_CRYPTO("aegis128-generic"); diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c deleted file mode 100644 index 26e9450a5833..000000000000 --- a/crypto/aegis128-neon-inner.c +++ /dev/null @@ -1,149 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2019 Linaro, Ltd. - */ - -#ifdef CONFIG_ARM64 -#include - -#define AES_ROUND "aese %0.16b, %1.16b \n\t aesmc %0.16b, %0.16b" -#else -#include - -#define AES_ROUND "aese.8 %q0, %q1 \n\t aesmc.8 %q0, %q0" -#endif - -#define AEGIS_BLOCK_SIZE 16 - -#include - -void *memcpy(void *dest, const void *src, size_t n); -void *memset(void *s, int c, size_t n); - -struct aegis128_state { - uint8x16_t v[5]; -}; - -static struct aegis128_state aegis128_load_state_neon(const void *state) -{ - return (struct aegis128_state){ { - vld1q_u8(state), - vld1q_u8(state + 16), - vld1q_u8(state + 32), - vld1q_u8(state + 48), - vld1q_u8(state + 64) - } }; -} - -static void aegis128_save_state_neon(struct aegis128_state st, void *state) -{ - vst1q_u8(state, st.v[0]); - vst1q_u8(state + 16, st.v[1]); - vst1q_u8(state + 32, st.v[2]); - vst1q_u8(state + 48, st.v[3]); - vst1q_u8(state + 64, st.v[4]); -} - -static uint8x16_t aegis_aes_round(uint8x16_t w) -{ - uint8x16_t z = {}; - - /* - * We use inline asm here instead of the vaeseq_u8/vaesmcq_u8 intrinsics - * to force the compiler to issue the aese/aesmc instructions in pairs. - * This is much faster on many cores, where the instruction pair can - * execute in a single cycle. 
- */ - asm(AES_ROUND : "+w"(w) : "w"(z)); - return w; -} - -static struct aegis128_state aegis128_update_neon(struct aegis128_state st, - uint8x16_t m) -{ - uint8x16_t t; - - t = aegis_aes_round(st.v[3]); - st.v[3] ^= aegis_aes_round(st.v[2]); - st.v[2] ^= aegis_aes_round(st.v[1]); - st.v[1] ^= aegis_aes_round(st.v[0]); - st.v[0] ^= aegis_aes_round(st.v[4]) ^ m; - st.v[4] ^= t; - - return st; -} - -void crypto_aegis128_update_neon(void *state, const void *msg) -{ - struct aegis128_state st = aegis128_load_state_neon(state); - - st = aegis128_update_neon(st, vld1q_u8(msg)); - - aegis128_save_state_neon(st, state); -} - -void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, - unsigned int size) -{ - struct aegis128_state st = aegis128_load_state_neon(state); - uint8x16_t tmp; - - while (size >= AEGIS_BLOCK_SIZE) { - uint8x16_t s = vld1q_u8(src); - - tmp = s ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; - st = aegis128_update_neon(st, s); - vst1q_u8(dst, tmp); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - - if (size > 0) { - uint8_t buf[AEGIS_BLOCK_SIZE] = {}; - uint8x16_t msg; - - memcpy(buf, src, size); - msg = vld1q_u8(buf); - tmp = msg ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; - st = aegis128_update_neon(st, msg); - vst1q_u8(buf, tmp); - memcpy(dst, buf, size); - } - - aegis128_save_state_neon(st, state); -} - -void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, - unsigned int size) -{ - struct aegis128_state st = aegis128_load_state_neon(state); - uint8x16_t tmp; - - while (size >= AEGIS_BLOCK_SIZE) { - tmp = vld1q_u8(src) ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; - st = aegis128_update_neon(st, tmp); - vst1q_u8(dst, tmp); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - - if (size > 0) { - uint8_t buf[AEGIS_BLOCK_SIZE] = {}; - uint8x16_t msg; - - memcpy(buf, src, size); - msg = vld1q_u8(buf) ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; - vst1q_u8(buf, msg); - memcpy(dst, buf, size); - - memset(buf + size, 0, AEGIS_BLOCK_SIZE - size); - msg = vld1q_u8(buf); - st = aegis128_update_neon(st, msg); - } - - aegis128_save_state_neon(st, state); -} diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c deleted file mode 100644 index c1c0a1686f67..000000000000 --- a/crypto/aegis128-neon.c +++ /dev/null @@ -1,43 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2019 Linaro Ltd - */ - -#include -#include - -#include "aegis.h" - -void crypto_aegis128_update_neon(void *state, const void *msg); -void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, - unsigned int size); -void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, - unsigned int size); - -bool crypto_aegis128_have_simd(void) -{ - return cpu_have_feature(cpu_feature(AES)); -} - -void crypto_aegis128_update_simd(union aegis_block *state, const void *msg) -{ - kernel_neon_begin(); - crypto_aegis128_update_neon(state, msg); - kernel_neon_end(); -} - -void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst, - const u8 *src, unsigned int size) -{ - kernel_neon_begin(); - crypto_aegis128_encrypt_chunk_neon(state, dst, src, size); - kernel_neon_end(); -} - -void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst, - const u8 *src, unsigned int size) -{ - kernel_neon_begin(); - crypto_aegis128_decrypt_chunk_neon(state, dst, src, size); - kernel_neon_end(); -} diff --git a/crypto/aegis128.c 
b/crypto/aegis128.c new file mode 100644 index 000000000000..32840d5e7f65 --- /dev/null +++ b/crypto/aegis128.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * The AEGIS-128 Authenticated-Encryption Algorithm + * + * Copyright (c) 2017-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "aegis.h" + +#define AEGIS128_NONCE_SIZE 16 +#define AEGIS128_STATE_BLOCKS 5 +#define AEGIS128_KEY_SIZE 16 +#define AEGIS128_MIN_AUTH_SIZE 8 +#define AEGIS128_MAX_AUTH_SIZE 16 + +struct aegis_state { + union aegis_block blocks[AEGIS128_STATE_BLOCKS]; +}; + +struct aegis_ctx { + union aegis_block key; +}; + +struct aegis128_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_chunk)(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +}; + +static void crypto_aegis128_update(struct aegis_state *state) +{ + union aegis_block tmp; + unsigned int i; + + tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1]; + for (i = AEGIS128_STATE_BLOCKS - 1; i > 0; i--) + crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], + &state->blocks[i]); + crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); +} + +static void crypto_aegis128_update_a(struct aegis_state *state, + const union aegis_block *msg) +{ + crypto_aegis128_update(state); + crypto_aegis_block_xor(&state->blocks[0], msg); +} + +static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg) +{ + crypto_aegis128_update(state); + crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE); +} + +static void crypto_aegis128_init(struct aegis_state *state, + const union aegis_block *key, + const u8 *iv) +{ + union aegis_block key_iv; + unsigned int i; + + key_iv = *key; + crypto_xor(key_iv.bytes, iv, AEGIS_BLOCK_SIZE); + + state->blocks[0] = key_iv; + state->blocks[1] = crypto_aegis_const[1]; + state->blocks[2] = crypto_aegis_const[0]; + state->blocks[3] = *key; + state->blocks[4] = *key; + + crypto_aegis_block_xor(&state->blocks[3], &crypto_aegis_const[0]); + crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]); + + for (i = 0; i < 5; i++) { + crypto_aegis128_update_a(state, key); + crypto_aegis128_update_a(state, &key_iv); + } +} + +static void crypto_aegis128_ad(struct aegis_state *state, + const u8 *src, unsigned int size) +{ + if (AEGIS_ALIGNED(src)) { + const union aegis_block *src_blk = + (const union aegis_block *)src; + + while (size >= AEGIS_BLOCK_SIZE) { + crypto_aegis128_update_a(state, src_blk); + + size -= AEGIS_BLOCK_SIZE; + src_blk++; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + crypto_aegis128_update_u(state, src); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + } + } +} + +static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_block tmp; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS_BLOCK_SIZE) { + union aegis_block *dst_blk = + (union aegis_block *)dst; + const union aegis_block *src_blk = + (const union aegis_block *)src; + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&tmp, src_blk); + + crypto_aegis128_update_a(state, src_blk); + + *dst_blk = tmp; + + size -= AEGIS_BLOCK_SIZE; 
+ src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); + + crypto_aegis128_update_u(state, src); + + memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } + + if (size > 0) { + union aegis_block msg = {}; + memcpy(msg.bytes, src, size); + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + + crypto_aegis128_update_a(state, &msg); + + crypto_aegis_block_xor(&msg, &tmp); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_block tmp; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS_BLOCK_SIZE) { + union aegis_block *dst_blk = + (union aegis_block *)dst; + const union aegis_block *src_blk = + (const union aegis_block *)src; + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&tmp, src_blk); + + crypto_aegis128_update_a(state, &tmp); + + *dst_blk = tmp; + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); + + crypto_aegis128_update_a(state, &tmp); + + memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } + + if (size > 0) { + union aegis_block msg = {}; + memcpy(msg.bytes, src, size); + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&msg, &tmp); + + memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size); + + crypto_aegis128_update_a(state, &msg); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis128_process_ad(struct aegis_state *state, + struct scatterlist *sg_src, + unsigned int assoclen) +{ + struct scatter_walk walk; + union aegis_block buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= AEGIS_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = AEGIS_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + crypto_aegis128_update_a(state, &buf); + pos = 0; + left -= fill; + src += fill; + } + + crypto_aegis128_ad(state, src, left); + src += left & ~(AEGIS_BLOCK_SIZE - 1); + left &= AEGIS_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + + pos += left; + assoclen -= size; + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, 
AEGIS_BLOCK_SIZE - pos); + crypto_aegis128_update_a(state, &buf); + } +} + +static void crypto_aegis128_process_crypt(struct aegis_state *state, + struct aead_request *req, + const struct aegis128_ops *ops) +{ + struct skcipher_walk walk; + + ops->skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); + + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); + + skcipher_walk_done(&walk, walk.nbytes - nbytes); + } +} + +static void crypto_aegis128_final(struct aegis_state *state, + union aegis_block *tag_xor, + u64 assoclen, u64 cryptlen) +{ + u64 assocbits = assoclen * 8; + u64 cryptbits = cryptlen * 8; + + union aegis_block tmp; + unsigned int i; + + tmp.words64[0] = cpu_to_le64(assocbits); + tmp.words64[1] = cpu_to_le64(cryptbits); + + crypto_aegis_block_xor(&tmp, &state->blocks[3]); + + for (i = 0; i < 7; i++) + crypto_aegis128_update_a(state, &tmp); + + for (i = 0; i < AEGIS128_STATE_BLOCKS; i++) + crypto_aegis_block_xor(tag_xor, &state->blocks[i]); +} + +static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct aegis_ctx *ctx = crypto_aead_ctx(aead); + + if (keylen != AEGIS128_KEY_SIZE) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE); + return 0; +} + +static int crypto_aegis128_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize > AEGIS128_MAX_AUTH_SIZE) + return -EINVAL; + if (authsize < AEGIS128_MIN_AUTH_SIZE) + return -EINVAL; + return 0; +} + +static void crypto_aegis128_crypt(struct aead_request *req, + union aegis_block *tag_xor, + unsigned int cryptlen, + const struct aegis128_ops *ops) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_ctx *ctx = crypto_aead_ctx(tfm); + struct aegis_state state; + + crypto_aegis128_init(&state, &ctx->key, req->iv); + crypto_aegis128_process_ad(&state, req->src, req->assoclen); + crypto_aegis128_process_crypt(&state, req, ops); + crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen); +} + +static int crypto_aegis128_encrypt(struct aead_request *req) +{ + static const struct aegis128_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_chunk = crypto_aegis128_encrypt_chunk, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag = {}; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + crypto_aegis128_crypt(req, &tag, cryptlen, &ops); + + scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, + authsize, 1); + return 0; +} + +static int crypto_aegis128_decrypt(struct aead_request *req) +{ + static const struct aegis128_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_chunk = crypto_aegis128_decrypt_chunk, + }; + static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {}; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, + authsize, 0); + + crypto_aegis128_crypt(req, &tag, cryptlen, &ops); + + return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; +} + +static struct aead_alg crypto_aegis128_alg = { + .setkey = crypto_aegis128_setkey, + .setauthsize = crypto_aegis128_setauthsize, + .encrypt = crypto_aegis128_encrypt, + .decrypt = crypto_aegis128_decrypt, + + .ivsize = AEGIS128_NONCE_SIZE, + .maxauthsize = AEGIS128_MAX_AUTH_SIZE, + .chunksize = AEGIS_BLOCK_SIZE, + + .base = { + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aegis_ctx), + .cra_alignmask = 0, + + .cra_priority = 100, + + .cra_name = "aegis128", + .cra_driver_name = "aegis128-generic", + + .cra_module = THIS_MODULE, + } +}; + +static int __init crypto_aegis128_module_init(void) +{ + return crypto_register_aead(&crypto_aegis128_alg); +} + +static void __exit crypto_aegis128_module_exit(void) +{ + crypto_unregister_aead(&crypto_aegis128_alg); +} + +subsys_initcall(crypto_aegis128_module_init); +module_exit(crypto_aegis128_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek "); +MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm"); +MODULE_ALIAS_CRYPTO("aegis128"); +MODULE_ALIAS_CRYPTO("aegis128-generic"); -- cgit v1.2.3-59-g8ed1b