author		Eric Biggers <ebiggers@google.com>	2018-11-16 17:26:21 -0800
committer	Herbert Xu <herbert@gondor.apana.org.au>	2018-11-20 14:26:55 +0800
commit		1ca1b917940c24ca3d1f490118c5474168622953 (patch)
tree		26cde5a85d0792a8dd709a6f5316f76af8801a35 /arch/x86/crypto
parent		crypto: chacha20-generic - add XChaCha20 support (diff)
crypto: chacha20-generic - refactor to allow varying number of rounds
In preparation for adding XChaCha12 support, rename/refactor
chacha20-generic to support different numbers of rounds.  The
justification for needing XChaCha12 support is explained in more detail
in the patch "crypto: chacha - add XChaCha12 support".

The only difference between ChaCha{8,12,20} is the number of rounds;
all other parts of the algorithm are the same.  Therefore, remove the
"20" from all definitions, structures, functions, files, etc. that will
be shared by all ChaCha versions.

Also make ->setkey() store the round count in the chacha_ctx (previously
chacha20_ctx).  The generic code then passes the round count through to
chacha_block().  There will be a ->setkey() function for each explicitly
allowed round count; the encrypt/decrypt functions will be the same.  I
decided not to do it the opposite way (same ->setkey() function for all
round counts, with different encrypt/decrypt functions) because that
would have required more boilerplate code in architecture-specific
implementations of ChaCha and XChaCha.

Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Martin Willi <martin@strongswan.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
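The pattern the message describes — a shared context carrying the round
count, with one thin ->setkey() wrapper per allowed round count — might
look roughly like the following.  This is a minimal sketch, not the
verbatim patch; the chacha_setkey() helper name and its exact signature
are assumptions based on the commit message:

#include <crypto/chacha.h>
#include <crypto/internal/skcipher.h>
#include <asm/unaligned.h>
#include <linux/kernel.h>

struct chacha_ctx {
	u32 key[8];
	int nrounds;
};

static int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keysize, int nrounds)
{
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
	int i;

	if (keysize != CHACHA_KEY_SIZE)
		return -EINVAL;

	/* Load the 256-bit key as eight little-endian words. */
	for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
		ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));

	/* Remember the round count so encrypt/decrypt can stay generic. */
	ctx->nrounds = nrounds;
	return 0;
}

int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keysize)
{
	return chacha_setkey(tfm, key, keysize, 20);
}

With this split, adding ChaCha12 later only requires another two-line
wrapper passing 12, while the encrypt/decrypt paths read ctx->nrounds.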
Diffstat (limited to 'arch/x86/crypto')
-rw-r--r--	arch/x86/crypto/chacha20_glue.c	48

1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index 9fd84fe6ec09..1e9e66509226 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -10,7 +10,7 @@
*/
#include <crypto/algapi.h>
-#include <crypto/chacha20.h>
+#include <crypto/chacha.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -35,8 +35,8 @@ static bool chacha20_use_avx2;
static unsigned int chacha20_advance(unsigned int len, unsigned int maxblocks)
{
- len = min(len, maxblocks * CHACHA20_BLOCK_SIZE);
- return round_up(len, CHACHA20_BLOCK_SIZE) / CHACHA20_BLOCK_SIZE;
+ len = min(len, maxblocks * CHACHA_BLOCK_SIZE);
+ return round_up(len, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE;
}
static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src,
@@ -44,38 +44,38 @@ static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src,
{
#ifdef CONFIG_AS_AVX2
if (chacha20_use_avx2) {
- while (bytes >= CHACHA20_BLOCK_SIZE * 8) {
+ while (bytes >= CHACHA_BLOCK_SIZE * 8) {
chacha20_8block_xor_avx2(state, dst, src, bytes);
- bytes -= CHACHA20_BLOCK_SIZE * 8;
- src += CHACHA20_BLOCK_SIZE * 8;
- dst += CHACHA20_BLOCK_SIZE * 8;
+ bytes -= CHACHA_BLOCK_SIZE * 8;
+ src += CHACHA_BLOCK_SIZE * 8;
+ dst += CHACHA_BLOCK_SIZE * 8;
state[12] += 8;
}
- if (bytes > CHACHA20_BLOCK_SIZE * 4) {
+ if (bytes > CHACHA_BLOCK_SIZE * 4) {
chacha20_8block_xor_avx2(state, dst, src, bytes);
state[12] += chacha20_advance(bytes, 8);
return;
}
- if (bytes > CHACHA20_BLOCK_SIZE * 2) {
+ if (bytes > CHACHA_BLOCK_SIZE * 2) {
chacha20_4block_xor_avx2(state, dst, src, bytes);
state[12] += chacha20_advance(bytes, 4);
return;
}
- if (bytes > CHACHA20_BLOCK_SIZE) {
+ if (bytes > CHACHA_BLOCK_SIZE) {
chacha20_2block_xor_avx2(state, dst, src, bytes);
state[12] += chacha20_advance(bytes, 2);
return;
}
}
#endif
- while (bytes >= CHACHA20_BLOCK_SIZE * 4) {
+ while (bytes >= CHACHA_BLOCK_SIZE * 4) {
chacha20_4block_xor_ssse3(state, dst, src, bytes);
- bytes -= CHACHA20_BLOCK_SIZE * 4;
- src += CHACHA20_BLOCK_SIZE * 4;
- dst += CHACHA20_BLOCK_SIZE * 4;
+ bytes -= CHACHA_BLOCK_SIZE * 4;
+ src += CHACHA_BLOCK_SIZE * 4;
+ dst += CHACHA_BLOCK_SIZE * 4;
state[12] += 4;
}
- if (bytes > CHACHA20_BLOCK_SIZE) {
+ if (bytes > CHACHA_BLOCK_SIZE) {
chacha20_4block_xor_ssse3(state, dst, src, bytes);
state[12] += chacha20_advance(bytes, 4);
return;
@@ -89,7 +89,7 @@ static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src,
static int chacha20_simd(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
u32 *state, state_buf[16 + 2] __aligned(8);
struct skcipher_walk walk;
int err;
@@ -97,12 +97,12 @@ static int chacha20_simd(struct skcipher_request *req)
BUILD_BUG_ON(CHACHA20_STATE_ALIGN != 16);
state = PTR_ALIGN(state_buf + 0, CHACHA20_STATE_ALIGN);
- if (req->cryptlen <= CHACHA20_BLOCK_SIZE || !may_use_simd())
- return crypto_chacha20_crypt(req);
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+ return crypto_chacha_crypt(req);
err = skcipher_walk_virt(&walk, req, true);
- crypto_chacha20_init(state, ctx, walk.iv);
+ crypto_chacha_init(state, ctx, walk.iv);
kernel_fpu_begin();
@@ -128,13 +128,13 @@ static struct skcipher_alg alg = {
.base.cra_driver_name = "chacha20-simd",
.base.cra_priority = 300,
.base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha20_ctx),
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
- .min_keysize = CHACHA20_KEY_SIZE,
- .max_keysize = CHACHA20_KEY_SIZE,
- .ivsize = CHACHA20_IV_SIZE,
- .chunksize = CHACHA20_BLOCK_SIZE,
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
.setkey = crypto_chacha20_setkey,
.encrypt = chacha20_simd,
.decrypt = chacha20_simd,
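Per the commit message, the round count stored at ->setkey() time is
eventually passed through to chacha_block(); inside the permutation it
simply controls how many times the double-round loop runs, which is the
only point where ChaCha8/12/20 differ.  A standalone sketch of that loop
in portable C (ROTL32 and QR are local helpers here, not kernel APIs):

#include <stdint.h>

#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

/* One ChaCha quarter-round on four state words. */
#define QR(a, b, c, d) do {				\
	a += b; d = ROTL32(d ^ a, 16);			\
	c += d; b = ROTL32(b ^ c, 12);			\
	a += b; d = ROTL32(d ^ a,  8);			\
	c += d; b = ROTL32(b ^ c,  7);			\
} while (0)

/*
 * Run nrounds rounds of the ChaCha permutation over the 16-word state,
 * two rounds (one column pass + one diagonal pass) per loop iteration.
 * nrounds = 20, 12, or 8 selects the ChaCha variant.
 */
static void chacha_permute(uint32_t x[16], int nrounds)
{
	int i;

	for (i = 0; i < nrounds; i += 2) {
		/* column round */
		QR(x[0], x[4], x[8],  x[12]);
		QR(x[1], x[5], x[9],  x[13]);
		QR(x[2], x[6], x[10], x[14]);
		QR(x[3], x[7], x[11], x[15]);
		/* diagonal round */
		QR(x[0], x[5], x[10], x[15]);
		QR(x[1], x[6], x[11], x[12]);
		QR(x[2], x[7], x[8],  x[13]);
		QR(x[3], x[4], x[9],  x[14]);
	}
}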