From 3daa5e33142fdb38b74c117b654dd50426939944 Mon Sep 17 00:00:00 2001
From: René van Dorst
Date: Tue, 20 Sep 2016 22:32:59 +0200
Subject: poly1305: optimize unaligned access

---
 src/crypto/chacha20poly1305.c | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

(limited to 'src/crypto/chacha20poly1305.c')

diff --git a/src/crypto/chacha20poly1305.c b/src/crypto/chacha20poly1305.c
index 5190894..6f6a825 100644
--- a/src/crypto/chacha20poly1305.c
+++ b/src/crypto/chacha20poly1305.c
@@ -248,13 +248,28 @@ struct poly1305_ctx {
 static void poly1305_init(struct poly1305_ctx *ctx, const u8 key[static POLY1305_KEY_SIZE])
 {
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+        u32 t0, t1, t2, t3;
+#endif
         memset(ctx, 0, sizeof(struct poly1305_ctx));
 
         /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
         ctx->r[0] = (le32_to_cpuvp(key + 0) >> 0) & 0x3ffffff;
         ctx->r[1] = (le32_to_cpuvp(key + 3) >> 2) & 0x3ffff03;
         ctx->r[2] = (le32_to_cpuvp(key + 6) >> 4) & 0x3ffc0ff;
         ctx->r[3] = (le32_to_cpuvp(key + 9) >> 6) & 0x3f03fff;
         ctx->r[4] = (le32_to_cpuvp(key + 12) >> 8) & 0x00fffff;
+#else
+        t0 = le32_to_cpuvp(key + 0);
+        t1 = le32_to_cpuvp(key + 4);
+        t2 = le32_to_cpuvp(key + 8);
+        t3 = le32_to_cpuvp(key + 12);
+        ctx->r[0] = t0 & 0x3ffffff; t0 >>= 26; t0 |= t1 << 6;
+        ctx->r[1] = t0 & 0x3ffff03; t1 >>= 20; t1 |= t2 << 12;
+        ctx->r[2] = t1 & 0x3ffc0ff; t2 >>= 14; t2 |= t3 << 18;
+        ctx->r[3] = t2 & 0x3f03fff; t3 >>= 8;
+        ctx->r[4] = t3 & 0x00fffff;
+#endif
         ctx->s[0] = le32_to_cpuvp(key + 16);
         ctx->s[1] = le32_to_cpuvp(key + 20);
         ctx->s[2] = le32_to_cpuvp(key + 24);
@@ -267,6 +282,9 @@ static unsigned int poly1305_generic_blocks(struct poly1305_ctx *ctx, const u8 *
         u32 s1, s2, s3, s4;
         u32 h0, h1, h2, h3, h4;
         u64 d0, d1, d2, d3, d4;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+        u32 t0, t1, t2, t3;
+#endif
 
         r0 = ctx->r[0];
         r1 = ctx->r[1];
@@ -287,11 +305,23 @@ static unsigned int poly1305_generic_blocks(struct poly1305_ctx *ctx, const u8 *
         while (likely(srclen >= POLY1305_BLOCK_SIZE)) {
                 /* h += m[i] */
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                 h0 += (le32_to_cpuvp(src + 0) >> 0) & 0x3ffffff;
                 h1 += (le32_to_cpuvp(src + 3) >> 2) & 0x3ffffff;
                 h2 += (le32_to_cpuvp(src + 6) >> 4) & 0x3ffffff;
                 h3 += (le32_to_cpuvp(src + 9) >> 6) & 0x3ffffff;
                 h4 += (le32_to_cpuvp(src + 12) >> 8) | hibit;
+#else
+                t0 = le32_to_cpuvp(src + 0);
+                t1 = le32_to_cpuvp(src + 4);
+                t2 = le32_to_cpuvp(src + 8);
+                t3 = le32_to_cpuvp(src + 12);
+                h0 += t0 & 0x3ffffff;
+                h1 += sr((((u64)t1 << 32) | t0), 26) & 0x3ffffff;
+                h2 += sr((((u64)t2 << 32) | t1), 20) & 0x3ffffff;
+                h3 += sr((((u64)t3 << 32) | t2), 14) & 0x3ffffff;
+                h4 += (t3 >> 8) | hibit;
+#endif
 
                 /* h *= r */
                 d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) +
                      mlt(h3, s2) + mlt(h4, s1);
-- 
cgit v1.2.3-59-g8ed1b
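
Not part of the patch: a minimal standalone sketch showing why the new aligned-load branch in poly1305_generic_blocks() is equivalent to the original overlapping unaligned loads at byte offsets 0/3/6/9/12. The le32() helper below is a portable stand-in for the kernel's le32_to_cpuvp(), and the explicit 64-bit right shifts play the role assumed for the patch's sr() helper; everything else here is illustrative only.

/*
 * Both paths split a 16-byte little-endian block into the same five
 * 26-bit limbs; the aligned path only ever loads on 4-byte boundaries.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t le32(const uint8_t *p)
{
        /* Byte-wise little-endian load, safe at any alignment. */
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        const uint8_t src[16] = {
                0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
                0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe,
        };
        uint32_t a[5], b[5], t0, t1, t2, t3;
        int i;

        /* Unaligned-access path: overlapping loads at offsets 0, 3, 6, 9, 12. */
        a[0] = (le32(src + 0) >> 0) & 0x3ffffff;
        a[1] = (le32(src + 3) >> 2) & 0x3ffffff;
        a[2] = (le32(src + 6) >> 4) & 0x3ffffff;
        a[3] = (le32(src + 9) >> 6) & 0x3ffffff;
        a[4] =  le32(src + 12) >> 8;

        /* Aligned path: four word-aligned loads, limbs carved out of
         * adjacent word pairs with 64-bit right shifts. */
        t0 = le32(src + 0);
        t1 = le32(src + 4);
        t2 = le32(src + 8);
        t3 = le32(src + 12);
        b[0] = t0 & 0x3ffffff;
        b[1] = (uint32_t)((((uint64_t)t1 << 32) | t0) >> 26) & 0x3ffffff;
        b[2] = (uint32_t)((((uint64_t)t2 << 32) | t1) >> 20) & 0x3ffffff;
        b[3] = (uint32_t)((((uint64_t)t3 << 32) | t2) >> 14) & 0x3ffffff;
        b[4] = t3 >> 8;

        for (i = 0; i < 5; i++) {
                assert(a[i] == b[i]);
                printf("limb %d: 0x%07x\n", i, (unsigned)a[i]);
        }
        return 0;
}

Running this prints the same five limbs from both paths, which is the equivalence the CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS split relies on: platforms with cheap unaligned loads keep the shorter overlapping-load form, others take the aligned-load form.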