/* Copyright (C) 2015-2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright 2015 Martin Willi.
 * Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain copyright notices,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 * - Neither the name of the CRYPTOGAMS nor the names of its
 *   copyright holder and contributors may be used to endorse or
 *   promote products derived from this software without specific
 *   prior written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this
 * product may be distributed under the terms of the GNU General Public
 * License (GPL), in which case the provisions of the GPL apply INSTEAD OF
 * those given above.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
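/*
 * This file implements the ChaCha20Poly1305 AEAD construction (and its
 * XChaCha20Poly1305 extended-nonce variant): ChaCha20 for encryption,
 * Poly1305 for authentication, with architecture-specific assembly backends
 * selected at runtime and portable C fallbacks below.
 */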
#include "chacha20poly1305.h"

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/version.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <asm/unaligned.h>

#if defined(CONFIG_X86_64)
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
asmlinkage void poly1305_init_x86_64(void *ctx, const u8 key[16]);
asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp, size_t len, u32 padbit);
asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[16], const u32 nonce[4]);
#ifdef CONFIG_AS_SSSE3
asmlinkage void hchacha20_ssse3(u8 *derived_key, const u8 *nonce, const u8 *key);
asmlinkage void chacha20_ssse3(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
#endif
#ifdef CONFIG_AS_AVX
asmlinkage void poly1305_emit_avx(void *ctx, u8 mac[16], const u32 nonce[4]);
asmlinkage void poly1305_blocks_avx(void *ctx, const u8 *inp, size_t len, u32 padbit);
#endif
#ifdef CONFIG_AS_AVX2
asmlinkage void chacha20_avx2(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
asmlinkage void poly1305_blocks_avx2(void *ctx, const u8 *inp, size_t len, u32 padbit);
#endif
#ifdef CONFIG_AS_AVX512
asmlinkage void chacha20_avx512(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
asmlinkage void poly1305_blocks_avx512(void *ctx, const u8 *inp, size_t len, u32 padbit);
#endif

static bool chacha20poly1305_use_ssse3 __read_mostly;
static bool chacha20poly1305_use_avx __read_mostly;
static bool chacha20poly1305_use_avx2 __read_mostly;
static bool chacha20poly1305_use_avx512 __read_mostly;

void __init chacha20poly1305_fpu_init(void)
{
	chacha20poly1305_use_ssse3 = boot_cpu_has(X86_FEATURE_SSSE3);
	chacha20poly1305_use_avx = boot_cpu_has(X86_FEATURE_AVX) &&
		cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
	chacha20poly1305_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
		boot_cpu_has(X86_FEATURE_AVX2) &&
		cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
#ifndef COMPAT_CANNOT_USE_AVX512
	chacha20poly1305_use_avx512 = boot_cpu_has(X86_FEATURE_AVX) &&
		boot_cpu_has(X86_FEATURE_AVX2) &&
		boot_cpu_has(X86_FEATURE_AVX512F) &&
		cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM |
				  XFEATURE_MASK_ZMM_Hi256, NULL);
#endif
}
#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
asmlinkage void poly1305_init_arm(void *ctx, const u8 key[16]);
asmlinkage void poly1305_blocks_arm(void *ctx, const u8 *inp, size_t len, u32 padbit);
asmlinkage void poly1305_emit_arm(void *ctx, u8 mac[16], const u32 nonce[4]);
asmlinkage void chacha20_arm(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && \
	(!defined(__LINUX_ARM_ARCH__) || __LINUX_ARM_ARCH__ >= 7)
#define ARM_USE_NEON
#include <asm/hwcap.h>
#include <asm/neon.h>
asmlinkage void poly1305_blocks_neon(void *ctx, const u8 *inp, size_t len, u32 padbit);
asmlinkage void poly1305_emit_neon(void *ctx, u8 mac[16], const u32 nonce[4]);
asmlinkage void chacha20_neon(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
#endif
static bool chacha20poly1305_use_neon __read_mostly;

void __init chacha20poly1305_fpu_init(void)
{
#if defined(CONFIG_ARM64)
	chacha20poly1305_use_neon = elf_hwcap & HWCAP_ASIMD;
#elif defined(CONFIG_ARM)
	chacha20poly1305_use_neon = elf_hwcap & HWCAP_NEON;
#endif
}
#elif defined(CONFIG_MIPS) && defined(CONFIG_64BIT)
asmlinkage void poly1305_init_mips(void *ctx, const u8 key[16]);
asmlinkage void poly1305_blocks_mips(void *ctx, const u8 *inp, size_t len, u32 padbit);
asmlinkage void poly1305_emit_mips(void *ctx, u8 mac[16], const u32 nonce[4]);
void __init chacha20poly1305_fpu_init(void) { }
#else
void __init chacha20poly1305_fpu_init(void) { }
#endif
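/*
 * The have_simd flag threaded through the functions below reports whether the
 * caller currently holds FPU/SIMD context; chacha20poly1305_init_simd() and
 * chacha20poly1305_deinit_simd(), used by the public entry points, are
 * presumably declared in chacha20poly1305.h. The vectorized backends run only
 * when have_simd is set and the matching CPU feature was detected in
 * chacha20poly1305_fpu_init(); everything else falls back to the portable C
 * code in this file.
 */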
#define CHACHA20_IV_SIZE	16
#define CHACHA20_KEY_SIZE	32
#define CHACHA20_BLOCK_SIZE	64
#define POLY1305_BLOCK_SIZE	16
#define POLY1305_KEY_SIZE	32
#define POLY1305_MAC_SIZE	16

static inline u32 le32_to_cpuvp(const void *p)
{
	return le32_to_cpup(p);
}

static inline u64 le64_to_cpuvp(const void *p)
{
	return le64_to_cpup(p);
}

static inline u32 rotl32(u32 v, u8 n)
{
	return (v << n) | (v >> (sizeof(v) * 8 - n));
}

struct chacha20_ctx {
	u32 state[CHACHA20_BLOCK_SIZE / sizeof(u32)];
} __aligned(32);

static void chacha20_block_generic(struct chacha20_ctx *ctx, void *stream)
{
	u32 x[CHACHA20_BLOCK_SIZE / sizeof(u32)];
	__le32 *out = stream;
	int i;

	for (i = 0; i < ARRAY_SIZE(x); i++)
		x[i] = ctx->state[i];

	/* Ten double rounds (20 rounds): a column round, then a diagonal round. */
	for (i = 0; i < 20; i += 2) {
		x[0] += x[4];   x[12] = rotl32(x[12] ^ x[0], 16);
		x[1] += x[5];   x[13] = rotl32(x[13] ^ x[1], 16);
		x[2] += x[6];   x[14] = rotl32(x[14] ^ x[2], 16);
		x[3] += x[7];   x[15] = rotl32(x[15] ^ x[3], 16);

		x[8] += x[12];  x[4] = rotl32(x[4] ^ x[8], 12);
		x[9] += x[13];  x[5] = rotl32(x[5] ^ x[9], 12);
		x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 12);
		x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 12);

		x[0] += x[4];   x[12] = rotl32(x[12] ^ x[0], 8);
		x[1] += x[5];   x[13] = rotl32(x[13] ^ x[1], 8);
		x[2] += x[6];   x[14] = rotl32(x[14] ^ x[2], 8);
		x[3] += x[7];   x[15] = rotl32(x[15] ^ x[3], 8);

		x[8] += x[12];  x[4] = rotl32(x[4] ^ x[8], 7);
		x[9] += x[13];  x[5] = rotl32(x[5] ^ x[9], 7);
		x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 7);
		x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 7);

		x[0] += x[5];   x[15] = rotl32(x[15] ^ x[0], 16);
		x[1] += x[6];   x[12] = rotl32(x[12] ^ x[1], 16);
		x[2] += x[7];   x[13] = rotl32(x[13] ^ x[2], 16);
		x[3] += x[4];   x[14] = rotl32(x[14] ^ x[3], 16);

		x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 12);
		x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 12);
		x[8] += x[13];  x[7] = rotl32(x[7] ^ x[8], 12);
		x[9] += x[14];  x[4] = rotl32(x[4] ^ x[9], 12);

		x[0] += x[5];   x[15] = rotl32(x[15] ^ x[0], 8);
		x[1] += x[6];   x[12] = rotl32(x[12] ^ x[1], 8);
		x[2] += x[7];   x[13] = rotl32(x[13] ^ x[2], 8);
		x[3] += x[4];   x[14] = rotl32(x[14] ^ x[3], 8);

		x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 7);
		x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 7);
		x[8] += x[13];  x[7] = rotl32(x[7] ^ x[8], 7);
		x[9] += x[14];  x[4] = rotl32(x[4] ^ x[9], 7);
	}

	/* Feed-forward addition, serialized little-endian; advance the counter. */
	for (i = 0; i < ARRAY_SIZE(x); i++)
		out[i] = cpu_to_le32(x[i] + ctx->state[i]);

	ctx->state[12]++;
}

static const char constant[16] = "expand 32-byte k";
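/*
 * HChaCha20: runs the same 20 ChaCha20 rounds over (constant, key, 16-byte
 * nonce), but skips the feed-forward addition and emits only state words
 * 0-3 and 12-15 as the 32-byte derived subkey. The XChaCha20Poly1305
 * functions at the bottom of this file use it to absorb the first 16 bytes
 * of their 24-byte nonce.
 */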
static void hchacha20_generic(u8 derived_key[CHACHA20POLY1305_KEYLEN], const u8 nonce[16], const u8 key[CHACHA20POLY1305_KEYLEN])
{
	u32 x[CHACHA20_BLOCK_SIZE / sizeof(u32)];
	__le32 *out = (__force __le32 *)derived_key;
	int i;

	x[0] = le32_to_cpuvp(constant + 0);
	x[1] = le32_to_cpuvp(constant + 4);
	x[2] = le32_to_cpuvp(constant + 8);
	x[3] = le32_to_cpuvp(constant + 12);
	x[4] = le32_to_cpuvp(key + 0);
	x[5] = le32_to_cpuvp(key + 4);
	x[6] = le32_to_cpuvp(key + 8);
	x[7] = le32_to_cpuvp(key + 12);
	x[8] = le32_to_cpuvp(key + 16);
	x[9] = le32_to_cpuvp(key + 20);
	x[10] = le32_to_cpuvp(key + 24);
	x[11] = le32_to_cpuvp(key + 28);
	x[12] = le32_to_cpuvp(nonce + 0);
	x[13] = le32_to_cpuvp(nonce + 4);
	x[14] = le32_to_cpuvp(nonce + 8);
	x[15] = le32_to_cpuvp(nonce + 12);

	for (i = 0; i < 20; i += 2) {
		x[0] += x[4];   x[12] = rotl32(x[12] ^ x[0], 16);
		x[1] += x[5];   x[13] = rotl32(x[13] ^ x[1], 16);
		x[2] += x[6];   x[14] = rotl32(x[14] ^ x[2], 16);
		x[3] += x[7];   x[15] = rotl32(x[15] ^ x[3], 16);

		x[8] += x[12];  x[4] = rotl32(x[4] ^ x[8], 12);
		x[9] += x[13];  x[5] = rotl32(x[5] ^ x[9], 12);
		x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 12);
		x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 12);

		x[0] += x[4];   x[12] = rotl32(x[12] ^ x[0], 8);
		x[1] += x[5];   x[13] = rotl32(x[13] ^ x[1], 8);
		x[2] += x[6];   x[14] = rotl32(x[14] ^ x[2], 8);
		x[3] += x[7];   x[15] = rotl32(x[15] ^ x[3], 8);

		x[8] += x[12];  x[4] = rotl32(x[4] ^ x[8], 7);
		x[9] += x[13];  x[5] = rotl32(x[5] ^ x[9], 7);
		x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 7);
		x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 7);

		x[0] += x[5];   x[15] = rotl32(x[15] ^ x[0], 16);
		x[1] += x[6];   x[12] = rotl32(x[12] ^ x[1], 16);
		x[2] += x[7];   x[13] = rotl32(x[13] ^ x[2], 16);
		x[3] += x[4];   x[14] = rotl32(x[14] ^ x[3], 16);

		x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 12);
		x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 12);
		x[8] += x[13];  x[7] = rotl32(x[7] ^ x[8], 12);
		x[9] += x[14];  x[4] = rotl32(x[4] ^ x[9], 12);

		x[0] += x[5];   x[15] = rotl32(x[15] ^ x[0], 8);
		x[1] += x[6];   x[12] = rotl32(x[12] ^ x[1], 8);
		x[2] += x[7];   x[13] = rotl32(x[13] ^ x[2], 8);
		x[3] += x[4];   x[14] = rotl32(x[14] ^ x[3], 8);

		x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 7);
		x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 7);
		x[8] += x[13];  x[7] = rotl32(x[7] ^ x[8], 7);
		x[9] += x[14];  x[4] = rotl32(x[4] ^ x[9], 7);
	}

	out[0] = cpu_to_le32(x[0]);
	out[1] = cpu_to_le32(x[1]);
	out[2] = cpu_to_le32(x[2]);
	out[3] = cpu_to_le32(x[3]);
	out[4] = cpu_to_le32(x[12]);
	out[5] = cpu_to_le32(x[13]);
	out[6] = cpu_to_le32(x[14]);
	out[7] = cpu_to_le32(x[15]);
}

static inline void hchacha20(u8 derived_key[CHACHA20POLY1305_KEYLEN], const u8 nonce[16], const u8 key[CHACHA20POLY1305_KEYLEN], bool have_simd)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_AS_SSSE3)
	if (have_simd && chacha20poly1305_use_ssse3) {
		hchacha20_ssse3(derived_key, nonce, key);
		return;
	}
#endif
	hchacha20_generic(derived_key, nonce, key);
}

static void chacha20_keysetup(struct chacha20_ctx *ctx, const u8 key[CHACHA20_KEY_SIZE], const u8 nonce[sizeof(u64)])
{
	/* State layout: 4 constant words, 8 key words, the block counter
	 * starting at zero in words 12-13, and the 64-bit nonce in words
	 * 14-15. */
	ctx->state[0] = le32_to_cpuvp(constant + 0);
	ctx->state[1] = le32_to_cpuvp(constant + 4);
	ctx->state[2] = le32_to_cpuvp(constant + 8);
	ctx->state[3] = le32_to_cpuvp(constant + 12);
	ctx->state[4] = le32_to_cpuvp(key + 0);
	ctx->state[5] = le32_to_cpuvp(key + 4);
	ctx->state[6] = le32_to_cpuvp(key + 8);
	ctx->state[7] = le32_to_cpuvp(key + 12);
	ctx->state[8] = le32_to_cpuvp(key + 16);
	ctx->state[9] = le32_to_cpuvp(key + 20);
	ctx->state[10] = le32_to_cpuvp(key + 24);
	ctx->state[11] = le32_to_cpuvp(key + 28);
	ctx->state[12] = 0;
	ctx->state[13] = 0;
	ctx->state[14] = le32_to_cpuvp(nonce + 0);
	ctx->state[15] = le32_to_cpuvp(nonce + 4);
}

/*
 * XORs src into dst with the keystream (in-place operation is fine) and
 * advances the block counter by the number of 64-byte blocks consumed,
 * rounding up for a trailing partial block.
 */
static void chacha20_crypt(struct chacha20_ctx *ctx, u8 *dst, const u8 *src, u32 bytes, bool have_simd)
{
	u8 buf[CHACHA20_BLOCK_SIZE];

	if (!have_simd
#if defined(CONFIG_X86_64)
	    || !chacha20poly1305_use_ssse3
#elif defined(ARM_USE_NEON)
	    || !chacha20poly1305_use_neon
#endif
	)
		goto no_simd;

#if defined(CONFIG_X86_64)
#ifdef CONFIG_AS_AVX512
	if (chacha20poly1305_use_avx512) {
		chacha20_avx512(dst, src, bytes, &ctx->state[4], &ctx->state[12]);
		ctx->state[12] += (bytes + 63) / 64;
		return;
	}
#endif
#ifdef CONFIG_AS_AVX2
	if (chacha20poly1305_use_avx2) {
		chacha20_avx2(dst, src, bytes, &ctx->state[4], &ctx->state[12]);
		ctx->state[12] += (bytes + 63) / 64;
		return;
	}
#endif
#ifdef CONFIG_AS_SSSE3
	chacha20_ssse3(dst, src, bytes, &ctx->state[4], &ctx->state[12]);
	ctx->state[12] += (bytes + 63) / 64;
	return;
#endif
#elif defined(ARM_USE_NEON)
	chacha20_neon(dst, src, bytes, &ctx->state[4], &ctx->state[12]);
	ctx->state[12] += (bytes + 63) / 64;
	return;
#endif

no_simd:
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	chacha20_arm(dst, src, bytes, &ctx->state[4], &ctx->state[12]);
	ctx->state[12] += (bytes + 63) / 64;
	return;
#endif

	if (dst != src)
		memcpy(dst, src, bytes);

	while (bytes >= CHACHA20_BLOCK_SIZE) {
		chacha20_block_generic(ctx, buf);
		crypto_xor(dst, buf, CHACHA20_BLOCK_SIZE);
		bytes -= CHACHA20_BLOCK_SIZE;
		dst += CHACHA20_BLOCK_SIZE;
	}
	if (bytes) {
		chacha20_block_generic(ctx, buf);
		crypto_xor(dst, buf, bytes);
	}
}
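/*
 * Poly1305 one-time authenticator state. The opaque area holds the
 * backend-specific accumulator and key state (sized at 24 u64s to fit the
 * assembly implementations); nonce[] keeps the second half of the one-time
 * key, which is added in at emit time; data[]/num buffer a partial 16-byte
 * block between update calls.
 */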
typedef void (*poly1305_blocks_f)(void *ctx, const u8 *inp, size_t len, u32 padbit);
typedef void (*poly1305_emit_f)(void *ctx, u8 mac[16], const u32 nonce[4]);

struct poly1305_ctx {
	u8 opaque[24 * sizeof(u64)];
	u32 nonce[4];
	u8 data[POLY1305_BLOCK_SIZE];
	size_t num;
	struct {
		poly1305_blocks_f blocks;
		poly1305_emit_f emit;
	} func;
} __aligned(8);

#if !(defined(CONFIG_X86_64) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || (defined(CONFIG_MIPS) && defined(CONFIG_64BIT)))
struct poly1305_internal {
	u32 h[5];
	u32 r[4];
};

static void poly1305_init_generic(void *ctx, const u8 key[16])
{
	struct poly1305_internal *st = (struct poly1305_internal *)ctx;

	/* h = 0 */
	st->h[0] = 0;
	st->h[1] = 0;
	st->h[2] = 0;
	st->h[3] = 0;
	st->h[4] = 0;

	/* r &= 0x0ffffffc0ffffffc0ffffffc0fffffff */
	st->r[0] = le32_to_cpuvp(&key[ 0]) & 0x0fffffff;
	st->r[1] = le32_to_cpuvp(&key[ 4]) & 0x0ffffffc;
	st->r[2] = le32_to_cpuvp(&key[ 8]) & 0x0ffffffc;
	st->r[3] = le32_to_cpuvp(&key[12]) & 0x0ffffffc;
}

static void poly1305_blocks_generic(void *ctx, const u8 *inp, size_t len, u32 padbit)
{
#define CONSTANT_TIME_CARRY(a, b) \
	((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
	struct poly1305_internal *st = (struct poly1305_internal *)ctx;
	u32 r0, r1, r2, r3;
	u32 s1, s2, s3;
	u32 h0, h1, h2, h3, h4, c;
	u64 d0, d1, d2, d3;

	r0 = st->r[0];
	r1 = st->r[1];
	r2 = st->r[2];
	r3 = st->r[3];

	s1 = r1 + (r1 >> 2);
	s2 = r2 + (r2 >> 2);
	s3 = r3 + (r3 >> 2);

	h0 = st->h[0];
	h1 = st->h[1];
	h2 = st->h[2];
	h3 = st->h[3];
	h4 = st->h[4];

	while (len >= POLY1305_BLOCK_SIZE) {
		/* h += m[i] */
		h0 = (u32)(d0 = (u64)h0 + le32_to_cpuvp(inp + 0));
		h1 = (u32)(d1 = (u64)h1 + (d0 >> 32) + le32_to_cpuvp(inp + 4));
		h2 = (u32)(d2 = (u64)h2 + (d1 >> 32) + le32_to_cpuvp(inp + 8));
		h3 = (u32)(d3 = (u64)h3 + (d2 >> 32) + le32_to_cpuvp(inp + 12));
		h4 += (u32)(d3 >> 32) + padbit;

		/* h *= r "%" p, where "%" stands for "partial remainder" */
		d0 = ((u64)h0 * r0) + ((u64)h1 * s3) + ((u64)h2 * s2) + ((u64)h3 * s1);
		d1 = ((u64)h0 * r1) + ((u64)h1 * r0) + ((u64)h2 * s3) + ((u64)h3 * s2) + (h4 * s1);
		d2 = ((u64)h0 * r2) + ((u64)h1 * r1) + ((u64)h2 * r0) + ((u64)h3 * s3) + (h4 * s2);
		d3 = ((u64)h0 * r3) + ((u64)h1 * r2) + ((u64)h2 * r1) + ((u64)h3 * r0) + (h4 * s3);
		h4 = (h4 * r0);

		/* last reduction step: */
		/* a) h4:h0 = h4<<128 + d3<<96 + d2<<64 + d1<<32 + d0 */
		h0 = (u32)d0;
		h1 = (u32)(d1 += d0 >> 32);
		h2 = (u32)(d2 += d1 >> 32);
		h3 = (u32)(d3 += d2 >> 32);
		h4 += (u32)(d3 >> 32);
		/* b) (h4:h0 += (h4:h0>>130) * 5) %= 2^130 */
		c = (h4 >> 2) + (h4 & ~3U);
		h4 &= 3;
		h0 += c;
		h1 += (c = CONSTANT_TIME_CARRY(h0, c));
		h2 += (c = CONSTANT_TIME_CARRY(h1, c));
		h3 += (c = CONSTANT_TIME_CARRY(h2, c));
		h4 += CONSTANT_TIME_CARRY(h3, c);
		/*
		 * Occasional overflows to 3rd bit of h4 are taken care of
		 * "naturally". If after this point we end up at the top of
		 * this loop, then the overflow bit will be accounted for
		 * in next iteration. If we end up in poly1305_emit, then
		 * comparison to modulus below will still count as "carry
		 * into 131st bit", so that properly reduced value will be
		 * picked in conditional move.
		 */

		inp += POLY1305_BLOCK_SIZE;
		len -= POLY1305_BLOCK_SIZE;
	}

	st->h[0] = h0;
	st->h[1] = h1;
	st->h[2] = h2;
	st->h[3] = h3;
	st->h[4] = h4;
#undef CONSTANT_TIME_CARRY
}
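/*
 * Emit the tag: fully reduce h modulo p = 2^130 - 5 by computing g = h + 5
 * and, in constant time, selecting g (mod 2^130) when the addition carries
 * into bit 131; then add the second key half and truncate to 128 bits.
 */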
static void poly1305_emit_generic(void *ctx, u8 mac[16], const u32 nonce[4])
{
	struct poly1305_internal *st = (struct poly1305_internal *)ctx;
	__le32 *omac = (__force __le32 *)mac;
	u32 h0, h1, h2, h3, h4;
	u32 g0, g1, g2, g3, g4;
	u64 t;
	u32 mask;

	h0 = st->h[0];
	h1 = st->h[1];
	h2 = st->h[2];
	h3 = st->h[3];
	h4 = st->h[4];

	/* compare to modulus by computing h + -p */
	g0 = (u32)(t = (u64)h0 + 5);
	g1 = (u32)(t = (u64)h1 + (t >> 32));
	g2 = (u32)(t = (u64)h2 + (t >> 32));
	g3 = (u32)(t = (u64)h3 + (t >> 32));
	g4 = h4 + (u32)(t >> 32);

	/* if there was carry into 131st bit, h3:h0 = g3:g0 */
	mask = 0 - (g4 >> 2);
	g0 &= mask;
	g1 &= mask;
	g2 &= mask;
	g3 &= mask;
	mask = ~mask;
	h0 = (h0 & mask) | g0;
	h1 = (h1 & mask) | g1;
	h2 = (h2 & mask) | g2;
	h3 = (h3 & mask) | g3;

	/* mac = (h + nonce) % (2^128) */
	h0 = (u32)(t = (u64)h0 + nonce[0]);
	h1 = (u32)(t = (u64)h1 + (t >> 32) + nonce[1]);
	h2 = (u32)(t = (u64)h2 + (t >> 32) + nonce[2]);
	h3 = (u32)(t = (u64)h3 + (t >> 32) + nonce[3]);

	omac[0] = cpu_to_le32(h0);
	omac[1] = cpu_to_le32(h1);
	omac[2] = cpu_to_le32(h2);
	omac[3] = cpu_to_le32(h3);
}
#endif

static void poly1305_init(struct poly1305_ctx *ctx, const u8 key[POLY1305_KEY_SIZE], bool have_simd)
{
	ctx->nonce[0] = le32_to_cpuvp(&key[16]);
	ctx->nonce[1] = le32_to_cpuvp(&key[20]);
	ctx->nonce[2] = le32_to_cpuvp(&key[24]);
	ctx->nonce[3] = le32_to_cpuvp(&key[28]);

#if defined(CONFIG_X86_64)
	poly1305_init_x86_64(ctx->opaque, key);
	ctx->func.blocks = poly1305_blocks_x86_64;
	ctx->func.emit = poly1305_emit_x86_64;
#ifdef CONFIG_AS_AVX512
	if (chacha20poly1305_use_avx512 && have_simd) {
		ctx->func.blocks = poly1305_blocks_avx512;
		ctx->func.emit = poly1305_emit_avx;
	} else
#endif
#ifdef CONFIG_AS_AVX2
	if (chacha20poly1305_use_avx2 && have_simd) {
		ctx->func.blocks = poly1305_blocks_avx2;
		ctx->func.emit = poly1305_emit_avx;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (chacha20poly1305_use_avx && have_simd) {
		ctx->func.blocks = poly1305_blocks_avx;
		ctx->func.emit = poly1305_emit_avx;
	}
#endif
#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	poly1305_init_arm(ctx->opaque, key);
	ctx->func.blocks = poly1305_blocks_arm;
	ctx->func.emit = poly1305_emit_arm;
#if defined(ARM_USE_NEON)
	if (chacha20poly1305_use_neon && have_simd) {
		ctx->func.blocks = poly1305_blocks_neon;
		ctx->func.emit = poly1305_emit_neon;
	}
#endif
#elif defined(CONFIG_MIPS) && defined(CONFIG_64BIT)
	poly1305_init_mips(ctx->opaque, key);
	ctx->func.blocks = poly1305_blocks_mips;
	ctx->func.emit = poly1305_emit_mips;
#else
	poly1305_init_generic(ctx->opaque, key);
#endif
	ctx->num = 0;
}

static void poly1305_update(struct poly1305_ctx *ctx, const u8 *inp, size_t len)
{
#if defined(CONFIG_X86_64) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || (defined(CONFIG_MIPS) && defined(CONFIG_64BIT))
	const poly1305_blocks_f blocks = ctx->func.blocks;
#else
	const poly1305_blocks_f blocks = poly1305_blocks_generic;
#endif
	const size_t num = ctx->num;
	size_t rem;

	if (num) {
		rem = POLY1305_BLOCK_SIZE - num;
		if (len >= rem) {
			memcpy(ctx->data + num, inp, rem);
			blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 1);
			inp += rem;
			len -= rem;
		} else {
			/* Still not enough data to process a block. */
			memcpy(ctx->data + num, inp, len);
			ctx->num = num + len;
			return;
		}
	}

	rem = len % POLY1305_BLOCK_SIZE;
	len -= rem;

	if (len >= POLY1305_BLOCK_SIZE) {
		blocks(ctx->opaque, inp, len, 1);
		inp += len;
	}

	if (rem)
		memcpy(ctx->data, inp, rem);
	ctx->num = rem;
}
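/*
 * Finalize: any buffered partial block is padded with a 0x01 byte followed
 * by zeros and processed with padbit 0 (the high bit having been supplied
 * explicitly), the tag is emitted, and the whole context, including the
 * one-time key material, is wiped.
 */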
static void poly1305_finish(struct poly1305_ctx *ctx, u8 mac[16])
{
#if defined(CONFIG_X86_64) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || (defined(CONFIG_MIPS) && defined(CONFIG_64BIT))
	const poly1305_blocks_f blocks = ctx->func.blocks;
	const poly1305_emit_f emit = ctx->func.emit;
#else
	const poly1305_blocks_f blocks = poly1305_blocks_generic;
	const poly1305_emit_f emit = poly1305_emit_generic;
#endif
	size_t num = ctx->num;

	if (num) {
		ctx->data[num++] = 1; /* pad bit */
		while (num < POLY1305_BLOCK_SIZE)
			ctx->data[num++] = 0;
		blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 0);
	}

	emit(ctx->opaque, mac, ctx->nonce);

	/* zero out the state */
	memzero_explicit(ctx, sizeof(*ctx));
}

static const u8 pad0[16] = { 0 };

static struct crypto_alg chacha20_alg = {
	.cra_blocksize = 1,
	.cra_alignmask = sizeof(u32) - 1
};
static struct crypto_blkcipher chacha20_cipher = {
	.base = {
		.__crt_alg = &chacha20_alg
	}
};
static struct blkcipher_desc chacha20_desc = {
	.tfm = &chacha20_cipher
};

static inline void __chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
					      const u8 *ad, const size_t ad_len,
					      const u64 nonce, const u8 key[CHACHA20POLY1305_KEYLEN],
					      bool have_simd)
{
	struct poly1305_ctx poly1305_state;
	struct chacha20_ctx chacha20_state;
	u8 block0[CHACHA20_BLOCK_SIZE] = { 0 };
	__le64 len;
	__le64 le_nonce = cpu_to_le64(nonce);

	chacha20_keysetup(&chacha20_state, key, (u8 *)&le_nonce);

	/* The one-time Poly1305 key is the first half of keystream block 0;
	 * the ciphertext then starts at block counter 1. */
	chacha20_crypt(&chacha20_state, block0, block0, sizeof(block0), have_simd);
	poly1305_init(&poly1305_state, block0, have_simd);
	memzero_explicit(block0, sizeof(block0));

	poly1305_update(&poly1305_state, ad, ad_len);
	poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf);

	chacha20_crypt(&chacha20_state, dst, src, src_len, have_simd);

	poly1305_update(&poly1305_state, dst, src_len);
	poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf);

	len = cpu_to_le64(ad_len);
	poly1305_update(&poly1305_state, (u8 *)&len, sizeof(len));

	len = cpu_to_le64(src_len);
	poly1305_update(&poly1305_state, (u8 *)&len, sizeof(len));

	poly1305_finish(&poly1305_state, dst + src_len);

	memzero_explicit(&chacha20_state, sizeof(chacha20_state));
}

void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
			      const u8 *ad, const size_t ad_len,
			      const u64 nonce, const u8 key[CHACHA20POLY1305_KEYLEN])
{
	bool have_simd;

	have_simd = chacha20poly1305_init_simd();
	__chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, nonce, key, have_simd);
	chacha20poly1305_deinit_simd(have_simd);
}
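/*
 * A minimal usage sketch for the linear-buffer API above (illustrative only;
 * msg is a hypothetical caller buffer). The ciphertext buffer must have room
 * for src_len plus the 16-byte tag, and a nonce must never be reused with
 * the same key:
 *
 *	u8 key[CHACHA20POLY1305_KEYLEN];
 *	u8 ct[sizeof(msg) + POLY1305_MAC_SIZE];
 *	u8 pt[sizeof(msg)];
 *
 *	get_random_bytes(key, sizeof(key));
 *	chacha20poly1305_encrypt(ct, msg, sizeof(msg), NULL, 0, 1, key);
 *	if (!chacha20poly1305_decrypt(pt, ct, sizeof(ct), NULL, 0, 1, key))
 *		pr_err("forgery or corruption detected\n");
 */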
bool chacha20poly1305_encrypt_sg(struct scatterlist *dst, struct scatterlist *src, const size_t src_len,
				 const u8 *ad, const size_t ad_len,
				 const u64 nonce, const u8 key[CHACHA20POLY1305_KEYLEN],
				 bool have_simd)
{
	struct poly1305_ctx poly1305_state;
	struct chacha20_ctx chacha20_state;
	int ret = 0;
	struct blkcipher_walk walk;
	u8 block0[CHACHA20_BLOCK_SIZE] = { 0 };
	u8 mac[POLY1305_MAC_SIZE];
	__le64 len;
	__le64 le_nonce = cpu_to_le64(nonce);

	chacha20_keysetup(&chacha20_state, key, (u8 *)&le_nonce);

	chacha20_crypt(&chacha20_state, block0, block0, sizeof(block0), have_simd);
	poly1305_init(&poly1305_state, block0, have_simd);
	memzero_explicit(block0, sizeof(block0));

	poly1305_update(&poly1305_state, ad, ad_len);
	poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf);

	if (likely(src_len)) {
		blkcipher_walk_init(&walk, dst, src, src_len);
		ret = blkcipher_walk_virt_block(&chacha20_desc, &walk, CHACHA20_BLOCK_SIZE);
		while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
			size_t chunk_len = rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE);

			chacha20_crypt(&chacha20_state, walk.dst.virt.addr, walk.src.virt.addr, chunk_len, have_simd);
			poly1305_update(&poly1305_state, walk.dst.virt.addr, chunk_len);
			ret = blkcipher_walk_done(&chacha20_desc, &walk, walk.nbytes % CHACHA20_BLOCK_SIZE);
		}
		if (walk.nbytes) {
			chacha20_crypt(&chacha20_state, walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes, have_simd);
			poly1305_update(&poly1305_state, walk.dst.virt.addr, walk.nbytes);
			ret = blkcipher_walk_done(&chacha20_desc, &walk, 0);
		}
	}
	if (unlikely(ret))
		goto err;

	poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf);

	len = cpu_to_le64(ad_len);
	poly1305_update(&poly1305_state, (u8 *)&len, sizeof(len));

	len = cpu_to_le64(src_len);
	poly1305_update(&poly1305_state, (u8 *)&len, sizeof(len));

	poly1305_finish(&poly1305_state, mac);
	scatterwalk_map_and_copy(mac, dst, src_len, sizeof(mac), 1);
err:
	memzero_explicit(&chacha20_state, sizeof(chacha20_state));
	memzero_explicit(mac, sizeof(mac));
	return !ret;
}

static inline bool __chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
					      const u8 *ad, const size_t ad_len,
					      const u64 nonce, const u8 key[CHACHA20POLY1305_KEYLEN],
					      bool have_simd)
{
	struct poly1305_ctx poly1305_state;
	struct chacha20_ctx chacha20_state;
	int ret;
	u8 block0[CHACHA20_BLOCK_SIZE] = { 0 };
	u8 mac[POLY1305_MAC_SIZE];
	size_t dst_len;
	__le64 len;
	__le64 le_nonce = cpu_to_le64(nonce);

	if (unlikely(src_len < POLY1305_MAC_SIZE))
		return false;

	chacha20_keysetup(&chacha20_state, key, (u8 *)&le_nonce);

	chacha20_crypt(&chacha20_state, block0, block0, sizeof(block0), have_simd);
	poly1305_init(&poly1305_state, block0, have_simd);
	memzero_explicit(block0, sizeof(block0));

	poly1305_update(&poly1305_state, ad, ad_len);
	poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf);

	dst_len = src_len - POLY1305_MAC_SIZE;
	poly1305_update(&poly1305_state, src, dst_len);
	poly1305_update(&poly1305_state, pad0, (0x10 - dst_len) & 0xf);

	len = cpu_to_le64(ad_len);
	poly1305_update(&poly1305_state, (u8 *)&len, sizeof(len));

	len = cpu_to_le64(dst_len);
	poly1305_update(&poly1305_state, (u8 *)&len, sizeof(len));

	poly1305_finish(&poly1305_state, mac);

	/* Verify the tag in constant time before releasing any plaintext. */
	ret = crypto_memneq(mac, src + dst_len, POLY1305_MAC_SIZE);
	memzero_explicit(mac, POLY1305_MAC_SIZE);
	if (likely(!ret))
		chacha20_crypt(&chacha20_state, dst, src, dst_len, have_simd);

	memzero_explicit(&chacha20_state, sizeof(chacha20_state));

	return !ret;
}

bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
			      const u8 *ad, const size_t ad_len,
			      const u64 nonce, const u8 key[CHACHA20POLY1305_KEYLEN])
{
	bool have_simd, ret;

	have_simd = chacha20poly1305_init_simd();
	ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, nonce, key, have_simd);
	chacha20poly1305_deinit_simd(have_simd);
	return ret;
}
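/*
 * Scatterlist variant of decryption. Unlike __chacha20poly1305_decrypt
 * above, this authenticates and decrypts in a single pass, so plaintext is
 * written to dst before the tag comparison at the end; callers must discard
 * the output when this returns false.
 */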
bool chacha20poly1305_decrypt_sg(struct scatterlist *dst, struct scatterlist *src, const size_t src_len,
				 const u8 *ad, const size_t ad_len,
				 const u64 nonce, const u8 key[CHACHA20POLY1305_KEYLEN],
				 bool have_simd)
{
	struct poly1305_ctx poly1305_state;
	struct chacha20_ctx chacha20_state;
	struct blkcipher_walk walk;
	int ret = 0;
	u8 block0[CHACHA20_BLOCK_SIZE] = { 0 };
	u8 read_mac[POLY1305_MAC_SIZE], computed_mac[POLY1305_MAC_SIZE];
	size_t dst_len;
	__le64 len;
	__le64 le_nonce = cpu_to_le64(nonce);

	if (unlikely(src_len < POLY1305_MAC_SIZE))
		return false;

	chacha20_keysetup(&chacha20_state, key, (u8 *)&le_nonce);

	chacha20_crypt(&chacha20_state, block0, block0, sizeof(block0), have_simd);
	poly1305_init(&poly1305_state, block0, have_simd);
	memzero_explicit(block0, sizeof(block0));

	poly1305_update(&poly1305_state, ad, ad_len);
	poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf);

	dst_len = src_len - POLY1305_MAC_SIZE;
	if (likely(dst_len)) {
		blkcipher_walk_init(&walk, dst, src, dst_len);
		ret = blkcipher_walk_virt_block(&chacha20_desc, &walk, CHACHA20_BLOCK_SIZE);
		while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
			size_t chunk_len = rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE);

			poly1305_update(&poly1305_state, walk.src.virt.addr, chunk_len);
			chacha20_crypt(&chacha20_state, walk.dst.virt.addr, walk.src.virt.addr, chunk_len, have_simd);
			ret = blkcipher_walk_done(&chacha20_desc, &walk, walk.nbytes % CHACHA20_BLOCK_SIZE);
		}
		if (walk.nbytes) {
			poly1305_update(&poly1305_state, walk.src.virt.addr, walk.nbytes);
			chacha20_crypt(&chacha20_state, walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes, have_simd);
			ret = blkcipher_walk_done(&chacha20_desc, &walk, 0);
		}
	}
	if (unlikely(ret))
		goto err;

	poly1305_update(&poly1305_state, pad0, (0x10 - dst_len) & 0xf);

	len = cpu_to_le64(ad_len);
	poly1305_update(&poly1305_state, (u8 *)&len, sizeof(len));

	len = cpu_to_le64(dst_len);
	poly1305_update(&poly1305_state, (u8 *)&len, sizeof(len));

	poly1305_finish(&poly1305_state, computed_mac);

	scatterwalk_map_and_copy(read_mac, src, dst_len, POLY1305_MAC_SIZE, 0);
	ret = crypto_memneq(read_mac, computed_mac, POLY1305_MAC_SIZE);
err:
	memzero_explicit(read_mac, POLY1305_MAC_SIZE);
	memzero_explicit(computed_mac, POLY1305_MAC_SIZE);
	memzero_explicit(&chacha20_state, sizeof(chacha20_state));
	return !ret;
}

/*
 * XChaCha20Poly1305: the 24-byte nonce is split into a 16-byte part fed to
 * HChaCha20 to derive a fresh subkey and an 8-byte part used as the ChaCha20
 * nonce for the ordinary construction above.
 */
void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
			       const u8 *ad, const size_t ad_len,
			       const u8 nonce[XCHACHA20POLY1305_NONCELEN],
			       const u8 key[CHACHA20POLY1305_KEYLEN])
{
	bool have_simd = chacha20poly1305_init_simd();
	u8 derived_key[CHACHA20POLY1305_KEYLEN] __aligned(16);

	hchacha20(derived_key, nonce, key, have_simd);
	__chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, le64_to_cpuvp(nonce + 16), derived_key, have_simd);
	memzero_explicit(derived_key, CHACHA20POLY1305_KEYLEN);
	chacha20poly1305_deinit_simd(have_simd);
}

bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
			       const u8 *ad, const size_t ad_len,
			       const u8 nonce[XCHACHA20POLY1305_NONCELEN],
			       const u8 key[CHACHA20POLY1305_KEYLEN])
{
	bool ret, have_simd = chacha20poly1305_init_simd();
	u8 derived_key[CHACHA20POLY1305_KEYLEN] __aligned(16);

	hchacha20(derived_key, nonce, key, have_simd);
	ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, le64_to_cpuvp(nonce + 16), derived_key, have_simd);
	memzero_explicit(derived_key, CHACHA20POLY1305_KEYLEN);
	chacha20poly1305_deinit_simd(have_simd);
	return ret;
}

#include "../selftest/chacha20poly1305.h"