-rw-r--r--  Makefile            |   2
-rw-r--r--  main.c              |  10
-rw-r--r--  poly1305-donna32.c  | 247
-rw-r--r--  poly1305-donna64.c  | 225
-rw-r--r--  poly1305-openssl.c  |  86
5 files changed, 525 insertions, 45 deletions
diff --git a/Makefile b/Makefile
index 0688d0d..0a7958b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
ifneq ($(KERNELRELEASE),)
-kbench9000-y := main.o poly1305-hacl64.o poly1305-ref.o poly1305-openssl-asm.o poly1305-openssl.o
+kbench9000-y := main.o poly1305-hacl64.o poly1305-ref.o poly1305-openssl-asm.o poly1305-openssl.o poly1305-donna32.o poly1305-donna64.o
obj-m := kbench9000.o
ccflags-y += -O3
ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
diff --git a/main.c b/main.c
index 152af76..ff26798 100644
--- a/main.c
+++ b/main.c
@@ -75,6 +75,8 @@ declare_it(ossl_amd64)
declare_it(ossl_avx)
declare_it(ossl_avx2)
declare_it(ossl_avx512)
+declare_it(donna32)
+declare_it(donna64)
static bool verify(void)
{
@@ -86,6 +88,8 @@ static bool verify(void)
test_it(hacl64, {}, {});
test_it(ref, {}, {});
test_it(ossl_c, {}, {});
+ test_it(donna32, {}, {});
+ test_it(donna64, {}, {});
test_it(ossl_amd64, {}, {});
if (boot_cpu_has(X86_FEATURE_AVX) && cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
test_it(ossl_avx, kernel_fpu_begin(), kernel_fpu_end());
@@ -108,6 +112,8 @@ static int __init mod_init(void)
cycles_t start_ossl_avx[DOUBLING_STEPS + 1], end_ossl_avx[DOUBLING_STEPS + 1];
cycles_t start_ossl_avx2[DOUBLING_STEPS + 1], end_ossl_avx2[DOUBLING_STEPS + 1];
cycles_t start_ossl_avx512[DOUBLING_STEPS + 1], end_ossl_avx512[DOUBLING_STEPS + 1];
+ cycles_t start_donna32[DOUBLING_STEPS + 1], end_donna32[DOUBLING_STEPS + 1];
+ cycles_t start_donna64[DOUBLING_STEPS + 1], end_donna64[DOUBLING_STEPS + 1];
unsigned long flags;
DEFINE_SPINLOCK(lock);
@@ -128,6 +134,8 @@ static int __init mod_init(void)
do_it(hacl64);
do_it(ref);
do_it(ossl_c);
+ do_it(donna32);
+ do_it(donna64);
do_it(ossl_amd64);
if (boot_cpu_has(X86_FEATURE_AVX) && cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
do_it(ossl_avx);
@@ -147,6 +155,8 @@ static int __init mod_init(void)
report_it(hacl64);
report_it(ref);
report_it(ossl_c);
+ report_it(donna32);
+ report_it(donna64);
report_it(ossl_amd64);
if (boot_cpu_has(X86_FEATURE_AVX) && cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
report_it(ossl_avx);
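
The declare_it()/test_it()/do_it()/report_it() macros above come from main.c's benchmark harness, which times each implementation in cycles with interrupts disabled. A rough userspace analogue of that measurement, hypothetical and x86-only, assuming poly1305-donna32.c has been built outside the kernel with shims for its <linux/*.h> includes:

/* Hypothetical userspace analogue of do_it()/report_it(): time
 * poly1305_donna32() over a fixed 600-byte buffer with the TSC.
 * The buffer size and trial count here are arbitrary. */
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>		/* __rdtsc() */

void poly1305_donna32(unsigned char *out, const unsigned char *in,
		      unsigned long long inlen, const unsigned char *k);

int main(void)
{
	unsigned char key[32] = { 1 }, mac[16], buf[600] = { 0 };
	enum { TRIALS = 100000 };
	uint64_t start, end;
	int i;

	start = __rdtsc();
	for (i = 0; i < TRIALS; ++i)
		poly1305_donna32(mac, buf, sizeof(buf), key);
	end = __rdtsc();

	printf("donna32: %llu cycles per %zu-byte call\n",
	       (unsigned long long)((end - start) / TRIALS), sizeof(buf));
	return 0;
}

Judging by the DOUBLING_STEPS arrays above, the module also measures a series of doubling input lengths; this sketch omits that.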
diff --git a/poly1305-donna32.c b/poly1305-donna32.c
new file mode 100644
index 0000000..eec66dd
--- /dev/null
+++ b/poly1305-donna32.c
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is based in part on Andrew Moon's poly1305-donna, which is in the
+ * public domain.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/unaligned.h>
+
+enum {
+ POLY1305_BLOCK_SIZE = 16,
+ POLY1305_KEY_SIZE = 32,
+ POLY1305_MAC_SIZE = 16
+};
+
+struct poly1305_ctx {
+ u8 opaque[24 * sizeof(u64)];
+ u32 nonce[4];
+ u8 data[POLY1305_BLOCK_SIZE];
+ size_t num;
+} __aligned(8);
+
+
+struct poly1305_internal {
+ u32 h[5];
+ u32 r[5];
+};
+
+static void poly1305_init_generic(void *ctx, const u8 key[16])
+{
+ struct poly1305_internal *st = (struct poly1305_internal *)ctx;
+
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ st->r[0] = (get_unaligned_le32(&key[0])) & 0x3ffffff;
+ st->r[1] = (get_unaligned_le32(&key[3]) >> 2) & 0x3ffff03;
+ st->r[2] = (get_unaligned_le32(&key[6]) >> 4) & 0x3ffc0ff;
+ st->r[3] = (get_unaligned_le32(&key[9]) >> 6) & 0x3f03fff;
+ st->r[4] = (get_unaligned_le32(&key[12]) >> 8) & 0x00fffff;
+
+ /* h = 0 */
+ st->h[0] = 0;
+ st->h[1] = 0;
+ st->h[2] = 0;
+ st->h[3] = 0;
+ st->h[4] = 0;
+}
+
+static void poly1305_blocks_generic(void *ctx, const u8 *input, size_t len,
+ const u32 padbit)
+{
+ struct poly1305_internal *st = (struct poly1305_internal *)ctx;
+ const u32 hibit = padbit ? (1UL << 24) : 0;
+ u32 r0, r1, r2, r3, r4;
+ u32 s1, s2, s3, s4;
+ u32 h0, h1, h2, h3, h4;
+ u64 d0, d1, d2, d3, d4;
+ u32 c;
+
+ r0 = st->r[0];
+ r1 = st->r[1];
+ r2 = st->r[2];
+ r3 = st->r[3];
+ r4 = st->r[4];
+
+ s1 = r1 * 5;
+ s2 = r2 * 5;
+ s3 = r3 * 5;
+ s4 = r4 * 5;
+
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+ h3 = st->h[3];
+ h4 = st->h[4];
+
+ while (len >= POLY1305_BLOCK_SIZE) {
+ /* h += m[i] */
+ h0 += (get_unaligned_le32(&input[0])) & 0x3ffffff;
+ h1 += (get_unaligned_le32(&input[3]) >> 2) & 0x3ffffff;
+ h2 += (get_unaligned_le32(&input[6]) >> 4) & 0x3ffffff;
+ h3 += (get_unaligned_le32(&input[9]) >> 6) & 0x3ffffff;
+ h4 += (get_unaligned_le32(&input[12]) >> 8) | hibit;
+
+ /* h *= r */
+ d0 = ((u64)h0 * r0) + ((u64)h1 * s4) +
+ ((u64)h2 * s3) + ((u64)h3 * s2) +
+ ((u64)h4 * s1);
+ d1 = ((u64)h0 * r1) + ((u64)h1 * r0) +
+ ((u64)h2 * s4) + ((u64)h3 * s3) +
+ ((u64)h4 * s2);
+ d2 = ((u64)h0 * r2) + ((u64)h1 * r1) +
+ ((u64)h2 * r0) + ((u64)h3 * s4) +
+ ((u64)h4 * s3);
+ d3 = ((u64)h0 * r3) + ((u64)h1 * r2) +
+ ((u64)h2 * r1) + ((u64)h3 * r0) +
+ ((u64)h4 * s4);
+ d4 = ((u64)h0 * r4) + ((u64)h1 * r3) +
+ ((u64)h2 * r2) + ((u64)h3 * r1) +
+ ((u64)h4 * r0);
+
+ /* (partial) h %= p */
+ c = (u32)(d0 >> 26);
+ h0 = (u32)d0 & 0x3ffffff;
+ d1 += c;
+ c = (u32)(d1 >> 26);
+ h1 = (u32)d1 & 0x3ffffff;
+ d2 += c;
+ c = (u32)(d2 >> 26);
+ h2 = (u32)d2 & 0x3ffffff;
+ d3 += c;
+ c = (u32)(d3 >> 26);
+ h3 = (u32)d3 & 0x3ffffff;
+ d4 += c;
+ c = (u32)(d4 >> 26);
+ h4 = (u32)d4 & 0x3ffffff;
+ h0 += c * 5;
+ c = (h0 >> 26);
+ h0 = h0 & 0x3ffffff;
+ h1 += c;
+
+ input += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ }
+
+ st->h[0] = h0;
+ st->h[1] = h1;
+ st->h[2] = h2;
+ st->h[3] = h3;
+ st->h[4] = h4;
+}
+
+static void poly1305_emit_generic(void *ctx, u8 mac[16], const u32 nonce[4])
+{
+ struct poly1305_internal *st = (struct poly1305_internal *)ctx;
+ u32 h0, h1, h2, h3, h4, c;
+ u32 g0, g1, g2, g3, g4;
+ u64 f;
+ u32 mask;
+
+ /* fully carry h */
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+ h3 = st->h[3];
+ h4 = st->h[4];
+
+ c = h1 >> 26;
+ h1 = h1 & 0x3ffffff;
+ h2 += c;
+ c = h2 >> 26;
+ h2 = h2 & 0x3ffffff;
+ h3 += c;
+ c = h3 >> 26;
+ h3 = h3 & 0x3ffffff;
+ h4 += c;
+ c = h4 >> 26;
+ h4 = h4 & 0x3ffffff;
+ h0 += c * 5;
+ c = h0 >> 26;
+ h0 = h0 & 0x3ffffff;
+ h1 += c;
+
+ /* compute h + -p */
+ g0 = h0 + 5;
+ c = g0 >> 26;
+ g0 &= 0x3ffffff;
+ g1 = h1 + c;
+ c = g1 >> 26;
+ g1 &= 0x3ffffff;
+ g2 = h2 + c;
+ c = g2 >> 26;
+ g2 &= 0x3ffffff;
+ g3 = h3 + c;
+ c = g3 >> 26;
+ g3 &= 0x3ffffff;
+ g4 = h4 + c - (1UL << 26);
+
+ /* select h if h < p, or h + -p if h >= p */
+ mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1;
+ g0 &= mask;
+ g1 &= mask;
+ g2 &= mask;
+ g3 &= mask;
+ g4 &= mask;
+ mask = ~mask;
+
+ h0 = (h0 & mask) | g0;
+ h1 = (h1 & mask) | g1;
+ h2 = (h2 & mask) | g2;
+ h3 = (h3 & mask) | g3;
+ h4 = (h4 & mask) | g4;
+
+ /* h = h % (2^128) */
+ h0 = ((h0) | (h1 << 26)) & 0xffffffff;
+ h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff;
+ h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff;
+ h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff;
+
+ /* mac = (h + nonce) % (2^128) */
+ f = (u64)h0 + nonce[0];
+ h0 = (u32)f;
+ f = (u64)h1 + nonce[1] + (f >> 32);
+ h1 = (u32)f;
+ f = (u64)h2 + nonce[2] + (f >> 32);
+ h2 = (u32)f;
+ f = (u64)h3 + nonce[3] + (f >> 32);
+ h3 = (u32)f;
+
+ put_unaligned_le32(h0, &mac[0]);
+ put_unaligned_le32(h1, &mac[4]);
+ put_unaligned_le32(h2, &mac[8]);
+ put_unaligned_le32(h3, &mac[12]);
+}
+
+
+
+void poly1305_donna32(unsigned char *out, const unsigned char *in, unsigned long long inlen, const unsigned char *k)
+{
+ size_t rem;
+ struct poly1305_ctx ctx;
+ ctx.nonce[0] = get_unaligned_le32(&k[16]);
+ ctx.nonce[1] = get_unaligned_le32(&k[20]);
+ ctx.nonce[2] = get_unaligned_le32(&k[24]);
+ ctx.nonce[3] = get_unaligned_le32(&k[28]);
+ poly1305_init_generic(ctx.opaque, k);
+ ctx.num = 0;
+
+ rem = inlen % POLY1305_BLOCK_SIZE;
+ inlen -= rem;
+
+ if (inlen >= POLY1305_BLOCK_SIZE) {
+ poly1305_blocks_generic(ctx.opaque, in, inlen, 1);
+ in += inlen;
+ }
+ if (rem) {
+ memcpy(ctx.data, in, rem);
+ ctx.data[rem++] = 1; /* pad bit */
+ while (rem < POLY1305_BLOCK_SIZE)
+ ctx.data[rem++] = 0;
+ poly1305_blocks_generic(ctx.opaque, ctx.data, POLY1305_BLOCK_SIZE, 0);
+ }
+
+ poly1305_emit_generic(ctx.opaque, out, ctx.nonce);
+}
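
Since this wrapper implements the standard one-shot Poly1305 MAC, it can be sanity-checked against the RFC 8439 test vector (34-byte message "Cryptographic Forum Research Group"). A minimal userspace check, assuming the same kernel-header shims as above:

/* Minimal RFC 8439 sanity check for poly1305_donna32(); key, message,
 * and tag are the vector from RFC 8439 section 2.5.2. */
#include <stdio.h>
#include <string.h>

void poly1305_donna32(unsigned char *out, const unsigned char *in,
		      unsigned long long inlen, const unsigned char *k);

int main(void)
{
	static const unsigned char key[32] = {
		0x85, 0xd6, 0xbe, 0x78, 0x57, 0x55, 0x6d, 0x33,
		0x7f, 0x44, 0x52, 0xfe, 0x42, 0xd5, 0x06, 0xa8,
		0x01, 0x03, 0x80, 0x8a, 0xfb, 0x0d, 0xb2, 0xfd,
		0x4a, 0xbf, 0xf6, 0xaf, 0x41, 0x49, 0xf5, 0x1b
	};
	static const unsigned char tag[16] = {
		0xa8, 0x06, 0x1d, 0xc1, 0x30, 0x51, 0x36, 0xc6,
		0xc2, 0x2b, 0x8b, 0xaf, 0x0c, 0x01, 0x27, 0xa9
	};
	const unsigned char msg[] = "Cryptographic Forum Research Group";
	unsigned char mac[16];

	poly1305_donna32(mac, msg, sizeof(msg) - 1, key);
	puts(memcmp(mac, tag, sizeof(tag)) ? "FAIL" : "OK");
	return 0;
}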
diff --git a/poly1305-donna64.c b/poly1305-donna64.c
new file mode 100644
index 0000000..ca35f5e
--- /dev/null
+++ b/poly1305-donna64.c
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is based in part on Andrew Moon's poly1305-donna, which is in the
+ * public domain.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/unaligned.h>
+
+enum {
+ POLY1305_BLOCK_SIZE = 16,
+ POLY1305_KEY_SIZE = 32,
+ POLY1305_MAC_SIZE = 16
+};
+
+struct poly1305_ctx {
+ u8 opaque[24 * sizeof(u64)];
+ u32 nonce[4];
+ u8 data[POLY1305_BLOCK_SIZE];
+ size_t num;
+} __aligned(8);
+
+
+typedef __uint128_t u128;
+
+struct poly1305_internal {
+ u64 r[3];
+ u64 h[3];
+};
+
+static void poly1305_init_generic(void *ctx, const u8 key[16])
+{
+ struct poly1305_internal *st = (struct poly1305_internal *)ctx;
+ u64 t0, t1;
+
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ t0 = get_unaligned_le64(&key[0]);
+ t1 = get_unaligned_le64(&key[8]);
+
+ /* wiped after finalization */
+	st->r[0] = t0 & 0xffc0fffffff;
+	st->r[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffff;
+	st->r[2] = (t1 >> 24) & 0x00ffffffc0f;
+
+ /* h = 0 */
+ st->h[0] = 0;
+ st->h[1] = 0;
+ st->h[2] = 0;
+}
+
+static void poly1305_blocks_generic(void *ctx, const u8 *input, size_t len,
+ const u32 padbit)
+{
+ struct poly1305_internal *st = (struct poly1305_internal *)ctx;
+ const u64 hibit = padbit ? (1ULL << 40) : 0;
+ u64 r0, r1, r2;
+ u64 s1, s2;
+ u64 h0, h1, h2;
+ u64 c;
+ u128 d0, d1, d2, d;
+
+ r0 = st->r[0];
+ r1 = st->r[1];
+ r2 = st->r[2];
+
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+
+ s1 = r1 * (5 << 2);
+ s2 = r2 * (5 << 2);
+
+ while (len >= POLY1305_BLOCK_SIZE) {
+ u64 t0, t1;
+
+ /* h += m[i] */
+ t0 = get_unaligned_le64(&input[0]);
+ t1 = get_unaligned_le64(&input[8]);
+
+		h0 += t0 & 0xfffffffffff;
+		h1 += ((t0 >> 44) | (t1 << 20)) & 0xfffffffffff;
+		h2 += ((t1 >> 24) & 0x3ffffffffff) | hibit;
+
+ /* h *= r */
+ d0 = ((u128)h0 * r0);
+ d = ((u128)h1 * s2);
+ d0 += d;
+ d = ((u128)h2 * s1);
+ d0 += d;
+ d1 = ((u128)h0 * r1);
+ d = ((u128)h1 * r0);
+ d1 += d;
+ d = ((u128)h2 * s2);
+ d1 += d;
+ d2 = ((u128)h0 * r2);
+ d = ((u128)h1 * r1);
+ d2 += d;
+ d = ((u128)h2 * r0);
+ d2 += d;
+
+ /* (partial) h %= p */
+ c = (u64)(d0 >> 44);
+ h0 = (u64)d0 & 0xfffffffffff;
+ d1 += c;
+ c = (u64)(d1 >> 44);
+ h1 = (u64)d1 & 0xfffffffffff;
+ d2 += c;
+ c = (u64)(d2 >> 42);
+ h2 = (u64)d2 & 0x3ffffffffff;
+ h0 += c * 5;
+ c = (h0 >> 44);
+ h0 = h0 & 0xfffffffffff;
+ h1 += c;
+
+ input += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ }
+
+ st->h[0] = h0;
+ st->h[1] = h1;
+ st->h[2] = h2;
+}
+
+static void poly1305_emit_generic(void *ctx, u8 mac[16], const u32 nonce[4])
+{
+ struct poly1305_internal *st = (struct poly1305_internal *)ctx;
+ u64 h0, h1, h2, c;
+ u64 g0, g1, g2;
+ u64 t0, t1;
+
+ /* fully carry h */
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+
+ c = (h1 >> 44);
+ h1 &= 0xfffffffffff;
+ h2 += c;
+ c = (h2 >> 42);
+ h2 &= 0x3ffffffffff;
+ h0 += c * 5;
+ c = (h0 >> 44);
+ h0 &= 0xfffffffffff;
+ h1 += c;
+ c = (h1 >> 44);
+ h1 &= 0xfffffffffff;
+ h2 += c;
+ c = (h2 >> 42);
+ h2 &= 0x3ffffffffff;
+ h0 += c * 5;
+ c = (h0 >> 44);
+ h0 &= 0xfffffffffff;
+ h1 += c;
+
+ /* compute h + -p */
+ g0 = h0 + 5;
+ c = (g0 >> 44);
+ g0 &= 0xfffffffffff;
+ g1 = h1 + c;
+ c = (g1 >> 44);
+ g1 &= 0xfffffffffff;
+ g2 = h2 + c - (1ULL << 42);
+
+ /* select h if h < p, or h + -p if h >= p */
+ c = (g2 >> ((sizeof(u64) * 8) - 1)) - 1;
+ g0 &= c;
+ g1 &= c;
+ g2 &= c;
+ c = ~c;
+ h0 = (h0 & c) | g0;
+ h1 = (h1 & c) | g1;
+ h2 = (h2 & c) | g2;
+
+ /* h = (h + nonce) */
+ t0 = ((u64)nonce[1] << 32) | nonce[0];
+ t1 = ((u64)nonce[3] << 32) | nonce[2];
+
+	h0 += t0 & 0xfffffffffff;
+ c = (h0 >> 44);
+ h0 &= 0xfffffffffff;
+ h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffff) + c;
+ c = (h1 >> 44);
+ h1 &= 0xfffffffffff;
+	h2 += ((t1 >> 24) & 0x3ffffffffff) + c;
+ h2 &= 0x3ffffffffff;
+
+ /* mac = h % (2^128) */
+ h0 = (h0 | (h1 << 44));
+ h1 = ((h1 >> 20) | (h2 << 24));
+
+ put_unaligned_le64(h0, &mac[0]);
+ put_unaligned_le64(h1, &mac[8]);
+}
+
+void poly1305_donna64(unsigned char *out, const unsigned char *in, unsigned long long inlen, const unsigned char *k)
+{
+ size_t rem;
+ struct poly1305_ctx ctx;
+ ctx.nonce[0] = get_unaligned_le32(&k[16]);
+ ctx.nonce[1] = get_unaligned_le32(&k[20]);
+ ctx.nonce[2] = get_unaligned_le32(&k[24]);
+ ctx.nonce[3] = get_unaligned_le32(&k[28]);
+ poly1305_init_generic(ctx.opaque, k);
+ ctx.num = 0;
+
+ rem = inlen % POLY1305_BLOCK_SIZE;
+ inlen -= rem;
+
+ if (inlen >= POLY1305_BLOCK_SIZE) {
+ poly1305_blocks_generic(ctx.opaque, in, inlen, 1);
+ in += inlen;
+ }
+ if (rem) {
+ memcpy(ctx.data, in, rem);
+ ctx.data[rem++] = 1; /* pad bit */
+ while (rem < POLY1305_BLOCK_SIZE)
+ ctx.data[rem++] = 0;
+ poly1305_blocks_generic(ctx.opaque, ctx.data, POLY1305_BLOCK_SIZE, 0);
+ }
+
+ poly1305_emit_generic(ctx.opaque, out, ctx.nonce);
+}
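
donna64 keeps the 130-bit accumulator in three limbs of 44, 44, and 42 bits instead of donna32's five 26-bit limbs, so the h *= r step takes nine 64x64->128-bit multiplies per block rather than twenty-five 32x32->64-bit ones. A standalone sketch showing that the 44/44/42 split used in poly1305_blocks_generic() and poly1305_emit_generic() round-trips a 128-bit value:

/* Round-trip check for the 44/44/42-bit limb split used above: two
 * little-endian 64-bit words t0/t1 become limbs h0/h1/h2, and the
 * recombination from poly1305_emit_generic() restores them exactly. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t t0 = 0x0123456789abcdefULL;	/* arbitrary example words */
	uint64_t t1 = 0xfedcba9876543210ULL;

	uint64_t h0 = t0 & 0xfffffffffff;				/* bits 0..43   */
	uint64_t h1 = ((t0 >> 44) | (t1 << 20)) & 0xfffffffffff;	/* bits 44..87  */
	uint64_t h2 = (t1 >> 24) & 0x3ffffffffff;			/* bits 88..127 */

	uint64_t lo = h0 | (h1 << 44);		/* bits 0..63   */
	uint64_t hi = (h1 >> 20) | (h2 << 24);	/* bits 64..127 */

	printf("%s\n", (lo == t0 && hi == t1) ? "round-trip OK" : "FAIL");
	return 0;
}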
diff --git a/poly1305-openssl.c b/poly1305-openssl.c
index 30fe8bf..d7c64a7 100644
--- a/poly1305-openssl.c
+++ b/poly1305-openssl.c
@@ -9,6 +9,7 @@
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/intel-family.h>
+#include <asm/unaligned.h>
asmlinkage void poly1305_init_x86_64(void *ctx, const u8 key[16]);
asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp, size_t len, u32 padbit);
@@ -31,11 +32,6 @@ struct poly1305_ctx {
size_t num;
} __aligned(8);
-static inline u32 le32_to_cpuvp(const void *p)
-{
- return le32_to_cpup(p);
-}
-
struct poly1305_internal {
u32 h[5];
u32 r[4];
@@ -53,15 +49,17 @@ static void poly1305_init_generic(void *ctx, const u8 key[16])
st->h[4] = 0;
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
- st->r[0] = le32_to_cpuvp(&key[ 0]) & 0x0fffffff;
- st->r[1] = le32_to_cpuvp(&key[ 4]) & 0x0ffffffc;
- st->r[2] = le32_to_cpuvp(&key[ 8]) & 0x0ffffffc;
- st->r[3] = le32_to_cpuvp(&key[12]) & 0x0ffffffc;
+ st->r[0] = get_unaligned_le32(&key[0]) & 0x0fffffff;
+ st->r[1] = get_unaligned_le32(&key[4]) & 0x0ffffffc;
+ st->r[2] = get_unaligned_le32(&key[8]) & 0x0ffffffc;
+ st->r[3] = get_unaligned_le32(&key[12]) & 0x0ffffffc;
}
-static void poly1305_blocks_generic(void *ctx, const u8 *inp, size_t len, u32 padbit)
+static void poly1305_blocks_generic(void *ctx, const u8 *inp, size_t len,
+ const u32 padbit)
{
-#define CONSTANT_TIME_CARRY(a,b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
+#define CONSTANT_TIME_CARRY(a, b) \
+ ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
struct poly1305_internal *st = (struct poly1305_internal *)ctx;
u32 r0, r1, r2, r3;
u32 s1, s2, s3;
@@ -85,10 +83,10 @@ static void poly1305_blocks_generic(void *ctx, const u8 *inp, size_t len, u32 pa
while (len >= POLY1305_BLOCK_SIZE) {
/* h += m[i] */
- h0 = (u32)(d0 = (u64)h0 + le32_to_cpuvp(inp + 0));
- h1 = (u32)(d1 = (u64)h1 + (d0 >> 32) + le32_to_cpuvp(inp + 4));
- h2 = (u32)(d2 = (u64)h2 + (d1 >> 32) + le32_to_cpuvp(inp + 8));
- h3 = (u32)(d3 = (u64)h3 + (d2 >> 32) + le32_to_cpuvp(inp + 12));
+ h0 = (u32)(d0 = (u64)h0 + get_unaligned_le32(&inp[0]));
+ h1 = (u32)(d1 = (u64)h1 + (d0 >> 32) + get_unaligned_le32(&inp[4]));
+ h2 = (u32)(d2 = (u64)h2 + (d1 >> 32) + get_unaligned_le32(&inp[8]));
+ h3 = (u32)(d3 = (u64)h3 + (d2 >> 32) + get_unaligned_le32(&inp[12]));
h4 += (u32)(d3 >> 32) + padbit;
/* h *= r "%" p, where "%" stands for "partial remainder" */
@@ -124,10 +122,10 @@ static void poly1305_blocks_generic(void *ctx, const u8 *inp, size_t len, u32 pa
c = (h4 >> 2) + (h4 & ~3U);
h4 &= 3;
h0 += c;
- h1 += (c = CONSTANT_TIME_CARRY(h0,c));
- h2 += (c = CONSTANT_TIME_CARRY(h1,c));
- h3 += (c = CONSTANT_TIME_CARRY(h2,c));
- h4 += CONSTANT_TIME_CARRY(h3,c);
+ h1 += (c = CONSTANT_TIME_CARRY(h0, c));
+ h2 += (c = CONSTANT_TIME_CARRY(h1, c));
+ h3 += (c = CONSTANT_TIME_CARRY(h2, c));
+ h4 += CONSTANT_TIME_CARRY(h3, c);
/*
* Occasional overflows to 3rd bit of h4 are taken care of
* "naturally". If after this point we end up at the top of
@@ -153,7 +151,6 @@ static void poly1305_blocks_generic(void *ctx, const u8 *inp, size_t len, u32 pa
static void poly1305_emit_generic(void *ctx, u8 mac[16], const u32 nonce[4])
{
struct poly1305_internal *st = (struct poly1305_internal *)ctx;
- __le32 *omac = (__force __le32 *)mac;
u32 h0, h1, h2, h3, h4;
u32 g0, g1, g2, g3, g4;
u64 t;
@@ -190,20 +187,21 @@ static void poly1305_emit_generic(void *ctx, u8 mac[16], const u32 nonce[4])
h2 = (u32)(t = (u64)h2 + (t >> 32) + nonce[2]);
h3 = (u32)(t = (u64)h3 + (t >> 32) + nonce[3]);
- omac[0] = cpu_to_le32(h0);
- omac[1] = cpu_to_le32(h1);
- omac[2] = cpu_to_le32(h2);
- omac[3] = cpu_to_le32(h3);
+ put_unaligned_le32(h0, &mac[0]);
+ put_unaligned_le32(h1, &mac[4]);
+ put_unaligned_le32(h2, &mac[8]);
+ put_unaligned_le32(h3, &mac[12]);
}
+
void poly1305_ossl_c(unsigned char *out, const unsigned char *in, unsigned long long inlen, const unsigned char *k)
{
size_t rem;
struct poly1305_ctx ctx;
- ctx.nonce[0] = le32_to_cpuvp(&k[16]);
- ctx.nonce[1] = le32_to_cpuvp(&k[20]);
- ctx.nonce[2] = le32_to_cpuvp(&k[24]);
- ctx.nonce[3] = le32_to_cpuvp(&k[28]);
+ ctx.nonce[0] = get_unaligned_le32(&k[16]);
+ ctx.nonce[1] = get_unaligned_le32(&k[20]);
+ ctx.nonce[2] = get_unaligned_le32(&k[24]);
+ ctx.nonce[3] = get_unaligned_le32(&k[28]);
poly1305_init_generic(ctx.opaque, k);
ctx.num = 0;
@@ -229,10 +227,10 @@ void poly1305_ossl_amd64(unsigned char *out, const unsigned char *in, unsigned l
{
size_t rem;
struct poly1305_ctx ctx;
- ctx.nonce[0] = le32_to_cpuvp(&k[16]);
- ctx.nonce[1] = le32_to_cpuvp(&k[20]);
- ctx.nonce[2] = le32_to_cpuvp(&k[24]);
- ctx.nonce[3] = le32_to_cpuvp(&k[28]);
+ ctx.nonce[0] = get_unaligned_le32(&k[16]);
+ ctx.nonce[1] = get_unaligned_le32(&k[20]);
+ ctx.nonce[2] = get_unaligned_le32(&k[24]);
+ ctx.nonce[3] = get_unaligned_le32(&k[28]);
poly1305_init_x86_64(ctx.opaque, k);
ctx.num = 0;
@@ -258,10 +256,10 @@ void poly1305_ossl_avx(unsigned char *out, const unsigned char *in, unsigned lon
{
size_t rem;
struct poly1305_ctx ctx;
- ctx.nonce[0] = le32_to_cpuvp(&k[16]);
- ctx.nonce[1] = le32_to_cpuvp(&k[20]);
- ctx.nonce[2] = le32_to_cpuvp(&k[24]);
- ctx.nonce[3] = le32_to_cpuvp(&k[28]);
+ ctx.nonce[0] = get_unaligned_le32(&k[16]);
+ ctx.nonce[1] = get_unaligned_le32(&k[20]);
+ ctx.nonce[2] = get_unaligned_le32(&k[24]);
+ ctx.nonce[3] = get_unaligned_le32(&k[28]);
poly1305_init_x86_64(ctx.opaque, k);
ctx.num = 0;
@@ -287,10 +285,10 @@ void poly1305_ossl_avx2(unsigned char *out, const unsigned char *in, unsigned lo
{
size_t rem;
struct poly1305_ctx ctx;
- ctx.nonce[0] = le32_to_cpuvp(&k[16]);
- ctx.nonce[1] = le32_to_cpuvp(&k[20]);
- ctx.nonce[2] = le32_to_cpuvp(&k[24]);
- ctx.nonce[3] = le32_to_cpuvp(&k[28]);
+ ctx.nonce[0] = get_unaligned_le32(&k[16]);
+ ctx.nonce[1] = get_unaligned_le32(&k[20]);
+ ctx.nonce[2] = get_unaligned_le32(&k[24]);
+ ctx.nonce[3] = get_unaligned_le32(&k[28]);
poly1305_init_x86_64(ctx.opaque, k);
ctx.num = 0;
@@ -316,10 +314,10 @@ void poly1305_ossl_avx512(unsigned char *out, const unsigned char *in, unsigned
{
size_t rem;
struct poly1305_ctx ctx;
- ctx.nonce[0] = le32_to_cpuvp(&k[16]);
- ctx.nonce[1] = le32_to_cpuvp(&k[20]);
- ctx.nonce[2] = le32_to_cpuvp(&k[24]);
- ctx.nonce[3] = le32_to_cpuvp(&k[28]);
+ ctx.nonce[0] = get_unaligned_le32(&k[16]);
+ ctx.nonce[1] = get_unaligned_le32(&k[20]);
+ ctx.nonce[2] = get_unaligned_le32(&k[24]);
+ ctx.nonce[3] = get_unaligned_le32(&k[28]);
poly1305_init_x86_64(ctx.opaque, k);
ctx.num = 0;
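
All of the wrappers in this commit share one tail convention: whole blocks are passed to the blocks routine with padbit = 1, which appends the implicit 2^128 high bit to every block, while a final partial block gets an explicit 0x01 pad byte plus zeros and padbit = 0, since the pad bit is then already encoded in the data. Extracted as a standalone sketch, with blocks() standing in for any of the poly1305_blocks_* routines:

/* Sketch of the shared tail handling: whole blocks carry the implicit
 * 2^128 pad (padbit = 1); a trailing partial block is padded with an
 * explicit 0x01 byte plus zeros and processed with padbit = 0. The
 * blocks() prototype mirrors poly1305_blocks_generic() above. */
#include <stddef.h>
#include <string.h>

#define BLOCK 16

void blocks(void *ctx, const unsigned char *in, size_t len, unsigned padbit);

static void absorb(void *ctx, const unsigned char *in, size_t inlen)
{
	unsigned char last[BLOCK];
	size_t rem = inlen % BLOCK;

	if (inlen - rem)
		blocks(ctx, in, inlen - rem, 1);	/* full blocks */
	if (rem) {
		memcpy(last, in + (inlen - rem), rem);
		last[rem] = 1;				/* explicit pad bit */
		memset(last + rem + 1, 0, BLOCK - rem - 1);
		blocks(ctx, last, BLOCK, 0);
	}
}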