Diffstat (limited to 'src/crypto/zinc/poly1305/poly1305-x86_64-glue.h')
 src/crypto/zinc/poly1305/poly1305-x86_64-glue.h | 9 +++++++++
 1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/src/crypto/zinc/poly1305/poly1305-x86_64-glue.h b/src/crypto/zinc/poly1305/poly1305-x86_64-glue.h
index b1248e8..585b579 100644
--- a/src/crypto/zinc/poly1305/poly1305-x86_64-glue.h
+++ b/src/crypto/zinc/poly1305/poly1305-x86_64-glue.h
@@ -67,6 +67,15 @@ struct poly1305_arch_internal {
 	struct { u32 r2, r1, r4, r3; } rn[9];
 };
 
+/* The AVX code uses base 2^26, while the scalar code uses base 2^64. If we hit
+ * the unfortunate situation of using AVX and then having to go back to scalar
+ * -- because the user is silly and has called the update function from two
+ * separate contexts -- then we need to convert back to the original base before
+ * proceeding. It is possible to reason that the initial reduction below is
+ * sufficient given the implementation invariants. However, for the avoidance
+ * of doubt, and because this is not performance critical, we do the full
+ * reduction anyway.
+ */
 static void convert_to_base2_64(void *ctx)
 {
 	struct poly1305_arch_internal *state = ctx;
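The hunk ends at the top of convert_to_base2_64(), so the repacking that the
new comment describes is not visible here. As a rough sketch of the idea only,
and not the code from this file, the following standalone C routine folds five
base-2^26 limbs h[0..4] into three base-2^64 words hs[0..2]. The function name
repack_base2_26_to_base2_64, the plain stdint types, and the bound assumed on
the input limbs are all inventions for the example; the full reduction modulo
2^130 - 5 that the comment insists on is deliberately omitted.

#include <stdint.h>

/* Sketch only: repack a Poly1305 accumulator from the five 26-bit limbs
 * used by the AVX code into the three 64-bit words used by the scalar
 * code. 5 * 26 = 130 bits, so the top word carries just the high bits.
 * Assumes each input limb exceeds 26 bits by at most a few carry bits,
 * so the carry additions below cannot overflow a uint32_t.
 */
static void repack_base2_26_to_base2_64(const uint32_t h[5], uint64_t hs[3])
{
	uint32_t t0 = h[0], t1 = h[1], t2 = h[2], t3 = h[3], t4 = h[4];
	uint32_t cy;

	/* Normalize: push carries up so limbs 0..3 fit in exactly 26 bits. */
	cy = t0 >> 26; t0 &= 0x3ffffff; t1 += cy;
	cy = t1 >> 26; t1 &= 0x3ffffff; t2 += cy;
	cy = t2 >> 26; t2 &= 0x3ffffff; t3 += cy;
	cy = t3 >> 26; t3 &= 0x3ffffff; t4 += cy;

	/* Limb i sits at bit offset 26*i, so the 64-bit word boundaries
	 * fall inside limb 2 (bit 64 = 52 + 12) and limb 4 (bit
	 * 128 = 104 + 24); those two limbs are split across words.
	 */
	hs[0] = ((uint64_t)t2 << 52) | ((uint64_t)t1 << 26) | t0;
	hs[1] = ((uint64_t)t4 << 40) | ((uint64_t)t3 << 14) | (t2 >> 12);
	hs[2] = t4 >> 24;
}

The repacking changes only the representation, not the value; presumably the
real function then performs the full reduction mentioned in the comment and
clears the state's base-2^26 flag so that later scalar calls do not reconvert.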