From 26eeaa0bcb54e40cda62aeed528e5d50138fd40b Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld" <Jason@zx2c4.com>
Date: Sun, 11 Dec 2016 14:37:45 +0100
Subject: crypto: use kernel's bitops functions

---
 src/crypto/blake2s.c   | 13 ++++---------
 src/crypto/siphash24.c | 20 +++++++++++---------
 2 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/src/crypto/blake2s.c b/src/crypto/blake2s.c
index 7b3e169..0d3281e 100644
--- a/src/crypto/blake2s.c
+++ b/src/crypto/blake2s.c
@@ -40,11 +40,6 @@ static const u8 blake2s_sigma[10][16] = {
 	{10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0},
 };
 
-static inline u32 rotr32(const u32 w, const u8 c)
-{
-	return (w >> c) | (w << (32 - c));
-}
-
 static inline u32 le32_to_cpuvp(const void *p)
 {
 	return le32_to_cpup(p);
@@ -135,13 +130,13 @@ static inline void blake2s_compress(struct blake2s_state *state, const u8 block[
 #define G(r,i,a,b,c,d) \
 	do { \
 		a = a + b + m[blake2s_sigma[r][2 * i + 0]]; \
-		d = rotr32(d ^ a, 16); \
+		d = ror32(d ^ a, 16); \
 		c = c + d; \
-		b = rotr32(b ^ c, 12); \
+		b = ror32(b ^ c, 12); \
 		a = a + b + m[blake2s_sigma[r][2 * i + 1]]; \
-		d = rotr32(d ^ a, 8); \
+		d = ror32(d ^ a, 8); \
 		c = c + d; \
-		b = rotr32(b ^ c, 7); \
+		b = ror32(b ^ c, 7); \
 	} while(0)
 #define ROUND(r) \
 	do { \
diff --git a/src/crypto/siphash24.c b/src/crypto/siphash24.c
index d841894..c9d4127 100644
--- a/src/crypto/siphash24.c
+++ b/src/crypto/siphash24.c
@@ -4,15 +4,17 @@
 
 #include <linux/kernel.h>
 
-#define ROTL(x,b) (u64)(((x) << (b)) | ((x) >> (64 - (b))))
-#define U8TO64(p) le64_to_cpu(*(__le64 *)(p))
+static inline u64 le64_to_cpuvp(const void *p)
+{
+	return le64_to_cpup(p);
+}
 
 #define SIPROUND \
 	do { \
-	v0 += v1; v1 = ROTL(v1, 13); v1 ^= v0; v0 = ROTL(v0, 32); \
-	v2 += v3; v3 = ROTL(v3, 16); v3 ^= v2; \
-	v0 += v3; v3 = ROTL(v3, 21); v3 ^= v0; \
-	v2 += v1; v1 = ROTL(v1, 17); v1 ^= v2; v2 = ROTL(v2, 32); \
+	v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
+	v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
+	v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
+	v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
 	} while(0)
 
 __attribute__((optimize("unroll-loops")))
@@ -23,8 +25,8 @@ u64 siphash24(const u8 *data, size_t len, const u8 key[SIPHASH24_KEY_LEN])
 	u64 v2 = 0x6c7967656e657261ULL;
 	u64 v3 = 0x7465646279746573ULL;
 	u64 b;
-	u64 k0 = U8TO64(key);
-	u64 k1 = U8TO64(key + sizeof(u64));
+	u64 k0 = le64_to_cpuvp(key);
+	u64 k1 = le64_to_cpuvp(key + sizeof(u64));
 	u64 m;
 	const u8 *end = data + len - (len % sizeof(u64));
 	const u8 left = len & (sizeof(u64) - 1);
@@ -34,7 +36,7 @@ u64 siphash24(const u8 *data, size_t len, const u8 key[SIPHASH24_KEY_LEN])
 	v1 ^= k1;
 	v0 ^= k0;
 	for (; data != end; data += sizeof(u64)) {
-		m = U8TO64(data);
+		m = le64_to_cpuvp(data);
 		v3 ^= m;
 		SIPROUND;
 		SIPROUND;
-- 
cgit v1.2.3-59-g8ed1b
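
Note (not part of the patch): ror32() and rol64() used above are the kernel's
rotation helpers from <linux/bitops.h>. As a quick sanity check that they
behave like the removed rotr32() and ROTL(), the following standalone
userspace sketch mirrors their 2016-era kernel definitions and spot-checks
the rotation counts the patch touches. The file name and the stand-in
definitions are assumptions for illustration only, not part of WireGuard.

/* rot-check.c (hypothetical, illustration only): userspace stand-ins for
 * the kernel's ror32()/rol64(), mirroring their 2016-era definitions in
 * <linux/bitops.h>, so the macro-to-helper substitution above can be
 * verified outside the kernel.
 * Build: cc -O2 -o rot-check rot-check.c && ./rot-check */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Rotate a 32-bit value right by c bits; matches the removed rotr32()
 * for 0 < c < 32. */
static inline uint32_t ror32(uint32_t w, unsigned int c)
{
	return (w >> c) | (w << (32 - c));
}

/* Rotate a 64-bit value left by c bits; matches the removed ROTL()
 * for 0 < c < 64. */
static inline uint64_t rol64(uint64_t w, unsigned int c)
{
	return (w << c) | (w >> (64 - c));
}

int main(void)
{
	/* Rotation counts actually used above: 16, 12, 8, 7 in the BLAKE2s
	 * G macro; 13, 32, 16, 21, 17 in SIPROUND. Spot-check a few. */
	assert(ror32(0xdeadbeefu, 16) == 0xbeefdeadu);
	assert(ror32(0x80000001u, 7) == 0x03000000u);
	assert(rol64(0x0123456789abcdefull, 32) == 0x89abcdef01234567ull);
	assert(rol64(0x8000000000000001ull, 13) == 0x3000ull);
	puts("rotation helpers agree");
	return 0;
}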