author    Eric Biggers <ebiggers@google.com>    2018-09-11 20:05:10 -0700
committer Herbert Xu <herbert@gondor.apana.org.au>    2018-09-21 13:24:50 +0800
commit    a5e9f557098e54af44ade5d501379be18435bfbf (patch)
tree      4410647aaa269fd9a4e304851b06f83251b12853 /drivers/char
parent    crypto: xts - Drop use of auxiliary buffer (diff)
crypto: chacha20 - Fix chacha20_block() keystream alignment (again)
In commit 9f480faec58c ("crypto: chacha20 - Fix keystream alignment for
chacha20_block()"), I had missed that chacha20_block() can be called
directly on the buffer passed to get_random_bytes(), which can have any
alignment.  So, while my commit didn't break anything, it didn't fully
solve the alignment problems.

Revert my solution and just update chacha20_block() to use
put_unaligned_le32(), so the output buffer need not be aligned.  This is
simpler, and on many CPUs it's the same speed.

But, I kept the 'tmp' buffers in extract_crng_user() and
_get_random_bytes() 4-byte aligned, since that alignment is actually
needed for _crng_backtrack_protect() too.

Reported-by: Stephan Müller <smueller@chronox.de>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
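For reference, the serialization change the message describes lands in
chacha20_block() itself (in lib/chacha20.c, outside the drivers/char
diffstat shown here).  The following is only a minimal userspace sketch of
the idea, with put_unaligned_le32() emulated via memcpy() rather than the
kernel's <asm/unaligned.h> helper, and with hypothetical function names:

	#include <stdint.h>
	#include <string.h>

	/* Stand-in for the kernel's put_unaligned_le32(): a byte-wise
	 * little-endian store that makes no assumption about the
	 * alignment of 'p'. */
	static inline void put_unaligned_le32(uint32_t val, void *p)
	{
		uint8_t b[4] = {
			(uint8_t)val,
			(uint8_t)(val >> 8),
			(uint8_t)(val >> 16),
			(uint8_t)(val >> 24),
		};

		memcpy(p, b, sizeof(b));
	}

	/* Serialize one 64-byte ChaCha20 keystream block.  Because each
	 * word is written with put_unaligned_le32(), 'stream' may have any
	 * alignment, so the block function can write straight into the
	 * caller's buffer (e.g. the one passed to get_random_bytes()). */
	static void chacha20_serialize_sketch(const uint32_t x[16],
					      uint8_t stream[64])
	{
		for (int i = 0; i < 16; i++)
			put_unaligned_le32(x[i], &stream[i * sizeof(uint32_t)]);
	}

The complementary requirement, that the 'tmp' buffers in random.c stay
4-byte aligned, is sketched after the diff below.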
Diffstat (limited to 'drivers/char')
-rw-r--r--	drivers/char/random.c	24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index bf5f99fc36f1..d22d967c50f0 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -433,9 +433,9 @@ static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
- __u32 out[CHACHA20_BLOCK_WORDS]);
+ __u8 out[CHACHA20_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u32 tmp[CHACHA20_BLOCK_WORDS], int used);
+ __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -921,7 +921,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
unsigned long flags;
int i, num;
union {
- __u32 block[CHACHA20_BLOCK_WORDS];
+ __u8 block[CHACHA20_BLOCK_SIZE];
__u32 key[8];
} buf;
@@ -968,7 +968,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
static void _extract_crng(struct crng_state *crng,
- __u32 out[CHACHA20_BLOCK_WORDS])
+ __u8 out[CHACHA20_BLOCK_SIZE])
{
unsigned long v, flags;
@@ -985,7 +985,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
+static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
{
struct crng_state *crng = NULL;
@@ -1003,7 +1003,7 @@ static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u32 tmp[CHACHA20_BLOCK_WORDS], int used)
+ __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
unsigned long flags;
__u32 *s, *d;
@@ -1015,14 +1015,14 @@ static void _crng_backtrack_protect(struct crng_state *crng,
used = 0;
}
spin_lock_irqsave(&crng->lock, flags);
- s = &tmp[used / sizeof(__u32)];
+ s = (__u32 *) &tmp[used];
d = &crng->state[4];
for (i=0; i < 8; i++)
*d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
+static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
struct crng_state *crng = NULL;
@@ -1038,7 +1038,7 @@ static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
- __u32 tmp[CHACHA20_BLOCK_WORDS];
+ __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1617,7 +1617,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u32 tmp[CHACHA20_BLOCK_WORDS];
+ __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
trace_get_random_bytes(nbytes, _RET_IP_);
@@ -2243,7 +2243,7 @@ u64 get_random_u64(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((__u32 *)batch->entropy_u64);
+ extract_crng((u8 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
@@ -2273,7 +2273,7 @@ u32 get_random_u32(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng(batch->entropy_u32);
+ extract_crng((u8 *)batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
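The reason the 'tmp' buffers keep an explicit __aligned(4) is visible in the
_crng_backtrack_protect() hunk above: even though 'tmp' is now declared as
bytes, it is reinterpreted as a __u32 pointer at &tmp[used] and read as
32-bit words.  A minimal userspace sketch of that access pattern (using
hypothetical names and standard types, not the kernel's) illustrates why the
byte buffer must remain at least 4-byte aligned:

	#include <stdint.h>
	#include <stddef.h>

	/* Sketch of the word-wise XOR in _crng_backtrack_protect(): eight
	 * 32-bit words of leftover keystream, starting at offset 'used',
	 * are folded into the key portion of the CRNG state.  The cast
	 * below is only safe if 'tmp' is at least 4-byte aligned and
	 * 'used' is a multiple of 4. */
	static void backtrack_protect_sketch(uint32_t state[16],
					     const uint8_t tmp[64],
					     size_t used)
	{
		const uint32_t *s = (const uint32_t *)&tmp[used];
		uint32_t *d = &state[4];	/* key words of the state */

		for (int i = 0; i < 8; i++)
			*d++ ^= *s++;
	}

This is why extract_crng_user() and _get_random_bytes() keep their stack
buffers aligned even though chacha20_block() itself no longer requires it.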