summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorderaadt <deraadt@openbsd.org>2011-01-07 04:56:51 +0000
committerderaadt <deraadt@openbsd.org>2011-01-07 04:56:51 +0000
commit97630e02531f904422be552e3a87f2cf5be89fe5 (patch)
tree3bdcd87ccadc55b6ed550572d82bd52c2089005d
parentsince randomwrite() also has no blocking operations at all, we need to (diff)
downloadwireguard-openbsd-97630e02531f904422be552e3a87f2cf5be89fe5.tar.xz
wireguard-openbsd-97630e02531f904422be552e3a87f2cf5be89fe5.zip
substantial rewrite. put a very thin mutex at the entropy-collection
side so that entropy events can come in from any kernel context. place a 2nd very thin mutex at the call-down path as well, so that any context can request random data. in the middle, meet with a bcopy that has no mutex, but copying unlocked data is actually a benefit. move the pool->MD5->RC4init sequence into a workq driven from a timeout, so that we can do all the heavy work without any mutex held or IPL; only grab the 2nd mutex to swap to a new RC4 state. (this workq design from tedu) ok dlg tedu
-rw-r--r--sys/dev/rnd.c555
-rw-r--r--sys/dev/rndvar.h4
2 files changed, 230 insertions, 329 deletions
diff --git a/sys/dev/rnd.c b/sys/dev/rnd.c
index 85982866df1..17e8cd05409 100644
--- a/sys/dev/rnd.c
+++ b/sys/dev/rnd.c
@@ -1,8 +1,9 @@
-/* $OpenBSD: rnd.c,v 1.125 2011/01/07 04:38:00 deraadt Exp $ */
+/* $OpenBSD: rnd.c,v 1.126 2011/01/07 04:56:51 deraadt Exp $ */
/*
- * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
+ * Copyright (c) 2011 Theo de Raadt.
* Copyright (c) 2008 Damien Miller.
+ * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
* Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.
* All rights reserved.
*
@@ -39,13 +40,6 @@
*/
/*
- * This routine gathers environmental noise from device drivers, etc.,
- * and returns good random numbers, suitable for cryptographic or
- * other use.
- *
- * Theory of operation
- * ===================
- *
* Computers are very predictable devices. Hence it is extremely hard
* to produce truly random numbers on a computer --- as opposed to
* pseudo-random numbers, which can be easily generated by using an
@@ -61,23 +55,20 @@
* timings, inter-interrupt timings from some interrupts, and other
* events which are both (a) non-deterministic and (b) hard for an
* outside observer to measure. Randomness from these sources is
- * added to the "entropy pool", which is mixed using a CRC-like function.
- * This is not cryptographically strong, but it is adequate assuming
- * the randomness is not chosen maliciously, and it is fast enough that
- * the overhead of doing it on every interrupt is very reasonable.
- * As random bytes are mixed into the entropy pool, the routines keep
- * an *estimate* of how many bits of randomness have been stored into
- * the random number generator's internal state.
- *
- * When random bytes are desired, they are obtained by taking the MD5
- * hash of the content of the entropy pool. The MD5 hash avoids
- * exposing the internal state of the entropy pool. It is believed to
- * be computationally infeasible to derive any useful information
- * about the input of MD5 from its output. Even if it is possible to
- * analyze MD5 in some clever way, as long as the amount of data
- * returned from the generator is less than the inherent entropy in
- * the pool, the output data is totally unpredictable. For this
- * reason, the routine decreases its internal estimate of how many
+ * added to the "rnd states" queue; this is used as much of the
+ * source material which is mixed on occasion using a CRC-like function
+ * into the "entropy pool". This is not cryptographically strong, but
+ * it is adequate assuming the randomness is not chosen maliciously,
+ * and it is very fast because the interrupt-time event is only to add
+ * a small random token to the "rnd states" queue.
+ *
+ * When random bytes are desired, they are obtained by pulling from
+ * the entropy pool and running a MD5 hash. The MD5 hash avoids
+ * exposing the internal state of the entropy pool. Even if it is
+ * possible to analyze MD5 in some clever way, as long as the amount
+ * of data returned from the generator is less than the inherent
+ * entropy in the pool, the output data is totally unpredictable. For
+ * this reason, the routine decreases its internal estimate of how many
* bits of "true randomness" are contained in the entropy pool as it
* outputs random numbers.
*
@@ -96,49 +87,6 @@
* arc4random_buf(), arc4random(), arc4random_uniform(), randomread()
* for the set of /dev/random nodes, and the sysctl kern.arandom.
*
- * Exported interfaces ---- input
- * ==============================
- *
- * The current exported interfaces for gathering environmental noise
- * from the devices are:
- *
- * void add_true_randomness(int data);
- * void add_timer_randomness(int data);
- * void add_mouse_randomness(int mouse_data);
- * void add_net_randomness(int isr);
- * void add_tty_randomness(int c);
- * void add_disk_randomness(int n);
- * void add_audio_randomness(int n);
- * void add_video_randomness(int n);
- *
- * add_true_randomness() uses true random number generators present
- * on some cryptographic and system chipsets. Entropy accounting
- * is not quitable, no timing is done, supplied 32 bits of pure entropy
- * are hashed into the pool plain and blindly, increasing the counter.
- *
- * add_timer_randomness() uses the random driver itselves timing,
- * measuring extract_entropy() and rndioctl() execution times.
- *
- * add_mouse_randomness() uses the mouse interrupt timing, as well as
- * the reported position of the mouse from the hardware.
- *
- * add_net_randomness() times the finishing time of net input.
- *
- * add_tty_randomness() uses the inter-keypress timing, as well as the
- * character as random inputs into the entropy pool.
- *
- * add_disk_randomness() times the finishing time of disk requests as well
- * as feeding both xfer size & time into the entropy pool.
- *
- * add_audio_randomness() times the finishing of audio codec dma
- * requests for both recording and playback, apparently supplies quite
- * a lot of entropy. I'd blame it on low resolution audio clock generators.
- *
- * All of these routines (except for add_true_randomness() of course)
- * try to estimate how many bits of randomness are in a particular
- * randomness source. They do this by keeping track of the first and
- * second order deltas of the event timings.
- *
* Acknowledgements:
* =================
*
@@ -171,6 +119,7 @@
#include <sys/fcntl.h>
#include <sys/timeout.h>
#include <sys/mutex.h>
+#include <sys/workq.h>
#include <sys/msgbuf.h>
#include <crypto/md5.h>
@@ -274,143 +223,45 @@
#error No primitive polynomial available for chosen POOLWORDS
#endif
-static void dequeue_randomness(void *);
-
-/* Master kernel random number pool. */
-struct random_bucket {
- u_int add_ptr;
- u_int entropy_count;
- u_char input_rotate;
- u_int32_t pool[POOLWORDS];
- u_int tmo;
-};
-struct random_bucket random_state;
-struct mutex rndlock;
+struct mutex entropylock = MUTEX_INITIALIZER(IPL_HIGH);
/*
- * This function adds a byte into the entropy pool. It does not
- * update the entropy estimate. The caller must do this if appropriate.
- *
- * The pool is stirred with a polynomial of degree POOLWORDS over GF(2);
- * see POOL_TAP[1-4] above
- *
- * Rotate the input word by a changing number of bits, to help assure
- * that all bits in the entropy get toggled. Otherwise, if the pool
- * is consistently feed small numbers (such as keyboard scan codes)
- * then the upper bits of the entropy pool will frequently remain
- * untouched.
+ * Raw entropy collection from device drivers; at interrupt context or not.
+ * add_*_randomness() provide data which is put into the entropy queue.
+ * Almost completely under the entropylock.
*/
-static void
-add_entropy_words(const u_int32_t *buf, u_int n)
-{
- /* derived from IEEE 802.3 CRC-32 */
- static const u_int32_t twist_table[8] = {
- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278
- };
-
- for (; n--; buf++) {
- u_int32_t w = (*buf << random_state.input_rotate) |
- (*buf >> (32 - random_state.input_rotate));
- u_int i = random_state.add_ptr =
- (random_state.add_ptr - 1) & POOLMASK;
- /*
- * Normally, we add 7 bits of rotation to the pool.
- * At the beginning of the pool, add an extra 7 bits
- * rotation, so that successive passes spread the
- * input bits across the pool evenly.
- */
- random_state.input_rotate =
- (random_state.input_rotate + (i ? 7 : 14)) & 31;
-
- /* XOR pool contents corresponding to polynomial terms */
- w ^= random_state.pool[(i + POOL_TAP1) & POOLMASK] ^
- random_state.pool[(i + POOL_TAP2) & POOLMASK] ^
- random_state.pool[(i + POOL_TAP3) & POOLMASK] ^
- random_state.pool[(i + POOL_TAP4) & POOLMASK] ^
- random_state.pool[(i + 1) & POOLMASK] ^
- random_state.pool[i]; /* + 2^POOLWORDS */
-
- random_state.pool[i] = (w >> 3) ^ twist_table[w & 7];
- }
-}
-
-/*
- * This function extracts randomness from the entropy pool, and
- * returns it in a buffer. This function computes how many remaining
- * bits of entropy are left in the pool, but it does not restrict the
- * number of bytes that are actually obtained.
- */
-static void
-extract_entropy(u_int8_t *buf, int nbytes)
-{
- u_char buffer[16];
- MD5_CTX tmp;
- u_int i;
-
- add_timer_randomness(nbytes);
-
- while (nbytes) {
- i = MIN(nbytes, sizeof(buffer));
-
- /* Hash the pool to get the output */
- MD5Init(&tmp);
- mtx_enter(&rndlock);
- MD5Update(&tmp, (u_int8_t*)random_state.pool,
- sizeof(random_state.pool));
- if (random_state.entropy_count / 8 > i)
- random_state.entropy_count -= i * 8;
- else
- random_state.entropy_count = 0;
- mtx_leave(&rndlock);
- MD5Final(buffer, &tmp);
-
- /* Copy data to destination buffer */
- bcopy(buffer, buf, i);
- nbytes -= i;
- buf += i;
-
- /* Modify pool so next hash will produce different results */
- add_timer_randomness(nbytes);
- dequeue_randomness(NULL);
- }
-
- /* Wipe data from memory */
- bzero(&tmp, sizeof(tmp));
- bzero(buffer, sizeof(buffer));
-}
-
-/* Entropy crediting API and handling of entropy-bearing events */
-
-#define QEVLEN (1024 / sizeof(struct rand_event))
-#define QEVSLOW (QEVLEN * 3 / 4) /* yet another 0.75 for 60-minutes hour /-; */
-#define QEVSBITS 10
-
-/* There is one of these per entropy source */
-struct timer_rand_state {
+struct timer_rand_state { /* There is one of these per entropy source */
u_int last_time;
u_int last_delta;
u_int last_delta2;
u_int dont_count_entropy : 1;
u_int max_entropy : 1;
-};
+} rnd_states[RND_SRC_NUM];
+
+#define QEVLEN (1024 / sizeof(struct rand_event))
+#define QEVSLOW (QEVLEN * 3 / 4) /* yet another 0.75 for 60-minutes hour /-; */
+#define QEVSBITS 10
struct rand_event {
struct timer_rand_state *re_state;
u_int re_nbits;
u_int re_time;
u_int re_val;
-};
-
-struct timer_rand_state rnd_states[RND_SRC_NUM];
-struct rand_event rnd_event_space[QEVLEN];
+} rnd_event_space[QEVLEN];
struct rand_event *rnd_event_head = rnd_event_space;
struct rand_event *rnd_event_tail = rnd_event_space;
+
struct timeout rnd_timeout;
struct rndstats rndstats;
-int rnd_attached;
-/* must be called at a proper spl, returns ptr to the next event */
+u_int32_t entropy_pool[POOLWORDS];
+u_int entropy_add_ptr;
+u_char entropy_input_rotate;
+
+static void dequeue_randomness(void *);
+static void add_entropy_words(const u_int32_t *, u_int);
+static void extract_entropy(u_int8_t *buf, int nbytes);
+
static __inline struct rand_event *
rnd_get(void)
{
@@ -427,7 +278,6 @@ rnd_get(void)
return p;
}
-/* must be called at a proper spl, returns next available item */
static __inline struct rand_event *
rnd_put(void)
{
@@ -442,7 +292,6 @@ rnd_put(void)
return rnd_event_head = p;
}
-/* must be called at a proper spl, returns number of items in the queue */
static __inline int
rnd_qlen(void)
{
@@ -464,6 +313,7 @@ rnd_qlen(void)
void
enqueue_randomness(int state, int val)
{
+ int delta, delta2, delta3;
struct timer_rand_state *p;
struct rand_event *rep;
struct timespec ts;
@@ -474,23 +324,11 @@ enqueue_randomness(int state, int val)
return;
#endif
+ nanotime(&ts);
+
p = &rnd_states[state];
val += state << 13;
- if (!rnd_attached) {
- if ((rep = rnd_put()) == NULL) {
- rndstats.rnd_drops++;
- return;
- }
-
- rep->re_state = &rnd_states[RND_SRC_TIMER];
- rep->re_nbits = 0;
- rep->re_time = 0;
- rep->re_time = val;
- return;
- }
-
- nanotime(&ts);
time = (ts.tv_nsec >> 10) + (ts.tv_sec << 20);
nbits = 0;
@@ -500,7 +338,6 @@ enqueue_randomness(int state, int val)
* deltas in order to make our estimate.
*/
if (!p->dont_count_entropy) {
- int delta, delta2, delta3;
delta = time - p->last_time;
delta2 = delta - p->last_delta;
delta3 = delta2 - p->last_delta2;
@@ -538,30 +375,31 @@ enqueue_randomness(int state, int val)
}
if (delta & 1)
nbits++;
+ } else if (p->max_entropy)
+ nbits = 8 * sizeof(val) - 1;
+
+ /* given the multi-order delta logic above, this should never happen */
+ if (nbits >= 32)
+ return;
+ mtx_enter(&entropylock);
+ if (!p->dont_count_entropy) {
/*
* the logic is to drop low-entropy entries,
* in hope for dequeuing to be more randomfull
*/
if (rnd_qlen() > QEVSLOW && nbits < QEVSBITS) {
rndstats.rnd_drople++;
- return;
+ goto done;
}
p->last_time = time;
p->last_delta = delta3;
p->last_delta2 = delta2;
- } else if (p->max_entropy)
- nbits = 8 * sizeof(val) - 1;
-
- /* given the multi-order delta logic above, this should never happen */
- if (nbits >= 32)
- return;
+ }
- mtx_enter(&rndlock);
if ((rep = rnd_put()) == NULL) {
rndstats.rnd_drops++;
- mtx_leave(&rndlock);
- return;
+ goto done;
}
rep->re_state = p;
@@ -574,13 +412,65 @@ enqueue_randomness(int state, int val)
rndstats.rnd_sc[state]++;
rndstats.rnd_sb[state] += nbits;
- if (rnd_qlen() > QEVSLOW/2 && !random_state.tmo) {
- random_state.tmo++;
+ if (rnd_qlen() > QEVSLOW/2 && timeout_initialized(&rnd_timeout) &&
+ timeout_pending(&rnd_timeout))
timeout_add(&rnd_timeout, 1);
+done:
+ mtx_leave(&entropylock);
+}
+
+/*
+ * This function adds a byte into the entropy pool. It does not
+ * update the entropy estimate. The caller must do this if appropriate.
+ *
+ * The pool is stirred with a polynomial of degree POOLWORDS over GF(2);
+ * see POOL_TAP[1-4] above
+ *
+ * Rotate the input word by a changing number of bits, to help assure
+ * that all bits in the entropy get toggled. Otherwise, if the pool
+ * is consistently feed small numbers (such as keyboard scan codes)
+ * then the upper bits of the entropy pool will frequently remain
+ * untouched.
+ */
+static void
+add_entropy_words(const u_int32_t *buf, u_int n)
+{
+ /* derived from IEEE 802.3 CRC-32 */
+ static const u_int32_t twist_table[8] = {
+ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278
+ };
+
+ for (; n--; buf++) {
+ u_int32_t w = (*buf << entropy_input_rotate) |
+ (*buf >> (32 - entropy_input_rotate));
+ u_int i = entropy_add_ptr =
+ (entropy_add_ptr - 1) & POOLMASK;
+ /*
+ * Normally, we add 7 bits of rotation to the pool.
+ * At the beginning of the pool, add an extra 7 bits
+ * rotation, so that successive passes spread the
+ * input bits across the pool evenly.
+ */
+ entropy_input_rotate =
+ (entropy_input_rotate + (i ? 7 : 14)) & 31;
+
+ /* XOR pool contents corresponding to polynomial terms */
+ w ^= entropy_pool[(i + POOL_TAP1) & POOLMASK] ^
+ entropy_pool[(i + POOL_TAP2) & POOLMASK] ^
+ entropy_pool[(i + POOL_TAP3) & POOLMASK] ^
+ entropy_pool[(i + POOL_TAP4) & POOLMASK] ^
+ entropy_pool[(i + 1) & POOLMASK] ^
+ entropy_pool[i]; /* + 2^POOLWORDS */
+
+ entropy_pool[i] = (w >> 3) ^ twist_table[w & 7];
}
- mtx_leave(&rndlock);
}
+/*
+ * Pulls entropy out of the queue and merges it into the pool
+ * with the CRC.
+ */
/* ARGSUSED */
static void
dequeue_randomness(void *v)
@@ -589,29 +479,68 @@ dequeue_randomness(void *v)
u_int32_t buf[2];
u_int nbits;
+ mtx_enter(&entropylock);
+
timeout_del(&rnd_timeout);
- rndstats.rnd_deqs++;
- mtx_enter(&rndlock);
+ rndstats.rnd_deqs++;
while ((rep = rnd_get())) {
-
buf[0] = rep->re_time;
buf[1] = rep->re_val;
nbits = rep->re_nbits;
- mtx_leave(&rndlock);
+ mtx_leave(&entropylock);
add_entropy_words(buf, 2);
+ mtx_enter(&entropylock);
rndstats.rnd_total += nbits;
- random_state.entropy_count += nbits;
- if (random_state.entropy_count > POOLBITS)
- random_state.entropy_count = POOLBITS;
+ }
+ mtx_leave(&entropylock);
+}
- mtx_enter(&rndlock);
+/*
+ * Grabs a chunk from the entropy_pool[] and slams it through MD5 when
+ * requested.
+ */
+static void
+extract_entropy(u_int8_t *buf, int nbytes)
+{
+ static u_int32_t extract_pool[POOLWORDS];
+ u_char buffer[16];
+ MD5_CTX tmp;
+ u_int i;
+
+ add_timer_randomness(nbytes);
+
+ while (nbytes) {
+ i = MIN(nbytes, sizeof(buffer));
+
+ /*
+ * INTENTIONALLY not protected by entropylock. Races
+ * during bcopy() result in acceptable input data; races
+ * during MD5Update() would create nasty data dependencies.
+ */
+ bcopy(entropy_pool, extract_pool,
+ sizeof(extract_pool));
+
+ /* Hash the pool to get the output */
+ MD5Init(&tmp);
+ MD5Update(&tmp, (u_int8_t *)extract_pool, sizeof(extract_pool));
+ MD5Final(buffer, &tmp);
+
+ /* Copy data to destination buffer */
+ bcopy(buffer, buf, i);
+ nbytes -= i;
+ buf += i;
+
+ /* Modify pool so next hash will produce different results */
+ add_timer_randomness(nbytes);
+ dequeue_randomness(NULL);
}
- random_state.tmo = 0;
- mtx_leave(&rndlock);
+ /* Wipe data from memory */
+ bzero(&tmp, sizeof(tmp));
+ bzero(buffer, sizeof(buffer));
}
/*
@@ -627,99 +556,12 @@ dequeue_randomness(void *v)
*/
#define ARC4_SUB_KEY_BYTES (256 / 8)
-struct timeout arc4_timeout;
+struct mutex rndlock = MUTEX_INITIALIZER(IPL_HIGH);
struct rc4_ctx arc4random_state;
-int arc4random_initialized;
-
-static void arc4_reinit(void *v);
-static void arc4_stir(void);
-static void arc4_reinit(void *v);
-static void arc4maybeinit(void);
-
-void
-randomattach(void)
-{
- mtx_init(&rndlock, IPL_HIGH);
-
- random_state.add_ptr = 0;
- random_state.entropy_count = 0;
- rnd_states[RND_SRC_TIMER].dont_count_entropy = 1;
- rnd_states[RND_SRC_TRUE].dont_count_entropy = 1;
- rnd_states[RND_SRC_TRUE].max_entropy = 1;
-
- if (msgbufp && msgbufp->msg_magic == MSG_MAGIC)
- add_entropy_words((u_int32_t *)msgbufp->msg_bufc,
- msgbufp->msg_bufs / sizeof(u_int32_t));
-
- timeout_set(&rnd_timeout, dequeue_randomness, NULL);
- timeout_set(&arc4_timeout, arc4_reinit, NULL);
- arc4_reinit(NULL);
-
- rnd_attached = 1;
-}
-
-static void
-arc4_stir(void)
-{
- struct timespec ts;
- u_int8_t buf[64], *p;
- int i;
-
- /*
- * Use MD5 PRNG data and a system timespec; early in the boot
- * process this is the best we can do -- some architectures do
- * not collect entropy very well during this time, but may have
- * clock information which is better than nothing.
- */
- extract_entropy((u_int8_t *)buf, sizeof buf);
- nanotime(&ts);
- for (p = (u_int8_t *)&ts, i = 0; i < sizeof(ts); i++)
- buf[i] ^= p[i];
-
- mtx_enter(&rndlock);
- rndstats.rnd_used += sizeof(buf) * 8;
-
- if (rndstats.arc4_nstirs > 0)
- rc4_crypt(&arc4random_state, buf, buf, sizeof(buf));
-
- rc4_keysetup(&arc4random_state, buf, sizeof(buf));
- rndstats.arc4_stirs += sizeof(buf);
- rndstats.arc4_nstirs++;
-
- /*
- * Throw away the first N words of output, as suggested in the
- * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
- * by Fluher, Mantin, and Shamir. (N = 256 in our case.)
- */
- rc4_skip(&arc4random_state, 256 * 4);
- mtx_leave(&rndlock);
-}
-
-/*
- * Called by timeout to mark arc4 for stirring,
- * actual stirring happens on any access attempt.
- */
-static void
-arc4_reinit(void *v)
-{
- arc4random_initialized = 0;
-}
+struct timeout arc4_timeout;
-static void
-arc4maybeinit(void)
-{
-
- if (!arc4random_initialized) {
-#ifdef DIAGNOSTIC
- if (!rnd_attached)
- panic("arc4maybeinit: premature");
-#endif
- arc4random_initialized++;
- arc4_stir();
- /* 10 minutes, per dm@'s suggestion */
- timeout_add_sec(&arc4_timeout, 10 * 60);
- }
-}
+static void arc4_reinit(void *v); /* timeout to start reinit */
+static void arc4_init(void *, void *); /* actually do the reinit */
/* Return one word of randomness from an RC4 generator */
u_int32_t
@@ -727,7 +569,6 @@ arc4random(void)
{
u_int32_t ret;
- arc4maybeinit();
mtx_enter(&rndlock);
rc4_getbytes(&arc4random_state, (u_char *)&ret, sizeof(ret));
rndstats.arc4_reads += sizeof(ret);
@@ -763,8 +604,6 @@ arc4random_buf_large(void *buf, size_t n)
void
arc4random_buf(void *buf, size_t n)
{
- arc4maybeinit();
-
/* Satisfy large requests via an independent ARC4 instance */
if (n > ARC4_MAIN_MAX_BYTES) {
arc4random_buf_large(buf, n);
@@ -822,6 +661,71 @@ arc4random_uniform(u_int32_t upper_bound)
return r % upper_bound;
}
+/* ARGSUSED */
+static void
+arc4_init(void *v, void *w)
+{
+ struct rc4_ctx new_ctx;
+ struct timespec ts;
+ u_int8_t buf[64], *p;
+ int i;
+
+ /*
+ * Use MD5 PRNG data and a system timespec; early in the boot
+ * process this is the best we can do -- some architectures do
+ * not collect entropy very well during this time, but may have
+ * clock information which is better than nothing.
+ */
+ extract_entropy((u_int8_t *)buf, sizeof buf);
+ nanotime(&ts);
+ for (p = (u_int8_t *)&ts, i = 0; i < sizeof(ts); i++)
+ buf[i] ^= p[i];
+
+ rc4_keysetup(&new_ctx, buf, sizeof(buf));
+ /*
+ * Throw away the first N words of output, as suggested in the
+ * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
+ * by Fluhrer, Mantin, and Shamir. (N = 256 in our case.)
+ */
+ rc4_skip(&new_ctx, 256 * 4);
+
+ mtx_enter(&rndlock);
+ bcopy(&new_ctx, &arc4random_state, sizeof(new_ctx));
+ rndstats.rnd_used += sizeof(buf) * 8;
+ rndstats.arc4_nstirs++;
+ mtx_leave(&rndlock);
+}
+
+/*
+ * Called by timeout to queue a reinit of the arc4 state on the workq.
+ */
+static void
+arc4_reinit(void *v)
+{
+ workq_add_task(NULL, 0, arc4_init, NULL, NULL);
+ /* 10 minutes, per dm@'s suggestion */
+ timeout_add_sec(&arc4_timeout, 10 * 60);
+}
+
+void
+randomattach(void)
+{
+ rnd_states[RND_SRC_TIMER].dont_count_entropy = 1;
+ rnd_states[RND_SRC_TRUE].dont_count_entropy = 1;
+ rnd_states[RND_SRC_TRUE].max_entropy = 1;
+
+ if (msgbufp && msgbufp->msg_magic == MSG_MAGIC)
+ add_entropy_words((u_int32_t *)msgbufp->msg_bufc,
+ msgbufp->msg_bufs / sizeof(u_int32_t));
+
+ timeout_set(&rnd_timeout, dequeue_randomness, NULL);
+ dequeue_randomness(NULL);
+
+ timeout_set(&arc4_timeout, arc4_reinit, NULL);
+ arc4_init(NULL, NULL);
+ arc4_reinit(NULL);
+}
+
int
randomopen(dev_t dev, int flag, int mode, struct proc *p)
{
@@ -886,7 +790,7 @@ randomwrite(dev_t dev, struct uio *uio, int flags)
buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);
while (!ret && uio->uio_resid > 0) {
- u_int n = min(POOLBYTES, uio->uio_resid);
+ int n = min(POOLBYTES, uio->uio_resid);
ret = uiomove(buf, n, uio);
if (ret)
@@ -899,11 +803,8 @@ randomwrite(dev_t dev, struct uio *uio, int flags)
newdata = 1;
}
- if (newdata) {
- mtx_enter(&rndlock);
- arc4random_initialized = 0;
- mtx_leave(&rndlock);
- }
+ if (newdata)
+ arc4_init(NULL, NULL);
free(buf, M_TEMP);
return ret;
diff --git a/sys/dev/rndvar.h b/sys/dev/rndvar.h
index 7aca15eeb3a..0c62a5bd033 100644
--- a/sys/dev/rndvar.h
+++ b/sys/dev/rndvar.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: rndvar.h,v 1.26 2011/01/06 15:41:51 deraadt Exp $ */
+/* $OpenBSD: rndvar.h,v 1.27 2011/01/07 04:56:52 deraadt Exp $ */
/*
* Copyright (c) 1996,2000 Michael Shalayeff.
@@ -49,7 +49,7 @@ struct rndstats {
quad_t rnd_reads; /* strong read calls -- unused */
quad_t arc4_reads; /* aRC4 data bytes read so far */
quad_t arc4_nstirs; /* arc4 pool stirs */
- quad_t arc4_stirs; /* arc4 pool stirs (bits used) */
+ quad_t arc4_stirs; /* arc4 pool stirs (bits used) -- unused */
quad_t rnd_pad[5];