diff --git a/lib/libc/gen/arc4random.c b/lib/libc/gen/arc4random.c
index 1bb7a004c556e..44e653f5d2b19 100644
--- a/lib/libc/gen/arc4random.c
+++ b/lib/libc/gen/arc4random.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: arc4random.c,v 1.34 2024/01/20 14:52:47 christos Exp $	*/
+/*	$NetBSD: arc4random.c,v 1.35 2024/08/26 15:19:22 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -52,7 +52,7 @@
  */
 
 #include <sys/cdefs.h>
-__RCSID("$NetBSD: arc4random.c,v 1.34 2024/01/20 14:52:47 christos Exp $");
+__RCSID("$NetBSD: arc4random.c,v 1.35 2024/08/26 15:19:22 riastradh Exp $");
 
 #include "namespace.h"
 #include "reentrant.h"
@@ -65,6 +65,7 @@ __RCSID("$NetBSD: arc4random.c,v 1.34 2024/01/20 14:52:47 christos Exp $");
 
 #include <assert.h>
 #include <sha2.h>
+#include <stdatomic.h>
 #include <stdbool.h>
 #include <stdint.h>
 #include <stdlib.h>
@@ -398,11 +399,67 @@ crypto_onetimestream(const void *seed, void *buf, size_t n)
 
 	(void)explicit_memset(block, 0, sizeof block);
 }
 
+/*
+ * entropy_epoch()
+ *
+ *	Return the current entropy epoch, from the sysctl node
+ *	kern.entropy.epoch.
+ *
+ *	The entropy epoch is never zero.  Initially, or on error, it is
+ *	(unsigned)-1.  It may wrap around but it skips (unsigned)-1 and
+ *	0 when it does.  Changes happen less than once per second, so
+ *	wraparound will only affect systems after 136 years of uptime.
+ *
+ *	XXX This should get it from a page shared read-only by kernel
+ *	with userland, but until we implement such a mechanism, this
+ *	sysctl -- incurring the cost of a syscall -- will have to
+ *	serve.
+ */
+static unsigned
+entropy_epoch(void)
+{
+	static atomic_int mib0[3];
+	static atomic_bool initialized = false;
+	int mib[3];
+	unsigned epoch = -1;
+	size_t epochlen = sizeof(epoch);
+
+	/*
+	 * Resolve kern.entropy.epoch if we haven't already.  Cache it
+	 * for the next caller.  Initialization is idempotent, so it's
+	 * OK if two threads do it at once.
+	 */
+	if (atomic_load_explicit(&initialized, memory_order_acquire)) {
+		mib[0] = atomic_load_explicit(&mib0[0], memory_order_relaxed);
+		mib[1] = atomic_load_explicit(&mib0[1], memory_order_relaxed);
+		mib[2] = atomic_load_explicit(&mib0[2], memory_order_relaxed);
+	} else {
+		size_t nmib = __arraycount(mib);
+
+		if (sysctlnametomib("kern.entropy.epoch", mib, &nmib) == -1)
+			return -1;
+		if (nmib != __arraycount(mib))
+			return -1;
+		atomic_store_explicit(&mib0[0], mib[0], memory_order_relaxed);
+		atomic_store_explicit(&mib0[1], mib[1], memory_order_relaxed);
+		atomic_store_explicit(&mib0[2], mib[2], memory_order_relaxed);
+		atomic_store_explicit(&initialized, true,
+		    memory_order_release);
+	}
+
+	if (sysctl(mib, __arraycount(mib), &epoch, &epochlen, NULL, 0) == -1)
+		return -1;
+	if (epochlen != sizeof(epoch))
+		return -1;
+
+	return epoch;
+}
+
 /* arc4random state: per-thread, per-process (zeroed in child on fork) */
 struct arc4random_prng {
 	struct crypto_prng	arc4_prng;
-	bool			arc4_seeded;
+	unsigned		arc4_epoch;
 };
 
 static void
@@ -413,6 +470,7 @@ arc4random_prng_addrandom(struct arc4random_prng *prng, const void *data,
 	SHA256_CTX ctx;
 	uint8_t buf[crypto_prng_SEEDBYTES];
 	size_t buflen = sizeof buf;
+	unsigned epoch = entropy_epoch();
 
 	__CTASSERT(sizeof buf == SHA256_DIGEST_LENGTH);
 
@@ -436,7 +494,7 @@ arc4random_prng_addrandom(struct arc4random_prng *prng, const void *data,
 	/* reseed(SHA256(prng() || sysctl(KERN_ARND) || data)) */
 	crypto_prng_seed(&prng->arc4_prng, buf);
 	(void)explicit_memset(buf, 0, sizeof buf);
-	prng->arc4_seeded = true;
+	prng->arc4_epoch = epoch;
 }
 
 #ifdef _REENTRANT
@@ -567,7 +625,7 @@ arc4random_prng_get(void)
 	}
 
 	/* Guarantee the PRNG is seeded. */
-	if (__predict_false(!prng->arc4_seeded))
+	if (__predict_false(prng->arc4_epoch != entropy_epoch()))
		arc4random_prng_addrandom(prng, NULL, 0);
 
 	return prng;
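
Not part of the patch: a minimal standalone sketch of the mechanism the change relies on, assuming a NetBSD system that exposes the kern.entropy.epoch sysctl node. It polls the epoch from userland with sysctlbyname(3) and treats a change in the value as the cue to reseed, which is the same comparison arc4random_prng_get() now makes against the cached arc4_epoch.

/*
 * Illustrative sketch only: read kern.entropy.epoch and report when it
 * changes.  Error handling mirrors the patch: (unsigned)-1 on failure.
 */
#include <sys/sysctl.h>

#include <stdio.h>
#include <unistd.h>

static unsigned
read_epoch(void)
{
	unsigned epoch = -1;		/* (unsigned)-1 on error, as in the patch */
	size_t len = sizeof(epoch);

	if (sysctlbyname("kern.entropy.epoch", &epoch, &len, NULL, 0) == -1 ||
	    len != sizeof(epoch))
		return -1;
	return epoch;
}

int
main(void)
{
	unsigned cached = read_epoch();	/* like arc4_epoch at seed time */

	printf("entropy epoch: %u\n", cached);
	for (;;) {
		sleep(1);
		if (read_epoch() != cached) {
			/* Kernel gained entropy; a consumer would reseed. */
			cached = read_epoch();
			printf("epoch changed to %u -- reseed now\n", cached);
		}
	}
}

The patch itself caches the resolved MIB with sysctlnametomib(3) and C11 atomics so only the first caller pays the name-to-MIB lookup; the sketch above skips that optimization and just uses sysctlbyname(3) each time.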