Commit 8ef35c86 authored by Theodore Ts'o

random: set up the NUMA crng instances after the CRNG is fully initialized

Until the primary_crng is fully initialized, don't initialize the NUMA
crng nodes.  Otherwise users of /dev/urandom on NUMA systems before
the CRNG is fully initialized can get very bad quality randomness.  Of
course everyone should move to getrandom(2) where this won't be an
issue, but there's a lot of legacy code out there.  This is related to
CVE-2018-1108.
Reported-by: Jann Horn <jannh@google.com>
Fixes: 1e7f583a ("random: make /dev/urandom scalable for silly...")
Cc: stable@kernel.org # 4.8+
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Parent dc12baac
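The commit message's advice to prefer getrandom(2) is easy to demonstrate. The user-space sketch below is not part of the patch; it shows the safe path: with flags=0, getrandom() blocks until the kernel CRNG is fully seeded, which is exactly the guarantee early-boot reads of /dev/urandom lack.

```c
/*
 * Minimal sketch of the getrandom(2) path recommended by the commit
 * message. Unlike reading /dev/urandom, getrandom() with flags=0
 * blocks until the kernel CRNG is fully initialized, so the caller
 * never sees the weak early-boot output this patch guards against.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/random.h>		/* getrandom(), glibc >= 2.25 */

int main(void)
{
	unsigned char buf[32];
	ssize_t n = getrandom(buf, sizeof(buf), 0); /* flags=0: block until CRNG ready */

	if (n != sizeof(buf)) {
		perror("getrandom");
		return 1;
	}
	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);
	putchar('\n');
	return 0;
}
```

Callers that must not sleep can pass GRND_NONBLOCK instead, which turns the early-boot wait into an EAGAIN error rather than returning unseeded bytes.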
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -787,6 +787,32 @@ static void crng_initialize(struct crng_state *crng)
 	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }
 
+#ifdef CONFIG_NUMA
+static void numa_crng_init(void)
+{
+	int i;
+	struct crng_state *crng;
+	struct crng_state **pool;
+
+	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+	for_each_online_node(i) {
+		crng = kmalloc_node(sizeof(struct crng_state),
+				    GFP_KERNEL | __GFP_NOFAIL, i);
+		spin_lock_init(&crng->lock);
+		crng_initialize(crng);
+		pool[i] = crng;
+	}
+	mb();
+	if (cmpxchg(&crng_node_pool, NULL, pool)) {
+		for_each_node(i)
+			kfree(pool[i]);
+		kfree(pool);
+	}
+}
+#else
+static void numa_crng_init(void) {}
+#endif
+
 /*
  * crng_fast_load() can be called by code in the interrupt service
  * path.  So we can't afford to dilly-dally.
@@ -893,6 +919,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng == &primary_crng && crng_init < 2) {
 		invalidate_batched_entropy();
+		numa_crng_init();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
@@ -1727,28 +1754,9 @@ static void init_std_data(struct entropy_store *r)
  */
 static int rand_initialize(void)
 {
-#ifdef CONFIG_NUMA
-	int i;
-	struct crng_state *crng;
-	struct crng_state **pool;
-#endif
-
 	init_std_data(&input_pool);
 	init_std_data(&blocking_pool);
 	crng_initialize(&primary_crng);
-
-#ifdef CONFIG_NUMA
-	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
-	for_each_online_node(i) {
-		crng = kmalloc_node(sizeof(struct crng_state),
-				    GFP_KERNEL | __GFP_NOFAIL, i);
-		spin_lock_init(&crng->lock);
-		crng_initialize(crng);
-		pool[i] = crng;
-	}
-	mb();
-	crng_node_pool = pool;
-#endif
 	return 0;
 }
 early_initcall(rand_initialize);
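One detail worth calling out: numa_crng_init() can now be reached from the reseed path on more than one CPU, so the patch publishes the pool with mb() plus cmpxchg() and lets the losing caller free its duplicate. Below is a minimal user-space sketch of that publish-once pattern; the names are hypothetical, and a GCC/Clang __sync builtin stands in for the kernel's cmpxchg().

```c
#include <stdlib.h>

/* Stand-in for the per-node crng state; names here are hypothetical. */
struct pool { int dummy; };

static struct pool *global_pool;	/* NULL until the first publish wins */

static void publish_pool(void)
{
	struct pool *p = calloc(1, sizeof(*p));

	if (!p)
		return;
	/*
	 * __sync_bool_compare_and_swap() acts as a full barrier, mirroring
	 * the patch's mb(); cmpxchg(&crng_node_pool, NULL, pool) sequence:
	 * the pool is fully initialized before the pointer becomes visible
	 * to other threads, and only one caller can install it.
	 */
	if (!__sync_bool_compare_and_swap(&global_pool, NULL, p))
		free(p);	/* lost the race: discard the duplicate */
}

int main(void)
{
	publish_pool();		/* first call installs the pool */
	publish_pool();		/* second call takes the free-the-duplicate path */
	return 0;
}
```

In the kernel version the __GFP_NOFAIL allocations mean the build loop cannot fail part-way through, so the only race left to handle is double publication, which the failed cmpxchg() turns into a simple kfree of the duplicate set.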