Commit acd77500 authored by Linus Torvalds

Merge tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random

Pull random changes from Ted Ts'o:
 "Change /dev/random so that it uses the CRNG and blocks only if the
  CRNG hasn't initialized, instead of using the old blocking pool. Also
  clean up archrandom.h, and some other miscellaneous cleanups"

* tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random: (24 commits)
  s390x: Mark archrandom.h functions __must_check
  powerpc: Mark archrandom.h functions __must_check
  powerpc: Use bool in archrandom.h
  x86: Mark archrandom.h functions __must_check
  linux/random.h: Mark CONFIG_ARCH_RANDOM functions __must_check
  linux/random.h: Use false with bool
  linux/random.h: Remove arch_has_random, arch_has_random_seed
  s390: Remove arch_has_random, arch_has_random_seed
  powerpc: Remove arch_has_random, arch_has_random_seed
  x86: Remove arch_has_random, arch_has_random_seed
  random: remove some dead code of poolinfo
  random: fix typo in add_timer_randomness()
  random: Add and use pr_fmt()
  random: convert to ENTROPY_BITS for better code readability
  random: remove unnecessary unlikely()
  random: remove kernel.random.read_wakeup_threshold
  random: delete code to pull data into pools
  random: remove the blocking pool
  random: make /dev/random be almost like /dev/urandom
  random: ignore GRND_RANDOM in getentropy(2)
  ...
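
The practical effect of "random: make /dev/random be almost like /dev/urandom" is that a read of /dev/random now blocks at most once, until the CRNG is initialized, and behaves like /dev/urandom afterwards. A minimal userspace sketch of the new semantics (illustrative only, not part of this series; the buffer size is arbitrary):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[32];
		int fd = open("/dev/random", O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* May block while crng_init < 2; once the CRNG is ready,
		 * reads never again block on entropy accounting. */
		if (read(fd, buf, sizeof(buf)) < 0)
			perror("read");
		close(fd);
		return 0;
	}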
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -6,27 +6,28 @@
 #include <asm/machdep.h>
 
-static inline int arch_get_random_long(unsigned long *v)
+static inline bool __must_check arch_get_random_long(unsigned long *v)
 {
-	return 0;
+	return false;
 }
 
-static inline int arch_get_random_int(unsigned int *v)
+static inline bool __must_check arch_get_random_int(unsigned int *v)
 {
-	return 0;
+	return false;
 }
 
-static inline int arch_get_random_seed_long(unsigned long *v)
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
 	if (ppc_md.get_random_seed)
 		return ppc_md.get_random_seed(v);
 
-	return 0;
+	return false;
 }
 
-static inline int arch_get_random_seed_int(unsigned int *v)
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
 	unsigned long val;
-	int rc;
+	bool rc;
 
 	rc = arch_get_random_seed_long(&val);
 	if (rc)
@@ -34,16 +35,6 @@ static inline int arch_get_random_seed_int(unsigned int *v)
 	return rc;
 }
 
-static inline int arch_has_random(void)
-{
-	return 0;
-}
-
-static inline int arch_has_random_seed(void)
-{
-	return !!ppc_md.get_random_seed;
-}
-
 #endif /* CONFIG_ARCH_RANDOM */
 
 #ifdef CONFIG_PPC_POWERNV
--- a/arch/s390/include/asm/archrandom.h
+++ b/arch/s390/include/asm/archrandom.h
@@ -21,29 +21,17 @@ extern atomic64_t s390_arch_random_counter;
 
 bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
 
-static inline bool arch_has_random(void)
+static inline bool __must_check arch_get_random_long(unsigned long *v)
 {
 	return false;
 }
 
-static inline bool arch_has_random_seed(void)
+static inline bool __must_check arch_get_random_int(unsigned int *v)
 {
-	if (static_branch_likely(&s390_arch_random_available))
-		return true;
 	return false;
 }
 
-static inline bool arch_get_random_long(unsigned long *v)
-{
-	return false;
-}
-
-static inline bool arch_get_random_int(unsigned int *v)
-{
-	return false;
-}
-
-static inline bool arch_get_random_seed_long(unsigned long *v)
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
 	if (static_branch_likely(&s390_arch_random_available)) {
 		return s390_arch_random_generate((u8 *)v, sizeof(*v));
@@ -51,7 +39,7 @@ static inline bool arch_get_random_seed_long(unsigned long *v)
 	return false;
 }
 
-static inline bool arch_get_random_seed_int(unsigned int *v)
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
 	if (static_branch_likely(&s390_arch_random_available)) {
 		return s390_arch_random_generate((u8 *)v, sizeof(*v));
--- a/arch/x86/include/asm/archrandom.h
+++ b/arch/x86/include/asm/archrandom.h
@@ -27,7 +27,7 @@
 
 /* Unconditional execution of RDRAND and RDSEED */
 
-static inline bool rdrand_long(unsigned long *v)
+static inline bool __must_check rdrand_long(unsigned long *v)
 {
 	bool ok;
 	unsigned int retry = RDRAND_RETRY_LOOPS;
@@ -41,7 +41,7 @@ static inline bool rdrand_long(unsigned long *v)
 	return false;
 }
 
-static inline bool rdrand_int(unsigned int *v)
+static inline bool __must_check rdrand_int(unsigned int *v)
 {
 	bool ok;
 	unsigned int retry = RDRAND_RETRY_LOOPS;
@@ -55,7 +55,7 @@ static inline bool rdrand_int(unsigned int *v)
 	return false;
 }
 
-static inline bool rdseed_long(unsigned long *v)
+static inline bool __must_check rdseed_long(unsigned long *v)
 {
 	bool ok;
 
 	asm volatile(RDSEED_LONG
@@ -64,7 +64,7 @@ static inline bool rdseed_long(unsigned long *v)
 	return ok;
 }
 
-static inline bool rdseed_int(unsigned int *v)
+static inline bool __must_check rdseed_int(unsigned int *v)
 {
 	bool ok;
 
 	asm volatile(RDSEED_INT
@@ -73,10 +73,6 @@ static inline bool rdseed_int(unsigned int *v)
 	return ok;
 }
 
-/* Conditional execution based on CPU type */
-#define arch_has_random()	static_cpu_has(X86_FEATURE_RDRAND)
-#define arch_has_random_seed()	static_cpu_has(X86_FEATURE_RDSEED)
-
 /*
  * These are the generic interfaces; they must not be declared if the
  * stubs in <linux/random.h> are to be invoked,
@@ -84,24 +80,24 @@ static inline bool rdseed_int(unsigned int *v)
  */
 #ifdef CONFIG_ARCH_RANDOM
 
-static inline bool arch_get_random_long(unsigned long *v)
+static inline bool __must_check arch_get_random_long(unsigned long *v)
 {
-	return arch_has_random() ? rdrand_long(v) : false;
+	return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_long(v) : false;
 }
 
-static inline bool arch_get_random_int(unsigned int *v)
+static inline bool __must_check arch_get_random_int(unsigned int *v)
 {
-	return arch_has_random() ? rdrand_int(v) : false;
+	return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_int(v) : false;
 }
 
-static inline bool arch_get_random_seed_long(unsigned long *v)
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
-	return arch_has_random_seed() ? rdseed_long(v) : false;
+	return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_long(v) : false;
 }
 
-static inline bool arch_get_random_seed_int(unsigned int *v)
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
-	return arch_has_random_seed() ? rdseed_int(v) : false;
+	return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_int(v) : false;
 }
 
 extern void x86_init_rdrand(struct cpuinfo_x86 *c);
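Every helper in these headers now carries __must_check, so a caller can no longer silently drop a failed RDRAND/RDSEED. A kernel-style sketch of a conforming caller follows; seed_from_arch() is a made-up name for illustration, while arch_get_random_seed_long(), arch_get_random_long() and get_random_long() are the real interfaces:

	#include <linux/random.h>

	/* Illustrative only: prefer a hardware seed and fall back to the
	 * kernel CRNG. Ignoring the __must_check boolean result of the
	 * arch helpers would now trigger a compiler warning. */
	static unsigned long seed_from_arch(void)
	{
		unsigned long seed;

		if (!arch_get_random_seed_long(&seed) &&
		    !arch_get_random_long(&seed))
			seed = get_random_long();
		return seed;
	}
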
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -307,6 +307,8 @@
  * Eastlake, Steve Crocker, and Jeff Schiller.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/utsname.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -354,7 +356,6 @@
 #define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
 #define OUTPUT_POOL_SHIFT	10
 #define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
-#define SEC_XFER_SIZE		512
 #define EXTRACT_SIZE		10
@@ -370,12 +371,6 @@
 #define ENTROPY_SHIFT 3
 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
 
-/*
- * The minimum number of bits of entropy before we wake up a read on
- * /dev/random.  Should be enough to do a significant reseed.
- */
-static int random_read_wakeup_bits = 64;
-
 /*
  * If the entropy count falls under this number of bits, then we
  * should wake up processes which are selecting or polling on write
@@ -436,42 +431,11 @@ static const struct poolinfo {
 	/* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
 	/* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
 	{ S(128),	104,	76,	51,	25,	1 },
-	/* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
-	/* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
-	{ S(32),	26,	19,	14,	7,	1 },
-#if 0
-	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
-	{ S(2048),	1638,	1231,	819,	411,	1 },
-	/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
-	{ S(1024),	817,	615,	412,	204,	1 },
-	/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
-	{ S(1024),	819,	616,	410,	207,	2 },
-	/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
-	{ S(512),	411,	308,	208,	104,	1 },
-	/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
-	{ S(512),	409,	307,	206,	102,	2 },
-	/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
-	{ S(512),	409,	309,	205,	103,	2 },
-	/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
-	{ S(256),	205,	155,	101,	52,	1 },
-	/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
-	{ S(128),	103,	78,	51,	27,	2 },
-	/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
-	{ S(64),	52,	39,	26,	14,	1 },
-#endif
 };
 
 /*
  * Static global variables
  */
-static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 static struct fasync_struct *fasync;
@@ -530,11 +494,8 @@ struct entropy_store {
 	const struct poolinfo *poolinfo;
 	__u32 *pool;
 	const char *name;
-	struct entropy_store *pull;
-	struct work_struct push_work;
 
 	/* read-write data: */
-	unsigned long last_pulled;
 	spinlock_t lock;
 	unsigned short add_ptr;
 	unsigned short input_rotate;
@@ -550,9 +511,7 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
 				size_t nbytes, int fips);
 
 static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
-static void push_to_pool(struct work_struct *work);
 static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
 
 static struct entropy_store input_pool = {
 	.poolinfo = &poolinfo_table[0],
@@ -561,16 +520,6 @@ static struct entropy_store input_pool = {
 	.pool = input_pool_data
 };
 
-static struct entropy_store blocking_pool = {
-	.poolinfo = &poolinfo_table[1],
-	.name = "blocking",
-	.pull = &input_pool,
-	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
-	.pool = blocking_pool_data,
-	.push_work = __WORK_INITIALIZER(blocking_pool.push_work,
-					push_to_pool),
-};
-
 static __u32 const twist_table[8] = {
 	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
 	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
@@ -759,22 +708,17 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
 		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
 	}
 
-	if (unlikely(entropy_count < 0)) {
-		pr_warn("random: negative entropy/overflow: pool %s count %d\n",
+	if (WARN_ON(entropy_count < 0)) {
+		pr_warn("negative entropy/overflow: pool %s count %d\n",
 			r->name, entropy_count);
-		WARN_ON(1);
 		entropy_count = 0;
 	} else if (entropy_count > pool_size)
 		entropy_count = pool_size;
 
-	if ((r == &blocking_pool) && !r->initialized &&
-	    (entropy_count >> ENTROPY_SHIFT) > 128)
-		has_initialized = 1;
-
 	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
 		goto retry;
 
-	if (has_initialized) {
-		r->initialized = 1;
-		wake_up_interruptible(&random_read_wait);
-		kill_fasync(&fasync, SIGIO, POLL_IN);
-	}
@@ -783,36 +727,13 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
 
 	if (r == &input_pool) {
 		int entropy_bits = entropy_count >> ENTROPY_SHIFT;
-		struct entropy_store *other = &blocking_pool;
 
 		if (crng_init < 2) {
 			if (entropy_bits < 128)
 				return;
 			crng_reseed(&primary_crng, r);
-			entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
-		}
-
-		/* initialize the blocking pool if necessary */
-		if (entropy_bits >= random_read_wakeup_bits &&
-		    !other->initialized) {
-			schedule_work(&other->push_work);
-			return;
-		}
-
-		/* should we wake readers? */
-		if (entropy_bits >= random_read_wakeup_bits &&
-		    wq_has_sleeper(&random_read_wait)) {
-			wake_up_interruptible(&random_read_wait);
-			kill_fasync(&fasync, SIGIO, POLL_IN);
+			entropy_bits = ENTROPY_BITS(r);
 		}
-
-		/* If the input pool is getting full, and the blocking
-		 * pool has room, send some entropy to the blocking
-		 * pool.
-		 */
-		if (!work_pending(&other->push_work) &&
-		    (ENTROPY_BITS(r) > 6 * r->poolinfo->poolbytes) &&
-		    (ENTROPY_BITS(other) <= 6 * other->poolinfo->poolbytes))
-			schedule_work(&other->push_work);
 	}
 }
@@ -884,7 +805,7 @@ static void crng_initialize(struct crng_state *crng)
 		invalidate_batched_entropy();
 		numa_crng_init();
 		crng_init = 2;
-		pr_notice("random: crng done (trusting CPU's manufacturer)\n");
+		pr_notice("crng done (trusting CPU's manufacturer)\n");
 	}
 	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }
@@ -946,8 +867,7 @@ static int crng_fast_load(const char *cp, size_t len)
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
 		invalidate_batched_entropy();
 		crng_init = 1;
-		wake_up_interruptible(&crng_init_wait);
-		pr_notice("random: fast init done\n");
+		pr_notice("fast init done\n");
 	}
 	return 1;
 }
@@ -1032,16 +952,15 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
-		pr_notice("random: crng init done\n");
+		kill_fasync(&fasync, SIGIO, POLL_IN);
+		pr_notice("crng init done\n");
 		if (unseeded_warning.missed) {
-			pr_notice("random: %d get_random_xx warning(s) missed "
-				  "due to ratelimiting\n",
+			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
 				  unseeded_warning.missed);
 			unseeded_warning.missed = 0;
 		}
 		if (urandom_warning.missed) {
-			pr_notice("random: %d urandom warning(s) missed "
-				  "due to ratelimiting\n",
+			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
 				  urandom_warning.missed);
 			urandom_warning.missed = 0;
 		}
@@ -1246,7 +1165,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	/*
 	 * delta is now minimum absolute delta.
 	 * Round down by 1 bit on general principles,
-	 * and limit entropy entimate to 12 bits.
+	 * and limit entropy estimate to 12 bits.
 	 */
 	credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
 }
@@ -1389,57 +1308,6 @@ EXPORT_SYMBOL_GPL(add_disk_randomness);
  *
  *********************************************************************/
 
-/*
- * This utility inline function is responsible for transferring entropy
- * from the primary pool to the secondary extraction pool. We make
- * sure we pull enough for a 'catastrophic reseed'.
- */
-static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
-static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
-{
-	if (!r->pull ||
-	    r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
-	    r->entropy_count > r->poolinfo->poolfracbits)
-		return;
-
-	_xfer_secondary_pool(r, nbytes);
-}
-
-static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
-{
-	__u32	tmp[OUTPUT_POOL_WORDS];
-	int bytes = nbytes;
-
-	/* pull at least as much as a wakeup */
-	bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
-	/* but never more than the buffer size */
-	bytes = min_t(int, bytes, sizeof(tmp));
-
-	trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
-				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
-	bytes = extract_entropy(r->pull, tmp, bytes,
-				random_read_wakeup_bits / 8, 0);
-	mix_pool_bytes(r, tmp, bytes);
-	credit_entropy_bits(r, bytes*8);
-}
-
-/*
- * Used as a workqueue function so that when the input pool is getting
- * full, we can "spill over" some entropy to the output pools. That
- * way the output pools can store some of the excess entropy instead
- * of letting it go to waste.
- */
-static void push_to_pool(struct work_struct *work)
-{
-	struct entropy_store *r = container_of(work, struct entropy_store,
-					       push_work);
-	BUG_ON(!r);
-	_xfer_secondary_pool(r, random_read_wakeup_bits/8);
-	trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
-			   r->pull->entropy_count >> ENTROPY_SHIFT);
-}
-
 /*
  * This function decides how many bytes to actually take from the
  * given pool, and also debits the entropy count accordingly.
@@ -1465,10 +1333,9 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 	if (ibytes < min)
 		ibytes = 0;
-	if (unlikely(entropy_count < 0)) {
-		pr_warn("random: negative entropy count: pool %s count %d\n",
+	if (WARN_ON(entropy_count < 0)) {
+		pr_warn("negative entropy count: pool %s count %d\n",
 			r->name, entropy_count);
-		WARN_ON(1);
 		entropy_count = 0;
 	}
 	nfrac = ibytes << (ENTROPY_SHIFT + 3);
@@ -1481,8 +1348,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 		goto retry;
 
 	trace_debit_entropy(r->name, 8 * ibytes);
-	if (ibytes &&
-	    (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
+	if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
 		wake_up_interruptible(&random_write_wait);
 		kill_fasync(&fasync, SIGIO, POLL_OUT);
 	}
@@ -1603,7 +1469,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 		spin_unlock_irqrestore(&r->lock, flags);
 		trace_extract_entropy(r->name, EXTRACT_SIZE,
 				      ENTROPY_BITS(r), _RET_IP_);
-		xfer_secondary_pool(r, EXTRACT_SIZE);
 		extract_buf(r, tmp);
 		spin_lock_irqsave(&r->lock, flags);
 		memcpy(r->last_data, tmp, EXTRACT_SIZE);
@@ -1612,60 +1477,11 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 	}
 
 	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
-	xfer_secondary_pool(r, nbytes);
 	nbytes = account(r, nbytes, min, reserved);
 
 	return _extract_entropy(r, buf, nbytes, fips_enabled);
 }
 
-/*
- * This function extracts randomness from the "entropy pool", and
- * returns it in a userspace buffer.
- */
-static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
-				    size_t nbytes)
-{
-	ssize_t ret = 0, i;
-	__u8 tmp[EXTRACT_SIZE];
-	int large_request = (nbytes > 256);
-
-	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
-	if (!r->initialized && r->pull) {
-		xfer_secondary_pool(r, ENTROPY_BITS(r->pull)/8);
-		if (!r->initialized)
-			return 0;
-	}
-
-	xfer_secondary_pool(r, nbytes);
-	nbytes = account(r, nbytes, 0, 0);
-
-	while (nbytes) {
-		if (large_request && need_resched()) {
-			if (signal_pending(current)) {
-				if (ret == 0)
-					ret = -ERESTARTSYS;
-				break;
-			}
-			schedule();
-		}
-
-		extract_buf(r, tmp);
-		i = min_t(int, nbytes, EXTRACT_SIZE);
-		if (copy_to_user(buf, tmp, i)) {
-			ret = -EFAULT;
-			break;
-		}
-
-		nbytes -= i;
-		buf += i;
-		ret += i;
-	}
-
-	/* Wipe data just returned from memory */
-	memzero_explicit(tmp, sizeof(tmp));
-
-	return ret;
-}
-
 #define warn_unseeded_randomness(previous) \
 	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
@@ -1687,8 +1503,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
 		print_once = true;
 #endif
 	if (__ratelimit(&unseeded_warning))
-		pr_notice("random: %s called from %pS with crng_init=%d\n",
-			  func_name, caller, crng_init);
+		printk_deferred(KERN_NOTICE "random: %s called from %pS "
+				"with crng_init=%d\n", func_name, caller,
+				crng_init);
 }
 
 /*
@@ -1931,7 +1748,6 @@ static void __init init_std_data(struct entropy_store *r)
 	ktime_t now = ktime_get_real();
 	unsigned long rv;
 
-	r->last_pulled = jiffies;
 	mix_pool_bytes(r, &now, sizeof(now));
 	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
 		if (!arch_get_random_seed_long(&rv) &&
@@ -1955,7 +1771,6 @@ static void __init init_std_data(struct entropy_store *r)
 int __init rand_initialize(void)
 {
 	init_std_data(&input_pool);
-	init_std_data(&blocking_pool);
 	crng_initialize(&primary_crng);
 	crng_global_init_time = jiffies;
 	if (ratelimit_disable) {
@@ -1983,40 +1798,15 @@ void rand_initialize_disk(struct gendisk *disk)
 #endif
 
 static ssize_t
-_random_read(int nonblock, char __user *buf, size_t nbytes)
+urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
+		    loff_t *ppos)
 {
-	ssize_t n;
-
-	if (nbytes == 0)
-		return 0;
-
-	nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
-	while (1) {
-		n = extract_entropy_user(&blocking_pool, buf, nbytes);
-		if (n < 0)
-			return n;
-		trace_random_read(n*8, (nbytes-n)*8,
-				  ENTROPY_BITS(&blocking_pool),
-				  ENTROPY_BITS(&input_pool));
-		if (n > 0)
-			return n;
-
-		/* Pool is (near) empty.  Maybe wait and retry. */
-		if (nonblock)
-			return -EAGAIN;
-
-		wait_event_interruptible(random_read_wait,
-			blocking_pool.initialized &&
-			(ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits));
-		if (signal_pending(current))
-			return -ERESTARTSYS;
-	}
-}
+	int ret;
 
-static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
-{
-	return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
+	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
+	ret = extract_crng_user(buf, nbytes);
+	trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
+	return ret;
 }
 
 static ssize_t
@@ -2024,22 +1814,29 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
 	unsigned long flags;
 	static int maxwarn = 10;
-	int ret;
 
 	if (!crng_ready() && maxwarn > 0) {
 		maxwarn--;
 		if (__ratelimit(&urandom_warning))
-			printk(KERN_NOTICE "random: %s: uninitialized "
-			       "urandom read (%zd bytes read)\n",
-			       current->comm, nbytes);
+			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
+				  current->comm, nbytes);
 		spin_lock_irqsave(&primary_crng.lock, flags);
 		crng_init_cnt = 0;
 		spin_unlock_irqrestore(&primary_crng.lock, flags);
 	}
-	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
-	ret = extract_crng_user(buf, nbytes);
-	trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
-	return ret;
+
+	return urandom_read_nowarn(file, buf, nbytes, ppos);
+}
+
+static ssize_t
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+	int ret;
+
+	ret = wait_for_random_bytes();
+	if (ret != 0)
+		return ret;
+	return urandom_read_nowarn(file, buf, nbytes, ppos);
 }
 
 static __poll_t
@@ -2047,10 +1844,10 @@ random_poll(struct file *file, poll_table * wait)
 {
 	__poll_t mask;
 
-	poll_wait(file, &random_read_wait, wait);
+	poll_wait(file, &crng_init_wait, wait);
 	poll_wait(file, &random_write_wait, wait);
 	mask = 0;
-	if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
+	if (crng_ready())
 		mask |= EPOLLIN | EPOLLRDNORM;
 	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
 		mask |= EPOLLOUT | EPOLLWRNORM;
@@ -2141,7 +1938,6 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		input_pool.entropy_count = 0;
-		blocking_pool.entropy_count = 0;
 		return 0;
 	case RNDRESEEDCRNG:
 		if (!capable(CAP_SYS_ADMIN))
@@ -2185,23 +1981,27 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
 {
 	int ret;
 
-	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
+	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
 		return -EINVAL;
 
+	/*
+	 * Requesting insecure and blocking randomness at the same time makes
+	 * no sense.
+	 */
+	if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
+		return -EINVAL;
+
 	if (count > INT_MAX)
 		count = INT_MAX;
 
-	if (flags & GRND_RANDOM)
-		return _random_read(flags & GRND_NONBLOCK, buf, count);
-
-	if (!crng_ready()) {
+	if (!(flags & GRND_INSECURE) && !crng_ready()) {
 		if (flags & GRND_NONBLOCK)
 			return -EAGAIN;
 		ret = wait_for_random_bytes();
 		if (unlikely(ret))
 			return ret;
 	}
-	return urandom_read(NULL, buf, count, NULL);
+	return urandom_read_nowarn(NULL, buf, count, NULL);
 }
@@ -2214,8 +2014,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
 
 #include <linux/sysctl.h>
 
-static int min_read_thresh = 8, min_write_thresh;
-static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
+static int min_write_thresh;
 static int max_write_thresh = INPUT_POOL_WORDS * 32;
 static int random_min_urandom_seed = 60;
 static char sysctl_bootid[16];
@@ -2290,15 +2089,6 @@ struct ctl_table random_table[] = {
 		.proc_handler	= proc_do_entropy,
 		.data		= &input_pool.entropy_count,
 	},
-	{
-		.procname	= "read_wakeup_threshold",
-		.data		= &random_read_wakeup_bits,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_read_thresh,
-		.extra2		= &max_read_thresh,
-	},
 	{
 		.procname	= "write_wakeup_threshold",
 		.data		= &random_write_wakeup_bits,
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -167,29 +167,21 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
 #ifdef CONFIG_ARCH_RANDOM
 # include <asm/archrandom.h>
 #else
-static inline bool arch_get_random_long(unsigned long *v)
+static inline bool __must_check arch_get_random_long(unsigned long *v)
 {
-	return 0;
+	return false;
 }
-static inline bool arch_get_random_int(unsigned int *v)
+static inline bool __must_check arch_get_random_int(unsigned int *v)
 {
-	return 0;
+	return false;
 }
-static inline bool arch_has_random(void)
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
-	return 0;
+	return false;
 }
-static inline bool arch_get_random_seed_long(unsigned long *v)
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
-	return 0;
-}
-static inline bool arch_get_random_seed_int(unsigned int *v)
-{
-	return 0;
-}
-static inline bool arch_has_random_seed(void)
-{
-	return 0;
+	return false;
 }
 #endif
--- a/include/uapi/linux/random.h
+++ b/include/uapi/linux/random.h
@@ -48,9 +48,11 @@ struct rand_pool_info {
  * Flags for getrandom(2)
  *
  * GRND_NONBLOCK	Don't block and return EAGAIN instead
- * GRND_RANDOM		Use the /dev/random pool instead of /dev/urandom
+ * GRND_RANDOM		No effect
+ * GRND_INSECURE	Return non-cryptographic random bytes
  */
 #define GRND_NONBLOCK	0x0001
 #define GRND_RANDOM	0x0002
+#define GRND_INSECURE	0x0004
 
 #endif /* _UAPI_LINUX_RANDOM_H */
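
Combined with the getrandom(2) changes above, the flag semantics are now: no flags blocks only until the CRNG is initialized, GRND_INSECURE never blocks but may return non-cryptographic bytes before initialization, and GRND_INSECURE|GRND_RANDOM is rejected with EINVAL. A hedged userspace sketch (assumes a libc that exposes getrandom() and these flag definitions; otherwise use syscall(SYS_getrandom, ...)):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/random.h>

	int main(void)
	{
		unsigned char buf[16];

		/* Blocks only until the CRNG is initialized. */
		if (getrandom(buf, sizeof(buf), 0) < 0)
			perror("getrandom");

		/* Never blocks; may return unseeded bytes early in boot. */
		if (getrandom(buf, sizeof(buf), GRND_INSECURE) < 0)
			perror("getrandom GRND_INSECURE");

		/* Insecure + blocking makes no sense: the kernel rejects it. */
		if (getrandom(buf, sizeof(buf), GRND_INSECURE | GRND_RANDOM) < 0 &&
		    errno == EINVAL)
			fprintf(stderr, "GRND_INSECURE|GRND_RANDOM -> EINVAL\n");
		return 0;
	}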