/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/hw_random.txt for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"

static struct hwrng *current_rng;
31 32
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
T
Torsten Duwe 已提交
33
static struct task_struct *hwrng_fill;
34
/* list of registered rngs, sorted decending by quality */
35
static LIST_HEAD(rng_list);
36
/* Protects rng_list and current_rng */
37
static DEFINE_MUTEX(rng_mutex);
38 39
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
40
static int data_avail;
T
Torsten Duwe 已提交
41
static u8 *rng_buffer, *rng_fillbuf;
42 43
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */
T
Torsten Duwe 已提交
44 45 46 47

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
48 49 50
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");
T
Torsten Duwe 已提交
51

52
static void drop_current_rng(void);
53
static int hwrng_init(struct hwrng *rng);
T
Torsten Duwe 已提交
54
static void start_khwrngd(void);
55

56 57 58
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

59 60 61 62
static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}
63

64 65 66
static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;
67
	size_t size = min_t(size_t, 16, rng_buffer_size());
68

69
	mutex_lock(&reading_mutex);
70
	bytes_read = rng_get_data(rng, rng_buffer, size, 0);
71
	mutex_unlock(&reading_mutex);
72
	if (bytes_read > 0)
73
		add_device_randomness(rng_buffer, bytes_read);
74 75
}

76 77 78 79 80 81
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);
R
Rusty Russell 已提交
82

83
	complete(&rng->cleanup_done);
84 85
}

86
static int set_current_rng(struct hwrng *rng)
87
{
88 89
	int err;

90
	BUG_ON(!mutex_is_locked(&rng_mutex));
91 92 93 94 95

	err = hwrng_init(rng);
	if (err)
		return err;

96
	drop_current_rng();
97
	current_rng = rng;
98 99

	return 0;
100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140
}

/* Release our reference on current_rng and clear it; needs rng_mutex. */
static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	/* Pin the rng so it cannot be cleaned up while the caller uses it. */
	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);

	mutex_unlock(&rng_mutex);
	return rng;
}

/* Drop a reference obtained from get_current_rng(); NULL is tolerated. */
static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}

141
static int hwrng_init(struct hwrng *rng)
142
{
143 144 145
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

146 147 148 149 150 151 152
	if (rng->init) {
		int ret;

		ret =  rng->init(rng);
		if (ret)
			return ret;
	}
153 154 155 156 157

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
158
	add_early_randomness(rng);
T
Torsten Duwe 已提交
159

160
	current_quality = rng->quality ? : default_quality;
161 162
	if (current_quality > 1024)
		current_quality = 1024;
163 164 165

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
T
Torsten Duwe 已提交
166 167 168
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

169
	return 0;
170 171 172 173 174 175 176 177 178 179 180 181
}

/* open() handler for /dev/hwrng: permit read-only access only. */
static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if (!(filp->f_mode & FMODE_READ) || (filp->f_mode & FMODE_WRITE))
		return -EINVAL;
	return 0;
}

182 183 184 185
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			int wait) {
	int present;

186
	BUG_ON(!mutex_is_locked(&reading_mutex));
187 188 189 190 191 192 193 194 195 196 197 198 199 200
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

201 202 203 204
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
205
	int err = 0;
206
	int bytes_read, len;
207
	struct hwrng *rng;
208 209

	while (size) {
210 211 212
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
213
			goto out;
214
		}
215
		if (!rng) {
216
			err = -ENODEV;
217
			goto out;
218
		}
219

220 221 222 223
		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
224
		if (!data_avail) {
225
			bytes_read = rng_get_data(rng, rng_buffer,
226
				rng_buffer_size(),
227 228 229
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
230
				goto out_unlock_reading;
231 232
			}
			data_avail = bytes_read;
233
		}
234

235 236 237
		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
238
				goto out_unlock_reading;
239 240 241 242 243 244 245 246 247 248 249
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
250
				goto out_unlock_reading;
251 252 253 254
			}

			size -= len;
			ret += len;
255 256
		}

257
		mutex_unlock(&reading_mutex);
258
		put_rng(rng);
259

260 261
		if (need_resched())
			schedule_timeout_interruptible(1);
262 263 264

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
265
			goto out;
266
		}
267 268 269
	}
out:
	return ret ? : err;
270

271 272
out_unlock_reading:
	mutex_unlock(&reading_mutex);
273
out_put:
274 275
	put_rng(rng);
	goto out;
276 277
}

278
static const struct file_operations rng_chrdev_ops = {
279 280 281
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
282
	.llseek		= noop_llseek,
283 284
};

285 286
static const struct attribute_group *rng_dev_groups[];

287
static struct miscdevice rng_miscdev = {
288
	.minor		= HWRNG_MINOR,
289
	.name		= RNG_MODULE_NAME,
290
	.nodename	= "hwrng",
291
	.fops		= &rng_chrdev_ops,
292
	.groups		= rng_dev_groups,
293 294
};

295 296 297 298 299 300 301 302 303 304 305 306 307 308
static int enable_best_rng(void)
{
	int ret = -ENODEV;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* rng_list is sorted by quality, use the best (=first) one */
	if (!list_empty(&rng_list)) {
		struct hwrng *new_rng;

		new_rng = list_entry(rng_list.next, struct hwrng, list);
		ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
		if (!ret)
			cur_rng_set_by_user = 0;
309 310 311 312
	} else {
		drop_current_rng();
		cur_rng_set_by_user = 0;
		ret = 0;
313 314 315 316 317
	}

	return ret;
}

318 319
static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
320 321
					const char *buf, size_t len)
{
322
	int err = -ENODEV;
323 324 325 326 327
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
328 329 330 331 332 333 334

	if (sysfs_streq(buf, "")) {
		err = enable_best_rng();
	} else {
		list_for_each_entry(rng, &rng_list, list) {
			if (sysfs_streq(rng->name, buf)) {
				cur_rng_set_by_user = 1;
335
				err = set_current_rng(rng);
336 337
				break;
			}
338 339
		}
	}
340

341 342 343 344 345
	mutex_unlock(&rng_mutex);

	return err ? : len;
}

346 347
static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
348 349 350
				       char *buf)
{
	ssize_t ret;
351
	struct hwrng *rng;
352

353 354 355 356 357 358
	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);
359 360 361 362

	return ret;
}

363 364
static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
365 366 367 368 369 370 371 372 373 374
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
375 376
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
377
	}
378
	strlcat(buf, "\n", PAGE_SIZE);
379 380
	mutex_unlock(&rng_mutex);

381
	return strlen(buf);
382 383
}

384 385 386 387 388 389 390
static ssize_t hwrng_attr_selected_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
}

391 392 393 394 395 396
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);
397 398 399
static DEVICE_ATTR(rng_selected, S_IRUGO,
		   hwrng_attr_selected_show,
		   NULL);
400

401 402 403
static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
404
	&dev_attr_rng_selected.attr,
405 406 407 408
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);
409

410
static void __exit unregister_miscdev(void)
411
{
412
	misc_deregister(&rng_miscdev);
413 414
}

415
static int __init register_miscdev(void)
416
{
417
	return misc_register(&rng_miscdev);
418 419
}

T
Torsten Duwe 已提交
420 421 422 423 424
static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
425 426 427 428
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
T
Torsten Duwe 已提交
429
			break;
430
		mutex_lock(&reading_mutex);
431
		rc = rng_get_data(rng, rng_fillbuf,
T
Torsten Duwe 已提交
432
				  rng_buffer_size(), 1);
433
		mutex_unlock(&reading_mutex);
434
		put_rng(rng);
T
Torsten Duwe 已提交
435 436 437 438 439
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
440
		/* Outside lock, sure, but y'know: randomness. */
T
Torsten Duwe 已提交
441
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
442
					   rc * current_quality * 8 >> 10);
T
Torsten Duwe 已提交
443
	}
444
	hwrng_fill = NULL;
T
Torsten Duwe 已提交
445 446 447 448 449 450
	return 0;
}

/* Spawn the entropy-harvesting kthread; hwrng_fill stays NULL on failure. */
static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed\n");
		hwrng_fill = NULL;
	}
}

457 458 459 460
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;
461
	struct list_head *rng_list_ptr;
462

463
	if (!rng->name || (!rng->data_read && !rng->read))
464 465 466 467 468 469 470 471 472 473
		goto out;

	mutex_lock(&rng_mutex);
	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

474 475 476
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

477 478 479 480 481 482 483 484
	/* rng_list is sorted by decreasing quality */
	list_for_each(rng_list_ptr, &rng_list) {
		tmp = list_entry(rng_list_ptr, struct hwrng, list);
		if (tmp->quality < rng->quality)
			break;
	}
	list_add_tail(&rng->list, rng_list_ptr);

485
	old_rng = current_rng;
R
Rusty Russell 已提交
486
	err = 0;
487 488
	if (!old_rng ||
	    (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
489 490
		/*
		 * Set new rng as current as the new rng source
491 492
		 * provides better entropy quality and was not
		 * chosen by userspace.
493
		 */
494
		err = set_current_rng(rng);
495 496 497
		if (err)
			goto out_unlock;
	}
498

499 500 501 502 503 504 505 506 507 508 509
	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system.  If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

510 511 512 513 514 515 516
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

517
void hwrng_unregister(struct hwrng *rng)
518
{
519 520
	int err;

521 522 523
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
524 525 526 527 528 529 530
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}
531

T
Torsten Duwe 已提交
532
	if (list_empty(&rng_list)) {
533
		mutex_unlock(&rng_mutex);
T
Torsten Duwe 已提交
534 535
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
536 537
	} else
		mutex_unlock(&rng_mutex);
R
Rusty Russell 已提交
538

539
	wait_for_completion(&rng->cleanup_done);
540
}
541
EXPORT_SYMBOL_GPL(hwrng_unregister);
542

/* devres destructor: unregister the rng stored in the devres node. */
static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

/* devres match: true when the node holds @data (a struct hwrng *). */
static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

/*
 * Resource-managed hwrng_register(): @rng is automatically
 * unregistered when @dev is unbound.  Returns 0 or a negative errno.
 */
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

/* Undo devm_hwrng_register() explicitly, before @dev is unbound. */
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

585 586
static int __init hwrng_modinit(void)
{
587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606
	int ret = -ENOMEM;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = register_miscdev();
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
607 608 609
}

/*
 * Module exit: free the staging buffers and remove the chardev.
 * Every rng must already have been unregistered (current_rng NULL).
 */
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");