/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/hw_random.txt for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"

static struct hwrng *current_rng;
31 32
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
T
Torsten Duwe 已提交
33
static struct task_struct *hwrng_fill;
34
/* list of registered rngs, sorted decending by quality */
35
static LIST_HEAD(rng_list);
36
/* Protects rng_list and current_rng */
37
static DEFINE_MUTEX(rng_mutex);
38 39
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
40
static int data_avail;
T
Torsten Duwe 已提交
41
static u8 *rng_buffer, *rng_fillbuf;
42 43
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */
T
Torsten Duwe 已提交
44 45 46 47

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
48 49 50
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");
T
Torsten Duwe 已提交
51

52
static void drop_current_rng(void);
53
static int hwrng_init(struct hwrng *rng);
T
Torsten Duwe 已提交
54
static void start_khwrngd(void);
55

56 57 58
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

59 60 61 62
static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}
63

64 65 66
static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;
67
	size_t size = min_t(size_t, 16, rng_buffer_size());
68

69
	mutex_lock(&reading_mutex);
70
	bytes_read = rng_get_data(rng, rng_buffer, size, 1);
71
	mutex_unlock(&reading_mutex);
72
	if (bytes_read > 0)
73
		add_device_randomness(rng_buffer, bytes_read);
74 75
}

76 77 78 79 80 81
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);
R
Rusty Russell 已提交
82

83
	complete(&rng->cleanup_done);
84 85
}

86
static int set_current_rng(struct hwrng *rng)
87
{
88 89
	int err;

90
	BUG_ON(!mutex_is_locked(&rng_mutex));
91 92 93 94 95

	err = hwrng_init(rng);
	if (err)
		return err;

96
	drop_current_rng();
97
	current_rng = rng;
98 99

	return 0;
100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140
}

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}

141
static int hwrng_init(struct hwrng *rng)
142
{
143 144 145
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

146 147 148 149 150 151 152
	if (rng->init) {
		int ret;

		ret =  rng->init(rng);
		if (ret)
			return ret;
	}
153 154 155 156 157

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
158
	add_early_randomness(rng);
T
Torsten Duwe 已提交
159

160
	current_quality = rng->quality ? : default_quality;
161 162
	if (current_quality > 1024)
		current_quality = 1024;
163 164 165

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
T
Torsten Duwe 已提交
166 167 168
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

169
	return 0;
170 171 172 173 174 175 176 177 178 179 180 181
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

182 183 184 185
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			int wait) {
	int present;

186
	BUG_ON(!mutex_is_locked(&reading_mutex));
187 188 189 190 191 192 193 194 195 196 197 198 199 200
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

201 202 203 204
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
205
	int err = 0;
206
	int bytes_read, len;
207
	struct hwrng *rng;
208 209

	while (size) {
210 211 212
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
213
			goto out;
214
		}
215
		if (!rng) {
216
			err = -ENODEV;
217
			goto out;
218
		}
219

220 221 222 223
		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
224
		if (!data_avail) {
225
			bytes_read = rng_get_data(rng, rng_buffer,
226
				rng_buffer_size(),
227 228 229
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
230
				goto out_unlock_reading;
231 232
			}
			data_avail = bytes_read;
233
		}
234

235 236 237
		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
238
				goto out_unlock_reading;
239 240 241 242 243 244 245 246 247 248 249
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
250
				goto out_unlock_reading;
251 252 253 254
			}

			size -= len;
			ret += len;
255 256
		}

257
		mutex_unlock(&reading_mutex);
258
		put_rng(rng);
259

260 261
		if (need_resched())
			schedule_timeout_interruptible(1);
262 263 264

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
265
			goto out;
266
		}
267 268 269
	}
out:
	return ret ? : err;
270

271 272
out_unlock_reading:
	mutex_unlock(&reading_mutex);
273
out_put:
274 275
	put_rng(rng);
	goto out;
276 277
}

278
static const struct file_operations rng_chrdev_ops = {
279 280 281
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
282
	.llseek		= noop_llseek,
283 284
};

285 286
static const struct attribute_group *rng_dev_groups[];

287
static struct miscdevice rng_miscdev = {
288
	.minor		= HWRNG_MINOR,
289
	.name		= RNG_MODULE_NAME,
290
	.nodename	= "hwrng",
291
	.fops		= &rng_chrdev_ops,
292
	.groups		= rng_dev_groups,
293 294
};

295 296
static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
297 298 299 300 301 302 303 304 305 306
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	err = -ENODEV;
	list_for_each_entry(rng, &rng_list, list) {
307
		if (sysfs_streq(rng->name, buf)) {
308
			err = 0;
309
			cur_rng_set_by_user = 1;
310 311
			if (rng != current_rng)
				err = set_current_rng(rng);
312 313 314 315 316 317 318 319
			break;
		}
	}
	mutex_unlock(&rng_mutex);

	return err ? : len;
}

320 321
static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
322 323 324
				       char *buf)
{
	ssize_t ret;
325
	struct hwrng *rng;
326

327 328 329 330 331 332
	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);
333 334 335 336

	return ret;
}

337 338
static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
339 340 341 342 343 344 345 346 347 348
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
349 350
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
351
	}
352
	strlcat(buf, "\n", PAGE_SIZE);
353 354
	mutex_unlock(&rng_mutex);

355
	return strlen(buf);
356 357
}

358 359 360 361 362 363 364
static ssize_t hwrng_attr_selected_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
}

365 366 367 368 369 370
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);
371 372 373
static DEVICE_ATTR(rng_selected, S_IRUGO,
		   hwrng_attr_selected_show,
		   NULL);
374

375 376 377
static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
378
	&dev_attr_rng_selected.attr,
379 380 381 382
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);
383

384
static void __exit unregister_miscdev(void)
385
{
386
	misc_deregister(&rng_miscdev);
387 388
}

389
static int __init register_miscdev(void)
390
{
391
	return misc_register(&rng_miscdev);
392 393
}

T
Torsten Duwe 已提交
394 395 396 397 398
static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
399 400 401 402
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
T
Torsten Duwe 已提交
403
			break;
404
		mutex_lock(&reading_mutex);
405
		rc = rng_get_data(rng, rng_fillbuf,
T
Torsten Duwe 已提交
406
				  rng_buffer_size(), 1);
407
		mutex_unlock(&reading_mutex);
408
		put_rng(rng);
T
Torsten Duwe 已提交
409 410 411 412 413
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
414
		/* Outside lock, sure, but y'know: randomness. */
T
Torsten Duwe 已提交
415
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
416
					   rc * current_quality * 8 >> 10);
T
Torsten Duwe 已提交
417
	}
418
	hwrng_fill = NULL;
T
Torsten Duwe 已提交
419 420 421 422 423 424
	return 0;
}

static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
425
	if (IS_ERR(hwrng_fill)) {
426
		pr_err("hwrng_fill thread creation failed\n");
T
Torsten Duwe 已提交
427 428 429 430
		hwrng_fill = NULL;
	}
}

431 432 433 434
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;
435
	struct list_head *rng_list_ptr;
436

437
	if (!rng->name || (!rng->data_read && !rng->read))
438 439 440 441 442 443 444 445 446 447
		goto out;

	mutex_lock(&rng_mutex);
	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

448 449 450
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

451 452 453 454 455 456 457 458
	/* rng_list is sorted by decreasing quality */
	list_for_each(rng_list_ptr, &rng_list) {
		tmp = list_entry(rng_list_ptr, struct hwrng, list);
		if (tmp->quality < rng->quality)
			break;
	}
	list_add_tail(&rng->list, rng_list_ptr);

459
	old_rng = current_rng;
R
Rusty Russell 已提交
460
	err = 0;
461 462
	if (!old_rng ||
	    (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
463 464
		/*
		 * Set new rng as current as the new rng source
465 466
		 * provides better entropy quality and was not
		 * chosen by userspace.
467
		 */
468
		err = set_current_rng(rng);
469 470 471
		if (err)
			goto out_unlock;
	}
472

473 474 475 476 477 478 479 480 481 482 483
	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system.  If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

484 485 486 487 488 489 490
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

491
void hwrng_unregister(struct hwrng *rng)
492 493 494 495 496
{
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
497
		drop_current_rng();
498
		cur_rng_set_by_user = 0;
499
		/* rng_list is sorted by quality, use the best (=first) one */
500
		if (!list_empty(&rng_list)) {
501
			struct hwrng *new_rng;
502

503 504
			new_rng = list_entry(rng_list.next, struct hwrng, list);
			set_current_rng(new_rng);
505 506
		}
	}
507

T
Torsten Duwe 已提交
508
	if (list_empty(&rng_list)) {
509
		mutex_unlock(&rng_mutex);
T
Torsten Duwe 已提交
510 511
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
512 513
	} else
		mutex_unlock(&rng_mutex);
R
Rusty Russell 已提交
514

515
	wait_for_completion(&rng->cleanup_done);
516
}
517
EXPORT_SYMBOL_GPL(hwrng_unregister);
518

D
Dmitry Torokhov 已提交
519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560
static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

561 562
static int __init hwrng_modinit(void)
{
563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582
	int ret = -ENOMEM;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = register_miscdev();
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
583 584 585
}

static void __exit hwrng_modexit(void)
586 587 588 589
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
T
Torsten Duwe 已提交
590
	kfree(rng_fillbuf);
591
	mutex_unlock(&rng_mutex);
592 593

	unregister_miscdev();
594 595
}

596 597
module_init(hwrng_modinit);
module_exit(hwrng_modexit);
598 599 600

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");