// SPDX-License-Identifier: GPL-2.0+
/*
 *  zcrypt 2.1.0
 *
 *  Copyright IBM Corp. 2001, 2012
 *  Author(s): Robert Burroughs
 *	       Eric Rossman (edrossma@us.ibm.com)
 *	       Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
 *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
int zcrypt_device_count;

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);
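/*
 * zcrypt_rescan_req above is raised outside of this file (in the zcrypt
 * error path) whenever a failed request hints that the AP configuration
 * may have changed; zcrypt_process_rescan() below consumes the flag and
 * forces a bus rescan before the request is retried.
 */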

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/**
 * Process a rescan of the transport layer.
 *
 * Returns 1 if the rescan has been processed, otherwise 0.
 */
static inline int zcrypt_process_rescan(void)
{
	if (atomic_read(&zcrypt_rescan_req)) {
		atomic_set(&zcrypt_rescan_req, 0);
		atomic_inc(&zcrypt_rescan_count);
		ap_bus_force_rescan();
		ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
			   atomic_inc_return(&zcrypt_rescan_count));
		return 1;
	}
	return 0;
}

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if ((zops->variant == variant) &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);

/**
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return nonseekable_open(inode, filp);
}

/**
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}

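/*
 * zcrypt_pick_queue() pins a queue for the duration of one request: it
 * takes a reference on the owning driver module and the AP device,
 * charges the request's weight to the card and queue load counters and
 * bumps the queue's request counter. zcrypt_drop_queue() undoes all of
 * this once the request has been processed. Both are called under
 * zcrypt_list_lock by the dispatch functions below.
 */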
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
		return NULL;
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     unsigned int weight)
{
	struct module *mod = zq->queue->ap_dev.drv->driver.owner;

	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	module_put(mod);
}

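/*
 * The compare helpers return true if the candidate device is considered
 * worse (i.e. has a higher projected load) than the currently preferred
 * one, in which case the selection loops skip it. On equal weight the
 * device with the higher lifetime request count loses, which spreads
 * requests evenly over equally fast devices.
 */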
static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned weight, unsigned pref_weight)
{
	if (!pref_zc)
		return false;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic_read(&zc->card->total_request_count) >
			atomic_read(&pref_zc->card->total_request_count);
	return weight > pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned weight, unsigned pref_weight)
{
	if (!pref_zq)
		return false;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count >
			pref_zq->queue->total_request_count;
	return weight > pref_weight;
}

/*
 * zcrypt ioctls.
 */
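/*
 * All dispatchers below share the same pattern: walk all cards and
 * queues under zcrypt_list_lock, skip devices that are offline or
 * cannot handle the request, remember the least loaded eligible
 * card/queue pair, pin it via zcrypt_pick_queue() and then issue the
 * request with the list lock dropped.
 */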
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	if (mex->outputdatalength < mex->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* get weight index of the card device	*/
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	if (crt->outputdatalength < crt->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* get weight index of the card device	*/
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

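/*
 * zcrypt_send_cprb() dispatches a CCA CPRB. The caller may either pin
 * the request to a specific card and/or domain or pass AUTOSELECT; in
 * the latter case the domain of the queue that was picked is written
 * back through *domain so the message is built for a concrete target.
 */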
long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	unsigned short *domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);

	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* Check for user selected CCA card */
		if (xcRB->user_defined != AUTOSELECT &&
		    xcRB->user_defined != zc->card->id)
			continue;
		/* get weight index of the card device	*/
		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_cprb ||
			    ((*domain != (unsigned short) AUTOSELECT) &&
			     (*domain != AP_QID_QUEUE(zq->queue->qid))))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == (unsigned short) AUTOSELECT)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(xcRB, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);

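/*
 * For EP11 CPRBs the user may pass an explicit list of target
 * adapter/domain pairs. An empty list means autoselect. The two
 * helpers below check whether a given card or queue is on that list.
 */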
static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (dev_id == targets->ap_id)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
			return true;
		targets++;
	}
	return false;
}

static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets;
	unsigned short target_num;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	target_num = (unsigned short) xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	targets = NULL;
	if (target_num != 0) {
		struct ep11_target_dev __user *uptr;

		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
		if (!targets) {
			rc = -ENOMEM;
			goto out;
		}

		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
		if (copy_from_user(targets, uptr,
				   target_num * sizeof(*targets))) {
			rc = -EFAULT;
			goto out_free;
		}
	}

	rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
	if (rc)
		goto out_free;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online EP11 cards */
		if (!zc->online || !(zc->card->functions & 0x04000000))
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* get weight index of the card device	*/
		weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_ep11_cprb ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

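/*
 * zcrypt_rng() fetches a buffer of hardware random bytes by sending a
 * CPRB to the least loaded online CCA card; it backs the hwrng device
 * registered in zcrypt_rng_device_add() below.
 */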
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	rc = get_rng_fc(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* get weight index of the card device	*/
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rng)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq)
		return -ENODEV;

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

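/*
 * The status helpers below snapshot the device matrix for the status
 * ioctls. Entries are indexed by card * AP_DOMAINS + queue, so each
 * possible (card, domain) pair has a fixed slot in the array handed in
 * by the caller.
 */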
static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
	       * sizeof(struct zcrypt_device_status_ext));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);

static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(reqcnt, 0, sizeof(int) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			reqcnt[card] = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}

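/*
 * The request ioctls below all retry in the same way: the request is
 * repeated as long as it fails with -EAGAIN; if it fails with -ENODEV
 * and a transport layer rescan has been requested, the rescan is
 * processed and the request is retried once more.
 */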
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc = 0;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;
		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(&mex);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_modexpo(&mex);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
			return rc;
		}
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;
		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(&crt);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_crt(&crt);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
			return rc;
		}
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case ZSECSENDCPRB: {
		struct ica_xcRB __user *uxcRB = (void __user *) arg;
		struct ica_xcRB xcRB;
		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
			return -EFAULT;
		do {
			rc = zcrypt_send_cprb(&xcRB);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_cprb(&xcRB);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSECSENDCPRB rc=%d\n", rc);
		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
			return -EFAULT;
		return rc;
	}
	case ZSENDEP11CPRB: {
		struct ep11_urb __user *uxcrb = (void __user *)arg;
		struct ep11_urb xcrb;
		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
			return -EFAULT;
		do {
			rc = zcrypt_send_ep11_cprb(&xcrb);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_ep11_cprb(&xcrb);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
			return -EFAULT;
		return rc;
	}
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		int *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(int) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		int reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
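/*
 * A 32 bit compat task lays out the ioctl structures with 32 bit
 * pointers, so each structure is redeclared here with compat_uptr_t
 * members, copied in, widened with compat_ptr() and handed to the
 * regular 64 bit handlers.
 */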
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};

static long trans_modexpo32(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(&mex64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_modexpo(&mex64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}

struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};

static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(&crt64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_crt(&crt64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}

struct compat_ica_xcRB {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof (compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof (compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof (compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof (compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
} __attribute__((packed));

static long trans_xcRB32(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
	struct compat_ica_xcRB xcRB32;
	struct ica_xcRB xcRB64;
	long rc;

	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
		return -EFAULT;
	xcRB64.agent_ID = xcRB32.agent_ID;
	xcRB64.user_defined = xcRB32.user_defined;
	xcRB64.request_ID = xcRB32.request_ID;
	xcRB64.request_control_blk_length =
		xcRB32.request_control_blk_length;
	xcRB64.request_control_blk_addr =
		compat_ptr(xcRB32.request_control_blk_addr);
	xcRB64.request_data_length =
		xcRB32.request_data_length;
	xcRB64.request_data_address =
		compat_ptr(xcRB32.request_data_address);
	xcRB64.reply_control_blk_length =
		xcRB32.reply_control_blk_length;
	xcRB64.reply_control_blk_addr =
		compat_ptr(xcRB32.reply_control_blk_addr);
	xcRB64.reply_data_length = xcRB32.reply_data_length;
	xcRB64.reply_data_addr =
		compat_ptr(xcRB32.reply_data_addr);
	xcRB64.priority_window = xcRB32.priority_window;
	xcRB64.status = xcRB32.status;
	do {
		rc = zcrypt_send_cprb(&xcRB64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_send_cprb(&xcRB64);
		} while (rc == -EAGAIN);
	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
	xcRB32.reply_data_length = xcRB64.reply_data_length;
	xcRB32.status = xcRB64.status;
	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
		return -EFAULT;
	return rc;
}

static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcRB32(filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
	.llseek		= no_llseek,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	    = MISC_DYNAMIC_MINOR,
	.name	    = "z90crypt",
	.fops	    = &zcrypt_fops,
};

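/*
 * Userspace talks to this API through the z90crypt misc device, e.g.
 * (minimal sketch, error handling omitted):
 *
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	struct ica_rsa_modexpo mex = { ... key and data pointers ... };
 *
 *	ioctl(fd, ICARSAMODEXPO, &mex);
 */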
static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

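/*
 * hwrng backend: zcrypt_rng_buffer caches one page of entropy fetched
 * via zcrypt_rng() and hands it out one u32 per data_read call. The
 * hwrng core serializes those calls, so no extra locking is needed
 * (see the comment in zcrypt_rng_data_read() below).
 */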
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof *data;
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof *data;
}

static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};

int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		if (!zcrypt_hwrng_seed)
			zcrypt_rng_dev.quality = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else
		zcrypt_rng_device_count++;
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long) zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long) zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}

int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}

/**
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();
	return 0;

out:
	return rc;
}

/**
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);