/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

static void cryptd_queue_worker(struct work_struct *work);

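/*
 * Allocate and initialise one request queue and one work item per
 * possible CPU; requests are later queued on the CPU that submits them.
 */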
static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

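/*
 * Queue a request on the current CPU's queue and kick that CPU's
 * worker.  get_cpu()/put_cpu() pin us to one CPU so the queue we
 * enqueue on and the queue_work_on() target stay consistent.
 */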
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/* Called in workqueue context: perform one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to
 * do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/* Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable is used to prevent
	 * being preempted by cryptd_enqueue_request(). */
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

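/*
 * Run the synchronous child blkcipher in process context, then invoke
 * the caller's original completion with bottom halves disabled.
 */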
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

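/*
 * Save the caller's completion and substitute the cryptd callback,
 * then push the request onto the per-CPU queue; the worker restores
 * and invokes the original completion once the real work is done.
 */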
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

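/*
 * Allocate a crypto instance for the "cryptd" template, with @head
 * bytes in front of it and @tail bytes of context behind it, and fill
 * in the common algorithm fields derived from @alg.
 */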
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

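/*
 * Instantiate "cryptd(<blkcipher>)": wrap a synchronous blkcipher in
 * an asynchronous ablkcipher whose operations merely queue work.
 */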
static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx   = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

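/*
 * The worker-side hash operations below run the synchronous shash
 * child and then call the saved completion with bottom halves off.
 * err == -EINPROGRESS means this is only a backlog notification, so
 * the real operation is skipped and the caller is merely notified.
 */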
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

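/*
 * Instantiate "cryptd(<shash>)": wrap a synchronous shash in an
 * asynchronous ahash whose operations are deferred to the workqueue.
 */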
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct cryptd_queue queue;

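/*
 * A single global cryptd queue shared by all instances; the template
 * dispatches on the requested algorithm type.
 */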
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	}

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

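/*
 * The "cryptd" template itself; instances such as "cryptd(cbc(aes))"
 * are created on demand by cryptd_create().
 */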
static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
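/*
 * Illustrative sketch only (not part of the original file): how a
 * caller might use the helpers exported above.  The algorithm name
 * "cbc(aes)" and the function example_cryptd_user() are hypothetical;
 * real users pass the name of a synchronous blkcipher to wrap.
 */
#if 0
static int example_cryptd_user(void)
{
	struct cryptd_ablkcipher *ctfm;
	struct crypto_blkcipher *child;

	/* Allocates the async wrapper "cryptd(cbc(aes))". */
	ctfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	/* The wrapped synchronous tfm, e.g. for direct use of the child. */
	child = cryptd_ablkcipher_child(ctfm);

	cryptd_free_ablkcipher(ctfm);
	return 0;
}
#endif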

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");