/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

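/*
 * Illustrative sketch only (not part of the original file): how a caller
 * typically drives the asynchronous hash API implemented here.  The
 * "sha256" algorithm name and the crypto_req_done()/crypto_wait_req()
 * helpers are assumptions about the surrounding kernel, not something
 * this file defines.
 */
static int __maybe_unused ahash_usage_example(struct scatterlist *sg,
					      unsigned int nbytes, u8 *out)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	/* Completion of an async operation is reported via this callback. */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, out, nbytes);

	/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into a sleep. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
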
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

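	/*
	 * An ahash walk (CRYPTO_ALG_ASYNC, set in crypto_ahash_walk_first())
	 * may hold the mapping across a sleep, so it needs a regular
	 * kmap(); shash walks can use the cheaper atomic mapping.
	 */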
	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
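	/*
	 * The scatterlist offset may point beyond the first page of the
	 * entry; fold the whole-page part into the page pointer below.
	 */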
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));

		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

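	/*
	 * CRYPTO_ALG_ASYNC doubles as a private "this is an ahash walk"
	 * marker in walk->flags; the BUILD_BUG_ON below guarantees it
	 * cannot collide with the request flags copied in above.
	 */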
	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

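	/*
	 * The caller's key may violate the alignment mask, so bounce it
	 * through a buffer over-allocated by alignmask bytes; an aligned
	 * copy is then guaranteed to fit.
	 */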
	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (err)
		return err;

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

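/*
 * kmalloc() already aligns to crypto_tfm_ctx_alignment(), so only the
 * part of @mask exceeding that alignment needs extra room.
 */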
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so that the hashing operation can
	 * store the result in an aligned buffer. We will call the modified
	 * request an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(); since it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

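/*
 * Common entry point for final/finup/digest: take the unaligned slow
 * path only when the caller's result buffer violates the algorithm's
 * alignment mask.
 */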
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

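/*
 * Keyed hashes (those not flagged CRYPTO_ALG_OPTIONAL_KEY) must have a
 * key installed before a digest may be computed.
 */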
int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

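/*
 * Default finup for drivers that only implement update and final:
 * chain the two operations, resuming in ahash_def_finup_done1/done2
 * when either step completes asynchronously.
 */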
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
			crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
	}

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
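
/*
 * Registration sketch (hypothetical driver-side code, not part of this
 * file): the mydrv_* names are assumptions for illustration only.
 *
 *	static struct ahash_alg mydrv_sha256 = {
 *		.init	= mydrv_init,
 *		.update	= mydrv_update,
 *		.final	= mydrv_final,
 *		.digest	= mydrv_digest,
 *		.halg	= {
 *			.digestsize = SHA256_DIGEST_SIZE,
 *			.statesize  = sizeof(struct mydrv_state),
 *			.base	= {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-mydrv",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = SHA256_BLOCK_SIZE,
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&mydrv_sha256);
 */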

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");