/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

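/*
 * Map the page currently under the walk and return the number of bytes
 * that may be processed from it.  If the offset is not aligned to the
 * algorithm's alignment mask, only enough bytes to reach the next
 * alignment boundary are returned.
 */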
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);

	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may-sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

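/*
 * A typical driver loop over these helpers looks like the sketch below;
 * process_block() and ctx are hypothetical stand-ins for the driver's
 * own block handler and state, and the second argument of
 * crypto_hash_walk_done() is the error from handling the previous
 * chunk (0 on success):
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, 0))
 *		process_block(ctx, walk.data, nbytes);
 */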
int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

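/*
 * Bounce an unaligned key through a temporary buffer sized so that a
 * copy aligned to the algorithm's alignment mask fits inside it.
 */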
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);

	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

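/* Set the key and, on success, clear the CRYPTO_TFM_NEED_KEY marker. */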
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (err)
		return err;

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

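/*
 * Divert the request's result pointer and completion callback into
 * private, aligned replacements; ahash_restore_req() undoes the swap
 * and copies the digest back into the caller's buffer.
 */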
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request; see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), and since
	 * that is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

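/*
 * Dispatch @op directly when the caller's result buffer satisfies the
 * algorithm's alignment mask, and through the unaligned fixup path
 * otherwise.
 */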
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

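/*
 * ahash_def_finup() and the helpers below implement ->finup() as an
 * update followed by a final for drivers that lack a combined
 * operation; the two completion callbacks chain the final stage off
 * the update when the request completes asynchronously.
 */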
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

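/*
 * Wire up a freshly allocated transform.  Algorithms of a different
 * cra_type (i.e. shash) are serviced via crypto_init_shash_ops_async()
 * instead of being filled in from the ahash_alg ops.
 */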
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
			crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
	}

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
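
/*
 * Example (a sketch only): a one-shot digest over a linear buffer,
 * waiting synchronously for completion.  "sha256", data and len are
 * assumptions of the sketch, and error handling is elided:
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct crypto_wait wait;
 *	struct scatterlist sg;
 *	u8 out[SHA256_DIGEST_SIZE];
 *	int err;
 *
 *	tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	crypto_init_wait(&wait);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */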

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");