/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}
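
/*
 * Worked example (assumed values): with PAGE_SIZE 4096 and an offset that
 * already satisfies the alignmask, an entry of 1000 bytes starting at page
 * offset 4000 yields nbytes = min(1000, 4096 - 4000) = 96 on this pass;
 * the remaining 904 bytes are picked up when crypto_hash_walk_done()
 * advances the walk to the next page.
 */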

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may-sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
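
/*
 * Usage sketch (hypothetical caller, not part of this file): drivers
 * typically pair crypto_hash_walk_first() with crypto_hash_walk_done(),
 * feeding each mapped chunk to whatever consumes the data.  Here
 * process_chunk() is an assumed stand-in returning 0 or -errno.
 *
 *	static int example_update(struct ahash_request *req)
 *	{
 *		struct crypto_hash_walk walk;
 *		int nbytes;
 *
 *		for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *		     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *			nbytes = process_chunk(walk.data, nbytes);
 *
 *		return nbytes;
 *	}
 */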

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
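
/*
 * Caller's view, a minimal sketch (tfm, key and keylen are assumed to
 * exist): the key need not honour the algorithm's alignmask, since an
 * unaligned key is bounced through a kmalloc'd buffer by
 * ahash_setkey_unaligned() above.
 *
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *	if (err)
 *		return err;
 */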

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
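
/*
 * Worked example (assumed values): with an alignmask of 63 and
 * crypto_tfm_ctx_alignment() of 8, a 64-byte digest reserves
 * 64 + (63 & ~7) = 120 bytes, enough slack to carve an aligned result
 * pointer out of the allocation with PTR_ALIGN().
 */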

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as follows:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not back up req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the ADJUSTED request's private data (req->priv). */
	kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * of the ADJUSTED request from ahash_op_unaligned(); as it is a
	 * pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}
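
/*
 * Example of the dispatch above (assumed addresses): with an alignmask of
 * 63, a result buffer at 0x1010 fails the check (0x1010 & 63 == 0x10) and
 * takes the ahash_op_unaligned() bounce path, while a buffer at 0x1040 is
 * handed straight to op().
 */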

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
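
/*
 * End-to-end sketch (hypothetical, abbreviated error handling): hash one
 * scatterlist and wait for asynchronous completion.  "sha256", my_done()
 * and struct my_wait are illustrative assumptions, not part of this API;
 * my_done() must ignore -EINPROGRESS backlog notifications and store the
 * final error in w.err before completing w.done.
 *
 *	struct my_wait { struct completion done; int err; } w;
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	init_completion(&w.done);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, &w);
 *	ahash_request_set_crypt(req, sg, out_digest, nbytes);
 *
 *	err = crypto_ahash_digest(req);
 *	if (err == -EINPROGRESS || err == -EBUSY) {
 *		wait_for_completion(&w.done);
 *		err = w.err;
 *	}
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */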

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	return ahash_def_finup_finish1(req, err);
}
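
/*
 * Net effect: for a transform without ->finup, crypto_ahash_finup()
 * behaves like tfm->update(req) followed by tfm->final(req), with the
 * completion plumbing above keeping the two steps ordered even when
 * either one finishes asynchronously.
 */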

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
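
/*
 * Registration sketch (hypothetical driver; every "my_" name and the
 * constants are assumptions): a driver fills in struct ahash_alg and
 * registers it, and ahash_prepare_alg() above stamps cra_type and the
 * type flags.
 *
 *	static struct ahash_alg my_alg = {
 *		.init   = my_init,
 *		.update = my_update,
 *		.final  = my_final,
 *		.digest = my_digest,
 *		.halg   = {
 *			.digestsize = 32,
 *			.statesize  = sizeof(struct my_state),
 *			.base       = {
 *				.cra_name        = "sha256",
 *				.cra_driver_name = "sha256-mydriver",
 *				.cra_priority    = 300,
 *				.cra_flags       = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize   = 64,
 *				.cra_module      = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_alg);
 */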

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");