/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	/*
	 * Async walks may hold the mapping across context switches, so
	 * they need kmap() rather than kmap_atomic().
	 */
	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may-sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
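
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver's ->update() handler typically consumes the request's
 * scatterlist with the walk helpers above.  my_hw_feed() stands in for
 * whatever feeds data to the hardware:
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, 0))
 *		my_hw_feed(ctx, walk.data, nbytes);
 */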

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
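
/*
 * Example (hypothetical caller): keyed hashes such as "hmac(sha256)"
 * need a key before any digest operation.  The unaligned-key case is
 * handled by ahash_setkey_unaligned() above, so callers may pass the
 * key buffer as-is:
 *
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *	if (err)
 *		return err;
 */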

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	/*
	 * WARNING: We do not back up req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its contents!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;

	/* Restore the original crypto request. */
	req->result = priv->result;
	req->base.complete = priv->complete;
	req->base.data = priv->data;
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req" .
	 */

	/* First copy req->result into req->priv.result */
	ahash_op_unaligned_finish(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

/*
 * Dispatch helper for final/finup/digest: run @op directly when the
 * result buffer is suitably aligned, otherwise take the bounce-buffer
 * path above.
 */
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
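
/*
 * Usage sketch (hypothetical caller, error handling abbreviated).  All
 * three entry points above may complete asynchronously: they then
 * return -EINPROGRESS (or -EBUSY when backlogged) and report the final
 * status through the request's completion callback.  my_done and
 * my_ctx are assumed placeholders:
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, my_ctx);
 *	ahash_request_set_crypt(req, sgl, digest, nbytes);
 *	err = crypto_ahash_digest(req);
 *
 * If err is -EINPROGRESS or -EBUSY, my_done() later delivers the
 * final status.
 */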

/*
 * The default ->finup() below is implemented as ->update() followed by
 * ->final().  Either step may complete asynchronously, so completion is
 * split across the done1/finish1 (after update) and done2/finish2
 * (after final) helpers.
 */
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	ahash_def_finup_finish2(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	err = ahash_def_finup_finish1(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
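
/*
 * Allocation sketch (hypothetical caller, assumes a "sha256" ahash
 * implementation is available; a type and mask of 0 accept any
 * implementation):
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *
 *	tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	...
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */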

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
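
/*
 * Registration sketch (hypothetical driver, field values illustrative
 * only).  Note that ahash_prepare_alg() requires a non-zero
 * ->statesize of at most PAGE_SIZE / 8:
 *
 *	static struct ahash_alg my_sha256_alg = {
 *		.init	= my_init,
 *		.update	= my_update,
 *		.final	= my_final,
 *		.digest	= my_digest,
 *		.halg	= {
 *			.digestsize = SHA256_DIGEST_SIZE,
 *			.statesize  = sizeof(struct my_hash_state),
 *			.base = {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-mydev",
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = SHA256_BLOCK_SIZE,
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_sha256_alg);
 */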

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");