// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * File: ima_crypto.c
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
 */

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

#include "ima.h"

/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");

/* default is 0, i.e. a single page */
static int ima_maxorder;
static unsigned int ima_bufsize = PAGE_SIZE;

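/*
 * Parse a human-readable size (as accepted by memparse()) and round it
 * up to a whole power-of-two number of pages.
 */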
static int param_set_bufsize(const char *val, const struct kernel_param *kp)
{
	unsigned long long size;
	int order;

	size = memparse(val, NULL);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return -EINVAL;
	ima_maxorder = order;
	ima_bufsize = PAGE_SIZE << order;
	return 0;
}

static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");

static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

int __init ima_init_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n",
		       hash_algo_name[ima_hash_algo], rc);
		return rc;
	}
	pr_info("Allocated hash algorithm: %s\n",
		hash_algo_name[ima_hash_algo]);
	return 0;
}

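/*
 * Return an shash tfm for @algo, reusing the preallocated default tfm
 * when @algo matches ima_hash_algo, otherwise allocating a new one.
 */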
static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
	struct crypto_shash *tfm = ima_shash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo) {
		tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
		if (IS_ERR(tfm)) {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_tfm(struct crypto_shash *tfm)
{
	if (tfm != ima_shash_tfm)
		crypto_free_shash(tfm);
}

/**
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size:       Maximum amount of memory to allocate.
 * @allocated_size: Returned size of actual allocation.
 * @last_warn:      Should the final, zero-order allocation warn or not.
 *
 * Tries to allocate memory opportunistically: starts at the largest
 * allowed page order and halves the request until a zero-order (single
 * page) allocation succeeds or is reached. Allocations are attempted
 * without generating allocation warnings unless last_warn is set, and
 * last_warn only affects the final zero-order allocation.
 *
 * By default, ima_maxorder is 0 and this is equivalent to kmalloc(GFP_KERNEL).
 *
 * Return pointer to allocated memory, or NULL on failure.
 */
static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
			     int last_warn)
{
	void *ptr;
	int order = ima_maxorder;
	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;

	if (order)
		order = min(get_order(max_size), order);

	for (; order; order--) {
		ptr = (void *)__get_free_pages(gfp_mask, order);
		if (ptr) {
			*allocated_size = PAGE_SIZE << order;
			return ptr;
		}
	}

	/* order is zero - one page */

	gfp_mask = GFP_KERNEL;

	if (!last_warn)
		gfp_mask |= __GFP_NOWARN;

	ptr = (void *)__get_free_pages(gfp_mask, 0);
	if (ptr) {
		*allocated_size = PAGE_SIZE;
		return ptr;
	}

	*allocated_size = 0;
	return NULL;
}

/**
 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 * @ptr:  Pointer to allocated pages.
 * @size: Size of allocated buffer.
 */
static void ima_free_pages(void *ptr, size_t size)
{
	if (!ptr)
		return;
	free_pages((unsigned long)ptr, get_order(size));
}

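/*
 * Like ima_alloc_tfm(), but for the async hash API.  The default ahash
 * tfm is allocated lazily on first use and cached in ima_ahash_tfm.
 */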
static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
{
	struct crypto_ahash *tfm = ima_ahash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo || !tfm) {
		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
		if (!IS_ERR(tfm)) {
			if (algo == ima_hash_algo)
				ima_ahash_tfm = tfm;
		} else {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_atfm(struct crypto_ahash *tfm)
{
	if (tfm != ima_ahash_tfm)
		crypto_free_ahash(tfm);
}

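/* Wait for the completion of an async hash request and log any failure. */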
static inline int ahash_wait(int err, struct crypto_wait *wait)
{
	err = crypto_wait_req(err, wait);

	if (err)
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);

	return err;
}

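/*
 * Calculate the file hash using the async hash API.  When a second
 * buffer can be allocated, reading the next file chunk is overlapped
 * with hashing the previous one.
 */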
static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct crypto_wait wait;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate maximum size of memory.
	 * Fail if even a single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate a secondary buffer. If that fails, fall back
		 * to single buffering. Use the previous allocation size as a
		 * baseline for the possible allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/* Not using two buffers, and this is not the first
			 * read/request; wait for the previous ahash_update()
			 * request to complete.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len) {
			if (rc >= 0)
				rc = -EINVAL;
			/*
			 * Forward current rc, do not overwrite with return value
			 * from ahash_wait()
			 */
			ahash_wait(ahash_rc, &wait);
			goto out3;
		}

		if (rbuf[1] && offset) {
			/* Using two buffers, and this is not the first
			 * read/request; wait for the previous ahash_update()
			 * request to complete.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
out3:
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out1:
	ahash_request_free(req);
	return rc;
}

static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_atfm(file, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

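/*
 * Calculate the file hash using the synchronous hash API, reading the
 * file one page at a time.
 */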
static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0) {	/* unexpected EOF */
			rc = -EINVAL;
			break;
		}
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_tfm(file, hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

/*
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation.  If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;
	struct file *f = file;
	bool new_file_instance = false, modified_flags = false;

	/*
	 * For consistency, fail files opened with the O_DIRECT flag on
	 * filesystems mounted with/without DAX option.
	 */
	if (file->f_flags & O_DIRECT) {
		hash->length = hash_digest_size[ima_hash_algo];
		hash->algo = ima_hash_algo;
		return -EINVAL;
	}

	/* Open a new file instance in O_RDONLY if we cannot read */
	if (!(file->f_mode & FMODE_READ)) {
		int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
				O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
		flags |= O_RDONLY;
		f = dentry_open(&file->f_path, flags, file->f_cred);
		if (IS_ERR(f)) {
			/*
			 * Cannot open the file again; modify the f_mode of
			 * the original and continue.
			 */
			pr_info_ratelimited("Unable to reopen file for reading.\n");
			f = file;
			f->f_mode |= FMODE_READ;
			modified_flags = true;
		} else {
			new_file_instance = true;
		}
	}

	i_size = i_size_read(file_inode(f));

	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(f, hash);
		if (!rc)
			goto out;
	}

	rc = ima_calc_file_shash(f, hash);
out:
	if (new_file_instance)
		fput(f);
	else if (modified_flags)
		f->f_mode &= ~FMODE_READ;
	return rc;
}

/*
 * Calculate the hash of template data
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_desc *td,
					 int num_fields,
					 struct ima_digest_data *hash,
					 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	int rc, i;

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash =
		    !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);

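		/*
		 * The original "ima" template hashes the event name
		 * zero-padded to a fixed length and omits the field
		 * lengths; other templates hash each field's length
		 * before its data.
		 */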
		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);

	return rc;
}

int ima_calc_field_array_hash(struct ima_field_data *field_data,
			      struct ima_template_desc *desc, int num_fields,
			      struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
					   hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

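/* Calculate the hash of an in-memory buffer using the async hash API. */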
static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);

	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out:
	ahash_request_free(req);
	return rc;
}

static int calc_buffer_ahash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

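/*
 * Calculate the hash of an in-memory buffer using the synchronous hash
 * API, feeding the buffer to the tfm in page-sized chunks.
 */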
static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				struct ima_digest_data *hash,
				struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	unsigned int len;
	int rc;

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	while (size) {
		len = size < PAGE_SIZE ? size : PAGE_SIZE;
		rc = crypto_shash_update(shash, buf, len);
		if (rc)
			break;
		buf += len;
		size -= len;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int calc_buffer_shash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

	ima_free_tfm(tfm);
	return rc;
}

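/*
 * Calculate the hash of an in-memory buffer, preferring ahash for
 * buffers of at least ima.ahash_minsize bytes and falling back to
 * shash on failure.
 */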
int ima_calc_buffer_hash(const void *buf, loff_t len,
			 struct ima_digest_data *hash)
{
	int rc;

	if (ima_ahash_minsize && len >= ima_ahash_minsize) {
		rc = calc_buffer_ahash(buf, len, hash);
		if (!rc)
			return 0;
	}

	return calc_buffer_shash(buf, len, hash);
}

static void __init ima_pcrread(u32 idx, struct tpm_digest *d)
{
	if (!ima_tpm_chip)
		return;

	if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
		pr_err("Error Communicating to TPM chip\n");
}

/*
 * The boot_aggregate is a cumulative hash over TPM registers 0 - 7.  With
 * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
 * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
 * allowing firmware to configure and enable different banks.
 *
 * Knowing which TPM bank is read to calculate the boot_aggregate digest
 * needs to be conveyed to a verifier.  For this reason, use the same
 * hash algorithm for reading the TPM PCRs as for calculating the boot
 * aggregate digest as stored in the measurement list.
 */
static int __init ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
					      struct crypto_shash *tfm)
{
	struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
	int rc;
	u32 i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
		 d.alg_id);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative digest over TPM registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, &d);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, d.digest,
					 crypto_shash_digestsize(tfm));
	}
	if (!rc)
		rc = crypto_shash_final(shash, digest);
	return rc;
}

int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	u16 crypto_id, alg_id;
	int rc, i, bank_idx = -1;

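	/*
	 * Select the TPM bank for the boot aggregate: prefer a bank
	 * matching the requested algorithm, then SHA256, then SHA1.
	 */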
	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
		if (crypto_id == hash->algo) {
			bank_idx = i;
			break;
		}

		if (crypto_id == HASH_ALGO_SHA256)
			bank_idx = i;

		if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
			bank_idx = i;
	}

	if (bank_idx == -1) {
		pr_err("No suitable TPM algorithm for boot aggregate\n");
		return 0;
	}

	hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hash->length = crypto_shash_digestsize(tfm);
	alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
	rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);

	ima_free_tfm(tfm);

	return rc;
}